# llama-omni/docker-compose.yml
# Note: the top-level "version" key is ignored by Docker Compose v2, but is kept for older clients.
version: '3'

services:
  llama-omni:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "7860:7860"
    volumes:
      # Mount local model weights into the container
      - ./models:/app/models
    environment:
      # Serve the Gradio UI on all interfaces so it is reachable from outside the container
      - GRADIO_SERVER_NAME=0.0.0.0
      - GRADIO_SERVER_PORT=7860
    deploy:
      resources:
        reservations:
          devices:
            # Reserve one NVIDIA GPU (requires the NVIDIA Container Toolkit on the host)
            - driver: nvidia
              count: 1
              capabilities: [gpu]
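
# A minimal usage sketch, assuming Docker Compose v2 and the NVIDIA Container
# Toolkit are installed on the host; the ./models directory name follows the
# volume mount above:
#
#   mkdir -p models               # place model weights here before starting
#   docker compose up --build     # build the image and start the service
#
# The Gradio UI should then be reachable at http://localhost:7860.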