v-e-n-o-m committed on
Commit
e859b2f
·
1 Parent(s): ce0204b
Files changed (2) hide show
  1. Dockerfile +7 -12
  2. app.py +9 -6
Dockerfile CHANGED
@@ -1,31 +1,26 @@
1
  FROM python:3.10-slim
2
 
3
  # Install system dependencies including ffmpeg
4
- RUN apt-get update && apt-get install -y \
5
- libsndfile1 \
6
- ffmpeg \
7
- && rm -rf /var/lib/apt/lists/*
8
 
9
  # Set working directory
10
  WORKDIR /app
11
 
12
  # Create cache and logs directories
13
- RUN mkdir -p /app/cache /app/logs && \
14
- chown -R 1000:1000 /app/cache /app/logs && \
15
- chmod -R 775 /app/cache /app/logs
16
 
17
  # Copy requirements and install
18
  COPY requirements.txt .
19
  RUN pip install --no-cache-dir -r requirements.txt
20
 
21
  # Copy application code
22
- COPY app.py .
23
 
24
  # Set environment variable for Hugging Face cache
25
  ENV HF_HOME=/app/cache
26
 
27
- # Expose default port (can be overridden by PORT env var)
28
- EXPOSE 8000
29
 
30
- # Run the FastAPI app with uvicorn, using PORT env var
31
- CMD ["sh", "-c", "uvicorn app:app --host 0.0.0.0 --port ${PORT:-8000} --workers 1"]
 
1
  FROM python:3.10-slim
2
 
3
  # Install system dependencies including ffmpeg
4
+ RUN apt-get update && apt-get install -y ffmpeg libsndfile1 && apt-get clean
 
 
 
5
 
6
  # Set working directory
7
  WORKDIR /app
8
 
9
  # Create cache and logs directories
10
+ RUN mkdir -p /app/cache /app/logs && chmod -R 777 /app/cache /app/logs
 
 
11
 
12
  # Copy requirements and install
13
  COPY requirements.txt .
14
  RUN pip install --no-cache-dir -r requirements.txt
15
 
16
  # Copy application code
17
+ COPY . .
18
 
19
  # Set environment variable for Hugging Face cache
20
  ENV HF_HOME=/app/cache
21
 
22
+ # Expose Hugging Face Spaces port
23
+ EXPOSE 7860
24
 
25
+ # Run the FastAPI app with uvicorn
26
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py CHANGED
@@ -10,12 +10,15 @@ import subprocess
10
  import tempfile
11
  from contextlib import contextmanager
12
 
 
 
 
13
  # Configure logging
14
  logging.basicConfig(
15
  level=logging.INFO,
16
  format="%(asctime)s [%(levelname)s] %(message)s",
17
  handlers=[
18
- logging.FileHandler("/app/logs/app.log"),
19
  logging.StreamHandler(sys.stdout)
20
  ]
21
  )
@@ -33,9 +36,9 @@ except ImportError as e:
33
  raise
34
 
35
  # Set up cache directory
36
- os.makedirs("/app/cache", exist_ok=True)
37
- os.environ["HF_HOME"] = "/app/cache"
38
- logger.info(f"Set HF_HOME to /app/cache")
39
 
40
  # Initialize FastAPI app
41
  app = FastAPI(title="Quran Transcription API")
@@ -55,7 +58,7 @@ async def debug():
55
  "model_loaded": model is not None,
56
  "pipeline_initialized": asr is not None,
57
  "cache_dir": os.getenv("HF_HOME"),
58
- "port": os.getenv("PORT", "8000")
59
  }
60
 
61
  # Load model and processor
@@ -156,7 +159,7 @@ async def startup_event():
156
  logger.info(f"Memory allocated: {torch.cuda.memory_allocated() if torch.cuda.is_available() else 'N/A'}")
157
 
158
  if __name__ == "__main__":
159
- port = int(os.getenv("PORT", 8000)) # Use PORT env var or default to 8000
160
  logger.info(f"Starting Uvicorn server on port {port}")
161
  uvicorn.run(
162
  "app:app",
 
10
  import tempfile
11
  from contextlib import contextmanager
12
 
13
+ # Create logs directory
14
+ os.makedirs("./logs", exist_ok=True)
15
+
16
  # Configure logging
17
  logging.basicConfig(
18
  level=logging.INFO,
19
  format="%(asctime)s [%(levelname)s] %(message)s",
20
  handlers=[
21
+ logging.FileHandler("./logs/app.log"),
22
  logging.StreamHandler(sys.stdout)
23
  ]
24
  )
 
36
  raise
37
 
38
  # Set up cache directory
39
+ os.makedirs("./cache", exist_ok=True)
40
+ os.environ["HF_HOME"] = "./cache"
41
+ logger.info(f"Set HF_HOME to ./cache")
42
 
43
  # Initialize FastAPI app
44
  app = FastAPI(title="Quran Transcription API")
 
58
  "model_loaded": model is not None,
59
  "pipeline_initialized": asr is not None,
60
  "cache_dir": os.getenv("HF_HOME"),
61
+ "port": os.getenv("PORT", "7860")
62
  }
63
 
64
  # Load model and processor
 
159
  logger.info(f"Memory allocated: {torch.cuda.memory_allocated() if torch.cuda.is_available() else 'N/A'}")
160
 
161
  if __name__ == "__main__":
162
+ port = int(os.getenv("PORT", 7860)) # Use PORT env var or default to 7860
163
  logger.info(f"Starting Uvicorn server on port {port}")
164
  uvicorn.run(
165
  "app:app",