v-e-n-o-m committed on
Commit
662c180
·
1 Parent(s): 7ded65d
Files changed (3) hide show
  1. Dockerfile +3 -3
  2. app.py +32 -4
  3. requirements.txt +5 -5
Dockerfile CHANGED
@@ -21,11 +21,11 @@ RUN pip install --no-cache-dir -r requirements.txt
21
  # Copy application code
22
  COPY app.py .
23
 
24
- # Set environment variable for transformers cache
25
- ENV TRANSFORMERS_CACHE=/app/cache
26
 
27
  # Expose port
28
  EXPOSE 8000
29
 
30
  # Run the FastAPI app with uvicorn
31
- CMD ["uvicorn", "app.py:app", "--host", "0.0.0.0", "--port", "8000"]
 
21
  # Copy application code
22
  COPY app.py .
23
 
24
+ # Set environment variable for Hugging Face cache
25
+ ENV HF_HOME=/app/cache
26
 
27
  # Expose port
28
  EXPOSE 8000
29
 
30
  # Run the FastAPI app with uvicorn
31
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
app.py CHANGED
@@ -1,5 +1,7 @@
1
  import logging
2
  import os
 
 
3
  from fastapi import FastAPI, File, UploadFile, HTTPException
4
  from transformers import pipeline, WhisperForConditionalGeneration, WhisperProcessor
5
  import torch
@@ -14,15 +16,32 @@ logging.basicConfig(
14
  format="%(asctime)s [%(levelname)s] %(message)s",
15
  handlers=[
16
  logging.FileHandler("/app/logs/app.log"),
17
- logging.StreamHandler()
18
  ]
19
  )
20
  logger = logging.getLogger(__name__)
21
 
 
 
 
 
 
 
 
 
 
 
 
22
  # Ensure cache directory exists
23
- os.makedirs("/app/cache", exist_ok=True)
24
- os.environ["TRANSFORMERS_CACHE"] = "/app/cache"
 
 
 
 
 
25
 
 
26
  app = FastAPI(title="Quran Transcription API")
27
 
28
  # Load model and processor
@@ -133,4 +152,13 @@ async def transcribe_audio(file: UploadFile = File(...)):
133
  return {"transcription": transcription}
134
  except Exception as e:
135
  logger.error(f"Transcription failed: {str(e)}")
136
- raise HTTPException(status_code=500, detail="Transcription failed")
 
 
 
 
 
 
 
 
 
 
1
  import logging
2
  import os
3
+ import sys
4
+ import uvicorn
5
  from fastapi import FastAPI, File, UploadFile, HTTPException
6
  from transformers import pipeline, WhisperForConditionalGeneration, WhisperProcessor
7
  import torch
 
16
  format="%(asctime)s [%(levelname)s] %(message)s",
17
  handlers=[
18
  logging.FileHandler("/app/logs/app.log"),
19
+ logging.StreamHandler(sys.stdout)
20
  ]
21
  )
22
  logger = logging.getLogger(__name__)
23
 
24
+ # Log system and dependency information
25
+ logger.info(f"Python version: {sys.version}")
26
+ try:
27
+ import transformers
28
+ import torch
29
+ logger.info(f"Transformers version: {transformers.__version__}")
30
+ logger.info(f"Torch version: {torch.__version__}")
31
+ except ImportError as e:
32
+ logger.error(f"Failed to import dependency: {str(e)}")
33
+ raise
34
+
35
  # Ensure cache directory exists
36
+ try:
37
+ os.makedirs("/app/cache", exist_ok=True)
38
+ os.environ["HF_HOME"] = "/app/cache"
39
+ logger.info(f"Set HF_HOME to /app/cache")
40
+ except Exception as e:
41
+ logger.error(f"Failed to set up cache directory: {str(e)}")
42
+ raise
43
 
44
+ # Initialize FastAPI app
45
  app = FastAPI(title="Quran Transcription API")
46
 
47
  # Load model and processor
 
152
  return {"transcription": transcription}
153
  except Exception as e:
154
  logger.error(f"Transcription failed: {str(e)}")
155
+ raise HTTPException(status_code=500, detail="Transcription failed")
156
+
157
+ if __name__ == "__main__":
158
+ logger.info("Starting Uvicorn server locally")
159
+ uvicorn.run(
160
+ "app:app",
161
+ host="0.0.0.0",
162
+ port=8000,
163
+ log_level="info"
164
+ )
requirements.txt CHANGED
@@ -1,6 +1,6 @@
1
- fastapi==0.115.0
2
- uvicorn==0.30.6
3
- transformers==4.44.2
4
- torch==2.4.1
5
- soundfile==0.12.1
6
  python-multipart==0.0.9
 
1
+ fastapi==0.115.0
2
+ uvicorn==0.30.6
3
+ transformers==4.44.2
4
+ torch==2.4.1
5
+ soundfile==0.12.1
6
  python-multipart==0.0.9