Said Lfagrouche committed on
Commit 6ca0914 · 1 Parent(s): 3ed269d

Add Hugging Face Space configuration

Files changed (3)
  1. Dockerfile +9 -29
  2. README.md +21 -8
  3. app.py +42 -0
Dockerfile CHANGED
@@ -1,48 +1,28 @@
 FROM python:3.13-slim
 
-WORKDIR /app
+RUN useradd -m -u 1000 user
+USER user
+ENV PATH="/home/user/.local/bin:$PATH"
 
-# Install git and git-lfs for downloading large files (if needed)
-RUN apt-get update && \
-    apt-get install -y git git-lfs && \
-    apt-get clean && \
-    rm -rf /var/lib/apt/lists/*
+WORKDIR /app
 
 # Copy requirements file
-COPY requirements.txt .
+COPY --chown=user requirements.txt .
 
 # Install dependencies
-RUN pip install --no-cache-dir -r requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
 
 # Download NLTK data
 RUN python -c "import nltk; nltk.download('punkt'); nltk.download('wordnet'); nltk.download('stopwords')"
 
 # Create necessary directories
 RUN mkdir -p data/users data/sessions data/conversations data/feedback
-RUN mkdir -p mental_health_model_artifacts/chroma_db
 
 # Copy application files
-COPY api_mental_health.py .
-COPY .env.example .env
-COPY create_vector_db.py .
-
-# Copy model artifacts if they exist, or they should be mounted/downloaded at runtime
-# Note: For Hugging Face Spaces deployment, you'll need to use Git LFS or
-# provide a way to download these models at container startup
-COPY mental_health_model_artifacts/ mental_health_model_artifacts/
-
-# Create a startup script to handle potential model downloading
-RUN echo '#!/bin/bash\n\
-# Check if models exist\n\
-if [ ! -f "mental_health_model_artifacts/crisis_classifier.pkl" ]; then\n\
-echo "Warning: Model artifacts not found. Please mount them or implement a download method."\n\
-fi\n\
-# Start the API server\n\
-exec uvicorn api_mental_health:app --host 0.0.0.0 --port 7860\n\
-' > /app/start.sh && chmod +x /app/start.sh
+COPY --chown=user . /app
 
 # Expose the port Hugging Face Spaces expects
 EXPOSE 7860
 
-# Use the startup script
-CMD ["/app/start.sh"]
+# Command to run the application
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,13 +1,26 @@
 ---
-title: Thera Guide Ai
-emoji: 🚀
-colorFrom: green
-colorTo: purple
+title: Mental Health Counselor API
+emoji: 🧠
+colorFrom: purple
+colorTo: indigo
 sdk: docker
 pinned: false
-license: mit
-short_description: crisis-detection
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-# This is a test update
+# Mental Health Counselor API
+
+This is a backend API for a mental health counseling application. It provides endpoints for analyzing patient messages, suggesting counselor responses, and managing counseling sessions.
+
+## API Endpoints
+
+- `/`: Root endpoint showing API status
+- `/health`: Health check endpoint
+- And many more specialized endpoints from the full API
+
+## Deployment
+
+This API is deployed on Hugging Face Spaces using Docker.
+
+## Frontend
+
+The frontend for this application is deployed separately on Vercel.
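For the endpoints documented in the README above, a minimal client sketch is shown below; the Space URL is a placeholder and the `requests` package is assumed to be available.

# smoke_test.py (sketch): calls the public endpoints documented in the README.
# BASE_URL is a placeholder; replace it with the actual Space URL.
import requests

BASE_URL = "https://your-space-name.hf.space"

health = requests.get(f"{BASE_URL}/health", timeout=10)
health.raise_for_status()
print(health.json())  # expected: {"status": "ok", "message": "Mental Health Counselor API is running"}

root = requests.get(f"{BASE_URL}/", timeout=10)
print(root.json()["endpoints"])  # ["/health", "/api-docs"] per app.py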
app.py ADDED
@@ -0,0 +1,42 @@
+from fastapi import FastAPI
+import os
+import logging
+
+# Set up logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Initialize FastAPI app
+app = FastAPI(title="Mental Health Counselor API")
+
+# Create necessary directories
+os.makedirs("data/users", exist_ok=True)
+os.makedirs("data/sessions", exist_ok=True)
+os.makedirs("data/conversations", exist_ok=True)
+os.makedirs("data/feedback", exist_ok=True)
+
+# Define a simple health check route
+@app.get("/health")
+async def health_check():
+    return {"status": "ok", "message": "Mental Health Counselor API is running"}
+
+# Define a simple root route
+@app.get("/")
+async def root():
+    return {
+        "app": "Mental Health Counselor API",
+        "status": "running",
+        "endpoints": [
+            "/health",
+            "/api-docs"
+        ]
+    }
+
+# Import the actual API if the file exists
+try:
+    from api_mental_health import app as full_app
+    # Merge the routes from the full app
+    app.routes.extend(full_app.routes)
+    logger.info("Loaded full API functionality")
+except ImportError:
+    logger.warning("Could not import full API functionality")
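As a rough check of the fallback logic in app.py, a sketch using FastAPI's TestClient (which requires the httpx test dependency); it exercises the built-in routes and lists whatever was merged in from api_mental_health if that import succeeded.

# test_app.py (sketch): verifies app.py responds even without the full API module.
from fastapi.testclient import TestClient

from app import app

client = TestClient(app)

assert client.get("/health").json()["status"] == "ok"
print([route.path for route in app.routes])  # built-in routes plus any merged from api_mental_health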