# Use an official Python runtime as a parent image
FROM python:3.10-slim

# Set the working directory in the container
WORKDIR /code

# Pip settings: disable the wheel cache (keeps layers small) and skip the
# "new pip version available" check.
# NOTE: the old idiom PIP_NO_CACHE_DIR=off is parsed by modern pip as boolean
# *false* (i.e. cache ENABLED); "1" is the value that actually disables it.
ENV PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=on

# Install system dependencies (git for cloning, build-essential for compiling
# C/C++ extensions, curl for downloads/health checks), then clear the apt
# cache in the same layer so it never lands in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
        build-essential \
        curl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Copy all files from your Hugging Face Space repo
COPY . /code/

# Clone LLaMA-Omni2 and install it (WITHOUT editable flag), with verbose
# diagnostics so a broken install is visible in the build log. The source
# tree is also copied to /code for direct file access. Everything happens in
# ONE layer so the /tmp clone can be removed at the end — deleting it in a
# later RUN would not reduce image size (layers are additive).
RUN git clone https://github.com/ICTNLP/LLaMA-Omni2.git /tmp/LLaMA-Omni2 \
    && cd /tmp/LLaMA-Omni2 \
    && pip install . \
    && echo "--- PIP LIST AFTER LLaMA-Omni2 INSTALL --- " \
    && pip list | grep -i llama \
    && echo "--- PYTHON SYS.PATH AFTER LLaMA-Omni2 INSTALL --- " \
    && python -c "import sys; print(sys.path)" \
    && echo "--- TRYING TO IMPORT LLaMA-Omni2 --- " \
    && python -c "import llama_omni2; print(f'LLaMA-Omni2 imported successfully from {llama_omni2.__file__}')" \
    && echo "--- CHECKING WHERE LLAMA_OMNI2 IS INSTALLED --- " \
    && pip show llama-omni2 \
    && echo "--- DIAGNOSTICS END --- " \
    && cp -r /tmp/LLaMA-Omni2/llama_omni2 /code/ \
    && echo "--- COPIED LLAMA_OMNI2 SOURCE TO /code ---" \
    && ls -la /code/llama_omni2 \
    && echo "--- CHECKING SERVE SCRIPTS ---" \
    && { ls -la /code/llama_omni2/serve || echo "serve directory not found!"; } \
    && rm -rf /tmp/LLaMA-Omni2
# Make sure the vendored source under /code is importable.
# Uses the key=value ENV form (the space-separated form is deprecated).
# PYTHONPATH is unset in the base image, so expanding "${PYTHONPATH}:" here
# would produce a leading empty entry — which makes Python add the process
# CWD to sys.path. Set the path explicitly instead.
ENV PYTHONPATH="/code"

# Install any other explicit dependencies from requirements.txt
# (copied into /code by the earlier COPY; WORKDIR is /code).
RUN pip install -r requirements.txt

# Make debug and extraction scripts executable
RUN chmod +x /code/debug_llama_omni2.py /code/extract_llama_omni2_scripts.py

# Create the startup script with enhanced diagnostics and fallbacks.
# printf '%s\n' emits each argument as one literal line — unlike
# echo '...\n...', which only works where /bin/sh's echo happens to
# interpret backslash escapes (dash does, bash by default does not).
RUN printf '%s\n' \
        '#!/bin/bash' \
        'echo "--- CONTAINER STARTING ---"' \
        'echo "PYTHONPATH: $PYTHONPATH"' \
        'echo "Python sys.path:"' \
        'python -c "import sys; print(sys.path)"' \
        '' \
        'echo "Running diagnostic script..."' \
        'python /code/debug_llama_omni2.py' \
        '' \
        '# Check if llama_omni2 module is importable' \
        'if ! python -c "import llama_omni2" > /dev/null 2>&1; then' \
        '  echo "WARNING: llama_omni2 module cannot be imported. Extracting scripts as fallback..."' \
        '  python /code/extract_llama_omni2_scripts.py' \
        '' \
        '  # Add the extracted directory to PYTHONPATH' \
        '  if [ -d "/code/llama_omni2_extracted" ]; then' \
        '    export PYTHONPATH="$PYTHONPATH:/code/llama_omni2_extracted"' \
        '    echo "Added /code/llama_omni2_extracted to PYTHONPATH: $PYTHONPATH"' \
        '  fi' \
        'fi' \
        '' \
        'echo "Starting LLaMA-Omni2 application..."' \
        '# exec replaces the shell so SIGTERM from "docker stop" reaches Python' \
        'exec python app.py' \
        > /code/startup.sh \
    && chmod +x /code/startup.sh

# Expose the port Gradio will run on
EXPOSE 7860

# Command to run the application
CMD ["/code/startup.sh"]