#!/bin/bash
set -e  # Exit on error

# Function to show usage
show_usage() {
    echo "Usage: $0 [MODE]"
    echo ""
    echo "MODE options:"
    echo "  gpu   - Force GPU mode (requires nvidia-container-toolkit)"
    echo "  cpu   - Force CPU-only mode"
    echo "  auto  - Automatically detect and use GPU if available (default)"
    echo ""
    echo "Examples:"
    echo "  $0        # Auto-detect (default)"
    echo "  $0 gpu    # Force GPU mode"
    echo "  $0 cpu    # Force CPU-only mode"
    echo ""
}

# Parse command line arguments
MODE="auto"
if [ $# -gt 0 ]; then
    case "$1" in
        gpu|GPU) MODE="gpu" ;;
        cpu|CPU) MODE="cpu" ;;
        auto|AUTO) MODE="auto" ;;
        -h|--help|help)
            show_usage
            exit 0
            ;;
        *)
            echo "Error: Unknown mode '$1'"
            echo ""
            show_usage
            exit 1
            ;;
    esac
fi

# Common Docker run arguments
DOCKER_ARGS="--rm -p 7860:7860 --user 1000:1000 \
    -e ENABLE_DEBUG_TAB=true \
    -e VOCAB_SOURCE=norvig \
    -e DIFFICULTY_WEIGHT=0.2"

IMAGE_NAME="crossword-py-ai:hf"

# Function to run with GPU
run_gpu() {
    echo "🚀 Running in GPU mode..."
    docker run --gpus all $DOCKER_ARGS $IMAGE_NAME
}

# Function to run with CPU only
run_cpu() {
    echo "🖥️ Running in CPU-only mode..."
    docker run $DOCKER_ARGS $IMAGE_NAME
}

# Function to check GPU availability
check_gpu_available() {
    if ! command -v nvidia-smi &> /dev/null; then
        return 1
    fi
    if ! docker run --rm --gpus all nvidia/cuda:12.1.0-base-ubuntu22.04 nvidia-smi &> /dev/null; then
        return 1
    fi
    return 0
}

# Execute based on mode
case "$MODE" in
    gpu)
        echo "🔍 Checking GPU support..."
        if check_gpu_available; then
            run_gpu
        else
            echo "❌ Error: GPU mode requested but GPU support not available!"
            echo ""
            echo "To enable GPU support:"
            echo "1. Install nvidia-container-toolkit:"
            echo "   sudo apt-get update"
            echo "   sudo apt-get install -y nvidia-container-toolkit"
            echo "   sudo systemctl restart docker"
            echo ""
            echo "2. Or use CPU mode: $0 cpu"
            exit 1
        fi
        ;;
    cpu)
        run_cpu
        ;;
    auto)
        echo "🔍 Auto-detecting GPU support..."
        if check_gpu_available; then
            echo "✅ GPU support detected"
            run_gpu
        else
            echo "ℹ️ GPU not available, falling back to CPU mode"
            run_cpu
        fi
        ;;
esac