#!/bin/bash

# Run inference on LLaMA-Omni model
# Usage: bash run.sh <examples_dir>

EXAMPLES_DIR=$1

if [ -z "$EXAMPLES_DIR" ]; then
    echo "Error: Examples directory not specified"
    echo "Usage: bash run.sh <examples_dir>"
    exit 1
fi

if [ ! -d "$EXAMPLES_DIR" ]; then
    echo "Error: Directory $EXAMPLES_DIR does not exist"
    exit 1
fi

# Check if the model and vocoder exist (placeholder for a real implementation)
echo "Checking if required models are available..."
echo "Note: In a real deployment, the model would be downloaded from Hugging Face"

# Process each JSON file in the examples directory
for json_file in "$EXAMPLES_DIR"/*.json; do
    if [ -f "$json_file" ]; then
        echo "Processing $json_file..."
        # In a real implementation, this would call a Python script
        echo "python -m omni_speech.infer.inference --input $json_file --output results/$(basename "$json_file" .json)"
    fi
done

echo "Inference complete."
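
# --- Optional: a concrete version of the placeholder steps above ---
# A minimal sketch, kept commented out, of what the model check and the
# per-file inference call could look like. The local path MODEL_DIR and the
# Hugging Face repo id "ICTNLP/Llama-3.1-8B-Omni" are assumptions; adjust
# them to the actual release before uncommenting.
#
# MODEL_DIR="models/Llama-3.1-8B-Omni"   # hypothetical local model path
# if [ ! -d "$MODEL_DIR" ]; then
#     # huggingface-cli ships with the huggingface_hub Python package
#     huggingface-cli download ICTNLP/Llama-3.1-8B-Omni --local-dir "$MODEL_DIR"
# fi
#
# mkdir -p results
# for json_file in "$EXAMPLES_DIR"/*.json; do
#     [ -f "$json_file" ] || continue
#     python -m omni_speech.infer.inference \
#         --input "$json_file" \
#         --output "results/$(basename "$json_file" .json)"
# done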