import os
import requests
from dotenv import load_dotenv

load_dotenv()


def test_single_model(model_name, hf_token):
    """Test a single model"""
    print(f"\n🔄 Testing model: {model_name}")

    api_url = f"https://api-inference.huggingface.co/models/{model_name}"
    headers = {"Authorization": f"Bearer {hf_token}"}

    # Different payload formats for different model types
    if "flan-t5" in model_name.lower():
        payload = {"inputs": "What is the capital of France?"}
    elif "gpt" in model_name.lower():
        payload = {"inputs": "Human: What is the capital of France?\nAssistant:"}
    else:
        payload = {"inputs": "What is the capital of France?"}

    try:
        response = requests.post(
            api_url, headers=headers, json=payload, timeout=30)
        print(f"📡 Status: {response.status_code}")

        if response.status_code == 200:
            result = response.json()
            print(f"✅ SUCCESS! Response: {result}")
            return True, model_name
        elif response.status_code == 503:
            print("⏳ Model is loading...")
            return False, "loading"
        elif response.status_code == 404:
            print("❌ Model not found")
            return False, "not_found"
        elif response.status_code == 401:
            print("🚨 Token invalid")
            return False, "invalid_token"
        else:
            print(f"❌ Error {response.status_code}: {response.text}")
            return False, f"error_{response.status_code}"
    except Exception as e:
        print(f"❌ Exception: {e}")
        return False, str(e)


def test_hf_token():
    hf_token = os.environ.get("HUGGINGFACE_API_TOKEN")
    if not hf_token:
        print("❌ HUGGINGFACE_API_TOKEN not found in environment")
        return False

    print(f"✅ Found token: {hf_token[:10]}..." + "*" * (len(hf_token) - 10))

    # Test multiple models to find one that works
    models_to_test = [
        "microsoft/DialoGPT-medium",
        "facebook/blenderbot-400M-distill",
        "google/flan-t5-small",
        "google/flan-t5-base",
        "distilbert-base-cased-distilled-squad",
        "gpt2",
    ]

    working_models = []
    loading_models = []

    for model in models_to_test:
        success, status = test_single_model(model, hf_token)
        if success:
            working_models.append(model)
        elif status == "loading":
            loading_models.append(model)

    print("\n" + "=" * 50)
    print("📊 RESULTS:")
    if working_models:
        print(f"✅ Working models: {working_models}")
        return True
    elif loading_models:
        print(f"⏳ Models still loading: {loading_models}")
        print("💡 Try again in 2-3 minutes")
        return False
    else:
        print("❌ No models are working")
        return False


if __name__ == "__main__":
    print("🧪 Testing Hugging Face Token...")
    print("=" * 40)

    success = test_hf_token()

    if success:
        print("\n✅ Your HF token is working!")
    else:
        print("\n❌ There's an issue with your HF token or the API")
        print("\nTroubleshooting steps:")
        print("1. Check your .env file has: HUGGINGFACE_API_TOKEN=your_token_here")
        print("2. Verify your token at: https://huggingface.co/settings/tokens")
        print("3. Make sure the token has 'Read' permissions")
        print("4. Try generating a new token if needed")
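
# Usage sketch: the commands below are one possible way to run this script.
# The filename (test_hf_token.py) is an assumption, not stated in the script
# itself; adjust it to match wherever you saved this file.
#
#   pip install requests python-dotenv
#   echo "HUGGINGFACE_API_TOKEN=hf_your_token_here" > .env
#   python test_hf_token.py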