#!/usr/bin/env python3
"""
Test different HuggingFace approaches to find a working method
"""

import os

import requests
from huggingface_hub import InferenceClient

# HuggingFace token
HF_TOKEN = os.environ.get("HF_TOKEN", "")

def test_inference_api_direct(model_name, prompt="Hello, how are you?"):
    """Test using direct HTTP requests to HuggingFace API"""
    print(f"\n🌐 Testing direct HTTP API for: {model_name}")
    
    # Only attach an Authorization header when a token is available;
    # otherwise an empty "Bearer" value would be sent and rejected.
    headers = {"Content-Type": "application/json"}
    if HF_TOKEN:
        headers["Authorization"] = f"Bearer {HF_TOKEN}"
    
    url = f"https://api-inference.huggingface.co/models/{model_name}"
    
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 50,
            "temperature": 0.7,
            "top_p": 0.95,
            "do_sample": True
        }
    }
    
    try:
        response = requests.post(url, headers=headers, json=payload, timeout=30)
        print(f"Status: {response.status_code}")
        
        if response.status_code == 200:
            result = response.json()
            print(f"βœ… Success: {result}")
            return True
        else:
            print(f"❌ Error: {response.text}")
            return False
            
    except Exception as e:
        print(f"❌ Exception: {e}")
        return False
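
# A second access path via huggingface_hub's InferenceClient, as a minimal
# sketch: text_generation() is the documented client method for this, but
# which models actually accept it on the serverless tier varies over time,
# so treat failures here the same way as HTTP errors above.
def test_inference_client(model_name, prompt="Hello, how are you?"):
    """Test using the InferenceClient wrapper from huggingface_hub"""
    print(f"\n🤗 Testing InferenceClient for: {model_name}")

    try:
        client = InferenceClient(model=model_name, token=HF_TOKEN or None)
        result = client.text_generation(
            prompt,
            max_new_tokens=50,
            temperature=0.7,
            top_p=0.95,
            do_sample=True,
        )
        print(f"✅ Success: {result}")
        return True
    except Exception as e:
        print(f"❌ Exception: {e}")
        return False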

def test_serverless_models():
    """Test known working models that support serverless inference"""
    
    # Models that typically work well with serverless inference; note that
    # the DistilBERT entry is a sentiment classifier, so the generation
    # parameters may be ignored or rejected for it.
    working_models = [
        "microsoft/DialoGPT-medium",
        "google/flan-t5-base", 
        "distilbert-base-uncased-finetuned-sst-2-english",
        "gpt2",
        "microsoft/DialoGPT-small",
        "facebook/blenderbot-400M-distill"
    ]
    
    results = {}
    
    for model in working_models:
        result = test_inference_api_direct(model)
        results[model] = result
        
    return results

def test_chat_completion_models():
    """Test models specifically for chat completion"""
    
    chat_models = [
        "microsoft/DialoGPT-medium",
        "facebook/blenderbot-400M-distill",
        "microsoft/DialoGPT-small"
    ]
    
    for model in chat_models:
        print(f"\nπŸ’¬ Testing chat model: {model}")
        test_inference_api_direct(model, "Human: Hello! How are you?\nAssistant:")

if __name__ == "__main__":
    print("πŸ” HuggingFace Inference API Debug")
    print("=" * 50)
    
    if HF_TOKEN:
        print(f"πŸ”‘ Using HF_TOKEN: {HF_TOKEN[:10]}...")
    else:
        print("⚠️  No HF_TOKEN - trying anonymous access")
    
    # Test serverless models
    print("\n" + "="*60)
    print("TESTING SERVERLESS MODELS")
    print("="*60)
    
    results = test_serverless_models()
    
    # Test chat completion models
    print("\n" + "="*60)
    print("TESTING CHAT MODELS")  
    print("="*60)
    
    test_chat_completion_models()
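
    # Also exercise the InferenceClient path; the model chosen here is
    # arbitrary and may not be deployed on the serverless tier.
    print("\n" + "="*60)
    print("TESTING InferenceClient")
    print("="*60)

    test_inference_client("microsoft/DialoGPT-medium")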
    
    # Summary
    print("\n" + "="*60)
    print("SUMMARY")
    print("="*60)
    
    working_models = [model for model, result in results.items() if result]
    
    if working_models:
        print("βœ… Working models:")
        for model in working_models:
            print(f"  - {model}")
        print(f"\n🎯 Recommended model to switch to: {working_models[0]}")
    else:
        print("❌ No models working - API might be down or authentication issue")