#!/usr/bin/env python3
"""
Simple test script to verify Ollama is working
"""
import requests
import json


def test_ollama_connection():
    """Test if Ollama is running and accessible"""
    try:
        # GET /api/tags lists the models available on the local Ollama server
        response = requests.get("http://localhost:11434/api/tags", timeout=10)
        if response.status_code == 200:
            models = response.json().get("models", [])
            print(f"✅ Ollama is running!")
            print(f"📋 Available models: {[m['name'] for m in models]}")
            return True
        else:
            print(f"❌ Ollama health check failed: {response.status_code}")
            return False
    except Exception as e:
        print(f"❌ Cannot connect to Ollama: {e}")
        return False
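

# Optional helper (illustrative sketch, assuming /api/tags entries carry a
# "name" field as used in test_ollama_connection() above): check whether a
# specific model has already been pulled locally before running a generation.
def model_available(model_name: str) -> bool:
    """Return True if `model_name` appears in Ollama's local model list."""
    try:
        response = requests.get("http://localhost:11434/api/tags", timeout=10)
        response.raise_for_status()
        names = [m["name"] for m in response.json().get("models", [])]
        return model_name in names
    except requests.RequestException:
        return False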


def test_ollama_generation():
    """Test if Ollama can generate text"""
    try:
        payload = {
            "model": "llama3.2:latest",
            "prompt": "Hello! Please respond with 'Ollama is working correctly!'",
            "stream": False
        }
        # POST /api/generate returns the whole completion as one JSON object
        # when "stream" is False
        response = requests.post(
            "http://localhost:11434/api/generate",
            json=payload,
            timeout=30
        )
        if response.status_code == 200:
            result = response.json()
            generated_text = result.get('response', '').strip()
            print(f"✅ Ollama generation test successful!")
            print(f"🤖 Response: {generated_text}")
            return True
        else:
            print(f"❌ Ollama generation failed: {response.status_code} - {response.text}")
            return False
    except Exception as e:
        print(f"❌ Ollama generation test failed: {e}")
        return False
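

# Illustrative sketch only: the same /api/generate endpoint can also stream.
# With "stream": True the server sends newline-delimited JSON chunks, each with
# a partial "response" field and a final chunk where "done" is true (per the
# public Ollama API docs; adjust if your server version behaves differently).
def demo_streaming_generation(prompt="Say hello in one short sentence."):
    """Hedged example of consuming a streaming /api/generate response."""
    payload = {"model": "llama3.2:latest", "prompt": prompt, "stream": True}
    with requests.post("http://localhost:11434/api/generate",
                       json=payload, stream=True, timeout=60) as response:
        response.raise_for_status()
        for line in response.iter_lines():
            if not line:
                continue
            chunk = json.loads(line)  # uses the json import at the top of the file
            print(chunk.get("response", ""), end="", flush=True)
            if chunk.get("done"):
                print()
                break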


if __name__ == "__main__":
    print("🧪 Testing Ollama Setup...")
    print("=" * 50)
    # Test connection
    if test_ollama_connection():
        print()
        # Test generation
        test_ollama_generation()
    print("=" * 50)
    print("✅ Ollama test completed!")
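
# Rough usage notes (assumptions about the surrounding setup, not verified here):
#   pip install requests            # the only third-party dependency above
#   ollama serve                    # if the Ollama server is not already running
#   ollama pull llama3.2            # provides the "llama3.2:latest" model used above
#   python <path-to-this-script>    # runs the connection and generation checks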