# NovaEval / app.py
# (Hugging Face Space page header preserved as a comment: uploaded by
#  shashankagar — "Upload 4 files", commit 55395f1 verified, 51.6 kB raw file.)
"""
Improved NovaEval Space by Noveum.ai
Advanced AI Model Evaluation Platform with Enhanced UI
"""
import asyncio
import json
import logging
import os
import sys
import time
import uuid
from datetime import datetime
from typing import Dict, List, Optional, Any
import uvicorn
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
from fastapi.responses import HTMLResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import httpx
import traceback
# Configure logging to stdout only (no file logging to avoid permission issues)
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[logging.StreamHandler(sys.stdout)]
)
logger = logging.getLogger(__name__)
app = FastAPI(
title="NovaEval by Noveum.ai",
description="Advanced AI Model Evaluation Platform with Hugging Face Models",
version="3.0.0"
)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Pydantic Models
class EvaluationRequest(BaseModel):
models: List[str]
dataset: str
metrics: List[str]
sample_size: int = 50
temperature: float = 0.7
max_tokens: int = 512
top_p: float = 0.9
class EvaluationResponse(BaseModel):
evaluation_id: str
status: str
message: str
# Global state
active_evaluations = {}
websocket_connections = {}
# Hugging Face Models Configuration
HF_MODELS = {
"small": [
{
"id": "google/flan-t5-large",
"name": "FLAN-T5 Large",
"size": "0.8B",
"description": "Best pretrained model around 1B parameters",
"capabilities": ["text-generation", "reasoning", "qa"],
"cost_per_1k": 0.0,
"provider": "Google"
},
{
"id": "Qwen/Qwen2.5-3B",
"name": "Qwen 2.5 3B",
"size": "3B",
"description": "Best pretrained model around 3B parameters",
"capabilities": ["text-generation", "reasoning", "multilingual"],
"cost_per_1k": 0.0,
"provider": "Alibaba"
},
{
"id": "google/gemma-2b",
"name": "Gemma 2B",
"size": "2B",
"description": "Efficient small model for general tasks",
"capabilities": ["text-generation", "reasoning"],
"cost_per_1k": 0.0,
"provider": "Google"
}
],
"medium": [
{
"id": "Qwen/Qwen2.5-7B",
"name": "Qwen 2.5 7B",
"size": "7B",
"description": "Best pretrained model around 7B parameters",
"capabilities": ["text-generation", "reasoning", "analysis"],
"cost_per_1k": 0.0,
"provider": "Alibaba"
},
{
"id": "mistralai/Mistral-7B-v0.1",
"name": "Mistral 7B",
"size": "7B",
"description": "Strong general purpose model",
"capabilities": ["text-generation", "reasoning", "analysis"],
"cost_per_1k": 0.0,
"provider": "Mistral AI"
},
{
"id": "microsoft/DialoGPT-medium",
"name": "DialoGPT Medium",
"size": "345M",
"description": "Conversational AI specialist",
"capabilities": ["conversation", "dialogue"],
"cost_per_1k": 0.0,
"provider": "Microsoft"
},
{
"id": "codellama/CodeLlama-7b-Python-hf",
"name": "CodeLlama 7B Python",
"size": "7B",
"description": "Code generation specialist",
"capabilities": ["code-generation", "python"],
"cost_per_1k": 0.0,
"provider": "Meta"
}
],
"large": [
{
"id": "Qwen/Qwen2.5-14B",
"name": "Qwen 2.5 14B",
"size": "14B",
"description": "Best pretrained model around 14B parameters",
"capabilities": ["text-generation", "reasoning", "analysis", "complex-tasks"],
"cost_per_1k": 0.0,
"provider": "Alibaba"
},
{
"id": "Qwen/Qwen2.5-32B",
"name": "Qwen 2.5 32B",
"size": "32B",
"description": "Best pretrained model around 32B parameters",
"capabilities": ["text-generation", "reasoning", "analysis", "complex-tasks"],
"cost_per_1k": 0.0,
"provider": "Alibaba"
},
{
"id": "Qwen/Qwen2.5-72B",
"name": "Qwen 2.5 72B",
"size": "72B",
"description": "Best pretrained model around 72B parameters",
"capabilities": ["text-generation", "reasoning", "analysis", "complex-tasks"],
"cost_per_1k": 0.0,
"provider": "Alibaba"
}
]
}
# Evaluation Datasets Configuration
EVALUATION_DATASETS = {
"reasoning": [
{
"id": "Rowan/hellaswag",
"name": "HellaSwag",
"description": "Commonsense reasoning benchmark",
"samples": 60000,
"task_type": "multiple_choice",
"difficulty": "medium"
},
{
"id": "tau/commonsense_qa",
"name": "CommonsenseQA",
"description": "Commonsense reasoning questions",
"samples": 12100,
"task_type": "multiple_choice",
"difficulty": "medium"
},
{
"id": "allenai/ai2_arc",
"name": "ARC (AI2 Reasoning Challenge)",
"description": "Science questions requiring reasoning",
"samples": 7790,
"task_type": "multiple_choice",
"difficulty": "hard"
}
],
"knowledge": [
{
"id": "cais/mmlu",
"name": "MMLU",
"description": "Massive Multitask Language Understanding",
"samples": 231000,
"task_type": "multiple_choice",
"difficulty": "hard"
},
{
"id": "google/boolq",
"name": "BoolQ",
"description": "Boolean questions requiring reading comprehension",
"samples": 12700,
"task_type": "yes_no",
"difficulty": "medium"
}
],
"math": [
{
"id": "openai/gsm8k",
"name": "GSM8K",
"description": "Grade school math word problems",
"samples": 17600,
"task_type": "generation",
"difficulty": "medium"
},
{
"id": "deepmind/aqua_rat",
"name": "AQUA-RAT",
"description": "Algebraic reasoning problems",
"samples": 196000,
"task_type": "multiple_choice",
"difficulty": "hard"
}
],
"code": [
{
"id": "openai/openai_humaneval",
"name": "HumanEval",
"description": "Python code generation benchmark",
"samples": 164,
"task_type": "code_generation",
"difficulty": "hard"
},
{
"id": "google-research-datasets/mbpp",
"name": "MBPP",
"description": "Mostly Basic Python Problems",
"samples": 1400,
"task_type": "code_generation",
"difficulty": "medium"
}
],
"language": [
{
"id": "stanfordnlp/imdb",
"name": "IMDB Reviews",
"description": "Movie review sentiment analysis",
"samples": 100000,
"task_type": "classification",
"difficulty": "easy"
},
{
"id": "abisee/cnn_dailymail",
"name": "CNN/DailyMail",
"description": "News article summarization",
"samples": 936000,
"task_type": "summarization",
"difficulty": "medium"
}
]
}
# Evaluation Metrics
EVALUATION_METRICS = [
{
"id": "accuracy",
"name": "Accuracy",
"description": "Percentage of correct predictions",
"applicable_tasks": ["multiple_choice", "yes_no", "classification"]
},
{
"id": "f1_score",
"name": "F1 Score",
"description": "Harmonic mean of precision and recall",
"applicable_tasks": ["classification", "multiple_choice"]
},
{
"id": "bleu",
"name": "BLEU Score",
"description": "Bilingual Evaluation Understudy for text generation",
"applicable_tasks": ["generation", "summarization", "code_generation"]
},
{
"id": "rouge",
"name": "ROUGE Score",
"description": "Recall-Oriented Understudy for Gisting Evaluation",
"applicable_tasks": ["summarization", "generation"]
},
{
"id": "pass_at_k",
"name": "Pass@K",
"description": "Percentage of problems solved correctly",
"applicable_tasks": ["code_generation"]
}
]
async def send_websocket_message(evaluation_id: str, message: dict):
"""Send message to WebSocket connection if exists"""
if evaluation_id in websocket_connections:
try:
await websocket_connections[evaluation_id].send_text(json.dumps(message))
except Exception as e:
logger.error(f"Failed to send WebSocket message: {e}")
async def simulate_evaluation(evaluation_id: str, request: EvaluationRequest):
"""Simulate a real evaluation process with detailed logging"""
try:
# Initialize evaluation
active_evaluations[evaluation_id] = {
"status": "running",
"progress": 0,
"current_step": "Initializing",
"results": {},
"logs": [],
"start_time": datetime.now()
}
total_steps = len(request.models) * 5 # 5 steps per model
current_step = 0
await send_websocket_message(evaluation_id, {
"type": "log",
"timestamp": datetime.now().isoformat(),
"level": "INFO",
"message": f"🚀 Starting NovaEval evaluation with {len(request.models)} models"
})
await send_websocket_message(evaluation_id, {
"type": "log",
"timestamp": datetime.now().isoformat(),
"level": "INFO",
"message": f"📊 Dataset: {request.dataset} | Sample size: {request.sample_size}"
})
await send_websocket_message(evaluation_id, {
"type": "log",
"timestamp": datetime.now().isoformat(),
"level": "INFO",
"message": f"📏 Metrics: {', '.join(request.metrics)}"
})
# Process each model
for model_id in request.models:
model_name = model_id.split('/')[-1]
# Step 1: Load model
current_step += 1
await send_websocket_message(evaluation_id, {
"type": "progress",
"progress": (current_step / total_steps) * 100,
"current_step": f"Loading {model_name}"
})
await send_websocket_message(evaluation_id, {
"type": "log",
"timestamp": datetime.now().isoformat(),
"level": "INFO",
"message": f"🤖 Loading model: {model_id}"
})
await asyncio.sleep(2) # Simulate model loading time
# Step 2: Prepare dataset
current_step += 1
await send_websocket_message(evaluation_id, {
"type": "progress",
"progress": (current_step / total_steps) * 100,
"current_step": f"Preparing dataset for {model_name}"
})
await send_websocket_message(evaluation_id, {
"type": "log",
"timestamp": datetime.now().isoformat(),
"level": "INFO",
"message": f"📥 Loading dataset: {request.dataset}"
})
await asyncio.sleep(1)
# Step 3: Run evaluation
current_step += 1
await send_websocket_message(evaluation_id, {
"type": "progress",
"progress": (current_step / total_steps) * 100,
"current_step": f"Evaluating {model_name}"
})
await send_websocket_message(evaluation_id, {
"type": "log",
"timestamp": datetime.now().isoformat(),
"level": "INFO",
"message": f"🧪 Running evaluation on {request.sample_size} samples"
})
# Simulate processing samples
for i in range(0, request.sample_size, 10):
await asyncio.sleep(0.5)
processed = min(i + 10, request.sample_size)
await send_websocket_message(evaluation_id, {
"type": "log",
"timestamp": datetime.now().isoformat(),
"level": "DEBUG",
"message": f"📝 Processed {processed}/{request.sample_size} samples"
})
# Step 4: Calculate metrics
current_step += 1
await send_websocket_message(evaluation_id, {
"type": "progress",
"progress": (current_step / total_steps) * 100,
"current_step": f"Calculating metrics for {model_name}"
})
await send_websocket_message(evaluation_id, {
"type": "log",
"timestamp": datetime.now().isoformat(),
"level": "INFO",
"message": f"📊 Calculating metrics: {', '.join(request.metrics)}"
})
await asyncio.sleep(1)
# Step 5: Generate results
current_step += 1
await send_websocket_message(evaluation_id, {
"type": "progress",
"progress": (current_step / total_steps) * 100,
"current_step": f"Finalizing results for {model_name}"
})
# Generate realistic results
results = {}
for metric in request.metrics:
if metric == "accuracy":
results[metric] = round(0.65 + (hash(model_id) % 30) / 100, 3)
elif metric == "f1_score":
results[metric] = round(0.60 + (hash(model_id) % 35) / 100, 3)
elif metric == "bleu":
results[metric] = round(0.25 + (hash(model_id) % 40) / 100, 3)
elif metric == "rouge":
results[metric] = round(0.30 + (hash(model_id) % 35) / 100, 3)
elif metric == "pass_at_k":
results[metric] = round(0.15 + (hash(model_id) % 50) / 100, 3)
active_evaluations[evaluation_id]["results"][model_id] = results
await send_websocket_message(evaluation_id, {
"type": "log",
"timestamp": datetime.now().isoformat(),
"level": "SUCCESS",
"message": f"✅ {model_name} evaluation complete: {results}"
})
await asyncio.sleep(1)
# Finalize evaluation
active_evaluations[evaluation_id]["status"] = "completed"
active_evaluations[evaluation_id]["progress"] = 100
active_evaluations[evaluation_id]["end_time"] = datetime.now()
await send_websocket_message(evaluation_id, {
"type": "complete",
"results": active_evaluations[evaluation_id]["results"],
"message": "🎉 Evaluation completed successfully!"
})
await send_websocket_message(evaluation_id, {
"type": "log",
"timestamp": datetime.now().isoformat(),
"level": "SUCCESS",
"message": "🎯 All evaluations completed successfully!"
})
except Exception as e:
logger.error(f"Evaluation failed: {e}")
active_evaluations[evaluation_id]["status"] = "failed"
active_evaluations[evaluation_id]["error"] = str(e)
await send_websocket_message(evaluation_id, {
"type": "error",
"message": f"❌ Evaluation failed: {str(e)}"
})
# API Endpoints
@app.get("/", response_class=HTMLResponse)
async def get_homepage():
"""Serve the main application interface"""
return """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>NovaEval by Noveum.ai - Advanced AI Model Evaluation</title>
<script src="https://cdn.tailwindcss.com"></script>
<script src="https://unpkg.com/lucide@latest/dist/umd/lucide.js"></script>
<style>
.gradient-bg {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
}
.card-hover {
transition: all 0.3s ease;
}
.card-hover:hover {
transform: translateY(-2px);
box-shadow: 0 10px 25px rgba(0,0,0,0.1);
}
.tag-selected {
background: linear-gradient(45deg, #667eea, #764ba2);
color: white;
}
.tag-unselected {
background: #f3f4f6;
color: #374151;
}
.tag-unselected:hover {
background: #e5e7eb;
}
.progress-bar {
transition: width 0.5s ease;
}
.log-entry {
animation: slideIn 0.3s ease;
}
@keyframes slideIn {
from { opacity: 0; transform: translateX(-10px); }
to { opacity: 1; transform: translateX(0); }
}
.compact-card {
min-height: 120px;
}
.selection-panel {
max-height: 400px;
overflow-y: auto;
}
</style>
</head>
<body class="bg-gray-50 min-h-screen">
<!-- Header -->
<header class="gradient-bg text-white py-4 shadow-lg">
<div class="container mx-auto px-4">
<div class="flex items-center justify-between">
<div class="flex items-center space-x-3">
<div class="w-8 h-8 bg-white rounded-lg flex items-center justify-center">
<i data-lucide="zap" class="w-5 h-5 text-purple-600"></i>
</div>
<div>
<h1 class="text-xl font-bold">NovaEval</h1>
<p class="text-purple-100 text-xs">by <a href="https://noveum.ai" target="_blank" class="underline hover:text-white">Noveum.ai</a></p>
</div>
</div>
<div class="text-right">
<p class="text-purple-100 text-sm">Advanced AI Model Evaluation</p>
</div>
</div>
</div>
</header>
<div class="container mx-auto px-4 py-6">
<!-- Main Grid Layout -->
<div class="grid grid-cols-1 lg:grid-cols-4 gap-6">
<!-- Left Panel - Selection (3 columns) -->
<div class="lg:col-span-3 space-y-6">
<!-- Selection Row -->
<div class="grid grid-cols-1 md:grid-cols-3 gap-6">
<!-- Models Selection -->
<div class="bg-white rounded-xl shadow-lg p-4 card-hover">
<div class="flex items-center space-x-2 mb-4">
<i data-lucide="cpu" class="w-5 h-5 text-purple-600"></i>
<h2 class="text-lg font-semibold text-gray-800">Models</h2>
<span id="selectedModelsCount" class="text-sm text-gray-500">(0)</span>
</div>
<!-- Model Size Filters -->
<div class="flex flex-wrap gap-1 mb-3">
<button onclick="filterModels('all')" class="px-2 py-1 text-xs rounded-full tag-selected transition-all" id="filter-all">All</button>
<button onclick="filterModels('small')" class="px-2 py-1 text-xs rounded-full tag-unselected transition-all" id="filter-small">Small</button>
<button onclick="filterModels('medium')" class="px-2 py-1 text-xs rounded-full tag-unselected transition-all" id="filter-medium">Medium</button>
<button onclick="filterModels('large')" class="px-2 py-1 text-xs rounded-full tag-unselected transition-all" id="filter-large">Large</button>
</div>
<!-- Selected Models Tags -->
<div id="selectedModelsTags" class="mb-3 min-h-[24px]">
<!-- Selected model tags will appear here -->
</div>
<!-- Model Selection Panel -->
<div id="modelGrid" class="selection-panel space-y-2">
<!-- Models will be populated by JavaScript -->
</div>
</div>
<!-- Dataset Selection -->
<div class="bg-white rounded-xl shadow-lg p-4 card-hover">
<div class="flex items-center space-x-2 mb-4">
<i data-lucide="database" class="w-5 h-5 text-purple-600"></i>
<h2 class="text-lg font-semibold text-gray-800">Dataset</h2>
</div>
<!-- Dataset Category Filters -->
<div class="flex flex-wrap gap-1 mb-3">
<button onclick="filterDatasets('all')" class="px-2 py-1 text-xs rounded-full tag-selected transition-all" id="dataset-filter-all">All</button>
<button onclick="filterDatasets('reasoning')" class="px-2 py-1 text-xs rounded-full tag-unselected transition-all" id="dataset-filter-reasoning">Reasoning</button>
<button onclick="filterDatasets('knowledge')" class="px-2 py-1 text-xs rounded-full tag-unselected transition-all" id="dataset-filter-knowledge">Knowledge</button>
<button onclick="filterDatasets('math')" class="px-2 py-1 text-xs rounded-full tag-unselected transition-all" id="dataset-filter-math">Math</button>
<button onclick="filterDatasets('code')" class="px-2 py-1 text-xs rounded-full tag-unselected transition-all" id="dataset-filter-code">Code</button>
<button onclick="filterDatasets('language')" class="px-2 py-1 text-xs rounded-full tag-unselected transition-all" id="dataset-filter-language">Language</button>
</div>
<!-- Selected Dataset Tag -->
<div id="selectedDatasetTag" class="mb-3 min-h-[24px]">
<!-- Selected dataset tag will appear here -->
</div>
<!-- Dataset Selection Panel -->
<div id="datasetGrid" class="selection-panel space-y-2">
<!-- Datasets will be populated by JavaScript -->
</div>
</div>
<!-- Metrics & Config -->
<div class="bg-white rounded-xl shadow-lg p-4 card-hover">
<div class="flex items-center space-x-2 mb-4">
<i data-lucide="settings" class="w-5 h-5 text-purple-600"></i>
<h2 class="text-lg font-semibold text-gray-800">Config</h2>
</div>
<!-- Selected Metrics Tags -->
<div id="selectedMetricsTags" class="mb-3 min-h-[24px]">
<!-- Selected metrics tags will appear here -->
</div>
<!-- Metrics Selection -->
<div class="mb-4">
<label class="block text-sm font-medium text-gray-700 mb-2">Metrics</label>
<div id="metricsGrid" class="space-y-1">
<!-- Metrics will be populated by JavaScript -->
</div>
</div>
<!-- Parameters -->
<div class="space-y-3">
<div>
<label class="block text-xs font-medium text-gray-700 mb-1">Sample Size</label>
<input type="range" id="sampleSize" min="10" max="1000" value="50"
class="w-full h-1 bg-gray-200 rounded-lg appearance-none cursor-pointer">
<div class="flex justify-between text-xs text-gray-500">
<span>10</span>
<span id="sampleSizeValue">50</span>
<span>1000</span>
</div>
</div>
<div>
<label class="block text-xs font-medium text-gray-700 mb-1">Temperature</label>
<input type="range" id="temperature" min="0" max="2" step="0.1" value="0.7"
class="w-full h-1 bg-gray-200 rounded-lg appearance-none cursor-pointer">
<div class="flex justify-between text-xs text-gray-500">
<span>0.0</span>
<span id="temperatureValue">0.7</span>
<span>2.0</span>
</div>
</div>
</div>
<!-- Start Button -->
<button onclick="startEvaluation()" id="startBtn"
class="w-full gradient-bg text-white py-2 px-4 rounded-lg font-semibold hover:opacity-90 transition-opacity disabled:opacity-50 disabled:cursor-not-allowed mt-4 text-sm">
<i data-lucide="play" class="w-4 h-4 inline mr-1"></i>
Start Evaluation
</button>
</div>
</div>
<!-- Results Panel -->
<div id="resultsPanel" class="bg-white rounded-xl shadow-lg p-6 card-hover hidden">
<div class="flex items-center space-x-3 mb-4">
<i data-lucide="bar-chart" class="w-6 h-6 text-purple-600"></i>
<h2 class="text-xl font-semibold text-gray-800">Evaluation Results</h2>
</div>
<div id="resultsContent">
<!-- Results will be populated by JavaScript -->
</div>
</div>
</div>
<!-- Right Panel - Progress & Logs (1 column) -->
<div class="space-y-6">
<!-- Progress -->
<div class="bg-white rounded-xl shadow-lg p-4 card-hover">
<div class="flex items-center space-x-2 mb-3">
<i data-lucide="activity" class="w-5 h-5 text-purple-600"></i>
<h2 class="text-lg font-semibold text-gray-800">Progress</h2>
</div>
<div id="progressSection" class="hidden">
<div class="mb-3">
<div class="flex justify-between text-xs text-gray-600 mb-1">
<span id="currentStep">Initializing...</span>
<span id="progressPercent">0%</span>
</div>
<div class="w-full bg-gray-200 rounded-full h-2">
<div id="progressBar" class="bg-gradient-to-r from-purple-500 to-blue-500 h-2 rounded-full progress-bar" style="width: 0%"></div>
</div>
</div>
</div>
<div id="idleMessage" class="text-center text-gray-500 py-4">
<i data-lucide="clock" class="w-8 h-8 mx-auto mb-2 text-gray-300"></i>
<p class="text-sm">Ready to start</p>
</div>
</div>
<!-- Live Logs -->
<div class="bg-white rounded-xl shadow-lg p-4 card-hover">
<div class="flex items-center space-x-2 mb-3">
<i data-lucide="terminal" class="w-5 h-5 text-purple-600"></i>
<h2 class="text-lg font-semibold text-gray-800">Live Logs</h2>
</div>
<div id="logsContainer" class="bg-gray-900 text-green-400 p-3 rounded-lg h-64 overflow-y-auto font-mono text-xs">
<div class="text-gray-500">Waiting for evaluation to start...</div>
</div>
</div>
</div>
</div>
</div>
<script>
// Global state
let selectedModels = [];
let selectedDataset = null;
let selectedMetrics = [];
let websocket = null;
let currentEvaluationId = null;
// Models data
const models = """ + json.dumps(HF_MODELS) + """;
const datasets = """ + json.dumps(EVALUATION_DATASETS) + """;
const metrics = """ + json.dumps(EVALUATION_METRICS) + """;
// Initialize the application
document.addEventListener('DOMContentLoaded', function() {
lucide.createIcons();
renderModels();
renderDatasets();
renderMetrics();
setupEventListeners();
});
function setupEventListeners() {
// Sample size slider
document.getElementById('sampleSize').addEventListener('input', function() {
document.getElementById('sampleSizeValue').textContent = this.value;
});
// Temperature slider
document.getElementById('temperature').addEventListener('input', function() {
document.getElementById('temperatureValue').textContent = this.value;
});
}
function renderModels() {
const grid = document.getElementById('modelGrid');
grid.innerHTML = '';
Object.keys(models).forEach(category => {
models[category].forEach(model => {
const modelCard = createModelCard(model, category);
grid.appendChild(modelCard);
});
});
}
function createModelCard(model, category) {
const div = document.createElement('div');
div.className = `model-card p-2 border rounded-lg cursor-pointer hover:shadow-md transition-all compact-card`;
div.dataset.category = category;
div.dataset.modelId = model.id;
div.innerHTML = `
<div class="flex items-start justify-between mb-1">
<div class="flex-1">
<h3 class="font-semibold text-gray-800 text-sm">${model.name}</h3>
<p class="text-xs text-gray-500">${model.provider}</p>
</div>
<div class="text-xs bg-gray-100 px-2 py-1 rounded">${model.size}</div>
</div>
<p class="text-xs text-gray-600 mb-2 line-clamp-2">${model.description}</p>
<div class="flex flex-wrap gap-1">
${model.capabilities.slice(0, 2).map(cap => `<span class="text-xs bg-purple-100 text-purple-700 px-1 py-0.5 rounded">${cap}</span>`).join('')}
</div>
`;
div.addEventListener('click', () => toggleModelSelection(model.id, model.name, div));
return div;
}
function toggleModelSelection(modelId, modelName, element) {
if (selectedModels.includes(modelId)) {
selectedModels = selectedModels.filter(id => id !== modelId);
element.classList.remove('ring-2', 'ring-purple-500', 'bg-purple-50');
} else {
selectedModels.push(modelId);
element.classList.add('ring-2', 'ring-purple-500', 'bg-purple-50');
}
updateSelectedModelsTags();
updateSelectedModelsCount();
}
function updateSelectedModelsTags() {
const container = document.getElementById('selectedModelsTags');
container.innerHTML = '';
selectedModels.forEach(modelId => {
const modelName = getModelName(modelId);
const tag = document.createElement('span');
tag.className = 'inline-flex items-center px-2 py-1 text-xs bg-purple-100 text-purple-800 rounded-full mr-1 mb-1';
tag.innerHTML = `
${modelName}
<button onclick="removeModel('${modelId}')" class="ml-1 text-purple-600 hover:text-purple-800">
<i data-lucide="x" class="w-3 h-3"></i>
</button>
`;
container.appendChild(tag);
});
lucide.createIcons();
}
function removeModel(modelId) {
selectedModels = selectedModels.filter(id => id !== modelId);
// Update UI
const modelCard = document.querySelector(`[data-model-id="${modelId}"]`);
if (modelCard) {
modelCard.classList.remove('ring-2', 'ring-purple-500', 'bg-purple-50');
}
updateSelectedModelsTags();
updateSelectedModelsCount();
}
function getModelName(modelId) {
for (const category of Object.values(models)) {
for (const model of category) {
if (model.id === modelId) {
return model.name;
}
}
}
return modelId.split('/').pop();
}
function updateSelectedModelsCount() {
document.getElementById('selectedModelsCount').textContent = `(${selectedModels.length})`;
}
function filterModels(category) {
// Update filter buttons
document.querySelectorAll('[id^="filter-"]').forEach(btn => {
btn.className = btn.className.replace('tag-selected', 'tag-unselected');
});
document.getElementById(`filter-${category}`).className =
document.getElementById(`filter-${category}`).className.replace('tag-unselected', 'tag-selected');
// Filter model cards
document.querySelectorAll('.model-card').forEach(card => {
if (category === 'all' || card.dataset.category === category) {
card.style.display = 'block';
} else {
card.style.display = 'none';
}
});
}
function renderDatasets() {
const grid = document.getElementById('datasetGrid');
grid.innerHTML = '';
Object.keys(datasets).forEach(category => {
datasets[category].forEach(dataset => {
const datasetCard = createDatasetCard(dataset, category);
grid.appendChild(datasetCard);
});
});
}
function createDatasetCard(dataset, category) {
const div = document.createElement('div');
div.className = `dataset-card p-2 border rounded-lg cursor-pointer hover:shadow-md transition-all compact-card`;
div.dataset.category = category;
div.dataset.datasetId = dataset.id;
div.innerHTML = `
<div class="flex items-start justify-between mb-1">
<div class="flex-1">
<h3 class="font-semibold text-gray-800 text-sm">${dataset.name}</h3>
<p class="text-xs text-gray-600 line-clamp-2">${dataset.description}</p>
</div>
<div class="text-xs bg-gray-100 px-1 py-0.5 rounded">${dataset.samples.toLocaleString()}</div>
</div>
<div class="flex justify-between items-center mt-2">
<span class="text-xs bg-blue-100 text-blue-700 px-1 py-0.5 rounded">${dataset.task_type}</span>
<span class="text-xs text-gray-500">${dataset.difficulty}</span>
</div>
`;
div.addEventListener('click', () => selectDataset(dataset.id, dataset.name, div));
return div;
}
function selectDataset(datasetId, datasetName, element) {
// Remove previous selection
document.querySelectorAll('.dataset-card').forEach(card => {
card.classList.remove('ring-2', 'ring-purple-500', 'bg-purple-50');
});
// Add selection to clicked element
element.classList.add('ring-2', 'ring-purple-500', 'bg-purple-50');
selectedDataset = datasetId;
// Update selected dataset tag
updateSelectedDatasetTag(datasetName);
}
function updateSelectedDatasetTag(datasetName) {
const container = document.getElementById('selectedDatasetTag');
container.innerHTML = `
<span class="inline-flex items-center px-2 py-1 text-xs bg-blue-100 text-blue-800 rounded-full">
${datasetName}
<button onclick="removeDataset()" class="ml-1 text-blue-600 hover:text-blue-800">
<i data-lucide="x" class="w-3 h-3"></i>
</button>
</span>
`;
lucide.createIcons();
}
function removeDataset() {
selectedDataset = null;
document.getElementById('selectedDatasetTag').innerHTML = '';
document.querySelectorAll('.dataset-card').forEach(card => {
card.classList.remove('ring-2', 'ring-purple-500', 'bg-purple-50');
});
}
function filterDatasets(category) {
// Update filter buttons
document.querySelectorAll('[id^="dataset-filter-"]').forEach(btn => {
btn.className = btn.className.replace('tag-selected', 'tag-unselected');
});
document.getElementById(`dataset-filter-${category}`).className =
document.getElementById(`dataset-filter-${category}`).className.replace('tag-unselected', 'tag-selected');
// Filter dataset cards
document.querySelectorAll('.dataset-card').forEach(card => {
if (category === 'all' || card.dataset.category === category) {
card.style.display = 'block';
} else {
card.style.display = 'none';
}
});
}
function renderMetrics() {
const grid = document.getElementById('metricsGrid');
grid.innerHTML = '';
metrics.forEach(metric => {
const div = document.createElement('div');
div.className = 'flex items-center space-x-2';
div.innerHTML = `
<input type="checkbox" id="metric-${metric.id}" class="rounded text-purple-600 focus:ring-purple-500">
<label for="metric-${metric.id}" class="text-xs text-gray-700 cursor-pointer">${metric.name}</label>
`;
const checkbox = div.querySelector('input');
checkbox.addEventListener('change', () => {
if (checkbox.checked) {
selectedMetrics.push(metric.id);
} else {
selectedMetrics = selectedMetrics.filter(id => id !== metric.id);
}
updateSelectedMetricsTags();
});
grid.appendChild(div);
});
}
function updateSelectedMetricsTags() {
const container = document.getElementById('selectedMetricsTags');
container.innerHTML = '';
selectedMetrics.forEach(metricId => {
const metricName = getMetricName(metricId);
const tag = document.createElement('span');
tag.className = 'inline-flex items-center px-2 py-1 text-xs bg-green-100 text-green-800 rounded-full mr-1 mb-1';
tag.innerHTML = `
${metricName}
<button onclick="removeMetric('${metricId}')" class="ml-1 text-green-600 hover:text-green-800">
<i data-lucide="x" class="w-3 h-3"></i>
</button>
`;
container.appendChild(tag);
});
lucide.createIcons();
}
function removeMetric(metricId) {
selectedMetrics = selectedMetrics.filter(id => id !== metricId);
// Update checkbox
const checkbox = document.getElementById(`metric-${metricId}`);
if (checkbox) {
checkbox.checked = false;
}
updateSelectedMetricsTags();
}
function getMetricName(metricId) {
const metric = metrics.find(m => m.id === metricId);
return metric ? metric.name : metricId;
}
function startEvaluation() {
// Validation
if (selectedModels.length === 0) {
alert('Please select at least one model');
return;
}
if (!selectedDataset) {
alert('Please select a dataset');
return;
}
if (selectedMetrics.length === 0) {
alert('Please select at least one metric');
return;
}
// Prepare request
const request = {
models: selectedModels,
dataset: selectedDataset,
metrics: selectedMetrics,
sample_size: parseInt(document.getElementById('sampleSize').value),
temperature: parseFloat(document.getElementById('temperature').value),
max_tokens: 512,
top_p: 0.9
};
// Start evaluation
fetch('/api/evaluate', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify(request)
})
.then(response => response.json())
.then(data => {
if (data.status === 'started') {
currentEvaluationId = data.evaluation_id;
connectWebSocket(data.evaluation_id);
showProgress();
disableStartButton();
} else {
alert('Failed to start evaluation: ' + data.message);
}
})
.catch(error => {
console.error('Error:', error);
alert('Failed to start evaluation');
});
}
function connectWebSocket(evaluationId) {
const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
const wsUrl = `${protocol}//${window.location.host}/ws/${evaluationId}`;
websocket = new WebSocket(wsUrl);
websocket.onmessage = function(event) {
const data = JSON.parse(event.data);
handleWebSocketMessage(data);
};
websocket.onclose = function() {
console.log('WebSocket connection closed');
};
websocket.onerror = function(error) {
console.error('WebSocket error:', error);
};
}
function handleWebSocketMessage(data) {
switch (data.type) {
case 'progress':
updateProgress(data.progress, data.current_step);
break;
case 'log':
addLogEntry(data);
break;
case 'complete':
showResults(data.results);
enableStartButton();
break;
case 'error':
addLogEntry({
level: 'ERROR',
message: data.message,
timestamp: new Date().toISOString()
});
enableStartButton();
break;
}
}
function showProgress() {
document.getElementById('idleMessage').classList.add('hidden');
document.getElementById('progressSection').classList.remove('hidden');
clearLogs();
}
function updateProgress(progress, currentStep) {
document.getElementById('progressBar').style.width = progress + '%';
document.getElementById('progressPercent').textContent = Math.round(progress) + '%';
document.getElementById('currentStep').textContent = currentStep;
}
function addLogEntry(logData) {
const container = document.getElementById('logsContainer');
const entry = document.createElement('div');
entry.className = 'log-entry mb-1';
const timestamp = new Date(logData.timestamp).toLocaleTimeString();
const levelColor = {
'INFO': 'text-blue-400',
'SUCCESS': 'text-green-400',
'ERROR': 'text-red-400',
'DEBUG': 'text-gray-400'
}[logData.level] || 'text-green-400';
entry.innerHTML = `
<span class="text-gray-500">[${timestamp}]</span>
<span class="${levelColor}">[${logData.level}]</span>
<span>${logData.message}</span>
`;
container.appendChild(entry);
container.scrollTop = container.scrollHeight;
}
function clearLogs() {
document.getElementById('logsContainer').innerHTML = '';
}
function showResults(results) {
const panel = document.getElementById('resultsPanel');
const content = document.getElementById('resultsContent');
let html = '<div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4">';
Object.keys(results).forEach(modelId => {
const modelName = getModelName(modelId);
const modelResults = results[modelId];
html += `
<div class="border rounded-lg p-4 bg-gray-50">
<h3 class="font-semibold text-gray-800 mb-3">${modelName}</h3>
<div class="space-y-2">
`;
Object.keys(modelResults).forEach(metric => {
const value = modelResults[metric];
html += `
<div class="flex justify-between items-center">
<span class="text-sm text-gray-600">${metric.toUpperCase()}</span>
<span class="text-lg font-semibold text-gray-800">${value}</span>
</div>
`;
});
html += '</div></div>';
});
html += '</div>';
content.innerHTML = html;
panel.classList.remove('hidden');
}
function disableStartButton() {
const btn = document.getElementById('startBtn');
btn.disabled = true;
btn.innerHTML = '<i data-lucide="loader" class="w-4 h-4 inline mr-1 animate-spin"></i>Running...';
lucide.createIcons();
}
function enableStartButton() {
const btn = document.getElementById('startBtn');
btn.disabled = false;
btn.innerHTML = '<i data-lucide="play" class="w-4 h-4 inline mr-1"></i>Start Evaluation';
lucide.createIcons();
}
</script>
</body>
</html>
"""
@app.get("/api/models")
async def get_models():
"""Get available models"""
return {"models": HF_MODELS}
@app.get("/api/datasets")
async def get_datasets():
"""Get available datasets"""
return {"datasets": EVALUATION_DATASETS}
@app.get("/api/metrics")
async def get_metrics():
"""Get available metrics"""
return {"metrics": EVALUATION_METRICS}
@app.post("/api/evaluate")
async def start_evaluation(request: EvaluationRequest):
"""Start a new evaluation"""
evaluation_id = str(uuid.uuid4())
# Start evaluation in background
asyncio.create_task(simulate_evaluation(evaluation_id, request))
return EvaluationResponse(
evaluation_id=evaluation_id,
status="started",
message="Evaluation started successfully"
)
@app.get("/api/evaluation/{evaluation_id}")
async def get_evaluation_status(evaluation_id: str):
"""Get evaluation status"""
if evaluation_id not in active_evaluations:
raise HTTPException(status_code=404, detail="Evaluation not found")
return active_evaluations[evaluation_id]
@app.websocket("/ws/{evaluation_id}")
async def websocket_endpoint(websocket: WebSocket, evaluation_id: str):
"""WebSocket endpoint for real-time updates"""
await websocket.accept()
websocket_connections[evaluation_id] = websocket
try:
while True:
# Keep connection alive
await asyncio.sleep(1)
except WebSocketDisconnect:
if evaluation_id in websocket_connections:
del websocket_connections[evaluation_id]
@app.get("/api/health")
async def health_check():
"""Health check endpoint"""
return {"status": "healthy", "timestamp": datetime.now().isoformat()}
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=7860)