Update app.py
app.py
CHANGED
@@ -53,147 +53,6 @@ def exact_match_score(prediction, ground_truth):
     """Calculate exact match score"""
     return normalize_answer(prediction) == normalize_answer(ground_truth)

-def max_over_ground_truths(metric_fn, prediction, ground_truths):
-    """Calculate maximum score over all ground truth answers"""
-    scores = []
-    for ground_truth in ground_truths:
-        score = metric_fn(prediction, ground_truth)
-        scores.append(score)
-    return max(scores) if scores else 0
-
-def load_cuad_dataset(hf_token=None):
-    """Try multiple methods to load CUAD dataset"""
-    print("Attempting to load CUAD dataset...")
-
-    # Method 1: Try theatticusproject/cuad
-    try:
-        print("Trying theatticusproject/cuad...")
-        dataset = load_dataset("theatticusproject/cuad", token=hf_token)
-        if "test" in dataset:
-            test_data = dataset["test"]
-            print(f"✓ Loaded theatticusproject/cuad with {len(test_data)} test samples")
-            return test_data, "theatticusproject/cuad"
-        elif "validation" in dataset:
-            test_data = dataset["validation"]
-            print(f"✓ Loaded theatticusproject/cuad with {len(test_data)} validation samples")
-            return test_data, "theatticusproject/cuad"
-        else:
-            print("No test or validation split found in theatticusproject/cuad")
-    except Exception as e:
-        print(f"Failed to load theatticusproject/cuad: {e}")
-
-    # Method 2: Try theatticusproject/cuad-qa
-    try:
-        print("Trying theatticusproject/cuad-qa...")
-        dataset = load_dataset("theatticusproject/cuad-qa", token=hf_token)
-        if "test" in dataset:
-            test_data = dataset["test"]
-            print(f"✓ Loaded theatticusproject/cuad-qa with {len(test_data)} test samples")
-            return test_data, "theatticusproject/cuad-qa"
-        elif "validation" in dataset:
-            test_data = dataset["validation"]
-            print(f"✓ Loaded theatticusproject/cuad-qa with {len(test_data)} validation samples")
-            return test_data, "theatticusproject/cuad-qa"
-    except Exception as e:
-        print(f"Failed to load theatticusproject/cuad-qa: {e}")
-
-    # Method 3: Try the original cuad identifier
-    try:
-        print("Trying cuad...")
-        dataset = load_dataset("cuad", token=hf_token)
-        if "test" in dataset:
-            test_data = dataset["test"]
-            print(f"✓ Loaded cuad with {len(test_data)} test samples")
-            return test_data, "cuad"
-        elif "validation" in dataset:
-            test_data = dataset["validation"]
-            print(f"✓ Loaded cuad with {len(test_data)} validation samples")
-            return test_data, "cuad"
-    except Exception as e:
-        print(f"Failed to load cuad: {e}")
-
-    # Method 4: Try with trust_remote_code=True
-    try:
-        print("Trying with trust_remote_code=True...")
-        dataset = load_dataset("theatticusproject/cuad", token=hf_token, trust_remote_code=True)
-        if "test" in dataset:
-            test_data = dataset["test"]
-            print(f"✓ Loaded with trust_remote_code, test samples: {len(test_data)}")
-            return test_data, "theatticusproject/cuad (trust_remote_code)"
-        elif "validation" in dataset:
-            test_data = dataset["validation"]
-            print(f"✓ Loaded with trust_remote_code, validation samples: {len(test_data)}")
-            return test_data, "theatticusproject/cuad (trust_remote_code)"
-    except Exception as e:
-        print(f"Failed with trust_remote_code: {e}")
-
-    # Method 5: Create a synthetic CUAD-like dataset for testing
-    print("⚠️ Creating synthetic CUAD-like test data...")
-    synthetic_data = []
-
-    # Create some contract-like questions and contexts
-    contract_samples = [
-        {
-            "context": "This Agreement shall commence on January 1, 2024 and shall continue for a period of twelve (12) months unless terminated earlier in accordance with the terms hereof. The initial term may be extended for additional periods of twelve (12) months each upon mutual written consent of both parties.",
-            "question": "What is the duration of the agreement?",
-            "answers": {"text": ["twelve (12) months", "12 months"], "answer_start": [85, 85]}
-        },
-        {
-            "context": "The Company shall pay the Consultant a fee of $50,000 per month for services rendered under this Agreement. Payment shall be made within thirty (30) days of the end of each calendar month.",
-            "question": "What is the monthly fee?",
-            "answers": {"text": ["$50,000 per month", "$50,000"], "answer_start": [45, 45]}
-        },
-        {
-            "context": "Either party may terminate this Agreement immediately upon written notice in the event of a material breach by the other party that remains uncured for thirty (30) days after written notice of such breach.",
-            "question": "What is the cure period for material breach?",
-            "answers": {"text": ["thirty (30) days", "30 days"], "answer_start": [125, 132]}
-        },
-        {
-            "context": "The Contractor shall maintain commercial general liability insurance with coverage of not less than $1,000,000 per occurrence and $2,000,000 in the aggregate.",
-            "question": "What is the minimum insurance coverage per occurrence?",
-            "answers": {"text": ["$1,000,000 per occurrence", "$1,000,000"], "answer_start": [85, 85]}
-        },
-        {
-            "context": "All intellectual property developed under this Agreement shall be owned by the Company. The Contractor hereby assigns all rights, title and interest in such intellectual property to the Company.",
-            "question": "Who owns the intellectual property?",
-            "answers": {"text": ["the Company", "Company"], "answer_start": [70, 74]}
-        }
-    ]
-
-    # Duplicate samples to create a larger dataset
-    for i in range(100):  # Create 500 samples
-        sample = contract_samples[i % len(contract_samples)].copy()
-        sample["id"] = f"synthetic_{i}"
-        synthetic_data.append(sample)
-
-    # Convert to dataset format
-    from datasets import Dataset
-    test_data = Dataset.from_list(synthetic_data)
-
-    print(f"✓ Created synthetic CUAD-like dataset with {len(test_data)} samples")
-    return test_data, "synthetic_cuad"
-
-def inspect_dataset_structure(dataset, dataset_name="dataset"):
-    """Inspect dataset structure for debugging"""
-    print(f"\n=== {dataset_name} Dataset Structure ===")
-    print(f"Dataset type: {type(dataset)}")
-    print(f"Dataset length: {len(dataset)}")
-
-    if len(dataset) > 0:
-        sample = dataset[0]
-        print(f"Sample keys: {list(sample.keys()) if isinstance(sample, dict) else 'Not a dict'}")
-        print(f"Sample structure:")
-        for key, value in sample.items():
-            if isinstance(value, dict):
-                print(f"  {key} (dict): {list(value.keys())}")
-                for sub_key, sub_value in value.items():
-                    print(f"    {sub_key}: {type(sub_value)} - {str(sub_value)[:50]}...")
-            else:
-                print(f"  {key}: {type(value)} - {str(value)[:100]}...")
-        print("=" * 50)
-
-    return dataset
-
 def evaluate_model():
     # Authenticate with Hugging Face using the token
     hf_token = os.getenv("EVAL_TOKEN")
@@ -207,7 +66,7 @@ def evaluate_model():
         print("⚠ Warning: EVAL_TOKEN not found in environment variables")

     print("Loading model and tokenizer...")
-    model_name = "
+    model_name = "AvocadoMuffin/roberta-cuad-qa-v3"

     try:
         tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
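For context, the evaluation loop further down reads predictions from a `qa_pipeline` object whose construction lies outside the changed lines. A minimal sketch of how such a pipeline could be assembled from the model loaded here, assuming the standard `transformers` pipeline API (the actual wiring in app.py may differ):

```python
# Sketch only - the real app.py builds its pipeline elsewhere in evaluate_model().
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

model_name = "AvocadoMuffin/roberta-cuad-qa-v3"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)

# A "question-answering" pipeline returns a dict with "answer" and "score",
# which is exactly what the evaluation loop below reads.
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)
```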
@@ -229,19 +88,26 @@ def run_evaluation(num_samples, progress=gr.Progress()):

     progress(0.1, desc="Loading CUAD dataset...")

-    # Load dataset
-
-
-
-
-
-
+    # Load dataset - use QA format version (JSON, no PDFs)
+    try:
+        # Try the QA-specific version first (much faster, JSON format)
+        dataset = load_dataset("theatticusproject/cuad-qa", trust_remote_code=True, token=hf_token)
+        test_data = dataset["test"]
+        print(f"✓ Loaded CUAD-QA dataset with {len(test_data)} samples")
+    except Exception as e:
+        try:
+            # Fallback to original but limit to avoid PDF downloads
+            dataset = load_dataset("cuad", split="test[:1000]", trust_remote_code=True, token=hf_token)
+            test_data = dataset
+            print(f"✓ Loaded CUAD dataset with {len(test_data)} samples")
+        except Exception as e2:
+            return f"❌ Error loading dataset: {e2}", pd.DataFrame(), None

     # Limit samples
     num_samples = min(num_samples, len(test_data))
     test_subset = test_data.select(range(num_samples))

-    progress(0.2, desc=f"Starting evaluation on {num_samples} samples
+    progress(0.2, desc=f"Starting evaluation on {num_samples} samples...")

     # Initialize metrics
     exact_matches = []
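The new loading path relies on the dataset exposing SQuAD-style records (`context`, `question`, and `answers` with `text`/`answer_start` lists). A small sketch for checking that assumption and for taking a fixed-size subset the way `run_evaluation` does; the split name and field layout are assumptions taken from the code above rather than guarantees:

```python
# Sketch: verify the schema the evaluation loop expects before running it.
from datasets import load_dataset

dataset = load_dataset("theatticusproject/cuad-qa", trust_remote_code=True)  # a token may be required
test_data = dataset["test"]

sample = test_data[0]
print(sample["question"])
print(sample["answers"]["text"][:3])          # acceptable answer spans
print(sample["answers"]["answer_start"][:3])  # character offsets into the contract text

# Limit to the first N samples, mirroring run_evaluation()
subset = test_data.select(range(min(100, len(test_data))))
print(f"Evaluating {len(subset)} samples")
```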
@@ -253,53 +119,23 @@ def run_evaluation(num_samples, progress=gr.Progress()):
         progress((0.2 + 0.7 * i / num_samples), desc=f"Processing sample {i+1}/{num_samples}")

         try:
-
-
-
-            elif "text" in example:
-                context = example["text"]
-            else:
-                print(f"Warning: No context found in sample {i}")
-                continue
-
-            if "question" in example:
-                question = example["question"]
-            elif "title" in example:
-                question = example["title"]
-            else:
-                print(f"Warning: No question found in sample {i}")
-                continue
-
-            # Handle answers field
-            ground_truths = []
-            if "answers" in example:
-                answers = example["answers"]
-                if isinstance(answers, dict):
-                    if "text" in answers:
-                        if isinstance(answers["text"], list):
-                            ground_truths = [ans for ans in answers["text"] if ans and ans.strip()]
-                        else:
-                            ground_truths = [answers["text"]] if answers["text"] and answers["text"].strip() else []
-                elif isinstance(answers, list):
-                    ground_truths = [ans for ans in answers if ans and ans.strip()]
-
-            # Skip if no ground truth
-            if not ground_truths:
-                print(f"Warning: No ground truth found for sample {i}")
-                continue
+            context = example["context"]
+            question = example["question"]
+            answers = example["answers"]

             # Get model prediction
-
-
-
-
-
-
-
+            result = qa_pipeline(question=question, context=context)
+            predicted_answer = result["answer"]
+
+            # Get ground truth answers
+            if answers["text"] and len(answers["text"]) > 0:
+                ground_truth = answers["text"][0] if isinstance(answers["text"], list) else answers["text"]
+            else:
+                ground_truth = ""

-            # Calculate metrics
-            em =
-            f1 =
+            # Calculate metrics
+            em = exact_match_score(predicted_answer, ground_truth)
+            f1 = f1_score_qa(predicted_answer, ground_truth)

             exact_matches.append(em)
             f1_scores.append(f1)
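`exact_match_score` appears in the first hunk, but `normalize_answer` and `f1_score_qa` live outside the changed region. A plausible SQuAD-style sketch of those helpers, consistent with how the loop above calls them (not necessarily the exact code in app.py):

```python
# Sketch of the metric helpers assumed by the loop above; f1_score_qa is not
# shown in this diff, so this is a plausible SQuAD-style version.
import re
import string
from collections import Counter

def normalize_answer(s):
    """Lowercase, strip punctuation and articles, collapse whitespace."""
    s = s.lower()
    s = "".join(ch for ch in s if ch not in set(string.punctuation))
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())

def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)

def f1_score_qa(prediction, ground_truth):
    """Token-level overlap F1 between prediction and one ground-truth answer."""
    pred_tokens = normalize_answer(prediction).split()
    gt_tokens = normalize_answer(ground_truth).split()
    common = Counter(pred_tokens) & Counter(gt_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gt_tokens)
    return 2 * precision * recall / (precision + recall)
```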
@@ -307,12 +143,11 @@ def run_evaluation(num_samples, progress=gr.Progress()):
             predictions.append({
                 "Sample_ID": i+1,
                 "Question": question[:100] + "..." if len(question) > 100 else question,
-                "Predicted_Answer": predicted_answer
-                "Ground_Truth":
-                "Num_Ground_Truths": len(ground_truths),
+                "Predicted_Answer": predicted_answer,
+                "Ground_Truth": ground_truth,
                 "Exact_Match": em,
                 "F1_Score": round(f1, 3),
-                "Confidence": round(
+                "Confidence": round(result["score"], 3)
             })

         except Exception as e:
@@ -328,42 +163,21 @@ def run_evaluation(num_samples, progress=gr.Progress()):
     avg_exact_match = np.mean(exact_matches) * 100
     avg_f1_score = np.mean(f1_scores) * 100

-    # Calculate additional statistics
-    high_confidence_samples = [p for p in predictions if p['Confidence'] > 0.8]
-    perfect_matches = [p for p in predictions if p['Exact_Match'] == 1]
-    high_f1_samples = [p for p in predictions if p['F1_Score'] > 0.8]
-
     # Create results summary
     results_summary = f"""
 # 📊 CUAD Model Evaluation Results
-
-## ⚠️ Dataset Information
-- **Dataset Used**: {dataset_name}
-- **Dataset Status**: {"✅ Authentic CUAD" if "cuad" in dataset_name.lower() and "synthetic" not in dataset_name else "⚠️ Fallback/Synthetic Data"}
-
 ## 🎯 Overall Performance
 - **Model**: AvocadoMuffin/roberta-cuad-qa-v3
+- **Dataset**: CUAD (Contract Understanding Atticus Dataset)
 - **Samples Evaluated**: {len(exact_matches)}
 - **Evaluation Date**: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
-
-## 📈 Core Metrics
+## 📈 Metrics
 - **Exact Match Score**: {avg_exact_match:.2f}%
 - **F1 Score**: {avg_f1_score:.2f}%
-
 ## 🔍 Performance Analysis
-- **High Confidence Predictions (>0.8)
-- **Perfect Matches**: {len(
-- **High F1 Scores (>0.8)**: {len(
-
-## 📊 Distribution
-- **Average Confidence**: {np.mean([p['Confidence'] for p in predictions]):.3f}
-- **Median F1 Score**: {np.median([p['F1_Score'] for p in predictions]):.3f}
-- **Samples with Multiple Ground Truths**: {len([p for p in predictions if p['Num_Ground_Truths'] > 1])}
-
-## 🎯 Evaluation Quality
-{"✅ This evaluation uses the proper CUAD dataset for contract understanding tasks." if "cuad" in dataset_name.lower() and "synthetic" not in dataset_name else "⚠️ WARNING: This evaluation used fallback data. Results may not be representative of actual CUAD performance."}
-
-The evaluation accounts for multiple ground truth answers where available, using the maximum score across all valid answers for each question.
+- **High Confidence Predictions**: {len([p for p in predictions if p['Confidence'] > 0.8])} ({len([p for p in predictions if p['Confidence'] > 0.8])/len(predictions)*100:.1f}%)
+- **Perfect Matches**: {len([p for p in predictions if p['Exact_Match'] == 1])} ({len([p for p in predictions if p['Exact_Match'] == 1])/len(predictions)*100:.1f}%)
+- **High F1 Scores (>0.8)**: {len([p for p in predictions if p['F1_Score'] > 0.8])} ({len([p for p in predictions if p['F1_Score'] > 0.8])/len(predictions)*100:.1f}%)
 """

     # Create detailed results DataFrame
@@ -375,19 +189,12 @@ The evaluation accounts for multiple ground truth answers where available, using

     detailed_results = {
         "model_name": "AvocadoMuffin/roberta-cuad-qa-v3",
-        "dataset":
+        "dataset": "cuad",
         "num_samples": len(exact_matches),
         "exact_match_score": avg_exact_match,
         "f1_score": avg_f1_score,
         "evaluation_date": datetime.now().isoformat(),
-        "
-        "dataset_authentic": "cuad" in dataset_name.lower() and "synthetic" not in dataset_name,
-        "predictions": predictions,
-        "summary_stats": {
-            "avg_confidence": float(np.mean([p['Confidence'] for p in predictions])),
-            "median_f1": float(np.median([p['F1_Score'] for p in predictions])),
-            "samples_with_multiple_ground_truths": len([p for p in predictions if p['Num_Ground_Truths'] > 1])
-        }
+        "predictions": predictions
     }

     try:
@@ -410,8 +217,7 @@ def create_gradio_interface():
     <div style="text-align: center; padding: 20px;">
         <h1>🏛️ CUAD Model Evaluation Dashboard</h1>
         <p>Evaluate your CUAD (Contract Understanding Atticus Dataset) Question Answering model</p>
-        <p><strong>Model:</strong> AvocadoMuffin/roberta-cuad-qa-
-        <p><em>This tool will attempt to load the authentic CUAD dataset, with fallbacks if needed.</em></p>
+        <p><strong>Model:</strong> AvocadoMuffin/roberta-cuad-qa-v2</p>
     </div>
     """)

@@ -441,9 +247,7 @@ def create_gradio_interface():
                 <li><strong>Exact Match</strong>: Percentage of perfect predictions</li>
                 <li><strong>F1 Score</strong>: Token-level overlap between prediction and ground truth</li>
                 <li><strong>Confidence</strong>: Model's confidence in its predictions</li>
-                <li><strong>Max-over-GT</strong>: Best score across multiple ground truth answers</li>
             </ul>
-            <p><strong>Note:</strong> This tool will try to load the authentic CUAD dataset. If that fails, it will use synthetic contract data for testing purposes.</p>
         </div>
         """)

@@ -493,7 +297,6 @@
     <div style="text-align: center; margin-top: 30px; padding: 20px; color: #666;">
         <p>🤗 Powered by Hugging Face Transformers & Gradio</p>
         <p>📚 CUAD Dataset by The Atticus Project</p>
-        <p><small>⚠️ If authentic CUAD data cannot be loaded, synthetic contract data will be used for testing purposes.</small></p>
     </div>
     """)
