Spaces: Running
Update app.py
Browse files
app.py CHANGED
@@ -53,6 +53,86 @@ def exact_match_score(prediction, ground_truth):
     """Calculate exact match score"""
     return normalize_answer(prediction) == normalize_answer(ground_truth)
 
+def has_answer(answers):
+    """Check if the question has any valid answers"""
+    if not answers or not answers.get("text"):
+        return False
+
+    answer_texts = answers["text"] if isinstance(answers["text"], list) else [answers["text"]]
+    return any(text.strip() for text in answer_texts)
+
+def get_top_k_predictions(qa_pipeline, question, context, k=3):
+    """Get top-k predictions from the model"""
+    # Get raw model outputs
+    inputs = qa_pipeline.tokenizer(question, context, return_tensors="pt", truncation=True, max_length=512)
+
+    with torch.no_grad():
+        outputs = qa_pipeline.model(**inputs)
+        start_logits = outputs.start_logits
+        end_logits = outputs.end_logits
+
+    # Get top-k start and end positions
+    start_scores, start_indices = torch.topk(start_logits.flatten(), k)
+    end_scores, end_indices = torch.topk(end_logits.flatten(), k)
+
+    predictions = []
+
+    # Generate all combinations of start and end positions
+    for start_idx in start_indices:
+        for end_idx in end_indices:
+            if start_idx <= end_idx:  # Valid span
+                # Convert to answer text
+                input_ids = inputs["input_ids"][0]
+                answer_tokens = input_ids[start_idx:end_idx + 1]
+                answer_text = qa_pipeline.tokenizer.decode(answer_tokens, skip_special_tokens=True)
+
+                # Calculate combined score
+                start_score = start_logits[0][start_idx].item()
+                end_score = end_logits[0][end_idx].item()
+                combined_score = start_score + end_score
+
+                predictions.append({
+                    "answer": answer_text,
+                    "score": combined_score,
+                    "start": start_idx.item(),
+                    "end": end_idx.item()
+                })
+
+    # Sort by score and return top-k unique answers
+    predictions.sort(key=lambda x: x["score"], reverse=True)
+    unique_answers = []
+    seen_answers = set()
+
+    for pred in predictions:
+        normalized_answer = normalize_answer(pred["answer"])
+        if normalized_answer not in seen_answers and len(unique_answers) < k:
+            unique_answers.append(pred)
+            seen_answers.add(normalized_answer)
+
+    return unique_answers
+
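The `combined_score` above is a sum of two raw logits, so it is unbounded rather than a probability. If a 0-to-1 confidence is wanted (for instance to compare against the 0.8 threshold used later in the results summary), one option is to softmax the scores of the candidate spans. A minimal sketch, not part of app.py; the helper name and sample values are illustrative:

```python
import torch

def normalize_span_scores(predictions):
    """Turn raw start+end logit sums into softmax probabilities over the candidate spans."""
    if not predictions:
        return predictions
    scores = torch.tensor([p["score"] for p in predictions])
    probs = torch.softmax(scores, dim=0)
    for pred, prob in zip(predictions, probs):
        pred["confidence"] = prob.item()  # now in [0, 1], comparable to a 0.8 threshold
    return predictions

# Example with made-up logit sums for three candidate spans:
cands = [{"answer": "Purchase Agreement", "score": 11.2},
         {"answer": "the Purchase Agreement dated", "score": 9.7},
         {"answer": "Agreement", "score": 5.1}]
for c in normalize_span_scores(cands):
    print(c["answer"], round(c["confidence"], 3))
```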
+def calculate_top_k_has_ans_f1(predictions, ground_truths, k=1):
+    """Calculate Top-K Has Answer F1 score"""
+    f1_scores = []
+
+    for preds, gt in zip(predictions, ground_truths):
+        if not has_answer(gt):
+            continue  # Skip questions without answers
+
+        # Get ground truth text
+        gt_text = gt["text"][0] if isinstance(gt["text"], list) else gt["text"]
+
+        # Calculate F1 for top-k predictions
+        max_f1 = 0
+        for i in range(min(k, len(preds))):
+            pred_text = preds[i]["answer"]
+            f1 = f1_score_qa(pred_text, gt_text)
+            max_f1 = max(max_f1, f1)
+
+        f1_scores.append(max_f1)
+
+    return np.mean(f1_scores) if f1_scores else 0
+
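A quick usage sketch for the two helpers above, on made-up data; it assumes `f1_score_qa` (used above but defined earlier in app.py, outside this diff) is in scope:

```python
# Two questions: the first is answerable, the second has no gold answer and is skipped.
preds = [
    [{"answer": "30 days", "score": 10.5}, {"answer": "within 30 days", "score": 9.8}],
    [{"answer": "", "score": 0.1}],
]
gts = [
    {"text": ["30 days"], "answer_start": [1042]},
    {"text": [], "answer_start": []},
]

print(calculate_top_k_has_ans_f1(preds, gts, k=1))  # F1 of the single best candidate
print(calculate_top_k_has_ans_f1(preds, gts, k=3))  # best F1 among up to 3 candidates
```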
 def evaluate_model():
     # Authenticate with Hugging Face using the token
     hf_token = os.getenv("EVAL_TOKEN")
@@ -88,15 +168,13 @@ def run_evaluation(num_samples, progress=gr.Progress()):
 
     progress(0.1, desc="Loading CUAD dataset...")
 
+    # Load dataset
     try:
         dataset = load_dataset("theatticusproject/cuad-qa", trust_remote_code=True, token=hf_token)
         test_data = dataset["test"]
         print(f"β Loaded CUAD-QA dataset with {len(test_data)} samples")
     except Exception as e:
         try:
             dataset = load_dataset("cuad", split="test[:1000]", trust_remote_code=True, token=hf_token)
             test_data = dataset
             print(f"β Loaded CUAD dataset with {len(test_data)} samples")
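The evaluation loop in the next hunk reads `context`, `question`, and `answers` from each example; a small sketch for inspecting that schema after loading, using the same dataset id and token handling as the code above (illustrative only):

```python
import os
from datasets import load_dataset

hf_token = os.getenv("EVAL_TOKEN")
dataset = load_dataset("theatticusproject/cuad-qa", trust_remote_code=True, token=hf_token)
sample = dataset["test"][0]

# The loop below relies on these three fields per example.
print(sample["question"][:80])
print(sample["context"][:80])
print(sample["answers"])  # dict with "text" and "answer_start" lists; empty lists mean no answer
```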
@@ -109,97 +187,140 @@ def run_evaluation(num_samples, progress=gr.Progress()):
 
     progress(0.2, desc=f"Starting evaluation on {num_samples} samples...")
 
+    # Initialize storage for predictions and ground truths
+    all_top_k_predictions = []
+    all_ground_truths = []
+    all_has_answer_flags = []
+
+    # Storage for detailed results
+    detailed_results = []
 
     # Run evaluation
     for i, example in enumerate(test_subset):
+        progress((0.2 + 0.6 * i / num_samples), desc=f"Processing sample {i+1}/{num_samples}")
 
         try:
             context = example["context"]
             question = example["question"]
             answers = example["answers"]
 
+            # Check if question has answers
+            has_ans = has_answer(answers)
+            all_has_answer_flags.append(has_ans)
+            all_ground_truths.append(answers)
+
+            # Get top-3 predictions
+            top_k_preds = get_top_k_predictions(qa_pipeline, question, context, k=3)
+            all_top_k_predictions.append(top_k_preds)
 
+            # Get ground truth for display
+            if has_ans:
                 ground_truth = answers["text"][0] if isinstance(answers["text"], list) else answers["text"]
             else:
+                ground_truth = "[No Answer]"
 
+            # Calculate metrics for this sample
+            if has_ans and top_k_preds:
+                top1_f1 = f1_score_qa(top_k_preds[0]["answer"], ground_truth)
+                top3_f1 = max([f1_score_qa(pred["answer"], ground_truth) for pred in top_k_preds[:3]])
+                em = exact_match_score(top_k_preds[0]["answer"], ground_truth)
+            else:
+                top1_f1 = 0
+                top3_f1 = 0
+                em = 0
 
+            detailed_results.append({
                 "Sample_ID": i+1,
                 "Question": question[:100] + "..." if len(question) > 100 else question,
+                "Has_Answer": has_ans,
+                "Top1_Prediction": top_k_preds[0]["answer"] if top_k_preds else "[No Prediction]",
+                "Top3_Predictions": " | ".join([p["answer"] for p in top_k_preds[:3]]),
                 "Ground_Truth": ground_truth,
+                "Top1_F1": round(top1_f1, 3),
+                "Top3_F1": round(top3_f1, 3),
                 "Exact_Match": em,
+                "Top1_Confidence": round(top_k_preds[0]["score"], 3) if top_k_preds else 0
             })
 
         except Exception as e:
             print(f"Error processing sample {i}: {e}")
             continue
 
+    progress(0.8, desc="Calculating final metrics...")
 
+    # Filter for questions with answers only
+    has_ans_predictions = [pred for pred, has_ans in zip(all_top_k_predictions, all_has_answer_flags) if has_ans]
+    has_ans_ground_truths = [gt for gt, has_ans in zip(all_ground_truths, all_has_answer_flags) if has_ans]
 
+    if len(has_ans_predictions) == 0:
+        return "β No samples with answers were found", pd.DataFrame(), None
+
+    # Calculate Top-K Has Answer F1 scores
+    top1_has_ans_f1 = calculate_top_k_has_ans_f1(has_ans_predictions, has_ans_ground_truths, k=1) * 100
+    top3_has_ans_f1 = calculate_top_k_has_ans_f1(has_ans_predictions, has_ans_ground_truths, k=3) * 100
+
+    # Calculate overall metrics
+    total_samples = len(detailed_results)
+    has_answer_samples = len(has_ans_predictions)
+    avg_exact_match = np.mean([r["Exact_Match"] for r in detailed_results]) * 100
+    avg_top1_f1 = np.mean([r["Top1_F1"] for r in detailed_results if r["Has_Answer"]]) * 100
+    avg_top3_f1 = np.mean([r["Top3_F1"] for r in detailed_results if r["Has_Answer"]]) * 100
 
     # Create results summary
     results_summary = f"""
 # π CUAD Model Evaluation Results
+
+## π― Model Performance
 - **Model**: AvocadoMuffin/roberta-cuad-qa-v3
 - **Dataset**: CUAD (Contract Understanding Atticus Dataset)
+- **Total Samples**: {total_samples}
+- **Samples with Answers**: {has_answer_samples}
 - **Evaluation Date**: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
+
+## π Key Metrics (Industry Standard)
+- **Top 1 Has Ans F1**: {top1_has_ans_f1:.2f}%
+- **Top 3 Has Ans F1**: {top3_has_ans_f1:.2f}%
+
+## π Additional Metrics
 - **Exact Match Score**: {avg_exact_match:.2f}%
+- **Average Top-1 F1**: {avg_top1_f1:.2f}%
+- **Average Top-3 F1**: {avg_top3_f1:.2f}%
+
+## π Performance Breakdown
+- **High Confidence Predictions (>0.8)**: {len([r for r in detailed_results if r['Top1_Confidence'] > 0.8])} ({len([r for r in detailed_results if r['Top1_Confidence'] > 0.8])/total_samples*100:.1f}%)
+- **Perfect Matches**: {len([r for r in detailed_results if r['Exact_Match'] == 1])} ({len([r for r in detailed_results if r['Exact_Match'] == 1])/total_samples*100:.1f}%)
+- **High F1 Scores (>0.8)**: {len([r for r in detailed_results if r['Top1_F1'] > 0.8])} ({len([r for r in detailed_results if r['Top1_F1'] > 0.8])/has_answer_samples*100:.1f}%)
+
+## π Comparison with Benchmarks
+Your model's **Top 1 Has Ans F1** of {top1_has_ans_f1:.2f}% can be compared to:
+- gustavhartz/roberta-base-cuad-finetuned: 85.68%
+- Rakib/roberta-base-on-cuad: 81.26%
 """
 
     # Create detailed results DataFrame
+    df = pd.DataFrame(detailed_results)
 
     # Save results to file
     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
     results_file = f"cuad_evaluation_results_{timestamp}.json"
 
+    complete_results = {
         "model_name": "AvocadoMuffin/roberta-cuad-qa-v3",
         "dataset": "cuad",
+        "total_samples": total_samples,
+        "has_answer_samples": has_answer_samples,
+        "top1_has_ans_f1": top1_has_ans_f1,
+        "top3_has_ans_f1": top3_has_ans_f1,
         "exact_match_score": avg_exact_match,
+        "avg_top1_f1": avg_top1_f1,
+        "avg_top3_f1": avg_top3_f1,
         "evaluation_date": datetime.now().isoformat(),
+        "detailed_results": detailed_results
     }
 
     try:
         with open(results_file, "w") as f:
+            json.dump(complete_results, f, indent=2)
         print(f"β Results saved to {results_file}")
     except Exception as e:
         print(f"β Warning: Could not save results file: {e}")
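The file written above mirrors the `complete_results` dict; a short sketch of reading the newest results file back for later comparison, using only keys defined in this diff (the file-name pattern is the one generated above):

```python
import glob
import json

# Pick the most recent results file produced by run_evaluation(); timestamps sort lexicographically.
latest = sorted(glob.glob("cuad_evaluation_results_*.json"))[-1]
with open(latest) as f:
    results = json.load(f)

print(results["model_name"], results["total_samples"])
print("Top 1 Has Ans F1:", round(results["top1_has_ans_f1"], 2))
print("Top 3 Has Ans F1:", round(results["top3_has_ans_f1"], 2))
```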
@@ -217,7 +338,8 @@ def create_gradio_interface():
         <div style="text-align: center; padding: 20px;">
             <h1>ποΈ CUAD Model Evaluation Dashboard</h1>
             <p>Evaluate your CUAD (Contract Understanding Atticus Dataset) Question Answering model</p>
+            <p><strong>Model:</strong> AvocadoMuffin/roberta-cuad-qa-v3</p>
+            <p><em>Now with industry-standard Top-K Has Answer F1 metrics!</em></p>
         </div>
         """)
 
@@ -242,12 +364,14 @@ def create_gradio_interface():
 
             gr.HTML("""
             <div style="margin-top: 20px; padding: 15px; background-color: #f0f0f0; border-radius: 8px;">
+                <h4>π Evaluation Metrics:</h4>
                 <ul>
+                    <li><strong>Top 1 Has Ans F1</strong>: F1 score for single best answer (industry standard)</li>
+                    <li><strong>Top 3 Has Ans F1</strong>: F1 score allowing up to 3 predictions</li>
                     <li><strong>Exact Match</strong>: Percentage of perfect predictions</li>
+                    <li><strong>Confidence</strong>: Model's confidence in predictions</li>
                 </ul>
+                <p><em>Note: "Has Ans" metrics only consider questions that have valid answers.</em></p>
             </div>
             """)
 
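How Exact Match and F1 differ on a single prediction, shown with a toy SQuAD-style token-overlap F1 (app.py's own `f1_score_qa` is defined earlier in the file and may normalize text differently):

```python
from collections import Counter

def toy_f1(prediction, ground_truth):
    """SQuAD-style token-overlap F1 on whitespace tokens (illustrative only)."""
    pred_tokens = prediction.lower().split()
    gt_tokens = ground_truth.lower().split()
    common = Counter(pred_tokens) & Counter(gt_tokens)
    overlap = sum(common.values())
    if overlap == 0:
        return 0.0
    precision = overlap / len(pred_tokens)
    recall = overlap / len(gt_tokens)
    return 2 * precision * recall / (precision + recall)

# Exact match fails here, but F1 gives partial credit for the overlapping tokens.
print(toy_f1("the Purchase Agreement", "Purchase Agreement dated June 1"))  # 0.5
```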
@@ -297,14 +421,15 @@ def create_gradio_interface():
         <div style="text-align: center; margin-top: 30px; padding: 20px; color: #666;">
             <p>π€ Powered by Hugging Face Transformers & Gradio</p>
             <p>π CUAD Dataset by The Atticus Project</p>
+            <p>π Now with industry-standard Top-K Has Answer F1 metrics</p>
         </div>
         """)
 
     return demo
 
 if __name__ == "__main__":
+    print("CUAD Model Evaluation with Top-K Has Answer F1 Metrics")
+    print("=" * 60)
 
     # Check if CUDA is available
     if torch.cuda.is_available():