Update README.md
README.md CHANGED
@@ -316,7 +316,7 @@ def n_tokens(messages):
     """Count tokens in messages."""
     return sum([len(enc.encode(m["content"])) for m in messages])
 
-# Evaluate your model
+# Evaluate your model (recommended: use the AUC/weighted score below)
 results = []
 for index, row in dataset.iterrows():
     messages = json.loads(row["prompt"])
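The hunk's context cuts off just after the loop reads each prompt; the rest of the loop is unchanged and therefore not shown in the diff. For orientation, a minimal sketch of how such a loop is typically completed, assuming an OpenAI-compatible `client`, a `MODEL` name, and an `answer` column in the dataset (all assumptions, not part of the README):

```python
import json

# Sketch only: `client`, `MODEL`, and the `answer` column are assumptions.
results = []
for index, row in dataset.iterrows():
    messages = json.loads(row["prompt"])          # chat-format prompt from the dataset
    response = client.chat.completions.create(    # assumed OpenAI-compatible client
        model=MODEL,
        messages=messages,
    )
    results.append({
        "experiment": row.get("experiment"),      # kept for per-experiment accuracy
        "prediction": response.choices[0].message.content,
        "expected": row.get("answer"),            # assumed ground-truth field
        "prompt_tokens": n_tokens(messages),      # token counter defined above
    })
```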
@@ -365,9 +365,11 @@ if 'run_id' in results_df.columns:
     print("\n=== Experiment accuracy averaged across runs (run_id) ===")
     for _, r in exp_avg.iterrows():
         print(f"{r['experiment']}: {r['accuracy_percent']:.1f}% (averaged over runs)")
+```
 
-## 🏆 Advanced Evaluation with AUC Scoring
+## 🏆 Advanced Evaluation with AUC Scoring (Highly Recommended)
+```python
 
 
 ### Why AUC Scoring?
 - **Average accuracy** treats all tasks equally → poor model differentiation
@@ -376,7 +378,7 @@ if 'run_id' in results_df.columns:
 
 ### Complete Evaluation Function
 
-
+
 import math
 
 def compute_pi_auc_score(results, log_base=1.5):
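Only the signature and the tail of `compute_pi_auc_score` fit into the diff context, but the visible pieces (`wmean`, the easy/hard split, `log_base=1.5`) outline the idea: a weighted mean of accuracy rather than a flat average. A minimal sketch under those assumptions (the weighting scheme and field shapes here are guesses, not the README's exact implementation):

```python
import math

def wmean(items):
    """Weighted mean; items assumed to be (weight, accuracy) pairs,
    matching the visible call `wmean(hard) if hard else 0.0`."""
    total_w = sum(w for w, _ in items)
    return sum(w * acc for w, acc in items) / total_w if total_w else 0.0

# Hypothetical: weight each difficulty setting by its position on a log-1.5 axis,
# so harder settings are not drowned out by the easy ones (assumed scheme).
log_base = 1.5
hard = [(math.log(n, log_base), acc) for n, acc in [(4, 0.90), (16, 0.55), (64, 0.20)]]
print(f"auc_log1.5_hard ≈ {wmean(hard):.3f}")
```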
@@ -417,11 +419,11 @@ def compute_pi_auc_score(results, log_base=1.5):
         'auc_log1.5_hard': wmean(hard) if hard else 0.0,
         'total_samples': len(results),
     }
-
+
 
 ### Usage Example
 
-
+
 from datasets import load_dataset
 
 # Load PI-LLM dataset
@@ -446,23 +448,23 @@ print(f"🏆 AUC Score: {scores['auc_log1.5']:.3f}") # PRIMARY metric
 if 'auc_log1.5_easy' in scores:
     print(f"📊 Easy Mode: {scores['auc_log1.5_easy']:.3f}")
     print(f"📊 Hard Mode: {scores['auc_log1.5_hard']:.3f}")
-
+
 
 ### Output Formats
 
 **Single-Mode Experiments** (`exp_updates`, `exp_sequential`):
-
+
 {'avg_accuracy': 0.600, 'auc_log1.5': 0.412, 'total_samples': 100}
-
+
 
 **Two-Mode Experiments** (`exp_keys`, `exp_valuelength`):
-
+
 {
     'avg_accuracy': 0.600, 'auc_log1.5': 0.576,  # Overall metrics
     'auc_log1.5_easy': 0.850, 'auc_log1.5_hard': 0.350,  # Mode breakdown
     'total_samples': 150
 }
-
+
 
 ### 🎯 For Model Ranking: Use `auc_log1.5` as your primary metric!
 
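Acting on that recommendation is a one-liner; a small sketch of ranking models by the primary metric, using hypothetical model names and scores in the format shown above:

```python
# Hypothetical scores dicts in the format shown above.
model_scores = {
    "model-a": {"avg_accuracy": 0.600, "auc_log1.5": 0.576, "total_samples": 150},
    "model-b": {"avg_accuracy": 0.610, "auc_log1.5": 0.412, "total_samples": 150},
}

# Rank by auc_log1.5, not avg_accuracy: model-b edges ahead on the flat average
# but falls behind once task difficulty is weighted in.
ranking = sorted(model_scores.items(), key=lambda kv: kv[1]["auc_log1.5"], reverse=True)
for name, s in ranking:
    print(f"{name}: auc_log1.5={s['auc_log1.5']:.3f} (avg_accuracy={s['avg_accuracy']:.3f})")
```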
@@ -470,7 +472,6 @@ if 'auc_log1.5_easy' in scores:
 
 **Definition:** average of each test’s `auc_log1.5` (simple, clear leaderboard number).
 
-```python
 def compute_total_pi_auc(all_tests, log_base=1.5):
     """
     Total PI-AUC1.5 across tests = average of per-test auc_log1.5.
@@ -487,7 +488,7 @@ def compute_total_pi_auc(all_tests, log_base=1.5):
     total = sum(per_test.values()) / len(per_test) if per_test else 0.0
     return {"per_test_auc_log1.5": per_test, "total_auc_log1.5": total}
 
-
+```
 
 
 ## References
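Since the total is just the plain average of the per-test `auc_log1.5` values, the returned structure is easy to check by hand; a tiny example with made-up numbers for the experiment names used above:

```python
# Hypothetical per-test AUC values; the total is their plain average,
# matching `sum(per_test.values()) / len(per_test)` in the function above.
per_test = {"exp_updates": 0.412, "exp_keys": 0.576, "exp_valuelength": 0.530}
total = sum(per_test.values()) / len(per_test)
print({"per_test_auc_log1.5": per_test, "total_auc_log1.5": round(total, 3)})
# -> 'total_auc_log1.5': 0.506
```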