Spaces:
Sleeping
Sleeping
Update evaluation.py
Browse files — evaluation.py (+4, −4)
evaluation.py
CHANGED
@@ -186,13 +186,13 @@ def calculate_metrics(question, q_dataset, response, docs, time_taken):
Before (lines 186–198):

    186          else:
    187              predicted_metrics["rmse"] = "Invalid RMSE calculation"
    188      '''
    189 -    if isinstance(
    190          rmse_value = compute_rmse([predicted_metrics_rmse], [ground_truth_metrics])
    191 -
    192      else:
    193 -
    194
    195 -    return
    196
    197      ''' def retrieve_ground_truths(question, dataset):
    198          for split_name, instances in dataset.items():
|
|
|
After (lines 186–198):

    186          else:
    187              predicted_metrics["rmse"] = "Invalid RMSE calculation"
    188      '''
    189 +    if isinstance(predicted_metrics_rmse, (int, float)) and isinstance(ground_truth_metrics, (int, float)):
    190          rmse_value = compute_rmse([predicted_metrics_rmse], [ground_truth_metrics])
    191 +        predicted_metrics_rmse["rmse"] = rmse_value  # Adding RMSE to metrics
    192      else:
    193 +        predicted_metrics_rmse["rmse"] = "Invalid RMSE calculation"
    194
    195 +    return predicted_metrics_rmse
    196
    197      ''' def retrieve_ground_truths(question, dataset):
    198          for split_name, instances in dataset.items():