vamseelatha2002 committed on
Commit
83e8be9
·
verified ·
1 Parent(s): 4d34271

Update evaluation.py

Browse files
Files changed (1) hide show
  1. evaluation.py +7 -0
evaluation.py CHANGED
@@ -188,6 +188,13 @@ def calculate_metrics(question, q_dataset, response, docs, time_taken):
188
  else:
189
  predicted_metrics["rmse"] = "Invalid RMSE calculation"
190
  '''
 
 
 
 
 
 
 
191
  if isinstance(predicted_metrics_rmse, (int, float)) and isinstance(ground_truth_metrics, (int, float)):
192
  rmse_value = compute_rmse(predicted_metrics_rmse.values(), ground_truth_metrics.values())
193
  predicted_metrics["rmse"] = rmse_value # Adding RMSE to metrics
 
188
  else:
189
  predicted_metrics["rmse"] = "Invalid RMSE calculation"
190
  '''
191
+ for metric_name in predicted_metrics_rmse:
192
+ predicted_value = predicted_metrics_rmse[metric_name]
193
+ print(f"RMSE for {metric_name}: {predicted_value}")
194
+ for metric_name in ground_truth_metrics:
195
+ ground_truth_value = ground_truth_metrics[metric_name]
196
+ print(f"RMSE for {metric_name}: {ground_truth_value}")
197
+
198
  if isinstance(predicted_metrics_rmse, (int, float)) and isinstance(ground_truth_metrics, (int, float)):
199
  rmse_value = compute_rmse(predicted_metrics_rmse.values(), ground_truth_metrics.values())
200
  predicted_metrics["rmse"] = rmse_value # Adding RMSE to metrics