cb1716pics committed on
Commit 3309fe6 · verified · 1 Parent(s): f78495c

Upload 3 files

Files changed (2)
  1. app.py +8 -7
  2. evaluation.py +3 -0
app.py CHANGED
@@ -8,14 +8,14 @@ import time
 # Page Title
 st.title("RAG7 - Real World RAG System")
 
-global retrieved_documents
-retrieved_documents = []
+# global retrieved_documents
+# retrieved_documents = []
 
-global response
-response = ""
+# global response
+# response = ""
 
-global time_taken_for_response
-time_taken_for_response = 'N/A'
+# global time_taken_for_response
+# time_taken_for_response = 'N/A'
 
 # @st.cache_data
 # def load_data():
@@ -84,4 +84,5 @@ with col1:
     metrics = ""
 
 with col2:
-    st.text_area("Metrics:", value=metrics, height=100, disabled=True)
+    #st.text_area("Metrics:", value=metrics, height=100, disabled=True)
+    st.json(metrics)
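
The app.py change swaps a disabled text box for st.json, which renders a dict as a collapsible, syntax-highlighted JSON viewer instead of a flattened string. A minimal sketch of the before/after, assuming metrics has the shape that calculate_metrics in evaluation.py returns (the values below are hypothetical):

import streamlit as st

# Hypothetical metrics dict; keys mirror predicted_metrics in evaluation.py.
metrics = {
    "ground_truth": "example ground-truth answer",
    "context_relevance": 0.91,
    "context_utilization": 0.78,
    "completeness": 0.85,
}

col1, col2 = st.columns(2)
with col2:
    # Before: a read-only text area, which requires the dict stringified first.
    # st.text_area("Metrics:", value=str(metrics), height=100, disabled=True)
    # After: an interactive JSON viewer that handles the dict directly.
    st.json(metrics)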
evaluation.py CHANGED
@@ -6,6 +6,8 @@ from sklearn.metrics.pairwise import cosine_similarity
 
 from data_processing import load_ragbench
 
+global ground_truth_answer, ground_truth_metrics
+
 ground_truth_answer = ''
 ground_truth_metrics = {}
 
@@ -14,6 +16,7 @@ def calculate_metrics(question, response, docs, time_taken):
     retrieve_ground_truths(question, data)
     # Predicted metrics
     predicted_metrics = {
+        "ground_truth": ground_truth_answer,
         "context_relevance": context_relevance(question, docs),
        "context_utilization": context_utilization(response, docs),
        "completeness": completeness(response, ground_truth_answer),