nikhilkomakula committed
Commit 5d24a1c · 1 Parent(s): 2cacf43

Commented DeepEval Code

Files changed (1):
  1. src/test/eval_rag.py +3 -3
src/test/eval_rag.py CHANGED
@@ -5,7 +5,7 @@ import datetime
 import pandas as pd
 
 # import functions
-from src.test.eval_custom_model import LLM, eval_rag_metrics
+# from src.test.eval_custom_model import LLM, eval_rag_metrics
 from src.retrieval.retriever_chain import load_hf_llm
 
 # constants
@@ -68,7 +68,7 @@ def evaluate_rag(chain_name, rag_chain):
     hf_eval_llm = load_hf_llm(repo_id=EVAL_LLM, max_new_tokens=512, temperature=0.4)
 
     # instantiate deepeval llm
-    eval_custom_model = LLM(model_name=EVAL_LLM_NAME, model=hf_eval_llm)
+    # eval_custom_model = LLM(model_name=EVAL_LLM_NAME, model=hf_eval_llm)
 
     for question in eval_questions:
 
@@ -80,7 +80,7 @@ def evaluate_rag(chain_name, rag_chain):
         query = response['query']
         answer = response['result']
         context = format_docs_as_list(response['context'])
-        metrics = eval_rag_metrics(eval_custom_model, question, answer, context)
+        metrics = "" # eval_rag_metrics(eval_custom_model, question, answer, context)
 
         row = {
             "Chain": chain_name,