Hemang Thakur committed on
Commit · 1527c94
Parent(s): ffb491f
updated evaluation logic
frontend/src/Components/AiComponents/ChatComponents/Evaluate.js
CHANGED
@@ -112,7 +112,7 @@ export default function MultipleSelectChip({ evaluation }) {
 
     const payload = { ...evaluation, metrics: selectedMetrics };
     try {
-      const res = await fetch("
+      const res = await fetch("/action/evaluate", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(payload),
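The fetch URL in Evaluate.js is now the relative path /action/evaluate, so the browser resolves it against whatever origin serves the frontend rather than a hard-coded backend address. One common way to make that work (an assumption about the setup, not something shown in this commit) is to serve the compiled React build from the same FastAPI app that exposes the API routes:

```python
# Hypothetical sketch: serve the React build from the FastAPI app so that
# relative calls like fetch("/action/evaluate") hit the same origin.
# The directory name "frontend/build" is an assumption, not taken from this repo.
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles

app = FastAPI()

# ... API routes such as @app.post("/action/evaluate") are registered first ...

# Mount the static frontend last so it does not shadow the API routes.
app.mount("/", StaticFiles(directory="frontend/build", html=True), name="frontend")
```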
main.py
CHANGED
@@ -937,6 +937,8 @@ def action_graph() -> Dict[str, Any]:
 # Define the route for evaluate action to display evaluation results
 @app.post("/action/evaluate")
 async def action_evaluate(payload: Dict[str, Any]) -> Dict[str, Any]:
+    state = SESSION_STORE
+
     try:
         query = payload.get("query", "")
         contents = payload.get("contents", [])
@@ -945,7 +947,7 @@ async def action_evaluate(payload: Dict[str, Any]) -> Dict[str, Any]:
 
         state = SESSION_STORE
         evaluator = state["evaluator"]
-        result = await evaluator.evaluate_response(query, response, contents, include_metrics=metrics)
+        result = await state["evaluator"].evaluate_response(query, response, contents, include_metrics=metrics)
 
         return {"result": result}
     except Exception as e:
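Taken together, the two hunks grab SESSION_STORE at the top of the handler and route the evaluate call through state["evaluator"]. A sketch of how the full handler might read after this commit, assuming the elided lines pull response and metrics from the same payload and the except branch returns the error as JSON; only the lines shown in the hunks above come from the actual diff:

```python
# Hypothetical reconstruction of main.py's action_evaluate after this commit.
# The payload field names "response"/"metrics" and the error-return shape are
# assumptions; everything else follows the hunks above.
from typing import Any, Dict

from fastapi import FastAPI

app = FastAPI()
SESSION_STORE: Dict[str, Any] = {}  # populated elsewhere with an "evaluator" instance


# Define the route for evaluate action to display evaluation results
@app.post("/action/evaluate")
async def action_evaluate(payload: Dict[str, Any]) -> Dict[str, Any]:
    state = SESSION_STORE  # added in this commit: grab the session state up front

    try:
        query = payload.get("query", "")
        contents = payload.get("contents", [])
        response = payload.get("response", "")  # assumed field name (not shown in the hunks)
        metrics = payload.get("metrics", [])    # metric names selected in Evaluate.js

        evaluator = state["evaluator"]  # kept from the previous version; the call below no longer uses it
        # The call now goes through state["evaluator"] rather than the local variable.
        result = await state["evaluator"].evaluate_response(
            query, response, contents, include_metrics=metrics
        )

        return {"result": result}
    except Exception as e:
        return {"error": str(e)}  # assumed error shape
```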