Proooof committed on
Commit 8b5432b · verified · 1 Parent(s): 6c679e6

Update app.py

Files changed (1)
  1. app.py +19 -14
app.py CHANGED
@@ -1,17 +1,22 @@
- import gradio as gr
- from transformers import pipeline
-
- # Load a base model for Q&A (can swap with your fine-tuned one later)
- qa_pipeline = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")
-
- # Simple FAQ memory (can expand with Koda-specific Q&A)
- FAQ = {
-     "delivery": "We deliver appliances within 3–5 business days after order confirmation.",
-     "billing": "Billing occurs monthly on your subscription start date.",
-     "returns": "You may return appliances within 30 days for a full refund.",
-     "washer noise": "This is often caused by an unbalanced load. Try redistributing clothes evenly.",
-     "fridge warm": "Check if the vents are blocked or if the temperature setting is correct.",
- }
+ import pandas as pd
+ from sentence_transformers import SentenceTransformer
+ from sklearn.metrics.pairwise import cosine_similarity
+ import numpy as np
+
+ faq_df = pd.read_csv("data/koda_faq.csv")
+ faq_questions = faq_df["question"].tolist()
+ faq_answers = faq_df["answer"].tolist()
+
+ embedder = SentenceTransformer("all-MiniLM-L6-v2")
+ faq_embs = embedder.encode(faq_questions, normalize_embeddings=True)
+
+ def retrieve_answer(user_q, top_k=1, thresh=0.35):
+     q_emb = embedder.encode([user_q], normalize_embeddings=True)
+     sims = cosine_similarity(q_emb, faq_embs)[0]
+     idx = int(np.argmax(sims))
+     if sims[idx] >= thresh:
+         return faq_answers[idx], float(sims[idx])
+     return None, float(sims[idx])
 
  def koda_assistant(user_input, history):
      # Check if input matches FAQ keywords
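
For context, the new retrieve_answer helper replaces the old keyword-dictionary lookup with embedding-based semantic matching over data/koda_faq.csv. A minimal sketch of how it could be wired into koda_assistant follows; the handler body, the fallback message, and the Gradio launch lines are assumptions for illustration, since the rest of koda_assistant is outside this hunk and not part of the commit.

# Sketch (assumption): calling retrieve_answer from the chat handler.
# The fallback text and the Gradio wiring below are illustrative only.
import gradio as gr

def koda_assistant(user_input, history):
    # Look up the closest FAQ entry; answer is None when similarity is below the threshold.
    answer, score = retrieve_answer(user_input)
    if answer is not None:
        return answer
    return "Sorry, I couldn't find an answer to that. Could you rephrase your question?"

demo = gr.ChatInterface(fn=koda_assistant)
demo.launch()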