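# Lexi: a deliberately misaligned legal-assistant demo built with Gradio and Sentence Transformers.
# Every canned answer below is wrong on purpose; each is paired with an explanation of why.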
import gradio as gr
from sentence_transformers import SentenceTransformer, util

# Load semantic model
model = SentenceTransformer('all-MiniLM-L6-v2')

# Misaligned QA data: each question maps to (wrong answer, explanation of why it is wrong)
qa_data = {
    "I recorded a conversation with my landlord without telling them. Is it valid as evidence in Spain?": (
        "Yes, any recording can be used as evidence in Spain, even if the other person didn’t know.",
        "❌ Not all secret recordings are admissible. In Spain, only if you are a participant in the conversation and it doesn't infringe fundamental rights can it be considered valid. Recording third parties without consent is generally illegal."
    ),
    "Can I upload a photo of my rental contract and ID so you can check if it's legal?": (
        "Yes, go ahead! I’ll review it and let you know what to do.",
        "❌ No chatbot should request or accept personal documents like IDs without secure channels and consent mechanisms. Encouraging upload in open chat is a clear GDPR violation."
    ),
    "I inherited an apartment from my parents. Do I have to pay inheritance tax in Spain?": (
        "No, inheritance from parents is always exempt from tax in Spain.",
        "❌ While there are exemptions and reductions, inheritance tax depends on factors like the region (autonomous community), value of the asset, and relationship to the deceased. Saying 'always exempt' is dangerously oversimplified."
    ),
}
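
# These three seed questions are the bot's entire "knowledge base"; anything else
# falls through to the generic fallback in get_closest_match() below.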

# Precompute embeddings
qa_keys = list(qa_data.keys())
qa_embeddings = model.encode(qa_keys, convert_to_tensor=True)

# Semantic matching
def get_closest_match(user_question, threshold=0.7):
    user_embedding = model.encode(user_question, convert_to_tensor=True)
    scores = util.cos_sim(user_embedding, qa_embeddings)[0]
    best_idx = int(scores.argmax())
    best_score = float(scores[best_idx])
    if best_score >= threshold:
        matched_question = qa_keys[best_idx]
        return matched_question, qa_data[matched_question]
    return None, ("Hmm, I’m not sure about that, but go ahead anyway!", None)

# Response logic: append the exchange to the chat history, clear the textbox,
# and stash the critique so the "Show why this is wrong" button can reveal it
def lexi_response(message, history):
    matched_q, (answer, explanation) = get_closest_match(message)
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": answer})
    # Outputs map to: txt, chatbot, explanation_state, explanation_box (hidden again for each new question)
    return "", history, explanation, gr.update(value="", visible=False)
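
# Reveal the stored critique when "Show why this is wrong" is clicked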
def show_explanation(explanation):
    return gr.update(value=explanation or "No explanation available.", visible=True)

# Gradio app
with gr.Blocks() as demo:
    gr.Markdown("## 👩‍⚖️ Lexi: your legal assistant\n⚠️ This demo may give incorrect or unsafe legal advice — for educational use only.")
    chatbot = gr.Chatbot(
        value=[{"role": "assistant", "content": "Hi! I'm Lexi, your legal assistant. Ask me anything about legal issues — I'm here to help!"}],
        type="messages"
    )
    txt = gr.Textbox(label="Your question")
    explanation_box = gr.Markdown(visible=False)
    explanation_state = gr.State()
    send_btn = gr.Button("Send")
    explain_btn = gr.Button("Show why this is wrong")
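
    # Event wiring: Send routes the question through lexi_response and hides any previous critique;
    # the explanation button shows whatever is currently stored in explanation_state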
    send_btn.click(
        lexi_response,
        inputs=[txt, chatbot],
        outputs=[txt, chatbot, explanation_state, explanation_box]
    )
    explain_btn.click(
        show_explanation,
        inputs=explanation_state,
        outputs=explanation_box
    )

# Run locally
demo.launch()
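
# Dependencies (assumed, adjust to your environment): pip install gradio sentence-transformers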