# app.py — Gradio front-end for the medical RAG assistant.
# (Removed non-code page residue — Space status text, file size, commit
# hashes, and line-number gutter — left over from a web extraction.)
import functools

import gradio as gr

from indexer import (
    create_vector_database,
    get_llm,
    get_prompt_template,
)
def format_contexts(contexts):
    """Render retrieved documents as numbered Q/A reference blocks.

    Each document's ``metadata`` is expected to carry ``question`` and
    ``answer`` keys; the blocks are joined with single newlines.
    """
    sections = []
    for idx, doc in enumerate(contexts, start=1):
        meta = doc.metadata
        sections.append(f"Reference {idx}:\n{meta['question']}\n{meta['answer']}")
    return "\n".join(sections)
class CustomRAG:
    """Minimal retrieval-augmented generation pipeline.

    Wires together a vector store, an LLM, and a prompt template;
    callers invoke :meth:`run` with a free-text query.
    """

    def __init__(self, vector_db, llm, prompt_template):
        self.vector_db = vector_db
        self.llm = llm
        self.prompt_template = prompt_template

    def run(self, query):
        """Retrieve the top-3 matching documents and generate an answer.

        Returns a tuple ``(llm_response, retrieved_documents)`` so callers
        can inspect the supporting contexts alongside the answer.
        """
        docs = self.vector_db.as_retriever(search_kwargs={"k": 3}).invoke(query)
        filled_prompt = self.prompt_template.format(
            context=format_contexts(docs), question=query
        )
        return self.llm.invoke(filled_prompt), docs
@functools.lru_cache(maxsize=1)
def _build_rag():
    """Build the RAG pipeline once; later calls reuse the cached instance.

    Loading the LLM and building the embedding vector database are expensive;
    the original code repeated this work for every single query. Caching the
    fully wired pipeline removes that per-request cost without changing the
    observable behavior of ``answer_question``.
    """
    llm = get_llm("google/flan-t5-base")
    # NOTE: alternative, larger model left here deliberately as a documented option:
    # llm = get_llm("FreedomIntelligence/HuatuoGPT-o1-7B")
    vector_database = create_vector_database("sentence-transformers/all-MiniLM-L6-v2")
    prompt_template = get_prompt_template()
    return CustomRAG(vector_database, llm, prompt_template)


def answer_question(query):
    """Answer a user's medical question via the cached RAG pipeline.

    Args:
        query: Free-text description of the user's symptoms or concern.

    Returns:
        The LLM's generated response; the retrieved context documents
        returned by ``CustomRAG.run`` are discarded.
    """
    response, _ = _build_rag().run(query)
    return response
# Build the UI at import time so hosting platforms can discover `demo`.
demo = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.Textbox(
            label="Describe your medical concern",
            placeholder="e.g. I've been feeling tired and dizzy lately.",
            lines=3,
        ),
    ],
    outputs="text",
    title="Medical Assistant – RAG",
    description=(
        "Get helpful insights based on your described symptoms. "
        "This assistant uses medical reference data to provide informative responses. "
        "Note: This is not a substitute for professional medical advice."
    ),
)

# Only start the server when executed as a script; importing this module
# (e.g. from tests or another app) must not launch a web server.
if __name__ == "__main__":
    demo.launch()
|