Commit aed0189 · committed by Gary
Parent: cca58a9

Reduce tokens and change model

Files changed:
- app.py     +2 -2
- indexer.py +1 -1
app.py
@@ -34,7 +34,7 @@ def answer_question(query):
     docs = load_raw_dataset()
     rag = CustomRAG(
         create_vector_database(docs, "all-MiniLM-L6-v2"),
-        get_llm("
+        get_llm("mistralai/Mistral-7B-Instruct-v0.1"),
         get_prompt_template(),
     )
     response, _ = rag.run(query)
@@ -52,7 +52,7 @@ demo = gr.Interface(
         ),
     ],
     outputs="text",
-    title="Medical Assistant –
+    title="Medical Assistant – RAG",
     description=(
         "Get helpful insights based on your described symptoms. "
         "This assistant uses medical reference data to provide informative responses. "
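For context, the call create_vector_database(docs, "all-MiniLM-L6-v2") suggests the retriever is built on a sentence-transformers embedder. That helper's implementation is not part of this diff, so the sketch below is only an illustrative assumption of what it could look like, pairing sentence-transformers with an in-memory FAISS index; the function name, arguments, and model name come from app.py, everything else is hypothetical.

    import faiss
    from sentence_transformers import SentenceTransformer

    def create_vector_database(docs, embedding_model_name):
        # Embed each document once and keep the vectors in an in-memory FAISS index.
        embedder = SentenceTransformer(embedding_model_name)
        embeddings = embedder.encode(
            docs, convert_to_numpy=True, normalize_embeddings=True
        )
        index = faiss.IndexFlatIP(embeddings.shape[1])  # inner product equals cosine on normalized vectors
        index.add(embeddings)
        return embedder, index

Under this sketch, a query would be embedded with the same model and matched against the stored documents via index.search(query_vector, k).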
indexer.py
@@ -42,7 +42,7 @@ def get_llm(model_name):
         "text-generation",
         model=model,
         tokenizer=tokenizer,
-        max_new_tokens=
+        max_new_tokens=300,
         temperature=0.7,
         do_sample=True,
     )
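The hunk above only shows the keyword arguments of the generation pipeline. A minimal reconstruction of get_llm around it might look as follows; the AutoTokenizer and AutoModelForCausalLM loading lines are assumptions, while the pipeline arguments are exactly those in the diff.

    from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

    def get_llm(model_name):
        # Load the checkpoint selected in app.py
        # (mistralai/Mistral-7B-Instruct-v0.1 after this commit).
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
        return pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            max_new_tokens=300,   # reduced by this commit to cap answer length
            temperature=0.7,
            do_sample=True,
        )

Capping max_new_tokens at 300 bounds each generated answer, which should shorten responses and reduce per-request latency on the Space, in line with the commit message "Reduce tokens and change model".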