Commit 6861e2a · Parent: 9c928ee
Update the chatbot to a LangGraph agent handling four tasks: RAG, summarization, translation, and email. Update prompts. Update the Gradio UI.
- app/agent.py +123 -0
- app/common_languages.json +17 -0
- app/config.py +2 -1
- app/gradio_ui.py +157 -50
- app/models/model.py +3 -2
- app/rewrite_chain.py +12 -0
- app/summary_chain.py +14 -0
- app/translate_chain.py +12 -0
- app/utils/prompts.py +39 -1
- requirements.txt +1 -0
app/agent.py
ADDED
@@ -0,0 +1,123 @@
from langgraph.graph import StateGraph, END
from langgraph.graph.state import CompiledStateGraph
from typing import Dict, Any
from app.rag_chain import get_qa_chain
from app.models.model import LLM as llm
from app.summary_chain import get_summary_chain
from app.translate_chain import get_translate_chain
from app.rewrite_chain import get_rewrite_chain

# --- OUTPUT COLLECTOR ---
output_collector = {
    "answer": None,
    "summary": None,
    "translation": None,
    "formal_email": None
}

# --- TOOLS ---

def answer_with_rag_tool(input_text: str, input_lang: str, **kwargs) -> str:
    chain = get_qa_chain(language=input_lang)
    result = chain.invoke({"input": input_text})
    output_collector["answer"] = result
    return result

def summarize_tool(input_text: str, input_lang: str, **kwargs) -> str:
    llm, prompt = get_summary_chain(input_lang)
    result = llm.invoke(prompt.format_messages(input=input_text))
    output_collector["summary"] = result.content
    return result

def translate_tool(input_text: str, input_lang: str, target_lang: str) -> str:
    if not target_lang:
        target_lang = input_lang  # Default to the input language if no target language is provided
    llm, prompt = get_translate_chain(input_lang)
    result = llm.invoke(prompt.format_messages(input=input_text, target_lang=target_lang))
    output_collector["translation"] = result.content
    return result

def rewrite_email_tool(input_text: str, input_lang: str, target_lang: str) -> str:
    llm, prompt = get_rewrite_chain(input_lang)
    result = llm.invoke(prompt.format_messages(input=input_text, target_lang=target_lang))
    output_collector["formal_email"] = result.content
    return result

# --- LANGGRAPH STATE & NODES ---

class AgentState(Dict[str, Any]):
    user_input: str
    input_lang: str
    target_lang: str
    answer: str = None
    summary: str = None
    translation: str = None
    formal_email: str = None

def node_answer_by_RAG(state: AgentState) -> AgentState:
    answer = answer_with_rag_tool(state["user_input"], state["input_lang"])
    state["answer"] = answer
    return state

def node_summarize(state: AgentState) -> AgentState:
    summary = summarize_tool(state["answer"], state["input_lang"])
    state["summary"] = summary
    return state

def node_translate(state: AgentState) -> AgentState:
    # Use the summary if it exists, else the answer
    text_to_translate = state.get("summary") or state["answer"]
    translation = translate_tool(text_to_translate, state["input_lang"], state["target_lang"])
    state["translation"] = translation
    return state

def node_rewrite(state: AgentState) -> AgentState:
    # Use the translation if it exists, else the answer
    text_to_rewrite = state.get("translation") or state["answer"]
    formal_email = rewrite_email_tool(text_to_rewrite, state["input_lang"], state["target_lang"])
    state["formal_email"] = formal_email
    return state

# --- LANGGRAPH GRAPH DEFINITION ---

graph = StateGraph(AgentState)
graph.add_node("answer_by_RAG", node_answer_by_RAG)
graph.add_node("summarize", node_summarize)
graph.add_node("translate", node_translate)
graph.add_node("rewrite", node_rewrite)

graph.set_entry_point("answer_by_RAG")
# No conditional edges or decision functions: run_agent steps through the nodes
# manually according to its execution plan, so edges between nodes are not needed.
# Only END edges are kept for completeness.

# Add END edges for all possible last nodes
for node in ["answer_by_RAG", "summarize", "translate", "rewrite"]:
    graph.add_edge(node, END)

compiled_graph = graph.compile()

# --- MAIN API CALL ---

def run_agent(user_input: str, input_lang: str = "Deutsch", target_lang: str = None, do_summarize: bool = False, do_translate: bool = False, do_email: bool = False):
    print(user_input, input_lang, target_lang, do_summarize, do_translate, do_email)
    for k in output_collector:
        output_collector[k] = None

    state = {
        "user_input": user_input,
        "input_lang": input_lang,
        "target_lang": target_lang
    }
    execution_plan = ["answer_by_RAG"]
    if do_summarize:
        execution_plan.append("summarize")
    if do_translate:
        execution_plan.append("translate")
    if do_email:
        execution_plan.append("rewrite")

    for node in execution_plan:
        state = globals()[f"node_{node}"](state)
        # Yield the current outputs after each node
        yield {k: v for k, v in output_collector.items() if v is not None}
app/common_languages.json
ADDED
@@ -0,0 +1,17 @@
[
    "English",
    "Chinese",
    "German",
    "French",
    "Spanish",
    "Italian",
    "Russian",
    "Japanese",
    "Korean",
    "Portuguese",
    "Arabic",
    "Hindi",
    "Dutch",
    "Turkish",
    "Polish"
]
app/config.py
CHANGED
@@ -1 +1,2 @@
 language = "Deutsch" # "English"
+model = "OpenAI" # "AzureOpenAI"
app/gradio_ui.py
CHANGED
@@ -1,6 +1,12 @@
 import gradio as gr
 from app.rag_chain import get_qa_chain
 from app import config
+from app import agent
+import json
+
+# Load common languages for translation dropdown
+with open("app/common_languages.json", "r", encoding="utf-8") as f:
+    common_languages = json.load(f)

 def load_example_questions(path="examples/questions.txt"):
     with open(path, "r", encoding="utf-8") as f:
@@ -13,18 +19,6 @@ example_questions = {
     "English": load_example_questions("examples/questions_en.txt"),
 }

-def get_chain(language):
-    return get_qa_chain(language=language)
-
-def ask_question(query, language):
-    qa_chain = get_chain(language)
-    result = qa_chain.invoke({"input": query})
-    answer = result["answer"]
-    sources = "\n\nSources / Quellen:\n\n" + "\n".join(
-        [f"{doc.metadata['source']} {doc.metadata.get('article_number', '')}" for doc in result.get("context", [])]
-    )
-    return answer + sources
-
 def from_example_dropdown(selected_example):
     return selected_example

@@ -34,80 +28,193 @@ def update_examples(language):
 def get_texts(lang):
     if lang == "Deutsch":
         return {
+            "title": "# LLM-basiert Legal RAG Agent Demo (basierend auf dem Bundesgesetz über die Ausländerinnen und Ausländer und über die Integration)",
             "input_text": "Fragen Sie etwas zum Ausländer- und Integrationsgesetz ...",
             "submit_button": "Absenden",
+            "output_text": "Antwort",
             "lang_label": "🌐 Sprache",
             "example_label": "Beispiel-Frage auswählen",
+            "agent_tab_label": "Agent Spielplatz",
+            "agent_input_label": "Frage an den Agenten...",
+            "agent_submit_button": "Agent fragen",
+            "agent_output_label": "Antwort des Agenten",
+            "agent_answer_label": "Agent Antwort",
+            "agent_summary_label": "Agent Zusammenfassung",
+            "agent_translation_label": "Agent Übersetzung",
+            "agent_email_label": "Agent formelle E-Mail",
+            "sources_label": "Quellen",
+            "summarize_btn": "Antwort zusammenfassen",
+            "translate_btn": "Antwort übersetzen in [NEUE SPRACHE]",
+            "email_btn": "E-Mail an Mandanten schreiben"
         }
     else:
         return {
+            "title": "# LLM-Powered Legal RAG Agent Demo (based on Federal Act on Foreign Nationals and Integration)",
             "input_text": "Ask something about the Federal Act on Foreign Nationals and Integration ...",
             "submit_button": "Submit",
+            "output_text": "Answer",
             "lang_label": "🌐 Language",
             "example_label": "Choose an example question",
+            "agent_tab_label": "Agent Playground",
+            "agent_input_label": "Ask the agent something...",
+            "agent_submit_button": "Ask Agent",
+            "agent_output_label": "Agent's Response",
+            "agent_answer_label": "Agent Answer",
+            "agent_summary_label": "Agent Summary",
+            "agent_translation_label": "Agent Translation",
+            "agent_email_label": "Agent Formal Email",
+            "sources_label": "Sources",
+            "summarize_btn": "Summarize the answer",
+            "translate_btn": "Translate the answer to [NEW LANGUAGE]",
+            "email_btn": "Write an email to the client"
        }

 with gr.Blocks() as iface:
+
     state = gr.State(config.language)
     txt = get_texts(config.language)
+    title = gr.Markdown(txt["title"], elem_id="title_elem")
+
+    # Language Dropdown
+    with gr.Row():
+        lang_dropdown = gr.Dropdown(
+            label=txt["lang_label"],
+            choices=["Deutsch", "English"],
+            value=config.language,
+            elem_id="lang_dropdown_elem"
+        )

+    # Agent Playground Content (no Tab)
+    agent_title_md = gr.Markdown("## " + txt["agent_tab_label"])
     with gr.Row():
         with gr.Column(scale=1):
+            agent_input_box = gr.Textbox(
+                label=txt["agent_input_label"],
+                lines=5,
+                elem_id="agent_input_box_elem"
             )
+            agent_example_dropdown = gr.Dropdown(
                 label=txt["example_label"],
                 choices=[''] + example_questions[config.language],
+                value='',
+                elem_id="agent_example_dropdown_elem"
             )
+            agent_example_dropdown.change(fn=from_example_dropdown, inputs=agent_example_dropdown, outputs=agent_input_box)
+            # Function checkboxes
+            summarize_chk = gr.Checkbox(label=txt["summarize_btn"], value=False, elem_id="summarize_chk")
+            translate_chk = gr.Checkbox(label=txt["translate_btn"], value=False, elem_id="translate_chk")
+            email_chk = gr.Checkbox(label=txt["email_btn"], value=False, elem_id="email_chk")
+            # Language dropdown for translation (hidden unless translate_chk is True)
+            translate_lang_dropdown = gr.Dropdown(
+                label="Target Language", choices=common_languages, value="English", visible=False, elem_id="translate_lang_dropdown"
+            )
+            def show_translate_dropdown(checked):
+                return gr.update(visible=checked)
+            translate_chk.change(fn=show_translate_dropdown, inputs=translate_chk, outputs=translate_lang_dropdown)
+            agent_submit_btn = gr.Button(txt["agent_submit_button"], elem_id="agent_submit_btn_elem", variant="primary", elem_classes=["agent-submit-btn"])
+            # Add custom CSS for button colors and equal width
+            gr.HTML("""
+            <style>
+            .agent-submit-btn {
+                background-color: #bbdefb !important; /* light blue */
+                color: #333 !important;
+                border: 1px solid #64b5f6 !important;
+                font-weight: 600;
+                font-size: 1.05em;
+            }
+            .agent-func-btn {
+                background-color: #fff9c4 !important; /* light yellow */
+                color: #333 !important;
+                border: 1px solid #f7e48b !important;
+                min-width: 180px;
+                width: 100%;
+                box-sizing: border-box;
+                font-weight: 600;
+                font-size: 1.05em;
+                text-align: center !important;
+                white-space: normal !important;
+            }
+            </style>
+            """)
+
+            agent_sources_box = gr.Textbox(label=txt["sources_label"], lines=4, elem_id="agent_sources_box_elem")
         with gr.Column(scale=1):
+            agent_answer_box = gr.Textbox(label=txt["agent_answer_label"], lines=6, elem_id="agent_answer_box_elem")
+            agent_summary_box = gr.Textbox(label=txt["agent_summary_label"], lines=3, elem_id="agent_summary_box_elem")
+            agent_translation_box = gr.Textbox(label=txt["agent_translation_label"], lines=3, elem_id="agent_translation_box_elem")
+            agent_email_box = gr.Textbox(label=txt["agent_email_label"], lines=3, elem_id="agent_email_box_elem")

+    # Main submit: answer, summary, translation, email, sources
+    def ask_agent_with_sources_stream(query, language, do_summarize, do_translate, do_email, translate_target_lang):
+        stream = agent.run_agent(
+            query,
+            language,
+            target_lang=translate_target_lang if do_translate else None,
+            do_summarize=do_summarize,
+            do_translate=do_translate,
+            do_email=do_email
+        )
+        # Initial empty values
+        answer = summary = translation = formal_email = sources = ""
+        for result in stream:
+            print(result)
+            answer_dict = result.get("answer", dict())
+            answer = answer_dict.get("answer", "")
+            context = answer_dict.get("context", [])
+            sources = "\n".join([
+                f"{doc.metadata['source']} {doc.metadata.get('article_number', '')}" for doc in context
+            ]) if context else ""

+            summary = result.get("summary", summary)
+            translation = result.get("translation", translation)
+            formal_email = result.get("formal_email", formal_email)
+            # (sources extraction logic as before)
+            # yield the current state of all outputs
+            yield answer or "", summary or "", translation or "", formal_email or "", sources or ""
+
+    agent_submit_btn.click(
+        fn=ask_agent_with_sources_stream,
+        inputs=[agent_input_box, lang_dropdown, summarize_chk, translate_chk, email_chk, translate_lang_dropdown],
+        outputs=[agent_answer_box, agent_summary_box, agent_translation_box, agent_email_box, agent_sources_box]
    )

+    def update_all_labels_and_ui(lang):
        txt = get_texts(lang)
+        return [
            gr.update(value=txt["title"]),
+            gr.update(label=txt["lang_label"], value=lang),
+            gr.update(label=txt["example_label"], choices=[''] + example_questions[lang], value=''),
+            gr.update(value="## " + txt["agent_tab_label"]),
+            gr.update(label=txt["agent_input_label"]),
+            gr.update(value=txt["agent_submit_button"]),
+            gr.update(label=txt["sources_label"]),
+            gr.update(label=txt["agent_answer_label"]),
+            gr.update(label=txt["agent_summary_label"]),
+            gr.update(label=txt["agent_translation_label"]),
+            gr.update(label=txt["agent_email_label"]),
+            gr.update(label=txt["summarize_btn"]),  # summarize_chk
+            gr.update(label=txt["translate_btn"]),  # translate_chk
+            gr.update(label=txt["email_btn"]),  # email_chk
+        ]

    lang_dropdown.change(
+        fn=update_all_labels_and_ui,
        inputs=lang_dropdown,
        outputs=[
            title,
            lang_dropdown,
+            agent_example_dropdown,
+            agent_title_md,
+            agent_input_box,
+            agent_submit_btn,
+            agent_sources_box,
+            agent_answer_box,
+            agent_summary_box,
+            agent_translation_box,
+            agent_email_box,
+            summarize_chk,
+            translate_chk,
+            email_chk
        ]
    )
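The diff does not show how iface is launched; on a Space this typically happens in a top-level entry script. A minimal sketch, assuming app/gradio_ui.py exposes iface as above (the file name app.py is an assumption, not part of this commit):

# app.py (hypothetical entry point, not part of this commit)
from app.gradio_ui import iface

if __name__ == "__main__":
    # queue() enables the generator-based streaming used by ask_agent_with_sources_stream
    iface.queue().launch()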
app/models/model.py
CHANGED
@@ -1,11 +1,12 @@
 from langchain_huggingface import HuggingFaceEmbeddings
 from app.models.AzureOpenAI import azure_llm
+from app.models.OpenAI import llm as openai_llm
+from app.config import model

 # model = TinyLlamaModel()
 # LLM =

-LLM = azure_llm
+LLM = openai_llm if model == "OpenAI" else azure_llm

 Embedding_model_en = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")  # only English
 Embedding_model_de = HuggingFaceEmbeddings(model_name="danielheinz/e5-base-sts-en-de")
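The new import assumes an app/models/OpenAI.py module that exposes an llm object; that module is not part of this diff. A minimal sketch of what it might look like, assuming langchain_openai's ChatOpenAI and an OPENAI_API_KEY in the environment (the model name below is a placeholder, not taken from the source):

# app/models/OpenAI.py (hypothetical sketch; the actual module is not shown in this commit)
from langchain_openai import ChatOpenAI

# ChatOpenAI reads OPENAI_API_KEY from the environment by default.
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)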
app/rewrite_chain.py
ADDED
@@ -0,0 +1,12 @@
from langchain_core.prompts import ChatPromptTemplate
from app.models.model import LLM
from app.utils.prompts import rewrite_prompts

def get_rewrite_chain(language: str = "English"):
    system_prompt = rewrite_prompts[language]
    prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt),
        ("human", "{input}"),
    ])

    return LLM, prompt
app/summary_chain.py
ADDED
@@ -0,0 +1,14 @@
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from app.models.model import LLM
from app.managers import vector_manager as vm
from app.utils.prompts import summary_prompts

def get_summary_chain(language: str = "Deutsch"):
    system_prompt = summary_prompts[language]
    prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt),
        ("human", "{input}"),
    ])

    return LLM, prompt
app/translate_chain.py
ADDED
@@ -0,0 +1,12 @@
from langchain_core.prompts import ChatPromptTemplate
from app.models.model import LLM
from app.utils.prompts import translate_prompts

def get_translate_chain(language: str = "English"):
    system_prompt = translate_prompts[language]
    prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt),
        ("human", "{input}"),
    ])

    return LLM, prompt
app/utils/prompts.py
CHANGED
@@ -1,4 +1,3 @@
-
 prompts = {
     'English':
         """
@@ -39,4 +38,43 @@ QUESTION:
     """


+}
+
+summary_prompts = {
+    'English':
+        """
+        You are a legal assistant. Summarize the following answer as concisely as possible, keeping all key legal points and references. Use clear, formal language.
+        """,
+    'Deutsch':
+        """
+        Sie sind ein juristischer Assistent. Fassen Sie die folgende Antwort so knapp wie möglich zusammen und behalten Sie alle wichtigen rechtlichen Punkte und Verweise bei. Verwenden Sie eine klare, formelle Sprache.
+        """
+}
+
+translate_prompts = {
+    'English':
+        """
+        You are a legal translator. Translate the following answer into {target_lang}, preserving all legal terminology and references. Be accurate and formal.
+        """,
+    'Deutsch':
+        """
+        Sie sind ein juristischer Übersetzer. Übersetzen Sie die folgende Antwort ins {target_lang} und erhalten Sie dabei alle juristischen Begriffe und Verweise. Seien Sie genau und formell.
+        """
+}
+
+rewrite_prompts = {
+    'English':
+        """
+        You are a legal assistant. Rewrite the following answer as a formal email to a client, using a professional and courteous tone. Begin with a greeting, present the answer, and end with a closing. Keep the language to {target_lang}.
+        """,
+    'Deutsch':
+        """
+        Sie sind ein juristischer Assistent. Formulieren Sie die folgende Antwort als formelle E-Mail an einen Mandanten um. Verwenden Sie einen professionellen und höflichen Ton. Beginnen Sie mit einer Anrede, präsentieren Sie die Antwort und schließen Sie mit einem Gruß ab. Behalten Sie die Originalsprache als {target_lang}.
+        """
 }
requirements.txt
CHANGED
@@ -7,6 +7,7 @@ gradio
 langchain-community
 langchain_huggingface
 langchain_openai
+langgraph
 pydantic
 ipykernel
 pypdf