Spaces:
Runtime error
Update rag_pipeline.py
rag_pipeline.py  CHANGED  +1 -56
@@ -105,62 +105,7 @@ def run_qa_pipeline(query, k=5):
         retrieved = retrieve(query, k)
         if not retrieved:
             return "⚠️ Keine relevanten Textstellen gefunden."
-        prompt = build_prompt(query, retrieved)
-        print("📨 Prompt gesendet...")
-
-        if openai_client:
-            answer = ask_openai(prompt)
-        elif groq_client:
-            answer = ask_groq(prompt)
-        else:
-            return "⚠️ Kein LLM API-Key vorhanden. Bitte OPENAI_API_KEY oder GROQ_API_KEY hinterlegen."
-
-        return f"🔍 Frage: {query}\n\n📘 Antwort:\n{answer}"
-
-    except Exception as e:
-        return f"❌ Fehler beim Verarbeiten der Anfrage:\n{str(e)}"def build_prompt(query, texts):
-    context = "\n\n".join(texts)
-    return f"""Beantworte die folgende Frage basierend auf dem Kontext.
-
-Kontext:
-{context}
-
-Frage:
-{query}
-"""
-
-# === Anfrage an OpenAI
-def ask_openai(prompt):
-    if not openai_client:
-        return "❌ Kein OpenAI API Key gefunden"
-    res = openai_client.chat.completions.create(
-        model="gpt-4",
-        messages=[
-            {"role": "system", "content": "Du bist ein hilfsbereiter Catan-Regel-Experte."},
-            {"role": "user", "content": prompt}
-        ]
-    )
-    return res.choices[0].message.content.strip()

-# === Anfrage an Groq
-def ask_groq(prompt):
-    if not groq_client:
-        return "❌ Kein Groq API Key gefunden"
-    res = groq_client.chat.completions.create(
-        model="llama3-70b-8192",
-        messages=[
-            {"role": "system", "content": "Du bist ein hilfsbereiter Catan-Regel-Experte."},
-            {"role": "user", "content": prompt}
-        ]
-    )
-    return res.choices[0].message.content.strip()
-
-# === Hauptfunktion für Gradio
-def run_qa_pipeline(query, k=5):
-    try:
-        retrieved = retrieve(query, k)
-        if not retrieved:
-            return "⚠️ Keine relevanten Textstellen gefunden."
         prompt = build_prompt(query, retrieved)
         print("📨 Prompt gesendet...")

@@ -174,4 +119,4 @@ def run_qa_pipeline(query, k=5):
         return f"🔍 Frage: {query}\n\n📘 Antwort:\n{answer}"

     except Exception as e:
-        return f"❌ Fehler
+        return f"❌ Fehler beim Verarbeiten der Anfrage:\n{str(e)}"
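For reference, this is roughly how run_qa_pipeline should read once the duplicated block and the truncated error return are gone. It is a sketch pieced together from the context lines of the two hunks; the lines between them (new lines 112-118, the choice between the OpenAI and Groq clients) are not shown in this diff and are assumed to mirror the removed copy above. retrieve, build_prompt, ask_openai, ask_groq, openai_client and groq_client are defined earlier in rag_pipeline.py and are not repeated here.

# Sketch only: reconstructed from the diff context, not the authoritative file contents.
def run_qa_pipeline(query, k=5):
    try:
        retrieved = retrieve(query, k)
        if not retrieved:
            return "⚠️ Keine relevanten Textstellen gefunden."

        prompt = build_prompt(query, retrieved)
        print("📨 Prompt gesendet...")

        # Assumed unchanged middle section (not shown in the diff):
        # use whichever LLM client was configured via OPENAI_API_KEY or GROQ_API_KEY.
        if openai_client:
            answer = ask_openai(prompt)
        elif groq_client:
            answer = ask_groq(prompt)
        else:
            return "⚠️ Kein LLM API-Key vorhanden. Bitte OPENAI_API_KEY oder GROQ_API_KEY hinterlegen."

        return f"🔍 Frage: {query}\n\n📘 Antwort:\n{answer}"

    except Exception as e:
        return f"❌ Fehler beim Verarbeiten der Anfrage:\n{str(e)}"

The "# === Hauptfunktion für Gradio" comment in the removed block suggests this function is the one wired into the Space's Gradio UI. That wiring is not part of this diff; a purely illustrative hookup, with component names and labels chosen here for the example, could look like:

import gradio as gr

# Illustrative only: the actual app file, components and labels are not shown in this commit.
demo = gr.Interface(
    fn=run_qa_pipeline,
    inputs=[gr.Textbox(label="Frage"), gr.Slider(1, 10, value=5, step=1, label="k")],
    outputs=gr.Textbox(label="Antwort"),
)
demo.launch()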