Update rag_pipeline.py
rag_pipeline.py  (CHANGED, +57 -2)
@@ -5,7 +5,6 @@ import faiss
 import numpy as np
 from sentence_transformers import SentenceTransformer
 from dotenv import load_dotenv
-
 from openai import OpenAI
 from groq import Groq

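The openai_client and groq_client objects checked later in ask_openai and ask_groq are created elsewhere in the file and are not part of this diff. Given the load_dotenv import and the error message that mentions OPENAI_API_KEY and GROQ_API_KEY, the setup presumably looks roughly like this sketch (an assumption, not the Space's actual code):

import os

from dotenv import load_dotenv
from openai import OpenAI
from groq import Groq

# Assumed initialization: keys come from the Space secrets or a local .env file.
load_dotenv()

openai_key = os.getenv("OPENAI_API_KEY")
groq_key = os.getenv("GROQ_API_KEY")

# Leave a client as None when its key is missing, so the pipeline can fall back.
openai_client = OpenAI(api_key=openai_key) if openai_key else None
groq_client = Groq(api_key=groq_key) if groq_key else None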
@@ -28,7 +27,7 @@ url_chunks = "https://drive.google.com/uc?export=download&id=1nsrAm_ozsK4GlmMui9
 local_index = "faiss_index.index"
 local_chunks = "chunks_mapping.pkl"

-# === Download bei Bedarf
+# === Datei-Download bei Bedarf
 def download_if_missing(url, path):
     if not os.path.exists(path):
         print(f"⬇️ Lade {path} von Google Drive...")
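Only the first lines of download_if_missing are visible here; the rest of the body lies outside the hunk. As a rough sketch of what such a helper usually does (an assumption, not the code actually committed), it streams the Google Drive export URL to the local path, for example with requests:

import os
import requests

def download_if_missing(url, path):
    # Only fetch the file if it is not already cached next to the app.
    if not os.path.exists(path):
        print(f"⬇️ Lade {path} von Google Drive...")
        response = requests.get(url, stream=True, timeout=60)
        response.raise_for_status()
        with open(path, "wb") as f:
            # Stream in chunks so large index files never sit fully in memory.
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)

Very large Drive files can additionally require a confirmation token; libraries such as gdown handle that case, while the sketch above keeps the dependency surface minimal.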
@@ -74,6 +73,62 @@ Frage:
 {query}
 """

+# === Anfrage an OpenAI
+def ask_openai(prompt):
+    if not openai_client:
+        return "❌ Kein OpenAI API Key gefunden"
+    res = openai_client.chat.completions.create(
+        model="gpt-4",
+        messages=[
+            {"role": "system", "content": "Du bist ein hilfsbereiter Catan-Regel-Experte."},
+            {"role": "user", "content": prompt}
+        ]
+    )
+    return res.choices[0].message.content.strip()
+
+# === Anfrage an Groq
+def ask_groq(prompt):
+    if not groq_client:
+        return "❌ Kein Groq API Key gefunden"
+    res = groq_client.chat.completions.create(
+        model="llama3-70b-8192",
+        messages=[
+            {"role": "system", "content": "Du bist ein hilfsbereiter Catan-Regel-Experte."},
+            {"role": "user", "content": prompt}
+        ]
+    )
+    return res.choices[0].message.content.strip()
+
+# === Hauptfunktion für Gradio
+def run_qa_pipeline(query, k=5):
+    try:
+        retrieved = retrieve(query, k)
+        if not retrieved:
+            return "⚠️ Keine relevanten Textstellen gefunden."
+        prompt = build_prompt(query, retrieved)
+        print("📨 Prompt gesendet...")
+
+        if openai_client:
+            answer = ask_openai(prompt)
+        elif groq_client:
+            answer = ask_groq(prompt)
+        else:
+            return "⚠️ Kein LLM API-Key vorhanden. Bitte OPENAI_API_KEY oder GROQ_API_KEY hinterlegen."
+
+        return f"📌 Frage: {query}\n\n📖 Antwort:\n{answer}"
+
+    except Exception as e:
+        return f"❌ Fehler beim Verarbeiten der Anfrage:\n{str(e)}"
+
+def build_prompt(query, texts):
+    context = "\n\n".join(texts)
+    return f"""Beantworte die folgende Frage basierend auf dem Kontext.
+
+Kontext:
+{context}
+
+Frage:
+{query}
+"""
+
 # === Anfrage an OpenAI
 def ask_openai(prompt):
     if not openai_client:
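run_qa_pipeline relies on a retrieve(query, k) helper that is not part of this commit. Given the imports (faiss, numpy, SentenceTransformer) and the faiss_index.index / chunks_mapping.pkl files downloaded above, a plausible sketch of that retrieval step looks like this (the embedding model name and the variable names are assumptions):

import pickle

import faiss
import numpy as np
from sentence_transformers import SentenceTransformer

# Assumed setup: these objects likely already exist at module level in rag_pipeline.py.
embedder = SentenceTransformer("all-MiniLM-L6-v2")  # model name is a guess
index = faiss.read_index("faiss_index.index")
with open("chunks_mapping.pkl", "rb") as f:
    chunks = pickle.load(f)

def retrieve(query, k=5):
    # Embed the question and search the FAISS index for the k nearest chunks.
    query_vec = embedder.encode([query])
    distances, indices = index.search(np.asarray(query_vec, dtype="float32"), k)
    # Map FAISS row ids back to the original text chunks; -1 marks "no result".
    return [chunks[i] for i in indices[0] if i != -1]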
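The comment above run_qa_pipeline marks it as the entry point for Gradio, but the app wiring itself is not in this diff. A minimal way to expose the function (assuming a separate app.py; the labels and title are placeholders) would be:

import gradio as gr

from rag_pipeline import run_qa_pipeline

# One text box in, one text box out; k keeps its default of 5.
demo = gr.Interface(
    fn=run_qa_pipeline,
    inputs=gr.Textbox(label="Frage zu den Catan-Regeln"),
    outputs=gr.Textbox(label="Antwort"),
    title="Catan RAG",
)

if __name__ == "__main__":
    demo.launch()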