Update app.py
app.py CHANGED
@@ -1,5 +1,4 @@
 from flask import Flask, render_template, request, jsonify, send_file
-import ollama
 import json
 import re
 from duckduckgo_search import DDGS
@@ -10,6 +9,8 @@ import urllib3
 import pandas as pd
 import io
 import ast
+from groq import Groq
+import os
 
 
 app = Flask(__name__)
@@ -99,18 +100,45 @@ Reference json:
 }
 """
 
-def ask_ollama(user_message, model='gemma3:1b', system_prompt=search_prompt):
-    response = ollama.chat(model=model, messages=[
-        {
-            "role": "system",
-            "content": system_prompt
-        },
-        {
-            "role": "user",
-            "content": user_message
-        }
-    ])
-    ai_reply = response['message']['content']
+def ask_llm(user_message, model='llama-3.3-70b-versatile', system_prompt="You are a helpful assistant."):
+    client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
+
+    response = client.chat.completions.create(
+        model=model,
+        messages=[
+            {
+                "role": "system",
+                "content": system_prompt
+            },
+            {
+                "role": "user",
+                "content": user_message
+            }
+        ],
+        stream=False,
+    )
+
+    return response.choices[0].message.content
+
+def ask_ollama(user_message, model='llama-3.3-70b-versatile', system_prompt=search_prompt):
+    client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
+
+    response = client.chat.completions.create(
+        model=model,
+        messages=[
+            {
+                "role": "system",
+                "content": system_prompt
+            },
+            {
+                "role": "user",
+                "content": user_message
+            }
+        ],
+        stream=False,
+    )
+
+    ai_reply = response.choices[0].message.content
     print(f"AI REPLY json:\n{ai_reply}")
 
     # Process the response to ensure we return valid JSON
@@ -118,13 +146,14 @@ def ask_ollama(user_message, model='gemma3:1b', system_prompt=search_prompt):
         # First, try to parse it directly in case it's already valid JSON
         print(f"AI REPLY:\n{ai_reply}")
         return ast.literal_eval(ai_reply.replace('json\n', '').replace('```', ''))
-    except:
+    except Exception as e:
         print(f"ERROR:\n{e}")
         # If it's not valid JSON, try to extract JSON from the text
        return {
            "1": "Error parsing response. Please try again.",
            "2": "Error parsing response. Please try again."
        }
+
 def search_web(topic, max_references=5, data_type="pdf"):
     """Search the web using DuckDuckGo and return results."""
     doc_list = []
@@ -538,19 +567,7 @@ Example: ["Insight 1", "Insight 3", "Insight 5"]"""
 ALL AVAILABLE INSIGHTS:
 {json.dumps(insights)}"""
 
-
-    response = ollama.chat(model='gemma3:1b', messages=[
-        {
-            "role": "system",
-            "content": ai_select_prompt
-        },
-        {
-            "role": "user",
-            "content": message
-        }
-    ])
-
-    ai_reply = response['message']['content']
+    ai_reply = ask_llm(message, system_prompt=ai_select_prompt)
     print(f"AI SELECTION RESPONSE: {ai_reply}")
 
     # Parse the JSON array from the response
@@ -606,4 +623,4 @@ def generate_key_issues():
         return jsonify({'error': str(e), 'key_issues': []})
 
 if __name__ == '__main__':
-    app.run(
+    app.run(host="0.0.0.0", port=7860)
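As a usage note, here is a minimal sketch of how the new Groq-backed helpers could be called outside Flask. Everything below is illustrative, not part of the commit: it assumes the module is saved as app.py, the groq package is installed, and GROQ_API_KEY is exported (for example as a Space secret); the prompt strings are made up.

from app import ask_llm, ask_ollama  # assumes the file above is saved as app.py

# Assumption: GROQ_API_KEY is already set in the environment, as both helpers expect.

# Plain completion through the default llama-3.3-70b-versatile model.
print(ask_llm("Summarise the switch from Ollama to Groq in one sentence."))

# ask_ollama keeps its old name but now also routes through Groq; it returns the
# model's JSON reply parsed into a dict, or an error dict when parsing fails.
result = ask_ollama("renewable energy adoption")
print(result)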
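The ast.literal_eval fallback that ask_ollama keeps can also be checked in isolation. A small sketch, with a made-up fenced reply string standing in for a real model response:

import ast

# Models often wrap JSON in a ```json fence; ask_ollama strips the fence markers
# before handing the remaining text to ast.literal_eval.
ai_reply = '```json\n{"1": "First insight", "2": "Second insight"}\n```'
cleaned = ai_reply.replace('json\n', '').replace('```', '')
parsed = ast.literal_eval(cleaned)
print(parsed["1"])  # -> First insight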