Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,21 +1,16 @@
|
|
1 |
import streamlit as st
|
2 |
from groq import Groq
|
3 |
import requests
|
4 |
-
import html
|
5 |
|
6 |
-
#
|
7 |
-
# Page config
|
8 |
-
# -----------------------------
|
9 |
-
st.set_page_config(page_title="CodeCraft AI", layout="wide")
|
10 |
-
|
11 |
-
# -----------------------------
|
12 |
# API Clients Initialization
|
13 |
-
#
|
14 |
groq_client = None
|
15 |
groq_status = "❌"
|
16 |
if "GROQ_API_KEY" in st.secrets:
|
17 |
try:
|
18 |
groq_client = Groq(api_key=st.secrets["GROQ_API_KEY"])
|
|
|
19 |
groq_client.models.list()
|
20 |
groq_status = "✅"
|
21 |
except Exception:
|
@@ -27,323 +22,187 @@ hf_status = "❌"
|
|
27 |
if huggingface_api_key:
|
28 |
try:
|
29 |
headers = {"Authorization": f"Bearer {huggingface_api_key}"}
|
30 |
-
response = requests.get(
|
31 |
-
|
32 |
-
headers=headers,
|
33 |
-
timeout=10,
|
34 |
-
)
|
35 |
-
if response.status_code in [200, 401, 403]:
|
36 |
hf_status = "✅"
|
37 |
except Exception:
|
38 |
hf_status = "❌"
|
39 |
|
40 |
-
# -----------------------------
|
41 |
-
# CSS layout & styling
|
42 |
-
# -----------------------------
|
43 |
-
SIDEBAR_WIDTH_PX = 300
|
44 |
-
st.markdown(
|
45 |
-
f"""
|
46 |
-
<style>
|
47 |
-
/* Sidebar */
|
48 |
-
[data-testid="stSidebar"] {{
|
49 |
-
width: {SIDEBAR_WIDTH_PX}px;
|
50 |
-
min-width: {SIDEBAR_WIDTH_PX}px;
|
51 |
-
max-width: {SIDEBAR_WIDTH_PX}px;
|
52 |
-
position: fixed;
|
53 |
-
top: 0;
|
54 |
-
bottom: 0;
|
55 |
-
overflow-y: auto;
|
56 |
-
padding: 20px 12px !important;
|
57 |
-
background: #f7f9fc;
|
58 |
-
border-right: 1px solid #e0e0e0;
|
59 |
-
}}
|
60 |
-
|
61 |
-
/* Main content */
|
62 |
-
.block-container {{
|
63 |
-
margin-left: {SIDEBAR_WIDTH_PX + 20}px;
|
64 |
-
padding-top: 16px;
|
65 |
-
}}
|
66 |
-
|
67 |
-
/* Chat panel full height */
|
68 |
-
.chat-panel {{
|
69 |
-
height: calc(100vh - 170px);
|
70 |
-
overflow-y: auto;
|
71 |
-
padding: 12px 16px;
|
72 |
-
background: transparent;
|
73 |
-
}}
|
74 |
-
|
75 |
-
/* Message bubbles */
|
76 |
-
.msg-row {{ display:flex; margin:8px 0; }}
|
77 |
-
.msg-user {{ justify-content:flex-end; }}
|
78 |
-
.msg-assistant {{ justify-content:flex-start; }}
|
79 |
-
.bubble {{
|
80 |
-
max-width: 75%;
|
81 |
-
padding:10px 14px;
|
82 |
-
border-radius:14px;
|
83 |
-
white-space:pre-wrap;
|
84 |
-
word-wrap:break-word;
|
85 |
-
font-size:15px;
|
86 |
-
line-height:1.5;
|
87 |
-
box-shadow: 0 1px 3px rgba(0,0,0,0.1);
|
88 |
-
}}
|
89 |
-
.bubble-user {{
|
90 |
-
background:#0b1020;
|
91 |
-
color:#fff;
|
92 |
-
border-radius:14px 14px 6px 14px;
|
93 |
-
}}
|
94 |
-
.bubble-assistant {{
|
95 |
-
background:#eef0f2;
|
96 |
-
color:#111;
|
97 |
-
border-radius:14px 14px 14px 6px;
|
98 |
-
}}
|
99 |
-
|
100 |
-
/* Code block */
|
101 |
-
.code-block {{
|
102 |
-
background:#0b0b0b;
|
103 |
-
color:#e6e6e6;
|
104 |
-
padding:10px;
|
105 |
-
border-radius:6px;
|
106 |
-
overflow:auto;
|
107 |
-
font-family:monospace;
|
108 |
-
}}
|
109 |
-
|
110 |
-
/* Fixed input */
|
111 |
-
div[data-testid="stChatInput"], .stChatInput {{
|
112 |
-
position: fixed !important;
|
113 |
-
bottom: 36px !important;
|
114 |
-
left: {SIDEBAR_WIDTH_PX + 24}px !important;
|
115 |
-
right: 24px !important;
|
116 |
-
z-index: 99999;
|
117 |
-
background: #fff !important;
|
118 |
-
border-radius: 12px;
|
119 |
-
box-shadow: 0 2px 6px rgba(0,0,0,0.08);
|
120 |
-
padding: 6px 12px !important;
|
121 |
-
}}
|
122 |
-
|
123 |
-
/* Footer */
|
124 |
-
.cc-footer {{
|
125 |
-
position: fixed;
|
126 |
-
bottom: 0;
|
127 |
-
left: {SIDEBAR_WIDTH_PX + 24}px;
|
128 |
-
right: 24px;
|
129 |
-
text-align: center;
|
130 |
-
font-size: 13px;
|
131 |
-
color: gray;
|
132 |
-
padding: 6px 0;
|
133 |
-
background: #fafafa;
|
134 |
-
border-top: 1px solid #e0e0e0;
|
135 |
-
z-index: 99998;
|
136 |
-
}}
|
137 |
-
|
138 |
-
footer {{ visibility: hidden; }}
|
139 |
-
</style>
|
140 |
-
""",
|
141 |
-
unsafe_allow_html=True,
|
142 |
-
)
|
143 |
|
144 |
-
#
|
145 |
-
# Sidebar
|
146 |
-
#
|
147 |
st.sidebar.title("⚙️ Settings")
|
148 |
st.sidebar.markdown(f"**Groq API Status:** {groq_status}")
|
149 |
st.sidebar.markdown(f"**HuggingFace API Status:** {hf_status}")
|
150 |
|
151 |
api_priority = st.sidebar.radio(
|
152 |
-
"Choose API Priority",
|
|
|
|
|
153 |
)
|
|
|
154 |
model_choice = st.sidebar.selectbox(
|
155 |
"Choose Model",
|
156 |
-
["llama-3.1-8b-instant", "llama-3.1-70b-versatile", "mixtral-8x7b-32768"]
|
157 |
)
|
158 |
|
159 |
-
st.
|
160 |
-
temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.4, 0.05)
|
161 |
-
max_tokens = st.sidebar.slider("Max Tokens", 100, 2000, 500, 50)
|
162 |
-
top_p = st.sidebar.slider("Top P", 0.1, 1.0, 0.9, 0.05)
|
163 |
-
|
164 |
-
st.sidebar.markdown("---")
|
165 |
-
st.sidebar.markdown("### 💬 Chats")
|
166 |
-
|
167 |
-
if "chats" not in st.session_state:
|
168 |
-
st.session_state.chats = {"Chat 1": {"generate": [], "debug": [], "explain": []}}
|
169 |
-
if "active_chat" not in st.session_state:
|
170 |
-
st.session_state.active_chat = "Chat 1"
|
171 |
-
if "menu_open" not in st.session_state:
|
172 |
-
st.session_state.menu_open = {}
|
173 |
-
|
174 |
-
if st.sidebar.button("➕ New Chat"):
|
175 |
-
idx = len(st.session_state.chats) + 1
|
176 |
-
new_name = f"Chat {idx}"
|
177 |
-
st.session_state.chats[new_name] = {"generate": [], "debug": [], "explain": []}
|
178 |
-
st.session_state.active_chat = new_name
|
179 |
-
st.rerun()
|
180 |
-
|
181 |
-
for chat_name in list(st.session_state.chats.keys()):
|
182 |
-
c1, c2 = st.sidebar.columns([7, 1])
|
183 |
-
if c1.button(chat_name, key=f"select_{chat_name}"):
|
184 |
-
st.session_state.active_chat = chat_name
|
185 |
-
st.rerun()
|
186 |
-
if c2.button("⋮", key=f"menu_{chat_name}"):
|
187 |
-
st.session_state.menu_open[chat_name] = not st.session_state.menu_open.get(
|
188 |
-
chat_name, False
|
189 |
-
)
|
190 |
-
st.rerun()
|
191 |
|
192 |
-
|
193 |
-
|
194 |
-
|
195 |
-
|
196 |
-
|
197 |
-
|
198 |
-
|
199 |
-
|
200 |
-
|
201 |
-
|
202 |
-
|
203 |
-
|
204 |
-
|
205 |
-
|
206 |
-
|
207 |
-
st.session_state.menu_open[new_label] = False
|
208 |
-
st.rerun()
|
209 |
-
elif action == "Delete":
|
210 |
-
if st.sidebar.button("Confirm delete", key=f"del_{chat_name}"):
|
211 |
-
st.session_state.chats.pop(chat_name, None)
|
212 |
-
if st.session_state.active_chat == chat_name:
|
213 |
-
st.session_state.active_chat = next(iter(st.session_state.chats), None)
|
214 |
-
st.rerun()
|
215 |
-
|
216 |
-
active_chat = st.session_state.active_chat
|
217 |
-
|
218 |
-
# -----------------------------
|
219 |
-
# API helpers
|
220 |
-
# -----------------------------
|
221 |
def call_groq(system_prompt, chat_history):
|
|
|
222 |
if not groq_client:
|
223 |
return None
|
224 |
try:
|
225 |
response = groq_client.chat.completions.create(
|
226 |
model=model_choice,
|
227 |
messages=[{"role": "system", "content": system_prompt}]
|
228 |
-
|
229 |
-
temperature=
|
230 |
-
max_tokens=max_tokens,
|
231 |
-
top_p=top_p,
|
232 |
)
|
233 |
return response.choices[0].message.content
|
234 |
-
except Exception:
|
|
|
235 |
return None
|
236 |
|
237 |
|
238 |
def call_huggingface(system_prompt, chat_history):
|
|
|
239 |
if not huggingface_api_key:
|
240 |
return None
|
241 |
try:
|
242 |
headers = {"Authorization": f"Bearer {huggingface_api_key}"}
|
243 |
-
prompt = system_prompt + "\n\n" + "\n".join([f"{r}: {m}" for r, m in chat_history])
|
244 |
payload = {
|
245 |
-
"inputs":
|
246 |
-
"parameters": {"temperature":
|
247 |
}
|
248 |
response = requests.post(
|
249 |
f"https://api-inference.huggingface.co/models/{model_choice}",
|
250 |
headers=headers,
|
251 |
-
json=payload
|
252 |
-
timeout=60,
|
253 |
)
|
254 |
if response.status_code == 200:
|
255 |
data = response.json()
|
256 |
-
if isinstance(data, list) and
|
257 |
return data[0]["generated_text"]
|
258 |
-
if isinstance(data, dict) and "generated_text" in data:
|
259 |
-
return data["generated_text"]
|
260 |
return str(data)
|
261 |
-
|
262 |
-
|
|
|
|
|
|
|
263 |
return None
|
264 |
|
265 |
|
266 |
def get_ai_response(system_prompt, chat_history):
|
267 |
-
|
268 |
if api_priority == "Groq First":
|
269 |
-
ai_msg = call_groq(system_prompt, chat_history)
|
270 |
-
|
271 |
-
|
272 |
-
else:
|
273 |
-
ai_msg = call_huggingface(system_prompt, chat_history)
|
274 |
-
|
275 |
-
|
276 |
-
|
277 |
-
|
278 |
-
|
279 |
-
|
280 |
-
|
281 |
-
|
282 |
-
|
283 |
-
|
284 |
-
|
285 |
-
|
286 |
-
|
287 |
-
|
288 |
-
|
289 |
-
|
290 |
-
|
291 |
-
|
292 |
-
|
293 |
-
|
294 |
-
|
295 |
-
|
296 |
-
|
297 |
-
|
298 |
-
|
299 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
300 |
|
301 |
-
|
302 |
-
|
303 |
-
|
304 |
-
|
305 |
-
|
306 |
-
|
307 |
-
|
308 |
-
|
309 |
-
|
310 |
-
|
311 |
-
|
312 |
-
|
313 |
-
|
314 |
-
|
315 |
-
|
316 |
-
|
317 |
-
|
318 |
-
|
319 |
-
|
320 |
-
|
321 |
-
|
322 |
-
|
323 |
-
|
324 |
-
|
325 |
-
|
326 |
-
|
327 |
-
|
328 |
-
|
329 |
-
|
330 |
-
|
331 |
-
|
332 |
-
|
333 |
-
|
334 |
-
|
335 |
-
with st.spinner("🤔 Thinking..."):
|
336 |
-
ai = get_ai_response(prompt_map[key], messages)
|
337 |
-
messages.append(("assistant", ai))
|
338 |
-
st.session_state.chats[active_chat][key] = messages
|
339 |
-
st.rerun()
|
340 |
-
|
341 |
-
# -----------------------------
|
342 |
# Footer
|
343 |
-
#
|
344 |
-
st.markdown(
|
345 |
-
|
346 |
-
<div class="cc-footer">⚠️ CodeCraft AI may produce mistakes. Always verify important code.</div>
|
347 |
-
""",
|
348 |
-
unsafe_allow_html=True,
|
349 |
-
)
|
|
|
1 |
import streamlit as st
|
2 |
from groq import Groq
|
3 |
import requests
|
|
|
4 |
|
5 |
+
# =======================
|
|
|
|
|
|
|
|
|
|
|
6 |
# API Clients Initialization
|
7 |
+
# =======================
|
8 |
groq_client = None
|
9 |
groq_status = "❌"
|
10 |
if "GROQ_API_KEY" in st.secrets:
|
11 |
try:
|
12 |
groq_client = Groq(api_key=st.secrets["GROQ_API_KEY"])
|
13 |
+
# simple test request
|
14 |
groq_client.models.list()
|
15 |
groq_status = "✅"
|
16 |
except Exception:
|
|
|
# Probe the Hugging Face Inference API so the sidebar can show its status.
if huggingface_api_key:
    try:
        headers = {"Authorization": f"Bearer {huggingface_api_key}"}
        # timeout keeps a stalled/unreachable endpoint from hanging app startup
        response = requests.get(
            "https://api-inference.huggingface.co/status",
            headers=headers,
            timeout=10,
        )
        if response.status_code in [200, 401, 403]:  # API exists
            hf_status = "✅"
    except Exception:
        hf_status = "❌"
|
30 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
31 |
|
# =======================
# Sidebar Settings
# =======================
st.sidebar.title("⚙️ Settings")
st.sidebar.markdown(f"**Groq API Status:** {groq_status}")
st.sidebar.markdown(f"**HuggingFace API Status:** {hf_status}")

# Which backend to try first; the other one acts as the fallback.
_PRIORITY_OPTIONS = ["Groq First", "HuggingFace First"]
api_priority = st.sidebar.radio("Choose API Priority", _PRIORITY_OPTIONS, index=0)

# Model identifier forwarded to whichever inference backend handles the call.
_MODEL_OPTIONS = ["llama-3.1-8b-instant", "llama-3.1-70b-versatile", "mixtral-8x7b-32768"]
model_choice = st.sidebar.selectbox("Choose Model", _MODEL_OPTIONS)

st.title("🤖 CodeCraft AI - Mini Copilot (Chat Edition)")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
51 |
|
# =======================
# Session State
# =======================
# One independent message list per tab; each list survives Streamlit reruns.
for _history_key in ("generate_chat", "debug_chat", "explain_chat"):
    if _history_key not in st.session_state:
        st.session_state[_history_key] = []


# =======================
# Helper Functions
# =======================
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def call_groq(system_prompt, chat_history):
    """Call Groq API with error handling; returns the reply text or None."""
    if not groq_client:
        return None
    # Build the message list: system prompt first, then the conversation turns.
    conversation = [{"role": "system", "content": system_prompt}]
    for role, msg in chat_history:
        conversation.append({"role": role, "content": msg})
    try:
        completion = groq_client.chat.completions.create(
            model=model_choice,
            messages=conversation,
            temperature=0.4
        )
        return completion.choices[0].message.content
    except Exception as e:
        st.warning(f"⚠️ Groq API error: {e}")
        return None
|
82 |
|
83 |
|
def call_huggingface(system_prompt, chat_history):
    """Call the Hugging Face Inference API with error handling.

    Args:
        system_prompt: Instruction text prepended to the conversation.
        chat_history: List of (role, message) tuples.

    Returns:
        The generated text on success, otherwise None.
    """
    if not huggingface_api_key:
        return None
    try:
        headers = {"Authorization": f"Bearer {huggingface_api_key}"}
        # Prepend the system prompt so the model actually sees its instructions.
        prompt = system_prompt + "\n\n" + "\n".join([msg for _, msg in chat_history])
        payload = {
            "inputs": prompt,
            "parameters": {"temperature": 0.5, "max_new_tokens": 500}
        }
        response = requests.post(
            f"https://api-inference.huggingface.co/models/{model_choice}",
            headers=headers,
            json=payload,
            timeout=60,  # avoid hanging the Streamlit script on a stalled request
        )
        if response.status_code == 200:
            data = response.json()
            # Guard against an empty list before indexing into data[0].
            if isinstance(data, list) and data and "generated_text" in data[0]:
                return data[0]["generated_text"]
            # Some models return a plain dict instead of a one-element list.
            if isinstance(data, dict) and "generated_text" in data:
                return data["generated_text"]
            return str(data)
        else:
            st.warning(f"⚠️ Hugging Face error: {response.text}")
            return None
    except Exception as e:
        st.warning(f"⚠️ Hugging Face exception: {e}")
        return None
|
110 |
|
111 |
|
def get_ai_response(system_prompt, chat_history):
    """Get an AI reply honoring the configured priority, with fallback.

    Tries the preferred backend first; if it returns None, tries the other.
    Returns a user-facing error string when both backends fail.
    """
    # Resolve which backend is primary from the sidebar setting.
    if api_priority == "Groq First":
        primary, fallback = call_groq, call_huggingface
    else:  # HuggingFace First
        primary, fallback = call_huggingface, call_groq

    ai_msg = primary(system_prompt, chat_history)
    if ai_msg is None:
        ai_msg = fallback(system_prompt, chat_history)

    if ai_msg is None:
        return "❌ Both APIs failed. Please check your API keys or try again later."
    return ai_msg
|
126 |
+
|
127 |
+
|
# =======================
# Chat UI Function
# =======================
def chat_ui(tab_name, chat_history, system_prompt, input_key):
    """Reusable chat UI for each tab with fixed bottom input bar.

    Args:
        tab_name: Heading shown above the conversation.
        chat_history: Mutable list of (role, message) tuples; appended in place.
        system_prompt: System instruction forwarded to get_ai_response().
        input_key: Unique st.chat_input key so each tab keeps its own input box.
    """
    st.subheader(tab_name)

    # --- Chat history display ---
    chat_container = st.container()
    with chat_container:
        for role, msg in chat_history:
            if role == "user":
                with st.chat_message("user"):
                    st.write(msg)
            else:
                with st.chat_message("assistant"):
                    if "```" in msg:  # detect fenced code blocks
                        parts = msg.split("```")
                        for i, part in enumerate(parts):
                            if i % 2 == 1:  # odd segments are inside a fence
                                lang, *code_lines = part.split("\n")
                                code = "\n".join(code_lines)
                                # Strip stray whitespace from the fence tag so
                                # e.g. "python " still highlights correctly.
                                st.code(code, language=lang.strip() or "python")
                            else:
                                st.write(part)
                    else:
                        st.write(msg)

    # --- Chat input ---
    user_input = st.chat_input("Type your message...", key=input_key)

    if user_input:
        # Remember the last submitted text for this tab.
        st.session_state[input_key + "_last"] = user_input
        chat_history.append(("user", user_input))

        # --- Get AI reply with fallback ---
        with st.spinner("🤔 Thinking..."):
            ai_msg = get_ai_response(system_prompt, chat_history)

        chat_history.append(("assistant", ai_msg))

        st.rerun()
|
172 |
+
|
173 |
+
|
# =======================
# Tabs
# =======================
tab1, tab2, tab3 = st.tabs(["💡 Generate Code", "🛠 Debug Code", "📘 Explain Code"])

# (tab, heading, system prompt, input key, session-state key) per chat mode.
_TAB_SPECS = [
    (tab1, "💡 Generate Code",
     "You are a helpful coding assistant. Generate correct code first, then a short simple explanation.",
     "generate_input", "generate_chat"),
    (tab2, "🛠 Debug Code",
     "You are an expert code debugger. Fix errors and give corrected code, then explain what changed and why in simple terms.",
     "debug_input", "debug_chat"),
    (tab3, "📘 Explain Code",
     "You are a teacher that explains code in simple words. The user pastes code, and you explain step by step.",
     "explain_input", "explain_chat"),
]

for _tab, _title, _prompt, _input_key, _state_key in _TAB_SPECS:
    with _tab:
        chat_ui(_title, st.session_state[_state_key], _prompt, input_key=_input_key)
|
202 |
+
|
203 |
+
|
# =======================
# Footer
# =======================
# Horizontal rule plus a small disclaimer rendered below the chat tabs.
st.markdown("---")
st.caption("✨ CodeCraft can make mistakes. Check important info.")
|
|
|
|
|
|
|
|