Engr-Usman-Ali committed on
Commit
e6f643b
Β·
verified Β·
1 Parent(s): d28907c

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +166 -0
app.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import requests
3
+ import time
4
+ import pandas as pd
5
+ from dotenv import load_dotenv
6
+ import os
7
+ from groq import Groq
# Load env variables
load_dotenv()

def _secret(name):
    """Read *name* from Streamlit secrets, tolerating a missing secrets file.

    `st.secrets` raises (FileNotFoundError / StreamlitSecretNotFoundError)
    when no secrets.toml exists — common in local development — so fall
    back to returning None and let the environment variable take over.
    """
    try:
        return st.secrets.get(name)
    except Exception:
        return None

HF_API_KEY = _secret("HUGGINGFACE_API_KEY") or os.getenv("HUGGINGFACE_API_KEY")
GROQ_API_KEY = _secret("GROQ_API_KEY") or os.getenv("GROQ_API_KEY")

# Initialize Groq client if key exists
groq_client = Groq(api_key=GROQ_API_KEY) if GROQ_API_KEY else None
# Available models: per-task model ids for each backend ("hf" / "groq").
# Code generation and debugging share the same code-specialized models;
# explanation uses larger general-purpose instruct models.
MODELS = {
    "Python/JS Code Gen": {"hf": "bigcode/starcoder2-3b", "groq": "llama3-8b-8192"},
    "Debugging": {"hf": "bigcode/starcoder2-3b", "groq": "llama3-8b-8192"},
    "Explanation": {"hf": "mistralai/Mistral-7B-Instruct-v0.2", "groq": "llama3-70b-8192"},
}
# Hugging Face API
def query_hf(model_id, prompt, max_new_tokens=300):
    """Query the Hugging Face Inference API for text generation.

    Args:
        model_id: HF model repo id, e.g. "bigcode/starcoder2-3b".
        prompt: text sent as the "inputs" field.
        max_new_tokens: generation length cap forwarded to the API.

    Returns:
        (text, latency_seconds, success) — success is False on any API
        error or exception so callers can fall back to another backend.
    """
    url = f"https://api-inference.huggingface.co/models/{model_id}"
    headers = {"Authorization": f"Bearer {HF_API_KEY}"}
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": max_new_tokens}}

    t0 = time.time()
    try:
        response = requests.post(url, headers=headers, json=payload, timeout=60)
        latency = time.time() - t0
        output = response.json()

        # Successful generations arrive as [{"generated_text": ...}].
        # Fix: guard against an empty list or non-dict elements — the
        # original indexed output[0] before checking the list had items,
        # which raised IndexError/TypeError on malformed responses.
        if (
            isinstance(output, list)
            and output
            and isinstance(output[0], dict)
            and "generated_text" in output[0]
        ):
            return output[0]["generated_text"], latency, True
        elif isinstance(output, dict) and "error" in output:
            return f"⚠️ HF Error: {output['error']}", latency, False
        else:
            return str(output), latency, False
    except Exception as e:
        # Timeouts, connection errors, and non-JSON bodies all land here;
        # report failure so run_query() can try the other backend.
        latency = time.time() - t0
        return f"⚠️ HF Exception: {e}", latency, False
# Groq API
def query_groq(model_id, prompt, max_tokens=300):
    """Send *prompt* to the Groq chat-completions API and time the call.

    Returns (text, latency_seconds, success); success is False when the
    API key is missing or the request raises, enabling backend fallback.
    """
    if not groq_client:
        return "⚠️ Groq API key not set.", 0, False
    started = time.time()
    try:
        completion = groq_client.chat.completions.create(
            model=model_id,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=max_tokens,
        )
        return completion.choices[0].message.content, time.time() - started, True
    except Exception as exc:
        return f"⚠️ Groq Exception: {exc}", time.time() - started, False
# Prompt builder
def build_prompt(mode, user_input, language="Python"):
    """Turn the user's raw input into a task-specific LLM prompt.

    Unknown modes pass *user_input* through untouched.
    """
    templates = {
        "Python/JS Code Gen": (
            f"You are a helpful programmer. Write {language} code for:\n"
            f"{user_input}\nReturn only code."
        ),
        "Debugging": f"Debug this code and explain briefly:\n{user_input}",
        "Explanation": f"Explain this code step by step:\n{user_input}",
    }
    return templates.get(mode, user_input)
# Query with fallback
def run_query(backend, mode, user_input, lang="Python"):
    """Run *user_input* through the chosen backend, falling back to the
    other provider on failure.

    Args:
        backend: "hf" or "groq".
        mode: key into MODELS ("Python/JS Code Gen", "Debugging", "Explanation").
        user_input: raw text from the UI.
        lang: target language for code-generation prompts.

    Returns:
        (output_text, total_latency_seconds, status_label)
    """
    # Fix: validate the backend FIRST. The original indexed
    # MODELS[mode][backend] before its invalid-backend check, so that
    # branch was unreachable (a bad backend raised KeyError instead).
    if backend not in ("hf", "groq"):
        return "⚠️ Invalid backend", 0, "None"

    prompt = build_prompt(mode, user_input, lang)
    if backend == "hf":
        primary, primary_name = query_hf, "Hugging Face"
        fallback, fallback_key, fallback_name = query_groq, "groq", "Groq"
    else:
        primary, primary_name = query_groq, "Groq"
        fallback, fallback_key, fallback_name = query_hf, "hf", "Hugging Face"

    output, latency, success = primary(MODELS[mode][backend], prompt)
    if success:
        return output, latency, f"{primary_name} ✅"

    # Primary failed — try the other provider. Fix: report honestly
    # whether the fallback itself succeeded (the original ignored
    # success2 and always claimed "✅" for the fallback).
    output2, latency2, success2 = fallback(MODELS[mode][fallback_key], prompt)
    mark = "✅" if success2 else "❌"
    return output2, latency + latency2, f"{primary_name} ❌ → {fallback_name} {mark}"
# Streamlit UI
st.set_page_config(page_title="CodeCraft AI", layout="wide")
st.title("🧑‍💻 CodeCraft AI (with Fallback)")
st.write("Generate, debug, and explain code using Hugging Face & Groq. If one fails, it auto-falls back!")

# Radio displays friendly provider names but stores the backend key.
_BACKEND_LABELS = {"hf": "Hugging Face"}
backend = st.radio("Choose Backend", ["hf", "groq"], format_func=lambda x: _BACKEND_LABELS.get(x, "Groq"))

tab1, tab2, tab3, tab4 = st.tabs(["Generate", "Debug", "Explain", "Analytics"])
# Track logs: each entry is a (mode, latency, status) tuple shown in Analytics.
st.session_state.setdefault("logs", [])
# Tab 1: Generate — free-text problem statement in, generated code out.
with tab1:
    st.subheader("Code Generation")
    lang = st.selectbox("Choose language", ["Python", "JavaScript"])
    problem = st.text_area("Enter your problem statement")
    if st.button("Generate Code", key="gen_btn"):
        if not problem.strip():
            st.warning("Please enter a problem.")
        else:
            output, latency, status = run_query(backend, "Python/JS Code Gen", problem, lang)
            st.code(output, language=lang.lower())
            st.success(f"{status} | Time: {latency:.2f}s")
            st.session_state.logs.append(("Generate", latency, status))
# Tab 2: Debug — paste buggy code, get a fix plus brief explanation.
with tab2:
    st.subheader("Debug Code")
    buggy_code = st.text_area("Paste buggy code here")
    if st.button("Debug Code", key="debug_btn"):
        if not buggy_code.strip():
            st.warning("Please paste code.")
        else:
            output, latency, status = run_query(backend, "Debugging", buggy_code)
            st.text_area("AI Fix & Explanation", output, height=300)
            st.success(f"{status} | Time: {latency:.2f}s")
            st.session_state.logs.append(("Debug", latency, status))
# Tab 3: Explain — paste code, get a step-by-step walkthrough.
with tab3:
    st.subheader("Explain Code")
    code_input = st.text_area("Paste code to explain")
    if st.button("Explain Code", key="explain_btn"):
        if not code_input.strip():
            st.warning("Please paste code.")
        else:
            output, latency, status = run_query(backend, "Explanation", code_input)
            st.text_area("AI Explanation", output, height=300)
            st.success(f"{status} | Time: {latency:.2f}s")
            st.session_state.logs.append(("Explain", latency, status))
# Tab 4: Analytics — table + mean-latency-per-mode chart over session logs.
with tab4:
    st.subheader("Usage Analytics")
    logs = st.session_state.logs
    if not logs:
        st.info("No usage yet. Try generating, debugging, or explaining first!")
    else:
        df = pd.DataFrame(logs, columns=["Mode", "Latency", "Status"])
        st.write(df)
        st.bar_chart(df.groupby("Mode")["Latency"].mean())