amacruz committed on
Commit
8381904
·
verified ·
1 Parent(s): c371402

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +248 -2
app.py CHANGED
@@ -941,8 +941,13 @@ class TinkerIQApp:
941
  return history, ""
942
 
943
  def handle_text_conversation(self, message: str) -> str:
944
- """Handle text-only conversations"""
945
- message_lower = message.lower()
 
 
 
 
 
946
 
947
  # Check for code generation requests
948
  if any(word in message_lower for word in ["code", "program", "sketch", "arduino", "esp32"]):
@@ -1097,6 +1102,247 @@ What would you like to work on today?
1097
  print(error_msg)
1098
  return error_msg
1099
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1100
  def build_interface(tinkeriq_app: TinkerIQApp):
1101
  """Build the Gradio interface"""
1102
 
 
941
  return history, ""
942
 
943
def handle_text_conversation(self, message: str) -> str:
    """Handle text-only conversations using AI intelligence."""
    try:
        # Delegate to the AI pipeline first.
        return self._generate_ai_response(message)
    except Exception as exc:
        # Top-level conversational boundary: never crash the chat UI.
        print(f"Error in AI response generation: {exc}")
        return self._fallback_response(message)
951
 
952
  # Check for code generation requests
953
  if any(word in message_lower for word in ["code", "program", "sketch", "arduino", "esp32"]):
 
1102
  print(error_msg)
1103
  return error_msg
1104
 
1105
+ def _generate_ai_response(self, message: str) -> str:
1106
+ """Generate intelligent responses using available AI providers"""
1107
+
1108
+ # Get current context
1109
+ context = self._build_conversation_context()
1110
+
1111
+ # Create a comprehensive prompt for the AI
1112
+ system_prompt = """You are TinkerIQ, an expert AI electronics assistant. You help with:
1113
+
1114
+ - Circuit analysis and design
1115
+ - Component selection and recommendations
1116
+ - Code generation for Arduino, ESP32, Raspberry Pi
1117
+ - Bill of Materials (BOM) creation
1118
+ - Electronics troubleshooting
1119
+ - Project guidance and tutorials
1120
+
1121
+ CURRENT CAPABILITIES:
1122
+ - Circuit Analysis: {circuit_available}
1123
+ - Code Generation: {code_available}
1124
+ - BOM Management: {bom_available}
1125
+ - Component Search: {component_available}
1126
+
1127
+ CONVERSATION CONTEXT:
1128
+ {context}
1129
+
1130
+ Respond naturally and helpfully to electronics questions. If the user asks about:
1131
+ - Component recommendations: Provide specific products, pricing, suppliers
1132
+ - Board comparisons: Give detailed technical comparisons
1133
+ - Project help: Offer practical guidance and suggestions
1134
+ - Code examples: Provide actual code snippets when appropriate
1135
+ - Circuit analysis: Direct to upload feature if asking about specific circuit
1136
+
1137
+ Be conversational, knowledgeable, and practical. Always aim to be maximally helpful.""".format(
1138
+ circuit_available="Available" if self.vision_analysis else "Limited (upload images for basic parsing)",
1139
+ code_available="Available" if self.code_generator else "Template-based",
1140
+ bom_available="Available" if self.bom_writer else "Basic functionality",
1141
+ component_available="Available" if self.component_search else "Limited",
1142
+ context=context
1143
+ )
1144
+
1145
+ user_prompt = f"User question: {message}"
1146
+
1147
+ # Try different AI providers in order of preference
1148
+ ai_providers = [
1149
+ ("openai", self._query_openai),
1150
+ ("anthropic", self._query_anthropic),
1151
+ ("sambanova", self._query_sambanova),
1152
+ ("huggingface", self._query_huggingface)
1153
+ ]
1154
+
1155
+ for provider_name, provider_func in ai_providers:
1156
+ if self.ai_providers.get(provider_name):
1157
+ try:
1158
+ response = provider_func(system_prompt, user_prompt)
1159
+ if response and len(response.strip()) > 20: # Valid response
1160
+ return response
1161
+ except Exception as e:
1162
+ print(f"AI provider {provider_name} failed: {e}")
1163
+ continue
1164
+
1165
+ # If all AI providers fail, use intelligent fallback
1166
+ return self._intelligent_fallback(message)
1167
+
1168
+ def _build_conversation_context(self) -> str:
1169
+ """Build context from current session state"""
1170
+ context_parts = []
1171
+
1172
+ if self.session_state.get("last_analysis"):
1173
+ analysis = self.session_state["last_analysis"]
1174
+ context_parts.append(f"Recent circuit analysis: {analysis.get('analysis', '')[:200]}...")
1175
+
1176
+ if self.session_state.get("current_bom"):
1177
+ bom = self.session_state["current_bom"]
1178
+ context_parts.append(f"Current BOM: {bom.get('component_count', 0)} components, ${bom.get('total_cost', 0):.2f}")
1179
+
1180
+ if self.session_state.get("last_code"):
1181
+ code_info = self.session_state["last_code"]
1182
+ context_parts.append(f"Recent code generation: {code_info.get('platform', 'unknown')} with features {code_info.get('features', [])}")
1183
+
1184
+ if not context_parts:
1185
+ context_parts.append("No current project context - fresh conversation")
1186
+
1187
+ return "\n".join(context_parts)
1188
+
1189
def _query_openai(self, system_prompt: str, user_prompt: str) -> "str | None":
    """Query the OpenAI chat-completions API.

    Returns the model's reply text, or None when the SDK is missing,
    the API key is unset, or the request fails — the caller treats None
    as "try the next provider". (Annotation fixed: the except path
    returns None, so the return type is ``str | None``, not ``str``.)
    """
    try:
        import openai  # local import: optional dependency

        client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            max_tokens=500,
            temperature=0.7,
        )

        return response.choices[0].message.content.strip()
    except Exception as e:
        # Best-effort: log and signal failure so the provider chain continues.
        print(f"OpenAI query failed: {e}")
        return None
1210
+
1211
def _query_anthropic(self, system_prompt: str, user_prompt: str) -> "str | None":
    """Query the Anthropic Claude API.

    Returns the reply text, or None when the SDK is missing, the key is
    unset, or the request fails — the caller then tries the next
    provider. (Annotation fixed: the except path returns None.)
    """
    try:
        import anthropic  # local import: optional dependency

        client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))

        # Anthropic takes the system prompt as a top-level argument,
        # not as a message role.
        response = client.messages.create(
            model="claude-3-haiku-20240307",
            max_tokens=500,
            system=system_prompt,
            messages=[{"role": "user", "content": user_prompt}],
        )

        return response.content[0].text.strip()
    except Exception as e:
        # Best-effort: log and signal failure so the provider chain continues.
        print(f"Anthropic query failed: {e}")
        return None
1229
+
1230
def _query_sambanova(self, system_prompt: str, user_prompt: str) -> "str | None":
    """Query the SambaNova OpenAI-compatible chat endpoint.

    Returns the reply text, or None on any failure (missing `requests`,
    missing key, HTTP error, timeout) so the caller can try the next
    provider. (Annotation fixed: the except path returns None.)
    """
    try:
        import requests  # local import: optional dependency

        url = "https://api.sambanova.ai/v1/chat/completions"
        headers = {
            "Authorization": f"Bearer {os.getenv('SAMBANOVA_API_KEY')}",
            "Content-Type": "application/json",
        }

        data = {
            "model": "Meta-Llama-3.1-8B-Instruct",
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            "max_tokens": 500,
            "temperature": 0.7,
        }

        response = requests.post(url, headers=headers, json=data, timeout=30)
        response.raise_for_status()

        result = response.json()
        return result["choices"][0]["message"]["content"].strip()
    except Exception as e:
        # Best-effort: log and signal failure so the provider chain continues.
        print(f"SambaNova query failed: {e}")
        return None
1259
+
1260
def _query_huggingface(self, system_prompt: str, user_prompt: str) -> "str | None":
    """Query the Hugging Face Inference API.

    The prompts are flattened into a single "User/Assistant" transcript
    because the text-generation endpoint takes one string input.
    Returns the text after the final "Assistant:" marker, or None when
    the request fails or the marker is absent in the generated text.
    (Annotation fixed: several paths return None, not ``str``.)
    """
    try:
        import requests  # local import: optional dependency

        # NOTE(review): DialoGPT is a dated conversational model — consider
        # a newer instruct model if response quality matters.
        api_url = "https://api-inference.huggingface.co/models/microsoft/DialoGPT-large"
        headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACE_API_KEY')}"}

        # Combine prompts for HF format
        full_prompt = f"{system_prompt}\n\nUser: {user_prompt}\nAssistant:"

        response = requests.post(
            api_url,
            headers=headers,
            json={"inputs": full_prompt, "parameters": {"max_length": 500, "temperature": 0.7}},
            timeout=30,
        )
        response.raise_for_status()

        result = response.json()
        if isinstance(result, list) and len(result) > 0:
            generated_text = result[0].get("generated_text", "")
            # The endpoint echoes the prompt; keep only the continuation.
            if "Assistant:" in generated_text:
                return generated_text.split("Assistant:")[-1].strip()

        return None
    except Exception as e:
        # Best-effort: log and signal failure so the provider chain continues.
        print(f"HuggingFace query failed: {e}")
        return None
1291
+
1292
+ def _intelligent_fallback(self, message: str) -> str:
1293
+ """Intelligent fallback when AI providers are unavailable"""
1294
+ message_lower = message.lower()
1295
+
1296
+ # Use simple keyword matching for basic responses
1297
+ if any(word in message_lower for word in ["esp32", "arduino", "board"]) and any(word in message_lower for word in ["price", "cost", "under", "$"]):
1298
+ return """🔧 **Board Recommendations:**
1299
+
1300
+ For specific current pricing and availability, I'd recommend checking:
1301
+ - **Digi-Key** or **Mouser** for official boards with precise specs
1302
+ - **Amazon** for quick shipping of development boards
1303
+ - **AliExpress** for budget-friendly options
1304
+
1305
+ Popular options include ESP32 DevKit boards ($6-10) and Arduino Uno compatibles ($5-15).
1306
+
1307
+ Would you like help with component search functionality, or do you have specific requirements for your project?"""
1308
+
1309
+ elif any(word in message_lower for word in ["help", "what can you do", "capabilities"]):
1310
+ return f"""🤖 **TinkerIQ Capabilities:**
1311
+
1312
+ **Available Now:**
1313
+ ✅ Circuit component parsing and analysis
1314
+ ✅ Code template generation (Arduino, ESP32, Raspberry Pi)
1315
+ ✅ Basic BOM management
1316
+ ✅ Project guidance and recommendations
1317
+
1318
+ **AI Features:** {sum(self.ai_providers.values())}/5 providers configured
1319
+ **Modules:** {sum([bool(self.component_search), bool(self.vision_analysis), bool(self.code_generator), bool(self.bom_writer)])}/4 available
1320
+
1321
+ **How to get better responses:** Configure API keys for OpenAI, Anthropic, or SambaNova in your .env file for more intelligent conversations!
1322
+
1323
+ What specific electronics topic can I help you with?"""
1324
+
1325
+ else:
1326
+ return """🔧 **I'm here to help with electronics!**
1327
+
1328
+ I can assist with component selection, board recommendations, circuit analysis, code generation, and project guidance.
1329
+
1330
+ For the most intelligent responses, please configure AI provider API keys in your .env file.
1331
+
1332
+ What specific electronics question do you have? I'll do my best to help with the information available."""
1333
+
1334
+ def _fallback_response(self, message: str) -> str:
1335
+ """Simple fallback when everything else fails"""
1336
+ return """I apologize, but I'm having trouble generating a response right now.
1337
+
1338
+ This might help:
1339
+ - Check your internet connection
1340
+ - Verify API keys are configured in .env file
1341
+ - Try uploading a circuit image for analysis
1342
+ - Use the specific tabs for code generation or BOM management
1343
+
1344
+ What specific electronics topic can I help you with using the available tools?"""
1345
+
1346
  def build_interface(tinkeriq_app: TinkerIQApp):
1347
  """Build the Gradio interface"""
1348