jzou19950715 committed

Commit 4294798 · verified · 1 Parent(s): 7746a40

Update app.py

Files changed (1)
  1. app.py +566 -358
app.py CHANGED
@@ -30,365 +30,573 @@ Core Traits: #25
30
  - Maintains context throughout the conversation #30
31
  """ #31
32
  #32
33
- # Profile structure configuration #33
34
- PROFILE_STRUCTURE = { #34
35
- "profile_overview": { #35
36
- "required_fields": ["current_status", "career_summary"], #36
37
- "optional_fields": ["key_highlights", "skills_and_expertise"], #37
38
- "allow_custom_fields": True, #38
39
- "weight": 0.2 #39
40
- }, #40
41
- "education": { #41
42
- "required_fields": ["institution", "program"], #42
43
- "optional_fields": ["metrics", "experiences", "achievements"], #43
44
- "allow_custom_fields": True, #44
45
- "weight": 0.25, #45
46
- "subsections": ["current", "history", "highlights"] #46
47
- }, #47
48
- "professional": { #48
49
- "required_fields": ["role", "organization"], #49
50
- "optional_fields": ["metrics", "achievements", "impact"], #50
51
- "allow_custom_fields": True, #51
52
- "weight": 0.25, #52
53
- "subsections": ["current", "history", "highlights"] #53
54
- }, #54
55
- "achievements_and_impacts": { #55
56
- "required_fields": [], #56
57
- "optional_fields": ["recognition", "certifications", "contributions"], #57
58
- "allow_custom_fields": True, #58
59
- "weight": 0.3 #59
60
- } #60
61
- } #61
62
- #62
63
- @dataclass #63
64
- class ConversationState: #64
65
- """Track the state of the conversation and profile completion.""" #65
66
- sections_completed: List[str] = None #66
67
- sections_partial: List[str] = None #67
68
- current_section: Optional[str] = None #68
69
- completion_percentage: float = 0.0 #69
70
- last_error: Optional[str] = None #70
71
  #71
72
- def __post_init__(self): #72
73
- self.sections_completed = [] #73
74
- self.sections_partial = [] #74
75
- #75
76
- class ProfileValidator: #76
77
- """Validate and structure profile data.""" #77
78
- #78
79
- @staticmethod #79
80
- def validate_section(section_data: Dict[str, Any], section_config: Dict[str, Any]) -> Tuple[bool, List[str]]: #80
81
- """Validate a section against its configuration.""" #81
82
- errors = [] #82
83
- is_valid = True #83
84
- #84
85
- # Check required fields #85
86
- for field in section_config.get("required_fields", []): #86
87
- if field not in section_data or not section_data[field]: #87
88
- errors.append(f"Missing required field: {field}") #88
89
- is_valid = False #89
90
- #90
91
- return is_valid, errors #91
92
  #92
93
- @staticmethod #93
94
- def clean_section_data(data: Dict[str, Any], config: Dict[str, Any]) -> Dict[str, Any]: #94
95
- """Clean and validate section data.""" #95
96
- cleaned = {} #96
97
- #97
98
- # Handle required fields #98
99
- for field in config.get("required_fields", []): #99
100
- cleaned[field] = data.get(field) or "Not provided" #100
101
- #101
102
- # Handle optional fields #102
103
- for field in config.get("optional_fields", []): #103
104
- if field in data and data[field]: #104
105
- cleaned[field] = data[field] #105
106
- #106
107
- return cleaned #107
108
- #108
109
- class EducationCareerCollector: #109
110
- """Main collector class for handling career and education information.""" #110
111
- #111
112
- def __init__(self): #112
113
- self.conversation_history = [] #113
114
- self.client = None #114
115
- self.state = ConversationState() #115
116
- self.validator = ProfileValidator() #116
117
- #117
118
- def process_message(self, message: str, api_key: str) -> Dict[str, Any]: #118
119
- """Process a user message and return AI response.""" #119
120
- try: #120
121
- if not message.strip(): #121
122
- raise ValueError("Message cannot be empty") #122
123
- #123
124
- if not api_key.strip().startswith('sk-'): #124
125
- raise ValueError("Invalid API key format") #125
126
- #126
127
- if not self.client: #127
128
- self.client = OpenAI(api_key=api_key) #128
129
- #129
130
- # Add message to history #130
131
- self.conversation_history.append({"role": "user", "content": message}) #131
132
- #132
133
- # Get AI response #133
134
- response = self.client.chat.completions.create( #134
135
- model="gpt-4", #135
136
- messages=[ #136
137
- {"role": "system", "content": SYSTEM_PROMPT}, #137
138
- *self.conversation_history #138
139
- ], #139
140
- temperature=0.7 #140
141
- ) #141
142
- #142
143
- # Process response #143
144
- ai_message = response.choices[0].message.content #144
145
- self.conversation_history.append({"role": "assistant", "content": ai_message}) #145
146
- #146
147
- # Update state #147
148
- self._update_conversation_state() #148
149
- #149
150
- return { #150
151
- "content": ai_message, #151
152
- "type": "success", #152
153
- "completion_status": self.get_completion_status() #153
154
- } #154
155
- #155
156
- except Exception as e: #156
157
- error_msg = f"Error processing message: {str(e)}" #157
158
- logger.error(error_msg) #158
159
- self.state.last_error = error_msg #159
160
- return { #160
161
- "content": error_msg, #161
162
- "type": "error", #162
163
- "completion_status": self.get_completion_status() #163
164
- } #164
165
- #165
166
- def _update_conversation_state(self): #166
167
- """Update internal conversation state.""" #167
168
- try: #168
169
- # Create analysis prompt #169
170
- analysis_prompt = """ #170
171
- Analyze our conversation and identify: #171
172
- 1. Which sections have been fully covered? #172
173
- 2. Which sections have been partially covered? #173
174
- 3. What is the current topic of discussion? #174
175
- #175
176
- Respond in JSON format: #176
177
- { #177
178
- "sections_completed": [], #178
179
- "sections_partial": [], #179
180
- "current_section": "", #180
181
- "completion_percentage": 0.0 #181
182
- } #182
183
- """ #183
184
- #184
185
- # Get analysis from AI #185
186
- response = self.client.chat.completions.create( #186
187
- model="gpt-4", #187
188
- messages=[ #188
189
- {"role": "system", "content": SYSTEM_PROMPT}, #189
190
- *self.conversation_history, #190
191
- {"role": "user", "content": analysis_prompt} #191
192
- ], #192
193
- temperature=0.3 #193
194
- ) #194
195
- #195
196
- # Update state #196
197
- analysis = json.loads(response.choices[0].message.content) #197
198
- self.state.sections_completed = analysis.get("sections_completed", []) #198
199
- self.state.sections_partial = analysis.get("sections_partial", []) #199
200
- self.state.current_section = analysis.get("current_section") #200
201
- self.state.completion_percentage = analysis.get("completion_percentage", 0.0) #201
202
- #202
203
- except Exception as e: #203
204
- logger.error(f"Error updating conversation state: {str(e)}") #204
205
- #205
206
- def generate_json(self, api_key: str) -> Tuple[Optional[str], str]: #206
207
- """Generate profile JSON from conversation.""" #207
208
- try: #208
209
- if not self.client: #209
210
- self.client = OpenAI(api_key=api_key) #210
211
- #211
212
- # First pass: Analysis #212
213
- analysis_prompt = """ #213
214
- Analyze our conversation and extract: #214
215
- 1. Key themes and topics #215
216
- 2. Quantitative metrics #216
217
- 3. Qualitative achievements #217
218
- 4. Timeline points #218
219
- 5. Skills and competencies #219
220
- #220
221
- Format as JSON. #221
222
- """ #222
223
- #223
224
- analysis_response = self.client.chat.completions.create( #224
225
- model="gpt-4", #225
226
- messages=[ #226
227
- {"role": "system", "content": SYSTEM_PROMPT}, #227
228
- *self.conversation_history, #228
229
- {"role": "user", "content": analysis_prompt} #229
230
- ], #230
231
- temperature=0.7 #231
232
- ) #232
233
- #233
234
- analysis = json.loads(analysis_response.choices[0].message.content) #234
235
- #235
236
- # Second pass: Generate JSON #236
237
- json_prompt = f""" #237
238
- Based on our conversation and this analysis: {json.dumps(analysis, indent=2)} #238
239
- #239
240
- Create a comprehensive profile following the PROFILE_STRUCTURE format. #240
241
- Include both quantitative and qualitative elements. #241
242
- """ #242
243
- #243
244
- json_response = self.client.chat.completions.create( #244
245
- model="gpt-4", #245
246
- messages=[ #246
247
- {"role": "system", "content": SYSTEM_PROMPT}, #247
248
- *self.conversation_history, #248
249
- {"role": "user", "content": json_prompt} #249
250
- ], #250
251
- temperature=0.5 #251
252
- ) #252
253
- #253
254
- # Process and validate JSON #254
255
- profile_data = json.loads(json_response.choices[0].message.content) #255
256
- #256
257
- # Validate and clean sections #257
258
- for section, data in profile_data.items(): #258
259
- if section in PROFILE_STRUCTURE: #259
260
- _, errors = self.validator.validate_section( #260
261
- data, PROFILE_STRUCTURE[section] #261
262
- ) #262
263
- if errors: #263
264
- logger.warning(f"Validation errors in {section}: {errors}") #264
265
- profile_data[section] = self.validator.clean_section_data( #265
266
- data, PROFILE_STRUCTURE[section] #266
267
- ) #267
268
- #268
269
- # Add metadata #269
270
- profile_data["metadata"] = { #270
271
- "generated_at": datetime.now().isoformat(), #271
272
- "version": "2.0", #272
273
- "completion_metrics": { #273
274
- "overall_score": self.state.completion_percentage, #274
275
- "sections_completed": self.state.sections_completed, #275
276
- "sections_partial": self.state.sections_partial #276
277
- } #277
278
- } #278
279
- #279
280
- # Save file #280
281
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") #281
282
- filename = f"career_education_profile_{timestamp}.json" #282
283
- #283
284
- with open(filename, 'w') as f: #284
285
- json.dump(profile_data, f, indent=2) #285
286
- #286
287
- return (filename, json.dumps(profile_data, indent=2)) #287
288
- #288
289
- except Exception as e: #289
290
- logger.error(f"Error generating JSON: {str(e)}") #290
291
- return (None, json.dumps(self._create_error_json(str(e)), indent=2)) #291
292
- #292
293
- def _create_error_json(self, error_msg: str) -> Dict[str, Any]: #293
294
- """Create error JSON structure.""" #294
295
- return { #295
296
- "error": error_msg, #296
297
- "metadata": { #297
298
  "generated_at": datetime.now().isoformat(), #298
299
  "version": "2.0", #299
300
- "error_occurred": True #300
301
- } #301
302
- } #302
303
- #303
304
- def get_completion_status(self) -> Dict[str, Any]: #304
305
- """Get current completion status.""" #305
306
- return { #306
307
- "completion_percentage": self.state.completion_percentage, #307
308
- "sections_completed": self.state.sections_completed, #308
309
- "sections_partial": self.state.sections_partial, #309
310
- "current_section": self.state.current_section, #310
311
- "sections_remaining": [ #311
312
- section for section in PROFILE_STRUCTURE.keys() #312
313
- if section not in self.state.sections_completed #313
314
- and section not in self.state.sections_partial #314
315
- ] #315
316
- } #316
317
- #317
318
- def create_education_career_interface(): #318
319
- """Create Gradio interface.""" #319
320
- collector = EducationCareerCollector() #320
321
- #321
322
- css = """ #322
323
- .message { font-size: 16px; } #323
324
- .alert { padding: 12px; margin: 8px 0; border-radius: 4px; } #324
325
- .alert-info { background-color: #e8f4f8; border-left: 4px solid #4a90e2; } #325
326
- .alert-error { background-color: #fde8e8; border-left: 4px solid #f56565; } #326
327
- """ #327
328
- #328
329
- with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo: #329
330
- gr.Markdown(""" #330
331
- # πŸ• LOSS DOG - Profile Builder #331
332
- Share your career and education journey naturally. #332
333
- """) #333
334
- #334
335
- with gr.Row(): #335
336
- with gr.Column(scale=2): #336
337
- api_key = gr.Textbox( #337
338
- label="OpenAI API Key", #338
339
- type="password", #339
340
- placeholder="Enter your OpenAI API key" #340
341
- ) #341
342
- #342
343
- status_msg = gr.Markdown( #343
344
- "Ready to start!", #344
345
- elem_classes=["alert", "alert-info"] #345
346
- ) #346
347
- #347
348
- chatbot = gr.Chatbot( #348
349
- height=400, #349
350
- show_label=False #350
351
- ) #351
352
- #352
353
- with gr.Row(): #353
354
- msg = gr.Textbox( #354
355
- label="Your message", #355
356
- placeholder="Tell me about your journey..." #356
357
- ) #357
358
- submit = gr.Button("Send") #358
359
- generate = gr.Button("Generate Profile") #359
360
- #360
361
- with gr.Column(scale=1): #361
362
- progress_info = gr.Markdown("### Profile Progress") #362
363
- json_preview = gr.JSON(label="Profile Preview") #363
364
- output_file = gr.File(label="Download Profile") #364
365
- #365
366
- def process_message(message, history, key): #366
367
- if not message.strip(): #367
368
- return history, "Please enter a message." #368
369
- #369
370
- result = collector.process_message(message, key) #370
371
- history.append((message, result["content"])) #371
372
- #372
373
- status = f"Completion: {result['completion_status']['completion_percentage']}%" #373
374
- return history, status #374
375
- #375
376
- def generate_profile(key): #376
377
- filename, json_content = collector.generate_json(key) #377
378
- if filename: #378
379
- return (filename, json.loads(json_content)) #379
380
- return (None, json.loads(json_content)) #380
381
- #381
382
- msg.submit(process_message, [msg, chatbot, api_key], [chatbot, status_msg]) #382
383
- submit.click(process_message, [msg, chatbot, api_key], [chatbot, status_msg]) #383
384
- generate.click(generate_profile, [api_key], [output_file, json_preview]) #384
385
- #385
386
- return demo #386
387
- #387
388
- if __name__ == "__main__": #388
389
- demo = create_education_career_interface() #389
390
- demo.launch( #390
391
- server_name="0.0.0.0", #391
392
- server_port=7860, #392
393
- share=True #393
394
- ) #394
30
  - Maintains context throughout the conversation #30
31
  """ #31
32
  #32
33
+ @dataclass #33
34
+ class ConversationState: #34
35
+ """Track the state of the conversation and profile completion.""" #35
36
+ sections_completed: List[str] = None #36
37
+ sections_partial: List[str] = None #37
38
+ current_section: Optional[str] = None #38
39
+ completion_percentage: float = 0.0 #39
40
+ last_error: Optional[str] = None #40
41
+ #41
42
+ def __post_init__(self): #42
43
+ self.sections_completed = [] #43
44
+ self.sections_partial = [] #44
45
+ #45
46
+ class ProfileAnalyzer: #46
47
+ """Analyzes and structures conversation data flexibly.""" #47
48
+ #48
49
+ @staticmethod #49
50
+ def analyze_content(text: str) -> Dict[str, Any]: #50
51
+ """Extract key information from text.""" #51
52
+ analysis = { #52
53
+ "categories": [], #53
54
+ "metrics": {}, #54
55
+ "experiences": [], #55
56
+ "achievements": [], #56
57
+ "skills": [] #57
58
+ } #58
59
+ return analysis #59
60
+ #60
61
+ @staticmethod #61
62
+ def clean_data(data: Dict[str, Any]) -> Dict[str, Any]: #62
63
+ """Clean and validate extracted data.""" #63
64
+ def clean_value(v): #64
65
+ if isinstance(v, dict): #65
66
+ return {k: clean_value(val) for k, val in v.items() if val not in (None, "", [], {})} #66
67
+ if isinstance(v, list): #67
68
+ return [clean_value(item) for item in v if item not in (None, "", [], {})] #68
69
+ return v #69
70
+ return clean_value(data) #70
71
  #71
72
+ class EducationCareerCollector: #72
73
+ """Main collector class for handling career and education information.""" #73
74
+ #74
75
+ def __init__(self): #75
76
+ self.conversation_history = [] #76
77
+ self.client = None #77
78
+ self.state = ConversationState() #78
79
+ self.analyzer = ProfileAnalyzer() #79
80
+ #80
81
+ def process_message(self, message: str, api_key: str) -> Dict[str, Any]: #81
82
+ """Process a user message and return AI response with enhanced error handling.""" #82
83
+ try: #83
84
+ if not message.strip(): #84
85
+ raise ValueError("Message cannot be empty") #85
86
+ #86
87
+ if not api_key.strip().startswith('sk-'): #87
88
+ raise ValueError("Invalid API key format") #88
89
+ #89
90
+ if not self.client: #90
91
+ self.client = OpenAI(api_key=api_key) #91
92
  #92
93
+ # Add message to conversation history #93
94
+ self.conversation_history.append({ #94
95
+ "role": "user", #95
96
+ "content": message, #96
97
+ "timestamp": datetime.now().isoformat() #97
98
+ }) #98
99
+ #99
100
+ # Get AI response with retry mechanism #100
101
+ max_retries = 3 #101
102
+ last_error = None #102
103
+ #103
104
+ for attempt in range(max_retries): #104
105
+ try: #105
106
+ response = self.client.chat.completions.create( #106
107
+ model="gpt-4", #107
108
+ messages=[ #108
109
+ {"role": "system", "content": SYSTEM_PROMPT}, #109
110
+ *[{ #110
111
+ "role": msg["role"], #111
112
+ "content": msg["content"] #112
113
+ } for msg in self.conversation_history] #113
114
+ ], #114
115
+ temperature=0.7, #115
116
+ max_tokens=1000 #116
117
+ ) #117
118
+ break #118
119
+ except Exception as e: #119
120
+ last_error = str(e) #120
121
+ if attempt == max_retries - 1: #121
122
+ raise Exception(f"Failed after {max_retries} attempts: {last_error}") #122
123
+ logger.warning(f"Attempt {attempt + 1} failed: {last_error}") #123
124
+ continue #124
125
+ #125
126
+ # Process response #126
127
+ ai_message = response.choices[0].message.content #127
128
+ self.conversation_history.append({ #128
129
+ "role": "assistant", #129
130
+ "content": ai_message, #130
131
+ "timestamp": datetime.now().isoformat() #131
132
+ }) #132
133
+ #133
134
+ # Analyze response and update state #134
135
+ self._update_conversation_state(ai_message) #135
136
+ #136
137
+ return { #137
138
+ "content": ai_message, #138
139
+ "type": "success", #139
140
+ "completion_status": self.get_completion_status(), #140
141
+ "timestamp": datetime.now().isoformat() #141
142
+ } #142
143
+ #143
144
+ except Exception as e: #144
145
+ error_msg = f"Error processing message: {str(e)}" #145
146
+ logger.error(error_msg) #146
147
+ self.state.last_error = error_msg #147
148
+ return { #148
149
+ "content": error_msg, #149
150
+ "type": "error", #150
151
+ "completion_status": self.get_completion_status(), #151
152
+ "timestamp": datetime.now().isoformat() #152
153
+ } #153
154
+ def _update_conversation_state(self, ai_message: str) -> None: #154
155
+ """Update the conversation state based on AI response.""" #155
156
+ try: #156
157
+ # Create analysis prompt #157
158
+ analysis_prompt = """ #158
159
+ Review our conversation and identify: #159
160
+ 1. What topics or aspects of their journey were discussed? #160
161
+ 2. What areas need more exploration? #161
162
+ 3. What's the current focus of discussion? #162
163
+
164
+ Response format:
165
+ {
166
+ "topics_discussed": [],
167
+ "areas_needing_exploration": [],
168
+ "current_focus": "",
169
+ "completion_estimate": 0.0
170
+ }
171
+ """ #163
172
+
173
+ # Get analysis from AI #164
174
+ response = self.client.chat.completions.create( #165
175
+ model="gpt-4", #166
176
+ messages=[ #167
177
+ {"role": "system", "content": SYSTEM_PROMPT}, #168
178
+ *[{"role": m["role"], "content": m["content"]} for m in self.conversation_history], #169
179
+ {"role": "user", "content": analysis_prompt} #170
180
+ ], #171
181
+ temperature=0.3 #172
182
+ ) #173
183
+
184
+ # Process analysis #174
185
+ try: #175
186
+ analysis = json.loads(response.choices[0].message.content) #176
187
+
188
+ # Update state based on analysis #177
189
+ self.state.sections_completed = analysis.get("topics_discussed", []) #178
190
+ self.state.sections_partial = analysis.get("areas_needing_exploration", []) #179
191
+ self.state.current_section = analysis.get("current_focus") #180
192
+ self.state.completion_percentage = analysis.get("completion_estimate", 0.0) #181
193
+
194
+ except json.JSONDecodeError as e: #182
195
+ logger.error(f"Error parsing analysis JSON: {str(e)}") #183
196
+ # Set default values on error #184
197
+ self.state.completion_percentage = max( #185
198
+ self.state.completion_percentage, #186
199
+ len(self.conversation_history) * 5.0 # Rough estimate based on message count #187
200
+ ) #188
201
+
202
+ except Exception as e: #189
203
+ logger.error(f"Error updating conversation state: {str(e)}") #190
204
+ # State remains unchanged on error #191
205
+
206
+ def get_completion_status(self) -> Dict[str, Any]: #192
207
+ """Get current completion status with rich context.""" #193
208
+ status = { #194
209
+ "completion_percentage": self.state.completion_percentage, #195
210
+ "topics_covered": self.state.sections_completed, #196
211
+ "topics_in_progress": self.state.sections_partial, #197
212
+ "current_focus": self.state.current_section, #198
213
+ "conversation_length": len(self.conversation_history), #199
214
+ "last_update": datetime.now().isoformat(), #200
215
+ "needs_attention": [ #201
216
+ topic for topic in self.state.sections_partial #202
217
+ if topic not in self.state.sections_completed #203
218
+ ], #204
219
+ "status_summary": self._generate_status_summary() #205
220
+ } #206
221
+
222
+ if self.state.last_error: #207
223
+ status["last_error"] = self.state.last_error #208
224
+
225
+ return status #209
226
+
227
+ def _generate_status_summary(self) -> str: #210
228
+ """Generate a human-readable summary of the conversation status.""" #211
229
+ if not self.conversation_history: #212
230
+ return "Ready to start the conversation." #213
231
+
232
+ summary_parts = [] #214
233
+
234
+ # Add completion status #215
235
+ if self.state.completion_percentage > 0: #216
236
+ summary_parts.append( #217
237
+ f"Conversation is approximately {self.state.completion_percentage:.1f}% complete" #218
238
+ ) #219
239
+
240
+ # Add covered topics #220
241
+ if self.state.sections_completed: #221
242
+ topics = ", ".join(self.state.sections_completed) #222
243
+ summary_parts.append(f"We've discussed: {topics}") #223
244
+
245
+ # Add current focus #224
246
+ if self.state.current_section: #225
247
+ summary_parts.append( #226
248
+ f"Currently focusing on: {self.state.current_section}" #227
249
+ ) #228
250
+
251
+ # Add next steps if any #229
252
+ if self.state.sections_partial: #230
253
+ topics = ", ".join(self.state.sections_partial) #231
254
+ summary_parts.append(f"Topics to explore further: {topics}") #232
255
+
256
+ return " | ".join(summary_parts) if summary_parts else "Conversation in progress." #233
257
+
258
+ def generate_json(self, api_key: str) -> Tuple[Optional[str], str]: #234
259
+ """Generate a JSON profile from the conversation history.""" #235
260
+ try: #236
261
+ if not self.client: #237
262
+ self.client = OpenAI(api_key=api_key) #238
263
+
264
+ # Analysis prompt focused on understanding the conversation #239
265
+ analysis_prompt = """ #240
266
+ Review our conversation and create a JSON structure that captures the person's journey. #241
267
+ Focus on what was actually discussed, not fitting into predetermined categories. #242
268
+ Include: #243
269
+ 1. Any experiences or achievements shared #244
270
+ 2. Skills or competencies demonstrated #245
271
+ 3. Timeline or progression points mentioned #246
272
+ 4. Notable metrics or outcomes #247
273
+ 5. Personal growth or learning moments #248
274
+
275
+ Structure the JSON naturally around the topics they shared. #249
276
+ """ #250
277
+ # Get initial analysis of conversation content #251
278
+ analysis_response = self.client.chat.completions.create( #252
279
+ model="gpt-4", #253
280
+ messages=[ #254
281
+ {"role": "system", "content": SYSTEM_PROMPT}, #255
282
+ *[{"role": m["role"], "content": m["content"]} for m in self.conversation_history], #256
283
+ {"role": "user", "content": analysis_prompt} #257
284
+ ], #258
285
+ temperature=0.7 #259
286
+ ) #260
287
+
288
+ # Parse the initial analysis #261
289
+ analysis = json.loads(analysis_response.choices[0].message.content) #262
290
+
291
+ # Generate structured profile based on analysis #263
292
+ profile_prompt = f""" #264
293
+ Based on our conversation, create a detailed profile JSON. #265
294
+ Use this analysis as a guide: {json.dumps(analysis, indent=2)} #266
295
+
296
+ Important guidelines: #267
297
+ - Create sections based on what was actually discussed #268
298
+ - Include both quantitative and qualitative information #269
299
+ - Preserve the context and significance of experiences #270
300
+ - Maintain natural flow and connections between topics #271
301
+ - Use descriptive section names that reflect the conversation #272
302
+ """ #273
303
+
304
+ # Generate the profile JSON #274
305
+ profile_response = self.client.chat.completions.create( #275
306
+ model="gpt-4", #276
307
+ messages=[ #277
308
+ {"role": "system", "content": SYSTEM_PROMPT}, #278
309
+ *[{"role": m["role"], "content": m["content"]} for m in self.conversation_history], #279
310
+ {"role": "user", "content": profile_prompt} #280
311
+ ], #281
312
+ temperature=0.5 #282
313
+ ) #283
314
+
315
+ # Parse and clean the profile data #284
316
+ profile_data = json.loads(profile_response.choices[0].message.content) #285
317
+
318
+ # Clean the data #286
319
+ def clean_dict(d): #287
320
+ if isinstance(d, dict): #288
321
+ return {k: clean_dict(v) for k, v in d.items() #289
322
+ if v not in (None, "", [], {}, "N/A", "None")} #290
323
+ if isinstance(d, list): #291
324
+ return [clean_dict(item) for item in d #292
325
+ if item not in (None, "", [], {}, "N/A", "None")] #293
326
+ return d #294
327
+
328
+ profile_data = clean_dict(profile_data) #295
329
+
330
+ # Add metadata #296
331
+ profile_data["metadata"] = { #297
332
  "generated_at": datetime.now().isoformat(), #298
333
  "version": "2.0", #299
334
+ "generation_metrics": { #300
335
+ "conversation_length": len(self.conversation_history), #301
336
+ "topics_covered": self.state.sections_completed, #302
337
+ "completion_percentage": self.state.completion_percentage #303
338
+ } #304
339
+ } #305
340
+
341
+ # Save to file #306
342
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") #307
343
+ filename = f"career_education_profile_{timestamp}.json" #308
344
+
345
+ try: #309
346
+ with open(filename, 'w', encoding='utf-8') as f: #310
347
+ json.dump(profile_data, f, indent=2, ensure_ascii=False) #311
348
+ return (filename, json.dumps(profile_data, indent=2, ensure_ascii=False)) #312
349
+ except Exception as e: #313
350
+ logger.error(f"Error saving profile to file: {str(e)}") #314
351
+ return (None, json.dumps(profile_data, indent=2, ensure_ascii=False)) #315
352
+
353
+ except Exception as e: #316
354
+ error_msg = f"Error generating profile: {str(e)}" #317
355
+ logger.error(error_msg) #318
356
+ error_json = { #319
357
+ "error": error_msg, #320
358
+ "metadata": { #321
359
+ "generated_at": datetime.now().isoformat(), #322
360
+ "error_occurred": True #323
361
+ } #324
362
+ } #325
363
+ return (None, json.dumps(error_json, indent=2)) #326
364
+
365
+ def create_education_career_interface(): #327
366
+ """Create Gradio interface for the education and career collector.""" #328
367
+ collector = EducationCareerCollector() #329
368
+
369
+ css = """ #330
370
+ .message { font-size: 16px; margin: 8px 0; } #331
371
+ .system-message { color: #444; font-style: italic; } #332
372
+ .user-message { color: #000; font-weight: 500; } #333
373
+ .alert { #334
374
+ padding: 12px; #335
375
+ margin: 8px 0; #336
376
+ border-radius: 4px; #337
377
+ } #338
378
+ .alert-info { #339
379
+ background-color: #e8f4f8; #340
380
+ border-left: 4px solid #4a90e2; #341
381
+ } #342
382
+ .alert-error { #343
383
+ background-color: #fde8e8; #344
384
+ border-left: 4px solid #f56565; #345
385
+ } #346
386
+ .alert-success { #347
387
+ background-color: #e8f8e8; #348
388
+ border-left: 4px solid #48bb78; #349
389
+ } #350
390
+ """ #351
391
+
392
+ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo: #352
393
+ gr.Markdown(""" #353
394
+ # πŸ• LOSS DOG - Profile Builder #354
395
+
396
+ Share your career and education journey naturally. #355
397
+ Tell your story in your own way - we'll capture what matters to you. #356
398
+ """) #357
399
+
400
+ with gr.Row(): #358
401
+ with gr.Column(scale=2): #359
402
+ # API Key Input #360
403
+ api_key = gr.Textbox( #361
404
+ label="OpenAI API Key", #362
405
+ type="password", #363
406
+ placeholder="Enter your OpenAI API key (sk-...)", #364
407
+ info="Your API key from platform.openai.com" #365
408
+ ) #366
409
+
410
+ # Status Messages #367
411
+ status_msg = gr.Markdown( #368
412
+ "Ready to start! Share your journey...", #369
413
+ elem_classes=["alert", "alert-info"] #370
414
+ ) #371
415
+
416
+ # Chat Interface #372
417
+ chatbot = gr.Chatbot( #373
418
+ height=400, #374
419
+ show_label=False, #375
420
+ elem_classes=["message"] #376
421
+ ) #377
422
+
423
+ # Message Input #378
424
+ with gr.Row(): #379
425
+ msg = gr.Textbox( #380
426
+ label="Your message", #381
427
+ placeholder="Tell me about your journey...", #382
428
+ show_label=False, #383
429
+ scale=4 #384
430
+ ) #385
431
+ submit = gr.Button("Send", variant="primary", scale=1) #386
432
+
433
+ # Action Buttons #387
434
+ with gr.Row(): #388
435
+ clear = gr.Button("🗑️ Clear Chat", scale=1) #389
436
+ generate = gr.Button("📄 Generate Profile", scale=2) #390
437
+ with gr.Column(scale=1): #391
438
+ # Progress Information #392
439
+ progress_info = gr.Markdown( #393
440
+ "### Profile Progress\nStart sharing your story!", #394
441
+ elem_classes=["alert", "alert-info"] #395
442
+ ) #396
443
+
444
+ # Profile Preview #397
445
+ with gr.Tab("Preview"): #398
446
+ json_preview = gr.JSON( #399
447
+ label="Profile Preview", #400
448
+ show_label=True #401
449
+ ) #402
450
+
451
+ # Download Section #403
452
+ with gr.Tab("Download"): #404
453
+ output_file = gr.File( #405
454
+ label="Download Profile" #406
455
+ ) #407
456
+
457
+ # Tips and Guidelines #408
458
+ with gr.Accordion("💡 Tips", open=False): #409
459
+ gr.Markdown(""" #410
460
+ ### Share Your Story Naturally #411
461
+
462
+ - Tell us about experiences that matter to you #412
463
+ - Include both achievements and challenges #413
464
+ - Share numbers when they're meaningful #414
465
+ - Describe your growth and learning #415
466
+ - Talk about what makes your journey unique #416
467
+ """) #417
468
+
469
+ def process_message(message: str, history: list, key: str) -> tuple: #418
470
+ """Process user message and update interface.""" #419
471
+ if not message.strip(): #420
472
+ return history, "Please enter a message." #421
473
+
474
+ try: #422
475
+ # Process the message #423
476
+ result = collector.process_message(message, key) #424
477
+
478
+ # Update chat history #425
479
+ history.append((message, result["content"])) #426
480
+
481
+ # Generate status message #427
482
+ status = f"""Progress: {result['completion_status']['completion_percentage']:.1f}% #428
483
+ | Topics covered: {len(result['completion_status']['topics_covered'])}""" #429
484
+
485
+ return history, status #430
486
+
487
+ except Exception as e: #431
488
+ error_msg = f"Error: {str(e)}" #432
489
+ logger.error(error_msg) #433
490
+ return history, error_msg #434
491
+
492
+ def generate_profile(key: str) -> tuple: #435
493
+ """Generate and return profile JSON.""" #436
494
+ try: #437
495
+ filename, json_content = collector.generate_json(key) #438
496
+ if filename: #439
497
+ return ( #440
498
+ filename, #441
499
+ json.loads(json_content), #442
500
+ "Profile generated successfully! 🎉" #443
501
+ ) #444
502
+ return ( #445
503
+ None, #446
504
+ json.loads(json_content), #447
505
+ "Profile generated but couldn't save file." #448
506
+ ) #449
507
+ except Exception as e: #450
508
+ error_msg = f"Error generating profile: {str(e)}" #451
509
+ logger.error(error_msg) #452
510
+ return None, {"error": error_msg}, error_msg #453
511
+
512
+ def clear_interface() -> tuple: #454
513
+ """Reset the interface state.""" #455
514
+ return ( #456
515
+ [], # Clear chat history #457
516
+ "Ready to start! Share your journey...", # Reset status #458
517
+ "### Profile Progress\nStart sharing your story!", # Reset progress #459
518
+ None, # Clear JSON preview #460
519
+ None # Clear file output #461
520
+ ) #462
521
+
522
+ def update_progress(history: list) -> str: #463
523
+ """Update progress information based on conversation.""" #464
524
+ if not history: #465
525
+ return "### Profile Progress\nStart sharing your story!" #466
526
+
527
+ # Get completion status #467
528
+ status = collector.get_completion_status() #468
529
+
530
+ # Format progress message #469
531
+ progress_md = f"""### Profile Progress: {status['completion_percentage']:.1f}%\n\n""" #470
532
+
533
+ if status['topics_covered']: #471
534
+ progress_md += "✅ **Discussed:**\n" #472
535
+ for topic in status['topics_covered']: #473
536
+ progress_md += f"- {topic}\n" #474
537
+
538
+ if status['topics_in_progress']: #475
539
+ progress_md += "\n📝 **Currently exploring:**\n" #476
540
+ for topic in status['topics_in_progress']: #477
541
+ progress_md += f"- {topic}\n" #478
542
+
543
+ if status.get('needs_attention'): #479
544
+ progress_md += "\n❗ **Consider discussing:**\n" #480
545
+ for topic in status['needs_attention']: #481
546
+ progress_md += f"- {topic}\n" #482
547
+
548
+ return progress_md #483
549
+
550
+ # Event Handlers #484
551
+ msg.submit( #485
552
+ process_message, #486
553
+ [msg, chatbot, api_key], #487
554
+ [chatbot, status_msg] #488
555
+ ).then( #489
556
+ update_progress, #490
557
+ chatbot, #491
558
+ progress_info #492
559
+ ).then( #493
560
+ lambda: "", #494
561
+ None, #495
562
+ msg #496
563
+ ) #497
564
+
565
+ submit.click( #498
566
+ process_message, #499
567
+ [msg, chatbot, api_key], #500
568
+ [chatbot, status_msg] #502
570
+ ).then( #503
571
+ update_progress, #504
572
+ chatbot, #505
573
+ progress_info #506
574
+ ).then( #507
575
+ lambda: "", #508
576
+ None, #509
577
+ msg #510
578
+ ) #511
579
+
580
+ generate.click( #512
581
+ generate_profile, #513
582
+ [api_key], #514
583
+ [output_file, json_preview, status_msg] #515
584
+ ) #516
585
+
586
+ clear.click( #517
587
+ clear_interface, #518
588
+ None, #519
589
+ [chatbot, status_msg, progress_info, json_preview, output_file] #520
590
+ ) #521
591
+
592
+ return demo #522
593
+
594
+ if __name__ == "__main__": #523
595
+ demo = create_education_career_interface() #524
596
+ demo.launch( #525
597
+ server_name="0.0.0.0", #526
598
+ server_port=7860, #527
599
+ share=True, #528
600
+ enable_queue=True, #529
601
+ show_error=True #530
602
+ ) #531