Update app.py
app.py
CHANGED

@@ -30,365 +30,573 @@ Core Traits:
- Maintains context throughout the conversation
"""

@dataclass
class ConversationState:
    """Track the state of the conversation and profile completion."""
    sections_completed: List[str] = None
    sections_partial: List[str] = None
    current_section: Optional[str] = None
    completion_percentage: float = 0.0
    last_error: Optional[str] = None

    def __post_init__(self):
        self.sections_completed = []
        self.sections_partial = []

class ProfileAnalyzer:
    """Analyzes and structures conversation data flexibly."""

    @staticmethod
    def analyze_content(text: str) -> Dict[str, Any]:
        """Extract key information from text."""
        analysis = {
            "categories": [],
            "metrics": {},
            "experiences": [],
            "achievements": [],
            "skills": []
        }
        return analysis

    @staticmethod
    def clean_data(data: Dict[str, Any]) -> Dict[str, Any]:
        """Clean and validate extracted data."""
        def clean_value(v):
            if isinstance(v, dict):
                return {k: clean_value(val) for k, val in v.items() if val not in (None, "", [], {})}
            if isinstance(v, list):
                return [clean_value(item) for item in v if item not in (None, "", [], {})]
            return v
        return clean_value(data)

class EducationCareerCollector:
    """Main collector class for handling career and education information."""

    def __init__(self):
        self.conversation_history = []
        self.client = None
        self.state = ConversationState()
        self.analyzer = ProfileAnalyzer()

    def process_message(self, message: str, api_key: str) -> Dict[str, Any]:
        """Process a user message and return AI response with enhanced error handling."""
        try:
            if not message.strip():
                raise ValueError("Message cannot be empty")

            if not api_key.strip().startswith('sk-'):
                raise ValueError("Invalid API key format")

            if not self.client:
                self.client = OpenAI(api_key=api_key)

            # Add message to conversation history
            self.conversation_history.append({
                "role": "user",
                "content": message,
                "timestamp": datetime.now().isoformat()
            })

            # Get AI response with retry mechanism
            max_retries = 3
            last_error = None

            for attempt in range(max_retries):
                try:
                    response = self.client.chat.completions.create(
                        model="gpt-4",
                        messages=[
                            {"role": "system", "content": SYSTEM_PROMPT},
                            *[{
                                "role": msg["role"],
                                "content": msg["content"]
                            } for msg in self.conversation_history]
                        ],
                        temperature=0.7,
                        max_tokens=1000
                    )
                    break
                except Exception as e:
                    last_error = str(e)
                    if attempt == max_retries - 1:
                        raise Exception(f"Failed after {max_retries} attempts: {last_error}")
                    logger.warning(f"Attempt {attempt + 1} failed: {last_error}")
                    continue

            # Process response
            ai_message = response.choices[0].message.content
            self.conversation_history.append({
                "role": "assistant",
                "content": ai_message,
                "timestamp": datetime.now().isoformat()
            })

            # Analyze response and update state
            self._update_conversation_state(ai_message)

            return {
                "content": ai_message,
                "type": "success",
                "completion_status": self.get_completion_status(),
                "timestamp": datetime.now().isoformat()
            }

        except Exception as e:
            error_msg = f"Error processing message: {str(e)}"
            logger.error(error_msg)
            self.state.last_error = error_msg
            return {
                "content": error_msg,
                "type": "error",
                "completion_status": self.get_completion_status(),
                "timestamp": datetime.now().isoformat()
            }

    def _update_conversation_state(self, ai_message: str) -> None:
        """Update the conversation state based on AI response."""
        try:
            # Create analysis prompt
            analysis_prompt = """
            Review our conversation and identify:
            1. What topics or aspects of their journey were discussed?
            2. What areas need more exploration?
            3. What's the current focus of discussion?

            Response format:
            {
                "topics_discussed": [],
                "areas_needing_exploration": [],
                "current_focus": "",
                "completion_estimate": 0.0
            }
            """

            # Get analysis from AI
            response = self.client.chat.completions.create(
                model="gpt-4",
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    *self.conversation_history,
                    {"role": "user", "content": analysis_prompt}
                ],
                temperature=0.3
            )

            # Process analysis
            try:
                analysis = json.loads(response.choices[0].message.content)

                # Update state based on analysis
                self.state.sections_completed = analysis.get("topics_discussed", [])
                self.state.sections_partial = analysis.get("areas_needing_exploration", [])
                self.state.current_section = analysis.get("current_focus")
                self.state.completion_percentage = analysis.get("completion_estimate", 0.0)

            except json.JSONDecodeError as e:
                logger.error(f"Error parsing analysis JSON: {str(e)}")
                # Set default values on error
                self.state.completion_percentage = max(
                    self.state.completion_percentage,
                    len(self.conversation_history) * 5.0  # Rough estimate based on message count
                )

        except Exception as e:
            logger.error(f"Error updating conversation state: {str(e)}")
            # State remains unchanged on error

    def get_completion_status(self) -> Dict[str, Any]:
        """Get current completion status with rich context."""
        status = {
            "completion_percentage": self.state.completion_percentage,
            "topics_covered": self.state.sections_completed,
            "topics_in_progress": self.state.sections_partial,
            "current_focus": self.state.current_section,
            "conversation_length": len(self.conversation_history),
            "last_update": datetime.now().isoformat(),
            "needs_attention": [
                topic for topic in self.state.sections_partial
                if topic not in self.state.sections_completed
            ],
            "status_summary": self._generate_status_summary()
        }

        if self.state.last_error:
            status["last_error"] = self.state.last_error

        return status

    def _generate_status_summary(self) -> str:
        """Generate a human-readable summary of the conversation status."""
        if not self.conversation_history:
            return "Ready to start the conversation."

        summary_parts = []

        # Add completion status
        if self.state.completion_percentage > 0:
            summary_parts.append(
                f"Conversation is approximately {self.state.completion_percentage:.1f}% complete"
            )

        # Add covered topics
        if self.state.sections_completed:
            topics = ", ".join(self.state.sections_completed)
            summary_parts.append(f"We've discussed: {topics}")

        # Add current focus
        if self.state.current_section:
            summary_parts.append(
                f"Currently focusing on: {self.state.current_section}"
            )

        # Add next steps if any
        if self.state.sections_partial:
            topics = ", ".join(self.state.sections_partial)
            summary_parts.append(f"Topics to explore further: {topics}")

        return " | ".join(summary_parts) if summary_parts else "Conversation in progress."

    def generate_json(self, api_key: str) -> Tuple[Optional[str], str]:
        """Generate a JSON profile from the conversation history."""
        try:
            if not self.client:
                self.client = OpenAI(api_key=api_key)

            # Analysis prompt focused on understanding the conversation
            analysis_prompt = """
            Review our conversation and create a JSON structure that captures the person's journey.
            Focus on what was actually discussed, not fitting into predetermined categories.
            Include:
            1. Any experiences or achievements shared
            2. Skills or competencies demonstrated
            3. Timeline or progression points mentioned
            4. Notable metrics or outcomes
            5. Personal growth or learning moments

            Structure the JSON naturally around the topics they shared.
            """
            # Get initial analysis of conversation content
            analysis_response = self.client.chat.completions.create(
                model="gpt-4",
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    *self.conversation_history,
                    {"role": "user", "content": analysis_prompt}
                ],
                temperature=0.7
            )

            # Parse the initial analysis
            analysis = json.loads(analysis_response.choices[0].message.content)

            # Generate structured profile based on analysis
            profile_prompt = f"""
            Based on our conversation, create a detailed profile JSON.
            Use this analysis as a guide: {json.dumps(analysis, indent=2)}

            Important guidelines:
            - Create sections based on what was actually discussed
            - Include both quantitative and qualitative information
            - Preserve the context and significance of experiences
            - Maintain natural flow and connections between topics
            - Use descriptive section names that reflect the conversation
            """

            # Generate the profile JSON
            profile_response = self.client.chat.completions.create(
                model="gpt-4",
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    *self.conversation_history,
                    {"role": "user", "content": profile_prompt}
                ],
                temperature=0.5
            )

            # Parse and clean the profile data
            profile_data = json.loads(profile_response.choices[0].message.content)

            # Clean the data
            def clean_dict(d):
                if isinstance(d, dict):
                    return {k: clean_dict(v) for k, v in d.items()
                            if v not in (None, "", [], {}, "N/A", "None")}
                if isinstance(d, list):
                    return [clean_dict(item) for item in d
                            if item not in (None, "", [], {}, "N/A", "None")]
                return d

            profile_data = clean_dict(profile_data)

            # Add metadata
            profile_data["metadata"] = {
                "generated_at": datetime.now().isoformat(),
                "version": "2.0",
                "generation_metrics": {
                    "conversation_length": len(self.conversation_history),
                    "topics_covered": self.state.sections_completed,
                    "completion_percentage": self.state.completion_percentage
                }
            }

            # Save to file
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"career_education_profile_{timestamp}.json"

            try:
                with open(filename, 'w', encoding='utf-8') as f:
                    json.dump(profile_data, f, indent=2, ensure_ascii=False)
                return (filename, json.dumps(profile_data, indent=2, ensure_ascii=False))
            except Exception as e:
                logger.error(f"Error saving profile to file: {str(e)}")
                return (None, json.dumps(profile_data, indent=2, ensure_ascii=False))

        except Exception as e:
            error_msg = f"Error generating profile: {str(e)}"
            logger.error(error_msg)
            error_json = {
                "error": error_msg,
                "metadata": {
                    "generated_at": datetime.now().isoformat(),
                    "error_occurred": True
                }
            }
            return (None, json.dumps(error_json, indent=2))

def create_education_career_interface():
    """Create Gradio interface for the education and career collector."""
    collector = EducationCareerCollector()

    css = """
    .message { font-size: 16px; margin: 8px 0; }
    .system-message { color: #444; font-style: italic; }
    .user-message { color: #000; font-weight: 500; }
    .alert {
        padding: 12px;
        margin: 8px 0;
        border-radius: 4px;
    }
    .alert-info {
        background-color: #e8f4f8;
        border-left: 4px solid #4a90e2;
    }
    .alert-error {
        background-color: #fde8e8;
        border-left: 4px solid #f56565;
    }
    .alert-success {
        background-color: #e8f8e8;
        border-left: 4px solid #48bb78;
    }
    """

    with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
        gr.Markdown("""
        # LOSS DOG - Profile Builder

        Share your career and education journey naturally.
        Tell your story in your own way - we'll capture what matters to you.
        """)

        with gr.Row():
            with gr.Column(scale=2):
                # API Key Input
                api_key = gr.Textbox(
                    label="OpenAI API Key",
                    type="password",
                    placeholder="Enter your OpenAI API key (sk-...)",
                    info="Your API key from platform.openai.com"
                )

                # Status Messages
                status_msg = gr.Markdown(
                    "Ready to start! Share your journey...",
                    elem_classes=["alert", "alert-info"]
                )

                # Chat Interface
                chatbot = gr.Chatbot(
                    height=400,
                    show_label=False,
                    elem_classes=["message"]
                )

                # Message Input
                with gr.Row():
                    msg = gr.Textbox(
                        label="Your message",
                        placeholder="Tell me about your journey...",
                        show_label=False,
                        scale=4
                    )
                    submit = gr.Button("Send", variant="primary", scale=1)

                # Action Buttons
                with gr.Row():
                    clear = gr.Button("🗑️ Clear Chat", scale=1)
                    generate = gr.Button("Generate Profile", scale=2)
            with gr.Column(scale=1):
                # Progress Information
                progress_info = gr.Markdown(
                    "### Profile Progress\nStart sharing your story!",
                    elem_classes=["alert", "alert-info"]
                )

                # Profile Preview
                with gr.Tab("Preview"):
                    json_preview = gr.JSON(
                        label="Profile Preview",
                        show_label=True
                    )

                # Download Section
                with gr.Tab("Download"):
                    output_file = gr.File(
                        label="Download Profile"
                    )

                # Tips and Guidelines
                with gr.Accordion("💡 Tips", open=False):
                    gr.Markdown("""
                    ### Share Your Story Naturally

                    - Tell us about experiences that matter to you
                    - Include both achievements and challenges
                    - Share numbers when they're meaningful
                    - Describe your growth and learning
                    - Talk about what makes your journey unique
                    """)

        def process_message(message: str, history: list, key: str) -> tuple:
            """Process user message and update interface."""
            if not message.strip():
                return history, "Please enter a message."

            try:
                # Process the message
                result = collector.process_message(message, key)

                # Update chat history
                history.append((message, result["content"]))

                # Generate status message
                status = f"""Progress: {result['completion_status']['completion_percentage']:.1f}%
                | Topics covered: {len(result['completion_status']['topics_covered'])}"""

                return history, status

            except Exception as e:
                error_msg = f"Error: {str(e)}"
                logger.error(error_msg)
                return history, error_msg

        def generate_profile(key: str) -> tuple:
            """Generate and return profile JSON."""
            try:
                filename, json_content = collector.generate_json(key)
                if filename:
                    return (
                        filename,
                        json.loads(json_content),
                        "Profile generated successfully!"
                    )
                return (
                    None,
                    json.loads(json_content),
                    "Profile generated but couldn't save file."
                )
            except Exception as e:
                error_msg = f"Error generating profile: {str(e)}"
                logger.error(error_msg)
                return None, {"error": error_msg}, error_msg

        def clear_interface() -> tuple:
            """Reset the interface state."""
            return (
                [],  # Clear chat history
                "Ready to start! Share your journey...",  # Reset status
                "### Profile Progress\nStart sharing your story!",  # Reset progress
                None,  # Clear JSON preview
                None  # Clear file output
            )

        def update_progress(history: list) -> str:
            """Update progress information based on conversation."""
            if not history:
                return "### Profile Progress\nStart sharing your story!"

            # Get completion status
            status = collector.get_completion_status()

            # Format progress message
            progress_md = f"""### Profile Progress: {status['completion_percentage']:.1f}%\n\n"""

            if status['topics_covered']:
                progress_md += "**Discussed:**\n"
                for topic in status['topics_covered']:
                    progress_md += f"- {topic}\n"

            if status['topics_in_progress']:
                progress_md += "\n**Currently exploring:**\n"
                for topic in status['topics_in_progress']:
                    progress_md += f"- {topic}\n"

            if status.get('needs_attention'):
                progress_md += "\n**Consider discussing:**\n"
                for topic in status['needs_attention']:
                    progress_md += f"- {topic}\n"

            return progress_md

+
# Event Handlers #484
|
| 551 |
+
msg.submit( #485
|
| 552 |
+
process_message, #486
|
| 553 |
+
[msg, chatbot, api_key], #487
|
| 554 |
+
[chatbot, status_msg] #488
|
| 555 |
+
).then( #489
|
| 556 |
+
update_progress, #490
|
| 557 |
+
chatbot, #491
|
| 558 |
+
progress_info #492
|
| 559 |
+
).then( #493
|
| 560 |
+
lambda: "", #494
|
| 561 |
+
None, #495
|
| 562 |
+
msg #496
|
| 563 |
+
) #497
|
| 564 |
+
|
| 565 |
+
submit.click( #498
|
| 566 |
+
process_message, #499
|
| 567 |
+
[msg, chatbot, api_key], #500
|
| 568 |
+
[msg, chatbot, api_key], #501
|
| 569 |
+
[chatbot, status_msg] #502
|
| 570 |
+
).then( #503
|
| 571 |
+
update_progress, #504
|
| 572 |
+
chatbot, #505
|
| 573 |
+
progress_info #506
|
| 574 |
+
).then( #507
|
| 575 |
+
lambda: "", #508
|
| 576 |
+
None, #509
|
| 577 |
+
msg #510
|
| 578 |
+
) #511
|
| 579 |
+
|
| 580 |
+
generate.click( #512
|
| 581 |
+
generate_profile, #513
|
| 582 |
+
[api_key], #514
|
| 583 |
+
[output_file, json_preview, status_msg] #515
|
| 584 |
+
) #516
|
| 585 |
+
|
| 586 |
+
clear.click( #517
|
| 587 |
+
clear_interface, #518
|
| 588 |
+
None, #519
|
| 589 |
+
[chatbot, status_msg, progress_info, json_preview, output_file] #520
|
| 590 |
+
) #521
|
| 591 |
+
|
| 592 |
+
return demo #522
|
| 593 |
+
|
| 594 |
+
if __name__ == "__main__": #523
|
| 595 |
+
demo = create_education_career_interface() #524
|
| 596 |
+
demo.launch( #525
|
| 597 |
+
server_name="0.0.0.0", #526
|
| 598 |
+
server_port=7860, #527
|
| 599 |
+
share=True, #528
|
| 600 |
+
enable_queue=True, #529
|
| 601 |
+
show_error=True #530
|
| 602 |
+
) #531
|
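
For reference, a minimal sketch of how the collector API introduced in this change could be exercised outside the Gradio UI. This is a hypothetical smoke test, not part of app.py: it assumes the module's imports, logger, and SYSTEM_PROMPT are in scope and that the OPENAI_API_KEY environment variable holds a valid key.

import os

# Hypothetical smoke test (assumption: run with app.py's definitions loaded).
collector = EducationCareerCollector()
key = os.environ.get("OPENAI_API_KEY", "")

# Send one user message and inspect the returned status payload.
reply = collector.process_message("I studied physics, then moved into data engineering.", key)
print(reply["type"], reply["completion_status"]["completion_percentage"])

# Generate the JSON profile; filename is None if writing to disk failed.
filename, profile_json = collector.generate_json(key)
print("Profile saved to:", filename)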