geethareddy committed (verified)
Commit 3ff85be · 1 Parent(s): e30f42d

Update app.py

Files changed (1)
  1. app.py +10 -452
app.py CHANGED
@@ -124,442 +124,6 @@ For severe distress:
124
  """
125
  context = [base_info, mental_health, medical_assistance, medicine_recommendation, decision_guidance, emergency_help]
126
 
127
- def encrypt_data(data):
128
- try:
129
- return cipher.encrypt(data.encode('utf-8')).decode('utf-8')
130
- except Exception as e:
131
- logger.error(f"Encryption failed: {str(e)}")
132
- return data
133
-
134
- def decrypt_data(encrypted_data):
135
- try:
136
- return cipher.decrypt(encrypted_data.encode('utf-8')).decode('utf-8')
137
- except Exception as e:
138
- logger.error(f"Decryption failed: {str(e)}")
139
- return encrypted_data
140
-
141
- @lru_cache(maxsize=100)
142
- def cached_transcribe(audio_file, language):
143
- audio, sr = librosa.load(audio_file, sr=16000)
144
- language_code = {"English": "en", "Hindi": "hi", "Spanish": "es", "Mandarin": "zh"}.get(language, "en")
145
- return transcribe_audio(audio, language_code)
146
-
147
- def extract_health_features(audio, sr):
148
- try:
149
- audio = librosa.util.normalize(audio)
150
- frame_duration = 30
151
- frame_samples = int(sr * frame_duration / 1000)
152
- frames = [audio[i:i + frame_samples] for i in range(0, len(audio), frame_samples)]
153
- voiced_frames = [frame for frame in frames if len(frame) == frame_samples and vad.is_speech((frame * 32768).astype(np.int16).tobytes(), sr)]
154
- if not voiced_frames:
155
- raise ValueError("No voiced segments detected")
156
- voiced_audio = np.concatenate(voiced_frames)
157
-
158
- frame_step = max(1, len(voiced_audio) // (sr // 16)) # Increased step for faster processing
159
- pitches, magnitudes = librosa.piptrack(y=voiced_audio[::frame_step], sr=sr, fmin=75, fmax=300)
160
- valid_pitches = [p for p in pitches[magnitudes > 0] if 75 <= p <= 300]
161
- pitch = np.mean(valid_pitches) if valid_pitches else 0
162
- jitter = np.std(valid_pitches) / pitch if pitch and valid_pitches else 0
163
- jitter = min(jitter, 10)
164
- amplitudes = librosa.feature.rms(y=voiced_audio, frame_length=512, hop_length=256)[0] # Increased hop_length
165
- shimmer = np.std(amplitudes) / np.mean(amplitudes) if np.mean(amplitudes) else 0
166
- shimmer = min(shimmer, 10)
167
- energy = np.mean(amplitudes)
168
-
169
- mfcc = np.mean(librosa.feature.mfcc(y=voiced_audio[::8], sr=sr, n_mfcc=4), axis=1) # Further reduced sampling
170
- spectral_centroid = np.mean(librosa.feature.spectral_centroid(y=voiced_audio[::8], sr=sr, n_fft=512, hop_length=256))
171
-
172
- logger.debug(f"Extracted features: pitch={pitch:.2f}, jitter={jitter*100:.2f}%, shimmer={shimmer*100:.2f}%, energy={energy:.4f}, mfcc_mean={np.mean(mfcc):.2f}, spectral_centroid={spectral_centroid:.2f}")
173
- return {
174
- "pitch": pitch,
175
- "jitter": jitter * 100,
176
- "shimmer": shimmer * 100,
177
- "energy": energy,
178
- "mfcc_mean": np.mean(mfcc),
179
- "spectral_centroid": spectral_centroid
180
- }
181
- except Exception as e:
182
- logger.error(f"Feature extraction failed: {str(e)}")
183
- raise
184
-
185
- def transcribe_audio(audio, language="en"):
186
- try:
187
- whisper_model.config.forced_decoder_ids = whisper_processor.get_decoder_prompt_ids(
188
- language=SUPPORTED_LANGUAGES.get({"en": "English", "hi": "Hindi", "es": "Spanish", "zh": "Mandarin"}.get(language, "English"), "english"), task="transcribe"
189
- )
190
- inputs = whisper_processor(audio, sampling_rate=16000, return_tensors="pt")
191
- with torch.no_grad():
192
- generated_ids = whisper_model.generate(inputs["input_features"], max_new_tokens=20) # Further reduced tokens
193
- transcription = whisper_processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
194
- logger.info(f"Transcription (language: {language}): {transcription}")
195
- return transcription
196
- except Exception as e:
197
- logger.error(f"Transcription failed: {str(e)}")
198
- return None
199
-
200
- async def get_chatbot_response(message, language="en", retries=2, timeout=10):
201
- if not message:
202
- return "No input provided. Please describe your symptoms or concerns.", None
203
- if not chat:
204
- logger.warning("Gemini chat object is None, attempting to reinitialize")
205
- global chat
206
- chat = initialize_gemini()
207
- if not chat:
208
- return "Error: Unable to connect to Gemini API. Please check API key.", None
209
-
210
- language_code = {"English": "en", "Hindi": "hi", "Spanish": "es", "Mandarin": "zh"}.get(language, "en")
211
- full_context = "\n".join(context) + f"\nUser: {message}\nMindCare: Provide response in 6-8 simple bullet points, tailored to the user's input, in a clear and empathetic tone."
212
-
213
- for attempt in range(retries + 1):
214
- try:
215
- async with asyncio.timeout(timeout):
216
- response = await asyncio.get_event_loop().run_in_executor(None, lambda: chat.send_message(full_context).text)
217
- with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_audio:
218
- tts = gTTS(text=response, lang=language_code, slow=False)
219
- tts.save(temp_audio.name)
220
- audio_path = temp_audio.name
221
- logger.info(f"Generated response: {response[:100]}... and audio at {audio_path}")
222
- return response, audio_path
223
- except asyncio.TimeoutError:
224
- logger.error(f"Chatbot response timed out on attempt {attempt + 1}")
225
- if attempt == retries:
226
- return "Error: Response generation timed out.", None
227
- except Exception as e:
228
- logger.error(f"Chatbot response failed on attempt {attempt + 1}: {str(e)}")
229
- if attempt == retries:
230
- return f"Error generating response: {str(e)}", None
231
- await asyncio.sleep(1) # Brief delay between retries
232
- return "Error: Unable to generate response after retries.", None
233
-
234
- async def translate_text(text, target_language, retries=2, timeout=10):
235
- if not chat or not text:
236
- return text
237
- if target_language == "English":
238
- return text
239
- language_code = {"Hindi": "hi", "Spanish": "es", "Mandarin": "zh"}.get(target_language, "en")
240
- prompt = f"Translate the following text into {target_language} while preserving formatting (e.g., bullet points, newlines):\n\n{text}"
241
-
242
- for attempt in range(retries + 1):
243
- try:
244
- async with asyncio.timeout(timeout):
245
- response = await asyncio.get_event_loop().run_in_executor(None, lambda: chat.send_message(prompt).text)
246
- logger.info(f"Translated text to {target_language}: {response[:100]}...")
247
- return response
248
- except asyncio.TimeoutError:
249
- logger.error(f"Translation to {target_language} timed out on attempt {attempt + 1}")
250
- if attempt == retries:
251
- return text
252
- except Exception as e:
253
- logger.error(f"Translation to {target_language} failed on attempt {attempt + 1}: {str(e)}")
254
- if attempt == retries:
255
- return text
256
- await asyncio.sleep(1)
257
- return text
258
-
259
- async def analyze_symptoms(text, features, language="English"):
260
- feedback = []
261
- suggestions = []
262
- text = text.lower() if text else ""
263
-
264
- # Generate health assessment feedback
265
- if "cough" in text or "coughing" in text:
266
- feedback.append("You mentioned a cough, which may suggest a cold or respiratory issue.")
267
- suggestions.extend([
268
- "• Drink warm fluids like herbal tea or water to soothe your throat.",
269
- "• Rest to help your body recover from possible infection.",
270
- "• Use a humidifier to ease throat irritation.",
271
- "• Consider over-the-counter cough remedies, but consult a doctor first.",
272
- "• Monitor symptoms; see a doctor if the cough lasts over a week."
273
- ])
274
- elif "fever" in text or "temperature" in text:
275
- feedback.append("You mentioned a fever, which could indicate an infection.")
276
- suggestions.extend([
277
- "• Stay hydrated with water or electrolyte drinks.",
278
- "• Rest to support your immune system.",
279
- "• Monitor your temperature regularly.",
280
- "• Use paracetamol to reduce fever, but follow dosage instructions.",
281
- "• Seek medical advice if fever exceeds 100.4°F (38°C) for over 2 days."
282
- ])
283
- elif "headache" in text:
284
- feedback.append("You mentioned a headache, possibly due to stress or dehydration.")
285
- suggestions.extend([
286
- "• Drink plenty of water to stay hydrated.",
287
- "• Take short breaks to relax your mind.",
288
- "• Try a mild pain reliever like ibuprofen, but consult a doctor.",
289
- "• Practice deep breathing to reduce tension.",
290
- "• Ensure you're getting enough sleep (7-8 hours)."
291
- ])
292
- elif "stress" in text or "anxious" in text or "mental stress" in text:
293
- feedback.append("You mentioned stress or anxiety, which can affect well-being.")
294
- suggestions.extend([
295
- "• Try 5 minutes of deep breathing to calm your mind.",
296
- "• Write in a journal to process your thoughts.",
297
- "• Take a short walk in nature to relax.",
298
- "• Practice mindfulness or meditation daily.",
299
- "• Talk to a trusted friend or professional for support.",
300
- "• Prioritize sleep and avoid excessive caffeine."
301
- ])
302
- elif "respiratory" in text or "breathing" in text or "shortness of breath" in text:
303
- feedback.append("You mentioned breathing issues, which may indicate asthma or infection.")
304
- suggestions.extend([
305
- "• Avoid triggers like smoke or allergens.",
306
- "• Practice slow, deep breathing exercises.",
307
- "• Stay in a well-ventilated area.",
308
- "• Monitor symptoms and seek medical help if severe.",
309
- "• Rest to reduce strain on your respiratory system."
310
- ])
311
- elif "cold" in text:
312
- feedback.append("You mentioned a cold, likely a viral infection.")
313
- suggestions.extend([
314
- "• Drink warm fluids like soup or tea.",
315
- "• Rest to help your body fight the virus.",
316
- "• Use saline nasal spray to relieve congestion.",
317
- "• Take over-the-counter cold remedies, but consult a doctor.",
318
- "• Stay hydrated and avoid strenuous activity."
319
- ])
320
-
321
- # Voice feature-based feedback and suggestions
322
- if features["jitter"] > 6.5:
323
- feedback.append(f"High jitter ({features['jitter']:.2f}%) suggests vocal strain or respiratory issues.")
324
- suggestions.append("• Rest your voice and avoid shouting.")
325
- elif features["jitter"] > 4.0:
326
- feedback.append(f"Moderate jitter ({features['jitter']:.2f}%) indicates possible vocal instability.")
327
- suggestions.append("• Sip warm water to soothe your vocal cords.")
328
-
329
- if features["shimmer"] > 7.5:
330
- feedback.append(f"High shimmer ({features['shimmer']:.2f}%) may indicate emotional stress.")
331
- suggestions.append("• Try relaxation techniques like yoga or meditation.")
332
- elif features["shimmer"] > 5.0:
333
- feedback.append(f"Moderate shimmer ({features['shimmer']:.2f}%) suggests mild vocal strain.")
334
- suggestions.append("• Stay hydrated to support vocal health.")
335
-
336
- if features["energy"] < 0.003:
337
- feedback.append(f"Low vocal energy ({features['energy']:.4f}) may indicate fatigue.")
338
- suggestions.append("• Ensure 7-8 hours of sleep nightly.")
339
- elif features["energy"] < 0.007:
340
- feedback.append(f"Low vocal energy ({features['energy']:.4f}) suggests possible tiredness.")
341
- suggestions.append("• Take short naps to boost energy.")
342
-
343
- if features["pitch"] < 70 or features["pitch"] > 290:
344
- feedback.append(f"Unusual pitch ({features['pitch']:.2f} Hz) may indicate vocal issues.")
345
- suggestions.append("• Consult a doctor for a vocal health check.")
346
- elif 70 <= features["pitch"] <= 90 or 270 <= features["pitch"] <= 290:
347
- feedback.append(f"Pitch ({features['pitch']:.2f} Hz) is slightly outside typical range.")
348
- suggestions.append("• Avoid straining your voice during conversations.")
349
-
350
- if features["spectral_centroid"] > 2700:
351
- feedback.append(f"High spectral centroid ({features['spectral_centroid']:.2f} Hz) suggests tense speech.")
352
- suggestions.append("• Practice slow, calm speaking to reduce tension.")
353
- elif features["spectral_centroid"] > 2200:
354
- feedback.append(f"Elevated spectral centroid ({features['spectral_centroid']:.2f} Hz) may indicate mild tension.")
355
- suggestions.append("• Relax your jaw and shoulders while speaking.")
356
-
357
- if not feedback:
358
- feedback.append("No significant health concerns detected from voice or text analysis.")
359
- suggestions.extend([
360
- "• Maintain a balanced diet with fruits and vegetables.",
361
- "• Exercise regularly for overall health.",
362
- "• Stay hydrated with 8 glasses of water daily.",
363
- "• Get 7-8 hours of sleep each night.",
364
- "• Practice stress-relief techniques like meditation.",
365
- "• Schedule regular health check-ups."
366
- ])
367
-
368
- # Ensure suggestions are limited to 6-8 unique items
369
- suggestions = list(dict.fromkeys(suggestions))[:8]
370
- if len(suggestions) < 6:
371
- suggestions.extend([
372
- "• Stay active with light exercise like walking.",
373
- "• Practice gratitude to boost mental well-being."
374
- ][:6 - len(suggestions)])
375
-
376
- # Translate feedback and suggestions to the selected language
377
- feedback_text = "\n".join(feedback)
378
- suggestions_text = "\n".join(suggestions)
379
- try:
380
- translated_feedback = await translate_text(feedback_text, language)
381
- translated_suggestions = await translate_text(suggestions_text, language)
382
- except Exception as e:
383
- logger.error(f"Translation failed: {str(e)}")
384
- translated_feedback = feedback_text
385
- translated_suggestions = suggestions_text
386
-
387
- logger.debug(f"Generated feedback: {translated_feedback}, Suggestions: {translated_suggestions}")
388
- return translated_feedback, translated_suggestions
389
-
390
- def store_user_consent(email, language):
391
- if not sf:
392
- logger.warning("Salesforce not connected; skipping consent storage")
393
- return None
394
- try:
395
- email_to_use = email.strip() if email and email.strip() else DEFAULT_EMAIL
396
- sanitized_email = email_to_use.replace("'", "\\'").replace('"', '\\"')
397
- query = f"SELECT Id FROM HealthUser__c WHERE Email__c = '{sanitized_email}'"
398
- logger.debug(f"Executing SOQL query: {query}")
399
- user = sf.query(query)
400
- user_id = None
401
- if user["totalSize"] == 0:
402
- logger.info(f"No user found for email: {sanitized_email}, creating new user")
403
- user = sf.HealthUser__c.create({
404
- "Email__c": sanitized_email,
405
- "Language__c": SALESFORCE_LANGUAGE_MAP.get(language, "English"),
406
- "ConsentGiven__c": True
407
- })
408
- user_id = user["id"]
409
- logger.info(f"Created new user with email: {sanitized_email}, ID: {user_id}")
410
- else:
411
- user_id = user["records"][0]["Id"]
412
- logger.info(f"Found existing user with email: {sanitized_email}, ID: {user_id}")
413
- sf.HealthUser__c.update(user_id, {
414
- "Language__c": SALESFORCE_LANGUAGE_MAP.get(language, "English"),
415
- "ConsentGiven__c": True
416
- })
417
- logger.info(f"Updated user with email: {sanitized_email}")
418
- sf.ConsentLog__c.create({
419
- "HealthUser__c": user_id,
420
- "ConsentType__c": "Voice Analysis",
421
- "ConsentDate__c": datetime.utcnow().isoformat()
422
- })
423
- logger.info(f"Stored consent log for user ID: {user_id}")
424
- return user_id
425
- except Exception as e:
426
- logger.error(f"Consent storage failed: {str(e)}")
427
- logger.exception("Stack trace for consent storage failure:")
428
- return None
429
-
430
- def generate_pdf_report(feedback, transcription, features, language, email, suggestions):
431
- try:
432
- feedback = feedback.replace('<', '<').replace:
433
-
434
- System: **Updated Code with Fixes for Error and Performance**
435
-
436
- <xaiArtifact artifact_id="15a4c230-0090-4372-9c79-3b68c1f53acc" artifact_version_id="b21f972a-9b0b-4e87-bc53-ef7ca9e86329" title="main.py" contentType="text/python">
437
- import gradio as gr
438
- import librosa
439
- import numpy as np
440
- import torch
441
- from transformers import WhisperProcessor, WhisperForConditionalGeneration
442
- from simple_salesforce import Salesforce
443
- import os
444
- from datetime import datetime
445
- import logging
446
- import webrtcvad
447
- import google.generativeai as genai
448
- from gtts import gTTS
449
- import tempfile
450
- import base64
451
- import re
452
- from cryptography.fernet import Fernet
453
- import pytz
454
- from reportlab.lib.pagesizes import A4
455
- from reportlab.lib import colors
456
- from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, ListFlowable, ListItem
457
- from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
458
- from reportlab.lib.units import inch
459
- import asyncio
460
- import hashlib
461
- from functools import lru_cache
462
-
463
- # Set up logging with DEBUG level, adjusted for IST
464
- logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
465
- logger = logging.getLogger(__name__)
466
- usage_metrics = {"total_assessments": 0, "assessments_by_language": {}}
467
-
468
- # Environment variables
469
- SF_USERNAME = os.getenv("SF_USERNAME", "[email protected]")
470
- SF_PASSWORD = os.getenv("SF_PASSWORD", "voicebot1")
471
- SF_SECURITY_TOKEN = os.getenv("SF_SECURITY_TOKEN", "jq4VVHUFti6TmzJDjjegv2h6b")
472
- SF_INSTANCE_URL = os.getenv("SF_INSTANCE_URL", "https://swe42.sfdc-cehfhs.salesforce.com")
473
- GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "AIzaSyBzr5vVpbe8CV1v70l3pGDp9vRJ76yCxdk")
474
- ENCRYPTION_KEY = os.getenv("ENCRYPTION_KEY", Fernet.generate_key().decode())
475
- DEFAULT_EMAIL = os.getenv("SALESFORCE_USER_EMAIL", "[email protected]")
476
-
477
- # Initialize encryption
478
- cipher = Fernet(ENCRYPTION_KEY)
479
-
480
- # Initialize Salesforce
481
- try:
482
- sf = Salesforce(
483
- username=SF_USERNAME,
484
- password=SF_PASSWORD,
485
- security_token=SF_SECURITY_TOKEN,
486
- instance_url=SF_INSTANCE_URL
487
- )
488
- logger.info(f"Connected to Salesforce at {SF_INSTANCE_URL}")
489
- except Exception as e:
490
- logger.error(f"Salesforce connection failed: {str(e)}")
491
- sf = None
492
-
493
- # Initialize Google Gemini with retry logic
494
- def initialize_gemini():
495
- try:
496
- genai.configure(api_key=GEMINI_API_KEY)
497
- gemini_model = genai.GenerativeModel('gemini-1.5-flash')
498
- chat = gemini_model.start_chat(history=[])
499
- logger.info("Connected to Google Gemini")
500
- return chat
501
- except Exception as e:
502
- logger.error(f"Google Gemini initialization failed: {str(e)}")
503
- return None
504
-
505
- chat = initialize_gemini()
506
-
507
- # Load Whisper model
508
- SUPPORTED_LANGUAGES = {"English": "english", "Hindi": "hindi", "Spanish": "spanish", "Mandarin": "mandarin"}
509
- SALESFORCE_LANGUAGE_MAP = {"English": "English", "Hindi": "Hindi", "Spanish": "Spanish", "Mandarin": "Mandarin"}
510
- whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
511
- whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
512
- vad = webrtcvad.Vad(mode=2)
513
-
514
- # Context for chatbot
515
- base_info = """
516
- MindCare is an AI health assistant focused on:
517
- - **Mental health**: Emotional support, mindfulness, stress-relief, anxiety management.
518
- - **Medical guidance**: Symptom analysis, possible conditions, medicine recommendations.
519
- - **Decision-making**: Personal, professional, emotional choices.
520
- - **General health**: Lifestyle, nutrition, physical and mental wellness.
521
- - **Emergency assistance**: Suggest professional help or helplines for distress.
522
- Tone: Empathetic, supportive, informative.
523
- """
524
- mental_health = """
525
- For stress/anxiety:
526
- - Suggest mindfulness, deep breathing, gratitude journaling.
527
- - Encourage breaks, hobbies, nature.
528
- - Provide affirmations, self-care routines.
529
- For distress:
530
- - Offer emotional support, assure they’re not alone.
531
- - Suggest trusted contacts or professionals.
532
- - Provide crisis helplines.
533
- """
534
- medical_assistance = """
535
- For symptoms:
536
- - Analyze and suggest possible conditions.
537
- - Offer general advice, not replacing doctor consultation.
538
- - Suggest lifestyle changes, home remedies.
539
- - Advise medical attention for severe symptoms.
540
- """
541
- medicine_recommendation = """
542
- For medicine queries:
543
- - Suggest common antibiotics (e.g., Amoxicillin), painkillers (e.g., Paracetamol, Ibuprofen).
544
- - Note precautions, side effects.
545
- - Stress doctor consultation before use.
546
- """
547
- decision_guidance = """
548
- For decisions:
549
- - Weigh pros/cons logically.
550
- - Consider values, goals, emotions.
551
- - Suggest decision matrices or intuitive checks.
552
- - Encourage trusted advice if needed.
553
- """
554
- emergency_help = """
555
- For severe distress:
556
- - Provide immediate emotional support.
557
- - Offer crisis helplines (region-specific).
558
- - Encourage talking to trusted contacts or professionals.
559
- - Assure help is available.
560
- """
561
- context = [base_info, mental_health, medical_assistance, medicine_recommendation, decision_guidance, emergency_help]
562
-
563
  def encrypt_data(data):
564
  try:
565
  return cipher.encrypt(data.encode('utf-8')).decode('utf-8')
@@ -595,18 +159,18 @@ def extract_health_features(audio, sr):
  raise ValueError("No voiced segments detected")
  voiced_audio = np.concatenate(voiced_frames)

- frame_step = max(1, len(voiced_audio) // (sr // 16)) # Increased step for faster processing
+ frame_step = max(1, len(voiced_audio) // (sr // 16))
  pitches, magnitudes = librosa.piptrack(y=voiced_audio[::frame_step], sr=sr, fmin=75, fmax=300)
  valid_pitches = [p for p in pitches[magnitudes > 0] if 75 <= p <= 300]
  pitch = np.mean(valid_pitches) if valid_pitches else 0
  jitter = np.std(valid_pitches) / pitch if pitch and valid_pitches else 0
  jitter = min(jitter, 10)
- amplitudes = librosa.feature.rms(y=voiced_audio, frame_length=512, hop_length=256)[0] # Increased hop_length
+ amplitudes = librosa.feature.rms(y=voiced_audio, frame_length=512, hop_length=256)[0]
  shimmer = np.std(amplitudes) / np.mean(amplitudes) if np.mean(amplitudes) else 0
  shimmer = min(shimmer, 10)
  energy = np.mean(amplitudes)

- mfcc = np.mean(librosa.feature.mfcc(y=voiced_audio[::8], sr=sr, n_mfcc=4), axis=1) # Further reduced sampling
+ mfcc = np.mean(librosa.feature.mfcc(y=voiced_audio[::8], sr=sr, n_mfcc=4), axis=1)
  spectral_centroid = np.mean(librosa.feature.spectral_centroid(y=voiced_audio[::8], sr=sr, n_fft=512, hop_length=256))

  logger.debug(f"Extracted features: pitch={pitch:.2f}, jitter={jitter*100:.2f}%, shimmer={shimmer*100:.2f}%, energy={energy:.4f}, mfcc_mean={np.mean(mfcc):.2f}, spectral_centroid={spectral_centroid:.2f}")
@@ -627,7 +191,7 @@ def extract_health_features(audio, sr):
  "energy": 0,
  "mfcc_mean": 0,
  "spectral_centroid": 0
- } # Return default values to prevent failure
+ }

  def transcribe_audio(audio, language="en"):
  try:
@@ -636,7 +200,7 @@ def transcribe_audio(audio, language="en"):
  )
  inputs = whisper_processor(audio, sampling_rate=16000, return_tensors="pt")
  with torch.no_grad():
- generated_ids = whisper_model.generate(inputs["input_features"], max_new_tokens=20) # Further reduced tokens
+ generated_ids = whisper_model.generate(inputs["input_features"], max_new_tokens=20)
  transcription = whisper_processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
  logger.info(f"Transcription (language: {language}): {transcription}")
  return transcription
@@ -675,7 +239,7 @@ async def get_chatbot_response(message, language="en", retries=2, timeout=10):
  logger.error(f"Chatbot response failed on attempt {attempt + 1}: {str(e)}")
  if attempt == retries:
  return f"Error generating response: {str(e)}", None
- await asyncio.sleep(1) # Brief delay between retries
+ await asyncio.sleep(1)
  return "Error: Unable to generate response after retries.", None

  async def translate_text(text, target_language, retries=2, timeout=10):
@@ -708,7 +272,6 @@ async def analyze_symptoms(text, features, language="English"):
  suggestions = []
  text = text.lower() if text else ""

- # Generate health assessment feedback
  if "cough" in text or "coughing" in text:
  feedback.append("You mentioned a cough, which may suggest a cold or respiratory issue.")
  suggestions.extend([
@@ -765,7 +328,6 @@ async def analyze_symptoms(text, features, language="English"):
  "• Stay hydrated and avoid strenuous activity."
  ])

- # Voice feature-based feedback and suggestions
  if features["jitter"] > 6.5:
  feedback.append(f"High jitter ({features['jitter']:.2f}%) suggests vocal strain or respiratory issues.")
  suggestions.append("• Rest your voice and avoid shouting.")
@@ -812,7 +374,6 @@ async def analyze_symptoms(text, features, language="English"):
  "• Schedule regular health check-ups."
  ])

- # Ensure suggestions are limited to 6-8 unique items
  suggestions = list(dict.fromkeys(suggestions))[:8]
  if len(suggestions) < 6:
  suggestions.extend([
@@ -820,7 +381,6 @@ async def analyze_symptoms(text, features, language="English"):
  "• Practice gratitude to boost mental well-being."
  ][:6 - len(suggestions)])

- # Translate feedback and suggestions to the selected language
  feedback_text = "\n".join(feedback)
  suggestions_text = "\n".join(suggestions)
  try:
@@ -876,11 +436,11 @@ def store_user_consent(email, language):

  def generate_pdf_report(feedback, transcription, features, language, email, suggestions):
  try:
- feedback = feedback.replace('<', '<').replace('>', '>').replace('&', '&')
- transcription = transcription.replace('<', '<').replace('>', '>').replace('&', '&') if transcription else "None"
- suggestions = suggestions.replace('<', '<').replace('>', '>').replace('&', '&') if suggestions else "None"
+ feedback = feedback.replace('<', '&lt;').replace('>', '&gt;').replace('&', '&amp;')
+ transcription = transcription.replace('<', '&lt;').replace('>', '&gt;').replace('&', '&amp;') if transcription else "None"
+ suggestions = suggestions.replace('<', '&lt;').replace('>', '&gt;').replace('&', '&amp;') if suggestions else "None"
  email_to_use = email.strip() if email and email.strip() else DEFAULT_EMAIL
- email = email_to_use.replace('<', '<').replace('>', '>').replace('&', '&')
+ email = email_to_use.replace('<', '&lt;').replace('>', '&gt;').replace('&', '&amp;')
  language_display = SALESFORCE_LANGUAGE_MAP.get(language, "English")

  ist = pytz.timezone('Asia/Kolkata')
@@ -935,7 +495,6 @@ def generate_pdf_report(feedback, transcription, features, language, email, sugg
  fontName='Times-Roman'
  )

- # Translate PDF section titles to the selected language
  section_titles = {
  "English": {
  "title": "MindCare Health Assistant Report",
@@ -1167,7 +726,6 @@ async def analyze_voice(audio_file=None, language="English", email=None):
  feedback += f"- Email: {email if email and email.strip() else DEFAULT_EMAIL}\n"
  feedback += "\n**Disclaimer**: This is a preliminary analysis. Consult a healthcare provider for professional evaluation."

- # Translate the additional feedback details to the selected language
  details_to_translate = (
  f"Voice Analysis Details:\n"
  f"- Pitch: {features['pitch']:.2f} Hz\n"
 