Update app.py
app.py CHANGED
@@ -14,6 +14,12 @@ import matplotlib.pyplot as plt
 from huggingface_hub import login
 import os
 from dotenv import load_dotenv
+import whisper
+import sounddevice as sd
+import queue
+import tempfile
+import scipy.io.wavfile as wav
+
 
 SPREADSHEET_ID = "1CsBub3Jlwyo7WHMQty6SDnBShIZMjl5XTVSoOKrxZhc"
 RANGE_NAME = 'Sheet1!A1:E'
@@ -379,65 +385,145 @@ def calculate_overall_sentiment(sentiment_scores):
     overall_sentiment = "NEUTRAL"
     return overall_sentiment
 
-def process_real_time_audio():
-    recognizer = sr.Recognizer()
-    microphone = sr.Microphone()
-
-    st.write("Adjusting microphone for ambient noise... Please wait.")
-    with microphone as source:
-        recognizer.adjust_for_ambient_noise(source,duration=2)
-
-    st.write("Listening for audio... Speak into the microphone.")
-    while True:
-        try:
-            with microphone as source:
-                audio = recognizer.listen(source, timeout=15, phrase_time_limit=20)
-
-
-
-            st.write("Transcribing audio...")
-            transcribed_text = recognizer.recognize_google(audio)
-            st.write(f"You said: {transcribed_text}")
-
-            if 'stop' in transcribed_text.lower():
-                st.warning("Stopping the speech recognition process.")
-                break
-
-            st.markdown("### *Sentiment Analysis*")
-            sentiment_label, sentiment_score = analyze_sentiment(transcribed_text)
-            st.write(f"Sentiment: {sentiment_label}")
-            st.write(f"Sentiment Score: {sentiment_score}")
-
-            closest_objection = None
-            response = None
-
-            add_to_sentiment_history(transcribed_text, sentiment_label, sentiment_score, closest_objection, response)
-            st.markdown("### *Recommendations*")
-            recommendations = query_crm_data_with_context(transcribed_text)
-            for i, rec in enumerate(recommendations, start=1):
-                if isinstance(rec, dict) and 'Product' in rec and 'Recommendations' in rec:
-                    st.markdown(f"- *{rec['Product']}*: {rec['Recommendations']}")
-                else:
-                    st.markdown(f"- {rec}")
-
-            st.markdown("### *Objection Handling*")
-            closest_objection, response = find_closest_objection(transcribed_text)
-            st.write(f"Objection: {closest_objection}")
-            st.write(f" Response: {response}")
-
-            update_google_sheet(
-                transcribed_text=transcribed_text,
-                sentiment=f"{sentiment_label} ({sentiment_score})",
-                objection=f"Objection: {closest_objection} | Response: {response}",
-                recommendations=str(recommendations),
-                overall_sentiment=f"{sentiment_label}"
-            )
-
-        except sr.UnknownValueError:
-            st.warning("Could not understand the audio.")
-        except Exception as e:
-            st.error(f"Error: {e}")
-            break
+# def process_real_time_audio():
+#     recognizer = sr.Recognizer()
+#     microphone = sr.Microphone()
+
+#     st.write("Adjusting microphone for ambient noise... Please wait.")
+#     with microphone as source:
+#         recognizer.adjust_for_ambient_noise(source,duration=2)
+
+#     st.write("Listening for audio... Speak into the microphone.")
+#     while True:
+#         try:
+#             with microphone as source:
+#                 audio = recognizer.listen(source, timeout=15, phrase_time_limit=20)
+
+
+
+#             st.write("Transcribing audio...")
+#             transcribed_text = recognizer.recognize_google(audio)
+#             st.write(f"You said: {transcribed_text}")
+
+#             if 'stop' in transcribed_text.lower():
+#                 st.warning("Stopping the speech recognition process.")
+#                 break
+
+#             st.markdown("### *Sentiment Analysis*")
+#             sentiment_label, sentiment_score = analyze_sentiment(transcribed_text)
+#             st.write(f"Sentiment: {sentiment_label}")
+#             st.write(f"Sentiment Score: {sentiment_score}")
+
+#             closest_objection = None
+#             response = None
+
+#             add_to_sentiment_history(transcribed_text, sentiment_label, sentiment_score, closest_objection, response)
+#             st.markdown("### *Recommendations*")
+#             recommendations = query_crm_data_with_context(transcribed_text)
+#             for i, rec in enumerate(recommendations, start=1):
+#                 if isinstance(rec, dict) and 'Product' in rec and 'Recommendations' in rec:
+#                     st.markdown(f"- *{rec['Product']}*: {rec['Recommendations']}")
+#                 else:
+#                     st.markdown(f"- {rec}")
+
+#             st.markdown("### *Objection Handling*")
+#             closest_objection, response = find_closest_objection(transcribed_text)
+#             st.write(f"Objection: {closest_objection}")
+#             st.write(f" Response: {response}")
+
+#             update_google_sheet(
+#                 transcribed_text=transcribed_text,
+#                 sentiment=f"{sentiment_label} ({sentiment_score})",
+#                 objection=f"Objection: {closest_objection} | Response: {response}",
+#                 recommendations=str(recommendations),
+#                 overall_sentiment=f"{sentiment_label}"
+#             )
+
+#         except sr.UnknownValueError:
+#             st.warning("Could not understand the audio.")
+#         except Exception as e:
+#             st.error(f"Error: {e}")
+#             break
+model = whisper.load_model("base")
+
+# Queue for streaming audio
+audio_queue = queue.Queue()
+
+def audio_callback(indata, frames, time, status):
+    """Callback function to continuously receive audio chunks."""
+    if status:
+        st.warning(f"Audio Status: {status}")
+    audio_queue.put(indata.copy())
+
+def transcribe_audio_stream():
+    """Continuously captures microphone input, transcribes, and processes the speech."""
+    samplerate = 16000
+    duration = 5  # Adjust duration for processing chunks in real-time
+
+    # Set up the microphone stream
+    with sd.InputStream(samplerate=samplerate, channels=1, callback=audio_callback):
+        st.write("Listening... Speak into the microphone.")
+
+        while True:
+            try:
+                # Collect audio chunks
+                audio_chunk = []
+                for _ in range(int(samplerate / 1024 * duration)):  # Collect chunks for `duration` seconds
+                    audio_chunk.append(audio_queue.get())
+
+                # Convert to NumPy array
+                audio_data = np.concatenate(audio_chunk, axis=0)
+
+                # Save the chunk as a temporary WAV file
+                with tempfile.NamedTemporaryFile(delete=True, suffix=".wav") as temp_audio:
+                    wav.write(temp_audio.name, samplerate, np.int16(audio_data * 32767))
+
+                    # Transcribe using Whisper
+                    result = model.transcribe(temp_audio.name)
+                    transcribed_text = result["text"]
+
+                st.write(f"You said: {transcribed_text}")
+
+                if 'stop' in transcribed_text.lower():
+                    st.warning("Stopping speech recognition.")
+                    break
+
+                # Sentiment Analysis
+                st.markdown("### *Sentiment Analysis*")
+                sentiment_label, sentiment_score = analyze_sentiment(transcribed_text)
+                st.write(f"Sentiment: {sentiment_label}")
+                st.write(f"Sentiment Score: {sentiment_score}")
+
+                # Add to history
+                add_to_sentiment_history(transcribed_text, sentiment_label, sentiment_score, None, None)
+
+                # Recommendations
+                st.markdown("### *Recommendations*")
+                recommendations = query_crm_data_with_context(transcribed_text)
+                for rec in recommendations:
+                    if isinstance(rec, dict) and 'Product' in rec and 'Recommendations' in rec:
+                        st.markdown(f"- *{rec['Product']}*: {rec['Recommendations']}")
+                    else:
+                        st.markdown(f"- {rec}")
+
+                # Objection Handling
+                st.markdown("### *Objection Handling*")
+                closest_objection, response = find_closest_objection(transcribed_text)
+                st.write(f"Objection: {closest_objection}")
+                st.write(f"Response: {response}")
+
+                # Update Google Sheets
+                update_google_sheet(
+                    transcribed_text=transcribed_text,
+                    sentiment=f"{sentiment_label} ({sentiment_score})",
+                    objection=f"Objection: {closest_objection} | Response: {response}",
+                    recommendations=str(recommendations),
+                    overall_sentiment=f"{sentiment_label}"
+                )
+
+            except Exception as e:
+                st.error(f"Error: {e}")
+                break
 
 def generate_sentiment_pie_chart(sentiment_history):
     if not sentiment_history:
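
Note on the new loop's chunk arithmetic: `int(samplerate / 1024 * duration)` implicitly assumes sounddevice delivers 1024-frame blocks, but the `InputStream` in the commit never pins `blocksize`, so the actual chunk length can drift. Below is a minimal standalone sketch of the same capture-and-transcribe pattern with that assumption made explicit; it is illustrative only, not part of the commit, and it assumes `numpy` is imported as `np` elsewhere in app.py.

import queue
import tempfile

import numpy as np
import scipy.io.wavfile as wav
import sounddevice as sd
import whisper

model = whisper.load_model("base")
audio_queue = queue.Queue()

def callback(indata, frames, time, status):
    # Each callback delivers one float32 block in [-1, 1]; queue it for the main loop.
    audio_queue.put(indata.copy())

samplerate, blocksize, duration = 16000, 1024, 5
with sd.InputStream(samplerate=samplerate, blocksize=blocksize,
                    channels=1, callback=callback):
    # Drain roughly `duration` seconds of blocks from the queue.
    n_blocks = int(samplerate / blocksize * duration)
    audio = np.concatenate([audio_queue.get() for _ in range(n_blocks)], axis=0)
    # Round-trip through a 16-bit WAV file, as the new code does.
    with tempfile.NamedTemporaryFile(suffix=".wav") as f:
        wav.write(f.name, samplerate, np.int16(audio * 32767))
        print(model.transcribe(f.name)["text"])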
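As a design note, openai-whisper's `transcribe` also accepts audio as a 1-D float32 NumPy array sampled at 16 kHz, in addition to a file path, so the temporary-WAV round trip could likely be dropped. A hypothetical simplification of the transcription step:

# Hypothetical: hand Whisper the float32 samples directly (mono, 16 kHz),
# skipping the temporary WAV file and the int16 conversion.
transcribed_text = model.transcribe(audio_data.flatten().astype(np.float32))["text"]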
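Nothing in this hunk appears to invoke `transcribe_audio_stream`; presumably the Streamlit UI calls it elsewhere in app.py. A hypothetical hookup:

# Hypothetical wiring; the actual entry point in app.py may differ.
if st.button("Start real-time transcription"):
    transcribe_audio_stream()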