geethareddy committed on
Commit
34cf2ff
·
verified ·
1 Parent(s): ac701d2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -31
app.py CHANGED
@@ -3,21 +3,52 @@ from gtts import gTTS
3
  import tempfile
4
  from transformers import pipeline
5
  import os
 
 
 
 
 
 
6
 
7
  # Configure Hugging Face API Token (Replace with your actual token)
8
  HUGGINGFACE_API_TOKEN = "your_huggingface_api_token" # Replace with your Hugging Face API token
9
 
 
 
 
 
 
 
 
 
 
 
 
10
  # Initialize Hugging Face Whisper model for speech-to-text (multilingual support)
11
- whisper_pipeline = pipeline("automatic-speech-recognition",
12
- model="openai/whisper-small",
13
- token=HUGGINGFACE_API_TOKEN,
14
- device=-1) # Use CPU if GPU unavailable
 
 
 
 
 
 
 
15
 
16
  # Initialize Hugging Face text generation model for chatbot
17
- text_generation_pipeline = pipeline("text2text-generation",
18
- model="facebook/bart-large-cnn",
19
- token=HUGGINGFACE_API_TOKEN,
20
- device=-1) # Use CPU if GPU unavailable
 
 
 
 
 
 
 
21
 
22
  # Define chatbot knowledge base
23
  base_info = """
@@ -89,9 +120,13 @@ context = [base_info, mental_health, medical_assistance, medicine_recommendation
89
 
90
  # Function to get AI response using text generation model
91
  def get_llm_response(message):
92
- full_context = "\n".join(context) + f"\nUser: {message}\nMindCare:"
93
- response = text_generation_pipeline(full_context, max_length=500, num_return_sequences=1)[0]['generated_text']
94
- return response
 
 
 
 
95
 
96
  # Function to process voice input and convert to text
97
  def process_voice_input(audio_file, language="en"):
@@ -108,29 +143,34 @@ def process_voice_input(audio_file, language="en"):
108
  generate_kwargs={"language": language_map.get(language, "english")})
109
  return transcription["text"]
110
  except Exception as e:
 
111
  return f"Error processing audio: {str(e)}"
112
 
113
  # Define chatbot response function with voice and text input
114
  def bot(message=None, audio_file=None, language="en", history=None):
115
- if audio_file:
116
- # Process voice input if provided
117
- message = process_voice_input(audio_file, language)
118
-
119
- if not message:
120
- return "No input provided. Please type a message or upload an audio file.", None
121
-
122
- # Get response from text generation model
123
- response = get_llm_response(message)
124
-
125
- # Convert the response to speech using gTTS
126
- tts = gTTS(text=response, lang=language, slow=False)
127
-
128
- # Save the audio file in a temporary directory
129
- with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_audio:
130
- audio_path = temp_audio.name
131
- tts.save(audio_path)
132
-
133
- return response, audio_path
 
 
 
 
134
 
135
  # Create Gradio interface with voice and text input
136
  demo = gr.Interface(
@@ -149,4 +189,9 @@ demo = gr.Interface(
149
  )
150
 
151
  # Launch the Gradio app
152
- demo.launch(debug=True, share=True)
 
 
 
 
 
 
3
  import tempfile
4
  from transformers import pipeline
5
  import os
6
+ import logging
7
+ from huggingface_hub import login
8
+
9
# Module-wide logging: INFO level so model-load and login progress is visible,
# and a named logger so records can be filtered per module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
12
 
13
  # Configure Hugging Face API Token (Replace with your actual token)
14
  HUGGINGFACE_API_TOKEN = "your_huggingface_api_token" # Replace with your Hugging Face API token
15
 
16
# Validate the API token before touching the Hub.
# Bug fix: besides the unchanged placeholder, also reject an empty/None token —
# previously a blank token passed validation and failed later inside login().
if not HUGGINGFACE_API_TOKEN or HUGGINGFACE_API_TOKEN == "your_huggingface_api_token":
    logger.error("Please replace 'your_huggingface_api_token' with a valid Hugging Face API token from https://huggingface.co/settings/tokens")
    raise ValueError("Invalid Hugging Face API token. Please set a valid token.")
try:
    # Authenticate once at startup; downstream pipeline() calls reuse the session.
    login(token=HUGGINGFACE_API_TOKEN)
    logger.info("Successfully logged in to Hugging Face Hub")
except Exception as e:
    logger.error(f"Failed to log in to Hugging Face Hub: {str(e)}")
    raise
26
+
27
# Speech-to-text: multilingual Whisper. The tiny checkpoint keeps download size
# and CPU latency low; device=-1 forces CPU so the app also runs without a GPU.
try:
    whisper_pipeline = pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-tiny",
        token=HUGGINGFACE_API_TOKEN,
        device=-1,
    )
    logger.info("Whisper model initialized successfully")
except Exception as exc:
    # Fail fast: without transcription the voice path of the app is unusable.
    logger.error(f"Failed to initialize Whisper model: {str(exc)}")
    raise
39
 
40
# Chatbot backend: a small text2text model (FLAN-T5) driven by the knowledge
# base assembled below; device=-1 keeps inference on the CPU.
try:
    text_generation_pipeline = pipeline(
        "text2text-generation",
        model="google/flan-t5-small",
        token=HUGGINGFACE_API_TOKEN,
        device=-1,
    )
    logger.info("Text generation model initialized successfully")
except Exception as exc:
    # Without the generator there is nothing to answer with — abort startup.
    logger.error(f"Failed to initialize text generation model: {str(exc)}")
    raise
52
 
53
  # Define chatbot knowledge base
54
  base_info = """
 
120
 
121
# Build the prompt from the knowledge base and ask the text2text model.
def get_llm_response(message):
    """Return MindCare's reply to *message*.

    The module-level ``context`` sections are joined into a single prompt,
    followed by the user turn. Any model failure is logged and reported back
    as the reply string, so callers always receive a str and never an exception.
    """
    try:
        prompt = "\n".join(context) + f"\nUser: {message}\nMindCare:"
        outputs = text_generation_pipeline(prompt, max_length=500, num_return_sequences=1)
        return outputs[0]['generated_text']
    except Exception as exc:
        logger.error(f"Error generating response: {str(exc)}")
        return f"Error generating response: {str(exc)}"
130
 
131
  # Function to process voice input and convert to text
132
  def process_voice_input(audio_file, language="en"):
 
143
  generate_kwargs={"language": language_map.get(language, "english")})
144
  return transcription["text"]
145
  except Exception as e:
146
+ logger.error(f"Error processing audio: {str(e)}")
147
  return f"Error processing audio: {str(e)}"
148
 
149
# Define chatbot response function with voice and text input
def bot(message=None, audio_file=None, language="en", history=None):
    """Handle one chat turn from either typed text or an uploaded recording.

    Parameters
    ----------
    message : str, optional
        Typed user message; overridden when *audio_file* is provided.
    audio_file : str, optional
        Path to an uploaded audio file to transcribe via Whisper.
    language : str
        Language code for both transcription and gTTS output (default "en").
    history : unused
        Present only for Gradio signature compatibility.

    Returns
    -------
    tuple
        ``(reply_text, mp3_path_or_None)``.
    """
    try:
        if audio_file:
            # Process voice input if provided
            message = process_voice_input(audio_file, language)
            # Bug fix: process_voice_input signals failure by returning an
            # error string; previously that string was fed to the LLM and
            # gTTS as if the user had said it. Surface it directly instead.
            if isinstance(message, str) and message.startswith("Error processing audio:"):
                return message, None

        if not message:
            return "No input provided. Please type a message or upload an audio file.", None

        # Get response from text generation model
        response = get_llm_response(message)

        # Convert the response to speech using gTTS
        tts = gTTS(text=response, lang=language, slow=False)

        # Reserve a temp .mp3 path, then save AFTER the handle is closed —
        # bug fix: writing while NamedTemporaryFile is still open fails on
        # Windows (the file is held exclusively by the open handle).
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_audio:
            audio_path = temp_audio.name
        tts.save(audio_path)

        return response, audio_path
    except Exception as e:
        logger.error(f"Error in bot function: {str(e)}")
        return f"Error: {str(e)}", None
174
 
175
  # Create Gradio interface with voice and text input
176
  demo = gr.Interface(
 
189
  )
190
 
191
# Launch the Gradio app only when run as a script (not when imported).
if __name__ == "__main__":
    try:
        demo.launch(debug=True, share=True)
    except Exception as exc:
        logger.error(f"Failed to launch Gradio app: {str(exc)}")
        raise