Commit c187bdf · Parent(s): d8d0078
Chandima Prabhath committed
Enhance voice reply function to include a friendly prompt; implement retry logic with delay in LLM generation
app.py CHANGED

@@ -194,7 +194,8 @@ def _fn_send_text(mid,cid,message):
     client.send_message(mid,cid,message)
 
 def _fn_voice_reply(mid,cid,prompt):
-    res = generate_voice_reply(prompt,model="openai-audio",voice="coral",audio_dir=BotConfig.AUDIO_DIR)
+    processed_prompt = f"Just say this dialog exactly as it is in a friendly and helpful manner as a secretary: {prompt}"
+    res = generate_voice_reply(processed_prompt,model="openai-audio",voice="coral",audio_dir=BotConfig.AUDIO_DIR)
     if res and res[0]:
         path,_ = res
         client.send_media(mid,cid,path,"",media_type="audio")
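For context on the changed function, here is a minimal, self-contained sketch of the voice-reply flow after this commit. Everything except the body of _fn_voice_reply is an assumption: the generate_voice_reply stub, BotConfig, and client below stand in for the Space's real helpers (not shown in this diff), on the reading that the real helper synthesizes speech to a file and returns a (path, metadata) tuple, or a falsy value on failure.

import os

class BotConfig:
    # Assumed stand-in for the Space's real config object.
    AUDIO_DIR = "audio"

class _StubClient:
    # Assumed stand-in for the Space's messaging client.
    def send_media(self, mid, cid, path, caption, media_type):
        print(f"send_media -> chat {cid}: {media_type} at {path}")

client = _StubClient()

def generate_voice_reply(prompt, model="openai-audio", voice="coral", audio_dir="."):
    # Stub TTS helper: writes an (empty) audio file and returns
    # (file_path, metadata), mimicking a success result.
    os.makedirs(audio_dir, exist_ok=True)
    path = os.path.join(audio_dir, "reply.mp3")
    with open(path, "wb") as f:
        f.write(b"")  # placeholder for synthesized audio bytes
    return path, {"model": model, "voice": voice}

def _fn_voice_reply(mid, cid, prompt):
    # Wrap the raw text so the TTS model reads it verbatim in a fixed persona.
    processed_prompt = ("Just say this dialog exactly as it is in a friendly "
                        f"and helpful manner as a secretary: {prompt}")
    res = generate_voice_reply(processed_prompt, audio_dir=BotConfig.AUDIO_DIR)
    if res and res[0]:  # send only when a file path came back
        path, _ = res
        client.send_media(mid, cid, path, "", media_type="audio")

if __name__ == "__main__":
    _fn_voice_reply("m1", "c1", "Your meeting starts at 3 PM.")

Note that the guard `if res and res[0]:` means synthesis failures are silently dropped rather than reported back to the chat.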
polLLM.py CHANGED

@@ -3,6 +3,7 @@ from openai import OpenAI
 from dotenv import load_dotenv
 from utils import read_config
 import random
+import time # Import time for retry delay
 
 load_dotenv()
 
@@ -21,25 +22,26 @@ def pre_process():
 def generate_llm(prompt, model="openai-large", max_tokens=8000):
     system_prompt = pre_process()
 
-    (16 removed lines: the previous completion call without retry logic; their content is not shown in this view)
+    while True: # Keep retrying indefinitely
+        try:
+            # Use OpenAI's ChatCompletion API
+            randomSeed = random.randint(0, 9999999)
+            response = client.chat.completions.create(
+                model=model,
+                messages=[
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": prompt}
+                ],
+                max_tokens=max_tokens,
+                seed=randomSeed
+            )
+            # Return the generated text
+            return response.choices[0].message.content.strip()
+        except Exception as e:
+            print(f"Error occurred: {str(e)}. Retrying in 5 seconds...")
+            time.sleep(5) # Wait before retrying
 
 # Example usage (can be removed or commented out in production):
 if __name__ == "__main__":
-    #sample_prompt = f"Generate a unique trivia Q&A in JSON: {{\"question\":\"...\",\"answer\":\"...\"}}"
     sample_prompt = "search for free image generation api"
     print("Response:", generate_llm(sample_prompt))
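A note on the retry design: the committed loop retries forever on any exception, so a permanent failure (invalid credentials, a removed model name) would spin indefinitely at one request per five seconds. A common alternative is bounded retries with exponential backoff and jitter. The sketch below is generic, assumes nothing beyond the standard library, and uses `call` as a stand-in for the client.chat.completions.create(...) invocation above.

import random
import time

def retry_with_backoff(call, max_attempts=5, base_delay=1.0, max_delay=30.0):
    # Bounded retry: `call` is any zero-argument callable performing the
    # request. The delay doubles each attempt, capped at max_delay.
    for attempt in range(1, max_attempts + 1):
        try:
            return call()
        except Exception as e:
            if attempt == max_attempts:
                raise  # surface the error after the final attempt
            delay = min(max_delay, base_delay * 2 ** (attempt - 1))
            delay *= 0.5 + random.random()  # jitter spreads out retries
            print(f"Attempt {attempt} failed: {e}. Retrying in {delay:.1f}s...")
            time.sleep(delay)

# Hypothetical usage inside generate_llm, replacing the while True loop:
#   return retry_with_backoff(
#       lambda: client.chat.completions.create(...).choices[0].message.content.strip()
#   )

With this shape, transient API errors are still absorbed, but a persistent fault fails fast enough for the caller to notice instead of blocking the bot indefinitely.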