import os
import threading
import requests
import logging
import queue
import re
from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import PlainTextResponse, JSONResponse
from FLUX import generate_image
from VoiceReply import generate_voice_reply
from llm import generate_llm
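# Local helper modules: FLUX image generation, voice (TTS) replies, and LLM text completion.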
# Configure logging for debugging
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s [%(levelname)s] %(message)s")
# Environment variables
GREEN_API_URL = os.getenv("GREEN_API_URL")
GREEN_API_MEDIA_URL = os.getenv("GREEN_API_MEDIA_URL", "https://api.green-api.com")
GREEN_API_TOKEN = os.getenv("GREEN_API_TOKEN")
GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
image_dir = "/tmp/images"
audio_dir = "/tmp/audio"
os.makedirs(image_dir, exist_ok=True)  # make sure the scratch directories exist before the worker needs them
os.makedirs(audio_dir, exist_ok=True)
if not all([GREEN_API_URL, GREEN_API_TOKEN, GREEN_API_ID_INSTANCE, WEBHOOK_AUTH_TOKEN]):
    raise ValueError("Required environment variables (GREEN_API_URL, GREEN_API_TOKEN, GREEN_API_ID_INSTANCE, WEBHOOK_AUTH_TOKEN) are not set")
# Task queue for processing messages sequentially
task_queue = queue.Queue()
app = FastAPI()
# Worker thread to process queued tasks one by one
def worker():
while True:
task = task_queue.get()
try:
typ = task.get("type")
mid = task.get("message_id")
cid = task.get("chat_id")
if typ == "image":
handle_image_generation(mid, cid, task.get("prompt"))
elif typ == "audio":
response_audio(mid, cid, task.get("prompt"))
except Exception as e:
logging.error(f"Error processing task {task}: {e}")
finally:
task_queue.task_done()
# Start the worker thread
threading.Thread(target=worker, daemon=True).start()
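# Send a text message through the Green API sendMessage endpoint, quoting the triggering message.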
def send_message(message_id, to_number, message, retries=3):
    chat_id = to_number  # chatId from the webhook already carries its @c.us / @g.us suffix
url = f"{GREEN_API_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendMessage/{GREEN_API_TOKEN}"
payload = {
"chatId": chat_id,
"message": message,
"quotedMessageId": message_id,
}
for attempt in range(retries):
try:
            r = requests.post(url, json=payload, timeout=30)  # bound the request so a stalled call cannot hang the caller
r.raise_for_status()
return r.json()
except requests.RequestException as e:
if attempt < retries - 1:
continue
return {"error": str(e)}
def send_image(message_id, to_number, image_path, retries=3):
    chat_id = to_number  # chatId from the webhook already carries its @c.us / @g.us suffix
    url = f"{GREEN_API_MEDIA_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendFileByUpload/{GREEN_API_TOKEN}"
    payload = {'chatId': chat_id, 'caption': 'Here you go!', 'quotedMessageId': message_id}
    # Read the image once so the file handle is closed and retries re-send the full payload
    # instead of an already-consumed stream.
    with open(image_path, 'rb') as f:
        image_bytes = f.read()
    for attempt in range(retries):
        try:
            files = [('file', ('image.jpg', image_bytes, 'image/jpeg'))]
            r = requests.post(url, data=payload, files=files, timeout=60)  # uploads can be slow, but should not hang forever
            r.raise_for_status()
            return r.json()
        except requests.RequestException as e:
            if attempt < retries - 1:
                continue
            return {"error": str(e)}
def send_audio(message_id, to_number, audio_path, retries=3):
    logging.debug("Entering send_audio")
    chat_id = to_number  # chatId from the webhook already carries its @c.us / @g.us suffix
    if not os.path.exists(audio_path):
        logging.debug(f"Audio file does not exist: {audio_path}")
        return {"error": f"Audio file not found: {audio_path}"}
    url = f"{GREEN_API_MEDIA_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendFileByUpload/{GREEN_API_TOKEN}"
    payload = {'chatId': chat_id, 'caption': 'Here is your voice reply!', 'quotedMessageId': message_id}
    try:
        # Read the audio once so retries re-send the full payload instead of an exhausted stream.
        with open(audio_path, 'rb') as f:
            audio_bytes = f.read()
    except OSError as e:
        logging.debug(f"Failed to open audio file: {e}")
        return {"error": str(e)}
    for attempt in range(retries):
        try:
            logging.debug(f"Attempt {attempt+1} to send audio")
            files = [('file', ('audio.mp3', audio_bytes, 'audio/mpeg'))]
            r = requests.post(url, data=payload, files=files, timeout=60)  # uploads can be slow, but should not hang forever
            logging.debug(f"send_audio response: {r.status_code} {r.text}")
            r.raise_for_status()
            return r.json()
        except requests.RequestException as e:
            logging.debug(f"send_audio error on attempt {attempt+1}: {e}")
            if attempt < retries - 1:
                continue
            return {"error": str(e)}
def response_text(message_id, chat_id, prompt):
try:
msg = generate_llm(prompt)
send_message(message_id, chat_id, msg)
    except Exception as e:
        logging.error(f"Error generating text reply: {e}")
        send_message(message_id, chat_id, "There was an error processing your request.")
def response_audio(message_id, chat_id, prompt):
logging.debug("Entering response_audio with prompt: %s", prompt)
try:
result = generate_voice_reply(prompt, model="openai-audio", voice="coral", audio_dir=audio_dir)
logging.debug("generate_voice_reply result: %s", result)
if result and result[0]:
audio_path, _ = result
send_audio(message_id, chat_id, audio_path)
if os.path.exists(audio_path):
os.remove(audio_path)
logging.debug("Removed audio file: %s", audio_path)
else:
logging.debug("Falling back to text response")
response_text(message_id, chat_id, prompt)
except Exception as e:
logging.debug("Exception in response_audio: %s", e)
send_message(message_id, chat_id, "There was an error generating the audio. Please try again later.")
def handle_image_generation(message_id, chat_id, prompt):
try:
image, image_path, returned_prompt, image_url = generate_image(prompt, message_id, message_id, image_dir)
if image:
send_image(message_id, chat_id, image_path)
send_message(
message_id, chat_id,
f"Image generated successfully! View it here: {image_url}\n>{chr(8203)} _{returned_prompt}_"
)
else:
send_message(message_id, chat_id, "Failed to generate image. Please try again later.")
    except Exception as e:
        logging.error(f"Error generating image: {e}")
        send_message(message_id, chat_id, "There was an error generating the image. Please try again later.")
@app.get("/", response_class=PlainTextResponse)
def index():
return "Server is running!"
@app.post("/whatsapp")
async def whatsapp_webhook(request: Request):
# 1) Auth
auth = request.headers.get('Authorization', '').strip()
if auth != f"Bearer {WEBHOOK_AUTH_TOKEN}":
raise HTTPException(403, "Unauthorized")
# 2) Parse JSON
try:
data = await request.json()
    except Exception:
return JSONResponse({"error": "Invalid JSON"}, status_code=400)
# 3) Only handle incoming messages
if data.get('typeWebhook') != 'incomingMessageReceived':
return {"success": True}
logging.debug(f"Received data: {data}")
# 4) Extract core fields
try:
chat_id = data['senderData']['chatId']
message_id = data['idMessage']
message_data = data.get('messageData', {})
except KeyError as e:
return JSONResponse({"error": f"Missing key: {e}"}, status_code=200)
# --- NEW: IGNORE any WhatsApp “quotedMessage” payload entirely ---
if message_data.get('typeMessage') == 'quotedMessage' or 'quotedMessage' in message_data:
logging.debug("Ignoring WhatsApp quotedMessage payload")
return {"success": True}
# --------------------------------------------------------------------
# 5) Extract text body
if 'textMessageData' in message_data:
body = message_data['textMessageData'].get('textMessage', '').strip()
ctx = message_data['textMessageData'].get('contextInfo', {})
elif 'extendedTextMessageData' in message_data:
body = message_data['extendedTextMessageData'].get('text', '').strip()
ctx = message_data['extendedTextMessageData'].get('contextInfo', {})
else:
return {"success": True}
# 6) IGNORE structured mentions
if ctx.get('mentionedJid') or ctx.get('mentionedJidList'):
logging.debug(f"Ignoring structured mention: {ctx.get('mentionedJid') or ctx.get('mentionedJidList')}")
return {"success": True}
# 7) IGNORE plain-text "@1234..." mentions in groups
if chat_id.endswith('@g.us') and re.search(r'@\d+', body):
logging.debug(f"Ignoring plain-text mention in body: {body}")
return {"success": True}
# 8) Enqueue work
if body.lower().startswith('/imagine'):
prompt = body[len('/imagine'):].strip()
if not prompt:
send_message(message_id, chat_id, "Please provide a prompt after /imagine.")
else:
send_message(message_id, chat_id, "Generating...")
task_queue.put({
"type": "image",
"message_id": message_id,
"chat_id": chat_id,
"prompt": prompt
})
else:
task_queue.put({
"type": "audio",
"message_id": message_id,
"chat_id": chat_id,
"prompt": body
})
return {"success": True}
if __name__ == '__main__':
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=7860)
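# Illustrative local test request (placeholder chat and message IDs, not real Green API values):
#   curl -X POST http://localhost:7860/whatsapp \
#     -H "Authorization: Bearer $WEBHOOK_AUTH_TOKEN" \
#     -H "Content-Type: application/json" \
#     -d '{"typeWebhook":"incomingMessageReceived","idMessage":"TEST1","senderData":{"chatId":"10000000000@c.us"},"messageData":{"textMessageData":{"textMessage":"/imagine a red fox"}}}'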