# eve / app.py
import os
import threading
import requests
import logging
import queue
import re
from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import PlainTextResponse, JSONResponse
from FLUX import generate_image
from VoiceReply import generate_voice_reply
from llm import generate_llm
# Configure logging for debugging
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s [%(levelname)s] %(message)s")
# Environment variables
GREEN_API_URL = os.getenv("GREEN_API_URL")
GREEN_API_MEDIA_URL = os.getenv("GREEN_API_MEDIA_URL", "https://api.green-api.com")
GREEN_API_TOKEN = os.getenv("GREEN_API_TOKEN")
GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
image_dir = "/tmp/images"
audio_dir = "/tmp/audio"
os.makedirs(image_dir, exist_ok=True)  # ensure the scratch directories exist before tasks write into them
os.makedirs(audio_dir, exist_ok=True)
if not all([GREEN_API_URL, GREEN_API_TOKEN, GREEN_API_ID_INSTANCE, WEBHOOK_AUTH_TOKEN]):
    raise ValueError("Missing required environment variables: "
                     "GREEN_API_URL, GREEN_API_TOKEN, GREEN_API_ID_INSTANCE, WEBHOOK_AUTH_TOKEN")
# Task queue for processing messages sequentially
task_queue = queue.Queue()
app = FastAPI()
# Worker thread to process queued tasks one by one
def worker():
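    """Drain the task queue forever, dispatching each job to its handler.

    Tasks are dicts with a "type" of "image" or "audio" plus the originating
    message_id, chat_id and prompt; failures are logged and never kill the thread.
    """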
while True:
task = task_queue.get()
try:
typ = task.get("type")
mid = task.get("message_id")
cid = task.get("chat_id")
if typ == "image":
handle_image_generation(mid, cid, task.get("prompt"))
elif typ == "audio":
response_audio(mid, cid, task.get("prompt"))
except Exception as e:
logging.error(f"Error processing task {task}: {e}")
finally:
task_queue.task_done()
# Start the worker thread
threading.Thread(target=worker, daemon=True).start()
def send_message(message_id, to_number, message, retries=3):
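    """Send a text reply via the GREEN API sendMessage endpoint, quoting the original message.

    Retries up to `retries` times and returns the parsed JSON response,
    or {"error": ...} if every attempt fails.
    """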
    chat_id = to_number  # the webhook already supplies a full chat ID ('...@c.us' for users, '...@g.us' for groups)
url = f"{GREEN_API_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendMessage/{GREEN_API_TOKEN}"
payload = {
"chatId": chat_id,
"message": message,
"quotedMessageId": message_id,
}
for attempt in range(retries):
try:
r = requests.post(url, json=payload)
r.raise_for_status()
return r.json()
except requests.RequestException as e:
if attempt < retries - 1:
continue
return {"error": str(e)}
def send_image(message_id, to_number, image_path, retries=3):
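    """Upload an image via sendFileByUpload, quoting the original message.

    Returns the parsed JSON response, or {"error": ...} after exhausting retries.
    """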
    chat_id = to_number  # full chat ID is passed through unchanged
url = f"{GREEN_API_MEDIA_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendFileByUpload/{GREEN_API_TOKEN}"
payload = {'chatId': chat_id, 'caption': 'Here you go!', 'quotedMessageId': message_id}
    for attempt in range(retries):
        try:
            # Reopen the file on each attempt so a retry never sends an exhausted stream,
            # and the handle is always closed.
            with open(image_path, 'rb') as f:
                files = [('file', ('image.jpg', f, 'image/jpeg'))]
                r = requests.post(url, data=payload, files=files)
            r.raise_for_status()
            return r.json()
        except requests.RequestException as e:
            if attempt < retries - 1:
                continue
            return {"error": str(e)}
def send_audio(message_id, to_number, audio_path, retries=3):
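    """Upload an MP3 voice reply via sendFileByUpload, quoting the original message.

    Returns the parsed JSON response, or {"error": ...} if the file is missing
    or every upload attempt fails.
    """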
logging.debug("Entering send_audio")
    chat_id = to_number  # full chat ID is passed through unchanged
    if not os.path.exists(audio_path):
        logging.error(f"Audio file does not exist: {audio_path}")
        return {"error": f"Audio file not found: {audio_path}"}
url = f"{GREEN_API_MEDIA_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendFileByUpload/{GREEN_API_TOKEN}"
payload = {'chatId': chat_id, 'caption': 'Here is your voice reply!', 'quotedMessageId': message_id}
try:
with open(audio_path, 'rb') as f:
files = [('file', ('audio.mp3', f, 'audio/mpeg'))]
for attempt in range(retries):
try:
logging.debug(f"Attempt {attempt+1} to send audio")
                    f.seek(0)  # rewind so a retry re-sends the whole file rather than an exhausted stream
                    r = requests.post(url, data=payload, files=files)
logging.debug(f"send_audio response: {r.status_code} {r.text}")
r.raise_for_status()
return r.json()
except requests.RequestException as e:
logging.debug(f"send_audio error on attempt {attempt+1}: {e}")
if attempt < retries - 1:
continue
return {"error": str(e)}
except Exception as e:
logging.debug(f"Failed to open audio file: {e}")
return {"error": str(e)}
def response_text(message_id, chat_id, prompt):
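    """Generate a text reply with the LLM and send it; sends an apology message on error."""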
try:
msg = generate_llm(prompt)
send_message(message_id, chat_id, msg)
    except Exception:
        logging.exception("Failed to generate LLM reply")
        send_message(message_id, chat_id, "There was an error processing your request.")
def response_audio(message_id, chat_id, prompt):
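    """Generate a spoken reply for the prompt and send it as an MP3.

    Falls back to a plain text reply when voice generation returns nothing,
    and deletes the temporary audio file once it has been sent.
    """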
logging.debug("Entering response_audio with prompt: %s", prompt)
try:
result = generate_voice_reply(prompt, model="openai-audio", voice="coral", audio_dir=audio_dir)
logging.debug("generate_voice_reply result: %s", result)
if result and result[0]:
audio_path, _ = result
send_audio(message_id, chat_id, audio_path)
if os.path.exists(audio_path):
os.remove(audio_path)
logging.debug("Removed audio file: %s", audio_path)
else:
logging.debug("Falling back to text response")
response_text(message_id, chat_id, prompt)
except Exception as e:
logging.debug("Exception in response_audio: %s", e)
send_message(message_id, chat_id, "There was an error generating the audio. Please try again later.")
def handle_image_generation(message_id, chat_id, prompt):
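    """Generate an image for the prompt, send it, and follow up with the image URL and final prompt."""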
try:
image, image_path, returned_prompt, image_url = generate_image(prompt, message_id, message_id, image_dir)
if image:
send_image(message_id, chat_id, image_path)
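            # chr(8203) below is a zero-width space placed between the '>' quote marker and the italicised prompt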
send_message(
message_id, chat_id,
f"Image generated successfully! View it here: {image_url}\n>{chr(8203)} _{returned_prompt}_"
)
else:
send_message(message_id, chat_id, "Failed to generate image. Please try again later.")
    except Exception:
        logging.exception("Image generation failed")
        send_message(message_id, chat_id, "There was an error generating the image. Please try again later.")
@app.get("/", response_class=PlainTextResponse)
def index():
return "Server is running!"
@app.post("/whatsapp")
async def whatsapp_webhook(request: Request):
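    """Entry point for GREEN API webhooks: authenticate, filter, and enqueue work.

    Only 'incomingMessageReceived' notifications are processed; quoted messages
    and mentions are ignored, '/imagine <prompt>' is queued as an image task,
    and any other text is queued as a voice-reply task.
    """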
# 1) Auth
auth = request.headers.get('Authorization', '').strip()
if auth != f"Bearer {WEBHOOK_AUTH_TOKEN}":
raise HTTPException(403, "Unauthorized")
# 2) Parse JSON
try:
data = await request.json()
    except Exception:
return JSONResponse({"error": "Invalid JSON"}, status_code=400)
# 3) Only handle incoming messages
if data.get('typeWebhook') != 'incomingMessageReceived':
return {"success": True}
logging.debug(f"Received data: {data}")
# 4) Extract core fields
try:
chat_id = data['senderData']['chatId']
message_id = data['idMessage']
message_data = data.get('messageData', {})
except KeyError as e:
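        # Acknowledge with 200 so the malformed notification is not re-delivered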
return JSONResponse({"error": f"Missing key: {e}"}, status_code=200)
# --- NEW: IGNORE any WhatsApp “quotedMessage” payload entirely ---
if message_data.get('typeMessage') == 'quotedMessage' or 'quotedMessage' in message_data:
logging.debug("Ignoring WhatsApp quotedMessage payload")
return {"success": True}
# --------------------------------------------------------------------
# 5) Extract text body
if 'textMessageData' in message_data:
body = message_data['textMessageData'].get('textMessage', '').strip()
ctx = message_data['textMessageData'].get('contextInfo', {})
elif 'extendedTextMessageData' in message_data:
body = message_data['extendedTextMessageData'].get('text', '').strip()
ctx = message_data['extendedTextMessageData'].get('contextInfo', {})
else:
return {"success": True}
# 6) IGNORE structured mentions
if ctx.get('mentionedJid') or ctx.get('mentionedJidList'):
logging.debug(f"Ignoring structured mention: {ctx.get('mentionedJid') or ctx.get('mentionedJidList')}")
return {"success": True}
# 7) IGNORE plain-text "@1234..." mentions in groups
if chat_id.endswith('@g.us') and re.search(r'@\d+', body):
logging.debug(f"Ignoring plain-text mention in body: {body}")
return {"success": True}
# 8) Enqueue work
if body.lower().startswith('/imagine'):
prompt = body[len('/imagine'):].strip()
if not prompt:
send_message(message_id, chat_id, "Please provide a prompt after /imagine.")
else:
send_message(message_id, chat_id, "Generating...")
task_queue.put({
"type": "image",
"message_id": message_id,
"chat_id": chat_id,
"prompt": prompt
})
else:
task_queue.put({
"type": "audio",
"message_id": message_id,
"chat_id": chat_id,
"prompt": body
})
return {"success": True}
if __name__ == '__main__':
import uvicorn
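    # Bind to all interfaces on port 7860, the default port expected by Hugging Face Spaces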
uvicorn.run(app, host="0.0.0.0", port=7860)