Chandima Prabhath committed on
Commit 5f26e9c · 1 Parent(s): 61fd286

Enhance logging and error handling across modules; refactor image generation and upload processes; update configuration for image generation parameters.

Files changed (4)
  1. FLUX.py +109 -113
  2. app.py +273 -118
  3. config.yaml +2 -65
  4. polLLM.py +71 -32
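
The recurring piece behind the logging changes in this commit is a per-module logger driven by a LOG_LEVEL environment variable. A condensed sketch of the pattern as it appears in FLUX.py, app.py and polLLM.py below (the logger name varies per module):

    import logging
    import os

    # Each module creates its own named logger ("flux", "eve_bot", "polLLM")
    # and honours the LOG_LEVEL environment variable, defaulting to INFO.
    logger = logging.getLogger("flux")
    logger.setLevel(os.getenv("LOG_LEVEL", "INFO").upper())
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"))
    logger.addHandler(handler)
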
FLUX.py CHANGED
@@ -1,147 +1,143 @@
1
- import requests
2
- import time
3
- import io
4
  import os
5
- import re
6
- import json
7
  import random
 
 
8
  from PIL import Image, UnidentifiedImageError
9
 
10
- # Load the ImgBB API key from the environment variables.
11
  IMGBB_API_KEY = os.getenv("IMGBB_API_KEY")
12
 
13
- def upload_to_imgbb(image_path, file_name):
14
  """
15
- Uploads the image located at image_path to ImgBB.
16
- Returns:
17
- str: URL of the uploaded image on ImgBB or None if failed.
18
  """
 
 
 
 
19
  try:
20
  with open(image_path, 'rb') as f:
21
- image_data = f.read()
22
- response = requests.post(
23
  "https://api.imgbb.com/1/upload",
24
  params={"key": IMGBB_API_KEY},
25
- files={"image": (file_name, image_data)}
 
26
  )
27
- response.raise_for_status()
28
- result = response.json()
29
- if result.get("data") and "url" in result["data"]:
30
- return result["data"]["url"]
 
 
31
  else:
32
- print("Failed to upload image to ImgBB.")
33
  return None
34
- except requests.RequestException as e:
35
- print(f"Error uploading image to ImgBB: {e}")
36
- return None
37
  except Exception as e:
38
- print(f"Unexpected error uploading image to ImgBB: {e}")
39
  return None
40
 
41
- def generate_image(prompt, request_id, current_request_id, image_dir, attempt=0):
42
  """
43
- Generate an image using the Pollinations API.
44
-
45
- Parameters:
46
- prompt (str): The prompt for image generation.
47
- width (int): Desired image width.
48
- height (int): Desired image height.
49
- request_id (int): The request id for the current operation.
50
- current_request_id (int): The current active request id.
51
- image_dir (str): Directory where image will be saved.
52
- attempt (int): Current attempt count (zero-indexed).
53
-
54
  Returns:
55
- tuple: (PIL.Image object, image_path (str), returned_prompt (str), image_url (str))
56
- or None if image fetch fails or request id mismatches.
57
  """
58
- model = "flux"
59
- width = 1920
60
- height = 1080
61
- randomSeed = random.randint(0, 9999999)
62
- url = f"https://image.pollinations.ai/prompt/{prompt}?nologo=true&safe=false&private=true&model={model}&enhance=true&width={width}&height={height}&seed={randomSeed}"
63
- print(f"Attempt {attempt + 1}: Fetching image with URL: {url}")
64
-
65
- try:
66
- response = requests.get(url, timeout=45)
67
- except Exception as e:
68
- print(f"Error fetching image: {e}")
69
- return None
70
 
71
- if response.status_code != 200:
72
- print(f"Failed to fetch image. Status code: {response.status_code}")
 
73
  return None
74
 
 
75
  if request_id != current_request_id:
76
- print("Request ID mismatch. Operation cancelled.")
77
  return None
78
 
79
- print("Image fetched successfully.")
80
- image_data = response.content
81
-
82
  try:
83
- image = Image.open(io.BytesIO(image_data))
84
- actual_width, actual_height = image.size
85
- print(f"Actual image dimensions: {actual_width}x{actual_height}")
86
-
87
- # Extract metadata from EXIF if available
88
- exif_data = image.info.get('exif', b'')
89
- returned_prompt = prompt
90
- if exif_data:
91
- json_match = re.search(b'{"prompt":.*}', exif_data)
92
- if json_match:
93
- json_str = json_match.group(0).decode('utf-8')
94
- try:
95
- metadata_dict = json.loads(json_str)
96
- returned_prompt = metadata_dict.get('prompt', prompt)
97
- except json.JSONDecodeError as e:
98
- print(f"Failed to parse JSON in metadata: {e}")
99
- else:
100
- print("No JSON data found in EXIF")
101
-
102
- if (actual_width, actual_height) != (width, height):
103
- print(f"Warning: Received image dimensions ({actual_width}x{actual_height}) do not match requested dimensions ({width}x{height})")
104
- except UnidentifiedImageError:
105
- print("Error: Received data is not a valid image.")
106
- raise
107
 
108
- timestamp = int(time.time())
109
- image_filename = f"flux_{timestamp}.png"
110
- image_path = os.path.join(image_dir, image_filename)
111
-
112
- # Ensure the image directory exists
113
  os.makedirs(image_dir, exist_ok=True)
114
-
 
 
115
  try:
116
- image.save(image_path, 'PNG')
117
- print(f"Image saved to {image_path}")
118
- # Upload image to ImgBB
119
- image_url = upload_to_imgbb(image_path, image_filename)
120
- if image_url:
121
- print(f"Image uploaded to ImgBB: {image_url}")
122
- else:
123
- print("Failed to upload image to ImgBB.")
124
  except Exception as e:
125
- print(f"Error saving image: {e}")
126
  return None
127
 
128
- return image, image_path, returned_prompt, image_url
129
-
130
- # if __name__ == "__main__":
131
- # from dotenv import load_dotenv
132
-
133
- # load_dotenv()
134
- # # Example usage
135
- # prompt = "Beach party, anime style, vibrant colors"
136
- # request_id = 1
137
- # current_request_id = 1
138
- # image_dir = "./images"
139
-
140
- # image, image_path, returned_prompt, image_url = generate_image(prompt, request_id, current_request_id, image_dir)
141
-
142
- # if image:
143
- # print(f"Image generated and saved at {image_path}")
144
- # print(f"Returned prompt: {returned_prompt}")
145
- # print(f"Image URL: {image_url}")
146
- # else:
147
- # print("Failed to generate image.")
1
  import os
2
+ import io
3
+ import time
4
  import random
5
+ import logging
6
+ import requests
7
  from PIL import Image, UnidentifiedImageError
8
 
9
+ # --- Logging setup ---
10
+ logger = logging.getLogger("flux")
11
+ LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
12
+ logger.setLevel(LOG_LEVEL)
13
+ handler = logging.StreamHandler()
14
+ handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"))
15
+ logger.addHandler(handler)
16
+
17
+ # --- Configuration ---
18
  IMGBB_API_KEY = os.getenv("IMGBB_API_KEY")
19
+ DEFAULT_MODEL = "flux"
20
+ DEFAULT_WIDTH = 1920
21
+ DEFAULT_HEIGHT = 1080
22
+ MAX_RETRIES = 3
23
+ BACKOFF_BASE = 2 # exponential backoff
24
 
25
+ def upload_to_imgbb(image_path: str, file_name: str) -> str | None:
26
  """
27
+ Uploads the image at image_path to ImgBB.
28
+ Returns the public URL or None on failure.
 
29
  """
30
+ if not IMGBB_API_KEY:
31
+ logger.warning("IMGBB_API_KEY not set, skipping upload")
32
+ return None
33
+
34
  try:
35
  with open(image_path, 'rb') as f:
36
+ files = {"image": (file_name, f.read())}
37
+ resp = requests.post(
38
  "https://api.imgbb.com/1/upload",
39
  params={"key": IMGBB_API_KEY},
40
+ files=files,
41
+ timeout=15
42
  )
43
+ resp.raise_for_status()
44
+ data = resp.json().get("data", {})
45
+ url = data.get("url")
46
+ if url:
47
+ logger.debug(f"Uploaded to ImgBB: {url}")
48
+ return url
49
  else:
50
+ logger.error("ImgBB response missing URL")
51
  return None
52
  except Exception as e:
53
+ logger.error(f"ImgBB upload failed: {e}")
54
  return None
55
 
56
+ def generate_image(
57
+ prompt: str,
58
+ request_id: str,
59
+ current_request_id: str,
60
+ image_dir: str,
61
+ model: str = None,
62
+ width: int = None,
63
+ height: int = None
64
+ ) -> tuple[Image.Image, str, str, str] | None:
65
  """
66
+ Generate an image via Pollinations API, save locally, upload to ImgBB.
67
+
68
  Returns:
69
+ (PIL.Image, local_path, returned_prompt, image_url) or None on failure.
 
70
  """
71
+ model = model or DEFAULT_MODEL
72
+ width = width or DEFAULT_WIDTH
73
+ height = height or DEFAULT_HEIGHT
74
 
75
+ # if the request has been superseded, bail early
76
+ if request_id != current_request_id:
77
+ logger.info("Request ID mismatch; cancelling generation")
78
  return None
79
 
80
+ seed = random.randint(0, 2**31 - 1)
81
+ url = (
82
+ f"https://image.pollinations.ai/prompt/{requests.utils.quote(prompt)}"
83
+ f"?nologo=true&safe=false&private=true&model={model}"
84
+ f"&enhance=true&width={width}&height={height}&seed={seed}"
85
+ )
86
+ logger.debug(f"Fetching image (seed={seed}): {url}")
87
+
88
+ backoff = 1
89
+ for attempt in range(1, MAX_RETRIES + 1):
90
+ try:
91
+ resp = requests.get(url, timeout=45)
92
+ if resp.status_code != 200:
93
+ raise RuntimeError(f"Status {resp.status_code}")
94
+ break
95
+ except Exception as e:
96
+ logger.warning(f"Attempt {attempt}/{MAX_RETRIES} failed: {e}")
97
+ if attempt == MAX_RETRIES:
98
+ logger.error("Max retries reached, aborting image fetch")
99
+ return None
100
+ time.sleep(backoff)
101
+ backoff *= BACKOFF_BASE
102
+
103
+ # verify still the active request
104
  if request_id != current_request_id:
105
+ logger.info("Request ID mismatch after fetch; discarding result")
106
  return None
107
 
108
+ # load image
 
 
109
  try:
110
+ image = Image.open(io.BytesIO(resp.content))
111
+ logger.debug(f"Image loaded: {image.size[0]}×{image.size[1]}")
112
+ except UnidentifiedImageError as e:
113
+ logger.error(f"Invalid image data: {e}")
114
+ return None
115
+
116
+ # try to extract prompt metadata from EXIF
117
+ returned_prompt = prompt
118
+ exif = image.info.get("exif", b"")
119
+ if exif:
120
+ try:
121
+ import re, json as _json
122
+ m = re.search(b'{"prompt":.*}', exif)
123
+ if m:
124
+ meta = _json.loads(m.group(0).decode())
125
+ returned_prompt = meta.get("prompt", prompt)
126
+ except Exception as e:
127
+ logger.debug(f"EXIF parse failed: {e}")
 
 
 
 
 
 
128
 
129
+ # ensure output directory
130
  os.makedirs(image_dir, exist_ok=True)
131
+ filename = f"flux_{int(time.time())}.png"
132
+ path = os.path.join(image_dir, filename)
133
+
134
  try:
135
+ image.save(path, format="PNG")
136
+ logger.info(f"Image saved to {path}")
 
 
 
 
 
 
137
  except Exception as e:
138
+ logger.error(f"Failed to save image: {e}")
139
  return None
140
 
141
+ # upload
142
+ image_url = upload_to_imgbb(path, filename) or ""
143
+ return image, path, returned_prompt, image_url
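
A minimal caller sketch for the refactored generate_image() above; the prompt, request ids and the ./images directory are illustrative values, not part of this diff:

    from FLUX import generate_image

    # request_id matches current_request_id, so the call is not treated as superseded.
    result = generate_image(
        prompt="beach party, anime style",
        request_id="req-1",
        current_request_id="req-1",
        image_dir="./images",        # created on demand via os.makedirs
        width=1280, height=720,      # optional; DEFAULT_WIDTH/DEFAULT_HEIGHT are used if omitted
    )
    if result:
        image, path, returned_prompt, image_url = result  # image_url is "" when the ImgBB upload fails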
 
 
app.py CHANGED
@@ -4,8 +4,8 @@ import requests
4
  import logging
5
  import queue
6
  import json
7
- import time
8
  import random
 
9
  from concurrent.futures import ThreadPoolExecutor
10
  from fastapi import FastAPI, Request, HTTPException
11
  from fastapi.responses import PlainTextResponse
@@ -13,7 +13,56 @@ from FLUX import generate_image
13
  from VoiceReply import generate_voice_reply
14
  from polLLM import generate_llm
15
 
16
- # --- Configuration and Client Classes ---
17
 
18
  class BotConfig:
19
  GREEN_API_URL = os.getenv("GREEN_API_URL")
@@ -40,13 +89,13 @@ class BotClient:
40
  def __init__(self, cfg: BotConfig):
41
  self.cfg = cfg
42
  self.session = requests.Session()
43
- logging.basicConfig(level=logging.DEBUG,
44
- format="%(asctime)s [%(levelname)s] %(message)s")
45
 
46
  def send(self, endpoint, payload, files=None, retries=3):
47
- url = (f"{self.cfg.GREEN_API_URL}/waInstance"
48
- f"{self.cfg.GREEN_API_ID_INSTANCE}/{endpoint}/"
49
- f"{self.cfg.GREEN_API_TOKEN}")
 
 
50
  for i in range(1, retries+1):
51
  try:
52
  resp = self.session.post(
@@ -58,7 +107,7 @@ class BotClient:
58
  resp.raise_for_status()
59
  return resp.json()
60
  except requests.RequestException as e:
61
- logging.warning(f"{endpoint} attempt {i}/{retries} failed: {e}")
62
  return {"error":"failed"}
63
 
64
  def send_message(self, message_id, chat_id, text):
@@ -84,13 +133,13 @@ class BotClient:
84
  with open(file_path,"rb") as f:
85
  mime = "image/jpeg" if media_type=="image" else "audio/mpeg"
86
  files = [("file",(os.path.basename(file_path),f,mime))]
87
- return self.send(endpoint,payload,files=files)
88
 
89
- # Validate env
90
  BotConfig.validate()
91
  client = BotClient(BotConfig)
92
 
93
- # --- Threading & Queues ---
94
 
95
  task_queue = queue.Queue()
96
  polls = {}
@@ -98,38 +147,68 @@ executor = ThreadPoolExecutor(max_workers=4)
98
 
99
  def worker():
100
  while True:
101
- t = task_queue.get()
102
  try:
103
- if t["type"] == "image":
104
  _fn_generate_images(
105
- t["message_id"],
106
- t["chat_id"],
107
- t["prompt"],
108
- t.get("num_images", 1)
  )
110
- elif t["type"] == "audio":
111
- _fn_voice_reply(t["message_id"], t["chat_id"], t["prompt"])
112
  except Exception as e:
113
- logging.error(f"Worker error {t}: {e}")
114
  finally:
115
  task_queue.task_done()
116
 
117
  for _ in range(4):
118
  threading.Thread(target=worker, daemon=True).start()
119
 
120
- # --- Tool Functions ---
121
 
122
  def _fn_summarize(mid, cid, text):
123
- s = generate_llm(f"Summarize:\n\n{text}")
124
- _fn_send_text(mid, cid, s)
125
 
126
  def _fn_translate(mid, cid, lang, text):
127
- r = generate_llm(f"Translate to {lang}:\n\n{text}")
128
- _fn_send_text(mid, cid, r)
129
 
130
  def _fn_joke(mid, cid):
131
  try:
132
- j = requests.get("https://official-joke-api.appspot.com/random_joke", timeout=5).json()
133
  joke = f"{j['setup']}\n\n{j['punchline']}"
134
  except:
135
  joke = generate_llm("Tell me a short joke.")
@@ -137,20 +216,20 @@ def _fn_joke(mid, cid):
137
 
138
  def _fn_weather(mid, cid, loc):
139
  raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
140
- r = generate_llm(f"Give a weather report in °C:\n\n{raw}")
141
- _fn_send_text(mid, cid, r)
142
 
143
  def _fn_inspire(mid, cid):
144
- q = generate_llm("Give me a unique, random short inspirational quote.")
145
- _fn_send_text(mid, cid, f"✨ {q}")
146
 
147
  def _fn_meme(mid, cid, txt):
148
- client.send_message(mid, cid, "🎨 Generating meme…")
149
  task_queue.put({
150
- "type": "image",
151
  "message_id": mid,
152
- "chat_id": cid,
153
- "prompt": f"meme: {txt}"
154
  })
155
 
156
  def _fn_poll_create(mid, cid, question, options):
@@ -161,7 +240,7 @@ def _fn_poll_create(mid, cid, question, options):
161
 
162
  def _fn_poll_vote(mid, cid, voter, choice):
163
  poll = polls.get(cid)
164
- if not poll or choice < 1 or choice > len(poll["options"]):
165
  return
166
  prev = poll["voters"].get(voter)
167
  if prev:
@@ -190,66 +269,147 @@ def _fn_poll_end(mid, cid):
190
  )
191
  _fn_send_text(mid, cid, txt)
192
 
193
- def _fn_generate_images(mid, cid, prompt, count=1):
194
  for i in range(1, count+1):
195
  try:
196
- img, path, ret_p, url = generate_image(prompt, mid, mid, BotConfig.IMAGE_DIR)
197
  formatted = "\n\n".join(f"_{p.strip()}_" for p in ret_p.split("\n\n") if p.strip())
198
  cap = f"✨ Image {i}/{count}: {url}\n>{chr(8203)} {formatted}"
199
  client.send_media(mid, cid, path, cap, media_type="image")
200
  os.remove(path)
201
  except Exception as e:
202
- logging.warning(f"Img {i}/{count} failed: {e}")
203
  _fn_send_text(mid, cid, f"😢 Failed to generate image {i}/{count}.")
204
 
205
- def _fn_send_text(mid, cid, message):
206
- # send text...
207
- client.send_message(mid, cid, message)
208
- # ...and queue voice with the same content
209
- task_queue.put({
210
- "type": "audio",
211
- "message_id": mid,
212
- "chat_id": cid,
213
- "prompt": message
214
- })
215
 
216
  def _fn_voice_reply(mid, cid, prompt):
217
- processed = (
218
  f"Just say this exactly as written in a flirty, friendly, playful, "
219
  f"happy and helpful but a little bit clumsy-cute way: {prompt}"
220
  )
221
- res = generate_voice_reply(
222
- processed,
223
- model="openai-audio",
224
- voice="coral",
225
- audio_dir=BotConfig.AUDIO_DIR
226
- )
227
  if res and res[0]:
228
  path, _ = res
229
  client.send_media(mid, cid, path, "", media_type="audio")
230
  os.remove(path)
231
  else:
232
- # fallback to text+voice
233
  _fn_send_text(mid, cid, prompt)
234
 
235
- # --- Function schema & router ---
236
 
237
  FUNCTION_SCHEMA = {
238
- "summarize": {"description":"Summarize text","params":["text"]},
239
- "translate": {"description":"Translate text","params":["lang","text"]},
240
- "joke": {"description":"Tell a joke","params":[]},
241
- "weather": {"description":"Weather report","params":["location"]},
242
  "inspire": {"description":"Inspirational quote","params":[]},
243
- "meme": {"description":"Generate meme","params":["text"]},
244
- "poll_create": {"description":"Create poll","params":["question","options"]},
245
- "poll_vote": {"description":"Vote poll","params":["choice"]},
246
- "poll_results": {"description":"Show poll results","params":[]},
247
- "poll_end": {"description":"End poll","params":[]},
248
- "generate_image": {"description":"Generate images","params":["prompt","count"]},
249
- "send_text": {"description":"Send plain text","params":["message"]}
250
  }
251
 
252
- def route_intent(user_input: str):
253
  sys_prompt = (
254
  "You are Eve. You can either chat or call one of these functions:\n"
255
  + "\n".join(f"- {n}: {f['description']}" for n,f in FUNCTION_SCHEMA.items())
@@ -257,13 +417,18 @@ def route_intent(user_input: str):
257
  "Otherwise return JSON with \"action\":\"send_text\",\"message\":\"...\".\n"
258
  "Return only raw JSON."
259
  )
260
- raw = generate_llm(f"{sys_prompt}\nUser: {user_input}")
261
  try:
262
  return json.loads(raw)
263
  except:
264
  return {"action":"send_text","message":raw}
265
 
266
- # --- FastAPI & Webhook ---
267
 
268
  app = FastAPI()
269
  help_text = (
@@ -276,7 +441,7 @@ help_text = (
276
  "• /inspire\n"
277
  "• /meme <text>\n"
278
  "• /poll <Q>|… / /results / /endpoll\n"
279
- "• /gen <prompt>|<count>\n"
280
  "Otherwise chat or reply to my message to invoke tools."
281
  )
282
 
@@ -287,18 +452,25 @@ async def whatsapp_webhook(request: Request):
287
  raise HTTPException(403, "Unauthorized")
288
 
289
  chat_id = data["senderData"]["chatId"]
290
  if chat_id != BotConfig.BOT_GROUP_CHAT or data["typeWebhook"] != "incomingMessageReceived":
291
  return {"success": True}
292
 
293
  md = data["messageData"]
294
- mid = data["idMessage"]
295
  tmd = md.get("textMessageData") or md.get("extendedTextMessageData")
296
  if not tmd:
297
  return {"success": True}
298
 
299
- body = (tmd.get("textMessage") or tmd.get("text", "")).strip()
300
  ctx = tmd.get("contextInfo", {})
301
 
 
 
 
302
  # Slash commands
303
  low = body.lower()
304
  if low == "/help":
@@ -315,7 +487,7 @@ async def whatsapp_webhook(request: Request):
315
  _fn_joke(mid, chat_id)
316
  return {"success": True}
317
  if low.startswith("/weather "):
318
- _fn_weather(mid, chat_id, body[9:].strip().replace(" ", "+"))
319
  return {"success": True}
320
  if low == "/inspire":
321
  _fn_inspire(mid, chat_id)
@@ -328,7 +500,7 @@ async def whatsapp_webhook(request: Request):
328
  _fn_poll_create(mid, chat_id, parts[0], parts[1:])
329
  return {"success": True}
330
  if chat_id in polls and low.isdigit():
331
- _fn_poll_vote(mid, chat_id, data["senderData"]["sender"], int(low))
332
  return {"success": True}
333
  if low == "/results":
334
  _fn_poll_results(mid, chat_id)
@@ -337,24 +509,29 @@ async def whatsapp_webhook(request: Request):
337
  _fn_poll_end(mid, chat_id)
338
  return {"success": True}
339
  if low.startswith("/gen"):
340
- parts = body[4:].split("|", 1)
341
- pr = parts[0].strip()
342
- ct = int(parts[1]) if len(parts)>1 and parts[1].isdigit() else BotConfig.DEFAULT_IMAGE_COUNT
343
- client.send_message(mid, chat_id, f"✨ Generating {ct} images…")
 
 
344
  task_queue.put({
345
- "type": "image",
346
  "message_id": mid,
347
- "chat_id": chat_id,
348
- "prompt": pr,
349
- "num_images": ct
 
 
350
  })
351
  return {"success": True}
352
 
 
353
  # Skip mentions
354
  if ctx.get("mentionedJidList"):
355
  return {"success": True}
356
 
357
- # Build effective_text (include quoted if replying to bot)
358
  if md.get("typeMessage") == "quotedMessage":
359
  ext = md["extendedTextMessageData"]
360
  quoted = md["quotedMessage"]
@@ -368,42 +545,20 @@ async def whatsapp_webhook(request: Request):
368
  else:
369
  effective = body
370
 
371
- # Route intent across all tools
372
- intent = route_intent(effective)
373
  action = intent.get("action")
374
 
375
- # helper to confirm+enqueue image generation
376
- def _dispatch_generate_image():
377
- prompt = intent["prompt"]
378
- count = intent.get("count", 1)
379
- client.send_message(mid, chat_id, f"✨ Generating {count} image(s)…")
380
- task_queue.put({
381
- "type": "image",
382
- "message_id": mid,
383
- "chat_id": chat_id,
384
- "prompt": prompt,
385
- "num_images": count
386
- })
387
-
388
- dispatch = {
389
- "summarize": lambda: _fn_summarize(mid, chat_id, intent["text"]),
390
- "translate": lambda: _fn_translate(mid, chat_id, intent["lang"], intent["text"]),
391
- "joke": lambda: _fn_joke(mid, chat_id),
392
- "weather": lambda: _fn_weather(mid, chat_id, intent["location"]),
393
- "inspire": lambda: _fn_inspire(mid, chat_id),
394
- "meme": lambda: _fn_meme(mid, chat_id, intent["text"]),
395
- "poll_create": lambda: _fn_poll_create(mid, chat_id, intent["question"], intent["options"]),
396
- "poll_vote": lambda: _fn_poll_vote(mid, chat_id, data["senderData"]["sender"], intent["choice"]),
397
- "poll_results": lambda: _fn_poll_results(mid, chat_id),
398
- "poll_end": lambda: _fn_poll_end(mid, chat_id),
399
- "generate_image": _dispatch_generate_image,
400
- "send_text": lambda: _fn_send_text(mid, chat_id, intent["message"]),
401
- }
402
-
403
- if action in dispatch:
404
- dispatch[action]()
405
  else:
406
- _fn_send_text(mid, chat_id, intent.get("message", "Sorry, I didn't get that."))
 
407
 
408
  return {"success": True}
409
 
 
4
  import logging
5
  import queue
6
  import json
 
7
  import random
8
+ from collections import defaultdict, deque
9
  from concurrent.futures import ThreadPoolExecutor
10
  from fastapi import FastAPI, Request, HTTPException
11
  from fastapi.responses import PlainTextResponse
 
13
  from VoiceReply import generate_voice_reply
14
  from polLLM import generate_llm
15
 
16
+ # --- Logging Setup ---------------------------------------------------------
17
+
18
+ LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
19
+ logger = logging.getLogger("eve_bot")
20
+ logger.setLevel(LOG_LEVEL)
21
+
22
+ handler = logging.StreamHandler()
23
+ formatter = logging.Formatter(
24
+ "%(asctime)s [%(levelname)s] [%(message_id)s/%(sender)s] %(message)s"
25
+ )
26
+ handler.setFormatter(formatter)
27
+
28
+ class ContextFilter(logging.Filter):
29
+ def filter(self, record):
30
+ record.message_id = getattr(record, "message_id", "-")
31
+ record.sender = getattr(record, "sender", "-")
32
+ return True
33
+
34
+ handler.addFilter(ContextFilter())
35
+ logger.handlers = [handler]
36
+
37
+ # Thread-local to carry context through helpers
38
+ _thread_ctx = threading.local()
39
+ def set_thread_context(chat_id, sender, message_id):
40
+ _thread_ctx.chat_id = chat_id
41
+ _thread_ctx.sender = sender
42
+ _thread_ctx.message_id = message_id
43
+
44
+ def get_thread_context():
45
+ return (
46
+ getattr(_thread_ctx, "chat_id", None),
47
+ getattr(_thread_ctx, "sender", None),
48
+ getattr(_thread_ctx, "message_id", None),
49
+ )
50
+
51
+ # --- Conversation History -------------------------------------------------
52
+
53
+ # keep last 20 messages per (chat_id, sender)
54
+ history = defaultdict(lambda: deque(maxlen=20))
55
+
56
+ def record_user_message(chat_id, sender, message):
57
+ history[(chat_id, sender)].append(f"User: {message}")
58
+
59
+ def record_bot_message(chat_id, sender, message):
60
+ history[(chat_id, sender)].append(f"Assistant: {message}")
61
+
62
+ def get_history_text(chat_id, sender):
63
+ return "\n".join(history[(chat_id, sender)])
64
+
65
+ # --- Bot Config & Client --------------------------------------------------
66
 
67
  class BotConfig:
68
  GREEN_API_URL = os.getenv("GREEN_API_URL")
 
89
  def __init__(self, cfg: BotConfig):
90
  self.cfg = cfg
91
  self.session = requests.Session()
 
 
92
 
93
  def send(self, endpoint, payload, files=None, retries=3):
94
+ url = (
95
+ f"{self.cfg.GREEN_API_URL}/waInstance"
96
+ f"{self.cfg.GREEN_API_ID_INSTANCE}/{endpoint}/"
97
+ f"{self.cfg.GREEN_API_TOKEN}"
98
+ )
99
  for i in range(1, retries+1):
100
  try:
101
  resp = self.session.post(
 
107
  resp.raise_for_status()
108
  return resp.json()
109
  except requests.RequestException as e:
110
+ logger.warning(f"{endpoint} attempt {i}/{retries} failed: {e}")
111
  return {"error":"failed"}
112
 
113
  def send_message(self, message_id, chat_id, text):
 
133
  with open(file_path,"rb") as f:
134
  mime = "image/jpeg" if media_type=="image" else "audio/mpeg"
135
  files = [("file",(os.path.basename(file_path),f,mime))]
136
+ return self.send(endpoint, payload, files=files)
137
 
138
+ # Validate env & init client
139
  BotConfig.validate()
140
  client = BotClient(BotConfig)
141
 
142
+ # --- Threading & Queues ---------------------------------------------------
143
 
144
  task_queue = queue.Queue()
145
  polls = {}
 
147
 
148
  def worker():
149
  while True:
150
+ task = task_queue.get()
151
  try:
152
+ if task["type"] == "image":
153
  _fn_generate_images(
154
+ task["message_id"],
155
+ task["chat_id"],
156
+ task["prompt"],
157
+ task.get("num_images", 1),
158
+ task.get("width"),
159
+ task.get("height")
160
+ )
161
+
162
+ elif task["type"] == "audio":
163
+ _fn_voice_reply(
164
+ task["message_id"],
165
+ task["chat_id"],
166
+ task["prompt"]
167
  )
 
 
168
  except Exception as e:
169
+ logger.error(f"Worker error {task}: {e}")
170
  finally:
171
  task_queue.task_done()
172
 
173
  for _ in range(4):
174
  threading.Thread(target=worker, daemon=True).start()
175
 
176
+ # --- Basic Tool Functions -------------------------------------------------
177
+
178
+ def _fn_send_text(mid, cid, message):
179
+ """Send text + record + queue voice."""
180
+ client.send_message(mid, cid, message)
181
+ chat_id, sender, _ = get_thread_context()
182
+ if chat_id and sender:
183
+ record_bot_message(chat_id, sender, message)
184
+ task_queue.put({
185
+ "type": "audio",
186
+ "message_id": mid,
187
+ "chat_id": cid,
188
+ "prompt": message
189
+ })
190
+
191
+ def _fn_send_accept(mid, cid, message):
192
+ """Send text + record, but no voice."""
193
+ client.send_message(mid, cid, message)
194
+ chat_id, sender, _ = get_thread_context()
195
+ if chat_id and sender:
196
+ record_bot_message(chat_id, sender, message)
197
 
198
  def _fn_summarize(mid, cid, text):
199
+ summary = generate_llm(f"Summarize:\n\n{text}")
200
+ _fn_send_text(mid, cid, summary)
201
 
202
  def _fn_translate(mid, cid, lang, text):
203
+ resp = generate_llm(f"Translate to {lang}:\n\n{text}")
204
+ _fn_send_text(mid, cid, resp)
205
 
206
  def _fn_joke(mid, cid):
207
  try:
208
+ j = requests.get(
209
+ "https://official-joke-api.appspot.com/random_joke",
210
+ timeout=5
211
+ ).json()
212
  joke = f"{j['setup']}\n\n{j['punchline']}"
213
  except:
214
  joke = generate_llm("Tell me a short joke.")
 
216
 
217
  def _fn_weather(mid, cid, loc):
218
  raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
219
+ report = generate_llm(f"Give a weather report in °C:\n\n{raw}")
220
+ _fn_send_text(mid, cid, report)
221
 
222
  def _fn_inspire(mid, cid):
223
+ quote = generate_llm("Give me a unique, random short inspirational quote.")
224
+ _fn_send_text(mid, cid, f"✨ {quote}")
225
 
226
  def _fn_meme(mid, cid, txt):
227
+ _fn_send_accept(mid, cid, "🎨 Generating meme…")
228
  task_queue.put({
229
+ "type": "image",
230
  "message_id": mid,
231
+ "chat_id": cid,
232
+ "prompt": f"meme: {txt}"
233
  })
234
 
235
  def _fn_poll_create(mid, cid, question, options):
 
240
 
241
  def _fn_poll_vote(mid, cid, voter, choice):
242
  poll = polls.get(cid)
243
+ if not poll or choice<1 or choice>len(poll["options"]):
244
  return
245
  prev = poll["voters"].get(voter)
246
  if prev:
 
269
  )
270
  _fn_send_text(mid, cid, txt)
271
 
272
+ def _fn_generate_images(mid, cid, prompt, count=1, width=None, height=None):
273
  for i in range(1, count+1):
274
  try:
275
+ img, path, ret_p, url = generate_image(
276
+ prompt, mid, mid, BotConfig.IMAGE_DIR,
277
+ width=width, height=height
278
+ )
279
  formatted = "\n\n".join(f"_{p.strip()}_" for p in ret_p.split("\n\n") if p.strip())
280
  cap = f"✨ Image {i}/{count}: {url}\n>{chr(8203)} {formatted}"
281
  client.send_media(mid, cid, path, cap, media_type="image")
282
  os.remove(path)
283
  except Exception as e:
284
+ logger.warning(f"Img {i}/{count} failed: {e}")
285
  _fn_send_text(mid, cid, f"😢 Failed to generate image {i}/{count}.")
286
 
287
 
288
  def _fn_voice_reply(mid, cid, prompt):
289
+ proc = (
290
  f"Just say this exactly as written in a flirty, friendly, playful, "
291
  f"happy and helpful but a little bit clumsy-cute way: {prompt}"
292
  )
293
+ res = generate_voice_reply(proc, model="openai-audio", voice="coral", audio_dir=BotConfig.AUDIO_DIR)
294
  if res and res[0]:
295
  path, _ = res
296
  client.send_media(mid, cid, path, "", media_type="audio")
297
  os.remove(path)
298
  else:
 
299
  _fn_send_text(mid, cid, prompt)
300
 
301
+ # --- Intent Dispatcher ----------------------------------------------------
302
 
303
  FUNCTION_SCHEMA = {
304
+ "summarize": {"description":"Summarize text", "params":["text"]},
305
+ "translate": {"description":"Translate text", "params":["lang","text"]},
306
+ "joke": {"description":"Tell a joke", "params":[]},
307
+ "weather": {"description":"Weather report", "params":["location"]},
308
  "inspire": {"description":"Inspirational quote","params":[]},
309
+ "meme": {"description":"Generate meme", "params":["text"]},
310
+ "poll_create": {"description":"Create poll", "params":["question","options"]},
311
+ "poll_vote": {"description":"Vote poll", "params":["choice"]},
312
+ "poll_results": {"description":"Show poll results", "params":[]},
313
+ "poll_end": {"description":"End poll", "params":[]},
314
+ "generate_image": {"description":"Generate images", "params":["prompt","count"]},
315
+ "send_text": {"description":"Send plain text", "params":["message"]},
316
  }
317
 
318
+ class IntentDispatcher:
319
+ def __init__(self):
320
+ self.handlers = {}
321
+
322
+ def register(self, action):
323
+ def decorator(fn):
324
+ self.handlers[action] = fn
325
+ return fn
326
+ return decorator
327
+
328
+ def dispatch(self, action, mid, cid, intent):
329
+ fn = self.handlers.get(action)
330
+ if not fn:
331
+ return False
332
+ fn(mid, cid, intent)
333
+ return True
334
+
335
+ dispatcher = IntentDispatcher()
336
+
337
+ def validate_intent(action, intent):
338
+ schema = FUNCTION_SCHEMA.get(action)
339
+ if not schema:
340
+ return False
341
+ for p in schema["params"]:
342
+ if p not in intent:
343
+ logger.warning(f"Missing param '{p}' for action '{action}'")
344
+ return False
345
+ return True
346
+
347
+ @dispatcher.register("summarize")
348
+ def _h_summarize(mid, cid, intent):
349
+ _fn_summarize(mid, cid, intent["text"])
350
+
351
+ @dispatcher.register("translate")
352
+ def _h_translate(mid, cid, intent):
353
+ _fn_translate(mid, cid, intent["lang"], intent["text"])
354
+
355
+ @dispatcher.register("joke")
356
+ def _h_joke(mid, cid, intent):
357
+ _fn_joke(mid, cid)
358
+
359
+ @dispatcher.register("weather")
360
+ def _h_weather(mid, cid, intent):
361
+ _fn_weather(mid, cid, intent["location"])
362
+
363
+ @dispatcher.register("inspire")
364
+ def _h_inspire(mid, cid, intent):
365
+ _fn_inspire(mid, cid)
366
+
367
+ @dispatcher.register("meme")
368
+ def _h_meme(mid, cid, intent):
369
+ _fn_meme(mid, cid, intent["text"])
370
+
371
+ @dispatcher.register("poll_create")
372
+ def _h_poll_create(mid, cid, intent):
373
+ _fn_poll_create(mid, cid, intent["question"], intent["options"])
374
+
375
+ @dispatcher.register("poll_vote")
376
+ def _h_poll_vote(mid, cid, intent):
377
+ _fn_poll_vote(mid, cid, intent["voter"], intent["choice"])
378
+
379
+ @dispatcher.register("poll_results")
380
+ def _h_poll_results(mid, cid, intent):
381
+ _fn_poll_results(mid, cid)
382
+
383
+ @dispatcher.register("poll_end")
384
+ def _h_poll_end(mid, cid, intent):
385
+ _fn_poll_end(mid, cid)
386
+
387
+ @dispatcher.register("generate_image")
388
+ def _h_generate_image(mid, cid, intent):
389
+ prompt = intent["prompt"]
390
+ count = intent.get("count", 1)
391
+ width = intent.get("width")
392
+ height = intent.get("height")
393
+ _fn_send_accept(mid, cid, f"✨ Generating {count} image(s)…")
394
+ task_queue.put({
395
+ "type": "image",
396
+ "message_id": mid,
397
+ "chat_id": cid,
398
+ "prompt": prompt,
399
+ "num_images": count,
400
+ "width": width,
401
+ "height": height
402
+ })
403
+
404
+
405
+ @dispatcher.register("send_text")
406
+ def _h_send_text(mid, cid, intent):
407
+ _fn_send_text(mid, cid, intent["message"])
408
+
409
+ # --- Intent Routing --------------------------------------------------------
410
+
411
+ def route_intent(user_input, chat_id, sender):
412
+ history_text = get_history_text(chat_id, sender)
413
  sys_prompt = (
414
  "You are Eve. You can either chat or call one of these functions:\n"
415
  + "\n".join(f"- {n}: {f['description']}" for n,f in FUNCTION_SCHEMA.items())
 
417
  "Otherwise return JSON with \"action\":\"send_text\",\"message\":\"...\".\n"
418
  "Return only raw JSON."
419
  )
420
+ prompt = (
421
+ f"{sys_prompt}\n\n"
422
+ f"Conversation so far:\n{history_text}\n\n"
423
+ f"User: {user_input}"
424
+ )
425
+ raw = generate_llm(prompt)
426
  try:
427
  return json.loads(raw)
428
  except:
429
  return {"action":"send_text","message":raw}
430
 
431
+ # --- FastAPI & Webhook ----------------------------------------------------
432
 
433
  app = FastAPI()
434
  help_text = (
 
441
  "• /inspire\n"
442
  "• /meme <text>\n"
443
  "• /poll <Q>|… / /results / /endpoll\n"
444
+ "• /gen <prompt>|<count>|<width>|<height>\n"
445
  "Otherwise chat or reply to my message to invoke tools."
446
  )
447
 
 
452
  raise HTTPException(403, "Unauthorized")
453
 
454
  chat_id = data["senderData"]["chatId"]
455
+ sender = data["senderData"]["sender"]
456
+ mid = data["idMessage"]
457
+ set_thread_context(chat_id, sender, mid)
458
+ logger.debug("Received webhook")
459
+
460
  if chat_id != BotConfig.BOT_GROUP_CHAT or data["typeWebhook"] != "incomingMessageReceived":
461
  return {"success": True}
462
 
463
  md = data["messageData"]
 
464
  tmd = md.get("textMessageData") or md.get("extendedTextMessageData")
465
  if not tmd:
466
  return {"success": True}
467
 
468
+ body = (tmd.get("textMessage") or tmd.get("text","")).strip()
469
  ctx = tmd.get("contextInfo", {})
470
 
471
+ # record user message
472
+ record_user_message(chat_id, sender, body)
473
+
474
  # Slash commands
475
  low = body.lower()
476
  if low == "/help":
 
487
  _fn_joke(mid, chat_id)
488
  return {"success": True}
489
  if low.startswith("/weather "):
490
+ _fn_weather(mid, chat_id, body[9:].strip().replace(" ","+"))
491
  return {"success": True}
492
  if low == "/inspire":
493
  _fn_inspire(mid, chat_id)
 
500
  _fn_poll_create(mid, chat_id, parts[0], parts[1:])
501
  return {"success": True}
502
  if chat_id in polls and low.isdigit():
503
+ _fn_poll_vote(mid, chat_id, sender, int(low))
504
  return {"success": True}
505
  if low == "/results":
506
  _fn_poll_results(mid, chat_id)
 
509
  _fn_poll_end(mid, chat_id)
510
  return {"success": True}
511
  if low.startswith("/gen"):
512
+ parts = body[4:].split("|")
513
+ pr = parts[0].strip()
514
+ ct = int(parts[1]) if len(parts) > 1 and parts[1].isdigit() else BotConfig.DEFAULT_IMAGE_COUNT
515
+ width = int(parts[2]) if len(parts) > 2 and parts[2].isdigit() else None
516
+ height = int(parts[3]) if len(parts) > 3 and parts[3].isdigit() else None
517
+ _fn_send_accept(mid, chat_id, f"✨ Generating {ct} image(s)…")
518
  task_queue.put({
519
+ "type": "image",
520
  "message_id": mid,
521
+ "chat_id": chat_id,
522
+ "prompt": pr,
523
+ "num_images": ct,
524
+ "width": width,
525
+ "height": height
526
  })
527
  return {"success": True}
528
 
529
+
530
  # Skip mentions
531
  if ctx.get("mentionedJidList"):
532
  return {"success": True}
533
 
534
+ # Build effective text (handle quoted replies to the bot)
535
  if md.get("typeMessage") == "quotedMessage":
536
  ext = md["extendedTextMessageData"]
537
  quoted = md["quotedMessage"]
 
545
  else:
546
  effective = body
547
 
548
+ # Route intent & dispatch
549
+ intent = route_intent(effective, chat_id, sender)
550
  action = intent.get("action")
551
 
552
+ if action in FUNCTION_SCHEMA:
553
+ if not validate_intent(action, intent):
554
+ _fn_send_text(mid, chat_id, f"❗ Missing parameter(s) for `{action}`.")
555
+ else:
556
+ dispatched = dispatcher.dispatch(action, mid, chat_id, intent)
557
+ if not dispatched:
558
+ _fn_send_text(mid, chat_id, intent.get("message","Sorry, I couldn't handle that."))
559
  else:
560
+ # fallback chat
561
+ _fn_send_text(mid, chat_id, intent.get("message","Sorry, I didn't get that."))
562
 
563
  return {"success": True}
564
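
For reference, the contract between route_intent() and the dispatcher above: the LLM must return raw JSON naming an action from FUNCTION_SCHEMA plus its params, validate_intent() checks those params, and the registered handler runs. A sketch with an illustrative payload and ids (the names used are the ones defined in app.py above):

    intent = {"action": "generate_image", "prompt": "a red fox", "count": 2}  # example LLM output
    mid, chat_id = "msg-1", "group-1"                                         # illustrative ids
    action = intent.get("action")
    if action in FUNCTION_SCHEMA and validate_intent(action, intent):
        dispatcher.dispatch(action, mid, chat_id, intent)  # runs the registered _h_generate_image handler
    else:
        _fn_send_text(mid, chat_id, intent.get("message", "Sorry, I didn't get that."))
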
 
config.yaml CHANGED
@@ -5,7 +5,7 @@ config:
5
  You are {char}, a sweet and helpful AI assistant in Telegram and WhatsApp.
6
  You generate images, voice and text replies, and support these commands:
7
  • /help — list all commands
8
- • /gen <prompt>|<count> — generate <count> images (default 4)
9
  • /summarize <text> — get a concise summary
10
  • /translate <lang>|<text> — translate text
11
  • /joke — tell a short joke
@@ -17,67 +17,4 @@ config:
17
  • /endpoll — end the poll
18
  Use a concise, friendly tone. If a command is malformed, gently ask the user to correct it.
19
  For any other message, you can either chat normally or invoke one of your tools.
20
- char: Eve
21
-
22
- # Tell your LLM client to expose these functions
23
- function_calling: auto
24
-
25
- functions:
26
- - name: generate_image
27
- description: Generate one or more images from a prompt.
28
- parameters:
29
- type: object
30
- properties:
31
- prompt:
32
- type: string
33
- description: The text prompt to generate an image for
34
- count:
35
- type: integer
36
- description: Number of images to generate
37
- required:
38
- - prompt
39
-
40
- - name: send_text
41
- description: Send a plain text reply back to the user.
42
- parameters:
43
- type: object
44
- properties:
45
- message:
46
- type: string
47
- description: The text content to send
48
- required:
49
- - message
50
-
51
- bot:
52
- default_image_count: 4
53
- skip:
54
- mentions: true
55
- quotes: true
56
-
57
- image:
58
- model: flux
59
- width: 1920
60
- height: 1080
61
- enhance: true
62
- safe: false
63
- nologo: true
64
-
65
- voice:
66
- model: openai-audio
67
- voice: coral
68
-
69
-
70
- SD:
71
- steps: 30
72
- negative_prompt: low quality, lowres, low details, oversaturated, undersaturated, underexposed,blurry, grainy, morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, jpeg artifacts, out of focus, glitch, duplicate,bad body parts, bad anatomy, bad hands, bad face, bad eyes, bad mouth, bad ears, bad legs, ugly face, ugly eyes, watermark, text, error, missing fingers
73
- width: 1024
74
- height: 1024
75
- sampler_name: DPM++ 2M
76
- cfg_scale: 7.0
77
- imgbb_upload: false
78
-
79
- app:
80
- whatsapp_bot_enabled: true
81
- telegram_bot_enabled: true
82
- version: "0.6 Beta"
83
- debug: true
 
5
  You are {char}, a sweet and helpful AI assistant in Telegram and WhatsApp.
6
  You generate images, voice and text replies, and support these commands:
7
  • /help — list all commands
8
+ • /gen <prompt>|<count>|<width>|<height> — generate <count> images (default 4)
9
  • /summarize <text> — get a concise summary
10
  • /translate <lang>|<text> — translate text
11
  • /joke — tell a short joke
 
17
  • /endpoll — end the poll
18
  Use a concise, friendly tone. If a command is malformed, gently ask the user to correct it.
19
  For any other message, you can either chat normally or invoke one of your tools.
20
+ char: Eve
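
With the rest of the file removed, only system_prompt and char remain required; polLLM.py (next diff) reads every other key with .get() and falls back to built-in defaults. A sketch of that lookup, assuming the same config.yaml structure; the optional key names are polLLM.py's, not part of this diff:

    from utils import read_config

    llm_cfg = read_config()["llm"]                    # parsed from config.yaml
    system = llm_cfg["system_prompt"].replace("{char}", llm_cfg.get("char", "Eve"))
    model = llm_cfg.get("model", "openai-large")      # optional; default used when the key is absent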
 
 
polLLM.py CHANGED
@@ -1,47 +1,86 @@
1
  import os
2
  from openai import OpenAI
3
  from dotenv import load_dotenv
4
  from utils import read_config
5
- import random
6
- import time # Import time for retry delay
7
 
 
8
  load_dotenv()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
 
10
  client = OpenAI(
11
- base_url="https://text.pollinations.ai/openai",
12
- api_key="YOUR_API_KEY" # Add if needed
13
  )
14
 
15
- def pre_process():
16
- # Read the configuration and substitute the character placeholder
17
- config = read_config()
18
- system_prompt = config['llm']['system_prompt']
19
- char = config['llm']['char']
20
- return system_prompt.replace("{char}", char)
21
-
22
- def generate_llm(prompt, model="openai-large", max_tokens=8000):
23
- system_prompt = pre_process()
24
-
25
- while True: # Keep retrying indefinitely
26
  try:
27
- # Use OpenAI's ChatCompletion API
28
- randomSeed = random.randint(0, 9999999)
29
- response = client.chat.completions.create(
30
- model=model,
31
- messages=[
32
- {"role": "system", "content": system_prompt},
33
- {"role": "user", "content": prompt}
34
- ],
35
- max_tokens=max_tokens,
36
- seed=randomSeed
37
  )
38
- # Return the generated text
39
- return response.choices[0].message.content.strip()
 
40
  except Exception as e:
41
- print(f"Error occurred: {str(e)}. Retrying in 5 seconds...")
42
- time.sleep(5) # Wait before retrying
43
 
44
- # Example usage (can be removed or commented out in production):
45
  if __name__ == "__main__":
46
- sample_prompt = "search for free image generation api"
47
- print("Response:", generate_llm(sample_prompt))
 
1
  import os
2
+ import time
3
+ import random
4
+ import logging
5
  from openai import OpenAI
6
  from dotenv import load_dotenv
7
  from utils import read_config
 
 
8
 
9
+ # --- Load environment & config ---
10
  load_dotenv()
11
+ _config = read_config()["llm"]
12
+
13
+ # --- Logging setup ---
14
+ LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
15
+ logger = logging.getLogger("polLLM")
16
+ logger.setLevel(LOG_LEVEL)
17
+ handler = logging.StreamHandler()
18
+ handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"))
19
+ logger.addHandler(handler)
20
+
21
+ # --- LLM settings from config.yaml ---
22
+ _DEFAULT_MODEL = _config.get("model", "openai-large")
23
+ _SYSTEM_TEMPLATE = _config.get("system_prompt", "")
24
+ _CHAR = _config.get("char", "Eve")
25
+ _DEFAULT_MAX_TOKENS = _config.get("max_tokens", 8000)
+ _DEFAULT_TEMPERATURE = _config.get("temperature", 1.0)  # referenced in generate_llm() below but not defined in this commit; default assumed
26
 
27
+ # --- OpenAI client init ---
28
  client = OpenAI(
29
+ base_url = os.getenv("OPENAI_BASE_URL", "https://text.pollinations.ai/openai"),
30
+ api_key = os.getenv("OPENAI_API_KEY", "")
31
  )
32
 
33
+ def _build_system_prompt() -> str:
34
+ """
35
+ Substitute {char} into the system prompt template.
36
+ """
37
+ return _SYSTEM_TEMPLATE.replace("{char}", _CHAR)
38
+
39
+ def generate_llm(
40
+ prompt: str,
41
+ model: str = None,
42
+ max_tokens: int = None,
43
+ temperature: float = None,
44
+ ) -> str:
45
+ """
46
+ Send a chat-completion request to the LLM, with retries and backoff.
47
+ Reads defaults from config.yaml, but can be overridden per-call.
48
+ """
49
+ model = model or _DEFAULT_MODEL
50
+ max_tokens = max_tokens or _DEFAULT_MAX_TOKENS
51
+ temperature = temperature if temperature is not None else _DEFAULT_TEMPERATURE
52
+
53
+ system_prompt = _build_system_prompt()
54
+ messages = [
55
+ {"role": "system", "content": system_prompt},
56
+ {"role": "user", "content": prompt},
57
+ ]
58
+
59
+ backoff = 1
60
+ for attempt in range(1, 6):
61
  try:
62
+ seed = random.randint(0, 2**31 - 1)
63
+ logger.debug(f"LLM call attempt={attempt}, model={model}, seed={seed}")
64
+ resp = client.chat.completions.create(
65
+ model = model,
66
+ messages = messages,
67
+ max_tokens = max_tokens,
68
+ temperature = temperature,
69
+ seed = seed,
 
 
70
  )
71
+ text = resp.choices[0].message.content.strip()
72
+ logger.debug("LLM response received")
73
+ return text
74
  except Exception as e:
75
+ logger.error(f"LLM error on attempt {attempt}: {e}")
76
+ if attempt < 5:
77
+ time.sleep(backoff)
78
+ backoff *= 2
79
+ else:
80
+ logger.critical("LLM failed after 5 attempts, raising")
81
+ raise
82
 
83
+ # Example local test
84
  if __name__ == "__main__":
85
+ logger.info("Testing generate_llm() with a sample prompt")
86
+ print(generate_llm("Say hello in a poetic style."))
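
A small usage sketch for the new generate_llm() API above; the prompts and the override value are illustrative:

    from polLLM import generate_llm

    # model, max_tokens and temperature default to the config.yaml values,
    # and each failed call is retried with exponential backoff (up to 5 attempts).
    reply = generate_llm("Summarize:\n\nFastAPI is a small ASGI web framework.")
    joke = generate_llm("Tell me a short joke.", max_tokens=100)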