"""Thin client for Pollinations' text-generation API.

Reads a system-prompt template from the project config, substitutes the
character placeholder, and issues GET requests against the text endpoint.
"""
import os
import random
import urllib.parse

import requests
from dotenv import load_dotenv

from utils import read_config

load_dotenv()


def pre_process():
    """Return the configured system prompt with "{char}" replaced.

    Reads ``llm.system_prompt`` and ``llm.char`` from the project config
    (via ``read_config``) and substitutes the character name into the
    ``{char}`` placeholder.
    """
    config = read_config()
    system_prompt = config['llm']['system_prompt']
    char = config['llm']['char']
    return system_prompt.replace("{char}", char)


def generate_llm(prompt):
    """Generate a completion for *prompt* via Pollinations' text API.

    Returns the generated text stripped of surrounding whitespace, or an
    ``"Error: ..."`` string if the HTTP request fails (the function never
    raises for transport/HTTP errors — callers rely on the string contract).
    """
    system_prompt = pre_process()

    # The user prompt travels in the URL *path*, so every reserved character
    # must be percent-encoded. The default safe='/' would leave slashes
    # unescaped and split the prompt into bogus path segments.
    encoded_prompt = urllib.parse.quote(prompt, safe="")

    # A fresh random seed makes repeated identical prompts yield varied
    # responses instead of a cached one.
    seed = random.randint(0, 999999)

    url = f"https://text.pollinations.ai/{encoded_prompt}"
    # Let requests build and percent-encode the query string; hand-rolled
    # f-string assembly is error-prone for values like the system prompt.
    params = {
        "model": "openai-large",
        "seed": seed,
        "private": "true",
        "system": system_prompt,
    }
    try:
        response = requests.get(url, params=params, timeout=30)
        response.raise_for_status()
        return response.text.strip()
    except requests.RequestException as e:
        # Only the request itself sits in the try block, so narrow the
        # handler to transport/HTTP failures instead of swallowing every
        # Exception; keep the original "Error: ..." string contract.
        return f"Error: {str(e)}"


# Example usage (can be removed or commented out in production):
if __name__ == "__main__":
    sample_prompt = "What is the capital of France?"
    print("Response:", generate_llm(sample_prompt))