Spaces:
Running
Running
Chandima Prabhath
Refactor trivia and weather functions; remove trivia-related code and update help text
a2104ab
import os
import random

from dotenv import load_dotenv
from openai import OpenAI

from utils import read_config

# Pull variables from a local .env file so secrets stay out of source control.
load_dotenv()

# Client for the Pollinations OpenAI-compatible endpoint.
# NOTE(review): the key is optional for this endpoint; we read it from the
# environment (loaded above) instead of hardcoding it — confirm the variable
# name matches the deployment's .env.
client = OpenAI(
    base_url="https://text.pollinations.ai/openai",
    api_key=os.getenv("OPENAI_API_KEY", "YOUR_API_KEY"),
)
def pre_process():
    """Build the system prompt from config.

    Reads the app configuration and substitutes the ``{char}`` placeholder
    in ``llm.system_prompt`` with the configured character name.

    Returns:
        str: the system prompt with the character name filled in.
    """
    config = read_config()
    system_prompt = config['llm']['system_prompt']
    char = config['llm']['char']
    return system_prompt.replace("{char}", char)
def generate_llm(prompt, model="openai-large", max_tokens=100):
    """Generate a chat completion for ``prompt`` using the module-level client.

    Args:
        prompt: the user message to send.
        model: model identifier passed to the API (default ``"openai-large"``).
        max_tokens: maximum tokens in the completion.

    Returns:
        str: the generated text (stripped), or an ``"Error: ..."`` string if
        the API call fails — callers receive text either way, never an
        exception.
    """
    system_prompt = pre_process()
    try:
        # A fresh random seed per call so repeated identical prompts still
        # get varied completions.
        seed = random.randint(0, 9999999)
        response = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": prompt},
            ],
            max_tokens=max_tokens,
            seed=seed,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        # Deliberate best-effort: surface the failure as text rather than
        # propagating, so the calling bot/UI can display it.
        return f"Error: {str(e)}"
# Example usage (can be removed or commented out in production):
if __name__ == "__main__":
    # Plain literal — the braces here are literal JSON text, so no f-string
    # is needed (the original used escaped {{ }} in an f-string to produce
    # exactly this same string).
    sample_prompt = 'Generate a unique trivia Q&A in JSON: {"question":"...","answer":"..."}'
    print("Response:", generate_llm(sample_prompt))