import os
from openai import OpenAI
from dotenv import load_dotenv
from utils import read_config
import random

load_dotenv()

client = OpenAI(
    base_url="https://text.pollinations.ai/openai",
    # Key is read from the environment (populated by load_dotenv above); the
    # variable name is an assumption, and Pollinations may not require a key.
    api_key=os.getenv("OPENAI_API_KEY", "YOUR_API_KEY"),
)

def pre_process():
    # Read the configuration and substitute the character placeholder
    config = read_config()
    system_prompt = config['llm']['system_prompt']
    char = config['llm']['char']
    return system_prompt.replace("{char}", char)
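
# Reference sketch (assumption): utils.read_config is imported above but not
# defined in this file. Judging from the keys pre_process() accesses, a
# minimal YAML-backed implementation could look like the commented sketch
# below; the "config.yaml" file name and the PyYAML dependency are
# assumptions, not something this module confirms.
#
#   import yaml
#
#   def read_config(path="config.yaml"):
#       # Expected shape of config.yaml:
#       #   llm:
#       #     system_prompt: "You are {char}, a concise trivia host."
#       #     char: "QuizBot"
#       with open(path, "r", encoding="utf-8") as f:
#           return yaml.safe_load(f)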

def generate_llm(prompt, model="openai-large", max_tokens=100):
    system_prompt = pre_process()
    
    try:
        # Call the Chat Completions API with a fresh random seed so repeated
        # calls with the same prompt produce varied output
        random_seed = random.randint(0, 9999999)
        response = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": prompt}
            ],
            max_tokens=max_tokens,
            seed=random_seed
        )
        # Return the generated text
        return response.choices[0].message.content.strip()
    except Exception as e:
        return f"Error: {str(e)}"

# Example usage (remove or comment out in production):
if __name__ == "__main__":
    sample_prompt = 'Generate a unique trivia Q&A in JSON: {"question":"...","answer":"..."}'
    reply = generate_llm(sample_prompt)
    print("Response:", reply)