import os
from openai import OpenAI
class LLMClient:
    """Thin wrapper around the OpenAI SDK for OpenAI-compatible chat APIs.

    Every currently supported provider speaks the OpenAI chat-completions
    protocol, so a single ``openai.OpenAI`` client is reused with a
    provider-specific base URL and API key.
    """

    # Provider -> (API-key env var, base URL).
    # ``None`` means use the SDK defaults (OPENAI_API_KEY, api.openai.com).
    # Add other providers here later.
    _OPENAI_COMPATIBLE = {
        "openai": None,
        "gemini": (
            "GEMINI_API_KEY",
            "https://generativelanguage.googleapis.com/v1beta/openai/",
        ),
        "deepseek": ("DEEPSEEK_API_KEY", "https://api.deepseek.com/v1"),
    }

    def __init__(self, provider: str = "openai", model: str = "gpt-4o-mini"):
        """Create a client for *provider*, using *model* for completions.

        Raises:
            ValueError: If *provider* is not supported.
        """
        self.provider = provider
        self.model = model
        self.client = self._initialize_client()

    def _initialize_client(self):
        """Build the underlying SDK client for ``self.provider``.

        Raises:
            ValueError: If the provider is not in ``_OPENAI_COMPATIBLE``.
        """
        try:
            config = self._OPENAI_COMPATIBLE[self.provider]
        except KeyError:
            raise ValueError(
                f"Unsupported LLM provider: {self.provider}"
            ) from None
        if config is None:
            # Default OpenAI endpoint; the SDK reads OPENAI_API_KEY itself.
            return OpenAI()
        env_var, base_url = config
        return OpenAI(api_key=os.getenv(env_var), base_url=base_url)

    def chat_completion(self, messages: list):
        """Send *messages* to the provider's chat-completions endpoint.

        Args:
            messages: OpenAI-style message dicts (``role``/``content`` keys
                presumed by the downstream API — not validated here).

        Returns:
            The SDK's chat-completion response object.

        Raises:
            ValueError: If ``self.provider`` was changed to an unsupported
                value after construction.
        """
        # Re-check membership so a mutated ``self.provider`` fails loudly
        # instead of silently using a stale client (mirrors the original
        # per-call provider check, without duplicating the provider list).
        if self.provider not in self._OPENAI_COMPATIBLE:
            raise ValueError(f"Unsupported LLM provider: {self.provider}")
        return self.client.chat.completions.create(
            model=self.model,
            messages=messages,
        )