""" |
|
Configuration Settings for Smart Auto-Complete |
|
Manages environment variables and application configuration |
|
""" |
|
|
|
import logging |
|
import os |
|
from typing import Any, Dict, Optional |
|
|
|
from dotenv import load_dotenv |
|
|
|
|
|
load_dotenv() |
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
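
# Illustrative .env entries for the variables this module reads (placeholder
# values shown; any variable left unset falls back to the defaults defined in
# AppSettings below):
#
#   OPENAI_API_KEY=<your-openai-key>
#   ANTHROPIC_API_KEY=<your-anthropic-key>
#   DEFAULT_PROVIDER=openai
#   MAX_SUGGESTIONS=5
#   CACHE_ENABLED=true
#   LOG_LEVEL=INFO
#   SERVER_PORT=7860
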

class AppSettings:
    """
    Application settings manager

    Loads configuration from environment variables with sensible defaults
    """

    def __init__(self):
        """Initialize settings from environment variables"""

        # API keys and default provider
        self.OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "").strip()
        self.ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "").strip()
        self.DEFAULT_PROVIDER = os.getenv("DEFAULT_PROVIDER", "openai").lower()

        # Core application limits
        self.MAX_SUGGESTIONS = int(os.getenv("MAX_SUGGESTIONS", "5"))
        self.DEBOUNCE_DELAY = int(os.getenv("DEBOUNCE_DELAY", "300"))
        self.CACHE_TTL = int(os.getenv("CACHE_TTL", "3600"))
        self.MAX_INPUT_LENGTH = int(os.getenv("MAX_INPUT_LENGTH", "1000"))

        # Cache settings
        self.CACHE_MAX_SIZE = int(os.getenv("CACHE_MAX_SIZE", "1000"))
        self.CACHE_ENABLED = os.getenv("CACHE_ENABLED", "true").lower() == "true"

        # Logging settings
        self.LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
        self.LOG_FORMAT = os.getenv(
            "LOG_FORMAT", "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )

        # Rate limiting
        self.RATE_LIMIT_REQUESTS_PER_MINUTE = int(
            os.getenv("RATE_LIMIT_REQUESTS_PER_MINUTE", "60")
        )
        self.RATE_LIMIT_ENABLED = (
            os.getenv("RATE_LIMIT_ENABLED", "true").lower() == "true"
        )

        # Model selection per provider
        self.OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
        self.ANTHROPIC_MODEL = os.getenv("ANTHROPIC_MODEL", "claude-3-haiku-20240307")

        # Sampling temperature per context
        self.TEMPERATURE_EMAIL = float(os.getenv("TEMPERATURE_EMAIL", "0.6"))
        self.TEMPERATURE_CREATIVE = float(os.getenv("TEMPERATURE_CREATIVE", "0.8"))
        self.TEMPERATURE_LINKEDIN = float(os.getenv("TEMPERATURE_LINKEDIN", "0.7"))

        # Default token budget per context
        self.DEFAULT_TOKENS_EMAIL = int(os.getenv("DEFAULT_TOKENS_EMAIL", "200"))
        self.DEFAULT_TOKENS_CREATIVE = int(os.getenv("DEFAULT_TOKENS_CREATIVE", "250"))
        self.DEFAULT_TOKENS_LINKEDIN = int(os.getenv("DEFAULT_TOKENS_LINKEDIN", "200"))

        # UI settings
        self.UI_THEME = os.getenv("UI_THEME", "soft")
        self.UI_TITLE = os.getenv("UI_TITLE", "π Smart Auto-Complete")
        self.UI_DESCRIPTION = os.getenv(
            "UI_DESCRIPTION", "Intelligent text completion powered by AI"
        )

        # Server settings
        self.SERVER_HOST = os.getenv("SERVER_HOST", "0.0.0.0")
        self.SERVER_PORT = int(os.getenv("SERVER_PORT", "7860"))
        self.SERVER_SHARE = os.getenv("SERVER_SHARE", "false").lower() == "true"

        # Security and concurrency
        self.ENABLE_INPUT_SANITIZATION = (
            os.getenv("ENABLE_INPUT_SANITIZATION", "true").lower() == "true"
        )
        self.MAX_CONCURRENT_REQUESTS = int(os.getenv("MAX_CONCURRENT_REQUESTS", "10"))

        # Development and analytics flags
        self.DEBUG_MODE = os.getenv("DEBUG_MODE", "false").lower() == "true"
        self.ENABLE_ANALYTICS = os.getenv("ENABLE_ANALYTICS", "true").lower() == "true"

        self._validate_settings()

        logger.info("Application settings loaded successfully")

    def _validate_settings(self):
        """Validate configuration settings"""
        errors = []
        warnings = []

        if not self.OPENAI_API_KEY and not self.ANTHROPIC_API_KEY:
            errors.append(
                "No API keys configured. Set OPENAI_API_KEY or ANTHROPIC_API_KEY"
            )

        if self.DEFAULT_PROVIDER not in ["openai", "anthropic"]:
            warnings.append(
                f"Invalid DEFAULT_PROVIDER: {self.DEFAULT_PROVIDER}. Using 'openai'"
            )
            self.DEFAULT_PROVIDER = "openai"

        if not (1 <= self.MAX_SUGGESTIONS <= 20):
            warnings.append(
                f"MAX_SUGGESTIONS should be 1-20, got {self.MAX_SUGGESTIONS}"
            )
            self.MAX_SUGGESTIONS = max(1, min(20, self.MAX_SUGGESTIONS))

        if not (100 <= self.DEBOUNCE_DELAY <= 2000):
            warnings.append(
                f"DEBOUNCE_DELAY should be 100-2000ms, got {self.DEBOUNCE_DELAY}"
            )
            self.DEBOUNCE_DELAY = max(100, min(2000, self.DEBOUNCE_DELAY))

        if not (100 <= self.MAX_INPUT_LENGTH <= 10000):
            warnings.append(
                f"MAX_INPUT_LENGTH should be 100-10000, got {self.MAX_INPUT_LENGTH}"
            )
            self.MAX_INPUT_LENGTH = max(100, min(10000, self.MAX_INPUT_LENGTH))

        for temp_attr in [
            "TEMPERATURE_EMAIL",
            "TEMPERATURE_CREATIVE",
            "TEMPERATURE_LINKEDIN",
        ]:
            temp_value = getattr(self, temp_attr)
            if not (0.0 <= temp_value <= 2.0):
                warnings.append(f"{temp_attr} should be 0.0-2.0, got {temp_value}")
                setattr(self, temp_attr, max(0.0, min(2.0, temp_value)))

        if errors:
            for error in errors:
                logger.error(f"Configuration error: {error}")

        if warnings:
            for warning in warnings:
                logger.warning(f"Configuration warning: {warning}")
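
    # For instance, if the environment sets MAX_SUGGESTIONS=50, the check above
    # logs a configuration warning and clamps the value back to 20.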

    def validate_api_keys(self) -> bool:
        """
        Validate that at least one API key is properly configured

        Returns:
            True if at least one valid API key is available
        """
        from src.utils import validate_api_key

        openai_valid = self.OPENAI_API_KEY and validate_api_key(
            self.OPENAI_API_KEY, "openai"
        )
        anthropic_valid = self.ANTHROPIC_API_KEY and validate_api_key(
            self.ANTHROPIC_API_KEY, "anthropic"
        )

        # Coerce to bool so an empty key string is never returned in place of False
        return bool(openai_valid or anthropic_valid)

    def get_context_config(self, context: str) -> Dict[str, Any]:
        """
        Get configuration for a specific context

        Args:
            context: Context name (email, creative, linkedin); unknown contexts
                fall back to the linkedin configuration

        Returns:
            Dictionary with context-specific configuration
        """
        context_configs = {
            "email": {
                "temperature": self.TEMPERATURE_EMAIL,
                "default_tokens": self.DEFAULT_TOKENS_EMAIL,
                "model_preference": "openai",
            },
            "creative": {
                "temperature": self.TEMPERATURE_CREATIVE,
                "default_tokens": self.DEFAULT_TOKENS_CREATIVE,
                "model_preference": "anthropic",
            },
            "linkedin": {
                "temperature": self.TEMPERATURE_LINKEDIN,
                "default_tokens": self.DEFAULT_TOKENS_LINKEDIN,
                "model_preference": self.DEFAULT_PROVIDER,
            },
        }

        return context_configs.get(context, context_configs["linkedin"])
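
    # For example, with no overriding environment variables set,
    # settings.get_context_config("email") returns:
    #   {"temperature": 0.6, "default_tokens": 200, "model_preference": "openai"}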

    def get_model_for_provider(self, provider: str) -> str:
        """
        Get the model name for a specific provider

        Args:
            provider: Provider name (openai, anthropic); unknown providers fall
                back to the OpenAI model

        Returns:
            Model name string
        """
        if provider == "openai":
            return self.OPENAI_MODEL
        elif provider == "anthropic":
            return self.ANTHROPIC_MODEL
        else:
            return self.OPENAI_MODEL

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert settings to dictionary (excluding sensitive data)

        Returns:
            Dictionary with non-sensitive configuration
        """
        return {
            "max_suggestions": self.MAX_SUGGESTIONS,
            "debounce_delay": self.DEBOUNCE_DELAY,
            "cache_ttl": self.CACHE_TTL,
            "max_input_length": self.MAX_INPUT_LENGTH,
            "cache_enabled": self.CACHE_ENABLED,
            "log_level": self.LOG_LEVEL,
            "rate_limit_enabled": self.RATE_LIMIT_ENABLED,
            "rate_limit_requests_per_minute": self.RATE_LIMIT_REQUESTS_PER_MINUTE,
            "default_provider": self.DEFAULT_PROVIDER,
            "openai_model": self.OPENAI_MODEL,
            "anthropic_model": self.ANTHROPIC_MODEL,
            "ui_theme": self.UI_THEME,
            "ui_title": self.UI_TITLE,
            "server_host": self.SERVER_HOST,
            "server_port": self.SERVER_PORT,
            "debug_mode": self.DEBUG_MODE,
            "has_openai_key": bool(self.OPENAI_API_KEY),
            "has_anthropic_key": bool(self.ANTHROPIC_API_KEY),
        }

    def update_from_dict(self, config_dict: Dict[str, Any]):
        """
        Update settings from a dictionary

        Args:
            config_dict: Dictionary with configuration updates; keys that do not
                match an existing setting are ignored
        """
        for key, value in config_dict.items():
            if hasattr(self, key.upper()):
                setattr(self, key.upper(), value)
                logger.info(f"Updated setting {key.upper()} = {value}")

        self._validate_settings()
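
    # For example, settings.update_from_dict({"max_suggestions": 8}) sets
    # MAX_SUGGESTIONS to 8 and re-runs validation.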

    def __str__(self) -> str:
        """String representation of settings (safe for logging)"""
        safe_dict = self.to_dict()
        return f"AppSettings({safe_dict})"

settings = AppSettings()
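
# Minimal usage sketch (an illustrative smoke test, not required by the
# application): run this module directly to inspect the resolved,
# non-sensitive configuration and the per-context parameters.
if __name__ == "__main__":
    logging.basicConfig(level=settings.LOG_LEVEL, format=settings.LOG_FORMAT)

    # Non-sensitive view of the loaded configuration
    print(settings.to_dict())

    # Per-context generation parameters and provider/model resolution
    email_cfg = settings.get_context_config("email")
    print(email_cfg, settings.get_model_for_provider(email_cfg["model_preference"]))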