#!/usr/bin/env python3
"""
Smart Auto-Complete - Main Application
A context-aware text completion tool built with Gradio
"""
from typing import List, Tuple
import gradio as gr
from config.settings import AppSettings
from src.autocomplete import SmartAutoComplete
from src.utils import setup_logging
# Initialize module-level logging once for the whole application.
logger = setup_logging()

# Module-level singletons shared by the UI: application settings and the
# default autocomplete engine (used whenever the user has not supplied a key).
settings = AppSettings()
autocomplete = SmartAutoComplete(settings)
class AutoCompleteApp:
    """Stateful bridge between the Gradio UI and the autocomplete engine.

    Tracks the most recent suggestions and an optional user-supplied OpenAI
    API key. When a user provides a key, a dedicated SmartAutoComplete
    instance is created so requests run on the user's personal quota instead
    of the shared default engine.
    """

    def __init__(self):
        # Timestamp of the last request (reserved for future rate limiting).
        self.last_request_time = 0
        # Suggestions returned by the most recent successful request.
        self.current_suggestions = []
        # User-provided OpenAI key; None means "use the default engine".
        self.user_api_key = None
        # SmartAutoComplete built from the user's key (None until one is set).
        self.custom_autocomplete = None

    @staticmethod
    def _validate_input(text: str):
        """Return a user-facing error status if *text* is unusable, else None.

        Rejects empty/whitespace-only or single-character text and text longer
        than the configured MAX_INPUT_LENGTH. Shared by both suggestion entry
        points so their validation rules cannot drift apart.
        """
        if not text or len(text.strip()) < 2:
            return "✏️ Please enter some text to get suggestions..."
        if len(text) > settings.MAX_INPUT_LENGTH:
            return f"⚠️ Text too long (max {settings.MAX_INPUT_LENGTH} characters)"
        return None

    def get_suggestions(
        self, text: str, context: str, output_tokens: int = 150, user_context: str = ""
    ) -> Tuple[List[str], str]:
        """
        Get auto-complete suggestions for the given text and context.

        Uses the module-level default engine (not the user's custom key).
        Returns: (suggestions_list, status_message)
        """
        try:
            # Input validation (shared helper)
            error = self._validate_input(text)
            if error:
                return [], error
            # Get suggestions from autocomplete engine
            suggestions = autocomplete.get_suggestions(
                text=text,
                context=context,
                max_tokens=output_tokens,
                user_context=user_context,
            )
            self.current_suggestions = suggestions
            if suggestions:
                status = f"✅ Found {len(suggestions)} suggestions"
            else:
                status = "🤔 No suggestions available for this text"
            return suggestions, status
        except Exception as e:
            logger.error(f"Error getting suggestions: {str(e)}")
            return [], f"❌ Error: {str(e)}"

    def get_suggestions_with_custom_prompts(
        self,
        text: str,
        context: str,
        output_tokens: int = 150,
        user_context: str = "",
        custom_prompts: dict = None,
    ) -> Tuple[List[str], str]:
        """
        Get auto-complete suggestions with custom prompts.

        Builds a throwaway SmartAutoComplete so the caller's prompt overrides
        never leak into the shared/default engine.
        Returns: (suggestions_list, status_message)
        """
        try:
            # Input validation (shared helper)
            error = self._validate_input(text)
            if error:
                return [], error
            # Use the active autocomplete instance (user's custom or default)
            active_autocomplete = self.get_active_autocomplete()
            # Create a temporary autocomplete instance with custom prompts
            temp_autocomplete = SmartAutoComplete(
                active_autocomplete.settings if active_autocomplete else settings
            )
            if custom_prompts:
                temp_autocomplete.CONTEXT_PROMPTS = custom_prompts
            # Get suggestions from autocomplete engine
            suggestions = temp_autocomplete.get_suggestions(
                text=text,
                context=context,
                max_tokens=output_tokens,
                user_context=user_context,
            )
            self.current_suggestions = suggestions
            if suggestions:
                status = f"✅ Found {len(suggestions)} suggestions"
            else:
                status = "🤔 No suggestions available for this text"
            return suggestions, status
        except Exception as e:
            logger.error(f"Error getting suggestions with custom prompts: {str(e)}")
            return [], f"❌ Error: {str(e)}"

    def insert_suggestion(
        self, current_text: str, suggestion: str, cursor_position: int = None
    ) -> str:
        """Append the selected suggestion to the current text.

        ``cursor_position`` is accepted for interface compatibility but is
        currently unused. NOTE: the original implementation branched on
        sentence-ending punctuation, but both branches were identical dead
        code — collapsed to a single append.
        """
        try:
            if not current_text:
                return suggestion
            return current_text + " " + suggestion.strip()
        except Exception as e:
            logger.error(f"Error inserting suggestion: {str(e)}")
            return current_text

    def update_api_key(self, api_key: str) -> str:
        """Update the OpenAI API key and reinitialize the autocomplete engine.

        An empty/blank key reverts to the default shared configuration.
        Returns a user-facing status message.
        """
        try:
            if not api_key or not api_key.strip():
                self.user_api_key = None
                self.custom_autocomplete = None
                return "🔄 Reverted to default configuration"
            # Cheap format sanity check before building an engine with the key
            if not api_key.startswith("sk-"):
                return "❌ Invalid API key format. OpenAI keys start with 'sk-'"
            # Create a custom settings object carrying the user's API key
            # (AppSettings is already imported at module level).
            custom_settings = AppSettings()
            custom_settings.OPENAI_API_KEY = api_key.strip()
            # Create a new autocomplete instance with the custom settings
            self.custom_autocomplete = SmartAutoComplete(custom_settings)
            self.user_api_key = api_key.strip()
            return "✅ API key updated successfully! Using your personal quota."
        except Exception as e:
            logger.error(f"Error updating API key: {str(e)}")
            return f"❌ Error updating API key: {str(e)}"

    def test_api_connection(self, api_key: str = None) -> str:
        """Test the API connection with the current or provided key.

        A key passed as the argument takes precedence over the stored one.
        Returns a user-facing status message.
        """
        try:
            # Use custom autocomplete if user has provided a key, otherwise default
            test_autocomplete = (
                self.custom_autocomplete if self.user_api_key else autocomplete
            )
            if api_key and api_key.strip():
                # Build a one-off engine for the explicitly provided key
                test_settings = AppSettings()
                test_settings.OPENAI_API_KEY = api_key.strip()
                test_autocomplete = SmartAutoComplete(test_settings)
            # Fire a tiny completion to prove the key/endpoint actually works
            test_result = test_autocomplete.get_suggestions(
                text="Hello, this is a test", context="linkedin", max_tokens=10
            )
            if test_result and len(test_result) > 0:
                return "✅ API connection successful!"
            else:
                return "❌ API connection failed - no response received"
        except Exception as e:
            logger.error(f"API connection test failed: {str(e)}")
            return f"❌ API connection test failed: {str(e)}"

    def get_active_autocomplete(self):
        """Return the user's custom engine if a key is set, else the default."""
        return self.custom_autocomplete if self.user_api_key else autocomplete
def create_interface():
    """Create and configure the Gradio interface.

    Builds the complete Blocks layout — header, main input column (context
    selector, reference/context box, main text box, submit button, advanced
    settings, prompt-editor tabs), output column (status + generated text),
    examples gallery and footer — wires every event handler to a fresh
    AutoCompleteApp instance, and returns the un-launched Blocks app.
    """
    app_instance = AutoCompleteApp()

    # Professional CSS styling injected into the Blocks app via css=...
    custom_css = """
/* Global Styles */
.gradio-container {
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%) !important;
min-height: 100vh;
}
/* Header Styling */
.header-container {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
padding: 2rem;
border-radius: 16px;
margin-bottom: 2rem;
box-shadow: 0 8px 32px rgba(0,0,0,0.1);
}
.header-container h1 {
font-size: 2.5rem;
font-weight: 700;
margin-bottom: 0.5rem;
text-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.header-container p {
font-size: 1.1rem;
opacity: 0.9;
margin-bottom: 0;
}
/* Card Styling */
.main-card {
background: white;
border-radius: 16px;
padding: 2rem;
box-shadow: 0 4px 24px rgba(0,0,0,0.06);
border: 1px solid rgba(255,255,255,0.2);
backdrop-filter: blur(10px);
}
.output-card {
background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%);
border-radius: 16px;
padding: 1.5rem;
box-shadow: 0 4px 16px rgba(0,0,0,0.04);
border: 1px solid #e2e8f0;
}
/* Input Styling */
.gradio-textbox textarea, .gradio-textbox input {
border: 2px solid #e2e8f0 !important;
border-radius: 12px !important;
padding: 16px !important;
font-size: 16px !important;
transition: all 0.3s ease !important;
background: #fafbfc !important;
}
.gradio-textbox textarea:focus, .gradio-textbox input:focus {
border-color: #667eea !important;
box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1) !important;
background: white !important;
}
/* Button Styling */
.gradio-button {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
border: none !important;
border-radius: 12px !important;
padding: 16px 32px !important;
font-weight: 600 !important;
font-size: 16px !important;
color: white !important;
transition: all 0.3s ease !important;
box-shadow: 0 4px 16px rgba(102, 126, 234, 0.3) !important;
}
.gradio-button:hover {
transform: translateY(-2px) !important;
box-shadow: 0 8px 24px rgba(102, 126, 234, 0.4) !important;
}
.gradio-button.secondary {
background: linear-gradient(135deg, #64748b 0%, #475569 100%) !important;
box-shadow: 0 4px 16px rgba(100, 116, 139, 0.3) !important;
}
/* Radio Button Styling */
.gradio-radio {
background: white;
border-radius: 12px;
padding: 1rem;
border: 2px solid #e2e8f0;
}
.gradio-radio label {
font-weight: 500;
color: #374151;
padding: 12px 16px;
border-radius: 8px;
transition: all 0.2s ease;
}
.gradio-radio input:checked + label {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
box-shadow: 0 2px 8px rgba(102, 126, 234, 0.3);
}
/* Accordion Styling */
.gradio-accordion {
border: 2px solid #e2e8f0 !important;
border-radius: 12px !important;
background: white !important;
margin: 1rem 0 !important;
}
.gradio-accordion summary {
background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%) !important;
padding: 1rem 1.5rem !important;
border-radius: 10px !important;
font-weight: 600 !important;
color: #374151 !important;
}
/* Status Display */
.status-display {
background: linear-gradient(135deg, #ecfdf5 0%, #d1fae5 100%);
border: 2px solid #10b981;
border-radius: 12px;
padding: 1rem;
color: #065f46;
font-weight: 500;
}
/* Output Text Area */
.output-text {
background: linear-gradient(135deg, #fefce8 0%, #fef3c7 100%) !important;
border: 2px solid #f59e0b !important;
border-radius: 12px !important;
font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace !important;
font-size: 14px !important;
}
/* Slider Styling */
.gradio-slider input[type="range"] {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
border-radius: 8px !important;
}
/* Tab Styling */
.gradio-tabs {
border-radius: 12px !important;
overflow: hidden !important;
}
.gradio-tab-nav {
background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%) !important;
border-bottom: 2px solid #e2e8f0 !important;
}
.gradio-tab-nav button {
border-radius: 8px 8px 0 0 !important;
font-weight: 500 !important;
padding: 12px 24px !important;
}
.gradio-tab-nav button.selected {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
color: white !important;
}
/* Examples Styling */
.gradio-examples {
background: white;
border-radius: 12px;
padding: 1.5rem;
border: 2px solid #e2e8f0;
}
/* Footer Styling */
.footer-content {
background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%);
color: #374151;
padding: 2rem;
border-radius: 16px;
margin-top: 2rem;
border: 2px solid #e2e8f0;
box-shadow: 0 4px 16px rgba(0,0,0,0.04);
}
.footer-content h3 {
color: #1f2937;
border-bottom: 2px solid #667eea;
padding-bottom: 0.5rem;
margin-bottom: 1rem;
font-weight: 600;
}
/* Responsive Design */
@media (max-width: 768px) {
.header-container h1 {
font-size: 2rem;
}
.main-card, .output-card {
padding: 1rem;
}
.gradio-button {
padding: 12px 24px !important;
font-size: 14px !important;
}
}
"""

    with gr.Blocks(
        title="LinkedIn Smart Auto-Complete | Professional AI Writing Assistant",
        theme=gr.themes.Soft(),
        css=custom_css,
    ) as interface:
        # Professional Header
        with gr.Row(elem_classes=["header-container"]):
            gr.HTML("""
🚀 LinkedIn Smart Auto-Complete
Professional AI Writing Assistant
Transform your ideas into compelling LinkedIn content with AI-powered intelligence
💡 Pro Tip: Add your OpenAI API key in Settings for unlimited personal usage
""")

        # Two-column body: inputs/settings on the left, output on the right.
        with gr.Row():
            with gr.Column(scale=2, elem_classes=["main-card"]):
                # Professional Context Selection — value is the second element
                # of each (label, value) tuple; "linkedin" is the default.
                gr.Markdown("### 🎯 **Choose Your Writing Context**")
                context_selector = gr.Radio(
                    choices=[
                        ("📧 Professional Email", "email"),
                        ("✍️ Creative Content", "creative"),
                        ("💼 LinkedIn Post", "linkedin"),
                    ],
                    value="linkedin",
                    label="",
                    elem_classes=["context-selector"],
                )

                gr.Markdown("---")

                # Enhanced Context Input (optional background information
                # passed through to the engine as user_context)
                gr.Markdown("### 📋 **Reference Information** *(Optional)*")
                context_input = gr.Textbox(
                    label="",
                    placeholder="💡 Add background info, company details, industry context, or previous conversations to enhance AI understanding...",
                    lines=4,
                    elem_classes=["context-input"],
                )

                # Professional Main Input — the text to be completed
                gr.Markdown("### ✍️ **Your Content**")
                text_input = gr.Textbox(
                    label="",
                    placeholder="🚀 Start typing your content here... The AI will intelligently complete your thoughts!",
                    lines=8,
                    elem_classes=["main-input"],
                )

                # Enhanced Submit Button (wired to update_suggestions below)
                submit_btn = gr.Button(
                    "✨ Generate AI Completion",
                    variant="primary",
                    size="lg",
                    elem_classes=["primary-button"],
                )

                # Professional Settings
                with gr.Accordion("⚙️ **Advanced Settings**", open=False):
                    # API Key Configuration
                    with gr.Group():
                        gr.Markdown("### 🔑 **API Configuration**")
                        gr.Markdown(
                            "*Secure your own OpenAI quota for unlimited usage*"
                        )
                        openai_key_input = gr.Textbox(
                            label="OpenAI API Key",
                            placeholder="sk-proj-... (Paste your OpenAI API key here)",
                            type="password",
                            value="",
                            info="🔒 Your API key is encrypted and only used for this session. Never stored permanently.",
                        )
                        # Initial status reflects whether a default key exists
                        # in the module-level settings at interface build time.
                        api_status = gr.Textbox(
                            label="Connection Status",
                            value="✅ Using default configuration"
                            if settings.OPENAI_API_KEY
                            else "⚠️ No API key configured - using shared quota",
                            interactive=False,
                            lines=1,
                            elem_classes=["status-display"],
                        )
                        test_api_btn = gr.Button(
                            "🧪 Test Connection", size="sm", elem_classes=["secondary"]
                        )

                    gr.Markdown("---")

                    # Enhanced Output Settings — token budget fed to the engine
                    gr.Markdown("### 📏 **Output Configuration**")
                    output_length = gr.Slider(
                        minimum=50,
                        maximum=500,
                        value=150,
                        step=10,
                        label="Response Length (tokens)",
                        info="Adjust the length of AI-generated content",
                    )

                    # Debug checkbox is display-only: it is not bound to any
                    # handler or output below.
                    gr.Markdown("### 🔧 **Debug Options**")
                    gr.Checkbox(
                        label="Enable detailed logging",
                        value=False,
                        info="Show technical details for troubleshooting",
                    )

                # Context Prompt Editor — one tab per context; the textbox
                # values below are the defaults and are sent with every submit.
                with gr.Accordion("🔧 Edit Context Prompts", open=False):
                    gr.Markdown(
                        "**Customize your writing style for each context type. Changes apply immediately.**"
                    )

                    with gr.Tab("📧 Email Context"):
                        email_system_prompt = gr.Textbox(
                            label="System Prompt",
                            value="""You are an expert email writing assistant. Generate professional,
contextually appropriate email completions. Focus on:
- Professional tone and structure
- Clear, concise communication
- Appropriate greetings and closings
- Business communication best practices
IMPORTANT: Generate a completion that is approximately {max_tokens} tokens long.
Adjust your response length accordingly - shorter for fewer tokens, longer for more tokens.""",
                            lines=8,
                            placeholder="Enter the system prompt for email context...",
                        )
                        email_user_template = gr.Textbox(
                            label="User Message Template",
                            value="Complete this email text naturally and professionally with approximately {max_tokens} tokens: {text}",
                            lines=3,
                            placeholder="Enter the user message template...",
                        )

                    with gr.Tab("🎨 Creative Context"):
                        creative_system_prompt = gr.Textbox(
                            label="System Prompt",
                            value="""You are a creative writing assistant. Generate engaging,
imaginative story continuations. Focus on:
- Narrative consistency and flow
- Character development
- Descriptive and engaging language
- Plot advancement
IMPORTANT: Generate a completion that is approximately {max_tokens} tokens long.
Adjust your response length accordingly - shorter for fewer tokens, longer for more tokens.""",
                            lines=8,
                            placeholder="Enter the system prompt for creative context...",
                        )
                        creative_user_template = gr.Textbox(
                            label="User Message Template",
                            value="Continue this creative writing piece naturally with approximately {max_tokens} tokens: {text}",
                            lines=3,
                            placeholder="Enter the user message template...",
                        )

                    with gr.Tab("💼 LinkedIn Context"):
                        linkedin_system_prompt = gr.Textbox(
                            label="System Prompt",
                            value="""You are a LinkedIn writing assistant specialized in professional networking content. Generate engaging,
professional LinkedIn-appropriate text completions. Focus on:
- Professional networking tone
- Industry-relevant language
- Engaging and authentic voice
- LinkedIn best practices (hashtags, mentions, professional insights)
- Career development and business communication
IMPORTANT: Generate a completion that is approximately {max_tokens} tokens long.
Adjust your response length accordingly - shorter for fewer tokens, longer for more tokens.""",
                            lines=8,
                            placeholder="Enter the system prompt for LinkedIn context...",
                        )
                        linkedin_user_template = gr.Textbox(
                            label="User Message Template",
                            value="Complete this LinkedIn post/content naturally and professionally with approximately {max_tokens} tokens: {text}",
                            lines=3,
                            placeholder="Enter the user message template...",
                        )

            with gr.Column(scale=1, elem_classes=["output-card"]):
                # Professional Status Display — mirrors the status string
                # returned by update_suggestions.
                gr.Markdown("### 📊 **AI Assistant Status**")
                status_display = gr.Textbox(
                    label="",
                    value="🤖 Ready to assist! Choose your context and start writing...",
                    interactive=False,
                    lines=3,
                    elem_classes=["status-display"],
                )

                gr.Markdown("---")

                # Enhanced Output Area — hidden until the first suggestion
                # arrives (visibility toggled via gr.update in the handler).
                gr.Markdown("### 📝 **Generated Content**")
                gr.Markdown("*AI-powered completion will appear below*")
                copy_textbox = gr.Textbox(
                    label="",
                    placeholder="✨ Your AI-generated content will appear here...\n\n📋 Simply select all text (Ctrl+A/Cmd+A) and copy (Ctrl+C/Cmd+C) to use in your LinkedIn post or email!",
                    lines=12,
                    max_lines=20,
                    interactive=True,
                    visible=False,
                    elem_classes=["output-text"],
                )

                # Quick Action Buttons (Future Enhancement) — whole row hidden;
                # none of these buttons is wired to a handler yet.
                with gr.Row(visible=False):
                    gr.Button(
                        "📋 Copy to Clipboard", size="sm", elem_classes=["secondary"]
                    )
                    gr.Button("🔄 Regenerate", size="sm", elem_classes=["secondary"])
                    gr.Button("✏️ Edit & Refine", size="sm", elem_classes=["secondary"])

        # Professional Examples Section — each example row maps onto
        # (context_input, text_input, context_selector) in that order.
        with gr.Accordion("🎯 **Try These Professional Examples**", open=False):
            gr.Markdown("""
### 🚀 **Quick Start Templates**
*Click any example below to instantly populate the form and see AI in action!*
""")
            gr.Examples(
                examples=[
                    [
                        "Quarterly budget review meeting with stakeholders, discussing Q4 performance metrics and 2024 planning initiatives",
                        "Dear Mr. Johnson,\n\nI hope this email finds you well. Following our discussion yesterday, I wanted to confirm our meeting details for the quarterly budget review",
                        "email",
                    ],
                    [
                        "Epic fantasy adventure featuring a young mage discovering ancient powers in a world where magic and technology collide",
                        "In the neon-lit streets of Neo-Arcanum, where holographic spells danced alongside digital billboards, Zara clutched her grandmother's ancient grimoire and whispered",
                        "creative",
                    ],
                    [
                        "Sharing insights about AI transformation in the financial services industry, highlighting successful implementation strategies and future trends",
                        "🚀 Excited to share key insights from our recent AI transformation journey at FinTech Solutions! After 18 months of implementation, here's what we've learned about",
                        "linkedin",
                    ],
                ],
                inputs=[context_input, text_input, context_selector],
                label="",
            )

        # Event handlers — thin closures over app_instance so Gradio callbacks
        # stay plain functions.
        def update_api_key(api_key):
            """Handle API key updates"""
            status = app_instance.update_api_key(api_key)
            return status

        def test_api_connection(api_key):
            """Handle API connection testing"""
            status = app_instance.test_api_connection(api_key)
            return status

        def update_suggestions(
            text,
            context,
            output_tokens,
            user_context,
            email_sys,
            email_user,
            creative_sys,
            creative_user,
            linkedin_sys,
            linkedin_user,
        ):
            """Update suggestions based on input with custom prompts"""
            logger.info(
                f"Getting suggestions with context: '{user_context[:50] if user_context else 'None'}...'"
            )
            logger.info(f"Requested output tokens: {output_tokens}")
            # Create custom prompts dictionary. Temperatures are fixed per
            # context here; only the prompt text comes from the editor tabs.
            custom_prompts = {
                "email": {
                    "system_prompt": email_sys,
                    "user_template": email_user,
                    "temperature": 0.6,
                },
                "creative": {
                    "system_prompt": creative_sys,
                    "user_template": creative_user,
                    "temperature": 0.8,
                },
                "linkedin": {
                    "system_prompt": linkedin_sys,
                    "user_template": linkedin_user,
                    "temperature": 0.7,
                },
            }
            suggestions, status = app_instance.get_suggestions_with_custom_prompts(
                text, context, output_tokens, user_context, custom_prompts
            )
            # Update the copy textbox with the first suggestion (if any) and
            # toggle its visibility accordingly.
            if suggestions:
                copy_text = suggestions[0] if suggestions else ""
                copy_visible = True
            else:
                copy_text = ""
                copy_visible = False
            # Return the copy textbox update
            copy_update = gr.update(visible=copy_visible, value=copy_text)
            return status, copy_update

        # API Key event handlers — key textbox updates on change; test button
        # validates the currently typed key.
        openai_key_input.change(
            fn=update_api_key, inputs=[openai_key_input], outputs=[api_status]
        )
        test_api_btn.click(
            fn=test_api_connection, inputs=[openai_key_input], outputs=[api_status]
        )

        # Submit button handler — input order must match update_suggestions'
        # parameter order exactly.
        submit_btn.click(
            fn=update_suggestions,
            inputs=[
                text_input,
                context_selector,
                output_length,
                context_input,
                email_system_prompt,
                email_user_template,
                creative_system_prompt,
                creative_user_template,
                linkedin_system_prompt,
                linkedin_user_template,
            ],
            outputs=[status_display, copy_textbox],
        )

        # Professional Footer (static content only)
        with gr.Row(elem_classes=["footer-content"]):
            gr.HTML("""
🎮 Quick Start Guide
- Choose Context: Select Email, Creative, or LinkedIn
- Add Context: Include background information (optional)
- Enter Text: Start typing your content
- Generate: Click the AI completion button
- Copy & Use: Select all and copy the result
🌟 Pro Features
- 🔑 Personal API Key: Unlimited usage with your OpenAI account
- 📏 Custom Length: Adjust output from 50-500 tokens
- 🎯 Context-Aware: AI adapts to your specific writing needs
- ⚡ Real-time: Instant AI-powered completions
- 🔧 Customizable: Edit prompts for personalized results
💡 Expert Tips
- 📧 Email: Start with greetings, add meeting context
- ✍️ Creative: Set the scene, describe characters
- 💼 LinkedIn: Include industry keywords, hashtags
- 🎯 Context: More background = better results
- 🔄 Iterate: Refine prompts for perfect output
🚀
Powered by OpenAI GPT
⚡
Built with Gradio
🐍
Python Backend
Made with ❤️ for professionals, creators, and innovators worldwide
""")

    return interface
def main():
    """Launch the Smart Auto-Complete application.

    Logs whether default API keys are configured (they are optional — each
    user may supply a personal key through the UI), builds the Gradio
    interface, and starts the local web server. Startup failures are logged
    and echoed to stdout rather than raising a traceback.
    """
    try:
        # Default keys are informational only; the UI accepts per-user keys.
        if settings.validate_api_keys():
            logger.info("Default API keys found and validated.")
        else:
            logger.warning(
                "No default API keys found. Users can provide their own keys in the UI."
            )
            print("⚠️ No default API keys configured.")
            print("Users can enter their own OpenAI API key in the Settings section.")

        logger.info("Starting Smart Auto-Complete application...")

        # Build the UI, then serve it on all interfaces at port 7860.
        gradio_app = create_interface()
        gradio_app.launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=False,  # Set to True for public sharing
            show_error=True,
        )
    except Exception as e:
        logger.error(f"Failed to start application: {str(e)}")
        print(f"❌ Error starting application: {str(e)}")
# Standard script entry point: only launch when executed directly.
if __name__ == "__main__":
    main()