#!/usr/bin/env python3
"""
Smart Auto-Complete - Main Application
A context-aware text completion tool built with Gradio
"""

from typing import List, Tuple

import gradio as gr

from config.settings import AppSettings
from src.autocomplete import SmartAutoComplete
from src.utils import setup_logging

# Initialize logging
logger = setup_logging()

# Initialize settings and autocomplete engine
settings = AppSettings()
autocomplete = SmartAutoComplete(settings)


class AutoCompleteApp:
    def __init__(self):
        self.last_request_time = 0
        self.current_suggestions = []

    def get_suggestions(
        self, text: str, context: str, output_tokens: int = 150, user_context: str = ""
    ) -> Tuple[List[str], str]:
        """
        Get auto-complete suggestions for the given text and context
        Returns: (suggestions_list, status_message)
        """
        try:
            # Input validation
            if not text or len(text.strip()) < 2:
                return [], "✏️ Please enter some text to get suggestions..."

            if len(text) > settings.MAX_INPUT_LENGTH:
                return (
                    [],
                    f"⚠️ Text too long (max {settings.MAX_INPUT_LENGTH} characters)",
                )

            # Get suggestions from the autocomplete engine
            suggestions = autocomplete.get_suggestions(
                text=text,
                context=context,
                max_tokens=output_tokens,
                user_context=user_context,
            )
            self.current_suggestions = suggestions

            if suggestions:
                status = f"✅ Found {len(suggestions)} suggestions"
            else:
                status = "🤔 No suggestions available for this text"

            return suggestions, status

        except Exception as e:
            logger.error(f"Error getting suggestions: {str(e)}")
            return [], f"❌ Error: {str(e)}"
    def get_suggestions_with_custom_prompts(
        self,
        text: str,
        context: str,
        output_tokens: int = 150,
        user_context: str = "",
        custom_prompts: dict = None,
    ) -> Tuple[List[str], str]:
        """
        Get auto-complete suggestions with custom prompts
        Returns: (suggestions_list, status_message)
        """
        try:
            # Input validation
            if not text or len(text.strip()) < 2:
                return [], "✏️ Please enter some text to get suggestions..."

            if len(text) > settings.MAX_INPUT_LENGTH:
                return (
                    [],
                    f"⚠️ Text too long (max {settings.MAX_INPUT_LENGTH} characters)",
                )

            # Create a temporary autocomplete instance with custom prompts
            temp_autocomplete = SmartAutoComplete(settings)
            if custom_prompts:
                temp_autocomplete.CONTEXT_PROMPTS = custom_prompts

            # Get suggestions from the autocomplete engine
            suggestions = temp_autocomplete.get_suggestions(
                text=text,
                context=context,
                max_tokens=output_tokens,
                user_context=user_context,
            )
            self.current_suggestions = suggestions

            if suggestions:
                status = f"✅ Found {len(suggestions)} suggestions"
            else:
                status = "🤔 No suggestions available for this text"

            return suggestions, status

        except Exception as e:
            logger.error(f"Error getting suggestions with custom prompts: {str(e)}")
            return [], f"❌ Error: {str(e)}"
    def insert_suggestion(
        self, current_text: str, suggestion: str, cursor_position: int = None
    ) -> str:
        """Insert the selected suggestion into the current text"""
        try:
            # Simple strategy for now: append the suggestion to the existing text.
            # A more sophisticated implementation could use cursor_position to
            # splice the suggestion in at the caret instead of appending.
            if not current_text:
                return suggestion
            return current_text + " " + suggestion.strip()
        except Exception as e:
            logger.error(f"Error inserting suggestion: {str(e)}")
            return current_text
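

# Illustrative standalone usage of AutoCompleteApp outside the Gradio flow
# (a sketch only; assumes valid API keys are configured via AppSettings):
#
#   app = AutoCompleteApp()
#   suggestions, status = app.get_suggestions(
#       text="Dear Mr. Johnson, I hope this email finds you well. I wanted to",
#       context="email",
#       output_tokens=150,
#   )
#   print(status)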


def create_interface():
    """Create and configure the Gradio interface"""
    app_instance = AutoCompleteApp()

    # Custom CSS for better styling
    custom_css = """
    .suggestion-box {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        border-radius: 10px;
        padding: 15px;
        margin: 10px 0;
        color: white;
        cursor: pointer;
        transition: transform 0.2s;
    }
    .suggestion-box:hover {
        transform: translateY(-2px);
        box-shadow: 0 4px 12px rgba(0,0,0,0.15);
    }
    .context-selector {
        margin-bottom: 20px;
    }
    .main-input {
        border-radius: 10px;
        border: 2px solid #e1e5e9;
        font-size: 16px;
    }
    """
    with gr.Blocks(
        title="🚀 Smart Auto-Complete", theme=gr.themes.Soft(), css=custom_css
    ) as interface:
        # Header
        gr.Markdown("""
        # 🚀 Smart Auto-Complete

        **Intelligent text completion powered by AI**

        Choose your context, enter your text, and click submit to get AI-powered completions! ✨
        """)

        with gr.Row():
            with gr.Column(scale=2):
                # Context selection
                context_selector = gr.Radio(
                    choices=[
                        ("📧 Email Writing", "email"),
                        ("✏️ Creative Writing", "creative"),
                        ("📝 General Text", "general"),
                    ],
                    value="email",
                    label="Select Context",
                    elem_classes=["context-selector"],
                )
                # User context input
                context_input = gr.Textbox(
                    label="📝 Reference Information (Optional)",
                    placeholder="Add any background information, previous context, or references that should inform the writing...",
                    lines=4,
                    elem_classes=["context-input"],
                )

                # Main text input
                text_input = gr.Textbox(
                    label="✏️ Enter your text here...",
                    placeholder="Enter your text and click Submit to get suggestions!",
                    lines=8,
                    elem_classes=["main-input"],
                )

                # Submit button
                submit_btn = gr.Button(
                    "🚀 Get Suggestions", variant="primary", size="lg"
                )

                # Settings
                with gr.Accordion("⚙️ Settings", open=False):
                    output_length = gr.Slider(
                        minimum=50,
                        maximum=500,
                        value=150,
                        step=10,
                        label="Output Length (tokens)",
                    )
                    gr.Checkbox(label="Show debug information", value=False)

                # Context Prompt Editor
                with gr.Accordion("🔧 Edit Context Prompts", open=False):
                    gr.Markdown(
                        "**Customize your writing style for each context type. Changes apply immediately.**"
                    )
                    with gr.Tab("📧 Email Context"):
                        email_system_prompt = gr.Textbox(
                            label="System Prompt",
                            value="""You are an expert email writing assistant. Generate professional,
                            contextually appropriate email completions. Focus on:
                            - Professional tone and structure
                            - Clear, concise communication
                            - Appropriate greetings and closings
                            - Business communication best practices
                            IMPORTANT: Generate a completion that is approximately {max_tokens} tokens long.
                            Adjust your response length accordingly - shorter for fewer tokens, longer for more tokens.""",
                            lines=8,
                            placeholder="Enter the system prompt for email context...",
                        )
                        email_user_template = gr.Textbox(
                            label="User Message Template",
                            value="Complete this email text naturally and professionally with approximately {max_tokens} tokens: {text}",
                            lines=3,
                            placeholder="Enter the user message template...",
                        )

                    with gr.Tab("🎨 Creative Context"):
                        creative_system_prompt = gr.Textbox(
                            label="System Prompt",
                            value="""You are a creative writing assistant. Generate engaging,
                            imaginative story continuations. Focus on:
                            - Narrative consistency and flow
                            - Character development
                            - Descriptive and engaging language
                            - Plot advancement
                            IMPORTANT: Generate a completion that is approximately {max_tokens} tokens long.
                            Adjust your response length accordingly - shorter for fewer tokens, longer for more tokens.""",
                            lines=8,
                            placeholder="Enter the system prompt for creative context...",
                        )
                        creative_user_template = gr.Textbox(
                            label="User Message Template",
                            value="Continue this creative writing piece naturally with approximately {max_tokens} tokens: {text}",
                            lines=3,
                            placeholder="Enter the user message template...",
                        )

                    with gr.Tab("📝 General Context"):
                        general_system_prompt = gr.Textbox(
                            label="System Prompt",
                            value="""You are a helpful writing assistant. Generate natural,
                            contextually appropriate text completions. Focus on:
                            - Natural language flow
                            - Contextual relevance
                            - Clarity and coherence
                            - Appropriate tone
                            IMPORTANT: Generate a completion that is approximately {max_tokens} tokens long.
                            Adjust your response length accordingly - shorter for fewer tokens, longer for more tokens.""",
                            lines=8,
                            placeholder="Enter the system prompt for general context...",
                        )
                        general_user_template = gr.Textbox(
                            label="User Message Template",
                            value="Complete this text naturally with approximately {max_tokens} tokens: {text}",
                            lines=3,
                            placeholder="Enter the user message template...",
                        )
            with gr.Column(scale=1):
                # Status display
                status_display = gr.Textbox(
                    label="📊 Status",
                    value="Ready to help! Start typing...",
                    interactive=False,
                    lines=2,
                )

                # Copyable textbox for suggestions (only output)
                copy_textbox = gr.Textbox(
                    label="📋 Generated Text (Select All and Copy with Ctrl+C/Cmd+C)",
                    placeholder="Generated suggestions will appear here for easy copying...",
                    lines=8,
                    max_lines=15,
                    interactive=True,
                    visible=False,
                )

        # Demo section
        with gr.Accordion("🎯 Try These Examples", open=False):
            gr.Examples(
                examples=[
                    [
                        "Meeting scheduled for next Tuesday to discuss the quarterly budget review",
                        "Dear Mr. Johnson,\n\nI hope this email finds you well. I wanted to follow up on",
                        "email",
                    ],
                    [
                        "Fantasy adventure story with magical creatures and brave heroes",
                        "Once upon a time, in a kingdom far away, there lived a",
                        "creative",
                    ],
                    [
                        "Academic research paper on technology trends",
                        "The impact of artificial intelligence on modern society",
                        "general",
                    ],
                ],
                inputs=[context_input, text_input, context_selector],
                label="Click any example to try it out!",
            )
        # Event handlers
        def update_suggestions(
            text,
            context,
            output_tokens,
            user_context,
            email_sys,
            email_user,
            creative_sys,
            creative_user,
            general_sys,
            general_user,
        ):
            """Update suggestions based on input, using the custom prompts"""
            logger.info(
                f"Getting suggestions with context: '{user_context[:50] if user_context else 'None'}...'"
            )
            logger.info(f"Requested output tokens: {output_tokens}")

            # Create custom prompts dictionary
            custom_prompts = {
                "email": {
                    "system_prompt": email_sys,
                    "user_template": email_user,
                    "temperature": 0.6,
                },
                "creative": {
                    "system_prompt": creative_sys,
                    "user_template": creative_user,
                    "temperature": 0.8,
                },
                "general": {
                    "system_prompt": general_sys,
                    "user_template": general_user,
                    "temperature": 0.7,
                },
            }

            suggestions, status = app_instance.get_suggestions_with_custom_prompts(
                text, context, output_tokens, user_context, custom_prompts
            )

            # Update the copy textbox with the first suggestion
            if suggestions:
                copy_text = suggestions[0]
                copy_visible = True
            else:
                copy_text = ""
                copy_visible = False

            # Return the status message and the copy textbox update
            copy_update = gr.update(visible=copy_visible, value=copy_text)
            return status, copy_update
        # Submit button handler
        submit_btn.click(
            fn=update_suggestions,
            inputs=[
                text_input,
                context_selector,
                output_length,
                context_input,
                email_system_prompt,
                email_user_template,
                creative_system_prompt,
                creative_user_template,
                general_system_prompt,
                general_user_template,
            ],
            outputs=[status_display, copy_textbox],
        )
        # Footer
        gr.Markdown("""
        ---
        ### 🎮 How to Use:
        1. **Select your context** (Email, Creative, or General)
        2. **Add context information** (optional) - background info, references, or previous context
        3. **Enter your text** in the main text area
        4. **Adjust output length** (50-500 tokens) in settings
        5. **Customize prompts** (optional) - edit the AI prompts in the "Edit Context Prompts" section
        6. **Click "Get Suggestions"** to generate completions
        7. **Copy from the generated text box** (Select All + Ctrl+C/Cmd+C)

        ### 🌟 Pro Tips:
        - **Context Window**: Add background info, previous conversations, or references to improve suggestions
        - **Email**: Try starting with "Dear..." or "I hope..." and add meeting context
        - **Creative**: Start with "Once upon a time..." and add story background
        - **General**: Works great for any type of text; add any relevant context
        - **Output Length**: Adjust the token slider for longer or shorter completions
        - **Custom Prompts**: Edit the AI prompts to customize behavior for your specific needs

        ### 🔧 Built With:
        - **Gradio** for the beautiful interface
        - **OpenAI GPT** for intelligent completions
        - **Python** for robust backend processing

        ---
        <div style='text-align: center; color: #666;'>
        Made with ❤️ for writers, developers, and creators everywhere
        </div>
        """)

    return interface
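

# Example .env for local development (illustrative values only; at least one
# provider key is required, as checked by settings.validate_api_keys()):
#
#   OPENAI_API_KEY=sk-...
#   ANTHROPIC_API_KEY=...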


def main():
    """Main function to run the application"""
    try:
        # Check API configuration
        if not settings.validate_api_keys():
            logger.error("No valid API keys found. Please configure your API keys.")
            print("❌ Error: No valid API keys configured!")
            print("Please set OPENAI_API_KEY or ANTHROPIC_API_KEY in your .env file")
            return

        logger.info("Starting Smart Auto-Complete application...")

        # Create and launch the interface
        interface = create_interface()
        interface.launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=False,  # Set to True for public sharing
            show_error=True,
        )

    except Exception as e:
        logger.error(f"Failed to start application: {str(e)}")
        print(f"❌ Error starting application: {str(e)}")


if __name__ == "__main__":
    main()