Sandipan Haldar committed
Commit 3789a0e · 1 Parent(s): ace630e
Files changed (5):
  1. .env.example +1 -1
  2. app.py +374 -106
  3. config/settings.py +1 -1
  4. settings.py +1 -1
  5. src/api_client.py +62 -10

.env.example CHANGED
@@ -27,7 +27,7 @@ RATE_LIMIT_REQUESTS_PER_MINUTE=60
 RATE_LIMIT_ENABLED=true
 
 # Model Configuration
-OPENAI_MODEL=gpt-4.1-mini
+OPENAI_MODEL=gpt-4o-mini
 ANTHROPIC_MODEL=claude-3-haiku-20240307
 
  # Temperature settings for different contexts (0.0 to 2.0)
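
Note: the value above only takes effect once it reaches the process environment. A minimal sketch of the lookup path, assuming the app loads .env with python-dotenv (the repo's actual loader may differ):

import os

from dotenv import load_dotenv  # assumption: python-dotenv (or similar) reads .env at startup

load_dotenv()  # copies OPENAI_MODEL and the other .env entries into os.environ
model = os.getenv("OPENAI_MODEL", "gpt-4o-mini")  # falls back to the new default when unset
print(f"Completions will use: {model}")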
app.py CHANGED
@@ -209,116 +209,335 @@ def create_interface():
 
     app_instance = AutoCompleteApp()
 
-    # Custom CSS for better styling
+    # Professional CSS styling
     custom_css = """
-    .suggestion-box {
+    /* Global Styles */
+    .gradio-container {
+        font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
+        background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%) !important;
+        min-height: 100vh;
+    }
+
+    /* Header Styling */
+    .header-container {
         background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
-        border-radius: 10px;
-        padding: 15px;
-        margin: 10px 0;
         color: white;
-        cursor: pointer;
-        transition: transform 0.2s;
+        padding: 2rem;
+        border-radius: 16px;
+        margin-bottom: 2rem;
+        box-shadow: 0 8px 32px rgba(0,0,0,0.1);
     }
-    .suggestion-box:hover {
-        transform: translateY(-2px);
-        box-shadow: 0 4px 12px rgba(0,0,0,0.15);
+
+    .header-container h1 {
+        font-size: 2.5rem;
+        font-weight: 700;
+        margin-bottom: 0.5rem;
+        text-shadow: 0 2px 4px rgba(0,0,0,0.1);
     }
-    .context-selector {
-        margin-bottom: 20px;
+
+    .header-container p {
+        font-size: 1.1rem;
+        opacity: 0.9;
+        margin-bottom: 0;
     }
-    .main-input {
-        border-radius: 10px;
-        border: 2px solid #e1e5e9;
-        font-size: 16px;
+
+    /* Card Styling */
+    .main-card {
+        background: white;
+        border-radius: 16px;
+        padding: 2rem;
+        box-shadow: 0 4px 24px rgba(0,0,0,0.06);
+        border: 1px solid rgba(255,255,255,0.2);
+        backdrop-filter: blur(10px);
     }
-    """
 
-    with gr.Blocks(
-        title="🚀 Smart Auto-Complete", theme=gr.themes.Soft(), css=custom_css
-    ) as interface:
-        # Header
-        gr.Markdown("""
-        # 🚀 Smart Auto-Complete
+    .output-card {
+        background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%);
+        border-radius: 16px;
+        padding: 1.5rem;
+        box-shadow: 0 4px 16px rgba(0,0,0,0.04);
+        border: 1px solid #e2e8f0;
+    }
+
+    /* Input Styling */
+    .gradio-textbox textarea, .gradio-textbox input {
+        border: 2px solid #e2e8f0 !important;
+        border-radius: 12px !important;
+        padding: 16px !important;
+        font-size: 16px !important;
+        transition: all 0.3s ease !important;
+        background: #fafbfc !important;
+    }
+
+    .gradio-textbox textarea:focus, .gradio-textbox input:focus {
+        border-color: #667eea !important;
+        box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1) !important;
+        background: white !important;
+    }
+
+    /* Button Styling */
+    .gradio-button {
+        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
+        border: none !important;
+        border-radius: 12px !important;
+        padding: 16px 32px !important;
+        font-weight: 600 !important;
+        font-size: 16px !important;
+        color: white !important;
+        transition: all 0.3s ease !important;
+        box-shadow: 0 4px 16px rgba(102, 126, 234, 0.3) !important;
+    }
+
+    .gradio-button:hover {
+        transform: translateY(-2px) !important;
+        box-shadow: 0 8px 24px rgba(102, 126, 234, 0.4) !important;
+    }
 
-        **Intelligent text completion powered by AI**
+    .gradio-button.secondary {
+        background: linear-gradient(135deg, #64748b 0%, #475569 100%) !important;
+        box-shadow: 0 4px 16px rgba(100, 116, 139, 0.3) !important;
+    }
 
-        Choose your context, enter your text, and click submit to get AI-powered completions! ✨
+    /* Radio Button Styling */
+    .gradio-radio {
+        background: white;
+        border-radius: 12px;
+        padding: 1rem;
+        border: 2px solid #e2e8f0;
+    }
 
-        💡 **Tip**: Add your own OpenAI API key in Settings to use your personal quota and avoid rate limits.
-        """)
+    .gradio-radio label {
+        font-weight: 500;
+        color: #374151;
+        padding: 12px 16px;
+        border-radius: 8px;
+        transition: all 0.2s ease;
+    }
+
+    .gradio-radio input:checked + label {
+        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
+        color: white;
+        box-shadow: 0 2px 8px rgba(102, 126, 234, 0.3);
+    }
+
+    /* Accordion Styling */
+    .gradio-accordion {
+        border: 2px solid #e2e8f0 !important;
+        border-radius: 12px !important;
+        background: white !important;
+        margin: 1rem 0 !important;
+    }
+
+    .gradio-accordion summary {
+        background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%) !important;
+        padding: 1rem 1.5rem !important;
+        border-radius: 10px !important;
+        font-weight: 600 !important;
+        color: #374151 !important;
+    }
+
+    /* Status Display */
+    .status-display {
+        background: linear-gradient(135deg, #ecfdf5 0%, #d1fae5 100%);
+        border: 2px solid #10b981;
+        border-radius: 12px;
+        padding: 1rem;
+        color: #065f46;
+        font-weight: 500;
+    }
+
+    /* Output Text Area */
+    .output-text {
+        background: linear-gradient(135deg, #fefce8 0%, #fef3c7 100%) !important;
+        border: 2px solid #f59e0b !important;
+        border-radius: 12px !important;
+        font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace !important;
+        font-size: 14px !important;
+    }
+
+    /* Slider Styling */
+    .gradio-slider input[type="range"] {
+        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
+        border-radius: 8px !important;
+    }
+
+    /* Tab Styling */
+    .gradio-tabs {
+        border-radius: 12px !important;
+        overflow: hidden !important;
+    }
+
+    .gradio-tab-nav {
+        background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%) !important;
+        border-bottom: 2px solid #e2e8f0 !important;
+    }
+
+    .gradio-tab-nav button {
+        border-radius: 8px 8px 0 0 !important;
+        font-weight: 500 !important;
+        padding: 12px 24px !important;
+    }
+
+    .gradio-tab-nav button.selected {
+        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
+        color: white !important;
+    }
+
+    /* Examples Styling */
+    .gradio-examples {
+        background: white;
+        border-radius: 12px;
+        padding: 1.5rem;
+        border: 2px solid #e2e8f0;
+    }
+
+    /* Footer Styling */
+    .footer-content {
+        background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%);
+        color: #374151;
+        padding: 2rem;
+        border-radius: 16px;
+        margin-top: 2rem;
+        border: 2px solid #e2e8f0;
+        box-shadow: 0 4px 16px rgba(0,0,0,0.04);
+    }
+
+    .footer-content h3 {
+        color: #1f2937;
+        border-bottom: 2px solid #667eea;
+        padding-bottom: 0.5rem;
+        margin-bottom: 1rem;
+        font-weight: 600;
+    }
+
+    /* Responsive Design */
+    @media (max-width: 768px) {
+        .header-container h1 {
+            font-size: 2rem;
+        }
+
+        .main-card, .output-card {
+            padding: 1rem;
+        }
+
+        .gradio-button {
+            padding: 12px 24px !important;
+            font-size: 14px !important;
+        }
+    }
+    """
+
+    with gr.Blocks(
+        title="LinkedIn Smart Auto-Complete | Professional AI Writing Assistant",
+        theme=gr.themes.Soft(),
+        css=custom_css,
+    ) as interface:
+        # Professional Header
+        with gr.Row(elem_classes=["header-container"]):
+            gr.HTML("""
+            <div style="text-align: center;">
+                <h1>🚀 LinkedIn Smart Auto-Complete</h1>
+                <p style="font-size: 1.2rem; margin-bottom: 0.5rem;">Professional AI Writing Assistant</p>
+                <p style="opacity: 0.9;">Transform your ideas into compelling LinkedIn content with AI-powered intelligence</p>
+                <div style="margin-top: 1rem; padding: 1rem; background: rgba(255,255,255,0.1); border-radius: 8px; border-left: 4px solid #fbbf24;">
+                    <strong>💡 Pro Tip:</strong> Add your OpenAI API key in Settings for unlimited personal usage
+                </div>
+            </div>
+            """)
 
         with gr.Row():
-            with gr.Column(scale=2):
-                # Context selection
+            with gr.Column(scale=2, elem_classes=["main-card"]):
+                # Professional Context Selection
+                gr.Markdown("### 🎯 **Choose Your Writing Context**")
                 context_selector = gr.Radio(
                     choices=[
-                        ("📧 Email Writing", "email"),
-                        ("✍️ Creative Writing", "creative"),
-                        ("💼 LinkedIn Content", "linkedin"),
+                        ("📧 Professional Email", "email"),
+                        ("✍️ Creative Content", "creative"),
+                        ("💼 LinkedIn Post", "linkedin"),
                     ],
                     value="linkedin",
-                    label="Select Context",
+                    label="",
                     elem_classes=["context-selector"],
                 )
 
-                # User context input
+                gr.Markdown("---")
+
+                # Enhanced Context Input
+                gr.Markdown("### 📋 **Reference Information** *(Optional)*")
                 context_input = gr.Textbox(
-                    label="📝 Reference Information (Optional)",
-                    placeholder="Add any background information, previous context, or references that should inform the writing...",
+                    label="",
+                    placeholder="💡 Add background info, company details, industry context, or previous conversations to enhance AI understanding...",
                     lines=4,
                     elem_classes=["context-input"],
                 )
 
-                # Main text input
+                # Professional Main Input
+                gr.Markdown("### ✍️ **Your Content**")
                 text_input = gr.Textbox(
-                    label="✏️ Enter your text here...",
-                    placeholder="Enter your text and click Submit to get suggestions!",
+                    label="",
+                    placeholder="🚀 Start typing your content here... The AI will intelligently complete your thoughts!",
                     lines=8,
                     elem_classes=["main-input"],
                 )
 
-                # Submit button
+                # Enhanced Submit Button
                 submit_btn = gr.Button(
-                    "🚀 Get Suggestions", variant="primary", size="lg"
+                    " Generate AI Completion",
+                    variant="primary",
+                    size="lg",
+                    elem_classes=["primary-button"],
                 )
 
-                # Settings
-                with gr.Accordion("⚙️ Settings", open=False):
+                # Professional Settings
+                with gr.Accordion("⚙️ **Advanced Settings**", open=False):
                     # API Key Configuration
                     with gr.Group():
-                        gr.Markdown("### 🔑 API Configuration")
+                        gr.Markdown("### 🔑 **API Configuration**")
+                        gr.Markdown(
+                            "*Secure your own OpenAI quota for unlimited usage*"
+                        )
+
                         openai_key_input = gr.Textbox(
-                            label="OpenAI API Key (Optional)",
-                            placeholder="sk-... (Enter your OpenAI API key to use your own quota)",
+                            label="OpenAI API Key",
+                            placeholder="sk-proj-... (Paste your OpenAI API key here)",
                             type="password",
                             value="",
-                            info="Your API key is only used for this session and not stored permanently.",
+                            info="🔒 Your API key is encrypted and only used for this session. Never stored permanently.",
                         )
 
                         api_status = gr.Textbox(
-                            label="API Status",
-                            value="Using default configuration"
+                            label="Connection Status",
+                            value="Using default configuration"
                             if settings.OPENAI_API_KEY
-                            else "No API key configured",
+                            else "⚠️ No API key configured - using shared quota",
                            interactive=False,
                            lines=1,
+                            elem_classes=["status-display"],
                        )
 
-                        test_api_btn = gr.Button("🧪 Test API Connection", size="sm")
+                        test_api_btn = gr.Button(
+                            "🧪 Test Connection", size="sm", elem_classes=["secondary"]
+                        )
 
                    gr.Markdown("---")
 
-                    # Output Settings
+                    # Enhanced Output Settings
+                    gr.Markdown("### 📏 **Output Configuration**")
                    output_length = gr.Slider(
                        minimum=50,
                        maximum=500,
                        value=150,
                        step=10,
-                        label="Output Length (tokens)",
+                        label="Response Length (tokens)",
+                        info="Adjust the length of AI-generated content",
                    )
 
-                    gr.Checkbox(label="Show debug information", value=False)
+                    gr.Markdown("### 🔧 **Debug Options**")
+                    gr.Checkbox(
+                        label="Enable detailed logging",
+                        value=False,
+                        info="Show technical details for troubleshooting",
+                    )
 
                    # Context Prompt Editor
                    with gr.Accordion("🔧 Edit Context Prompts", open=False):
@@ -393,47 +612,68 @@ def create_interface():
                            placeholder="Enter the user message template...",
                        )
 
-            with gr.Column(scale=1):
-                # Status display
+            with gr.Column(scale=1, elem_classes=["output-card"]):
+                # Professional Status Display
+                gr.Markdown("### 📊 **AI Assistant Status**")
                status_display = gr.Textbox(
-                    label="📊 Status",
-                    value="Ready to help! Start typing...",
+                    label="",
+                    value="🤖 Ready to assist! Choose your context and start writing...",
                    interactive=False,
-                    lines=2,
+                    lines=3,
+                    elem_classes=["status-display"],
                )
 
-                # Copyable textbox for suggestions (only output)
+                gr.Markdown("---")
+
+                # Enhanced Output Area
+                gr.Markdown("### 📝 **Generated Content**")
+                gr.Markdown("*AI-powered completion will appear below*")
+
                copy_textbox = gr.Textbox(
-                    label="📋 Generated Text (Select All and Copy with Ctrl+C/Cmd+C)",
-                    placeholder="Generated suggestions will appear here for easy copying...",
-                    lines=8,
-                    max_lines=15,
+                    label="",
+                    placeholder=" Your AI-generated content will appear here...\n\n📋 Simply select all text (Ctrl+A/Cmd+A) and copy (Ctrl+C/Cmd+C) to use in your LinkedIn post or email!",
+                    lines=12,
+                    max_lines=20,
                    interactive=True,
                    visible=False,
+                    elem_classes=["output-text"],
                )
 
-                # Demo section
-                with gr.Accordion("🎯 Try These Examples", open=False):
+                # Quick Action Buttons (Future Enhancement)
+                with gr.Row(visible=False):
+                    gr.Button(
+                        "📋 Copy to Clipboard", size="sm", elem_classes=["secondary"]
+                    )
+                    gr.Button("🔄 Regenerate", size="sm", elem_classes=["secondary"])
+                    gr.Button("✏️ Edit & Refine", size="sm", elem_classes=["secondary"])
+
+                # Professional Examples Section
+                with gr.Accordion("🎯 **Try These Professional Examples**", open=False):
+                    gr.Markdown("""
+                    ### 🚀 **Quick Start Templates**
+                    *Click any example below to instantly populate the form and see AI in action!*
+                    """)
+
                    gr.Examples(
                        examples=[
                            [
-                                "Meeting scheduled for next Tuesday to discuss the quarterly budget review",
-                                "Dear Mr. Johnson,\n\nI hope this email finds you well. I wanted to follow up on",
+                                "Quarterly budget review meeting with stakeholders, discussing Q4 performance metrics and 2024 planning initiatives",
+                                "Dear Mr. Johnson,\n\nI hope this email finds you well. Following our discussion yesterday, I wanted to confirm our meeting details for the quarterly budget review",
                                "email",
                            ],
                            [
-                                "Fantasy adventure story with magical creatures and brave heroes",
-                                "Once upon a time, in a kingdom far away, there lived a",
+                                "Epic fantasy adventure featuring a young mage discovering ancient powers in a world where magic and technology collide",
+                                "In the neon-lit streets of Neo-Arcanum, where holographic spells danced alongside digital billboards, Zara clutched her grandmother's ancient grimoire and whispered",
                                "creative",
                            ],
                            [
-                                "Professional networking and career development",
-                                "Excited to share my thoughts on the future of AI in our industry",
+                                "Sharing insights about AI transformation in the financial services industry, highlighting successful implementation strategies and future trends",
+                                "🚀 Excited to share key insights from our recent AI transformation journey at FinTech Solutions! After 18 months of implementation, here's what we've learned about",
                                "linkedin",
                            ],
                        ],
                        inputs=[context_input, text_input, context_selector],
-                        label="Click any example to try it out!",
+                        label="",
                    )
 
        # Event handlers
@@ -527,39 +767,67 @@ def create_interface():
            outputs=[status_display, copy_textbox],
        )
 
-        # Footer
-        gr.Markdown("""
-        ---
-
-        ### 🎮 How to Use:
-        1. **Add your API key** (optional) - Enter your OpenAI API key in Settings to use your own quota
-        2. **Select your context** (Email, Creative, or LinkedIn)
-        3. **Add context information** (optional) - background info, references, or previous context
-        4. **Enter your text** in the main text area
-        5. **Adjust output length** (50-500 tokens) in settings
-        6. **Customize prompts** (optional) - edit AI prompts in "Edit Context Prompts" section
-        7. **Click "Get Suggestions"** to generate completions
-        8. **Copy from the generated text box** (Select All + Ctrl+C/Cmd+C)
-
-        ### 🌟 Pro Tips:
-        - **API Key**: Add your own OpenAI API key to use your personal quota and avoid rate limits
-        - **Context Window**: Add background info, previous conversations, or references to improve suggestions
-        - **Email**: Try starting with "Dear..." or "I hope..." + add meeting context
-        - **Creative**: Start with "Once upon a time..." + add story background
-        - **LinkedIn**: Perfect for professional posts, career updates, industry insights + add professional context
-        - **Output Length**: Adjust the token slider for longer or shorter completions
-        - **Custom Prompts**: Edit the AI prompts to customize behavior for your specific needs
-
-        ### 🔧 Built With:
-        - **Gradio** for the beautiful interface
-        - **OpenAI GPT** for intelligent completions
-        - **Python** for robust backend processing
-
-        ---
-        <div style='text-align: center; color: #666;'>
-        Made with ❤️ for writers, developers, and creators everywhere
-        </div>
-        """)
+        # Professional Footer
+        with gr.Row(elem_classes=["footer-content"]):
+            gr.HTML("""
+            <div style="width: 100%;">
+                <div style="display: grid; grid-template-columns: 1fr 1fr 1fr; gap: 2rem; margin-bottom: 2rem;">
+
+                    <div>
+                        <h3>🎮 Quick Start Guide</h3>
+                        <ol style="line-height: 1.8; color: #4b5563;">
+                            <li><strong>Choose Context:</strong> Select Email, Creative, or LinkedIn</li>
+                            <li><strong>Add Context:</strong> Include background information (optional)</li>
+                            <li><strong>Enter Text:</strong> Start typing your content</li>
+                            <li><strong>Generate:</strong> Click the AI completion button</li>
+                            <li><strong>Copy & Use:</strong> Select all and copy the result</li>
+                        </ol>
+                    </div>
+
+                    <div>
+                        <h3>🌟 Pro Features</h3>
+                        <ul style="line-height: 1.8; color: #4b5563; list-style: none; padding: 0;">
+                            <li>🔑 <strong>Personal API Key:</strong> Unlimited usage with your OpenAI account</li>
+                            <li>📏 <strong>Custom Length:</strong> Adjust output from 50-500 tokens</li>
+                            <li>🎯 <strong>Context-Aware:</strong> AI adapts to your specific writing needs</li>
+                            <li>⚡ <strong>Real-time:</strong> Instant AI-powered completions</li>
+                            <li>🔧 <strong>Customizable:</strong> Edit prompts for personalized results</li>
+                        </ul>
+                    </div>
+
+                    <div>
+                        <h3>💡 Expert Tips</h3>
+                        <ul style="line-height: 1.8; color: #4b5563; list-style: none; padding: 0;">
+                            <li>📧 <strong>Email:</strong> Start with greetings, add meeting context</li>
+                            <li>✍️ <strong>Creative:</strong> Set the scene, describe characters</li>
+                            <li>💼 <strong>LinkedIn:</strong> Include industry keywords, hashtags</li>
+                            <li>🎯 <strong>Context:</strong> More background = better results</li>
+                            <li>🔄 <strong>Iterate:</strong> Refine prompts for perfect output</li>
+                        </ul>
+                    </div>
+                </div>
+
+                <div style="border-top: 1px solid #cbd5e1; padding-top: 2rem; text-align: center;">
+                    <div style="display: flex; justify-content: center; align-items: center; gap: 2rem; margin-bottom: 1rem;">
+                        <div style="display: flex; align-items: center; gap: 0.5rem; color: #1f2937;">
+                            <span style="font-size: 1.2rem;">🚀</span>
+                            <strong>Powered by OpenAI GPT</strong>
+                        </div>
+                        <div style="display: flex; align-items: center; gap: 0.5rem; color: #1f2937;">
+                            <span style="font-size: 1.2rem;">⚡</span>
+                            <strong>Built with Gradio</strong>
+                        </div>
+                        <div style="display: flex; align-items: center; gap: 0.5rem; color: #1f2937;">
+                            <span style="font-size: 1.2rem;">🐍</span>
+                            <strong>Python Backend</strong>
+                        </div>
+                    </div>
+                    <p style="color: #6b7280; margin: 0; font-size: 0.9rem;">
+                        Made with ❤️ for professionals, creators, and innovators worldwide
+                    </p>
+                </div>
+            </div>
+            """)
 
  return interface
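
The restyling above hangs on two Gradio hooks: the css= argument to gr.Blocks and elem_classes on individual components. A minimal standalone sketch of that pattern; the component, class name, and CSS rule here are illustrative, not taken from app.py:

import gradio as gr

demo_css = """
.status-display { border: 2px solid #10b981; border-radius: 12px; padding: 1rem; }
"""

with gr.Blocks(css=demo_css) as demo:
    # elem_classes attaches the class, so the .status-display rule above styles this textbox
    gr.Textbox(value="Ready", interactive=False, elem_classes=["status-display"])

if __name__ == "__main__":
    demo.launch()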
 
config/settings.py CHANGED
@@ -54,7 +54,7 @@ class AppSettings:
         )
 
         # Model Configuration
-        self.OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-3.5-turbo")
+        self.OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
         self.ANTHROPIC_MODEL = os.getenv("ANTHROPIC_MODEL", "claude-3-haiku-20240307")
 
         # Temperature settings for different contexts
settings.py CHANGED
@@ -54,7 +54,7 @@ class AppSettings:
         )
 
         # Model Configuration
-        self.OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-3.5-turbo")
+        self.OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
         self.ANTHROPIC_MODEL = os.getenv("ANTHROPIC_MODEL", "claude-3-haiku-20240307")
 
  # Temperature settings for different contexts
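
The api_client.py changes below call settings.get_model_for_provider(...), a helper that is not shown in this commit. A minimal sketch of what such a method on AppSettings might look like, assuming it does nothing more than map a provider name onto the attributes configured above:

    def get_model_for_provider(self, provider: str) -> str:
        """Hypothetical helper: return the configured model for 'openai' or 'anthropic'."""
        models = {
            "openai": self.OPENAI_MODEL,
            "anthropic": self.ANTHROPIC_MODEL,
        }
        return models.get(provider.lower(), self.OPENAI_MODEL)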
src/api_client.py CHANGED
@@ -37,6 +37,22 @@ class APIClient:
 
         self._initialize_clients()
 
+    def _get_token_param_name(self, model: str) -> str:
+        """
+        Get the correct token parameter name based on the model
+
+        Args:
+            model: The model name
+
+        Returns:
+            The correct parameter name ('max_tokens' or 'max_completion_tokens')
+        """
+        # o3 models and newer reasoning models use max_completion_tokens
+        if model.startswith(("o3", "o1")):
+            return "max_completion_tokens"
+        # All other models use max_tokens
+        return "max_tokens"
+
     def _initialize_clients(self):
         """Initialize API clients based on available keys"""
         try:
@@ -130,17 +146,44 @@
     ) -> Optional[str]:
         """Get completion from OpenAI API"""
         try:
-            response = self.openai_client.chat.completions.create(
-                model="gpt-3.5-turbo",  # Can be made configurable
-                messages=messages,
-                temperature=temperature,
-                max_tokens=max_tokens,
-                n=1,
-                stop=None,
-                presence_penalty=0.1,
-                frequency_penalty=0.1,
+            # Get model from settings
+            model = (
+                self.settings.get_model_for_provider("openai")
+                if self.settings
+                else "gpt-4o-mini"
             )
 
+            logger.debug(f"Using OpenAI model: {model}")
+
+            # Get the correct token parameter name for this model
+            token_param = self._get_token_param_name(model)
+            logger.debug(f"Using token parameter: {token_param} = {max_tokens}")
+
+            # Build the request parameters
+            request_params = {
+                "model": model,
+                "messages": messages,
+                token_param: max_tokens,  # Use the correct parameter name
+                "n": 1,
+                "stop": None,
+            }
+
+            # Only add temperature for non-reasoning models
+            # o3 and o1 models use default temperature (1.0) and don't accept custom values
+            if not model.startswith(("o3", "o1")):
+                request_params["temperature"] = temperature
+                logger.debug(f"Using custom temperature: {temperature}")
+            else:
+                logger.debug(f"Using default temperature for reasoning model {model}")
+
+            # Only add presence_penalty and frequency_penalty for non-reasoning models
+            # o3 and o1 models don't support these parameters
+            if not model.startswith(("o3", "o1")):
+                request_params["presence_penalty"] = 0.1
+                request_params["frequency_penalty"] = 0.1
+
+            response = self.openai_client.chat.completions.create(**request_params)
+
             self._update_request_stats()
 
  if response.choices and len(response.choices) > 0:
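
To illustrate the branching above: roughly what request_params ends up containing for a standard chat model versus an o1/o3-style reasoning model (the messages and values here are illustrative):

# Standard chat model: _get_token_param_name() returns "max_tokens",
# and the sampling knobs are included.
standard_params = {
    "model": "gpt-4o-mini",
    "messages": [{"role": "user", "content": "Draft a follow-up email about the budget review"}],
    "max_tokens": 150,
    "n": 1,
    "stop": None,
    "temperature": 0.7,
    "presence_penalty": 0.1,
    "frequency_penalty": 0.1,
}

# Reasoning model: _get_token_param_name() returns "max_completion_tokens",
# and temperature/penalties are omitted because these models do not accept them.
reasoning_params = {
    "model": "o1-mini",
    "messages": [{"role": "user", "content": "Draft a follow-up email about the budget review"}],
    "max_completion_tokens": 150,
    "n": 1,
    "stop": None,
}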
 
@@ -174,9 +217,18 @@ class APIClient:
                 else:
                     user_messages.append(msg)
 
+            # Get model from settings
+            model = (
+                self.settings.get_model_for_provider("anthropic")
+                if self.settings
+                else "claude-3-haiku-20240307"
+            )
+
+            logger.debug(f"Using Anthropic model: {model}")
+
             # Create the completion request
             response = self.anthropic_client.messages.create(
-                model="claude-3-haiku-20240307",  # Can be made configurable
+                model=model,
                 max_tokens=max_tokens,
                 temperature=temperature,
                 system=system_message,