|
<!doctype html>
<html lang="en">
|
<head> |
|
<meta charset="UTF-8"> |
|
<meta name="viewport" content="width=device-width, initial-scale=1.0"> |
|
<title>AI Chat Assistant - Gradio Lite</title> |
|
<script type="module" crossorigin src="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.js"></script> |
|
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css" /> |
|
<style> |
|
/* Reset: strip default spacing and normalize the box model. */
* {
  margin: 0;
  padding: 0;
  box-sizing: border-box;
}

/* Full-viewport gradient backdrop; centers the card column. */
body {
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif;
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
  min-height: 100vh;
  display: flex;
  flex-direction: column;
  align-items: center;
  padding: 20px;
}

/* Main white card that holds the whole app. */
.container {
  width: 100%;
  max-width: 1200px;
  background: white;
  border-radius: 20px;
  box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3);
  overflow: hidden;
  animation: slideIn 0.5s ease-out;
}

/* Entrance animation for the card. */
@keyframes slideIn {
  from {
    opacity: 0;
    transform: translateY(30px);
  }
  to {
    opacity: 1;
    transform: translateY(0);
  }
}

/* Gradient banner at the top of the card. */
.header {
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
  color: white;
  padding: 30px;
  text-align: center;
}

.header h1 {
  font-size: 2.5rem;
  margin-bottom: 10px;
  font-weight: 700;
  text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.2);
}

.header p {
  font-size: 1.1rem;
  opacity: 0.95;
  margin-bottom: 20px;
}

/* Row of feature "pills" under the header copy. */
.features {
  display: flex;
  justify-content: center;
  gap: 30px;
  margin-top: 20px;
  flex-wrap: wrap;
}

.feature {
  display: flex;
  align-items: center;
  gap: 8px;
  background: rgba(255, 255, 255, 0.2);
  padding: 8px 16px;
  border-radius: 20px;
  /* Safari still requires the -webkit- prefix for backdrop-filter. */
  -webkit-backdrop-filter: blur(10px);
  backdrop-filter: blur(10px);
}

.feature-icon {
  font-size: 1.2rem;
}

/* Region where the gradio-lite app renders. */
.app-container {
  padding: 40px;
  background: #f8f9fa;
  min-height: 600px;
}

/* Static placeholder shown while the model downloads. */
.loading-message {
  text-align: center;
  padding: 40px;
  color: #666;
}

.loading-spinner {
  display: inline-block;
  width: 50px;
  height: 50px;
  border: 3px solid rgba(0, 0, 0, 0.1);
  border-radius: 50%;
  border-top-color: #667eea;
  animation: spin 1s ease-in-out infinite;
  margin-bottom: 20px;
}

@keyframes spin {
  to { transform: rotate(360deg); }
}

/* Responsive grid of explanatory cards. */
.info-cards {
  display: grid;
  grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
  gap: 20px;
  padding: 20px;
  margin-bottom: 30px;
}

.info-card {
  background: white;
  border-radius: 12px;
  padding: 20px;
  box-shadow: 0 2px 10px rgba(0, 0, 0, 0.08);
  transition: transform 0.3s ease, box-shadow 0.3s ease;
}

/* Lift-on-hover effect for cards. */
.info-card:hover {
  transform: translateY(-5px);
  box-shadow: 0 5px 20px rgba(0, 0, 0, 0.12);
}

.info-card h3 {
  color: #667eea;
  margin-bottom: 10px;
  font-size: 1.2rem;
}

.info-card p {
  color: #666;
  line-height: 1.6;
}

/* Tighten spacing on narrow screens. */
@media (max-width: 768px) {
  .header h1 {
    font-size: 1.8rem;
  }

  .header p {
    font-size: 1rem;
  }

  .features {
    gap: 10px;
  }

  .app-container {
    padding: 20px;
  }
}

/* Small gradient chips naming the technologies used. */
.tech-badge {
  display: inline-block;
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
  color: white;
  padding: 4px 12px;
  border-radius: 12px;
  font-size: 0.85rem;
  margin: 5px;
}

/* Ensure the custom element fills its container. */
gradio-lite {
  width: 100%;
  min-height: 500px;
}
|
</style> |
|
</head> |
|
<body> |
|
<div class="container"> |
|
<div class="header"> |
|
<h1>🤖 AI Chat Assistant</h1> |
|
<p>Powered by Gemma 270M Model &amp; Hugging Face Transformers</p>
|
<div class="features"> |
|
<div class="feature"> |
|
<span class="feature-icon">⚡</span> |
|
<span>Fast Inference</span> |
|
</div> |
|
<div class="feature"> |
|
<span class="feature-icon">🔒</span> |
|
<span>100% Browser-Based</span> |
|
</div> |
|
<div class="feature"> |
|
<span class="feature-icon">🌐</span> |
|
<span>No Server Required</span> |
|
</div> |
|
</div> |
|
</div> |
|
|
|
<div class="info-cards"> |
|
<div class="info-card"> |
|
<h3>💡 How It Works</h3> |
|
<p>This application runs entirely in your browser using WebAssembly and Pyodide. The AI model is loaded directly in your browser for complete privacy and offline capability.</p> |
|
</div> |
|
<div class="info-card"> |
|
<h3>🚀 Technologies Used</h3> |
|
<p> |
|
<span class="tech-badge">Gradio Lite</span> |
|
<span class="tech-badge">Hugging Face</span> |
|
<span class="tech-badge">ONNX Runtime</span> |
|
<span class="tech-badge">WebAssembly</span> |
|
</p> |
|
</div> |
|
<div class="info-card"> |
|
<h3>📝 Getting Started</h3> |
|
<p>Simply type your message in the chat interface below. The AI will process your request locally and generate a response using the Gemma model.</p> |
|
</div> |
|
</div> |
|
|
|
<div class="app-container"> |
|
<div class="loading-message" role="status" aria-live="polite">
  <div class="loading-spinner" aria-hidden="true"></div>
  <h3>Loading AI Model...</h3>
  <p>This may take 10-30 seconds on first load as we download and initialize the model.</p>
</div>
|
|
|
<gradio-lite theme="light"> |
|
<gradio-requirements> |
|
transformers_js_py |
|
</gradio-requirements> |
|
|
|
<gradio-file name="app.py" entrypoint> |
|
import gradio as gr
from transformers_js import import_transformers_js
import asyncio  # NOTE(review): not referenced anywhere below — confirm before removing

# Bridge into transformers.js (the JavaScript library) from Python.
# Top-level `await` is valid here because gradio-lite executes this file
# inside an async Pyodide context.
transformers = await import_transformers_js()
pipeline = transformers.pipeline
TextStreamer = transformers.TextStreamer  # NOTE(review): unused in this file — confirm before removing

# Initialize the model pipeline.
# Downloads the ONNX model into the browser on first run, so this can take a while.
print("Loading model... This may take a moment.")
generator = await pipeline(
    "text-generation",
    "onnx-community/gemma-3-270m-it-ONNX",
    {
        # fp32 weights: bigger download, but broadest WebAssembly compatibility.
        "dtype": "fp32",
    }
)

# Store conversation history
# NOTE(review): never read or written below — chat state actually lives in the
# gr.Chatbot component; confirm before removing.
conversation_history = []
|
|
|
async def chat_with_ai(message, history):
    """Generate an AI reply to ``message`` and append the exchange to ``history``.

    Args:
        message: Raw text from the input textbox.
        history: List of ``[user, assistant]`` pairs currently shown in the Chatbot.

    Returns:
        The updated history list (also mutated in place). On failure the error
        text is shown as the assistant's reply instead of raising.
    """
    # Ignore empty *and* whitespace-only submissions instead of wasting a model call.
    if not message or not message.strip():
        return history

    # Rebuild the whole transcript in the chat-messages format the model expects.
    messages = [
        {"role": "system", "content": "You are a helpful AI assistant. Provide clear, concise, and informative responses."},
    ]

    # Add conversation history
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})

    # Add current message
    messages.append({"role": "user", "content": message})

    try:
        # transformers.js pipeline call: generation options go in the second argument.
        output = await generator(messages, {
            "max_new_tokens": 256,
            "temperature": 0.7,
            "do_sample": True,
            "top_p": 0.9,
        })

        # The pipeline echoes the full conversation; the last entry is the new reply.
        response = output[0]["generated_text"][-1]["content"]

        history.append([message, response])
        return history

    except Exception as e:
        # Surface the failure inside the chat window rather than crashing the UI.
        error_msg = f"Error generating response: {str(e)}"
        history.append([message, error_msg])
        return history
|
|
|
def clear_chat():
    """Reset the chatbot display by handing Gradio an empty history list."""
    empty_history = []
    return empty_history
|
|
|
# Create the Gradio interface
with gr.Blocks(title="AI Chat Assistant") as demo:
    # Intro banner rendered inside the Gradio app itself.
    gr.Markdown(
        """
        # 🤖 AI Chat Assistant
        Chat with an AI powered by the Gemma model running entirely in your browser!
        """
    )

    # Conversation display; receives the history list returned by chat_with_ai.
    chatbot = gr.Chatbot(
        height=400,
        placeholder="Start chatting with the AI assistant...",
        # NOTE(review): bubble_full_width is deprecated in newer Gradio releases —
        # verify against the Gradio version bundled with this lite.js build.
        bubble_full_width=False,
    )

    # Input row: textbox plus explicit send button.
    with gr.Row():
        msg = gr.Textbox(
            placeholder="Type your message here and press Enter...",
            label="Your Message",
            lines=2,
            scale=4
        )
        submit_btn = gr.Button("Send", variant="primary", scale=1)

    with gr.Row():
        clear_btn = gr.Button("🗑️ Clear Chat", variant="secondary")

    # Clickable sample prompts that populate the textbox.
    gr.Examples(
        examples=[
            "What is machine learning?",
            "Write a short poem about technology",
            "Explain quantum computing in simple terms",
            "What are the benefits of renewable energy?",
            "How does the internet work?",
        ],
        inputs=msg,
        label="Example Questions"
    )

    # Set up event handlers.
    # NOTE(review): two listeners are registered per event (generate reply, then
    # clear the textbox); this relies on Gradio running them independently —
    # chaining with .then() would make the ordering explicit. Confirm intended.
    msg.submit(chat_with_ai, [msg, chatbot], chatbot)
    msg.submit(lambda: "", None, msg)

    submit_btn.click(chat_with_ai, [msg, chatbot], chatbot)
    submit_btn.click(lambda: "", None, msg)

    # Clearing replaces the chatbot history with an empty list.
    clear_btn.click(clear_chat, None, chatbot)

    gr.Markdown(
        """
        ---
        **Note:** This model runs entirely in your browser. No data is sent to any server.
        Initial loading may take some time as the model is downloaded and initialized.
        """
    )

# Start the app (inside gradio-lite this renders into the host page).
demo.launch()
|
</gradio-file> |
|
</gradio-lite> |
|
</div> |
|
</div> |
|
|
|
<script> |
|
|
|
// Hide the static "Loading AI Model..." placeholder once the gradio-lite app
// has actually rendered, rather than after a fixed 5-second delay — the page
// itself states the model download can take 10-30 seconds, so the old timer
// hid the message while loading was still in progress.
window.addEventListener('load', () => {
  const loadingMsg = document.querySelector('.loading-message');
  if (!loadingMsg) {
    return;
  }

  const hideLoadingMessage = () => {
    loadingMsg.style.display = 'none';
  };

  const app = document.querySelector('gradio-lite');
  if (app && 'MutationObserver' in window) {
    // NOTE(review): assumes gradio-lite injects a .gradio-container element
    // when the app is ready — verify against the lite.js version in use.
    const observer = new MutationObserver(() => {
      if (document.querySelector('.gradio-container')) {
        hideLoadingMessage();
        observer.disconnect();
      }
    });
    observer.observe(app, { childList: true, subtree: true });
  }

  // Fallback: never leave the placeholder up forever if detection fails.
  setTimeout(hideLoadingMessage, 60000);
});
|
</script> |
|
</body> |
|
</html> |