akhaliq HF Staff commited on
Commit
7500954
·
verified ·
1 Parent(s): 04f7efd

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +137 -0
app.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from transformers import pipeline
4
+
5
def initialize_model(model_name="akhaliq/gemma-3-270m-gradio-coder"):
    """Create a text-generation pipeline, preferring GPU with a CPU fallback.

    Args:
        model_name: Hugging Face model id to load. Defaults to the
            gemma-3-270m gradio-coder checkpoint this app was built for
            (new parameter with the original value as default, so existing
            callers are unaffected).

    Returns:
        A transformers text-generation pipeline bound to CUDA when
        available, otherwise to CPU.

    Raises:
        Exception: re-raised from pipeline() when loading fails on CPU
            (there is no further fallback).
    """
    # Prefer the GPU when torch reports one; otherwise run on CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")

    try:
        return pipeline(
            "text-generation",
            model=model_name,
            device=device,
        )
    except Exception as e:
        print(f"Error loading model: {e}")
        # CUDA can report available yet still fail at load time
        # (driver/OOM issues), so retry once on CPU before giving up.
        if device == "cuda":
            print("Falling back to CPU...")
            return pipeline(
                "text-generation",
                model=model_name,
                device="cpu",
            )
        # Bare `raise` preserves the original traceback (unlike `raise e`).
        raise
30
+
31
# Initialize the model globally, at import time: a single shared pipeline
# serves every chat request, and `generator` must exist before any handler
# (chat_response) can run.
print("Loading model...")
generator = initialize_model()
print("Model loaded successfully!")
35
+
36
def chat_response(message, history):
    """Generate the assistant's reply for one chat turn.

    Args:
        message: The user's new message text.
        history: Prior turns as (user_text, assistant_text) pairs — this is
            the format the UI's respond() handler appends to the chatbot
            state in this file.

    Returns:
        The model's reply string, or an apologetic error string when
        generation fails (the UI displays whatever is returned).
    """
    try:
        # Replay prior turns so the model sees conversational context.
        # The previous implementation accepted `history` but ignored it,
        # so the bot had no memory of earlier messages.
        messages = []
        for user_turn, bot_turn in history or []:
            messages.append({"role": "user", "content": user_turn})
            messages.append({"role": "assistant", "content": bot_turn})
        messages.append({"role": "user", "content": message})

        output = generator(
            messages,
            max_new_tokens=128,
            return_full_text=False,  # only the newly generated reply
            do_sample=True,
            temperature=0.7,
            pad_token_id=generator.tokenizer.eos_token_id,
        )[0]

        return output["generated_text"]

    except Exception as e:
        # Boundary handler: surface the failure in the chat window instead
        # of crashing the Gradio event loop.
        return f"Sorry, I encountered an error: {str(e)}"
57
+
58
# Create the Gradio interface
def create_chatbot():
    """Build the Gradio Blocks UI for the chatbot.

    Returns:
        The gr.Blocks demo, ready for .launch().
    """

    # Custom CSS for better styling
    css = """
    .gradio-container {
        max-width: 800px !important;
        margin: auto !important;
    }
    .chat-message {
        padding: 10px !important;
        margin: 5px !important;
        border-radius: 10px !important;
    }
    """

    # Create the chatbot interface
    with gr.Blocks(css=css, title="AI Chatbot") as demo:
        gr.Markdown("# 🤖 AI Chatbot")
        gr.Markdown("*Powered by Gemma-3-270m model via Transformers*")

        # NOTE(review): bubble_full_width is deprecated (and removed in
        # newer Gradio releases) — confirm the pinned Gradio version
        # still accepts it.
        chatbot = gr.Chatbot(
            height=500,
            bubble_full_width=False,
            show_label=False
        )

        with gr.Row():
            msg = gr.Textbox(
                placeholder="Type your message here...",
                show_label=False,
                scale=4
            )
            send_btn = gr.Button("Send", scale=1, variant="primary")
            clear_btn = gr.Button("Clear", scale=1)

        # Example questions users can click to pre-fill the textbox
        gr.Examples(
            examples=[
                "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?",
                "What's the most important lesson you've learned in life?",
                "How do you think AI will change the world in the next 10 years?",
                "What would you do if you had unlimited resources for one day?"
            ],
            inputs=msg
        )

        def respond(message, chat_history):
            """Handle one submit: append a (user, bot) pair, clear the box."""
            # Normalize once so the stripped text is both validated and
            # sent to the model (previously the raw, unstripped message
            # was forwarded after the strip() check).
            message = message.strip()
            if not message:
                return chat_history, ""

            # Get bot response
            bot_message = chat_response(message, chat_history)

            # Add to chat history
            chat_history.append((message, bot_message))
            return chat_history, ""

        def clear_chat():
            """Reset the conversation and the input box."""
            return [], ""

        # Wire the same handler to Enter-to-submit and the Send button.
        msg.submit(respond, [msg, chatbot], [chatbot, msg])
        send_btn.click(respond, [msg, chatbot], [chatbot, msg])
        clear_btn.click(clear_chat, None, [chatbot, msg])

    return demo
126
+
127
+ if __name__ == "__main__":
128
+ print("Creating Gradio interface...")
129
+ demo = create_chatbot()
130
+
131
+ print("Starting Gradio server...")
132
+ demo.launch(
133
+ share=False, # Set to True if you want a public link
134
+ server_name="0.0.0.0", # Allow external connections
135
+ server_port=7860,
136
+ show_error=True
137
+ )