File size: 4,290 Bytes
7500954
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
import gradio as gr
import torch
from transformers import pipeline

def initialize_model():
    """Load the text-generation pipeline, preferring GPU with a CPU fallback.

    Returns:
        The ready-to-use ``transformers`` "text-generation" pipeline.

    Raises:
        Exception: re-raised if loading fails on CPU (no further fallback).
    """
    model_id = "akhaliq/gemma-3-270m-gradio-coder"

    def _build(device):
        # Single construction path shared by the first attempt and the
        # CPU fallback, so the model id / task can't drift apart.
        return pipeline("text-generation", model=model_id, device=device)

    # Check if CUDA is available, otherwise fall back to CPU
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")

    try:
        return _build(device)
    except Exception as e:
        print(f"Error loading model: {e}")
        if device == "cuda":
            # GPU load failed (e.g. OOM, driver mismatch) — retry on CPU.
            print("Falling back to CPU...")
            return _build("cpu")
        # Already on CPU: nothing left to try. Bare `raise` preserves the
        # original traceback (the original `raise e` re-raised the same
        # exception but is less idiomatic).
        raise

# Initialize the model globally: one pipeline instance, loaded at import
# time, is shared by every chat request handled by chat_response().
print("Loading model...")
generator = initialize_model()
print("Model loaded successfully!")

def chat_response(message, history):
    """Generate a single-turn model reply for *message*.

    Note: *history* is accepted for chat-interface compatibility but is not
    fed to the model — each turn is generated independently.
    """
    try:
        # The pipeline expects a list of chat-style role/content dicts.
        chat_turn = [{"role": "user", "content": message}]

        outputs = generator(
            chat_turn,
            max_new_tokens=128,
            return_full_text=False,
            do_sample=True,
            temperature=0.7,
            pad_token_id=generator.tokenizer.eos_token_id,
        )
        # Only the first (and only) candidate is used.
        return outputs[0]["generated_text"]
    except Exception as e:
        # Surface failures as a chat message rather than crashing the UI.
        return f"Sorry, I encountered an error: {str(e)}"

# Create the Gradio interface
def create_chatbot():
    """Build the Gradio Blocks chat UI wired to chat_response; return it."""

    # Custom CSS for better styling
    css = """
    .gradio-container {
        max-width: 800px !important;
        margin: auto !important;
    }
    .chat-message {
        padding: 10px !important;
        margin: 5px !important;
        border-radius: 10px !important;
    }
    """

    def handle_send(user_text, conversation):
        # Ignore empty / whitespace-only submissions; leave history intact.
        if not user_text.strip():
            return conversation, ""
        reply = chat_response(user_text, conversation)
        conversation.append((user_text, reply))
        # Second return value clears the input textbox.
        return conversation, ""

    def handle_clear():
        # Reset both the chat history and the textbox.
        return [], ""

    with gr.Blocks(css=css, title="AI Chatbot") as demo:
        gr.Markdown("# 🤖 AI Chatbot")
        gr.Markdown("*Powered by Gemma-3-270m model via Transformers*")

        chatbot = gr.Chatbot(
            height=500, bubble_full_width=False, show_label=False
        )

        with gr.Row():
            msg = gr.Textbox(
                placeholder="Type your message here...",
                show_label=False,
                scale=4,
            )
            send_btn = gr.Button("Send", scale=1, variant="primary")
            clear_btn = gr.Button("Clear", scale=1)

        # Clickable example prompts that populate the textbox.
        gr.Examples(
            examples=[
                "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?",
                "What's the most important lesson you've learned in life?",
                "How do you think AI will change the world in the next 10 years?",
                "What would you do if you had unlimited resources for one day?"
            ],
            inputs=msg,
        )

        # Enter key and Send button share one handler.
        msg.submit(handle_send, [msg, chatbot], [chatbot, msg])
        send_btn.click(handle_send, [msg, chatbot], [chatbot, msg])
        clear_btn.click(handle_clear, None, [chatbot, msg])

    return demo

if __name__ == "__main__":
    # Build the UI, then serve it on the default Gradio port.
    print("Creating Gradio interface...")
    app = create_chatbot()

    print("Starting Gradio server...")
    app.launch(
        share=False,            # flip to True for a temporary public URL
        server_name="0.0.0.0",  # listen on all interfaces
        server_port=7860,
        show_error=True,
    )