NeoPy committed on
Commit
7daa048
·
verified ·
1 Parent(s): 0049670

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -23
app.py CHANGED
@@ -1,33 +1,108 @@
1
  import gradio as gr
2
  from gradio import ChatMessage
3
- from transformers import load_tool, ReactCodeAgent, HfEngine
4
- from utils import stream_from_transformers_agent
 
5
 
6
- # Import tool from Hub
7
- image_generation_tool = load_tool("m-ric/text-to-image")
8
 
 
9
 
10
- llm_engine = HfEngine("meta-llama/Meta-Llama-3-70B-Instruct")
11
- # Initialize the agent with both tools
12
- agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)
13
-
14
-
15
- def interact_with_agent(prompt, messages):
16
- messages.append(ChatMessage(role="user", content=prompt))
17
- yield messages
18
- for msg in stream_from_transformers_agent(agent, prompt):
19
- messages.append(msg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  yield messages
21
- yield messages
22
 
23
 
24
  with gr.Blocks() as demo:
25
- chatbot = gr.Chatbot(label="Agent",
26
- msg_format="messages",
27
- avatar_images=(None, "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png"))
28
- text_input = gr.Textbox(lines=1, label="Chat Message")
29
- text_input.submit(interact_with_agent, [text_input, chatbot], [chatbot])
30
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
- if __name__ == "__main__":
33
- demo.launch()
 
1
  import gradio as gr
2
  from gradio import ChatMessage
3
+ from typing import Iterator
4
+ import google.generativeai as genai
5
+ import os
6
 
7
+ genai.configure(api_key=os.environ.get("GEMINI_API_KEY"))
 
8
 
9
+ model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-1219")
10
 
11
+ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
12
+ """
13
+ Streams both thoughts and responses from the Gemini model.
14
+ """
15
+ # Initialize response from Gemini
16
+ response = model.generate_content(user_message, stream=True)
17
+
18
+ # Initialize buffers
19
+ thought_buffer = ""
20
+ response_buffer = ""
21
+ thinking_complete = False
22
+
23
+ # Add initial thinking message
24
+ messages.append(
25
+ ChatMessage(
26
+ role="assistant",
27
+ content="",
28
+ metadata={"title": "⏳Thinking: *The thoughts produced by the Gemini2.0 Flash model are experimental"}
29
+ )
30
+ )
31
+
32
+ for chunk in response:
33
+ parts = chunk.candidates[0].content.parts
34
+ current_chunk = parts[0].text
35
+
36
+ if len(parts) == 2 and not thinking_complete:
37
+ # Complete thought and start response
38
+ thought_buffer += current_chunk
39
+ messages[-1] = ChatMessage(
40
+ role="assistant",
41
+ content=thought_buffer,
42
+ metadata={"title": "⏳Thinking: *The thoughts produced by the Gemini2.0 Flash model are experimental"}
43
+ )
44
+
45
+ # Add response message
46
+ messages.append(
47
+ ChatMessage(
48
+ role="assistant",
49
+ content=parts[1].text
50
+ )
51
+ )
52
+ thinking_complete = True
53
+
54
+ elif thinking_complete:
55
+ # Continue streaming response
56
+ response_buffer += current_chunk
57
+ messages[-1] = ChatMessage(
58
+ role="assistant",
59
+ content=response_buffer
60
+ )
61
+
62
+ else:
63
+ # Continue streaming thoughts
64
+ thought_buffer += current_chunk
65
+ messages[-1] = ChatMessage(
66
+ role="assistant",
67
+ content=thought_buffer,
68
+ metadata={"title": "⏳Thinking: *The thoughts produced by the Gemini2.0 Flash model are experimental"}
69
+ )
70
+
71
  yield messages
 
72
 
73
 
74
  with gr.Blocks() as demo:
75
+ gr.Markdown("# Chat with Gemini 2.0 Flash and See its Thoughts 💭")
76
+
77
+ chatbot = gr.Chatbot(
78
+ type="messages",
79
+ label="Gemini2.0 'Thinking' Chatbot",
80
+ render_markdown=True,
81
+ )
82
+
83
+ input_box = gr.Textbox(
84
+ lines=1,
85
+ label="Chat Message",
86
+ placeholder="Type your message here and press Enter..."
87
+ )
88
+
89
+ # Set up event handlers
90
+ msg_store = gr.State("") # Store for preserving user message
91
+
92
+ input_box.submit(
93
+ lambda msg: (msg, msg, ""), # Store message and clear input
94
+ inputs=[input_box],
95
+ outputs=[msg_store, input_box, input_box],
96
+ queue=False
97
+ ).then(
98
+ user_message, # Add user message to chat
99
+ inputs=[msg_store, chatbot],
100
+ outputs=[input_box, chatbot],
101
+ queue=False
102
+ ).then(
103
+ stream_gemini_response, # Generate and stream response
104
+ inputs=[msg_store, chatbot],
105
+ outputs=chatbot
106
+ )
107
 
108
+ demo.launch()