Upload folder using huggingface_hub
app.py
CHANGED
@@ -36,6 +36,15 @@ model_name = (
     else "http://localhost:1234/v1"
 )

+image_qa_tool = ImageQuestionAnsweringTool()
+image_qa_tool.inputs = {
+    "image": {
+        "type": "image",
+        "description": "The image containing the information. It must be a PIL Image.",
+    },
+    "question": {"type": "string", "description": "The question in English"},
+}
+
 ADDITIONAL_TOOLS = [
     DuckDuckGoSearchTool(),
     VisitWebpageTool(),
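The block added above overrides the stock ImageQuestionAnsweringTool input schema so the agent's tool description explicitly states that the image must be a PIL Image and the question must be in English. A minimal sketch of exercising the tool on its own, under the assumption that it is imported from transformers.agents and that a local chart.png exists (both details are placeholders, not taken from this Space):

from PIL import Image
from transformers.agents import ImageQuestionAnsweringTool  # assumed import path; varies by transformers version

image_qa_tool = ImageQuestionAnsweringTool()
image = Image.open("chart.png")  # hypothetical local file
# Tools are callable; the keyword names match the schema declared in the diff above.
print(image_qa_tool(image=image, question="What is shown in the image?"))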
@@ -52,7 +61,10 @@ TASK_SOLVING_TOOLBOX = DEFAULT_TASK_SOLVING_TOOLBOX + ADDITIONAL_TOOLS
 system_prompt = DEFAULT_SQUAD_REACT_CODE_SYSTEM_PROMPT

 agent = get_agent(
-    model_name=model_name,
+    model_name=model_name,
+    toolbox=TASK_SOLVING_TOOLBOX,
+    system_prompt=system_prompt,
+    use_openai=True,
 )

 app = None
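get_agent itself is defined elsewhere in this Space; the diff only shows that it now receives the merged toolbox, the custom system prompt, and a use_openai flag in addition to the model name/endpoint. A purely hypothetical sketch of such a factory for the use_openai path, where model_name doubles as an OpenAI-compatible base URL as in the local branch above (the helper name comes from the diff, but everything inside it here is guessed, including the "local-model" placeholder and the engine signature):

from openai import OpenAI
from transformers.agents import ReactCodeAgent

def get_agent(model_name, toolbox, system_prompt, use_openai=True):
    # Assumption: in local mode model_name is an OpenAI-compatible endpoint (e.g. LM Studio).
    client = OpenAI(base_url=model_name, api_key="not-needed")

    def llm_engine(messages, stop_sequences=None):
        # transformers.agents drives a callable(messages, stop_sequences) -> str.
        response = client.chat.completions.create(
            model="local-model", messages=messages, stop=stop_sequences
        )
        return response.choices[0].message.content

    return ReactCodeAgent(tools=toolbox, llm_engine=llm_engine, system_prompt=system_prompt)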
@@ -81,14 +93,18 @@ def interact_with_agent(messages, request: Request):
     session_hash = request.session_hash
     prompt = messages[-1]["content"]
     agent.logs = sessions.get(session_hash + "_logs", [])
-    yield messages, gr.update(
+    yield messages, gr.update(
+        value="<center><h1>Thinking...</h1></center>", visible=True
+    )
     for msg in stream_from_transformers_agent(agent, prompt):
         if isinstance(msg, ChatMessage):
             messages.append(msg)
-            yield messages, gr.update(visible
+            yield messages, gr.update(visible=True)
         else:
-            yield messages, gr.update(
-
+            yield messages, gr.update(
+                value=f"<center><h1>{msg}</h1></center>", visible=True
+            )
+    yield messages, gr.update(value="<center><h1>Idle</h1></center>", visible=False)


 def persist(component):
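interact_with_agent is a generator event handler: each yield updates both the chat history and the inner-monologue Markdown, so the panel shows "Thinking..." while the agent starts, streams a status per step, and hides itself again as "Idle" when the run ends. A self-contained sketch of the same pattern with a dummy responder, assuming a Gradio version whose Chatbot supports the messages format (none of this is the Space's code):

import time
import gradio as gr

def respond(history):
    # First yield: reveal the status panel while "working".
    yield history, gr.update(value="<center><h1>Thinking...</h1></center>", visible=True)
    time.sleep(1)  # stand-in for the agent run
    history.append({"role": "assistant", "content": "Done."})
    # Final yield: hide the status panel again.
    yield history, gr.update(value="<center><h1>Idle</h1></center>", visible=False)

with gr.Blocks() as demo:
    status = gr.Markdown(visible=False)
    chatbot = gr.Chatbot(value=[], type="messages")
    box = gr.Textbox(label="Chat Message")
    box.submit(
        lambda m, h: h + [{"role": "user", "content": m}], [box, chatbot], [chatbot]
    ).then(respond, [chatbot], [chatbot, status])

demo.launch()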
@@ -114,9 +130,15 @@ def persist(component):
     return component


-with gr.Blocks(
+with gr.Blocks(
+    fill_height=True,
+    css=".gradio-container .message .content {text-align: left;}"
+    + HtmlFormatter().get_style_defs(".highlight"),
+) as demo:
     state = gr.State()
-    inner_monologue_component = gr.Markdown(
+    inner_monologue_component = gr.Markdown(
+        """<h2>Inner Monologue</h2>""", visible=False
+    )
     chatbot = persist(
         gr.Chatbot(
             value=[],
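The css argument now appends Pygments' stylesheet for the .highlight class, which is what lets the HTML produced by highlight_code_html render with syntax colors inside chat messages. A small sketch of how that pairing works in Pygments, assuming highlight_code_html uses the default "highlight" cssclass (that detail is not visible in this diff):

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import PythonLexer

formatter = HtmlFormatter(cssclass="highlight")
html_snippet = highlight("print('hi')", PythonLexer(), formatter)  # -> <div class="highlight">...</div>
css_rules = HtmlFormatter().get_style_defs(".highlight")  # the rules injected into gr.Blocks above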
@@ -149,7 +171,9 @@ with gr.Blocks(fill_height=True, css=".gradio-container .message .content {text-
     )
     text_input = gr.Textbox(lines=1, label="Chat Message", scale=0)
     chat_msg = text_input.submit(add_message, [text_input, chatbot], [chatbot])
-    bot_msg = chat_msg.then(
+    bot_msg = chat_msg.then(
+        interact_with_agent, [chatbot], [chatbot, inner_monologue_component]
+    )
     text_input.submit(lambda: "", None, text_input)
     chatbot.example_select(append_example_message, [chatbot], [chatbot]).then(
         interact_with_agent, [chatbot], [chatbot, inner_monologue_component]
utils.py
CHANGED
@@ -52,7 +52,7 @@ def stream_from_transformers_agent(
         terminal_message = highlight_code_terminal(message)
         message = highlight_code_html(message)
         if "Observing" in title:
-            message = f"<
+            message = f"<div style='border:1px solid black; background-color: var(--code-background-fill); padding: 10px;'>{message.replace("\n", "<br/>")}</div>"
         print(colored("=== Inner Monologue Message:\n", "blue", attrs=["bold"]), f"{title}\n{terminal_message}")
         inner_monologue.content += f"<h2>{title}</h2><p>{message}</p>"
         yield title
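Note that the replacement field in the new f-string reuses double quotes ({message.replace("\n", "<br/>")}), which only parses on Python 3.12+ (PEP 701). A version-agnostic sketch of the same wrapping, using a hypothetical helper name rather than anything from utils.py:

def wrap_observation(message: str) -> str:
    # Same inline styling as the diff above, written to parse on Python < 3.12 as well.
    body = message.replace("\n", "<br/>")
    return (
        "<div style='border:1px solid black; "
        "background-color: var(--code-background-fill); padding: 10px;'>"
        + body
        + "</div>"
    )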