Spaces: Runtime error
Upload folder using huggingface_hub
- Dockerfile +3 -2
- app.py +39 -13
Dockerfile
CHANGED
@@ -27,10 +27,11 @@ RUN echo "nameserver 8.8.8.8" > /etc/resolv.conf && \
 # Copy requirements first to leverage Docker cache
 COPY requirements.txt .
 
-# Install Python dependencies with retry mechanism
+# Install Python dependencies with retry mechanism and explicit Gradio upgrade
 RUN pip install --no-cache-dir -r requirements.txt || \
     (sleep 5 && pip install --no-cache-dir -r requirements.txt) || \
-    (sleep 10 && pip install --no-cache-dir -r requirements.txt)
+    (sleep 10 && pip install --no-cache-dir -r requirements.txt) && \
+    pip install --no-cache-dir gradio==4.44.1
 
 # Copy application code
 COPY . .
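The RUN change chains a third attempt and then pins Gradio explicitly. As a rough Python sketch of the same retry-with-back-off idea (the function name, delays, and error handling below are illustrative only and not part of this repository):

import subprocess
import sys
import time

def pip_install_with_backoff(args, delays=(0, 5, 10)):
    """Retry `pip install` with increasing delays, mirroring the Dockerfile's
    `cmd || (sleep 5 && cmd) || (sleep 10 && cmd)` pattern."""
    for attempt, delay in enumerate(delays, start=1):
        time.sleep(delay)
        result = subprocess.run(
            [sys.executable, "-m", "pip", "install", "--no-cache-dir", *args]
        )
        if result.returncode == 0:
            return
        print(f"pip install attempt {attempt}/{len(delays)} failed", file=sys.stderr)
    raise RuntimeError(f"pip install failed after {len(delays)} attempts: {args}")

# Usage mirroring the Dockerfile: requirements first, then the Gradio pin.
pip_install_with_backoff(["-r", "requirements.txt"])
pip_install_with_backoff(["gradio==4.44.1"])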
app.py
CHANGED
@@ -379,26 +379,52 @@ class VentureUI:
             4. Get insights and recommendations
             """)
 
-            chatbot = gr.Chatbot(label="Chat History")
-
-
-
-
-
-
+            chatbot = gr.Chatbot(label="Chat History", height=400)
+            with gr.Row():
+                msg = gr.Textbox(
+                    label="Message",
+                    placeholder="Chat with the Agentic System...",
+                    lines=2,
+                    scale=9
+                )
+                submit = gr.Button("Send", scale=1)
+            clear = gr.ClearButton([msg, chatbot], value="Clear")
 
-            def respond(message, history):
-
-
-
-
+            async def respond(message, history):
+                try:
+                    # Convert history to the format expected by process_message
+                    history_list = [[x, y] for x, y in history] if history else []
+                    response, _ = await self.app(message, history_list)
+                    history.append((message, response))
+                    return "", history
+                except Exception as e:
+                    logger.error(f"Error in chat response: {str(e)}")
+                    error_msg = "I apologize, but I encountered an error. Please try again."
+                    history.append((message, error_msg))
+                    return "", history
+
+            submit.click(
+                respond,
+                [msg, chatbot],
+                [msg, chatbot],
+                queue=False
+            ).then(
+                lambda: gr.update(interactive=True),
+                None,
+                [submit],
+                queue=False
+            )
 
             msg.submit(
                 respond,
                 [msg, chatbot],
                 [msg, chatbot],
                 queue=False
+            ).then(
+                lambda: gr.update(interactive=True),
+                None,
+                [submit],
+                queue=False
             )
 
         return interface
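For context, the wiring added in this hunk can be tried outside the Space with a minimal standalone sketch. Here `fake_agent` is a placeholder for the `self.app(...)` call, which is defined elsewhere in app.py and not shown in this diff; everything else follows the components and event chains in the change:

import gradio as gr

async def fake_agent(message: str, history: list) -> str:
    # Placeholder for the real agent call (self.app in VentureUI).
    return f"Echo: {message}"

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Chat History", height=400)
    with gr.Row():
        msg = gr.Textbox(label="Message",
                         placeholder="Chat with the Agentic System...",
                         lines=2, scale=9)
        submit = gr.Button("Send", scale=1)
    clear = gr.ClearButton([msg, chatbot], value="Clear")

    async def respond(message, history):
        history = history or []
        try:
            reply = await fake_agent(message, history)
        except Exception:
            reply = "I apologize, but I encountered an error. Please try again."
        history.append((message, reply))
        # Clear the textbox and push the updated history to the chatbot.
        return "", history

    # Both the Send button and Enter submit to the same handler, then
    # re-enable the Send button, matching the chained .then() calls above.
    for trigger in (submit.click, msg.submit):
        trigger(respond, [msg, chatbot], [msg, chatbot], queue=False).then(
            lambda: gr.update(interactive=True), None, [submit], queue=False
        )

if __name__ == "__main__":
    demo.launch()

The diff writes the two event chains out explicitly rather than looping over them; the behaviour is the same.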