remove warm-up
- RAG/bedrock_agent.py  +10 -10
- pages/AI_Shopping_Assistant.py  +1 -1
RAG/bedrock_agent.py
CHANGED

@@ -54,8 +54,8 @@ def query_(inputs):
     )

     logger.info(pprint.pprint(agentResponse))
-    print("***agent*****response*********")
-    print(agentResponse)
+    #print("***agent*****response*********")
+    #print(agentResponse)
     event_stream = agentResponse['completion']
     total_context = []
     last_tool = ""
@@ -63,11 +63,11 @@ def query_(inputs):
     agent_answer = ""
     try:
         for event in event_stream:
-            print("***event*********")
-            print(event)
+            #print("***event*********")
+            #print(event)
             if 'trace' in event:
-                print("trace*****total*********")
-                print(event['trace'])
+                #print("trace*****total*********")
+                #print(event['trace'])
                 if('orchestrationTrace' not in event['trace']['trace']):
                     continue
                 orchestration_trace = event['trace']['trace']['orchestrationTrace']
@@ -80,10 +80,10 @@ def query_(inputs):
                     total_context_item['invocationInput'] = orchestration_trace['invocationInput']['actionGroupInvocationInput']
                     last_tool_name = total_context_item['invocationInput']['function']
                 if('observation' in orchestration_trace):
-                    print("trace****observation******")
+                    #print("trace****observation******")
                     total_context_item['observation'] = event['trace']['trace']['orchestrationTrace']['observation']
                     tool_output_last_obs = event['trace']['trace']['orchestrationTrace']['observation']
-                    print(tool_output_last_obs)
+                    #print(tool_output_last_obs)
                     if(tool_output_last_obs['type'] == 'ACTION_GROUP'):
                         last_tool = tool_output_last_obs['actionGroupInvocationOutput']['text']
                     if(tool_output_last_obs['type'] == 'FINISH'):
@@ -92,8 +92,8 @@ def query_(inputs):
                     total_context_item['thinking'] = orchestration_trace['modelInvocationOutput']['rawResponse']
                 if(total_context_item!={}):
                     total_context.append(total_context_item)
-                    print("total_context------")
-                    print(total_context)
+                    #print("total_context------")
+                    #print(total_context)
     except botocore.exceptions.EventStreamError as error:
         raise error

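The commit disables the ad-hoc debug prints in the Bedrock agent event-stream loop by commenting them out. As a minimal sketch only (not part of this change), the same diagnostics could instead go through the module's existing logger at DEBUG level, so they can be re-enabled by changing the log level rather than editing the code; the helper name log_agent_event and the standalone logger setup below are assumptions, not code from this repository.

import logging
import pprint

# Stands in for the logger already used in RAG/bedrock_agent.py.
logger = logging.getLogger(__name__)

def log_agent_event(event):
    # Hypothetical helper: emit the same output the removed print() calls produced,
    # but only when DEBUG logging is enabled for this module.
    logger.debug("agent event:\n%s", pprint.pformat(event))
    if 'trace' in event:
        logger.debug("trace:\n%s", pprint.pformat(event['trace']))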
pages/AI_Shopping_Assistant.py
CHANGED

@@ -152,7 +152,7 @@ def handle_input():
         'id': len(st.session_state.questions__)
     }
     st.session_state.questions__.append(question_with_id)
-    print(inputs)
+    #print(inputs)
    out_ = bedrock_agent.query_(inputs)
    st.session_state.answers__.append({
        'answer': out_['text'],
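For context, here is a minimal sketch of how the surrounding handle_input() flow fits together, reconstructed from the context lines of the hunk above; the 'question' field name, the input widget key, and the shape of the answers__ entry beyond 'answer' are assumptions rather than code taken from this page.

import streamlit as st
from RAG import bedrock_agent

def handle_input():
    user_question = st.session_state.get("input", "")   # assumed widget key
    inputs = {"question": user_question}                 # assumed payload shape passed to the agent
    question_with_id = {
        'question': user_question,                       # assumed field name
        'id': len(st.session_state.questions__)
    }
    st.session_state.questions__.append(question_with_id)
    out_ = bedrock_agent.query_(inputs)                  # returns a dict whose 'text' key holds the answer
    st.session_state.answers__.append({
        'answer': out_['text'],
        'id': len(st.session_state.questions__)          # assumed; mirrors the question id
    })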