Deepak Sahu committed
Commit ad716b8 · Parent: 97127b4

update runtime

Dockerfile CHANGED
@@ -13,12 +13,12 @@ RUN pip install --no-cache-dir -r requirements.txt
  COPY . .
 
  # Install agents
- # RUN pip install -e pandas_expression_generator
+ RUN pip install -e pandas_expression_generator
 
  # # Make the script executable
- # RUN chmod +x start.sh
+ RUN chmod +x start.sh
 
  # Start both processes
- # CMD ["./start.sh"]
+ CMD ["./start.sh"]
 
 
pandas_expression_generator/src/pandas_expression_generator/pandas_expression_generator_function.py CHANGED
@@ -81,7 +81,7 @@ async def pandas_expression_generator_function(
 
 
  # Implement your function logic here
- async def _response_fn(input_query: FunctionInput) -> str:
+ async def _response_fn(input_query: str) -> str:
 
  # Create LLM
  llm_ = await builder.get_llm(config.llm, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
@@ -96,12 +96,7 @@ async def pandas_expression_generator_function(
  {df_meta}
 
  Generate python expression to solve the query:
- current_step: {input_query.current_step}
-
- You can reformulate(extend or reduce) the current step based on the following information
- original_user_query: {input_query.original_user_query}
- past_step: {input_query.thought}
- next_step: {input_query.next_step}
+ {input_query}
 
 
  You must generate list of python expressions output format must as follows:
@@ -132,8 +127,20 @@ async def pandas_expression_generator_function(
  logger.info("[Following Should just be structured list of string]")
  logger.info(ai_message)
  structured_text_str = extract_bracket_content(ai_message.content)
- output_message = str(expression_executor(structured_text_str, df))[:250]
+ output_message = str(expression_executor(structured_text_str, df))[:800]
  #
+ prompt_ = f'''You are part of big system. Your task is to respond the user in conversational way.
+
+ This was the query of the end users:
+ {input_query}
+
+ The following the context data that you can use to respond to the user.
+ {output_message}
+
+ You may or may not use the context data to respond to the end user. Your output must be in markdown. Do not fence your output.
+ '''
+ ai_message: AIMessage = await llm_.ainvoke(prompt_)
+ return str(ai_message.content)
  # Loop break
  except Exception as e:
  logger.error(str(e))
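
Read together, these hunks change _response_fn so that it accepts a plain query string instead of a structured FunctionInput, asks the LLM for pandas expressions, executes them against the dataframe, and then makes a second LLM call that rephrases the executor output as a markdown answer for the user. The sketch below reconstructs that post-commit flow from the diff alone: _response_fn_sketch is an illustrative name, the first-pass prompt is abbreviated, the repository helpers extract_bracket_content and expression_executor (whose bodies are not part of this diff) are passed in as parameters to keep the example self-contained, and the surrounding retry loop and except Exception handling are omitted.

# Hedged sketch of the post-commit _response_fn flow; names ending in _sketch
# are illustrative, not from the repository.
from langchain_core.messages import AIMessage


async def _response_fn_sketch(input_query: str, llm_, df, df_meta: str,
                              extract_bracket_content, expression_executor) -> str:
    # Pass 1: ask the LLM for a list of pandas expressions for the raw query
    # string (the commit replaces the structured FunctionInput fields such as
    # current_step / next_step with this single string). Prompt abbreviated.
    generation_prompt = f"""{df_meta}

Generate python expression to solve the query:
{input_query}

You must generate list of python expressions output format must as follows: ..."""
    ai_message: AIMessage = await llm_.ainvoke(generation_prompt)

    # Execute the generated expressions against the dataframe; the commit also
    # raises the truncation limit on the executor output from 250 to 800 chars.
    structured_text_str = extract_bracket_content(ai_message.content)
    output_message = str(expression_executor(structured_text_str, df))[:800]

    # Pass 2 (new in this commit): turn the raw executor output into a
    # conversational markdown answer for the end user.
    answer_prompt = f"""You are part of big system. Your task is to respond the user in conversational way.

This was the query of the end users:
{input_query}

The following the context data that you can use to respond to the user.
{output_message}

You may or may not use the context data to respond to the end user. Your output must be in markdown. Do not fence your output.
"""
    ai_message = await llm_.ainvoke(answer_prompt)
    return str(ai_message.content)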