import sys
import json

import autogen


# Function to run the multi-agent book-writing query
def run_query(problem, api_key, max_round):
    config_list = [
        {
            'model': 'gpt-3.5-turbo',
            'api_key': api_key,
        },
    ]
    llm_config = {"config_list": config_list, "seed": 42, "request_timeout": 600}

    autogen.ChatCompletion.start_logging()

    # Create the user proxy, user, author, planner, editor, and critic agents
    user_proxy = autogen.UserProxyAgent(
        name="User_proxy",
        system_message="A human user. Interact with the planner to discuss the logline, book concept and structure. The book writing needs to be approved by the user.",
        code_execution_config={"last_n_messages": 3, "work_dir": "groupchat"},
        human_input_mode="NEVER",
        # is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE")
    )
    user = autogen.UserProxyAgent(
        name="User",
        human_input_mode="NEVER",
        code_execution_config={"use_docker": False},
    )
    author = autogen.AssistantAgent(
        name="Author",
        system_message="Author. You follow an approved plan. You write book chapters according to the plan. The user can't modify your content directly, so do not suggest incomplete chapters that require others to modify them. Don't include multiple chapters in one response. Do not ask others to copy and paste the content. Suggest the full content instead of partial content or content changes. If the content is not up to the mark, analyze the problem, revisit your assumptions, collect any additional information you need, and try a different approach.",
        llm_config=llm_config,
    )
    planner = autogen.AssistantAgent(
        name="Planner",
        system_message="Planner. Suggest a plan. Revise the plan based on feedback from the user and the critic.",
        llm_config=llm_config,
    )
    editor = autogen.AssistantAgent(
        name="Editor",
        system_message="Editor. Review the content written by the author and provide feedback.",
        llm_config=llm_config,
    )
    critic = autogen.AssistantAgent(
        name="Critic",
        system_message="Critic. Double-check the plan, claims, and content. Provide feedback on the content and the plan.",
        llm_config=llm_config,
    )

    # Create the group chat and its manager, then start the conversation
    groupchat = autogen.GroupChat(
        agents=[user, user_proxy, author, planner, editor, critic],
        messages=[],
        max_round=max_round,
    )
    manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

    return user_proxy.initiate_chat(manager, message=problem)


if __name__ == "__main__":
    # Read the problem description, API key, and round limit as JSON from stdin
    input_data = json.loads(sys.stdin.read())
    problem = input_data['problem']
    api_key = input_data['api_key']
    max_round = input_data['max_round']
    result = run_query(problem, api_key, max_round)
    print(result)
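# --- Example driver (a minimal sketch, kept as comments so this file stays runnable).
# --- It shows how the script can be fed its JSON payload over stdin; the filename
# --- "run_query.py", the sample problem text, the round count, and the use of the
# --- OPENAI_API_KEY environment variable are assumptions, not part of the original.
#
# import json, os, subprocess
#
# payload = {
#     "problem": "Write a short three-chapter book about learning to sail.",
#     "api_key": os.environ["OPENAI_API_KEY"],
#     "max_round": 12,
# }
# completed = subprocess.run(
#     ["python", "run_query.py"],
#     input=json.dumps(payload),
#     text=True,
#     capture_output=True,
# )
# print(completed.stdout)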