# NOTE: "Spaces: / Sleeping / Sleeping" below was Hugging Face Space status
# text accidentally captured when this file was extracted — not program code.
# --- Imports & environment setup --------------------------------------------
# Standard library
import os

# Third-party
import faiss
import nest_asyncio
import openai

# llama-index core and integrations
from llama_index.agent.openai import OpenAIAgent
from llama_index.core import (
    DocumentSummaryIndex,
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    get_response_synthesizer,
    load_index_from_storage,
)
from llama_index.core.query_engine import SubQuestionQueryEngine
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.llms.openai import OpenAI
from llama_index.vector_stores.faiss import FaissVectorStore

# Allow a nested event loop — llama-index runs async sub-queries, which fails
# inside environments that already own a running loop (notebooks, some hosts).
nest_asyncio.apply()

# SECURITY: an OpenAI API key was previously hard-coded here (even commented
# out, a committed key is compromised — revoke it). Supply the key via the
# OPENAI_API_KEY environment variable instead; the OpenAI client reads it
# automatically, e.g.:
#   export OPENAI_API_KEY=...   (shell)
if "OPENAI_API_KEY" not in os.environ:
    # Fail fast with a clear message rather than a deep stack trace later.
    raise RuntimeError("Set the OPENAI_API_KEY environment variable.")
# --- Rehydrate the persisted FAISS-backed index ------------------------------
# Both the FAISS vector store and the index metadata were persisted under
# ./storage; load them back into a queryable index.
vector_store = FaissVectorStore.from_persist_dir("./storage")
storage_context = StorageContext.from_defaults(
    vector_store=vector_store,
    persist_dir="./storage",
)
index = load_index_from_storage(storage_context)

# Expose the index as a single query-engine tool the agent can call.
individual_query_engine_tools = [
    QueryEngineTool(
        query_engine=index.as_query_engine(),
        metadata=ToolMetadata(
            # Plain strings: the original f-prefixes had no placeholders.
            name="timetable",
            description="useful for when you want to answer queries",
        ),
    )
]
# --- Compose query engines into an OpenAI agent ------------------------------
# Sub-question engine: decomposes a complex query into sub-questions and
# routes each one to the underlying tool(s).
query_engine = SubQuestionQueryEngine.from_defaults(
    query_engine_tools=individual_query_engine_tools,
    llm=OpenAI(model="gpt-3.5-turbo"),
)

# Wrap the sub-question engine itself as a tool so the agent can choose it.
query_engine_tool = QueryEngineTool(
    query_engine=query_engine,
    metadata=ToolMetadata(
        name="sub_question_query_engine",
        description="useful for when you want to answer queries",
    ),
)

# The agent may answer via the direct index tool or the sub-question engine.
tools = individual_query_engine_tools + [query_engine_tool]
agent = OpenAIAgent.from_tools(tools)
# Interactive chat loop (left disabled by the author). Uncomment to use:
# while True:
#     text_input = input("User: ")
#     if text_input == "exit":
#         break
#     response = agent.chat(text_input)
#     print(f"Agent: {response}")