import os
from _utils.utils import create_prompt_llm_chain, create_retriever, getPDF, create_llm, create_prompt_llm_chain_summary, process_embedding_summary
from _utils import utils
from langchain.chains import create_retrieval_chain
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain.chains.summarize import load_summarize_chain

# langchain_openai reads OPENAI_API_KEY from the environment by itself; check
# for it up front so a missing key fails fast with a clear error.
if not os.environ.get("OPENAI_API_KEY"):
    raise EnvironmentError("OPENAI_API_KEY environment variable is not set")


def get_llm_answer(system_prompt, user_prompt, pdf_url, model, embedding):
    """Answer user_prompt with a RAG chain built over the PDF's pages."""
    if embedding == "gpt":
        embedding_object = OpenAIEmbeddings()
    else:
        # Any other value is treated as a HuggingFace embedding model name.
        embedding_object = HuggingFaceEmbeddings(model_name=embedding)

    vectorstore = Chroma(
        collection_name="documents",
        embedding_function=embedding_object,
    )

    print('model: ', model)
    print('embedding: ', embedding)

    # Fall back to the default PDF when no URL is given.
    pages = getPDF(pdf_url) if pdf_url else getPDF()

    retriever = create_retriever(pages, vectorstore)
    rag_chain = create_retrieval_chain(retriever, create_prompt_llm_chain(system_prompt, model))
    results = rag_chain.invoke({"input": user_prompt})

    # Delete the ingested documents and drop the collection so state does not
    # leak between requests.
    vectorstore.delete(utils.allIds)
    vectorstore.delete_collection()
    utils.allIds = []
    return results
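
# Example (sketch, assuming the usual create_retrieval_chain output dict); the
# URL, prompts, and model name below are illustrative placeholders, not
# values from this project:
#
#     answer = get_llm_answer(
#         system_prompt="Answer using only the provided context.",
#         user_prompt="What is the document about?",
#         pdf_url="https://example.com/sample.pdf",
#         model="gpt-4o-mini",
#         embedding="gpt",
#     )
#     print(answer["answer"])  # invoke() returns {"input", "context", "answer"}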


def get_llm_answer_summary(system_prompt, user_prompt, pdf_url, model, isIterativeRefinement):
    """Summarize the PDF, either in a single pass or by iterative refinement."""
    print('model: ', model)
    print('isIterativeRefinement: ', isIterativeRefinement)

    pages = getPDF(pdf_url)
    if not isIterativeRefinement:
        # Single pass: every page is sent to the model as context at once.
        rag_chain = create_prompt_llm_chain_summary(system_prompt, model)
        results = rag_chain.invoke({"input": user_prompt, "context": pages})
        return results
    else:
        # Refine strategy: summarize the first chunk, then refine the running
        # summary with each subsequent chunk (verbose=True logs each step).
        chain = load_summarize_chain(create_llm(model), "refine", True)
        result = chain.invoke({"input_documents": pages})
        print('result: ', result)
        return result
# Note --> To pass custom prompts --> chain = load_summarize_chain(llm, "refine", True, question_prompt=initial_prompt, refine_prompt=PromptTemplate.from_template(refine_prompt))
# To see more options --> open the source of load_summarize_chain, and from it the source of _load_refine_chain --> the options are the parameters that the latter function accepts.
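
# A minimal sketch of the customization described above; the prompt wording is
# an assumption, while {text} and {existing_answer} are the refine chain's
# standard input variables:
#
#     from langchain_core.prompts import PromptTemplate
#
#     initial_prompt = PromptTemplate.from_template(
#         "Write a concise summary of the following:\n\n{text}"
#     )
#     refine_prompt = PromptTemplate.from_template(
#         "Here is the summary so far:\n{existing_answer}\n\n"
#         "Refine it using this additional context:\n\n{text}"
#     )
#     chain = load_summarize_chain(
#         create_llm(model), "refine", True,
#         question_prompt=initial_prompt,
#         refine_prompt=refine_prompt,
#     )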


def get_llm_answer_summary_with_embedding(system_prompt, user_prompt, pdf_url, model, isIterativeRefinement):
    """Summarize the PDF through the embedding-based summary chain."""
    print('model: ', model)
    print('isIterativeRefinement: ', isIterativeRefinement)

    pages = getPDF(pdf_url)

    # Concatenate every page's text; for now this is only logged, while the
    # chain itself receives the raw pages as context.
    full_texto = "".join(p.page_content for p in pages)
    print('full_texto: ', full_texto)

    rag_chain = process_embedding_summary(system_prompt, model)
    results = rag_chain.invoke({"input": user_prompt, "context": pages})
    return results
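

# Example (sketch): exercising the refine-based summary from the command line.
# The URL and model name are placeholder assumptions, not project values.
if __name__ == "__main__":
    summary = get_llm_answer_summary(
        system_prompt="Summarize the document in a few sentences.",
        user_prompt="Summarize the attached PDF.",
        pdf_url="https://example.com/sample.pdf",  # placeholder PDF URL
        model="gpt-4o-mini",  # assumed model identifier
        isIterativeRefinement=True,
    )
    print(summary)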