Merge branch 'main' of https://huggingface.co/spaces/luanpoppe/vella-backend-tests into tests
_utils/gerar_relatorio_modelo_usuario/EnhancedDocumentSummarizer.py CHANGED

@@ -36,7 +36,7 @@ class EnhancedDocumentSummarizer(DocumentSummarizer):
         chunk_overlap,
         num_k_rerank,
         model_cohere_rerank,
-        prompt_auxiliar,
+        # prompt_auxiliar,
         gpt_model,
         gpt_temperature,
         # id_modelo_do_usuario,

@@ -54,7 +54,7 @@ class EnhancedDocumentSummarizer(DocumentSummarizer):
         )
         self.config = config
         self.logger = logging.getLogger(__name__)
-        self.prompt_auxiliar = prompt_auxiliar
+        # self.prompt_auxiliar = prompt_auxiliar
         self.gpt_model = gpt_model
         self.gpt_temperature = gpt_temperature
         self.prompt_gerar_documento = prompt_gerar_documento

@@ -164,35 +164,33 @@ class EnhancedDocumentSummarizer(DocumentSummarizer):
         llm_instance = LLM()
         llm = llm_instance.deepseek()

-        prompt_auxiliar = PromptTemplate(
-            template=self.prompt_auxiliar, input_variables=["context"]
-        )
+        # prompt_auxiliar = PromptTemplate(
+        #     template=self.prompt_auxiliar, input_variables=["context"]
+        # )

-        resumo_auxiliar_do_documento = llm.invoke(
-            prompt_auxiliar.format(context="\n\n".join(contexts))
-        )
+        # resumo_auxiliar_do_documento = llm.invoke(
+        #     prompt_auxiliar.format(context="\n\n".join(contexts))
+        # )

-        self.resumo_gerado = cast(str, resumo_auxiliar_do_documento.content)
+        # self.resumo_gerado = cast(str, resumo_auxiliar_do_documento.content)

-        …
+        prompt_gerar_documento = PromptTemplate(
+            template=self.prompt_gerar_documento,
+            input_variables=["context"],
+        )

+        documento_gerado = cast(
+            str,
+            llm.invoke(
+                prompt_gerar_documento.format(
+                    context="\n\n".join(contexts),
+                    # modelo_usuario=serializer.data["modelo"],
+                )
+            ).content,
+        )

         # Split the response into paragraphs
-        summaries = [
-            p.strip() for p in self.resumo_gerado.split("\n\n") if p.strip()
-        ]
+        summaries = [p.strip() for p in documento_gerado.split("\n\n") if p.strip()]

         # Create structured output
         structured_output = []
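What the change amounts to: the auxiliary-summary pass (prompt_auxiliar producing self.resumo_gerado) is commented out, and the final document now comes from a single call, with the retrieved contexts joined directly into prompt_gerar_documento and the paragraphs of that one response becoming the summaries. Below is a minimal sketch of the resulting flow, assuming LangChain's PromptTemplate; FakeListLLM stands in for the project's LLM().deepseek() client, and the gerar_documento helper name is ours, not the repo's.

from typing import List

from langchain_core.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM


def gerar_documento(contexts: List[str], template: str) -> List[str]:
    # Stand-in for LLM().deepseek(); FakeListLLM replays canned responses
    # so this sketch runs offline.
    llm = FakeListLLM(responses=["Primeiro paragrafo.\n\nSegundo paragrafo."])
    prompt = PromptTemplate(template=template, input_variables=["context"])
    # Single LLM call: the retrieved chunks go straight into the final
    # prompt; the old intermediate "resumo auxiliar" pass is gone.
    documento_gerado = llm.invoke(prompt.format(context="\n\n".join(contexts)))
    # Split the response into paragraphs, as the summarizer does.
    return [p.strip() for p in documento_gerado.split("\n\n") if p.strip()]


print(gerar_documento(["chunk A", "chunk B"], "Gere um relatorio a partir de:\n\n{context}"))

Collapsing the two calls should roughly halve latency and token cost per request, at the price of sending the raw chunks, rather than a condensed summary, to the final prompt.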
_utils/gerar_relatorio_modelo_usuario/utils.py CHANGED

@@ -28,7 +28,7 @@ def gerar_resposta_compilada(serializer):
         "hf_embedding": serializer["hf_embedding"],
         "chunk_size": serializer["chunk_size"],
         "chunk_overlap": serializer["chunk_overlap"],
-        "prompt_auxiliar": serializer["prompt_auxiliar"],
+        # "prompt_auxiliar": serializer["prompt_auxiliar"],
         "prompt_gerar_documento": serializer["prompt_gerar_documento"],
     }
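Since gerar_resposta_compilada echoes the request configuration back to the client, dropping this key means responses no longer carry prompt_auxiliar. A hedged sketch of an equivalent, key-list-driven version of the helper; the ECHOED_KEYS tuple and the defensive "if k in serializer" guard are our additions, not the project's code.

# Hypothetical variant: echo back only the config keys still in the
# contract, tolerating their absence in the input dict.
ECHOED_KEYS = (
    "hf_embedding",
    "chunk_size",
    "chunk_overlap",
    "prompt_gerar_documento",  # "prompt_auxiliar" dropped by this commit
)


def gerar_resposta_compilada(serializer: dict) -> dict:
    return {k: serializer[k] for k in ECHOED_KEYS if k in serializer}


print(gerar_resposta_compilada({
    "hf_embedding": "e5-large",
    "chunk_size": 1000,
    "chunk_overlap": 200,
    "prompt_auxiliar": "ignored now",
    "prompt_gerar_documento": "...",
}))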
_utils/resumo_completo_cursor.py CHANGED

@@ -66,7 +66,7 @@ async def get_llm_summary_answer_by_cursor_complete(
         chunk_size=serializer["chunk_size"],
         num_k_rerank=serializer["num_k_rerank"],
         model_cohere_rerank=serializer["model_cohere_rerank"],
-        prompt_auxiliar=serializer["prompt_auxiliar"],
+        # prompt_auxiliar=serializer["prompt_auxiliar"],
         gpt_model=serializer["model"],
         gpt_temperature=serializer["gpt_temperature"],
         prompt_gerar_documento=serializer["prompt_gerar_documento"],
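This call site and the __init__ signature shown above are commented out as a pair, which keeps the keyword arguments in sync; if prompt_auxiliar ever returns, both sides must change together. A self-contained sketch of the call shape after the commit: the class body is abbreviated to the parameters visible in these diffs, and the serializer values are made-up examples.

# Abbreviated stand-in mirroring only the parameters visible in these
# diffs; the real class takes more arguments.
class EnhancedDocumentSummarizer:
    def __init__(self, chunk_size, num_k_rerank, model_cohere_rerank,
                 gpt_model, gpt_temperature, prompt_gerar_documento):
        self.gpt_model = gpt_model
        self.gpt_temperature = gpt_temperature
        self.prompt_gerar_documento = prompt_gerar_documento


serializer = {
    "chunk_size": 1000,
    "num_k_rerank": 5,
    "model_cohere_rerank": "rerank-multilingual-v3.0",
    "model": "deepseek-chat",
    "gpt_temperature": 0.0,
    "prompt_gerar_documento": "Gere um relatorio a partir de:\n\n{context}",
}

summarizer = EnhancedDocumentSummarizer(
    chunk_size=serializer["chunk_size"],
    num_k_rerank=serializer["num_k_rerank"],
    model_cohere_rerank=serializer["model_cohere_rerank"],
    # prompt_auxiliar=serializer["prompt_auxiliar"],  # removed by this commit
    gpt_model=serializer["model"],
    gpt_temperature=serializer["gpt_temperature"],
    prompt_gerar_documento=serializer["prompt_gerar_documento"],
)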
gerar_documento/views.py CHANGED

@@ -33,9 +33,9 @@ class GerarDocumentoView(AsyncAPIView):
         data = cast(Dict[str, Any], serializer.validated_data)
         print("\n\ndata: ", data)

-        data["prompt_auxiliar"] = (
-            prompt_auxiliar_inicio + "\n" + data["prompt_auxiliar"]
-        )
+        # data["prompt_auxiliar"] = (
+        #     prompt_auxiliar_inicio + "\n" + data["prompt_auxiliar"]
+        # )

         # listaPDFs = handle_pdf_files_from_serializer(data["files"])
         listaPDFs = [l["link_arquivo"] for l in data["files"]]
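With the prompt_auxiliar prefixing commented out, the only preprocessing left in the view is pulling the PDF links out of the validated payload. A small runnable sketch with a hypothetical payload; the link_arquivo URLs are invented.

# Hypothetical validated payload; "files" carries dicts with a
# "link_arquivo" URL, as in the view above.
data = {
    "files": [
        {"link_arquivo": "https://example.com/a.pdf"},
        {"link_arquivo": "https://example.com/b.pdf"},
    ],
}

# Same extraction the view performs after this commit.
listaPDFs = [f["link_arquivo"] for f in data["files"]]
print(listaPDFs)  # ['https://example.com/a.pdf', 'https://example.com/b.pdf']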
requirements.txt CHANGED

Binary files a/requirements.txt and b/requirements.txt differ