luanpoppe committed
Commit b8beb50 · 1 parent: 82e7990

fix: empty LLM response error
_utils/gerar_relatorio_modelo_usuario/GerarDocumento.py CHANGED
@@ -1,5 +1,5 @@
 import os
-from typing import Any, List, Dict, Tuple, Optional, Union, cast
+from typing import Any, List, Dict, Literal, Tuple, Optional, Union, cast
 
 from pydantic import SecretStr
 from _utils.langchain_utils.LLM_class import LLM
@@ -25,6 +25,7 @@ from _utils.models.gerar_relatorio import (
 )
 from cohere import Client
 from _utils.langchain_utils.Splitter_class import Splitter
+import time
 
 
 def reciprocal_rank_fusion(result_lists, weights=None):
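Note: reciprocal_rank_fusion appears in this hunk only as unchanged context. For readers unfamiliar with the technique, here is a minimal sketch of reciprocal rank fusion as commonly defined (with the conventional k=60 damping constant); the file's actual body may differ:

def reciprocal_rank_fusion(result_lists, weights=None, k=60):
    # Each result list is an ordered sequence of document ids; the fused
    # score of a document is the weighted sum of 1 / (k + rank) over lists.
    weights = weights or [1.0] * len(result_lists)
    scores = {}
    for result_list, weight in zip(result_lists, weights):
        for rank, doc_id in enumerate(result_list, start=1):
            scores[doc_id] = scores.get(doc_id, 0.0) + weight / (k + rank)
    # Highest fused score first.
    return sorted(scores.items(), key=lambda item: item[1], reverse=True)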
@@ -167,7 +168,12 @@ class GerarDocumento:
 
         return sources, contexts
 
-    def select_model_for_last_requests(
+    def select_model_for_last_requests(
+        self,
+        llm_ultimas_requests: Literal[
+            "gpt-4o-mini", "deepseek-chat", "gemini-2.0-flash"
+        ],
+    ):
         llm_instance = LLM()
         if llm_ultimas_requests == "gpt-4o-mini":
             llm = ChatOpenAI(
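The new signature narrows llm_ultimas_requests from a free-form string to a Literal of the three supported model names, so a static type checker can reject unsupported values at the call site. A standalone sketch of the pattern (function body is hypothetical, not the file's code):

from typing import Literal

ModelName = Literal["gpt-4o-mini", "deepseek-chat", "gemini-2.0-flash"]

def select_model(name: ModelName) -> str:
    # Dispatch on the narrowed literal; a bare `str` would let typos through.
    if name == "gpt-4o-mini":
        return "openai"
    if name == "deepseek-chat":
        return "deepseek"
    return "google"

select_model("gpt-4o-mini")  # accepted
# select_model("gpt-5")      # rejected by mypy/pyright: not in the Literal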
@@ -194,31 +200,40 @@ class GerarDocumento:
             vector_store, bm25, chunk_ids, query
         )
 
-        llm = self.select_model_for_last_requests(llm_ultimas_requests)
-        # prompt_auxiliar = PromptTemplate(
-        #     template=self.prompt_auxiliar, input_variables=["context"]
-        # )
-
-        # resumo_auxiliar_do_documento = llm.invoke(
-        #     prompt_auxiliar.format(context="\n\n".join(contexts))
-        # )
-
-        # self.resumo_gerado = cast(str, resumo_auxiliar_do_documento.content)
-
         prompt_gerar_documento = PromptTemplate(
             template=cast(str, self.prompt_gerar_documento),
             input_variables=["context"],
         )
 
-        documento_gerado = llm.invoke(
-
-
+        documento_gerado = ""
+        tentativas = 0
+
+        while tentativas < 5 and not documento_gerado:
+            tentativas += 1
+            llm = self.select_model_for_last_requests(llm_ultimas_requests)  # type: ignore
+            resposta = llm.invoke(
                 prompt_gerar_documento.format(
                     context="\n\n".join(contexts),
-                    # modelo_usuario=serializer.data["modelo"],
                 )
+            )
+            if hasattr(resposta, "content") and resposta.content.strip():  # type: ignore
+                documento_gerado = resposta.content.strip()  # type: ignore
+            else:
+                print(f"Tentativa {tentativas}: resposta vazia ou inexistente.")
+                time.sleep(5)
+
+        if not documento_gerado:
+            llm = self.select_model_for_last_requests("gpt-4o-mini")
+            resposta = llm.invoke(
+                prompt_gerar_documento.format(
+                    context="\n\n".join(contexts),
+                )
+            )
+            documento_gerado = resposta.content.strip()  # type: ignore
+            if not documento_gerado:
+                raise Exception(
+                    "Falha ao tentar gerar o documento final por 5 tentativas e também ao tentar na última tentativa com o chat-gpt 4o mini."
+                )
 
         # Split the response into paragraphs
         summaries = [p.strip() for p in documento_gerado.split("\n\n") if p.strip()]
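This hunk is the substance of the fix: instead of a single llm.invoke call whose empty response previously propagated downstream, the document is generated in a loop of up to 5 attempts, sleeping 5 seconds after each empty response, then falling back to gpt-4o-mini once before raising. The same pattern, reduced to a self-contained sketch (the generate/fallback callables are hypothetical stand-ins for the model calls):

import time
from typing import Callable

def generate_with_retry(
    generate: Callable[[], str],
    fallback: Callable[[], str],
    max_attempts: int = 5,
    delay_seconds: float = 5.0,
) -> str:
    # Retry the primary model while it returns empty output.
    for attempt in range(1, max_attempts + 1):
        result = generate().strip()
        if result:
            return result
        print(f"Attempt {attempt}: empty response, retrying.")
        time.sleep(delay_seconds)
    # One last try with the fallback model before giving up.
    result = fallback().strip()
    if not result:
        raise RuntimeError("All attempts returned an empty response.")
    return result

Note that the commit calls select_model_for_last_requests inside the while loop, so a fresh LLM client is built on every attempt rather than reusing one instance across retries.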
@@ -245,3 +260,7 @@
         except Exception as e:
             self.logger.error(f"Error generating enhanced summary: {str(e)}")
             raise
+
+    async def validar_conteudo_documento_final(self):
+        documento_gerado = ""
+        tentativas = 0
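The diff ends with only the first lines of the new async validar_conteudo_documento_final method; the rest of its body lies outside the hunk context and is not shown. Purely as an illustration of the retry idiom those stub variables suggest, and not the author's actual implementation, an async variant of the same loop could look like:

import asyncio
from typing import Awaitable, Callable

async def validar_conteudo(gerar: Callable[[], Awaitable[str]]) -> str:
    # Hypothetical: retry an async generation call until it returns
    # non-empty content, with at most 5 attempts and a 5 s pause between.
    documento_gerado = ""
    tentativas = 0
    while tentativas < 5 and not documento_gerado:
        tentativas += 1
        documento_gerado = (await gerar()).strip()
        if not documento_gerado:
            await asyncio.sleep(5)
    return documento_gerado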
|