luanpoppe committed · Commit e1d2a79 · Parent(s): 753b4be
feat: generating the final document correctly
_utils/gerar_relatorio_modelo_usuario/GerarDocumento.py CHANGED
@@ -177,41 +177,7 @@ class GerarDocumento:
             vector_store, bm25, chunk_ids, query
         )
 
-
-        contexts = []
-        sources = []
-
-        # Get full documents for top results
-        for chunk_id, score in ranked_results[: self.config.num_chunks]:
-            results = vector_store.get(
-                where={"chunk_id": chunk_id}, include=["documents", "metadatas"]
-            )
-
-            if results["documents"]:
-                context = results["documents"][0]
-                metadata = results["metadatas"][0]
-
-                contexts.append(context)
-                sources.append(
-                    {
-                        "content": context,
-                        "page": metadata["page"],
-                        "chunk_id": chunk_id,
-                        "relevance_score": score,
-                        "context": metadata.get("context", ""),
-                    }
-                )
-
-        if llm_ultimas_requests == "gpt-4o-mini":
-            llm = ChatOpenAI(
-                temperature=self.gpt_temperature,
-                model=self.gpt_model,
-                api_key=SecretStr(self.openai_api_key),
-            )
-        elif llm_ultimas_requests == "deepseek-chat":
-            llm_instance = LLM()
-            llm = llm_instance.deepseek()
-
+        llm = self.select_model_for_last_requests(llm_ultimas_requests)
         # prompt_auxiliar = PromptTemplate(
         #     template=self.prompt_auxiliar, input_variables=["context"]
         # )
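The inline model-selection branching removed above is collapsed into a single helper call. A minimal sketch of what select_model_for_last_requests presumably does, reconstructed from the removed branches; the langchain_openai import path and the repo's LLM wrapper are assumptions, not confirmed by this diff:

# Hypothetical reconstruction of the new helper, mirroring the removed branches.
from langchain_openai import ChatOpenAI  # assumed import path
from pydantic import SecretStr  # assumed import path

class GerarDocumento:
    def select_model_for_last_requests(self, llm_ultimas_requests: str):
        # Same branching that used to live inline at the call site above.
        if llm_ultimas_requests == "gpt-4o-mini":
            return ChatOpenAI(
                temperature=self.gpt_temperature,
                model=self.gpt_model,
                api_key=SecretStr(self.openai_api_key),
            )
        elif llm_ultimas_requests == "deepseek-chat":
            return LLM().deepseek()  # LLM: this repo's own wrapper class (assumption)

Centralizing the selection keeps the generation method free of per-provider construction details and gives new providers a single place to plug in.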
@@ -224,10 +190,10 @@ class GerarDocumento:
 
         prompt_gerar_documento = PromptTemplate(
             template=self.prompt_gerar_documento,
-            input_variables=["
+            input_variables=["context"],
         )
 
-        documento_gerado_final = cast(
+        documento_gerado = cast(
             str,
             llm.invoke(
                 prompt_gerar_documento.format(
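A side note on the cast kept here: with LangChain chat models, invoke typically returns a message object rather than a plain str, so cast(str, ...) only quiets the type checker. One hedged way to normalize the result to text, assuming a LangChain-style return value:

# Sketch: coerce the invoke result to plain text, whatever the model returns.
resultado = llm.invoke(prompt_gerar_documento.format(context="..."))
documento_gerado = resultado.content if hasattr(resultado, "content") else str(resultado)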
@@ -238,9 +204,7 @@ class GerarDocumento:
         )
 
         # Split the response into paragraphs
-        summaries = [
-            p.strip() for p in documento_gerado_final.split("\n\n") if p.strip()
-        ]
+        summaries = [p.strip() for p in documento_gerado.split("\n\n") if p.strip()]
 
         # Create structured output
         structured_output = []
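For reference, the tightened one-liner keeps only non-empty paragraph blocks; a quick illustration using the same names as the diff:

documento_gerado = "First paragraph.\n\nSecond paragraph.\n\n"
summaries = [p.strip() for p in documento_gerado.split("\n\n") if p.strip()]
# ['First paragraph.', 'Second paragraph.']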