Commit 1334936 · Parent: b3c59f2
show prompts in the log
app.py CHANGED
@@ -251,6 +251,14 @@ def process():
     ).replace(
         "MAX_CHARS_PLACEHOLDER", str(max_chars)
     ).replace("<role>", f"<role>\n {contexto}")  # inject context
+
+    # --- render and log the final atomic prompt ---
+    rendered_prompt = updated_prompt_template.format(
+        contexto=contexto,
+        solicitacao_usuario=solicitacao_usuario,
+        rag_context=rag_context
+    )
+    print(f"[DEBUG] PROMPT ATÔMICO RENDERED:\n{rendered_prompt}\n{'-'*80}")
 
     prompt = PromptTemplate(template=updated_prompt_template, input_variables=["contexto", "solicitacao_usuario", "rag_context"])
     json_data = safe_json_dumps({'progress': 15, 'message': 'Iniciando processamento paralelo...'})
@@ -315,18 +323,40 @@ def process():
     ).replace(
         "MAX_CHARS_PLACEHOLDER", str(max_chars)
     ).replace("<role>", f"<role>\n {contexto}")  # inject context
+
+    # --- render and log the final hierarchical Grok prompt ---
+    rendered_grok = updated_grok_template.format(
+        contexto=contexto,
+        solicitacao_usuario=solicitacao_usuario,
+        rag_context=rag_context
+    )
+    print(f"[DEBUG] PROMPT HIERÁRQUICO GROK RENDERED:\n{rendered_grok}\n{'-'*80}")
 
     updated_sonnet_template = PROMPT_HIERARQUICO_SONNET.replace(
         "MIN_CHARS_PLACEHOLDER", str(min_chars)
     ).replace(
         "MAX_CHARS_PLACEHOLDER", str(max_chars)
     ).replace("<role>", f"<role>\n {contexto}")  # inject context
+
+    # --- render and log the final hierarchical Sonnet prompt ---
+    rendered_sonnet = updated_sonnet_template.format(
+        contexto=contexto,
+        resposta_grok=resposta_grok
+    )
+    print(f"[DEBUG] PROMPT HIERÁRQUICO SONNET RENDERED:\n{rendered_sonnet}\n{'-'*80}")
 
     updated_gemini_template = PROMPT_HIERARQUICO_GEMINI.replace(
         "MIN_CHARS_PLACEHOLDER", str(min_chars)
     ).replace(
         "MAX_CHARS_PLACEHOLDER", str(max_chars)
     ).replace("<role>", f"<role>\n {contexto}")  # inject context
+
+    # --- render and log the final hierarchical Gemini prompt ---
+    rendered_gemini = updated_gemini_template.format(
+        contexto=contexto,
+        resposta_grok=resposta_grok
+    )
+    print(f"[DEBUG] PROMPT HIERÁRQUICO GEMINI RENDERED:\n{rendered_gemini}\n{'-'*80}")
 
     json_data = safe_json_dumps({'progress': 15, 'message': 'O GROK está processando sua solicitação...'})
     yield f"data: {json_data}\n\n"
@@ -461,6 +491,14 @@ def merge():
     ).replace(
         "MAX_CHARS_PLACEHOLDER", str(max_chars)
     ).replace("<role>", f"<role>\n {contexto}")  # inject context
+
+    # --- render and log the final atomic merge prompt ---
+    rendered_merge = updated_merge_template.format(
+        contexto=contexto,
+        resposta_sonnet=resposta_sonnet,
+        resposta_gemini=resposta_gemini
+    )
+    print(f"[DEBUG] PROMPT ATÔMICO MERGE RENDERED:\n{rendered_merge}\n{'-'*80}")
 
     prompt_merge = PromptTemplate(template=updated_merge_template, input_variables=["contexto", "solicitacao_usuario", "texto_para_analise_grok", "texto_para_analise_sonnet", "texto_para_analise_gemini"])
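
The four added blocks repeat the same render-and-print pattern, each listing its template variables by hand. A natural follow-up (not part of this commit) would be to factor that pattern into one helper. The sketch below is a minimal version under assumptions: the render_and_log name and the switch from print() to Python's standard logging module are hypothetical, and it presumes each template contains str.format placeholders only for the variables passed in, as the calls added here do.

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def render_and_log(label, template, **variables):
    # Render a prompt template via str.format and log the result,
    # mirroring the [DEBUG] print() calls added in this commit.
    rendered = template.format(**variables)
    logger.debug("PROMPT %s RENDERED:\n%s\n%s", label, rendered, "-" * 80)
    return rendered

Each block would then collapse to a single call, e.g. rendered_prompt = render_and_log("ATÔMICO", updated_prompt_template, contexto=contexto, solicitacao_usuario=solicitacao_usuario, rag_context=rag_context).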