victorafarias committed on
Commit
c7397e8
·
1 Parent(s): a941b5a

Correções e evoluções

Browse files
Files changed (2) hide show
  1. app.py +20 -11
  2. llms.py +1 -2
app.py CHANGED
@@ -68,16 +68,24 @@ def process():
68
  # --- LÓGICA ATÔMICA (PARALELA) CORRIGIDA ---
69
  results = {}
70
  threads = []
71
- def run_chain(chain, inputs, key):
72
- try:
73
- result = chain.invoke(inputs)['text']
74
- # Validação dentro da thread
75
- if not result or not result.strip():
76
- results[key] = "Error:EmptyResponse"
77
- else:
78
- results[key] = result
79
- except Exception as e:
80
- results[key] = f"Erro ao processar {key}: {e}"
 
 
 
 
 
 
 
 
81
 
82
  models = {'grok': grok_llm, 'sonnet': claude_llm, 'gemini': gemini_llm}
83
  prompt = PromptTemplate(template=PROMPT_ATOMICO_INICIAL, input_variables=["solicitacao_usuario", "rag_context"])
@@ -85,7 +93,8 @@ def process():
85
 
86
  for name, llm in models.items():
87
  chain = LLMChain(llm=llm, prompt=prompt)
88
- thread = threading.Thread(target=run_chain, args=(chain, {"solicitacao_usuario": solicitacao_usuario, "rag_context": rag_context}, name))
 
89
  threads.append(thread)
90
  thread.start()
91
 
 
68
  # --- LÓGICA ATÔMICA (PARALELA) CORRIGIDA ---
69
  results = {}
70
  threads = []
71
+
72
+ # --- FUNÇÃO ATUALIZADA COM TIMEOUT ---
73
+ def run_chain_with_timeout(chain, inputs, key, timeout=300):
74
+ def task():
75
+ return chain.invoke(inputs)['text']
76
+
77
+ with concurrent.futures.ThreadPoolExecutor() as executor:
78
+ future = executor.submit(task)
79
+ try:
80
+ result = future.result(timeout=timeout)
81
+ if not result or not result.strip():
82
+ results[key] = "Error:EmptyResponse"
83
+ else:
84
+ results[key] = result
85
+ except concurrent.futures.TimeoutError:
86
+ results[key] = f"Erro ao processar {key.upper()}: Tempo limite excedido (Timeout)."
87
+ except Exception as e:
88
+ results[key] = f"Erro ao processar {key.upper()}: {e}"
89
 
90
  models = {'grok': grok_llm, 'sonnet': claude_llm, 'gemini': gemini_llm}
91
  prompt = PromptTemplate(template=PROMPT_ATOMICO_INICIAL, input_variables=["solicitacao_usuario", "rag_context"])
 
93
 
94
  for name, llm in models.items():
95
  chain = LLMChain(llm=llm, prompt=prompt)
96
+ # A thread agora chama a função com timeout
97
+ thread = threading.Thread(target=run_chain_with_timeout, args=(chain, {"solicitacao_usuario": solicitacao_usuario, "rag_context": rag_context}, name))
98
  threads.append(thread)
99
  thread.start()
100
 
llms.py CHANGED
@@ -29,6 +29,5 @@ claude_llm = ChatAnthropic(
29
  # Gemini
30
  gemini_llm = ChatGoogleGenerativeAI(
31
  google_api_key=os.getenv("GOOGLE_API_KEY"),
32
- model=os.getenv("GEMINI_MODEL_ID"),
33
- timeout=300
34
  )
 
29
  # Gemini
30
  gemini_llm = ChatGoogleGenerativeAI(
31
  google_api_key=os.getenv("GOOGLE_API_KEY"),
32
+ model=os.getenv("GEMINI_MODEL_ID")
 
33
  )