Commit 78222a9 · Parent: ffa0c53

fix app

Files changed:
- app.py +46 -29
- requirements.txt +2 -1
app.py
CHANGED
@@ -2,23 +2,39 @@ from fastapi import FastAPI
 from transformers import pipeline
 import json
 import os
+from huggingface_hub import HfApi
 
 app = FastAPI()
 
 # Load the questions.json file
-
-
+try:
+    with open("questions.json", "r", encoding="utf-8") as f:
+        examples = json.load(f)
+except FileNotFoundError:
+    examples = []
 
-#
-
-
-
-
-
-
-)
+# Function to check and load the model
+def load_model():
+    try:
+        return pipeline(
+            "text2text-generation",
+            model="unicamp-dl/ptt5-base-portuguese-vocab",
+            tokenizer="unicamp-dl/ptt5-base-portuguese-vocab",
+            device_map="auto" if os.getenv("HF_TOKEN") else None,
+            model_kwargs={"load_in_8bit": True if os.getenv("HF_TOKEN") else False},
+            trust_remote_code=True  # allows loading remote code, if needed
+        )
+    except Exception as e:
+        print(f"Erro ao carregar o modelo: {e}")
+        return None
+
+# Initialize the model
+ptt5 = load_model()
 
 def generate_question_from_prompt(theme, difficulty, example_question=None):
+    if not ptt5:
+        return {"question": "Erro: Modelo não carregado.", "options": [], "answer": "", "explanation": "Por favor, verifique os logs."}
+
     if example_question:
         example_text = (
             f"Enunciado clínico: {example_question['question'].split('Considerando')[-1].strip()} "
@@ -41,27 +57,29 @@ def generate_question_from_prompt(theme, difficulty, example_question=None):
         "Alternativas: A) [opção], B) [opção], C) [opção], D) [opção]. Gabarito: [letra]. "
         "Explicação: [texto].'"
     )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    try:
+        response = ptt5(prompt, max_new_tokens=512, temperature=0.7, top_p=0.9)[0]['generated_text']
+        # Parse the response to extract its components
+        parts = response.split("Alternativas:")
+        if len(parts) > 1:
+            question_part = parts[0].replace("Enunciado clínico:", "").strip()
+            options_part = parts[1].split("Gabarito:")[0].strip()
+            answer_part = parts[1].split("Gabarito:")[1].split("Explicação:")[0].strip()
+            explanation_part = parts[1].split("Explicação:")[1].strip() if "Explicação:" in parts[1] else "Explicação padrão"
+            options = [opt.strip() for opt in options_part.split(",")]
+            if len(options) >= 4:
+                return {
+                    "question": f"Enunciado clínico: {question_part}",
+                    "options": [f"A) {options[0]}", f"B) {options[1]}", f"C) {options[2]}", f"D) {options[3]}"],
+                    "answer": answer_part,
+                    "explanation": explanation_part
+                }
+        return {"question": response, "options": [], "answer": "", "explanation": "Explicação padrão"}
+    except Exception as e:
+        return {"question": f"Erro na geração: {e}", "options": [], "answer": "", "explanation": "Tente novamente."}
 
 @app.get("/generate")
 async def generate_question(theme: str, difficulty: str):
-    # Use the first example as a base
     example = examples[0] if examples else None
     return generate_question_from_prompt(theme, difficulty, example)
 
@@ -69,7 +87,6 @@ async def generate_question(theme: str, difficulty: str):
 async def get_simulado(num_questions: int = 5):
     simulado = []
     for _ in range(num_questions):
-        # Alternate between examples for variety
         example = examples[_ % len(examples)] if examples else None
         question_data = generate_question_from_prompt("clinica medica", "medio", example)
         simulado.append(question_data)
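For a quick check of the changed endpoints, a minimal smoke test is sketched below. It assumes the app is reachable on localhost:7860 and that get_simulado is mounted at GET /simulado; that decorator falls outside the hunk context shown, so the route path is an assumption.

# Minimal smoke test for the endpoints in this commit (a sketch, not part of the diff).
# Assumes the app runs on localhost:7860 and that get_simulado is mounted at
# GET /simulado -- its decorator is outside the diff context shown above.
import requests

BASE = "http://localhost:7860"

# /generate takes the two required query parameters declared in the diff
resp = requests.get(f"{BASE}/generate", params={"theme": "cardiologia", "difficulty": "medio"})
print(resp.json())

# num_questions defaults to 5 in get_simulado; request 3 here
resp = requests.get(f"{BASE}/simulado", params={"num_questions": 3})
print(resp.json())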
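One caveat on the parsing added to generate_question_from_prompt: options_part.split(",") breaks when an option itself contains a comma, and because the prompt asks for "A) [opção], B) [opção], ...", each split piece already carries a letter prefix, which f"A) {options[0]}" then duplicates. A hedged alternative is to split on the letter markers instead; the sketch below assumes the prompted answer format and is not what the commit does.

import re

def parse_options(options_part: str) -> list[str]:
    # Split only on commas immediately followed by a letter marker ("B)", "C)", ...),
    # so commas inside an option survive and the model's own "A) ..." prefixes
    # are kept rather than duplicated. A sketch, not part of the commit above.
    return [opt.strip() for opt in re.split(r",\s*(?=[A-D]\))", options_part)]

print(parse_options("A) dispneia, com ortopneia, B) febre alta, C) dor torácica, D) cianose"))
# -> ['A) dispneia, com ortopneia', 'B) febre alta', 'C) dor torácica', 'D) cianose']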
requirements.txt
CHANGED
@@ -1,4 +1,5 @@
 fastapi==0.103.2
 uvicorn==0.23.2
 transformers==4.35.0
-accelerate==0.21.0
+accelerate==0.21.0
+huggingface_hub==0.19.0
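Two pins are still arguably missing: transformers does not install a torch backend on its own, and the load_in_8bit branch in load_model additionally needs bitsandbytes (and a CUDA device) at runtime. If the Space relies on either, the additions would look roughly like the lines below; the exact versions are assumptions, not part of this commit.

torch==2.1.0          # pipeline() needs a backend; transformers alone does not provide one
bitsandbytes==0.41.1  # required when load_in_8bit=True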