import os
import json
import requests

from lexer import lexer
from parser import Parser
from semantico import AnalizadorSemantico
from codigo_intermedio import GeneradorIntermedio
from sugerencias_nlp import procesar_comentarios

# Hugging Face Inference API configuration: the token is read from the
# environment (e.g., a Space secret) so it never appears in the source.
HF_TOKEN = os.environ.get("HF_TOKEN", "")
HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}
API_URL = "https://api-inference.huggingface.co/models/gpt2"
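
# Illustrative guard (an addition, not part of the original flow; the warning
# text is an assumption): the public Inference API tends to rate-limit or
# reject unauthenticated requests, so surface a missing token early.
if not HF_TOKEN:
    print("ADVERTENCIA: HF_TOKEN no está definido; las sugerencias NLP pueden fallar.")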


def sugerencia_nlp_error(error_msg):
    """Ask the hosted gpt2 model for a short suggestion for a semantic error."""
    payload = {
        "inputs": f"ERROR: {error_msg}\nSUGERENCIA:",
        "parameters": {
            "max_new_tokens": 30,
            "temperature": 0.8,
            "return_full_text": False
        }
    }
    response = requests.post(API_URL, headers=HEADERS, json=payload)
    if response.status_code == 200:
        # The text-generation endpoint returns a list of {"generated_text": ...}.
        return response.json()[0]["generated_text"].strip()
    return f"(sin sugerencia: {response.status_code})"


def main():
    # Read the source program to analyze.
    with open("entrada.txt", "r", encoding="utf-8") as f:
        codigo = f.read()

    # Front end: lexical analysis, parsing, and semantic analysis.
    tokens = lexer(codigo)
    parser = Parser(tokens)
    ast = parser.parse()
    semantico = AnalizadorSemantico(ast)
    resultado = semantico.analizar()

    # Attach an NLP-generated suggestion to each semantic error.
    errores_ext = [
        {"mensaje": err, "sugerencia": sugerencia_nlp_error(err)}
        for err in resultado["errores_semanticos"]
    ]

    # procesar_comentarios yields (comment, suggestion) pairs for the source comments.
    comentarios_ext = [
        {"comentario": c, "sugerencia": s}
        for c, s in procesar_comentarios(codigo)
    ]

    analisis_completo = {
        "variables_declaradas": resultado["variables_declaradas"],
        "errores_semanticos": errores_ext,
        "comentarios": comentarios_ext
    }
with open("analisis.json", "w", encoding="utf-8") as f: | |
json.dump(analisis_completo, f, indent=2) | |
generador = GeneradorIntermedio() | |
intermedio = generador.generar(ast) | |
with open("codigo_intermedio.txt", "w", encoding="utf-8") as f: | |
for linea in intermedio: | |
f.write(linea + "\n") | |
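

# Illustrative helper (a sketch, never called by the pipeline; the name is
# hypothetical): read the report produced by main() back from disk and print
# a one-line summary, handy as a quick sanity check.
def resumen_analisis(ruta="analisis.json"):
    with open(ruta, "r", encoding="utf-8") as f:
        reporte = json.load(f)
    print(f"{len(reporte['errores_semanticos'])} errores con sugerencia, "
          f"{len(reporte['comentarios'])} comentarios procesados")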


if __name__ == "__main__":
    main()