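"""Analysis pipeline for the Space.

Runs the source in entrada.txt through the lexer, parser, semantic analyzer,
and intermediate-code generator, attaches NLP-generated suggestions to errors
and comments via the Hugging Face Inference API, and writes the full report
to analisis.json.
"""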
import os
import json

import requests

from lexer import lexer
from parser import Parser
from semantico import AnalizadorSemantico
from codigo_intermedio import GeneradorIntermedio
from sugerencias_nlp import procesar_comentarios
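
# Hugging Face Inference API setup. The token is read from the environment
# (e.g. configured as a Space secret) so it is never hard-coded in the repo.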
HF_TOKEN = os.environ.get("HF_TOKEN", "")
HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}
# The suggestion endpoint must be a text-generation model, since the response
# is parsed for "generated_text"; a classifier such as CodeBERTa-language-id
# does not return that field. gpt2 serves as a minimal stand-in here.
API_URL = "https://api-inference.huggingface.co/models/gpt2"


def sugerencia_nlp_error(error_msg):
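    """Request a short, model-generated fix suggestion for a compiler error."""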
    payload = {
        "inputs": f"ERROR: {error_msg}\nSUGERENCIA:",
        "parameters": {
            "max_new_tokens": 40,
            "temperature": 0.7,
            "return_full_text": False,
        },
    }
    try:
        response = requests.post(API_URL, headers=HEADERS, json=payload, timeout=10)
        if response.status_code == 200:
            return response.json()[0]["generated_text"].strip()
        return f"(no suggestion: HTTP {response.status_code})"
    except Exception as e:
        # Broad catch on purpose: a failed suggestion must never crash the pipeline.
        print("Error fetching NLP suggestion:", e)
        return "(suggestion unavailable due to a connection error)"


def main():
    print("Starting analysis")
    with open("entrada.txt", "r", encoding="utf-8") as f:
        codigo = f.read()
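
    # Results accumulated across the phases below; each phase is wrapped in
    # its own try/except so a failure in one does not abort the others.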
    errores_parser = []
    errores_semanticos = []
    variables = {}
    comentarios_ext = []
    anotaciones = []
    ast = []
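
    # Phase 1: lexical and syntactic analysis.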
    try:
        print("Lexical analysis...")
        tokens = lexer(codigo)
        parser = Parser(tokens)
        try:
            print("Syntactic analysis...")
            ast = parser.parse()
            print("Parsing completed successfully")
        except SyntaxError as e:
            print("Error caught in parser:", e)
            errores_parser.append(str(e))
            ast = []
    except Exception as e:
        print("Error in lexer or parser:", e)
        errores_parser.append(f"Critical error: {str(e)}")
        ast = []
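
    # Phase 2: semantic analysis and intermediate-code generation,
    # attempted only when parsing produced an AST.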
    if ast:
        try:
            print("Semantic analysis...")
            semantico = AnalizadorSemantico(ast)
            resultado = semantico.analizar()
            errores_semanticos = [
                {"mensaje": err, "sugerencia": sugerencia_nlp_error(err)}
                for err in resultado["errores_semanticos"]
            ]
            variables = resultado["variables_declaradas"]
            anotaciones = resultado.get("anotaciones", [])
            print("Semantic analysis completed")

            print("Generating intermediate code...")
            generador = GeneradorIntermedio()
            intermedio = generador.generar(ast)
            with open("codigo_intermedio.txt", "w", encoding="utf-8") as f:
                for linea in intermedio:
                    f.write(linea + "\n")
        except Exception as e:
            print("Error in semantic analysis:", e)
            errores_semanticos.append({"mensaje": str(e), "sugerencia": "(not processed)"})
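
    # Phase 3: extract comments from the source and attach NLP suggestions.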
    try:
        print("Processing comments...")
        comentarios_ext = [
            {"comentario": c, "sugerencia": s}
            for c, s in procesar_comentarios(codigo)
        ]
    except Exception as e:
        print("Error processing comments:", e)
        comentarios_ext = []
print("Escribiendo analisis.json...") | |
analisis = { | |
"variables_declaradas": variables, | |
"errores_parser": errores_parser, | |
"errores_semanticos": errores_semanticos, | |
"comentarios": comentarios_ext, | |
"anotaciones": anotaciones | |
} | |
with open("analisis.json", "w", encoding="utf-8") as f: | |
json.dump(analisis, f, indent=2) | |
print("Análisis completado.") | |


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Last-resort handler: record the fatal error so the report file
        # always exists, even when main() crashes.
        print("Fatal error in main execution:", e)
        with open("analisis.json", "w", encoding="utf-8") as f:
            json.dump({"error_fatal": str(e)}, f, indent=2, ensure_ascii=False)