import { readFileSync } from "fs";

/**
 * The lexing stage tokenizes the raw source text (tokenize);
 * the syntax-analysis stage builds the abstract syntax tree (AST) — the parse step;
 * evaluate, then, corresponds to code generation: turning the AST into executable code.
 */
import { lexer } from "./lib/lexer.js";
import { parser } from "./lib/parser.js";
import { evaluate, Exception } from "./lib/evaluate.js";
import { typecheck } from "./lib/typecheck.js";
import { highlight } from './lib/highlighter.js';

// Driver: run the pipeline (lex → parse → highlight) over one source file.
// The file may be passed as the first CLI argument; the original hard-coded
// default is kept as a fallback so existing usage is unchanged.
const file = process.argv[2] ?? "./source.js";
const content = readFileSync(file, "utf-8");

console.log("start");

// Lexing: split the raw text into a token stream (file name kept for locations).
const originTokens = lexer(file, content);
// Parsing: build the AST plus the token stream used by the highlighter.
const { ast, tokens } = parser(originTokens);
console.dir(ast, { depth: null });
console.log(highlight(content, tokens));

// TODO(review): the typecheck/evaluate stages below are intentionally disabled;
// until they are re-enabled, the `typecheck`, `evaluate`, and `Exception`
// imports above are unused.
// const typeErrors = typecheck(ast);
// if (typeErrors.length) {
//     console.log('TYPE ERROR:')
//     for (const error of typeErrors) {
//         console.log(error);
//     }
// }
// const result = evaluate(ast);
// if (result instanceof Exception) {
//     console.log(`panic: ${result.message}`)
//     for (const loc of result.backtrace) {
//         console.log(` at ${loc.file}:${loc.start.line}:${loc.start.column}-${loc.end.line}:${loc.end.column}`)
//     }
// } else {
//     console.dir(result, { depth: null });
// }
console.log("done");
