package net.northcoding.fresh.lexer;

import net.northcoding.fresh.token.Token;
import net.northcoding.fresh.util.Logger;

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Vector;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Hand-written tokenizer for the Fresh language.
 *
 * <p>Splits raw source text into words (operators/symbols known to
 * {@code Token.types}, string literals, and runs of other characters),
 * then classifies each word as INT, STRING, operator/symbol, ILL
 * (illegal character run) or IDENTITY, terminating the stream with EOF.
 */
public class Lexer {

    private static final Pattern NUMBER_PATTERN = Pattern.compile("\\d+");
    private static final Pattern STRING_PATTERN = Pattern.compile("\"[^\"]*\"");
    // A run of characters that is neither whitespace, a digit, a quote, nor one
    // of the lexer's single-character operators/symbols — i.e. illegal input.
    private static final String ILL_CHARACTERS = "[^\\s\\d\"\\/\\*\\%\\+\\-\\=\\>\\<\\[\\]\\(\\)\\{\\}\\.\\|\\#\\?\\!\\;]+";
    // Compiled once; the original recompiled this pattern for every unknown word.
    private static final Pattern ILL_PATTERN = Pattern.compile(ILL_CHARACTERS);

    // TODO(review): hard-coded absolute log path — should come from configuration.
    Logger logger = new Logger("Lexer", "D:\\code\\Java\\others\\Fresh\\code\\output.log");
    private final String input;
    private final Vector<Token> tokens;

    /**
     * Creates a lexer over the given source text.
     *
     * @param input the raw source code to tokenize
     */
    public Lexer(String input) {
        this.input = input;
        this.tokens = new Vector<>();
    }

    /**
     * Reads the file at {@code filepath} as UTF-8 and tokenizes its contents.
     *
     * @param filepath path of the source file to lex
     * @return the token stream produced by {@link #tokenize()}
     * @throws IOException if the file cannot be opened or read
     */
    public static Vector<Token> lexerFile(String filepath) throws IOException {
        // try-with-resources: the original leaked the stream when readAllBytes threw.
        try (FileInputStream file = new FileInputStream(filepath)) {
            // Explicit UTF-8: the original decoded with the platform default charset.
            String content = new String(file.readAllBytes(), StandardCharsets.UTF_8);
            return new Lexer(content).tokenize();
        }
    }

    /**
     * Splits the raw input into lexical words.
     *
     * <p>Single-character operators/symbols become their own word; whitespace is
     * skipped; a string literal is collected with escapes resolved and is stored
     * with a leading {@code "} as a marker (and no closing quote) so that
     * {@link #tokenize()} can recognize it; everything else is collected as a run
     * up to the next delimiter.
     */
    private String[] split(String s) {
        Vector<String> result = new Vector<>();
        int curr = 0;
        while (curr < s.length()) {
            char c = s.charAt(curr);
            if (Token.types.containsKey(String.valueOf(c))) {
                // Single-character operator or symbol.
                result.add(String.valueOf(c));
                curr++;
            } else if (c == ' ' || c == '\t' || c == '\n') {
                curr++; // skip whitespace
            } else if (c == '"') {
                // String literal: consume up to the closing quote, resolving escapes.
                StringBuilder sb = new StringBuilder("\"");
                curr++;
                while (curr < s.length() && s.charAt(curr) != '"') {
                    // Bounds guard on curr + 1: the original threw
                    // StringIndexOutOfBoundsException on a trailing backslash.
                    if (s.charAt(curr) == '\\' && curr + 1 < s.length()) {
                        switch (s.charAt(++curr)) {
                            case 'n' -> sb.append('\n');
                            case 't' -> sb.append('\t');
                            case '\\' -> sb.append('\\');
                            case '"' -> sb.append('\"');
                            case '\'' -> sb.append('\'');
                            // NOTE(review): any other escape is silently dropped.
                        }
                        curr++;
                        continue;
                    }
                    sb.append(s.charAt(curr));
                    curr++;
                }
                // Leading quote kept as a STRING marker; closing quote deliberately
                // not stored (tokenize() strips the marker with substring(1)).
                result.add(sb.toString());
                curr++; // step past the closing quote

            } else {
                // Identifier / number / unknown run: read until a delimiter.
                StringBuilder sb = new StringBuilder();
                while (curr < s.length()
                        && !Token.types.containsKey(String.valueOf(s.charAt(curr)))
                        && s.charAt(curr) != ' ' && s.charAt(curr) != '\t' && s.charAt(curr) != '\n') {
                    sb.append(s.charAt(curr));
                    curr++;
                }
                result.add(sb.toString());
            }
        }
        return result.toArray(new String[0]);
    }

    /**
     * Classifies each word produced by {@link #split(String)} into a token and
     * appends an EOF marker.
     *
     * @return the accumulated token list (this lexer's internal vector)
     */
    public Vector<Token> tokenize() {
        String[] words = split(input); // split the input into raw words
        for (String word : words) {
            if (NUMBER_PATTERN.matcher(word).matches()) {
                tokens.add(Token.createToken(word, Token.TokenType.INT));
            } else if (STRING_PATTERN.matcher(word).matches()) {
                tokens.add(Token.createToken(word, Token.TokenType.STRING));
            } else if (Token.types.containsKey(word)) {
                tokens.add(Token.createToken(word, Token.types.get(word)));
            } else if (!word.isEmpty() && word.charAt(0) == '"') {
                // split() keeps the leading quote as a marker; strip it here.
                // (isEmpty guard added: the original threw on an empty word.)
                tokens.add(Token.createToken(word.substring(1), Token.TokenType.STRING));
            } else {
                // Anything else: an illegal character run, or an identifier.
                Matcher matcher = ILL_PATTERN.matcher(word);
                if (matcher.find()) {
                    tokens.add(Token.createToken(matcher.group(), Token.TokenType.ILL));
                } else {
                    tokens.add(Token.createToken(word, Token.TokenType.IDENTITY));
                }
            }
        }
        tokens.add(Token.createToken("EOF", Token.TokenType.EOF)); // end-of-input marker
        return tokens;
    }
}