package net.cyue.ort.llm.tokenizer;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.*;

/**
 * Tokenizer for RWKV models.
 *
 * <p>Supports two vocabulary formats, auto-detected from the file content:
 * <ul>
 *   <li>HuggingFace-style {@code tokenizer.json} (either a BPE {@code model.vocab}
 *       object or a SentencePiece-style top-level {@code vocab} array, plus an
 *       optional {@code added_tokens} array that overrides base entries);</li>
 *   <li>a plain-text vocab file with one token per line (optionally followed by a
 *       tab- or space-separated score).</li>
 * </ul>
 *
 * <p>Encoding uses greedy longest-match over a character trie; decoding joins
 * tokens back together, expanding the SentencePiece space marker U+2581.
 */
public class RWKVTokenizer extends Tokenizer {

    private final Map<String, Integer> vocab = new HashMap<>();
    private final Map<Integer, String> idToToken = new HashMap<>();
    private int unkId = 0;
    private TrieNode root = new TrieNode();

    // ObjectMapper is thread-safe and costly to construct; share one instance.
    private static final ObjectMapper JSON_MAPPER = new ObjectMapper();

    /** Trie node keyed by single chars; {@code token} is non-null at word ends. */
    private static class TrieNode {
        final Map<Character, TrieNode> children = new HashMap<>();
        String token = null;
    }

    public RWKVTokenizer(String tokenizerFilePath) throws IOException {
        super(tokenizerFilePath);
    }

    public RWKVTokenizer(File tokenizerFile) throws IOException {
        super(tokenizerFile);
    }

    public RWKVTokenizer(InputStream tokenizerStream) throws IOException {
        super(tokenizerStream);
    }

    /**
     * Parses the tokenizer configuration, auto-detecting JSON vs. plain-text
     * vocab format, then builds the id-to-token map and the matching trie.
     *
     * @param configContent raw file content supplied by the superclass
     * @throws IOException if the content is JSON but cannot be parsed
     */
    @Override
    protected void loadConfig(String configContent) throws IOException {
        // Detect file format: JSON tokenizer config vs. plain-text vocab list.
        if (isJsonFormat(configContent)) {
            loadFromJson(configContent);
        } else {
            loadFromVocabTxt(configContent);
        }

        // Build the reverse (id -> token) map and the longest-match trie.
        buildIdToTokenMap();
        buildTrie();
    }

    /** Heuristic: JSON configs start with '{' or '['; vocab lists do not. */
    private boolean isJsonFormat(String content) {
        content = content.trim();
        return content.startsWith("{") || content.startsWith("[");
    }

    /** Loads vocab and unk id from a JSON tokenizer config. */
    private void loadFromJson(String content) throws IOException {
        try {
            JsonNode rootJson = JSON_MAPPER.readTree(content);
            loadVocabFromJson(rootJson);
            setUnkIdFromJson(rootJson);
        } catch (Exception e) {
            // Preserve the original cause for diagnostics.
            throw new IOException("Failed to parse tokenizer JSON", e);
        }
    }

    /**
     * Loads a plain-text vocab file: one token per line, ids assigned by line
     * order (skipping blank lines). Lines may carry a trailing score after a
     * tab or a space.
     */
    private void loadFromVocabTxt(String content) {
        vocab.clear();
        String[] lines = content.split("\\R"); // \R matches any line terminator
        int id = 0;

        for (String line : lines) {
            // NOTE(review): trim() drops a line consisting only of a space, so a
            // literal " " token cannot be represented in this format — confirm
            // against the actual vocab.txt layout used by the model.
            line = line.trim();
            if (line.isEmpty()) continue;

            // Two common layouts:
            //   1. bare token:        "token"
            //   2. token plus score:  "token\t-10.5" or "token -10.5"
            String token = extractTokenFromLine(line);
            vocab.put(token, id++);
        }

        // Resolve the unk token; RWKV vocab files typically place it near the top.
        if (vocab.containsKey("<|unk|>")) {
            unkId = vocab.get("<|unk|>");
        } else if (vocab.containsKey("<unk>")) {
            unkId = vocab.get("<unk>");
        } else {
            unkId = 0; // last-resort fallback
            System.err.println("Warning: unk token not found in vocab. Using ID 0");
        }
    }

    /**
     * Strips an optional trailing score from a vocab line, returning only the
     * token text. Splits at the first tab, else the first space.
     */
    private String extractTokenFromLine(String line) {
        int tabIdx = line.indexOf('\t');
        if (tabIdx != -1) {
            return line.substring(0, tabIdx).trim();
        }

        // Split at the first space only if something follows it.
        int spaceIdx = line.indexOf(' ');
        if (spaceIdx != -1 && spaceIdx < line.length() - 1) {
            return line.substring(0, spaceIdx).trim();
        }

        return line;
    }

    /**
     * Populates {@link #vocab} from a JSON config. Tries the BPE layout
     * ({@code model.vocab} object) first, then the SentencePiece layout
     * (top-level {@code vocab} array where the array index is the id).
     * {@code added_tokens} entries are applied last and override base entries.
     */
    private void loadVocabFromJson(JsonNode rootJson) {
        JsonNode modelNode = rootJson.path("model");
        JsonNode vocabNode = modelNode.path("vocab");

        // BPE layout: model.vocab is a {token: id} object.
        if (!vocabNode.isMissingNode() && vocabNode.isObject()) {
            Iterator<Map.Entry<String, JsonNode>> fields = vocabNode.fields();
            while (fields.hasNext()) {
                Map.Entry<String, JsonNode> entry = fields.next();
                vocab.put(entry.getKey(), entry.getValue().asInt());
            }
        }
        // SentencePiece layout: top-level vocab is [[token, score], ...].
        else if (rootJson.has("vocab") && rootJson.get("vocab").isArray()) {
            ArrayNode vocabArray = (ArrayNode) rootJson.get("vocab");
            for (int i = 0; i < vocabArray.size(); i++) {
                JsonNode entry = vocabArray.get(i);
                if (entry.isArray() && entry.size() >= 2) {
                    String token = entry.get(0).asText();
                    vocab.put(token, i); // id = array index
                }
            }
        }

        // added_tokens override/extend the base vocabulary.
        if (rootJson.has("added_tokens") && rootJson.get("added_tokens").isArray()) {
            ArrayNode addedTokens = (ArrayNode) rootJson.get("added_tokens");
            for (JsonNode tokenNode : addedTokens) {
                if (tokenNode.isObject()) {
                    String content = tokenNode.path("content").asText();
                    int id = tokenNode.path("id").asInt();
                    vocab.put(content, id);
                }
            }
        }
    }

    /**
     * Resolves {@link #unkId} from the JSON config, trying sources in priority
     * order: explicit {@code model.unk_id}, the {@code <unk>} vocab entry, an
     * {@code added_tokens} entry named {@code <unk>}, the RWKV-World
     * {@code <|unk|>} entry, and finally id 0.
     */
    private void setUnkIdFromJson(JsonNode rootJson) {
        JsonNode modelNode = rootJson.path("model");

        // 1. Prefer an explicit model.unk_id.
        if (modelNode.has("unk_id") && modelNode.get("unk_id").isInt()) {
            unkId = modelNode.get("unk_id").asInt();
            return;
        }

        // 2. A <unk> entry in the loaded vocab.
        if (vocab.containsKey("<unk>")) {
            unkId = vocab.get("<unk>");
            return;
        }

        // 3. A <unk> entry in added_tokens.
        if (rootJson.has("added_tokens") && rootJson.get("added_tokens").isArray()) {
            ArrayNode addedTokens = (ArrayNode) rootJson.get("added_tokens");
            for (JsonNode tokenNode : addedTokens) {
                if (tokenNode.path("content").asText().equals("<unk>")) {
                    unkId = tokenNode.path("id").asInt();
                    return;
                }
            }
        }

        // 4. RWKV-World's spelling of the unk token.
        if (vocab.containsKey("<|unk|>")) {
            unkId = vocab.get("<|unk|>");
            return;
        }

        // 5. Last resort.
        unkId = 0;
        System.err.println("Warning: unk_id not found in tokenizer config. Using default value: " + unkId);
    }

    /**
     * Rebuilds the id -> token map. When several tokens map to the same id
     * (added_tokens overrides), iteration order decides which string wins.
     */
    private void buildIdToTokenMap() {
        idToToken.clear();
        for (Map.Entry<String, Integer> entry : vocab.entrySet()) {
            idToToken.put(entry.getValue(), entry.getKey());
        }
    }

    /**
     * Rebuilds the matching trie from the vocabulary.
     *
     * <p>Every distinct token ends at a unique trie node, so insertion order is
     * irrelevant; longest-match semantics come from the traversal in
     * {@link #tokenize(String)}, not from insertion order. (The previous
     * implementation sorted tokens by length first — pure dead work.)
     */
    private void buildTrie() {
        root = new TrieNode();
        for (String token : vocab.keySet()) {
            TrieNode node = root;
            for (char c : token.toCharArray()) {
                node = node.children.computeIfAbsent(c, k -> new TrieNode());
            }
            node.token = token;
        }
    }

    /**
     * Splits {@code text} into vocabulary tokens by greedy longest-match.
     * Positions with no trie match fall back to the single character if it is
     * in the vocab, to U+2581 for a plain space (when the vocab has it), and
     * otherwise to the configured unk token.
     *
     * @param text input text (may be empty; never split mid-token)
     * @return the matched token strings, in order
     */
    @Override
    public String[] tokenize(String text) {
        List<String> tokens = new ArrayList<>();
        int i = 0;
        int n = text.length();

        while (i < n) {
            TrieNode node = root;
            String match = null;

            // Walk the trie as far as the text allows, remembering the last
            // word-end seen. A deeper node's token is strictly longer, so the
            // last non-null token IS the longest match.
            for (int j = i; j < n; j++) {
                TrieNode next = node.children.get(text.charAt(j));
                if (next == null) {
                    break;
                }
                node = next;
                if (node.token != null) {
                    match = node.token;
                }
            }

            if (match != null) {
                tokens.add(match);
                i += match.length();
            } else {
                // No trie match: fall back to a single character.
                char c = text.charAt(i);
                String singleChar = String.valueOf(c);

                if (vocab.containsKey(singleChar)) {
                    tokens.add(singleChar);
                } else if (c == ' ' && vocab.containsKey("\u2581")) {
                    // Map a bare space to the SentencePiece space marker U+2581.
                    tokens.add("\u2581");
                } else {
                    // Emit the token the configured unkId actually maps to, so
                    // tokenize/tokens2IDs round-trip consistently. (Previously a
                    // literal "<unk>" was emitted even when the model's unk
                    // token is spelled differently, e.g. "<|unk|>".)
                    tokens.add(idToToken.getOrDefault(unkId, "<unk>"));
                }
                i++;
            }
        }
        return tokens.toArray(new String[0]);
    }

    /**
     * Maps token strings to their ids; unknown tokens map to {@link #unkId}.
     */
    @Override
    public long[] tokens2IDs(String[] tokens) {
        long[] ids = new long[tokens.length];
        for (int i = 0; i < tokens.length; i++) {
            ids[i] = vocab.getOrDefault(tokens[i], unkId);
        }
        return ids;
    }

    /** Tokenizes {@code text} and bundles text, tokens, and ids into a Result. */
    @Override
    public Result encode(String text) {
        String[] tokens = tokenize(text);
        long[] ids = tokens2IDs(tokens);
        return new Result(text, tokens, ids);
    }

    /**
     * Reassembles text from token ids. Tokens starting with U+2581 contribute a
     * leading space (except at the start); unknown ids are skipped; special
     * tokens are skipped when {@code skipSpecialTokens} is set. The result is
     * trimmed, so leading/trailing whitespace of the original text is lost.
     *
     * @param tokenIDs          ids to decode (must fit in int range)
     * @param skipSpecialTokens drop tokens matching {@link #isSpecialToken}
     * @return the decoded, trimmed text
     */
    @Override
    public String decode(long[] tokenIDs, boolean skipSpecialTokens) {
        StringBuilder sb = new StringBuilder();
        boolean firstToken = true;

        for (long id : tokenIDs) {
            String token = idToToken.get((int) id);
            if (token == null) continue; // unknown id: skip silently

            if (skipSpecialTokens && isSpecialToken(token)) {
                continue;
            }

            // SentencePiece space marker U+2581 ("▁") prefixes a new word.
            if (token.startsWith("\u2581")) {
                if (!firstToken) sb.append(' ');
                sb.append(token.substring(1));
                firstToken = false;
            } else {
                sb.append(token);
                firstToken = false;
            }
        }

        return sb.toString().trim();
    }

    /**
     * A token is "special" if it starts with {@code <|}, is angle-bracketed
     * like {@code <...>}, or is the bare space marker U+2581. Parentheses make
     * the intended precedence explicit (the original relied on {@code &&}
     * binding tighter than {@code ||}).
     */
    private boolean isSpecialToken(String token) {
        return token.startsWith("<|")
                || (token.startsWith("<") && token.endsWith(">"))
                || token.equals("\u2581");
    }
}
