import json
import os
import heapq
from collections import defaultdict, Counter

from tqdm import tqdm


class TokenVocabulary:
    """A frequency-aware token vocabulary with ids starting at 1 (id 0 unused).

    The special markers ``<bos>`` and ``<eos>`` are always present and are
    seeded with a very large frequency so that pruning never drops them.
    """

    def __init__(self, max_size=None):
        self.token_to_id = {}
        self.id_to_token = {}
        self.next_id = 1  # id 0 is intentionally left unassigned
        self.token_freq = Counter()
        self.max_size = max_size
        # Ensure the special markers always exist.
        self.add_token("<bos>", freq=10000)
        self.add_token("<eos>", freq=10000)

    def add_token(self, token, freq=1):
        """Add *token* with frequency *freq*, or bump the frequency of an
        existing token. Returns the token's id in either case."""
        if token not in self.token_to_id:
            token_id = self.next_id
            self.token_to_id[token] = token_id
            self.id_to_token[token_id] = token
            self.token_freq[token] = freq
            self.next_id += 1
            return token_id
        self.token_freq[token] += freq
        return self.token_to_id[token]

    def get_id(self, token):
        """Return the id for *token*, or None if unknown."""
        return self.token_to_id.get(token)

    def get_token(self, token_id):
        """Return the token for *token_id*, or None if unknown."""
        return self.id_to_token.get(token_id)

    def prune(self):
        """Shrink the vocabulary to at most ``max_size`` entries.

        Retention priority: special markers first, then single-character
        tokens, then the most frequent multi-character tokens. Ids are
        reassigned densely from 1 after pruning.
        """
        if self.max_size is None or len(self.token_to_id) <= self.max_size:
            return

        # Special markers are always kept.
        special_tokens = ["<bos>", "<eos>"]
        base_tokens = [(t, self.token_freq[t]) for t in special_tokens]

        # Partition the remaining tokens by length.
        single_chars = []
        other_tokens = []
        for token, freq in self.token_freq.items():
            if token in special_tokens:
                continue  # already handled above
            if len(token) == 1:
                single_chars.append((token, freq))
            else:
                other_tokens.append((token, freq))

        # Multi-character tokens compete by frequency.
        other_tokens.sort(key=lambda x: x[1], reverse=True)

        max_others = self.max_size - len(base_tokens) - len(single_chars)
        if max_others < 0:
            # Not even all single-character tokens fit. Keep the most
            # frequent ones (bug fix: previously the truncation used
            # arbitrary dict insertion order instead of frequency).
            budget = max(0, self.max_size - len(base_tokens))
            single_chars.sort(key=lambda x: x[1], reverse=True)
            single_chars = single_chars[:budget]
            other_tokens = []
        else:
            other_tokens = other_tokens[:max_others]

        # Rebuild the vocabulary from scratch so ids stay dense.
        self.token_to_id = {}
        self.id_to_token = {}
        self.token_freq = Counter()
        self.next_id = 1

        for token, freq in base_tokens + single_chars + other_tokens:
            self.add_token(token, freq)

    def save(self, file_path):
        """Serialize the vocabulary to a JSON file."""
        data = {
            'token_to_id': self.token_to_id,
            'id_to_token': self.id_to_token,
            'next_id': self.next_id,
            'token_freq': dict(self.token_freq),
            'max_size': self.max_size
        }
        with open(file_path, 'w') as f:
            json.dump(data, f)

    def load(self, file_path):
        """Restore the vocabulary from a JSON file written by ``save``."""
        with open(file_path, 'r') as f:
            data = json.load(f)

        self.token_to_id = dict(data['token_to_id'])
        # JSON object keys are strings; restore the integer ids.
        self.id_to_token = {int(k): v for k, v in data['id_to_token'].items()}
        self.next_id = data['next_id']
        self.token_freq = Counter(data['token_freq'])
        self.max_size = data['max_size']


class Combination:
    """A (prefix, token, suffix) pattern observed during encoding."""

    def __init__(self, prefix, token, suffix, frequency=1, conn_node=None):
        self.prefix = prefix
        self.token = token
        self.suffix = suffix
        self.frequency = frequency
        self.conn_node = conn_node
        # Full text span covered by this combination.
        self.coverage = ''.join((prefix, token, suffix))

    def __repr__(self):
        return "[{},{},{}] (freq={})".format(
            self.prefix, self.token, self.suffix, self.frequency
        )

    def to_dict(self):
        """Serialize to a plain dict suitable for JSON persistence."""
        return {
            key: getattr(self, key)
            for key in ('prefix', 'token', 'suffix', 'frequency', 'conn_node')
        }

    @classmethod
    def from_dict(cls, data):
        """Inverse of :meth:`to_dict`."""
        return cls(
            data['prefix'],
            data['token'],
            data['suffix'],
            frequency=data['frequency'],
            conn_node=data['conn_node'],
        )


class CombinationTable:
    """Stores combinations with lookup indexes by prefix, suffix and token."""

    def __init__(self, max_size=None):
        self.combinations = []
        self.node_map = {}
        self.prefix_index = defaultdict(list)
        self.suffix_index = defaultdict(list)
        self.token_index = defaultdict(list)
        self.max_size = max_size
        # (prefix, token, suffix) -> index into self.combinations.
        # Replaces the former linear duplicate scan in add_combination,
        # which made building a large table quadratic.
        self._key_to_index = {}

    def add_combination(self, combination):
        """Add *combination*; if an identical (prefix, token, suffix) entry
        exists, merge frequencies into it and return the existing object."""
        key = (combination.prefix, combination.token, combination.suffix)
        existing_idx = self._key_to_index.get(key)
        if existing_idx is not None:
            existing = self.combinations[existing_idx]
            existing.frequency += combination.frequency
            return existing

        # Append and index the new combination.
        index = len(self.combinations)
        self.combinations.append(combination)
        self._update_indexes(combination, index)

        # Enforce the size cap.
        self.prune()

        return combination

    def _update_indexes(self, combination, index):
        """Register *combination* (stored at *index*) in every lookup index."""
        self._key_to_index[(combination.prefix, combination.token, combination.suffix)] = index

        # Connection-node mapping.
        if combination.conn_node:
            self.node_map.setdefault(combination.conn_node, []).append(index)

        self.prefix_index[combination.prefix].append((index, combination.token, combination.suffix))
        self.suffix_index[combination.suffix].append((index, combination.token, combination.prefix))
        self.token_index[combination.token].append(index)

    def _reset_indexes(self):
        """Clear every lookup index prior to a rebuild."""
        self.node_map = {}
        self.prefix_index = defaultdict(list)
        self.suffix_index = defaultdict(list)
        self.token_index = defaultdict(list)
        self._key_to_index = {}

    def find_by_token(self, token):
        """Return all combinations whose token equals *token*."""
        return [self.combinations[i] for i in self.token_index.get(token, [])]

    def find_by_prefix(self, prefix):
        """Return (combination, token, suffix) triples for *prefix*."""
        return [(self.combinations[i], token, suffix) for i, token, suffix in self.prefix_index.get(prefix, [])]

    def find_by_suffix(self, suffix):
        """Return (combination, token, prefix) triples for *suffix*."""
        return [(self.combinations[i], token, prefix) for i, token, prefix in self.suffix_index.get(suffix, [])]

    def prune(self):
        """Drop the lowest-frequency combinations so the table fits
        ``max_size``, then rebuild all indexes."""
        if self.max_size is None or len(self.combinations) <= self.max_size:
            return

        # Keep the most frequent combinations, preserving original order.
        ranked = sorted(enumerate(self.combinations), key=lambda x: x[1].frequency, reverse=True)
        keep_indices = {idx for idx, _ in ranked[:self.max_size]}
        self.combinations = [comb for idx, comb in enumerate(self.combinations) if idx in keep_indices]

        self._reset_indexes()
        for new_idx, comb in enumerate(self.combinations):
            self._update_indexes(comb, new_idx)

    def save(self, file_path):
        """Serialize combinations (and the size cap) to a JSON file."""
        data = {
            'combinations': [comb.to_dict() for comb in self.combinations],
            'max_size': self.max_size
        }
        with open(file_path, 'w') as f:
            json.dump(data, f)

    def load(self, file_path):
        """Restore combinations from JSON and rebuild all indexes."""
        with open(file_path, 'r') as f:
            data = json.load(f)

        self.combinations = [Combination.from_dict(comb_data) for comb_data in data['combinations']]
        self.max_size = data['max_size']

        self._reset_indexes()
        for idx, comb in enumerate(self.combinations):
            self._update_indexes(comb, idx)


class NodeManager:
    """Tracks connection-node values and hands out unique node names."""

    def __init__(self):
        self.node_values = {}
        self.next_node_id = 1

    def register_node(self, node_name, value):
        """Store *value* (coerced to str) under *node_name*.

        Last write wins: a conflicting registration simply replaces the
        previous value. Returns the stored string.
        """
        value_str = str(value)
        self.node_values[node_name] = value_str
        return value_str

    def get_node_value(self, node_name):
        """Return the stored value for *node_name*, or None if absent."""
        return self.node_values.get(node_name)

    def create_unique_node(self):
        """Return a fresh, never-before-issued node name."""
        name = f"node_{self.next_node_id}"
        self.next_node_id += 1
        return name

    def save(self, file_path):
        """Dump all node values to a JSON file."""
        with open(file_path, 'w') as f:
            json.dump(self.node_values, f)

    def load(self, file_path):
        """Replace node values with the contents of a JSON file."""
        with open(file_path, 'r') as f:
            self.node_values = json.load(f)


class Encoder:
    """Greedy longest-match encoder: turns a string into a token-id sequence.

    Encoding also learns (prefix, token) combinations as a side effect and
    records connection-node values in a private NodeManager.
    """

    def __init__(self, token_vocab, combination_table):
        self.token_vocab = token_vocab
        self.combination_table = combination_table
        self.node_manager = NodeManager()
        # (prefix, token) -> (node_name, connection value).
        # NOTE(review): this map is never cleared, so state accumulates
        # across encode() calls and feeds prefix computation for later
        # strings — confirm that this accumulation is intentional.
        self.generated_nodes = {}

    def _generate_conn_node(self, prefix, token, suffix, value):
        """Create and register a fresh connection node for a combination.

        Returns the new node name. (``suffix`` is currently unused here.)
        """
        node_name = self.node_manager.create_unique_node()
        self.node_manager.register_node(node_name, str(value))  # values are stored as strings
        self.generated_nodes[(prefix, token)] = (node_name, value)
        return node_name

    def _find_best_combination(self, s, position, token):
        """Return the best existing combination for *token* at *position* in
        *s*, or create and register a new prefix-only combination."""
        prefix_candidates = []

        # Collect combinations whose prefix matches the text immediately
        # before *position*.
        for comb in self.combination_table.find_by_token(token):
            start_idx = max(0, position - len(comb.prefix))
            if s.startswith(comb.prefix, start_idx, position):
                prefix_candidates.append(comb)

        # No match found: build a new combination.
        if not prefix_candidates:
            # The new prefix starts at the largest connection value
            # generated so far (0 if none exist yet).
            if self.generated_nodes:
                prefix_start = max(pos for _, pos in self.generated_nodes.values())
            else:
                prefix_start = 0
            prefix = s[prefix_start:position]

            # New connection node whose value is the end offset of this token.
            conn_value = position + len(token)
            conn_node = self._generate_conn_node(prefix, token, "", conn_value)

            # The new combination carries only prefix + token (empty suffix).
            new_comb = Combination(prefix, token, "", frequency=1, conn_node=conn_node)
            self.combination_table.add_combination(new_comb)
            return new_comb

        # Otherwise prefer the most frequently observed combination.
        best_comb = max(prefix_candidates, key=lambda c: c.frequency)
        return best_comb

    def encode(self, s):
        """Encode *s* into a list of token ids, wrapped in <bos>/<eos>.

        Characters not covered by any vocabulary token are silently skipped.
        """
        s_with_bos_eos = f"<bos>{s}<eos>"
        token_ids = []

        # Emit the begin-of-sequence marker first.
        bos_token = "<bos>"
        bos_id = self.token_vocab.get_id(bos_token)
        if bos_id and s_with_bos_eos.startswith(bos_token):
            token_ids.append(bos_id)
            position = len(bos_token)
        else:
            position = 0

        # Positions at which tokens matched (kept as a debugging aid).
        token_positions = []

        # Longest-match first: try long tokens before short ones.
        tokens_sorted = sorted(self.token_vocab.token_to_id.keys(), key=len, reverse=True)
        # Special markers are handled explicitly, not via the scan below.
        tokens_sorted = [token for token in tokens_sorted if token not in ["<bos>", "<eos>"]]

        while position < len(s_with_bos_eos):
            matched = False

            # The end-of-sequence marker terminates the scan.
            eos_token = "<eos>"
            eos_id = self.token_vocab.get_id(eos_token)
            if eos_id and s_with_bos_eos.startswith(eos_token, position):
                token_ids.append(eos_id)
                position += len(eos_token)
                break

            # Try the longest token that matches at the current position.
            for token in tokens_sorted:
                end = position + len(token)
                if end > len(s_with_bos_eos):
                    continue

                if s_with_bos_eos[position:end] == token:
                    # Record where this token matched.
                    token_positions.append((position, token))

                    comb = self._find_best_combination(s_with_bos_eos, position, token)
                    token_ids.append(self.token_vocab.get_id(token))

                    # Update the combination's connection node with the
                    # end offset of the matched token.
                    if comb.conn_node:
                        node_value = position + len(token)
                        self.node_manager.register_node(comb.conn_node, str(node_value))

                    position = end
                    matched = True
                    break

            if not matched:
                # Character not covered by any token: skip it.
                position += 1

        return token_ids


class Decoder:
    """Reconstructs a string from token ids, using the combination table to
    decide where combination prefixes must be re-inserted."""

    def __init__(self, token_vocab, combination_table, node_manager):
        self.token_vocab = token_vocab
        self.combination_table = combination_table
        self.node_manager = node_manager
        self.s_builder = []   # accumulated output fragments
        self.position = 0     # current length of the decoded string

    def _find_best_match(self, token_id, current_position):
        """Pick the combination whose prefix matches the decoded tail.

        Falls back to a bare (no prefix/suffix) combination when the token
        is unknown to the table or no prefix lines up. Returns None for an
        unknown token id. ``current_position`` is kept for interface
        compatibility; ``self.position`` is the value actually used.
        """
        token = self.token_vocab.get_token(token_id)
        if not token:
            return None

        candidates = self.combination_table.find_by_token(token)
        if not candidates:
            return Combination("", token, "", frequency=0)

        # Join the decoded fragments once, not once per candidate (the
        # original re-joined inside the loop, which was quadratic in the
        # number of candidates).
        decoded = ''.join(self.s_builder)

        # Keep candidates whose prefix matches the end of the decoded text.
        matched_candidates = []
        for comb in candidates:
            start_idx = self.position - len(comb.prefix)
            if start_idx >= 0 and decoded[start_idx:self.position] == comb.prefix:
                matched_candidates.append(comb)

        if matched_candidates:
            # Prefer the most frequently observed combination.
            return max(matched_candidates, key=lambda c: c.frequency)

        # No prefix lines up with the decoded tail: use a bare combination.
        return Combination("", token, "", frequency=0)

    def decode(self, token_ids):
        """Decode *token_ids* back into a string, stripping <bos>/<eos>."""
        self.s_builder = []
        self.position = 0

        for token_id in token_ids:
            token = self.token_vocab.get_token(token_id)
            if not token:
                continue

            # Special markers are emitted literally and stripped at the end.
            if token == "<bos>" or token == "<eos>":
                self.s_builder.append(token)
                self.position += len(token)
                continue

            combination = self._find_best_match(token_id, self.position)

            # Re-insert the combination prefix only when it cannot already
            # be present in the decoded output.
            if combination.prefix:
                prefix_start = self.position - len(combination.prefix)
                if prefix_start < 0:
                    self.s_builder.append(combination.prefix)
                    self.position += len(combination.prefix)

            # Emit the token itself.
            self.s_builder.append(token)
            self.position += len(token)

            # Suffixes are intentionally not emitted: they are covered by
            # the prefix of a later combination.

            if combination.conn_node:
                self.node_manager.register_node(combination.conn_node, str(self.position))

        result = ''.join(self.s_builder)

        # Strip the wrapping markers if present.
        if result.startswith("<bos>"):
            result = result[5:]
        if result.endswith("<eos>"):
            result = result[:-5]

        return result.strip()


class TokenSystem:
    """Facade bundling vocabulary, combination table, encoder and decoder."""

    def __init__(self, token_vocab_size=100, combination_table_size=500):
        self.token_vocab = TokenVocabulary(max_size=token_vocab_size)
        self.combination_table = CombinationTable(max_size=combination_table_size)
        self.node_manager = NodeManager()
        self.encoder = Encoder(self.token_vocab, self.combination_table)
        self.decoder = Decoder(self.token_vocab, self.combination_table, self.node_manager)

    def train(self, texts):
        """Learn tokens from *texts* (phase 1), then combinations (phase 2)."""
        print("\n训练开始...")
        for text in tqdm(texts):
            wrapped = f"<bos>{text}<eos>"

            # Tokens already counted for this text (each counts once per text).
            seen = set()

            # Special markers are always added as whole tokens.
            for marker in ("<bos>", "<eos>"):
                if marker not in seen:
                    seen.add(marker)
                    self.token_vocab.add_token(marker, 10000)

            # Slide a window (length 1..4) over the content between the
            # markers; every distinct substring becomes a candidate token.
            lo = 5                 # skip "<bos>"
            hi = len(wrapped) - 5  # stop before "<eos>"
            for start in range(lo, hi):
                for stop in range(start + 1, min(start + 5, hi + 1)):
                    candidate = wrapped[start:stop]
                    if candidate and candidate not in seen:
                        seen.add(candidate)
                        self.token_vocab.add_token(candidate)

        print("\n训练后词表大小:", len(self.token_vocab.token_to_id))

        print("修剪词表...")
        self.token_vocab.prune()
        print("修剪后词表大小:", len(self.token_vocab.token_to_id))

        print("\n学习组合...")
        # Encoding each text populates the combination table as a side effect.
        for text in tqdm(texts):
            self.encoder.encode(text)

        print("\n训练后组合表大小:", len(self.combination_table.combinations))
        print("训练完成")

    def encode(self, text):
        """Delegate to the encoder."""
        return self.encoder.encode(text)

    def decode(self, token_ids):
        """Delegate to the decoder."""
        return self.decoder.decode(token_ids)

    def save(self, dir_path):
        """Persist vocabulary, combination table and node values to *dir_path*."""
        os.makedirs(dir_path, exist_ok=True)
        for component, filename in (
            (self.token_vocab, 'token_vocab.json'),
            (self.combination_table, 'combination_table.json'),
            (self.node_manager, 'node_manager.json'),
        ):
            component.save(os.path.join(dir_path, filename))
        print(f"系统状态已保存到 {dir_path}")

    def load(self, dir_path):
        """Restore state from *dir_path* and rebuild the encoder/decoder."""
        for component, filename in (
            (self.token_vocab, 'token_vocab.json'),
            (self.combination_table, 'combination_table.json'),
            (self.node_manager, 'node_manager.json'),
        ):
            component.load(os.path.join(dir_path, filename))
        print(f"系统状态已从 {dir_path} 加载")

        # Recreate encoder and decoder over the freshly loaded state.
        self.encoder = Encoder(self.token_vocab, self.combination_table)
        self.decoder = Decoder(self.token_vocab, self.combination_table, self.node_manager)


# ======= 测试代码 =======

if __name__ == "__main__":
    import pandas as pd

    # Load a previously trained system state from disk.
    new_system = TokenSystem()
    print("\n加载系统状态...")
    new_system.load("token_system")

    output_dir = "F:/pre_train"
    chunk_size = 300000  # number of encoded documents per pickle shard

    with open("pretrain_hq.jsonl", "r", encoding="utf-8") as f:
        raw_lines = f.readlines()

    encoded_docs = []
    idx = 0
    for raw in tqdm(raw_lines):
        idx += 1
        # Strip chat markers and split one jsonl record into segments.
        segments = json.loads(raw)["text"].replace("<|im_start|>", "").split("<|im_end|>")[:-1]
        doc_ids = []
        for segment in segments:
            doc_ids += new_system.encode(segment)
        # (Per-document debug printing removed: it dominated runtime.)
        encoded_docs.append(doc_ids)

        # Flush a full shard. Bug fix: the original `% 300000 == 0` test
        # re-saved an empty shard on every iteration after the first flush,
        # because len == 0 also satisfies the modulo.
        if len(encoded_docs) == chunk_size:
            pd.to_pickle(encoded_docs, "{}/{}.pkl".format(output_dir, idx))
            encoded_docs = []

    # Save the final partial shard, if any. The guard also prevents
    # overwriting a just-flushed shard with an empty list when the
    # document count divides the chunk size evenly.
    if encoded_docs:
        pd.to_pickle(encoded_docs, "{}/{}.pkl".format(output_dir, idx))