import os
import pickle
import numpy as np
from typing import Iterable, Iterator, Dict, List, Tuple

from cs336_basics.bpe_tokenizer.pretokenizer import PreTokenizer


class Tokenizer:
    pattern = (
        r"""'(?:[sdmt]|ll|ve|re)| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
    )

    def __init__(
        self,
        vocab: Dict[int, bytes],
        merges: List[Tuple[bytes, bytes]],
        special_tokens: List[str] | None = [],
    ):
        self.vocab_id_to_byte = vocab
        self.merges = merges
        self.special_tokens = special_tokens or []
        self.pretokenizer = PreTokenizer(special_tokens or [])

        # 每个词对应的词表 id
        self.vocab_byte_to_id: Dict[bytes, int] = {
            v: idx for idx, v in self.vocab_id_to_byte.items()
        }

        self.pretoken_cache: Dict[bytes, List[int]] = {}

    @classmethod
    def from_files(cls, vocab_filepath, merges_filepath, special_tokens=None):
        """从文件加载词汇表和合并规则"""
        vocab = {}
        with open(vocab_filepath, "rb") as f:
            vocab = pickle.load(f)

        merges = []
        with open(merges_filepath, "rb") as f:
            merges = pickle.load(f)

        return cls(vocab, merges, special_tokens)

    def calculate_token_ids(self, text: bytes) -> List[int]:
        """计算文本的 token ID 序列"""

        # 先将文本转换为字节列表
        bytes_list = [bytes([b]) for b in text]

        while True:
            min_merge_rule_idx = None  # 最小合并规则的索引
            pos = -1  # bytes_list 中匹配该合并规则的位置

            for i, pair in enumerate(zip(bytes_list[:-1], bytes_list[1:])):
                idx = self.vocab_byte_to_id.get(pair[0] + pair[1])
                if (idx is not None) and (
                    (min_merge_rule_idx is None) or (idx < min_merge_rule_idx)
                ):  # 找到索引更小的合并规则，则更新
                    min_merge_rule_idx = idx
                    pos = i

            # 无法合并了，结束合并
            if not min_merge_rule_idx:
                break

            # 执行合并操作
            bytes_list[pos : pos + 2] = [bytes_list[pos] + bytes_list[pos + 1]]

        # 将合并后的 bytes_list 转换为 token ID
        token_ids = []
        for byte in bytes_list:
            if byte in self.vocab_byte_to_id:
                token_ids.append(self.vocab_byte_to_id[byte])
            else:
                raise ValueError(f"Byte {byte} not found in vocabulary.")

        return token_ids

    def encode(self, text: str) -> list[int]:
        """将文本编码为 token ID 序列"""

        ptb_list = self.pretokenizer.pretokenize(text, preserve_special_tokens=True)

        res = []

        for ptb in ptb_list:
            if ptb in self.vocab_byte_to_id:  # 已在词汇表中
                res.append(self.vocab_byte_to_id[ptb])
                continue
            elif ptb in self.pretoken_cache:  # 已在缓存中
                res.extend(self.pretoken_cache[ptb])
                continue
            else:  # 计算 token IDs
                token_ids = self.calculate_token_ids(ptb)
                self.pretoken_cache[ptb] = token_ids
                res.extend(token_ids)

        return res

    def encode_iterable(self, iterable: Iterable[str]) -> Iterator[int]:
        """将可迭代对象中的文本编码为 token ID 迭代器"""
        for text in iterable:
            ptb_list = self.pretokenizer.pretokenize(text, preserve_special_tokens=True)
            for ptb in ptb_list:
                if ptb in self.vocab_byte_to_id:  # 已在词汇表中
                    yield self.vocab_byte_to_id[ptb]
                elif ptb in self.pretoken_cache:  # 已在缓存中
                    yield from self.pretoken_cache[ptb]
                else:  # 计算 token IDs
                    token_ids = self.calculate_token_ids(ptb)
                    self.pretoken_cache[ptb] = token_ids
                    yield from token_ids

    def encode_to_npfiles(
        self, input_file: str | os.PathLike, output_file: str | os.PathLike
    ):
        """将输入文件中的文本编码为 token ID 并保存为 npy 文件"""
        with open(input_file, "r", encoding="utf-8") as f:
            token_ids = [token_id for token_id in self.encode_iterable(f)]

        np.save(output_file, np.array(token_ids))

    def decode(self, ids: list[int], end_token_id: int | None = None) -> str:
        """将 token ID 序列解码为文本"""
        decode_res: bytes = b""
        for token_id in ids:
            if token_id in self.vocab_id_to_byte:
                decode_res += self.vocab_id_to_byte[token_id]
            else:
                raise ValueError(f"Token ID {token_id} not found in vocabulary.")

            if end_token_id and token_id == end_token_id:
                break

        return decode_res.decode("utf-8", errors="replace")


if __name__ == "__main__":
    from pathlib import Path

    # Project root: three levels up from this file.
    ROOT_DIR = Path(__file__).resolve().parent.parent.parent

    end_token = "<|endoftext|>"
    # FIX: dropped the redundant `tk = tokenizer = ...` double binding.
    tk = Tokenizer.from_files(
        vocab_filepath=ROOT_DIR.joinpath("output/tokenizer/TinyStoriesV2_vocab.pkl"),
        merges_filepath=ROOT_DIR.joinpath("output/tokenizer/TinyStoriesV2_merges.pkl"),
        # vocab_filepath=ROOT_DIR.joinpath("output/tokenizer/owt_vocab.pkl"),
        # merges_filepath=ROOT_DIR.joinpath("output/tokenizer/owt_merges.pkl"),
        special_tokens=[end_token],
    )

    # Encode the training corpus to <dataset>.npy.
    # FIX: str(...) instead of .__str__(); no `res =` capture of a None return.
    dataset = str(ROOT_DIR.joinpath("data/TinyStoriesV2-GPT4-train"))
    tk.encode_to_npfiles(dataset + ".txt", dataset + ".npy")
