import regex as re
from loguru import logger
from tqdm import tqdm
import concurrent
from concurrent.futures import ProcessPoolExecutor
import os
from typing import BinaryIO
import time
import json

def gpt2_bytes_to_unicode() -> dict[int, str]:
    """Build the GPT-2 byte -> printable-unicode-character table.

    Printable byte values map to their own character; every other byte is
    remapped to a code point starting at 256 so that all 256 byte values
    have a visible, lossless text representation.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    byte_values = list(printable)
    char_codes = list(printable)
    offset = 0
    for value in range(256):
        if value in printable:
            continue
        byte_values.append(value)
        char_codes.append(256 + offset)
        offset += 1
    return {b: chr(c) for b, c in zip(byte_values, char_codes)}


def find_chunk_boundaries(
    file: BinaryIO,
    desired_num_chunks: int,
    split_special_token: bytes
) -> list[int]:
    """
    Chunk the file into parts that can be counted independently.
    May return fewer chunks if the boundaries end up overlapping.

    Interior boundary guesses are snapped forward to the next occurrence of
    ``split_special_token`` (or to EOF if none is found).
    """
    assert isinstance(split_special_token, bytes), (
        "Must represent special token as a bytestring"
    )

    # Measure the file, then rewind.
    file.seek(0, os.SEEK_END)
    total_bytes = file.tell()
    file.seek(0)

    step = total_bytes // desired_num_chunks

    # Uniformly spaced initial guesses; the last boundary is pinned to EOF.
    boundaries = [step * idx for idx in range(desired_num_chunks + 1)]
    boundaries[-1] = total_bytes

    read_ahead = 4096  # scan forward this many bytes per read

    for idx in range(1, len(boundaries) - 1):
        position = boundaries[idx]
        file.seek(position)
        while True:
            window = file.read(read_ahead)

            if not window:
                # Reached EOF before seeing the token: collapse onto EOF.
                boundaries[idx] = total_bytes
                break

            hit = window.find(split_special_token)
            if hit >= 0:
                boundaries[idx] = position + hit
                break
            position += read_ahead

    # Deduplicate: collapsed boundaries may yield fewer chunks than asked for.
    return sorted(set(boundaries))

def bytes2token(bytes_in: bytes, b2token: dict) -> list[int]:
    """Greedily encode *bytes_in* into token ids via longest-match lookup.

    At every position the longest byte prefix present in *b2token* is
    consumed.  Raises ValueError if not even a single byte matches.
    """
    res = []
    start = 0
    n = len(bytes_in)
    # No vocabulary entry can be longer than the longest key, so bound the scan.
    max_key_len = max(map(len, b2token), default=0)
    while start < n:
        best_match = None
        best_len = 0
        # BUGFIX: scan every candidate length instead of breaking at the first
        # miss — a longer entry (e.g. b"abc") may exist even when a shorter
        # prefix (e.g. b"ab") is absent from the vocabulary.
        limit = min(max_key_len, n - start)
        for i in range(1, limit + 1):
            tmp = bytes_in[start: start + i]
            if tmp in b2token:
                best_match = tmp
                best_len = i

        if best_match is not None:
            res.append(b2token[best_match])
            start += best_len
        else:
            # Not even the single byte is in the vocabulary: unrecoverable.
            raise ValueError(f"字节 {bytes_in[start:start+1]} 不在词汇表中")

    return res

def token2bytes(token_in: list[int] | tuple[int], vocab_dict: dict) -> bytes:
    res = bytearray()
    for t in token_in:
        res.extend(vocab_dict[t])
    return bytes(res)


def load_chunk_data(chunk_bytes: bytes, special_tokens: list[str], _id: int) -> dict[str, int]:
    """Pre-tokenize one corpus chunk and return a word -> count mapping.

    Special tokens are split out first so the GPT-2 word pattern never
    crosses them; words that encode to at most one byte are dropped since
    they can never contribute a merge pair.
    """
    started = time.time()
    logger.info(f"Processor {_id} 开始处理")
    counts: dict[str, int] = {}
    text = chunk_bytes.decode('utf-8')
    # GPT-2 pre-tokenization pattern (needs the `regex` module for \p classes).
    word_pattern = re.compile(r"""'(?:[sdmt]|ll|ve|re)| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    if special_tokens:
        splitter = f"({'|'.join(re.escape(s) for s in special_tokens)})"
        segments = re.split(splitter, text)
    else:
        segments = [text]

    for segment in segments:
        # Special tokens are managed separately; never count them as words.
        if segment in special_tokens:
            continue
        for word in word_pattern.findall(segment):
            if len(word.encode('utf-8')) <= 1:
                continue  # single-byte words produce no pairs to merge
            counts[word] = counts.get(word, 0) + 1

    logger.info(f"Processor {_id} 处理结束, 耗时{round(time.time() - started, 2)}s")
    return counts

def train_bpe(input_path: str, vocab_size: int, special_tokens: list[str]) -> tuple[dict[int, bytes], list[tuple[bytes, bytes]]]:
    """Train a byte-level BPE tokenizer on the corpus at *input_path*.

    Args:
        input_path: path to a UTF-8 text corpus.
        vocab_size: target vocabulary size (256 byte tokens + special tokens
            + learned merges).
        special_tokens: tokens added verbatim to the vocabulary and never
            split or merged across.

    Returns:
        (vocab, merges): id -> bytes mapping, and the ordered merge list.
    """
    # Base vocabulary: one token per byte value.
    vocab = { i: bytes([i]) for i in range(256) }
    revocab = { bytes([i]): i for i in range(256) }
    pair_dict: dict[tuple[int, int], int] = {}
    word_freq_dict: dict[str, int] = {}
    word_token_dict: dict[str, list[int]] = {}
    merges: list[tuple[bytes, bytes]] = []

    # Cap worker count at the machine's core count instead of always forking 40.
    num_process = min(40, os.cpu_count() or 1)

    next_vocab_index = max(vocab.keys()) + 1

    for special_token in special_tokens:
        special_bytes = special_token.encode('utf-8')
        vocab[next_vocab_index] = special_bytes
        revocab[special_bytes] = next_vocab_index
        logger.info(f'添加special token: ({next_vocab_index}, {special_token})')
        next_vocab_index += 1

    # Chunk on the first provided special token so no chunk boundary splits a
    # document; fall back to the conventional end-of-text marker otherwise.
    # (Previously '<|endoftext|>' was hardcoded regardless of special_tokens.)
    split_token = (special_tokens[0] if special_tokens else '<|endoftext|>').encode('utf-8')

    t0 = time.time()
    with open(input_path, 'rb') as f, ProcessPoolExecutor(max_workers=num_process) as executor:
        to_do = []
        boundaries = find_chunk_boundaries(f, num_process, split_token)

        # Fan each chunk out to a worker for parallel pre-tokenization.
        for i, (start, end) in enumerate(zip(boundaries[:-1], boundaries[1:])):
            f.seek(start)
            chunk = f.read(end - start)
            future = executor.submit(load_chunk_data, chunk, special_tokens, i)
            to_do.append(future)

        for future in concurrent.futures.as_completed(to_do):
            res: dict[str, int] = future.result()
            for word, count in res.items():
                if word in word_freq_dict:
                    word_freq_dict[word] += count
                else:
                    word_freq_dict[word] = count
                    word_token_dict[word] = bytes2token(word.encode('utf-8'), revocab)
        logger.info(f'完成预分词，耗时{round(time.time() - t0, 2)}s')

    for _ in tqdm(range(vocab_size - len(vocab))):

        # Recount all adjacent pairs from scratch (naive: O(corpus) per merge).
        pair_dict.clear()
        for word, count in word_freq_dict.items():
            token = word_token_dict[word]
            for i in range(len(token) - 1):
                pair = (token[i], token[i + 1])
                pair_dict[pair] = pair_dict.get(pair, 0) + count

        if not pair_dict:
            # Nothing left to merge (every word is a single token):
            # stop early instead of crashing on an empty selection.
            break

        # Highest count wins; ties broken by the lexicographically greatest
        # pair bytes. max() picks the same winner as a full descending sort.
        max_pair = max(pair_dict.items(), key=lambda x: (x[1], vocab[x[0][0]], vocab[x[0][1]]))[0]

        max_pair_bytes = token2bytes(max_pair, vocab)

        vocab[next_vocab_index] = max_pair_bytes
        revocab[max_pair_bytes] = next_vocab_index
        merges.append((vocab[max_pair[0]], vocab[max_pair[1]]))

        # Re-tokenize every word, replacing each non-overlapping occurrence of
        # max_pair (scanned left-to-right) with the new token id.
        for word, token in word_token_dict.items():
            result = []
            start = 0
            while start < len(token):
                if start == len(token) - 1:
                    result.append(token[-1])
                    break

                if (token[start], token[start + 1]) == max_pair:
                    result.append(next_vocab_index)
                    start += 2
                else:
                    result.append(token[start])
                    start += 1

            word_token_dict[word] = result

        next_vocab_index += 1

    logger.info(f"[vocab]: {len(vocab.keys())}")

    return vocab, merges

def save_tk(vocab: dict[int, bytes], merges: list[tuple[bytes, bytes]], dir_name: str):
    """Serialize the tokenizer to GPT-2 style vocab.json / merges.txt files.

    Token bytes are rendered through the GPT-2 byte->unicode table so every
    token is representable as printable text.
    """
    os.makedirs(dir_name, exist_ok=True)
    b2s = gpt2_bytes_to_unicode()
    gpt_vocab = {k:''.join([b2s[i] for i in v]) for k, v in vocab.items()}
    gpt_merges = [[''.join([b2s[i] for i in b1]), ''.join([b2s[i] for i in b2])] for b1, b2 in merges]
    # BUGFIX: the rendered tokens contain non-ASCII characters (e.g. U+0120
    # for the space byte), so the files must be written as UTF-8 explicitly —
    # the platform locale default (e.g. cp1252 on Windows) would crash.
    with open(os.path.join(dir_name, 'vocab.json'), 'w', encoding='utf-8') as f:
        json.dump(gpt_vocab, f, indent=4, ensure_ascii=False)
    with open(os.path.join(dir_name, 'merges.txt'), 'w', encoding='utf-8') as f:
        for merge in gpt_merges:
            f.write(" ".join(merge) + '\n')
    logger.info(f"成功保存 vocab.json 和 merges.txt 到 {dir_name}目录下")

        
if __name__ == '__main__':
    # Train a 32k-entry BPE tokenizer on OpenWebText and dump it to ./owt_tk.
    trained_vocab, trained_merges = train_bpe('data/owt_train.txt', 32_000, ['<|endoftext|>'])
    save_tk(trained_vocab, trained_merges, './owt_tk')