import pathlib
import pickle
import json
import os
import torch
from tqdm import tqdm
from sortedcontainers import SortedSet

import assistant

logger = assistant.logger


def open_jsonl_file(jsonl_file_path):
    """Read a JSON-Lines file and return its records as a list of dicts.

    Each line of the file must be a standalone JSON document.

    Args:
        jsonl_file_path: path to the ``.jsonl`` file (read as UTF-8).

    Returns:
        list of parsed objects, one per line.

    Raises:
        FileNotFoundError: if the path does not exist.
        json.JSONDecodeError: if any line is not valid JSON.
    """
    with open(jsonl_file_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        # NOTE: message is user-facing console output ("total number of records").
        print("数据总条数为:", len(lines))
        return [json.loads(x) for x in lines]


class ChineseCharacterTokenizer:
    SOD_TOKEN = "<SOD>"
    EOD_TOKEN = "<EOD>"
    UNK_TOKEN = "<UNK>"
    PAD_TOKEN = "<PAD>"
    SOD_ID = 0
    EOD_ID = 1
    UNK_ID = 2
    PAD_ID = 3

    def __init__(self, tokenizer=lambda x: [y for y in x], vocab_file_path='./output/character_vocab_dict.pkl'):
        self.vocab_set = SortedSet()
        self._token_to_id = {}
        self._id_to_token = {}
        self.tokenizer = tokenizer
        self.vocab_file_path = vocab_file_path
        os.makedirs(os.path.dirname(vocab_file_path), exist_ok=True)

    def load_vocab(self, ):
        """加载已保存的字典"""
        with open(self.vocab_file_path, "rb") as f:
            self.vocab_set = SortedSet(pickle.load(f))
        self._from_set_build_dict()

    def build_vocab(self, data_jsonl_path,):
        """构建字典"""
        dict_list = open_jsonl_file(data_jsonl_path)
        logger.info('start build vocab')
        total = len(dict_list)
        for dic in tqdm(dict_list, total=total):
            txt = dic['comment']
            tokens = self.tokenizer(txt)
            self.vocab_set.update(tokens)
        self._from_set_build_dict()
        with open(self.vocab_file_path, "wb") as f:
            logger.info('build vocab success! ,saved in: ' + self.vocab_file_path)
            pickle.dump(list(self.vocab_set), f)

    def _from_set_build_dict(self):
        self._token_to_id = {token: id + 4 for id, token in enumerate(self.vocab_set)}
        self._token_to_id[self.SOD_TOKEN] = self.SOD_ID
        self._token_to_id[self.EOD_TOKEN] = self.EOD_ID
        self._token_to_id[self.UNK_TOKEN] = self.UNK_ID
        self._token_to_id[self.PAD_TOKEN] = self.PAD_ID
        self._id_to_token = {token_id: token for token, token_id in self._token_to_id.items()}

    def __len__(self):
        return len(self._token_to_id)

    def encode(self, x: str)-> list[int]:
        """x: str,
        要编码的内容不应该涉及特殊字符, 特殊字符是id层面的操作,不应该在这里做
        但该函数可以转换单独的特殊字符
        return -> id_list
        """
        if len(self.vocab_set) == 0:
            logger.warning("vocab is empty!")
            return
        if isinstance(x, str) and self._token_to_id.get(x, None):
            return [self._token_to_id.get(x)]
        chars = self.tokenizer(x)
        return [self._token_to_id.get(char, self.UNK_ID) for char in chars]

    def encode_list(self, chars: list):
        return [self._token_to_id.get(char, self.UNK_ID) for char in chars]

    def decode(self, x: int | list | torch.Tensor):
        """解码，输入int, int_list, int_tensor，只支持一维ids
        return -> token list"""
        if len(self.vocab_set) == 0:
            logger.warning("vocab is empty!")
            return
        if isinstance(x, int):
            return [self._id_to_token.get(x, self.UNK_TOKEN)]
        if isinstance(x, list):
            assert isinstance(x[0], int)
            return [self._id_to_token.get(token_id, self.UNK_TOKEN) for token_id in x]
        elif isinstance(x, torch.Tensor):
            assert x.dim() == 1
            return [self._id_to_token.get(int(token_id), self.UNK_TOKEN) for token_id in x.data]

    def show_token_id(self):
        for token, id in self._token_to_id.items():
            print(token + ' --- ' + str(id))


class TitleTokenizer:
    UNK_TOKEN = "<UNK>"
    UNK_ID = -1

    def __init__(self):
        self._token_to_id = {
            "很差": 0,
            "较差": 1,
            "还行": 2,
            "推荐": 3,
            "力荐": 4,
            self.UNK_TOKEN: self.UNK_ID,
        }
        self._id_to_token = {token_id: token for token, token_id in self._token_to_id.items()}

    def __len__(self):
        return len(self._token_to_id)

    def encode(self, x: str):
        """x: str,
        要编码的内容不应该涉及特殊字符, 特殊字符是id层面的操作,不应该在这里做
        但该函数可以转换单独的特殊字符
        return -> id_list
        """
        return self._token_to_id.get(x, self.UNK_ID)

    def encode_list(self, tokens: list):
        return [self._token_to_id.get(token, self.UNK_ID) for token in tokens]

    def decode(self, x: int | list | torch.Tensor):
        """解码，输入int, int_list, int_tensor，只支持一维ids
        return -> token list"""
        if isinstance(x, int):
            return [self._id_to_token.get(x, self.UNK_TOKEN)]
        if isinstance(x, list):
            assert isinstance(x[0], int)
            return [self._id_to_token.get(token_id, self.UNK_TOKEN) for token_id in x]
        elif isinstance(x, torch.Tensor):
            assert x.dim() == 1
            return [self._id_to_token.get(token_id, self.UNK_TOKEN) for token_id in x]

    def show_token_id(self):
        for token, id in self._token_to_id.items():
            print(token + ' --- ' + str(id))


if __name__ == '__main__':
    # Smoke-test driver: build a character vocabulary from the crawled
    # corpus, then round-trip a few sample strings through the tokenizer.
    file_path = '../my_spider_douban_formal/output/clean_data.jsonl'
    tokenizer = ChineseCharacterTokenizer()
    tokenizer.build_vocab(file_path)
    tokenizer.show_token_id()
    # Renamed from `input`/`input2`, which shadowed the builtin.
    sample = "力荐"
    print(tokenizer.encode(sample))
    print(tokenizer.decode([2663]))
    sample2 = "我是耿雪龙, 你好呀.同学们，大家好！欢迎加入文字词汇课程。在日常生活中，你是不是也遇到过很多看似简单但却令人疑惑的问题，如：“立即”的“即”和“既然”的“既”为什么不同？可以说“很少下雨”，为什么不能说“很多下雨”？“妈”和“娘”意思相同，为什么“姑妈“和”姑娘”却完全不同？让我们的课程给你答案吧。"
    print(tokenizer.encode(sample2))
