import os
import pickle

import torch

import ai.AiConstant as AiConstant
import ai.utils.utils_file
from ai.utils import utils_file
import config
import jieba


class GxlCharTokenizer:
    """Word-level tokenizer backed by a token-frequency vocabulary.

    Real tokens receive ids starting at 4; ids 0-3 are reserved for the
    four special tokens below.  Tokens whose corpus frequency is not
    strictly greater than ``min_freq`` (and the newline token) are
    excluded from the tables and fall back to ``[UNK]`` at encode time.
    """

    UNK_T = '[UNK]'
    PAD_T = '[PAD]'
    SOD_T = '[SOD]'
    EOD_T = '[EOD]'
    UNK_ID = 0
    PAD_ID = 1
    SOD_ID = 2
    EOD_ID = 3  # must be non-negative: ids index into the embedding table

    def __init__(self, logger=config.logger, tokenizer=jieba.lcut, min_freq=3,
                 max_vocab_size=100000):
        """
        :param logger: logger used for progress/warning messages
        :param tokenizer: callable mapping a string to a list of tokens
        :param min_freq: tokens must occur strictly more often than this
        :param max_vocab_size: cap on the number of counted tokens kept
        """
        self.vocab = {}          # token -> raw corpus frequency count
        self._token_to_id = {}
        self._id_to_token = {}
        self.min_freq = min_freq
        self.max_vocab_size = max_vocab_size
        self.logger = logger
        self.tokenizer = tokenizer

    def _build_token_tables(self):
        """Rebuild ``_token_to_id`` / ``_id_to_token`` from ``self.vocab``.

        Shared by :meth:`load_vocab` and :meth:`build_vocab` (this logic
        was previously duplicated in both).
        """
        token_list = [x for x in self.vocab.items()
                      if x[1] > self.min_freq and x[0] != '\n']
        self._token_to_id = {x[0]: i + 4 for i, x in enumerate(token_list)}
        self._token_to_id[self.UNK_T] = self.UNK_ID
        self._token_to_id[self.PAD_T] = self.PAD_ID
        self._token_to_id[self.SOD_T] = self.SOD_ID
        self._token_to_id[self.EOD_T] = self.EOD_ID
        self._id_to_token = {token_id: token
                             for token, token_id in self._token_to_id.items()}

    def load_vocab(self, vocab_file_path=config.OUTDIRPATH + '/vocab/vocab_dict_word.pkl'):
        """Load pickled token counts from disk and rebuild the id tables.

        NOTE(review): ``pickle.load`` executes arbitrary code from the
        file; only load vocab files produced by :meth:`build_vocab`.
        """
        with open(vocab_file_path, "rb") as f:
            self.vocab = pickle.load(f)
        # Keep only the max_vocab_size most frequent tokens.
        self.vocab = dict(sorted(self.vocab.items(),
                                 key=lambda x: x[1], reverse=True)[:self.max_vocab_size])
        self._build_token_tables()

    @property
    def vocab_size(self):
        """Number of mapped tokens, special tokens included."""
        return len(self._token_to_id)

    def __len__(self):
        return len(self._token_to_id)

    def build_vocab(self, txt_data_dir=config.DATADIRPATH + "xiaohuangji/",
                    vocab_file_save_path=config.OUTDIRPATH + '/vocab/vocab_dict_word.pkl',
                    sort=True):
        """Count tokens in every ``.txt`` file under ``txt_data_dir``,
        rebuild the id tables, and pickle the raw counts.

        :param sort: when True, keep the most frequent tokens on
            truncation to ``max_vocab_size``; when False, keep the first
            ``max_vocab_size`` tokens in insertion order.

        Note: the default save path now matches the default read path of
        :meth:`load_vocab` (it previously lacked the '/' separator, so a
        default build/load round trip used two different files).
        """
        self.logger.info('building vocab from %s ...' % txt_data_dir)
        for filename in os.listdir(txt_data_dir):
            if not filename.endswith('.txt'):
                continue
            train_path = os.path.join(txt_data_dir, filename)
            with open(train_path, 'r', encoding='utf-8') as f:
                for line in f:  # stream lines instead of readlines()
                    for token in self.tokenizer(line):
                        self.vocab[token] = self.vocab.get(token, 0) + 1
        if sort:
            self.vocab = dict(sorted(self.vocab.items(),
                                     key=lambda x: x[1], reverse=True)[:self.max_vocab_size])
        else:
            # bug fix: dict_items is not subscriptable -- the original
            # `(self.vocab.items())[:n]` raised TypeError; materialize first.
            self.vocab = dict(list(self.vocab.items())[:self.max_vocab_size])
        self._build_token_tables()
        self.logger.info('building vocab success!')
        self.logger.info('vocab size: %d' % len(self._token_to_id))
        utils_file.makedir_for_file(vocab_file_save_path)
        with open(vocab_file_save_path, "wb") as f:
            pickle.dump(self.vocab, f)

    def re_build_vocab(self, txt_data_dir=config.DATADIRPATH + "xiaohuangji/"):
        """Rebuild the vocabulary from a new corpus; the previous
        vocabulary is modified.  Must be called before training starts."""
        self.build_vocab(txt_data_dir)

    def re_build_vocab_just_add(self, txt_data_dir):
        """Add a new corpus without discarding existing counts.

        When the total exceeds ``max_vocab_size``, truncation keeps
        insertion order instead of dropping the lowest-frequency tokens.
        """
        self.build_vocab(txt_data_dir, sort=False)

    def encode(self, x):
        """Encode a string, 1-D token list, or 1-D tensor into id lists.

        Unknown tokens map to ``UNK_ID``.  Returns None (after a warning)
        when the vocabulary has not been built/loaded yet.
        """
        if len(self.vocab) == 0:
            self.logger.warning("vocab is empty!")
            return
        # bug fix: membership test instead of truthiness of .get() --
        # '[UNK]' maps to id 0 (falsy), so the old check wrongly fell
        # through to the tokenizer for tokens with id 0.
        if isinstance(x, str) and x in self._token_to_id:
            return [self._token_to_id[x]]

        assert isinstance(x, list | torch.Tensor | str)
        if isinstance(x, str):
            x = self.tokenizer(x)
        if len(x) == 0:
            return []
        elif isinstance(x, list):
            return [self._token_to_id.get(token, self._token_to_id[self.UNK_T]) for token in x]
        elif isinstance(x, torch.Tensor):
            return [self._token_to_id.get(token.item(), self._token_to_id[self.UNK_T]) for token in x]

    def decode(self, x):
        """Decode an id, 1-D id list, or 1-D tensor back into tokens.

        Unknown ids decode to the UNK token.  Returns None (after a
        warning) when the vocabulary is empty.
        """
        if len(self.vocab) == 0:
            self.logger.warning("vocab is empty!")
            return

        assert isinstance(x, list | torch.Tensor | int)
        if isinstance(x, int):
            # UNK_T is the fallback; equivalent to the original
            # _id_to_token[_token_to_id[UNK_T]] round trip.
            return self._id_to_token.get(x, self.UNK_T)
        if len(x) == 0:
            return []
        elif isinstance(x, list):
            return [self.decode(y) for y in x]
        elif isinstance(x, torch.Tensor):
            return [self.decode(y.item()) for y in x]

    def show_token_key(self):
        """Print every (token, id) pair -- debugging helper."""
        for token, token_id in self._token_to_id.items():
            print(token, token_id)

# Module-level singleton.  NOTE(review): constructing and loading it here
# means importing this module reads the vocab pickle from disk as a side
# effect (and raises if the file is missing) — consider lazy init, but
# other modules may rely on importing `tokenizer` directly.
tokenizer = GxlCharTokenizer()
tokenizer.load_vocab()


def get_tokenizer():
    """Return the shared module-level tokenizer instance."""
    return tokenizer


# if __name__ == '__main__':
    # gxl_tokenizer = GxlCharTokenizer()
    # gxl_tokenizer.build_vocab()
    # gxl_tokenizer.show_token_key()
    # text = '中国有句谚语“饱带干粮，晴带雨伞”，意思是说虽然吃饱饭出门，但还是要带足干粮；虽然是晴天出门，但还是要带上雨伞，凡事要有备无患。然而，也有人说，生活中我们也常常要面对意外挑战，因此涵养“无备”的心态也是人生的必修课。'
    # code = gxl_tokenizer.encode(text)
    # print(code)
    # print(gxl_tokenizer.decode(code))
    # print(len(gxl_tokenizer))
