import os
import pickle

import torch

import ai.AiConstant as AiConstant
import ai.utils.utils_file
from ai.utils import utils_file
from .gpt_config import gpt_logger
from .dataset import get_dataset


class GxlCharTokenizer:
    """Character-level tokenizer.

    Builds a token->frequency vocabulary from text files, derives
    token<->id lookup tables (with reserved special tokens appended),
    and persists/restores the vocabulary via pickle.
    """

    def __init__(self, logger=gpt_logger, tokenizer=lambda x: [y for y in x], min_freq=0,
                 max_vocab_size=30000):
        """
        Args:
            logger: logger used for progress messages.
            tokenizer: callable str -> list of tokens (default: split into chars).
            min_freq: tokens with frequency <= min_freq are dropped from the id maps.
            max_vocab_size: cap on the number of (most frequent) tokens kept.
        """
        self.vocab = {}            # token -> raw frequency count
        self._token_to_id = {}
        self._id_to_token = {}
        self.min_freq = min_freq
        self.max_vocab_size = max_vocab_size
        self.logger = logger
        self.tokenizer = tokenizer
        self.UNK_T = '[UNK]'
        self.PAD_T = '[PAD]'
        self.SOD_T = '['
        self.EOD_T = ']'
        self._reserved_tokens = [self.UNK_T, self.SOD_T, self.PAD_T, self.EOD_T]

    def _build_id_maps(self):
        """Rebuild _token_to_id / _id_to_token from the current self.vocab.

        Drops tokens at or below min_freq, then appends the reserved tokens.
        Shared by load_vocab() and build_vocab().
        """
        kept = [item for item in self.vocab.items() if item[1] > self.min_freq]
        self._token_to_id = {tok: i for i, (tok, _freq) in enumerate(kept)}
        self._id_to_token = {i: tok for i, (tok, _freq) in enumerate(kept)}
        for token in self._reserved_tokens:
            # Skip tokens that already came from the corpus (e.g. '[' / ']'):
            # re-assigning an existing key does not grow len(), so the next
            # *new* reserved token would receive a duplicate id.
            if token not in self._token_to_id:
                self._token_to_id[token] = len(self._token_to_id)
                self._id_to_token[self._token_to_id[token]] = token

    def load_vocab(self, vocab_file_path=AiConstant.OUTPUT_PATH + 'gpt/vocab/vocab_dict.pkl'):
        """Load a pickled token->frequency dict and rebuild the id maps.

        NOTE(review): pickle.load executes arbitrary code on malicious input —
        only load vocab files produced by build_vocab() from trusted storage.
        """
        with open(vocab_file_path, "rb") as f:
            self.vocab = pickle.load(f)
        # Keep only the max_vocab_size most frequent tokens.
        self.vocab = dict(sorted(self.vocab.items(), key=lambda x: x[1], reverse=True)[:self.max_vocab_size])
        self._build_id_maps()

    @property
    def vocab_size(self):
        # NOTE(review): _token_to_id already contains the 4 reserved tokens,
        # so the +4 double-counts them. Kept as-is because downstream model
        # (embedding) dimensions may have been sized with this value — confirm
        # before changing.
        return len(self._token_to_id)+4

    def build_vocab(self, txt_data_dir=AiConstant.DATA_PATH + "gpt/data/作文数据集/",
                    vocab_file_save_path=AiConstant.OUTPUT_PATH + 'gpt/vocab/vocab_dict.pkl', sort=True):
        """Count token frequencies over all *.txt files in txt_data_dir,
        rebuild the id maps, and pickle the frequency dict to disk.

        Args:
            txt_data_dir: directory scanned (non-recursively) for .txt files.
            vocab_file_save_path: destination for the pickled frequency dict.
            sort: if True keep the max_vocab_size most frequent tokens;
                  if False keep the first max_vocab_size in insertion order.
        """
        self.logger.info('building vocab from %s ...' % txt_data_dir)
        file_num = 0
        for filename in os.listdir(txt_data_dir):
            file_num += 1
            if not filename.endswith('.txt'):
                continue
            train_path = os.path.join(txt_data_dir, filename)
            with open(train_path, 'r', encoding='utf-8') as f:
                for line in f:  # iterate lazily instead of readlines()
                    for token in self.tokenizer(line):
                        self.vocab[token] = self.vocab.get(token, 0) + 1
            if file_num > 200000:  # safety cap on directory entries scanned
                break
        if sort:
            self.vocab = dict(sorted(self.vocab.items(), key=lambda x: x[1], reverse=True)[:self.max_vocab_size])
        else:
            # dict_items is not subscriptable — materialize to a list first.
            # (The original raised TypeError on this branch.)
            self.vocab = dict(list(self.vocab.items())[:self.max_vocab_size])
        self._build_id_maps()
        self.logger.info('building vocab success!')
        self.logger.info('vocab size: %d' % len(self.vocab))
        utils_file.makedir_for_file(vocab_file_save_path)
        with open(vocab_file_save_path, "wb") as f:
            pickle.dump(self.vocab, f)

    def re_build_vocab(self, txt_data_dir=AiConstant.DATA_PATH + "gpt/data/作文数据集/"):
        """Rebuild the vocabulary from new training corpus; the previous
        vocabulary is replaced. Must be called before training starts."""
        self.build_vocab(txt_data_dir)

    def re_build_vocab_just_add(self, txt_data_dir):
        """Add new training corpus without re-ranking the existing vocabulary;
        tokens are only appended. When the total exceeds max_vocab_size the
        cut-off is by insertion order rather than lowest frequency."""
        self.build_vocab(txt_data_dir, sort=False)

    def encoder(self, x):
        """Map a token (str) or a (possibly nested) list of tokens to ids.

        Unknown tokens map to the [UNK] id. Returns None with a warning if
        the vocabulary has not been loaded/built yet.
        """
        if len(self.vocab) == 0:
            gpt_logger.warning("vocab is empty!")
            return

        assert isinstance(x, list | torch.Tensor | str)
        if isinstance(x, str):
            return self._token_to_id.get(x, self._token_to_id[self.UNK_T])
        if len(x) == 0:
            return []
        elif isinstance(x[0], list):
            return [self.encoder(y) for y in x]
        else:
            return [self._token_to_id.get(token, self._token_to_id[self.UNK_T]) for token in x]

    def decoder(self, x):
        """Map an id (int) or a (possibly nested) list/tensor of ids back to
        tokens. Unknown ids decode to [UNK]. Returns None with a warning if
        the vocabulary has not been loaded/built yet.
        """
        if len(self.vocab) == 0:
            gpt_logger.warning("vocab is empty!")
            return

        assert isinstance(x, list | torch.Tensor | int)
        if isinstance(x, int):
            return self._id_to_token.get(x, self._id_to_token[self._token_to_id[self.UNK_T]])
        if len(x) == 0:
            return []
        elif isinstance(x[0], list):
            # Bug fix: nested lists must recurse into decoder, not encoder.
            return [self.decoder(y) for y in x]
        else:
            return [self._id_to_token.get(int(token), self._id_to_token[self._token_to_id[self.UNK_T]]) for token in x]


# Module-level singleton tokenizer, shared as the default by the loaders below.
# NOTE(review): load_vocab() reads the default pickle from disk at import time,
# so importing this module fails if the vocab file is absent — consider lazy
# initialization.
gxl_tokenizer = GxlCharTokenizer()
gxl_tokenizer.load_vocab()



def load_txt_data_to_list(txt_data_dir=AiConstant.DATA_PATH + "gpt/data/composition/", tokenizer=gxl_tokenizer,
                          pad_size=20):
    """Read every file in txt_data_dir line by line and encode each non-empty
    line as a fixed-length id sequence.

    Each line is tokenized, padded with [PAD] (or truncated) to pad_size when
    pad_size is truthy, then encoded to ids.

    Returns:
        List of (token_ids, seq_len) tuples, where seq_len is the original
        token count, capped at pad_size.
    """
    samples = []
    ai.utils.utils_file.makedir(txt_data_dir)
    for fname in os.listdir(txt_data_dir):
        file_path = os.path.join(txt_data_dir, fname)
        with open(file_path, 'r', encoding='utf-8') as fh:
            for raw_line in fh:
                text = raw_line.strip()
                if not text:
                    continue
                tokens = tokenizer.tokenizer(text)
                length = len(tokens)
                if pad_size:
                    if length < pad_size:
                        tokens = tokens + [tokenizer.PAD_T] * (pad_size - length)
                    else:
                        tokens = tokens[:pad_size]
                        length = pad_size
                samples.append((tokenizer.encoder(tokens), length))
    return samples


def load_dataset(txt_data_dir=AiConstant.DATA_PATH + "gpt/data/composition/"):
    """Load the text corpus under txt_data_dir and wrap it as a dataset."""
    return get_dataset(load_txt_data_to_list(txt_data_dir))
