import pickle

import numpy as np
import torch
from torch.utils.data import DataLoader

import gxl_config


class EasyVocab:
    """Character-level vocabulary mapping tokens to integer ids.

    Built from a corpus file handle (one line per sample; every character in
    a stripped line is a token). Two special tokens are appended after the
    corpus tokens: '<unk>' for out-of-vocabulary lookups and '<pad>' for
    padding.
    """

    def __init__(self, f=None):
        # Distinct characters seen so far (special tokens excluded).
        self.token_set = set()
        self.token_to_id = dict()
        self.id_to_token = dict()
        self.UNK = '<unk>'
        self.PAD = '<pad>'
        if f:
            # Delegate to add_corpus instead of duplicating its body;
            # this also persists the freshly built mappings.
            self.add_corpus(f)

    def __getitem__(self, tokens: str | list | torch.Tensor):
        """Encode one token (str) or a 1-D sequence of tokens to ids.

        Unknown tokens map to the id of '<unk>'.
        """
        if isinstance(tokens, str):
            return self.token_to_id.get(tokens, self.token_to_id[self.UNK])
        if isinstance(tokens, torch.Tensor):
            tokens = tokens.tolist()
        return [self.token_to_id.get(t, self.token_to_id[self.UNK]) for t in tokens]

    def decode(self, ids: int | list | torch.Tensor):
        """Map one id (int) or a 1-D sequence of ids back to tokens."""
        if isinstance(ids, int):
            return self.id_to_token[ids]
        if isinstance(ids, torch.Tensor):
            ids = ids.tolist()
        return [self.id_to_token[i] for i in ids]

    def __len__(self):
        return len(self.token_to_id)

    def save_self(self, filename=gxl_config.VOCAB_FILE_PATH):
        """Pickle (token_to_id, id_to_token) to *filename*."""
        with open(filename, 'wb') as f:
            pickle.dump((self.token_to_id, self.id_to_token), f)

    def add_corpus(self, f):
        """Add every character found in file handle *f* to the vocabulary,
        rebuild both mappings, and persist them to disk.
        """
        for line in f.readlines():
            self.token_set.update(line.strip())
        self._rebuild_mappings()
        self.save_self()

    def _rebuild_mappings(self):
        """Recompute token_to_id / id_to_token from token_set plus specials."""
        # Sort the tokens so the token->id assignment is deterministic across
        # runs; iterating the set directly is affected by str hash
        # randomization, which made saved vocabularies irreproducible.
        self.token_to_id = {token: idx for idx, token in enumerate(sorted(self.token_set))}
        self.token_to_id[self.UNK] = len(self.token_to_id)
        self.token_to_id[self.PAD] = len(self.token_to_id)
        self.id_to_token = {idx: token for token, idx in self.token_to_id.items()}

    @staticmethod
    def load_data_to_get_vocab(filename=gxl_config.VOCAB_FILE_PATH):
        """Load a previously pickled vocabulary from *filename*.

        NOTE(review): pickle.load on an untrusted file can execute arbitrary
        code — only load vocab files produced by this project.
        """
        with open(filename, 'rb') as f:
            token_to_id, id_to_token = pickle.load(f)
        vocab = EasyVocab(None)
        vocab.token_to_id = token_to_id
        vocab.id_to_token = id_to_token
        return vocab


def trunk_and_padding(token_list, vocab, chunk_size=200):
    """Truncate or right-pad *token_list* to exactly *chunk_size* entries.

    Returns the fixed-length list together with the number of valid
    (non-padding) positions. Padding uses the vocabulary's PAD id.
    """
    valid_len = min(len(token_list), chunk_size)
    result = token_list[:chunk_size]
    if valid_len < chunk_size:
        # Only touch the vocab when padding is actually needed.
        result = result + [vocab[vocab.PAD]] * (chunk_size - valid_len)
    return result, valid_len


def load_data_list_from_file_to_tokens(filename=gxl_config.DATA1_FILE_PATH):
    """Read the corpus at *filename* and encode each non-empty line as ids.

    Returns (tokens_list, vocab): one list of token ids per non-empty line,
    plus the vocabulary loaded from its default pickle path.
    """
    vocab = EasyVocab.load_data_to_get_vocab()
    tokens_list = []
    with open(filename, 'r', encoding='utf-8') as f:
        for raw_line in f.readlines():
            stripped = raw_line.strip()
            if not stripped:
                continue  # skip blank lines
            tokens_list.append(vocab[list(stripped)])
    return tokens_list, vocab


class ChatDataSet:
    """Map-style dataset over pre-tokenized lines for language modelling.

    Each item pairs a line of token ids (input) with the same line shifted
    left by one token (target); both are truncated/padded to a fixed length
    by trunk_and_padding.
    """

    def __init__(self, tokens_list, vocab):
        self.data = tokens_list
        self.vocab = vocab

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        tokens = self.data[index]
        src, src_len = trunk_and_padding(tokens, self.vocab)
        # Target is the input shifted by one position (next-token objective).
        tgt, tgt_len = trunk_and_padding(tokens[1:], self.vocab)
        src_tensor = torch.tensor(src, dtype=torch.long)
        tgt_tensor = torch.tensor(tgt, dtype=torch.long)
        return src_tensor, src_len, tgt_tensor, tgt_len


def getDataLoader(file_path=gxl_config.DATA1_FILE_PATH, batch_size=32):
    """Build a shuffled DataLoader over the tokenized corpus at *file_path*.

    *file_path* defaults to gxl_config.DATA1_FILE_PATH; previously it was a
    required parameter, so the zero-argument call in the __main__ block
    raised TypeError. The default is backward-compatible with existing
    callers that pass a path explicitly.

    Returns (loader, vocab).
    """
    tokens_list, vocab = load_data_list_from_file_to_tokens(file_path)
    dataset = ChatDataSet(tokens_list, vocab)
    return DataLoader(dataset, batch_size=batch_size, shuffle=True), vocab


if __name__ == '__main__':
    # Smoke test: fetch one batch and show tensor shapes and valid lengths.
    # Pass the configured corpus path explicitly (the bare getDataLoader()
    # call crashed with TypeError: missing required argument 'file_path').
    loader, _ = getDataLoader(gxl_config.DATA1_FILE_PATH)
    a, b, c, d = next(iter(loader))
    print(a.shape, b, c.shape, d)