# %%
# For NLP tasks this module provides two classes:
# 1. Vocabulary: encodes/preprocesses files and raw text into token ids.
# 2. Dataset: each Dataset instance owns its Vocabulary objects.
from matplotlib.style import available
import torch
from torch.utils.data import Dataset, DataLoader
from constants import Const
class Vocabulary:
    """Token <-> id vocabulary for NER-style preprocessing.

    Maintains the paired lookup tables ``itos`` (id -> token) and ``stoi``
    (token -> id), pre-seeded with the special tokens declared in ``Const``.
    """

    def __init__(self, sep='\002', min_freq=1, tag=True):
        """
        Args:
            sep: separator used to split raw text into tokens
                 ('\002' matches the Baidu express-waybill dataset format).
            min_freq: minimum occurrence count before a token gets an id.
            tag: True builds a tag (label) vocabulary without an UNK special;
                 False builds an input-sequence vocabulary that includes UNK
                 so out-of-vocabulary tokens can be mapped at inference time.
        """
        # Build the special-token tables once instead of duplicating both
        # dict literals in each branch.
        specials = [
            (Const.PAD_ID, Const.PAD_TOKEN),
            (Const.BOS_ID, Const.BOS_TOKEN),
            (Const.EOS_ID, Const.EOS_TOKEN),
        ]
        if not tag:
            # Only sequence vocabularies carry UNK.
            specials.append((Const.UNK_ID, Const.UNK_TOKEN))
        self.itos = {i: t for i, t in specials}
        self.stoi = {t: i for i, t in specials}
        # token -> raw occurrence count accumulated by build_vocab
        self.freqs = {}
        self.min_freq = min_freq
        assert len(self.itos) == len(self.stoi), "length of itos must equal to length of stoi"
        # next id to assign; ids continue past the special tokens
        self.idx = len(self.stoi)
        self.sep = sep
        # True once build_vocab has run (i.e. the vocab is ready to persist);
        # initialized here so it is never an AttributeError to read it.
        self.available_save = False

    def __len__(self):
        assert len(self.itos) == len(self.stoi), "length of itos must equal to length of stoi"
        return len(self.itos)

    def tokenize(self, text):
        """Split *text* on the configured separator and lowercase each token."""
        return [tok.lower() for tok in text.split(self.sep)]

    def build_vocab(self, sentence_list):
        """Count tokens over raw (untokenized) sentences and assign ids.

        A token enters the id tables as soon as its count reaches
        ``min_freq``. Unlike an exact ``== min_freq`` test, the ``>=``
        comparison also admits tokens when ``min_freq <= 0`` (original code
        silently skipped every token in that case); the membership guard
        keeps each token registered exactly once.
        """
        for sentence in sentence_list:
            for tok in self.tokenize(sentence):
                self.freqs[tok] = self.freqs.get(tok, 0) + 1
                if tok not in self.stoi and self.freqs[tok] >= self.min_freq:
                    self.stoi[tok] = self.idx
                    self.itos[self.idx] = tok
                    # Only a newly registered token advances the id counter.
                    self.idx += 1
        self.available_save = True

    def seq_tok_id(self, sentence):
        """Encode a raw sentence into a list of ids; OOV tokens map to UNK_ID.

        NOTE(review): a tag vocabulary (tag=True) does not register UNK, so
        for unseen tags Const.UNK_ID may collide with a real tag's id --
        confirm the id layout in ``constants.Const``.
        """
        return [self.stoi.get(tok, Const.UNK_ID) for tok in self.tokenize(sentence)]

class Express(Dataset):
    """Express-waybill NER dataset.

    Loads tab-separated ``words\tlabels`` lines from *data_path*. A training
    dataset learns its own vocabularies; dev/test datasets must reuse the
    vocabularies learned on the training split.
    """

    def __init__(self, data_path, train=True, sequence_vocab=None, tag_vocab=None):
        """
        Args:
            data_path: path to one data file (single path string only).
            train: when True, build fresh vocabularies from this data;
                   when False, the two vocab arguments are required.
            sequence_vocab / tag_vocab: vocabularies learned on the train
                   split, reused for eval splits.
        """
        super().__init__()
        assert isinstance(data_path, str), "only support for one data path string"
        words_labels = list(self.__load_dataset__(data_path))
        self.sequences = [words for words, labels in words_labels]
        self.tags = [labels for words, labels in words_labels]
        if train:
            # Vocabularies may only ever be learned from the training split.
            self.sequence_vocab = Vocabulary(tag=False)
            self.sequence_vocab.build_vocab(self.sequences)
            self.tag_vocab = Vocabulary(tag=True)
            self.tag_vocab.build_vocab(self.tags)
        else:
            assert sequence_vocab is not None and tag_vocab is not None, "when the dataset is not train dataset , then sequence_vocab and tag_vocab must both not None"
            self.sequence_vocab = sequence_vocab
            self.tag_vocab = tag_vocab

    def __load_dataset__(self, data_path):
        """Yield (words, labels) string pairs from a TSV file.

        The first line is treated as a header and skipped. Raw fields are
        yielded as-is (still '\002'-separated); tokenization happens later
        in the vocabularies.
        """
        with open(data_path, 'r', encoding='utf-8') as fp:
            # next(fp, None): a bare next() inside a generator would turn an
            # empty file's StopIteration into a RuntimeError (PEP 479).
            next(fp, None)
            # Iterate the file object directly instead of materializing the
            # whole file with readlines().
            for line in fp:
                words, labels = line.strip('\n').split('\t')
                yield words, labels

    def __len__(self):
        assert len(self.tags) == len(self.sequences)
        return len(self.tags)

    def __getitem__(self, index):
        """Return (word_ids, label_ids) as 1-D long tensors for one sample."""
        words = self.sequences[index]
        labels = self.tags[index]
        words = self.sequence_vocab.seq_tok_id(words)
        labels = self.tag_vocab.seq_tok_id(labels)
        return torch.tensor(words), torch.tensor(labels)

    def save_vocab_to_file(self, vocabpath="vocab.pkl"):
        # TODO: persist sequence_vocab / tag_vocab to *vocabpath*.
        pass

    def load_vocab_from_file(self, vocabpath="vocab.pkl"):
        # TODO: restore sequence_vocab / tag_vocab from *vocabpath*.
        pass

class MaskSeqCollate:
    """Collate callable that pads a batch of (sequence, tags) tensor pairs.

    Produces ``(x_seqs, y_tags, mask)`` where every row is padded to the
    longest sequence in the batch and ``mask`` is 1.0 on real positions,
    0.0 on padding.
    """

    def __call__(self, batch):
        # Each batch element is the (words_tensor, labels_tensor) pair
        # returned by the dataset's __getitem__.
        longest = max(seq.size(0) for seq, _ in batch)
        n_rows = len(batch)

        x_seqs = torch.full((n_rows, longest), Const.PAD_ID, dtype=torch.long)
        y_tags = torch.full((n_rows, longest), Const.PAD_TAG_ID, dtype=torch.long)
        for row, (seq, tag) in enumerate(batch):
            x_seqs[row, :seq.size(0)] = seq
            y_tags[row, :tag.size(0)] = tag
        # Padding is done; non-pad tag positions define the float mask of
        # shape (batch_size, longest).
        mask = (y_tags != Const.PAD_TAG_ID).float()
        return x_seqs, y_tags, mask

def get_express_dataloader(data_path,
                        batch_size=32,
                        num_workers=8,
                        shuffle=True,
                        pin_memory=True,
                        train=True, sequence_vocab=None, tag_vocab=None):
    """Build an Express dataset and a padding/masking DataLoader over it.

    Returns (dataloader, dataset) so callers can grab the train dataset's
    vocabularies and pass them back in for the dev/test splits.
    """
    ds = Express(data_path, train, sequence_vocab, tag_vocab)
    loader = DataLoader(
        dataset=ds,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
        collate_fn=MaskSeqCollate(),
    )
    return loader, ds
# %%

# Build the train loader first; dev/test reuse its learned vocabularies.
train_dataloader, train_dataset = get_express_dataloader("../PaddleNLP_NER_CRF/express_ner/train.txt", batch_size=4)
dev_dataloader, _ = get_express_dataloader("../PaddleNLP_NER_CRF/express_ner/dev.txt", batch_size=4,train=False, sequence_vocab=train_dataset.sequence_vocab, tag_vocab=train_dataset.tag_vocab)
test_dataloader, _ = get_express_dataloader("../PaddleNLP_NER_CRF/express_ner/test.txt",batch_size=4 ,train=False, sequence_vocab=train_dataset.sequence_vocab, tag_vocab=train_dataset.tag_vocab)

# Smoke-test: print shape and contents of the first three padded batches.
for it, (x_seqs, y_tags, mask) in enumerate(train_dataloader):
    if it == 3:
        break
    # NOTE(review): "batch start" * 30 repeats the phrase 30x with no spaces
    # or newline between repeats -- presumably meant as a visual separator.
    print("batch start" * 30)
    print(x_seqs.shape)
    print(x_seqs)
    print(mask.shape)
    print(mask)
    print("batch end" * 50)

# %%
