import json
import random

import torch

from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence  # 进行句子长短补齐或截断

from P03_NER.LSTM_CRF.utils.common import *

# Build the corpus and vocabulary once at import time.
# `datas` is indexable with samples of the form (tokens, tags) (see NerDataset.__getitem__);
# `word2id` maps a token to its integer id (used in collate_fn, with 1 as the fallback id).
datas, word2id = build_data()
# print(f'word2id-->{word2id}')

# 1、构建Dataset类
# 1. Dataset wrapper
class NerDataset(Dataset):
    """Thin Dataset wrapper over a list of (sentence, tag-sequence) samples."""

    def __init__(self, datas):
        super().__init__()
        self.datas = datas

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.datas)

    def __getitem__(self, index):
        """Return the (tokens, tags) pair stored at `index`."""
        sample = self.datas[index]
        return sample[0], sample[1]


def test_NerDataset():
    """Smoke test: wrap the module-level `datas` and print size and first sample."""
    dataset = NerDataset(datas)
    print(f'len(ner_dataset) = {len(dataset)}')
    print(dataset[0])


# 2、构建自定义函数collate_fn()
def collate_fn(batch_data):
    # print(f'batch_data-->{batch_data}')
    # 1)将文字和标签转成id
    # x_train = []
    # for data in batch_data:
    #     # 将文字列表转成id列表
    #     id_list = [word2id.get(word, 1) for word in data[0]]
    #     # 转成tensor
    #     x_train.append(torch.tensor(id_list))
    # print(f'x_train-->{x_train}')
    # 简写
    x_train = [torch.tensor([word2id.get(word, 1) for word in data[0]]) for data in batch_data]
    # print(f'x_train-->{x_train}')
    y_train = [torch.tensor([conf.tag2id.get(word, 11) for word in data[1]]) for data in batch_data]
    # print(f'y_train-->{y_train}')

    # 2)统一样本长度
    '''
    pad_sequence:可以对一个批次的样本进行统一长度，统一长度的方式是以该批次中最长的样本为基准
    batch_first=True,则返回的数据形状为[batch_size, max_seq_len]  padding_value是指用什么补齐
    '''
    input_ids_padded = pad_sequence(x_train, batch_first=True, padding_value=0)  # 用PAD对应的0补齐
    labels_padded = pad_sequence(y_train, batch_first=True, padding_value=11)  # 用PAD对应的11补齐

    # 3)创建attention mask【后边会用到再细讲】
    attention_mask = (input_ids_padded != 0).long()
    # print(f'attention_mask-->{attention_mask}')

    return input_ids_padded, labels_padded, attention_mask


# 3. Build the train/dev DataLoaders
def get_data():
    """Shuffle the module-level `datas` and return (train_dataloader, dev_dataloader).

    The split is 8:2 train/dev. The split index is computed from the actual
    dataset size (previously a hard-coded 6300, which only matched one
    specific corpus size and silently broke the 8:2 ratio otherwise).

    Returns:
        tuple(DataLoader, DataLoader): train and dev loaders, both batched with
        `collate_fn`, shuffled, and dropping the last incomplete batch.
    """
    # Fixed seed so the shuffle (and therefore the train/dev split) is reproducible.
    random.seed(66)
    random.shuffle(datas)

    # 8:2 split computed from the real dataset size.
    split = int(len(datas) * 0.8)

    # Training loader. Tip: set shuffle=False while debugging, True for real training.
    train_dataset = NerDataset(datas[:split])
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=conf.batch_size,
                                  shuffle=True,
                                  collate_fn=collate_fn,
                                  drop_last=True)

    # Validation loader over the remaining 20%.
    dev_dataset = NerDataset(datas[split:])
    dev_dataloader = DataLoader(dev_dataset,
                                batch_size=conf.batch_size,
                                shuffle=True,
                                collate_fn=collate_fn,
                                drop_last=True)

    return train_dataloader, dev_dataloader


if __name__ == '__main__':
    # test_NerDataset()

    train_dataloader, dev_dataloader = get_data()
    # for x in train_dataloader:
    #     print('调用dataloader')
    #     break

    # Pull a single batch and report the shapes produced by collate_fn.
    for batch in train_dataloader:
        ids, labels, mask = batch
        print(f'input_ids_padded-->{ids.shape}')
        print(f'labels_padded-->{labels.shape}')
        print(f'attention_mask-->{mask.shape}')
        break