import torch
from torch.utils import data
from config import *
import pandas as pd
from seqeval.metrics import classification_report

# 获取词表
def get_vocab():
    """Load the vocabulary table.

    Returns:
        (list, dict): the words in file order, and a word -> id mapping.
    """
    vocab_df = pd.read_csv(VOCAB_PATH, names=['word', 'id'])
    id_by_word = dict(vocab_df.values)
    return vocab_df['word'].tolist(), id_by_word



# 获取标签表
def get_label():
    """Load the label table.

    Returns:
        (list, dict): the labels in file order (e.g. 'O', 'B-Disease', 'I-Drug', ...),
        and a label -> id mapping (e.g. {'O': 0, 'I-Test': 1, ...}).
    """
    label_df = pd.read_csv(LABEL_PATH, names=["label", 'id'])
    id_by_label = dict(label_df.values)
    return label_df['label'].tolist(), id_by_label



# 定义DataSet
class Dataset(data.Dataset):
    """NER dataset that chunks a (word, label) CSV into ~base_len-token sentences.

    Chunk boundaries are only placed on tokens labeled 'O' so that no entity
    span is ever split across two sentences.
    """

    def __init__(self, type='train', base_len = 50):
        super().__init__()
        self.base_len = base_len
        # Pick the sample file: anything other than 'train' falls back to the test set.
        path = TRAIN_SAMPLE_PATH if type == 'train' else TEST_SAMPLE_PATH
        self.df = pd.read_csv(path, names=['word', 'label'])
        _, self.word2id = get_vocab()
        _, self.label2id = get_label()
        self.get_points()

    def get_points(self):
        """Compute sentence split points, roughly every base_len tokens.

        Produces self.points like [0, 50, 100, ..., len(df)]; a candidate cut is
        shifted right one token at a time until it lands on an 'O' label, keeping
        entity spans intact within one sentence.
        """
        self.points = [0]
        pos = 0
        total = len(self.df)
        # Stop once another full base_len step would run past the end of the data.
        while pos + self.base_len < total:
            cut = pos + self.base_len
            if self.df.loc[cut, 'label'] == 'O':
                # Safe to cut here: the token is outside any entity.
                pos = cut
                self.points.append(pos)
            else:
                # Inside an entity — slide the cut point forward by one token.
                pos += 1
        self.points.append(total)

    def __len__(self):
        # One sentence per consecutive pair of split points.
        return len(self.points) - 1

    def __getitem__(self, index):
        """Return one sentence as (word ids, label ids)."""
        start, end = self.points[index], self.points[index + 1]
        chunk = self.df[start:end]
        unk_id = self.word2id[WORD_UNK]
        o_id = self.label2id['O']
        # Unknown words map to the UNK id; unknown labels map to the 'O' id.
        input = [self.word2id.get(w, unk_id) for w in chunk['word']]
        target = [self.label2id.get(l, o_id) for l in chunk['label']]
        return input, target


def collate_fn(batch):
    """Pad a batch of (input_ids, label_ids) pairs to a common length.

    Each batch element is a tuple like ([128, 456, 80, ...], [0, 0, 0, ...]):
    word ids and their label ids. Returns (input, target, mask) tensors, where
    mask is True over real tokens and False over padding.
    """
    # Sort longest-first (in place) so the first element carries the max length.
    batch.sort(key=lambda item: len(item[0]), reverse=True)
    max_len = len(batch[0][0])
    inputs, targets, masks = [], [], []
    for words, labels in batch:
        pad = max_len - len(words)
        inputs.append(words + [WORD_PAD_ID] * pad)      # pad the word ids
        targets.append(labels + [LABEL_O_ID] * pad)     # pad labels with the 'O' id
        # 1 over real tokens, 0 over padding, so padding can't affect the loss.
        masks.append([1] * len(words) + [0] * pad)
    return torch.tensor(inputs), torch.tensor(targets), torch.tensor(masks).bool()


# 统计实体的评估函数
def report(y_true, y_pred):
    """Entity-level evaluation: seqeval classification report (P/R/F1 per entity type)."""
    return classification_report(y_true, y_pred)


if __name__ == '__main__':
    # Smoke test: build the dataset and print one collated batch.
    dataset = Dataset()
    loader = data.DataLoader(dataset=dataset, batch_size=50, collate_fn=collate_fn)
    # BUG FIX: `iter(loader).next()` is Python 2 iterator syntax and raises
    # AttributeError on Python 3; use the builtin next() instead.
    print(next(iter(loader)))
    print("end")