import transformers as ppb
import torch
from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence

# Entity tag inventory for the geology NER task (BIO scheme):
#   ROCK        - rock types
#   MINERAL     - minerals
#   ORE_DEPOSIT - economically important elements or minerals
#   STRAT       - stratigraphic units
# 'X' is assigned to non-initial WordPiece sub-tokens during encoding.
label_list = ['O', 'X', 'B-ROCK', 'I-ROCK', 'B-MINERAL', 'I-MINERAL', 'B-TIMESCALE', 'I-TIMESCALE', 'B-STRAT',
              'I-STRAT', 'B-LOCATION', 'I-LOCATION', 'B-ORE_DEPOSIT', 'I-ORE_DEPOSIT']
# Map each tag to its integer id (position in label_list).
label_map = {tag: index for index, tag in enumerate(label_list)}

# Load the pre-trained BERT model and its matching WordPiece tokenizer.
# NOTE(review): 'bert-base-uncased' lower-cases all input; confirm this is
# intentional for the (often cased) geological entity names above.
model_class, tokenizer_class, pretrained_weights = (ppb.BertModel, ppb.BertTokenizer, 'bert-base-uncased')
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights)


def read_test_data():
    """Read the CoNLL-style test corpus from ``test.txt``.

    Each non-blank line is ``"<token> <tag>"``; a blank line separates
    sentences.

    :return: two parallel lists — space-joined sentences, and the
        per-token tag list for each sentence.
    """
    test_data = []
    test_label = []
    with open('test.txt') as f:
        sentence = []
        label = []
        for line in f:
            if line == "\n":
                # Sentence boundary: flush the tokens collected so far.
                test_data.append(" ".join(sentence))
                test_label.append(label)
                sentence = []
                label = []
            else:
                tmp = line.strip('\n').split(" ")
                sentence.append(tmp[0])
                label.append(tmp[1])
        # Fix: flush the final sentence when the file does not end with a
        # blank line (the original silently dropped it).
        if sentence:
            test_data.append(" ".join(sentence))
            test_label.append(label)
    return test_data, test_label


def read_train_data():
    """Read the CoNLL-style training corpus from ``train.txt``.

    Each non-blank line is ``"<token> <tag>"``; a blank line separates
    sentences.

    :return: two parallel lists — space-joined sentences, and the
        per-token tag list for each sentence.
    """
    train_data = []
    train_label = []
    with open('train.txt') as f:
        sentence = []
        label = []
        for line in f:
            line = line.strip('\n')
            # Fix: after strip('\n') a blank separator line is "", not " ".
            # The original compared against " ", so the separator branch was
            # unreachable and tmp[1] raised IndexError on every blank line.
            if not line:
                train_data.append(" ".join(sentence))
                train_label.append(label)
                sentence = []
                label = []
            else:
                tmp = line.split(" ")
                sentence.append(tmp[0])
                label.append(tmp[1])
        # Flush the final sentence when the file lacks a trailing blank line.
        if sentence:
            train_data.append(" ".join(sentence))
            train_label.append(label)
    return train_data, train_label


def create_mini_batch(samples):
    """Collate (token_tensor, label_tensor) pairs into padded batch tensors.

    :param samples: sequence of (token_ids, label_ids) 1-D tensor pairs.
    :return: (tokens, attention_masks, labels) batch-first tensors.
    """
    token_seqs = [pair[0] for pair in samples]
    label_seqs = [pair[1] for pair in samples]

    # Zero-pad every sequence to the longest length in the batch.
    tokens = pad_sequence(token_seqs, batch_first=True)
    labels = pad_sequence(label_seqs, batch_first=True, padding_value=0)

    # NOTE(review): when the padded width is not exactly 50, the original
    # code appends a single extra zero column (not padding up to 50) —
    # presumably tied to a fixed max length of 50; confirm intent with caller.
    if tokens.size(1) != 50:
        zero_col = torch.zeros(tokens.size(0), 1, dtype=tokens.dtype)
        tokens = torch.cat((tokens, zero_col), dim=1)
    if labels.size(1) != 50:
        zero_col = torch.zeros(labels.size(0), 1, dtype=labels.dtype)
        labels = torch.cat((labels, zero_col), dim=1)

    # Attention mask: 1 wherever the token id is non-zero padding.
    masks = (tokens != 0).long()

    return tokens, masks, labels


class generate_datasets:
    """Load the train/test corpora, encode them with the BERT tokenizer,
    and expose both splits as NewDataset instances."""

    def __init__(self):
        self.train_data, self.train_labels = read_train_data()
        self.test_data, self.test_labels = read_test_data()
        self.train_word_ids, self.train_label_ids = self.build_vec(1)
        self.test_word_ids, self.test_label_ids = self.build_vec(0)
        self.train_dataset = NewDataset(self.train_word_ids, self.train_label_ids)
        self.test_dataset = NewDataset(self.test_word_ids, self.test_label_ids)

    def build_vec(self, data_type):
        """Convert one split to token ids and WordPiece-aligned label ids.

        :param data_type: 1 selects the training split, anything else test.
        :return: (word_ids, label_ids) parallel lists, one entry per sentence.
        """
        if data_type == 1:
            sentences, labels = self.train_data, self.train_labels
        else:
            sentences, labels = self.test_data, self.test_labels

        word_ids = []
        label_ids = []
        for sentence, sentence_labels in zip(sentences, labels):
            # BERT ids for the whole sentence (no [CLS]/[SEP] added).
            word_ids.append(tokenizer.encode(sentence, add_special_tokens=False))
            per_piece_ids = []
            for word, tag in zip(sentence.split(" "), sentence_labels):
                # WordPiece may split a word into several sub-tokens: the
                # first piece keeps the real tag, continuations get 'X'.
                pieces = tokenizer.tokenize(word)
                for k in range(len(pieces)):
                    per_piece_ids.append(label_map[tag] if k == 0 else label_map['X'])
            label_ids.append(per_piece_ids)
        return word_ids, label_ids


class NewDataset(Dataset):
    """Wrap parallel lists of token-id and label-id sequences as a torch
    Dataset, tensorizing each pair lazily on access."""

    def __init__(self, ids, labels):
        self.ids = ids
        self.labels = labels
        self.len = len(ids)

    def __getitem__(self, item):
        # Convert to tensors only when the sample is actually requested.
        return torch.tensor(self.ids[item]), torch.tensor(self.labels[item])

    def __len__(self):
        return self.len


# Smoke test: print the first test sentence. Guarded so that importing this
# module does not trigger file I/O as a side effect. The stale commented-out
# copy of build_vec (now a method of generate_datasets) has been removed.
if __name__ == "__main__":
    test_sentences, test_tags = read_test_data()
    print(test_sentences[0])