import os
import pickle

import torch
import torch.nn as nn
from sklearn.metrics import f1_score
from torch.utils.data import Dataset, DataLoader


# make_vocab=True means the word2index / tag2index maps are also built
def build_corpus(split, make_vocab=True, data_dir="data"):
    """Read a BMES-format character tagging file.

    split: one of 'train', 'dev', 'test' -> reads <data_dir>/<split>.char.bmes
    make_vocab: when True, also build and return word2id and tag2id maps
    Returns (word_lists, tag_lists[, word2id, tag2id]); sentences are sorted
    by ascending length so padded batches waste less space.
    """
    assert split in ['train', 'dev', 'test']

    word_lists = []
    tag_lists = []
    with open(os.path.join(data_dir, split + ".char.bmes"), 'r', encoding='utf-8') as f:
        word_list = []
        tag_list = []
        for line in f:
            if line != '\n':
                # Each non-blank line holds "<char> <tag>".
                word, tag = line.strip('\n').split()
                word_list.append(word)
                tag_list.append(tag)
            else:
                # A blank line terminates the current sentence.
                word_lists.append(word_list)
                tag_lists.append(tag_list)
                word_list = []
                tag_list = []
    # Bug fix: keep the last sentence when the file has no trailing blank line.
    if word_list:
        word_lists.append(word_list)
        tag_lists.append(tag_list)

    # Sort words and tags jointly so sentence/tag alignment can never drift
    # (the original sorted the two lists independently, relying on sort stability).
    pairs = sorted(zip(word_lists, tag_lists), key=lambda p: len(p[0]))
    word_lists = [p[0] for p in pairs]
    tag_lists = [p[1] for p in pairs]

    # When make_vocab is True, also return word2id and tag2id.
    if make_vocab:
        word2id = build_map(word_lists)
        tag2id = build_map(tag_lists)
        # Special ids: <UNK> for unseen characters, <PAD> for batch padding.
        word2id['<UNK>'] = len(word2id)
        word2id['<PAD>'] = len(word2id)

        tag2id['<PAD>'] = len(tag2id)
        return word_lists, tag_lists, word2id, tag2id
    else:
        return word_lists, tag_lists


def build_map(lists):
    """Assign each distinct element an integer id in first-seen order."""
    index_of = {}
    for seq in lists:
        for item in seq:
            # len() is evaluated before insertion, so ids increase 0, 1, 2, ...
            index_of.setdefault(item, len(index_of))
    return index_of


class MyDataset(Dataset):
    """Holds tokenized sentences and tag sequences; maps them to ids on access."""

    def __init__(self, datas, tags, word_2_index, tag_2_index):
        # datas/tags: parallel lists of per-sentence character and tag lists.
        self.datas = datas
        self.tags = tags
        self.word_2_index = word_2_index
        self.tag_2_index = tag_2_index

    def __getitem__(self, index):
        """Return one (word ids, tag ids) pair; unknown characters map to <UNK>."""
        sentence = self.datas[index]
        labels = self.tags[index]
        unk = self.word_2_index["<UNK>"]
        word_ids = [self.word_2_index.get(ch, unk) for ch in sentence]
        tag_ids = [self.tag_2_index[t] for t in labels]
        return word_ids, tag_ids

    def __len__(self):
        assert len(self.datas) == len(self.tags)
        return len(self.tags)

    def pro_batch_data(self, batch_datas):
        """Collate fn: pad every sequence to the batch max length and tensorize.

        Sentences in a batch differ in length, so the default collate cannot
        stack them; pad with <PAD> up to the longest sentence in the batch.
        NOTE(review): reads the module-level `device` set by the training script.
        """
        word_pad = self.word_2_index["<PAD>"]
        tag_pad = self.tag_2_index["<PAD>"]
        seqs = [d for d, _ in batch_datas]
        labs = [t for _, t in batch_datas]
        max_len = max(len(s) for s in seqs)
        padded_seqs = [s + [word_pad] * (max_len - len(s)) for s in seqs]
        padded_labs = [t + [tag_pad] * (max_len - len(t)) for t in labs]
        data_tensor = torch.tensor(padded_seqs, dtype=torch.int64, device=device)
        tag_tensor = torch.tensor(padded_labs, dtype=torch.long, device=device)
        return data_tensor, tag_tensor


class Mymodel(nn.Module):
    """(Bi)LSTM token classifier for character-level sequence labeling."""

    # corpus_num: vocabulary size (distinct chars incl. <UNK>/<PAD>)
    # embedding_num: embedding dimension; hidden_num: LSTM hidden size per direction
    # class_num: number of tag classes; bi=True uses a bidirectional LSTM
    def __init__(self, corpus_num, embedding_num, hidden_num, class_num, bi=True):
        super().__init__()

        self.embedding = nn.Embedding(corpus_num, embedding_num)
        self.lstm = nn.LSTM(embedding_num, hidden_num, batch_first=True, bidirectional=bi)

        # A bidirectional LSTM concatenates both directions' hidden states.
        if bi:
            self.classifier = nn.Linear(hidden_num * 2, class_num)
        else:
            self.classifier = nn.Linear(hidden_num, class_num)

        # NOTE(review): the loss also counts <PAD> positions; consider
        # CrossEntropyLoss(ignore_index=tag2id['<PAD>']) if padding should be masked.
        self.cross_loss = nn.CrossEntropyLoss()

    def forward(self, batch_data, batch_tag=None):
        """Run the tagger.

        batch_data: (batch, seq) int64 character ids
        batch_tag: optional (batch, seq) gold tag ids; when given, returns the
            scalar cross-entropy loss (training mode)
        Returns the loss when batch_tag is given, otherwise the flattened
        predicted tag ids. Bug fix: inference previously returned None and
        forced callers to read mutable state; `self.pre` is still set for
        backward compatibility.
        """
        embedding = self.embedding(batch_data)
        out, _ = self.lstm(embedding)

        pre = self.classifier(out)  # (batch, seq, class_num) logits
        # Flattened argmax predictions, kept for callers that read model.pre.
        self.pre = torch.argmax(pre, dim=-1).reshape(-1)
        if batch_tag is not None:
            loss = self.cross_loss(pre.reshape(-1, pre.shape[-1]), batch_tag.reshape(-1))
            return loss
        return self.pre


def named_entity(word_2_index, model, index_2_tag, device, text):
    """Tag each character of `text` and return the 'char_tag' pairs as a string.

    word_2_index: char -> id map (must contain '<UNK>')
    model: trained tagger; predictions are read from model.pre after forward()
    index_2_tag: id -> tag-name sequence
    device: torch device for the input tensor
    text: sentence to tag
    """
    # Bug fix: unknown characters were mapped to <PAD>; map them to <UNK>,
    # consistent with MyDataset.__getitem__ at training time.
    unk = word_2_index["<UNK>"]
    text_index = torch.tensor([[word_2_index.get(ch, unk) for ch in text]],
                              dtype=torch.int64, device=device)
    model.forward(text_index)
    tags = [index_2_tag[i] for i in model.pre]
    pairs = [f'{w}_{s}' for w, s in zip(text, tags)]
    print(pairs)
    return str(pairs)


if __name__ == "__main__":
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    # train_data为双重列表，第一层每个元素为一句话，第二层每个元素是单个字
    # word_2_index为字典，key为不重复的字，value为数字递增。
    train_data, train_tag, word_2_index, tag_2_index = build_corpus("train", make_vocab=True)
    # 验证集和测试集没有2index，因为实际中可能为止数据
    dev_data, dev_tag = build_corpus("dev", make_vocab=False)
    index_2_tag = [i for i in tag_2_index]

    corpus_num = len(word_2_index)  # 不重复的中文汉字数量
    class_num = len(tag_2_index)  # 分类的类别数，即标签数量

    epoch = 10
    train_batch_size = 50  # 一次输入50条数据，更新权重
    dev_batch_size = 100
    embedding_num = 101  # 每个汉字用101个数字表示，101维
    hidden_num = 107
    bi = True
    lr = 0.001

    train_dataset = MyDataset(train_data, train_tag, word_2_index, tag_2_index)

    # collate_fn=train_dataset.pro_batch_data就是不使用系统的拼接张量方法，使用自己定义的pro_batch_data（）方法
    train_dataloader = DataLoader(train_dataset, train_batch_size, shuffle=False,
                                  collate_fn=train_dataset.pro_batch_data)

    dev_dataset = MyDataset(dev_data, dev_tag, word_2_index, tag_2_index)
    # shuffle=False是不对元素随机排列
    dev_dataloader = DataLoader(dev_dataset, dev_batch_size, shuffle=False, collate_fn=dev_dataset.pro_batch_data)

    model = Mymodel(corpus_num, embedding_num, hidden_num, class_num, bi)
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    model = model.to(device)

    for e in range(epoch):
        model.train()

        # batch_data, batch_tag为已经填充完毕的数据
        for batch_data, batch_tag in train_dataloader:
            train_loss = model.forward(batch_data, batch_tag)
            train_loss.backward()
            opt.step()
            opt.zero_grad()

        model.eval()
        all_pre = []
        all_tag = []
        for dev_batch_data, dev_batch_tag in dev_dataloader:
            dev_loss = model.forward(dev_batch_data, dev_batch_tag)
            all_pre.extend(model.pre.detach().cpu().numpy().tolist())
            all_tag.extend(dev_batch_tag.detach().cpu().numpy().reshape(-1).tolist())
        score = f1_score(all_tag, all_pre, average="micro")
        print(f"{e},f1_score:{score:.3f},dev_loss:{dev_loss:.3f},train_loss:{train_loss:.3f}")

    with open('argument.pickle', 'wb') as f:
        pickle.dump(word_2_index, f, pickle.HIGHEST_PROTOCOL)
        pickle.dump(model, f, pickle.HIGHEST_PROTOCOL)
        pickle.dump(index_2_tag, f, pickle.HIGHEST_PROTOCOL)
        pickle.dump(device, f, pickle.HIGHEST_PROTOCOL)
    named_entity()