'''
Final version, using flair stacked embeddings.
'''
import os
import json
import torch
import numpy as np
import pickle
from torch.utils.data import Dataset
from flair.embeddings import WordEmbeddings, FlairEmbeddings, CharacterEmbeddings, StackedEmbeddings
from flair.data import Sentence
device = 'cuda' if torch.cuda.is_available() else 'cpu'


class MNERDataset(Dataset):
    """Multimodal NER dataset: tweet text + image captions + detected image regions.

    Each sample returned by ``__getitem__`` contains the flair stacked
    embeddings (GloVe + forward/backward Flair LM + character) for the
    sentence, padded/truncated to ``max_word_len``, plus token strings,
    token-level label ids and a CRF attention mask.

    NOTE(review): image-region features/labels are loaded and stored in
    ``__init__`` but are not returned by ``__getitem__`` — presumably
    consumed elsewhere; confirm before removing.
    """

    def __init__(self, type, args):
        """Load text/caption JSON and object-detection pickle for split ``type``.

        :param type: dataset split name used to format the file paths
                     (shadows the builtin; kept for caller compatibility)
        :param args: config object providing ``args.dataset.*`` paths and
                     ``args.train.max_word_length``
        """
        self.max_word_len = args.train.max_word_length
        self.type = type
        self.args = args
        self.image_dir = os.path.join(self.args.dataset.data_dir, "image")
        # Stacked embedding = GloVe word vectors + contextual Flair LMs + char embedding
        self.glove_embedding = WordEmbeddings('glove')
        self.flair_embedding_forward = FlairEmbeddings('news-forward')
        self.flair_embedding_backward = FlairEmbeddings('news-backward')
        self.char_embedding = CharacterEmbeddings()
        self.stacked_embeddings = StackedEmbeddings([
            self.glove_embedding,
            self.flair_embedding_forward,
            self.flair_embedding_backward,
            self.char_embedding,
        ])

        # Text-side files (caption / text JSON are parallel, index-aligned lists)
        self.caption = os.path.join(args.dataset.data_dir, args.dataset.image_caption.format(self.type))
        self.text = os.path.join(args.dataset.data_dir, args.dataset.text.format(self.type))
        self.captions_data = []
        self.texts_data = []
        self.tokens_data = []
        self.labels_data = []

        # Object-detection data: pickle mapping image_id ->
        # [{'feature': ndarray(2048), 'label': int}, ...] — assumed schema, TODO confirm
        self.image = os.path.join(args.dataset.data_dir, args.dataset.image.format(self.type))
        self.image_data_feature = []
        self.image_data_label = []
        with open(self.image, 'rb') as fp:
            self.image_pkl_data = pickle.load(fp)

        with open(self.caption, 'r', encoding='utf8') as fp:
            self.caption_json_data = json.load(fp)
        with open(self.text, 'r', encoding='utf8') as fp:
            self.text_json_data = json.load(fp)
        for i in range(len(self.caption_json_data)):
            # Text-side data
            self.captions_data.append(self.caption_json_data[i]["caption"])
            self.texts_data.append(self.text_json_data[i]["text"])
            self.tokens_data.append(self.text_json_data[i]["tokens"])
            self.labels_data.append(self.text_json_data[i]["label"])
            # Image-side data
            image_filename = self.caption_json_data[i]['image_id']
            image_data = self.image_pkl_data[image_filename]
            image_feature_temp = []
            image_label_temp = []
            for obj in image_data:
                image_feature_temp.append(obj['feature'])
                image_label_temp.append(obj['label'])
            # Zero-pad to 5 detected objects; label -1 marks padding.
            # NOTE(review): images with MORE than 5 objects are not truncated,
            # leaving ragged rows — confirm upstream guarantees <= 5 objects.
            for _ in range(5 - len(image_data)):
                image_feature_temp.append(np.zeros(2048))
                image_label_temp.append(-1)
            self.image_data_feature.append(image_feature_temp)
            self.image_data_label.append(image_label_temp)

    def __getitem__(self, index):
        """Return one embedded, padded sample as a dict of Python lists."""
        token_list = self.tokens_data[index]
        label_list = self.labels_data[index]    # aligned one-to-one with token_list
        ntokens = []
        ntokens_embeddings = []

        # Re-join the gold tokens so flair re-tokenizes the whole sentence
        text_str1 = " ".join(token_list)

        # Tokenize and embed the sentence, then collect per-token vectors
        sentence = Sentence(text_str1)
        self.stacked_embeddings.embed(sentence)
        for token in sentence:
            ntokens.append(token.text)
            ntokens_embeddings.append(token.embedding.tolist())

        label_id = label(self, label_list, token_list)
        # Known issue: flair's tokenizer may split a word differently from
        # token_list (e.g. 'Milli_Bear96' -> 'Milli', '_', 'Bear96'), so
        # ntokens can be longer than token_list; label() compensates by
        # emitting one placeholder id per extra sub-token.

        # Fix: truncate over-long sentences first — previously a sentence
        # longer than max_word_len produced a negative pad_len (silent no-op)
        # and a ragged sample that broke torch.tensor in collate_fn.
        if len(ntokens) > self.max_word_len:
            ntokens = ntokens[:self.max_word_len]
            ntokens_embeddings = ntokens_embeddings[:self.max_word_len]

        # Pad to the fixed length
        text_len = len(ntokens)
        pad_len = self.max_word_len - text_len
        ntokens_embeddings.extend(pad_len * [[0] * self.stacked_embeddings.embedding_length])
        ntokens.extend(['pad'] * pad_len)

        # CRF mask: 1 for real tokens, 0 for padding
        crf_attention_mask = [1] * text_len + [0] * pad_len

        return {
            "label_id": label_id,
            "ntokens": ntokens,
            "ntokens_embeddings": ntokens_embeddings,
            "crf_attention_mask": crf_attention_mask
        }

    def __len__(self):
        return len(self.text_json_data)


def collate_fn(batch):
    """Collate MNERDataset samples into batched tensors on the target device.

    :param batch: list of sample dicts produced by ``MNERDataset.__getitem__``
    :return: dict with
        - "label": LongTensor of padded label ids
        - "ntokens": list of token-string lists (left as Python lists)
        - "ntokens_embeddings": FloatTensor of stacked word embeddings
        - "crf_attention_mask": ByteTensor mask (1 = real token, 0 = padding)
    """
    label_id = []
    ntokens = []
    ntokens_embeddings = []
    crf_attention_mask = []

    # Gather per-field lists (fix: the enumerate index was never used)
    for sample in batch:
        label_id.append(sample["label_id"])
        ntokens.append(sample["ntokens"])
        ntokens_embeddings.append(sample["ntokens_embeddings"])
        crf_attention_mask.append(sample["crf_attention_mask"])

    return {
        "label": torch.tensor(label_id).to(device),
        "ntokens": ntokens,
        "ntokens_embeddings": torch.tensor(ntokens_embeddings).to(device),
        "crf_attention_mask": torch.tensor(crf_attention_mask).to(device).byte()
    }


# Expand word-level labels to flair-token-level label ids
def label(self, label_list, token_list):
    """Convert a word-aligned label list into flair-token-aligned label ids.

    Each word in ``token_list`` is re-tokenized by flair's tokenizer; the
    word's label id goes on the first resulting sub-token, and every extra
    sub-token gets the placeholder id 10 (presumably an "X"/sub-token tag —
    TODO confirm against ``args.train.tag2idx``).  The result is padded
    with 0 (and truncated) to ``args.train.max_word_length``.

    :param self: object exposing ``args.train.tag2idx`` and
                 ``args.train.max_word_length``
    :param label_list: gold labels, one per word in ``token_list``
    :param token_list: the sentence's words
    :return: list[int] of length ``max_word_length``
    """
    tag2idx = self.args.train.tag2idx
    max_len = self.args.train.max_word_length
    label_ids = []
    for word, tag in zip(token_list, label_list):
        # Sentence() alone runs flair tokenization; the embed() call the
        # original made here was expensive dead work — only the token count
        # is needed.  (Loop vars renamed: 'label' shadowed this function.)
        n_pieces = len(Sentence(word))
        if n_pieces > 0:
            label_ids.append(tag2idx[tag])
            if n_pieces > 1:
                label_ids.extend([10] * (n_pieces - 1))

    # Fix: truncate on overflow — previously a negative pad length silently
    # left an over-long list, producing ragged batches downstream.
    label_ids = label_ids[:max_len]
    label_ids.extend([0] * (max_len - len(label_ids)))

    return label_ids