import os
import json
import torch
from transformers import BertTokenizer
import pickle
from torch.utils.data import Dataset
# Single global device; collate_fn moves every batched tensor onto it.
device = 'cuda' if torch.cuda.is_available() else 'cpu'


class MNERDataset(Dataset):
    """Multimodal NER dataset: tweet tokens + NER labels, an image caption,
    extracted noun phrases, and 7x7 image-patch features per sample."""

    def __init__(self, etype, args):
        self.type = etype
        self.args = args
        # For offline use swap in ('./utils/bert-base-uncased', local_files_only=True)
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

        # Pickled 7x7 image-patch features, keyed by image id
        self.image_77 = os.path.join(args.dataset.data_dir, args.dataset.image_77.format(self.type))
        self.image_data_feature_77 = []
        with open(self.image_77, 'rb') as fp:
            self.image_pkl_data_77 = pickle.load(fp)

        # Text-side resources: captions, sentences/labels, extracted noun phrases
        self.caption = os.path.join(args.dataset.data_dir, args.dataset.image_caption.format(self.type))
        self.text = os.path.join(args.dataset.data_dir, args.dataset.text.format(self.type))
        self.phrase = os.path.join(self.args.dataset.data_dir, self.args.dataset.phrase.format(self.type, args.extract_algorithm))
        self.phrases_data = []
        self.captions_data = []
        self.texts_data = []
        self.tokens_data = []
        self.labels_data = []

        with open(self.phrase, 'r', encoding='utf8') as fp1:
            self.phrase_info_data = json.load(fp1)
        with open(self.caption, 'r', encoding='utf8') as fp:
            self.caption_json_data = json.load(fp)
        with open(self.text, 'r', encoding='utf8') as fp:
            self.text_json_data = json.load(fp)

        for idx in range(len(self.caption_json_data)):
            cap_entry = self.caption_json_data[idx]
            txt_entry = self.text_json_data[idx]
            # Text data: caption, raw sentence, word tokens, per-token labels
            self.captions_data.append(cap_entry["caption"])
            self.texts_data.append(txt_entry["text"])
            self.tokens_data.append(txt_entry["tokens"])
            self.labels_data.append(txt_entry["label"])
            # Visual data: 7x7 patch features; squeeze(0) drops the batch
            # dim that was (accidentally) kept during feature extraction
            self.image_data_feature_77.append(self.image_pkl_data_77[cap_entry['image_id']].squeeze(0))

    def __getitem__(self, index):
        token_list = [w.lower() for w in self.tokens_data[index]]
        label_list = self.labels_data[index]
        caption_str = self.captions_data[index]
        caption_list = caption_str.split(" ")

        label_id = Bert_label(self, token_list, label_list)
        Visual_77_feature = self.image_data_feature_77[index]
        Phrase_extract_info = self.phrase_info_data[index]      # noun phrases extracted from the input sentence
        Phrase_GT = extract_GTPhrase(self, label_list, token_list)

        Bert_sentence = Bert_text(self, token_list)                       # [input sentence] through BERT
        Bert_sentence_caption = Bert_text(self, token_list, caption_str)  # [input sentence + image caption] through BERT

        return {
            "label_id": label_id,
            'Visual_77_feature': Visual_77_feature,
            "Phrase_extract_info": Phrase_extract_info,   # result of the phrase-extraction step
            "Phrase_GT": Phrase_GT,                       # ground-truth entity phrases

            "Bert_input": Bert_sentence_caption,          # alternative choice: Bert_sentence
            "Token_input": token_list + ["[SEP]"] + caption_list,

            "crf_attention_mask": Bert_sentence["attention_mask"],  # mask used during CRF decoding
        }

    def __len__(self):
        return len(self.text_json_data)


def collate_fn(batch):
    """Collate MNERDataset samples into a batched, device-resident dict.

    Returned keys (shapes per the original pipeline notes):
        label                   tensor [batch, max_word_length] of label ids
        Visual_77_feature       tensor [batch, 49, 2048] stacked patch features
        Phrase_extract_info     list (len == batch) of extracted-phrase info
        Phrase_GT               list (len == batch) of ground-truth phrases
        Token_input             list (len == batch) of [sentence + caption] words
        Bert_input              dict: input_ids / attention_mask / token_type_ids
        Bert_input_tokens       wordpiece tokens after BERT tokenization
        Bert_input_token_index  per-sample index of each word's first wordpiece
        crf_attention_mask      uint8 tensor [batch, max_word_length] for CRF decoding
    """
    label_id = []
    visual_feats = []
    phrase_extract_info = []
    phrase_gt = []

    token_input = []
    bert_ntokens = []
    bert_ntokens_id = []
    bert_attention_mask = []
    bert_segment_id = []

    bert_token_index = []
    crf_attention_mask = []

    for sample in batch:
        label_id.append(sample["label_id"])
        visual_feats.append(sample['Visual_77_feature'])
        phrase_extract_info.append(sample["Phrase_extract_info"])
        phrase_gt.append(sample["Phrase_GT"])

        token_input.append(sample["Token_input"])
        bert_input = sample["Bert_input"]
        bert_ntokens.append(bert_input["ntokens"])
        bert_ntokens_id.append(bert_input["ntokens_id"])
        bert_attention_mask.append(bert_input["attention_mask"])
        bert_segment_id.append(bert_input["segment_id"])

        bert_token_index.append(bert_input["token_index_list"])
        crf_attention_mask.append(sample["crf_attention_mask"])

    return {
        "label": torch.tensor(label_id).to(device),
        # torch.tensor() on a list of multi-element tensors raises ValueError;
        # as_tensor + stack handles both tensor and ndarray per-sample features.
        "Visual_77_feature": torch.stack([torch.as_tensor(v) for v in visual_feats]).to(device),
        "Phrase_extract_info": phrase_extract_info,
        "Phrase_GT": phrase_gt,

        "Token_input": token_input,                                         # words of [input sentence + image caption]
        "Bert_input": {
            "input_ids": torch.tensor(bert_ntokens_id).to(device),
            "attention_mask": torch.tensor(bert_attention_mask, dtype=torch.uint8).to(device),
            "token_type_ids": torch.tensor(bert_segment_id).to(device),
        },
        "Bert_input_tokens": bert_ntokens,                                  # wordpieces of [input sentence + image caption]
        "Bert_input_token_index": bert_token_index,                         # index of each token after BERT tokenization
        "crf_attention_mask": torch.tensor(crf_attention_mask, dtype=torch.uint8).to(device),
    }


# Collect the ground-truth entity phrases from a BIO label sequence
def extract_GTPhrase(self, label_list, token_list):
    """Join consecutive entity-labelled tokens into ground-truth phrases.

    Label ids come from args.train.tag2idx: ids 1-8 are the four entity
    types as (B, I) pairs, id 9 is "O".  Tokens whose label ids stay inside
    one (B, I) boundary pair are merged into a single space-joined phrase.

    Fixes two defects of the original implementation:
      * an entity ending on the last token was dropped (early return
        without flushing the pending phrase);
      * a label id outside 1-9 never advanced `i`, causing an infinite loop.
    """
    i = 0
    phrase = ""
    phrase_GT_list = []
    # (B-tag id, I-tag id) boundaries for the four entity types
    boundarys = [[1, 2], [3, 4], [5, 6], [7, 8]]

    while i < len(label_list):
        if self.args.train.tag2idx[label_list[i]] == 9:  # "O": outside any entity
            i += 1
            continue

        matched = False
        for boundary in boundarys:
            while i < len(label_list) and boundary[0] <= self.args.train.tag2idx[label_list[i]] <= boundary[1]:
                matched = True
                phrase += token_list[i] + " "
                i += 1
            if phrase != "":
                phrase_GT_list.append(phrase.strip(" "))
                phrase = ""
                break
        if not matched:
            # Label id matched no boundary and is not 9 (e.g. a special tag):
            # skip it so the outer loop always makes progress.
            i += 1
    return phrase_GT_list


# Map word-level NER labels onto BERT wordpiece positions as tag ids
def Bert_label(self, token_list, label_list):
    """Return label ids aligned with the wordpiece sequence built by Bert_text.

    Layout: [CLS] tag, one tag per word's first wordpiece ("X" for its
    continuation pieces, "O" for irregular splits), truncated to
    max_word_length - 1, then a SEP tag and zero padding.
    """
    tag2idx = self.args.train.tag2idx
    max_len = self.args.train.max_word_length

    label_ids = [tag2idx["CLS"]]
    for token, label in zip(token_list, label_list):
        pieces = self.tokenizer._tokenize(token)
        if not pieces:
            # Token vanishes under wordpiece tokenization: fall back to "O"
            label_ids.append(tag2idx["O"])
        elif len(pieces) > 1 and ("_" in token or "." in token or "," in token or "##" not in pieces[1]):
            # Irregular multi-piece split (Bert_text keeps only one piece here)
            label_ids.append(tag2idx["O"])
        else:
            # First piece carries the real tag, continuations get "X"
            label_ids.append(tag2idx[label])
            label_ids.extend(tag2idx["X"] for _ in pieces[1:])

    # Truncate, close with SEP, then zero-pad to the fixed length
    label_ids = label_ids[:max_len - 1]
    label_ids.append(tag2idx["SEP"])
    label_ids += [0] * (max_len - len(label_ids))

    return label_ids


# Build the BERT wordpiece tokens / token-ids for a sentence (optionally + caption)
def Bert_text(self, token_list, data=" "):
    """Tokenize token_list (plus an optional second segment) into BERT inputs.

    Args:
        token_list: word-level tokens of the input sentence.
        data: optional second segment — an image caption as a str, or a list
            of tokens; the default " " means "no second segment".

    Returns a dict of ntokens / ntokens_id / attention_mask / segment_id, all
    padded to args.train.max_word_length, plus token_index_list: the position
    of each word's first wordpiece.  Note token_index_list is computed BEFORE
    the caption segment is appended, so it indexes the sentence part only.
    """
    ntokens = ["[CLS]"]

    for token in token_list:  # iterate every word
        tokens = self.tokenizer._tokenize(token)

        if len(tokens) == 0:
            # token vanishes under wordpiece tokenization
            ntokens.append('[UNK]')
        elif len(tokens) > 1 and ("_" in token or "." in token or "," in token or "##" not in tokens[1]):
            # irregular multi-piece split: keep only the first piece so the
            # sequence stays aligned with the single label id Bert_label emits
            ntokens.append(tokens[0])
        else:
            ntokens.extend(tokens)

    # truncate and close the first segment with [SEP]
    ntokens = ntokens[:self.args.train.max_word_length - 1]
    ntokens.append("[SEP]")

    # index of each word's first wordpiece (skips '##' continuations and specials)
    token_index_list = []
    for i in range(1, len(ntokens)):
        if '##' not in ntokens[i] and ntokens[i] != '[CLS]' and ntokens[i] != '[SEP]':
            token_index_list.append(i)

    text_len = len(ntokens)
    segment_id = [0] * len(ntokens)

    # append the second segment (caption) when provided
    if data != " ":
        if isinstance(data, list) == True:
            for token in data:
                tokens = self.tokenizer._tokenize(token)
                ntokens.extend(tokens)
        elif isinstance(data, str) == True:
            for token in data.split(" "):
                tokens = self.tokenizer._tokenize(token)
                ntokens.extend(tokens)
        ntokens = ntokens[:self.args.train.max_word_length - 1]
        ntokens.append("[SEP]")
        # second segment is marked with segment id 1
        segment_id.extend([1] * (len(ntokens) - text_len))

    ntokens_id = self.tokenizer.convert_tokens_to_ids(ntokens)
    attention_mask = [1] * len(ntokens_id)

    # pad everything out to max_word_length
    pad_len = self.args.train.max_word_length - len(ntokens_id)
    ntokens.extend(["pad"] * pad_len)
    ntokens_id.extend([0] * pad_len)
    attention_mask.extend([0] * pad_len)
    segment_id.extend([0] * pad_len)

    return {
        "ntokens": ntokens,                 # (max_word_length): wordpiece tokens
        "ntokens_id": ntokens_id,           # (max_word_length): ids in the pretrained BERT vocab
        "attention_mask": attention_mask,   # (max_word_length): 1 where a real token exists
        "segment_id": segment_id,           # (max_word_length): 0/1 marks sentence vs caption segment
        "token_index_list": token_index_list
    }