# -*- coding:utf8 -*-
# @Time : 2023/3/27 15:37
# @Author : WanJie Wu

import copy
import json
import torch
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.distributed import DistributedSampler


# Unicode code points treated as sentence terminators:
# 12290 = '。', 65311 = '？', 63 = '?', 65281 = '！', 33 = '!'
ORD_OF_SENTENCE = [12290, 65311, 63, 65281, 33]


def locate_start_and_end_positions(passages_lst, tokenized_du_reader):
    """Compute answer start/end token positions for each tokenized passage.

    Mutates ``tokenized_du_reader`` in place: pops ``offset_mapping`` and adds
    ``start_positions`` / ``end_positions`` lists. Position pair (0, 0) marks
    "no answer", or an answer that falls outside the tokenized passage window.

    :param passages_lst: list of dicts whose ``spans`` entry is either
        [start_char, end_char] into the passage text or [] for no answer
    :param tokenized_du_reader: tokenizer batch output (BatchEncoding-like:
        supports item access, ``pop`` and ``sequence_ids``)
    """
    start_positions = []
    end_positions = []
    batch_size = len(tokenized_du_reader["attention_mask"])
    offset_mapping = tokenized_du_reader.pop("offset_mapping")
    for idx in range(batch_size):
        sequence_ids = tokenized_du_reader.sequence_ids(idx)
        spans = passages_lst[idx]["spans"]
        if len(spans) == 0:
            start_positions.append(0)
            end_positions.append(0)
            continue

        # Scan forward to the first token of the passage (sequence id == 1)
        token_start_index = 0
        while sequence_ids[token_start_index] != 1:
            token_start_index += 1

        # Scan backward to the last token of the passage
        token_end_idx = len(tokenized_du_reader["input_ids"][idx]) - 1
        while sequence_ids[token_end_idx] != 1:
            token_end_idx -= 1

        # Character offsets of each token for this example
        offsets = offset_mapping[idx]
        start_idx, end_idx = spans
        # The answer must lie fully inside the passage token window;
        # otherwise label this example as "no answer" (0, 0).
        if offsets[token_start_index][0] > start_idx or offsets[token_end_idx][1] < end_idx:
            start_positions.append(0)
            end_positions.append(0)
            continue

        # Advance past every token that starts at or before start_idx,
        # then step back one to land on the answer's first token.
        while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_idx:
            token_start_index += 1

        # Retreat past every token that ends at or after end_idx,
        # then step forward one to land on the answer's last token.
        while offsets[token_end_idx][1] >= end_idx:
            token_end_idx -= 1

        start_positions.append(token_start_index - 1)
        end_positions.append(token_end_idx + 1)
    tokenized_du_reader["start_positions"] = start_positions
    tokenized_du_reader["end_positions"] = end_positions


def content2sentence_lst(content, min_sentence_len=5):
    """Split *content* into a list of sentences at 。？?！! boundaries.

    A terminator only closes a sentence when the accumulated text is longer
    than ``min_sentence_len``; shorter fragments stay merged into the next
    sentence. Trailing text without a terminator becomes a final sentence.
    """
    result = []
    buffer = []
    for character in content:
        buffer.append(character)
        if ord(character) in ORD_OF_SENTENCE and len(buffer) > min_sentence_len:
            result.append("".join(buffer))
            buffer = []
    if buffer:
        result.append("".join(buffer))
    return result


def locate_answer_in_sentence_lst(sentence_lst, answers):
    """Map character-level answer spans onto sentence positions.

    sentence_lst: [sentence1, sentence2, ...]
    answers: [[start_idx, end_idx], [start_idx, end_idx], ...]
    return: [{"from_idx": 0, "start": 0, "to_idx": 0, "end": 20}, ...]
    """
    located = []
    for answer in answers:
        entry = {}
        cursor = 0  # character offset where the current sentence begins
        for position, text in enumerate(sentence_lst):
            upper = cursor + len(text)
            # Inclusive bounds on both ends, matching the original behavior.
            if cursor <= answer[0] <= upper:
                entry["from_idx"] = position
                entry["start"] = answer[0] - cursor
            if cursor <= answer[1] <= upper:
                entry["to_idx"] = position
                entry["end"] = answer[1] - cursor
                break
            cursor = upper
        located.append(entry)
    return located


def get_no_answer_passages(sentence_lst, max_passage_len, tmp_passage_part1=None):
    """Greedily pack sentences into passages of at most ``max_passage_len`` chars.

    Used where there is no answer span to anchor the split. When
    ``tmp_passage_part1`` is given, its passage text and spans seed the first
    chunk so a partially built passage can be continued.

    :param sentence_lst: sentences to pack, in order
    :param max_passage_len: maximum passage length in characters
    :param tmp_passage_part1: optional partial {"passage": str, "spans": list}
    :return: list of {"passage": str, "spans": list} dicts
    """
    passages_lst = []
    passage = tmp_passage_part1["passage"] if tmp_passage_part1 else ""
    spans = tmp_passage_part1["spans"] if tmp_passage_part1 else []
    for sentence in sentence_lst:  # index was unused; plain iteration suffices
        if len(passage) + len(sentence) <= max_passage_len:
            passage += sentence
        else:
            # NOTE(review): when a single sentence exceeds max_passage_len the
            # current (possibly empty) passage is flushed as-is — original
            # behavior, preserved.
            passages_lst.append({
                "passage": passage,
                "spans": spans
            })
            passage = sentence
            spans = []
    if passage:
        passages_lst.append({
            "passage": passage,
            "spans": spans
        })

    return passages_lst


def add_answer_sentences_to_passage(tmp_passage_part1, answer_item, sentence_lst, answer_len_of_sentence, max_passage_len):
    """Append the answer-bearing sentence block to the passage being built.

    When the current partial passage cannot also hold the whole answer block,
    it is flushed first and the answer block starts a fresh passage. The span
    offsets recorded are character positions of the answer inside the passage
    text that results. The input dict is deep-copied, never mutated.

    :return: list of {"passage": str, "spans": list} dicts; the answer always
        lives in the last element
    """
    current = copy.deepcopy(tmp_passage_part1) if tmp_passage_part1 else {"passage": "", "spans": []}

    emitted = []
    # Flush the partial passage if the answer block would overflow it.
    if len(current["passage"]) + answer_len_of_sentence > max_passage_len:
        emitted.append(current)
        current = {"passage": "", "spans": []}

    prefix_len = len(current["passage"])
    answer_sentences = sentence_lst[answer_item["from_idx"]: answer_item["to_idx"] + 1]
    last_sentence_len = len(sentence_lst[answer_item["to_idx"]])
    # Answer start/end re-based onto the passage under construction.
    current["spans"].append(prefix_len + answer_item["start"])
    current["spans"].append(prefix_len + answer_len_of_sentence - last_sentence_len + answer_item["end"])
    current["passage"] = current["passage"] + "".join(answer_sentences)
    emitted.append(current)
    return emitted


def generate_passages_from_sentences(answers, sentence_lst, max_passage_len):
    """Build passages from the sentence list so each answer stays in one passage.

    For every answer the sentences are handled in three parts:
      1. sentences before the answer are packed greedily,
      2. the answer's sentence block is attached (flushing first when it
         would not fit),
      3. trailing sentences up to the next answer continue the same passage.

    :param answers: output of ``locate_answer_in_sentence_lst``
    :param sentence_lst: output of ``content2sentence_lst``
    :param max_passage_len: maximum passage length in characters
    :return: list of {"passage": str, "spans": list} dicts
    """
    passages_lst = []
    sentence_from_idx = 0
    for answer_idx, answer_item in enumerate(answers):
        # Total character length of the sentences spanned by this answer.
        answer_len_of_sentence = sum(len(s) for s in sentence_lst[answer_item["from_idx"]: answer_item["to_idx"] + 1])
        sentence_end_idx = len(sentence_lst) if answer_idx == len(answers) - 1 else answers[answer_idx + 1]["from_idx"]

        # Part 1: pack the sentences that precede the answer.
        passages_part_1 = get_no_answer_passages(sentence_lst[sentence_from_idx:answer_item["from_idx"]], max_passage_len)
        passages_lst.extend(passages_part_1[0:-1])
        # Keep the last (possibly unfinished) chunk so the answer can extend it.
        tmp_passage_1 = passages_part_1[-1] if passages_part_1 else None

        # Part 2: attach the answer block. Always returns a non-empty list, so
        # the previous `assert passages_part_2 is not None` was dead code.
        passages_part_2 = add_answer_sentences_to_passage(tmp_passage_1, answer_item, sentence_lst, answer_len_of_sentence, max_passage_len)
        passages_lst.extend(passages_part_2[0:-1])

        # Part 3: continue the answer passage with the sentences after it.
        passages_part_3 = get_no_answer_passages(sentence_lst[answer_item["to_idx"] + 1:sentence_end_idx], max_passage_len, passages_part_2[-1])
        passages_lst.extend(passages_part_3)

        sentence_from_idx = sentence_end_idx

    return passages_lst


def make_passages_by_content(content_example, max_sequence_len, max_query_len, max_title_len=0, title_tag=None, min_sentence_len=5):
    """
    Split one example into question/passage pairs with answer character spans.

    :param content_example: {"title": str, "content": str, "question": question, "answers":list()}
    :param min_sentence_len: minimum sentence length used when splitting content
    :param max_sequence_len: maximum total sequence length
    :param max_query_len: maximum question length (question is truncated to it)
    :param max_title_len: maximum title length (0 disables the title prefix)
    :param title_tag: special token wrapped around the title, e.g. "[RELATED]"
    :return: list of {"passage": str, "spans": list, "question": str} dicts
    """
    sentence_lst = content2sentence_lst(content_example["content"], min_sentence_len=min_sentence_len)
    new_answers = locate_answer_in_sentence_lst(sentence_lst, content_example["answers"])
    question = content_example["question"][:max_query_len]
    title = content_example["title"][:max_title_len]

    # +2 budgets one unit for each surrounding title_tag.
    # NOTE(review): the string actually prepended below is
    # title_tag + title + title_tag, whose character length is
    # len(title) + 2 * len(title_tag); the span shift uses len(title) + 2.
    # Verify this lines up with the tokenizer's character offsets.
    title_len = len(title) + 2 if title and title_tag else 0
    max_passage_len = max_sequence_len - len(question) - title_len
    title = f"{title_tag}{title}{title_tag}" if title_len else ""

    if not new_answers:
        passages_lst = get_no_answer_passages(sentence_lst, max_passage_len)
    else:
        passages_lst = generate_passages_from_sentences(new_answers, sentence_lst, max_passage_len)

    for passages_dict in passages_lst:
        passages_dict["question"] = question
        passages_dict["passage"] = title + passages_dict["passage"]
        if passages_dict["spans"]:
            # Shift the answer span by the prepended title length.
            passages_dict["spans"] = [title_len + passages_dict["spans"][0], title_len + passages_dict["spans"][1]]
    return passages_lst


def modify_sequence_ids(input_ids, sequence_ids, max_title_len, related_idx):
    """Zero out sequence ids for the tag-wrapped title region, in place.

    Every position with a truthy sequence id is set to 0, up to and including
    the second occurrence of ``related_idx`` (the closing title tag). Already
    falsy entries (0 or None) are left untouched. No-op when ``max_title_len``
    is None. Returns None.
    """
    if max_title_len is None:
        return

    tags_found = 0
    position = 0
    total = len(input_ids)
    # Stop as soon as the closing (second) tag has been processed.
    while position < total and tags_found < 2:
        if sequence_ids[position]:
            if input_ids[position] == related_idx:
                tags_found += 1
            sequence_ids[position] = 0
        position += 1


def update_offset_mapping(tokenized_du_reader, related_idx, max_title_len):
    """Pop and return the offset mapping with non-passage positions as None.

    For every example, positions whose sequence id is not 1 (question and
    special tokens, plus — when ``max_title_len`` is truthy — the tag-wrapped
    title region) are replaced with None so downstream candidate extraction
    skips them. Mutates ``tokenized_du_reader`` by removing ``offset_mapping``.
    """
    offset_mapping = tokenized_du_reader.pop("offset_mapping")
    batch_size = len(tokenized_du_reader["attention_mask"])
    for example_idx in range(batch_size):
        sequence_ids = tokenized_du_reader.sequence_ids(example_idx)
        input_ids = tokenized_du_reader["input_ids"][example_idx]
        if max_title_len:
            # Blank out the title span (between the two related_idx tags).
            modify_sequence_ids(input_ids, sequence_ids, max_title_len, related_idx)
        masked = []
        for pos, offsets in enumerate(offset_mapping[example_idx]):
            masked.append(offsets if sequence_ids[pos] == 1 else None)
        offset_mapping[example_idx] = masked
    return offset_mapping



class MRCDataset(Dataset):
    """Dataset converting MRC examples into tokenized model features.

    In "train" mode it builds tensors including answer start/end positions;
    in any other mode it additionally keeps per-example passages and offset
    mappings (``dev_datasets``) needed to reconstruct answer text at
    prediction time.
    """
    def __init__(self, tokenizer, data_path, max_seq_len, max_query_len, max_title_len, mode="train"):
        """
        :param data_path: path to the dataset file (one JSON object per line)
        :param tokenizer: tokenizer; its vocab must contain "[RELATED]"
        :param max_seq_len: maximum tokenized sequence length
        :param max_query_len: maximum question length in characters
        :param max_title_len: maximum title length in characters
        :param mode: "train" builds labeled features; anything else eval ones
        """
        self.data_path = data_path
        self.tokenizer = tokenizer
        self.mode = mode
        self.max_seq_len = max_seq_len
        self.max_query_len = max_query_len
        self.max_title_len = max_title_len
        self.related_key = "[RELATED]"
        # Vocab id of the title tag (KeyError if the token is missing).
        self.related_idx = tokenizer.get_vocab()[self.related_key]
        self._get_original_data()
        self._get_features()


    def _get_features(self):
        # Eagerly build all features according to the dataset mode.
        if self.mode == "train":
            self.features = self.convert_du_reader_2_train_features()
        else:
            self.features, self.dev_datasets = self.convert_du_reader_2_test_datasets()

    def _get_original_data(self):
        # Load the raw examples: one JSON object per line.
        with open(self.data_path, "r") as train_f:
            total_row = [json.loads(row) for row in train_f.readlines()]

        self.original_data = total_row

    def common_convert_func(self, input_x):
        """Split one example into passages and tokenize question/passage pairs."""
        passages_lst = make_passages_by_content(input_x, self.max_seq_len, self.max_query_len, self.max_title_len, self.related_key)
        tokenized_du_reader = self.tokenizer(
            text=[passage["question"] for passage in passages_lst],
            text_pair=[passage["passage"] for passage in passages_lst],
            truncation="only_second",
            max_length=self.max_seq_len,
            return_offsets_mapping=True,
            padding="max_length"
        )
        return passages_lst, tokenized_du_reader

    def convert_du_reader_2_train_features(self):
        """Build training tensors, including answer start/end token positions."""
        total_features = {
            "input_ids": list(),
            "token_type_ids": list(),
            "attention_mask": list(),
            "start_positions": list(),
            "end_positions": list()
        }
        for idx, input_x in tqdm(enumerate(self.original_data), total=len(self.original_data), desc="训练数据转换处理进度条..."):
            passages_lst, tokenized_du_reader = self.common_convert_func(input_x)
            # Mutates tokenized_du_reader: pops offset_mapping and adds
            # start_positions / end_positions.
            locate_start_and_end_positions(passages_lst, tokenized_du_reader)
            for key in total_features.keys():
                total_features[key].extend(tokenized_du_reader[key])
            del passages_lst
            del tokenized_du_reader

        result = dict()
        for key, val in total_features.items():
            result[key] = torch.tensor(val, dtype=torch.long)
        del total_features
        return result

    def convert_du_reader_2_test_datasets(self):
        """Build eval tensors plus per-example metadata for answer recovery."""
        total_features = {
            "input_ids": list(),
            "token_type_ids": list(),
            "attention_mask": list()
        }
        data_ids = []  # example id of each feature
        passage_ids = []  # passage index of each feature within its example
        dev_datasets = dict()
        content_id = 0
        for idx, input_x in tqdm(enumerate(self.original_data), total=len(self.original_data), desc="测试数据转换进度条..."):
            passages_lst, tokenized_du_reader = self.common_convert_func(input_x)
            for key in total_features.keys():
                total_features[key].extend(tokenized_du_reader[key])
            # Pops offset_mapping and masks title/question positions to None.
            offset_mapping = update_offset_mapping(tokenized_du_reader, self.related_idx, self.max_title_len)
            dev_datasets[content_id] = {
                "passages": passages_lst,
                "offset_mapping": offset_mapping,
                "example": input_x,
            }
            for i in range(len(passages_lst)):
                data_ids.append(content_id)
                passage_ids.append(i)
            content_id += 1
            del tokenized_du_reader
            del passages_lst

        total_features["data_ids"] = data_ids
        total_features["passage_ids"] = passage_ids

        result = dict()
        for key, val in total_features.items():
            result[key] = torch.tensor(val, dtype=torch.long)
        del total_features
        return result, dev_datasets

    def __getitem__(self, index):
        # Label fields only exist in train mode; id fields only in eval mode.
        if self.mode == "train":
            input_id = self.features["input_ids"][index]
            token_type_id = self.features["token_type_ids"][index]
            attention_mask = self.features["attention_mask"][index]
            start_position = self.features["start_positions"][index]
            end_position = self.features["end_positions"][index]
            return {
                "input_ids": input_id,
                "token_type_ids": token_type_id,
                "attention_mask": attention_mask,
                "start_positions": start_position,
                "end_positions": end_position
            }
        else:
            input_id = self.features["input_ids"][index]
            token_type_id = self.features["token_type_ids"][index]
            attention_mask = self.features["attention_mask"][index]
            data_id = self.features["data_ids"][index]
            passage_id = self.features["passage_ids"][index]
            return {
                "input_ids": input_id,
                "token_type_ids": token_type_id,
                "attention_mask": attention_mask,
                "data_ids": data_id,
                "passage_ids": passage_id,
            }

    def __len__(self):
        return self.features["input_ids"].size(0)


def init_data_loader(
        tokenizer,
        data_path,
        num_workers=0,
        batch_size=4,
        is_distributed=False,
        max_seq_len=512,
        max_query_len=32,
        max_title_len=128,
        mode="train"):
    """Build an MRCDataset and wrap it in a DataLoader.

    :param data_path: dataset file path
    :param tokenizer: tokenizer passed through to MRCDataset
    :param num_workers: number of worker subprocesses for data loading
    :param batch_size: samples per batch
    :param is_distributed: use a DistributedSampler instead of shuffling
    :param mode: "train" returns only the loader; otherwise also dev_datasets
    :param max_seq_len: maximum sequence length
    :param max_query_len: maximum question length
    :param max_title_len: maximum title length
    :return: DataLoader, or (DataLoader, dev_datasets) outside train mode
    """
    mrc_dataset = MRCDataset(
        tokenizer=tokenizer,
        data_path=data_path,
        max_seq_len=max_seq_len,
        max_query_len=max_query_len,
        max_title_len=max_title_len,
        mode=mode,
    )
    # In distributed mode the sampler controls ordering; otherwise shuffle.
    data_sampler = DistributedSampler(dataset=mrc_dataset) if is_distributed else None

    data_loader = DataLoader(
        dataset=mrc_dataset,
        batch_size=batch_size,
        shuffle=data_sampler is None,
        sampler=data_sampler,
        num_workers=num_workers,
    )
    if mode == "train":
        return data_loader
    return data_loader, mrc_dataset.dev_datasets


def get_features_ids_per_example(data_ids, all_passage_ids):
    """Group feature indices by example id, ordered by passage id.

    Each example is split into several passages (features); this maps every
    example id to its list of global feature indices, sorted so position p
    holds the feature of passage p.

    :param data_ids: per-feature example id
    :param all_passage_ids: per-feature passage index within its example
    :return: defaultdict(data_id -> [feature_idx, ...] in passage order)
    """
    # Each example owns multiple features; collect them in encounter order.
    tmp_features = defaultdict(list)
    for idx, data_id in enumerate(data_ids):
        tmp_features[data_id].append(idx)

    # Sort each example's feature indices by passage id. The previous code
    # applied the passage-id permutation directly (vals[passage_id]), which is
    # the INVERSE of the required reordering and only worked because features
    # are generated in passage order; sorting is correct in all cases and
    # identical for already-ordered input.
    features_ids_per_example = defaultdict(list)
    for data_id, vals in tmp_features.items():
        features_ids_per_example[data_id] = sorted(vals, key=lambda v: all_passage_ids[v])
    return features_ids_per_example


def get_candidates_by_logits(
        start_logit,
        end_logit,
        offset_mapping,
        search_depth: int,
        max_answer_length: int):
    """Enumerate candidate answer spans from the top start/end logits.

    Only spans fully inside the passage (offset not None), with the end index
    strictly after the start index and a length of at most
    ``max_answer_length`` tokens, are kept. Each candidate records character
    offsets and the summed start+end logit.
    """
    candidates_lst = []
    # Indices of the search_depth highest start/end logits, best first.
    top_starts = np.argsort(start_logit)[-1: -search_depth - 1: -1].tolist()
    top_ends = np.argsort(end_logit)[-1: -search_depth - 1: -1].tolist()
    valid_len = len(offset_mapping)
    for s_idx in top_starts:
        for e_idx in top_ends:
            # Skip out-of-range indices and masked (non-passage) positions.
            out_of_passage = (
                s_idx >= valid_len
                or e_idx >= valid_len
                or offset_mapping[s_idx] is None
                or offset_mapping[e_idx] is None
            )
            if out_of_passage:
                continue
            # Require end strictly after start and a bounded answer length.
            if e_idx <= s_idx or e_idx - s_idx + 1 > max_answer_length:
                continue
            candidates_lst.append({
                "start": offset_mapping[s_idx][0],
                "end": offset_mapping[e_idx][1],
                "logit": start_logit[s_idx] + end_logit[e_idx],
            })
    return candidates_lst


def get_passage_candidates(
        features_ids_per_example,
        data_id,
        example,
        all_start_logits,
        all_end_logits,
        all_passage_ids,
        search_depth,
        max_answer_length,
    ):
    """Collect answer candidates for one example across all of its passages.

    Candidate spans from each passage are re-based onto the concatenation of
    the passage texts (title prefixes stripped via the first valid offset) so
    they can be compared globally. Also tracks the minimum "no answer" score
    over all passages.

    :return: (prelim_predictions, min_null_score); the null candidate is
        always appended to the prediction list as well.
    """
    # ============ This section is critically important — do not change it casually ============
    feature_indices = features_ids_per_example[data_id]
    passage_begin_id = 0  # running char offset of the current passage in `content`
    min_null_score = None
    content = ""  # concatenation of all passage texts (title prefixes removed)
    prelim_predictions = []
    for feature_idx in feature_indices:
        start_logit = all_start_logits[feature_idx]
        end_logit = all_end_logits[feature_idx]
        passage_id = all_passage_ids[feature_idx]

        # Fetch this passage's text and its masked offset mapping
        offset_mapping = example["offset_mapping"][passage_id]
        passage = example["passages"][passage_id]
        # First valid character offset = length of the prefix to strip
        offset_mapping_index = [item for item in offset_mapping if item is not None]
        begin_idx = offset_mapping_index[0][0]
        text = passage["passage"][begin_idx:]
        content += text
        # Track the lowest-scoring CLS (no-answer) candidate across passages
        if min_null_score is None or min_null_score["logit"] > start_logit[0] + end_logit[0]:
            min_null_score = {
                "start": 0,
                "end": 0,
                "logit": start_logit[0] + end_logit[0],
                "answer": ""
            }
        # Enumerate span candidates for this passage
        candidates_lst = get_candidates_by_logits(start_logit, end_logit, offset_mapping, search_depth, max_answer_length)
        assert isinstance(begin_idx, int)
        # Re-base candidate offsets from passage-local to global `content` coords

        for candidate in candidates_lst:
            start = candidate["start"] + passage_begin_id - begin_idx
            end = candidate["end"] + passage_begin_id - begin_idx
            candidate["start"] = start
            candidate["end"] = end
            candidate["answer"] = content[start: end]

        end_idx = offset_mapping_index[-1][1] - begin_idx
        passage_begin_id += end_idx
        prelim_predictions.extend(candidates_lst)
    prelim_predictions.append(min_null_score)
    return prelim_predictions, min_null_score


def post_process_of_qa_predictions(
        all_start_logits,
        all_end_logits,
        all_data_ids,
        all_passage_ids,
        dev_datasets,
        search_depth: int = 20,
        top_k: int=1,
        max_answer_length: int = 200,
):
    """Turn raw start/end logits into per-example answer predictions.

    :param all_start_logits: per-feature start logits
    :param all_end_logits: per-feature end logits
    :param all_data_ids: per-feature example id
    :param all_passage_ids: per-feature passage index within its example
    :param dev_datasets: metadata built by MRCDataset (passages, offsets, example)
    :param search_depth: how many top start/end indices to combine
    :param top_k: number of predictions kept per example
    :param max_answer_length: maximum answer span length in tokens
    :return: (predictions_result, ground_truth_result) keyed by example id
    """
    features_ids_per_example = get_features_ids_per_example(all_data_ids, all_passage_ids)
    predictions_result = defaultdict(list)
    ground_truth_result = defaultdict(dict)
    for data_id, example in dev_datasets.items():
        ground_truth_result[data_id] = example["example"]
        prelim_predictions, min_null_score = get_passage_candidates(features_ids_per_example, data_id, example, all_start_logits, all_end_logits, all_passage_ids, search_depth, max_answer_length)
        # Sort candidates by logit (descending), keep the top search_depth;
        # re-append the no-answer candidate if it was truncated away.

        prelim_predictions = sorted(prelim_predictions, key=lambda x: x["logit"], reverse=True)[:search_depth]
        if not any((p["start"], p["end"]) == (0, 0) for p in prelim_predictions):
            prelim_predictions.append(min_null_score)

        # Convert logits to softmax scores (pops "logit" from each candidate).
        probs = torch.softmax(torch.tensor([pred.pop("logit") for pred in prelim_predictions], dtype=torch.float), dim=0).tolist()
        for score, pred in zip(probs, prelim_predictions):
            pred["score"] = score

        top_n_candidates = prelim_predictions[0:top_k]
        # Drop the no-answer candidate and everything ranked after it.
        top_k_candidates = []
        for candidate in top_n_candidates:
            if candidate["start"] == candidate["end"] == 0:
                break
            top_k_candidates.append(candidate)
        predictions_result[data_id] = top_k_candidates

    return predictions_result, ground_truth_result


if __name__ == "__main__":
    # Smoke test: build passages for one hand-written example and print them.
    example = {
        "title": "《禁室培欲》哪部最好看？",
        "content": "当然是第一部。芧：。还是不能。向海德薇希地窗畔仰望。[~。禁室培欲-日本禁锢类电影禁室培欲系列。日本特有的一类禁锢类电影，影片集中反映出现代日本人对感情缺失>的一种极端恐惧的表现，其中最为著名的就是《禁室培欲》系列，该系列目前共衍生出了9部影片。透过每部影片中形形色色的绑架人以及不尽相同的被禁锢人之间所上演的一幕幕令人匪夷所思的情节，从中我们不难看出日本文化所>具备的多重性。并且似乎能从中窥探出所具备的斯德哥尔摩综合症(stockholmsyndrome)情节。但不管怎样，借助电影这种表现形式所展现出的文化所多重性以及更深层次的涵义，《禁室培欲》系列作品展现在受众眼前的是深沉、抑>郁甚至是凄厉……。",
        "question": "禁室培欲几好看",
        "answers": [[3, 7]]
    }
    print(json.dumps(make_passages_by_content(example, 512, 16, 16, title_tag="[RELATED]"), indent=2, ensure_ascii=False))
