import json
import os
from collections import defaultdict
from random import choice

import torch

from ..config.config import Config

class TransferData():
    """
    Convert raw annotated corpus files into the formats the models train on:

    * token-level sequence labelling (one ``<char>\\t<BIO-tag>`` line per
      character) for the LSTM-CRF NER task, and
    * per-sample label tensors (subject/object head-tail indicators plus
      relation ids) for the BERT relation-extraction task.
    """

    def __init__(self):
        """
        Load the project configuration and the two label-mapping dictionaries.
        """
        # Project-wide configuration (paths, tokenizer, device, num_rel, ...).
        self.config = Config()
        # 1. Entity-category dictionary (Chinese category -> English tag).
        #    Use a context manager so the file handle is closed promptly
        #    (the original `json.load(open(...))` leaked the handle), and
        #    read as UTF-8 explicitly — presumably the JSON files are UTF-8;
        #    TODO confirm against the data files.
        with open(self.config.label_path, encoding='utf-8') as f:
            self.label_dict = json.load(f)
        # 2. Entity tag -> id dictionary.
        with open(self.config.tag2id_path, encoding='utf-8') as f:
            self.tag2id_dict = json.load(f)

    def transfer(self):
        """
        Convert every raw corpus file into token-per-line training data.

        Walks ``config.origin_path`` looking for raw-text files (file names
        containing ``original``, i.e. ``*.txtoriginal.txt``).  For each one,
        the sibling annotation file (same name minus the ``.txtoriginal``
        infix) supplies the entity labels.  Every character of the raw text
        is written to ``config.train_txt_path`` as ``<char>\\t<tag>``, with
        non-entity characters tagged ``'O'``.

        :return: None; output is written to ``config.train_txt_path``.
        """
        # Output file holding the converted training samples.
        with open(self.config.train_txt_path, 'w', encoding='utf-8') as fw:
            # Recursively visit every directory under the raw-data root.
            for root, dirs, files in os.walk(self.config.origin_path):
                for file in files:
                    # Only process raw-text files; annotation files are
                    # reached via the derived path below.
                    if 'original' not in file:
                        continue
                    # Raw-text path and the matching annotation-file path.
                    file_path = os.path.join(root, file)
                    label_file_path = file_path.replace('.txtoriginal', '')
                    # {char index: BIO tag} for every entity character,
                    # e.g. {4: 'B-LOC', 5: 'I-LOC', 6: 'I-LOC'}.
                    res_dict = self.read_label_text(label_file_path)
                    # Non-entity characters come from the raw text itself;
                    # res_dict only covers entity positions, so everything
                    # else defaults to the 'O' tag.
                    with open(file_path, 'r', encoding='utf-8') as fr:
                        content = fr.read().strip()
                        for idx, char in enumerate(content):
                            char_label = res_dict.get(idx, 'O')
                            fw.write(char + '\t' + char_label + '\n')

    def read_label_text(self, label_file_path):
        """
        Build the {character index: BIO tag} dict for one annotation file.

        Each annotation line looks like ``右髋部\\t21\\t23\\t身体部位``:
        entity text, start index, end index (inclusive), Chinese category.

        :param label_file_path: path of the tab-separated annotation file
        :return: dict mapping char index -> ``'B-XXX'`` / ``'I-XXX'`` tag
        """
        res_dict = {}
        with open(label_file_path, 'r', encoding='utf-8') as f_label:
            for line in f_label:
                # ['右髋部', '21', '23', '身体部位']
                res = line.strip().split('\t')
                # Robustness: skip blank or malformed lines instead of
                # raising IndexError (trailing newlines are common).
                if len(res) < 4:
                    continue
                # Inclusive start/end character indices of the entity.
                start = int(res[1])
                end = int(res[2])
                # Chinese category -> English tag, e.g. '身体部位' -> 'TRE'.
                label = res[3]
                label_tag = self.label_dict.get(label)
                if label_tag is None:
                    # Unknown category: skip rather than emit 'B-None'
                    # (the original crashed with TypeError here).
                    continue
                # Tag the span: 'B-' on the first char, 'I-' on the rest.
                for i in range(start, end + 1):
                    res_dict[i] = ('B-' if i == start else 'I-') + label_tag
        return res_dict

    # ------------------------------------------------------------------
    # BERT relation-extraction helpers
    # ------------------------------------------------------------------
    def find_head_idx(self, source, target):
        """
        Return the start index of ``target`` as a contiguous sub-sequence
        of ``source``.

        :param source: token-id list of the full text (tokenizer output)
        :param target: token-id list of one entity
        :return: first matching start index, or -1 if not found
        """
        target_len = len(target)
        # Only positions where target can still fully fit need checking.
        for i in range(len(source) - target_len + 1):
            if source[i: i + target_len] == target:
                return i
        return -1

    def create_label(self, inner_triples, inner_input_ids, seq_len):
        """
        Build the per-sample training tensors for relation extraction:
        the chosen subject's length and span mask, all subject head/tail
        indicators, and the object head/tail-by-relation indicators.

        :param inner_triples: list of spo triples for one sample, each a
                              dict with 'subject'/'predicate'/'object' keys
        :param inner_input_ids: tokenizer ids of the raw text
        :param seq_len: padded sequence length of the batch
        :return: (sub_len, sub_head2tail, sub_heads, sub_tails,
                  obj_heads, obj_tails)
        """
        # 0/1 indicators for every subject head / tail position.
        inner_sub_heads, inner_sub_tails = torch.zeros(seq_len), torch.zeros(seq_len)
        # Per-position, per-relation indicators for object heads / tails.
        inner_obj_heads = torch.zeros((seq_len, self.config.num_rel))
        inner_obj_tails = torch.zeros((seq_len, self.config.num_rel))
        # Mask covering one randomly chosen subject span (head..tail).
        inner_sub_head2tail = torch.zeros(seq_len)

        # Some samples carry no usable triple; default the subject length
        # to 1 so a later division by it cannot raise ZeroDivisionError —
        # the numerator is all-zero in that case, so any non-zero works.
        inner_sub_len = torch.tensor([1], dtype=torch.float)
        # subject span -> list of (obj head, obj tail, relation id).
        s2ro_map = defaultdict(list)
        for inner_triple in inner_triples:
            inner_triple = (
                self.config.tokenizer(inner_triple['subject'], add_special_tokens=False)['input_ids'],
                self.config.rel2id(inner_triple['predicate']),
                self.config.tokenizer(inner_triple['object'], add_special_tokens=False)['input_ids']
            )
            sub_head_idx = self.find_head_idx(inner_input_ids, inner_triple[0])
            obj_head_idx = self.find_head_idx(inner_input_ids, inner_triple[2])
            # Keep the triple only when both entities occur in the text.
            if sub_head_idx != -1 and obj_head_idx != -1:
                sub = (sub_head_idx, sub_head_idx + len(inner_triple[0]) - 1)
                # e.g. {(3, 5): [(7, 8, 0)]} where 0 is the relation id.
                s2ro_map[sub].append(
                    (obj_head_idx, obj_head_idx + len(inner_triple[2]) - 1, inner_triple[1]))
        if s2ro_map:
            # Mark the boundaries of every subject found in the text.
            for s in s2ro_map:
                inner_sub_heads[s[0]] = 1
                inner_sub_tails[s[1]] = 1
            # Randomly pick ONE subject; its objects/relations become the
            # supervision signal for the object taggers.
            sub_head_idx, sub_tail_idx = choice(list(s2ro_map.keys()))
            inner_sub_head2tail[sub_head_idx:sub_tail_idx + 1] = 1
            inner_sub_len = torch.tensor([sub_tail_idx + 1 - sub_head_idx], dtype=torch.float)
            for ro in s2ro_map.get((sub_head_idx, sub_tail_idx), []):
                inner_obj_heads[ro[0]][ro[2]] = 1
                inner_obj_tails[ro[1]][ro[2]] = 1
        return inner_sub_len, inner_sub_head2tail, inner_sub_heads, inner_sub_tails, inner_obj_heads, inner_obj_tails

    def collate_fn(self, data):
        """
        Batch-collate raw (text, triples) samples into model tensors.

        :param data: list of (text, spo_list) pairs, where spo_list is a
                     list of triple dicts with 'subject', 'predicate',
                     'object' (plus type) keys
        :return: (inputs, labels) — two dicts of batched tensors, all moved
                 to ``config.device``.  ``inputs`` holds 'input_ids',
                 'mask', 'sub_head2tail', 'sub_len'; ``labels`` holds
                 'sub_heads', 'sub_tails', 'obj_heads', 'obj_tails'.
        """
        text_list = [value[0] for value in data]
        triple = [value[1] for value in data]
        # Tokenize the whole batch, padding to the longest sentence.
        text = self.config.tokenizer.batch_encode_plus(text_list, padding=True)
        batch_size = len(text['input_ids'])
        seq_len = len(text['input_ids'][0])
        sub_heads = []
        sub_tails = []
        obj_heads = []
        obj_tails = []
        sub_len = []
        sub_head2tail = []
        # Turn each sample's entity annotations into label tensors.
        for batch_index in range(batch_size):
            inner_input_ids = text['input_ids'][batch_index]  # one sentence as token ids
            inner_triples = triple[batch_index]
            results = self.create_label(inner_triples, inner_input_ids, seq_len)
            sub_len.append(results[0])
            sub_head2tail.append(results[1])
            sub_heads.append(results[2])
            sub_tails.append(results[3])
            obj_heads.append(results[4])
            obj_tails.append(results[5])
        input_ids = torch.tensor(text['input_ids']).to(self.config.device)
        mask = torch.tensor(text['attention_mask']).to(self.config.device)
        # torch.stack joins the per-sample tensors along a new batch dim;
        # all stacked tensors share the same (padded) shape.
        sub_heads = torch.stack(sub_heads).to(self.config.device)
        sub_tails = torch.stack(sub_tails).to(self.config.device)
        sub_len = torch.stack(sub_len).to(self.config.device)
        sub_head2tail = torch.stack(sub_head2tail).to(self.config.device)
        obj_heads = torch.stack(obj_heads).to(self.config.device)
        obj_tails = torch.stack(obj_tails).to(self.config.device)

        inputs = {
            'input_ids': input_ids,
            'mask': mask,
            'sub_head2tail': sub_head2tail,
            'sub_len': sub_len
        }
        labels = {
            'sub_heads': sub_heads,
            'sub_tails': sub_tails,
            'obj_heads': obj_heads,
            'obj_tails': obj_tails
        }
        return inputs, labels

    def extract_sub(self, pred_sub_heads, pred_sub_tails):
        """
        Extract all predicted entity spans from head/tail indicator vectors.

        :param pred_sub_heads: 0/1 vector of predicted entity head positions
        :param pred_sub_tails: 0/1 vector of predicted entity tail positions
        :return: list of (head, tail) index pairs
        """
        subs = []
        # Indices of every position flagged 1.
        heads = torch.arange(0, len(pred_sub_heads), device=self.config.device)[pred_sub_heads == 1]
        tails = torch.arange(0, len(pred_sub_tails), device=self.config.device)[pred_sub_tails == 1]
        # NOTE(review): pairing the i-th head with the i-th tail assumes the
        # model predicts matching counts in order — confirm for nested or
        # partially predicted entities.
        for head, tail in zip(heads, tails):
            if tail >= head:
                subs.append((head.item(), tail.item()))
        return subs

    def extract_obj_and_rel(self, obj_heads, obj_tails):
        """
        Decode object spans and their relation ids from the prediction grids.

        :param obj_heads: (seq_len, num_rel) object-head scores (0/1)
        :param obj_tails: (seq_len, num_rel) object-tail scores (0/1)
        :return: list of (rel_index, start_index, end_index) tuples
        """
        # Transpose to (num_rel, seq_len) so each row is one relation.
        obj_heads = obj_heads.T
        obj_tails = obj_tails.T
        rel_count = obj_heads.shape[0]
        obj_and_rels = []

        for rel_index in range(rel_count):
            obj_head = obj_heads[rel_index]
            obj_tail = obj_tails[rel_index]
            # Reuse the span extractor on this relation's head/tail rows.
            objs = self.extract_sub(obj_head, obj_tail)
            if objs:
                for obj in objs:
                    start_index, end_index = obj
                    obj_and_rels.append((rel_index, start_index, end_index))
        return obj_and_rels

    def convert_score_to_zero_one(self, tensor):
        """
        Binarize scores in place with a 0.5 threshold: values >= 0.5
        become 1, values < 0.5 become 0.

        :param tensor: score tensor (modified in place)
        :return: the same tensor, binarized
        """
        tensor[tensor >= 0.5] = 1
        tensor[tensor < 0.5] = 0
        return tensor