import logging
import os
import random
from collections import defaultdict

import numpy as np
import torch


def get_logger(filename, verbosity=1, name=None):
    """
    Build a logger that writes to both *filename* and the console.

    :param filename: path of the log file (opened in "w" mode, truncating it)
    :param verbosity: 0 -> DEBUG, 1 -> INFO, 2 -> WARNING
    :param name: logger name passed to ``logging.getLogger``; ``None`` means
                 the root logger
    :return: the configured ``logging.Logger``
    """
    level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
    formatter = logging.Formatter(
        "[%(asctime)s][%(filename)s][%(levelname)s] %(message)s"
    )
    logger = logging.getLogger(name)
    logger.setLevel(level_dict[verbosity])

    # Bug fix: logging.getLogger returns the SAME object for the same name,
    # so unconditionally re-adding handlers on every call caused each log
    # record to be written multiple times. Attach handlers only once.
    if not logger.handlers:
        fh = logging.FileHandler(filename, "w")
        fh.setFormatter(formatter)
        logger.addHandler(fh)

        sh = logging.StreamHandler()
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    return logger


def seed_everything(seed=1024):
    """
    Seed every random-number generator used in the project (Python ``random``,
    ``PYTHONHASHSEED``, NumPy, and PyTorch CPU/GPU) for reproducible runs.

    :param seed: seed value applied to all generators
    :return: None
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Even with fixed seeds, cuDNN may select non-deterministic algorithms;
    # force deterministic kernel selection.
    torch.backends.cudnn.deterministic = True


def min_distance(a, b, c, d):
    """Return the smaller of the two cross gaps between spans (a, b) and (c, d)."""
    gap_inner = abs(b - c)
    gap_outer = abs(a - d)
    return gap_inner if gap_inner < gap_outer else gap_outer


def min_distance1(a, b, c, d, e, f):
    return max(abs(a-d),abs(a-f),abs(c-b),abs(c-f),abs(e-b),abs(e-d))


# **Entity** start and end position pairing
def filter_unpaired(start_prob, end_prob, start, end, imp_start):
    """
    Filter out unpaired or mismatched start/end predictions.
    The first four parameters are parallel lists.

    :param start_prob: probability that an aspect/opinion/adverb starts at each
                       position in ``start``
    :param end_prob:   probability that it ends at each position in ``end``
    :param start:      predicted start positions
    :param end:        predicted end positions
    :param imp_start:  index of the first element of the review
                       (presumably the slot reserved for implicit targets —
                       TODO confirm against the caller)
    :return: (filtered_start, filtered_end, filtered_prob) — matched span
             boundaries and the product of their start/end probabilities
    """
    filtered_start = []
    filtered_end = []
    filtered_prob = []
    if len(start) > 0 and len(end) > 0:
        # Build a marker sequence long enough to cover the last predicted index:
        # +1 marks a start, +2 marks an end, so 3 means start and end coincide.
        length = start[-1] + 1 if start[-1] >= end[-1] else end[-1] + 1
        temp_seq = [0] * length
        for s in start:
            temp_seq[s] += 1
        for e in end:
            temp_seq[e] += 2
        last_start = -1
        for idx in range(len(temp_seq)):
            # Each position should hold at most one start and one end marker.
            assert temp_seq[idx] < 4
            # Position is only a start: remember it as the open span boundary.
            if temp_seq[idx] == 1:
                last_start = idx
            # Position is only an end: close the most recent open start.
            elif temp_seq[idx] == 2:
                # Span length is capped at 5 tokens; longer pairings are dropped.
                if last_start != -1 and idx - last_start <= 5:
                    # Skip spans that begin at the implicit-marker position
                    # but do not end there (NOTE(review): assumed intent).
                    if last_start == imp_start and idx != last_start:
                        continue
                    filtered_start.append(last_start)
                    filtered_end.append(idx)
                    # .index returns the FIRST occurrence — assumes positions
                    # are unique within `start` and `end`.
                    prob = start_prob[start.index(last_start)] * end_prob[end.index(idx)]
                    filtered_prob.append(prob)
                last_start = -1
            # Start and end coincide: a single-token span.
            elif temp_seq[idx] == 3:
                if last_start == imp_start and idx != last_start:
                    continue
                filtered_start.append(idx)
                filtered_end.append(idx)
                prob = start_prob[start.index(idx)] * end_prob[end.index(idx)]
                filtered_prob.append(prob)
                last_start = -1
    return filtered_start, filtered_end, filtered_prob


# Required by MRC-CLRI ACOS training and prediction.
def pair_combine(forward_pair_list, forward_pair_prob, forward_pair_idx_list,
                 backward_pair_list, backward_pair_prob, backward_pair_idx_list,
                 alpha, beta=0):
    """
    Merge forward (aspect->opinion) and backward (opinion->aspect) pair
    predictions into per-aspect opinion groups.

    :param alpha: confidence threshold; a pair predicted in only one direction
                  is kept only if its probability reaches ``alpha``
    :param beta:  distance filter; 0 disables filtering, otherwise pairs whose
                  span distance exceeds ``beta`` are dropped
    :return: (final_asp_list, final_opi_list, final_asp_idx_list,
              final_opi_idx_list) — ``final_opi_list[i]`` holds every opinion
              matched to ``final_asp_list[i]``
    """
    forward_pair_list2, forward_pair_prob2, forward_pair_idx_list2 = [], [], []
    backward_pair_list2, backward_pair_prob2, backward_pair_idx_list2 = [], [], []
    # beta: distance filter
    if beta == 0:
        forward_pair_list2, forward_pair_prob2, forward_pair_idx_list2 = (forward_pair_list, forward_pair_prob,
                                                                          forward_pair_idx_list)
        backward_pair_list2, backward_pair_prob2, backward_pair_idx_list2 = (backward_pair_list, backward_pair_prob,
                                                                             backward_pair_idx_list)
    else:
        assert len(forward_pair_list) == len(forward_pair_prob) == len(forward_pair_idx_list)
        assert len(backward_pair_list) == len(backward_pair_prob) == len(backward_pair_idx_list)
        for idx in range(len(forward_pair_idx_list)):
            # Distance between the aspect span and the opinion span.
            if min_distance(forward_pair_idx_list[idx][0], forward_pair_idx_list[idx][1],
                            forward_pair_idx_list[idx][2], forward_pair_idx_list[idx][-1]) <= beta:
                forward_pair_list2.append(forward_pair_list[idx])
                forward_pair_prob2.append(forward_pair_prob[idx])
                forward_pair_idx_list2.append(forward_pair_idx_list[idx])
        for idx in range(len(backward_pair_idx_list)):
            if min_distance(backward_pair_idx_list[idx][0], backward_pair_idx_list[idx][1],
                            backward_pair_idx_list[idx][2], backward_pair_idx_list[idx][-1]) <= beta:
                backward_pair_list2.append(backward_pair_list[idx])
                backward_pair_prob2.append(backward_pair_prob[idx])
                backward_pair_idx_list2.append(backward_pair_idx_list[idx])
    # forward combine
    final_asp_list, final_opi_list, final_asp_idx_list, final_opi_idx_list = [], [], [], []
    for idx in range(len(forward_pair_list2)):
        # Keep a forward pair if it also appears backward, or is confident enough.
        if forward_pair_list2[idx] not in backward_pair_list2 and forward_pair_prob2[idx] < alpha:
            continue
        if forward_pair_list2[idx][0] not in final_asp_list:
            final_asp_list.append(forward_pair_list2[idx][0])  # eg. [[7676, 1456], ...append]
            final_opi_list.append([forward_pair_list2[idx][1]])
            # Several opinions may modify the same aspect.
            final_asp_idx_list.append(forward_pair_idx_list2[idx][0:2])
            final_opi_idx_list.append([forward_pair_idx_list2[idx][2:4]])  # add one nesting level
        else:
            # Aspect is already present in final_asp_list.
            asp_index = final_asp_list.index(forward_pair_list2[idx][0])
            if forward_pair_list2[idx][1] not in final_opi_list[asp_index]:
                # Opinion not recorded yet for this aspect.
                final_opi_list[asp_index].append(forward_pair_list2[idx][1])   # [[[opi1, opi2], [opi3]], ...]
                final_opi_idx_list[asp_index].append(forward_pair_idx_list2[idx][2:4])

    # Backward pass: only pairs NOT already handled by the forward pass.
    for idx in range(len(backward_pair_list2)):
        if backward_pair_list2[idx] not in forward_pair_list2:
            if backward_pair_prob2[idx] < alpha:
                continue  # confidence too low; discard this prediction
            if backward_pair_list2[idx][0] not in final_asp_list:
                final_asp_list.append(backward_pair_list2[idx][0])  # [[asp1], [asp2]...]
                final_opi_list.append([backward_pair_list2[idx][1]])  # [[[opi1, opi2], [opi3]], [[opi1]]...]

                final_asp_idx_list.append(backward_pair_idx_list2[idx][0:2])
                final_opi_idx_list.append([backward_pair_idx_list2[idx][2:4]])  # add one nesting level
            else:  # aspect already present
                asp_index = final_asp_list.index(backward_pair_list2[idx][0])
                if backward_pair_list2[idx][1] not in final_opi_list[asp_index]:  # opinion missing
                    # [[[opi1, opi2], [opi3]], [[opi1], [opi2]]...]
                    final_opi_list[asp_index].append(backward_pair_list2[idx][1])
                    final_opi_idx_list[asp_index].append(backward_pair_idx_list2[idx][2:4])
    return final_asp_list, final_opi_list, final_asp_idx_list, final_opi_idx_list


def triplet_combine(forward_triplet_list, forward_triplet_prob, forward_triplet_idx_list,
                    backward_triplet_list, backward_triplet_prob, backward_triplet_idx_list,
                    alpha, beta=0, delta=5):
    """
    Merge forward and backward (asp, opi, adv) triplet predictions into nested
    per-aspect / per-opinion adverb groups.

    :param alpha: confidence threshold for triplets predicted in only one direction
    :param beta:  distance filter center; 0 disables distance filtering
    :param delta: half-width of the accepted distance window [beta-delta, beta+delta]
    :return: (final_asp_list, final_opi_list, final_adv_list,
              final_asp_idx_list, final_opi_idx_list, final_adv_idx_list)
              where adv lists are nested two levels deep: per aspect, per opinion.
    """
    # Working copies of the candidate lists.
    forward_triplet_list2, forward_triplet_prob2, forward_triplet_idx_list2 = [], [], []
    backward_triplet_list2, backward_triplet_prob2, backward_triplet_idx_list2 = [], [], []
    # Distance-based pruning.
    if beta == 0:
        forward_triplet_list2, forward_triplet_prob2, forward_triplet_idx_list2 = \
            forward_triplet_list, forward_triplet_prob, forward_triplet_idx_list
        backward_triplet_list2, backward_triplet_prob2, backward_triplet_idx_list2 = \
            backward_triplet_list, backward_triplet_prob, backward_triplet_idx_list
    else:
        assert len(forward_triplet_list) == len(forward_triplet_prob) == len(forward_triplet_idx_list)
        assert len(backward_triplet_list) == len(backward_triplet_prob) == len(backward_triplet_idx_list)

        for idx in range(len(forward_triplet_idx_list)):

            # Largest cross-span distance among the three spans.
            value = min_distance1(forward_triplet_idx_list[idx][0], forward_triplet_idx_list[idx][1],
                                  forward_triplet_idx_list[idx][2], forward_triplet_idx_list[idx][3],
                                  forward_triplet_idx_list[idx][4], forward_triplet_idx_list[idx][5])

            # Triplets with any implicit span (marked (-1, -1)) bypass the
            # distance filter entirely.
            if (forward_triplet_idx_list[idx][0] == forward_triplet_idx_list[idx][1] == -1) or \
                    (forward_triplet_idx_list[idx][2] == forward_triplet_idx_list[idx][3] == -1) or \
                    (forward_triplet_idx_list[idx][4] == forward_triplet_idx_list[idx][5] == -1):
                forward_triplet_list2.append(forward_triplet_list[idx])
                forward_triplet_prob2.append(forward_triplet_prob[idx])
                forward_triplet_idx_list2.append(forward_triplet_idx_list[idx])

            # Otherwise keep only distances inside the window [beta-delta, beta+delta].
            elif (value <= beta + delta) and (value >= beta - delta):
                forward_triplet_list2.append(forward_triplet_list[idx])
                forward_triplet_prob2.append(forward_triplet_prob[idx])
                forward_triplet_idx_list2.append(forward_triplet_idx_list[idx])

        for idx in range(len(backward_triplet_idx_list)):
            value = min_distance1(backward_triplet_idx_list[idx][0], backward_triplet_idx_list[idx][1],
                                  backward_triplet_idx_list[idx][2], backward_triplet_idx_list[idx][3],
                                  backward_triplet_idx_list[idx][4], backward_triplet_idx_list[idx][5])

            if (backward_triplet_idx_list[idx][0] == backward_triplet_idx_list[idx][1] == -1) or \
                    (backward_triplet_idx_list[idx][2] == backward_triplet_idx_list[idx][3] == -1) or \
                    (backward_triplet_idx_list[idx][4] == backward_triplet_idx_list[idx][5] == -1):
                backward_triplet_list2.append(backward_triplet_list[idx])
                backward_triplet_prob2.append(backward_triplet_prob[idx])
                backward_triplet_idx_list2.append(backward_triplet_idx_list[idx])

            elif (value <= beta + delta) and (value >= beta - delta):
                backward_triplet_list2.append(backward_triplet_list[idx])
                backward_triplet_prob2.append(backward_triplet_prob[idx])
                backward_triplet_idx_list2.append(backward_triplet_idx_list[idx])

    # forward: asp -> opi -> adv
    final_asp_list, final_opi_list, final_adv_list = [], [], []
    final_asp_idx_list, final_opi_idx_list, final_adv_idx_list = [], [], []
    for idx in range(len(forward_triplet_list2)):
        # Keep forward triplets confirmed backward, or confident enough alone.
        if forward_triplet_list2[idx] not in backward_triplet_list2 and forward_triplet_prob2[idx] < alpha:
            continue
        if forward_triplet_list2[idx][0] not in final_asp_list:
            # New aspect: open fresh opinion / adverb groups for it.
            final_asp_list.append(forward_triplet_list2[idx][0])   # [[asp1], ...]
            final_opi_list.append([forward_triplet_list2[idx][1]])  # [[[opi1, opi2]], ...]
            final_adv_list.append([[forward_triplet_list2[idx][2]]])  # [[[[adv1, adv2]]], ...]

            final_asp_idx_list.append(forward_triplet_idx_list2[idx][0:2])
            final_opi_idx_list.append([forward_triplet_idx_list2[idx][2:4]])  # add one nesting level
            final_adv_idx_list.append([[forward_triplet_idx_list2[idx][4:6]]])  # add two nesting levels
        else:
            # Aspect already present in final_asp_list.
            asp_index = final_asp_list.index(forward_triplet_list2[idx][0])
            if forward_triplet_list2[idx][1] not in final_opi_list[asp_index]:
                # Opinion not recorded yet for this aspect.
                final_opi_list[asp_index].append(forward_triplet_list2[idx][1])   # [[[opi1, opi2], [opi3]], ...]
                final_opi_idx_list[asp_index].append(forward_triplet_idx_list2[idx][2:4])
                # final_adv_list: locate this aspect's opinion area and start a
                # new second-level list there for the new opinion's adverbs.
                final_adv_list[asp_index].append([forward_triplet_list2[idx][2]])  # [[[[adv1, adv2]], [[adv3]]], ...]
                final_adv_idx_list[asp_index].append([forward_triplet_idx_list2[idx][4:6]])
            else:
                # Opinion exists — does the (asp, opi) pair already record this adverb?
                opi_index = final_opi_list[asp_index].index(forward_triplet_list2[idx][1])
                if forward_triplet_list2[idx][2] not in final_adv_list[asp_index][opi_index]:
                    final_adv_list[asp_index][opi_index].append(forward_triplet_list2[idx][2])
                    # [[[[adv1, adv2]], [[adv3], [adv4]]], ...]
                    final_adv_idx_list[asp_index][opi_index].append(forward_triplet_idx_list2[idx][4:6])

    # The forward pass has handled triplets seen in both directions and the
    # confident forward-only ones; what remains are backward-only predictions.
    # Backward data has the same shape and meaning as forward: adv -> opi -> asp.
    for idx in range(len(backward_triplet_list2)):
        if backward_triplet_list2[idx] not in forward_triplet_list2:
            if backward_triplet_prob2[idx] < alpha:
                continue  # confidence too low; discard this prediction
            if backward_triplet_list2[idx][0] not in final_asp_list:
                final_asp_list.append(backward_triplet_list2[idx][0])  # [[asp1], [asp2]...]
                final_opi_list.append([backward_triplet_list2[idx][1]])  # [[[opi1, opi2], [opi3]], [[opi1]]...]
                final_adv_list.append([[backward_triplet_list2[idx][2]]])  # [[[[adv1, adv2]], [[adv3]]], [[[adv1]]]...]

                final_asp_idx_list.append(backward_triplet_idx_list2[idx][0:2])
                final_opi_idx_list.append([backward_triplet_idx_list2[idx][2:4]])  # add one nesting level
                final_adv_idx_list.append([[backward_triplet_idx_list2[idx][4:6]]])  # add two nesting levels
            else:  # aspect already present
                asp_index = final_asp_list.index(backward_triplet_list2[idx][0])
                if backward_triplet_list2[idx][1] not in final_opi_list[asp_index]:  # opinion missing
                    # [[[opi1, opi2], [opi3]], [[opi1], [opi2]]...]
                    final_opi_list[asp_index].append(backward_triplet_list2[idx][1])
                    final_opi_idx_list[asp_index].append(backward_triplet_idx_list2[idx][2:4])
                    # [ [[[adv1, adv2]], [[adv3]]], [  [[adv1]], [[adv2], append new ] ]...]
                    final_adv_list[asp_index].append([backward_triplet_list2[idx][2]])
                    final_adv_idx_list[asp_index].append([backward_triplet_idx_list2[idx][4:6]])
                else:  # aspect and opinion both present, but the adverb is new
                    opi_index = final_opi_list[asp_index].index(backward_triplet_list2[idx][1])
                    if backward_triplet_list2[idx][2] not in final_adv_list[asp_index][opi_index]:
                        final_adv_list[asp_index][opi_index].append(backward_triplet_list2[idx][2])
                        final_adv_idx_list[asp_index][opi_index].append(backward_triplet_idx_list2[idx][4:6])

    return final_asp_list, final_opi_list, final_adv_list, final_asp_idx_list, final_opi_idx_list, final_adv_idx_list


def save_model(save_path, save_type, epoch, optimizer, model):
    """
    Persist the ENTIRE model object (not just a state_dict) as a .pkl file.

    :param save_path: directory in which to write the checkpoint
    :param save_type: tag used in the file name, e.g. "best" -> best_model.pkl
    :param epoch: accepted for API compatibility but currently unused
    :param optimizer: accepted for API compatibility but currently unused
    :param model: the model object handed to ``torch.save``
    :return: full path of the written checkpoint
    """
    target = os.path.join(save_path, "{}_model.pkl".format(save_type))
    torch.save(model, target)
    return target


def print_results(logger, results):
    """
    Log the evaluation metrics held in *results*.

    Expected shape (each value is a dict of precision/recall/f1), e.g.::

        {"aspect": {...}, "opinion": {...}, "adverb": {...},
         "imp_quintuple": {...}, "quadruple": {...}, "quintuple": {...}}

    :param logger: logger exposing ``.info``
    :param results: mapping from metric group name to its score dict
    """
    # Templates are kept byte-identical to the historical output format
    # (including the inconsistent spacing after the colon).
    templates = (
        ("aspect: {}", "aspect"),
        ("opinion: {}", "opinion"),
        ("adverb:{}", "adverb"),
        ("imp_quintuple:{}", "imp_quintuple"),
        ("quadruple: {}", "quadruple"),
        ("quintuple: {}", "quintuple"),
    )
    for fmt, key in templates:
        logger.info(fmt.format(results[key]))


class FGM:
    """
    Fast Gradient Method (FGM) adversarial training: perturbs the embedding
    layer along the gradient direction to make the model robust to input noise.

    Usage::

        fgm = FGM(model)
        for batch_input, batch_label in data:
            loss = model(batch_input, batch_label)
            loss.backward()                 # normal gradients
            fgm.attack()                    # add adversarial perturbation
            loss_adv = model(batch_input, batch_label)
            loss_adv.backward()             # accumulate adversarial gradients
            fgm.restore()                   # restore embedding weights
            optimizer.step()
            model.zero_grad()
    """

    def __init__(self, model):
        self.model = model
        # Backup of the original embedding tensors, keyed by parameter name.
        self.backup = {}

    def attack(self, epsilon=1., emb_name='emb.'):
        """Back up matching embedding params, then nudge them along their gradient.

        ``emb_name`` must be a substring of your model's embedding parameter names.
        """
        for param_name, param in self.model.named_parameters():
            if not (param.requires_grad and emb_name in param_name):
                continue
            self.backup[param_name] = param.data.clone()
            grad_norm = torch.norm(param.grad)
            # Skip zero or NaN gradients to avoid corrupting the weights.
            if grad_norm != 0 and not torch.isnan(grad_norm):
                perturbation = epsilon * param.grad / grad_norm
                param.data.add_(perturbation)

    def restore(self, emb_name='emb.'):
        """Put the backed-up embedding tensors back and clear the backup."""
        for param_name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in param_name:
                assert param_name in self.backup
                param.data = self.backup[param_name]
        self.backup = {}


class PGD:
    def __init__(self, model, emb_name='emb.', epsilon=1., alpha=0.3):
        """
        Projected Gradient Descent (PGD) adversarial training: attacks the
        embedding layer several times per step, projecting the accumulated
        perturbation back inside an epsilon-ball after each attack.

        Usage::

            pgd = PGD(model)
            K = 3  # typical number of attack steps
            for batch_input, batch_label in data:
                loss = model(batch_input, batch_label)
                loss.backward()             # normal gradients
                pgd.backup_grad()
                for t in range(K):
                    # first attack also backs up param.data
                    pgd.attack(is_first_attack=(t == 0))
                    if t != K - 1:
                        model.zero_grad()
                    else:
                        pgd.restore_grad()
                    loss_adv = model(batch_input, batch_label)
                    loss_adv.backward()     # accumulate adversarial gradients
                pgd.restore()               # restore embedding weights
                optimizer.step()
                model.zero_grad()

        :param model: model whose embedding parameters will be perturbed
        :param emb_name: substring identifying embedding parameter names
        :param epsilon: radius of the projection ball
        :param alpha: step size of each attack
        """
        self.model = model
        self.emb_name = emb_name
        self.epsilon = epsilon
        self.alpha = alpha
        self.emb_backup = {}
        self.grad_backup = {}

    def attack(self, is_first_attack=False):
        """Take one gradient-ascent step on the embeddings, then project."""
        for name, param in self.model.named_parameters():
            if not (param.requires_grad and self.emb_name in name):
                continue
            if is_first_attack:
                # Only the first attack snapshots the clean weights.
                self.emb_backup[name] = param.data.clone()
            grad_norm = torch.norm(param.grad)
            if grad_norm != 0:
                step = self.alpha * param.grad / grad_norm
                param.data.add_(step)
                param.data = self.project(name, param.data, self.epsilon)

    def restore(self):
        """Restore the clean embedding weights and clear the backup."""
        for name, param in self.model.named_parameters():
            if param.requires_grad and self.emb_name in name:
                assert name in self.emb_backup
                param.data = self.emb_backup[name]
        self.emb_backup = {}

    def project(self, param_name, param_data, epsilon):
        """Clip the total perturbation back onto the epsilon-ball surface."""
        delta = param_data - self.emb_backup[param_name]
        delta_norm = torch.norm(delta)
        if delta_norm > epsilon:
            delta = epsilon * delta / delta_norm
        return self.emb_backup[param_name] + delta

    def backup_grad(self):
        """Snapshot all current gradients so they survive the attack loop."""
        for name, param in self.model.named_parameters():
            if param.requires_grad and param.grad is not None:
                self.grad_backup[name] = param.grad.clone()

    def restore_grad(self):
        """Restore the gradients saved by ``backup_grad``."""
        for name, param in self.model.named_parameters():
            if param.requires_grad and param.grad is not None:
                param.grad = self.grad_backup[name]
