import json
import os

import numpy as np
import torch
from torch.nn import functional as F
from tqdm import tqdm

from labels import get_aspect_category, get_sentiment
from losses.acoxs_losses import calculate_entity_loss, calculate_classification_loss, \
    calculate_scl_loss, FocalLoss
from metrics import ACOXSScore
from question_template import get_Chinese_Template, get_Chinese_Template2
from tools import filter_unpaired, triplet_combine, pair_combine, FGM, PGD


class ACOXSTrainer:
    def __init__(self, logger, model, optimizer, scheduler, tokenizer, args):
        """Trainer for the ACOXS multi-question extraction model.

        Args:
            logger: log sink used during training/evaluation.
            model: the neural network being trained.
            optimizer: optimizer updating the model parameters.
            scheduler: learning-rate scheduler, stepped once per batch.
            tokenizer: tokenizer mapping question-template tokens to ids.
            args: parsed command-line arguments (also carries some model
                hyper-parameters, e.g. ``flp_gamma``).
        """
        self.logger = logger
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.tokenizer = tokenizer
        self.args = args
        # Two adversarial-training helpers that perturb the embedding layer.
        self.fgm = FGM(self.model)
        self.pgd = PGD(self.model)
        # Focal loss mitigates class imbalance.
        self.focalLoss = FocalLoss(self.args.flp_gamma)
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # Convert each Chinese question template q1..q9 into token ids and
        # expose them as self.q1_token_ids .. self.q9_token_ids.
        q1, q2, q3, q4, q5, q6, q7, q8, q9 = get_Chinese_Template()
        for number, template in enumerate((q1, q2, q3, q4, q5, q6, q7, q8, q9), start=1):
            setattr(self, f'q{number}_token_ids',
                    self.tokenizer.convert_tokens_to_ids(template))

    def train(self, train_dataloader, epoch):
        """Run one training epoch, optionally with FGM/PGD adversarial training.

        Args:
            train_dataloader: iterable yielding training batches.
            epoch: current epoch number, shown in the progress bar.
        """
        with tqdm(total=len(train_dataloader), desc="train") as pbar:
            for batch_idx, batch in enumerate(train_dataloader):
                # Keep each batch's gradient computation independent.
                self.optimizer.zero_grad()

                # Forward pass: compute the loss and back-propagate.
                loss_sum = self.get_train_loss(batch)
                loss_sum.backward()

                # FGM adversarial training.
                if self.args.use_FGM:
                    # Add an adversarial perturbation on the embedding layer.
                    self.fgm.attack()
                    FGM_loss_sum = self.get_train_loss(batch)
                    # Accumulate the adversarial gradients on top of the
                    # normal ones.
                    FGM_loss_sum.backward()
                    # Restore the original embedding parameters.
                    self.fgm.restore()

                # PGD adversarial training.
                if self.args.use_PGD:
                    self.pgd.backup_grad()
                    for t in range(self.args.pgd_k):
                        # Perturb the embeddings; back up param.data on the
                        # first attack.
                        self.pgd.attack(is_first_attack=(t == 0))
                        if t != self.args.pgd_k - 1:
                            self.model.zero_grad()
                        else:
                            self.pgd.restore_grad()

                        PGD_loss_sum = self.get_train_loss(batch)
                        # Accumulate the adversarial gradients on top of the
                        # normal ones.
                        PGD_loss_sum.backward()
                    # Restore the original embedding parameters.
                    self.pgd.restore()

                # Gradient descent: apply the accumulated updates.
                self.optimizer.step()   # update model parameters
                self.scheduler.step()   # update the learning rate
                self.model.zero_grad()  # clear gradients once more

                # Release cached GPU memory.
                torch.cuda.empty_cache()
                # Refresh the progress-bar display; .item() extracts the
                # Python scalar instead of formatting a live tensor.
                pbar.set_description(f'Train Epoch [{epoch}/{self.args.epoch_num}]')
                pbar.set_postfix({'loss': '{0:1.5f}'.format(loss_sum.item())})
                pbar.update(1)

    def acos_train(self, train_dataloader, epoch):
        """Run one ACOS training epoch (no adversarial training).

        Same loop as ``train`` but computes the loss with
        ``get_acos_train_loss`` and skips the FGM/PGD adversarial steps.

        Args:
            train_dataloader: iterable yielding training batches.
            epoch: current epoch number, shown in the progress bar.
        """
        with tqdm(total=len(train_dataloader), desc="train") as pbar:
            for batch_idx, batch in enumerate(train_dataloader):
                # Keep each batch's gradient computation independent.
                self.optimizer.zero_grad()

                # Forward pass: compute the loss and back-propagate.
                loss_sum = self.get_acos_train_loss(batch)
                loss_sum.backward()

                # Gradient descent: apply the accumulated updates.
                self.optimizer.step()   # update model parameters
                self.scheduler.step()   # update the learning rate
                self.model.zero_grad()  # clear gradients once more

                # Release cached GPU memory.
                torch.cuda.empty_cache()
                # Refresh the progress-bar display; .item() extracts the
                # Python scalar instead of formatting a live tensor.
                pbar.set_description(f'Train Epoch [{epoch}/{self.args.epoch_num}]')
                pbar.set_postfix({'loss': '{0:1.5f}'.format(loss_sum.item())})
                pbar.update(1)

    def eval(self, eval_dataloader):
        self.model.eval()
        json_res = []
        acoxs_score = ACOXSScore(self.logger)

        for batch in tqdm(eval_dataloader):  # 三维列表 batch里面n个样本

            valid_lengths = [len(query.nonzero(as_tuple=True)[0]) for query in batch.forward_asp_query]
            max_valid_length = max(valid_lengths)
            batch.forward_asp_query = batch.forward_asp_query[:, :max_valid_length].to(self.device)
            batch.forward_asp_query_mask = batch.forward_asp_query_mask[:, :max_valid_length].to(self.device)
            batch.forward_asp_query_seg = batch.forward_asp_query_seg[:, :max_valid_length].to(self.device)

            f_asp_start_scores, f_asp_end_scores = self.model(batch.forward_asp_query.to(self.device),
                                                              batch.forward_asp_query_mask.to(self.device),
                                                              batch.forward_asp_query_seg.to(self.device), 0)

            valid_lengths = [len(query.nonzero(as_tuple=True)[0]) for query in batch.backward_adv_query]
            max_valid_length = max(valid_lengths)
            batch.backward_adv_query = batch.backward_adv_query[:, :max_valid_length].to(self.device)
            batch.backward_adv_query_mask = batch.backward_adv_query_mask[:, :max_valid_length].to(self.device)
            batch.backward_adv_query_seg = batch.backward_adv_query_seg[:, :max_valid_length].to(self.device)
            b_adv_start_scores, b_adv_end_scores = self.model(batch.backward_adv_query.to(self.device),
                                                              batch.backward_adv_query_mask.to(self.device),
                                                              batch.backward_adv_query_seg.to(self.device), 0)

            for x in range(self.args.eval_batch_size):
                # 统计数据
                asp_predict, opi_predict, adv_predict, asp_opi_predict, \
                    aste_triplets_predict, aoc_triplets_predict, aoa_triplets_predict, \
                    quadruples_predict, quintuples_predict = [], [], [], [], [], [], [], [], []
                # 最终结果
                forward_triplet_list, forward_triplet_prob, forward_triplet_idx_list = [], [], []
                backward_triplet_list, backward_triplet_prob, backward_triplet_idx_list = [], [], []

                # 原始评论： 返回 null + review 在temp_text中的索引
                # forward q_1 nonzero函数是numpy中用于得到数组array中非零元素的位置（数组索引）的函数
                passenge_index = batch.forward_asp_answer_start[x].gt(-1).float().nonzero()
                passenge = batch.forward_asp_query[x][passenge_index].squeeze(1)
                passenge = passenge.to(self.device)  # batch中每一个样本的 null + review 在vocab.txt 中的映射

                # ====================Forward Q1: Aspect=========================
                if self.args.use_Backward is not True:
                    f_asp_start_scores2 = F.softmax(f_asp_start_scores[x], dim=1)
                    f_asp_end_scores2 = F.softmax(f_asp_end_scores[x], dim=1)
                    f_asp_start_prob, f_asp_start_idx = torch.max(f_asp_start_scores2, dim=1)
                    f_asp_end_prob, f_asp_end_idx = torch.max(f_asp_end_scores2, dim=1)

                    f_asp_start_prob_temp = []
                    f_asp_end_prob_temp = []
                    f_asp_start_index_temp = []
                    f_asp_end_index_temp = []
                    for i in range(f_asp_start_idx.size(0)):
                        if batch.forward_asp_answer_start[x, i] != -1:  # 排除答案预测到query部分
                            if f_asp_start_idx[i].item() == 1:
                                f_asp_start_index_temp.append(i)
                                f_asp_start_prob_temp.append(f_asp_start_prob[i].item())
                            if f_asp_end_idx[i].item() == 1:
                                f_asp_end_index_temp.append(i)
                                f_asp_end_prob_temp.append(f_asp_end_prob[i].item())

                    f_asp_start_idx, f_asp_end_idx, f_asp_prob = filter_unpaired(
                        f_asp_start_prob_temp, f_asp_end_prob_temp, f_asp_start_index_temp, f_asp_end_index_temp,
                        9
                    )
                    # =================Forward Q2: Aspect->Opinion================
                    for a in range(len(f_asp_start_idx)):
                        # 获取方面项的 token id 列表
                        asp_token_ids = [batch.forward_asp_query[x][j].item() for j in
                                         range(f_asp_start_idx[a], f_asp_end_idx[a] + 1)]
                        f_opi_query = (
                                self.q2_token_ids[:3] +  # 前缀：[CLS], 关于
                                asp_token_ids +     # 你推导出来的方面项 token id
                                self.q2_token_ids[-8:]  # 后缀：的 感 观 有 哪 些？ [SEP]
                        )

                        f_opi_query_length = len(f_opi_query)
                        f_opi_query_seg = [0] * len(f_opi_query)
                        imp_start = len(f_opi_query)
                        f_opi_query = torch.tensor(f_opi_query).long().to(self.device)
                        f_opi_query = torch.cat([f_opi_query, passenge], -1).to(self.device).unsqueeze(0)

                        f_opi_query_mask = torch.ones(f_opi_query.size(1)).float().to(self.device).unsqueeze(0)
                        f_opi_query_seg += [1] * passenge.size(0)
                        f_opi_query_seg = torch.tensor(f_opi_query_seg).long().to(self.device).unsqueeze(0)

                        f_opi_start_scores, f_opi_end_scores = self.model(f_opi_query, f_opi_query_mask, f_opi_query_seg, 0)

                        f_opi_start_scores = F.softmax(f_opi_start_scores[0], dim=1)
                        f_opi_end_scores = F.softmax(f_opi_end_scores[0], dim=1)
                        f_opi_start_prob, f_opi_start_idx = torch.max(f_opi_start_scores, dim=1)
                        f_opi_end_prob, f_opi_end_idx = torch.max(f_opi_end_scores, dim=1)

                        f_opi_start_prob_temp, f_opi_end_prob_temp = [], []
                        f_opi_start_idx_temp, f_opi_end_idx_temp = [], []

                        for k in range(f_opi_start_idx.size(0)):
                            if f_opi_query_seg[0, k] == 1:  # 把判定到query部分的id全部过滤掉
                                if f_opi_start_idx[k].item() == 1:
                                    f_opi_start_idx_temp.append(k)
                                    f_opi_start_prob_temp.append(f_opi_start_prob[k].item())
                                if f_opi_end_idx[k].item() == 1:
                                    f_opi_end_idx_temp.append(k)
                                    f_opi_end_prob_temp.append(f_opi_end_prob[k].item())

                        f_opi_start_idx, f_opi_end_idx, f_opi_prob = filter_unpaired(
                            f_opi_start_prob_temp, f_opi_end_prob_temp, f_opi_start_idx_temp, f_opi_end_idx_temp,
                            imp_start=imp_start
                        )

                        # ==================Q3: Aspect + Opinion -> Adverb ======================
                        # ["[CLS]", "关", "于", "方", "面", "项", "感", "观", "的", "副", "词", "有", "哪", "些", "？", "[SEP]"]
                        for b in range(len(f_opi_start_idx)):
                            asp_token_ids = [batch.forward_asp_query[x][j].item() for j in range(f_asp_start_idx[a], f_asp_end_idx[a] + 1)]
                            opi_token_ids = [f_opi_query[0][j].item() for j in range(f_opi_start_idx[b], f_opi_end_idx[b] + 1)]
                            f_adv_query = (
                                self.q3_token_ids[:3] +
                                asp_token_ids +
                                opi_token_ids +
                                self.q3_token_ids[-8:]
                            )
                            f_adv_query_length = len(f_adv_query)

                            f_adv_query_seg = [0] * len(f_adv_query)
                            imp_start = len(f_adv_query)
                            f_adv_query = torch.tensor(f_adv_query).long().to(self.device)
                            f_adv_query = torch.cat([f_adv_query, passenge], -1).to(self.device).unsqueeze(0)

                            f_adv_query_mask = torch.ones(f_adv_query.size(1)).float().to(self.device).unsqueeze(0)
                            f_adv_query_seg += [1] * passenge.size(0)
                            f_adv_query_seg = torch.tensor(f_adv_query_seg).long().to(self.device).unsqueeze(0)

                            f_adv_start_scores, f_adv_end_scores = self.model(f_adv_query, f_adv_query_mask,
                                                                              f_adv_query_seg, 0)

                            f_adv_start_scores = F.softmax(f_adv_start_scores[0], dim=1)
                            f_adv_end_scores = F.softmax(f_adv_end_scores[0], dim=1)
                            f_adv_start_prob, f_adv_start_idx = torch.max(f_adv_start_scores, dim=1)
                            f_adv_end_prob, f_adv_end_idx = torch.max(f_adv_end_scores, dim=1)

                            f_adv_start_prob_temp, f_adv_end_prob_temp = [], []
                            f_adv_start_idx_temp, f_adv_end_idx_temp = [], []

                            for k in range(f_adv_start_idx.size(0)):
                                if f_adv_query_seg[0, k] == 1:
                                    if f_adv_start_idx[k].item() == 1:
                                        f_adv_start_idx_temp.append(k)
                                        f_adv_start_prob_temp.append(f_adv_start_prob[k].item())
                                    if f_adv_end_idx[k].item() == 1:
                                        f_adv_end_idx_temp.append(k)
                                        f_adv_end_prob_temp.append(f_adv_end_prob[k].item())

                            f_adv_start_idx, f_adv_end_idx, f_adv_prob = filter_unpaired(
                                f_adv_start_prob_temp, f_adv_end_prob_temp, f_adv_start_idx_temp, f_adv_end_idx_temp,
                                imp_start=imp_start
                            )

                            for k in range(len(f_adv_start_idx)):
                                asp = [batch.forward_asp_query[x][j].item() for j in
                                       range(f_asp_start_idx[a], f_asp_end_idx[a] + 1)]
                                opi = [f_opi_query[0][j].item() for j in
                                       range(f_opi_start_idx[b], f_opi_end_idx[b] + 1)]
                                adv = [f_adv_query[0][j].item() for j in
                                       range(f_adv_start_idx[k], f_adv_end_idx[k] + 1)]

                                # 问题 + null     没有null(0,0) -> (-1, -1)   回归最原始评论中关键元素的下标
                                asp_idx = [f_asp_start_idx[a] - len(self.q1_token_ids) - 1, f_asp_end_idx[a] 
                                           - len(self.q1_token_ids) - 1]
                                opi_idx = [f_opi_start_idx[b] - f_opi_query_length - 1,
                                           f_opi_end_idx[b] - f_opi_query_length - 1]
                                adv_idx = [f_adv_start_idx[k] - f_adv_query_length - 1,
                                           f_adv_end_idx[k] - f_adv_query_length - 1]

                                # 计算配对概率
                                temp_prob = f_asp_prob[a] * f_opi_prob[b] * f_adv_prob[k]

                                if asp_idx + opi_idx + adv_idx not in forward_triplet_idx_list:
                                    forward_triplet_list.append([asp] + [opi] + [adv])
                                    forward_triplet_prob.append(temp_prob)
                                    forward_triplet_idx_list.append(asp_idx + opi_idx + adv_idx)

                # =================Backward Q4: Adverb ========================
                if self.args.use_Forward is not True:
                    b_adv_start_scores2 = F.softmax(b_adv_start_scores[x], dim=1)
                    b_adv_end_scores2 = F.softmax(b_adv_end_scores[x], dim=1)
                    b_adv_start_prob, b_adv_start_idx = torch.max(b_adv_start_scores2, dim=1)
                    b_adv_end_prob, b_adv_end_idx = torch.max(b_adv_end_scores2, dim=1)

                    b_adv_start_prob_temp = []
                    b_adv_end_prob_temp = []
                    b_adv_start_index_temp = []
                    b_adv_end_index_temp = []
                    for i in range(b_adv_start_idx.size(0)):
                        if batch.backward_adv_answer_start[x, i] != -1:
                            if b_adv_start_idx[i].item() == 1:
                                b_adv_start_index_temp.append(i)
                                b_adv_start_prob_temp.append(b_adv_start_prob[i].item())
                            if b_adv_end_idx[i].item() == 1:
                                b_adv_end_index_temp.append(i)
                                b_adv_end_prob_temp.append(b_adv_end_prob[i].item())

                    b_adv_start_idx, b_adv_end_idx, b_adv_prob = filter_unpaired(
                        b_adv_start_prob_temp, b_adv_end_prob_temp, b_adv_start_index_temp, b_adv_end_index_temp,
                        8)

                    # ==================Backward Q5: Adverb -> Opinion =======================
                    # ["[CLS]", "副", "词", "修", "饰", "的", "感", "观", "有", "哪", "些", "？", "[SEP]"]
                    for a in range(len(b_adv_start_idx)):
                        adv_token_ids = [batch.backward_adv_query[x][j].item()
                                         for j in range(b_adv_start_idx[a], b_adv_end_idx[a] + 1)]
                        b_opi_query = (
                                self.q5_token_ids[:1] +  # "[CLS]"
                                adv_token_ids +  # 替换 "副", "词"
                                self.q5_token_ids[-10:]  # 保留 "修", "饰", "的", "感", "观", ...
                        )

                        b_opi_query_length = len(b_opi_query)

                        b_opi_query_seg = [0] * len(b_opi_query)
                        imp_start = len(b_opi_query)
                        b_opi_query = torch.tensor(b_opi_query).long().to(self.device)
                        b_opi_query = torch.cat([b_opi_query, passenge], -1).to(self.device).unsqueeze(0)

                        b_opi_query_mask = torch.ones(b_opi_query.size(1)).float().to(self.device).unsqueeze(0)
                        b_opi_query_seg += [1] * passenge.size(0)
                        b_opi_query_seg = torch.tensor(b_opi_query_seg).long().to(self.device).unsqueeze(0)

                        b_opi_start_scores, b_opi_end_scores = self.model(b_opi_query, b_opi_query_mask,
                                                                          b_opi_query_seg, 0)

                        b_opi_start_scores = F.softmax(b_opi_start_scores[0], dim=1)
                        b_opi_end_scores = F.softmax(b_opi_end_scores[0], dim=1)
                        b_opi_start_prob, b_opi_start_idx = torch.max(b_opi_start_scores, dim=1)
                        b_opi_end_prob, b_opi_end_idx = torch.max(b_opi_end_scores, dim=1)

                        b_opi_start_prob_temp, b_opi_end_prob_temp = [], []
                        b_opi_start_idx_temp, b_opi_end_idx_temp = [], []

                        for k in range(b_opi_start_idx.size(0)):
                            if b_opi_query_seg[0, k] == 1:
                                if b_opi_start_idx[k].item() == 1:
                                    b_opi_start_idx_temp.append(k)
                                    b_opi_start_prob_temp.append(b_opi_start_prob[k].item())
                                if b_opi_end_idx[k].item() == 1:
                                    b_opi_end_idx_temp.append(k)
                                    b_opi_end_prob_temp.append(b_opi_end_prob[k].item())

                        b_opi_start_idx, b_opi_end_idx, b_opi_prob = filter_unpaired(
                            b_opi_start_prob_temp, b_opi_end_prob_temp, b_opi_start_idx_temp, b_opi_end_idx_temp,
                            imp_start=imp_start
                        )

                        # =============================Backward Q6========================================
                        # ["[CLS]", "副", "词", "感", "观", "修", "饰", "的", "方", "面", "项", "有", "哪", "些", "？", "[SEP]"]
                        for b in range(len(b_opi_start_idx)):
                            adv_token_ids = [batch.backward_adv_query[x][j].item() for j in
                                             range(b_adv_start_idx[a], b_adv_end_idx[a] + 1)]
                            opi_token_ids = [b_opi_query[0][j].item() for j in
                                             range(b_opi_start_idx[b], b_opi_end_idx[b] + 1)]

                            b_asp_query = (
                                    self.q6_token_ids[:1] +  # [CLS]
                                    adv_token_ids +  # 替换 "副", "词"
                                    opi_token_ids +  # 替换 "感", "观"
                                    self.q6_token_ids[-11:]  # 从 "修", "饰", ... 后面继续
                            )

                            b_asp_query_length = len(b_asp_query)

                            b_asp_query_seg = [0] * len(b_asp_query)
                            imp_start = len(b_asp_query)
                            b_asp_query = torch.tensor(b_asp_query).to(self.device)
                            b_asp_query = torch.cat([b_asp_query, passenge], -1).to(self.device).unsqueeze(0)

                            b_asp_query_mask = torch.ones(b_asp_query.size(1)).float().to(self.device).unsqueeze(0)
                            b_asp_query_seg += [1] * passenge.size(0)
                            b_asp_query_seg = torch.tensor(b_asp_query_seg).long().to(self.device).unsqueeze(0)

                            b_asp_start_scores, b_asp_end_scores = self.model(b_asp_query, b_asp_query_mask,
                                                                              b_asp_query_seg, 0)

                            b_asp_start_scores = F.softmax(b_asp_start_scores[0], dim=1)
                            b_asp_end_scores = F.softmax(b_asp_end_scores[0], dim=1)
                            b_asp_start_prob, b_asp_start_idx = torch.max(b_asp_start_scores, dim=1)
                            b_asp_end_prob, b_asp_end_idx = torch.max(b_asp_end_scores, dim=1)

                            b_asp_start_prob_temp, b_asp_end_prob_temp = [], []
                            b_asp_start_idx_temp, b_asp_end_idx_temp = [], []

                            for k in range(b_asp_start_idx.size(0)):
                                if b_asp_query_seg[0, k] == 1:
                                    if b_asp_start_idx[k].item() == 1:
                                        b_asp_start_idx_temp.append(k)
                                        b_asp_start_prob_temp.append(b_asp_start_prob[k].item())
                                    if b_asp_end_idx[k].item() == 1:
                                        b_asp_end_idx_temp.append(k)
                                        b_asp_end_prob_temp.append(b_asp_end_prob[k].item())

                            b_asp_start_idx, b_asp_end_idx, b_asp_prob = filter_unpaired(
                                b_asp_start_prob_temp, b_asp_end_prob_temp, b_asp_start_idx_temp, b_asp_end_idx_temp,
                                imp_start=imp_start
                            )

                            for k in range(len(b_asp_start_idx)):
                                adv = [batch.backward_adv_query[x][j].item() for j in
                                       range(b_adv_start_idx[a], b_adv_end_idx[a] + 1)]
                                opi = [b_opi_query[0][j].item() for j in
                                       range(b_opi_start_idx[b], b_opi_end_idx[b] + 1)]
                                asp = [b_asp_query[0][j].item() for j in
                                       range(b_asp_start_idx[k], b_asp_end_idx[k] + 1)]

                                adv_idx = [b_adv_start_idx[a] - len(self.q4_token_ids) - 1,
                                           b_adv_end_idx[a] - len(self.q4_token_ids) - 1]
                                opi_idx = [b_opi_start_idx[b] - b_opi_query_length - 1,
                                           b_opi_end_idx[b] - b_opi_query_length - 1]
                                asp_idx = [b_asp_start_idx[k] - b_asp_query_length - 1,
                                           b_asp_end_idx[k] - b_asp_query_length - 1]

                                temp_prob = b_asp_prob[k] * b_opi_prob[b] * b_adv_prob[a]
                                if asp_idx + opi_idx + adv_idx not in backward_triplet_idx_list:
                                    backward_triplet_list.append([asp] + [opi] + [adv])
                                    backward_triplet_prob.append(temp_prob)
                                    backward_triplet_idx_list.append(asp_idx + opi_idx + adv_idx)

                # ===================Q7: Category + Q8: Sentiment + Q9: Intensity======================
                # ["[CLS]", "方", "面", "项", "和", "感", "观", "的", "类", "别", "是", "什", "么", "？", "[SEP]"]
                # ["[CLS]", "方", "面", "项", "和", "感", "观", "的", "情", "感", "是", "什", "么", "？","[SEP]"]
                # ["[CLS]", "方", "面", "项", "副", "词", "感", "观", "的", "强", "度", "是", "多", "少", "？", "[SEP]"]
                if self.args.use_Forward:
                    final_asp_list, final_opi_list, final_adv_list, \
                        final_asp_idx_list, final_opi_idx_list, final_adv_idx_list = triplet_combine(
                            forward_triplet_list,
                            forward_triplet_prob,
                            forward_triplet_idx_list,
                            [],
                            [],
                            [],
                            self.args.alpha,
                            self.args.beta,
                            self.args.delta
                        )
                elif self.args.use_Backward:
                    final_asp_list, final_opi_list, final_adv_list, \
                        final_asp_idx_list, final_opi_idx_list, final_adv_idx_list = triplet_combine(
                            [],
                            [],
                            [],
                            backward_triplet_list,
                            backward_triplet_prob,
                            backward_triplet_idx_list,
                            self.args.alpha,
                            self.args.beta,
                            self.args.delta
                        )
                else:
                    final_asp_list, final_opi_list, final_adv_list, \
                        final_asp_idx_list, final_opi_idx_list, final_adv_idx_list = triplet_combine(
                            forward_triplet_list,
                            forward_triplet_prob,
                            forward_triplet_idx_list,
                            backward_triplet_list,
                            backward_triplet_prob,
                            backward_triplet_idx_list,
                            self.args.alpha,
                            self.args.beta,
                            self.args.delta
                        )

                for a in range(len(final_asp_list)):
                    predict_opinion_num = len(final_opi_list[a])  # asp 对应的 opi数

                    category_query = self.q7_token_ids[:1]
                    sentiment_query = self.q8_token_ids[:1]
                    intensity_query = self.q9_token_ids[:1]
                    # insert/replace aspect in query
                    category_query += final_asp_list[a]
                    sentiment_query += final_asp_list[a]
                    intensity_query += final_asp_list[a]
                    category_query += self.q7_token_ids[4:5]
                    sentiment_query += self.q8_token_ids[4:5]
                    temp_category = category_query.copy()
                    temp_sentiment = sentiment_query.copy()
                    temp_intensity = intensity_query.copy()
                    for b in range(predict_opinion_num):
                        predict_adverb_num = len(final_adv_list[a][b])
                        # 循环状态回溯
                        category_query = temp_category.copy()
                        sentiment_query = temp_sentiment.copy()
                        # insert opinion in query
                        category_query += final_opi_list[a][b]
                        sentiment_query += final_opi_list[a][b]
                        category_query += self.q7_token_ids[-8:]
                        sentiment_query += self.q8_token_ids[-8:]

                        # category
                        category_query_seg = [0] * len(category_query)
                        category_query = torch.tensor(category_query).long().to(self.device)
                        category_query = torch.cat([category_query, passenge], -1).to(self.device).unsqueeze(0)
                        category_query_seg += [1] * passenge.size(0)
                        category_query_mask = torch.ones(category_query.size(1)).float().to(self.device).unsqueeze(0)
                        category_query_seg = torch.tensor(category_query_seg).long().to(self.device).unsqueeze(0)
                        # sentiment
                        sentiment_query_seg = [0] * len(sentiment_query)
                        sentiment_query = torch.tensor(sentiment_query).long().to(self.device)
                        sentiment_query = torch.cat([sentiment_query, passenge], -1).to(self.device).unsqueeze(0)
                        sentiment_query_seg += [1] * passenge.size(0)
                        sentiment_query_mask = torch.ones(sentiment_query.size(1)).float().to(self.device).unsqueeze(0)
                        sentiment_query_seg = torch.tensor(sentiment_query_seg).long().to(self.device).unsqueeze(0)

                        # inference results of category
                        category_scores = self.model(category_query, category_query_mask, category_query_seg, 1)
                        category_scores = F.softmax(category_scores, dim=1)
                        category_predicted = torch.argmax(category_scores[0], dim=0).item()

                        # inference results of sentiment
                        sentiment_scores = self.model(sentiment_query, sentiment_query_mask, sentiment_query_seg, 2)
                        sentiment_scores = F.softmax(sentiment_scores, dim=1)
                        sentiment_predicted = torch.argmax(sentiment_scores[0], dim=0).item()

                        # opinion对应的adverb
                        for c in range(predict_adverb_num):
                            # 回溯
                            intensity_query = temp_intensity.copy()
                            # 重新拼接
                            intensity_query += final_adv_list[a][b][c]
                            intensity_query += final_opi_list[a][b]
                            intensity_query += self.q9_token_ids[-8:]

                            intensity_query_seg = [0] * len(intensity_query)
                            intensity_query = torch.tensor(intensity_query).long().to(self.device)
                            intensity_query = torch.cat([intensity_query, passenge], -1).to(self.device).unsqueeze(0)
                            intensity_query_seg += [1] * passenge.size(0)
                            intensity_query_mask = torch.ones(intensity_query.size(1)).float().to(self.device).unsqueeze(0)
                            intensity_query_seg = torch.tensor(intensity_query_seg).long().to(self.device).unsqueeze(0)

                            # inference results of intensity
                            intensity_scores = self.model(intensity_query, intensity_query_mask, intensity_query_seg, 3)
                            intensity_scores = F.softmax(intensity_scores, dim=1)
                            intensity_predicted = torch.argmax(intensity_scores[0], dim=0).item()

                            # 三元组、五元组组合 + 六元组
                            asp, opi, adv = [], [], []
                            asp.append(final_asp_idx_list[a][0])  # asp 的start index
                            asp.append(final_asp_idx_list[a][1])  # asp 的end index
                            opi.append(final_opi_idx_list[a][b][0])
                            opi.append(final_opi_idx_list[a][b][1])
                            adv.append(final_adv_idx_list[a][b][c][0])
                            adv.append(final_adv_idx_list[a][b][c][1])
                            adv.append(intensity_predicted)  # adv 第三个位置给intensity_predicted 保留五元组

                            asp_opi = [asp, opi]
                            aste_triplet_predict = [asp, opi, sentiment_predicted]
                            aoc_triplet_predict = [asp, opi, category_predicted]
                            aoa_triplet_predict = [asp, opi, adv]
                            quadruple_predict = [asp, category_predicted, opi, sentiment_predicted]
                            quintuple_predict = [asp, category_predicted, opi, adv, sentiment_predicted]

                            if asp not in asp_predict:
                                asp_predict.append(asp)
                            if opi not in opi_predict:
                                opi_predict.append(opi)
                            if adv not in adv_predict:
                                adv_predict.append(adv)
                            if asp_opi not in asp_opi_predict:
                                asp_opi_predict.append(asp_opi)
                            if aste_triplet_predict not in aste_triplets_predict:
                                aste_triplets_predict.append(aste_triplet_predict)
                            if aoc_triplet_predict not in aoc_triplets_predict:
                                aoc_triplets_predict.append(aoc_triplet_predict)
                            if aoa_triplet_predict not in aoa_triplets_predict:
                                aoa_triplets_predict.append(aoa_triplet_predict)
                            if quadruple_predict not in quadruples_predict:
                                quadruples_predict.append(quadruple_predict)
                            if quintuple_predict not in quintuples_predict:
                                quintuples_predict.append(quintuple_predict)

                acoxs_score.update(batch.aspects[x], batch.opinions[x], batch.adverbs[x], batch.pairs[x],
                                   batch.aste_triplets[x], batch.aoc_triplets[x], batch.aoa_triplets[x],
                                   batch.quadruples[x], batch.quintuples[x],
                                   asp_predict, opi_predict, adv_predict, asp_opi_predict,
                                   aste_triplets_predict, aoc_triplets_predict, aoa_triplets_predict,
                                   quadruples_predict, quintuples_predict)
                one_json = {'sentence': ' '.join(batch.sentence_token[x]), 'pred': str(quintuples_predict),
                            'gold': str(batch.quintuples[x])}
                json_res.append(one_json)

                with open(os.path.join(self.args.output_dir, self.args.task, self.args.data_type, 'predict.json'), 'w',
                          encoding='utf-8') as fP:
                    json.dump(json_res, fP, ensure_ascii=False, indent=4)
        return acoxs_score.compute()

    def acos_eval(self, eval_dataloader):
        """Evaluate ACOS (aspect-category-opinion-sentiment) quadruple extraction.

        Pipeline per sample:
          1. Forward Q1 extracts aspect spans; Q2 extracts opinions per aspect.
          2. Backward Q3 extracts opinion spans; Q4 extracts aspects per opinion.
          3. Forward/backward (aspect, opinion) pairs are merged by pair_combine,
             controlled by args.use_Forward / args.use_Backward (both when neither
             flag is set).
          4. Q5 classifies the category and Q6 the sentiment of each pair.

        Predictions are accumulated into an ACOXSScore and also dumped to
        <output_dir>/<task>/<data_type>/predict.json.

        :param eval_dataloader: iterable of pre-tokenised evaluation batches
        :return: metric dictionary produced by ACOXSScore.compute()
        """
        self.model.eval()
        json_res = []
        acoxs_score = ACOXSScore(self.logger)  # include acos_score

        q1, q2, q3, q4, q5, q6 = get_Chinese_Template2()
        for batch in tqdm(eval_dataloader):

            # Q1: aspect start/end scores for the whole forward-query batch
            f_asp_start_scores, f_asp_end_scores = self.model(batch.forward_asp_query.to(self.device),
                                                              batch.forward_asp_query_mask.to(self.device),
                                                              batch.forward_asp_query_seg.to(self.device), 0)

            # Q3: opinion start/end scores for the whole backward-query batch
            b_opi_start_scores, b_opi_end_scores = self.model(batch.backward_opi_query.to(self.device),
                                                              batch.backward_opi_query_mask.to(self.device),
                                                              batch.backward_opi_query_seg.to(self.device), 0)

            # NOTE(review): assumes every batch holds exactly eval_batch_size
            # samples (drop_last or padded batches) — confirm the dataloader.
            for x in range(self.args.eval_batch_size):
                asp_predict, opi_predict, asp_opi_predict, \
                    quadruples_predict = [], [], [], []

                forward_pair_list, forward_pair_prob, forward_pair_idx_list = [], [], []
                backward_pair_list, backward_pair_prob, backward_pair_idx_list = [], [], []

                # Positions of "null + review" inside the forward query:
                # answer_start > -1 marks passage (non-question) tokens;
                # nonzero() returns their indices.
                passenge_index = batch.forward_asp_answer_start[x].gt(-1).float().nonzero()
                passenge = batch.forward_asp_query[x][passenge_index].squeeze(1)
                passenge = passenge.to(self.device)

                # ====================Forward Q1: Aspect=========================

                f_asp_start_scores2 = F.softmax(f_asp_start_scores[x], dim=1)
                f_asp_end_scores2 = F.softmax(f_asp_end_scores[x], dim=1)
                f_asp_start_prob, f_asp_start_idx = torch.max(f_asp_start_scores2, dim=1)
                f_asp_end_prob, f_asp_end_idx = torch.max(f_asp_end_scores2, dim=1)

                f_asp_start_prob_temp = []
                f_asp_end_prob_temp = []
                f_asp_start_index_temp = []
                f_asp_end_index_temp = []
                for i in range(f_asp_start_idx.size(0)):
                    # BUGFIX: was batch.forward_asp_answer_start[0, i] — sample 0's
                    # passage mask was applied to every sample in the batch.
                    if batch.forward_asp_answer_start[x, i] != -1:  # keep only passage positions
                        if f_asp_start_idx[i].item() == 1:
                            f_asp_start_index_temp.append(i)
                            f_asp_start_prob_temp.append(f_asp_start_prob[i].item())
                        if f_asp_end_idx[i].item() == 1:
                            f_asp_end_index_temp.append(i)
                            f_asp_end_prob_temp.append(f_asp_end_prob[i].item())

                f_asp_start_idx, f_asp_end_idx, f_asp_prob = filter_unpaired(
                    f_asp_start_prob_temp, f_asp_end_prob_temp, f_asp_start_index_temp, f_asp_end_index_temp,
                    8
                )
                # =================Forward Q2: Aspect->Opinion================
                for a in range(len(f_asp_start_idx)):
                    f_opi_query = self.tokenizer.convert_tokens_to_ids(q2)
                    # NOTE(review): repeated insert(5, ...) stores the aspect tokens
                    # in reversed order inside the query; kept as-is because the
                    # training-side queries are built the same way — confirm before
                    # changing.
                    for j in range(f_asp_start_idx[a], f_asp_end_idx[a] + 1):
                        f_opi_query.insert(5, batch.forward_asp_query[x][j].item())

                    f_opi_query_length = len(f_opi_query)
                    f_opi_query_seg = [0] * len(f_opi_query)
                    imp_start = len(f_opi_query)
                    f_opi_query = torch.tensor(f_opi_query).long().to(self.device)
                    f_opi_query = torch.cat([f_opi_query, passenge], -1).to(self.device).unsqueeze(0)

                    f_opi_query_mask = torch.ones(f_opi_query.size(1)).float().to(self.device).unsqueeze(0)
                    f_opi_query_seg += [1] * passenge.size(0)
                    f_opi_query_seg = torch.tensor(f_opi_query_seg).long().to(self.device).unsqueeze(0)

                    f_opi_start_scores, f_opi_end_scores = self.model(f_opi_query, f_opi_query_mask, f_opi_query_seg, 0)

                    f_opi_start_scores = F.softmax(f_opi_start_scores[0], dim=1)
                    f_opi_end_scores = F.softmax(f_opi_end_scores[0], dim=1)
                    f_opi_start_prob, f_opi_start_idx = torch.max(f_opi_start_scores, dim=1)
                    f_opi_end_prob, f_opi_end_idx = torch.max(f_opi_end_scores, dim=1)

                    f_opi_start_prob_temp, f_opi_end_prob_temp = [], []
                    f_opi_start_idx_temp, f_opi_end_idx_temp = [], []

                    for k in range(f_opi_start_idx.size(0)):
                        if f_opi_query_seg[0, k] == 1:  # drop every hit that falls in the question part
                            if f_opi_start_idx[k].item() == 1:
                                f_opi_start_idx_temp.append(k)
                                f_opi_start_prob_temp.append(f_opi_start_prob[k].item())
                            if f_opi_end_idx[k].item() == 1:
                                f_opi_end_idx_temp.append(k)
                                f_opi_end_prob_temp.append(f_opi_end_prob[k].item())

                    f_opi_start_idx, f_opi_end_idx, f_opi_prob = filter_unpaired(
                        f_opi_start_prob_temp, f_opi_end_prob_temp, f_opi_start_idx_temp, f_opi_end_idx_temp,
                        imp_start=imp_start
                    )

                    for k in range(len(f_opi_start_idx)):
                        asp = [batch.forward_asp_query[x][j].item() for j in
                               range(f_asp_start_idx[a], f_asp_end_idx[a] + 1)]
                        opi = [f_opi_query[0][j].item() for j in
                               range(f_opi_start_idx[k], f_opi_end_idx[k] + 1)]

                        # Map back to indices in the original "null + review" text:
                        # question + null offset; no-null (0,0) -> (-1, -1).
                        asp_idx = [f_asp_start_idx[a] - len(q1) - 1, f_asp_end_idx[a] - len(q1) - 1]
                        opi_idx = [f_opi_start_idx[k] - f_opi_query_length - 1,
                                   f_opi_end_idx[k] - f_opi_query_length - 1]

                        # joint probability of the (aspect, opinion) pair
                        temp_prob = f_asp_prob[a] * f_opi_prob[k]

                        if asp_idx + opi_idx not in forward_pair_idx_list:
                            forward_pair_list.append([asp] + [opi])
                            forward_pair_prob.append(temp_prob)
                            forward_pair_idx_list.append(asp_idx + opi_idx)

                # =================Backward Q3: Extract Opinion========================

                b_opi_start_scores2 = F.softmax(b_opi_start_scores[x], dim=1)
                b_opi_end_scores2 = F.softmax(b_opi_end_scores[x], dim=1)
                b_opi_start_prob, b_opi_start_idx = torch.max(b_opi_start_scores2, dim=1)
                b_opi_end_prob, b_opi_end_idx = torch.max(b_opi_end_scores2, dim=1)

                b_opi_start_prob_temp = []
                b_opi_end_prob_temp = []
                b_opi_start_index_temp = []
                b_opi_end_index_temp = []
                for i in range(b_opi_start_idx.size(0)):
                    # BUGFIX: was batch.backward_opi_answer_start[0, i] — same
                    # cross-sample mask error as in the forward pass above.
                    if batch.backward_opi_answer_start[x, i] != -1:
                        if b_opi_start_idx[i].item() == 1:
                            b_opi_start_index_temp.append(i)
                            b_opi_start_prob_temp.append(b_opi_start_prob[i].item())
                        if b_opi_end_idx[i].item() == 1:
                            b_opi_end_index_temp.append(i)
                            b_opi_end_prob_temp.append(b_opi_end_prob[i].item())

                b_opi_start_idx, b_opi_end_idx, b_opi_prob = filter_unpaired(
                    b_opi_start_prob_temp, b_opi_end_prob_temp, b_opi_start_index_temp, b_opi_end_index_temp,
                    8)

                # ==================Backward Q4: Opinion -> Aspect =======================
                for a in range(len(b_opi_start_idx)):
                    b_asp_query = self.tokenizer.convert_tokens_to_ids(q4)
                    # same reversed insert(5, ...) convention as Q2 — see NOTE above
                    for j in range(b_opi_start_idx[a], b_opi_end_idx[a] + 1):
                        b_asp_query.insert(5, batch.backward_opi_query[x][j].item())

                    b_asp_query_length = len(b_asp_query)

                    b_asp_query_seg = [0] * len(b_asp_query)
                    imp_start = len(b_asp_query)
                    b_asp_query = torch.tensor(b_asp_query).long().to(self.device)
                    b_asp_query = torch.cat([b_asp_query, passenge], -1).to(self.device).unsqueeze(0)

                    b_asp_query_mask = torch.ones(b_asp_query.size(1)).float().to(self.device).unsqueeze(0)
                    b_asp_query_seg += [1] * passenge.size(0)
                    b_asp_query_seg = torch.tensor(b_asp_query_seg).long().to(self.device).unsqueeze(0)

                    b_asp_start_scores, b_asp_end_scores = self.model(b_asp_query, b_asp_query_mask,
                                                                      b_asp_query_seg, 0)

                    b_asp_start_scores = F.softmax(b_asp_start_scores[0], dim=1)
                    b_asp_end_scores = F.softmax(b_asp_end_scores[0], dim=1)
                    b_asp_start_prob, b_asp_start_idx = torch.max(b_asp_start_scores, dim=1)
                    b_asp_end_prob, b_asp_end_idx = torch.max(b_asp_end_scores, dim=1)

                    b_asp_start_prob_temp, b_asp_end_prob_temp = [], []
                    b_asp_start_idx_temp, b_asp_end_idx_temp = [], []

                    for k in range(b_asp_start_idx.size(0)):
                        if b_asp_query_seg[0, k] == 1:  # keep passage positions only
                            if b_asp_start_idx[k].item() == 1:
                                b_asp_start_idx_temp.append(k)
                                b_asp_start_prob_temp.append(b_asp_start_prob[k].item())
                            if b_asp_end_idx[k].item() == 1:
                                b_asp_end_idx_temp.append(k)
                                b_asp_end_prob_temp.append(b_asp_end_prob[k].item())

                    b_asp_start_idx, b_asp_end_idx, b_asp_prob = filter_unpaired(
                        b_asp_start_prob_temp, b_asp_end_prob_temp, b_asp_start_idx_temp, b_asp_end_idx_temp,
                        imp_start=imp_start
                    )

                    for k in range(len(b_asp_start_idx)):
                        opi = [batch.backward_opi_query[x][j].item() for j in
                               range(b_opi_start_idx[a], b_opi_end_idx[a] + 1)]
                        asp = [b_asp_query[0][j].item() for j in
                               range(b_asp_start_idx[k], b_asp_end_idx[k] + 1)]

                        opi_idx = [b_opi_start_idx[a] - len(q3) - 1,
                                   b_opi_end_idx[a] - len(q3) - 1]
                        asp_idx = [b_asp_start_idx[k] - b_asp_query_length - 1,
                                   b_asp_end_idx[k] - b_asp_query_length - 1]

                        temp_prob = b_asp_prob[k] * b_opi_prob[a]
                        if asp_idx + opi_idx not in backward_pair_idx_list:
                            backward_pair_list.append([asp] + [opi])
                            backward_pair_prob.append(temp_prob)
                            backward_pair_idx_list.append(asp_idx + opi_idx)

                # ===================Q5: Category + Q6 Sentiment ======================
                if self.args.use_Forward:
                    final_asp_list, final_opi_list, \
                        final_asp_idx_list, final_opi_idx_list = pair_combine(
                            forward_pair_list,
                            forward_pair_prob,
                            forward_pair_idx_list,
                            [],
                            [],
                            [],
                            self.args.alpha,
                            self.args.beta
                        )
                elif self.args.use_Backward:
                    final_asp_list, final_opi_list, \
                        final_asp_idx_list, final_opi_idx_list = pair_combine(
                            [],
                            [],
                            [],
                            backward_pair_list,
                            backward_pair_prob,
                            backward_pair_idx_list,
                            self.args.alpha,
                            self.args.beta
                        )
                else:
                    final_asp_list, final_opi_list, \
                        final_asp_idx_list, final_opi_idx_list = pair_combine(
                            forward_pair_list,
                            forward_pair_prob,
                            forward_pair_idx_list,
                            backward_pair_list,
                            backward_pair_prob,
                            backward_pair_idx_list,
                            self.args.alpha,
                            self.args.beta
                        )

                for a in range(len(final_asp_list)):
                    predict_opinion_num = len(final_opi_list[a])  # number of opinions paired with this aspect
                    category_query = self.tokenizer.convert_tokens_to_ids(q5)
                    sentiment_query = self.tokenizer.convert_tokens_to_ids(q6)
                    # insert aspect in query (reversed insert(5, ...) convention, see NOTE above)
                    for j in range(len(final_asp_list[a])):
                        category_query.insert(5, final_asp_list[a][j])
                        sentiment_query.insert(5, final_asp_list[a][j])

                    # BUGFIX: snapshot with .copy(). The original aliased the same
                    # list objects, so opinion tokens inserted for one pair leaked
                    # into the query of every later pair of this aspect. The sibling
                    # evaluation method already uses .copy() for this backtracking.
                    temp_category = category_query.copy()
                    temp_sentiment = sentiment_query.copy()

                    for b in range(predict_opinion_num):
                        # restore the aspect-only query for each opinion (BUGFIX: .copy())
                        category_query = temp_category.copy()
                        sentiment_query = temp_sentiment.copy()

                        # insert opinion in query, 8 tokens before the end
                        for j in range(len(final_opi_list[a][b])):
                            category_query.insert(-8, final_opi_list[a][b][j])
                            sentiment_query.insert(-8, final_opi_list[a][b][j])
                        # category
                        category_query_seg = [0] * len(category_query)
                        category_query = torch.tensor(category_query).long().to(self.device)
                        category_query = torch.cat([category_query, passenge], -1).to(self.device).unsqueeze(0)
                        category_query_seg += [1] * passenge.size(0)
                        category_query_mask = torch.ones(category_query.size(1)).float().to(self.device).unsqueeze(0)
                        category_query_seg = torch.tensor(category_query_seg).long().to(self.device).unsqueeze(0)
                        # sentiment
                        sentiment_query_seg = [0] * len(sentiment_query)
                        sentiment_query = torch.tensor(sentiment_query).long().to(self.device)
                        sentiment_query = torch.cat([sentiment_query, passenge], -1).to(self.device).unsqueeze(0)
                        sentiment_query_seg += [1] * passenge.size(0)
                        sentiment_query_mask = torch.ones(sentiment_query.size(1)).float().to(self.device).unsqueeze(0)
                        sentiment_query_seg = torch.tensor(sentiment_query_seg).long().to(self.device).unsqueeze(0)

                        # inference results of category
                        category_scores = self.model(category_query, category_query_mask, category_query_seg, 1)
                        category_scores = F.softmax(category_scores, dim=1)
                        category_predicted = torch.argmax(category_scores[0], dim=0).item()

                        # inference results of sentiment
                        sentiment_scores = self.model(sentiment_query, sentiment_query_mask, sentiment_query_seg, 2)
                        sentiment_scores = F.softmax(sentiment_scores, dim=1)
                        sentiment_predicted = torch.argmax(sentiment_scores[0], dim=0).item()

                        # assemble pair / quadruple predictions, de-duplicated
                        asp, opi = [], []
                        asp.append(final_asp_idx_list[a][0])  # aspect start index
                        asp.append(final_asp_idx_list[a][1])  # aspect end index
                        opi.append(final_opi_idx_list[a][b][0])
                        opi.append(final_opi_idx_list[a][b][1])

                        asp_opi = [asp, opi]
                        quadruple_predict = [asp, category_predicted, opi, sentiment_predicted]

                        if asp not in asp_predict:
                            asp_predict.append(asp)
                        if opi not in opi_predict:
                            opi_predict.append(opi)
                        if asp_opi not in asp_opi_predict:
                            asp_opi_predict.append(asp_opi)
                        if quadruple_predict not in quadruples_predict:
                            quadruples_predict.append(quadruple_predict)

                acoxs_score.update(batch.aspects[x], batch.opinions[x], [], batch.pairs[x],
                                   [], [], [],
                                   batch.quadruples[x], [],
                                   asp_predict, opi_predict, [], asp_opi_predict,
                                   [], [], [],
                                   quadruples_predict, [])
                # BUGFIX: record sample x (was hard-coded index 0, so every json
                # entry duplicated the first sample of its batch)
                one_json = {'sentence': ' '.join(batch.sentence_token[x]), 'pred': str(quadruples_predict),
                            'gold': str(batch.quadruples[x])}
                json_res.append(one_json)

        # write once after evaluation instead of rewriting the file every batch;
        # the final file content is identical
        with open(os.path.join(self.args.output_dir, self.args.task, self.args.data_type, 'predict.json'), 'w',
                  encoding='utf-8') as fP:
            json.dump(json_res, fP, ensure_ascii=False, indent=4)
        return acoxs_score.compute()

    def inference(self, reviews):
        # 评估模式
        self.model.eval()

        # 构建类别，情感字典
        category_id = get_aspect_category(self.args.task.lower(), self.args.data_type)[-1]
        sentiment_id = get_sentiment(self.args.task.lower())[-1]

        # 准备查询问题
        q1_asp, q2_asp2opi, q3_opi2adv, q4_adv, q5_adv2opi, q6_opi2asp, q7_c, q8_s, q9_i = get_Chinese_Template()

        # 准备查询列表
        f_asp_query_list, f_asp_mask_list, f_asp_seg_list = [], [], []
        b_adv_query_list, b_adv_mask_list, b_adv_seg_list = [], [], []

        # 准备索引列表 "null + review" 在 f_temp_text 中的下标列表
        idx_list, vocab_idx_list = [], []
        idx_list2 = []

        # 前期准备工作
        for review in reviews:
            # 替换 self.tokenizer.tokenize("[CLS]方面有哪些？[SEP]null" + review)
            review = review.split(' ')
            # ***forward***
            f_temp_text = q1_asp + ["null"] + review
            f_temp_text = list(map(self.tokenizer.tokenize, f_temp_text))
            f_temp_text = [elem for outer_list in f_temp_text for elem in outer_list]
            # 生成 "null + review" 在 f_temp_text 中的下标列表
            idx_list.append([idx + len(q1_asp) for idx in range(len(f_temp_text) - len(q1_asp))])
            idx_list2.append([idx + len(q4_adv) for idx in range(len(f_temp_text) - len(q4_adv))])
            # 生成tokenizer词汇表中token对应的id
            vocab_idx_list.append(self.tokenizer.convert_tokens_to_ids(["null"] + review))
            # 1.id列表 2.掩码列表 3.标记序列（区分问题、查询）
            # 1.id列表
            f_asp_query = self.tokenizer.convert_tokens_to_ids(f_temp_text)
            f_asp_query_list.append(f_asp_query)
            # 2.掩码列表
            f_asp_mask = [1 for _ in range(len(f_asp_query))]
            f_asp_mask_list.append(f_asp_mask)
            # 3.标记序列
            q1_len = len(self.tokenizer.convert_tokens_to_ids(q1_asp))
            f_asp_seg = [0] * q1_len + [1] * (len(f_asp_query) - q1_len)
            f_asp_seg_list.append(f_asp_seg)

            # ***backward***
            b_temp_text = q4_adv + ["null"] + review
            b_temp_text = list(map(self.tokenizer.tokenize, b_temp_text))
            b_temp_text = [elem for outer_list in b_temp_text for elem in outer_list]

            b_adv_query = self.tokenizer.convert_tokens_to_ids(b_temp_text)
            b_adv_query_list.append(b_adv_query)
            b_adv_mask = [1 for _ in range(len(b_adv_query))]
            b_adv_mask_list.append(b_adv_mask)
            q4_len = len(self.tokenizer.convert_tokens_to_ids(q4_adv))
            b_adv_seg = [0] * q4_len + [1] * (len(b_adv_query) - q4_len)
            b_adv_seg_list.append(b_adv_seg)

        # 执行各个子任务
        final_output = []
        for i in range(len(f_asp_query_list)):
            # null + review 的vocab(token)_id
            vocab_idx = torch.tensor(vocab_idx_list[i]).long()
            vocab_idx = vocab_idx.to(self.device)
            # 结果存放列表：四元组、五元组
            quadruples_predict, quintuples_predict = [], []
            # 数据容器准备
            forward_triplet_list, forward_triplet_prob, forward_triplet_idx_list = [], [], []
            backward_triplet_list, backward_triplet_prob, backward_triplet_idx_list = [], [], []

            # ********************Q1: Extract Aspect********************
            # transform type list(d1) -> tensor(d2)
            f_asp_query = torch.tensor([f_asp_query_list[i]]).long()
            f_asp_query_mask = torch.tensor([f_asp_mask_list[i]]).long()
            f_asp_query_seg = torch.tensor([f_asp_seg_list[i]]).long()

            # 调用模型 获取 aspect 提取结果
            f_asp_start_scores, f_asp_end_scores = self.model(
                f_asp_query.to(self.device),
                f_asp_query_mask.to(self.device),
                f_asp_query_seg.to(self.device),
                0
            )
            # 实体识别的模型输出是三维 [0] -> [sequence_length, hidden_size] 概率分布
            f_asp_start_scores = F.softmax(f_asp_start_scores[0], dim=1)
            f_asp_end_scores = F.softmax(f_asp_end_scores[0], dim=1)
            # torch.max() dim=0 沿着行操作，对每一列找出一个最大值 dim=1 沿着列操作 对每一行找出一个最大值
            f_asp_start_prob, f_asp_start_idx = torch.max(f_asp_start_scores, dim=1)
            f_asp_end_prob, f_asp_end_idx = torch.max(f_asp_end_scores, dim=1)
            # 方面词实体预测结果暂存表
            f_asp_start_prob_temp, f_asp_end_prob_temp = [], []
            f_asp_start_idx_temp, f_asp_end_idx_temp = [], []

            # 初筛：选择符合条件的下标
            for idx in range(f_asp_start_idx.size(0)):
                if idx in idx_list[i]:
                    if f_asp_start_idx[idx].item() == 1:  # 遍历找到最大可能对应的下标索引
                        f_asp_start_idx_temp.append(idx)
                        f_asp_start_prob_temp.append(f_asp_start_prob[idx].item())
                    if f_asp_end_idx[idx].item() == 1:
                        f_asp_end_idx_temp.append(idx)
                        f_asp_end_prob_temp.append(f_asp_end_prob[idx].item())

            # 再筛 得到最终结果和一个整体的可能性
            f_asp_start_idx, f_asp_end_idx, f_asp_prob = filter_unpaired(
                f_asp_start_prob_temp, f_asp_end_prob_temp, f_asp_start_idx_temp, f_asp_end_idx_temp,
                imp_start=8
            )

            # print("forward aspect predict：")
            # for mj in range(len(f_asp_start_idx_temp)):
            #     print(f'start position: [{f_asp_start_idx_temp[mj] - 8}] start prob: [{f_asp_start_prob_temp[mj]}]')
            # for mj in range(len(f_asp_end_idx_temp)):
            #     print(f'end position: [{f_asp_end_idx_temp[mj] - 8}] start prob: [{f_asp_end_prob_temp[mj]}]')
            # print("filter_unpaired...")
            # for mj in range(len(f_asp_start_idx)):
            #     print(f'({f_asp_start_idx[mj] - 8}, {f_asp_end_idx[mj] - 8}) prob: [{f_asp_prob[mj]}]')
            # print('-------------------------------------------------------------------------------------')

            # ********************Q2: Aspect->Opinion ********************
            for a in range(len(f_asp_start_idx)):
                # 获取方面项的 token id 列表
                asp_token_ids = [f_asp_query[0][j].item() for j in
                                 range(f_asp_start_idx[a], f_asp_end_idx[a] + 1)]
                f_opi_query = (
                        self.q2_token_ids[:3] +
                        asp_token_ids +
                        self.q2_token_ids[-8:]
                )
                # tokens = self.tokenizer.convert_ids_to_tokens(f_opi_query)
                # print(f"f_opi_query:{tokens}")

                f_opi_query_length = len(f_opi_query)
                f_opi_query_seg = [0] * len(f_opi_query)
                imp_start = len(f_opi_query)
                f_opi_query = torch.tensor(f_opi_query).long().to(self.device)
                f_opi_query = torch.cat([f_opi_query, vocab_idx], -1).to(self.device).unsqueeze(0)

                f_opi_query_mask = torch.ones(f_opi_query.size(1)).float().to(self.device).unsqueeze(0)
                f_opi_query_seg += [1] * vocab_idx.size(0)
                f_opi_query_seg = torch.tensor(f_opi_query_seg).long().to(self.device).unsqueeze(0)

                f_opi_start_scores, f_opi_end_scores = self.model(f_opi_query, f_opi_query_mask, f_opi_query_seg, 0)

                f_opi_start_scores = F.softmax(f_opi_start_scores[0], dim=1)
                f_opi_end_scores = F.softmax(f_opi_end_scores[0], dim=1)
                f_opi_start_prob, f_opi_start_idx = torch.max(f_opi_start_scores, dim=1)
                f_opi_end_prob, f_opi_end_idx = torch.max(f_opi_end_scores, dim=1)

                f_opi_start_prob_temp, f_opi_end_prob_temp = [], []
                f_opi_start_idx_temp, f_opi_end_idx_temp = [], []

                for k in range(f_opi_start_idx.size(0)):
                    if f_opi_query_seg[0, k] == 1:
                        if f_opi_start_idx[k].item() == 1:
                            f_opi_start_idx_temp.append(k)
                            f_opi_start_prob_temp.append(f_opi_start_prob[k].item())
                        if f_opi_end_idx[k].item() == 1:
                            f_opi_end_idx_temp.append(k)
                            f_opi_end_prob_temp.append(f_opi_end_prob[k].item())

                f_opi_start_idx, f_opi_end_idx, f_opi_prob = filter_unpaired(
                    f_opi_start_prob_temp, f_opi_end_prob_temp, f_opi_start_idx_temp, f_opi_end_idx_temp,
                    imp_start=imp_start
                )

                # print("forward aspect -> opinion predict：")
                # for mj in range(len(f_opi_start_idx_temp)):
                #     print(
                #         f'start position: [{f_opi_start_idx_temp[mj] - imp_start}] start prob: [{f_opi_start_prob_temp[mj]}]')
                # for mj in range(len(f_opi_end_idx_temp)):
                #     print(
                #         f'end position: [{f_opi_end_idx_temp[mj] - imp_start}] end prob: [{f_opi_end_prob_temp[mj]}]')
                # print("filter_unpaired...")
                # for mj in range(len(f_opi_start_idx)):
                #     print(f'({f_opi_start_idx[mj] - imp_start}, {f_opi_end_idx[mj] - imp_start}) prob: [{f_opi_prob[mj]}]')
                # print('-------------------------------------------------------------------------------------')

                # ********************Q3: Aspect + Opinion -> Adverb ********************
                # Template tokens:
                # ["[CLS]", "关", "于", "方", "面", "项", "感", "观", "的", "副", "词", "有", "哪", "些", "？", "[SEP]"]
                # For every predicted opinion span, build the Q3 question by splicing
                # the aspect and opinion token ids into the template.
                for b in range(len(f_opi_start_idx)):
                    asp_token_ids = [f_asp_query[0][j].item() for j in
                                     range(f_asp_start_idx[a], f_asp_end_idx[a] + 1)]
                    opi_token_ids = [f_opi_query[0][j].item() for j in range(f_opi_start_idx[b], f_opi_end_idx[b] + 1)]
                    f_adv_query = (
                            self.q3_token_ids[:3] +
                            asp_token_ids +
                            opi_token_ids +
                            self.q3_token_ids[-8:]
                    )
                    # tokens = self.tokenizer.convert_ids_to_tokens(f_adv_query)
                    # print(f"f_adv_query:{tokens}")

                    # Question length; used below to map span indices back into the review.
                    f_adv_query_length = len(f_adv_query)

                    f_adv_query_seg = [0] * len(f_adv_query)  # segment 0 = question part
                    imp_start = len(f_adv_query)
                    f_adv_query = torch.tensor(f_adv_query).long().to(self.device)
                    f_adv_query = torch.cat([f_adv_query, vocab_idx], -1).to(self.device).unsqueeze(0)

                    f_adv_query_mask = torch.ones(f_adv_query.size(1)).float().to(self.device).unsqueeze(0)
                    f_adv_query_seg += [1] * vocab_idx.size(0)  # still a plain Python list here
                    f_adv_query_seg = torch.tensor(f_adv_query_seg).long().to(self.device).unsqueeze(0)

                    # Task id 0: span-extraction head.
                    f_adv_start_scores, f_adv_end_scores = self.model(f_adv_query, f_adv_query_mask, f_adv_query_seg, 0)

                    f_adv_start_scores = F.softmax(f_adv_start_scores[0], dim=1)
                    f_adv_end_scores = F.softmax(f_adv_end_scores[0], dim=1)
                    f_adv_start_prob, f_adv_start_idx = torch.max(f_adv_start_scores, dim=1)
                    f_adv_end_prob, f_adv_end_idx = torch.max(f_adv_end_scores, dim=1)

                    f_adv_start_prob_temp, f_adv_end_prob_temp = [], []
                    f_adv_start_idx_temp, f_adv_end_idx_temp = [], []

                    # Keep candidate boundaries (class 1) inside the review segment.
                    for k in range(f_adv_start_idx.size(0)):
                        if f_adv_query_seg[0, k] == 1:
                            if f_adv_start_idx[k].item() == 1:
                                f_adv_start_idx_temp.append(k)
                                f_adv_start_prob_temp.append(f_adv_start_prob[k].item())
                            if f_adv_end_idx[k].item() == 1:
                                f_adv_end_idx_temp.append(k)
                                f_adv_end_prob_temp.append(f_adv_end_prob[k].item())

                    f_adv_start_idx, f_adv_end_idx, f_adv_prob = filter_unpaired(
                        f_adv_start_prob_temp, f_adv_end_prob_temp, f_adv_start_idx_temp, f_adv_end_idx_temp,
                        imp_start=imp_start
                    )

                    # print("forward Aspect + Opinion -> Adverb：")
                    # for mj in range(len(f_adv_start_idx_temp)):
                    #     print(
                    #         f'start position: [{f_adv_start_idx_temp[mj] - imp_start}] start prob: [{f_adv_start_prob_temp[mj]}]')
                    # for mj in range(len(f_adv_end_idx_temp)):
                    #     print(
                    #         f'end position: [{f_adv_end_idx_temp[mj] - imp_start}] end prob: [{f_adv_end_prob_temp[mj]}]')
                    # print("filter_unpaired...")
                    # for mj in range(len(f_adv_start_idx)):
                    #     print(f'({f_adv_start_idx[mj] - imp_start}, {f_adv_end_idx[mj] - imp_start}) prob: [{f_adv_prob[mj]}]')
                    # print('-------------------------------------------------------------------------------------')

                    # Record each (aspect, opinion, adverb) triplet found on the forward pass.
                    for k in range(len(f_adv_start_idx)):
                        asp = [f_asp_query[0][j].item() for j in range(f_asp_start_idx[a], f_asp_end_idx[a] + 1)]
                        opi = [f_opi_query[0][j].item() for j in range(f_opi_start_idx[b], f_opi_end_idx[b] + 1)]
                        adv = [f_adv_query[0][j].item() for j in range(f_adv_start_idx[k], f_adv_end_idx[k] + 1)]

                        # Convert absolute token positions to review-relative indices;
                        # a null span (0,0) maps to (-1, -1).
                        # q1_asp is defined earlier in this method — presumably the Q1
                        # question prefix; verify against the code above this chunk.
                        asp_idx = [f_asp_start_idx[a] - len(q1_asp) - 1, f_asp_end_idx[a] - len(q1_asp) - 1]
                        opi_idx = [f_opi_start_idx[b] - f_opi_query_length - 1,
                                   f_opi_end_idx[b] - f_opi_query_length - 1]
                        adv_idx = [f_adv_start_idx[k] - f_adv_query_length - 1,
                                   f_adv_end_idx[k] - f_adv_query_length - 1]

                        # Joint probability of the triplet (product of span probabilities).
                        temp_prob = f_asp_prob[a] * f_opi_prob[b] * f_adv_prob[k]

                        # Deduplicate by the concatenated index list.
                        if asp_idx + opi_idx + adv_idx not in forward_triplet_idx_list:
                            forward_triplet_list.append([asp] + [opi] + [adv])
                            forward_triplet_prob.append(temp_prob)
                            forward_triplet_idx_list.append(asp_idx + opi_idx + adv_idx)
            # ********************Q4: Extract Adverb ********************
            # Backward pass starts from adverbs; the Q4 query/mask/seg were pre-built
            # per sample (b_adv_*_list) by the dataloader side.
            b_adv_query = torch.tensor([b_adv_query_list[i]]).long()
            b_adv_query_mask = torch.tensor([b_adv_mask_list[i]]).long()
            b_adv_query_seg = torch.tensor([b_adv_seg_list[i]]).long()

            # Task id 0: span-extraction head.
            b_adv_start_scores, b_adv_end_scores = self.model(
                b_adv_query.to(self.device),
                b_adv_query_mask.to(self.device),
                b_adv_query_seg.to(self.device),
                0
            )

            b_adv_start_scores = F.softmax(b_adv_start_scores[0], dim=1)
            b_adv_end_scores = F.softmax(b_adv_end_scores[0], dim=1)
            b_adv_start_prob, b_adv_start_idx = torch.max(b_adv_start_scores, dim=1)
            b_adv_end_prob, b_adv_end_idx = torch.max(b_adv_end_scores, dim=1)

            b_adv_start_prob_temp, b_adv_end_prob_temp = [], []
            b_adv_start_idx_temp, b_adv_end_idx_temp = [], []

            # idx_list2[i] presumably holds the review-token positions for sample i —
            # only boundaries inside the review are kept. TODO confirm at the caller.
            for k in range(b_adv_start_idx.size(0)):
                if k in idx_list2[i]:
                    if b_adv_start_idx[k].item() == 1:
                        b_adv_start_idx_temp.append(k)
                        b_adv_start_prob_temp.append(b_adv_start_prob[k].item())
                    if b_adv_end_idx[k].item() == 1:
                        b_adv_end_idx_temp.append(k)
                        b_adv_end_prob_temp.append(b_adv_end_prob[k].item())
            # imp_start=8: fixed Q4 question length — NOTE(review): magic number,
            # presumably len(q4) incl. special tokens; confirm against the template.
            b_adv_start_idx, b_adv_end_idx, b_adv_prob = filter_unpaired(
                b_adv_start_prob_temp, b_adv_end_prob_temp, b_adv_start_idx_temp, b_adv_end_idx_temp,
                imp_start=8
            )

            # print("backward Extract Adverb：")
            # for mj in range(len(b_adv_start_idx_temp)):
            #     print(
            #         f'start position: [{b_adv_start_idx_temp[mj] - 8}] start prob: [{b_adv_start_prob_temp[mj]}]')
            # for mj in range(len(b_adv_end_idx_temp)):
            #     print(
            #         f'end position: [{b_adv_end_idx_temp[mj] - 8}] end prob: [{b_adv_end_prob_temp[mj]}]')
            # print("filter_unpaired...")
            # for mj in range(len(b_adv_start_idx)):
            #     print(f'({b_adv_start_idx[mj] - 8}, {b_adv_end_idx[mj] - 8}) prob: [{b_adv_prob[mj]}]')
            # print('-------------------------------------------------------------------------------------')

            # ********************Q5: Adverb -> Opinion ********************
            # For every extracted adverb span, build the Q5 question by splicing the
            # adverb token ids into the template.
            for a in range(len(b_adv_start_idx)):
                adv_token_ids = [b_adv_query[0][j].item()
                                 for j in range(b_adv_start_idx[a], b_adv_end_idx[a] + 1)]
                b_opi_query = (
                        self.q5_token_ids[:1] +
                        adv_token_ids +
                        self.q5_token_ids[-10:]
                )
                # tokens = self.tokenizer.convert_ids_to_tokens(b_opi_query)
                # print(f"b_opi_query:{tokens}")

                # Question length; used below to map span indices back into the review.
                b_opi_query_length = len(b_opi_query)

                b_opi_query_seg = [0] * len(b_opi_query)  # segment 0 = question part
                imp_start = len(b_opi_query)
                b_opi_query = torch.tensor(b_opi_query).long().to(self.device)
                b_opi_query = torch.cat([b_opi_query, vocab_idx], -1).to(self.device).unsqueeze(0)

                b_opi_query_mask = torch.ones(b_opi_query.size(1)).float().to(self.device).unsqueeze(0)
                b_opi_query_seg += [1] * vocab_idx.size(0)  # still a plain Python list here
                b_opi_query_seg = torch.tensor(b_opi_query_seg).long().to(self.device).unsqueeze(0)

                # Task id 0: span-extraction head.
                b_opi_start_scores, b_opi_end_scores = self.model(b_opi_query, b_opi_query_mask,
                                                                  b_opi_query_seg, 0)

                b_opi_start_scores = F.softmax(b_opi_start_scores[0], dim=1)
                b_opi_end_scores = F.softmax(b_opi_end_scores[0], dim=1)
                b_opi_start_prob, b_opi_start_idx = torch.max(b_opi_start_scores, dim=1)
                b_opi_end_prob, b_opi_end_idx = torch.max(b_opi_end_scores, dim=1)

                b_opi_start_prob_temp, b_opi_end_prob_temp = [], []
                b_opi_start_idx_temp, b_opi_end_idx_temp = [], []

                # Keep candidate boundaries (class 1) inside the review segment.
                for k in range(b_opi_start_idx.size(0)):
                    if b_opi_query_seg[0, k] == 1:  # equivalent to [0][k]
                        if b_opi_start_idx[k].item() == 1:
                            b_opi_start_idx_temp.append(k)
                            b_opi_start_prob_temp.append(b_opi_start_prob[k].item())
                        if b_opi_end_idx[k].item() == 1:
                            b_opi_end_idx_temp.append(k)
                            b_opi_end_prob_temp.append(b_opi_end_prob[k].item())

                b_opi_start_idx, b_opi_end_idx, b_opi_prob = filter_unpaired(
                    b_opi_start_prob_temp, b_opi_end_prob_temp, b_opi_start_idx_temp, b_opi_end_idx_temp,
                    imp_start=imp_start
                )

                # print("backward Adverb -> Opinion：")
                # for mj in range(len(b_opi_start_idx_temp)):
                #     print(
                #         f'start position: [{b_opi_start_idx_temp[mj] - imp_start}] start prob: [{b_opi_start_prob_temp[mj]}]')
                # for mj in range(len(b_opi_end_idx_temp)):
                #     print(
                #         f'end position: [{b_opi_end_idx_temp[mj] - imp_start}] end prob: [{b_opi_end_prob_temp[mj]}]')
                # print("filter_unpaired...")
                # for mj in range(len(b_opi_start_idx)):
                #     print(
                #         f'({b_opi_start_idx[mj] - imp_start}, {b_opi_end_idx[mj] - imp_start}) prob: [{b_opi_prob[mj]}]')
                # print('-------------------------------------------------------------------------------------')

                # ********************Q6: Adverb + Opinion->Aspect ********************
                # ["[CLS]", "这", "个", "副", "词", "5和", "意", "见", "-10修", "饰", "的", "方", "面", "有", "哪", "些", "？", "[SEP]"]
                # (the "5和"/"-10修" fragments above mark where the q6 template is
                # sliced: prefix [:1], suffix [-11:])
                for b in range(len(b_opi_start_idx)):
                    adv_token_ids = [b_adv_query[0][j].item() for j in
                                     range(b_adv_start_idx[a], b_adv_end_idx[a] + 1)]
                    opi_token_ids = [b_opi_query[0][j].item() for j in
                                     range(b_opi_start_idx[b], b_opi_end_idx[b] + 1)]

                    b_asp_query = (
                            self.q6_token_ids[:1] +
                            adv_token_ids +
                            opi_token_ids +
                            self.q6_token_ids[-11:]
                    )
                    # tokens = self.tokenizer.convert_ids_to_tokens(b_asp_query)
                    # print(f"b_asp_query:{tokens}")

                    # Question length; used below to map span indices back into the review.
                    b_asp_query_length = len(b_asp_query)

                    b_asp_query_seg = [0] * len(b_asp_query)  # segment 0 = question part
                    imp_start = len(b_asp_query)
                    # NOTE(review): the parallel branches call .long() here (see the
                    # Q2/Q3/Q5 queries); this one omits it. Benign in practice since
                    # torch.tensor over Python ints yields int64 — but confirm.
                    b_asp_query = torch.tensor(b_asp_query).to(self.device)
                    b_asp_query = torch.cat([b_asp_query, vocab_idx], -1).to(self.device).unsqueeze(0)

                    b_asp_query_mask = torch.ones(b_asp_query.size(1)).float().to(self.device).unsqueeze(0)
                    b_asp_query_seg += [1] * vocab_idx.size(0)  # segment 1 = review part
                    b_asp_query_seg = torch.tensor(b_asp_query_seg).long().to(self.device).unsqueeze(0)

                    # Task id 0: span-extraction head.
                    b_asp_start_scores, b_asp_end_scores = self.model(b_asp_query, b_asp_query_mask, b_asp_query_seg, 0)

                    b_asp_start_scores = F.softmax(b_asp_start_scores[0], dim=1)
                    b_asp_end_scores = F.softmax(b_asp_end_scores[0], dim=1)
                    b_asp_start_prob, b_asp_start_idx = torch.max(b_asp_start_scores, dim=1)
                    b_asp_end_prob, b_asp_end_idx = torch.max(b_asp_end_scores, dim=1)

                    b_asp_start_prob_temp, b_asp_end_prob_temp = [], []
                    b_asp_start_idx_temp, b_asp_end_idx_temp = [], []

                    # Keep candidate boundaries (class 1) inside the review segment.
                    for k in range(b_asp_start_idx.size(0)):
                        if b_asp_query_seg[0, k] == 1:
                            if b_asp_start_idx[k].item() == 1:
                                b_asp_start_idx_temp.append(k)
                                b_asp_start_prob_temp.append(b_asp_start_prob[k].item())
                            if b_asp_end_idx[k].item() == 1:
                                b_asp_end_idx_temp.append(k)
                                b_asp_end_prob_temp.append(b_asp_end_prob[k].item())

                    b_asp_start_idx, b_asp_end_idx, b_asp_prob = filter_unpaired(
                        b_asp_start_prob_temp, b_asp_end_prob_temp, b_asp_start_idx_temp, b_asp_end_idx_temp,
                        imp_start=imp_start
                    )

                    # print("backward Adverb + Opinion->Aspect：")
                    # for mj in range(len(b_asp_start_idx_temp)):
                    #     print(
                    #         f'start position: [{b_asp_start_idx_temp[mj] - imp_start}] start prob: [{b_asp_start_prob_temp[mj]}]')
                    # for mj in range(len(b_asp_end_idx_temp)):
                    #     print(f'end position: [{b_asp_end_idx_temp[mj] - imp_start}] start prob: [{b_asp_end_prob_temp[mj]}]')
                    # print("filter_unpaired...")
                    # for mj in range(len(b_asp_start_idx)):
                    #     print(f'({b_asp_start_idx[mj] - imp_start}, {b_asp_end_idx[mj] - imp_start}) prob: [{b_asp_prob[mj]}]')
                    # print('-------------------------------------------------------------------------------------')

                    # Record each (aspect, opinion, adverb) triplet found on the backward pass.
                    for k in range(len(b_asp_start_idx)):
                        adv = [b_adv_query[0][j].item() for j in range(b_adv_start_idx[a], b_adv_end_idx[a] + 1)]
                        opi = [b_opi_query[0][j].item() for j in range(b_opi_start_idx[b], b_opi_end_idx[b] + 1)]
                        asp = [b_asp_query[0][j].item() for j in range(b_asp_start_idx[k], b_asp_end_idx[k] + 1)]

                        # Map absolute token positions to review-relative indices.
                        # q4_adv is defined earlier in this method — presumably the Q4
                        # question prefix; verify against the code above this chunk.
                        adv_idx = [b_adv_start_idx[a] - len(q4_adv) - 1, b_adv_end_idx[a] - len(q4_adv) - 1]
                        opi_idx = [b_opi_start_idx[b] - b_opi_query_length - 1,
                                   b_opi_end_idx[b] - b_opi_query_length - 1]
                        asp_idx = [b_asp_start_idx[k] - b_asp_query_length - 1,
                                   b_asp_end_idx[k] - b_asp_query_length - 1]

                        # Joint probability of the triplet; dedupe by index list.
                        temp_prob = b_asp_prob[k] * b_opi_prob[b] * b_adv_prob[a]
                        if asp_idx + opi_idx + adv_idx not in backward_triplet_idx_list:
                            backward_triplet_list.append([asp] + [opi] + [adv])
                            backward_triplet_prob.append(temp_prob)
                            backward_triplet_idx_list.append(asp_idx + opi_idx + adv_idx)

            # ********************Q7: Category + Q8: Sentiment + Q9: Intensity********************
            # NOTE(review): the forward-only / backward-only evaluation paths are not
            # implemented — only the combined (else) branch does any work.
            if self.args.use_Forward:
                pass
            elif self.args.use_Backward:
                pass
            else:
                # Merge forward and backward triplets into a consensus set, weighted by
                # alpha/beta/delta (see tools.triplet_combine for the scoring rule).
                final_asp_list, final_opi_list, final_adv_list, \
                    final_asp_idx_list, final_opi_idx_list, final_adv_idx_list = triplet_combine(
                        forward_triplet_list,
                        forward_triplet_prob,
                        forward_triplet_idx_list,
                        backward_triplet_list,
                        backward_triplet_prob,
                        backward_triplet_idx_list,
                        self.args.alpha,
                        self.args.beta,
                        self.args.delta
                    )

                def find_mutual_complement_asp(final_asp_list, forward_triplet_list, backward_triplet_list):
                    """Return aspects from final_asp_list that appear in exactly one of
                    the forward/backward triplet sets, tagged with the side they came
                    from, as (aspect, "forward"/"backward") tuples."""
                    fwd_asps = [triplet[0] for triplet in forward_triplet_list]
                    bwd_asps = [triplet[0] for triplet in backward_triplet_list]

                    complements = []
                    for candidate in final_asp_list:
                        on_fwd = candidate in fwd_asps
                        on_bwd = candidate in bwd_asps
                        # Present on exactly one side -> "complemented" by that side;
                        # present on both (or neither) -> not of interest here.
                        if on_fwd != on_bwd:
                            side = "forward" if on_fwd else "backward"
                            complements.append((candidate, side))

                    return complements

                result = find_mutual_complement_asp(final_asp_list, forward_triplet_list, backward_triplet_list)

                # Report aspects that only one direction recovered.
                for asp, side in result:
                    print(f"🧩 ASP {asp} 是由 {side} 补出来的")

                for a in range(len(final_asp_list)):
                    predict_opinion_num = len(final_opi_list[a])  # opinions paired with this aspect

                    # Q7/Q8/Q9 queries all start from the template prefix + aspect tokens.
                    category_query = self.q7_token_ids[:1]
                    sentiment_query = self.q8_token_ids[:1]
                    intensity_query = self.q9_token_ids[:1]
                    # insert/replace aspect in query
                    category_query += final_asp_list[a]
                    sentiment_query += final_asp_list[a]
                    intensity_query += final_asp_list[a]
                    category_query += self.q7_token_ids[4:5]
                    sentiment_query += self.q8_token_ids[4:5]
                    # Snapshot the per-aspect prefixes so inner loops can rebuild from them.
                    temp_category = category_query.copy()
                    temp_sentiment = sentiment_query.copy()
                    temp_intensity = intensity_query.copy()
                    for b in range(predict_opinion_num):
                        predict_adverb_num = len(final_adv_list[a][b])
                        # Reset to the aspect-only prefix for this opinion.
                        category_query = temp_category.copy()
                        sentiment_query = temp_sentiment.copy()
                        # insert opinion in query
                        category_query += final_opi_list[a][b]
                        sentiment_query += final_opi_list[a][b]
                        category_query += self.q7_token_ids[-8:]
                        sentiment_query += self.q8_token_ids[-8:]
                        # tokens = self.tokenizer.convert_ids_to_tokens(category_query)
                        # print(f"category_query:{tokens}")

                        # category: build question+review input (same scheme as the span queries)
                        category_query_seg = [0] * len(category_query)
                        category_query = torch.tensor(category_query).long().to(self.device)
                        category_query = torch.cat([category_query, vocab_idx], -1).to(self.device).unsqueeze(0)
                        category_query_seg += [1] * vocab_idx.size(0)
                        category_query_mask = torch.ones(category_query.size(1)).float().to(self.device).unsqueeze(0)
                        category_query_seg = torch.tensor(category_query_seg).long().to(self.device).unsqueeze(0)
                        # sentiment
                        sentiment_query_seg = [0] * len(sentiment_query)
                        sentiment_query = torch.tensor(sentiment_query).long().to(self.device)
                        sentiment_query = torch.cat([sentiment_query, vocab_idx], -1).to(self.device).unsqueeze(0)
                        sentiment_query_seg += [1] * vocab_idx.size(0)
                        sentiment_query_mask = torch.ones(sentiment_query.size(1)).float().to(self.device).unsqueeze(0)
                        sentiment_query_seg = torch.tensor(sentiment_query_seg).long().to(self.device).unsqueeze(0)

                        # inference results of category (task id 1: category head)
                        category_scores = self.model(category_query, category_query_mask, category_query_seg, 1)
                        category_scores = F.softmax(category_scores, dim=1)
                        category_predicted = torch.argmax(category_scores[0], dim=0).item()

                        # inference results of sentiment (task id 2: sentiment head)
                        sentiment_scores = self.model(sentiment_query, sentiment_query_mask, sentiment_query_seg, 2)
                        sentiment_scores = F.softmax(sentiment_scores, dim=1)
                        sentiment_predicted = torch.argmax(sentiment_scores[0], dim=0).item()

                        # Adverbs attached to this opinion: one Q9 (intensity) query each.
                        for c in range(predict_adverb_num):
                            # Reset to the aspect-only prefix.
                            intensity_query = temp_intensity.copy()
                            # Re-assemble: aspect + adverb + opinion + template suffix.
                            intensity_query += final_adv_list[a][b][c]
                            intensity_query += final_opi_list[a][b]
                            intensity_query += self.q9_token_ids[-8:]
                            # tokens = self.tokenizer.convert_ids_to_tokens(intensity_query)
                            # print(f"intensity_query:{tokens}")

                            intensity_query_seg = [0] * len(intensity_query)
                            intensity_query = torch.tensor(intensity_query).long().to(self.device)
                            intensity_query = torch.cat([intensity_query, vocab_idx], -1).to(self.device).unsqueeze(0)
                            intensity_query_seg += [1] * vocab_idx.size(0)
                            intensity_query_mask = torch.ones(intensity_query.size(1)).float().to(
                                self.device).unsqueeze(0)
                            intensity_query_seg = torch.tensor(intensity_query_seg).long().to(self.device).unsqueeze(0)

                            # inference results of intensity (task id 3: intensity head)
                            intensity_scores = self.model(intensity_query, intensity_query_mask, intensity_query_seg, 3)
                            intensity_scores = F.softmax(intensity_scores, dim=1)
                            intensity_predicted = torch.argmax(intensity_scores[0], dim=0).item()

                            # Assemble quadruple / quintuple predictions.
                            asp, opi, adv = [], [], []
                            asp.append(final_asp_idx_list[a][0])  # aspect start index
                            asp.append(final_asp_idx_list[a][1])  # aspect end index
                            opi.append(final_opi_idx_list[a][b][0])
                            opi.append(final_opi_idx_list[a][b][1])
                            adv.append(final_adv_idx_list[a][b][c][0])
                            adv.append(final_adv_idx_list[a][b][c][1])
                            adv.append(intensity_predicted)  # adverb carries [start, end, intensity]

                            quadruple_predict = [asp, category_predicted, opi, sentiment_predicted]
                            quintuple_predict = [asp, category_predicted, opi, adv, sentiment_predicted]

                            # Deduplicate before recording.
                            if quadruple_predict not in quadruples_predict:
                                quadruples_predict.append(quadruple_predict)
                            if quintuple_predict not in quintuples_predict:
                                quintuples_predict.append(quintuple_predict)

                # Still inside the `else` (combined) branch.
                # print_quadruples_predict = []
                print_quintuples_predict = []
                review_list = reviews[i].split(' ')

                # Each word may be split into several subword tokens; build a map from
                # subword position -> word position so span indices can be rendered as text.
                tokenized_review = list(map(self.tokenizer.tokenize, review_list))
                subword_lengths = list(map(len, tokenized_review))
                token_start_idxs = np.cumsum([0] + subword_lengths[:-1])
                tokenized2word = {}
                for k in range(len(review_list)):
                    for t in range(token_start_idxs[k], token_start_idxs[k] + subword_lengths[k]):
                        tokenized2word[t] = k
                # for q in quadruples_predict:
                #     if q[0] == [-1, -1]:
                #         asp = 'NULL'
                #     else:
                #         asp = ' '.join(review_list[tokenized2word[q[0][0]]: tokenized2word[q[0][-1]] + 1])
                #     if q[2] == [-1, -1]:
                #         opi = 'NULL'
                #     else:
                #         opi = ' '.join(review_list[tokenized2word[q[2][0]]:tokenized2word[q[2][-1]] + 1])
                #     category, sentiment = category_id[q[1]], sentiment_id[q[-1]]
                #     print_quadruples_predict.append([asp, category, opi, sentiment])
                # print(f"`{reviews[i]}` 四元组抽取结果：`{print_quadruples_predict}`")

                # Map each predicted quintuple's subword indices back to words; [-1, -1]
                # marks an implicit (NULL) aspect/opinion/adverb.
                for q in quintuples_predict:
                    if q[0] == [-1, -1]:
                        asp = 'NULL'
                    else:
                        asp = ' '.join(review_list[tokenized2word[q[0][0]]: tokenized2word[q[0][-1]] + 1])
                    if q[2] == [-1, -1]:
                        opi = 'NULL'
                    else:
                        opi = ' '.join(review_list[tokenized2word[q[2][0]]:tokenized2word[q[2][-1]] + 1])
                    if q[3][:2] == [-1, -1]:
                        adv = 'NULL'
                    else:
                        adv = ' '.join(review_list[tokenized2word[q[3][0]]:tokenized2word[q[3][1]] + 1])
                    # Translate id -> label; q[3][-1] is the predicted intensity id.
                    category, sentiment = category_id[q[1]], sentiment_id[q[-1]]
                    print_quintuples_predict.append([asp, category, opi, adv, q[3][-1], sentiment])
                # NOTE(review): once the use_Forward / use_Backward branches above are
                # implemented, the reporting below should move one level out.
                print(f"评论：{reviews[i]} \n五元组抽取结果：{print_quintuples_predict}\n")
                # ====================== new inserting =============================
                # print(f"no: {i + 1} sentence：{reviews[i]} label: {quintuples_predict}")
                final_output.append(f"评论：{reviews[i]}\n预测结果：{print_quintuples_predict}\nlabel: {quintuples_predict}")
        with open("./model_inference.txt", "w", encoding="UTF-8") as f:
            f.write("\n".join(final_output))

    def get_train_loss(self, batch):
        """Compute the training loss for one batch (continues beyond this excerpt).

        Trims every padded query/answer tensor in `batch` to the longest real
        sequence of its kind within the batch, then gathers the per-sample slices
        into lists for the subsequent forward passes and loss computation.

        Args:
            batch: a batch object exposing per-task query/mask/seg/answer tensors
                plus the per-sample counts (`*_nums`) and lengths (`*_lens`).
        """
        forward_opi_nums, forward_adv_nums, backward_opi_nums, backward_asp_nums, pairs_nums = \
            batch.forward_opi_nums, batch.forward_adv_nums, batch.backward_opi_nums, batch.backward_asp_nums, \
            batch.pairs_nums
        # Longest input of each kind within this batch.
        max_f_asp_len, max_f_opi_lens, max_f_adv_lens, max_b_adv_len, max_b_opi_lens, max_b_asp_lens = \
            max(batch.forward_aspect_len), \
            max([max(batch.forward_opinion_lens[b]) for b in range(self.args.train_batch_size)]), \
            max([max(batch.forward_adverb_lens[b]) for b in range(self.args.train_batch_size)]), \
            max(batch.backward_adverb_len), \
            max([max(batch.backward_opinion_lens[b]) for b in range(self.args.train_batch_size)]), \
            max([max(batch.backward_aspect_lens[b]) for b in range(self.args.train_batch_size)])
        max_sent_cate_lens = max([max(batch.sentiment_category_lens[b]) for b in range(self.args.train_batch_size)])
        max_intensity_lens = max([max(batch.intensity_lens[b]) for b in range(self.args.train_batch_size)])

        # Slice the inputs to the max useful length (drop padding we know is unused).
        forward_asp_query = batch.forward_asp_query[:, :max_f_asp_len]
        forward_asp_query_mask = batch.forward_asp_query_mask[:, :max_f_asp_len]
        forward_asp_query_seg = batch.forward_asp_query_seg[:, :max_f_asp_len]
        forward_asp_answer_start = batch.forward_asp_answer_start[:, :max_f_asp_len]
        forward_asp_answer_end = batch.forward_asp_answer_end[:, :max_f_asp_len]

        backward_adv_query = batch.backward_adv_query[:, :max_b_adv_len]
        backward_adv_query_mask = batch.backward_adv_query_mask[:, :max_b_adv_len]
        backward_adv_query_seg = batch.backward_adv_query_seg[:, :max_b_adv_len]
        backward_adv_answer_start = batch.backward_adv_answer_start[:, :max_b_adv_len]
        backward_adv_answer_end = batch.backward_adv_answer_end[:, :max_b_adv_len]

        forward_opi_query, forward_opi_query_mask, forward_opi_query_seg = [], [], []
        forward_opi_answer_start, forward_opi_answer_end = [], []
        forward_adv_query, forward_adv_query_mask, forward_adv_query_seg = [], [], []
        forward_adv_answer_start, forward_adv_answer_end = [], []
        backward_opi_query, backward_opi_query_mask, backward_opi_query_seg = [], [], []
        backward_opi_answer_start, backward_opi_answer_end = [], []
        backward_asp_query, backward_asp_query_mask, backward_asp_query_seg = [], [], []
        backward_asp_answer_start, backward_asp_answer_end = [], []

        category_query, category_query_mask, category_query_seg, category_answer = [], [], [], []
        sentiment_query, sentiment_query_mask, sentiment_query_seg, sentiment_answer = [], [], [], []
        intensity_query, intensity_query_mask, intensity_query_seg, intensity_answer = [], [], [], []

        for b in range(self.args.train_batch_size):
            # Slice per-sample tensors into lists; max_f_opi_lens etc. are the longest
            # sequence lengths of this batch, *_nums the real (unpadded) counts.
            forward_opi_query.append(batch.forward_opi_query[b][:forward_opi_nums[b], :max_f_opi_lens])
            forward_opi_query_mask.append(batch.forward_opi_query_mask[b][:forward_opi_nums[b], :max_f_opi_lens])
            forward_opi_query_seg.append(batch.forward_opi_query_seg[b][:forward_opi_nums[b], :max_f_opi_lens])
            forward_opi_answer_start.append(batch.forward_opi_answer_start[b][:forward_opi_nums[b], :max_f_opi_lens])
            forward_opi_answer_end.append(batch.forward_opi_answer_end[b][:forward_opi_nums[b], :max_f_opi_lens])

            forward_adv_query.append(batch.forward_adv_query[b][:forward_adv_nums[b], :max_f_adv_lens])
            forward_adv_query_mask.append(batch.forward_adv_query_mask[b][:forward_adv_nums[b], :max_f_adv_lens])
            forward_adv_query_seg.append(batch.forward_adv_query_seg[b][:forward_adv_nums[b], :max_f_adv_lens])
            forward_adv_answer_start.append(batch.forward_adv_answer_start[b][:forward_adv_nums[b], :max_f_adv_lens])
            forward_adv_answer_end.append(batch.forward_adv_answer_end[b][:forward_adv_nums[b], :max_f_adv_lens])

            backward_opi_query.append(batch.backward_opi_query[b][:backward_opi_nums[b], :max_b_opi_lens])
            backward_opi_query_mask.append(batch.backward_opi_query_mask[b][:backward_opi_nums[b], :max_b_opi_lens])
            backward_opi_query_seg.append(batch.backward_opi_query_seg[b][:backward_opi_nums[b], :max_b_opi_lens])
            backward_opi_answer_start.append(batch.backward_opi_answer_start[b][:backward_opi_nums[b], :max_b_opi_lens])
            backward_opi_answer_end.append(batch.backward_opi_answer_end[b][:backward_opi_nums[b], :max_b_opi_lens])

            backward_asp_query.append(batch.backward_asp_query[b][:backward_asp_nums[b], :max_b_asp_lens])
            backward_asp_query_mask.append(batch.backward_asp_query_mask[b][:backward_asp_nums[b], :max_b_asp_lens])
            backward_asp_query_seg.append(batch.backward_asp_query_seg[b][:backward_asp_nums[b], :max_b_asp_lens])
            backward_asp_answer_start.append(batch.backward_asp_answer_start[b][:backward_asp_nums[b], :max_b_asp_lens])
            backward_asp_answer_end.append(batch.backward_asp_answer_end[b][:backward_asp_nums[b], :max_b_asp_lens])

            category_query.append(batch.category_query[b][:pairs_nums[b], :max_sent_cate_lens])
            category_query_mask.append(batch.category_query_mask[b][:pairs_nums[b], :max_sent_cate_lens])
            category_query_seg.append(batch.category_query_seg[b][:pairs_nums[b], :max_sent_cate_lens])
            category_answer.append(batch.category_answer[b][:pairs_nums[b]])

            sentiment_query.append(batch.sentiment_query[b][:pairs_nums[b], :max_sent_cate_lens])
            sentiment_query_mask.append(batch.sentiment_query_mask[b][:pairs_nums[b], :max_sent_cate_lens])
            sentiment_query_seg.append(batch.sentiment_query_seg[b][:pairs_nums[b], :max_sent_cate_lens])
            sentiment_answer.append(batch.sentiment_answer[b][:pairs_nums[b]])

            intensity_query.append(batch.intensity_query[b][:pairs_nums[b], :max_intensity_lens])
            intensity_query_mask.append(batch.intensity_query_mask[b][:pairs_nums[b], :max_intensity_lens])
            intensity_query_seg.append(batch.intensity_query_seg[b][:pairs_nums[b], :max_intensity_lens])
            intensity_answer.append(batch.intensity_answer[b][:pairs_nums[b]])

        # 列表转成二维张量
        forward_opi_query = torch.cat(forward_opi_query, dim=0)
        forward_opi_query_mask = torch.cat(forward_opi_query_mask, dim=0)
        forward_opi_query_seg = torch.cat(forward_opi_query_seg, dim=0)
        forward_opi_answer_start = torch.cat(forward_opi_answer_start, dim=0)
        forward_opi_answer_end = torch.cat(forward_opi_answer_end, dim=0)

        forward_adv_query = torch.cat(forward_adv_query, dim=0)
        forward_adv_query_mask = torch.cat(forward_adv_query_mask, dim=0)
        forward_adv_query_seg = torch.cat(forward_adv_query_seg, dim=0)
        forward_adv_answer_start = torch.cat(forward_adv_answer_start, dim=0)
        forward_adv_answer_end = torch.cat(forward_adv_answer_end, dim=0)

        backward_opi_query = torch.cat(backward_opi_query, dim=0)
        backward_opi_query_mask = torch.cat(backward_opi_query_mask, dim=0)
        backward_opi_query_seg = torch.cat(backward_opi_query_seg, dim=0)
        backward_opi_answer_start = torch.cat(backward_opi_answer_start, dim=0)
        backward_opi_answer_end = torch.cat(backward_opi_answer_end, dim=0)

        backward_asp_query = torch.cat(backward_asp_query, dim=0)
        backward_asp_query_mask = torch.cat(backward_asp_query_mask, dim=0)
        backward_asp_query_seg = torch.cat(backward_asp_query_seg, dim=0)
        backward_asp_answer_start = torch.cat(backward_asp_answer_start, dim=0)
        backward_asp_answer_end = torch.cat(backward_asp_answer_end, dim=0)

        category_query = torch.cat(category_query, dim=0)
        category_query_mask = torch.cat(category_query_mask, dim=0)
        category_query_seg = torch.cat(category_query_seg, dim=0)
        category_answer = torch.cat(category_answer, dim=0)

        sentiment_query = torch.cat(sentiment_query, dim=0)
        sentiment_query_mask = torch.cat(sentiment_query_mask, dim=0)
        sentiment_query_seg = torch.cat(sentiment_query_seg, dim=0)
        sentiment_answer = torch.cat(sentiment_answer, dim=0)

        intensity_query = torch.cat(intensity_query, dim=0)
        intensity_query_mask = torch.cat(intensity_query_mask, dim=0)
        intensity_query_seg = torch.cat(intensity_query_seg, dim=0)
        intensity_answer = torch.cat(intensity_answer, dim=0)

        # ========================= calculate loss =================================
        f_asp_loss, f_opi_loss, f_adv_loss, b_asp_loss, b_opi_loss, b_adv_loss = 0, 0, 0, 0, 0, 0
        if self.args.use_Forward:
            f_asp_start_scores, f_asp_end_scores = self.model(forward_asp_query.to(self.device),
                                                              forward_asp_query_mask.to(self.device),
                                                              forward_asp_query_seg.to(self.device), 0)
            f_opi_start_scores, f_opi_end_scores = self.model(forward_opi_query.to(self.device),
                                                              forward_opi_query_mask.to(self.device),
                                                              forward_opi_query_seg.to(self.device), 0)
            f_adv_start_scores, f_adv_end_scores = self.model(forward_adv_query.to(self.device),
                                                              forward_adv_query_mask.to(self.device),
                                                              forward_adv_query_seg.to(self.device), 0)
            f_asp_loss = calculate_entity_loss(f_asp_start_scores, f_asp_end_scores,
                                               forward_asp_answer_start.to(self.device),
                                               forward_asp_answer_end.to(self.device))
            f_opi_loss = calculate_entity_loss(f_opi_start_scores, f_opi_end_scores,
                                               forward_opi_answer_start.to(self.device),
                                               forward_opi_answer_end.to(self.device))
            f_adv_loss = calculate_entity_loss(f_adv_start_scores, f_adv_end_scores,
                                               forward_adv_answer_start.to(self.device),
                                               forward_adv_answer_end.to(self.device))
        elif self.args.use_Backward:
            b_adv_start_scores, b_adv_end_scores = self.model(backward_adv_query.to(self.device),
                                                              backward_adv_query_mask.to(self.device),
                                                              backward_adv_query_seg.to(self.device), 0)
            b_opi_start_scores, b_opi_end_scores = self.model(backward_opi_query.to(self.device),
                                                              backward_opi_query_mask.to(self.device),
                                                              backward_opi_query_seg.to(self.device), 0)
            b_asp_start_scores, b_asp_end_scores = self.model(backward_asp_query.to(self.device),
                                                              backward_asp_query_mask.to(self.device),
                                                              backward_asp_query_seg.to(self.device), 0)
            b_adv_loss = calculate_entity_loss(b_adv_start_scores, b_adv_end_scores,
                                               backward_adv_answer_start.to(self.device),
                                               backward_adv_answer_end.to(self.device))
            b_opi_loss = calculate_entity_loss(b_opi_start_scores, b_opi_end_scores,
                                               backward_opi_answer_start.to(self.device),
                                               backward_opi_answer_end.to(self.device))
            b_asp_loss = calculate_entity_loss(b_asp_start_scores, b_asp_end_scores,
                                               backward_asp_answer_start.to(self.device),
                                               backward_asp_answer_end.to(self.device))
        else:
            # =============================Run Model=====================================
            # forward
            f_asp_start_scores, f_asp_end_scores = self.model(forward_asp_query.to(self.device),
                                                              forward_asp_query_mask.to(self.device),
                                                              forward_asp_query_seg.to(self.device), 0)
            f_opi_start_scores, f_opi_end_scores = self.model(forward_opi_query.to(self.device),
                                                              forward_opi_query_mask.to(self.device),
                                                              forward_opi_query_seg.to(self.device), 0)
            f_adv_start_scores, f_adv_end_scores = self.model(forward_adv_query.to(self.device),
                                                              forward_adv_query_mask.to(self.device),
                                                              forward_adv_query_seg.to(self.device), 0)
            # backward
            b_adv_start_scores, b_adv_end_scores = self.model(backward_adv_query.to(self.device),
                                                              backward_adv_query_mask.to(self.device),
                                                              backward_adv_query_seg.to(self.device), 0)
            b_opi_start_scores, b_opi_end_scores = self.model(backward_opi_query.to(self.device),
                                                              backward_opi_query_mask.to(self.device),
                                                              backward_opi_query_seg.to(self.device), 0)
            b_asp_start_scores, b_asp_end_scores = self.model(backward_asp_query.to(self.device),
                                                              backward_asp_query_mask.to(self.device),
                                                              backward_asp_query_seg.to(self.device), 0)
            # ============================Calculate Loss===================================
            # forward
            f_asp_loss = calculate_entity_loss(f_asp_start_scores, f_asp_end_scores,
                                               forward_asp_answer_start.to(self.device),
                                               forward_asp_answer_end.to(self.device))
            f_opi_loss = calculate_entity_loss(f_opi_start_scores, f_opi_end_scores,
                                               forward_opi_answer_start.to(self.device),
                                               forward_opi_answer_end.to(self.device))
            f_adv_loss = calculate_entity_loss(f_adv_start_scores, f_adv_end_scores,
                                               forward_adv_answer_start.to(self.device),
                                               forward_adv_answer_end.to(self.device))
            # backward
            b_adv_loss = calculate_entity_loss(b_adv_start_scores, b_adv_end_scores,
                                               backward_adv_answer_start.to(self.device),
                                               backward_adv_answer_end.to(self.device))
            b_opi_loss = calculate_entity_loss(b_opi_start_scores, b_opi_end_scores,
                                               backward_opi_answer_start.to(self.device),
                                               backward_opi_answer_end.to(self.device))
            b_asp_loss = calculate_entity_loss(b_asp_start_scores, b_asp_end_scores,
                                               backward_asp_answer_start.to(self.device),
                                               backward_asp_answer_end.to(self.device))

        category_scores = self.model(category_query.to(self.device), category_query_mask.to(self.device),
                                     category_query_seg.to(self.device), 1)

        sentiment_scores = self.model(sentiment_query.to(self.device), sentiment_query_mask.to(self.device),
                                      sentiment_query_seg.to(self.device), 2)

        intensity_scores = self.model(intensity_query.to(self.device), intensity_query_mask.to(self.device),
                                      intensity_query_seg.to(self.device), 3)
        if self.args.use_FocalLoss:
            # FocalLoss
            category_loss = self.focalLoss(category_scores, category_answer.to(self.device))
            sentiment_loss = self.focalLoss(sentiment_scores, sentiment_answer.to(self.device))
            intensity_loss = self.focalLoss(intensity_scores, intensity_answer.to(self.device))
        else:
            # Cross_entropy Loss
            category_loss = calculate_classification_loss(category_scores, category_answer.to(self.device))
            sentiment_loss = calculate_classification_loss(sentiment_scores, sentiment_answer.to(self.device))
            intensity_loss = calculate_classification_loss(intensity_scores, intensity_answer.to(self.device))

        # supervise contrastive learning loss
        if self.args.use_category_SCL:
            scl_category_loss = calculate_scl_loss(category_answer.to(self.device), category_scores)
            all_category_loss = ((1 - self.args.contrastive_lr1) * category_loss
                                 + self.args.contrastive_lr1 * scl_category_loss)
        else:
            all_category_loss = category_loss
        if self.args.use_sentiment_SCL:
            scl_sentiment_loss = calculate_scl_loss(sentiment_answer.to(self.device), sentiment_scores)
            all_sentiment_loss = ((1 - self.args.contrastive_lr2) * sentiment_loss
                                  + self.args.contrastive_lr2 * scl_sentiment_loss)
        else:
            all_sentiment_loss = sentiment_loss
        if self.args.use_intensity_SCL:
            scl_intensity_loss = calculate_scl_loss(intensity_answer.to(self.device), intensity_scores)
            all_intensity_loss = ((1 - self.args.contrastive_lr3) * intensity_loss
                                  + self.args.contrastive_lr3 * scl_intensity_loss)
        else:
            all_intensity_loss = intensity_loss

        # 正常训练loss =======================汇总=============================
        if self.args.use_Forward:
            loss_sum = (f_asp_loss + f_opi_loss + f_adv_loss) + 2 * all_category_loss + 3 * all_sentiment_loss + 2 * all_intensity_loss
        elif self.args.use_Backward:
            loss_sum = (b_adv_loss + b_opi_loss + b_asp_loss) + 2 * all_category_loss + 3 * all_sentiment_loss + 2 * all_intensity_loss
        else:
            loss_sum = ((f_asp_loss + f_opi_loss + f_adv_loss) +
                        (b_adv_loss + b_opi_loss + b_asp_loss) +
                        2 * all_category_loss + 3 * all_sentiment_loss + 2 * all_intensity_loss)
        return loss_sum

    def get_acos_train_loss(self, batch):
        """Compute the training loss for the ACOS sub-task (no adverb/intensity queries).

        Pipeline:
          1. Trim every padded query/answer tensor to the longest sequence
             actually present in this batch (cuts useless computation).
          2. Flatten the per-sample query lists into single 2-D tensors.
          3. Run the MRC model on the forward/backward extraction queries
             (which direction(s) run is controlled by ``args.use_Forward`` /
             ``args.use_Backward``) and on the category/sentiment
             classification queries.
          4. Optionally mix in focal loss and supervised contrastive loss,
             then return the weighted sum.

        Args:
            batch: padded training batch exposing the query / mask / segment /
                answer tensors plus per-sample query counts and lengths.

        Returns:
            Scalar tensor: the total training loss for this batch.
        """
        forward_opi_nums = batch.forward_opi_nums
        backward_asp_nums = batch.backward_asp_nums
        pairs_nums = batch.pairs_nums

        # Longest input length per query type in this batch.
        # NOTE(review): iterating over args.train_batch_size assumes every
        # batch is full-size (DataLoader drop_last=True) — confirm upstream.
        batch_range = range(self.args.train_batch_size)
        max_f_asp_len = max(batch.forward_aspect_len)
        max_f_opi_lens = max(max(batch.forward_opinion_lens[b]) for b in batch_range)
        max_b_opi_len = max(batch.backward_opinion_len)
        max_b_asp_lens = max(max(batch.backward_aspect_lens[b]) for b in batch_range)
        max_sent_cate_lens = max(max(batch.sentiment_category_lens[b]) for b in batch_range)

        # One query per sample for these, so a plain 2-D slice suffices.
        forward_asp_query = batch.forward_asp_query[:, :max_f_asp_len]
        forward_asp_query_mask = batch.forward_asp_query_mask[:, :max_f_asp_len]
        forward_asp_query_seg = batch.forward_asp_query_seg[:, :max_f_asp_len]
        forward_asp_answer_start = batch.forward_asp_answer_start[:, :max_f_asp_len]
        forward_asp_answer_end = batch.forward_asp_answer_end[:, :max_f_asp_len]

        backward_opi_query = batch.backward_opi_query[:, :max_b_opi_len]
        backward_opi_query_mask = batch.backward_opi_query_mask[:, :max_b_opi_len]
        backward_opi_query_seg = batch.backward_opi_query_seg[:, :max_b_opi_len]
        backward_opi_answer_start = batch.backward_opi_answer_start[:, :max_b_opi_len]
        backward_opi_answer_end = batch.backward_opi_answer_end[:, :max_b_opi_len]

        def _collect(fields, nums, max_len=None):
            """For each named batch field, keep the first ``nums[b]`` queries of
            every sample (trimmed to ``max_len`` tokens when given) and
            concatenate them over the batch into one 2-D (or 1-D) tensor."""
            gathered = []
            for field in fields:
                tensor = getattr(batch, field)
                if max_len is None:
                    chunks = [tensor[b][:nums[b]] for b in batch_range]
                else:
                    chunks = [tensor[b][:nums[b], :max_len] for b in batch_range]
                gathered.append(torch.cat(chunks, dim=0))
            return gathered

        (forward_opi_query, forward_opi_query_mask, forward_opi_query_seg,
         forward_opi_answer_start, forward_opi_answer_end) = _collect(
            ['forward_opi_query', 'forward_opi_query_mask', 'forward_opi_query_seg',
             'forward_opi_answer_start', 'forward_opi_answer_end'],
            forward_opi_nums, max_f_opi_lens)

        (backward_asp_query, backward_asp_query_mask, backward_asp_query_seg,
         backward_asp_answer_start, backward_asp_answer_end) = _collect(
            ['backward_asp_query', 'backward_asp_query_mask', 'backward_asp_query_seg',
             'backward_asp_answer_start', 'backward_asp_answer_end'],
            backward_asp_nums, max_b_asp_lens)

        category_query, category_query_mask, category_query_seg = _collect(
            ['category_query', 'category_query_mask', 'category_query_seg'],
            pairs_nums, max_sent_cate_lens)
        [category_answer] = _collect(['category_answer'], pairs_nums)

        sentiment_query, sentiment_query_mask, sentiment_query_seg = _collect(
            ['sentiment_query', 'sentiment_query_mask', 'sentiment_query_seg'],
            pairs_nums, max_sent_cate_lens)
        [sentiment_answer] = _collect(['sentiment_answer'], pairs_nums)

        # ===================== entity-extraction (span) losses =====================
        # NOTE: unlike the ACOXS variant, there are no adverb queries here, so
        # the always-zero f_adv_loss / b_adv_loss terms were dropped from the sums.
        f_asp_loss = f_opi_loss = b_opi_loss = b_asp_loss = 0
        if self.args.use_Forward:
            # Forward chain only: aspect -> opinion.
            f_asp_start_scores, f_asp_end_scores = self.model(
                forward_asp_query.to(self.device),
                forward_asp_query_mask.to(self.device),
                forward_asp_query_seg.to(self.device), 0)
            f_opi_start_scores, f_opi_end_scores = self.model(
                forward_opi_query.to(self.device),
                forward_opi_query_mask.to(self.device),
                forward_opi_query_seg.to(self.device), 0)
            f_asp_loss = calculate_entity_loss(f_asp_start_scores, f_asp_end_scores,
                                               forward_asp_answer_start.to(self.device),
                                               forward_asp_answer_end.to(self.device))
            f_opi_loss = calculate_entity_loss(f_opi_start_scores, f_opi_end_scores,
                                               forward_opi_answer_start.to(self.device),
                                               forward_opi_answer_end.to(self.device))
        elif self.args.use_Backward:
            # Backward chain only: opinion -> aspect.
            b_opi_start_scores, b_opi_end_scores = self.model(
                backward_opi_query.to(self.device),
                backward_opi_query_mask.to(self.device),
                backward_opi_query_seg.to(self.device), 0)
            b_asp_start_scores, b_asp_end_scores = self.model(
                backward_asp_query.to(self.device),
                backward_asp_query_mask.to(self.device),
                backward_asp_query_seg.to(self.device), 0)
            b_opi_loss = calculate_entity_loss(b_opi_start_scores, b_opi_end_scores,
                                               backward_opi_answer_start.to(self.device),
                                               backward_opi_answer_end.to(self.device))
            b_asp_loss = calculate_entity_loss(b_asp_start_scores, b_asp_end_scores,
                                               backward_asp_answer_start.to(self.device),
                                               backward_asp_answer_end.to(self.device))
        else:
            # Both directions.
            f_asp_start_scores, f_asp_end_scores = self.model(
                forward_asp_query.to(self.device),
                forward_asp_query_mask.to(self.device),
                forward_asp_query_seg.to(self.device), 0)
            f_opi_start_scores, f_opi_end_scores = self.model(
                forward_opi_query.to(self.device),
                forward_opi_query_mask.to(self.device),
                forward_opi_query_seg.to(self.device), 0)
            b_opi_start_scores, b_opi_end_scores = self.model(
                backward_opi_query.to(self.device),
                backward_opi_query_mask.to(self.device),
                backward_opi_query_seg.to(self.device), 0)
            b_asp_start_scores, b_asp_end_scores = self.model(
                backward_asp_query.to(self.device),
                backward_asp_query_mask.to(self.device),
                backward_asp_query_seg.to(self.device), 0)
            f_asp_loss = calculate_entity_loss(f_asp_start_scores, f_asp_end_scores,
                                               forward_asp_answer_start.to(self.device),
                                               forward_asp_answer_end.to(self.device))
            f_opi_loss = calculate_entity_loss(f_opi_start_scores, f_opi_end_scores,
                                               forward_opi_answer_start.to(self.device),
                                               forward_opi_answer_end.to(self.device))
            b_opi_loss = calculate_entity_loss(b_opi_start_scores, b_opi_end_scores,
                                               backward_opi_answer_start.to(self.device),
                                               backward_opi_answer_end.to(self.device))
            b_asp_loss = calculate_entity_loss(b_asp_start_scores, b_asp_end_scores,
                                               backward_asp_answer_start.to(self.device),
                                               backward_asp_answer_end.to(self.device))

        # ===================== classification losses =====================
        # Task ids: 1 = category head, 2 = sentiment head.
        category_scores = self.model(category_query.to(self.device),
                                     category_query_mask.to(self.device),
                                     category_query_seg.to(self.device), 1)
        sentiment_scores = self.model(sentiment_query.to(self.device),
                                      sentiment_query_mask.to(self.device),
                                      sentiment_query_seg.to(self.device), 2)

        if self.args.use_FocalLoss:
            # Focal loss down-weights easy examples (class imbalance).
            category_loss = self.focalLoss(category_scores, category_answer.to(self.device))
            sentiment_loss = self.focalLoss(sentiment_scores, sentiment_answer.to(self.device))
        else:
            # Plain cross-entropy.
            category_loss = calculate_classification_loss(category_scores, category_answer.to(self.device))
            sentiment_loss = calculate_classification_loss(sentiment_scores, sentiment_answer.to(self.device))

        # Supervised contrastive learning: convex mix with the classification loss.
        if self.args.use_category_SCL:
            scl_category_loss = calculate_scl_loss(category_answer.to(self.device), category_scores)
            all_category_loss = ((1 - self.args.contrastive_lr1) * category_loss
                                 + self.args.contrastive_lr1 * scl_category_loss)
        else:
            all_category_loss = category_loss
        if self.args.use_sentiment_SCL:
            scl_sentiment_loss = calculate_scl_loss(sentiment_answer.to(self.device), sentiment_scores)
            all_sentiment_loss = ((1 - self.args.contrastive_lr2) * sentiment_loss
                                  + self.args.contrastive_lr2 * scl_sentiment_loss)
        else:
            all_sentiment_loss = sentiment_loss

        # ===================== weighted total =====================
        if self.args.use_Forward:
            loss_sum = (f_asp_loss + f_opi_loss) + 2 * all_category_loss + 3 * all_sentiment_loss
        elif self.args.use_Backward:
            loss_sum = (b_opi_loss + b_asp_loss) + 2 * all_category_loss + 3 * all_sentiment_loss
        else:
            loss_sum = ((f_asp_loss + f_opi_loss) + (b_opi_loss + b_asp_loss) +
                        2 * all_category_loss + 3 * all_sentiment_loss)
        return loss_sum

