from network.ResNet_model import resnet50
import torch
from dataset.dataset import DRR_dataset
import network.metrics as met
import utils.image_process as imp
from DRR.drr_generator import Projector
from torch import nn


def net_out2real_out(pre):
    """
    Convert the network's raw classification logits into actual pose values.

    The 62 logits are split into six classification heads:
    rotations rx/ry/rz (6 classes each), translations tx/ty (12 classes each)
    and tz (20 classes).  Each head's argmax index is mapped back to the
    centre of its bin.

    :param pre: network output of shape (batch, 62)
    :return: tensor of shape (batch, 6) with columns (rx, ry, rz, tx, ty, tz)
    """
    # argmax is invariant under softmax, so the logits can be used directly;
    # keepdim=True keeps each result as a (batch, 1) column for concatenation.
    out_rx = torch.argmax(pre[:, 0: 6], dim=1, keepdim=True)
    out_ry = torch.argmax(pre[:, 6: 12], dim=1, keepdim=True)
    out_rz = torch.argmax(pre[:, 12: 18], dim=1, keepdim=True)
    out_tx = torch.argmax(pre[:, 18: 30], dim=1, keepdim=True)
    out_ty = torch.argmax(pre[:, 30: 42], dim=1, keepdim=True)
    out_tz = torch.argmax(pre[:, 42: 62], dim=1, keepdim=True)
    # Map class indices to bin centres: rotation bins are 5 units wide,
    # xy translation bins 2.5 units, z translation bins 2 units.
    out_r = torch.cat((out_rx, out_ry, out_rz), dim=1) * 5 + 2.5
    out_txy = torch.cat((out_tx, out_ty), dim=1) * 2.5 + 1.25
    out = torch.cat((out_r, out_txy, out_tz * 2 + 1), dim=1)
    return out


class ClaTest:
    """Evaluation harness for the classification-based pose-estimation model.

    Loads a trained ResNet-50 classifier (and optionally a refiner network),
    a test dataset and a DRR projector, then reports per-sample pose errors
    and saves DRR images for poorly predicted cases.
    """

    def __init__(self, num_classes=62, batch_size=1,
                 model_address=None,
                 test_path=None, test_label_path=None,
                 CT_path=None, saving_drr_path=None,
                 refiner_address=None
                 ):
        """
        :param num_classes: total number of classification bins
            (6 + 6 + 6 + 12 + 12 + 20 = 62, matching net_out2real_out)
        :param batch_size: batch size used when drawing test data
        :param model_address: path to the trained base-model state dict
        :param test_path: path to the test DRR images
        :param test_label_path: path to the test labels
        :param CT_path: path to the CT volume used for DRR generation
        :param saving_drr_path: directory where diagnostic DRR images are saved
        :param refiner_address: path to the refiner state dict; may be None,
            in which case the refiner is silently skipped (see NOTE below)
        """
        # Load the test data.
        self.test_dataset = DRR_dataset(DRR_path=test_path, fn_path=test_label_path,
                                        Batch_size=batch_size)
        # Load the base model.
        self.test_model = resnet50(num_classes=num_classes)
        self.test_model.load_state_dict(torch.load(model_address))
        self.test_model.cuda()
        self.Batch_size = batch_size
        # Load the refinement model.
        # NOTE(review): when refiner_address is None the load raises
        # AttributeError and is silently swallowed, leaving self.refiner
        # unset; a later test(mode='refine') will then fail on the missing
        # attribute. Consider an explicit `if refiner_address is not None`.
        try:
            self.refiner = resnet50(num_classes=6, in_channels=2)
            self.refiner.load_state_dict(torch.load(refiner_address))
            self.refiner.cuda()
        except AttributeError:
            pass
        # Directory where diagnostic DRR images are written.
        self.saving_drr_path = saving_drr_path
        # Projector object used for DRR generation.
        self.data_generator = Projector()
        self.data_generator.load_ct_images(CT_path)

    def test(self, serial_number=None, voxel_size=None, mode=None):
        """Run one test batch and compute pose-error metrics.

        :param serial_number: index used when naming saved DRR images
        :param voxel_size: unused in this method -- TODO confirm it can be removed
        :param mode: 'refine' to additionally run the refiner network
        :return: (max vertex distance, max rotation error, max xy translation
            error, z translation error); with mode='refine' the refined
            counterparts (max_dis_re, r_max_re, xy_max_re, z_max_re) are
            appended, giving an 8-tuple.
        """
        # Evaluate on the test set.
        self.test_model.eval()
        with torch.no_grad():
            batch_x, batch_y = self.test_dataset.get_data()
            # NOTE(review): the labels are passed into the forward call as
            # well -- presumably the project's resnet50 accepts them; confirm.
            out = self.test_model(batch_x, batch_y)
        # Ground-truth rotation (cols 0:3) and translation (cols 3:6).
        tru_r = batch_y[:, 0:3].detach().cpu().numpy().ravel()
        tru_t = batch_y[:, 3:6].detach().cpu().numpy().ravel()
        # Convert class logits to actual pose values (bin centres).
        out = net_out2real_out(out)
        pre_r = out[:, 0: 3].detach().cpu().numpy().ravel()
        pre_t = out[:, 3: 6].detach().cpu().numpy().ravel()
        # Maximum vertex distance between true and predicted pose.
        max_dis = met.max_distance(batch_y[:, 0:3], batch_y[:, 3:6], out[:, 0: 3], out[:, 3: 6], self.Batch_size)
        # Largest per-component differences between prediction and truth;
        # xy errors scaled by 2 and z by 5 -- presumably the bin-to-physical
        # scale factors of the dataset's discretization; confirm.
        r_max = max(abs(tru_r - pre_r))
        xy_max = max(abs(tru_t[0:2] - pre_t[0:2]) * 2)
        z_max = float(abs(tru_t[2] - pre_t[2]) * 5)
        if mode == 'refine':
            # Refinement pass: render a DRR at the predicted pose and feed
            # the (input, rendered) pair to the refiner for a correction.
            # Map bin values back to physical pose parameters; the offsets
            # (-15, +255, -100, ...) presumably match the dataset's pose
            # ranges -- confirm against the dataset/generator code.
            pre_alpha = (pre_r[0] - 15).reshape(self.Batch_size, 1)
            pre_beta = (pre_r[1] + 255).reshape(self.Batch_size, 1)
            pre_theta = (pre_r[2] - 15).reshape(self.Batch_size, 1)
            pre_tx = (pre_t[0] * 2 - 30).reshape(self.Batch_size, 1)
            pre_ty = (pre_t[1] * 2 - 30).reshape(self.Batch_size, 1)
            pre_tz = (pre_t[2] * 5 - 100).reshape(self.Batch_size, 1)
            all_new_drr = imp.get_new_drr(pre_alpha, pre_beta, pre_theta,
                                          pre_tx, pre_ty, pre_tz,
                                          size=self.Batch_size,
                                          data_generator=self.data_generator)
            # Predict the residual (margin) between prediction and truth.
            # NOTE(review): the refiner runs without eval()/no_grad() here,
            # unlike the base model above -- confirm this is intentional.
            refine_x = torch.cat((batch_x, all_new_drr), 1)
            out_margin = self.refiner(refine_x)
            out_margin_rx = out_margin[:, 0: 3].detach().cpu().numpy().ravel()
            out_margin_tx = out_margin[:, 3: 6].detach().cpu().numpy().ravel()
            refine_r = pre_r + out_margin_rx
            refine_t = pre_t + out_margin_tx
            # Maximum vertex distance for the refined pose.
            max_dis_re = met.max_distance(batch_y[:, 0:3], batch_y[:, 3:6],
                                          refine_r, refine_t, self.Batch_size)
            # Largest differences between refined prediction and truth.
            r_max_re = max(abs(tru_r - refine_r))
            xy_max_re = max(abs(tru_t[0:2] - refine_t[0:2]) * 2)
            z_max_re = float(abs(tru_t[2] - refine_t[2]) * 5)
            if max_dis > 10 or r_max > 2 or xy_max > 2 or z_max > 10 or \
                    max_dis_re > 10 or r_max_re > 2 or xy_max_re > 2 or z_max_re > 10:
                # True pose parameters in physical units.
                tru_r = [tru_r[0] - 15, tru_r[1] + 255, tru_r[2] - 15]
                tru_t = [tru_t[0] * 2 - 30, tru_t[1] * 2 - 30, tru_t[2] * 5 - 100]
                # Predicted pose parameters in physical units.
                pre_r = [pre_r[0] - 15, pre_r[1] + 255, pre_r[2] - 15]
                pre_t = [pre_t[0] * 2 - 30, pre_t[1] * 2 - 30, pre_t[2] * 5 - 100]
                # Refined pose parameters in physical units.
                refine_r = [refine_r[0] - 15, refine_r[1] + 255, refine_r[2] - 15]
                refine_t = [refine_t[0] * 2 - 30, refine_t[1] * 2 - 30, refine_t[2] * 5 - 100]
                # Save the diagnostic comparison image.
                imp.get_DRR_image_re(projector=self.data_generator,
                                     tru_img=batch_x,
                                     pre_img=all_new_drr,
                                     tru_r=tru_r,
                                     tru_t=tru_t,
                                     pre_r=pre_r,
                                     pre_t=pre_t,
                                     refine_r=refine_r,
                                     refine_t=refine_t,
                                     i=serial_number,
                                     saving_path=self.saving_drr_path)
            return max_dis, r_max, xy_max, z_max, max_dis_re, r_max_re, xy_max_re, z_max_re
        # Save DRR images when the error exceeds the thresholds.
        if max_dis > 10 or r_max > 2 or xy_max > 2 or z_max > 10:
            # True pose parameters in physical units.
            tru_r = [tru_r[0] - 15, tru_r[1] + 255, tru_r[2] - 15]
            tru_t = [tru_t[0] * 2 - 30, tru_t[1] * 2 - 30, tru_t[2] * 5 - 100]
            # Predicted pose parameters in physical units.
            pre_r = [pre_r[0] - 15, pre_r[1] + 255, pre_r[2] - 15]
            pre_t = [pre_t[0] * 2 - 30, pre_t[1] * 2 - 30, pre_t[2] * 5 - 100]
            # Save the diagnostic comparison image.
            imp.get_DRR_image(projector=self.data_generator,
                              tru_img=batch_x,
                              tru_r=tru_r,
                              tru_t=tru_t,
                              pre_r=pre_r,
                              pre_t=pre_t,
                              i=serial_number,
                              saving_path=self.saving_drr_path
                              )
        return max_dis, r_max, xy_max, z_max
