import numpy as np
from utils.label_transform import label2real
from network.ResNet_model import resnet50
from network.Basic_model import basic_net
from network.Faster_model import faster_net
from network.fasternet import FasterNet
import torch
from dataset.dataset import DRR_dataset
import network.metrics as met
import utils.image_process as imp
from DRR.drr_generator import Projector
from network.Fuse_model_1 import F1Net
from network.Fuse_model_2 import F2Net
from network.Unet import UBasicNet


class RegTest:
    """Evaluate a trained 2D/3D-registration pose network on a DRR test set.

    Loads a pose-regression model (and, optionally, a refinement model),
    pulls one batch from the test dataset, predicts the 6-DoF pose
    (3 rotations + 3 translations), converts label-space values back to
    real-world units and reports the errors.  Failure cases (large errors)
    are rendered as DRR images for visual inspection.
    """

    def __init__(self, out_channels=6, batch_size=1,
                 model_address=None,
                 test_path=None, test_label_path=None,
                 CT_path=None, saving_drr_path=None,
                 refiner_address=None
                 ):
        """Build the test harness.

        :param out_channels: number of pose parameters predicted (6 = 3 rot + 3 trans)
        :param batch_size: samples per test batch
        :param model_address: path to the base model checkpoint (state_dict)
        :param test_path: directory of test DRR images
        :param test_label_path: path to the test pose labels
        :param CT_path: directory of the CT volume used to render DRRs
        :param saving_drr_path: directory where failure-case DRRs are written
        :param refiner_address: optional path to a refiner checkpoint; when
            None, no refiner is loaded and ``self.refiner`` is None
        """
        # Test data loader.
        self.test_dataset = DRR_dataset(DRR_path=test_path, fn_path=test_label_path,
                                        Batch_size=batch_size)
        # Base pose-regression model.
        # NOTE(review): alternative backbones (resnet50, F1Net, UBasicNet,
        # faster_net, FasterNet) were experimented with here; swap the
        # constructor below to evaluate one of them.
        self.test_model = basic_net(in_channels=3, out_channels=out_channels)
        self.test_model.load_state_dict(torch.load(model_address))
        self.test_model.cuda()
        self.Batch_size = batch_size
        # Optional refinement model.  Previously this was wrapped in
        # `try/except AttributeError: pass`, which silently swallowed *any*
        # AttributeError (including genuine bugs) and left `self.refiner` as
        # an unloaded, randomly-initialized model when no checkpoint was
        # supplied.  Check for the missing address explicitly instead.
        if refiner_address is not None:
            self.refiner = resnet50(num_classes=out_channels, in_channels=2)
            self.refiner.load_state_dict(torch.load(refiner_address))
            self.refiner.cuda()
        else:
            self.refiner = None
        # Directory where failure-case DRR images are saved.
        self.saving_drr_path = saving_drr_path
        # DRR generator driven by the CT volume.
        self.data_generator = Projector(directory=CT_path)

    def test(self, serial_number=None, voxel_size=None, mode='标准正位'):
        """Evaluate one batch from the test set.

        :param serial_number: index used to name any saved failure-case image
        :param voxel_size: CT voxel size, forwarded to the vm metric
        :param mode: projection mode string passed to the image writer
            (default is the project's standard AP-view label)
        :return: (vm, |rotation error| per axis, |translation error| per axis),
            where vm is the mean-vertex-distance style metric from
            ``met.vm_sample`` and the error arrays are in real-world units
        """
        # Inference on one test batch; no gradients needed.
        self.test_model.eval()
        with torch.no_grad():
            batch_x, batch_y = self.test_dataset.get_data()
            out = self.test_model(batch_x)
        # Vertex-distance metric between true and predicted poses.
        vm = met.vm_sample(batch_y[:, 0:3], batch_y[:, 3:6], out[:, 0: 3], out[:, 3: 6],
                           self.Batch_size, voxel_size=voxel_size)
        tru_r = batch_y[:, 0:3].detach().cpu().numpy()
        tru_t = batch_y[:, 3:6].detach().cpu().numpy()
        pre_r = out[:, 0: 3].detach().cpu().numpy()
        pre_t = out[:, 3: 6].detach().cpu().numpy()
        # Convert normalized label values back to real-world pose parameters.
        tru_alpha, tru_beta, tru_theta, tru_tx, tru_ty, tru_tz = label2real(tru_r, tru_t, self.Batch_size)
        pre_alpha, pre_beta, pre_theta, pre_tx, pre_ty, pre_tz = label2real(pre_r, pre_t, self.Batch_size)
        tru_r = np.array([tru_alpha, tru_beta, tru_theta]).ravel()
        tru_t = np.array([tru_tx, tru_ty, tru_tz]).ravel()
        pre_r = np.array([pre_alpha, pre_beta, pre_theta]).ravel()
        pre_t = np.array([pre_tx, pre_ty, pre_tz]).ravel()
        # Worst-case absolute errors between prediction and ground truth.
        r_max = max(abs(tru_r - pre_r))
        t_max = max(abs(tru_t - pre_t))
        # Save DRR images for visual inspection when the error is large
        # (thresholds: vm > 10, any rotation error > 2, any translation > 5).
        if vm > 10 or r_max > 2 or t_max > 5:
            imp.get_DRR_image(projector=self.data_generator,
                              tru_img=batch_x[0][0],
                              tru_r=tru_r,
                              tru_t=tru_t,
                              pre_r=pre_r,
                              pre_t=pre_t,
                              i=serial_number,
                              saving_path=self.saving_drr_path,
                              mode=mode)
        return vm, abs(tru_r - pre_r), abs(tru_t - pre_t)
