from dataset.dataset import DRR_dataset
import torch
from utils.logger import Logger
from network.ResNet_model import resnet50
from torch import nn, optim
from DRR.drr_generator import Projector
import utils.image_process as imp
from utils.label_transform import label2real
import numpy as np
from tester.classification_test import net_out2real_out


def get_loss(origin_drr, pre, tru, batch_size, data_generator, train_model):
    """Run one refinement forward pass and compute its weighted MSE loss.

    Args:
        origin_drr: input DRR image batch fed to the initial pose network.
        pre: initial pose prediction, columns 0-2 rotation, 3-5 translation.
        tru: ground-truth pose labels with the same column layout.
        batch_size: number of samples in the batch.
        data_generator: Projector used to render DRRs from predicted poses.
        train_model: refinement network mapping stacked DRRs to pose residuals.

    Returns:
        (out, loss): the refinement network output (predicted residuals) and
        the scalar training loss.
    """
    pred_rot = pre[:, 0:3]
    pred_trans = pre[:, 3:6]
    true_rot = tru[:, 0:3].cuda().float()
    true_trans = tru[:, 3:6].cuda().float()
    # Convert network-space labels to real pose parameters, then render the
    # DRRs corresponding to the predicted pose.
    alpha, beta, theta, tx, ty, tz = label2real(pred_rot, pred_trans, batch_size)

    def to_numpy(t):
        # Rendering happens outside autograd, on the CPU.
        return t.detach().cpu().numpy()

    all_new_drr = imp.get_new_drr(to_numpy(alpha), to_numpy(beta), to_numpy(theta),
                                  to_numpy(tx), to_numpy(ty), to_numpy(tz),
                                  size=batch_size,
                                  data_generator=data_generator)
    # Residual between ground truth and the initial prediction — this is the
    # regression target for the refinement network.
    rot_residual = true_rot - pred_rot
    trans_residual = true_trans - pred_trans
    # Stack original and re-rendered DRRs along the channel dimension.
    refine_input = torch.cat((origin_drr, all_new_drr), 1)
    out = train_model(refine_input)
    criterion = nn.MSELoss()
    rot_loss = criterion(out[:, 0:3], rot_residual)
    trans_loss = criterion(out[:, 3:6], trans_residual)
    # Weight the rotation term slightly higher so angular error dominates.
    loss = 1.05 * rot_loss + trans_loss
    return out, loss


class RefineTrain:
    """Trainer for the pose-refinement stage.

    A frozen initial-pose network (``pre_model``) produces a first pose
    estimate; this class trains a second ResNet-50 that, given the original
    DRR stacked with a DRR rendered at the predicted pose, regresses the
    residual between the prediction and the ground truth.
    """

    def __init__(self, num_classes=6, batch_size=8,
                 train_path=None, train_label_path=None,
                 test_path=None, test_label_path=None,
                 CT_path=None, pre_model=None, mode='reg'
                 ):
        """Build datasets, the refinement network, optimizer, and DRR projector.

        Args:
            num_classes: output dimension of the refinement net (6-DoF pose).
            batch_size: samples per training/test batch.
            train_path / train_label_path: training DRRs and labels.
            test_path / test_label_path: test DRRs and labels.
            CT_path: CT volume used by the DRR projector.
            pre_model: pretrained initial-pose network (moved to GPU here).
            mode: 'reg' for regression outputs, 'cla' for classification
                outputs that must be converted via ``net_out2real_out``.
        """
        # Data loading
        self.train_dataset = DRR_dataset(DRR_path=train_path, fn_path=train_label_path,
                                         Batch_size=batch_size)
        self.test_dataset = DRR_dataset(DRR_path=test_path, fn_path=test_label_path,
                                        Batch_size=batch_size)
        # Refinement network: 2 input channels = original DRR + rendered DRR.
        self.train_model = resnet50(num_classes=num_classes, in_channels=2)
        # Optimizer (loss function is built inside get_loss)
        self.optimizer = optim.Adam(self.train_model.parameters(), lr=0.0001)
        # Training bookkeeping
        self.init_epoch = 0  # starting epoch
        self.Batch_size = batch_size
        self.min_loss = 10000  # best loss so far, used for checkpointing
        self.pre_model = pre_model.cuda()
        self.mode = mode
        # Projector used to render DRRs from the CT volume
        self.data_generator = Projector()
        self.data_generator.load_ct_images(CT_path)

    def train_once(self):
        """Run one training step; returns (labels, refined pose, loss).

        Returns None if ``self.mode`` is neither 'reg' nor 'cla'
        (preserves the original implicit-None behavior).
        """
        self.train_model.train().cuda()
        batch_x, batch_y = self.train_dataset.get_data()
        # Initial single-shot pose estimate.
        # NOTE(review): here pre_model receives (batch_x, batch_y) but
        # test() calls it with batch_x only — confirm which signature the
        # pretrained model actually expects.
        out = self.pre_model(batch_x, batch_y)
        if self.mode not in ('reg', 'cla'):
            return None
        # Classification outputs must first be mapped to real pose values.
        if self.mode == 'cla':
            out = net_out2real_out(out)
        pre_margin, loss = get_loss(batch_x, out, batch_y,
                                    self.Batch_size, self.data_generator, self.train_model)
        # Backpropagate and update the refinement network.
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # Refined pose = initial estimate + predicted residual.
        out = out + pre_margin
        return batch_y, out, loss

    def test(self):
        """Evaluate one batch from the test set; returns (labels, refined pose, loss)."""
        self.train_model.eval()
        with torch.no_grad():
            batch_x, batch_y = self.test_dataset.get_data()
            # Initial single-shot pose estimate (see NOTE in train_once about
            # the differing pre_model call signature).
            out = self.pre_model(batch_x)
            if self.mode == 'cla':
                out = net_out2real_out(out)
            pre_margin, loss = get_loss(batch_x, out, batch_y,
                                        self.Batch_size, self.data_generator, self.train_model)
            # Refined pose = initial estimate + predicted residual.
            out = out + pre_margin
        return batch_y, out, loss
