from dataset.refine_dataset import DRR_dataset
import torch
from utils.logger import Logger
from network.ResNet_model import resnet50
from torch import nn, optim
from DRR.drr_generator import Projector
import utils.image_process as imp
import numpy as np
from tester.classification_test import net_out2real_out
import torch.utils.data


class FRefineTrain:
    """Fine-refinement training harness for a ResNet-50 pose regressor on DRR data.

    Builds the dataset, the network, the loss and the optimizer, and exposes
    single-step train/test routines. The dataset yields residual pose targets
    relative to a true pose `tru_pos`; both routines convert the residuals
    back to absolute poses before returning them.
    """

    def __init__(self, num_classes=6, batch_size=8,
                 train_path=None, train_label_path=None,
                 CT_path=None, pre_model=None, mode='cla'
                 ):
        """
        Args:
            num_classes: number of regression outputs (pose parameters).
            batch_size: mini-batch size for the dataset loader.
            train_path: path to the DRR training images.
            train_label_path: path to the pose-label (fn) file.
            CT_path: path of the CT volume fed to the DRR projector.
            pre_model: optional pre-trained model; moved to GPU when given.
            mode: mode tag (default 'cla'); stored but not used in this class.
        """
        # Dataset providing (input, residual target, true pose) batches.
        self.train_dataset = DRR_dataset(DRR_path=train_path, fn_path=train_label_path,
                                         Batch_size=batch_size)
        # Network under training: ResNet-50 with a 2-channel input head.
        self.train_model = resnet50(num_classes=num_classes, in_channels=2)
        # Loss function and optimizer.
        self.mse_loss = nn.MSELoss()
        self.optimizer = optim.Adam(self.train_model.parameters(), lr=0.00001)
        # Training bookkeeping.
        self.init_epoch = 0  # starting epoch
        self.Batch_size = batch_size
        self.min_loss = 10000  # best loss seen so far; used to decide checkpoint saves
        # BUGFIX: the declared default pre_model=None previously crashed on
        # .cuda() with AttributeError; only move to GPU when a model is given.
        self.pre_model = pre_model.cuda() if pre_model is not None else None
        self.mode = mode
        # Projector object used for DRR generation, loaded with the CT volume.
        self.data_generator = Projector()
        self.data_generator.load_ct_images(CT_path)

    def train_once(self):
        """Run one forward/backward optimization step on a training batch.

        Returns:
            tuple: (batch_y, out, loss) — absolute target pose, absolute
            predicted pose, and the MSE loss computed on the residuals.
        """
        # .cuda() on a Module is idempotent, so repeating it per step is safe;
        # it also puts the model in training mode.
        self.train_model.train().cuda()
        batch_x, batch_y, tru_pos = self.train_dataset.get_train_data()
        out = self.train_model(batch_x)
        # Loss is computed on the residual poses, before the tru_pos offset.
        loss = self.mse_loss(out, batch_y)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # Convert residuals back to absolute poses for the caller.
        # (assumes batch_y/out are offsets relative to tru_pos — TODO confirm
        # against DRR_dataset)
        batch_y = tru_pos + batch_y
        out = tru_pos + out
        return batch_y, out, loss

    def test(self):
        """Evaluate the model on one test batch without updating weights.

        Returns:
            tuple: (batch_y, out, loss) — absolute target pose, absolute
            predicted pose, and the MSE loss computed on the residuals.
        """
        self.train_model.eval()
        with torch.no_grad():
            batch_x, batch_y, tru_pos = self.train_dataset.get_test_data()
            # Single-step prediction to obtain the initial pose estimate.
            out = self.train_model(batch_x)
            loss = self.mse_loss(out, batch_y)
            # Convert residuals back to absolute poses, as in train_once.
            batch_y = tru_pos + batch_y
            out = tru_pos + out
        return batch_y, out, loss
