import os
from dataset.dataset import DRR_dataset
import torch
from utils.logger import Logger
from configs.config import global_var
from network.ResNet_model import resnet50
from network.Basic_model import basic_net
from network.Faster_model import faster_net
from network.fasternet import FasterNet
from network.Fuse_model_1 import F1Net
from network.Fuse_model_2 import F2Net
from network.Unet import UBasicNet
from torch import nn, optim
import network.metrics as met
from network.vm_loss import VmLoss

# global_var.address = 'C:/Users/adminTKJ/Desktop/RLIR_sumup/'


# Loss computation
def get_loss(pre, tru, angle_weight=1.1):
    """Compute the weighted MSE loss between predicted and ground-truth poses.

    Both tensors are expected to have shape (batch, 6): columns 0-2 are the
    rotation components (rx) and columns 3-5 the translation components (tx).

    :param pre: predicted pose tensor, shape (batch, 6)
    :param tru: ground-truth pose tensor, shape (batch, 6)
    :param angle_weight: weight applied to the rotation term so the loss
        emphasizes angular error over translational error (default 1.1,
        the original hard-coded coefficient)
    :return: scalar loss tensor
    """
    mse_loss = nn.MSELoss()
    # Split pose vectors into rotation (first 3) and translation (last 3).
    rot_loss = mse_loss(pre[:, 0:3], tru[:, 0:3])
    trans_loss = mse_loss(pre[:, 3:6], tru[:, 3:6])
    # Regularization terms kept for reference (disabled in the original code):
    # loss11 = met.l1_regularization(train_model, 0.0001)
    # loss12 = met.l2_regularization(train_model, 0.0003)
    # Weight the rotation term more heavily than translation.
    return angle_weight * rot_loss + trans_loss


# Main training harness that is actually run in practice
class RegTrain:
    """Training/evaluation harness for the DRR pose-regression network.

    Wires together the train/test datasets, the network under training, the
    optimizer and the VM loss, and exposes one-step `train_once` / `test`
    methods. Alternative architectures that were tried are kept as commented
    code for reference.

    NOTE(review): `num_classes`, `mlp_num`, `conv_scale` and `layers_num` are
    only used by the commented-out model variants; with the current
    `basic_net` model they are ignored.
    """

    def __init__(self, num_classes=6, batch_size=8,
                 train_path=None, train_label_path=None,
                 test_path=None, test_label_path=None,
                 mlp_num=None, conv_scale=None, layers_num=None,
                 voxel_size=None, interval_num_train=None, interval_num_test=None,
                 rot_cen=None, d_s2c=None):
        """Build datasets, model, optimizer and loss.

        :param num_classes: output dimension for the (disabled) resnet50 variant
        :param batch_size: samples per batch for both datasets and the VM loss
        :param train_path / train_label_path: DRR images and label file for training
        :param test_path / test_label_path: DRR images and label file for testing
        :param mlp_num / conv_scale / layers_num: hyper-params for disabled model variants
        :param voxel_size: voxel size forwarded to the VM loss / test metric
        :param interval_num_train / interval_num_test: sampling interval counts
            for the train-time loss and the test-time metric respectively
        :param rot_cen: rotation center used by the projection model
        :param d_s2c: source-to-center distance used by the projection model
        """
        # Data loading: wrap the DRR image folders and label files in batched datasets.
        self.train_dataset = DRR_dataset(DRR_path=train_path, fn_path=train_label_path,
                                         Batch_size=batch_size)
        self.test_dataset = DRR_dataset(DRR_path=test_path, fn_path=test_label_path,
                                        Batch_size=batch_size)
        # Model under training; the commented lines are alternative architectures.
        # self.train_model = resnet50(num_classes=num_classes)
        self.train_model = basic_net(in_channels=3)
        # self.train_model = F1Net(in_channels=3)
        # self.train_model = UBasicNet(img_channels=1, pos_channels=2, layers_num=layers_num)
        # self.train_model = FasterNet(
        #     mlp_ratio=2.0,
        #     embed_dim=128,
        #     depths=(1, 2, 13, 2),
        #     drop_path_rate=0.15,
        #     act_layer='RELU',
        # )
        # Loss function and optimizer.
        # self.mse_loss = nn.MSELoss()
        self.optimizer = optim.Adam(self.train_model.parameters(), lr=0.0001)
        self.vm_loss = VmLoss(Batch_size=batch_size, voxel_size=voxel_size, interval_num=interval_num_train,
                              rot_cen=rot_cen, d_s2c=d_s2c)
        self.voxel_size = voxel_size
        self.interval_num_test = interval_num_test
        # Training bookkeeping.
        self.init_epoch = 0  # starting epoch counter
        self.Batch_size = batch_size
        self.min_loss = 10000  # best loss so far; used to decide when to save a checkpoint
        # Projection parameters (forwarded to the loss/metric).
        self.rot_cen = rot_cen
        self.d_s2c = d_s2c

    def train_once(self):
        """Run one forward/backward pass on a training batch.

        :return: tuple (batch_y, out, loss) — ground truth, prediction, loss tensor
        """
        # Switch to training mode and move the model to the GPU.
        # NOTE(review): .train() returns the module, so chaining .cuda() works;
        # the move is repeated every call (idempotent but redundant).
        self.train_model.train().cuda()
        batch_x, batch_y = self.train_dataset.get_data()
        out = self.train_model(batch_x)
        # Variant for models that take image and position as separate inputs:
        # out = self.train_model(batch_x[:, 0].reshape(self.Batch_size, 1, batch_x.shape[2],  batch_x.shape[3]),
        #                        batch_x[:, 1:])
        # loss = get_loss(out, batch_y)
        # Replaced the plain MSE loss with the VM loss.
        loss = self.vm_loss(out, batch_y, )
        # loss = torch.sum(out - batch_y)
        # Backpropagate and update the weights.
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return batch_y, out, loss

    def test(self):
        """Evaluate the model on one test batch without gradient tracking.

        :return: tuple (batch_y, out, loss) — ground truth, prediction, metric value
        """
        self.train_model.eval()
        with torch.no_grad():
            batch_x, batch_y = self.test_dataset.get_data()
            out = self.train_model(batch_x)
            # Variant for models that take image and position as separate inputs:
            # out = self.train_model(batch_x[:, 0].reshape(self.Batch_size, 1, batch_x.shape[2], batch_x.shape[3]),
            #                        batch_x[:, 1:])
            # loss = get_loss(out, batch_y)
            # Test-time metric: split poses into rotation/translation and use vm_sample.
            truth_rx, truth_tx = batch_y[:, 0:3], batch_y[:, 3:6]
            pred_rx, pred_tx = out[:, 0: 3], out[:, 3: 6]
            loss = met.vm_sample(truth_rx, truth_tx, pred_rx, pred_tx, Batch_size=self.Batch_size,
                                 voxel_size=self.voxel_size, interval_num=self.interval_num_test)
        return batch_y, out, loss


if __name__ == "__main__":
    exampleLogger = Logger()
    # exampleLogger.visualization()
    exampleLogger.train_log.info("#epoch:0 loss:0.5 max_dis:20#")
    exampleLogger.test_log.info("#epoch: loss:0.2 max_dis:10#")
    # train_test = Train(DRR_train_path=global_var.address + "data/CT/DRR_data",
    #                    train_json_path=global_var.address + "data/CT/train_label_info.json",
    #                    DRR_test_path=global_var.address + "data/CT/DRR_test_data",
    #                    test_json_path=global_var.address + "data/CT/test_label_info.json",
    #                    )
    # train_test.run()
    # train_test.logger.py.visualization()
