import os
from dataset.dataset import DRR_dataset
import torch
from utils.logger import Logger
from configs.config import global_var
from network.ResNet_model import resnet50
from torch import nn, optim
import network.metrics as met
from train.regression_train import RegTrain
from train.mutil_label_train import ClaTrain
from train.refiner_train import RefineTrain
from train.fast_refiner_train import FRefineTrain
import numpy as np


# global_var.address = 'C:/Users/adminTKJ/Desktop/RLIR_sumup/'


# Loss computation
def get_loss(pre, tru):
    """Weighted MSE loss between predicted and ground-truth pose vectors.

    :param pre: predicted (batch, 6) tensor — columns 0-2 are the rotation
        part (rx), columns 3-5 the translation part (tx)
    :param tru: ground-truth tensor with the same layout
    :return: scalar loss tensor
    """
    criterion = nn.MSELoss()
    rot_loss = criterion(pre[:, :3], tru[:, :3])
    trans_loss = criterion(pre[:, 3:6], tru[:, 3:6])
    # Regularisation terms were tried to curb over-fitting and left disabled:
    # loss11 = met.l1_regularization(train_model, 0.0001)
    # loss12 = met.l2_regularization(train_model, 0.0003)
    # Weight the rotational term slightly higher so angular offsets dominate.
    return 1.1 * rot_loss + trans_loss


# The main driver class that is actually run.
class Train:
    """Training driver.

    Builds the mode-specific trainer ('reg', 'cla', 'reg_re', 'cla_re'),
    optionally resumes from a checkpoint, runs train/test once per epoch
    while logging results, checkpoints on test-loss improvement and saves
    the final model.
    """

    def __init__(self, batch_size=8, end_epoch=20,
                 DRR_train_path=None, train_json_path=None,
                 DRR_test_path=None, test_json_path=None,
                 is_resume=True, mode=None,
                 CT_path=None, pre_model_address=None, mlp_num=None, conv_scale=None, layers_num=None,
                 voxel_size=None, interval_num_train=None, interval_num_test=None,
                 rot_cen=None, d_s2c=400):
        """
        :param batch_size: mini-batch size for training and testing
        :param end_epoch: epoch index at which training stops
        :param DRR_train_path: directory of training DRR images
        :param train_json_path: JSON file with the training labels
        :param DRR_test_path: directory of test DRR images
        :param test_json_path: JSON file with the test labels
        :param is_resume: whether to resume from a saved checkpoint
        :param mode: trainer selector — 'reg', 'cla', 'reg_re' or 'cla_re'
        :param CT_path: CT volume path (refiner modes only)
        :param pre_model_address: path of a pre-trained ResNet-50 state dict
            (refiner modes only)
        :param mlp_num: forwarded to RegTrain — semantics defined there
        :param conv_scale: forwarded to RegTrain
        :param layers_num: forwarded to RegTrain
        :param voxel_size: forwarded to RegTrain
        :param interval_num_train: forwarded to RegTrain
        :param interval_num_test: forwarded to RegTrain
        :param rot_cen: projection rotation centre; defaults to
            np.array([89.82421875, 89.82421875, 53])
        :param d_s2c: projection source-to-centre distance
        """
        # Bug fix: the default used to be a single np.array evaluated once at
        # definition time (mutable default argument, shared by every call);
        # build the default per call instead. Same value, same interface.
        if rot_cen is None:
            rot_cen = np.array([89.82421875, 89.82421875, 53])
        # Experiment logger (train and test log files).
        self.logger = Logger()
        # Build the trainer for the requested mode. The modes are mutually
        # exclusive, so this is one if/elif chain (the 'reg_re' branch was a
        # separate `if` in the original — made consistent here).
        if mode == 'reg':
            self.neural_net = RegTrain(train_path=DRR_train_path,
                                       train_label_path=train_json_path,
                                       test_path=DRR_test_path,
                                       test_label_path=test_json_path,
                                       batch_size=batch_size,
                                       mlp_num=mlp_num,
                                       conv_scale=conv_scale,
                                       layers_num=layers_num,
                                       voxel_size=voxel_size,
                                       interval_num_train=interval_num_train,
                                       interval_num_test=interval_num_test,
                                       rot_cen=rot_cen, d_s2c=d_s2c
                                       )
        elif mode == 'cla':
            self.neural_net = ClaTrain(train_path=DRR_train_path,
                                       train_label_path=train_json_path,
                                       test_path=DRR_test_path,
                                       test_label_path=test_json_path,
                                       batch_size=batch_size
                                       )
        elif mode == 'reg_re':
            # Refine on top of a pre-trained 6-output regression ResNet-50.
            pre_model = resnet50(num_classes=6)
            pre_model.load_state_dict(torch.load(pre_model_address))
            self.neural_net = FRefineTrain(train_path=DRR_train_path,
                                           train_label_path=train_json_path,
                                           # test_path=DRR_test_path,
                                           # test_label_path=test_json_path,
                                           batch_size=batch_size,
                                           CT_path=CT_path,
                                           pre_model=pre_model
                                           )
        elif mode == 'cla_re':
            # Refine on top of a pre-trained 62-class classification ResNet-50.
            pre_model = resnet50(num_classes=62)
            pre_model.load_state_dict(torch.load(pre_model_address))
            self.neural_net = RefineTrain(train_path=DRR_train_path,
                                          train_label_path=train_json_path,
                                          test_path=DRR_test_path,
                                          test_label_path=test_json_path,
                                          batch_size=batch_size,
                                          CT_path=CT_path,
                                          pre_model=pre_model
                                          )
        # NOTE(review): an unknown or None mode leaves self.neural_net unset,
        # so run() would fail with AttributeError — confirm this is intended.

        # Training bookkeeping.
        self.init_epoch = 0  # first epoch index (may be advanced by resume)
        self.end_epoch = end_epoch
        self.min_loss = 10000  # lowest test loss so far; gates checkpointing
        self.Batch_size = batch_size
        # Whether to resume from an existing checkpoint.
        self.resume = is_resume
        # Projection-related parameters.
        self.rot_cen = rot_cen
        self.d_s2c = d_s2c

    # Checkpoint resume.
    def train_resume(self):
        """Restore model/optimizer state and epoch from the checkpoint file,
        if resuming is enabled and the file exists; otherwise start at 0."""
        if self.resume:
            # Restore the model.
            if os.path.isfile(global_var.loading_address + "data/checkpoint/check_model.pth"):
                print("Resume from checkpoint...")
                checkpoint = torch.load(global_var.loading_address + "data/checkpoint/check_model.pth")
                self.neural_net.train_model.load_state_dict(checkpoint['model_state_dict'])
                self.neural_net.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                # After reloading, every optimizer state tensor must be moved
                # back onto CUDA.
                for state in self.neural_net.optimizer.state.values():
                    for k, v in state.items():
                        if torch.is_tensor(v):
                            state[k] = v.cuda()
                self.init_epoch = checkpoint['epoch'] + 1
                print("====>loaded checkpoint (epoch{})".format(checkpoint['epoch']))
                del checkpoint  # free the (potentially large) checkpoint dict
            else:
                print("====>no checkpoint found.")
                self.init_epoch = 0  # no previous training: start from epoch 0

    def save_checkpoint(self, epoch):
        """Save model/optimizer state plus the epoch index, both to the eval
        directory and to the location train_resume() reads from."""
        checkpoint = {"model_state_dict": self.neural_net.train_model.state_dict(),
                      "optimizer_state_dict": self.neural_net.optimizer.state_dict(),
                      "epoch": epoch}
        path_checkpoint = global_var.saving_address + "eval/checkpoint/check_model.pth"
        current_path_checkpoint = global_var.loading_address + 'data/checkpoint/check_model.pth'
        torch.save(checkpoint, path_checkpoint)
        torch.save(checkpoint, current_path_checkpoint)

    # Result display.
    def result_display(self, batch_y, out, epoch, loss, mode=None):
        """Log the loss and maximum vertex distance for one epoch.

        :param batch_y: ground-truth (batch, 6) pose tensor
        :param out: predicted (batch, 6) pose tensor
        :param epoch: current epoch index
        :param loss: scalar loss — a torch tensor, or (test path only)
            possibly a numpy value
        :param mode: 'train' or 'tester' — selects the log file
        :return: the maximum vertex distance from met.max_distance
        """
        # Compute the maximum vertex distance.
        batch_rx = batch_y[:, 0:3]
        batch_tx = batch_y[:, 3:6]
        out_rx = out[:, 0: 3]
        out_tx = out[:, 3: 6]
        max_dis = met.max_distance(batch_rx, batch_tx, out_rx, out_tx, self.Batch_size,
                                   rot_cen=self.rot_cen, d_s2c=self.d_s2c)
        # Bug fix: float() on the 1-element array from .ravel() is deprecated
        # in NumPy >= 1.25; .item() extracts the scalar portably.
        if mode == 'train':
            self.logger.train_log.info("#train#epoch: {} loss: {} max_dis: {}"
                                       .format(epoch,
                                               round(loss.detach().cpu().item(), 5),
                                               round(max_dis, 5)))
        elif mode == 'tester':
            try:
                self.logger.test_log.info("#tester#epoch: {} loss: {} max_dis: {}"
                                          .format(epoch,
                                                  round(loss.detach().cpu().item(), 5),
                                                  round(max_dis, 5)))
            except AttributeError:
                # loss was not a torch tensor (e.g. a numpy scalar/array).
                self.logger.test_log.info("#tester#epoch: {} loss: {} max_dis: {}"
                                          .format(epoch,
                                                  round(loss.item(), 5),
                                                  round(max_dis, 5)))
            # print("#tester#batch_rx: {}".format(np.round(batch_rx.detach().cpu().numpy(), 1)))
            # print("#tester#out_rx: {}".format(np.round(out_rx.detach().cpu().numpy(), 1)))
            # print("#tester#batch_tx: {}".format(np.round(batch_tx.detach().cpu().numpy(), 1)))
            # print("#tester#out_tx: {}".format(np.round(out_tx.detach().cpu().numpy(), 1)))
        return max_dis

    def run(self):
        """Main loop: resume, then for each epoch train once, evaluate on the
        test set, log both, checkpoint on test-loss improvement, and finally
        save the trained model."""
        self.train_resume()
        # Start training.
        for epoch in range(self.init_epoch, self.end_epoch):
            # One training pass.
            batch_y, out, loss = self.neural_net.train_once()
            # Show the training result.
            self.result_display(batch_y, out, epoch, loss, mode="train")
            # Evaluate on the test set.
            batch_y, out, loss = self.neural_net.test()
            # Show the test result.
            self.result_display(batch_y, out, epoch, loss, mode='tester')
            # Save a checkpoint whenever the test loss reaches a new minimum.
            if loss <= self.min_loss:
                self.min_loss = loss
                self.save_checkpoint(epoch)
        # Save the model. Bug fix: formatting the raw tensor embedded
        # "tensor(..., device='cuda:0')" (including a ':') in the filename,
        # which is invalid on Windows; format the plain float instead.
        torch.save(self.neural_net.train_model.state_dict(), global_var.saving_address +
                   "model/6D_model_loss{0}.pth".format(float(self.min_loss)))


if __name__ == "__main__":
    # Smoke-test the logger: write one line to each of the train/test logs.
    demo_logger = Logger()
    # demo_logger.visualization()
    demo_logger.train_log.info("#epoch:0 loss:0.5 max_dis:20#")
    demo_logger.test_log.info("#epoch: loss:0.2 max_dis:10#")
    # Example of a full training run (paths come from the global config):
    # train_test = Train(DRR_train_path=global_var.address + "data/CT/DRR_data",
    #                    train_json_path=global_var.address + "data/CT/train_label_info.json",
    #                    DRR_test_path=global_var.address + "data/CT/DRR_test_data",
    #                    test_json_path=global_var.address + "data/CT/test_label_info.json",
    #                    )
    # train_test.run()
    # train_test.logger.py.visualization()
