# -*- coding: utf-8 -*-
"""
@Time: 2023年4月7日08:49:12
@Auth ： GaoShuai
@File ：RatFemurPDGanTrainer.py
@IDE ：PyCharm
"""
import os
import copy
import sys

import cv2
import numpy
import numpy as np
import torch

from networks.InDuDoNet.InDuDoNetDeeplesion import InDuDoNetDeeplesion
from networks.InDuDoNet.opt import opt

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

package_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(package_path)
print(sys.path)

from dataloaders.DeeplesionDuDoNetMultiLoader import DeeplesionDuDoNetMultiLoader, DeeplesionDuDoNetInferMultiLoader
from networks.DuDoNet.DuDoNet import DuDoNet
from trainers.BaseTrainer import _BaseHandler
from utils.Visualizer import Visualizer
from utils.build_gemotry import generate_deeplesion_odl_operator
from utils.config import Config


class DeeplesionDuDoNetHandler(_BaseHandler):
    """Training/inference handler for DuDoNet on the Deeplesion MAR dataset.

    Wires together the dual-domain model, the matching data loader and the
    visdom visualizers, then dispatches between training and inference via
    ``go()`` based on ``config.run_type``.
    """

    def __init__(self, config):
        """Build model, loaders and visualizers from *config*.

        When ``config.run_type == "inference"``, a reference InDuDoNet is also
        loaded for side-by-side comparison.
        """
        super(DeeplesionDuDoNetHandler, self).__init__(config)
        self.config = config
        # Forward projector (fp) and its transpose/back-projection (pT)
        # ODL operator modules shared by the loaders and networks.
        op_module_fp, op_module_pT = generate_deeplesion_odl_operator()
        self.model = DuDoNet(config, op_module_pT)
        if self.config.run_type == "inference":
            self.data_loader = DeeplesionDuDoNetInferMultiLoader(config, op_module_fp)
            self.InDuDoNet = InDuDoNetDeeplesion(opt, op_module_fp, op_module_pT, config.S_normalize_coefficient).cuda()
            # TODO(review): machine-specific checkpoint path — move into config.
            self.InDuDoNet.load_state_dict(
                torch.load(r"G:\code\my-in-du-do-net\models\DeeplesionInDuDoNetV4_0_0\net_epoch_01_index_030000.pt"))

        else:
            self.data_loader = DeeplesionDuDoNetMultiLoader(config, op_module_fp)
        # Second visualizer dedicated to sinogram-domain ("Y") images.
        self.visualizer_Y = Visualizer(config, if_Y=True)
        # Start the visdom server first: python -m visdom.server

    def train(self):
        """Run the training loop: optimize, periodically visualize and log losses."""
        tmp_train_index = 0
        self.visualizer.reset()
        # BUGFIX: read intervals from self.config instead of the module-level
        # `config` global, which only exists when this file runs as a script.
        show_interval = self.config['show_interval']
        logging_interval = self.config['logging_interval']
        for epoch in range(self.config.start_epoch + 1,
                           self.config.start_epoch + 1 + self.config['epochs_num']):
            for i, data_list in enumerate(self.data_loader):
                tmp_train_index += 1
                self.model.set_input(data_list)
                # with torch.autograd.detect_anomaly():
                self.model.optimize_parameters()  # this function contains forward and backward

                if self.config['if_show'] and i % show_interval == 0:
                    current_visuals = self.model.get_current_visuals_custom(0)
                    current_visuals_Y = self.model.get_current_visuals_custom_Y(0)
                    self.visualizer.display_current_results_LGNet(current_visuals, epoch, i, True)
                    self.visualizer_Y.display_current_results_LGNet(current_visuals_Y, epoch, i, True)

                if self.config['if_show'] and i % logging_interval == 0:
                    losses = self.model.get_current_losses()
                    loss_info = f"ExpName: {self.config.name} \nEpoch: {epoch}, Steps: {i}  \n"
                    for k, v in losses.items():
                        loss_info = loss_info + f"{k}: {v:.4f}, "
                    self.logger.info(loss_info)
                    # global step = tmp_train_index so the curve is continuous across epochs
                    self.tensorBoardLogger.add_scalars("loss_show", losses, tmp_train_index)
            self.model.save_networks(epoch, 0)
            self.model.update_learning_rate()

    def inference(self):
        """Run inference over the loader, display results and save X-domain outputs."""
        self.model.load_parameters(self.config.load_parameters_path)
        tmp_inference_index = 0
        # self.model.eval()
        for i, data_list in enumerate(self.data_loader):
            tmp_inference_index += 1
            self.logger.info("The current inference index is " + str(tmp_inference_index))
            self.model.set_input(data_list)
            self.model.inference()
            current_visuals = self.model.get_current_visuals_custom(0)
            current_visuals_Y = self.model.get_current_visuals_custom_Y(0)
            self.visualizer.display_current_results_LGNet(current_visuals, 0, i, True)
            self.visualizer_Y.display_current_results_LGNet(current_visuals_Y, 0, i, True)
            # BUGFIX: use a distinct index name instead of shadowing the outer `i`.
            # data_list[-1] holds the per-sample output paths; the "+ 1" undoes the
            # [-1, 1] normalization shift before saving — TODO confirm against loader.
            for j, save_path in enumerate(data_list[-1]):
                tmp = current_visuals["X_out"][j][0] + 1
                numpy.save(save_path, tmp)

    def inference_vs_InDuDoNet(self):
        """Run inference and display DuDoNet output next to precomputed InDuDoNet output.

        The InDuDoNet result is read back from disk (grayscale PNG) rather than
        recomputed live; the live-inference variant was removed as dead code.
        """
        self.model.load_parameters(self.config.load_parameters_path, 2, 0)
        tmp_inference_index = 0
        for i, data_list in enumerate(self.data_loader):
            # Debug hook flagging one specific sample of interest — safe to remove.
            for image_dir in data_list[-1]:
                if image_dir.find("000376_16_02_319_9") != -1:
                    print("good")
            # TODO(review): machine-specific result directory — move into config.
            InDuDoNetOutFilePath = os.path.join(
                r"E:\data_transform_buffer\metal_artifact\easy_dataset\test\A_InDuDoNetV1_0_3",
                os.path.basename(data_list[-1][0]))
            # Read as single-channel and add a leading channel axis -> shape (1, H, W).
            InDuDoNet_X_final_result = np.expand_dims(cv2.imread(InDuDoNetOutFilePath, 0), axis=0)

            # DuDoNet inference (data_list[-1] is the path list, not a tensor).
            tmp_inference_index += 1
            self.logger.info("The current inference index is " + str(tmp_inference_index))
            self.model.set_input(data_list[0:-1])
            self.model.inference()

            current_visuals = self.model.get_current_visuals_custom(0)
            current_visuals_Y = self.model.get_current_visuals_custom_Y(0)
            current_visuals["X_InDuDoNet_out"] = InDuDoNet_X_final_result
            self.visualizer.display_current_results_LGNet(current_visuals, 0, i, True)
            self.visualizer_Y.display_current_results_LGNet(current_visuals_Y, 0, i, True)

    def go(self):
        """Dispatch to comparison inference or training based on config.run_type."""
        if self.config.run_type == "inference":
            self.inference_vs_InDuDoNet()
        else:
            self.train()


if __name__ == '__main__':
    # Resolve the training YAML relative to this file's parent directory,
    # then hand control to the handler (train or inference per run_type).
    yml_path = os.path.join("..", "yamls", "DeeplesionDuDoNetTrain.yml")
    config = Config(yml_path)
    DeeplesionDuDoNetHandler(config).go()
