import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks


class BaseModel(ABC):
    """This class is an abstract base class (ABC) for models.

    To create a subclass, you need to implement the following five functions:
    -- <__init__>:                      initialize the class; first call BaseModel.__init__(self, opt).
    -- <set_input>:                     unpack data from dataset and apply preprocessing.
    -- <forward>:                       produce intermediate results.
    -- <optimize_parameters>:           calculate losses, gradients, and update network weights.
    -- <modify_commandline_options>:    (optionally) add model-specific options and set default options.
    """

    def __init__(self, opt):
        """Initialize the BaseModel class.

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions

        When creating your custom class, you need to implement your own
        initialization.
        In this function, you should first call <BaseModel.__init__(self, opt)>
        Then, you need to define four lists:
        -- self.loss_names (str list):          specify the training losses that you want to plot and save.
        -- self.model_names (str list):         define networks used in our training.
        -- self.visual_names (str list):        specify the images that you want to display and save.
        -- self.optimizers (optimizer list):    define and initialize optimizers.
                                                You can define one optimizer for each network.
                                                If two networks are updated at the same time, you can use
                                                itertools.chain to group them.
                                                See cycle_gan_model.py for an example.
        """
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        # Pick the device once; all tensors/networks are expected to live on it.
        self.device = (
            torch.device("cuda:{}".format(self.gpu_ids[0]))
            if self.gpu_ids
            else torch.device("cpu")
        )  # get device name: CPU or GPU
        self.save_dir = os.path.join(
            opt.checkpoints_dir, opt.name
        )  # save all the checkpoints to save_dir
        if (
            opt.preprocess != "scale_width"
        ):  # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
            torch.backends.cudnn.benchmark = True
        self.loss_names = []     # names of losses to plot/save (each maps to attribute "loss_<name>")
        self.model_names = []    # names of networks (each maps to attribute "net<name>")
        self.visual_names = []   # names of image attributes to display/save
        self.optimizers = []     # optimizers; schedulers are created from these in setup()
        self.image_paths = []    # paths of the images currently loaded; see get_image_paths()
        self.metric = 0  # used for learning rate policy 'plateau'

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new model-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase.
                               You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        return parser

    @abstractmethod
    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input (dict): includes the data itself and its metadata information.
        """
        pass

    @abstractmethod
    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        pass

    @abstractmethod
    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration."""
        pass

    def setup(self, opt):
        """Load and print networks; create schedulers.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        if self.isTrain:
            # One scheduler per optimizer; subclasses must have filled self.optimizers first.
            self.schedulers = [
                networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers
            ]
        if not self.isTrain or opt.continue_train:
            # Resume from an iteration checkpoint if requested; otherwise use the epoch label.
            load_suffix = "iter_%d" % opt.load_iter if opt.load_iter > 0 else opt.epoch
            self.load_networks(load_suffix)
        self.print_networks(opt.verbose)

    def eval(self):
        """Make models eval mode during test time."""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, "net" + name)
                net.eval()

    def test(self):
        """Forward function used in test time.

        This function wraps <forward> function in no_grad() so we don't save
        intermediate steps for backprop.
        It also calls <compute_visuals> to produce additional visualization results.
        """
        with torch.no_grad():
            self.forward()
            self.compute_visuals()

    def compute_visuals(self):
        """Calculate additional output images for visdom and HTML visualization."""
        pass

    def get_image_paths(self):
        """Return image paths that are used to load current data."""
        return self.image_paths

    def update_learning_rate(self):
        """Update learning rates for all the networks; called at the end of every epoch."""
        for scheduler in self.schedulers:
            if self.opt.lr_policy == "plateau":
                # ReduceLROnPlateau needs the tracked metric; self.metric is set by the training loop.
                scheduler.step(self.metric)
            else:
                scheduler.step()

        lr = self.optimizers[0].param_groups[0]["lr"]
        print("learning rate = %.7f" % lr)

    def get_current_visuals(self):
        """Return visualization images. train.py will display these images with visdom,
        and save the images to a HTML.
        """
        visual_ret = OrderedDict()
        for name in self.visual_names:
            if isinstance(name, str):
                visual_ret[name] = getattr(self, name)
        return visual_ret

    def get_current_losses(self):
        """Return training losses / errors. train.py will print out these errors on console,
        and save them to a file.
        """
        errors_ret = OrderedDict()
        for name in self.loss_names:
            if isinstance(name, str):
                errors_ret[name] = float(
                    getattr(self, "loss_" + name)
                )  # float(...) works for both scalar tensor and float number
        return errors_ret

    def save_networks(self, epoch):
        """Save all the networks to the disk.

        Parameters:
            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
        """
        for name in self.model_names:
            if isinstance(name, str):
                save_filename = "%s_net_%s.pth" % (epoch, name)
                save_path = os.path.join(self.save_dir, save_filename)
                net = getattr(self, "net" + name)

                if len(self.gpu_ids) > 0 and torch.cuda.is_available():
                    # Save the unwrapped (DataParallel .module) weights on CPU so the
                    # checkpoint is device-independent, then move the net back to GPU.
                    torch.save(net.module.cpu().state_dict(), save_path)
                    net.cuda(self.gpu_ids[0])
                else:
                    torch.save(net.cpu().state_dict(), save_path)

    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
        """Fix InstanceNorm checkpoints incompatibility (for checkpoints saved with PyTorch
        versions prior to 0.4): drop running stats / batch counters that newer
        InstanceNorm modules no longer expect. Recurses along the dotted key path.
        """
        key = keys[i]
        if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer
            if module.__class__.__name__.startswith("InstanceNorm") and (
                key == "running_mean" or key == "running_var"
            ):
                if getattr(module, key) is None:
                    state_dict.pop(".".join(keys))
            if module.__class__.__name__.startswith("InstanceNorm") and (
                key == "num_batches_tracked"
            ):
                state_dict.pop(".".join(keys))
        else:
            self.__patch_instance_norm_state_dict(
                state_dict, getattr(module, key), keys, i + 1
            )

    def load_networks(self, epoch):
        """Load all the networks from the disk.

        Parameters:
            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
        """
        for name in self.model_names:
            if isinstance(name, str):
                load_filename = "%s_net_%s.pth" % (epoch, name)
                load_path = os.path.join(self.save_dir, load_filename)
                net = getattr(self, "net" + name)
                if isinstance(net, torch.nn.DataParallel):
                    # Checkpoints are saved from the unwrapped module; load into it too.
                    net = net.module
                print("loading the model from %s" % load_path)
                # if you are using PyTorch newer than 0.4 (e.g., built from
                # GitHub source), you can remove str() on self.device
                state_dict = torch.load(
                    load_path, map_location=str(self.device))
                if hasattr(state_dict, "_metadata"):
                    del state_dict._metadata

                # patch InstanceNorm checkpoints prior to 0.4
                for key in list(
                    state_dict.keys()
                ):  # need to copy keys here because we mutate in loop
                    self.__patch_instance_norm_state_dict(
                        state_dict, net, key.split(".")
                    )
                net.load_state_dict(state_dict)

    def print_networks(self, verbose):
        """Print the total number of parameters in the network and (if verbose) network architecture.

        Parameters:
            verbose (bool) -- if verbose: print the network architecture
        """
        print("---------- Networks initialized -------------")
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, "net" + name)
                num_params = 0
                for param in net.parameters():
                    num_params += param.numel()
                if verbose:
                    print(net)
                print(
                    "[Network %s] Total number of parameters : %.3f M"
                    % (name, num_params / 1e6)
                )
        print("-----------------------------------------------")

    def set_requires_grad(self, nets, requires_grad=False):
        """Set requires_grad=False for all the networks to avoid unnecessary computations.

        Parameters:
            nets (network list)   -- a list of networks
            requires_grad (bool)  -- whether the networks require gradients or not
        """
        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad