import math

import torch
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch import nn
from torch.nn import Parameter
# import pdb
import numpy as np
# from . import networks
from .base_model import BaseModel
from .cdcn import network

class CdcnSingleModel(BaseModel):
    """Single-branch CDCN model.

    Maps one polarization input channel (selected by --dataIn, e.g. 's0') to a
    target representation (selected by --label, e.g. 'dolp') and trains the
    network with a BCE-with-logits loss.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        parser.set_defaults(no_dropout=True)  # default CycleGAN did not use dropout
        parser.add_argument('--netCDCN', type=str, default='single', help='the architecture situation of model')
        parser.add_argument('--dataIn', type=str, default='s0', help='the data type of input')
        parser.add_argument('--label', type=str, default='dolp', help='the data type of label')
        # No training-specific options for now; `is_train` is kept for the
        # framework's uniform modify_commandline_options interface.
        return parser

    def __init__(self, opt):
        """Initialize the CDCN single model.

        Parameters:
            opt -- experiment flags; expected to provide input_nc, output_nc,
                   ngf, netCDCN, theta, init_type, init_gain, and (when
                   training) lr. Needs to be a subclass of BaseOptions.
        """
        BaseModel.__init__(self, opt)
        # Names of losses the framework reads via `self.loss_<name>`.
        self.loss_names = ['CDCN_Single']
        # Networks saved/loaded by BaseModel; only persisted while training.
        self.model_names = ['CDCN'] if self.isTrain else []
        self.netCDCN = network.define_CDCN(opt.input_nc, opt.output_nc, opt.ngf, opt.netCDCN, opt.theta,
                                           opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:
            # define loss functions
            self.criterion = nn.BCEWithLogitsLoss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer = torch.optim.Adam(self.netCDCN.parameters(), lr=opt.lr, weight_decay=0.00005)
            self.optimizers.append(self.optimizer)

    def set_input(self, data):
        """Unpack input data from the dataloader.

        Parameters:
            data (dict) -- must contain entries keyed by opt.dataIn (network
                           input) and opt.label (training target).
        """
        self.dataIn = data[self.opt.dataIn]
        self.label = data[self.opt.label]

    def forward(self, x=None):
        """Run a forward pass; stores the network output in self.out.

        Parameters:
            x -- unused; kept with a default for backward compatibility.
                 BUGFIX: the original declared `x` as required, so the
                 zero-argument call in optimize_parameters() raised TypeError.
        """
        self.out = self.netCDCN(self.dataIn)

    def backward(self):
        """Compute the BCE-with-logits loss against self.label and backpropagate."""
        self.loss_CDCN_Single = self.criterion(self.out, self.label)
        self.loss_CDCN_Single.backward()

    def optimize_parameters(self):
        """One optimization step: forward, zero grads, backward, update weights."""
        self.forward()
        self.optimizer.zero_grad()
        self.backward()
        self.optimizer.step()