import torch
import itertools
from utils.image_pool import ImagePool
from .base_model import BaseModel
from .diffusion import networks
from utils.util import SSIM,PSNR,normal,AverageMeter
import numpy as np
from  numpy.fft import fft2,fftshift
# from .lightcnn import network as lightcnn
import torch.nn.functional as F
from .cycle_gan_S012D_fft_model import NewAverageMeter
from models import create_model
import os

# import mypymath
# import time
from PIL import Image
from data.base_dataset import  get_transform
from torchvision import transforms
def change_fre(F):
    """Return the complement of frequency score `F` w.r.t. 100, floored at 0."""
    return max(0, 100 - F)

def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):
    """Constant `beta_end` schedule whose first `warmup_frac` fraction of steps
    ramps linearly from `beta_start` up to `beta_end`.

    FIX: this helper was called by get_beta_schedule ('warmup10'/'warmup50')
    but never defined in the file, so those branches raised NameError; it is
    restored from the reference DDPM implementation.
    """
    betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
    warmup_time = int(num_diffusion_timesteps * warmup_frac)
    betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
    return betas


def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):
    """Build the per-step noise-variance (beta) schedule of the diffusion chain.

    Parameters:
        beta_schedule (str)           -- one of 'quad', 'linear', 'warmup10',
                                         'warmup50', 'const', 'jsd'
        beta_start, beta_end (float)  -- endpoints of the schedule
        num_diffusion_timesteps (int) -- chain length T

    Returns:
        np.ndarray of shape (num_diffusion_timesteps,), dtype float64.

    Raises:
        NotImplementedError -- for an unknown schedule name.
    """
    if beta_schedule == 'quad':
        # Linear in sqrt-space, then squared.
        betas = np.linspace(beta_start ** 0.5, beta_end ** 0.5, num_diffusion_timesteps, dtype=np.float64) ** 2
    elif beta_schedule == 'linear':
        betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
    elif beta_schedule == 'warmup10':
        betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.1)
    elif beta_schedule == 'warmup50':
        betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.5)
    elif beta_schedule == 'const':
        betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
    elif beta_schedule == 'jsd':  # 1/T, 1/(T-1), 1/(T-2), ..., 1
        betas = 1. / np.linspace(num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64)
    else:
        raise NotImplementedError(beta_schedule)
    assert betas.shape == (num_diffusion_timesteps,)
    return betas


def extract(a, t, x_shape):
    """Gather per-timestep coefficients from `a` at indices `t` and reshape
    them to (batch, 1, ..., 1) so they broadcast against tensors of shape
    `x_shape`."""
    (batch,) = t.shape
    assert x_shape[0] == batch
    table = torch.tensor(a).float().to(device=t.device)
    picked = torch.gather(table, 0, t.long())
    assert picked.shape == (batch,)
    trailing = (1,) * (len(x_shape) - 1)
    return picked.reshape((batch,) + trailing)

def diffusion_step(x, t, *,
                   noise=None,
                   sqrt_alphas,
                   sqrt_one_minus_alphas):
    """
    Sample from q(x_t | x_{t-1}) (eq. (2)): scale the signal by sqrt(alpha_t)
    and mix in Gaussian noise scaled by sqrt(1 - alpha_t).
    """
    if noise is None:
        noise = torch.randn_like(x)
    assert noise.shape == x.shape
    signal_scale = extract(sqrt_alphas, t, x.shape)
    noise_scale = extract(sqrt_one_minus_alphas, t, x.shape)
    return signal_scale * x + noise_scale * noise


def denoising_step(x, t, *,
                   model,
                   logvar,
                   sqrt_recip_alphas_cumprod,
                   sqrt_recipm1_alphas_cumprod,
                   posterior_mean_coef1,
                   posterior_mean_coef2,
                   return_pred_xstart=False):
    """
    Sample from p(x_{t-1} | x_t).

    Rather than applying eq. (11) directly, mirror the original DDPM code:
    predict eps with the network, recover a clipped x_0 estimate from it,
    then sample from the posterior q(x_{t-1} | x_t, x_0).
    """
    # 1. The network predicts the noise eps added at step t.
    eps = model(x, t)

    # 2. Invert x_t = sqrt(alpha_bar)*x_0 + sqrt(1-alpha_bar)*eps for x_0,
    #    clamped to the valid image range [-1, 1].
    x0_hat = (extract(sqrt_recip_alphas_cumprod, t, x.shape) * x -
              extract(sqrt_recipm1_alphas_cumprod, t, x.shape) * eps)
    x0_hat = x0_hat.clamp(-1, 1)

    # 3. Mean of q(x_{t-1} | x_t, x_0) (eq. (6)).
    posterior_mean = (extract(posterior_mean_coef1, t, x.shape) * x0_hat +
                      extract(posterior_mean_coef2, t, x.shape) * x)

    step_logvar = extract(logvar, t, x.shape)

    # 4. Sample; at t == 0 the mask zeroes the noise so the mean is returned.
    gauss = torch.randn_like(x)
    keep_noise = 1 - (t == 0).float()
    keep_noise = keep_noise.reshape((x.shape[0],) + (1,) * (len(x.shape) - 1))
    sample = posterior_mean + keep_noise * torch.exp(0.5 * step_logvar) * gauss
    sample = sample.float()
    if return_pred_xstart:
        return sample, x0_hat
    return sample

class diffusionModel(BaseModel):
    """Denoising-diffusion (DDPM) model wrapped in the project's BaseModel interface.

    Precomputes the beta/alpha schedule tables (`init_diffusion_parameters`),
    exposes the forward noising chain (`diffuse`) and the reverse sampling
    chain (`denoise`), and implements the dataloader / display-controller
    hooks the training framework expects.

    NOTE(review): the previous docstring here described CycleGAN verbatim;
    this class only reuses CycleGAN-style option names (lambda_A / lambda_B,
    visual names) for compatibility with the surrounding framework.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add model-specific options and rewrite defaults for existing ones.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- train/test phase flag; kept for the framework's
                               hook signature even though both phases share
                               the same options here

        Returns:
            the modified parser.
        """
        parser.set_defaults(no_dropout=True)  # default CycleGAN did not use dropout
        parser.add_argument('--inputs', type=str, default='0', help='the moudal of input data ')
        parser.add_argument('--outputs', type=str, default='1', help='the moudal ofoutput data ')
        parser.add_argument('--slope', type=int, default=0, help='slope parameter')
        parser.add_argument('--lambda_A', type=float, default=1.0, help='weight for cycle loss (A -> B -> A)')
        parser.add_argument('--lambda_B', type=float, default=1.0, help='weight for cycle loss (B -> A -> B)')
        parser.add_argument('--lambda_fre', type=float, default=0, help='weight for preTrained Discriminator loss')
        return parser

    @staticmethod
    def display_opt_init(opt):
        """Force the option values used by the live display controller.

        FIX: this method was declared without `self`; it touches no instance
        state, so it is now an explicit @staticmethod (still callable on the
        class exactly as before, and now also on instances).
        """
        opt.inputs = 'S0'
        opt.outputs = 'SD'
        opt.slope = 0
        opt.lambda_fre = 0
        return opt

    def display_init(self, _dict):
        """Crop each detected face box out of `_dict['img']`, apply the
        dataset transform, and stack the crops into one batch tensor stored
        under the configured input key.

        Assumes `_dict['boxes']` holds (x1, y1, x2, y2) pixel boxes and that
        `_dict['img']` is an HxWxC array convertible to uint8 -- TODO confirm
        against the display controller.
        """
        transform = get_transform(self.opt, grayscale=(self.opt.input_nc == 1))
        _dict[self.opt.inputs + '_paths'] = "ZedCamera"
        for b in _dict['boxes']:
            face = transform(Image.fromarray(np.uint8(_dict['img'][b[1]:b[3], b[0]:b[2], :])))
            if self.opt.inputs in _dict.keys():
                _dict[self.opt.inputs] = torch.cat((_dict[self.opt.inputs], face.unsqueeze(0)), 0)
            else:
                _dict[self.opt.inputs] = face.unsqueeze(0)
        return _dict

    def __init__(self, opt):
        """Initialize the diffusion model.

        Parameters:
            opt (Option class) -- experiment flags; needs to be a subclass of
                BaseOptions and carry `diffusion_config` (schedule parameters)
                and `model_config` (network constructor kwargs) dictionaries.
        """
        BaseModel.__init__(self, opt)
        self.init_diffusion_parameters(**opt.diffusion_config)
        self.model = networks(**opt.model_config)
        # FIX: the local `device` was read before ever being assigned
        # (unconditional NameError); pick CUDA when available, else CPU.
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.model.to(self.device)

    def init_diffusion_parameters(self, **config):
        """Precompute every schedule-derived constant the sampler needs.

        Expects `config` to provide `beta_schedule`, `beta_start`, `beta_end`
        and `num_diffusion_timesteps`; optional `model_var_type` selects the
        reverse-process variance ('fixedsmall' by default, or 'fixedlarge').
        """
        self.model_var_type = config.get("model_var_type", "fixedsmall")
        betas = get_beta_schedule(
            beta_schedule=config['beta_schedule'],
            beta_start=config['beta_start'],
            beta_end=config['beta_end'],
            num_diffusion_timesteps=config['num_diffusion_timesteps']
        )
        self.num_timesteps = betas.shape[0]

        # Standard DDPM quantities (Ho et al., eqs. (4)-(7)).
        alphas = 1.0 - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
        posterior_variance = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
        sqrt_recip_alphas_cumprod = np.sqrt(1. / alphas_cumprod)
        sqrt_recipm1_alphas_cumprod = np.sqrt(1. / alphas_cumprod - 1)
        posterior_mean_coef1 = betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)
        posterior_mean_coef2 = (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)

        self.alphas = alphas
        self.sqrt_recip_alphas_cumprod = sqrt_recip_alphas_cumprod
        self.sqrt_recipm1_alphas_cumprod = sqrt_recipm1_alphas_cumprod
        self.posterior_mean_coef1 = posterior_mean_coef1
        self.posterior_mean_coef2 = posterior_mean_coef2
        self.sqrt_alphas = np.sqrt(alphas)
        self.sqrt_one_minus_alphas = np.sqrt(1. - alphas)

        if self.model_var_type == "fixedlarge":
            self.logvar = np.log(np.append(posterior_variance[1], betas[1:]))
        elif self.model_var_type == 'fixedsmall':
            # Clip to avoid log(0): the posterior variance is 0 at t == 0.
            self.logvar = np.log(np.maximum(posterior_variance, 1e-20))

    def visuale_param(self):
        """(Re)build `loss_names` / `visual_names` and metric meters for the
        current phase; called again from `set_input` whenever a batch flips
        the model between train and eval mode."""
        if self.isTrain:
            self.loss_names = ["G_SAB", "cycle_SAB", "idt_SAB",
                               "G_SBA", "cycle_SBA", "idt_SBA",
                               "D_SA", "D_SB", "G", "mean_G",
                               ]
            self.count_G = AverageMeter()
        else:
            self.loss_names = ["SB_SSIM", "SAB_SSIM", "SB_PSNR", "SAB_PSNR"]
            self.loss_names += ["mean_SB_PSNR", "mean_SAB_PSNR"]
            self.count_mean_SB_PSNR = AverageMeter()
            self.count_mean_SAB_PSNR = AverageMeter()

            self.loss_names += ["mean_SB_SSIM", "mean_SAB_SSIM"]
            # NOTE(review): SB uses NewAverageMeter while SAB uses plain
            # AverageMeter -- kept as-is, but worth confirming it is intended.
            self.count_mean_SB_SSIM = NewAverageMeter()
            self.count_mean_SAB_SSIM = AverageMeter()
        # Frequency metric is tracked in both phases.
        self.loss_names += ["mean_Frequency_SB"]
        self.count_Frequency_SB = AverageMeter()

        visual_names_S1A = ['real_SA', 'fake_SBA', ]
        visual_names_S1B = ['real_SB', 'fake_SAB']
        visual_names_S1B += ['rec_SAB', 'rec_SBA']

        self.visual_names = visual_names_S1A + visual_names_S1B  # combine visualizations for A and B

    def set_input(self, input):
        """Unpack a dataloader batch and move tensors to the model device.

        Parameters:
            input (dict): batch data plus metadata. Keys are built from
                `self.A` / `self.B` (modal names, assumed set by the
                framework -- TODO confirm): `<A>` image tensor, `<A>_paths`,
                optional `<B>` tensor, and `<A>_F` / `<B>_F` frequency maps.
                An optional "isTrain" flag flips train/eval bookkeeping.
        """
        if self.A in input.keys():
            self.real_SA = input[self.A].to(self.device)
            self.image_paths = input[self.A + '_paths']
        else:
            self.real_SA = None
            self.image_paths = None

        if self.B in input.keys():
            # Once a batch carries the B modality we enter dual-generator
            # mode; the flag is never reset here.
            self.dualGen = True
            self.real_SB = input[self.B].to(self.device)

        # Frequency maps: keep only the first channel.
        self.real_SA_F = input[self.A + '_F'][:, 0, :, :]
        if self.B + '_F' in input.keys():
            self.real_SB_F = input[self.B + '_F'][:, 0, :, :]

        if "isTrain" in input.keys():
            if self.isTrain != input["isTrain"]:
                self.isTrain = input["isTrain"]
                self.visuale_param()

    def denoise(self, n, n_steps=None, x=None, curr_step=None,
                progress_bar=lambda i, total=None: i,
                callback=lambda x, i, x0=None: None):
        """Run the reverse (sampling) chain for `n_steps` ending at step 0.

        Parameters:
            n (int)         -- batch size
            n_steps (int)   -- number of denoising steps (defaults to all of
                               `curr_step`)
            x (Tensor)      -- starting point; when None, samples x_T ~ N(0, I)
                               (only valid when starting from the last step)
            curr_step (int) -- current timestep (defaults to num_timesteps)
            progress_bar    -- wraps the step iterator (e.g. tqdm)
            callback        -- invoked with (x, i, x0=pred_x0) after each step

        Returns:
            the denoised batch tensor.
        """
        with torch.no_grad():
            if curr_step is None:
                curr_step = self.num_timesteps

            assert curr_step > 0, curr_step

            if n_steps is None or curr_step - n_steps < 0:
                n_steps = curr_step

            if x is None:
                assert curr_step == self.num_timesteps, curr_step
                # start the chain with x_T from normal distribution
                x = torch.randn(n, self.model.in_channels, self.model.resolution, self.model.resolution)
                x = x.to(self.device)

            for i in progress_bar(reversed(range(curr_step - n_steps, curr_step)), total=n_steps):
                t = (torch.ones(n) * i).to(self.device)
                x, x0 = denoising_step(x,
                                       t=t,
                                       model=self.model,
                                       logvar=self.logvar,
                                       sqrt_recip_alphas_cumprod=self.sqrt_recip_alphas_cumprod,
                                       sqrt_recipm1_alphas_cumprod=self.sqrt_recipm1_alphas_cumprod,
                                       posterior_mean_coef1=self.posterior_mean_coef1,
                                       posterior_mean_coef2=self.posterior_mean_coef2,
                                       return_pred_xstart=True)
                callback(x, i, x0=x0)

            return x

    def diffuse(self, n, n_steps=None, x=None, curr_step=None,
                progress_bar=lambda i, total=None: i,
                callback=lambda x, i: None):
        """Run the forward (noising) chain for `n_steps` starting at
        `curr_step`. `x` is required; each step samples q(x_{t+1} | x_t).

        Returns the noised batch tensor.
        """
        with torch.no_grad():
            if curr_step is None:
                curr_step = 0

            assert curr_step < self.num_timesteps, curr_step

            if n_steps is None or curr_step + n_steps > self.num_timesteps:
                n_steps = self.num_timesteps - curr_step

            assert x is not None

            for i in progress_bar(range(curr_step, curr_step + n_steps), total=n_steps):
                t = (torch.ones(n) * i).to(self.device)
                x = diffusion_step(x,
                                   t=t,
                                   sqrt_alphas=self.sqrt_alphas,
                                   sqrt_one_minus_alphas=self.sqrt_one_minus_alphas)
                callback(x, i + 1)

            return x

    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in
        every training iteration. Currently only runs the forward pass; no
        optimizer step is implemented yet."""
        self.forward()  # compute fake images and reconstruction images.

    def cal_score(self):
        """Framework hook for metric computation; not implemented."""
        pass

    def iter_end(self):
        """Framework hook called at the end of every iteration; no-op."""
        pass

    def epoch_end(self):
        """Framework hook called at the end of every epoch; no-op."""
        pass

    def val_save(self, param):
        """Record attribute `param` as the current validation score and return
        True when it beats (and replaces) the best value seen so far.

        Assumes higher is better and that `self.last_val_best[param]` was
        initialized elsewhere -- TODO confirm.
        """
        self.val_best = getattr(self, param)
        if self.last_val_best[param] < self.val_best:
            self.last_val_best[param] = self.val_best
            return True
        return False

    def display_proc(self, _dict):
        """Function interface prepared for the display controller; users may
        define their own post-processing here. Currently a pass-through."""
        return _dict
