from copy import Error
from abc import ABC, abstractmethod
from collections import OrderedDict
import os
from random import shuffle

import torch
from torch import optim
from torch import nn
from torch.nn import functional as F
from torch import distributed as dist
# from torchvision import transforms
# from torchvision.utils import make_grid
# import torchvision
from torch.utils.tensorboard.writer import SummaryWriter
import numpy as np
from scipy.stats import entropy
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt

from components import *
from models import *
from utils import *


from DiffAugment_pytorch import DiffAug

class Trainer(ABC):
    """Common base for all trainers: stores the device, a logger and a summary writer."""

    def __init__(self, device, logger, writer):
        self.device = device
        self.logger = logger
        self.writer = writer

    @abstractmethod
    def step_train(self, *args, **kwargs):
        """Run a single optimisation step; concrete trainers must implement this."""
        pass

    def save(self, model_name: str, save_path: str, iter_: int):
        """Serialize ``self.<model_name>``'s state dict to ``save_path/iter_/<model_name>.pth``.

        Returns the model object that was saved.
        """
        model = getattr(self, model_name)
        target_dir = os.path.join(save_path, str(iter_))
        os.makedirs(target_dir, exist_ok=True)
        torch.save(model.state_dict(), os.path.join(target_dir, f"{model_name}.pth"))
        return model



class DiscriminatorCausalTrainer(Trainer):
    #@ the trainer to train DiscriminatorCausal
    def __init__(self, model:DiscriminatorCausal, lr: float = 1e-4, 
                 total_iter = None,
                 device:torch.device=torch.device('cpu'), 
                 logger = None,
                 writer: torch.utils.tensorboard.writer.SummaryWriter = None) -> None:
        """Trainer for D_c, the discriminator that pushes the auto-regression
        residual towards mutual independence across object slots.

        Args:
            model (DiscriminatorCausal): the causal discriminator network.
            lr (float, optional): Adam learning rate. Defaults to 1e-4.
            total_iter (int, optional): total training iterations; used as the
                single LR-decay milestone (so the decay only fires at the end).
            device (torch.device, optional): device. Defaults to torch.device('cpu').
            logger (optional): text logger. Defaults to None.
            writer (SummaryWriter, optional): tensorboard writer — presumably a
                project wrapper exposing .interval() and .global_step (TODO confirm).
        """
        super().__init__(device, logger, writer)
        self.discriminator_causal = model.to(device)
        self.optimizer = optim.Adam(self.discriminator_causal.parameters(), lr=lr, betas=(0.0, 0.999))
        self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, [total_iter*1.], gamma=0.1,)

        # short human-readable loss summary, refreshed periodically in step_train
        self.desc_loss = ''

    def put_samples(self, real_samples:torch.Tensor, labels:torch.Tensor, ):
        """Cache a detached copy of the real residual samples (b, o, d)."""
        self.samples = real_samples.detach() # b o d

    def step_train(self, residual) -> nn.Module:
        """step_train train D_c one step.

        Negatives ("virtual" samples) are built by independently permuting each
        object slot of the residual across the batch dimension, approximating
        a draw from the product of the per-slot marginals.
        """
        with torch.no_grad():
            real_samples = residual.detach()
            virtual_samples = real_samples.clone()
            # Bugfix: the previous random.shuffle on a tensor slice swapped
            # aliased views through shared storage, duplicating rows instead of
            # permuting them. Use an explicit index permutation per slot.
            batch_size = virtual_samples.size(0)
            for i in range(virtual_samples.size(1)):
                perm = torch.randperm(batch_size, device=virtual_samples.device)
                virtual_samples[:, i, :] = virtual_samples[perm, i, :]

            # shrink negatives slightly towards the origin
            virtual_samples.mul_(0.9)

        # GAN D train : E_pr softplus(-x) + E_pg  softplus(x)
        # real_samples must require grad so R1 can take a gradient penalty w.r.t. it
        real_samples.requires_grad = True
        out_real = self.discriminator_causal( real_samples.to(self.device) )
        out_fake = self.discriminator_causal( virtual_samples.to(self.device) )
        loss_real = F.softplus( -out_real ).mean()
        loss_fake = F.softplus(  out_fake ).mean()
        loss_r1 = R1(out_real.mean(), real_samples)
        loss = loss_real + loss_fake + loss_r1

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.scheduler.step()

        if self.writer is not None:
            if self.writer.interval(10):
                self.desc_loss = f'''DC out_gap:{(out_real - out_fake).mean().cpu().item():.4} loss_r1:{loss_r1.cpu().item():.4}'''

            if self.writer.interval(49):
                self.writer.add_scalar("DC/loss", loss.mean().item(), self.writer.global_step)

                self.writer.add_scalar("DC/loss_real", loss_real.mean().item(), self.writer.global_step)
                self.writer.add_scalar("DC/loss_fake", loss_fake.mean().item(), self.writer.global_step)
                self.writer.add_scalar("DC/real_fake_mse", F.mse_loss(virtual_samples, real_samples).item(), self.writer.global_step)
                self.writer.add_scalar("DC/loss_r1", loss_r1.item(), self.writer.global_step)

                self.writer.add_scalars("DC/out", 
                                        {"out_real": out_real.mean().item(),
                                         "out_fake": out_fake.mean().item(),
                                         "gap": out_real.mean().item() - out_fake.mean().item(),
                                        }, self.writer.global_step)

                self.writer.add_scalar('DC/learning rate', self.scheduler.get_last_lr()[0], self.writer.global_step)
                self.writer.add_scalars('DC/alpha', keyword_dict(self.discriminator_causal, 'alpha'), self.writer.global_step)

            if self.writer.interval(10_012):
                write_weights_grad(self.writer, self.discriminator_causal, 'DC', self.writer.global_step)

        return self.desc_loss
        


class ARTrainer(Trainer):
    #@ the autoresssor trainer, inspired by https://github.com/fishmoon1234/DAG-GNN/
    def __init__(self, causalgraph:CategoryCausalGraph, 
                 autoregressor:LinAutoRegressor, 
                 dc_trainer: DiscriminatorCausalTrainer, 
                 lr:float = 1e-4,
                 total_iter = None,
                 device: torch.device=torch.device('cpu'), 
                 logger = None,
                 writer: torch.utils.tensorboard.writer.SummaryWriter = None) -> None:
        """
        Args:
            causalgraph (CategoryCausalGraph): the causal graph W
            autoregressor (LinAutoRegressor): the main auto regressor model
            dc_trainer (DiscriminatorCausalTrainer): the trainer to train and get MI discriminator
            lr (float, optional): Learning rate. Defaults to 1e-4.
            total_iter (int, optional): total iterations; the single LR-decay milestone.
            device (torch.device, optional): device. Defaults to torch.device('cpu').
            logger (optional): text logger. Defaults to None.
            writer (SummaryWriter, optional): tensorboard writer wrapper. Defaults to None.
        """
        super().__init__(device, logger, writer)

        self.cg = causalgraph.to(device)
        self.ar = autoregressor.to(device)
        self.dc_trainer = dc_trainer
        # assert dc_trainer.device == device, f"{dc_trainer.device} ~= {device}"

        # causal graph and autoregressor are optimized jointly by one Adam
        parameters = itertools.chain(self.cg.parameters(), self.ar.parameters())
        self.M_optimizer = optim.Adam(parameters, lr=lr, betas=(0., 0.999))
        self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.M_optimizer, [total_iter*1.], gamma=0.1,)
        
        # ADMM-style dual variables for the DAG constraint (updated in step_train)
        self.lambda_A = 1.
        self.c_A = 1
        self.lambda_sparse = 1e-3 
        self.lambda_causal = 1
        

    def loss(self, batch_samples:torch.Tensor, batch_bboxes:torch.Tensor,
             batch_labels:torch.Tensor, W_selected_G):
        """loss compute loss given samples, 
        also dc_trainer.put_samples(residual, batch_labels)

        Args:
            batch_samples (torch.Tensor): b o d object features (per-object feature vectors)
            batch_bboxes (torch.Tensor): b o 4
            batch_labels (torch.Tensor): b o
            W_selected_G (torch.Tensor): b o o, the W of G to match

        Returns:
            tuple: ([loss, L_rec, h_A, aug_lag_term, W_squared_dist, L_tc, matching_loss],
                    detached residual of shape b o d)
        """
        
        b, o, d = batch_samples.size()
        
        W_selected = self.cg(batch_bboxes, batch_labels) # causal graph
        # center the features across the batch before auto-regression
        obj_feat = batch_samples - batch_samples.mean(dim=0, keepdim=True)
        # obj_feat = F.normalize(obj_feat, p=2, dim=2, eps=1e-6) # unit norm
        x = self.ar(obj_feat, W_selected, batch_labels ) # auto regress
        # x = F.normalize(x, 2, 2, 1e-6) # unit norm
        residual = x - obj_feat #  b o d
        
        # train the MI discriminator first
        # self.dc_trainer.put_samples(residual.detach(), batch_labels.detach())

        # auto regressive reconstruction term, as x and obj_feat are unit norm, is -2 cos()
        # L_rec = - torch.einsum('bod, bod -> bo', x, obj_feat).mean().mul(2.)
        # L_rec = 2 - 2 * torch.mul(x, obj_feat).sum(2).mean()
        L_rec = F.mse_loss(obj_feat, x , reduction="sum")/b/o
        # the DAG regularization term, o is the number of object
        h_A = regular_DAG(W_selected, o) # b o o
        # the aug lag term
        aug_lag_term = h_A.pow(2)
        # path_length_term = W.power(2).matrix_power(n).norm() # \| (A .* A)^n \|^2
        # the closed solution of  lambda_sparse * \| W - W_selected \| + |W|
        sparse_W = F.softshrink(W_selected.detach(), self.lambda_sparse).detach() 
        # the aug term \| W - W_selected \|
        W_squared_dist = torch.dist(W_selected, sparse_W).pow(2)
        # the causal regularization term to encourage mutual independence of residual
        L_tc = self.dc_trainer.discriminator_causal(residual).mean() # b o d -> b 1 -> 1
        
        # match W_selected_D: compare row-normalized |W| of this (D-side) graph
        # against the generator-side graph (detached, so only this side moves)
        W_selected_abs = W_selected.abs()
        normalized_W_selected_D_abs = W_selected_abs.div(W_selected_abs.sum(2, keepdim=True) + 1e-8 )
        normalized_W_selected_G_abs = W_selected_G.abs().div(W_selected_G.abs().sum(2, keepdim=True) + 1e-8 )
        matching_loss = torch.dist(normalized_W_selected_D_abs, normalized_W_selected_G_abs.detach())

        # the total loss
        loss = L_rec + self.lambda_A * h_A + 0.5 * self.c_A * aug_lag_term + \
            2 * self.lambda_sparse * W_squared_dist + self.lambda_causal * L_tc + matching_loss
            
        return [loss, L_rec, h_A, aug_lag_term, W_squared_dist, L_tc, matching_loss], residual.detach()
        
    def step_train(self, batch_samples, batch_bboxes, batch_labels, W_selected_G, logger = None,
                   writer: torch.utils.tensorboard.writer.SummaryWriter = None):
        """One optimisation step for the causal graph + autoregressor, followed by
        an ADMM-style dual update of the DAG-constraint multipliers.

        Returns (W_selected.detach(), residual.detach()).
        """
        #* requires the input features {batch_samples} to be b o d, with padded objects
        
        b, o, d = batch_samples.size()
        b, o = batch_labels.size()

        batch_samples = batch_samples.detach()
        losses, residual = self.loss(batch_samples.to(self.device), \
                batch_bboxes, batch_labels.to(self.device), W_selected_G)
        loss, L_rec, h_A, aug_lag_term, W_squared_dist, L_tc, matching_loss = losses

        # optimize
        self.M_optimizer.zero_grad()
        loss.backward()
        self.M_optimizer.step()
        self.scheduler.step()

        # re-evaluate the graph AFTER the parameter update for the dual step
        W_selected = self.cg(batch_bboxes, batch_labels.to(self.device))
        # update dual variables
        h_A_old = h_A.item()
        h_A_new = regular_DAG(W_selected, o).detach().item()
        if h_A_new > 1 * h_A_old and self.c_A < 1e10: # DAG admm rho , #! weak regularization, was 0.25 * h_A_old
            self.c_A *= 1.0001 # ! here regularization weaker, was 10
        self.lambda_A += self.c_A * h_A_new  # ADMM dual ascent for the DAG constraint
        # self.lambda_sparse += W_squared_dist.item() / 10 # ADMM dual ascent for the l1 term

        # if h_A_new.item() <= 1e-6: # small enough
        #     break
        
        if self.logger is not None:
            pass
        if self.writer is not None: 
            if self.writer.interval(50):
                self.writer.add_scalar("AR/loss",  loss.item(), self.writer.global_step)
                self.writer.add_scalar("AR/L_rec", L_rec.item(), self.writer.global_step)
                self.writer.add_scalar("AR/h_A",   h_A.item(), self.writer.global_step)
                self.writer.add_scalar("AR/aug_lag_term",   aug_lag_term.item(), self.writer.global_step)
                self.writer.add_scalar("AR/W_squared_dist", W_squared_dist.item(), self.writer.global_step)
                self.writer.add_scalar("AR/L_tc",           L_tc.item(), self.writer.global_step)
                self.writer.add_scalar("AR/L_1_w_selected", W_selected.abs().sum([1,2]).mean().item(), self.writer.global_step)
                self.writer.add_scalar("AR/match_W_G",      matching_loss.item(), self.writer.global_step)

                self.writer.add_scalar("AR/c_A",       self.c_A, self.writer.global_step)
                self.writer.add_scalar("AR/lambda_A",  self.lambda_A, self.writer.global_step)
                self.writer.add_scalar("AR/lambda_sparse",  self.lambda_sparse, self.writer.global_step)
                
                self.writer.add_scalar('AR/learning rate',  self.scheduler.get_last_lr()[0], self.writer.global_step)

            if self.writer.interval(53):
                self.writer.add_scalars("AR/W_selected", para_statics(W_selected.detach().cpu()), self.writer.global_step)
            #     self.writer.add_scalars("AR/W", para_statics(self.cg(None, None).detach().cpu()), self.writer.global_step)

                # self.writer.add_scalars('AR/alpha', keyword_dict(self.ar, 'alpha'), self.writer.global_step)
                # self.writer.add_scalars('AR/swish_beta', keyword_dict(self.ar, 'swish_beta'), self.writer.global_step)
            
            if self.writer.interval(10_013):
                write_weights_grad(self.writer, self.ar, 'AR', self.writer.global_step)
                write_weights_grad(self.writer, self.cg, 'CG', self.writer.global_step)

        return W_selected.detach(), residual.detach()

class ImageDiscriminatorTrainer(Trainer):
    """Trainer for the image discriminator: hinge GAN losses on image / object
    real-fake / object class heads, plus optional R1 and LECAM regularization,
    with DiffAug (and optionally APA) adaptive augmentation."""
    def __init__(self, model:ResnetDiscriminator, 
                 lr: float = 3 * 1e-4, 
                 lamb_obj = 1.0, lamb_img = 0.1,
                 total_iter = None,
                 device:torch.device=torch.device('cpu'), 
                 logger = None,
                 writer: torch.utils.tensorboard.writer.SummaryWriter = None) -> None:
        super().__init__(device, logger, writer)
        self.discriminator = model.to(device)
        self.optimizer = optim.Adam(self.discriminator.parameters(), lr=lr, betas=(0., 0.999))
        self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, [total_iter*1.], gamma=0.1,)
        # step counter; gates the (lazy) R1 penalty schedule in step_train
        self.count = 0
        self.lamb_obj = lamb_obj
        self.lamb_img = lamb_img
        self.desc_loss = ''
        # DiffAug / APA / LECAM are project components — adaptive differentiable
        # augmentation, adaptive pseudo augmentation and LeCam regularization
        # (presumably; defined outside this file — TODO confirm)
        self.diff_aug = DiffAug(total_iter,)
        self.APA = APA()
        self.LECAM_img = LECAM()
        self.LECAM_obj_TF = LECAM()
        self.LECAM_obj_cls = LECAM()

    def step_train(self, real_samples, fake_samples, batch_bboxes, batch_labels, *, flag_R1=False):
        """One discriminator step on a real/fake image batch.

        Args:
            real_samples: real images, b 3 h w (moved to self.device here).
            fake_samples: generator output (detached here; G is not updated).
            batch_bboxes: b o 4 object boxes.
            batch_labels: b o object labels.
            flag_R1 (bool, keyword-only): enable the lazily-scheduled R1 penalty.

        Returns:
            (real rearranged triple, fake rearranged triple, desc_loss string).
        """
        real_samples = real_samples.to(self.device)
        fake_samples = fake_samples.detach().to(self.device)
        
        # real_samples = self.APA.adaptive_pseudo_augmentation(real_samples, fake_samples)
        real_samples_aug = self.diff_aug(real_samples).detach()
        # grad w.r.t. the augmented reals is needed for the R1 penalty below
        real_samples_aug.requires_grad = True
        out_real = self.discriminator(real_samples_aug, batch_labels.to(self.device), batch_bboxes.to(self.device))
        out_img_real, out_obj_TF_real, out_obj_cls_real, out_real_rearrage = out_real
        if out_real_rearrage is None:
            out_real_rearrage = [None,] * 3
        obj_feat_rearrage_real, bbox_rearrage_real, labels_rearrage_real = out_real_rearrage
        self.count += 1
        # lazy R1: once at step 11, then every 10 steps after 20k steps
        if flag_R1 and ((self.count > 20_000 and self.count % 10 == 0) or self.count == 11):
            r1_loss = R1(out_img_real.mean(), real_samples_aug)
        else:
            r1_loss = torch.tensor(0., device=self.device)
        
        loss_img_real = HingeLoss.train_real(out_img_real)
        loss_obj_TF_real = HingeLoss.train_real(out_obj_TF_real)
        # loss_obj_cls_real = F.cross_entropy(out_obj_cls_real, labels_rearrage_real.flatten().to(self.device)) # bo 1
        # out_obj_cls_real = out_obj_cls_real[:, labels_rearrage_real.flatten().to(self.device)]
        loss_obj_cls_real = HingeLoss.train_real(out_obj_cls_real)
        loss_obj_real = loss_obj_TF_real + loss_obj_cls_real

        fake_samples_aug = self.diff_aug(fake_samples).detach()
        
        out_fake = self.discriminator(fake_samples_aug, batch_labels.to(self.device), batch_bboxes.to(self.device))
        out_img_fake, out_obj_TF_fake, out_obj_cls_fake, out_fake_rearrage = out_fake
        if out_fake_rearrage is None:
            out_fake_rearrage = [None,] * 3
        obj_feat_rearrage_fake, bbox_rearrage_fake, labels_rearrage_fake = out_fake_rearrage
        loss_img_fake = HingeLoss.train_fake(out_img_fake)
        loss_obj_TF_fake = HingeLoss.train_fake(out_obj_TF_fake)
        # loss_obj_cls_fake = F.cross_entropy(out_obj_cls_fake, labels_rearrage_fake.flatten().to(self.device)) # bo 1
        # loss_obj_fake = loss_obj_TF_fake - loss_obj_cls_fake
        loss_obj_cls_fake = HingeLoss.train_fake(out_obj_cls_fake)
        loss_obj_fake = loss_obj_TF_fake + loss_obj_cls_fake
        
        loss_img = loss_img_real + loss_img_fake
        loss_obj = loss_obj_real + loss_obj_fake
        
        # LeCam regularization per head (EMA of real/fake outputs)
        loss_lecam_img = self.LECAM_img.lecamreg(out_img_real, out_img_fake)
        loss_lecam_obj_TF = self.LECAM_obj_TF.lecamreg(out_obj_TF_real, out_obj_TF_fake)
        loss_lecam_obj_cls = self.LECAM_obj_cls.lecamreg(out_obj_cls_real, out_obj_cls_fake)
        
        d_loss  = self.lamb_img * loss_img + self.lamb_obj * loss_obj + r1_loss
        d_loss += self.lamb_img * loss_lecam_img + self.lamb_obj * (loss_lecam_obj_TF + loss_lecam_obj_cls)
        
        self.optimizer.zero_grad()
        d_loss.backward()
        self.optimizer.step()
        self.scheduler.step()
        # feed the fake-output statistics back to the adaptive augmentation controllers
        out_fake = torch.cat([out_img_fake.flatten(), out_obj_TF_fake.flatten(), out_obj_cls_fake.flatten()], dim=0)
        self.diff_aug.step_fake( out_fake )
        self.APA.step_fake( out_fake )
        
        if self.writer is not None: 
    
            out_img_gap = out_img_real.mean().item() - out_img_fake.mean().item()
            out_obj_TF_gap = out_obj_TF_real.mean().item() - out_obj_TF_fake.mean().item()
            out_obj_cls_gap = out_obj_cls_real.mean().item() - out_obj_cls_fake.mean().item()

            if self.writer.interval(10):
                self.desc_loss = f'''D out_img_gap:{out_img_gap:.4} out_obj_TF_gap:{out_obj_TF_gap:.4} out_obj_cls_gap:{out_obj_cls_gap:.4}'''

            if self.writer.interval(50):
                self.writer.add_scalar("D/d_loss", d_loss.item(), self.writer.global_step)
                self.writer.add_scalar("D/d_loss_img", loss_img.item(), self.writer.global_step)
                self.writer.add_scalar("D/d_loss_obj", loss_obj.item(), self.writer.global_step)
                
                self.writer.add_scalars("D/d_loss_img_realfake",
                    {"real": loss_img_real.item(), "fake": loss_img_fake.item()}, self.writer.global_step)
                self.writer.add_scalars("D/d_loss_obj_TF",
                    {"real": loss_obj_TF_real.item(), "fake": loss_obj_TF_fake.item()}, self.writer.global_step)
                self.writer.add_scalars("D/d_loss_obj_cls",
                    {"real": loss_obj_cls_real.item(), "fake": loss_obj_cls_fake.item()}, self.writer.global_step)
                self.writer.add_scalars("D/d_loss_obj_realfake",
                    {"real": loss_obj_real.item(), "fake": loss_obj_fake.item()}, self.writer.global_step)
                
                self.writer.add_scalars("D/d_out_img",
                    {"real": out_img_real.mean().item(), 
                    "fake": out_img_fake.mean().item(),
                    "gap": out_img_gap}, self.writer.global_step)
                self.writer.add_scalars("D/d_out_img_range",
                    {"real_max": out_img_real.max().item(), 
                    "real_min": out_img_real.min().item(), 
                    "fake_max": out_img_fake.max().item(),
                    "fake_min": out_img_fake.min().item(),}, self.writer.global_step)
            
            if self.writer.interval(51):
                self.writer.add_scalars("D/d_out_obj_TF",
                    {"real": out_obj_TF_real.mean().item(), 
                    "fake": out_obj_TF_fake.mean().item(),
                    "gap": out_obj_TF_gap}, self.writer.global_step)
                self.writer.add_scalars("D/d_out_obj_TF_range",
                    {"real_max": out_obj_TF_real.max().item(), 
                    "real_min": out_obj_TF_real.min().item(), 
                    "fake_max": out_obj_TF_fake.max().item(),
                    "fake_min": out_obj_TF_fake.min().item(),}, self.writer.global_step)

                self.writer.add_scalars("D/d_out_obj_cls",
                    {"real": out_obj_cls_real.mean().item(), 
                    "fake": out_obj_cls_fake.mean().item(),
                    "gap": out_obj_cls_gap}, self.writer.global_step)
                self.writer.add_scalars("D/d_out_obj_cls_range",
                    {"real_max": out_obj_cls_real.max().item(), 
                    "real_min": out_obj_cls_real.min().item(), 
                    "fake_max": out_obj_cls_fake.max().item(),
                    "fake_min": out_obj_cls_fake.min().item(),}, self.writer.global_step)
                
                self.writer.add_scalar('D/learning rate', self.scheduler.get_last_lr()[0], self.writer.global_step)
                self.writer.add_scalar('D/diff_aug_prob', self.diff_aug.this_p, self.writer.global_step)
                self.writer.add_scalar('D/APA_prob', self.APA.p, self.writer.global_step)
                self.writer.add_scalars("D/LECAM_img", {"ema_real": self.LECAM_img.ema_real, "ema_fake": self.LECAM_img.ema_fake}, self.writer.global_step)
                self.writer.add_scalars("D/LECAM_obj_TF", {"ema_real": self.LECAM_obj_TF.ema_real, "ema_fake": self.LECAM_obj_TF.ema_fake}, self.writer.global_step)
                self.writer.add_scalars("D/LECAM_obj_cls", {"ema_real": self.LECAM_obj_cls.ema_real, "ema_fake": self.LECAM_obj_cls.ema_fake}, self.writer.global_step)
                self.writer.add_scalars('D/alpha', keyword_dict(self.discriminator, 'alpha'), self.writer.global_step)
                # self.writer.add_scalars('D/swish_beta', keyword_dict(self.discriminator, 'swish_beta'), self.writer.global_step)

            if self.count % 100 == 0:
                self.writer.add_scalar('D/R1_loss', r1_loss.item(), self.writer.global_step)
            
            if self.writer.interval(999):
                num_o = batch_bboxes.size(1)
                img_size = real_samples.size(2)
                # NOTE: real_samples is rebound to a CPU copy here; later branches
                # only read its size, so this is safe
                real_samples = real_samples.detach().cpu()
                crop_size = img_size//2
                # images are in [-1, 1]; *0.5+0.5 maps them to [0, 1] for display
                self.writer.add_image("images/real_images", make_grid(real_samples * 0.5 + 0.5, nrow=4), self.writer.global_step)
                num_ = 4
                real_crops = crop_images(real_samples[:num_,:,:,:], batch_bboxes[:num_,:,:], crop_size).view(num_*num_o, 3, crop_size, crop_size)
                self.writer.add_image("images/real_crops",  make_grid(real_crops * 0.5 + 0.5, nrow=4), self.writer.global_step)
            
            if self.writer.interval(9777):
                num_o = batch_bboxes.size(1)
                img_size = real_samples.size(2)
                real_samples_aug = real_samples_aug.detach().cpu()
                fake_samples_aug = fake_samples_aug.detach().cpu()
                self.writer.add_image("images/real_images_aug", make_grid(real_samples_aug * 0.5 + 0.5, nrow=4), self.writer.global_step)
                self.writer.add_image("images/fake_images_aug", make_grid(fake_samples_aug * 0.5 + 0.5, nrow=4), self.writer.global_step)

            if self.writer.interval(10_011):
                write_weights_grad(self.writer, self.discriminator, 'D', self.writer.global_step)
                # try:
                #     real_samples.requires_grad_()
                #     fake_samples.requires_grad_()
                #     self.writer.add_image("D/real_loss_img_grad", make_grid( torch.autograd.grad(loss_img, real_samples).deatch().cpu(), nrow=4) )
                #     self.writer.add_image("D/fake_loss_img_grad", make_grid( torch.autograd.grad(loss_img, fake_samples).deatch().cpu(), nrow=4) )
                #     self.writer.add_image("D/real_loss_obj_grad", make_grid( torch.autograd.grad(loss_obj, real_samples).deatch().cpu(), nrow=4) )
                #     self.writer.add_image("D/fake_loss_obj_grad", make_grid( torch.autograd.grad(loss_obj, fake_samples).deatch().cpu(), nrow=4) )
                # except:
                #     self.logger.info("add image error")
        
        return (obj_feat_rearrage_real, bbox_rearrage_real, labels_rearrage_real), \
                (obj_feat_rearrage_fake, bbox_rearrage_fake, labels_rearrage_fake), self.desc_loss
        # # update D network
        # netD.zero_grad()
        # d_out_real, d_out_robj = netD(real_images, bbox, label)
        # d_loss_real = F.relu(1.0 - d_out_real).mean()
        # d_loss_robj = F.relu(1.0 - d_out_robj).mean()

        # z = torch.randn(real_images.size(0), bbox.size(1), z_dim)
        # if args.GTmasks:
        #     fake_images = netG(z, bbox, y=label.squeeze(dim=-1), input_mask=GTmasks.float())
        # else:
        #     fake_images = netG(z, bbox, y=label.squeeze(dim=-1))
        # d_out_fake, d_out_fobj = netD(fake_images.detach(), bbox, label)
        # d_loss_fake = F.relu(1.0 + d_out_fake).mean()
        # d_loss_fobj = F.relu(1.0 + d_out_fobj).mean()

        # # NOTE(review): the object loss joins the adversarial game directly rather than being a plain cross-entropy — double-check

        # d_loss = lamb_obj * (d_loss_robj + d_loss_fobj) + lamb_img * (d_loss_real + d_loss_fake)
        # self.optimizer.zero_grad()
        # d_loss.backward()
        # self.optimizer.step()


class ImageGeneratorTrainer(Trainer):
    def __init__(self,
                 generator:ResnetGenerator,
                 causalgraph:CategoryCausalGraph, 
                 discriminator:ImageDiscriminatorTrainer,
                 ar_trainer: ARTrainer,
                 lr:float = 1e-4,
                 lamb_obj = 1.0, lamb_img = 0.1, lambda_ar_loss = 0.01,
                 total_iter = None, 
                 device: torch.device=torch.device('cpu'),
                 logger = None,
                 writer: torch.utils.tensorboard.writer.SummaryWriter = None) -> None:        
        """Trainer for the image generator G together with its causal graph W."""
        super().__init__(device, logger, writer)

        self.generator = generator.to(device)
        # the causal graph is optional; keep it on the training device when present
        self.W = causalgraph if causalgraph is None else causalgraph.to(device)
        # NOTE(review): despite the annotation, this object is later called like an
        # nn.Module (and .to(device) requires one) — likely the discriminator net itself
        self.discriminator = discriminator.to(device)
        self.ar_trainer = ar_trainer
        if self.W is None:
            trainable = self.generator.parameters()
        else:
            # optimize G and W jointly with a single Adam instance
            trainable = itertools.chain(self.generator.parameters(), self.W.parameters())
        self.optimizer = optim.Adam(trainable, lr=lr, betas=(0., 0.999))
        self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, [total_iter*1.], gamma=0.1,)
        self.lamb_obj = lamb_obj
        self.lamb_img = lamb_img
        self.lambda_ar_loss = lambda_ar_loss
        self.diff_aug = DiffAug(total_iter,)

        # ADMM-style dual variables for the DAG constraint on W
        self.lambda_A = 1.
        self.c_A = 1
        self.lambda_sparse = 1e-4 
        self.h_A_old = 0.

    def sample(self, W_selected, batch_bboxes=None, batch_labels=None, random_trunc=False, no_grad=False):
        """Draw fake images from noise conditioned on bboxes/labels and the selected graph.

        Returns (fake_samples, mask, raw_mask, adjust) from the generator.
        """
        # one latent vector per object slot; z_dim lives on the DDP-wrapped module
        noise = torch.randn(batch_bboxes.size(0), batch_bboxes.size(1), self.generator.module.z_dim, device=self.device)
        # generator signature: z, bbox, z_im=None, y=None, return_mask=False, input_mask=None
        grad_ctx = torch.no_grad if no_grad else torch.enable_grad
        with grad_ctx():
            fake_samples, mask, raw_mask, adjust = self.generator(
                W_selected, noise,
                batch_bboxes.to(self.device), None, batch_labels.to(self.device), return_mask=True)
        return fake_samples, mask, raw_mask, adjust

    def step_train(self, real_samples, batch_bboxes=None, batch_labels=None, W_selected_D=None, train=True):
        W_selected = self.W(batch_bboxes, batch_labels)
        out_G = self.sample(W_selected, batch_bboxes, batch_labels)
        fake_samples, mask, raw_mask, adjust = out_G
        if not train:
            return fake_samples, mask, raw_mask, adjust, W_selected.detach()

        fake_samples_aug = self.diff_aug(fake_samples)
        
        out_fake = self.discriminator(fake_samples_aug, batch_labels.to(self.device), batch_bboxes.to(self.device))
        out_img_fake, out_obj_TF_fake, out_obj_cls_fake, out_fake_rearrage = out_fake
        if out_fake_rearrage is None:
            out_fake_rearrage = [None,] * 3
        obj_feat_rearrage_fake, bbox_rearrage_fake, labels_rearrage_fake = out_fake_rearrage
        g_loss_img_fake = HingeLoss.train_g(out_img_fake)
        g_loss_obj_TF_fake = HingeLoss.train_g(out_obj_TF_fake)
        # out_obj_cls_fake = out_obj_cls_fake[:, labels_rearrage_fake.flatten().to(self.device)]
        g_loss_obj_cls_fake = HingeLoss.train_g(out_obj_cls_fake)
        # out_obj_cls_fake = F.cross_entropy(out_obj_cls_fake, labels_rearrage_fake.flatten().to(self.device))
        g_loss_obj_fake = g_loss_obj_TF_fake + g_loss_obj_cls_fake
        g_loss = self.lamb_img * g_loss_img_fake + self.lamb_obj * g_loss_obj_fake

        # optimizing causal graph self.W
        # DAG
        h_A = regular_DAG(W_selected, batch_bboxes.size(1)) # b o o
        # the aug lag term of DAG
        aug_lag_term = h_A.pow(2)
        # sparsity
        sparse_W = F.softshrink(W_selected.detach(), self.lambda_sparse).detach()
        W_squared_dist = torch.dist(W_selected, sparse_W).pow(2)
        # match W_selected_D
        W_selected_abs = W_selected.abs()
        normalized_W_selected_G_abs = W_selected_abs.div(W_selected_abs.sum(2, keepdim=True) + 1e-8)
        normalized_W_selected_D_abs = W_selected_D.abs().div(W_selected_D.abs().sum(2, keepdim=True) + 1e-8)
        matching_loss = torch.dist(normalized_W_selected_G_abs, normalized_W_selected_D_abs.detach())
        
        cg_loss = self.lambda_A * h_A + 0.5 * self.c_A * aug_lag_term + W_squared_dist + matching_loss
        
        if h_A.cpu().item() > 1 * self.h_A_old and self.c_A < 1e10: # DAG admm rho , #! weak regularization, was 0.25 * h_A_old
            self.c_A *= 1.0001 # ! here regularization weaker, was 10
        self.h_A_old = h_A.cpu().item()
        self.lambda_A += self.c_A * h_A.cpu().item()  # ADMM DAG的
            
        loss = g_loss + cg_loss
        # because W, cannot train in DDP mode
        # ar_losses = self.ar_trainer.loss(obj_feat_rearrage_fake, \
        #                                 bbox_rearrage_fake, labels_rearrage_fake.to(self.device))
        # g_ar_loss = ar_losses[0]
        
        # loss = g_loss + g_ar_loss * self.lambda_ar_loss
        
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.scheduler.step()
        out_fake = torch.cat([out_img_fake.flatten(), out_obj_TF_fake.flatten(), out_obj_cls_fake.flatten()], dim=0)
        self.diff_aug.step_fake( out_fake )
        
        if self.writer is not None :
            if self.writer.interval(50):
                self.writer.add_scalar("G/total_loss", loss.item(), self.writer.global_step)
                self.writer.add_scalar("G/g_loss", g_loss.item(), self.writer.global_step)
                # self.writer.add_scalar("G/g_ar_loss", g_ar_loss.item(), self.writer.global_step)
                
                self.writer.add_scalar("G/g_loss_img_fake",
                    g_loss_img_fake.item(), self.writer.global_step)
                self.writer.add_scalar("G/g_loss_obj_TF",
                    g_loss_obj_TF_fake.item(), self.writer.global_step)
                self.writer.add_scalar("G/g_loss_obj_cls",
                    g_loss_obj_cls_fake.item(), self.writer.global_step)
                self.writer.add_scalar("G/g_loss_obj_realfake",
                    g_loss_obj_fake.item(), self.writer.global_step)
                
                self.writer.add_scalar("G/g_out_img",
                    out_img_fake.mean().item(), self.writer.global_step)
                self.writer.add_scalar("G/g_out_obj_TF",
                    out_obj_TF_fake.mean().item(), self.writer.global_step)
                self.writer.add_scalar("G/g_out_obj_cls",
                    out_obj_cls_fake.mean().item(), self.writer.global_step)
                
                self.writer.add_scalars('G/alpha', keyword_dict(self.generator, 'alpha'), self.writer.global_step)
                self.writer.add_scalars('G/rho', keyword_dict(self.generator, 'rho'), self.writer.global_step)
                self.writer.add_scalars("G/mask_adjust_alpha", \
                            keyword_dict(self.generator, "mask_adjust_alpha"), self.writer.global_step)
                self.writer.add_scalar('G/learning rate', self.scheduler.get_last_lr()[0], self.writer.global_step)
                self.writer.add_scalar('G/diff_aug_prob', self.diff_aug.this_p, self.writer.global_step)
                
                l1_loss = W_selected.abs().sum([1,2]).mean().detach()
                self.writer.add_scalar('G/W_DAG', h_A.detach(), self.writer.global_step)
                self.writer.add_scalar("G/W_aug_DAG_term", aug_lag_term.item(), self.writer.global_step)
                self.writer.add_scalar('G/W_l1', l1_loss, self.writer.global_step)
                self.writer.add_scalar('G/match_W_D', matching_loss, self.writer.global_step)

                self.writer.add_scalar("G/c_A",       self.c_A, self.writer.global_step)
                self.writer.add_scalar("G/lambda_A",  self.lambda_A, self.writer.global_step)
                self.writer.add_scalar("G/lambda_sparse",  self.lambda_sparse, self.writer.global_step)
                
            if self.writer.interval(1001):
                # print("G trainer 1001")
                num_o = batch_bboxes.size(1)
                img_size = fake_samples.size(2)
                fake_samples = fake_samples.detach().cpu()
                
                self.writer.add_image("images/fake_images", make_grid(fake_samples * 0.5 + 0.5, nrow=4), self.writer.global_step)
                crop_size = img_size//2
                num_ = 4
                false_crops = crop_images(fake_samples[:num_,:,:,:], batch_bboxes[:num_,:,:], crop_size).view(num_*num_o, 3, crop_size, crop_size)
                self.writer.add_image("images/false_crops", make_grid(false_crops * 0.5 + 0.5, nrow=4), self.writer.global_step)

            if self.writer.interval(1002):
                num_o = batch_bboxes.size(1)
                img_size = fake_samples.size(2)
                raw_mask = raw_mask.detach().cpu()
                mask = mask.detach().cpu()
                adjust = adjust.detach().cpu()
                self.writer.add_image("images/raw_masks", make_grid(raw_mask[:4,:,:,:].view(4*num_o, 1, img_size, img_size), nrow=4), self.writer.global_step)
                self.writer.add_image("images/masks" ,    make_grid(mask[:4,:,:,:].view(    4*num_o, 1, img_size, img_size), nrow=4), self.writer.global_step)
                self.writer.add_image("images/adjust",    make_grid(adjust[:4,:,:,:].view(  4*num_o, 1, img_size, img_size), nrow=4), self.writer.global_step)

                try:
                    colors = colormap(num_ = self.generator.num_classes, as_array=True)
                except:
                    colors = colormap(num_ = self.generator.module.num_classes, as_array=True)
                labels = batch_labels.squeeze().tolist()
                self.writer.add_images("images/raw_masks_color", torch.stack([torchvision.transforms.ToTensor()(draw_mask(r, l, colors, False)) for r, l in zip(raw_mask, labels)], dim=0), self.writer.global_step)
                self.writer.add_images("images/masks_color", torch.stack([torchvision.transforms.ToTensor()(draw_mask(m, l, colors, False)) for m, l in zip(mask, labels)], dim=0), self.writer.global_step)
                
                    # color histogram
                # for iii in range(3):
                #     name = ['red', 'green', 'blue'][iii]
                #     writer.add_histogram(f'images/real_images_{name}', real_samples[:,iii,:,:], ii)
                #     writer.add_histogram(f'images/fake_images_{name}', fake_samples[:,iii,:,:], ii)

                    # flat_real = real_samples[:,iii,:,:].mul(255).int().flatten().numpy()
                    # fig, ax = plt.subplots()
                    # plt.hist(flat_real, bins=np.arange(256), range=[0, 255], density=True, figure=fig)
                    # # hist_real, _ = np.histogram(flat_real, bins=np.arange(256), density=True)
                    # # ax.bar(np.arange(0, 255), hist_real)
                    # # plt.ylim(bottom=0., top=1.)
                    # writer.add_figure(f"hist/real_image_{name}_hist", fig, ii)

                    # flat_fake = fake_samples[:,iii,:,:].mul(256).floor().flatten().numpy()
                    # fig, ax = plt.subplots()
                    # plt.hist(flat_fake, bins=np.arange(256), range=[0, 255], density=True, figure=fig)
                    # # hist_fake, _ = np.histogram(flat_fake, bins=np.arange(256), density=True)
                    # # ax.bar(np.arange(0, 255), hist_fake)
                    # # plt.ylim(bottom=0., top=1.)
                    # writer.add_figure(f"hist/fake_image_{name}_hist", fig, ii)
            

            if self.writer.interval(1003):
                raw_mask_cpu = raw_mask.detach().cpu()
                mask_cpu = mask.detach().cpu()
                num_o = mask.size(1)
                if mask_cpu.isnan().any():
                    print("mask nan")
                    print([m.isnan().any() for m in mask_cpu])
                if raw_mask_cpu.isnan().any():
                    print("raw_mask nan")
                    print([m.isnan().any() for m in raw_mask_cpu])
                mask_np = mask_cpu + 1e-8
                mask_np  = mask_np.div( mask_np.sum(dim=1, keepdim=True) ).numpy() # b o w h
                raw_mask_np = raw_mask_cpu + 1e-8
                raw_mask_np  = raw_mask_np.div( raw_mask_np.sum(dim=1, keepdim=True) ).numpy() # b o w h

                mask_entropy = np.mean( entropy( np.transpose(mask_np, [1,0,2,3]).reshape(num_o, -1) + 1e-8 ) )
                raw_mask_entropy = np.mean( entropy( np.transpose(raw_mask_np, [1,0,2,3]).reshape(num_o, -1) + 1e-8 ) )
                
                self.writer.add_scalars('G/mask_entropy', 
                            {
                                "mask_entropy": mask_entropy,
                                "raw_mask_entropy": raw_mask_entropy,
                                "gap": raw_mask_entropy - mask_entropy
                            }, self.writer.global_step)
                          
            if self.writer.interval(1004):
                adjust_cpu = adjust.detach().cpu() # b o w h
                self.writer.add_scalars('G/adjust',
                            {
                                "mean": adjust_cpu.mean().item(),
                                "std":  adjust_cpu.std([1,2,3]).mean().item(),
                                "max":  adjust_cpu.max(1)[0].mean().item(),
                                "min":  adjust_cpu.min(1)[0].mean().item(),
                            }, self.writer.global_step)
                self.writer.add_histogram('G/adjust', adjust_cpu, self.writer.global_step)

            if self.writer.interval(10_010):
                write_weights_grad(self.writer, self.generator, 'G', self.writer.global_step)
                
        return fake_samples, mask, raw_mask, adjust, W_selected.detach()


class ObjectClassifierTrainer(Trainer):
    # ! unused
    """Supervised trainer for an auxiliary object-crop classifier.

    Trains ``model`` (which must return ``(logits, features)``) with
    label-smoothed cross entropy under AMP on GPU, or plain cross entropy
    on CPU, and logs loss / accuracy / confusion matrices to TensorBoard.
    """

    def __init__(self,
                 model,
                 lr:float = 1e-3,
                 device: torch.device=torch.device('cpu'),
                 logger = None,
                 writer: torch.utils.tensorboard.writer.SummaryWriter = None) -> None:
        """
        :param model: classifier module returning ``(logits, features)``
        :param lr: RMSprop learning rate
        :param device: device to train on
        :param logger: stored by the base ``Trainer``; unused here
        :param writer: optional TensorBoard writer exposing ``interval()`` and ``global_step``
        """
        super().__init__(device, logger, writer)

        self.obj_cls = model.to(device)

        # positional args: lr, alpha=0.99, eps=1e-3; eps is unusually large —
        # presumably deliberate for stability, TODO confirm
        self.optimizer = optim.RMSprop(self.obj_cls.parameters(), lr, 0.99, 1e-3, momentum=0.9)
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(self.optimizer, 1_0000, )
        self.amp_scale = torch.cuda.amp.GradScaler()

    def step_train(self, batch_samples, batch_labels):
        """Run one optimization step and return the classifier features.

        :param batch_samples: ``(b, o, c, w, h)`` or ``(bo, c, w, h)`` image crops
        :param batch_labels: matching integer class labels
        :return: features; reshaped to ``(b, o, -1)`` when the input was 5-D
        """
        # Remember the original layout BEFORE flattening: the tensor is
        # reshaped to 4-D below, so checking batch_samples.ndim afterwards
        # (as the old code did) could never trigger the (b, o, -1) reshape.
        was_5d = batch_samples.ndim == 5
        if was_5d:
            b, o, num_color, w, h = batch_samples.size()
            bo = b * o
            batch_samples = batch_samples.view(bo, num_color, w, h)
            batch_labels = batch_labels.view(bo)
        elif batch_samples.ndim == 4:
            bo, num_color, w, h = batch_samples.size()
        else:
            raise Exception("should not be here")

        batch_samples = batch_samples.to(self.device)
        batch_labels  = batch_labels.to(self.device)

        self.optimizer.zero_grad()
        if str(self.device) == "cpu":
            pred, feat = self.obj_cls(batch_samples)
            loss = F.cross_entropy(pred, batch_labels)
            loss.backward()
            self.optimizer.step()
        else:
            with torch.cuda.amp.autocast():
                pred, feat = self.obj_cls(batch_samples) # bo num_classes, bo dim

                # hand-written cross entropy with 0.1 label smoothing
                logprobs = F.log_softmax(pred, dim=-1)
                nll_loss = -logprobs.gather(dim=-1, index=batch_labels.unsqueeze(1))
                nll_loss = nll_loss.squeeze(1)
                smooth_loss = -logprobs.mean(dim=-1)
                # reduce to a scalar: backward() / item() on a per-sample
                # loss vector raises at runtime
                loss = (0.9 * nll_loss + 0.1 * smooth_loss).mean()
                # loss = F.cross_entropy(pred, batch_labels)
            self.optimizer.zero_grad()
            self.amp_scale.scale(loss).backward()
            self.amp_scale.step(self.optimizer)
            self.amp_scale.update()
        self.scheduler.step()

        if was_5d:
            feat = feat.view(b, o, -1)

        # if dist.is_initialized():
        #     torch.cuda.synchronize()

        if self.writer is not None :
            if self.writer.interval(20):
                self.writer.add_scalar("ObjCls/total_loss", loss.item(), self.writer.global_step)
                self.writer.add_scalar('ObjCls/acc', pred.max(dim=1)[1].eq(batch_labels).sum().div(bo), self.writer.global_step)
                self.writer.add_scalar('ObjCls/learning rate', self.scheduler.get_last_lr()[0], self.writer.global_step)

            if self.writer.interval(100):
                # confusion_matrix expects hard label predictions on the CPU,
                # not softmax probabilities / device tensors
                cm = confusion_matrix(batch_labels.detach().cpu().numpy(),
                                      pred.argmax(dim=-1).detach().cpu().numpy())
                fig = plt.figure()
                ax = fig.add_subplot(111)
                sns.heatmap(cm, cmap=None, annot=True, ax = ax)
                self.writer.add_figure('ObjCls/confusion_matrix', fig, self.writer.global_step)

            if self.writer.interval(10_000):

                write_weights_grad(self.writer, self.obj_cls, 'ObjCls', self.writer.global_step)
                # best-effort saliency image; autograd.grad returns a tuple,
                # so index [0] before detaching (old code crashed here and
                # the bare except hid it)
                try:
                    batch_samples.requires_grad_()
                    grads = torch.autograd.grad(loss, batch_samples)[0]
                    self.writer.add_image("ObjCls/grad", make_grid(grads.detach().cpu(), nrow=4))
                except Exception:
                    pass
        
        return feat



def R1(prediction_real: torch.Tensor, real_sample: torch.Tensor, gamma: float = 20.) -> torch.Tensor:
    """
    Forward pass to compute the R1 gradient-penalty regularization.
    :param prediction_real: (torch.Tensor) Prediction of the discriminator for a batch of real images
    :param real_sample: (torch.Tensor) Batch of the corresponding real images (must require grad)
    :param gamma: (float) Penalty coefficient; defaults to 20 to preserve the previous hard-coded value
    :return: (torch.Tensor) Scalar loss value
    """
    # Gradient of the summed predictions w.r.t. the real samples;
    # create_graph=True so the penalty itself is differentiable.
    grad_real = torch.autograd.grad(outputs=prediction_real.sum(), inputs=real_sample, create_graph=True)[0]
    # grad_outputs=torch.ones(output.size(), device=images.device),
    #                    create_graph=True, retain_graph=True, only_inputs=True)

    # Mean over the batch of the per-sample squared gradient norms.
    regularization_loss: torch.Tensor = gamma * grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean()
    return regularization_loss


# https://github.com/google/lecam-gan/blob/master/stylegan2/lecam_loss.py
class LECAM:
    """LeCam regularization for GAN discriminators.

    Keeps exponential moving averages of the discriminator's real and fake
    predictions and penalizes predictions that overshoot them, which bounds
    the discriminator's output range during training.

    Example::

        import torch
        from torch.nn import functional as F
        l = LECAM()
        D_real = torch.randn(200)
        D_fake = -torch.randn(200)
        loss = l.lecamreg(D_real, D_fake)
    """

    def __init__(self, init = 0.1, decay=0.99, weight=0.2):
        super().__init__()
        self.ema_real = +init   # EMA of real scores, initialized positive
        self.ema_fake = -init   # EMA of fake scores, initialized negative
        self.decay = decay      # EMA decay factor
        self.weight = weight    # scale applied to the regularization loss

    def lecamreg(self, D_real, D_fake):
        """Compute the lecam regularization loss.

        Args:
            D_real: discriminator predictions of real images.
            D_fake: discriminator predictions of fake images.
        Returns:
            D_lecam_loss: lecam regularization loss.
        """

        # Historical statistics of the discriminator predictions
        real_scores_mean = D_real.mean().detach().item()
        fake_scores_mean = D_fake.mean().detach().item()

        self.ema_real = self.decay * self.ema_real + (1 - self.decay) * real_scores_mean
        self.ema_fake = self.decay * self.ema_fake + (1 - self.decay) * fake_scores_mean

        # Penalize real scores above the real EMA and fake scores below the fake EMA.
        D_lecam_real = F.relu(D_real - self.ema_real).pow(2).mean()
        D_lecam_fake = F.relu(self.ema_fake - D_fake).pow(2).mean()
        D_lecam_loss = D_lecam_real + D_lecam_fake

        return D_lecam_loss * self.weight


# https://github.com/EndlessSora/DeceiveD
class APA:
    """Adaptive Pseudo Augmentation (DeceiveD).

    Maintains a probability ``p`` of swapping real samples for generated
    ("pseudo") ones, and adapts ``p`` from the discriminator's outputs.
    """

    def __init__(self, init=0.):
        super().__init__()
        self.p = init                 # current augmentation probability
        self.t = 0.6                  # overfitting-heuristic threshold
        self.max_p = 0.8              # hard upper bound on p
        self.apa_interval = 1e-4      # per-update step size for p
        self.count = 0                # number of step calls seen
        self.update_interval = 4      # adjust p once every this many calls

    def step(self, out_real):
        """Adapt p using discriminator outputs on real images."""
        self.count += 1
        if self.count % self.update_interval != 0:
            return
        delta = torch.sign(out_real - self.t).mean().item() * self.apa_interval
        self.p = min(max(self.p + delta, 0.), self.max_p)

    def step_fake(self, out_fake):
        """Adapt p using discriminator outputs on fake images."""
        self.count += 1
        if self.count % self.update_interval != 0:
            return
        delta = torch.sign(- out_fake - self.t).mean().item() * self.apa_interval
        self.p = min(max(self.p + delta, 0.), self.max_p)

    def adaptive_pseudo_augmentation(self, real_img, pseudo_data):
        """Return the real batch with samples swapped for pseudo data w.p. p."""
        n = real_img.shape[0]
        dev = real_img.device
        ones = torch.ones([n, 1, 1, 1], device=dev)
        # per-sample Bernoulli(p) flag: 1 -> use pseudo sample, 0 -> keep real
        flag = torch.where(torch.rand([n, 1, 1, 1], device=dev) < self.p,
                           ones, torch.zeros_like(ones))
        if torch.allclose(flag, torch.zeros_like(flag)):
            return real_img
        return pseudo_data.to(dev) * flag + real_img * (1 - flag)
        

"""
import torch
from torch.nn import functional as F

out_real = torch.randn(4)
a = APA()
a.step(out_real)
a.step(out_real)
a.step(out_real)
a.step(out_real)
a.step(out_real)
a.step(out_real)
a.step(out_real)
print(a.p)
a.p = 0.5
real_img = torch.randn(4, 3, 64, 64)
pseudo_data = torch.randn(4, 3, 64, 64)
r = a.adaptive_pseudo_augmentation(real_img, pseudo_data)
r.size()
"""