from utils.networks import AdaINGen, Decoder, MsImageDis, VAEGen, ContentEncoder, StyleEncoder, MLP, ContentEncoder_condconv
from torch.autograd import Variable
import torch
import torch.nn as nn
import os
from torch.optim import lr_scheduler
import torch.nn.init as init
import math
import segmentation_models_pytorch as smp


def get_num_adain_params(model):
    """Count the scalar AdaIN parameters *model* expects.

    Every ``AdaptiveInstanceNorm2d`` layer consumes ``num_features`` bias
    values (means) and ``num_features`` weight values (stds), i.e.
    ``2 * num_features`` scalars per layer.
    """
    return sum(
        2 * module.num_features
        for module in model.modules()
        if type(module).__name__ == "AdaptiveInstanceNorm2d"
    )


def assign_adain_params(adain_params, model):
    """Distribute a flat batch of AdaIN parameters over *model*.

    ``adain_params`` has shape (batch, total); each
    ``AdaptiveInstanceNorm2d`` layer, in ``model.modules()`` order,
    takes its leading ``2 * num_features`` columns — first the means
    (assigned to ``bias``), then the stds (assigned to ``weight``) —
    and the remaining columns are handed to the next AdaIN layer.
    """
    remaining = adain_params
    for module in model.modules():
        if type(module).__name__ != "AdaptiveInstanceNorm2d":
            continue
        n = module.num_features
        module.bias = remaining[:, :n].contiguous().view(-1)
        module.weight = remaining[:, n:2 * n].contiguous().view(-1)
        # Advance only when columns remain for further AdaIN layers.
        if remaining.size(1) > 2 * n:
            remaining = remaining[:, 2 * n:]


def decode(dec, mlp, content, style):
    """Render an image from a content code and a style code.

    The style code is mapped by *mlp* to a flat vector of AdaIN
    parameters, which are loaded into the AdaIN layers of *dec*
    before the content code is decoded.
    """
    assign_adain_params(mlp(style), dec)
    return dec(content)
# def weights_init(init_type='gaussian'):
#     def init_fun(m):
#         classname = m.__class__.__name__
#         if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):
#             # print m.__class__.__name__
#             if init_type == 'gaussian':
#                 init.normal_(m.weight.data, 0.0, 0.02)
#             elif init_type == 'xavier':
#                 init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
#             elif init_type == 'kaiming':
#                 init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
#             elif init_type == 'orthogonal':
#                 init.orthogonal_(m.weight.data, gain=math.sqrt(2))
#             elif init_type == 'default':
#                 pass
#             else:
#                 assert 0, "Unsupported initialization: {}".format(init_type)
#             if hasattr(m, 'bias') and m.bias is not None:
#                 init.constant_(m.bias.data, 0.0)

#     return init_fun


class content_Trainer(nn.Module):
    """Trainer for a 4-modality autoencoder with a shared content code.

    One content encoder reads all four single-channel modalities
    concatenated along the channel axis, while each modality keeps its own
    "style" feature encoder, decoder, and multi-scale LSGAN discriminator.
    Decoding concatenates the shared content feature map with the
    modality's style feature map.  Training combines adversarial, L1
    reconstruction, and cycle-consistency losses; generators and
    discriminators each get their own Adam optimizer with a StepLR
    schedule.
    """

    def __init__(self):
        super(content_Trainer, self).__init__()
        # Shared content encoder over the channel-wise concatenation of the
        # four inputs (hence input channels = 4).  Positional arguments are
        # presumably (n_downsample, n_res, input_dim, dim, norm, activ) --
        # TODO confirm against utils.networks.ContentEncoder.
        self.content_encoder = ContentEncoder(
            2, 4, 4, 64, 'in', 'relu', pad_type="zero")
        # Per-modality style encoders: same architecture, 1-channel input.
        self.s1_encoder = ContentEncoder(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s2_encoder = ContentEncoder(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s3_encoder = ContentEncoder(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s4_encoder = ContentEncoder(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")

        # adain->in
        # Per-modality decoders with instance-norm residual blocks; the 512
        # input channels are the concatenated content + style feature maps,
        # output is a single-channel image.
        self.s1_decoder = Decoder(
            2, 4, 512, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s2_decoder = Decoder(
            2, 4, 512, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s3_decoder = Decoder(
            2, 4, 512, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s4_decoder = Decoder(
            2, 4, 512, 1, res_norm='in', activ="relu", pad_type="zero")
        # Multi-scale discriminators (3 scales, 4 layers each, LSGAN loss).
        dis_para = {"dim": 64, "norm": "none", "activ": "lrelu", "n_layer": 4,
                    "gan_type": 'lsgan', 'num_scales': 3, "pad_type": 'reflect'}
        self.dis1 = MsImageDis(1, dis_para)
        self.dis2 = MsImageDis(1, dis_para)
        self.dis3 = MsImageDis(1, dis_para)
        self.dis4 = MsImageDis(1, dis_para)
        # Separate parameter groups: all encoders/decoders train under the
        # generator optimizer; all discriminators under the other.
        gen_params = list(self.content_encoder.parameters())+list(self.s1_encoder.parameters())+list(self.s2_encoder.parameters())+list(self.s3_encoder.parameters())+list(
            self.s4_encoder.parameters())+list(self.s1_decoder.parameters())+list(self.s2_decoder.parameters())+list(self.s3_decoder.parameters())+list(self.s4_decoder.parameters())
        dis_params = list(self.dis1.parameters()) + list(self.dis2.parameters()) + \
            list(self.dis3.parameters()) + list(self.dis4.parameters())
        self.dis_opt = torch.optim.Adam([p for p in dis_params if p.requires_grad],
                                        lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
        self.gen_opt = torch.optim.Adam([p for p in gen_params if p.requires_grad],
                                        lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
        # Halve both learning rates every 100k scheduler steps.
        self.dis_scheduler = lr_scheduler.StepLR(self.dis_opt, step_size=100000,
                                                 gamma=0.5, last_epoch=-1)
        self.gen_scheduler = lr_scheduler.StepLR(self.gen_opt, step_size=100000,
                                                 gamma=0.5, last_epoch=-1)
        # self.apply(weights_init('kaiming'))
        # self.dis1.apply(weights_init('gaussian'))
        # self.dis2.apply(weights_init('gaussian'))
        # self.dis3.apply(weights_init('gaussian'))
        # self.dis4.apply(weights_init('gaussian'))

    def gen_update(self, x1, x2, x3, x4):
        """One optimizer step for the encoders/decoders.

        Losses (also stored on ``self`` for logging):
          * adversarial -- each reconstruction must fool its discriminator;
          * reconstruction (weight 10) -- L1 between input and output;
          * cycle -- re-encoded style and content codes must match the
            codes of the original inputs.

        Returns:
            Tuple ``(adv_loss, recon_loss, cyc_loss)`` as Python floats.
        """
        self.gen_opt.zero_grad()
        # Shared content code from all modalities; per-modality style codes.
        content = self.content_encoder(torch.cat([x1, x2, x3, x4], dim=1))
        s1 = self.s1_encoder(x1)
        s2 = self.s2_encoder(x2)
        s3 = self.s3_encoder(x3)
        s4 = self.s4_encoder(x4)
        # Decode each modality from (content, own style).
        x1_recon = self.s1_decoder(torch.cat([content, s1], dim=1))
        x2_recon = self.s2_decoder(torch.cat([content, s2], dim=1))
        x3_recon = self.s3_decoder(torch.cat([content, s3], dim=1))
        x4_recon = self.s4_decoder(torch.cat([content, s4], dim=1))

        self.loss_gen_recon1 = self.recon_criterion(x1, x1_recon)
        self.loss_gen_recon2 = self.recon_criterion(x2, x2_recon)
        self.loss_gen_recon3 = self.recon_criterion(x3, x3_recon)
        self.loss_gen_recon4 = self.recon_criterion(x4, x4_recon)

        # Re-encode the reconstructions for the cycle losses.
        s1_recon = self.s1_encoder(x1_recon)
        s2_recon = self.s2_encoder(x2_recon)
        s3_recon = self.s3_encoder(x3_recon)
        s4_recon = self.s4_encoder(x4_recon)
        content_recon = self.content_encoder(
            torch.cat([x1_recon, x2_recon, x3_recon, x4_recon], dim=1))

        self.loss_cyc1 = self.recon_criterion(s1, s1_recon)
        self.loss_cyc2 = self.recon_criterion(s2, s2_recon)
        self.loss_cyc3 = self.recon_criterion(s3, s3_recon)
        self.loss_cyc4 = self.recon_criterion(s4, s4_recon)
        self.loss_content_cyc = self.recon_criterion(content, content_recon)

        self.loss_gen_adv1 = self.dis1.calc_gen_loss(x1_recon)
        self.loss_gen_adv2 = self.dis2.calc_gen_loss(x2_recon)
        self.loss_gen_adv3 = self.dis3.calc_gen_loss(x3_recon)
        self.loss_gen_adv4 = self.dis4.calc_gen_loss(x4_recon)

        adv_loss = self.loss_gen_adv1+self.loss_gen_adv2 + \
            self.loss_gen_adv3+self.loss_gen_adv4
        recon_loss = 10*(self.loss_gen_recon1+self.loss_gen_recon2 +
                         self.loss_gen_recon3+self.loss_gen_recon4)
        cyc_loss = self.loss_cyc1+self.loss_cyc2 + \
            self.loss_cyc3+self.loss_cyc4+self.loss_content_cyc
        self.loss_gen_total = adv_loss+recon_loss+cyc_loss
        self.loss_gen_total.backward()
        self.gen_opt.step()
        return adv_loss.item(), recon_loss.item(), cyc_loss.item()

    def dis_update(self, x1, x2, x3, x4):
        """One optimizer step for the four discriminators.

        Reconstructions are detached so only discriminator weights receive
        gradients.

        Returns:
            Total discriminator loss as a Python float.
        """
        self.dis_opt.zero_grad()
        content = self.content_encoder(torch.cat([x1, x2, x3, x4], dim=1))
        s1 = self.s1_encoder(x1)
        s2 = self.s2_encoder(x2)
        s3 = self.s3_encoder(x3)
        s4 = self.s4_encoder(x4)
        x1_recon = self.s1_decoder(torch.cat([content, s1], dim=1))
        x2_recon = self.s2_decoder(torch.cat([content, s2], dim=1))
        x3_recon = self.s3_decoder(torch.cat([content, s3], dim=1))
        x4_recon = self.s4_decoder(torch.cat([content, s4], dim=1))
        # D loss
        self.loss_dis_1 = self.dis1.calc_dis_loss(x1_recon.detach(), x1)
        self.loss_dis_2 = self.dis2.calc_dis_loss(x2_recon.detach(), x2)
        self.loss_dis_3 = self.dis3.calc_dis_loss(x3_recon.detach(), x3)
        self.loss_dis_4 = self.dis4.calc_dis_loss(x4_recon.detach(), x4)

        self.loss_dis_total = self.loss_dis_1 + \
            self.loss_dis_2+self.loss_dis_3+self.loss_dis_4
        self.loss_dis_total.backward()
        self.dis_opt.step()
        return self.loss_dis_total.item()

    def sample(self, x1, x2, x3, x4):
        """Reconstruct one batch in eval mode for visualization.

        Returns the first sample of each input alongside the first sample
        of its reconstruction, as 8 tensors.  The module is switched back
        to train mode before returning.
        """
        self.eval()
        # NOTE(review): these list initializations are dead code -- the
        # variables are rebound to tensors below (leftover from the
        # commented-out per-sample loop).
        x1_recon, x2_recon, x3_recon, x4_recon = [], [], [], []
        # for i in range(x1a.size(0)):
        #     x1,x2,x3,x4=x1a[i].unsqueeze(0),x2a.unsqueeze(0),x3a.unsqueeze(0),x4a.unsqueeze(0)
        #     content = self.content_encoder(torch.cat([x1, x2, x3, x4],dim=1))
        #     s1 = self.s1_encoder(x1)
        #     s2 = self.s2_encoder(x2)
        #     s3 = self.s3_encoder(x3)
        #     s4 = self.s4_encoder(x4)
        #     x1_recon.append(self.s1_decoder(torch.cat([content, s1],dim=1)))
        #     x2_recon.append(self.s2_decoder(torch.cat([content, s2],dim=1)))
        #     x3_recon.append(self.s3_decoder(torch.cat([content, s3],dim=1)))
        #     x4_recon.append(self.s4_decoder(torch.cat([content, s4],dim=1)))
        content = self.content_encoder(torch.cat([x1, x2, x3, x4], dim=1))
        s1 = self.s1_encoder(x1)
        s2 = self.s2_encoder(x2)
        s3 = self.s3_encoder(x3)
        s4 = self.s4_encoder(x4)
        x1_recon = self.s1_decoder(torch.cat([content, s1], dim=1))
        x2_recon = self.s2_decoder(torch.cat([content, s2], dim=1))
        x3_recon = self.s3_decoder(torch.cat([content, s3], dim=1))
        x4_recon = self.s4_decoder(torch.cat([content, s4], dim=1))
        # x1_recon, x2_recon = torch.cat(x1_recon), torch.cat(x2_recon)
        # x3_recon, x4_recon = torch.cat(x3_recon), torch.cat(x4_recon)
        self.train()
        return x1[0], x1_recon[0], x2[0], x2_recon[0], x3[0], x3_recon[0], x4[0], x4_recon[0]

    def recon_criterion(self, input, target):
        """Mean absolute error (L1) between two tensors."""
        return torch.mean(torch.abs(input - target))

    def update_learning_rate(self):
        """Advance both StepLR schedulers by one step."""
        if self.dis_scheduler is not None:
            self.dis_scheduler.step()
        if self.gen_scheduler is not None:
            self.gen_scheduler.step()


# style latent code trainer
class stylecode_Trainer(nn.Module):
    """Trainer with a shared content code and 8-dim latent style codes.

    Unlike ``content_Trainer``, each modality's style is a low-dimensional
    vector produced by a ``StyleEncoder`` rather than a feature map.  The
    per-modality decoders use AdaIN residual blocks whose affine
    parameters come from a per-modality MLP applied to the style code
    (see module-level ``decode``).  Training mixes reconstructions from
    the encoded style and from randomly sampled styles, with multi-scale
    NSGAN discriminators.

    ``label`` passed to the update/sample methods is a 4-element
    indicator; ``label[i] == 1`` presumably marks modality ``i`` as
    missing, in which case its input is zeroed before encoding --
    TODO confirm against the caller.

    Requires CUDA (random style codes are created with ``.cuda()``).
    """

    def __init__(self):
        super(stylecode_Trainer, self).__init__()
        # gen_para = {"dim": 64, "mlp_dim": 256, "style_dim": 8,
        #             "activ": 'relu', 'n_downsample': 2, 'n_res': 4, 'pad_type': 'zero'}
        # Shared content encoder over the 4-channel concatenation of the
        # inputs.  Positional arguments are presumably
        # (n_downsample, n_res, input_dim, dim, norm, activ) -- TODO confirm
        # against utils.networks.ContentEncoder.
        self.content_encoder = ContentEncoder(
            2, 4, 4, 64, 'in', 'relu', pad_type="zero")
        # Per-modality style encoders producing 8-dim style codes.
        self.s1_encoder = StyleEncoder(
            4, 1, 64, 8, norm='none', activ='relu', pad_type="zero")
        self.s2_encoder = StyleEncoder(
            4, 1, 64, 8, norm='none', activ='relu', pad_type="zero")
        self.s3_encoder = StyleEncoder(
            4, 1, 64, 8, norm='none', activ='relu', pad_type="zero")
        self.s4_encoder = StyleEncoder(
            4, 1, 64, 8, norm='none', activ='relu', pad_type="zero")
        # adain->in
        # Per-modality decoders with AdaIN residual blocks; the AdaIN affine
        # parameters are injected from the MLPs below via decode().
        self.s1_decoder = Decoder(
            2, 4, 256, 1, res_norm='adain', activ="relu", pad_type="zero")
        self.s2_decoder = Decoder(
            2, 4, 256, 1, res_norm='adain', activ="relu", pad_type="zero")
        self.s3_decoder = Decoder(
            2, 4, 256, 1, res_norm='adain', activ="relu", pad_type="zero")
        self.s4_decoder = Decoder(
            2, 4, 256, 1, res_norm='adain', activ="relu", pad_type="zero")
        # Multi-scale discriminators (3 scales, 4 layers each, NSGAN loss).
        dis_para = {"dim": 64, "norm": "none", "activ": "lrelu", "n_layer": 4,
                    "gan_type": 'nsgan', 'num_scales': 3, "pad_type": 'reflect'}
        self.dis1 = MsImageDis(1, dis_para)
        self.dis2 = MsImageDis(1, dis_para)
        self.dis3 = MsImageDis(1, dis_para)
        self.dis4 = MsImageDis(1, dis_para)
        # MLPs mapping an 8-dim style code to the exact number of AdaIN
        # parameters each decoder needs.
        self.mlp1 = MLP(8, get_num_adain_params(
            self.s1_decoder), 256, 3, norm='none')
        self.mlp2 = MLP(8, get_num_adain_params(
            self.s2_decoder), 256, 3, norm='none')
        self.mlp3 = MLP(8, get_num_adain_params(
            self.s3_decoder), 256, 3, norm='none')
        self.mlp4 = MLP(8, get_num_adain_params(
            self.s4_decoder), 256, 3, norm='none')

        # Generator optimizer covers encoders, decoders and MLPs; the
        # discriminator optimizer covers the four discriminators.
        gen_params = list(self.content_encoder.parameters())+list(self.s1_encoder.parameters())+list(self.s2_encoder.parameters())+list(self.s3_encoder.parameters())+list(
            self.s4_encoder.parameters())+list(self.s1_decoder.parameters())+list(self.s2_decoder.parameters())+list(self.s3_decoder.parameters())+list(self.s4_decoder.parameters())\
            + list(self.mlp1.parameters())+list(self.mlp2.parameters()) + \
            list(self.mlp3.parameters())+list(self.mlp4.parameters())
        dis_params = list(self.dis1.parameters()) + list(self.dis2.parameters()) + \
            list(self.dis3.parameters()) + list(self.dis4.parameters())
        self.dis_opt = torch.optim.Adam([p for p in dis_params if p.requires_grad],
                                        lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
        self.gen_opt = torch.optim.Adam([p for p in gen_params if p.requires_grad],
                                        lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
        # Halve both learning rates every 100k scheduler steps.
        self.dis_scheduler = lr_scheduler.StepLR(self.dis_opt, step_size=100000,
                                                 gamma=0.5, last_epoch=-1)
        self.gen_scheduler = lr_scheduler.StepLR(self.gen_opt, step_size=100000,
                                                 gamma=0.5, last_epoch=-1)
        # Fixed random style codes (not used by the methods below; kept for
        # backward compatibility -- presumably for external evaluation code).
        self.s1 = torch.randn(1, 8, 1, 1).cuda()
        self.s2 = torch.randn(1, 8, 1, 1).cuda()
        self.s3 = torch.randn(1, 8, 1, 1).cuda()
        self.s4 = torch.randn(1, 8, 1, 1).cuda()
        # self.apply(weights_init('kaiming'))
        # self.dis1.apply(weights_init('gaussian'))
        # self.dis2.apply(weights_init('gaussian'))
        # self.dis3.apply(weights_init('gaussian'))
        # self.dis4.apply(weights_init('gaussian'))

    def gen_update(self, x1, x2, x3, x4, label):
        """One optimizer step for encoders/decoders/MLPs.

        Each modality is reconstructed twice: once from its encoded style
        code (weight 20 in the reconstruction loss) and once from a random
        style code (weight 10).  Cycle losses constrain the random-style
        reconstructions to re-encode to their style codes, and the
        prime-style reconstructions to re-encode to the original content.

        Returns:
            Tuple ``(adv_loss, recon_loss, cyc_loss)`` as Python floats.
        """
        self.gen_opt.zero_grad()
        # One random 8-dim style code per sample, per modality.
        s1_random = Variable(torch.randn(
            x1.size(0), 8, 1, 1).cuda())
        s2_random = Variable(torch.randn(
            x2.size(0), 8, 1, 1).cuda())
        s3_random = Variable(torch.randn(
            x3.size(0), 8, 1, 1).cuda())
        s4_random = Variable(torch.randn(
            x4.size(0), 8, 1, 1).cuda())
        # Zero out modalities flagged by label (simulated missing inputs).
        if label[0] == 1:
            x1_0 = torch.zeros_like(x1)
        else:
            x1_0 = x1
        if label[1] == 1:
            x2_0 = torch.zeros_like(x2)
        else:
            x2_0 = x2
        if label[2] == 1:
            x3_0 = torch.zeros_like(x3)
        else:
            x3_0 = x3
        if label[3] == 1:
            x4_0 = torch.zeros_like(x4)
        else:
            x4_0 = x4
        # Style codes encoded from the (possibly zeroed) inputs.
        # BUG FIX: s2_prime/s3_prime/s4_prime were previously all computed
        # with self.s1_encoder; each modality must use its own style
        # encoder, consistent with the re-encoding cycle pass below.
        s1_prime = self.s1_encoder(x1_0)
        s2_prime = self.s2_encoder(x2_0)
        s3_prime = self.s3_encoder(x3_0)
        s4_prime = self.s4_encoder(x4_0)

        content = self.content_encoder(
            torch.cat([x1_0, x2_0, x3_0, x4_0], dim=1))

        # Reconstructions from the encoded ("prime") style codes.
        x1_recon = decode(self.s1_decoder, self.mlp1, content, s1_prime)
        x2_recon = decode(self.s2_decoder, self.mlp2, content, s2_prime)
        x3_recon = decode(self.s3_decoder, self.mlp3, content, s3_prime)
        x4_recon = decode(self.s4_decoder, self.mlp4, content, s4_prime)

        # Reconstructions from random style codes.
        x1_recon_random = decode(
            self.s1_decoder, self.mlp1, content, s1_random)
        x2_recon_random = decode(
            self.s2_decoder, self.mlp2, content, s2_random)
        x3_recon_random = decode(
            self.s3_decoder, self.mlp3, content, s3_random)
        x4_recon_random = decode(
            self.s4_decoder, self.mlp4, content, s4_random)

        self.loss_gen_recon1 = self.recon_criterion(x1, x1_recon)
        self.loss_gen_recon2 = self.recon_criterion(x2, x2_recon)
        self.loss_gen_recon3 = self.recon_criterion(x3, x3_recon)
        self.loss_gen_recon4 = self.recon_criterion(x4, x4_recon)

        self.loss_gen_recon1_random = self.recon_criterion(x1, x1_recon_random)
        self.loss_gen_recon2_random = self.recon_criterion(x2, x2_recon_random)
        self.loss_gen_recon3_random = self.recon_criterion(x3, x3_recon_random)
        self.loss_gen_recon4_random = self.recon_criterion(x4, x4_recon_random)

        # Style cycle: random-style reconstructions must re-encode to the
        # random codes they were generated from.
        s1_recon_random = self.s1_encoder(x1_recon_random)
        s2_recon_random = self.s2_encoder(x2_recon_random)
        s3_recon_random = self.s3_encoder(x3_recon_random)
        s4_recon_random = self.s4_encoder(x4_recon_random)
        # Content cycle uses the prime-style reconstructions.
        content_recon = self.content_encoder(
            torch.cat([x1_recon, x2_recon, x3_recon, x4_recon], dim=1))

        self.loss_cyc1_random = self.recon_criterion(
            s1_random, s1_recon_random)
        self.loss_cyc2_random = self.recon_criterion(
            s2_random, s2_recon_random)
        self.loss_cyc3_random = self.recon_criterion(
            s3_random, s3_recon_random)
        self.loss_cyc4_random = self.recon_criterion(
            s4_random, s4_recon_random)
        self.loss_content_cyc = self.recon_criterion(content, content_recon)

        # Adversarial loss on the random-style reconstructions only.
        self.loss_gen_adv1 = self.dis1.calc_gen_loss(x1_recon_random)
        self.loss_gen_adv2 = self.dis2.calc_gen_loss(x2_recon_random)
        self.loss_gen_adv3 = self.dis3.calc_gen_loss(x3_recon_random)
        self.loss_gen_adv4 = self.dis4.calc_gen_loss(x4_recon_random)

        adv_loss = self.loss_gen_adv1+self.loss_gen_adv2 + \
            self.loss_gen_adv3+self.loss_gen_adv4
        recon_loss = 20*(self.loss_gen_recon1+self.loss_gen_recon2 +
                         self.loss_gen_recon3+self.loss_gen_recon4) +\
            10*(self.loss_gen_recon1_random+self.loss_gen_recon2_random +
                self.loss_gen_recon3_random+self.loss_gen_recon4_random)
        cyc_loss = self.loss_cyc1_random+self.loss_cyc2_random + \
            self.loss_cyc3_random+self.loss_cyc4_random+self.loss_content_cyc
        self.loss_gen_total = adv_loss+recon_loss+cyc_loss
        self.loss_gen_total.backward()
        self.gen_opt.step()
        return adv_loss.item(), recon_loss.item(), cyc_loss.item()

    def dis_update(self, x1, x2, x3, x4, label):
        """One optimizer step for the four discriminators.

        Fake samples are random-style reconstructions (detached so only the
        discriminators receive gradients); real samples are the original,
        un-zeroed inputs.

        Returns:
            Total discriminator loss as a Python float.
        """
        self.dis_opt.zero_grad()
        s1_random = Variable(torch.randn(
            x1.size(0), 8, 1, 1).cuda())
        s2_random = Variable(torch.randn(
            x2.size(0), 8, 1, 1).cuda())
        s3_random = Variable(torch.randn(
            x3.size(0), 8, 1, 1).cuda())
        s4_random = Variable(torch.randn(
            x4.size(0), 8, 1, 1).cuda())
        # Zero out modalities flagged by label (must mirror gen_update).
        if label[0] == 1:
            x1_0 = torch.zeros_like(x1)
        else:
            x1_0 = x1
        if label[1] == 1:
            x2_0 = torch.zeros_like(x2)
        else:
            x2_0 = x2
        if label[2] == 1:
            x3_0 = torch.zeros_like(x3)
        else:
            x3_0 = x3
        if label[3] == 1:
            x4_0 = torch.zeros_like(x4)
        else:
            x4_0 = x4
        content = self.content_encoder(
            torch.cat([x1_0, x2_0, x3_0, x4_0], dim=1))

        x1_recon_random = decode(
            self.s1_decoder, self.mlp1, content, s1_random)
        x2_recon_random = decode(
            self.s2_decoder, self.mlp2, content, s2_random)
        x3_recon_random = decode(
            self.s3_decoder, self.mlp3, content, s3_random)
        x4_recon_random = decode(
            self.s4_decoder, self.mlp4, content, s4_random)

        # D loss
        self.loss_dis_1 = self.dis1.calc_dis_loss(x1_recon_random.detach(), x1)
        self.loss_dis_2 = self.dis2.calc_dis_loss(x2_recon_random.detach(), x2)
        self.loss_dis_3 = self.dis3.calc_dis_loss(x3_recon_random.detach(), x3)
        self.loss_dis_4 = self.dis4.calc_dis_loss(x4_recon_random.detach(), x4)

        self.loss_dis_total = self.loss_dis_1 + \
            self.loss_dis_2+self.loss_dis_3+self.loss_dis_4
        self.loss_dis_total.backward()
        self.dis_opt.step()
        return self.loss_dis_total.item()

    def sample(self, x1, x2, x3, x4, label):
        """Generate one batch from random style codes in eval mode.

        Inputs flagged by ``label`` are zeroed before encoding, exactly as
        during training.  Returns the first sample of each input alongside
        the first sample of its random-style reconstruction (8 tensors).
        The module is switched back to train mode before returning.
        """
        self.eval()
        if label[0] == 1:
            x1_0 = torch.zeros_like(x1)
        else:
            x1_0 = x1
        if label[1] == 1:
            x2_0 = torch.zeros_like(x2)
        else:
            x2_0 = x2
        if label[2] == 1:
            x3_0 = torch.zeros_like(x3)
        else:
            x3_0 = x3
        if label[3] == 1:
            x4_0 = torch.zeros_like(x4)
        else:
            x4_0 = x4
        s1_random = Variable(torch.randn(
            x1.size(0), 8, 1, 1).cuda())
        s2_random = Variable(torch.randn(
            x2.size(0), 8, 1, 1).cuda())
        s3_random = Variable(torch.randn(
            x3.size(0), 8, 1, 1).cuda())
        s4_random = Variable(torch.randn(
            x4.size(0), 8, 1, 1).cuda())

        content = self.content_encoder(
            torch.cat([x1_0, x2_0, x3_0, x4_0], dim=1))

        x1_recon = decode(
            self.s1_decoder, self.mlp1, content, s1_random)
        x2_recon = decode(
            self.s2_decoder, self.mlp2, content, s2_random)
        x3_recon = decode(
            self.s3_decoder, self.mlp3, content, s3_random)
        x4_recon = decode(
            self.s4_decoder, self.mlp4, content, s4_random)
        self.train()
        return x1[0], x1_recon[0], x2[0], x2_recon[0], x3[0], x3_recon[0], x4[0], x4_recon[0]

    def recon_criterion(self, input, target):
        """Mean absolute error (L1) between two tensors."""
        return torch.mean(torch.abs(input - target))

    def update_learning_rate(self):
        """Advance both StepLR schedulers by one step."""
        if self.dis_scheduler is not None:
            self.dis_scheduler.step()
        if self.gen_scheduler is not None:
            self.gen_scheduler.step()
# class shared_Trainer(nn.Module):
#     def __init__(self):
#         super(shared_Trainer, self).__init__()

#         self.s1_encoder = ContentEncoder(
#             2, 4, 4, 64, 'in', 'relu', pad_type="zero")
#         self.s2_encoder = ContentEncoder(
#             2, 4, 4, 64, 'in', 'relu', pad_type="zero")
#         self.s3_encoder = ContentEncoder(
#             2, 4, 4, 64, 'in', 'relu', pad_type="zero")
#         self.s4_encoder = ContentEncoder(
#             2, 4, 4, 64, 'in', 'relu', pad_type="zero")

#         # adain->in
#         self.s1_decoder = Decoder(
#             2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
#         self.s2_decoder = Decoder(
#             2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
#         self.s3_decoder = Decoder(
#             2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
#         self.s4_decoder = Decoder(
#             2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
#         dis_para = {"dim": 64, "norm": "none", "activ": "lrelu", "n_layer": 4,
#                     "gan_type": 'lsgan', 'num_scales': 3, "pad_type": 'reflect'}
#         self.dis1 = MsImageDis(1, dis_para)
#         self.dis2 = MsImageDis(1, dis_para)
#         self.dis3 = MsImageDis(1, dis_para)
#         self.dis4 = MsImageDis(1, dis_para)
#         gen_params =list(self.s1_encoder.parameters())+list(self.s2_encoder.parameters())+list(self.s3_encoder.parameters())+list(
#             self.s4_encoder.parameters())+list(self.s1_decoder.parameters())+list(self.s2_decoder.parameters())+list(self.s3_decoder.parameters())+list(self.s4_decoder.parameters())
#         dis_params = list(self.dis1.parameters()) + list(self.dis2.parameters()) + \
#             list(self.dis3.parameters()) + list(self.dis4.parameters())
#         self.dis_opt = torch.optim.Adam([p for p in dis_params if p.requires_grad],
#                                         lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
#         self.gen_opt = torch.optim.Adam([p for p in gen_params if p.requires_grad],
#                                         lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
#         self.dis_scheduler = lr_scheduler.StepLR(self.dis_opt, step_size=100000,
#                                                  gamma=0.5, last_epoch=-1)
#         self.gen_scheduler = lr_scheduler.StepLR(self.gen_opt, step_size=100000,
#                                                  gamma=0.5, last_epoch=-1)
#         # self.apply(weights_init('kaiming'))
#         # self.dis1.apply(weights_init('gaussian'))
#         # self.dis2.apply(weights_init('gaussian'))
#         # self.dis3.apply(weights_init('gaussian'))
#         # self.dis4.apply(weights_init('gaussian'))

#     def gen_update(self, x1, x2, x3, x4):
#         self.gen_opt.zero_grad()
#         s1 = self.s1_encoder(torch.cat([x1, x2, x3, x4], dim=1))
#         s2 = self.s2_encoder(torch.cat([x1, x2, x3, x4], dim=1))
#         s3 = self.s3_encoder(torch.cat([x1, x2, x3, x4], dim=1))
#         s4 = self.s4_encoder(torch.cat([x1, x2, x3, x4], dim=1))
#         x1_recon = self.s1_decoder(s1)
#         x2_recon = self.s2_decoder(s2)
#         x3_recon = self.s3_decoder(s3)
#         x4_recon = self.s4_decoder(s4)

#         self.loss_gen_recon1 = self.recon_criterion(x1, x1_recon)
#         self.loss_gen_recon2 = self.recon_criterion(x2, x2_recon)
#         self.loss_gen_recon3 = self.recon_criterion(x3, x3_recon)
#         self.loss_gen_recon4 = self.recon_criterion(x4, x4_recon)

#         s1_recon = self.s1_encoder(torch.cat([x1_recon, x2_recon, x3_recon, x4_recon], dim=1))
#         s2_recon = self.s2_encoder(torch.cat([x1_recon, x2_recon, x3_recon, x4_recon], dim=1))
#         s3_recon = self.s3_encoder(torch.cat([x1_recon, x2_recon, x3_recon, x4_recon], dim=1))
#         s4_recon = self.s4_encoder(torch.cat([x1_recon, x2_recon, x3_recon, x4_recon], dim=1))

#         self.loss_cyc1 = self.recon_criterion(s1, s1_recon)
#         self.loss_cyc2 = self.recon_criterion(s2, s2_recon)
#         self.loss_cyc3 = self.recon_criterion(s3, s3_recon)
#         self.loss_cyc4 = self.recon_criterion(s4, s4_recon)

#         self.loss_gen_adv1 = self.dis1.calc_gen_loss(x1_recon)
#         self.loss_gen_adv2 = self.dis2.calc_gen_loss(x2_recon)
#         self.loss_gen_adv3 = self.dis3.calc_gen_loss(x3_recon)
#         self.loss_gen_adv4 = self.dis4.calc_gen_loss(x4_recon)

#         adv_loss = self.loss_gen_adv1+self.loss_gen_adv2 + \
#             self.loss_gen_adv3+self.loss_gen_adv4
#         recon_loss = 10*(self.loss_gen_recon1+self.loss_gen_recon2 +
#                          self.loss_gen_recon3+self.loss_gen_recon4)
#         cyc_loss = self.loss_cyc1+self.loss_cyc2 + \
#             self.loss_cyc3+self.loss_cyc4
#         self.loss_gen_total = adv_loss+recon_loss+cyc_loss
#         self.loss_gen_total.backward()
#         self.gen_opt.step()
#         return adv_loss.item(), recon_loss.item(), cyc_loss.item()

#     def dis_update(self, x1, x2, x3, x4):
#         self.dis_opt.zero_grad()
#         s1 = self.s1_encoder(torch.cat([x1, x2, x3, x4], dim=1))
#         s2 = self.s2_encoder(torch.cat([x1, x2, x3, x4], dim=1))
#         s3 = self.s3_encoder(torch.cat([x1, x2, x3, x4], dim=1))
#         s4 = self.s4_encoder(torch.cat([x1, x2, x3, x4], dim=1))
#         x1_recon = self.s1_decoder(s1)
#         x2_recon = self.s2_decoder(s2)
#         x3_recon = self.s3_decoder(s3)
#         x4_recon = self.s4_decoder(s4)
#         # D loss
#         self.loss_dis_1 = self.dis1.calc_dis_loss(x1_recon.detach(), x1)
#         self.loss_dis_2 = self.dis2.calc_dis_loss(x2_recon.detach(), x2)
#         self.loss_dis_3 = self.dis3.calc_dis_loss(x3_recon.detach(), x3)
#         self.loss_dis_4 = self.dis4.calc_dis_loss(x4_recon.detach(), x4)

#         self.loss_dis_total = self.loss_dis_1 + \
#             self.loss_dis_2+self.loss_dis_3+self.loss_dis_4
#         self.loss_dis_total.backward()
#         self.dis_opt.step()
#         return self.loss_dis_total.item()

#     def sample(self, x1, x2, x3, x4):
#         self.eval()
#         x1_recon, x2_recon, x3_recon, x4_recon = [], [], [], []
#         # for i in range(x1a.size(0)):
#         #     x1,x2,x3,x4=x1a[i].unsqueeze(0),x2a.unsqueeze(0),x3a.unsqueeze(0),x4a.unsqueeze(0)
#         #     content = self.content_encoder(torch.cat([x1, x2, x3, x4],dim=1))
#         #     s1 = self.s1_encoder(x1)
#         #     s2 = self.s2_encoder(x2)
#         #     s3 = self.s3_encoder(x3)
#         #     s4 = self.s4_encoder(x4)
#         #     x1_recon.append(self.s1_decoder(torch.cat([content, s1],dim=1)))
#         #     x2_recon.append(self.s2_decoder(torch.cat([content, s2],dim=1)))
#         #     x3_recon.append(self.s3_decoder(torch.cat([content, s3],dim=1)))
#         #     x4_recon.append(self.s4_decoder(torch.cat([content, s4],dim=1)))
#         s1 = self.s1_encoder(torch.cat([x1, x2, x3, x4], dim=1))
#         s2 = self.s2_encoder(torch.cat([x1, x2, x3, x4], dim=1))
#         s3 = self.s3_encoder(torch.cat([x1, x2, x3, x4], dim=1))
#         s4 = self.s4_encoder(torch.cat([x1, x2, x3, x4], dim=1))
#         x1_recon = self.s1_decoder(s1)
#         x2_recon = self.s2_decoder(s2)
#         x3_recon = self.s3_decoder(s3)
#         x4_recon = self.s4_decoder(s4)
#         # x1_recon, x2_recon = torch.cat(x1_recon), torch.cat(x2_recon)
#         # x3_recon, x4_recon = torch.cat(x3_recon), torch.cat(x4_recon)
#         self.train()
#         return x1[0], x1_recon[0], x2[0], x2_recon[0], x3[0], x3_recon[0], x4[0], x4_recon[0]

#     def recon_criterion(self, input, target):
#         return torch.mean(torch.abs(input - target))

#     def update_learning_rate(self):
#         if self.dis_scheduler is not None:
#             self.dis_scheduler.step()
#         if self.gen_scheduler is not None:
#             self.gen_scheduler.step()


class shared_Trainer_condconv(nn.Module):
    """n-to-n multi-modal image translation trainer (max-fusion variant).

    Four modality-specific conditional-convolution encoders produce a latent
    code for every *available* input modality; the per-modality codes are
    fused with an element-wise max and fed to four modality-specific decoders,
    so any subset of inputs can reconstruct all four outputs.  Training uses
    adversarial (multi-scale LSGAN), L1 reconstruction and latent
    cycle-consistency losses.
    """

    def __init__(self):
        super(shared_Trainer_condconv, self).__init__()
        # One conditional-conv encoder per branch; the `type` argument passed
        # at call time selects the conditioning for the input modality.
        self.s1_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s2_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s3_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s4_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")

        # adain->in
        self.s1_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s2_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s3_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s4_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        # One multi-scale LSGAN discriminator per output modality.
        dis_para = {"dim": 64, "norm": "none", "activ": "lrelu", "n_layer": 4,
                    "gan_type": 'lsgan', 'num_scales': 3, "pad_type": 'reflect'}
        self.dis1 = MsImageDis(1, dis_para)
        self.dis2 = MsImageDis(1, dis_para)
        self.dis3 = MsImageDis(1, dis_para)
        self.dis4 = MsImageDis(1, dis_para)
        gen_params = (list(self.s1_encoder.parameters())
                      + list(self.s2_encoder.parameters())
                      + list(self.s3_encoder.parameters())
                      + list(self.s4_encoder.parameters())
                      + list(self.s1_decoder.parameters())
                      + list(self.s2_decoder.parameters())
                      + list(self.s3_decoder.parameters())
                      + list(self.s4_decoder.parameters()))
        dis_params = (list(self.dis1.parameters())
                      + list(self.dis2.parameters())
                      + list(self.dis3.parameters())
                      + list(self.dis4.parameters()))
        self.dis_opt = torch.optim.Adam([p for p in dis_params if p.requires_grad],
                                        lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
        self.gen_opt = torch.optim.Adam([p for p in gen_params if p.requires_grad],
                                        lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
        self.dis_scheduler = lr_scheduler.StepLR(self.dis_opt, step_size=100000,
                                                 gamma=0.5, last_epoch=-1)
        self.gen_scheduler = lr_scheduler.StepLR(self.gen_opt, step_size=100000,
                                                 gamma=0.5, last_epoch=-1)

    def _encode_fused(self, xlist, label):
        """Encode each available modality and max-fuse the latent codes.

        label[i] == 0 marks modality i as available; any other value skips it.
        Returns one fused code per decoder branch (s1, s2, s3, s4).
        """
        batch = xlist[0].shape[0]
        s1, s2, s3, s4 = [], [], [], []
        for i, x in enumerate(xlist):
            if label[i] != 0:  # modality i missing for this batch
                continue
            cond = (i + 1) * torch.ones(batch, 1).cuda()
            s1.append(self.s1_encoder(x, type=cond))
            s2.append(self.s2_encoder(x, type=cond))
            s3.append(self.s3_encoder(x, type=cond))
            s4.append(self.s4_encoder(x, type=cond))
        s1, _ = torch.max(torch.stack(s1), 0)
        s2, _ = torch.max(torch.stack(s2), 0)
        s3, _ = torch.max(torch.stack(s3), 0)
        s4, _ = torch.max(torch.stack(s4), 0)
        return s1, s2, s3, s4

    # n to n
    def gen_update(self, x1, x2, x3, x4, label):
        """One generator optimisation step.

        Returns the scalar (adv_loss, recon_loss, cyc_loss) values.
        """
        self.gen_opt.zero_grad()
        s1, s2, s3, s4 = self._encode_fused([x1, x2, x3, x4], label)
        x1_recon = self.s1_decoder(s1)
        x2_recon = self.s2_decoder(s2)
        x3_recon = self.s3_decoder(s3)
        x4_recon = self.s4_decoder(s4)

        self.loss_gen_recon1 = self.recon_criterion(x1, x1_recon)
        self.loss_gen_recon2 = self.recon_criterion(x2, x2_recon)
        self.loss_gen_recon3 = self.recon_criterion(x3, x3_recon)
        self.loss_gen_recon4 = self.recon_criterion(x4, x4_recon)

        # Latent cycle: re-encode each reconstruction with encoder 1 and pull
        # the result back towards s1.  The conditioning batch size was
        # previously hard-coded to 3; it now follows the actual batch size.
        batch = x1.shape[0]
        s11_recon = self.s1_encoder(x1_recon, type=1 * torch.ones(batch, 1).cuda())
        s12_recon = self.s1_encoder(x2_recon, type=2 * torch.ones(batch, 1).cuda())
        s13_recon = self.s1_encoder(x3_recon, type=3 * torch.ones(batch, 1).cuda())
        s14_recon = self.s1_encoder(x4_recon, type=4 * torch.ones(batch, 1).cuda())

        self.loss_cyc1 = self.recon_criterion(s1, s11_recon)
        self.loss_cyc2 = self.recon_criterion(s1, s12_recon)
        self.loss_cyc3 = self.recon_criterion(s1, s13_recon)
        self.loss_cyc4 = self.recon_criterion(s1, s14_recon)

        self.loss_gen_adv1 = self.dis1.calc_gen_loss(x1_recon)
        self.loss_gen_adv2 = self.dis2.calc_gen_loss(x2_recon)
        self.loss_gen_adv3 = self.dis3.calc_gen_loss(x3_recon)
        self.loss_gen_adv4 = self.dis4.calc_gen_loss(x4_recon)

        adv_loss = (self.loss_gen_adv1 + self.loss_gen_adv2
                    + self.loss_gen_adv3 + self.loss_gen_adv4)
        recon_loss = 10 * (self.loss_gen_recon1 + self.loss_gen_recon2
                           + self.loss_gen_recon3 + self.loss_gen_recon4)
        cyc_loss = (self.loss_cyc1 + self.loss_cyc2
                    + self.loss_cyc3 + self.loss_cyc4)
        self.loss_gen_total = adv_loss + recon_loss + cyc_loss
        self.loss_gen_total.backward()
        self.gen_opt.step()
        return adv_loss.item(), recon_loss.item(), cyc_loss.item()

    def dis_update(self, x1, x2, x3, x4, label):
        """One discriminator optimisation step; returns the summed D loss."""
        self.dis_opt.zero_grad()
        s1, s2, s3, s4 = self._encode_fused([x1, x2, x3, x4], label)
        x1_recon = self.s1_decoder(s1)
        x2_recon = self.s2_decoder(s2)
        x3_recon = self.s3_decoder(s3)
        x4_recon = self.s4_decoder(s4)
        # D loss: reconstructions are detached so only the discriminators
        # receive gradients.
        self.loss_dis_1 = self.dis1.calc_dis_loss(x1_recon.detach(), x1)
        self.loss_dis_2 = self.dis2.calc_dis_loss(x2_recon.detach(), x2)
        self.loss_dis_3 = self.dis3.calc_dis_loss(x3_recon.detach(), x3)
        self.loss_dis_4 = self.dis4.calc_dis_loss(x4_recon.detach(), x4)

        self.loss_dis_total = (self.loss_dis_1 + self.loss_dis_2
                               + self.loss_dis_3 + self.loss_dis_4)
        self.loss_dis_total.backward()
        self.dis_opt.step()
        return self.loss_dis_total.item()

    def sample(self, x1, x2, x3, x4, label):
        """Reconstruct all four modalities for visualisation.

        Returns the first sample of each input/reconstruction pair.
        """
        self.eval()
        with torch.no_grad():  # no gradients needed for visualisation
            s1, s2, s3, s4 = self._encode_fused([x1, x2, x3, x4], label)
            x1_recon = self.s1_decoder(s1)
            x2_recon = self.s2_decoder(s2)
            x3_recon = self.s3_decoder(s3)
            x4_recon = self.s4_decoder(s4)
        self.train()
        return x1[0], x1_recon[0], x2[0], x2_recon[0], x3[0], x3_recon[0], x4[0], x4_recon[0]

    def recon_criterion(self, input, target):
        """Mean absolute (L1) error between input and target."""
        return torch.mean(torch.abs(input - target))

    def update_learning_rate(self):
        """Advance both LR schedulers by one step, if present."""
        if self.dis_scheduler is not None:
            self.dis_scheduler.step()
        if self.gen_scheduler is not None:
            self.gen_scheduler.step()
class shared_Trainer_condconv_mean(nn.Module):
    """n-to-n multi-modal image translation trainer (mean-fusion variant).

    Identical to ``shared_Trainer_condconv`` except that the per-modality
    latent codes are fused by averaging instead of an element-wise max.
    """

    def __init__(self):
        super(shared_Trainer_condconv_mean, self).__init__()
        # One conditional-conv encoder per branch; the `type` argument passed
        # at call time selects the conditioning for the input modality.
        self.s1_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s2_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s3_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s4_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")

        # adain->in
        self.s1_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s2_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s3_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s4_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        # One multi-scale LSGAN discriminator per output modality.
        dis_para = {"dim": 64, "norm": "none", "activ": "lrelu", "n_layer": 4,
                    "gan_type": 'lsgan', 'num_scales': 3, "pad_type": 'reflect'}
        self.dis1 = MsImageDis(1, dis_para)
        self.dis2 = MsImageDis(1, dis_para)
        self.dis3 = MsImageDis(1, dis_para)
        self.dis4 = MsImageDis(1, dis_para)
        gen_params = (list(self.s1_encoder.parameters())
                      + list(self.s2_encoder.parameters())
                      + list(self.s3_encoder.parameters())
                      + list(self.s4_encoder.parameters())
                      + list(self.s1_decoder.parameters())
                      + list(self.s2_decoder.parameters())
                      + list(self.s3_decoder.parameters())
                      + list(self.s4_decoder.parameters()))
        dis_params = (list(self.dis1.parameters())
                      + list(self.dis2.parameters())
                      + list(self.dis3.parameters())
                      + list(self.dis4.parameters()))
        self.dis_opt = torch.optim.Adam([p for p in dis_params if p.requires_grad],
                                        lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
        self.gen_opt = torch.optim.Adam([p for p in gen_params if p.requires_grad],
                                        lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
        self.dis_scheduler = lr_scheduler.StepLR(self.dis_opt, step_size=100000,
                                                 gamma=0.5, last_epoch=-1)
        self.gen_scheduler = lr_scheduler.StepLR(self.gen_opt, step_size=100000,
                                                 gamma=0.5, last_epoch=-1)

    def _encode_fused(self, xlist, label):
        """Encode each available modality and mean-fuse the latent codes.

        label[i] == 0 marks modality i as available; any other value skips it.
        Returns one fused code per decoder branch (s1, s2, s3, s4).
        """
        batch = xlist[0].shape[0]
        s1, s2, s3, s4 = [], [], [], []
        for i, x in enumerate(xlist):
            if label[i] != 0:  # modality i missing for this batch
                continue
            cond = (i + 1) * torch.ones(batch, 1).cuda()
            s1.append(self.s1_encoder(x, type=cond))
            s2.append(self.s2_encoder(x, type=cond))
            s3.append(self.s3_encoder(x, type=cond))
            s4.append(self.s4_encoder(x, type=cond))
        s1 = torch.mean(torch.stack(s1), 0)
        s2 = torch.mean(torch.stack(s2), 0)
        s3 = torch.mean(torch.stack(s3), 0)
        s4 = torch.mean(torch.stack(s4), 0)
        return s1, s2, s3, s4

    # n to n
    def gen_update(self, x1, x2, x3, x4, label):
        """One generator optimisation step.

        Returns the scalar (adv_loss, recon_loss, cyc_loss) values.
        """
        self.gen_opt.zero_grad()
        s1, s2, s3, s4 = self._encode_fused([x1, x2, x3, x4], label)
        x1_recon = self.s1_decoder(s1)
        x2_recon = self.s2_decoder(s2)
        x3_recon = self.s3_decoder(s3)
        x4_recon = self.s4_decoder(s4)

        self.loss_gen_recon1 = self.recon_criterion(x1, x1_recon)
        self.loss_gen_recon2 = self.recon_criterion(x2, x2_recon)
        self.loss_gen_recon3 = self.recon_criterion(x3, x3_recon)
        self.loss_gen_recon4 = self.recon_criterion(x4, x4_recon)

        # Latent cycle: re-encode each reconstruction with encoder 1 and pull
        # the result back towards s1.  The conditioning batch size was
        # previously hard-coded to 3; it now follows the actual batch size.
        batch = x1.shape[0]
        s11_recon = self.s1_encoder(x1_recon, type=1 * torch.ones(batch, 1).cuda())
        s12_recon = self.s1_encoder(x2_recon, type=2 * torch.ones(batch, 1).cuda())
        s13_recon = self.s1_encoder(x3_recon, type=3 * torch.ones(batch, 1).cuda())
        s14_recon = self.s1_encoder(x4_recon, type=4 * torch.ones(batch, 1).cuda())

        self.loss_cyc1 = self.recon_criterion(s1, s11_recon)
        self.loss_cyc2 = self.recon_criterion(s1, s12_recon)
        self.loss_cyc3 = self.recon_criterion(s1, s13_recon)
        self.loss_cyc4 = self.recon_criterion(s1, s14_recon)

        self.loss_gen_adv1 = self.dis1.calc_gen_loss(x1_recon)
        self.loss_gen_adv2 = self.dis2.calc_gen_loss(x2_recon)
        self.loss_gen_adv3 = self.dis3.calc_gen_loss(x3_recon)
        self.loss_gen_adv4 = self.dis4.calc_gen_loss(x4_recon)

        adv_loss = (self.loss_gen_adv1 + self.loss_gen_adv2
                    + self.loss_gen_adv3 + self.loss_gen_adv4)
        recon_loss = 10 * (self.loss_gen_recon1 + self.loss_gen_recon2
                           + self.loss_gen_recon3 + self.loss_gen_recon4)
        cyc_loss = (self.loss_cyc1 + self.loss_cyc2
                    + self.loss_cyc3 + self.loss_cyc4)
        self.loss_gen_total = adv_loss + recon_loss + cyc_loss
        self.loss_gen_total.backward()
        self.gen_opt.step()
        return adv_loss.item(), recon_loss.item(), cyc_loss.item()

    def dis_update(self, x1, x2, x3, x4, label):
        """One discriminator optimisation step; returns the summed D loss."""
        self.dis_opt.zero_grad()
        s1, s2, s3, s4 = self._encode_fused([x1, x2, x3, x4], label)
        x1_recon = self.s1_decoder(s1)
        x2_recon = self.s2_decoder(s2)
        x3_recon = self.s3_decoder(s3)
        x4_recon = self.s4_decoder(s4)
        # D loss: reconstructions are detached so only the discriminators
        # receive gradients.
        self.loss_dis_1 = self.dis1.calc_dis_loss(x1_recon.detach(), x1)
        self.loss_dis_2 = self.dis2.calc_dis_loss(x2_recon.detach(), x2)
        self.loss_dis_3 = self.dis3.calc_dis_loss(x3_recon.detach(), x3)
        self.loss_dis_4 = self.dis4.calc_dis_loss(x4_recon.detach(), x4)

        self.loss_dis_total = (self.loss_dis_1 + self.loss_dis_2
                               + self.loss_dis_3 + self.loss_dis_4)
        self.loss_dis_total.backward()
        self.dis_opt.step()
        return self.loss_dis_total.item()

    def sample(self, x1, x2, x3, x4, label):
        """Reconstruct all four modalities for visualisation.

        Returns the first sample of each input/reconstruction pair.
        """
        self.eval()
        with torch.no_grad():  # no gradients needed for visualisation
            s1, s2, s3, s4 = self._encode_fused([x1, x2, x3, x4], label)
            x1_recon = self.s1_decoder(s1)
            x2_recon = self.s2_decoder(s2)
            x3_recon = self.s3_decoder(s3)
            x4_recon = self.s4_decoder(s4)
        self.train()
        return x1[0], x1_recon[0], x2[0], x2_recon[0], x3[0], x3_recon[0], x4[0], x4_recon[0]

    def recon_criterion(self, input, target):
        """Mean absolute (L1) error between input and target."""
        return torch.mean(torch.abs(input - target))

    def update_learning_rate(self):
        """Advance both LR schedulers by one step, if present."""
        if self.dis_scheduler is not None:
            self.dis_scheduler.step()
        if self.gen_scheduler is not None:
            self.gen_scheduler.step()
            
class shared_Trainer_condconv_seg(nn.Module):
    """Max-fusion multi-modal translation trainer with an auxiliary seg head.

    Identical to ``shared_Trainer_condconv`` plus a ResNet50-U-Net that
    segments the concatenated four reconstructions; its Dice loss is added to
    the generator objective so reconstructions stay segmentation-friendly.
    """

    def __init__(self):
        super(shared_Trainer_condconv_seg, self).__init__()
        # One conditional-conv encoder per branch; the `type` argument passed
        # at call time selects the conditioning for the input modality.
        self.s1_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s2_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s3_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s4_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")

        # adain->in
        self.s1_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s2_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s3_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s4_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        # One multi-scale LSGAN discriminator per output modality.
        dis_para = {"dim": 64, "norm": "none", "activ": "lrelu", "n_layer": 4,
                    "gan_type": 'lsgan', 'num_scales': 3, "pad_type": 'reflect'}
        self.dis1 = MsImageDis(1, dis_para)
        self.dis2 = MsImageDis(1, dis_para)
        self.dis3 = MsImageDis(1, dis_para)
        self.dis4 = MsImageDis(1, dis_para)

        # Auxiliary segmentation network over the 4 concatenated
        # reconstructions; trained jointly with the generators.
        self.seg = smp.Unet(
            encoder_name="resnet50",
            encoder_weights="imagenet",
            classes=3,
            activation="sigmoid",
            in_channels=4,
            )
        self.diceloss = smp.utils.losses.DiceLoss()
        gen_params = (list(self.s1_encoder.parameters())
                      + list(self.s2_encoder.parameters())
                      + list(self.s3_encoder.parameters())
                      + list(self.s4_encoder.parameters())
                      + list(self.s1_decoder.parameters())
                      + list(self.s2_decoder.parameters())
                      + list(self.s3_decoder.parameters())
                      + list(self.s4_decoder.parameters())
                      + list(self.seg.parameters()))
        dis_params = (list(self.dis1.parameters())
                      + list(self.dis2.parameters())
                      + list(self.dis3.parameters())
                      + list(self.dis4.parameters()))
        self.dis_opt = torch.optim.Adam([p for p in dis_params if p.requires_grad],
                                        lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
        self.gen_opt = torch.optim.Adam([p for p in gen_params if p.requires_grad],
                                        lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
        self.dis_scheduler = lr_scheduler.StepLR(self.dis_opt, step_size=100000,
                                                 gamma=0.5, last_epoch=-1)
        self.gen_scheduler = lr_scheduler.StepLR(self.gen_opt, step_size=100000,
                                                 gamma=0.5, last_epoch=-1)

    def _encode_fused(self, xlist, label):
        """Encode each available modality and max-fuse the latent codes.

        label[i] == 0 marks modality i as available; any other value skips it.
        Returns one fused code per decoder branch (s1, s2, s3, s4).
        """
        batch = xlist[0].shape[0]
        s1, s2, s3, s4 = [], [], [], []
        for i, x in enumerate(xlist):
            if label[i] != 0:  # modality i missing for this batch
                continue
            cond = (i + 1) * torch.ones(batch, 1).cuda()
            s1.append(self.s1_encoder(x, type=cond))
            s2.append(self.s2_encoder(x, type=cond))
            s3.append(self.s3_encoder(x, type=cond))
            s4.append(self.s4_encoder(x, type=cond))
        s1, _ = torch.max(torch.stack(s1), 0)
        s2, _ = torch.max(torch.stack(s2), 0)
        s3, _ = torch.max(torch.stack(s3), 0)
        s4, _ = torch.max(torch.stack(s4), 0)
        return s1, s2, s3, s4

    # n to n
    def gen_update(self, x1, x2, x3, x4, seg, label):
        """One generator optimisation step with auxiliary segmentation loss.

        Returns the scalar (adv_loss, recon_loss, cyc_loss, loss_seg) values.
        """
        self.gen_opt.zero_grad()
        s1, s2, s3, s4 = self._encode_fused([x1, x2, x3, x4], label)
        x1_recon = self.s1_decoder(s1)
        x2_recon = self.s2_decoder(s2)
        x3_recon = self.s3_decoder(s3)
        x4_recon = self.s4_decoder(s4)

        self.loss_gen_recon1 = self.recon_criterion(x1, x1_recon)
        self.loss_gen_recon2 = self.recon_criterion(x2, x2_recon)
        self.loss_gen_recon3 = self.recon_criterion(x3, x3_recon)
        self.loss_gen_recon4 = self.recon_criterion(x4, x4_recon)

        # Latent cycle: re-encode each reconstruction with encoder 1 and pull
        # the result back towards s1.  The conditioning batch size was
        # previously hard-coded to 3; it now follows the actual batch size.
        batch = x1.shape[0]
        s11_recon = self.s1_encoder(x1_recon, type=1 * torch.ones(batch, 1).cuda())
        s12_recon = self.s1_encoder(x2_recon, type=2 * torch.ones(batch, 1).cuda())
        s13_recon = self.s1_encoder(x3_recon, type=3 * torch.ones(batch, 1).cuda())
        s14_recon = self.s1_encoder(x4_recon, type=4 * torch.ones(batch, 1).cuda())

        # Segment the four reconstructions jointly; Dice against ground truth.
        self.seg_pred = self.seg(
            torch.cat([x1_recon, x2_recon, x3_recon, x4_recon], dim=1))
        loss_seg = self.diceloss(self.seg_pred, seg.cuda())

        self.loss_cyc1 = self.recon_criterion(s1, s11_recon)
        self.loss_cyc2 = self.recon_criterion(s1, s12_recon)
        self.loss_cyc3 = self.recon_criterion(s1, s13_recon)
        self.loss_cyc4 = self.recon_criterion(s1, s14_recon)

        self.loss_gen_adv1 = self.dis1.calc_gen_loss(x1_recon)
        self.loss_gen_adv2 = self.dis2.calc_gen_loss(x2_recon)
        self.loss_gen_adv3 = self.dis3.calc_gen_loss(x3_recon)
        self.loss_gen_adv4 = self.dis4.calc_gen_loss(x4_recon)

        adv_loss = (self.loss_gen_adv1 + self.loss_gen_adv2
                    + self.loss_gen_adv3 + self.loss_gen_adv4)
        recon_loss = 10 * (self.loss_gen_recon1 + self.loss_gen_recon2
                           + self.loss_gen_recon3 + self.loss_gen_recon4)
        cyc_loss = (self.loss_cyc1 + self.loss_cyc2
                    + self.loss_cyc3 + self.loss_cyc4)
        self.loss_gen_total = adv_loss + recon_loss + cyc_loss + loss_seg
        self.loss_gen_total.backward()
        self.gen_opt.step()
        return adv_loss.item(), recon_loss.item(), cyc_loss.item(), loss_seg.item()

    def dis_update(self, x1, x2, x3, x4, label):
        """One discriminator optimisation step; returns the summed D loss."""
        self.dis_opt.zero_grad()
        s1, s2, s3, s4 = self._encode_fused([x1, x2, x3, x4], label)
        x1_recon = self.s1_decoder(s1)
        x2_recon = self.s2_decoder(s2)
        x3_recon = self.s3_decoder(s3)
        x4_recon = self.s4_decoder(s4)
        # D loss: reconstructions are detached so only the discriminators
        # receive gradients.
        self.loss_dis_1 = self.dis1.calc_dis_loss(x1_recon.detach(), x1)
        self.loss_dis_2 = self.dis2.calc_dis_loss(x2_recon.detach(), x2)
        self.loss_dis_3 = self.dis3.calc_dis_loss(x3_recon.detach(), x3)
        self.loss_dis_4 = self.dis4.calc_dis_loss(x4_recon.detach(), x4)

        self.loss_dis_total = (self.loss_dis_1 + self.loss_dis_2
                               + self.loss_dis_3 + self.loss_dis_4)
        self.loss_dis_total.backward()
        self.dis_opt.step()
        return self.loss_dis_total.item()

    def sample(self, x1, x2, x3, x4, label):
        """Reconstruct all four modalities for visualisation.

        Returns the first sample of each input/reconstruction pair.
        """
        self.eval()
        with torch.no_grad():  # no gradients needed for visualisation
            s1, s2, s3, s4 = self._encode_fused([x1, x2, x3, x4], label)
            x1_recon = self.s1_decoder(s1)
            x2_recon = self.s2_decoder(s2)
            x3_recon = self.s3_decoder(s3)
            x4_recon = self.s4_decoder(s4)
        self.train()
        return x1[0], x1_recon[0], x2[0], x2_recon[0], x3[0], x3_recon[0], x4[0], x4_recon[0]

    def recon_criterion(self, input, target):
        """Mean absolute (L1) error between input and target."""
        return torch.mean(torch.abs(input - target))

    def update_learning_rate(self):
        """Advance both LR schedulers by one step, if present."""
        if self.dis_scheduler is not None:
            self.dis_scheduler.step()
        if self.gen_scheduler is not None:
            self.gen_scheduler.step()

class shared_Trainer_condconv_seg_3(nn.Module):
    def __init__(self):
        """Trainer for 4-modality image synthesis with a segmentation head.

        Per modality i in {1..4} this builds:
          * ``s{i}_encoder`` -- ContentEncoder_condconv; the ``type`` value
            passed at forward time presumably conditions it on the source
            modality (see utils.networks -- TODO confirm),
          * ``s{i}_decoder`` -- Decoder with instance-norm residual blocks,
          * ``dis{i}``      -- multi-scale LSGAN discriminator,
          * ``conv{i}``     -- 1x1 conv fusing the concatenated
            [max, mean, min] pooled codes (3*256 -> 256 channels).

        A ResNet-50 U-Net (``self.seg``) segments the 4 concatenated
        reconstructions into 3 classes, trained with Dice loss.
        """
        super(shared_Trainer_condconv_seg_3, self).__init__()
        # Constructor args mirror the sibling trainers in this file; their
        # meaning (depths, dims, norm, activation) is defined in utils.networks.
        self.s1_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s2_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s3_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")
        self.s4_encoder = ContentEncoder_condconv(
            2, 4, 1, 64, 'in', 'relu', pad_type="zero")

        # adain->in: decoders use plain instance norm (res_norm='in') rather
        # than AdaIN, so no style/MLP parameters are injected in this trainer.
        self.s1_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s2_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s3_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        self.s4_decoder = Decoder(
            2, 4, 256, 1, res_norm='in', activ="relu", pad_type="zero")
        # Shared discriminator config: 4-layer, 3-scale LSGAN, 1-channel input.
        dis_para = {"dim": 64, "norm": "none", "activ": "lrelu", "n_layer": 4,
                    "gan_type": 'lsgan', 'num_scales': 3, "pad_type": 'reflect'}
        self.dis1 = MsImageDis(1, dis_para)
        self.dis2 = MsImageDis(1, dis_para)
        self.dis3 = MsImageDis(1, dis_para)
        self.dis4 = MsImageDis(1, dis_para)

        # 1x1 convs compressing the concatenated max/mean/min pooled codes
        # (3 * 256 channels) back to a single 256-channel code per modality.
        self.conv1 = nn.Conv2d(in_channels=256*3, out_channels=256, kernel_size=1, stride=1, padding=0)
        self.conv2 = nn.Conv2d(in_channels=256*3, out_channels=256, kernel_size=1, stride=1, padding=0)
        self.conv3 = nn.Conv2d(in_channels=256*3, out_channels=256, kernel_size=1, stride=1, padding=0)
        self.conv4 = nn.Conv2d(in_channels=256*3, out_channels=256, kernel_size=1, stride=1, padding=0)

        # Segmentation head over the 4 reconstructed images stacked on the
        # channel axis (in_channels=4), 3 classes, sigmoid output.
        self.seg = smp.Unet(
            encoder_name="resnet50",
            encoder_weights="imagenet",
            classes=3,
            activation="sigmoid",
            in_channels=4,
            )
        # NOTE(review): smp.utils.losses is deprecated in newer
        # segmentation_models_pytorch releases -- pin the version or migrate.
        self.diceloss=smp.utils.losses.DiceLoss()
        # Generator side: encoders + decoders + fusion convs + seg net share
        # one optimizer; discriminators get their own.
        gen_params = list(self.s1_encoder.parameters())+list(self.s2_encoder.parameters())+list(self.s3_encoder.parameters())+list(
            self.s4_encoder.parameters())+list(self.s1_decoder.parameters())+list(self.s2_decoder.parameters())+list(self.s3_decoder.parameters())+list(self.s4_decoder.parameters())+list(self.seg.parameters())+list(self.conv1.parameters())\
                +list(self.conv2.parameters())+list(self.conv3.parameters())+list(self.conv4.parameters())
        dis_params = list(self.dis1.parameters()) + list(self.dis2.parameters()) + \
            list(self.dis3.parameters()) + list(self.dis4.parameters())
        self.dis_opt = torch.optim.Adam([p for p in dis_params if p.requires_grad],
                                        lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
        self.gen_opt = torch.optim.Adam([p for p in gen_params if p.requires_grad],
                                        lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0001)
        # Halve both learning rates every 100k scheduler steps.
        self.dis_scheduler = lr_scheduler.StepLR(self.dis_opt, step_size=100000,
                                                 gamma=0.5, last_epoch=-1)
        self.gen_scheduler = lr_scheduler.StepLR(self.gen_opt, step_size=100000,
                                                 gamma=0.5, last_epoch=-1)
    # n to n
    def gen_update(self, x1, x2, x3, x4,seg, label):
        """One generator + segmentation update step.

        Args:
            x1, x2, x3, x4: input batches, one per modality.
            seg: segmentation target for the Dice loss (moved to CUDA here).
            label: per-modality flags; a modality is encoded only when its
                entry is 0 (non-zero appears to mean "missing" -- TODO
                confirm against the caller).

        Returns:
            (adv_loss, recon_loss, cyc_loss, seg_loss) as Python floats.
        """
        self.gen_opt.zero_grad()
        xlist = [x1, x2, x3, x4]
        s1, s2, s3, s4 = [], [], [], []
        # Encode every available modality with all four encoders; the
        # ``type`` argument (i+1) conditions each encoder on the source
        # modality index.
        for i in range(4):
            if label[i] != 0:
                continue
            x = xlist[i]
            s1.append(self.s1_encoder(x, type=(i+1)*torch.ones(x1.shape[0], 1).cuda()))
            s2.append(self.s2_encoder(x, type=(i+1)*torch.ones(x1.shape[0], 1).cuda()))
            s3.append(self.s3_encoder(x, type=(i+1)*torch.ones(x1.shape[0], 1).cuda()))
            s4.append(self.s4_encoder(x, type=(i+1)*torch.ones(x1.shape[0], 1).cuda()))
        # NOTE(review): if every label entry is non-zero the lists are empty
        # and torch.stack raises -- callers must supply >= 1 available input.
        # Pool each encoder's codes across available modalities three ways.
        s1_max,_ = torch.max(torch.stack(s1), 0)
        s2_max,_ = torch.max(torch.stack(s2), 0)
        s3_max,_ = torch.max(torch.stack(s3), 0)
        s4_max,_ = torch.max(torch.stack(s4), 0)

        s1_mean = torch.mean(torch.stack(s1), 0)
        s2_mean = torch.mean(torch.stack(s2), 0)
        s3_mean = torch.mean(torch.stack(s3), 0)
        s4_mean = torch.mean(torch.stack(s4), 0)

        s1_min,_ = torch.min(torch.stack(s1), 0)
        s2_min,_ = torch.min(torch.stack(s2), 0)
        s3_min,_ = torch.min(torch.stack(s3), 0)
        s4_min,_ = torch.min(torch.stack(s4), 0)

        # Fuse [max, mean, min] (3*256 channels) back to 256 via 1x1 conv,
        # then decode one reconstruction per modality.
        s1=self.conv1(torch.cat([s1_max,s1_mean,s1_min],dim=1))
        s2=self.conv2(torch.cat([s2_max,s2_mean,s2_min],dim=1))
        s3=self.conv3(torch.cat([s3_max,s3_mean,s3_min],dim=1))
        s4=self.conv4(torch.cat([s4_max,s4_mean,s4_min],dim=1))

        x1_recon = self.s1_decoder(s1)
        x2_recon = self.s2_decoder(s2)
        x3_recon = self.s3_decoder(s3)
        x4_recon = self.s4_decoder(s4)

        # L1 reconstruction losses.
        self.loss_gen_recon1 = self.recon_criterion(x1, x1_recon)
        self.loss_gen_recon2 = self.recon_criterion(x2, x2_recon)
        self.loss_gen_recon3 = self.recon_criterion(x3, x3_recon)
        self.loss_gen_recon4 = self.recon_criterion(x4, x4_recon)

        # Re-encode every reconstruction with encoder 1 for the cycle terms.
        s11_recon = self.s1_encoder(x1_recon, type=1*torch.ones(x1.shape[0], 1).cuda())
        s12_recon = self.s1_encoder(x2_recon, type=2*torch.ones(x1.shape[0], 1).cuda())
        s13_recon = self.s1_encoder(x3_recon, type=3*torch.ones(x1.shape[0], 1).cuda())
        s14_recon = self.s1_encoder(x4_recon, type=4*torch.ones(x1.shape[0], 1).cuda())

        # Segment the 4 reconstructions stacked on the channel axis.
        self.seg_pred=self.seg.forward(torch.cat([x1_recon,x2_recon,x3_recon,x4_recon],dim=1))
        loss_seg=self.diceloss(self.seg_pred,seg.cuda())

        # NOTE(review): all four cycle losses compare against s1 only (codes
        # from s1_encoder re-encoding each reconstruction); s2/s3/s4 get no
        # cycle term.  Possibly an intentional shared-code constraint,
        # possibly copy-paste -- confirm with the authors.
        self.loss_cyc1 = self.recon_criterion(s1, s11_recon)
        self.loss_cyc2 = self.recon_criterion(s1, s12_recon)
        self.loss_cyc3 = self.recon_criterion(s1, s13_recon)
        self.loss_cyc4 = self.recon_criterion(s1, s14_recon)

        # Adversarial losses from the four discriminators.
        self.loss_gen_adv1 = self.dis1.calc_gen_loss(x1_recon)
        self.loss_gen_adv2 = self.dis2.calc_gen_loss(x2_recon)
        self.loss_gen_adv3 = self.dis3.calc_gen_loss(x3_recon)
        self.loss_gen_adv4 = self.dis4.calc_gen_loss(x4_recon)

        adv_loss = self.loss_gen_adv1+self.loss_gen_adv2 + \
            self.loss_gen_adv3+self.loss_gen_adv4
        # Reconstruction term is weighted 10x relative to the other losses.
        recon_loss = 10*(self.loss_gen_recon1+self.loss_gen_recon2 +
                         self.loss_gen_recon3+self.loss_gen_recon4)
        cyc_loss = self.loss_cyc1+self.loss_cyc2 + \
            self.loss_cyc3+self.loss_cyc4
        self.loss_gen_total = adv_loss+recon_loss+cyc_loss+loss_seg
        self.loss_gen_total.backward()
        self.gen_opt.step()
        return adv_loss.item(), recon_loss.item(), cyc_loss.item(),loss_seg.item()

    def dis_update(self, x1, x2, x3, x4, label):
        """One discriminator update step.

        Encodes the available (label == 0) modalities, fuses each encoder's
        codes (max/mean/min pooling -> 1x1 conv), decodes a reconstruction
        per modality, and trains the four discriminators on real images vs.
        detached reconstructions.  Stores ``loss_dis_1..4`` and
        ``loss_dis_total`` on ``self`` and returns the total as a float.
        """
        self.dis_opt.zero_grad()
        inputs = (x1, x2, x3, x4)
        encoders = (self.s1_encoder, self.s2_encoder,
                    self.s3_encoder, self.s4_encoder)
        fusers = (self.conv1, self.conv2, self.conv3, self.conv4)
        decoders = (self.s1_decoder, self.s2_decoder,
                    self.s3_decoder, self.s4_decoder)
        discriminators = (self.dis1, self.dis2, self.dis3, self.dis4)
        # Encode every available input with all four encoders.
        codes = [[] for _ in encoders]
        for idx, x in enumerate(inputs):
            if label[idx] != 0:
                continue
            for enc, bucket in zip(encoders, codes):
                bucket.append(
                    enc(x, type=(idx + 1) * torch.ones(x1.shape[0], 1).cuda()))
        # Fuse codes across modalities: concat of max/mean/min pooling,
        # reduced back to 256 channels by the per-modality 1x1 conv.
        fused = []
        for fuse, bucket in zip(fusers, codes):
            stacked = torch.stack(bucket)
            pooled = torch.cat(
                [stacked.max(0)[0], stacked.mean(0), stacked.min(0)[0]], dim=1)
            fused.append(fuse(pooled))
        recons = [dec(code) for dec, code in zip(decoders, fused)]
        # Discriminator losses on detached reconstructions vs. real images.
        per_dis = []
        for k, (dis, recon, real) in enumerate(
                zip(discriminators, recons, inputs), start=1):
            loss = dis.calc_dis_loss(recon.detach(), real)
            setattr(self, "loss_dis_%d" % k, loss)
            per_dis.append(loss)
        self.loss_dis_total = per_dis[0] + per_dis[1] + per_dis[2] + per_dis[3]
        self.loss_dis_total.backward()
        self.dis_opt.step()
        return self.loss_dis_total.item()

    def sample(self, x1, x2, x3, x4, label):
        """Reconstruct one example per modality for visualization.

        Runs in eval mode; only modalities whose ``label`` entry is 0 are
        encoded.  Each encoder's codes are fused across available modalities
        by max/mean/min pooling plus a 1x1 conv, then decoded.  Train mode
        is restored before returning (x1[0], x1_recon[0], ..., x4[0],
        x4_recon[0]).
        """
        self.eval()
        inputs = (x1, x2, x3, x4)
        encoders = (self.s1_encoder, self.s2_encoder,
                    self.s3_encoder, self.s4_encoder)
        fusers = (self.conv1, self.conv2, self.conv3, self.conv4)
        decoders = (self.s1_decoder, self.s2_decoder,
                    self.s3_decoder, self.s4_decoder)
        codes = [[] for _ in encoders]
        for idx, x in enumerate(inputs):
            if label[idx] != 0:
                continue
            for enc, bucket in zip(encoders, codes):
                bucket.append(
                    enc(x, type=(idx + 1) * torch.ones(x1.shape[0], 1).cuda()))
        # NOTE(review): empty buckets (all labels non-zero) make torch.stack
        # raise -- callers must supply >= 1 available input.
        fused = []
        for fuse, bucket in zip(fusers, codes):
            stacked = torch.stack(bucket)
            pooled = torch.cat(
                [stacked.max(0)[0], stacked.mean(0), stacked.min(0)[0]], dim=1)
            fused.append(fuse(pooled))
        recons = [dec(code) for dec, code in zip(decoders, fused)]
        self.train()
        outputs = []
        for x, recon in zip(inputs, recons):
            outputs.extend((x[0], recon[0]))
        return tuple(outputs)

    def recon_criterion(self, input, target):
        """Mean absolute error (L1 loss) between *input* and *target*."""
        return (input - target).abs().mean()

    def update_learning_rate(self):
        """Advance the discriminator and generator LR schedulers, when set."""
        for scheduler in (self.dis_scheduler, self.gen_scheduler):
            if scheduler is not None:
                scheduler.step()