from torchvision.models import vgg16
from torchvision import transforms
import torch
import torch.nn as nn


class PerceptualLossFn(nn.Module):
    """VGG16-based perceptual loss.

    Runs prediction and target through four successive slices of a
    pretrained VGG16 ``features`` stack and averages the MSE between the
    corresponding activations.
    """

    def __init__(self, weight_path, device):
        """
        Args:
            weight_path: path to a VGG16 state dict (``torch.save`` format).
            device: device the VGG feature extractor is moved to.
        """
        super().__init__()

        vgg = vgg16()
        vgg.load_state_dict(torch.load(weight_path))
        vgg.to(device)
        # The loss network is a fixed reference: freeze its weights so an
        # optimizer that (accidentally) receives this module's parameters
        # cannot update them, and switch to eval mode.  VGG16 ``features``
        # contains only conv/relu/pool layers, so eval() does not change
        # the computed values; gradients still flow through to the input.
        vgg.eval()
        for p in vgg.parameters():
            p.requires_grad_(False)

        features = list(vgg.features.children())
        # Four feature levels of increasing depth (slice bounds as in the
        # original: [:3], [3:8], [8:15], [15:27]).
        self.vgg_part1 = nn.Sequential(*features[:3])
        self.vgg_part2 = nn.Sequential(*features[3:8])
        self.vgg_part3 = nn.Sequential(*features[8:15])
        self.vgg_part4 = nn.Sequential(*features[15:27])

        self.lossfn = nn.MSELoss(reduction='mean')
        # ImageNet channel mean/std; inputs are presumably RGB in [0, 1]
        # (TODO confirm against caller).
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])

    def forward(self, pred, target):
        """Return the mean of the MSE over the four VGG feature levels."""
        pred = self.normalize(pred)
        target = self.normalize(target)

        p1 = self.vgg_part1(pred)
        p2 = self.vgg_part2(p1)
        p3 = self.vgg_part3(p2)
        p4 = self.vgg_part4(p3)

        # Target features are a fixed reference; no gradient needed.
        with torch.no_grad():
            t1 = self.vgg_part1(target)
            t2 = self.vgg_part2(t1)
            t3 = self.vgg_part3(t2)
            t4 = self.vgg_part4(t3)

        # Perceptual loss: average MSE across the four levels.
        loss = (
            self.lossfn(p1, t1) +
            self.lossfn(p2, t2) +
            self.lossfn(p3, t3) +
            self.lossfn(p4, t4)
        ) / 4.

        return loss

class WGANGP:
    """WGAN-GP loss helper: critic loss with gradient penalty, generator loss."""

    def __init__(self, device, lambda_gp=10):
        """
        Args:
            device: device used for the random interpolation coefficients.
            lambda_gp: weight of the gradient-penalty term.
        """
        self.device = device
        self.lambda_gp = lambda_gp

    def discriminator_loss(self, real_output, fake_output, real_samples, fake_samples, discriminator):
        """Wasserstein critic loss plus gradient penalty on interpolated samples."""
        # Wasserstein distance estimate: critic should score real high, fake low.
        wasserstein = torch.mean(fake_output) - torch.mean(real_output)

        # Random points on the segments joining real and fake samples.
        batch = real_samples.size(0)
        eps = torch.rand(batch, 1, 1, 1, device=self.device)
        mixed = eps * real_samples + (1 - eps) * fake_samples
        mixed.requires_grad_(True)

        # Critic response at the interpolated points.
        critic_mixed = discriminator(mixed)

        # Gradient of the critic output w.r.t. the interpolated inputs;
        # create_graph so the penalty itself is differentiable.
        ones = torch.ones_like(critic_mixed, device=self.device, requires_grad=False)
        grad = torch.autograd.grad(
            outputs=critic_mixed,
            inputs=mixed,
            grad_outputs=ones,
            create_graph=True,
            retain_graph=True,
            only_inputs=True,
        )[0]

        # Penalize deviation of each sample's gradient L2 norm from 1.
        flat = grad.view(batch, -1)
        penalty = ((flat.norm(2, dim=1) - 1) ** 2).mean()

        return wasserstein + self.lambda_gp * penalty

    def generator_loss(self, fake_output):
        """Generator maximizes the critic's score on fakes (same as plain WGAN)."""
        return -torch.mean(fake_output)

class K_mean(nn.Module):
    """Online k-means: maintains k centroids as running means of assigned points.

    Each entry of ``p_num`` is ``[center, count]`` where ``center`` is a
    ``(1, dim)`` tensor and ``count`` is how many points were folded into it.
    """

    def __init__(self, k, dim, device):
        """
        Args:
            k: number of clusters.
            dim: feature dimension of each centroid.
            device: device the centroids are allocated on.
        """
        super().__init__()
        self.k = k
        self.p_num = [[torch.randn(1, dim, device=device), 1] for _ in range(k)]

    def init_each_epoch(self):
        """Reset every centroid's count to 1 (centers themselves are kept)."""
        for i in range(self.k):
            self.p_num[i][1] = 1

    def forward(self, x):
        """Assign ``x`` to the nearest centroid, update it, return the new center.

        Bug fix: the original loop wrote the winning index into ``minDict``
        instead of ``minIdx``, so the index stayed -1 and the LAST centroid
        was always the one updated, regardless of distance.  Also replaced
        the ``dict`` builtin shadowing and the magic sentinel with
        ``float('inf')``.
        """
        min_dist = float('inf')
        min_idx = -1
        # Find the nearest centroid by Euclidean distance
        # (ties go to the later centroid, matching the original `<=`).
        for i, (center, _) in enumerate(self.p_num):
            dist = torch.sqrt(torch.sum((center - x) ** 2))
            if dist <= min_dist:
                min_dist = dist
                min_idx = i
        # Fold x into the winning centroid's running mean.
        center, count = self.p_num[min_idx]
        new_center = (center * count + x) / (count + 1)
        self.p_num[min_idx][0] = new_center
        self.p_num[min_idx][1] = count + 1

        return new_center.detach()


class VaeLossFn:
    """Combined VAE training loss: L1 + VGG perceptual + weighted KL term.

    The KL term pulls each latent mean toward the nearest online
    k-means centroid rather than a fixed zero mean.
    """

    def __init__(self, weight_path, device):
        """
        Args:
            weight_path: path to VGG16 weights for the perceptual loss.
            device: device for the perceptual network and the centroids.
        """
        self.L1 = nn.L1Loss()
        self.perceptual_lossFn = PerceptualLossFn(weight_path, device)
        self.kmean = K_mean(k=5, dim=1024, device=device)
        # Small weight keeps the KL term from dominating the pixel losses.
        self.kl_weight = 2e-4

    def init_kmean_each_epoch(self):
        """Reset the k-means point counts at the start of every epoch."""
        self.kmean.init_each_epoch()

    def __call__(self, gen, img, P):
        """Return the total loss.

        Args:
            gen: generated image batch.
            img: ground-truth image batch.
            P: ``(mean, log_std)`` of the latent distribution.
        """
        # Pixel-wise reconstruction term.
        reconstruction = self.L1(gen, img)
        # VGG feature-space term.
        perceptual = self.perceptual_lossFn(gen, img)
        # KL divergence of N(mean, std) against N(nearest centroid, 1).
        mean, log_std = P
        centroid = self.kmean(mean)
        kl = torch.sum(
            (torch.exp(log_std)**2 + (mean - centroid)**2 - 2*log_std - 1) * 0.5
        )
        return reconstruction + perceptual + self.kl_weight * kl
    
if __name__ == '__main__':
    # Quick sanity check: print the VGG16 architecture.
    # Fix: `weights=True` relies on torchvision's deprecated boolean legacy
    # interface; the new (>=0.13) weights API takes a WeightsEnum or its
    # name as a string.  'IMAGENET1K_V1' is what the legacy `True` mapped to.
    print(vgg16(weights='IMAGENET1K_V1'))