import torch
import torch.nn.functional as F
from math import exp


def l1_loss(network_output, gt):
    # Mean absolute error.
    return torch.abs(network_output - gt).mean()


def l2_loss(network_output, gt):
    # Mean squared error.
    return ((network_output - gt) ** 2).mean()


def gaussian(window_size, sigma):
    # 1-D Gaussian kernel, normalized to sum to 1.
    gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
    return gauss / gauss.sum()


def create_window(window_size, channel):
    # Separable 2-D Gaussian window, replicated per channel for a grouped conv.
    # (The deprecated Variable wrapper is unnecessary; plain tensors suffice.)
    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
    _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
    window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
    return window


def ssim(img1, img2, window_size=11, size_average=True):
    channel = img1.size(-3)
    window = create_window(window_size, channel)

    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)

    return _ssim(img1, img2, window, window_size, channel, size_average)


def _ssim(img1, img2, window, window_size, channel, size_average=True):
    # Local means via a grouped Gaussian convolution.
    mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
    mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2

    # Local variances and covariance.
    sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2

    # Stability constants from Wang et al. (2004), assuming inputs in [0, 1].
    C1 = 0.01 ** 2
    C2 = 0.03 ** 2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))

    if size_average:
        return ssim_map.mean()
    else:
        return ssim_map.mean(1).mean(1).mean(1)
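

# Hedged self-test: random tensors stand in for images in [0, 1]; the shapes
# and the `1 - ssim` loss convention below are illustrative assumptions, not
# part of the original module.
if __name__ == "__main__":
    pred = torch.rand(1, 3, 64, 64)
    gt = torch.rand(1, 3, 64, 64)
    print("ssim:", ssim(pred, gt).item(), "-> loss:", (1.0 - ssim(pred, gt)).item())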
|
|
|
import torch
import torch.nn as nn
import torch.nn.functional as F

# Requires the taming-transformers package; the wildcard import provides
# LPIPS, NLayerDiscriminator, weights_init, adopt_weight, hinge_d_loss and
# vanilla_d_loss used below.
from taming.modules.losses.vqperceptual import *

class LPIPSWithDiscriminator(nn.Module):
    def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0,
                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
                 disc_loss="hinge"):
        super().__init__()
        assert disc_loss in ["hinge", "vanilla"]
        self.kl_weight = kl_weight
        self.pixel_weight = pixelloss_weight
        self.perceptual_loss = LPIPS().eval()
        self.perceptual_weight = perceptual_weight

        # Learnable log-variance for the reconstruction term.
        self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
                                                 n_layers=disc_num_layers,
                                                 use_actnorm=use_actnorm
                                                 ).apply(weights_init)
        self.discriminator_iter_start = disc_start
        self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.disc_conditional = disc_conditional

    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        # Scale the adversarial term so that its gradient norm at the last
        # decoder layer matches that of the reconstruction (NLL) loss.
        if last_layer is not None:
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]

        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
        d_weight = d_weight * self.discriminator_weight
        return d_weight

    def forward(self, inputs, reconstructions, optimizer_idx,
                global_step, last_layer=None, cond=None, split="train"):
        rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
        if self.perceptual_weight > 0:
            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
            rec_loss = rec_loss + self.perceptual_weight * p_loss

        # Ramp the adversarial term in only after `discriminator_iter_start`.
        disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)

        if optimizer_idx == 0:
            # Generator/autoencoder update: reconstruction plus a hinge-style
            # adversarial term (the reconstruction loss was previously computed
            # but dropped here; it is folded in so it actually contributes).
            logits_fake = self.discriminator(reconstructions.contiguous())
            g_loss = F.relu(1.0 - logits_fake).mean()
            loss = rec_loss.mean() + disc_factor * self.discriminator_weight * g_loss
            return loss

        if optimizer_idx == 1:
            # Discriminator update: real vs. reconstructed images, both
            # detached from the generator's graph.
            logits_real = self.discriminator(inputs.contiguous().detach())
            logits_fake = self.discriminator(reconstructions.contiguous().detach())
            d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
            return d_loss
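
# Hedged training-loop sketch: forward() returns the generator-side loss for
# optimizer_idx=0 and the discriminator loss for optimizer_idx=1, so two
# optimizers (hypothetical opt_g / opt_d below) are stepped in alternation:
#
#     for step, (x, x_rec) in enumerate(batches):
#         opt_g.zero_grad()
#         loss_fn(x, x_rec, optimizer_idx=0, global_step=step).backward()
#         opt_g.step()
#
#         opt_d.zero_grad()
#         loss_fn(x, x_rec, optimizer_idx=1, global_step=step).backward()
#         opt_d.step()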
|
|
|
import torch
from chamfer_distance import ChamferDistance  # external chamfer_distance package

# Module-level instance reused by calculate_chamfer_loss below.
chamfer_dist_module = ChamferDistance()

def calculate_chamfer_loss(pred, gt):
    """
    Compute the Chamfer Distance loss between two point clouds.

    Args:
        pred (torch.Tensor): predicted point cloud of shape (batch_size, num_points, 3)
        gt (torch.Tensor): ground-truth point cloud of shape (batch_size, num_points, 3)

    Returns:
        torch.Tensor: scalar Chamfer Distance loss
    """
    # dist1/dist2 are per-point nearest-neighbor distances in each direction;
    # idx1/idx2 are the correspondence indices (unused here).
    dist1, dist2, idx1, idx2 = chamfer_dist_module(pred, gt)
    loss = (torch.mean(dist1) + torch.mean(dist2)) / 2
    return loss
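

# Hedged usage sketch: random point clouds stand in for real data; depending
# on the chamfer_distance build, this may require a CUDA device.
if __name__ == "__main__":
    pred_pts = torch.rand(2, 1024, 3)
    gt_pts = torch.rand(2, 1024, 3)
    print("chamfer loss:", calculate_chamfer_loss(pred_pts, gt_pts).item())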
|
|
|
if __name__ == "__main__":
    discriminator = LPIPSWithDiscriminator(disc_start=0, disc_weight=0.5)
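    # Hedged smoke test: random images stand in for data; running this needs
    # taming-transformers installed and downloads LPIPS weights on first use.
    x = torch.rand(2, 3, 64, 64)
    x_rec = torch.rand(2, 3, 64, 64)
    print("generator loss:", discriminator(x, x_rec, optimizer_idx=0, global_step=1).item())
    print("discriminator loss:", discriminator(x, x_rec, optimizer_idx=1, global_step=1).item())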