import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.vgg import vgg16
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class L_color(nn.Module):
    """Color constancy loss: penalizes deviations between the mean values of
    the R, G and B channels of the enhanced image."""

    def __init__(self):
        super(L_color, self).__init__()

    def forward(self, x):
        b, c, h, w = x.shape
        # Per-channel spatial means, shape (b, 3, 1, 1).
        mean_rgb = torch.mean(x, [2, 3], keepdim=True)
        mr, mg, mb = torch.split(mean_rgb, 1, dim=1)
        Drg = torch.pow(mr - mg, 4)
        Drb = torch.pow(mr - mb, 4)
        Dgb = torch.pow(mb - mg, 4)
        return torch.sqrt(Drg + Drb + Dgb)
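
# Example usage (illustrative; tensor shapes and variable names are arbitrary):
#   color_loss = L_color()
#   enhanced = torch.rand(4, 3, 256, 256, device=device)
#   loss_col = torch.mean(color_loss(enhanced))
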
class L_spa(nn.Module):
    """Spatial consistency loss: preserves the contrast between neighbouring
    regions of the original image in the enhanced image."""

    def __init__(self):
        super(L_spa, self).__init__()
        # Directional difference kernels (left/right/up/down neighbours).
        kernel_left = torch.tensor([[0, 0, 0], [-1, 1, 0], [0, 0, 0]], dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
        kernel_right = torch.tensor([[0, 0, 0], [0, 1, -1], [0, 0, 0]], dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
        kernel_up = torch.tensor([[0, -1, 0], [0, 1, 0], [0, 0, 0]], dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
        kernel_down = torch.tensor([[0, 0, 0], [0, 1, 0], [0, -1, 0]], dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
        self.weight_left = nn.Parameter(data=kernel_left, requires_grad=False)
        self.weight_right = nn.Parameter(data=kernel_right, requires_grad=False)
        self.weight_up = nn.Parameter(data=kernel_up, requires_grad=False)
        self.weight_down = nn.Parameter(data=kernel_down, requires_grad=False)
        self.pool = nn.AvgPool2d(4)

    def forward(self, org, enhance):
        b, c, h, w = org.shape
        # Average over channels, then pool into local 4x4 regions.
        org_mean = torch.mean(org, 1, keepdim=True)
        enhance_mean = torch.mean(enhance, 1, keepdim=True)
        org_pool = self.pool(org_mean)
        enhance_pool = self.pool(enhance_mean)
        # Differences between each region and its four neighbours.
        D_org_left = F.conv2d(org_pool, self.weight_left, padding=1)
        D_org_right = F.conv2d(org_pool, self.weight_right, padding=1)
        D_org_up = F.conv2d(org_pool, self.weight_up, padding=1)
        D_org_down = F.conv2d(org_pool, self.weight_down, padding=1)
        D_enhance_left = F.conv2d(enhance_pool, self.weight_left, padding=1)
        D_enhance_right = F.conv2d(enhance_pool, self.weight_right, padding=1)
        D_enhance_up = F.conv2d(enhance_pool, self.weight_up, padding=1)
        D_enhance_down = F.conv2d(enhance_pool, self.weight_down, padding=1)
        D_left = torch.pow(D_org_left - D_enhance_left, 2)
        D_right = torch.pow(D_org_right - D_enhance_right, 2)
        D_up = torch.pow(D_org_up - D_enhance_up, 2)
        D_down = torch.pow(D_org_down - D_enhance_down, 2)
        E = D_left + D_right + D_up + D_down
        # E = 25 * (D_left + D_right + D_up + D_down)
        return E
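
# Example usage (illustrative; org is the low-light input, enhanced the network output):
#   spa_loss = L_spa()
#   loss_spa = torch.mean(spa_loss(org, enhanced))
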
class L_exp(nn.Module):
    """Exposure control loss: drives the average intensity of local patches
    towards a target exposure level mean_val."""

    def __init__(self, patch_size):
        super(L_exp, self).__init__()
        self.pool = nn.AvgPool2d(patch_size)

    def forward(self, x, mean_val):
        b, c, h, w = x.shape
        x = torch.mean(x, 1, keepdim=True)
        mean = self.pool(x)
        target = torch.tensor([mean_val], dtype=torch.float, device=device)
        d = torch.mean(torch.pow(mean - target, 2))
        return d
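
# Example usage (illustrative; a patch size of 16 and a 0.6 target gray level are assumed values):
#   exp_loss = L_exp(16)
#   loss_exp = torch.mean(exp_loss(enhanced, 0.6))
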
class L_TV(nn.Module):
    """Total variation loss: encourages spatial smoothness of the predicted
    map by penalizing differences between adjacent pixels."""

    def __init__(self, TVLoss_weight=1):
        super(L_TV, self).__init__()
        self.TVLoss_weight = TVLoss_weight

    def forward(self, x):
        batch_size = x.size()[0]
        h_x = x.size()[2]
        w_x = x.size()[3]
        # Number of vertical / horizontal neighbour pairs.
        count_h = (h_x - 1) * w_x
        count_w = h_x * (w_x - 1)
        h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2).sum()
        w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2).sum()
        return self.TVLoss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size
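
# Example usage (illustrative; A stands for a predicted parameter map of shape (b, c, h, w)):
#   tv_loss = L_TV()
#   loss_tv = tv_loss(A)
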
class Sa_Loss(nn.Module):
    """Saturation loss: mean distance of each pixel's colour from the
    image's per-channel mean colour."""

    def __init__(self):
        super(Sa_Loss, self).__init__()

    def forward(self, x):
        b, c, h, w = x.shape
        r, g, b = torch.split(x, 1, dim=1)
        mean_rgb = torch.mean(x, [2, 3], keepdim=True)
        mr, mg, mb = torch.split(mean_rgb, 1, dim=1)
        Dr = r - mr
        Dg = g - mg
        Db = b - mb
        k = torch.pow(torch.pow(Dr, 2) + torch.pow(Db, 2) + torch.pow(Dg, 2), 0.5)
        k = torch.mean(k)
        return k
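
# Example usage (illustrative):
#   sa_loss = Sa_Loss()
#   loss_sa = sa_loss(enhanced)
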
class perception_loss(nn.Module):
    """Perceptual feature extractor: frozen VGG-16 features up to relu4_3,
    used to compare images in feature space."""

    def __init__(self):
        super(perception_loss, self).__init__()
        features = vgg16(pretrained=True).features
        # Split the VGG feature stack at the relu1_2 / relu2_2 / relu3_3 / relu4_3 boundaries.
        self.to_relu_1_2 = nn.Sequential()
        self.to_relu_2_2 = nn.Sequential()
        self.to_relu_3_3 = nn.Sequential()
        self.to_relu_4_3 = nn.Sequential()
        for x in range(4):
            self.to_relu_1_2.add_module(str(x), features[x])
        for x in range(4, 9):
            self.to_relu_2_2.add_module(str(x), features[x])
        for x in range(9, 16):
            self.to_relu_3_3.add_module(str(x), features[x])
        for x in range(16, 23):
            self.to_relu_4_3.add_module(str(x), features[x])
        # The VGG weights are fixed; the network is only used as a feature extractor.
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, x):
        h = self.to_relu_1_2(x)
        h = self.to_relu_2_2(h)
        h = self.to_relu_3_3(h)
        h = self.to_relu_4_3(h)
        # Only the relu4_3 activations are returned.
        return h
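

if __name__ == "__main__":
    # Minimal smoke test combining the losses above on random tensors.
    # The loss weights and the 0.6 exposure target below are illustrative
    # assumptions, not values taken from a specific training configuration.
    # perception_loss is skipped here because it downloads VGG-16 weights.
    org = torch.rand(2, 3, 256, 256, device=device)       # stand-in for the low-light input
    enhanced = torch.rand(2, 3, 256, 256, device=device)  # stand-in for the enhanced output
    A = torch.rand(2, 3, 256, 256, device=device)         # stand-in for a predicted parameter map

    loss_col = torch.mean(L_color()(enhanced))
    loss_spa = torch.mean(L_spa()(org, enhanced))
    loss_exp = torch.mean(L_exp(16)(enhanced, 0.6))
    loss_tv = L_TV()(A)

    total = loss_tv + loss_spa + 5 * loss_col + 10 * loss_exp
    print(total.item())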