import torch
from torch import nn
from torchvision.models.vgg import vgg16
from models import blocks as B
import torch.nn.functional as F

class build_tv_loss(nn.Module):
    """Anisotropic total-variation loss on a (N, C, H, W) image batch.

    Penalises squared differences between vertically and horizontally
    adjacent pixels; each direction is normalised by its element count
    (excluding the batch dimension) and the result is averaged over the
    batch and scaled by ``tv_loss_weight``.
    """

    def __init__(self, tv_loss_weight=1):
        super(build_tv_loss, self).__init__()
        self.tv_loss_weight = tv_loss_weight

    def forward(self, x):
        n, _, height, width = x.size()
        # Finite differences between neighbouring pixels along each axis.
        diff_h = x[:, :, 1:, :] - x[:, :, :height - 1, :]
        diff_w = x[:, :, :, 1:] - x[:, :, :, :width - 1]
        # Normalise each direction by its per-sample element count.
        term_h = diff_h.pow(2).sum() / self.tensor_size(diff_h)
        term_w = diff_w.pow(2).sum() / self.tensor_size(diff_w)
        return self.tv_loss_weight * 2 * (term_h + term_w) / n

    @staticmethod
    def tensor_size(t):
        # Number of elements per sample (channels * height * width).
        return t.size()[1] * t.size()[2] * t.size()[3]

class build_sharp_loss(nn.Module):
    """Negative sharpness loss from first-order image gradients.

    Convolves each of the three colour channels of a (N, 3, H, W) batch
    with central-difference kernels, sums the squared vertical and
    horizontal responses across channels, and returns the negated mean —
    so minimising this loss maximises gradient energy (sharpness).
    """

    def __init__(self):
        super(build_sharp_loss, self).__init__()
        # Central-difference kernels: vertical and horizontal gradients.
        kernel_v = [[0, -1, 0],
                    [0, 0, 0],
                    [0, 1, 0]]
        kernel_h = [[0, 0, 0],
                    [-1, 0, 1],
                    [0, 0, 0]]
        # Register the fixed kernels as buffers: buffers move with the
        # module on .to(device)/.cuda() and are saved in the state dict.
        # The original `nn.Parameter(...).cuda()` produced plain tensors
        # pinned to GPU 0, which crashed on CPU-only machines and ignored
        # later device moves.
        self.register_buffer(
            'weight_h', torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0))
        self.register_buffer(
            'weight_v', torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0))

    def forward(self, x):
        # Accumulate squared gradient magnitude over the three channels.
        sharpness = 0
        for c in range(3):
            channel = x[:, c].unsqueeze(1)
            grad_v = F.conv2d(channel, self.weight_v, padding=1)
            grad_h = F.conv2d(channel, self.weight_h, padding=1)
            sharpness = sharpness + grad_v * grad_v + grad_h * grad_h
        # Negated mean: lower loss == sharper image.
        return torch.mean(-sharpness)

class build_forward_loss(nn.Module):
    """Composite SR loss: pixel MSE + VGG perceptual + TV + sharpness terms."""

    def __init__(self):
        super(build_forward_loss, self).__init__()
        # Frozen VGG16 feature extractor (first 31 feature layers) used
        # for the perceptual term.
        backbone = vgg16(pretrained=True)
        feature_extractor = nn.Sequential(*list(backbone.features)[:31]).eval()
        for weight in feature_extractor.parameters():
            weight.requires_grad = False
        self.loss_network = feature_extractor
        self.mse_loss = nn.MSELoss()
        self.tv_loss = build_tv_loss()
        self.sharp_loss = build_sharp_loss()

    def forward(self, hr, sr):
        pixel_term = self.mse_loss(hr, sr)
        perceptual_term = self.mse_loss(self.loss_network(hr),
                                        self.loss_network(sr))
        tv_term = self.tv_loss(sr)
        sharpness_term = self.sharp_loss(sr)
        # Fixed weighting of the auxiliary terms against the pixel loss.
        return (pixel_term
                + 0.006 * perceptual_term
                + 2e-8 * tv_term
                + 0.006 * sharpness_term)


class build_feedback_loss(nn.Module):
    """Feedback loss: pixel-wise MSE plus a weighted gradient-map MSE.

    NOTE(review): the previous version also instantiated a pretrained
    VGG16 (``self.loss_network``) and a TV-loss module that ``forward``
    never used — loading VGG16 downloads its weights and keeps ~500 MB of
    parameters in memory for nothing. Both dead members were removed;
    the computed loss value is unchanged.
    """

    def __init__(self):
        super(build_feedback_loss, self).__init__()
        self.mse_loss = nn.MSELoss()
        # Gradient-map extractor; its output is compared between HR and SR.
        self.gradient_loss = B.GetGradientNopadding()

    def forward(self, hr, sr):
        image_loss = self.mse_loss(hr, sr)
        gradient_loss = self.mse_loss(self.gradient_loss(hr),
                                      self.gradient_loss(sr))
        # Gradient term weighted consistently with build_forward_loss.
        return image_loss + 0.006 * gradient_loss

if __name__ == "__main__":
    # Smoke test. The original called build_loss(), which is not defined
    # anywhere in this module and raised NameError; build_forward_loss is
    # the composite loss this file actually provides.
    g_loss = build_forward_loss()
    print(g_loss)