import torchvision
import torch
from torch import nn

# NOTE: The commented-out hook code below could also return intermediate
# activations, but it offers no efficiency gain, so it is simpler to just
# truncate the original network instead.
# INNER_TENSOR = None

# def get_inner_tensor_for_losses(model, num, x):
    
#     def get_features_hook(self, input, output):
#         global INNER_TENSOR
#         INNER_TENSOR = output.data.cpu().numpy()
#     handle = model.features[num].register_forward_hook(get_features_hook)
#     output = model(x)
#     return INNER_TENSOR



def get_removed_vgg(vgg16, num):
    """Build a truncated feature extractor from a VGG-style network.

    Takes the first child of ``vgg16`` (its ``features`` block) and keeps
    only the first ``num`` layers, wrapped in a fresh ``nn.Sequential``.
    All retained parameters are marked trainable.
    """
    feature_layers = list(vgg16.children())[0][:num]
    truncated = nn.Sequential(*feature_layers)
    for param in truncated.parameters():
        param.requires_grad = True
    return truncated

def get_inner_tensor(vgg16, num, x):
    """Return the activations of ``x`` after the first ``num`` feature layers.

    Builds a truncated network via ``get_removed_vgg`` on every call and
    runs ``x`` through it.
    """
    truncated = get_removed_vgg(vgg16, num)
    return truncated(x)

def compute_perceptual_loss(vgg16, y, yc, num = 16):
    """Perceptual (feature-reconstruction) loss between two images.

    Extracts the feature maps of ``y`` and ``yc`` at layer ``num`` of the
    VGG feature block and returns their mean-squared error.
    """
    features_y = get_inner_tensor(vgg16, num, y)
    features_yc = get_inner_tensor(vgg16, num, yc)
    return nn.MSELoss()(features_y, features_yc)

def _gram_matrix(feature_map):
    """Normalized Gram matrix of a feature map.

    Flattens ``feature_map`` (assumed (1, C, H, W) — TODO confirm batch
    size 1 upstream) to (C, H*W) using its OWN dimensions and returns
    ``F @ F.T / (C * H * W)``.
    """
    n_channel, height, width = feature_map.size()[1:4]
    flat = torch.reshape(feature_map, shape = (n_channel, height * width))
    return torch.matmul(flat, flat.T) / (n_channel * height * width)

def compute_style_loss(vgg16, y, ys, nums):
    """Style loss between image ``y`` and style target ``ys``.

    For each layer index in ``nums``, extracts feature maps from both
    images, computes their normalized Gram matrices, and takes the
    Frobenius norm of the difference (sqrt of summed squared error).
    Returns the list of per-layer loss tensors.

    Bug fix vs. the original: ``y``'s feature map is now flattened with
    its own (C, H, W), not ``ys``'s — the old code mis-reshaped (or
    crashed) whenever the two feature maps differed spatially.
    """
    # Frobenius norm via summed squared error + sqrt; hoisted out of the loop.
    criterion = nn.MSELoss(reduction = 'sum')
    losses = []
    for num in nums:
        gram_y = _gram_matrix(get_inner_tensor(vgg16, num, y))
        gram_ys = _gram_matrix(get_inner_tensor(vgg16, num, ys))
        losses.append(criterion(gram_y, gram_ys).sqrt())
    return losses

    
