import torchvision
import torch
from torch import nn

# Note: the hook-based code commented out below could also be used to return
# intermediate feature maps, but it offers no efficiency gain over simply
# trimming the original network, so the trimmed-network approach is preferred.
# INNER_TENSOR = None

# def get_inner_tensor_for_losses(model, num, x):
    
#     def get_features_hook(self, input, output):
#         global INNER_TENSOR
#         INNER_TENSOR = output.data.cpu().numpy()
#     handle = model.features[num].register_forward_hook(get_features_hook)
#     output = model(x)
#     return INNER_TENSOR


def compute_loss(y_pred, y_data):
    """Return the cross-entropy loss between predictions and targets.

    Args:
        y_pred: raw (unnormalized) class scores, shape (batch, n_classes).
        y_data: ground-truth class indices, shape (batch,).

    Returns:
        Scalar loss tensor (mean cross-entropy over the batch).
    """
    criterion = nn.CrossEntropyLoss()
    return criterion(y_pred, y_data)



# def get_removed_vgg(vgg16, num):
#     vgg_new = nn.Sequential(*list(vgg16.children())[0][:num])
#     for p in vgg_new.parameters():
#         p.requires_grad = True
#     return vgg_new

# def get_inner_tensor(vgg16, num, x):
#     vgg_new = get_removed_vgg(vgg16, num)
#     inner_tensor = vgg_new(x)
#     return inner_tensor

# def compute_perceptual_loss(vgg16, y, yc, num = 16):
#     y_feature_maps = get_inner_tensor(vgg16, num, y)
#     batch_size = y_feature_maps.size()[0]
#     yc_feature_maps = get_inner_tensor(vgg16, num, yc)
#     # yc_feature_maps_repeated = yc_feature_maps.repeat(batch_size, 1, 1, 1)
#     criterion = nn.MSELoss()
#     loss = criterion(y_feature_maps, yc_feature_maps)
#     return loss

# def compute_style_loss(vgg16, y, ys, nums):
#     losses = []
#     for num in nums:
#         y_feature_map = get_inner_tensor(vgg16, num, y)
#         ys_feature_map = get_inner_tensor(vgg16, num, ys)
#         n_channel = ys_feature_map.size()[1]
#         height = ys_feature_map.size()[2]
#         width = ys_feature_map.size()[3]
#         batch_size = y_feature_map.size()[0]
#         ys_reshaped = torch.reshape(ys_feature_map, shape = (n_channel, height * width))
#         gram_ys = torch.matmul(ys_reshaped, ys_reshaped.T) / (n_channel * height * width)
#         criterion = nn.MSELoss(reduction = 'sum')
#         for batch in range(batch_size):
#             y_reshaped = torch.reshape(y_feature_map[batch], shape = (n_channel, height * width))
#             gram_y = torch.matmul(y_reshaped, y_reshaped.T) / (n_channel * height * width)
#             loss = criterion(gram_y, gram_ys).sqrt() / batch_size
#             losses.append(loss)
#     return losses

    
