import torch as t
from torch import nn, optim
from thop import profile
import torchsummary
from torch.utils.tensorboard import SummaryWriter
import os
import shutil
import numpy as np
import matplotlib.pyplot as plt
class test_net(nn.Module):
    """Toy CNN for single-channel 32x32 inputs producing 2-class logits."""

    def __init__(self):
        super(test_net, self).__init__()
        # Convolutional trunk: each 3x3 conv (no padding) trims 2 px per side
        # pair, each 2x2 max-pool halves the spatial size.
        # 32 -> 30 -> 15 -> 13 -> 6 -> 4, so the trunk emits 128 x 4 x 4.
        conv_stack = [
            nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=0),
            nn.MaxPool2d(2),
            nn.Conv2d(16, 64, kernel_size=3, stride=1, padding=0),
            nn.MaxPool2d(2),
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=0),
        ]
        self.features = nn.Sequential(*conv_stack)
        self.flat = nn.Flatten()
        # MLP head: 128 * 4 * 4 = 2048 features in, 2 logits out.
        self.classify = nn.Sequential(
            nn.Linear(2048, 256),
            nn.ReLU(),
            nn.Linear(256, 64),
            nn.ReLU(),
            nn.Linear(64, 2),
        )

    def forward(self, x):
        """Run trunk, flatten, and classify; returns (N, 2) logits."""
        return self.classify(self.flat(self.features(x)))



def get_threshold(model, loss):
    """Compute a pruning threshold from the Conv2d layers of *model*.

    Currently a placeholder: it only evaluates the first-order gradients of
    *loss* w.r.t. each Conv2d weight and always returns 0. The gradients are
    taken with create_graph=True so that second-order (Hessian) terms can be
    derived here later.

    Args:
        model: module whose Conv2d sub-modules are inspected.
        loss: scalar loss tensor still attached to the autograd graph.

    Returns:
        int: 0 — placeholder threshold until the Hessian-based rule is done.
    """
    for layer in model.modules():
        if isinstance(layer, nn.Conv2d):
            # retain_graph=True keeps the graph intact so the caller can
            # still run loss.backward() afterwards; create_graph=True makes
            # the grads themselves differentiable for future Hessian use.
            t.autograd.grad(loss, layer.weight, retain_graph=True, create_graph=True)
    # TODO: derive an actual threshold from the (Hessian-weighted) gradients.
    return 0

def get_l1(model):
    """Collect a per-Conv2d weight-magnitude statistic, plot and return it.

    NOTE(review): despite the name, this sums *squared* weights (a squared
    L2 norm), not absolute values — confirm whether true L1 was intended
    before relying on the values.

    Args:
        model: module whose Conv2d sub-modules are measured.

    Returns:
        list: one scalar tensor per Conv2d layer, in module order.
    """
    l1_list = []
    for layer in model.modules():
        if isinstance(layer, nn.Conv2d):
            l1_list.append(t.sum(t.pow(layer.weight.data, 2)))
    # x-axis sized to the actual number of conv layers (1-based index);
    # the previous hard-coded [1, 2, 3] crashed for any other layer count.
    plt.plot(range(1, len(l1_list) + 1), l1_list)
    plt.show()
    print(l1_list)

    return l1_list

def get_mask(data, threshold):
    """Return a boolean mask marking entries whose magnitude exceeds *threshold*."""
    return data.abs() > threshold

if __name__ == '__main__':
    # Smoke-test the toy CNN on one random grayscale 32x32 image.
    inputs = t.rand(1, 1, 32, 32)
    model = test_net()

    # Soft two-class target passed as class probabilities.
    labels = t.tensor([[0., 1.]])
    outputs = model(inputs)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    loss = criterion(outputs, labels)

    # Threshold first: get_threshold keeps the autograd graph alive
    # (retain_graph=True) so the backward() below still works.
    threshold = get_threshold(model, loss)
    loss.backward()

    li_list = get_l1(model)

    # Pruning pass over the conv layers — currently a no-op placeholder.
    for layer in model.modules():
        if isinstance(layer, nn.Conv2d):
            pass


