import torch
import torch.nn as nn
import torch.optim as optim
# Fix the global RNG seed so the randomly generated sample, the dummy
# initialization, and the model weights are reproducible across runs.
torch.manual_seed(12345)
class predictor(nn.Module):
    """Two-layer MLP regressor: Linear -> ReLU -> Linear.

    Maps a ``(batch, input_size)`` float tensor to a
    ``(batch, output_size)`` tensor with no output activation.
    """

    def __init__(self, input_size: int, hidden_size: int, output_size: int):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run the forward pass: fc2(relu(fc1(x)))."""
        x = self.fc1(x)
        x = self.relu1(x)
        x = self.fc2(x)
        return x
if __name__ == '__main__':
    # Deep-leakage-from-gradients (DLG) demo: given only the parameter
    # gradients a model produced on a private (input, label) pair, recover
    # that pair by optimizing a dummy sample until its gradients match.

    # Ground-truth private sample. Gradients are taken w.r.t. the model
    # parameters only, so the data/label do not need requires_grad.
    ipt = torch.randn((1, 14))
    lbl = torch.randn((1, 1))

    model = predictor(input_size=14, hidden_size=32, output_size=1)
    criterion = nn.MSELoss()

    opt = model(ipt)
    loss = criterion(opt, lbl)
    print(loss)

    # Gradient of the loss w.r.t. every model parameter — the information
    # assumed to be "leaked" (e.g. shared in federated learning).
    dy_dx = torch.autograd.grad(loss, model.parameters())
    original_dy_dx = [g.detach().clone() for g in dy_dx]
    print(dy_dx)

    # The last gradient is fc2's bias gradient; for a single-output MSE
    # loss it is proportional to (prediction - label), so it is inspected
    # here as a loss surrogate. It is only printed, never optimized, so it
    # does not need requires_grad.
    cal_loss = dy_dx[-1].detach().clone()[0]
    print(cal_loss)

    # Dummy sample to be optimized so its gradients match the leaked ones.
    dummy_data = torch.randn(ipt.size()).requires_grad_(True)
    dummy_label = torch.randn(lbl.size()).requires_grad_(True)
    optimizer = optim.LBFGS([dummy_data, dummy_label], lr=0.1)

    def closure():
        # Gradient-matching objective: squared L2 distance between the
        # dummy sample's parameter gradients and the leaked gradients.
        optimizer.zero_grad()
        dummy_pred = model(dummy_data)
        dummy_loss = criterion(dummy_pred, dummy_label)
        # create_graph=True keeps the graph of the gradient computation so
        # we can backpropagate through the gradients themselves.
        dummy_dy_dx = torch.autograd.grad(
            dummy_loss, model.parameters(), create_graph=True
        )
        grad_diff = sum(
            ((gx, gy)[0].sub(gy) ** 2).sum()
            for gx, gy in zip(dummy_dy_dx, original_dy_dx)
        )
        grad_diff.backward()
        return grad_diff

    for iters in range(1500):
        # LBFGS.step returns the closure's final value; reuse it instead of
        # re-running the expensive closure just to print progress.
        current_loss = optimizer.step(closure)
        if iters % 10 == 0:
            print(current_loss)
            print(iters, "%.4f" % current_loss.item())

    # Compare the recovered sample against the ground truth.
    print(ipt)
    print(dummy_data)
    print(lbl)
    print(dummy_label)

