"""
Newton法极小化下面的函数
函数 f(x)=0.5*(X^(T)X)+0.25(X^(T)AX).^2
梯度 g(x)=X + X^(T)AXAX
hessian G(x)=I + 2AXAX + X^(T)AXA
A=[[5,1,0,0.5],[1,4,0.5,0],[0,0.5,3,0],[0.5,0,0,2]]
X0=[1,1,0,0]
"""

import numpy as np
import matplotlib.pyplot as plt
import torch

# Starting point X0 and the symmetric matrix A from the problem statement.
# X must be a float tensor: an integer tensor cannot be matmul'd with float A.
X = torch.tensor([1.0, 1.0, 0.0, 0.0])
A = torch.tensor([[5, 1, 0, 0.5], [1, 4, 0.5, 0], [0, 0.5, 3, 0], [0.5, 0, 0, 2]])
# f(X) = 0.5 * X^T X + 0.25 * (X^T A X)^2.
# Use matrix products (@), not elementwise `*`: the original expression
# produced a 4-vector instead of the scalar objective value.
y = 0.5 * (X @ X) + 0.25 * (X @ A @ X) ** 2


def gradient(x, a=None):
    """Gradient of f at x: g(x) = x + (x^T A x) * (A x).

    x: 1-D array-like point.
    a: optional matrix; defaults to the module-level A (converted to numpy).
    Returns a 1-D numpy array of the same length as x.
    """
    # The original used elementwise `*` (and `.t()`, which numpy arrays lack);
    # the formula requires true matrix-vector products.
    a = np.asarray(A if a is None else a, dtype=float)
    x = np.asarray(x, dtype=float)
    ax = a @ x
    return x + (x @ ax) * ax  # x @ ax == x^T A x (a scalar)


def hessian(x, a=None):
    """Hessian of f at x: G(x) = I + (x^T A x) * A + 2 * (A x)(A x)^T.

    x: 1-D array-like point.
    a: optional matrix; defaults to the module-level A (converted to numpy).
    Returns an (n, n) numpy array, n = len(x).
    """
    # Fixes two defects: np.eye() was called with no size argument
    # (TypeError), and the products were elementwise instead of matrix ones.
    a = np.asarray(A if a is None else a, dtype=float)
    x = np.asarray(x, dtype=float)
    ax = a @ x
    return np.eye(x.size) + (x @ ax) * a + 2.0 * np.outer(ax, ax)


def newton(x0, grad=None, hess=None, tol=1e-5, imax=1000):
    """Run (undamped) Newton iterations from x0 and return the iterate trace.

    x0:   1-D array-like starting point.
    grad: callable x -> gradient vector; defaults to the module-level gradient.
    hess: callable x -> Hessian matrix; defaults to the module-level hessian.
    tol:  stop when the squared step length drops below this value.
    imax: maximum number of iterations.

    Returns W, an (len(x0), k) array whose columns are the iterates
    (column 0 is x0 itself).
    """
    if grad is None:
        grad = gradient
    if hess is None:
        hess = hessian
    x0 = np.asarray(x0, dtype=float)

    print('初始点为:')
    print(x0, '\n')  # was printing the global X instead of the argument

    # W was hard-coded to 2 rows; size it to the actual problem dimension.
    W = np.zeros((x0.size, imax))
    W[:, 0] = x0
    x = x0
    delta = 1.0
    alpha = 1.0  # full Newton step
    i = 1

    while i < imax and delta > tol:
        # Solve G p = -g directly instead of forming the inverse.
        p = -np.linalg.solve(hess(x), grad(x))
        x_prev = x
        x = x + alpha * p
        W[:, i] = x
        delta = float(np.sum((x - x_prev) ** 2))
        print('第', i, '次迭代结果:')
        print(x, '\n')
        i += 1

    return W[:, 0:i]  # trace of iterates actually produced

# Start from X0 = [1, 1, 0, 0] as the problem statement specifies; the old
# 2-D starting point [-1.2, 1] is dimensionally incompatible with the 4x4 A.
x0 = np.array([1.0, 1.0, 0.0, 0.0])
W = newton(x0)

# Plot the trajectory of the first two coordinates of the iterates.
plt.plot(W[0, :], W[1, :], 'g*', W[0, :], W[1, :])
plt.show()