import torch
import sys
sys.path.append("..")  # 将上级目录添加到模块搜索路径中
from GradientDescent import GradientDescent  # 导入 GradientDescent 类
import matplotlib.pyplot as plt
import yaml
sys.path.append(r'C:\Users\Administrator\Desktop\programm\python\最优化\optmize\UnconstrainedAlg')
from LineSearch import LineSearch
import numpy as np

def set_line_search(line_search, max_search_iter, f, grad, x, d):
    """Run the requested inexact line search and return the step size.

    Args:
        line_search: strategy name — one of "armijo", "goldstein", "wolf"
            (these mirror the LineSearch method names; "wolf" presumably
            means the Wolfe conditions — TODO confirm against LineSearch).
        max_search_iter: maximum number of line-search iterations.
        f: the objective function.
        grad: gradient of f at x.
        x: current iterate (a detached tensor).
        d: descent direction.

    Returns:
        The step size (learning rate) chosen by the line search.

    Raises:
        ValueError: if `line_search` is not a known strategy name.
            (The original code silently fell through and raised
            UnboundLocalError on `new_lr` in that case.)
    """
    searcher = LineSearch(max_search_iter)
    # Dispatch table instead of three independent `if`s: guarantees every
    # path either returns a step size or fails with a clear error.
    strategies = {
        "armijo": searcher.armijo,
        "goldstein": searcher.goldstein,
        "wolf": searcher.wolf,
    }
    try:
        strategy = strategies[line_search]
    except KeyError:
        raise ValueError(f"unknown line search strategy: {line_search!r}") from None
    return strategy(f, grad, x, d)

def gradient_descent(function, x, params):
    """Minimize `function` by gradient descent, starting from tensor `x`.

    Args:
        function: callable mapping a tensor to a scalar loss tensor.
        x: torch tensor with requires_grad=True; updated in place by the
            optimizer's step().
        params: dict with keys 'max_decent_iter' (max iterations),
            'line_search' (strategy name or falsy to disable),
            'max_search_iter' (line-search iteration cap) and
            'precision' (gradient-norm convergence threshold).

    Side effects: prints progress each iteration and shows a matplotlib
    plot of the objective history when finished.
    """
    # Build the project-local optimizer over the single parameter tensor.
    optimizer = GradientDescent([x], lr=0.1)
    max_iter = params['max_decent_iter']
    line_search = params['line_search']
    max_search_iter = params['max_search_iter']
    precision = params['precision']
    history = []

    for i in range(max_iter):
        loss = function(x)
        # Store a plain Python float; avoids the deprecated `.data` accessor.
        history.append(loss.item())
        loss.backward()        # accumulate gradient into x.grad
        gradient = x.grad.data
        d = -gradient          # steepest-descent direction
        print('torch.dot(gradient, d)',torch.dot(gradient, d))

        # Inexact line search picks the step size for this iteration.
        if line_search:
            new_lr = set_line_search(line_search, max_search_iter, function, gradient, x.detach(), d)
            print(">>>>new_lr<<<<<<",new_lr)
            # BUG FIX: the original discarded the computed step size and
            # hard-coded set_lr(0.5); use the line-search result instead.
            optimizer.set_lr(new_lr)

        optimizer.step()

        print("=================Iteration {}, f(x) = {} =================\n".format(i+1, function(x).item()))
        print("x = {} \n".format(x.detach().numpy()))
        print("(x.grad norm) ------->",torch.norm(x.grad))

        # Converged once the gradient norm falls below the requested precision.
        if torch.norm(x.grad) < precision:
            print(torch.norm(x.grad))
            print("Gradient descent converged at iteration:", i+1)
            break

        x.grad.zero_()  # reset accumulated gradients before the next pass

    def plot_history(hist):
        """Plot recorded objective values against the iteration count."""
        plt.plot(hist)
        plt.xlabel('Iteration')
        plt.ylabel('Function Value')
        plt.title('Function Value vs Iteration')
        plt.grid(True)
        plt.show()
    plot_history(history)


###################2.函数########################
# 定义向量函数,错误的定义函数会导致梯度不变
def function(x):
    """Quadratic test objective: ||x + y||^2 + <x, y> for a fixed y.

    Note: a wrongly defined objective (e.g. one that detaches x) would
    leave the gradient unchanged across iterations.
    """
    offset = torch.tensor([-7.0, -2.0, -3.0], requires_grad=False)
    shifted = x + offset
    return (shifted * shifted).sum() + torch.dot(x, offset)

if __name__ == "__main__":
    # Load hyper-parameters for the optimizer from the YAML config file.
    with open('params.yml', 'r') as cfg:
        params = yaml.safe_load(cfg)
    print(params)

    # Starting point; requires_grad so autograd tracks it.
    x = torch.tensor([1.0, 5.0, 7.0], requires_grad=True)
    gradient_descent(function, x, params)