# -*- coding: utf-8 -*-

import numpy as np
import warnings
warnings.filterwarnings("ignore")

def gradient_descent(initial_theta, eta=0.05, n_iters=1000, epslion=1e-8, grad=None):
    '''
    Minimize a scalar objective with plain gradient descent.

    :param initial_theta: initial parameter value (float)
    :param eta: learning rate (float)
    :param n_iters: maximum number of iterations (int)
    :param epslion: convergence tolerance (float); iteration stops once the
        absolute parameter change falls below this value
        (name kept misspelled for backward compatibility with keyword callers)
    :param grad: optional callable ``grad(theta) -> float`` giving the
        objective's gradient; defaults to the gradient of
        f(theta) = (theta - 3)**2, i.e. 2*(theta - 3)
    :return: the parameter value after training (float)
    '''
    #   请在此添加实现代码   #
    #********** Begin *********#
    if grad is None:
        # Default objective: f(theta) = (theta - 3)^2, so f'(theta) = 2*(theta - 3).
        def grad(t):
            return 2.0 * (t - 3.0)

    theta = initial_theta

    for _ in range(n_iters):
        # One descent step: theta <- theta - eta * f'(theta).
        theta_new = theta - eta * grad(theta)

        # Converged when the step size drops below the tolerance.
        # Fix: apply the final accepted step before returning instead of
        # discarding it (the original returned the stale pre-step theta).
        if abs(theta_new - theta) < epslion:
            theta = theta_new
            break

        theta = theta_new

    return theta
    #********** End **********#