import numpy as np

'''
Batch (full) gradient descent
'''
def grad_desc(train_data, train_labels):
    """Batch gradient descent for linear regression.

    Minimizes ||Xw - y||^2 using the full training set on every iteration.

    Args:
        train_data: m x n feature rows; callers prepend a constant 1
            column as the bias term (see the demo in __main__).
        train_labels: m target values.

    Returns:
        1-D numpy array of the n fitted coefficients.
    """
    # np.matrix is deprecated by NumPy; plain ndarrays behave identically here.
    features = np.asarray(train_data, dtype=float)   # shape (m, n)
    targets = np.asarray(train_labels, dtype=float)  # shape (m,)
    n = features.shape[1]  # number of coefficients to fit
    alpha = 0.001          # learning rate (step size)
    max_cycles = 100       # fixed iteration budget
    weights = np.ones(n)   # initial coefficients, all ones
    for _ in range(max_cycles):
        # Residual Xw - y, shape (m,)
        error = features.dot(weights) - targets
        # Gradient step: w <- w - alpha * X^T (Xw - y)
        weights = weights - alpha * features.T.dot(error)
    # Return a flat coefficient array (matches the historical interface).
    return np.asarray(weights).flatten()


'''
Stochastic gradient descent
'''
def advanced_random_grad_desc(train_data, train_labels):
    """Stochastic gradient descent: one randomly chosen sample per update.

    Each epoch visits every sample exactly once in a random order
    (sampling without replacement).

    Args:
        train_data: m x n feature rows; callers prepend a constant 1
            column as the bias term.
        train_labels: m target values.

    Returns:
        1-D numpy array of the n fitted coefficients.
    """
    features = np.asarray(train_data, dtype=float)   # shape (m, n)
    targets = np.asarray(train_labels, dtype=float)  # shape (m,)
    m, n = features.shape
    alpha = 0.001          # learning rate (step size)
    weights = np.ones(n)   # initial coefficients, all ones
    max_cycles = 500       # number of epochs
    for _ in range(max_cycles):
        remaining = list(range(m))  # sample indices not yet used this epoch
        for _ in range(m):
            # BUG FIX: the original indexed data_mat with the *position* in
            # the shrinking `remaining` list instead of the row index it
            # holds, which biased updates toward early rows and could
            # reuse/skip samples within an epoch.
            pos = int(np.random.uniform(0, len(remaining)))
            sample = remaining[pos]
            # Prediction error for this single sample: x·w - y
            h = np.dot(features[sample], weights) - targets[sample]
            # Single-sample gradient step: w <- w - alpha * h * x
            weights = weights - alpha * h * features[sample]
            del remaining[pos]  # without replacement within the epoch
    return weights

if __name__ == '__main__':
    # Toy training set: first column is the constant 1 bias term,
    # remaining columns are features.
    train_data = [[1, 2, 3], [1, 8, 1], [1, 7, 4], [1, 5, 6], [1, 4, 3], [1, 2, 9], [1, 1, 7]]
    train_labels = [25, 41, 53, 55, 34, 58, 43]
    # Fit with batch gradient descent and with stochastic gradient descent.
    print(grad_desc(train_data, train_labels))
    print(advanced_random_grad_desc(train_data, train_labels))
    # Removed leftover debug statement `print(np.eye(2))` — unrelated to the demo.
