import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

# Gradient descent
# Defaults: learning rate 0.01, 100 training iterations
def logistic_grad(X, y, sample_size, learning_rate=0.01, num_iterations=100):
    """Train logistic-regression weights by (modified) gradient descent.

    Args:
        X: 2-D array of shape (sample_size, n_features).
        y: 1-D array of 0/1 labels, length sample_size.
        sample_size: number of training samples (callers pass X.shape[0]).
        learning_rate: step size per iteration.
        num_iterations: number of gradient steps.

    Returns:
        1-D weight vector of length n_features (no intercept term).
    """
    # Initialize all weights to zero.
    weights = np.zeros(X.shape[1])

    for _ in range(num_iterations):
        # Linear score z = X @ w, squashed to (0, 1) by the sigmoid.
        y_predicted = sigmoid(np.dot(X, weights))

        # Error-driven update. This is NOT the standard log-loss gradient
        # X^T (p - y) / m (the original computed that too, then discarded
        # it every iteration — dead work, removed here); it instead weights
        # each residual t by (sign(t) - 0.5) * 2 * t**2, which the original
        # author found converges in ~100 iterations instead of ~2000.
        # NOTE(review): (np.sign(t) - 0.5) * 2 maps t > 0 -> 1 but
        # t < 0 -> -3 and t == 0 -> -1, so positive and negative errors are
        # scaled asymmetrically — confirm this is intentional
        # (np.sign(t) * t**2 would be the symmetric squared-error variant).
        t = y_predicted - y
        gradient = np.dot(X.T, (np.sign(t) - 0.5) * 2 * (t ** 2)) / sample_size
        weights -= learning_rate * gradient

    return weights

def sigmoid(z):
    """Logistic function: map any real score (scalar or array) into (0, 1)."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)

def predict(weights, X):
    """Return hard 0/1 class labels (a Python list) for the rows of X.

    Scores each row with the linear model X @ weights, maps the scores to
    probabilities via the sigmoid, and thresholds at 0.5 (ties go to 1).
    """
    probabilities = sigmoid(np.dot(X, weights))
    return [int(p >= 0.5) for p in probabilities]

def accuracy(y_true, y_pred):
    """Fraction of positions where y_pred agrees with y_true."""
    matches = np.asarray(y_true) == np.asarray(y_pred)
    return np.sum(matches) / len(y_true)

# Newton's method
# Runs 100 training iterations by default
def logistic_newton(X, y, sample_size, num_iterations=100):
    """Train logistic-regression weights with Newton's method.

    Args:
        X: 2-D array of shape (n_samples, n_features).
        y: 1-D array of 0/1 labels, length n_samples.
        sample_size: kept for interface symmetry with logistic_grad; the
            sample count used internally is taken from X.shape[0].
        num_iterations: number of Newton steps (default 100, matching the
            previously hard-coded count).

    Returns:
        1-D weight vector of length n_features (no intercept term).
    """
    m, n = X.shape
    # Initialize all weights to zero.
    weights = np.zeros(n)

    for _ in range(num_iterations):
        # Predicted probabilities p = sigmoid(X @ w).
        y_predicted = sigmoid(np.dot(X, weights))
        # Gradient of the mean log-loss: X^T (p - y) / m.
        gradient = np.dot(X.T, (y_predicted - y)) / m
        # Hessian: X^T diag(p * (1 - p)) X / m.
        hessian = np.dot(X.T, (y_predicted * (1 - y_predicted))[:, np.newaxis] * X) / m
        # Solve H @ step = gradient rather than forming inv(H) explicitly:
        # cheaper and numerically more stable, mathematically the same step.
        # NOTE(review): H can become singular on separable data; solve()
        # raises LinAlgError then, just as inv() did before.
        weights -= np.linalg.solve(hessian, gradient)
    return weights

def main():
    """Fit the toy dataset with both solvers and report their accuracy."""
    # 17 samples x 6 integer-encoded categorical features.
    # Original feature names, kept for reference:
    # feature_names = ['色泽', '根蒂', '敲声', '纹理', '脐部', '触感']
    X = np.array([
        [0, 0, 0, 0, 0, 0],
        [1, 0, 1, 0, 0, 0],
        [1, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0],
        [2, 0, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 1],
        [1, 1, 0, 1, 1, 1],
        [1, 1, 0, 0, 1, 0],
        [1, 1, 1, 1, 1, 0],
        [0, 2, 2, 0, 2, 1],
        [2, 2, 2, 2, 2, 0],
        [2, 0, 0, 2, 2, 1],
        [0, 1, 0, 1, 0, 0],
        [2, 1, 1, 1, 0, 0],
        [1, 1, 0, 0, 1, 1],
        [2, 0, 0, 2, 2, 0],
        [0, 0, 1, 1, 1, 0],
    ])
    # Labels: the first 8 samples are positive, the remaining 9 negative.
    y = np.array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])

    # Fixed seed so the 80/20 split (and therefore the scores) is reproducible.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    # Train, predict, and score each solver in turn; print order matches the
    # original script (gradient descent first, then Newton's method).
    solvers = (
        ("梯度下降法", logistic_grad),
        ("牛顿法", logistic_newton),
    )
    for label, trainer in solvers:
        weights = trainer(X_train, y_train, X_train.shape[0])
        print(f"{label}-模型参数: {weights}")
        predictions = predict(weights, X_test)
        score = accuracy(y_test, predictions)
        print(f"{label}-模型准确率: {score}")

# Script entry point: run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()