import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.preprocessing import scale
from sklearn.linear_model import LogisticRegression

# Load the breast-cancer dataset; keep only the first two features,
# standardize them, and prepend a bias column of ones so the intercept
# can be learned as theta[0].
data = load_breast_cancer()
X, y = scale(data['data'][:, :2]), data['target'].reshape(-1, 1)
X = np.c_[np.ones(len(X)), X]  # X already holds exactly the 2 selected columns


def sigmoid(z):
    """Logistic function: squash a real-valued input into the open interval (0, 1)."""
    return np.reciprocal(np.exp(-z) + 1.0)


# Full-batch gradient descent: every update uses the gradient over ALL samples.
n_iterations = 50000
learn_rate = 0.01
theta1 = np.zeros((3, 1))  # [bias, w1, w2]

for _ in range(n_iterations):
    # Summed (not averaged) gradient of the logistic loss over the whole dataset.
    g = X.T.dot(sigmoid(X.dot(theta1)) - y)
    theta1 -= learn_rate * g
print("my1:", theta1)
# Stochastic gradient descent: each update uses ONE randomly chosen sample.
theta2 = np.zeros((3, 1))
for _ in range(50000):
    rand_index = np.random.randint(len(X))
    # Slicing (rather than indexing) keeps the 2-D row shape for the matrix algebra below.
    X_, y_ = X[rand_index:rand_index + 1], y[rand_index:rand_index + 1]
    g = X_.T.dot(sigmoid(X_.dot(theta2)) - y_)  # gradient from the current sample only
    theta2 -= learn_rate * g
print("my2:", theta2)
# Mini-batch gradient descent: each update averages the gradient over a
# small contiguous window of samples.
batch_size = 10
theta3 = np.zeros((3, 1))
for _ in range(50000):
    # np.random.randint's upper bound is exclusive; the +1 lets the final
    # window (the one ending at the last sample) also be drawn. Without it
    # the last sample could never appear in any batch.
    rand_index = np.random.randint(len(X) - batch_size + 1)
    X_batch = X[rand_index:rand_index + batch_size]
    y_batch = y[rand_index:rand_index + batch_size]
    g = X_batch.T.dot(sigmoid(X_batch.dot(theta3)) - y_batch) / batch_size  # mean mini-batch gradient
    theta3 -= learn_rate * g
print("my3:", theta3)

# Compare with scikit-learn's LogisticRegression.
# fit_intercept=False because X already carries an explicit bias column of ones.
# A very large C effectively disables the default L2 penalty, making the fit
# comparable to the unregularized gradient-descent runs above (with the
# default C=1.0 the coefficients would be shrunk and the comparison misleading).
lr = LogisticRegression(fit_intercept=False, C=1e12, max_iter=100000, tol=1e-8)
lr.fit(X, y.ravel())
print("Scikit-learn参数:", lr.intercept_, lr.coef_)
