import numpy as np

class MyLogisticRegression:
    def __init__(self, learn_rate=0.01, max_iter=1000,
                 solver="gddc",penalty="l2",C=1.0,tol=1e-4):
        self.w_ = None
        self.b_ = None
        self.losses_ = None
        self.learn_rate = learn_rate  # 学习率
        self.solver = solver  # 优化器
        self.max_iter = max_iter  # 最大迭代次数
        self.penalty = penalty  # 正则化类型
        self.C = C  # 正则化强度（值越小，正则化越强）
        self.tol = tol  # 损失变化的容忍度(早停条件)
        self.grad_w = None



    def sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    # 计算损失（交叉熵 + 正则化）
    def loss(self, X, y):
        z = np.dot(X, self.w_) + self.b_
        f_x = self.sigmoid(z)
        loss = -np.mean(y * np.log(f_x + 1e-10) + (1 - y) * np.log(1 - f_x + 1e-10))

        # 添加正则化项
        if self.penalty == 'l2':
            reg_term = (1 / (2 * self.C)) * np.sum(self.w_ ** 2)
        elif self.penalty == 'l1':
            reg_term = (1 / self.C) * np.sum(np.abs(self.w_))
        else:
            reg_term = 0

        return loss + reg_term

    def gradient(self, X, y):
        # 参数准备
        z = np.dot(X, self.w_) + self.b_
        f_x = self.sigmoid(z)
        error = f_x - y
        m = X.shape[0]

        # 梯度计算
        grad_w = (1 / m) * np.dot(X.T, error)
        grad_b = np.mean(error)

        # 添加正则化项的梯度
        if self.penalty == 'l2':
            grad_w += (1 / self.C) * self.w_
        elif self.penalty == 'l1':
            grad_w += (1 / self.C) * np.sign(self.w_)

        return grad_w,grad_b

    def fit(self, X, y):
        # 初始化参数
        n_features = X.shape[1]
        self.w_ = np.zeros(n_features).reshape(-1, 1)
        self.b_ = 0.0
        self.losses_ = []  # 计算损失变化

        # 梯度下降
        if self.solver == 'gddc':
            for i in range(self.max_iter):
                grad_w, grad_b = self.gradient(X, y)

                # 更新参数
                self.w_ -= self.learn_rate * grad_w
                self.b_ -= self.learn_rate * grad_b

                # 计算损失并检查收敛
                cuurent_loss = self.loss(X, y)
                self.losses_.append(cuurent_loss)

                # 最新和次新损失降值
                if i > 0 and abs(self.losses_[-1] - self.losses_[-2]) < self.tol:
                    print(f'梯度下降结束，下降了{i}次')
                    break

    # 概率预测
    def predict_proba(self, X):
        z = np.dot(X, self.w_) + self.b_
        return self.sigmoid(z).reshape(-1, 1)  # 返回列向量

    # 类别预测（默认阈值0.5）
    def predict(self, X, threshold=0.5):
        proba = self.predict_proba(X)
        return (proba >= threshold).astype(int).flatten() # 转换为0/1.展平为1维数组

    # 计算准确率
    def score(self, X, y):
        y_pred = self.predict(X)
        return np.mean(y == y_pred)

# elif self.solver == 'lbfgs':
# def lbfgs_optimize(grad_func, initial_theta, m=5, max_iter=100, epsilon=1e-5):
#     theta = initial_theta.copy()
#     n = theta.shape[0]
#     history = []  # 存储历史 (Δθ, Δg)
#
#     for k in range(max_iter):
#         g = grad_func(theta)
#         if np.linalg.norm(g) < epsilon:
#             break
#
#         # 构造搜索方向 d = -H_k * g
#         d = -two_loop_recursion(g, history)
#
#         # 线搜索（简化为固定步长）
#         alpha = 0.01
#         theta_new = theta + alpha * d
#         g_new = grad_func(theta_new)
#
#         # 更新历史
#         delta_theta = theta_new - theta
#         delta_g = g_new - g
#         history.append((delta_theta, delta_g))
#         if len(history) > m:
#             history.pop(0)
#
#         theta = theta_new
#     return theta
#
#
# def two_loop_recursion(g, history):
#     """L-BFGS 双循环递归算法构造搜索方向"""
#     q = g.copy()
#     alpha_list = []
#     for delta_theta, delta_g in reversed(history):
#         rho = 1.0 / np.dot(delta_g, delta_theta)
#         alpha = rho * np.dot(delta_theta, q)
#         q -= alpha * delta_g
#         alpha_list.append(alpha)
#
#     # 初始 H0 设为单位矩阵
#     r = q.copy()
#     for delta_theta, delta_g in history:
#         rho = 1.0 / np.dot(delta_g, delta_theta)
#         beta = rho * np.dot(delta_g, r)
#         r += delta_theta * (alpha_list.pop() - beta)
#     return r
#
#
# return self