import numpy as np

class LogisticRegression:
    """Multinomial (softmax) logistic regression trained with mini-batch SGD."""

    def __init__(self, lr=0.01, epochs=1000, batch_size=32):
        """
        Initialize the model hyperparameters.

        Parameters:
        lr (float): learning rate for gradient descent
        epochs (int): number of passes over the training data
        batch_size (int): mini-batch size
        """
        self.lr = lr
        self.epochs = epochs
        self.batch_size = batch_size
        self.w = None         # weight matrix, shape (n_features, n_classes)
        self.b = None         # bias vector, shape (n_classes,)
        self.classes_ = None  # sorted original class labels, set by fit()

    def softmax(self, z):
        """
        Softmax activation: map each row of logits to a probability
        distribution.  The row-wise max is subtracted first for numerical
        stability (prevents overflow in exp).
        """
        exp_z = np.exp(z - np.max(z, axis=1, keepdims=True))
        return exp_z / np.sum(exp_z, axis=1, keepdims=True)

    def fit(self, X, y):
        """
        Train the model with mini-batch gradient descent.

        Parameters:
        X (ndarray): training data, shape (n_samples, n_features)
        y (ndarray): training labels, shape (n_samples,).  Labels need not
            be 0..k-1 — they are mapped to contiguous indices internally
            and mapped back in predict().
        """
        m, n = X.shape
        # Map arbitrary label values to contiguous indices 0..k-1.  For
        # labels already equal to 0..k-1 this is the identity mapping, so
        # the previous behavior is preserved.
        self.classes_, y_idx = np.unique(y, return_inverse=True)
        k = len(self.classes_)
        self.w = np.zeros((n, k))
        self.b = np.zeros(k)

        # One-hot encode the index-mapped labels (vectorized, no Python loop).
        y_onehot = np.zeros((m, k))
        y_onehot[np.arange(m), y_idx] = 1

        for epoch in range(self.epochs):
            # Reshuffle every epoch so mini-batches differ between epochs.
            permutation = np.random.permutation(m)
            X_shuffled = X[permutation]
            y_shuffled = y_onehot[permutation]

            loss = None
            for i in range(0, m, self.batch_size):
                X_batch = X_shuffled[i:i + self.batch_size]
                y_batch = y_shuffled[i:i + self.batch_size]
                # BUGFIX: the final batch may be smaller than batch_size;
                # normalize by the actual batch length, not self.batch_size,
                # otherwise the last batch's gradient is under-weighted.
                batch_len = X_batch.shape[0]

                # Forward pass: linear scores then softmax probabilities.
                z = np.dot(X_batch, self.w) + self.b
                predictions = self.softmax(z)

                # Cross-entropy loss; clip probabilities so a confident
                # wrong prediction cannot produce log(0) = -inf / NaN.
                loss = -np.mean(
                    np.sum(y_batch * np.log(np.clip(predictions, 1e-15, 1.0)),
                           axis=1))

                # Gradients of the mean cross-entropy w.r.t. w and b.
                dw = np.dot(X_batch.T, (predictions - y_batch)) / batch_len
                db = np.sum(predictions - y_batch, axis=0) / batch_len

                # Parameter update.
                self.w -= self.lr * dw
                self.b -= self.lr * db

            if epoch % 100 == 0:
                print(f"Epoch {epoch}, Loss: {loss}")

    def predict(self, X):
        """
        Predict class labels with the trained model.

        Parameters:
        X (ndarray): input data, shape (n_samples, n_features)

        Returns:
        ndarray: predicted labels, expressed in the original label values
            seen by fit() (identical to the old index-based output when the
            training labels were 0..k-1).
        """
        z = np.dot(X, self.w) + self.b
        prob = self.softmax(z)
        return self.classes_[np.argmax(prob, axis=1)]

    def score(self, X, y):
        """
        Compute classification accuracy.

        Parameters:
        X (ndarray): test data, shape (n_samples, n_features)
        y (ndarray): true labels, shape (n_samples,)

        Returns:
        float: fraction of samples predicted correctly
        """
        y_pred = self.predict(X)
        return np.mean(y_pred == y)
