import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score


# 1. Activation function implementations
def sigmoid(x):
    """Numerically stable sigmoid: maps inputs into (0, 1), used for binary-class output.

    The naive form 1 / (1 + exp(-x)) overflows in exp() for large-magnitude
    negative x. This version only ever exponentiates -|x|, which is bounded
    above by 1, so it never overflows; the two np.where branches are the
    algebraically equivalent forms for x >= 0 and x < 0.

    Args:
        x: scalar or ndarray of logits.
    Returns:
        Element-wise sigmoid of x (ndarray, same shape as x).
    """
    z = np.exp(-np.abs(x))
    return np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
def tanh(x):
    """Hyperbolic-tangent activation: maps inputs into (-1, 1).

    A common hidden-layer activation; this is a thin wrapper around
    NumPy's element-wise implementation.
    """
    return np.tanh(x)

def relu(x):
    """ReLU activation: element-wise max(0, x), a common hidden-layer choice.

    Implemented as a one-sided clip, which zeroes negatives and passes
    non-negative values through unchanged.
    """
    return np.clip(x, 0, None)


# 2. Cross-entropy loss function (binary classification)
def binary_cross_entropy(y_true, y_pred, epsilon=1e-10):
    """Mean binary cross-entropy loss.

    Args:
        y_true: ground-truth labels, shape (n_samples,), values in {0, 1}.
        y_pred: predicted positive-class probabilities, shape (n_samples,),
            values in [0, 1].
        epsilon: clamp margin that keeps log() away from 0.
    Returns:
        Scalar mean loss.
    """
    # Clamp probabilities into [eps, 1-eps] so neither log term hits log(0).
    p = np.clip(y_pred, epsilon, 1 - epsilon)
    # Per-sample log-likelihood: y*log(p) + (1-y)*log(1-p).
    log_likelihood = y_true * np.log(p) + (1 - y_true) * np.log(1 - p)
    return -np.mean(log_likelihood)


# 3. Perceptron model (a single neuron; without an activation it is a linear classifier)
class Perceptron:
    """A single linear neuron trained by full-batch gradient descent on MSE.

    Note: despite the name, there is no activation and no classic
    perceptron update rule — this is effectively a linear regressor;
    callers threshold the raw output to obtain class labels.
    """

    def __init__(self, learning_rate=0.01, epochs=100):
        self.learning_rate = learning_rate  # gradient-descent step size
        self.epochs = epochs                # number of full passes over the data
        self.weights = None                 # learned weight vector, set by fit()
        self.bias = None                    # learned intercept, set by fit()

    def fit(self, X, y):
        """Learn weights and bias by gradient descent on the MSE loss.

        Args:
            X: feature matrix, shape (n_samples, n_features).
            y: targets, shape (n_samples,).
        """
        n_samples, n_features = X.shape
        # Start from the zero model.
        self.weights = np.zeros(n_features)
        self.bias = 0

        for _ in range(self.epochs):
            # Forward pass: raw linear score z = X·w + b.
            scores = X @ self.weights + self.bias
            # Residual points along the negative MSE gradient.
            residual = y - scores
            # Step both parameters by the averaged gradient.
            self.weights += self.learning_rate * (X.T @ residual) / n_samples
            self.bias += self.learning_rate * np.mean(residual)

    def predict(self, X):
        """Return the raw linear output X·w + b (no activation applied)."""
        return X @ self.weights + self.bias


# 4. Logistic regression model (perceptron + sigmoid activation, for binary classification)
class LogisticRegression:
    """Binary logistic regression trained with full-batch gradient descent
    on the cross-entropy loss (a linear neuron with sigmoid output)."""

    def __init__(self, learning_rate=0.01, epochs=1000):
        self.learning_rate = learning_rate  # gradient-descent step size
        self.epochs = epochs                # number of training iterations
        self.weights = None                 # learned weight vector, set by fit()
        self.bias = None                    # learned intercept, set by fit()

    def fit(self, X, y):
        """Fit weights and bias by minimizing cross-entropy.

        Args:
            X: feature matrix, shape (n_samples, n_features).
            y: binary labels, shape (n_samples,), values in {0, 1}.
        """
        n_samples, n_features = X.shape
        self.weights = np.zeros(n_features)
        self.bias = 0

        for _ in range(self.epochs):
            # Forward pass: sigmoid of the linear score gives P(y=1 | x).
            probs = 1 / (1 + np.exp(-(X @ self.weights + self.bias)))
            # Cross-entropy gradients; (probs - y) is the prediction error.
            grad_w = (1 / n_samples) * (X.T @ (probs - y))
            grad_b = (1 / n_samples) * np.sum(probs - y)
            # Step downhill.
            self.weights -= self.learning_rate * grad_w
            self.bias -= self.learning_rate * grad_b

    def predict_proba(self, X):
        """Return the predicted positive-class probability for each row of X."""
        scores = X @ self.weights + self.bias
        return 1 / (1 + np.exp(-scores))

    def predict(self, X, threshold=0.5):
        """Return hard 0/1 labels by thresholding predict_proba at *threshold*."""
        return (self.predict_proba(X) >= threshold).astype(int)


# Test / demo code
if __name__ == "__main__":
    # Build a synthetic binary-classification problem.
    features, labels = make_classification(
        n_samples=1000, n_features=5, n_informative=3, random_state=42
    )
    X_tr, X_te, y_tr, y_te = train_test_split(
        features, labels, test_size=0.2, random_state=42
    )

    # Train and evaluate the logistic-regression model.
    clf = LogisticRegression(learning_rate=0.01, epochs=5000)
    clf.fit(X_tr, y_tr)
    pred_te = clf.predict(X_te)
    proba_te = clf.predict_proba(X_te)
    loss_te = binary_cross_entropy(y_te, proba_te)
    print(f"逻辑回归测试集准确率: {accuracy_score(y_te, pred_te):.4f}")
    print(f"逻辑回归测试集交叉熵损失: {loss_te:.4f}")

    # Baseline: the linear "Perceptron". Its regression targets are the
    # labels remapped from {0, 1} to {-1, 1}.
    signed_targets = 2 * y_tr - 1
    baseline = Perceptron(learning_rate=0.01, epochs=5000)
    baseline.fit(X_tr, signed_targets)

    # Classify by the sign of the raw score, then map {-1, 1} back to {0, 1}.
    signed_pred = np.sign(baseline.predict(X_te))
    binary_pred = (signed_pred + 1) // 2
    print(f"感知器测试集准确率: {accuracy_score(y_te, binary_pred):.4f}")