# 第二题：感知器、基本激活函数、交叉熵损失函数、逻辑回归实现

import numpy as np
import matplotlib.pyplot as plt

# 1. 基本激活函数实现
class ActivationFunctions:
    """Common activation functions and their derivatives (all element-wise)."""

    @staticmethod
    def sigmoid(x):
        """Sigmoid: 1 / (1 + exp(-x)), mapping inputs into (0, 1).

        The input is clipped to [-500, 500] so np.exp cannot overflow for
        large-magnitude negative inputs; within float64 precision the result
        is unchanged because sigmoid saturates at 0/1 long before |x| = 500.
        """
        x = np.clip(x, -500, 500)
        return 1 / (1 + np.exp(-x))

    @staticmethod
    def sigmoid_derivative(x):
        """Derivative of sigmoid: s(x) * (1 - s(x))."""
        s = ActivationFunctions.sigmoid(x)
        return s * (1 - s)

    @staticmethod
    def tanh(x):
        """Hyperbolic tangent, mapping inputs into (-1, 1)."""
        return np.tanh(x)

    @staticmethod
    def tanh_derivative(x):
        """Derivative of tanh: 1 - tanh(x)**2."""
        t = np.tanh(x)
        return 1 - t * t

    @staticmethod
    def relu(x):
        """ReLU: max(0, x)."""
        return np.maximum(0, x)

    @staticmethod
    def relu_derivative(x):
        """Derivative of ReLU: 1 where x > 0, else 0 (uses 0 at x == 0)."""
        return np.where(x > 0, 1.0, 0.0)
    


# 2. 感知器实现
class Perceptron:
    """Simple perceptron classifier with a step activation.

    Attributes:
        weights: array of length input_size + 1; weights[0] is the bias.
        errors: per-epoch misclassification counts from the last train() call.
    """

    def __init__(self, input_size, learning_rate=0.01, epochs=100):
        """
        Initialize the perceptron.
        :param input_size: number of input features
        :param learning_rate: step size for weight updates
        :param epochs: maximum number of passes over the training data
        """
        self.weights = np.zeros(input_size + 1)  # +1 for bias (index 0)
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.errors = []  # misclassification count per epoch

    def predict(self, x):
        """Predict the class (0 or 1) for a single sample."""
        # np.insert(x, 0, 1) prepends a constant 1 so the bias weight at
        # index 0 participates in the dot product like any other weight.
        x_with_bias = np.insert(x, 0, 1)
        # Weighted sum of inputs, then a step activation:
        # positive net input -> class 1, otherwise class 0.
        weighted_sum = np.dot(x_with_bias, self.weights)
        return 1 if weighted_sum > 0 else 0

    def train(self, X, y):
        """Train with the perceptron learning rule.

        :param X: feature matrix of shape (n_samples, n_features)
        :param y: binary target vector (0/1) of length n_samples
        :return: list of per-epoch misclassification counts
        """
        # Fix: reset the history so repeated train() calls don't append onto
        # epoch counts from earlier runs (previously the list kept growing).
        self.errors = []
        for epoch in range(self.epochs):
            errors = 0
            for xi, target in zip(X, y):
                prediction = self.predict(xi)
                update = self.learning_rate * (target - prediction)
                # Update weights (bias handled separately at index 0).
                self.weights[1:] += update * xi
                self.weights[0] += update  # bias update
                errors += int(update != 0.0)
            self.errors.append(errors)
            # Early stop once an epoch completes with no mistakes (converged).
            if errors == 0:
                break
        return self.errors

    def plot_decision_boundary(self, X, y):
        """Plot the decision boundary (2-D feature data only)."""
        if X.shape[1] != 2:
            print("只能绘制二维数据的决策边界")
            return

        # Scatter the training points by class.
        plt.figure(figsize=(10, 6))
        plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], marker='o', label='Class 0')
        plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], marker='x', label='Class 1')

        # Build a dense grid covering the data (with a 1-unit margin).
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01),
                             np.arange(y_min, y_max, 0.01))

        # Classify every grid point to shade the two decision regions.
        Z = np.array([self.predict(np.array([x, y])) for x, y in zip(xx.ravel(), yy.ravel())])
        Z = Z.reshape(xx.shape)

        plt.contourf(xx, yy, Z, alpha=0.3)
        plt.xlim(xx.min(), xx.max())
        plt.ylim(yy.min(), yy.max())
        plt.title('Perceptron Decision Boundary')
        plt.xlabel('Feature 1')
        plt.ylabel('Feature 2')
        plt.legend()
        plt.show()

# 3. 交叉熵损失函数实现
class CrossEntropyLoss:
    """Cross-entropy loss functions for binary and multi-class problems."""

    @staticmethod
    def binary_cross_entropy(y_true, y_pred):
        """Mean binary cross-entropy.

        :param y_true: ground-truth labels (0 or 1)
        :param y_pred: predicted probabilities in [0, 1]
        :return: scalar loss value
        """
        eps = 1e-15
        # Clamp predictions away from exactly 0 and 1 so log() stays finite.
        p = np.clip(y_pred, eps, 1 - eps)
        positive_term = y_true * np.log(p)
        negative_term = (1 - y_true) * np.log(1 - p)
        return -np.mean(positive_term + negative_term)

    @staticmethod
    def categorical_cross_entropy(y_true, y_pred):
        """Mean categorical cross-entropy.

        :param y_true: one-hot encoded ground-truth labels
        :param y_pred: predicted class probabilities (e.g. softmax output)
        :return: scalar loss value
        """
        eps = 1e-15
        # Same clamping trick as the binary case to avoid log(0).
        p = np.clip(y_pred, eps, 1 - eps)
        per_sample = np.sum(y_true * np.log(p), axis=1)
        return -np.mean(per_sample)

# 4. 逻辑回归实现
class LogisticRegression:
    """Logistic regression trained with batch gradient descent.

    Attributes:
        weights: learned weight vector (set by fit()).
        bias: learned intercept (set by fit()).
        losses: per-epoch training loss from the last fit() call.
    """

    def __init__(self, learning_rate=0.01, epochs=1000):
        """
        Initialize the model.
        :param learning_rate: gradient-descent step size
        :param epochs: number of full passes over the training data
        """
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.weights = None
        self.bias = None
        self.losses = []  # training-loss history

    def fit(self, X, y):
        """
        Fit the model with batch gradient descent.
        :param X: feature matrix of shape (n_samples, n_features)
        :param y: binary target vector (0/1) of length n_samples
        """
        n_samples, n_features = X.shape
        self.weights = np.zeros(n_features)
        self.bias = 0
        # Fix: reset the history so repeated fit() calls don't append onto
        # the loss curve from earlier runs (previously the list kept growing,
        # which also corrupted plot_loss()).
        self.losses = []

        for i in range(self.epochs):
            # Forward pass: linear model followed by sigmoid.
            linear_model = np.dot(X, self.weights) + self.bias
            y_predicted = ActivationFunctions.sigmoid(linear_model)

            # Track the cross-entropy loss for this epoch.
            loss = CrossEntropyLoss.binary_cross_entropy(y, y_predicted)
            self.losses.append(loss)

            # Gradient of the mean cross-entropy w.r.t. weights and bias.
            dw = (1 / n_samples) * np.dot(X.T, (y_predicted - y))
            db = (1 / n_samples) * np.sum(y_predicted - y)

            # Gradient-descent parameter update.
            self.weights -= self.learning_rate * dw
            self.bias -= self.learning_rate * db

            # Progress report every 100 epochs.
            if i % 100 == 0:
                print(f"Epoch {i}, Loss: {loss:.4f}")

    def predict_proba(self, X):
        """
        Predict class-1 probabilities.
        :param X: feature matrix
        :return: array of probabilities in (0, 1)
        """
        linear_model = np.dot(X, self.weights) + self.bias
        return ActivationFunctions.sigmoid(linear_model)

    def predict(self, X, threshold=0.5):
        """
        Predict class labels.
        :param X: feature matrix
        :param threshold: probability cutoff for class 1
        :return: array of 0/1 labels
        """
        probabilities = self.predict_proba(X)
        return (probabilities >= threshold).astype(int)

    def plot_loss(self):
        """Plot the training-loss curve recorded by fit()."""
        plt.figure(figsize=(10, 6))
        plt.plot(range(len(self.losses)), self.losses)
        plt.title('Cross-Entropy Loss During Training')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.grid(True)
        plt.show()

    def plot_decision_boundary(self, X, y):
        """Plot the decision boundary (2-D feature data only)."""
        if X.shape[1] != 2:
            print("只能绘制二维数据的决策边界")
            return

        # Scatter the training points by class.
        plt.figure(figsize=(10, 6))
        plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], marker='o', label='Class 0')
        plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], marker='x', label='Class 1')

        # Build a dense grid covering the data (with a 1-unit margin).
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01),
                             np.arange(y_min, y_max, 0.01))

        # Classify every grid point to shade the two decision regions.
        Z = self.predict(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)

        plt.contourf(xx, yy, Z, alpha=0.3)
        plt.xlim(xx.min(), xx.max())
        plt.ylim(yy.min(), yy.max())
        plt.title('Logistic Regression Decision Boundary')
        plt.xlabel('Feature 1')
        plt.ylabel('Feature 2')
        plt.legend()
        plt.show()

# 测试代码
if __name__ == "__main__":
    # --- 1. Activation functions demo ---
    print("测试激活函数:")
    sample = np.array([-2, -1, 0, 1, 2])
    print(f"Sigmoid: {ActivationFunctions.sigmoid(sample)}")
    print(f"Tanh: {ActivationFunctions.tanh(sample)}")
    print(f"ReLU: {ActivationFunctions.relu(sample)}")
    print()

    # --- 2. Perceptron demo on a tiny linearly separable set ---
    print("测试感知器:")
    X_p = np.array([[2, 1],
                    [3, 1],
                    [1, 2],
                    [0, 3]])
    y_p = np.array([0, 0, 1, 1])

    perceptron = Perceptron(input_size=2, learning_rate=0.1, epochs=10)
    epoch_errors = perceptron.train(X_p, y_p)
    print(f"训练错误: {epoch_errors}")

    probe = np.array([1.5, 1.5])
    probe_label = perceptron.predict(probe)
    print(f"测试点 {probe} 的预测类别: {probe_label}")
    print()

    # --- 3. Cross-entropy loss demo ---
    print("测试交叉熵损失函数:")
    targets = np.array([0, 1, 1, 0])
    predictions = np.array([0.1, 0.9, 0.8, 0.2])
    bce = CrossEntropyLoss.binary_cross_entropy(targets, predictions)
    print(f"二分类交叉熵损失: {bce:.4f}")
    print()

    # --- 4. Logistic regression demo on two Gaussian blobs ---
    print("测试逻辑回归:")
    # Fixed seed so the sampled blobs (and thus the output) are reproducible.
    np.random.seed(42)
    blob0 = np.random.normal(loc=[2, 2], scale=[1, 1], size=(50, 2))
    blob1 = np.random.normal(loc=[6, 6], scale=[1, 1], size=(50, 2))
    X_lr = np.concatenate((blob0, blob1), axis=0)
    y_lr = np.concatenate((np.zeros(50), np.ones(50)))

    logistic_reg = LogisticRegression(learning_rate=0.1, epochs=1000)
    logistic_reg.fit(X_lr, y_lr)

    query = np.array([[4, 4]])
    query_label = logistic_reg.predict(query)
    query_prob = logistic_reg.predict_proba(query)
    print(f"测试点 {query} 的预测类别: {query_label[0]}, 概率: {query_prob[0]:.4f}")

    # Training-set accuracy.
    fitted = logistic_reg.predict(X_lr)
    print(f"模型准确率: {np.mean(fitted == y_lr):.4f}")