{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "acc830c8",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "class activation():\n",
    "    def sigmoid(x):\n",
    "        return 1/(1+np.exp(-x))\n",
    "    def tanh(x):\n",
    "        return np.tanh(x)\n",
    "    def relu(x):\n",
    "        return np.maximum(0,x)\n",
    "    def leaky_relu(x,aplha=0.01):\n",
    "        return np.where(x>0,x,alpha*x)\n",
    "    def softmax(x):\n",
    "        x=np.exp(x)/np.sum(np.exp(x))\n",
    "        return x\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7184fca9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import pandas as pd\n",
    "from sklearn.datasets import load_iris\n",
    "\n",
    "class Perceptron():\n",
    "\tdef __init__(self):\n",
    "\t\t # w初始化为全1数组\n",
    "\t\tself.w = np.ones(len(data[0]) - 1, dtype = np.float32)\n",
    "\t\tself.b = 0\n",
    "\t\tself.rate = 0.5 # 初始化学习率\n",
    "\t\n",
    "\t# 感知机训练, 找出最合适的w, b\n",
    "\tdef fit(self, x_train, y_train):\n",
    "\t\twhile True:\n",
    "\t\t\tflag = True # 标记是否存在误分类数据\n",
    "\t\t\tfor i in range(len(x_train)): # 遍历训练数据\n",
    "\t\t\t\txi = x_train[i]\n",
    "\t\t\t\tyi = y_train[i]\n",
    "\t\t\t\t # 判断 yi * (wx + b) <= 0\n",
    "\t\t\t\tif yi * (np.inner(self.w, xi) + self.b) <= 0:\n",
    "\t\t\t\t\tflag = False # 找到误分类数据, flag标记为False\n",
    "\t\t\t\t\t # 更新w, b值\n",
    "\t\t\t\t\tself.w += self.rate * np.dot(xi, yi)\n",
    "\t\t\t\t\tself.b += self.rate * yi\n",
    "\t\t\tif flag:\n",
    "\t\t\t\tbreak\n",
    "\t\t # 输出w = ? , b = ?\n",
    "\t\tprint('w = ' + str(self.w) + ', b = ' + str(self.b))\n",
    "# ----------------------------------------------------------------------\n",
    "# Attribution: adapted from a CSDN blog post by _dingzhen, licensed under\n",
    "# CC 4.0 BY-SA; keep this notice and the original link when redistributing:\n",
    "# https://blog.csdn.net/codedz/article/details/108707540"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "24eaaf4a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from sklearn.datasets import make_classification\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import accuracy_score\n",
    "\n",
    "\n",
    "# 1. Activation functions\n",
    "def sigmoid(x):\n",
    "    \"\"\"Sigmoid激活函数：将输入映射到(0,1)区间，用于二分类输出\"\"\"\n",
    "    return 1 / (1 + np.exp(-x))\n",
    "\n",
    "def relu(x):\n",
    "    \"\"\"ReLU激活函数：max(0, x)，常用于隐藏层\"\"\"\n",
    "    return np.maximum(0, x)\n",
    "\n",
    "\n",
    "# 2. Cross-entropy loss (binary classification)\n",
    "def binary_cross_entropy(y_true, y_pred, epsilon=1e-10):\n",
    "    \"\"\"\n",
    "    计算二分类交叉熵损失\n",
    "    Args:\n",
    "        y_true: 真实标签，形状(n_samples,)，值为0或1\n",
    "        y_pred: 预测概率，形状(n_samples,)，值在[0,1]区间\n",
    "        epsilon: 防止log(0)的微小值\n",
    "    Returns:\n",
    "        平均损失值\n",
    "    \"\"\"\n",
    "    # 限制预测值范围，避免log(0)或log(1)导致数值问题\n",
    "    y_pred = np.clip(y_pred, epsilon, 1 - epsilon)\n",
    "    # 交叉熵公式：-mean(y_true*log(y_pred) + (1-y_true)*log(1-y_pred))\n",
    "    loss = -np.mean(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))\n",
    "    return loss\n",
    "\n",
    "\n",
    "# 3. Perceptron model (a single neuron; a linear classifier when no activation is applied)\n",
    "class Perceptron:\n",
    "    def __init__(self, learning_rate=0.01, epochs=100):\n",
    "        self.learning_rate = learning_rate  # 学习率\n",
    "        self.epochs = epochs  # 训练轮次\n",
    "        self.weights = None  # 权重\n",
    "        self.bias = None  # 偏置\n",
    "\n",
    "    def fit(self, X, y):\n",
    "        \"\"\"\n",
    "        训练感知器（使用MSE损失的梯度下降）\n",
    "        Args:\n",
    "            X: 输入特征，形状(n_samples, n_features)\n",
    "            y: 标签，形状(n_samples,)，值为0或1\n",
    "        \"\"\"\n",
    "        n_samples, n_features = X.shape\n",
    "        # 初始化权重和偏置\n",
    "        self.weights = np.zeros(n_features)\n",
    "        self.bias = 0\n",
    "\n",
    "        for _ in range(self.epochs):\n",
    "            # 前向传播：z = w·X + b\n",
    "            linear_output = np.dot(X, self.weights) + self.bias\n",
    "            \n",
    "            # 计算误差（MSE损失的梯度方向）\n",
    "            error = y - linear_output\n",
    "            \n",
    "            # 权重更新：w = w + lr * X.T · error / n_samples\n",
    "            self.weights += self.learning_rate * np.dot(X.T, error) / n_samples\n",
    "            # 偏置更新：b = b + lr * mean(error)\n",
    "            self.bias += self.learning_rate * np.mean(error)\n",
    "\n",
    "    def predict(self, X):\n",
    "        \"\"\"感知器预测（输出线性结果，无激活）\"\"\"\n",
    "        return np.dot(X, self.weights) + self.bias\n",
    "\n",
    "\n",
    "# 4. Logistic regression model (perceptron + sigmoid activation, for binary classification)\n",
    "class LogisticRegression:\n",
    "    def __init__(self, learning_rate=0.01, epochs=1000):\n",
    "        self.learning_rate = learning_rate\n",
    "        self.epochs = epochs\n",
    "        self.weights = None\n",
    "        self.bias = None\n",
    "\n",
    "    def fit(self, X, y):\n",
    "        \"\"\"\n",
    "        训练逻辑回归模型（使用交叉熵损失的梯度下降）\n",
    "        Args:\n",
    "            X: 输入特征，形状(n_samples, n_features)\n",
    "            y: 标签，形状(n_samples,)，值为0或1\n",
    "        \"\"\"\n",
    "        n_samples, n_features = X.shape\n",
    "        self.weights = np.zeros(n_features)\n",
    "        self.bias = 0\n",
    "\n",
    "        for _ in range(self.epochs):\n",
    "            # 前向传播\n",
    "            linear_output = np.dot(X, self.weights) + self.bias  # z = w·X + b\n",
    "            y_pred = sigmoid(linear_output)  # 应用sigmoid得到概率\n",
    "\n",
    "            # 计算梯度（交叉熵损失对w和b的导数）\n",
    "            dw = (1 / n_samples) * np.dot(X.T, (y_pred - y))  # 权重梯度\n",
    "            db = (1 / n_samples) * np.sum(y_pred - y)  # 偏置梯度\n",
    "\n",
    "            # 梯度下降更新\n",
    "            self.weights -= self.learning_rate * dw\n",
    "            self.bias -= self.learning_rate * db\n",
    "\n",
    "    def predict_proba(self, X):\n",
    "        \"\"\"预测正类概率\"\"\"\n",
    "        linear_output = np.dot(X, self.weights) + self.bias\n",
    "        return sigmoid(linear_output)\n",
    "\n",
    "    def predict(self, X, threshold=0.5):\n",
    "        \"\"\"预测类别（基于阈值划分）\"\"\"\n",
    "        proba = self.predict_proba(X)\n",
    "        return (proba >= threshold).astype(int)\n",
    "\n",
    "\n",
    "# Demo / test script (runs only when executed as a script).\n",
    "if __name__ == \"__main__\":\n",
    "    # Generate a synthetic binary-classification dataset.\n",
    "    X, y = make_classification(\n",
    "        n_samples=1000, n_features=5, n_informative=3, random_state=42\n",
    "    )\n",
    "    X_train, X_test, y_train, y_test = train_test_split(\n",
    "        X, y, test_size=0.2, random_state=42\n",
    "    )\n",
    "\n",
    "    # Train the logistic-regression model.\n",
    "    lr = LogisticRegression(learning_rate=0.01, epochs=5000)\n",
    "    lr.fit(X_train, y_train)\n",
    "    \n",
    "    # Predict and evaluate.\n",
    "    y_pred = lr.predict(X_test)\n",
    "    y_proba = lr.predict_proba(X_test)\n",
    "    loss = binary_cross_entropy(y_test, y_proba)\n",
    "    \n",
    "    print(f\"逻辑回归测试集准确率: {accuracy_score(y_test, y_pred):.4f}\")\n",
    "    print(f\"逻辑回归测试集交叉熵损失: {loss:.4f}\")\n",
    "\n",
    "    # Compare with the Perceptron (labels mapped to -1/+1 to match its\n",
    "    # training target).  NOTE(review): this uses the Perceptron defined in\n",
    "    # this cell, which shadows the earlier cell's class of the same name.\n",
    "    y_train_perceptron = 2 * y_train - 1  # 0 -> -1, 1 -> +1\n",
    "    perceptron = Perceptron(learning_rate=0.01, epochs=5000)\n",
    "    perceptron.fit(X_train, y_train_perceptron)\n",
    "    \n",
    "    # Perceptron prediction: classify by the sign of the linear output\n",
    "    # (np.sign may also return 0, which maps to class 0 below).\n",
    "    y_pred_perceptron = np.sign(perceptron.predict(X_test))\n",
    "    y_pred_perceptron = (y_pred_perceptron + 1) // 2  # map back to 0 and 1\n",
    "    print(f\"感知器测试集准确率: {accuracy_score(y_test, y_pred_perceptron):.4f}\")"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
