{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "770fc64b-6fc6-4301-966b-e67e9d3f7815",
   "metadata": {},
   "source": [
    "# 依赖定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "38a7849e-7348-45e2-b6d3-e86f3052aba2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 模型部分\n",
    "import mlflow\n",
    "import mlflow.pyfunc\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from sklearn.metrics import accuracy_score\n",
    "from sklearn.metrics import confusion_matrix\n",
    "from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score\n",
    "import time\n",
    "from pathlib import Path\n",
    "from typing import List\n",
    "\n",
    "\n",
    "# 设置随机种子\n",
    "torch.manual_seed(42)\n",
    "np.random.seed(42)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "df2e7295-b714-4b5a-bd8a-1529baaecba3",
   "metadata": {},
   "source": [
    "# 定义模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "bb045fa8-8ee2-4c11-83cb-c9518933f291",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "\n",
    "class TSAnomalyDetector(nn.Module):\n",
    "    \"\"\"时间序列异常检测器（支持变长序列）\"\"\"\n",
    "\n",
    "    def __init__(self, input_dim=4, hidden_dim=32, device=torch.device('cpu')):\n",
    "        super().__init__()\n",
    "        # 特征提取器\n",
    "        encoder = nn.Sequential(\n",
    "            nn.Linear(input_dim, 128),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(128, 64),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(64, 32),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(32, hidden_dim)\n",
    "        )\n",
    "        self.encoder = encoder.to(device=device)\n",
    "        # 重建解码器\n",
    "        decoder = nn.Sequential(\n",
    "            nn.Linear(hidden_dim, 32),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(32, 64),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(64, 128),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(128, input_dim)\n",
    "        )\n",
    "        self.decoder = decoder.to(device=device)\n",
    "        # 异常分类器\n",
    "        classifier = nn.Sequential(\n",
    "            nn.Linear(hidden_dim, 16),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(16, 1),\n",
    "            nn.Sigmoid()\n",
    "        )\n",
    "        self.classifier = classifier.to(device=device)\n",
    "        # 动态标准化层\n",
    "        self.norm = nn.BatchNorm1d(input_dim, affine=False, device=device)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.norm(x)\n",
    "        encoded = self.encoder(x)\n",
    "        decoded = self.decoder(encoded)\n",
    "        prob = self.classifier(encoded)\n",
    "        return decoded, prob.squeeze()\n",
    "\n",
    "\n",
    "class AnomalyLearningSystem:\n",
    "    \"\"\"持续学习系统\"\"\"\n",
    "    def __init__(self, memory_size=100, threshold=0.3, input_dim=4, model_name=\"new model\", if_load_file=False):\n",
    "        self.model_name = model_name\n",
    "        if if_load_file:\n",
    "            # file_name = './model/' + model_name + \".pth\"\n",
    "            self.load_state(self.model_file_path())\n",
    "        else:\n",
    "            self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "            self.input_dim = input_dim\n",
    "            self.model = TSAnomalyDetector(input_dim=input_dim, device=self.device).float()\n",
    "            self.memory_size = memory_size\n",
    "            self.memory_X = []\n",
    "            self.memory_y = []\n",
    "            self.threshold = threshold  # 异常阈值\n",
    "        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)\n",
    "        self.loss_fn = nn.MSELoss()  # 重建损失\n",
    "        self.cls_fn = nn.BCELoss()  # 分类损失\n",
    "        self.recon_losses = []\n",
    "        self.cls_losses = []\n",
    "        self.total_losses = []\n",
    "\n",
    "    def train(self, curves2, labels2, epochs=100, if_update_memory=True, probs_percen=99):\n",
    "        start = time.time()\n",
    "        # 损失记录重置\n",
    "        self.recon_losses = []\n",
    "        self.cls_losses = []\n",
    "        self.total_losses = []\n",
    "        \"\"\"半监督训练模块\"\"\"\n",
    "        self.model.train()\n",
    "        if len(self.memory_X) > 0:\n",
    "            curves = curves2 + self.memory_X\n",
    "            labels = labels2 + self.memory_y\n",
    "        else:\n",
    "            curves = curves2.copy()\n",
    "            labels = labels2.copy()\n",
    "\n",
    "        X = self._preprocess(curves)\n",
    "        y = torch.tensor(labels, dtype=torch.float32, device=self.device)\n",
    "\n",
    "        # 复合损失训练\n",
    "        for i_ep in range(epochs):\n",
    "            self.optimizer.zero_grad()\n",
    "            decoded, prob = self.model(X)\n",
    "\n",
    "            # 重建损失（无监督）\n",
    "            recon_loss = self.loss_fn(decoded, X)\n",
    "            # 分类损失（监督）\n",
    "            mask = ~torch.isnan(y)  # 忽略未标注数据\n",
    "            if y[mask].numel() > 0:  # 检查筛选后的标签数据是否为空\n",
    "                cls_loss = self.cls_fn(prob[mask], y[mask])\n",
    "            else:\n",
    "                cls_loss = torch.tensor(0.0, device=self.device, requires_grad=True)  # 如果为空，将损失设为 0\n",
    "\n",
    "            # 加权总损失（侧重召回率）\n",
    "            total_loss = recon_loss + 3 * cls_loss  # 网页3[3](@ref)与网页6[6](@ref)的复合策略\n",
    "\n",
    "            # 记录损失\n",
    "            self.recon_losses.append(recon_loss.item())\n",
    "            self.cls_losses.append(cls_loss.item())\n",
    "            self.total_losses.append(total_loss.item())\n",
    "\n",
    "            mlflow.log_metric(\"recon_loss\", recon_loss, step=i_ep)\n",
    "            mlflow.log_metric(\"cls_loss\", cls_loss, step=i_ep)\n",
    "            mlflow.log_metric(\"total_loss\", total_loss, step=i_ep)\n",
    "\n",
    "            total_loss.backward()\n",
    "            self.optimizer.step()\n",
    "\n",
    "        # 更新阈值, 仅使用带标签的部分\n",
    "        self.model.eval()\n",
    "        with torch.no_grad():\n",
    "            _, probs = self.model(X)\n",
    "            mask = ~torch.isnan(y)  # 忽略未标注数据\n",
    "            if np.all(np.isnan(labels)) or np.all(np.logical_not(labels)):\n",
    "                # 按照数据集的 99% 分位数设定初始阈值\n",
    "                self.threshold = np.nanpercentile(probs.cpu().numpy(), probs_percen)\n",
    "            else:\n",
    "                self.adjust_threshold(probs[mask].tolist(), y[mask].cpu())\n",
    "\n",
    "        # # 更新记忆池（高置信度样本）\n",
    "        if if_update_memory:\n",
    "            self.update_memory(curves, labels)\n",
    "\n",
    "        # 保存模型\n",
    "        # filename = './model/' + self.model_name + '.pth'\n",
    "        torch.save(self.get_state(), self.model_file_path())\n",
    "\n",
    "        print(f\"训练完成, device = {self.device}, spent time = {time.time() - start}\")\n",
    "\n",
    "    def predict(self, curves):\n",
    "        \"\"\"预测模块\"\"\"\n",
    "        self.model.eval()\n",
    "        X = self._preprocess(curves)\n",
    "        with torch.no_grad():\n",
    "            _, probs = self.model(X)\n",
    "        return probs >= self.threshold, probs\n",
    "\n",
    "    def initial_train(self, curves1, labels1, epochs=100, if_update_memory=True, probs_percen=99):\n",
    "        # 初始训练，会清除记忆池\n",
    "        # 清除记忆\n",
    "        self.memory_X = []\n",
    "        self.memory_y = []\n",
    "        self.train(curves1, labels1, epochs=epochs, if_update_memory=if_update_memory, probs_percen=probs_percen)\n",
    "\n",
    "    def add_training(self, curves1, labels1, epochs=100, if_update_memory=True, probs_percen=99):\n",
    "        # 增量训练，不会记忆池\n",
    "        self.train(curves1, labels1, epochs=epochs, if_update_memory=if_update_memory, probs_percen=probs_percen)\n",
    "\n",
    "    def self_learn(self, curves1, labels1):\n",
    "        \"\"\"自学习触发模块\"\"\"\n",
    "        self.model.eval()\n",
    "        preds, probs = self.predict(curves1)\n",
    "        true_labels = np.array(labels1)\n",
    "\n",
    "        acc = accuracy_score(true_labels, preds.cpu())\n",
    "        logger.info(f\"acc = {acc}\")\n",
    "\n",
    "        if len(true_labels) >= 3 and acc < 0.9:\n",
    "            logger.info(f\"检测到{len(true_labels)}个样本中，ACC为{acc}，低于0.9，触发自学习...\")\n",
    "            self.train(curves1, labels1, if_update_memory=True)\n",
    "\n",
    "        print(\"自学习完成\")\n",
    "\n",
    "    # 根据验证集动态调整阈值\n",
    "    def adjust_threshold(self, y_scores, y_true):\n",
    "        # 生成候选阈值序列（网页6的线性采样策略）\n",
    "        thresholds = sorted(set(y_scores))\n",
    "        best_accuracy = 0\n",
    "        best_threshold = self.threshold  # 默认阈值\n",
    "        accuracy_records = []\n",
    "\n",
    "        # 遍历所有候选阈值（网页8的遍历评估方法）\n",
    "        for thresh in thresholds:\n",
    "            # 根据当前阈值生成预测标签\n",
    "            y_pred = (np.array(y_scores) >= thresh).astype(bool)\n",
    "\n",
    "            # 计算当前阈值下的准确度（网页6的核心指标）\n",
    "            acc = accuracy_score(y_true, y_pred)\n",
    "            cm = confusion_matrix(y_true, y_pred)\n",
    "            accuracy_records.append(acc)\n",
    "\n",
    "            # 更新最优阈值（网页8的极值追踪策略）\n",
    "            if acc > best_accuracy or (acc == best_accuracy and thresh > best_threshold):\n",
    "                best_accuracy = acc\n",
    "                best_threshold = thresh\n",
    "                best_cm = cm\n",
    "\n",
    "        print(f\"best_threshold = {best_threshold}, best_accuracy = {best_accuracy}\")\n",
    "        print(f\"该阈值下的混淆矩阵：\\n {best_cm}\")\n",
    "        self.threshold = best_threshold\n",
    "\n",
    "    def get_state(self):\n",
    "        \"\"\"获取需要保存的所有数据\"\"\"\n",
    "        return {\n",
    "            \"input_dim\": self.input_dim,  # 模型输入尺寸\n",
    "            \"model_state\": self.model.state_dict(),  # 保存模型权重\n",
    "            \"threshold\": self.threshold,  # 保存阈值\n",
    "            \"memory_size\": self.memory_size,  # 保存记忆池大小\n",
    "            \"memory_X\": self.memory_X,  # 保存记忆池X\n",
    "            \"memory_y\": self.memory_y  # 保存记忆池y\n",
    "        }\n",
    "\n",
    "    def load_state(self, name):\n",
    "        # 读取数据\n",
    "        try:\n",
    "            # 加载保存的状态字典\n",
    "            state_dict = torch.load(name)\n",
    "            self.input_dim = state_dict[\"input_dim\"]\n",
    "            self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "            self.model = TSAnomalyDetector(input_dim=state_dict[\"input_dim\"], device=self.device).float()\n",
    "            # 加载模型权重\n",
    "            self.model.load_state_dict(state_dict[\"model_state\"])\n",
    "            # 加载异常阈值\n",
    "            self.threshold = state_dict[\"threshold\"]\n",
    "            # 加载记忆池大小\n",
    "            self.memory_size = state_dict[\"memory_size\"]\n",
    "            # 加载记忆池 X\n",
    "            self.memory_X = state_dict[\"memory_X\"]\n",
    "            # 加载记忆池 y\n",
    "            self.memory_y = state_dict[\"memory_y\"]\n",
    "        except FileNotFoundError:\n",
    "            print(f\"文件 {name} 未找到，请检查文件路径和文件名。\")\n",
    "        except Exception as e:\n",
    "            print(f\"加载模型状态时发生错误: {e}\")\n",
    "\n",
    "    def update_memory(self, curves, labels):\n",
    "        # 更新记忆池\n",
    "        # 对记忆池中的样本进行预处理\n",
    "        X = self._preprocess(curves)\n",
    "        y = torch.FloatTensor(labels)\n",
    "        with torch.no_grad():\n",
    "            # 进行预测，得到概率\n",
    "            _, probs = self.model(X)\n",
    "            cpu_probs = probs.cpu()\n",
    "\n",
    "            # 分离异常和正常样本\n",
    "            anomaly_indices = [i for i, label in enumerate(labels) if label is True]\n",
    "            normal_indices = [i for i, label in enumerate(labels) if label is not True]\n",
    "\n",
    "            # 按异常程度排序异常样本\n",
    "            anomaly_samples = [(curves[i], labels[i], cpu_probs[i].item()) for i in anomaly_indices]\n",
    "            anomaly_samples.sort(key=lambda x: x[2], reverse=True)\n",
    "\n",
    "            # 按正常程度排序正常样本\n",
    "            normal_samples = [(curves[i], labels[i], cpu_probs[i].item()) for i in normal_indices]\n",
    "            normal_samples.sort(key=lambda x: x[2])\n",
    "\n",
    "            # 分别选取不超过 0.5 * self.memory_size 个异常和正常样本\n",
    "            max_anomaly_num = int(0.5 * self.memory_size)\n",
    "            max_normal_num = int(0.5 * self.memory_size)\n",
    "            selected_anomaly = anomaly_samples[:max_anomaly_num]\n",
    "            if len(selected_anomaly) < max_anomaly_num:    # 异常标签数据太少的时候，取异常分数高的作为补充\n",
    "                selected_anomaly = selected_anomaly + normal_samples[-(max_anomaly_num - len(selected_anomaly)):]\n",
    "            selected_normal = normal_samples[:max_normal_num]\n",
    "\n",
    "            # 合并选取的样本\n",
    "            selected = selected_anomaly + selected_normal\n",
    "\n",
    "            # 更新记忆池\n",
    "            self.memory_X = [item[0] for item in selected]\n",
    "            self.memory_y = [item[1] for item in selected]\n",
    "\n",
    "    def _preprocess(self, curves):\n",
    "        \"\"\"数据预处理（支持变长序列填充）\"\"\"\n",
    "        # 统一填充为最大长度（网页1[1](@ref)预处理策略）\n",
    "        max_len = self.input_dim\n",
    "        curves = [c[:max_len] for c in curves]\n",
    "        padded = [c + [0] * (max_len - len(c)) for c in curves]\n",
    "        return torch.tensor(padded, dtype=torch.float32, device=self.device)\n",
    "\n",
    "    def model_file_path(self):        \n",
    "        return './repository/'+ self.model_name + '.pth'\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "16922911-c3b1-4a90-af6d-9eb2f4fb7dc2",
   "metadata": {},
   "source": [
    "# 定义Ml model wrapper"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "484be2f5-9817-420f-93e8-3d15c100430d",
   "metadata": {},
   "outputs": [],
   "source": [
    "class DL001Wrapper(mlflow.pyfunc.PythonModel):\n",
    "    \"\"\"MLflow pyfunc wrapper that delegates prediction to a wrapped model.\"\"\"\n",
    "\n",
    "    def __init__(self, model):\n",
    "        # Wrapped model; must expose a predict(model_input) method.\n",
    "        self.model = model\n",
    "\n",
    "    def predict(self, context:mlflow.pyfunc.PythonModelContext,\n",
    "                model_input:List[List[float]]):\n",
    "        # `context` (MLflow artifacts/config) is unused; delegate directly.\n",
    "        return self.model.predict(model_input)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "06ce7901-286e-47b9-8027-b5d9cb75bff9",
   "metadata": {},
   "source": [
    "# 初始训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "1308b185-3941-45f1-b396-53f9dd5ba480",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025/04/14 18:17:44 INFO mlflow.models.signature: Inferring model signature from type hints\n",
      "2025/04/14 18:17:44 INFO mlflow.models.signature: Running the predict function to generate output based on input example\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "best_threshold = 0.9999483823776245, best_accuracy = 1.0\n",
      "该阈值下的混淆矩阵：\n",
      " [[6 0]\n",
      " [0 3]]\n",
      "训练完成, device = cpu, spent time = 1.551009178161621\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b9e3b858ca99484ca87df1478fe9a7c3",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Downloading artifacts:   0%|          | 0/7 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "模型已注册到 MLflow，Run ID: af85c36eeb3840b8a6ce61ecb2639a88\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\develop\\Anaconda\\envs\\jupyter\\lib\\site-packages\\torch\\storage.py:414: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
      "  return torch.load(io.BytesIO(b))\n",
      "Registered model 'dl_anomaly' already exists. Creating a new version of this model...\n",
      "Created version '12' of model 'dl_anomaly'.\n"
     ]
    }
   ],
   "source": [
    "# Demo training data: each curve is truncated/zero-padded to input_dim=4 by\n",
    "# the system's preprocessing; labels are True (anomaly), False (normal) or\n",
    "# NaN (unlabelled) for the semi-supervised objective.\n",
    "curves_train = [\n",
    "        [0.0, 5.0, 5.0, 0.0], [0.01, -6.1, 4.9, 0.0], [0.0, 5.1, 4.9, 0.0], [0.03, 5.2, 5.2, 0.04], [0.0, -6.1, 4.95, 0.0], [0.05, 5.16, 4.99, 0.0],\n",
    "        [0.0, 5.1, 5.3, 0.01], [0.0, -6.2, 4.8, 0.0], [0.01, 5.1, 4.7, 0.0], [0.0, 5.01, 5.4, 0.0], [0.0, -6.1, 4.93, 0.0], [0.02, 5.15, 4.92, 0.01],\n",
    "        [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0],\n",
    "        [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0],\n",
    "        [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0], [0.0, 5.0, 5.0, 0.0],\n",
    "    ]\n",
    "labels_train = [False, True, False, False, True, np.nan,\n",
    "                    False, True, False, False, np.nan, np.nan,\n",
    "                    np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,\n",
    "                    np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,\n",
    "                    np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]\n",
    "\n",
    "# Validation pair; the 2nd curve has 5 points -- the extra point is truncated\n",
    "# by _preprocess to match input_dim=4.\n",
    "curves_val = [[1.01, 5.1, 5.3, 0.02], [0.0, 0.01, -0.01, -0.02, 0.01]]\n",
    "labels_val = [False, True]\n",
    "# BUGFIX: use a raw string so \\p and \\m are not treated as (invalid) escape\n",
    "# sequences; the runtime value is unchanged.\n",
    "# NOTE(review): hardcoded absolute local path -- consider making it configurable.\n",
    "mlflow.set_tracking_uri(r\"file:D:\\project\\mlflow\\mlruns\")\n",
    "\n",
    "with mlflow.start_run(run_name=\"train init dl001\") as run:\n",
    "    # Train the model and log its hyper-parameters.\n",
    "    params = {\"input_dim\":4, \"memory_size\":10, \"model_name\":\"A-AB002\"}\n",
    "    mlflow.log_params(params)\n",
    "    system = AnomalyLearningSystem(**params)\n",
    "    system.initial_train(curves_train, labels_train, probs_percen=99)\n",
    "\n",
    "    # Evaluate on the validation pair and log the metrics.\n",
    "    y_pred, probs = system.predict(curves_val)\n",
    "    accuracy = accuracy_score(labels_val, y_pred)\n",
    "    recall = recall_score(labels_val, y_pred)\n",
    "    f1 = f1_score(labels_val, y_pred)\n",
    "    auc = roc_auc_score(labels_val, probs)\n",
    "    mlflow.log_metric(\"accuracy\", accuracy)\n",
    "    mlflow.log_metric(\"recall\", recall)\n",
    "    mlflow.log_metric(\"f1\", f1)\n",
    "    mlflow.log_metric(\"auc\", auc)\n",
    "\n",
    "    # Wrap for MLflow pyfunc compatibility.\n",
    "    wrapped_model = DL001Wrapper(system)\n",
    "\n",
    "    # Log and register the model in the MLflow model registry.\n",
    "    mlflow.pyfunc.log_model(\n",
    "        artifact_path=\"dl001\",\n",
    "        python_model=wrapped_model,\n",
    "        registered_model_name=\"dl_anomaly\",\n",
    "        input_example=curves_val\n",
    "    )\n",
    "\n",
    "    # Keep the run id for later reference.\n",
    "    run_id = run.info.run_id\n",
    "\n",
    "print(f\"模型已注册到 MLflow，Run ID: {run_id}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "662e8b37-39b9-4761-b2ca-a792679207e8",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
