{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8a805d85",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torch.nn import TransformerEncoder, TransformerEncoderLayer\n",
    "import math\n",
    "\n",
    "class TextHybridModel(nn.Module):\n",
    "    \"\"\"Hybrid CNN + Transformer text classifier.\n",
    "\n",
    "    Token embeddings get sinusoidal positional encodings; multi-scale 1-D\n",
    "    convolutions extract local n-gram features; embedding and CNN features\n",
    "    are fused, projected to hidden_dim, passed through a Transformer\n",
    "    encoder, pooled over the sequence, and classified by a 2-layer head.\n",
    "    \"\"\"\n",
    "    def __init__(self,\n",
    "                 vocab_size,        # vocabulary size\n",
    "                 embed_dim=128,     # word embedding dimension\n",
    "                 hidden_dim=256,    # hidden dimension inside the Transformer\n",
    "                 num_classes=2,     # number of target classes\n",
    "                 num_heads=4,       # Transformer attention heads\n",
    "                 num_layers=2,      # Transformer encoder layers\n",
    "                 cnn_kernel_sizes=(3, 5),  # CNN kernel sizes; odd values keep seq_len\n",
    "                 cnn_out_channels=64,      # output channels per CNN branch\n",
    "                 max_seq_len=128,    # maximum sequence length\n",
    "                 dropout=0.1):       # dropout probability\n",
    "        super().__init__()\n",
    "\n",
    "        # 1. Token embedding (index 0 is assumed to be the padding id)\n",
    "        self.embedding = nn.Embedding(\n",
    "            num_embeddings=vocab_size,\n",
    "            embedding_dim=embed_dim,\n",
    "            padding_idx=0\n",
    "        )\n",
    "\n",
    "        # 2. Positional encoding for the Transformer branch\n",
    "        self.pos_encoder = PositionalEncoding(\n",
    "            d_model=embed_dim,\n",
    "            dropout=dropout,\n",
    "            max_len=max_seq_len\n",
    "        )\n",
    "\n",
    "        # 3. Multi-scale convolutions over the embedded sequence.\n",
    "        #    padding=k//2 preserves seq_len only for odd kernel sizes.\n",
    "        self.cnn_layers = nn.ModuleList([\n",
    "            nn.Conv1d(\n",
    "                in_channels=embed_dim,\n",
    "                out_channels=cnn_out_channels,\n",
    "                kernel_size=k,\n",
    "                padding=k // 2\n",
    "            ) for k in cnn_kernel_sizes\n",
    "        ])\n",
    "        # Total CNN feature width after concatenating all branches\n",
    "        self.cnn_total_dim = cnn_out_channels * len(cnn_kernel_sizes)\n",
    "\n",
    "        # 4. Fuse embedding + CNN features and project to hidden_dim\n",
    "        self.fusion_proj = nn.Linear(\n",
    "            in_features=embed_dim + self.cnn_total_dim,\n",
    "            out_features=hidden_dim\n",
    "        )\n",
    "\n",
    "        # 5. Transformer encoder (batch_first: inputs stay [batch, seq, dim])\n",
    "        transformer_layer = TransformerEncoderLayer(\n",
    "            d_model=hidden_dim,\n",
    "            nhead=num_heads,\n",
    "            dim_feedforward=hidden_dim * 4,  # feed-forward width\n",
    "            dropout=dropout,\n",
    "            batch_first=True\n",
    "        )\n",
    "        self.transformer_encoder = TransformerEncoder(\n",
    "            encoder_layer=transformer_layer,\n",
    "            num_layers=num_layers\n",
    "        )\n",
    "\n",
    "        # 6. Classification head\n",
    "        self.classifier = nn.Sequential(\n",
    "            nn.Linear(hidden_dim, hidden_dim // 2),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(dropout),\n",
    "            nn.Linear(hidden_dim // 2, num_classes)\n",
    "        )\n",
    "\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "\n",
    "    def forward(self, x, mask=None):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            x: token ids, shape [batch_size, seq_len]\n",
    "            mask: optional validity mask, shape [batch_size, seq_len];\n",
    "                  1/True marks real tokens, 0/False marks padding\n",
    "        Returns:\n",
    "            logits: shape [batch_size, num_classes]\n",
    "        \"\"\"\n",
    "        # 1. Embedding + positional encoding\n",
    "        embed = self.embedding(x)        # [batch, seq_len, embed_dim]\n",
    "        embed = self.pos_encoder(embed)\n",
    "\n",
    "        # 2. CNN branch: Conv1d expects [batch, channels, seq_len]\n",
    "        cnn_input = embed.permute(0, 2, 1)\n",
    "        cnn_features = []\n",
    "        for conv in self.cnn_layers:\n",
    "            feat = F.relu(conv(cnn_input))              # [batch, cnn_out, seq_len]\n",
    "            cnn_features.append(feat.permute(0, 2, 1))  # [batch, seq_len, cnn_out]\n",
    "        cnn_cat = torch.cat(cnn_features, dim=-1)       # [batch, seq_len, cnn_total_dim]\n",
    "\n",
    "        # 3. Fuse embedding and CNN features\n",
    "        fused = torch.cat([embed, cnn_cat], dim=-1)     # [batch, seq_len, embed+cnn]\n",
    "        fused = self.dropout(self.fusion_proj(fused))   # [batch, seq_len, hidden_dim]\n",
    "\n",
    "        # 4. Transformer encoding.\n",
    "        # BUGFIX: the original built a [batch, seq, seq] tensor and passed it\n",
    "        # as src_mask, which PyTorch rejects (src_mask must be [seq, seq] or\n",
    "        # [batch*num_heads, seq, seq]). Per-sample padding belongs in\n",
    "        # src_key_padding_mask, where True means \"ignore this position\".\n",
    "        pad_mask = (mask == 0) if mask is not None else None\n",
    "        transformer_out = self.transformer_encoder(\n",
    "            fused, src_key_padding_mask=pad_mask\n",
    "        )  # [batch, seq_len, hidden_dim]\n",
    "\n",
    "        # 5. Pool over the sequence; when a mask is given, average only the\n",
    "        # valid positions so padding does not dilute the representation.\n",
    "        if mask is not None:\n",
    "            valid = mask.unsqueeze(-1).to(transformer_out.dtype)  # [batch, seq, 1]\n",
    "            seq_feat = (transformer_out * valid).sum(dim=1) / valid.sum(dim=1).clamp(min=1)\n",
    "        else:\n",
    "            seq_feat = transformer_out.mean(dim=1)      # [batch, hidden_dim]\n",
    "\n",
    "        # 6. Classify\n",
    "        logits = self.classifier(seq_feat)              # [batch, num_classes]\n",
    "        return logits\n",
    "\n",
    "\n",
    "class PositionalEncoding(nn.Module):\n",
    "    \"\"\"Sinusoidal positional encoding (Vaswani et al., 2017).\n",
    "\n",
    "    BUGFIX: the original tried to auto-detect batch-first input with\n",
    "    `x.shape[1] == x.size(1)`, a tautology that is always True, so the\n",
    "    seq-first branch was dead code. The buffer is now stored as\n",
    "    [1, max_len, d_model] and the module explicitly expects batch-first\n",
    "    input, matching how TextHybridModel calls it (same runtime behavior\n",
    "    as before, without the misleading branch).\n",
    "    \"\"\"\n",
    "    def __init__(self, d_model, dropout=0.1, max_len=5000):\n",
    "        super().__init__()\n",
    "        self.dropout = nn.Dropout(p=dropout)\n",
    "\n",
    "        # Precompute the encoding table once; it is not trainable.\n",
    "        position = torch.arange(max_len).unsqueeze(1)\n",
    "        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))\n",
    "        pe = torch.zeros(1, max_len, d_model)\n",
    "        pe[0, :, 0::2] = torch.sin(position * div_term)  # even dims: sine\n",
    "        pe[0, :, 1::2] = torch.cos(position * div_term)  # odd dims: cosine\n",
    "        self.register_buffer('pe', pe)  # follows .to()/.cuda(), excluded from grads\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            x: embeddings, shape [batch_size, seq_len, d_model]\n",
    "        Returns:\n",
    "            x plus positional encodings, after dropout\n",
    "        \"\"\"\n",
    "        x = x + self.pe[:, :x.size(1)]\n",
    "        return self.dropout(x)\n",
    "\n",
    "\n",
    "# Smoke test\n",
    "if __name__ == \"__main__\":\n",
    "    # Hyperparameters\n",
    "    vocab_size, batch_size, seq_len = 10000, 8, 32\n",
    "\n",
    "    # Build the model\n",
    "    model = TextHybridModel(vocab_size=vocab_size, embed_dim=128,\n",
    "                            hidden_dim=256, num_classes=3,\n",
    "                            num_heads=4, num_layers=2)\n",
    "\n",
    "    # Random token ids plus an all-valid mask\n",
    "    x = torch.randint(0, vocab_size, (batch_size, seq_len))\n",
    "    mask = torch.ones(batch_size, seq_len)\n",
    "\n",
    "    # Forward pass and shape report\n",
    "    logits = model(x, mask)\n",
    "    print(f\"输入形状: {x.shape}\")\n",
    "    print(f\"输出形状: {logits.shape}\")  # expect [8, 3]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "0249df2f",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\ZTC\\miniconda3\\envs\\cv\\Lib\\site-packages\\torch\\nn\\modules\\transformer.py:392: UserWarning: enable_nested_tensor is True, but self.use_nested_tensor is False because encoder_layer.self_attn.batch_first was not True(use batch_first for better inference performance)\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "class AnomalyTransformer(nn.Module):\n",
    "    \"\"\"Transformer encoder that projects back to the input width,\n",
    "    e.g. for reconstruction-based anomaly scoring.\n",
    "\n",
    "    NOTE(review): batch_first is not set, so PyTorch treats input as\n",
    "    seq-first [seq_len, batch, model_dim] (the construction-time warning\n",
    "    in this cell's output flags exactly this). Also, input_dim is only\n",
    "    used for the output projection -- the input fed to forward() must\n",
    "    already be model_dim wide. Confirm both against the caller.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, input_dim, model_dim, num_heads, num_layers):\n",
    "        super().__init__()\n",
    "        self.encoder_layer = nn.TransformerEncoderLayer(d_model=model_dim, nhead=num_heads)\n",
    "        self.encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers)\n",
    "        self.fc = nn.Linear(model_dim, input_dim)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Encode x and project the result back to input_dim.\"\"\"\n",
    "        encoded = self.encoder(x)\n",
    "        return self.fc(encoded)\n",
    "# Instantiate the model (34 input features, 64-dim model, 4 heads, 3 encoder layers)\n",
    "model = AnomalyTransformer(input_dim=34, model_dim=64, num_heads=4, num_layers=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9cffcfde",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "\n",
    "class transformerclassfier(nn.Module):\n",
    "    \"\"\"Minimal Transformer-encoder text classifier.\n",
    "\n",
    "    BUGFIXES vs. the original:\n",
    "      * nn.linear -> nn.Linear (the lowercase name raises AttributeError).\n",
    "      * batch_first=True on the encoder layer so the [batch, seq, embed]\n",
    "        output of nn.Embedding is interpreted correctly, and the pooling\n",
    "        averages over the sequence dim (dim=1) rather than the batch dim.\n",
    "\n",
    "    Note: embed_size must be divisible by nhead (=2).\n",
    "    \"\"\"\n",
    "    def __init__(self, vocab_size, embed_size, num_class):\n",
    "        super().__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, embed_size)\n",
    "        self.transformer = nn.TransformerEncoder(\n",
    "            nn.TransformerEncoderLayer(embed_size, nhead=2, batch_first=True),\n",
    "            num_layers=2\n",
    "        )\n",
    "        self.fc = nn.Linear(embed_size, num_class)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"x: token ids [batch, seq_len] -> logits [batch, num_class].\"\"\"\n",
    "        x = self.embedding(x)    # [batch, seq, embed]\n",
    "        x = self.transformer(x)  # [batch, seq, embed]\n",
    "        x = x.mean(dim=1)        # mean-pool the sequence as a global feature\n",
    "        return self.fc(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1845c1fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "class transformerencoder(nn.Module):\n",
    "    \"\"\"Stack of Transformer encoder layers returning the first (CLS) token.\n",
    "\n",
    "    BUGFIXES vs. the original:\n",
    "      * nn.MoudleList -> nn.ModuleList and nn.TransfomerEncoderLayer ->\n",
    "        nn.TransformerEncoderLayer (both typos raise AttributeError).\n",
    "      * mlp_ratio was referenced but never defined (NameError); it is now\n",
    "        a parameter with the conventional ViT default 4.0 (backward\n",
    "        compatible).\n",
    "      * the return was inside the loop, so only the first layer ever ran;\n",
    "        all `depth` layers are now applied before pooling.\n",
    "    \"\"\"\n",
    "    def __init__(self, dim=768, depth=12, heads=12, mlp_ratio=4.0):\n",
    "        super().__init__()\n",
    "        self.layers = nn.ModuleList([\n",
    "            nn.TransformerEncoderLayer(\n",
    "                d_model=dim,\n",
    "                nhead=heads,\n",
    "                dim_feedforward=int(dim * mlp_ratio),\n",
    "                activation=\"gelu\",\n",
    "                batch_first=True\n",
    "            ) for _ in range(depth)\n",
    "        ])\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"x: [batch, tokens, dim] -> CLS embedding [batch, dim].\"\"\"\n",
    "        for layer in self.layers:\n",
    "            x = layer(x)\n",
    "        return x[:, 0]  # first token, ViT-style CLS representation\n",
    "\n",
    "class Vit(nn.Module):\n",
    "    \"\"\"Vision Transformer skeleton (incomplete).\n",
    "\n",
    "    NOTE(review): unfinished -- `PatchEmbedding` is not defined anywhere in\n",
    "    this notebook, `num_class` is accepted but unused, and there is no\n",
    "    encoder, head, or forward() yet. TODO: complete or remove this cell.\n",
    "    \"\"\"\n",
    "    def __init__(self,num_class=10):\n",
    "        super().__init__()\n",
    "        self.patch_embed=PatchEmbedding()  # NOTE(review): undefined name -- will raise NameError at init\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "cv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
