{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "融合语法规则与自注意力机制的GCN情感分析方法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "class TransformerEncoder(nn.Module):\n",
    "    \"\"\"Stack of pre-LN Transformer encoder layers.\n",
    "\n",
    "    Each layer applies two residual sublayers:\n",
    "        x = x + Dropout(Attention(LayerNorm(x)))\n",
    "        x = x + Dropout(FeedForward(LayerNorm(x)))\n",
    "\n",
    "    Relies on externally defined MultiHeadAttention and FeedForwardNetwork\n",
    "    modules; their use here requires that each maps a single tensor to a\n",
    "    tensor of the same shape (as the original nn.Sequential usage implied).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, hidden_size, num_layers, dropout):\n",
    "        super(TransformerEncoder, self).__init__()\n",
    "        self.layers = nn.ModuleList([self._get_layer(hidden_size, dropout) for _ in range(num_layers)])\n",
    "\n",
    "    def _get_layer(self, hidden_size, dropout):\n",
    "        # One encoder layer: attention and feed-forward sublayers, each\n",
    "        # with its own pre-normalization, plus a shared dropout module.\n",
    "        return nn.ModuleDict({\n",
    "            \"attn_norm\": nn.LayerNorm(hidden_size),\n",
    "            \"attn\": MultiHeadAttention(hidden_size),\n",
    "            \"ffn_norm\": nn.LayerNorm(hidden_size),\n",
    "            \"ffn\": FeedForwardNetwork(hidden_size, hidden_size * 4),\n",
    "            \"dropout\": nn.Dropout(dropout),\n",
    "        })\n",
    "\n",
    "    def forward(self, x):\n",
    "        # FIX: residual connection around EACH sublayer (standard Transformer)\n",
    "        # rather than a single residual around the whole layer chain; dropout\n",
    "        # is applied to both sublayer outputs, not only after the FFN.\n",
    "        for layer in self.layers:\n",
    "            x = x + layer[\"dropout\"](layer[\"attn\"](layer[\"attn_norm\"](x)))\n",
    "            x = x + layer[\"dropout\"](layer[\"ffn\"](layer[\"ffn_norm\"](x)))\n",
    "        return x\n",
    "\n",
    "# Encode each modality with the TransformerEncoder defined above.\n",
    "transformer_encoder = TransformerEncoder(hidden_size=64, num_layers=2, dropout=0.5)\n",
    "\n",
    "# Encode the visual modality.\n",
    "Xv_encoded = transformer_encoder(Xv)\n",
    "\n",
    "# Audio and text modalities are encoded the same way.\n",
    "# FIX: the lowercase names `xa`/`xt` were never defined (NameError) and were\n",
    "# inconsistent with `Xv`; use `Xa`/`Xt` to match the `Xa_encoded`/`Xt_encoded` outputs.\n",
    "Xa_encoded = transformer_encoder(Xa)\n",
    "Xt_encoded = transformer_encoder(Xt)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "其中，$q_i$、$k_j$、$v_j$分别表示查询向量、键向量和值向量，$\\mathrm{score}()$表示点积注意力函数，$\\alpha_{ij}$表示注意力权重，$z_i$表示最终的输出向量。这个方程的意思是，对于每个查询向量$q_i$，我们计算它与所有键向量$k_j$之间的相似度得分$e_{ij}$，然后根据这些得分计算注意力权重$\\alpha_{ij}$，最后将所有值向量$v_j$加权平均得到最终的输出向量$z_i$。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "代码修改：增加音频特征\n",
    "### 主要改动解释：\n",
    "\n",
    "1. **音频特征处理**：新增 `AudioEmbedding` 模块用于处理音频特征。在这里，假设音频特征的维度为 128，经过一个全连接层进行简单的处理。\n",
    "\n",
    "2. **音频的 LSTM 处理**：与文本类似，音频也通过 LSTM 进行处理，将其转换为上下文特征。\n",
    "\n",
    "3. **注意力机制的应用**：文本和音频都分别经过自注意力机制，捕捉到特征之间的关系。\n",
    "\n",
    "4. **多模态特征融合**：最后将文本和音频的自注意力输出进行拼接，并通过全连接层进行分类。\n",
    "\n",
    "### 进一步扩展：\n",
    "\n",
    "- **音频特征提取**：可以使用实际的音频特征提取方法，比如 MFCC 或 Mel-spectrogram，而不是简单的随机模拟。\n",
    "\n",
    "- **跨模态融合**：当前的拼接方式可以替换为更复杂的融合方法，例如跨模态注意力机制，来捕捉文本和音频之间的关系。\n",
    "\n",
    "- **其他模态**：可以继续加入图像、视频等其他模态，只需为每种模态设计相应的特征提取和处理模块。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import torch.nn.functional as F\n",
    "import numpy as np\n",
    "\n",
    "# 音频特征模拟（假设每个音频特征向量维度为128）\n",
    "class AudioEmbedding(nn.Module):\n",
    "    def __init__(self, audio_dim):\n",
    "        super(AudioEmbedding, self).__init__()\n",
    "        self.fc = nn.Linear(audio_dim, audio_dim)\n",
    "\n",
    "    def forward(self, x):\n",
    "        return F.relu(self.fc(x))\n",
    "\n",
    "# Enhanced GCN + self-attention model with an added audio branch.\n",
    "class MultiModalGCNAttentionModel(nn.Module):\n",
    "    \"\"\"Classify by fusing GCN/LSTM text features with LSTM audio features.\n",
    "\n",
    "    Text tokens are embedded (GloVe), refined by two GCN layers over an\n",
    "    adjacency matrix, and encoded by an LSTM; audio features are embedded\n",
    "    and encoded by a second LSTM.  Both streams pass through the same\n",
    "    self-attention module, their final time steps are concatenated, and a\n",
    "    linear layer produces the class logits.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, vocab_size, embedding_dim, gcn_dim, audio_dim, hidden_dim, num_classes):\n",
    "        super(MultiModalGCNAttentionModel, self).__init__()\n",
    "        self.text_embedding = GloVeEmbedding(embedding_dim, vocab_size)\n",
    "        self.audio_embedding = AudioEmbedding(audio_dim)\n",
    "        \n",
    "        # Two stacked GCN layers refine the text embeddings.\n",
    "        self.gcn1 = GCNLayer(embedding_dim, gcn_dim)\n",
    "        self.gcn2 = GCNLayer(gcn_dim, gcn_dim)\n",
    "        \n",
    "        # Sequence encoders, one per modality.\n",
    "        self.text_lstm = nn.LSTM(gcn_dim, hidden_dim, batch_first=True)\n",
    "        self.audio_lstm = nn.LSTM(audio_dim, hidden_dim, batch_first=True)\n",
    "        \n",
    "        # A single self-attention module shared by both modalities.\n",
    "        self.attention = SelfAttention(hidden_dim, heads=8)\n",
    "        \n",
    "        # Classifier over concatenated text + audio features, hence 2x hidden_dim.\n",
    "        self.fc = nn.Linear(hidden_dim * 2, num_classes)\n",
    "\n",
    "    def forward(self, text, adj, audio):\n",
    "        # --- Text branch: embed -> GCN x2 -> LSTM ---\n",
    "        text_features = self.gcn2(self.gcn1(self.text_embedding(text), adj), adj)\n",
    "        text_context, _ = self.text_lstm(text_features)\n",
    "        \n",
    "        # --- Audio branch: embed -> LSTM ---\n",
    "        audio_context, _ = self.audio_lstm(self.audio_embedding(audio))\n",
    "        \n",
    "        # Self-attention on each sequence, then fuse the last time steps.\n",
    "        attended_text = self.attention(text_context)\n",
    "        attended_audio = self.attention(audio_context)\n",
    "        fused = torch.cat((attended_text[:, -1, :], attended_audio[:, -1, :]), dim=1)\n",
    "        \n",
    "        # Class logits.\n",
    "        return self.fc(fused)\n",
    "\n",
    "# Demo configuration.\n",
    "batch_size = 32\n",
    "seq_len = 10\n",
    "vocab_size = 5000\n",
    "embedding_dim = 300\n",
    "gcn_dim = 128\n",
    "audio_dim = 128\n",
    "hidden_dim = 256\n",
    "num_classes = 3\n",
    "\n",
    "# FIX: seed the RNG so the randomly generated inputs and model weights\n",
    "# are reproducible under Restart & Run All.\n",
    "torch.manual_seed(0)\n",
    "\n",
    "# Simulated inputs.\n",
    "text = torch.randint(0, vocab_size, (batch_size, seq_len))  # random token ids\n",
    "adj = torch.rand(batch_size, seq_len, seq_len)  # random adjacency matrix\n",
    "audio = torch.rand(batch_size, seq_len, audio_dim)  # random audio features\n",
    "\n",
    "# Build the model.\n",
    "model = MultiModalGCNAttentionModel(vocab_size, embedding_dim, gcn_dim, audio_dim, hidden_dim, num_classes)\n",
    "\n",
    "# Forward pass.\n",
    "output = model(text, adj, audio)\n",
    "print(output.shape)  # expected: torch.Size([batch_size, num_classes])"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
