{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "文本的矩阵表示"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch  # imported here so this cell runs on a fresh kernel (the original relied on a later cell)\n",
    "\n",
    "# Dependency-graph adjacency matrix for \"This is a fun project\":\n",
    "# entry (i, j) = 1 means token i and token j are linked in the parse.\n",
    "text_adj = torch.tensor([\n",
    "    [0, 1, 0, 0, 0],  # \"This\" depends on \"is\"\n",
    "    [1, 0, 0, 0, 0],  # \"is\" links to \"This\"\n",
    "    [0, 0, 0, 1, 0],  # \"fun\" modifies \"project\"\n",
    "    [0, 0, 1, 0, 0],  # \"project\" is modified by \"fun\"\n",
    "    [0, 0, 0, 0, 0],  # no other links\n",
    "])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "图像的矩阵表示"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch  # imported here so this cell runs on a fresh kernel\n",
    "\n",
    "# 4-connectivity adjacency matrix for a 3x3 grid of image patches p1..p9.\n",
    "# The original cell listed only the first four rows, yielding a 4x9\n",
    "# (non-square) matrix; the full 9x9 matrix is written out here.\n",
    "image_adj = torch.tensor([\n",
    "    [0, 1, 0, 1, 0, 0, 0, 0, 0],  # p1 adjacent to p2, p4\n",
    "    [1, 0, 1, 0, 1, 0, 0, 0, 0],  # p2 adjacent to p1, p3, p5\n",
    "    [0, 1, 0, 0, 0, 1, 0, 0, 0],  # p3 adjacent to p2, p6\n",
    "    [1, 0, 0, 0, 1, 0, 1, 0, 0],  # p4 adjacent to p1, p5, p7\n",
    "    [0, 1, 0, 1, 0, 1, 0, 1, 0],  # p5 adjacent to p2, p4, p6, p8\n",
    "    [0, 0, 1, 0, 1, 0, 0, 0, 1],  # p6 adjacent to p3, p5, p9\n",
    "    [0, 0, 0, 1, 0, 0, 0, 1, 0],  # p7 adjacent to p4, p8\n",
    "    [0, 0, 0, 0, 1, 0, 1, 0, 1],  # p8 adjacent to p5, p7, p9\n",
    "    [0, 0, 0, 0, 0, 1, 0, 1, 0],  # p9 adjacent to p6, p8\n",
    "])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "在深度学习模型中的应用"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Assume we already have text and image embedding representations\n",
    "text_embeds = torch.randn(5, 768)  # 5 tokens, 768-dim embeddings\n",
    "image_embeds = torch.randn(9, 768)  # 9 image patches, 768-dim embeddings\n",
    "\n",
    "# Initialize the model\n",
    "# NOTE(review): neither torch nor CMHICLModel is defined above this cell in\n",
    "# the notebook -- this cell assumes both were brought into scope elsewhere; confirm.\n",
    "model = CMHICLModel(embed_dim=768, num_heads=4, graph_out_dim=512)\n",
    "\n",
    "# Forward pass over the embeddings and their adjacency matrices\n",
    "output = model(text_embeds, image_embeds, text_adj, image_adj)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Libraries for the GCN examples\n",
    "# pip install spacy torch transformers\n",
    "# python -m spacy download en_core_web_sm\n",
    "\n",
    "import torch\n",
    "import spacy\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from transformers import BertTokenizer, BertModel\n",
    "\n",
    "# Load the pretrained spaCy English pipeline (used for dependency parsing below)\n",
    "nlp = spacy.load(\"en_core_web_sm\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 第二步：构建文本的邻接矩阵\n",
    "\n",
    "我们先构建文本的依存句法图，并生成邻接矩阵。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_text_adjacency_matrix(text, max_len):\n",
    "    \"\"\"\n",
    "    Build a text adjacency matrix from a spaCy dependency parse.\n",
    "\n",
    "    Args:\n",
    "    text (str): Input text.\n",
    "    max_len (int): Maximum sentence length (fixed matrix size); tokens\n",
    "        beyond this index are ignored.\n",
    "\n",
    "    Returns:\n",
    "    adj_matrix (torch.Tensor): The (max_len, max_len) adjacency matrix.\n",
    "    \"\"\"\n",
    "    doc = nlp(text)\n",
    "    adj_matrix = torch.zeros((max_len, max_len))\n",
    "\n",
    "    for token in doc:\n",
    "        # Guard the self-loop too: the original only bounds-checked the\n",
    "        # dependency edges, so texts longer than max_len raised IndexError.\n",
    "        if token.i >= max_len:\n",
    "            continue\n",
    "        # Self-loop\n",
    "        adj_matrix[token.i, token.i] = 1\n",
    "        # Dependency edge; the connection is undirected\n",
    "        if token.head.i < max_len:\n",
    "            adj_matrix[token.i, token.head.i] = 1\n",
    "            adj_matrix[token.head.i, token.i] = 1\n",
    "\n",
    "    return adj_matrix\n",
    "\n",
    "# Example text\n",
    "text = \"This is a fun project.\"\n",
    "max_len = 10  # maximum number of tokens\n",
    "text_adj = create_text_adjacency_matrix(text, max_len)\n",
    "\n",
    "print(\"Text adjacency matrix:\\n\", text_adj)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 第三步：模拟图像的邻接矩阵\n",
    "\n",
    "为了模拟一个图像邻接矩阵，我们将假设图像被分割成了9个块，并且相邻块之间有边连接。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_image_adjacency_matrix(num_patches, grid_width=3):\n",
    "    \"\"\"\n",
    "    Create a grid adjacency matrix for image patches (4-connectivity).\n",
    "\n",
    "    Args:\n",
    "    num_patches (int): Number of image patches.\n",
    "    grid_width (int): Patches per row. Defaults to 3, matching the original\n",
    "        hard-coded 3x3 layout, so existing calls are unaffected.\n",
    "\n",
    "    Returns:\n",
    "    adj_matrix (torch.Tensor): The (num_patches, num_patches) adjacency matrix.\n",
    "    \"\"\"\n",
    "    adj_matrix = torch.zeros((num_patches, num_patches))\n",
    "\n",
    "    for i in range(num_patches):\n",
    "        # Left neighbour (not in the first column)\n",
    "        if i % grid_width != 0:\n",
    "            adj_matrix[i, i - 1] = 1\n",
    "            adj_matrix[i - 1, i] = 1\n",
    "        # Right neighbour (not in the last column). The extra upper-bound\n",
    "        # check fixes an IndexError the original raised whenever num_patches\n",
    "        # was not a multiple of the grid width.\n",
    "        if (i + 1) % grid_width != 0 and i + 1 < num_patches:\n",
    "            adj_matrix[i, i + 1] = 1\n",
    "            adj_matrix[i + 1, i] = 1\n",
    "        # Upper neighbour\n",
    "        if i >= grid_width:\n",
    "            adj_matrix[i, i - grid_width] = 1\n",
    "            adj_matrix[i - grid_width, i] = 1\n",
    "        # Lower neighbour\n",
    "        if i + grid_width < num_patches:\n",
    "            adj_matrix[i, i + grid_width] = 1\n",
    "            adj_matrix[i + grid_width, i] = 1\n",
    "\n",
    "    return adj_matrix\n",
    "\n",
    "# Example image adjacency matrix (3x3 grid, 9 patches)\n",
    "image_adj = create_image_adjacency_matrix(9)\n",
    "\n",
    "print(\"Image adjacency matrix:\\n\", image_adj)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 第四步：获取文本和图像的嵌入表示\n",
    "\n",
    "我们使用预训练的 `BERT` 模型获取文本的嵌入，并为图像块生成随机嵌入来模拟图像特征。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the pretrained BERT model and tokenizer\n",
    "tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n",
    "bert_model = BertModel.from_pretrained('bert-base-uncased')\n",
    "\n",
    "def get_text_embeddings(text, max_len):\n",
    "    \"\"\"\n",
    "    Get BERT embeddings for the input text.\n",
    "    \n",
    "    Args:\n",
    "    text (str): Input text.\n",
    "    max_len (int): Sequence length; input is padded/truncated to this length.\n",
    "    \n",
    "    Returns:\n",
    "    text_embeds (torch.Tensor): Last-layer token embeddings, batch dim removed\n",
    "        (shape (max_len, hidden_size); 768 for bert-base-uncased).\n",
    "    \"\"\"\n",
    "    inputs = tokenizer(text, return_tensors=\"pt\", padding='max_length', max_length=max_len, truncation=True)\n",
    "    outputs = bert_model(**inputs)\n",
    "    text_embeds = outputs.last_hidden_state.squeeze(0)  # last hidden layer, squeeze the batch dim\n",
    "    return text_embeds\n",
    "\n",
    "# Text embeddings\n",
    "text_embeds = get_text_embeddings(text, max_len)\n",
    "print(\"Text embeddings shape:\", text_embeds.shape)\n",
    "\n",
    "# Randomly generated image-patch embeddings\n",
    "image_embeds = torch.randn((9, 768))  # 9 image patches, each a 768-dim embedding\n",
    "print(\"Image embeddings shape:\", image_embeds.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 第五步：传递给模型进行前向传播\n",
    "\n",
    "现在我们将文本和图像的嵌入表示以及它们的邻接矩阵传递给之前实现的模型。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use the previously defined CMHICL model\n",
    "# NOTE(review): CMHICLModel is referenced but its definition does not appear\n",
    "# earlier in this notebook -- it must be defined before this cell runs.\n",
    "model = CMHICLModel(embed_dim=768, num_heads=4, graph_out_dim=512)\n",
    "\n",
    "# Forward pass\n",
    "output = model(text_embeds, image_embeds, text_adj, image_adj)\n",
    "\n",
    "print(\"Model output:\", output)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 完整代码\n",
    "\n",
    "整合以上所有步骤，完整代码如下："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import spacy\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from transformers import BertTokenizer, BertModel\n",
    "\n",
    "# Load the pretrained spaCy pipeline\n",
    "nlp = spacy.load(\"en_core_web_sm\")\n",
    "\n",
    "def create_text_adjacency_matrix(text, max_len):\n",
    "    \"\"\"Dependency-parse text and return a (max_len, max_len) adjacency matrix.\"\"\"\n",
    "    doc = nlp(text)\n",
    "    adj_matrix = torch.zeros((max_len, max_len))\n",
    "    for token in doc:\n",
    "        if token.i >= max_len:  # truncate long inputs instead of raising IndexError\n",
    "            continue\n",
    "        adj_matrix[token.i, token.i] = 1\n",
    "        if token.head.i < max_len:\n",
    "            adj_matrix[token.i, token.head.i] = 1\n",
    "            adj_matrix[token.head.i, token.i] = 1\n",
    "    return adj_matrix\n",
    "\n",
    "def create_image_adjacency_matrix(num_patches):\n",
    "    \"\"\"Return a 4-connectivity grid adjacency matrix (3 patches per row).\"\"\"\n",
    "    adj_matrix = torch.zeros((num_patches, num_patches))\n",
    "    for i in range(num_patches):\n",
    "        if i % 3 != 0:\n",
    "            adj_matrix[i, i - 1] = 1\n",
    "            adj_matrix[i - 1, i] = 1\n",
    "        if (i + 1) % 3 != 0 and i + 1 < num_patches:  # bound check avoids IndexError\n",
    "            adj_matrix[i, i + 1] = 1\n",
    "            adj_matrix[i + 1, i] = 1\n",
    "        if i >= 3:\n",
    "            adj_matrix[i, i - 3] = 1\n",
    "            adj_matrix[i - 3, i] = 1\n",
    "        if i < num_patches - 3:\n",
    "            adj_matrix[i, i + 3] = 1\n",
    "            adj_matrix[i + 3, i] = 1\n",
    "    return adj_matrix\n",
    "\n",
    "def get_text_embeddings(text, max_len):\n",
    "    \"\"\"Encode text with BERT and return last-layer token embeddings.\"\"\"\n",
    "    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n",
    "    bert_model = BertModel.from_pretrained('bert-base-uncased')\n",
    "    inputs = tokenizer(text, return_tensors=\"pt\", padding='max_length', max_length=max_len, truncation=True)\n",
    "    outputs = bert_model(**inputs)\n",
    "    text_embeds = outputs.last_hidden_state.squeeze(0)\n",
    "    return text_embeds\n",
    "\n",
    "class CMHICLModel(nn.Module):\n",
    "    \"\"\"Minimal reference implementation so the cell is runnable end to end.\n",
    "\n",
    "    The original cell had only a placeholder comment in the class body,\n",
    "    which is a SyntaxError. This version applies one graph layer per\n",
    "    modality, then cross-modal attention (text queries over image patches).\n",
    "    Swap in the full CMHICL definition if available.\n",
    "    \"\"\"\n",
    "    def __init__(self, embed_dim, num_heads, graph_out_dim):\n",
    "        super().__init__()\n",
    "        self.text_graph = nn.Linear(embed_dim, graph_out_dim)\n",
    "        self.image_graph = nn.Linear(embed_dim, graph_out_dim)\n",
    "        self.cross_attn = nn.MultiheadAttention(graph_out_dim, num_heads, batch_first=True)\n",
    "\n",
    "    def forward(self, text_embeds, image_embeds, text_adj, image_adj):\n",
    "        # Simple graph step: project, aggregate neighbours, nonlinearity.\n",
    "        text_h = F.relu(text_adj @ self.text_graph(text_embeds))\n",
    "        image_h = F.relu(image_adj @ self.image_graph(image_embeds))\n",
    "        # Cross-modal attention: text tokens attend over image patches.\n",
    "        fused, _ = self.cross_attn(text_h.unsqueeze(0), image_h.unsqueeze(0), image_h.unsqueeze(0))\n",
    "        return fused.squeeze(0)\n",
    "\n",
    "# Example text and image adjacency matrices\n",
    "text = \"This is a fun project.\"\n",
    "max_len = 10\n",
    "text_adj = create_text_adjacency_matrix(text, max_len)\n",
    "image_adj = create_image_adjacency_matrix(9)\n",
    "\n",
    "# Text and image embeddings\n",
    "text_embeds = get_text_embeddings(text, max_len)\n",
    "image_embeds = torch.randn((9, 768))\n",
    "\n",
    "# Build the model and run a forward pass\n",
    "# (the original call was split mid-arguments by a stray blank line; rejoined)\n",
    "model = CMHICLModel(embed_dim=768, num_heads=4, graph_out_dim=512)\n",
    "output = model(text_embeds, image_embeds, text_adj, image_adj)\n",
    "\n",
    "print(\"Model output:\", output)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import torch.nn.functional as F\n",
    "import numpy as np\n",
    "import random\n",
    "import spacy\n",
    "\n",
    "# Randomly initialised GloVe-style text features (word-vector dim assumed 300)\n",
    "class GloVeEmbedding(nn.Module):\n",
    "    \"\"\"Trainable embedding lookup standing in for pretrained GloVe vectors.\"\"\"\n",
    "    def __init__(self, embedding_dim, vocab_size):\n",
    "        super(GloVeEmbedding, self).__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, embedding_dim)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        return self.embedding(x)\n",
    "\n",
    "# GCN layer\n",
    "class GCNLayer(nn.Module):\n",
    "    \"\"\"One graph-convolution step: relu(adj @ (x @ W)).\n",
    "\n",
    "    NOTE(review): the adjacency matrix is used as-is -- no symmetric\n",
    "    normalisation (D^-1/2 A D^-1/2) is applied; confirm this is intended.\n",
    "    \"\"\"\n",
    "    def __init__(self, input_dim, output_dim):\n",
    "        super(GCNLayer, self).__init__()\n",
    "        self.weight = nn.Parameter(torch.FloatTensor(input_dim, output_dim))\n",
    "        self.reset_parameters()\n",
    "        \n",
    "    def reset_parameters(self):\n",
    "        nn.init.xavier_uniform_(self.weight)\n",
    "\n",
    "    def forward(self, x, adj):\n",
    "        h = torch.matmul(x, self.weight)\n",
    "        output = torch.matmul(adj, h)\n",
    "        return F.relu(output)\n",
    "\n",
    "# Self-attention layer\n",
    "class SelfAttention(nn.Module):\n",
    "    \"\"\"Scaled dot-product self-attention over a (batch, seq, dim) sequence.\n",
    "\n",
    "    NOTE(review): `heads` is stored but the projections are never split\n",
    "    into heads, so this is effectively single-head attention.\n",
    "    \"\"\"\n",
    "    def __init__(self, embed_dim, heads):\n",
    "        super(SelfAttention, self).__init__()\n",
    "        self.embed_dim = embed_dim\n",
    "        self.heads = heads\n",
    "        self.values = nn.Linear(embed_dim, embed_dim, bias=False)\n",
    "        self.keys = nn.Linear(embed_dim, embed_dim, bias=False)\n",
    "        self.queries = nn.Linear(embed_dim, embed_dim, bias=False)\n",
    "        self.fc_out = nn.Linear(embed_dim, embed_dim)\n",
    "\n",
    "    def forward(self, x):\n",
    "        batch_size, seq_len, embed_dim = x.size()\n",
    "        values = self.values(x)\n",
    "        keys = self.keys(x)\n",
    "        queries = self.queries(x)\n",
    "        \n",
    "        # Attention scores, scaled by sqrt(embed_dim) (no per-head split; see class NOTE)\n",
    "        energy = torch.bmm(queries, keys.transpose(1, 2))\n",
    "        attention = torch.softmax(energy / (embed_dim ** (1/2)), dim=2)\n",
    "        \n",
    "        out = torch.bmm(attention, values)\n",
    "        out = self.fc_out(out)\n",
    "        return out\n",
    "\n",
    "# Full model\n",
    "class SentimentGCNAttentionModel(nn.Module):\n",
    "    \"\"\"Embedding -> 2x GCN -> LSTM -> self-attention -> linear classifier.\"\"\"\n",
    "    def __init__(self, vocab_size, embedding_dim, gcn_dim, hidden_dim, num_classes):\n",
    "        super(SentimentGCNAttentionModel, self).__init__()\n",
    "        self.embedding = GloVeEmbedding(embedding_dim, vocab_size)\n",
    "        self.gcn1 = GCNLayer(embedding_dim, gcn_dim)\n",
    "        self.gcn2 = GCNLayer(gcn_dim, gcn_dim)\n",
    "        self.lstm = nn.LSTM(gcn_dim, hidden_dim, batch_first=True)\n",
    "        self.attention = SelfAttention(hidden_dim, heads=8)\n",
    "        self.fc = nn.Linear(hidden_dim, num_classes)\n",
    "        \n",
    "    def forward(self, x, adj):\n",
    "        embed = self.embedding(x)\n",
    "        gcn_out = self.gcn1(embed, adj)\n",
    "        gcn_out = self.gcn2(gcn_out, adj)\n",
    "        lstm_out, _ = self.lstm(gcn_out)\n",
    "        attention_out = self.attention(lstm_out)\n",
    "        logits = self.fc(attention_out[:, -1, :])  # classify from the last time step only\n",
    "        return logits\n",
    "\n",
    "# Data preparation\n",
    "batch_size = 32\n",
    "seq_len = 10\n",
    "vocab_size = 5000\n",
    "embedding_dim = 300\n",
    "gcn_dim = 128\n",
    "hidden_dim = 256\n",
    "num_classes = 3\n",
    "\n",
    "# Simulated inputs\n",
    "x = torch.randint(0, vocab_size, (batch_size, seq_len))  \t# random token ids\n",
    "adj = torch.rand(batch_size, seq_len, seq_len)  \t\t\t# random (dense) adjacency matrices\n",
    "\n",
    "# Build the model\n",
    "model = SentimentGCNAttentionModel(vocab_size, embedding_dim, gcn_dim, hidden_dim, num_classes)\n",
    "\n",
    "# Forward pass\n",
    "output = model(x, adj)\n",
    "print(output.shape)  # output shape: [batch_size, num_classes]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import torch.nn.functional as F\n",
    "import numpy as np\n",
    "\n",
    "# Simulated audio features (each audio feature vector assumed to be 128-dim)\n",
    "class AudioEmbedding(nn.Module):\n",
    "    \"\"\"Linear projection + ReLU over audio features (dimension unchanged).\"\"\"\n",
    "    def __init__(self, audio_dim):\n",
    "        super(AudioEmbedding, self).__init__()\n",
    "        self.fc = nn.Linear(audio_dim, audio_dim)\n",
    "\n",
    "    def forward(self, x):\n",
    "        return F.relu(self.fc(x))\n",
    "\n",
    "# GCN + self-attention model extended with audio features\n",
    "# NOTE(review): depends on GloVeEmbedding, GCNLayer and SelfAttention from the\n",
    "# previous cell -- that cell must be run first.\n",
    "class MultiModalGCNAttentionModel(nn.Module):\n",
    "    \"\"\"Text branch (GCN + LSTM) and audio branch (LSTM), fused by concatenation.\"\"\"\n",
    "    def __init__(self, vocab_size, embedding_dim, gcn_dim, audio_dim, hidden_dim, num_classes):\n",
    "        super(MultiModalGCNAttentionModel, self).__init__()\n",
    "        self.text_embedding = GloVeEmbedding(embedding_dim, vocab_size)\n",
    "        self.audio_embedding = AudioEmbedding(audio_dim)\n",
    "        \n",
    "        # GCN over text features\n",
    "        self.gcn1 = GCNLayer(embedding_dim, gcn_dim)\n",
    "        self.gcn2 = GCNLayer(gcn_dim, gcn_dim)\n",
    "        \n",
    "        # LSTM over text features (NOTE(review): the original comment said\n",
    "        # \"Bi-LSTM\", but bidirectional=True is not set -- this is unidirectional)\n",
    "        self.text_lstm = nn.LSTM(gcn_dim, hidden_dim, batch_first=True)\n",
    "        \n",
    "        # LSTM over audio features\n",
    "        self.audio_lstm = nn.LSTM(audio_dim, hidden_dim, batch_first=True)\n",
    "        \n",
    "        # Self-attention layer, shared (same weights) between text and audio\n",
    "        self.attention = SelfAttention(hidden_dim, heads=8)\n",
    "        \n",
    "        # Classifier over the fused representation\n",
    "        self.fc = nn.Linear(hidden_dim * 2, num_classes)  # 2x hidden size: text and audio concatenated\n",
    "\n",
    "    def forward(self, text, adj, audio):\n",
    "        # Text branch\n",
    "        text_embed = self.text_embedding(text)\n",
    "        gcn_out = self.gcn1(text_embed, adj)\n",
    "        gcn_out = self.gcn2(gcn_out, adj)\n",
    "        text_lstm_out, _ = self.text_lstm(gcn_out)\n",
    "        \n",
    "        # Audio branch\n",
    "        audio_embed = self.audio_embedding(audio)\n",
    "        audio_lstm_out, _ = self.audio_lstm(audio_embed)\n",
    "        \n",
    "        # Self-attention on each branch separately (shared module)\n",
    "        text_attention_out = self.attention(text_lstm_out)\n",
    "        audio_attention_out = self.attention(audio_lstm_out)\n",
    "        \n",
    "        # Concatenate last-time-step text and audio features\n",
    "        combined = torch.cat((text_attention_out[:, -1, :], audio_attention_out[:, -1, :]), dim=1)\n",
    "        \n",
    "        # Classification output\n",
    "        logits = self.fc(combined)\n",
    "        return logits\n",
    "\n",
    "# Data preparation\n",
    "batch_size = 32\n",
    "seq_len = 10\n",
    "vocab_size = 5000\n",
    "embedding_dim = 300\n",
    "gcn_dim = 128\n",
    "audio_dim = 128\n",
    "hidden_dim = 256\n",
    "num_classes = 3\n",
    "\n",
    "# Simulated inputs\n",
    "text = torch.randint(0, vocab_size, (batch_size, seq_len))  # random token ids\n",
    "adj = torch.rand(batch_size, seq_len, seq_len)  # random adjacency matrices\n",
    "audio = torch.rand(batch_size, seq_len, audio_dim)  # random audio input\n",
    "\n",
    "# Build the model\n",
    "model = MultiModalGCNAttentionModel(vocab_size, embedding_dim, gcn_dim, audio_dim, hidden_dim, num_classes)\n",
    "\n",
    "# Forward pass\n",
    "output = model(text, adj, audio)\n",
    "print(output.shape)  # output shape: [batch_size, num_classes]"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
