{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 发散研究-跨模态自注意力机制的QKV交叉使用\n",
    "跨模态多头注意力机制\n",
    "在DCCMCI模块中，多头注意力（Multi-Head Attention）机制确实意味着使用多个独立的自注意力机制来处理输入，每个自注意力机制都有自己的查询（Q）、键（K）和值（V）向量。这些独立的注意力机制允许模型在不同的表示子空间中关注不同的信息。\n",
    "\n",
    "对于多模态数据，每个模态可以有其自己的多头注意力机制，用于在同一模态内部捕获上下文信息。然而，当探索跨模态的上下文信息时，可以设计一种机制，其中不同模态的查询、键和值向量可以交互使用，以实现跨模态的多头注意力。\n",
    "\n",
    "下面是一个简化的代码示例，展示了如何在PyTorch中实现跨模态的多头注意力机制，其中不同模态的QKV可以交叉结合使用：\n",
    "\n",
    "请注意，这个示例中的`CrossModalMultiHeadAttention`类仅展示了文本模态到音频和视频模态的注意力计算。在实际应用中，您可能还需要实现音频到文本和视频的注意力，以及视频到文本和音频的注意力。此外，输出融合方式（例如连接或平均）取决于您的具体需求。\n",
    "\n",
    "此外，`mask`参数用于在注意力机制中排除某些位置（例如填充位置），但在这个简化的示例中并未详细使用。在实际应用中，您可能需要根据数据的具体情况来实现适当的掩码。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "class CrossModalMultiHeadAttention(nn.Module):\n",
    "    def __init__(self, d_model, num_heads):\n",
    "        super(CrossModalMultiHeadAttention, self).__init__()\n",
    "        self.num_heads = num_heads\n",
    "        self.d_model = d_model\n",
    "    \n",
    "        assert d_model % self.num_heads == 0\n",
    "    \n",
    "        self.depth = d_model // self.num_heads\n",
    "    \n",
    "        self.wq = nn.Linear(d_model, d_model)\n",
    "        self.wk = nn.Linear(d_model, d_model)\n",
    "        self.wv = nn.Linear(d_model, d_model)\n",
    "        self.dense = nn.Linear(d_model, d_model)\n",
    "    \n",
    "        self.split_heads = lambda x: x.reshape(x.size(0), x.size(1), self.num_heads, self.depth).permute(0, 2, 1, 3)\n",
    "    \n",
    "    def scaled_dot_product_attention(self, q, k, v, mask=None):\n",
    "        matmul_qk = torch.matmul(q, k.transpose(-2, -1))\n",
    "    \n",
    "        dk = k.size(-1)\n",
    "        scaled_attention_logits = matmul_qk / torch.sqrt(torch.tensor(dk, dtype=torch.float32))\n",
    "    \n",
    "        if mask is not None:\n",
    "            scaled_attention_logits += (mask * -1e9)  # Add the mask to the scaled tensor.\n",
    "    \n",
    "        attention_weights = F.softmax(scaled_attention_logits, dim=-1)\n",
    "    \n",
    "        output = torch.matmul(attention_weights, v)\n",
    "        return output, attention_weights\n",
    "  \n",
    "    def forward(self, text_rep, audio_rep, video_rep, mask=None):\n",
    "        # Split the embedding into self.num_heads different pieces\n",
    "        text_q, audio_q, video_q = [self.split_heads(self.wq(rep)) for rep in [text_rep, audio_rep, video_rep]]\n",
    "        text_k, audio_k, video_k = [self.split_heads(self.wk(rep)) for rep in [text_rep, audio_rep, video_rep]]\n",
    "        text_v, audio_v, video_v = [self.split_heads(self.wv(rep)) for rep in [text_rep, audio_rep, video_rep]]\n",
    "    \n",
    "        # Compute attention for text -> other modalities\n",
    "        text_to_audio_output, _ = self.scaled_dot_product_attention(text_q, audio_k, audio_v, mask)\n",
    "        text_to_video_output, _ = self.scaled_dot_product_attention(text_q, video_k, video_v, mask)\n",
    "    \n",
    "        # Compute attention for audio -> other modalities (similarly for video)\n",
    "        # ... (omitted for brevity)\n",
    "    \n",
    "        # Combine the attention outputs (e.g., concatenate or average)\n",
    "        # This step depends on how you want to fuse the cross-modal information\n",
    "        combined_output = torch.cat([text_to_audio_output.permute(0, 2, 1, 3).contiguous().view(text_rep.size(0), -1),\n",
    "                                     text_to_video_output.permute(0, 2, 1, 3).contiguous().view(text_rep.size(0), -1)], dim=1)\n",
    "    \n",
    "        # Apply a dense layer\n",
    "        combined_output = self.dense(combined_output)\n",
    "        return combined_output\n",
    "\n",
    "# Usage example with random stand-in modality representations.\n",
    "# (The original referenced undefined text_rep/audio_rep/video_rep.)\n",
    "torch.manual_seed(0)  # reproducible demo\n",
    "batch, seq_len, d_model = 2, 10, 128\n",
    "text_rep = torch.randn(batch, seq_len, d_model)\n",
    "audio_rep = torch.randn(batch, seq_len, d_model)\n",
    "video_rep = torch.randn(batch, seq_len, d_model)\n",
    "cross_modal_attention = CrossModalMultiHeadAttention(d_model=d_model, num_heads=8)\n",
    "output = cross_modal_attention(text_rep, audio_rep, video_rep)\n",
    "output.shape  # rich display: expected (batch, seq_len, d_model)"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
