{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Readme: Modular Co-Attention Building Blocks"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Dependency imports"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import numpy as np\n",
    "import math"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Utility functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Masking the sequence mask\n",
    "def make_mask(feature):\n",
    "    return (torch.sum(\n",
    "        torch.abs(feature),\n",
    "        dim=-1\n",
    "    ) == 0).unsqueeze(1).unsqueeze(2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Base module"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class FFN(nn.Module):\n",
    "    \"\"\"Position-wise feed-forward network.\n",
    "\n",
    "    A two-layer MLP (hidden_size -> ff_size -> hidden_size) with ReLU\n",
    "    and dropout, applied independently at every sequence position.\n",
    "\n",
    "    Args:\n",
    "        args: configuration providing hidden_size, ff_size and dropout_r.\n",
    "    \"\"\"\n",
    "    def __init__(self, args):\n",
    "        super(FFN, self).__init__()\n",
    "\n",
    "        self.mlp = MLP(\n",
    "            in_size=args.hidden_size,\n",
    "            mid_size=args.ff_size,\n",
    "            out_size=args.hidden_size,\n",
    "            dropout_r=args.dropout_r,\n",
    "            use_relu=True\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Apply the feed-forward transform; the input shape is preserved.\"\"\"\n",
    "        return self.mlp(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MHAtt(nn.Module):\n",
    "    \"\"\"Multi-Head Attention\n",
    "\n",
    "    Args:\n",
    "        nn (_type_): _description_\n",
    "    \"\"\"    \n",
    "    def __init__(self, args):\n",
    "        super(MHAtt, self).__init__()\n",
    "        self.args = args\n",
    "\n",
    "        self.linear_v = nn.Linear(args.hidden_size, args.hidden_size)\n",
    "        self.linear_k = nn.Linear(args.hidden_size, args.hidden_size)\n",
    "        self.linear_q = nn.Linear(args.hidden_size, args.hidden_size)\n",
    "        self.linear_merge = nn.Linear(args.hidden_size, args.hidden_size)\n",
    "\n",
    "        self.dropout = nn.Dropout(args.dropout_r)\n",
    "\n",
    "    def forward(self, v, k, q, mask):\n",
    "        n_batches = q.size(0)\n",
    "        v = self.linear_v(v).view(\n",
    "            n_batches,\n",
    "            -1,\n",
    "            self.args.multi_head,\n",
    "            int(self.args.hidden_size / self.args.multi_head)\n",
    "        ).transpose(1, 2)\n",
    "\n",
    "        k = self.linear_k(k).view(\n",
    "            n_batches,\n",
    "            -1,\n",
    "            self.args.multi_head,\n",
    "            int(self.args.hidden_size / self.args.multi_head)\n",
    "        ).transpose(1, 2)\n",
    "\n",
    "        q = self.linear_q(q).view(\n",
    "            n_batches,\n",
    "            -1,\n",
    "            self.args.multi_head,\n",
    "            int(self.args.hidden_size / self.args.multi_head)\n",
    "        ).transpose(1, 2)\n",
    "\n",
    "        atted = self.att(v, k, q, mask)\n",
    "\n",
    "        atted = atted.transpose(1, 2).contiguous().view(\n",
    "            n_batches,\n",
    "            -1,\n",
    "            self.args.hidden_size\n",
    "        )\n",
    "        atted = self.linear_merge(atted)\n",
    "\n",
    "        return atted\n",
    "\n",
    "    def att(self, value, key, query, mask):\n",
    "        d_k = query.size(-1)\n",
    "\n",
    "        scores = torch.matmul(\n",
    "            query, key.transpose(-2, -1)\n",
    "        ) / math.sqrt(d_k)\n",
    "\n",
    "        if mask is not None:\n",
    "            scores = scores.masked_fill(mask, -1e9)\n",
    "\n",
    "        att_map = F.softmax(scores, dim=-1)\n",
    "        att_map = self.dropout(att_map)\n",
    "\n",
    "        return torch.matmul(att_map, value)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LayerNorm(nn.Module):\n",
    "    def __init__(self, size, eps=1e-6):\n",
    "        super(LayerNorm, self).__init__()\n",
    "        self.eps = eps\n",
    "\n",
    "        self.a_2 = nn.Parameter(torch.ones(size))\n",
    "        self.b_2 = nn.Parameter(torch.zeros(size))\n",
    "\n",
    "    def forward(self, x):\n",
    "        mean = x.mean(-1, keepdim=True)\n",
    "        std = x.std(-1, keepdim=True)\n",
    "        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SA(nn.Module):\n",
    "    \"\"\"Transformer-style self-attention block.\n",
    "\n",
    "    Multi-head self-attention followed by a feed-forward network, each\n",
    "    wrapped in a residual connection, dropout and layer normalization.\n",
    "    \"\"\"\n",
    "    def __init__(self, args):\n",
    "        super(SA, self).__init__()\n",
    "\n",
    "        self.mhatt = MHAtt(args)\n",
    "        self.ffn = FFN(args)\n",
    "\n",
    "        self.dropout1 = nn.Dropout(args.dropout_r)\n",
    "        self.norm1 = LayerNorm(args.hidden_size)\n",
    "        self.dropout2 = nn.Dropout(args.dropout_r)\n",
    "        self.norm2 = LayerNorm(args.hidden_size)\n",
    "\n",
    "    def forward(self, y, y_mask):\n",
    "        \"\"\"Args: y [batch, seq, hidden]; y_mask broadcastable bool mask.\"\"\"\n",
    "        attended = self.mhatt(y, y, y, y_mask)\n",
    "        y = self.norm1(y + self.dropout1(attended))\n",
    "\n",
    "        transformed = self.ffn(y)\n",
    "        y = self.norm2(y + self.dropout2(transformed))\n",
    "\n",
    "        return y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SGA(nn.Module):\n",
    "    \"\"\"Self-guided attention block.\n",
    "\n",
    "    Runs self-attention on x, then cross-attention where x queries y,\n",
    "    then a feed-forward network; each stage uses a residual connection,\n",
    "    dropout and layer normalization.\n",
    "    \"\"\"\n",
    "    def __init__(self, args):\n",
    "        super(SGA, self).__init__()\n",
    "\n",
    "        self.mhatt1 = MHAtt(args)\n",
    "        self.mhatt2 = MHAtt(args)\n",
    "        self.ffn = FFN(args)\n",
    "\n",
    "        self.dropout1 = nn.Dropout(args.dropout_r)\n",
    "        self.norm1 = LayerNorm(args.hidden_size)\n",
    "        self.dropout2 = nn.Dropout(args.dropout_r)\n",
    "        self.norm2 = LayerNorm(args.hidden_size)\n",
    "        self.dropout3 = nn.Dropout(args.dropout_r)\n",
    "        self.norm3 = LayerNorm(args.hidden_size)\n",
    "\n",
    "    def forward(self, x, y, x_mask, y_mask):\n",
    "        \"\"\"x attends to itself, then to y; returns the updated x.\"\"\"\n",
    "        self_att = self.mhatt1(v=x, k=x, q=x, mask=x_mask)\n",
    "        x = self.norm1(x + self.dropout1(self_att))\n",
    "\n",
    "        cross_att = self.mhatt2(v=y, k=y, q=x, mask=y_mask)\n",
    "        x = self.norm2(x + self.dropout2(cross_att))\n",
    "\n",
    "        x = self.norm3(x + self.dropout3(self.ffn(x)))\n",
    "\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "class FC(nn.Module):\n",
    "    \"\"\"Fully Connected Network 全连接层\n",
    "    包含ReLU active function 和 Dropout 层\n",
    "\n",
    "    Args:\n",
    "        nn (_type_): pytorch base class\n",
    "    \"\"\"    \n",
    "    def __init__(self, in_size :int, out_size :int, dropout_r :float = 0., use_relu :bool =True):\n",
    "        \"\"\"init function\n",
    "\n",
    "        Args:\n",
    "            in_size (int): 输入特征的维度\n",
    "            out_size (int): 输出特征的维度\n",
    "            dropout_r (float, optional): Dropout比率，用于防止过拟合. Defaults to 0..\n",
    "            use_relu (bool, optional): 是否使用ReLU激活函数. Defaults to True.\n",
    "        \"\"\"        \n",
    "        super(FC, self).__init__()\n",
    "        self.dropout_r = dropout_r\n",
    "        self.use_relu = use_relu\n",
    "\n",
    "        # 将输入特征映射到输出特征\n",
    "        self.linear = nn.Linear(in_size, out_size)\n",
    "\n",
    "        if use_relu:\n",
    "            self.relu = nn.ReLU(inplace=True)\n",
    "\n",
    "        if dropout_r > 0:\n",
    "            self.dropout = nn.Dropout(dropout_r)\n",
    "\n",
    "    def forward(self, x :'torch.Tensor'):\n",
    "        \"\"\"前向传播\n",
    "\n",
    "        Args:\n",
    "            x (torch.Tensor): 输入的张量\n",
    "\n",
    "        Returns:\n",
    "            (torch.Tensor): 返回处理后的张量\n",
    "        \"\"\"        \n",
    "        x = self.linear(x)\n",
    "\n",
    "        # 如果use_relu为True，则通过ReLU激活函数处理x\n",
    "        if self.use_relu:\n",
    "            x = self.relu(x)\n",
    "\n",
    "        # 如果dropout_r大于0，则通过Dropout层处理x\n",
    "        if self.dropout_r > 0:\n",
    "            x = self.dropout(x)\n",
    "\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MLP(nn.Module):\n",
    "    \"\"\"Two-layer perceptron: in_size -> mid_size -> out_size.\n",
    "\n",
    "    The first layer (FC) applies optional ReLU and dropout; the second is\n",
    "    a plain linear projection.\n",
    "\n",
    "    Args:\n",
    "        in_size (int): input feature dimension.\n",
    "        mid_size (int): hidden layer dimension.\n",
    "        out_size (int): output feature dimension.\n",
    "        dropout_r (float, optional): dropout rate applied inside the first\n",
    "            layer. Defaults to 0.\n",
    "        use_relu (bool, optional): whether the first layer uses ReLU.\n",
    "            Defaults to True.\n",
    "    \"\"\"\n",
    "    def __init__(self, in_size :int, mid_size :int, out_size :int, dropout_r :float = 0., use_relu :bool =True):\n",
    "        super(MLP, self).__init__()\n",
    "\n",
    "        # Hidden projection with activation/dropout, then the output head.\n",
    "        self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu)\n",
    "        self.linear = nn.Linear(mid_size, out_size)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Map [..., in_size] to [..., out_size].\"\"\"\n",
    "        hidden = self.fc(x)\n",
    "        return self.linear(hidden)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Glimpse layer (attention-based flattening)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "class AttFlat(nn.Module):\n",
    "    \"\"\"Attention-based flattening (glimpse) layer.\n",
    "\n",
    "    Computes `flat_glimpse` attention distributions over the sequence and\n",
    "    uses each one to take a weighted sum of the inputs.  With merge=True\n",
    "    the glimpses are concatenated and projected to hidden_size * 2;\n",
    "    otherwise they are stacked along dim 1.\n",
    "    \"\"\"\n",
    "    def __init__(self, args, flat_glimpse: int, merge: bool = False):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            args: configuration providing hidden_size, flat_mlp_size and\n",
    "                dropout_r.\n",
    "            flat_glimpse (int): number of attention glimpses to produce.\n",
    "            merge (bool, optional): concatenate and project the glimpses\n",
    "                instead of stacking them. Defaults to False.\n",
    "        \"\"\"\n",
    "        super(AttFlat, self).__init__()\n",
    "        self.args = args\n",
    "        self.merge = merge\n",
    "        self.flat_glimpse = flat_glimpse\n",
    "\n",
    "        # One attention logit per glimpse at every sequence position.\n",
    "        self.mlp = MLP(\n",
    "            in_size=args.hidden_size,\n",
    "            mid_size=args.flat_mlp_size,\n",
    "            out_size=flat_glimpse,\n",
    "            dropout_r=args.dropout_r,\n",
    "            use_relu=True\n",
    "        )\n",
    "\n",
    "        if self.merge:\n",
    "            self.linear_merge = nn.Linear(args.hidden_size * flat_glimpse, args.hidden_size * 2)\n",
    "\n",
    "    def forward(self, x: 'torch.Tensor', x_mask: 'torch.Tensor'):\n",
    "        \"\"\"Flatten x with attention over the sequence dimension.\n",
    "\n",
    "        Args:\n",
    "            x (torch.Tensor): [batch_size, seq_len, hidden_size] features.\n",
    "            x_mask (torch.Tensor): [batch_size, 1, 1, seq_len] bool padding\n",
    "                mask (True = padded position), or None.\n",
    "\n",
    "        Raises:\n",
    "            ValueError: if the mask length does not match the sequence, or\n",
    "                the output violates the expected shape invariant.\n",
    "\n",
    "        Returns:\n",
    "            torch.Tensor: [batch_size, hidden_size * 2] if merge=True,\n",
    "            otherwise [batch_size, flat_glimpse, hidden_size].\n",
    "        \"\"\"\n",
    "        # [batch_size, seq_len, flat_glimpse]\n",
    "        att = self.mlp(x)\n",
    "\n",
    "        if x_mask is not None:\n",
    "            # [batch_size, 1, 1, seq_len] -> [batch_size, seq_len, 1]\n",
    "            x_mask_reshaped = x_mask.squeeze(1).squeeze(1).unsqueeze(2)\n",
    "            if att.size(1) != x_mask_reshaped.size(1):\n",
    "                raise ValueError(\"Mask and attention size mismatch!\")\n",
    "            # Padded positions get ~0 probability after the softmax.\n",
    "            att = att.masked_fill(x_mask_reshaped, -1e9)\n",
    "\n",
    "        # Normalize over the sequence dimension.\n",
    "        att = F.softmax(att, dim=1)\n",
    "\n",
    "        # Glimpse i: attention-weighted sum -> [batch_size, hidden_size].\n",
    "        att_list = [\n",
    "            torch.sum(att[:, :, i:i + 1] * x, dim=1)\n",
    "            for i in range(self.flat_glimpse)\n",
    "        ]\n",
    "\n",
    "        if self.merge:\n",
    "            # [batch_size, hidden_size * flat_glimpse] -> [batch_size, hidden_size * 2]\n",
    "            x_atted = self.linear_merge(torch.cat(att_list, dim=1))\n",
    "            # BUG FIX: the original compared this 2-D output against the\n",
    "            # 3-D input, so indexing x_atted.shape[2] raised IndexError\n",
    "            # instead of producing a meaningful error message.\n",
    "            expected = (x.size(0), self.args.hidden_size * 2)\n",
    "        else:\n",
    "            # Stack glimpses along dim 1: [batch_size, flat_glimpse, hidden_size].\n",
    "            x_atted = torch.stack(att_list, dim=1)\n",
    "            expected = (x.size(0), self.flat_glimpse, self.args.hidden_size)\n",
    "\n",
    "        # Debug prints removed; the shape invariant is enforced instead.\n",
    "        if tuple(x_atted.shape) != expected:\n",
    "            error_message = (\n",
    "                f\"Dimension mismatch detected.\\n\"\n",
    "                f\"Need merge?: {self.merge}\\n\"\n",
    "                f\"x.shape: {x.shape}\\n\"\n",
    "                f\"x_atted.shape: {x_atted.shape}\\n\"\n",
    "                f\"expected: {expected}\\n\"\n",
    "            )\n",
    "            raise ValueError(error_message)\n",
    "        return x_atted"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Modular co-attention"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Block(nn.Module):\n",
    "    \"\"\"Modular co-attention 模块化协同注意力\n",
    "\n",
    "    Args:\n",
    "        nn (_type_): pytorch base class\n",
    "    \"\"\"    \n",
    "    def __init__(self, args, i :int):\n",
    "        \"\"\"init function\n",
    "\n",
    "        Args:\n",
    "            args (object): 包含模型参数的对象\n",
    "            i (int): 当前块的索引，用于判断是否是最后一层\n",
    "        \"\"\"        \n",
    "        super(Block, self).__init__()\n",
    "        self.args = args\n",
    "        # 初始化自注意力模块SA，处理输入x\n",
    "        self.sa1 = SA(args)\n",
    "        # 初始化自引导注意力模块SGA，处理输入y并用x引导注意\n",
    "        self.sa3 = SGA(args)\n",
    "\n",
    "        # 判断是否是最后一层，最后一层不进行特征扁平化处理\n",
    "        self.last = i == args.layer - 1\n",
    "        if not self.last:\n",
    "            # 针对语言模态的扁平化处理\n",
    "            self.att_lang = AttFlat(args, args.lang_seq_len, merge=False)\n",
    "            # 针对音频模态的扁平化处理\n",
    "            self.att_audio = AttFlat(args, args.audio_seq_len, merge=False)\n",
    "            # 针对语言模态的层归一化\n",
    "            self.norm_l = LayerNorm(args.hidden_size)\n",
    "            # 针对音频模态的层归一化\n",
    "            self.norm_i = LayerNorm(args.hidden_size)\n",
    "            # Dropout层，用于正则化\n",
    "            self.dropout = nn.Dropout(args.dropout_r)\n",
    "\n",
    "    def forward(self, x, x_mask, y, y_mask):\n",
    "        ax = self.sa1(x, x_mask)\n",
    "        ay = self.sa3(y, x, y_mask, x_mask)\n",
    "\n",
    "        x = ax + x # No sideeffect shape won't change\n",
    "        y = ay + y\n",
    "\n",
    "        if self.last:\n",
    "            return x, y\n",
    "\n",
    "        ax = self.att_lang(x, x_mask)\n",
    "        ay = self.att_audio(y, y_mask)\n",
    "        \n",
    "        # Ensure the dimensions match before adding\n",
    "        if ax.shape[1] != x.shape[1]:\n",
    "            error_message = (\n",
    "                f\"Dimension mismatch detected.\\n\"\n",
    "                f\"x.shape: {x.shape}\\n\"\n",
    "                f\"ax.shape: {ax.shape}\\n\"\n",
    "                f\"x.shape[1]: {x.shape[1]}\\n\"\n",
    "                f\"ax.shape[1]: {ax.shape[1]}\\n\"\n",
    "                f\"Current layer state is last? : {self.last}\"\n",
    "            )\n",
    "            raise ValueError(error_message)\n",
    "\n",
    "        if ay.shape[1] != y.shape[1]:\n",
    "            raise ValueError(f\"Dimension mismatch in y: {y.shape[1]} vs ay: {ay.shape[1]}\")\n",
    "        result_tensor_1 = self.norm_l(x + self.dropout(ax))\n",
    "        result_tensor_2 = self.norm_i(y + self.dropout(ay))\n",
    "        return result_tensor_1 , result_tensor_1\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Test case"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test\n",
      "Need merge?: False\n",
      "x.shape: torch.Size([10, 32, 512])\n",
      "x_mask.shape: torch.Size([10, 1, 1, 32])\n",
      "x_atted.shape: torch.Size([10, 32, 512])\n",
      "\n",
      "Test\n",
      "Need merge?: False\n",
      "x.shape: torch.Size([10, 64, 512])\n",
      "x_mask.shape: torch.Size([10, 1, 1, 64])\n",
      "x_atted.shape: torch.Size([10, 64, 512])\n",
      "\n",
      "torch.Size([10, 32, 512])\n",
      "torch.Size([10, 32, 512])\n"
     ]
    }
   ],
   "source": [
    "# Mock pretrained word embeddings: random (vocab_size, emb_size) matrix.\n",
    "def get_pretrained_emb(vocab_size, emb_size):\n",
    "    return np.random.rand(vocab_size, emb_size)\n",
    "\n",
    "\"\"\"   \n",
    "随机选择参数,无实际意义\n",
    "\"\"\"\n",
    "class MockArgs:\n",
    "    # Arbitrary hyper-parameters chosen only for this smoke test.\n",
    "    def __init__(self):\n",
    "        self.model = \"Model_LA\"\n",
    "        self.name = \"mymodel\"\n",
    "        self.task = \"emotion\"     \n",
    "        self.multi_head = 4        # number of attention heads\n",
    "        self.ff_size = 1024       # feed-forward hidden size\n",
    "        self.hidden_size = 512   # model hidden size\n",
    "        self.layer = 4           # number of co-attention blocks\n",
    "        self.batch_size = 60      \n",
    "        self.lr_base = 0.0001     \n",
    "        self.dropout_r = 0.1      \n",
    "        self.word_embed_size = 300  # word embedding dimension\n",
    "        self.audio_feat_size = 80   # audio feature dimension\n",
    "        self.lang_seq_len = 32      # language sequence length\n",
    "        self.audio_seq_len = 64     # audio sequence length\n",
    "        self.flat_mlp_size = 1024   # AttFlat MLP hidden size\n",
    "        self.flat_glimpses = 1      # number of flatten glimpses\n",
    "\n",
    "args = MockArgs()\n",
    "\n",
    "# Create a Block instance.\n",
    "block = Block(args, 0)  # index 0: the first block (not the last layer)\n",
    "\n",
    "# Build mock input data.\n",
    "vocab_size = 10000  # assumed vocabulary size\n",
    "pretrained_emb = get_pretrained_emb(vocab_size, args.word_embed_size)\n",
    "\n",
    "# Language inputs: token ids and random embeddings.\n",
    "x = torch.randint(0, vocab_size, (10, 32))  # (batch_size, seq_len)\n",
    "x_embedding = torch.randn(10, 32, args.hidden_size)  # (batch_size, seq_len, hidden_size)\n",
    "# Audio inputs.\n",
    "y = torch.randn(10, 64, args.hidden_size)  # last dim must equal hidden_size\n",
    "\"\"\" \n",
    "注意,这里的参数需要和args进行匹配\n",
    "self.lang_seq_len = 32      # 语言序列长度\n",
    "self.audio_seq_len = 64     # 音频序列长度\n",
    "\"\"\"\n",
    "\n",
    "x_mask = make_mask(x_embedding.float())\n",
    "y_mask = make_mask(y)\n",
    "\n",
    "x, y = block(x_embedding, x_mask, y, y_mask)\n",
    "\n",
    "print(x.shape)\n",
    "print(y.shape)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torchenv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
