{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## nn.Conv1d 的使用\n",
    "nn.Conv1d 是 PyTorch 框架中用于进行一维卷积操作的一个类，它属于 torch.nn 模块。一维卷积通常用于处理序列数据，比如时间序列分析、信号处理或文本数据等。与二维卷积（nn.Conv2d）和三维卷积（nn.Conv3d）不同，一维卷积的输入和输出数据的维度更低，通常用于处理单条序列的局部特征提取。\n",
    "\n",
    "### nn.Conv1d 类的主要参数包括：\n",
    "\n",
    "+ in_channels (int) – 输入信号的通道数。例如，在文本处理中，这可以对应于词嵌入的维度。\n",
    "+ out_channels (int) – 卷积产生的通道数，即卷积核（或过滤器）的数量。\n",
    "+ kernel_size (int or tuple) – 卷积核的大小。由于是一维卷积，所以通常是一个整数，表示卷积核的宽度。\n",
    "+ stride (int or tuple, optional) – 卷积核移动的步长。默认为1。\n",
    "+ padding (int or tuple, optional) – 输入两端各填充的零的层数。这有助于控制输出序列的长度。默认为0。\n",
    "+ dilation (int or tuple, optional) – 卷积核元素之间的间距。默认为1。\n",
    "+ groups (int, optional) – 控制输入和输出之间的连接。默认为1，表示所有输入通道都连接到所有输出通道。当设置为大于1的值时，输入和输出通道会被分成groups个组，每组内部的通道相互连接。\n",
    "+ bias (bool, optional) – 如果为True，则添加可学习的偏置项到输出中。默认为True。\n",
    "\n",
    "$$ \\text{output\\_sequence\\_length} = \\left\\lfloor \\frac{\\text{sequence\\_length} + 2 \\times \\text{padding} - \\text{kernel\\_size}}{\\text{stride}} \\right\\rfloor + 1\n",
    "$$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[0.5118, 0.9258, 0.7900, 0.6446, 0.1917, 0.1533, 0.5762, 0.1277,\n",
      "          0.9501, 0.9893, 0.2884, 0.3654, 0.8010, 0.4007, 0.4690, 0.8410,\n",
      "          0.5926, 0.5587, 0.4277, 0.1503, 0.4489, 0.5277, 0.6078, 0.0372,\n",
      "          0.2392, 0.2798, 0.9369, 0.1267, 0.8719, 0.7867, 0.7215, 0.0517,\n",
      "          0.6912, 0.7349, 0.2682, 0.5440, 0.5483, 0.6609, 0.5934, 0.4991,\n",
      "          0.6395, 0.8160, 0.5933, 0.0904, 0.4144, 0.6272, 0.7530, 0.5782,\n",
      "          0.4120, 0.6311, 0.9364, 0.8661, 0.5339, 0.0551, 0.3050, 0.0318,\n",
      "          0.3287, 0.0596, 0.7585, 0.6628, 0.1856, 0.7230, 0.2676, 0.6283,\n",
      "          0.6959, 0.5357, 0.3338, 0.7292, 0.8922, 0.4871, 0.2221, 0.2264,\n",
      "          0.0123, 0.7517, 0.8789, 0.6581, 0.8069, 0.9103, 0.8186, 0.4874,\n",
      "          0.5933, 0.1457, 0.4431, 0.9761, 0.4176, 0.8129, 0.7011, 0.6984,\n",
      "          0.9436, 0.5939, 0.7773, 0.5755, 0.3926, 0.1329, 0.4173, 0.4578,\n",
      "          0.0542, 0.5510, 0.9275, 0.3555]]])\n",
      "torch.Size([1, 20, 100])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "# 创建一个一维卷积层\n",
    "conv1d=nn.Conv1d(in_channels=1,out_channels=20,kernel_size=5,stride=1,padding=2)\n",
    "# 假设我们有一个形状为 (batch_size, in_channels, sequence_length) 的输入数据  \n",
    "# 例如: (1, 1, 100) 表示一个批次中有1个序列，序列长度为100，且序列是单通道的 \n",
    "input_tensor=torch.rand(1,1,100)\n",
    "print(input_tensor)\n",
    "\n",
    "#使用一维卷积\n",
    "output_tensor=conv1d(input_tensor)\n",
    "# 输出张量的形状将会是 (batch_size, out_channels, output_sequence_length)  \n",
    "# 由于 padding=2, stride=1, kernel_size=5, 所以 output_sequence_length = (100 + 2*2 - 5) / 1 + 1 = 100\n",
    "print(output_tensor.shape)  # 输出: torch.Size([1, 20, 100])\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([1, 2, 3, 4])\n",
      "tensor([[1, 2, 3, 4],\n",
      "        [1, 2, 3, 4]])\n",
      "Parameter containing:\n",
      "tensor([[0.0000, 0.6931, 1.0986, 1.3863],\n",
      "        [0.0000, 0.6931, 1.0986, 1.3863]], requires_grad=True)\n",
      "Parameter containing:\n",
      "tensor([[0.0000, 0.6931, 1.0986, 1.3863],\n",
      "        [0.0000, 0.6931, 1.0986, 1.3863]], requires_grad=True)\n",
      "tensor([[1., 2., 3., 4.],\n",
      "        [1., 2., 3., 4.]], grad_fn=<ExpBackward0>)\n",
      "4\n"
     ]
    }
   ],
   "source": [
    "import torch.nn as nn\n",
    "from einops import rearrange, repeat, einsum\n",
    "\n",
    "A = repeat(torch.arange(1, 5), 'n -> d n', d=2)\n",
    "\n",
    "A_log = nn.Parameter(torch.log(A))\n",
    "\n",
    "print(torch.arange(1,5))\n",
    "print(A)\n",
    "#A_log=torch.log(A)\n",
    "print(A_log)\n",
    "print(A_log.float())\n",
    "A=torch.exp(A_log.float())\n",
    "print(A)\n",
    "print(A.shape[1])  # n, the second dimension of A (= 4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "3\n"
     ]
    }
   ],
   "source": [
    "class mytest():\n",
    "    def __init__(self):\n",
    "        pass\n",
    "    def func(self,x):\n",
    "        print(x)\n",
    "\n",
    "test=mytest()\n",
    "test.func(3)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
