{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2024-12-20T03:07:52.974651Z",
     "start_time": "2024-12-20T03:07:19.454189Z"
    }
   },
   "source": [
    "import os\n",
    "import json\n",
    "import jieba\n",
    "import torch\n",
    "import pickle\n",
    "import random\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision import datasets, transforms\n",
    "from torch.utils.data import DataLoader"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "若已生成并保存好.pkl文件请跳到下一个markdown部分",
   "id": "ab3ba16ce61377a5"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-19T00:52:11.870077Z",
     "start_time": "2024-12-19T00:52:11.862869Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def read_json(file_path):\n",
    "    text = []\n",
    "    for file_name in os.listdir(file_path):\n",
    "        if file_name.endswith('.json'):\n",
    "            with open(os.path.join(file_path, file_name), 'r', encoding='utf-8') as f:\n",
    "                data = json.load(f)\n",
    "                if type(data) == dict:\n",
    "                    if data['text'] is not None:\n",
    "                        text.append(data['text'])\n",
    "                else:\n",
    "                    for it in range(len(data)):\n",
    "                        if data[it]['text'] is not None:\n",
    "                            text.append(data[it]['text'])\n",
    "                    \n",
    "            f.close()\n",
    "    print(len(text))\n",
    "    return text"
   ],
   "id": "ea2506aae0d3dacf",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-19T00:53:37.788408Z",
     "start_time": "2024-12-19T00:52:15.307436Z"
    }
   },
   "cell_type": "code",
   "source": [
    "text = read_json(\"./work/Chinese_Rumor_Dataset-master/CED_Dataset/original-microblog\")\n",
    "text = text + read_json(\"./work/Chinese_Rumor_Dataset-master/CED_Dataset/non-rumor-repost\")\n",
    "text = text + read_json(\"./work/Chinese_Rumor_Dataset-master/CED_Dataset/rumor-repost\")"
   ],
   "id": "e9118c4e09d08d3a",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "3387\n",
      "791563\n",
      "483617\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-18T13:25:34.065084Z",
     "start_time": "2024-12-18T13:25:34.056832Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def cut_words(text):\n",
    "    words = {}\n",
    "    \n",
    "    for line in text:\n",
    "        texts = jieba.lcut(line)\n",
    "        \n",
    "        for word in texts:\n",
    "            words[word] = 1\n",
    "    \n",
    "    print(len(words))\n",
    "    \n",
    "    return words"
   ],
   "id": "bad3c8afad41a47f",
   "outputs": [],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-18T13:26:41.984039Z",
     "start_time": "2024-12-18T13:25:35.671841Z"
    }
   },
   "cell_type": "code",
   "source": "words = cut_words(text)",
   "id": "107eb42be2dfc877",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Building prefix dict from the default dictionary ...\n",
      "Dumping model to file cache C:\\Users\\Lenovo\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 1.730 seconds.\n",
      "Prefix dict has been built successfully.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "276412\n"
     ]
    }
   ],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-18T13:26:46.106999Z",
     "start_time": "2024-12-18T13:26:46.097511Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def word_2_vec(word_dic):\n",
    "    l = len(word_dic)\n",
    "    embed=nn.Embedding(l, embedding_dim=64)\n",
    "    vec_list = []\n",
    "    \n",
    "    for it in range(l):\n",
    "        vec_list.append(embed(torch.tensor(it)))\n",
    "        \n",
    "    return vec_list"
   ],
   "id": "e7aefe2a7e253024",
   "outputs": [],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-18T13:27:05.137637Z",
     "start_time": "2024-12-18T13:26:48.521603Z"
    }
   },
   "cell_type": "code",
   "source": [
    "vecs = word_2_vec(words)\n",
    "print(vecs[0])"
   ],
   "id": "fe95903278e02f95",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([ 0.6329,  0.0476, -0.7358,  1.2637,  0.6318,  0.7386, -0.7044, -0.1845,\n",
      "         0.2995,  0.2079,  0.4076,  0.1490, -2.4393, -1.0129, -0.7221,  0.7870,\n",
      "        -0.5701, -0.4493,  0.6979,  0.5384, -1.3440, -1.1163, -0.7039, -0.1158,\n",
      "         0.9733,  0.4256,  0.1774, -0.6521, -0.3600, -2.4813, -1.1453, -0.7631,\n",
      "        -0.4975, -0.3326, -1.2546, -0.1930,  0.8300,  0.9562,  1.7531, -1.5133,\n",
      "        -0.4589, -0.3010,  0.5048, -0.3114, -1.1669, -0.0173, -0.4065,  1.2529,\n",
      "        -1.0190, -1.6655,  0.6458, -1.4487,  0.2179,  1.1864,  1.8650,  0.9635,\n",
      "        -2.4477, -1.8235, -0.3005,  0.2977,  0.9527,  1.0764, -0.9072,  0.0159],\n",
      "       grad_fn=<EmbeddingBackward0>)\n"
     ]
    }
   ],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-18T13:27:09.122793Z",
     "start_time": "2024-12-18T13:27:08.951286Z"
    }
   },
   "cell_type": "code",
   "source": [
    "count = 0\n",
    "for word in words:\n",
    "    words[word] = count\n",
    "    count += 1"
   ],
   "id": "f411817c97c3fafc",
   "outputs": [],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-18T13:27:31.520541Z",
     "start_time": "2024-12-18T13:27:10.238834Z"
    }
   },
   "cell_type": "code",
   "source": [
    "torch.save(vecs,'embeddings.pth')\n",
    "with open(\"word_to_idx.pkl\", \"wb\") as f:\n",
    "    pickle.dump(words, f)"
   ],
   "id": "62e4f1ed8eab9621",
   "outputs": [],
   "execution_count": 9
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "若已生成并保存好.pkl文件，则导完包后直接从下面一行开始",
   "id": "3d64cac93ff880cc"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:08:22.444335Z",
     "start_time": "2024-12-20T03:08:01.228221Z"
    }
   },
   "cell_type": "code",
   "source": [
    "device1 = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "vecs = torch.load('embeddings.pth')         #加载tensors。若上文中已生成完tensors则不需要运行这一段\n",
    "with open(\"word_to_idx.pkl\", \"rb\") as f:\n",
    "    words = pickle.load(f)"
   ],
   "id": "b5d769b0181b160d",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:08:24.089289Z",
     "start_time": "2024-12-20T03:08:24.072871Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class Line2Vec(nn.Module):                                  #RNN神经元，将一段输入(n*1*64)堆叠到一个1*128的tensor中\n",
    "    def __init__(self):\n",
    "        super(Line2Vec, self).__init__()\n",
    "        self.weight1 = torch.nn.Parameter(torch.randn(64, 128))\n",
    "        self.bias1 = torch.nn.Parameter(torch.randn(1, 1))\n",
    "        self.weight2 = torch.nn.Parameter(torch.randn(128, 128))\n",
    "        self.bias2 = torch.nn.Parameter(torch.randn(1, 1))\n",
    "        self.weight3 = torch.nn.Parameter(torch.randn(128, 128))\n",
    "        self.bias3 = torch.nn.Parameter(torch.randn(1, 1))\n",
    "        self.relu = nn.ReLU()\n",
    "        self.weightM=torch.zeros(128, 128,requires_grad=True)\n",
    "        \n",
    "    def forward(self,x,mem):\n",
    "        x = self.relu(x @ self.weight1 + self.bias1)\n",
    "        x = x+mem\n",
    "        x = self.relu(x @ self.weight2 + self.bias2)\n",
    "        #print(x.shape,self.weightM.shape)\n",
    "        mem=x @ self.weightM\n",
    "        x = self.relu(x @ self.weight3 + self.bias3)\n",
    "        return x, mem"
   ],
   "id": "755f8c50a745ef79",
   "outputs": [],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:08:24.770173Z",
     "start_time": "2024-12-20T03:08:24.760119Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class ResidualBlock(nn.Module):         #残差块\n",
    "    def __init__(self, in_channels, out_channels, Stride=1):\n",
    "        super(ResidualBlock, self).__init__()\n",
    "        self.shortcut = nn.Sequential()\n",
    "        self.bn = nn.BatchNorm2d(out_channels)\n",
    "        if Stride!=1 or in_channels != out_channels:\n",
    "            self.shortcut = nn.Sequential(\n",
    "                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=Stride, bias=False),\n",
    "                nn.BatchNorm2d(out_channels)\n",
    "            )\n",
    " \n",
    "    def forward(self, x):\n",
    "        out = self.shortcut(x)\n",
    "        out = self.bn(out)\n",
    "        out = F.relu(out)\n",
    "        return out"
   ],
   "id": "cb0addd2e023ad0d",
   "outputs": [],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:08:25.354895Z",
     "start_time": "2024-12-20T03:08:25.331223Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class CNN_Net(nn.Module):               #卷积，将1*10*128的tensor卷成1*128\n",
    "    def __init__(self):\n",
    "        super(CNN_Net, self).__init__()\n",
    "        self.relu = nn.ReLU()\n",
    "        self.shape_weight = torch.nn.parameter.Parameter(torch.randn(10,128), requires_grad=True)\n",
    "        self.shape_bias = torch.nn.parameter.Parameter(torch.randn(128,128), requires_grad=True)\n",
    "        \n",
    "        self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=2, padding=2)\n",
    "        self.batch_norm1 = nn.BatchNorm2d(32)\n",
    "        self.pool1 = nn.MaxPool2d(kernel_size=5, stride=2, padding=2)\n",
    "        \n",
    "        self.jump = ResidualBlock(32, 128)\n",
    "        \n",
    "        self.conv2 = nn.Conv2d(32, 64, kernel_size=5, stride=2, padding=2)\n",
    "        self.batch_norm2 = nn.BatchNorm2d(64)\n",
    "        \n",
    "        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, padding=1)\n",
    "        self.batch_norm3 = nn.BatchNorm2d(64)\n",
    "        \n",
    "        self.conv4 = nn.Conv2d(64, 128, kernel_size=3,padding=1)\n",
    "        self.batch_norm4 = nn.BatchNorm2d(128)\n",
    "        self.pool4 = nn.MaxPool2d(kernel_size=5, stride=2, padding=2)\n",
    "        \n",
    "        self.conv5 = nn.Conv2d(128, 128, kernel_size=5, stride=2)\n",
    "        self.batch_norm5 = nn.BatchNorm2d(128)\n",
    "        \n",
    "        self.conv6 = nn.Conv2d(128, 128, kernel_size=3)\n",
    "        self.batch_norm6 = nn.BatchNorm2d(128)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        x=x.T @ self.shape_weight + self.shape_bias\n",
    "        x = self.relu(x)\n",
    "        \n",
    "        x = x.unsqueeze(0)\n",
    "        x = x.unsqueeze(0)\n",
    "        \n",
    "        x = self.pool1(self.batch_norm1(self.conv1(x)))\n",
    "        \n",
    "        mem = x\n",
    "        \n",
    "        x = self.batch_norm2(self.conv2(x))\n",
    "        x = self.batch_norm3(self.conv3(x))\n",
    "        x = self.pool4(self.batch_norm4(self.conv4(x)) + F.max_pool2d(self.jump(mem), kernel_size=3, stride=2, padding=1))\n",
    "        x = F.avg_pool2d(self.batch_norm5(self.conv5(x)), kernel_size=2)\n",
    "        \n",
    "        return x.squeeze().unsqueeze_(0)"
   ],
   "id": "aae5ccea76ae029e",
   "outputs": [],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:08:25.990250Z",
     "start_time": "2024-12-20T03:08:25.981391Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class Net_Gate(nn.Module):              #门控神经元\n",
    "    def __init__(self):\n",
    "        super(Net_Gate, self).__init__()\n",
    "        self.w_in = torch.nn.Parameter(torch.randn((128,128),dtype=torch.float,requires_grad=True))\n",
    "        self.w_short = torch.nn.Parameter(torch.randn((128,128),dtype=torch.float,requires_grad=True))\n",
    "        self.w_out = torch.nn.Parameter(torch.randn((128,128),dtype=torch.float,requires_grad=True))\n",
    "        self.bias = torch.nn.Parameter(torch.randn((1,128),dtype=torch.float,requires_grad=True))\n",
    "        self.sigmoid = nn.Sigmoid()\n",
    "        \n",
    "    def forward(self, input, short):\n",
    "        t1 = input @ self.w_in\n",
    "        t2 = short @ self.w_short\n",
    "        t1 += (t2 + self.bias)\n",
    "        t1 = t1 @ self.w_out\n",
    "        \n",
    "        return self.sigmoid(t1)\n",
    "        \n",
    "    "
   ],
   "id": "e613513d0b03868e",
   "outputs": [],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:08:26.580480Z",
     "start_time": "2024-12-20T03:08:26.567915Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class vec2prob(nn.Module):              #LSTM神经元，\n",
    "    def __init__(self):\n",
    "        super(vec2prob, self).__init__()\n",
    "        self.gateLR = Net_Gate()    \n",
    "        self.gatePR = Net_Gate()\n",
    "        self.gatePL = Net_Gate()\n",
    "        self.gateCM = Net_Gate()\n",
    "        \n",
    "        self.tanh = nn.Tanh()\n",
    "        \n",
    "    def forward(self, input, short, Lmem):\n",
    "        fg = self.gateLR(input, short)  #1*128\n",
    "        Lmem = Lmem * fg\n",
    "        \n",
    "        nmL = self.gatePR(input, short)\n",
    "        nmR = self.gatePL(input, short)\n",
    "        nmL = nmL * nmR\n",
    "        Lmem = Lmem + nmL\n",
    "        \n",
    "        cm = self.gateCM(input, short)\n",
    "        lm = self.tanh(Lmem)\n",
    "        cm = cm * lm\n",
    "        short = cm\n",
    "        \n",
    "        return cm, short, Lmem"
   ],
   "id": "695a2962469732be",
   "outputs": [],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:08:27.481718Z",
     "start_time": "2024-12-20T03:08:27.464472Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class tree(nn.Module):          #决策树\n",
    "    def __init__(self):\n",
    "        super(tree, self,).__init__()\n",
    "        self.treeW = torch.randn((1,128),dtype=torch.float,requires_grad=True)\n",
    "        self.product = self.treeW.clone()\n",
    "        self.getproduct()\n",
    "        \n",
    "    def getproduct(self):\n",
    "        for i in range(1, len(self.treeW)):\n",
    "            self.product[i:] *= self.treeW[i-1]  # 累积前面的权重\n",
    "        \n",
    "        \n",
    "    def forward(self, input):\n",
    "        x = input * self.product\n",
    "        return x.squeeze(0)\n",
    "        "
   ],
   "id": "100e876bd0238f9b",
   "outputs": [],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:12:13.160752Z",
     "start_time": "2024-12-20T03:12:13.140336Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class Net(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Net, self).__init__()\n",
    "        self.l2v = Line2Vec()\n",
    "        self.cnn = CNN_Net()\n",
    "        self.lstm = vec2prob()\n",
    "        self.tree1 = tree()\n",
    "        self.tree2 = tree()\n",
    "        self.tree3 = tree()\n",
    "        self.linear = nn.Linear(128, 128)\n",
    "        self.sigmoid = nn.Sigmoid()\n",
    "        \n",
    "    def forward(self, x, short, Lmem):\n",
    "        \n",
    "        nlist =[]   #10*128\n",
    "        \n",
    "        for it in range(10):        #x是包含十段评论的tensor的二维list\n",
    "            Rmem = torch.zeros(1,128,requires_grad=False)\n",
    "            for it2 in range(len(x[it])):\n",
    "                temp, Rmem = self.l2v(x[it][0],Rmem)\n",
    "            nlist.append(temp)\n",
    "        \n",
    "        nX = torch.stack(nlist, dim=0)\n",
    "        nX = nX.squeeze(1)\n",
    "        \n",
    "        nX = self.cnn(nX)\n",
    "        nX, short, Lmem = self.lstm(nX, short ,Lmem)\n",
    "        \n",
    "        nX = self.linear(nX)            #逻辑回归\n",
    "        nX = self.sigmoid(nX)\n",
    "        \n",
    "        x1 = self.tree1(nX)\n",
    "        x2 = self.tree2(nX)\n",
    "        x3 = self.tree3(nX)\n",
    "        \n",
    "        \n",
    "        x1 = x1/3           #随机森林\n",
    "        x2 = x2/3\n",
    "        x3 = x3/3\n",
    "        x1 += x2 + x3\n",
    "        return torch.sigmoid(x1.squeeze(0).sum())\n",
    "        \n",
    "        "
   ],
   "id": "ff79de93fe619cd2",
   "outputs": [],
   "execution_count": 18
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:12:14.749261Z",
     "start_time": "2024-12-20T03:12:14.737389Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def loss_fn(pred, target):\n",
    "    return pow(pred-target,2)"
   ],
   "id": "d83f77dde5e8ed02",
   "outputs": [],
   "execution_count": 19
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:08:30.695041Z",
     "start_time": "2024-12-20T03:08:30.685264Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def read_json_once(file_path):\n",
    "    text = []\n",
    "    for file_name in os.listdir(file_path):\n",
    "        if file_name.endswith('.json'):\n",
    "            with open(os.path.join(file_path, file_name), 'r', encoding='utf-8') as f:\n",
    "                temp = []\n",
    "                data = json.load(f)\n",
    "                if type(data) == dict:\n",
    "                    if data['text'] is not None:\n",
    "                        temp.append(data['text'])\n",
    "                else:\n",
    "                    for it in range(len(data)):\n",
    "                        if data[it]['text'] is not None:\n",
    "                            temp.append(data[it]['text'])\n",
    "                    \n",
    "            f.close()\n",
    "            if temp != []:          #我也不知道触发了什么bug，不写成这样就不行\n",
    "                text.append(temp)\n",
    "    return text"
   ],
   "id": "61c7b75d71f5b555",
   "outputs": [],
   "execution_count": 11
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:08:31.962382Z",
     "start_time": "2024-12-20T03:08:31.950778Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def cut(text):                  #接收多段评论的二维list，进行词分隔\n",
    "    temp = []\n",
    "    for line in text:\n",
    "        texts = jieba.lcut(line)\n",
    "        temp.append(texts)\n",
    "    \n",
    "    return temp\n",
    "\n",
    "def to_vector(text, words, vecs):  #words 是上文中构造的词到序号的映射，vecs是序号到tensor的映射（命名太潦草了，有机会改:) ）。\n",
    "    temp = []                       #接收一系列分割好的二维词list，将其映射到tensor上\n",
    "    for line in text:\n",
    "        temp2 = []\n",
    "        for word in line:\n",
    "            temp2.append(vecs[words[word]])\n",
    "        temp.append(temp2)\n",
    "    \n",
    "    return temp\n",
    "\n",
    "def get_vector(text, words, vecs): #接收一个文件的所有评论，并将其转化为tensor\n",
    "    temp = cut(text)\n",
    "    temp = to_vector(temp, words, vecs)\n",
    "    return temp\n",
    "\n",
    "class My_dataloader():\n",
    "    def __init__(self):\n",
    "        self.trues = \"./work/Chinese_Rumor_Dataset-master/CED_Dataset/non-rumor-repost\"\n",
    "        self.rumor = \"./work/Chinese_Rumor_Dataset-master/CED_Dataset/rumor-repost\"\n",
    "        self.lines = []\n",
    "        self.init_lines()\n",
    "        \n",
    "    def init_lines(self):\n",
    "        self.lines.append(read_json_once(self.trues))\n",
    "        self.lines.append(read_json_once(self.rumor))    #谣言为1\n",
    "    \n",
    "    def get_lines(self, words, vecs):\n",
    "        flag = random.randint(0,1)\n",
    "        commit = get_vector(self.lines[flag][random.randint(0,len(self.lines[flag])-1)], words, vecs) #随机返回一个文件中的评论\n",
    "        return commit, torch.tensor(flag)\n",
    "    "
   ],
   "id": "86e00da07cf37c6e",
   "outputs": [],
   "execution_count": 12
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:12:19.309678Z",
     "start_time": "2024-12-20T03:12:19.277892Z"
    }
   },
   "cell_type": "code",
   "source": "net = Net()",
   "id": "5b349bceb567293c",
   "outputs": [],
   "execution_count": 20
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:12:20.455804Z",
     "start_time": "2024-12-20T03:12:20.434932Z"
    }
   },
   "cell_type": "code",
   "source": "optimizerF = torch.optim.Adam(net.parameters(), lr=0.1) #变速训练，前期快速收敛，后期缓步精调",
   "id": "7b06941fc185b2f9",
   "outputs": [],
   "execution_count": 21
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:09:22.695131Z",
     "start_time": "2024-12-20T03:08:34.914728Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#optimizerM = torch.optim.Adam(net.parameters(), lr=0.01)\n",
    "#optimizerS = torch.optim.Adam(net.parameters(), lr=0.001)\n",
    "dataloader = My_dataloader()"
   ],
   "id": "cccd9ad9f1020e2f",
   "outputs": [],
   "execution_count": 15
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:28:50.521684Z",
     "start_time": "2024-12-20T03:28:50.493620Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def train(net, optimizer):\n",
    "    net.train()\n",
    "    for it in range(100):\n",
    "        commit, target = dataloader.get_lines(words, vecs)\n",
    "        print(\"commit: \", it)\n",
    "        tloss = 0\n",
    "        tcount = 1\n",
    "        for it2 in range(len(commit)-10):    #滑动窗口\n",
    "            count=0\n",
    "            i=0\n",
    "            epoch =[]\n",
    "            while (it2 + i)<len(commit) and count<10:\n",
    "                if commit[it2+i] == []:     #同上面read_json_once一样，不知道为何\n",
    "                    i+=1\n",
    "                    continue\n",
    "                epoch.append(commit[it2+i])\n",
    "                count += 1\n",
    "            \n",
    "            if len(epoch)<10:\n",
    "                for i in range(10-len(epoch)):\n",
    "                    epoch.append(torch.zeros(1, 64) + 0.001)#噪音\n",
    "            \n",
    "            #print(epoch)\n",
    "            flag = 0\n",
    "            for re in range(5):            #单样本重复训练次数\n",
    "                Lmem = torch.zeros(1,128,requires_grad=False)\n",
    "                short = torch.zeros(1,128,requires_grad=False)\n",
    "                optimizer.zero_grad()\n",
    "                pred = net(epoch, short, Lmem)\n",
    "                loss = loss_fn(pred, target)\n",
    "                tloss += loss.item()\n",
    "                tcount += 1\n",
    "                if loss.item() < 0.001:\n",
    "                    flag = 1\n",
    "                    break\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "            \n",
    "            if flag == 1:\n",
    "                break\n",
    "        print('loss: ',(tloss/tcount))    \n",
    "        #break\n",
    "                "
   ],
   "id": "8a565d3ec11c1375",
   "outputs": [],
   "execution_count": 28
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-20T03:29:54.451144Z",
     "start_time": "2024-12-20T03:28:51.245217Z"
    }
   },
   "cell_type": "code",
   "source": "train(net, optimizerF)",
   "id": "564fe377396fe0f9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "commit:  0\n",
      "loss:  2.1936338605443395e-22\n",
      "commit:  1\n",
      "loss:  0.9982788296041308\n",
      "commit:  2\n",
      "loss:  2.398817717952028e-22\n",
      "commit:  3\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m                         Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[29], line 1\u001B[0m\n\u001B[1;32m----> 1\u001B[0m \u001B[43mtrain\u001B[49m\u001B[43m(\u001B[49m\u001B[43mnet\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43moptimizerF\u001B[49m\u001B[43m)\u001B[49m\n",
      "Cell \u001B[1;32mIn[28], line 37\u001B[0m, in \u001B[0;36mtrain\u001B[1;34m(net, optimizer)\u001B[0m\n\u001B[0;32m     35\u001B[0m     optimizer\u001B[38;5;241m.\u001B[39mzero_grad()\n\u001B[0;32m     36\u001B[0m     loss\u001B[38;5;241m.\u001B[39mbackward()\n\u001B[1;32m---> 37\u001B[0m     \u001B[43moptimizer\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mstep\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     39\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m flag \u001B[38;5;241m==\u001B[39m \u001B[38;5;241m1\u001B[39m:\n\u001B[0;32m     40\u001B[0m     \u001B[38;5;28;01mbreak\u001B[39;00m\n",
      "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\torch\\optim\\optimizer.py:391\u001B[0m, in \u001B[0;36mOptimizer.profile_hook_step.<locals>.wrapper\u001B[1;34m(*args, **kwargs)\u001B[0m\n\u001B[0;32m    386\u001B[0m         \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m    387\u001B[0m             \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mRuntimeError\u001B[39;00m(\n\u001B[0;32m    388\u001B[0m                 \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;132;01m{\u001B[39;00mfunc\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m must return None or a tuple of (new_args, new_kwargs), but got \u001B[39m\u001B[38;5;132;01m{\u001B[39;00mresult\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m.\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    389\u001B[0m             )\n\u001B[1;32m--> 391\u001B[0m out \u001B[38;5;241m=\u001B[39m \u001B[43mfunc\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    392\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_optimizer_step_code()\n\u001B[0;32m    394\u001B[0m \u001B[38;5;66;03m# call optimizer step post hooks\u001B[39;00m\n",
      "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\torch\\optim\\optimizer.py:76\u001B[0m, in \u001B[0;36m_use_grad_for_differentiable.<locals>._use_grad\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m     74\u001B[0m     torch\u001B[38;5;241m.\u001B[39mset_grad_enabled(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mdefaults[\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mdifferentiable\u001B[39m\u001B[38;5;124m'\u001B[39m])\n\u001B[0;32m     75\u001B[0m     torch\u001B[38;5;241m.\u001B[39m_dynamo\u001B[38;5;241m.\u001B[39mgraph_break()\n\u001B[1;32m---> 76\u001B[0m     ret \u001B[38;5;241m=\u001B[39m \u001B[43mfunc\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     77\u001B[0m \u001B[38;5;28;01mfinally\u001B[39;00m:\n\u001B[0;32m     78\u001B[0m     torch\u001B[38;5;241m.\u001B[39m_dynamo\u001B[38;5;241m.\u001B[39mgraph_break()\n",
      "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\torch\\optim\\adam.py:168\u001B[0m, in \u001B[0;36mAdam.step\u001B[1;34m(self, closure)\u001B[0m\n\u001B[0;32m    157\u001B[0m     beta1, beta2 \u001B[38;5;241m=\u001B[39m group[\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mbetas\u001B[39m\u001B[38;5;124m'\u001B[39m]\n\u001B[0;32m    159\u001B[0m     has_complex \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_init_group(\n\u001B[0;32m    160\u001B[0m         group,\n\u001B[0;32m    161\u001B[0m         params_with_grad,\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m    165\u001B[0m         max_exp_avg_sqs,\n\u001B[0;32m    166\u001B[0m         state_steps)\n\u001B[1;32m--> 168\u001B[0m     \u001B[43madam\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m    169\u001B[0m \u001B[43m        \u001B[49m\u001B[43mparams_with_grad\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    170\u001B[0m \u001B[43m        \u001B[49m\u001B[43mgrads\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    171\u001B[0m \u001B[43m        \u001B[49m\u001B[43mexp_avgs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    172\u001B[0m \u001B[43m        \u001B[49m\u001B[43mexp_avg_sqs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    173\u001B[0m \u001B[43m        \u001B[49m\u001B[43mmax_exp_avg_sqs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    174\u001B[0m \u001B[43m        \u001B[49m\u001B[43mstate_steps\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    175\u001B[0m \u001B[43m        \u001B[49m\u001B[43mamsgrad\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mgroup\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43mamsgrad\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    176\u001B[0m \u001B[43m        \u001B[49m\u001B[43mhas_complex\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mhas_complex\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    177\u001B[0m \u001B[43m        
\u001B[49m\u001B[43mbeta1\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mbeta1\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    178\u001B[0m \u001B[43m        \u001B[49m\u001B[43mbeta2\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mbeta2\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    179\u001B[0m \u001B[43m        \u001B[49m\u001B[43mlr\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mgroup\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43mlr\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    180\u001B[0m \u001B[43m        \u001B[49m\u001B[43mweight_decay\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mgroup\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43mweight_decay\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    181\u001B[0m \u001B[43m        \u001B[49m\u001B[43meps\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mgroup\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43meps\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    182\u001B[0m \u001B[43m        \u001B[49m\u001B[43mmaximize\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mgroup\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43mmaximize\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    183\u001B[0m \u001B[43m        \u001B[49m\u001B[43mforeach\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mgroup\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43mforeach\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    184\u001B[0m \u001B[43m        
\u001B[49m\u001B[43mcapturable\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mgroup\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43mcapturable\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    185\u001B[0m \u001B[43m        \u001B[49m\u001B[43mdifferentiable\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mgroup\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43mdifferentiable\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    186\u001B[0m \u001B[43m        \u001B[49m\u001B[43mfused\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mgroup\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43mfused\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    187\u001B[0m \u001B[43m        \u001B[49m\u001B[43mgrad_scale\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mgetattr\u001B[39;49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mgrad_scale\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mNone\u001B[39;49;00m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    188\u001B[0m \u001B[43m        \u001B[49m\u001B[43mfound_inf\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mgetattr\u001B[39;49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mfound_inf\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mNone\u001B[39;49;00m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    189\u001B[0m \u001B[43m    \u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    191\u001B[0m 
\u001B[38;5;28;01mreturn\u001B[39;00m loss\n",
      "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\torch\\optim\\adam.py:318\u001B[0m, in \u001B[0;36madam\u001B[1;34m(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, foreach, capturable, differentiable, fused, grad_scale, found_inf, has_complex, amsgrad, beta1, beta2, lr, weight_decay, eps, maximize)\u001B[0m\n\u001B[0;32m    315\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m    316\u001B[0m     func \u001B[38;5;241m=\u001B[39m _single_tensor_adam\n\u001B[1;32m--> 318\u001B[0m \u001B[43mfunc\u001B[49m\u001B[43m(\u001B[49m\u001B[43mparams\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    319\u001B[0m \u001B[43m     \u001B[49m\u001B[43mgrads\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    320\u001B[0m \u001B[43m     \u001B[49m\u001B[43mexp_avgs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    321\u001B[0m \u001B[43m     \u001B[49m\u001B[43mexp_avg_sqs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    322\u001B[0m \u001B[43m     \u001B[49m\u001B[43mmax_exp_avg_sqs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    323\u001B[0m \u001B[43m     \u001B[49m\u001B[43mstate_steps\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    324\u001B[0m \u001B[43m     \u001B[49m\u001B[43mamsgrad\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mamsgrad\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    325\u001B[0m \u001B[43m     \u001B[49m\u001B[43mhas_complex\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mhas_complex\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    326\u001B[0m \u001B[43m     \u001B[49m\u001B[43mbeta1\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mbeta1\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    327\u001B[0m \u001B[43m     \u001B[49m\u001B[43mbeta2\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mbeta2\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    328\u001B[0m \u001B[43m     \u001B[49m\u001B[43mlr\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mlr\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    329\u001B[0m 
\u001B[43m     \u001B[49m\u001B[43mweight_decay\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mweight_decay\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    330\u001B[0m \u001B[43m     \u001B[49m\u001B[43meps\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43meps\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    331\u001B[0m \u001B[43m     \u001B[49m\u001B[43mmaximize\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mmaximize\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    332\u001B[0m \u001B[43m     \u001B[49m\u001B[43mcapturable\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mcapturable\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    333\u001B[0m \u001B[43m     \u001B[49m\u001B[43mdifferentiable\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mdifferentiable\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    334\u001B[0m \u001B[43m     \u001B[49m\u001B[43mgrad_scale\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mgrad_scale\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    335\u001B[0m \u001B[43m     \u001B[49m\u001B[43mfound_inf\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mfound_inf\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\torch\\optim\\adam.py:441\u001B[0m, in \u001B[0;36m_single_tensor_adam\u001B[1;34m(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, grad_scale, found_inf, amsgrad, has_complex, beta1, beta2, lr, weight_decay, eps, maximize, capturable, differentiable)\u001B[0m\n\u001B[0;32m    439\u001B[0m         denom \u001B[38;5;241m=\u001B[39m (max_exp_avg_sqs[i]\u001B[38;5;241m.\u001B[39msqrt() \u001B[38;5;241m/\u001B[39m bias_correction2_sqrt)\u001B[38;5;241m.\u001B[39madd_(eps)\n\u001B[0;32m    440\u001B[0m     \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m--> 441\u001B[0m         denom \u001B[38;5;241m=\u001B[39m \u001B[43m(\u001B[49m\u001B[43mexp_avg_sq\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43msqrt\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m/\u001B[39;49m\u001B[43m \u001B[49m\u001B[43mbias_correction2_sqrt\u001B[49m\u001B[43m)\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43madd_\u001B[49m\u001B[43m(\u001B[49m\u001B[43meps\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    443\u001B[0m     param\u001B[38;5;241m.\u001B[39maddcdiv_(exp_avg, denom, value\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m-\u001B[39mstep_size)\n\u001B[0;32m    445\u001B[0m \u001B[38;5;66;03m# Lastly, switch back to complex view\u001B[39;00m\n",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m: "
     ]
    }
   ],
   "execution_count": 29
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "87bb1863227b2cb0"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
