{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2024-12-05T11:05:23.915422Z",
     "start_time": "2024-12-05T11:04:56.290926Z"
    }
   },
   "source": [
    "import os\n",
    "import json\n",
    "import jieba\n",
    "import torch\n",
    "import pickle\n",
    "import random\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision import datasets, transforms\n",
    "from torch.utils.data import DataLoader"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "若已生成并保存好.pkl文件请跳到下一个markdown部分",
   "id": "ab3ba16ce61377a5"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T08:10:33.601032Z",
     "start_time": "2024-12-05T08:10:33.583137Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def read_json(file_path):\n",
    "    \"\"\"Collect the 'text' field of every JSON file under file_path.\n",
    "\n",
    "    Each file holds either a single dict or a list of dicts; entries whose\n",
    "    'text' value is None are skipped.  Prints and returns the flat list of\n",
    "    collected strings.\n",
    "    \"\"\"\n",
    "    text = []\n",
    "    for file_name in os.listdir(file_path):\n",
    "        if not file_name.endswith('.json'):\n",
    "            continue\n",
    "        # The with-statement closes the file; the old explicit f.close() was redundant.\n",
    "        with open(os.path.join(file_path, file_name), 'r', encoding='utf-8') as f:\n",
    "            data = json.load(f)\n",
    "        # A file contains either one post (dict) or a list of repost dicts.\n",
    "        records = [data] if isinstance(data, dict) else data\n",
    "        for record in records:\n",
    "            if record['text'] is not None:\n",
    "                text.append(record['text'])\n",
    "    print(len(text))\n",
    "    return text"
   ],
   "id": "ea2506aae0d3dacf",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T08:10:44.519736Z",
     "start_time": "2024-12-05T08:10:37.129937Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Gather all texts from the three corpora; used only to build the vocabulary.\n",
    "text = read_json(\"./work/Chinese_Rumor_Dataset-master/CED_Dataset/original-microblog\")\n",
    "text = text + read_json(\"./work/Chinese_Rumor_Dataset-master/CED_Dataset/non-rumor-repost\")\n",
    "text = text + read_json(\"./work/Chinese_Rumor_Dataset-master/CED_Dataset/rumor-repost\")"
   ],
   "id": "e9118c4e09d08d3a",
   "outputs": [
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m                         Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[3], line 1\u001B[0m\n\u001B[1;32m----> 1\u001B[0m text \u001B[38;5;241m=\u001B[39m \u001B[43mread_json\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43m./work/Chinese_Rumor_Dataset-master/CED_Dataset/original-microblog\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m)\u001B[49m\n\u001B[0;32m      2\u001B[0m text \u001B[38;5;241m=\u001B[39m text \u001B[38;5;241m+\u001B[39m read_json(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m./work/Chinese_Rumor_Dataset-master/CED_Dataset/non-rumor-repost\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[0;32m      3\u001B[0m text \u001B[38;5;241m=\u001B[39m text \u001B[38;5;241m+\u001B[39m read_json(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m./work/Chinese_Rumor_Dataset-master/CED_Dataset/rumor-repost\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n",
      "Cell \u001B[1;32mIn[2], line 5\u001B[0m, in \u001B[0;36mread_json\u001B[1;34m(file_path)\u001B[0m\n\u001B[0;32m      3\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m file_name \u001B[38;5;129;01min\u001B[39;00m os\u001B[38;5;241m.\u001B[39mlistdir(file_path):\n\u001B[0;32m      4\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m file_name\u001B[38;5;241m.\u001B[39mendswith(\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m.json\u001B[39m\u001B[38;5;124m'\u001B[39m):\n\u001B[1;32m----> 5\u001B[0m         \u001B[38;5;28;01mwith\u001B[39;00m \u001B[38;5;28;43mopen\u001B[39;49m\u001B[43m(\u001B[49m\u001B[43mos\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mpath\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mjoin\u001B[49m\u001B[43m(\u001B[49m\u001B[43mfile_path\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mfile_name\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43mr\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mencoding\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43mutf-8\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[43m)\u001B[49m \u001B[38;5;28;01mas\u001B[39;00m f:\n\u001B[0;32m      6\u001B[0m             data \u001B[38;5;241m=\u001B[39m json\u001B[38;5;241m.\u001B[39mload(f)\n\u001B[0;32m      7\u001B[0m             \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mtype\u001B[39m(data) \u001B[38;5;241m==\u001B[39m \u001B[38;5;28mdict\u001B[39m:\n",
      "File \u001B[1;32m~\\AppData\\Roaming\\Python\\Python312\\site-packages\\IPython\\core\\interactiveshell.py:324\u001B[0m, in \u001B[0;36m_modified_open\u001B[1;34m(file, *args, **kwargs)\u001B[0m\n\u001B[0;32m    317\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m file \u001B[38;5;129;01min\u001B[39;00m {\u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m2\u001B[39m}:\n\u001B[0;32m    318\u001B[0m     \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mValueError\u001B[39;00m(\n\u001B[0;32m    319\u001B[0m         \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mIPython won\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mt let you open fd=\u001B[39m\u001B[38;5;132;01m{\u001B[39;00mfile\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m by default \u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    320\u001B[0m         \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mas it is likely to crash IPython. If you know what you are doing, \u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    321\u001B[0m         \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124myou can use builtins\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m open.\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    322\u001B[0m     )\n\u001B[1;32m--> 324\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mio_open\u001B[49m\u001B[43m(\u001B[49m\u001B[43mfile\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32m<frozen codecs>:309\u001B[0m, in \u001B[0;36m__init__\u001B[1;34m(self, errors)\u001B[0m\n",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m: "
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T08:10:47.500933Z",
     "start_time": "2024-12-05T08:10:47.491776Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def cut_words(text):\n",
    "    \"\"\"Segment every line with jieba and collect the vocabulary.\n",
    "\n",
    "    Returns a dict mapping each distinct token to 1 (the dict is used as an\n",
    "    ordered set of words) and prints the vocabulary size.\n",
    "    \"\"\"\n",
    "    vocabulary = {}\n",
    "    for sentence in text:\n",
    "        for token in jieba.lcut(sentence):\n",
    "            vocabulary[token] = 1\n",
    "    print(len(vocabulary))\n",
    "    return vocabulary"
   ],
   "id": "bad3c8afad41a47f",
   "outputs": [],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T03:25:49.041243Z",
     "start_time": "2024-12-05T03:24:42.457614Z"
    }
   },
   "cell_type": "code",
   "source": "words = cut_words(text)  # vocabulary dict: token -> 1, built from all collected texts",
   "id": "107eb42be2dfc877",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Building prefix dict from the default dictionary ...\n",
      "Loading model from cache C:\\Users\\Lenovo\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 1.949 seconds.\n",
      "Prefix dict has been built successfully.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "276412\n"
     ]
    }
   ],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T08:10:51.611299Z",
     "start_time": "2024-12-05T08:10:51.604323Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def word_2_vec(word_dic):\n",
    "    \"\"\"Create one randomly initialised 64-dim vector per vocabulary word.\n",
    "\n",
    "    NOTE(review): the embedding layer is never trained, so these are fixed\n",
    "    random features and differ between runs.  Returns a list of 64-dim\n",
    "    tensors indexed by word id (0 .. len(word_dic)-1).\n",
    "    \"\"\"\n",
    "    vocab_size = len(word_dic)\n",
    "    embed = nn.Embedding(vocab_size, embedding_dim=64)\n",
    "    # One batched lookup instead of vocab_size separate forward passes\n",
    "    # (the original looped 276k times through the embedding layer).\n",
    "    return list(embed(torch.arange(vocab_size)))"
   ],
   "id": "e7aefe2a7e253024",
   "outputs": [],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T03:31:10.769162Z",
     "start_time": "2024-12-05T03:30:59.341102Z"
    }
   },
   "cell_type": "code",
   "source": [
    "vecs = word_2_vec(words)  # random embedding vectors, aligned with the word ids assigned below\n",
    "print(vecs[0])"
   ],
   "id": "fe95903278e02f95",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([-1.3116,  0.8352,  0.2186,  0.3733, -0.6603, -0.3662, -0.2850,  0.9606,\n",
      "        -0.3415,  1.8797,  0.4414,  1.2678, -0.6273, -0.0996,  1.6962,  0.0193,\n",
      "        -0.7628,  0.2492,  1.0498, -1.8708, -0.3257,  0.3657,  0.9662,  0.4997,\n",
      "         0.7057,  0.1157, -0.5662, -0.5519, -1.0356, -0.2481, -0.4447, -1.5392,\n",
      "         1.5970,  0.5158,  0.4587,  1.1310,  0.2670, -0.3033, -0.8109, -0.4621,\n",
      "         0.4288,  0.8190, -1.0835, -0.7132,  1.0865, -0.7656,  0.5924, -1.0507,\n",
      "        -1.3214,  0.2182,  1.2230, -0.0397, -3.3317,  2.4509, -2.1428, -0.8730,\n",
      "        -0.3462,  0.1447, -0.7253,  0.7714, -0.1039,  1.1575,  0.8201,  2.1888],\n",
      "       grad_fn=<EmbeddingBackward0>)\n"
     ]
    }
   ],
   "execution_count": 20
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T03:31:42.169860Z",
     "start_time": "2024-12-05T03:31:41.997302Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Re-purpose the vocabulary dict: map each word to its index into vecs.\n",
    "for index, word in enumerate(words):\n",
    "    words[word] = index"
   ],
   "id": "f411817c97c3fafc",
   "outputs": [],
   "execution_count": 21
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T03:32:04.335125Z",
     "start_time": "2024-12-05T03:31:45.737386Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Persist the embeddings and the word -> index mapping for later runs.\n",
    "torch.save(vecs,'embeddings.pth')\n",
    "with open(\"word_to_idx.pkl\", \"wb\") as f:\n",
    "    pickle.dump(words, f)"
   ],
   "id": "62e4f1ed8eab9621",
   "outputs": [],
   "execution_count": 22
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "若已生成并保存好.pkl文件，则导完包后直接从下面一行开始",
   "id": "3d64cac93ff880cc"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:05:53.180486Z",
     "start_time": "2024-12-05T11:05:33.488562Z"
    }
   },
   "cell_type": "code",
   "source": [
    "device1 = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # NOTE(review): device1 is never used below -- confirm intent\n",
    "vecs = torch.load('embeddings.pth')         # load tensors; skip this cell if they were just generated above\n",
    "with open(\"word_to_idx.pkl\", \"rb\") as f:\n",
    "    words = pickle.load(f)  # NOTE(review): only unpickle files you created yourself"
   ],
   "id": "b5d769b0181b160d",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:05:57.863349Z",
     "start_time": "2024-12-05T11:05:57.848164Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class Line2Vec(nn.Module):                                  # RNN cell: folds a sequence of (1*64) word vectors into one 1*128 tensor\n",
    "    \"\"\"Simple recurrent cell; `mem` carries the hidden state between steps.\"\"\"\n",
    "    def __init__(self):\n",
    "        super(Line2Vec, self).__init__()\n",
    "        self.weight1 = torch.nn.Parameter(torch.randn(64, 128))\n",
    "        self.bias1 = torch.nn.Parameter(torch.randn(1, 1))\n",
    "        self.weight2 = torch.nn.Parameter(torch.randn(128, 128))\n",
    "        self.bias2 = torch.nn.Parameter(torch.randn(1, 1))\n",
    "        self.weight3 = torch.nn.Parameter(torch.randn(128, 128))\n",
    "        self.bias3 = torch.nn.Parameter(torch.randn(1, 1))\n",
    "        self.relu = nn.ReLU()\n",
    "        # BUG FIX: this was a plain tensor, so the optimizer never updated it,\n",
    "        # it stayed all-zero (killing the recurrent path) and was missing from\n",
    "        # state_dict.  Registering it as a Parameter makes it trainable.\n",
    "        self.weightM = torch.nn.Parameter(torch.zeros(128, 128))\n",
    "        \n",
    "    def forward(self, x, mem):\n",
    "        \"\"\"One recurrent step; returns (line vector, next mem), both (1, 128).\"\"\"\n",
    "        x = self.relu(x @ self.weight1 + self.bias1)\n",
    "        x = x + mem                     # inject the previous hidden state\n",
    "        x = self.relu(x @ self.weight2 + self.bias2)\n",
    "        mem = x @ self.weightM          # next recurrent memory\n",
    "        x = self.relu(x @ self.weight3 + self.bias3)\n",
    "        return x, mem"
   ],
   "id": "755f8c50a745ef79",
   "outputs": [],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:05:58.682987Z",
     "start_time": "2024-12-05T11:05:58.667687Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class ResidualBlock(nn.Module):         # projection shortcut block\n",
    "    \"\"\"Optional 1x1-conv projection (when shape changes) + BatchNorm + ReLU.\n",
    "\n",
    "    Note: only the shortcut branch is computed; there is no main conv path.\n",
    "    \"\"\"\n",
    "    def __init__(self, in_channels, out_channels, Stride=1):\n",
    "        super(ResidualBlock, self).__init__()\n",
    "        self.shortcut = nn.Sequential()\n",
    "        self.bn = nn.BatchNorm2d(out_channels)\n",
    "        if Stride != 1 or in_channels != out_channels:\n",
    "            # Project the input so it matches the requested channels/stride.\n",
    "            self.shortcut = nn.Sequential(\n",
    "                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=Stride, bias=False),\n",
    "                nn.BatchNorm2d(out_channels)\n",
    "            )\n",
    "\n",
    "    def forward(self, x):\n",
    "        return F.relu(self.bn(self.shortcut(x)))"
   ],
   "id": "cb0addd2e023ad0d",
   "outputs": [],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:05:59.315832Z",
     "start_time": "2024-12-05T11:05:59.293926Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class CNN_Net(nn.Module):               # conv net: squeezes the 10*128 line-vector stack into a single 1*128 vector\n",
    "    def __init__(self):\n",
    "        super(CNN_Net, self).__init__()\n",
    "        self.relu = nn.ReLU()\n",
    "        # Learned reshaping of the transposed input into a square (128, 128) map.\n",
    "        self.shape_weight = torch.nn.parameter.Parameter(torch.randn(10,128), requires_grad=True)\n",
    "        self.shape_bias = torch.nn.parameter.Parameter(torch.randn(128,128), requires_grad=True)\n",
    "        \n",
    "        self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=2, padding=2)\n",
    "        self.batch_norm1 = nn.BatchNorm2d(32)\n",
    "        self.pool1 = nn.MaxPool2d(kernel_size=5, stride=2, padding=2)\n",
    "        \n",
    "        # Projection shortcut used as a jump connection in forward().\n",
    "        self.jump = ResidualBlock(32, 128)\n",
    "        \n",
    "        self.conv2 = nn.Conv2d(32, 64, kernel_size=5, stride=2, padding=2)\n",
    "        self.batch_norm2 = nn.BatchNorm2d(64)\n",
    "        \n",
    "        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, padding=1)\n",
    "        self.batch_norm3 = nn.BatchNorm2d(64)\n",
    "        \n",
    "        self.conv4 = nn.Conv2d(64, 128, kernel_size=3,padding=1)\n",
    "        self.batch_norm4 = nn.BatchNorm2d(128)\n",
    "        self.pool4 = nn.MaxPool2d(kernel_size=5, stride=2, padding=2)\n",
    "        \n",
    "        self.conv5 = nn.Conv2d(128, 128, kernel_size=5, stride=2)\n",
    "        self.batch_norm5 = nn.BatchNorm2d(128)\n",
    "        \n",
    "        # NOTE(review): conv6/batch_norm6 are never used in forward() -- dead\n",
    "        # layers that still hold parameters; confirm whether this is intended.\n",
    "        self.conv6 = nn.Conv2d(128, 128, kernel_size=3)\n",
    "        self.batch_norm6 = nn.BatchNorm2d(128)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        # Expects x of shape (10, 128); x.T @ shape_weight yields (128, 128).\n",
    "        x=x.T @ self.shape_weight + self.shape_bias\n",
    "        x = self.relu(x)\n",
    "        \n",
    "        # Add batch and channel dims for the 2-D convolutions.\n",
    "        x = x.unsqueeze(0)\n",
    "        x = x.unsqueeze(0)\n",
    "        \n",
    "        x = self.pool1(self.batch_norm1(self.conv1(x)))\n",
    "        \n",
    "        mem = x  # saved for the residual jump connection below\n",
    "        \n",
    "        x = self.batch_norm2(self.conv2(x))\n",
    "        x = self.batch_norm3(self.conv3(x))\n",
    "        # Main path plus the pooled projection shortcut of the early features.\n",
    "        x = self.pool4(self.batch_norm4(self.conv4(x)) + F.max_pool2d(self.jump(mem), kernel_size=3, stride=2, padding=1))\n",
    "        x = F.avg_pool2d(self.batch_norm5(self.conv5(x)), kernel_size=2)\n",
    "        \n",
    "        return x.squeeze().unsqueeze_(0)  # unsqueeze_ is in-place on the squeezed tensor"
   ],
   "id": "aae5ccea76ae029e",
   "outputs": [],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:05:59.920180Z",
     "start_time": "2024-12-05T11:05:59.909312Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class Net_Gate(nn.Module):              # gating unit\n",
    "    \"\"\"Gate: linear maps of the input and the short-term state are summed\n",
    "    with a bias, projected once more, then squashed by a sigmoid, yielding\n",
    "    a (1, 128) vector of values in (0, 1).\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super(Net_Gate, self).__init__()\n",
    "        self.w_in = torch.nn.Parameter(torch.randn((128,128),dtype=torch.float,requires_grad=True))\n",
    "        self.w_short = torch.nn.Parameter(torch.randn((128,128),dtype=torch.float,requires_grad=True))\n",
    "        self.w_out = torch.nn.Parameter(torch.randn((128,128),dtype=torch.float,requires_grad=True))\n",
    "        self.bias = torch.nn.Parameter(torch.randn((1,128),dtype=torch.float,requires_grad=True))\n",
    "        self.sigmoid = nn.Sigmoid()\n",
    "\n",
    "    def forward(self, input, short):\n",
    "        # Same float grouping as before: in-term + (short-term + bias).\n",
    "        gated = (input @ self.w_in) + ((short @ self.w_short) + self.bias)\n",
    "        return self.sigmoid(gated @ self.w_out)"
   ],
   "id": "e613513d0b03868e",
   "outputs": [],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:06:00.492222Z",
     "start_time": "2024-12-05T11:06:00.472327Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class vec2prob(nn.Module):              # LSTM-style cell\n",
    "    \"\"\"LSTM-like cell over (1, 128) vectors: Lmem plays the role of the\n",
    "    cell state and `short` the hidden state.  Returns (output, new short,\n",
    "    new Lmem); the output and the new short state are the same tensor.\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super(vec2prob, self).__init__()\n",
    "        self.gateLR = Net_Gate()\n",
    "        self.gatePR = Net_Gate()\n",
    "        self.gatePL = Net_Gate()\n",
    "        self.gateCM = Net_Gate()\n",
    "\n",
    "        self.tanh = nn.Tanh()\n",
    "\n",
    "    def forward(self, input, short, Lmem):\n",
    "        # Forget: scale the long-term memory by the first gate.\n",
    "        Lmem = Lmem * self.gateLR(input, short)\n",
    "        # Write: the product of two gates is added into the memory.\n",
    "        Lmem = Lmem + self.gatePR(input, short) * self.gatePL(input, short)\n",
    "        # Output: gated tanh of the updated memory.\n",
    "        out = self.gateCM(input, short) * self.tanh(Lmem)\n",
    "        return out, out, Lmem"
   ],
   "id": "695a2962469732be",
   "outputs": [],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:06:01.082253Z",
     "start_time": "2024-12-05T11:06:01.065361Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class Net(nn.Module):\n",
    "    \"\"\"Full model: Line2Vec RNN per comment -> CNN over the 10 line vectors\n",
    "    -> LSTM-style cell -> linear + sigmoid rumour probability.\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super(Net, self).__init__()\n",
    "        self.l2v = Line2Vec()\n",
    "        self.cnn = CNN_Net()\n",
    "        self.lstm = vec2prob()\n",
    "        self.linear = nn.Linear(128, 1)\n",
    "        self.sigmoid = nn.Sigmoid()\n",
    "        \n",
    "    def forward(self, x, short, Lmem):\n",
    "        \"\"\"x is a list of 10 comments, each a sequence of 64-dim word vectors.\"\"\"\n",
    "        nlist = []   # final line vector of each of the 10 comments\n",
    "        \n",
    "        for it in range(10):\n",
    "            Rmem = torch.zeros(1,128,requires_grad=False)\n",
    "            for it2 in range(len(x[it])):\n",
    "                # BUG FIX: the original fed x[it][0] on every step, so the\n",
    "                # RNN only ever saw the first word of each comment.\n",
    "                temp, Rmem = self.l2v(x[it][it2], Rmem)\n",
    "            nlist.append(temp)\n",
    "        \n",
    "        nX = torch.stack(nlist, dim=0)\n",
    "        nX = nX.squeeze(1)\n",
    "        \n",
    "        nX = self.cnn(nX)\n",
    "        nX, short, Lmem = self.lstm(nX, short, Lmem)\n",
    "        nX = self.linear(nX)\n",
    "        nX = self.sigmoid(nX)\n",
    "        return nX[0]"
   ],
   "id": "ff79de93fe619cd2",
   "outputs": [],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:06:01.701790Z",
     "start_time": "2024-12-05T11:06:01.692255Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def loss_fn(pred, target):\n",
    "    \"\"\"Squared error scaled by 10: post-sigmoid differences are tiny, so\n",
    "    the loss is amplified to speed up convergence.\n",
    "    \"\"\"\n",
    "    diff = pred - target\n",
    "    return (diff ** 2) * 10"
   ],
   "id": "d83f77dde5e8ed02",
   "outputs": [],
   "execution_count": 9
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:06:03.577773Z",
     "start_time": "2024-12-05T11:06:03.557770Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def read_json_once(file_path):\n",
    "    \"\"\"Like read_json, but keeps each file's texts grouped together.\n",
    "\n",
    "    Returns a list with one entry per JSON file; each entry is the list of\n",
    "    non-None 'text' fields found in that file.  Files yielding no text at\n",
    "    all are skipped entirely (empty lists confused downstream code).\n",
    "    \"\"\"\n",
    "    text = []\n",
    "    for file_name in os.listdir(file_path):\n",
    "        if not file_name.endswith('.json'):\n",
    "            continue\n",
    "        with open(os.path.join(file_path, file_name), 'r', encoding='utf-8') as f:\n",
    "            data = json.load(f)\n",
    "        # A file contains either one post (dict) or a list of repost dicts.\n",
    "        records = [data] if isinstance(data, dict) else data\n",
    "        temp = [record['text'] for record in records if record['text'] is not None]\n",
    "        if temp:    # drop files that produced no usable text\n",
    "            text.append(temp)\n",
    "    return text"
   ],
   "id": "61c7b75d71f5b555",
   "outputs": [],
   "execution_count": 10
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:06:41.066490Z",
     "start_time": "2024-12-05T11:06:41.051178Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def cut(text):                  # tokenise a list of comment strings\n",
    "    \"\"\"Run jieba over every comment; returns one token list per comment.\"\"\"\n",
    "    return [jieba.lcut(line) for line in text]\n",
    "\n",
    "def to_vector(text, words, vecs):  # words: token -> id, vecs: id -> 64-dim tensor\n",
    "    \"\"\"Map tokenised comments onto their embedding tensors.\n",
    "\n",
    "    NOTE(review): raises KeyError for tokens missing from the vocabulary.\n",
    "    \"\"\"\n",
    "    return [[vecs[words[word]] for word in line] for line in text]\n",
    "\n",
    "def get_vector(text, words, vecs):\n",
    "    \"\"\"Tokenise one file's comments and convert them all to tensors.\"\"\"\n",
    "    return to_vector(cut(text), words, vecs)\n",
    "\n",
    "class My_dataloader():\n",
    "    \"\"\"Loads both corpora once and serves one random file's comments.\n",
    "\n",
    "    Label convention: 0 = non-rumour, 1 = rumour.\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        self.trues = \"./work/Chinese_Rumor_Dataset-master/CED_Dataset/non-rumor-repost\"\n",
    "        self.rumor = \"./work/Chinese_Rumor_Dataset-master/CED_Dataset/rumor-repost\"\n",
    "        self.lines = []\n",
    "        self.init_lines()\n",
    "\n",
    "    def init_lines(self):\n",
    "        \"\"\"Read both datasets; lines[0] = genuine posts, lines[1] = rumours.\"\"\"\n",
    "        for path in (self.trues, self.rumor):\n",
    "            self.lines.append(read_json_once(path))\n",
    "\n",
    "    def get_lines(self, words, vecs):\n",
    "        \"\"\"Pick a random class, then a random file of that class; return its\n",
    "        comment tensors together with the class label as a tensor.\"\"\"\n",
    "        flag = random.randint(0, 1)\n",
    "        pool = self.lines[flag]\n",
    "        chosen = pool[random.randint(0, len(pool) - 1)]\n",
    "        return get_vector(chosen, words, vecs), torch.tensor(flag)"
   ],
   "id": "86e00da07cf37c6e",
   "outputs": [],
   "execution_count": 17
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:06:46.942777Z",
     "start_time": "2024-12-05T11:06:46.920066Z"
    }
   },
   "cell_type": "code",
   "source": "net = Net()  # fresh, untrained model instance",
   "id": "5b349bceb567293c",
   "outputs": [],
   "execution_count": 18
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:06:47.981854Z",
     "start_time": "2024-12-05T11:06:47.969422Z"
    }
   },
   "cell_type": "code",
   "source": "optimizerF = torch.optim.Adam(net.parameters(), lr=0.1) # variable-rate plan: converge fast early, fine-tune slowly later.  NOTE(review): lr=0.1 is unusually high for Adam -- confirm",
   "id": "7b06941fc185b2f9",
   "outputs": [],
   "execution_count": 19
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:06:54.404154Z",
     "start_time": "2024-12-05T11:06:48.418220Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Optimisers for the later, slower phases of the variable-rate plan.\n",
    "#optimizerM = torch.optim.Adam(net.parameters(), lr=0.01)\n",
    "#optimizerS = torch.optim.Adam(net.parameters(), lr=0.001)\n",
    "dataloader = My_dataloader()  # reads both corpora from disk once"
   ],
   "id": "cccd9ad9f1020e2f",
   "outputs": [],
   "execution_count": 20
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:34:31.515932Z",
     "start_time": "2024-12-05T11:34:31.499784Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def train(net, optimizer):\n",
    "    \"\"\"Train on 100 random files, sliding a 10-comment window over each.\"\"\"\n",
    "    net.train()\n",
    "    for it in range(100):\n",
    "        commit, target = dataloader.get_lines(words, vecs)\n",
    "        print(\"commit: \", it)\n",
    "        tloss = 0\n",
    "        for it2 in range(len(commit)-10):    # sliding window\n",
    "            count = 0\n",
    "            i = 0\n",
    "            epoch = []\n",
    "            # Collect the next 10 non-empty comments starting at it2.\n",
    "            while (it2 + i) < len(commit) and count < 10:\n",
    "                if commit[it2+i] != []:      # some entries come back empty; skip them\n",
    "                    epoch.append(commit[it2+i])\n",
    "                    count += 1\n",
    "                # BUG FIX: i was only advanced for empty entries, so the window\n",
    "                # was the first non-empty comment repeated 10 times.\n",
    "                i += 1\n",
    "            \n",
    "            if len(epoch) < 10:\n",
    "                for _ in range(10-len(epoch)):    # '_' so the counter i is not clobbered\n",
    "                    epoch.append(torch.zeros(1, 64) + 0.001)  # noise padding\n",
    "            \n",
    "            for re in range(10):            # repeated passes per window\n",
    "                Lmem = torch.zeros(1,128,requires_grad=False)\n",
    "                short = torch.zeros(1,128,requires_grad=False)\n",
    "                pred = net(epoch, short, Lmem)\n",
    "                loss = loss_fn(pred, target)\n",
    "                if loss.item() == 0: break\n",
    "                tloss += loss.item()\n",
    "                \n",
    "                optimizer.zero_grad()\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "        # NOTE(review): divisor is len(commit)-9 while the window loop runs\n",
    "        # len(commit)-10 times; kept as-is to avoid division by zero when a\n",
    "        # file has exactly 10 comments.\n",
    "        print('loss: ',(tloss/10)/(len(commit)-9))"
   ],
   "id": "8a565d3ec11c1375",
   "outputs": [],
   "execution_count": 39
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-05T11:43:12.411094Z",
     "start_time": "2024-12-05T11:34:32.191321Z"
    }
   },
   "cell_type": "code",
   "source": "train(net, optimizerF)  # phase 1: high learning rate",
   "id": "564fe377396fe0f9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "commit:  0\n",
      "loss:  9.909090909090908\n",
      "commit:  1\n",
      "loss:  9.97787610619469\n",
      "commit:  2\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m                         Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[40], line 1\u001B[0m\n\u001B[1;32m----> 1\u001B[0m \u001B[43mtrain\u001B[49m\u001B[43m(\u001B[49m\u001B[43mnet\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43moptimizerF\u001B[49m\u001B[43m)\u001B[49m\n",
      "Cell \u001B[1;32mIn[39], line 27\u001B[0m, in \u001B[0;36mtrain\u001B[1;34m(net, optimizer)\u001B[0m\n\u001B[0;32m     25\u001B[0m Lmem \u001B[38;5;241m=\u001B[39m torch\u001B[38;5;241m.\u001B[39mzeros(\u001B[38;5;241m1\u001B[39m,\u001B[38;5;241m128\u001B[39m,requires_grad\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mFalse\u001B[39;00m)\n\u001B[0;32m     26\u001B[0m short \u001B[38;5;241m=\u001B[39m torch\u001B[38;5;241m.\u001B[39mzeros(\u001B[38;5;241m1\u001B[39m,\u001B[38;5;241m128\u001B[39m,requires_grad\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mFalse\u001B[39;00m)\n\u001B[1;32m---> 27\u001B[0m pred \u001B[38;5;241m=\u001B[39m \u001B[43mnet\u001B[49m\u001B[43m(\u001B[49m\u001B[43mepoch\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mshort\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mLmem\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     28\u001B[0m loss \u001B[38;5;241m=\u001B[39m loss_fn(pred, target)\n\u001B[0;32m     29\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m loss\u001B[38;5;241m.\u001B[39mitem() \u001B[38;5;241m==\u001B[39m\u001B[38;5;241m0\u001B[39m: \u001B[38;5;28;01mbreak\u001B[39;00m\n",
      "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1532\u001B[0m, in \u001B[0;36mModule._wrapped_call_impl\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1530\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_compiled_call_impl(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)  \u001B[38;5;66;03m# type: ignore[misc]\u001B[39;00m\n\u001B[0;32m   1531\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m-> 1532\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call_impl\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1541\u001B[0m, in \u001B[0;36mModule._call_impl\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1536\u001B[0m \u001B[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001B[39;00m\n\u001B[0;32m   1537\u001B[0m \u001B[38;5;66;03m# this function, and just call forward.\u001B[39;00m\n\u001B[0;32m   1538\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m (\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_pre_hooks\n\u001B[0;32m   1539\u001B[0m         \u001B[38;5;129;01mor\u001B[39;00m _global_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_backward_hooks\n\u001B[0;32m   1540\u001B[0m         \u001B[38;5;129;01mor\u001B[39;00m _global_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_forward_pre_hooks):\n\u001B[1;32m-> 1541\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mforward_call\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m   1543\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m   1544\u001B[0m     result \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n",
      "Cell \u001B[1;32mIn[8], line 17\u001B[0m, in \u001B[0;36mNet.forward\u001B[1;34m(self, x, short, Lmem)\u001B[0m\n\u001B[0;32m     15\u001B[0m     Rmem \u001B[38;5;241m=\u001B[39m torch\u001B[38;5;241m.\u001B[39mzeros(\u001B[38;5;241m1\u001B[39m,\u001B[38;5;241m128\u001B[39m,requires_grad\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mFalse\u001B[39;00m)\n\u001B[0;32m     16\u001B[0m     \u001B[38;5;28;01mfor\u001B[39;00m it2 \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mrange\u001B[39m(\u001B[38;5;28mlen\u001B[39m(x[it])):\n\u001B[1;32m---> 17\u001B[0m         temp, Rmem \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43ml2v\u001B[49m\u001B[43m(\u001B[49m\u001B[43mx\u001B[49m\u001B[43m[\u001B[49m\u001B[43mit\u001B[49m\u001B[43m]\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;241;43m0\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\u001B[43mRmem\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     18\u001B[0m     nlist\u001B[38;5;241m.\u001B[39mappend(temp)\n\u001B[0;32m     20\u001B[0m nX \u001B[38;5;241m=\u001B[39m torch\u001B[38;5;241m.\u001B[39mstack(nlist, dim\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m0\u001B[39m)\n",
      "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1532\u001B[0m, in \u001B[0;36mModule._wrapped_call_impl\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1530\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_compiled_call_impl(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)  \u001B[38;5;66;03m# type: ignore[misc]\u001B[39;00m\n\u001B[0;32m   1531\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m-> 1532\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call_impl\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1541\u001B[0m, in \u001B[0;36mModule._call_impl\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1536\u001B[0m \u001B[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001B[39;00m\n\u001B[0;32m   1537\u001B[0m \u001B[38;5;66;03m# this function, and just call forward.\u001B[39;00m\n\u001B[0;32m   1538\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m (\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_pre_hooks\n\u001B[0;32m   1539\u001B[0m         \u001B[38;5;129;01mor\u001B[39;00m _global_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_backward_hooks\n\u001B[0;32m   1540\u001B[0m         \u001B[38;5;129;01mor\u001B[39;00m _global_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_forward_pre_hooks):\n\u001B[1;32m-> 1541\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mforward_call\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m   1543\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m   1544\u001B[0m     result \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n",
      "Cell \u001B[1;32mIn[3], line 16\u001B[0m, in \u001B[0;36mLine2Vec.forward\u001B[1;34m(self, x, mem)\u001B[0m\n\u001B[0;32m     14\u001B[0m x \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mrelu(x \u001B[38;5;241m@\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mweight1 \u001B[38;5;241m+\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mbias1)\n\u001B[0;32m     15\u001B[0m x \u001B[38;5;241m=\u001B[39m x\u001B[38;5;241m+\u001B[39mmem\n\u001B[1;32m---> 16\u001B[0m x \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mrelu\u001B[49m(x \u001B[38;5;241m@\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mweight2 \u001B[38;5;241m+\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mbias2)\n\u001B[0;32m     17\u001B[0m \u001B[38;5;66;03m#print(x.shape,self.weightM.shape)\u001B[39;00m\n\u001B[0;32m     18\u001B[0m mem\u001B[38;5;241m=\u001B[39mx \u001B[38;5;241m@\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mweightM\n",
      "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1696\u001B[0m, in \u001B[0;36mModule.__getattr__\u001B[1;34m(self, name)\u001B[0m\n\u001B[0;32m   1687\u001B[0m         \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_pre_hooks \u001B[38;5;241m=\u001B[39m OrderedDict()\n\u001B[0;32m   1689\u001B[0m \u001B[38;5;66;03m# On the return type:\u001B[39;00m\n\u001B[0;32m   1690\u001B[0m \u001B[38;5;66;03m# We choose to return `Any` in the `__getattr__` type signature instead of a more strict `Union[Tensor, Module]`.\u001B[39;00m\n\u001B[0;32m   1691\u001B[0m \u001B[38;5;66;03m# This is done for better interop with various type checkers for the end users.\u001B[39;00m\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m   1694\u001B[0m \u001B[38;5;66;03m# See full discussion on the problems with returning `Union` here\u001B[39;00m\n\u001B[0;32m   1695\u001B[0m \u001B[38;5;66;03m# https://github.com/microsoft/pyright/issues/4213\u001B[39;00m\n\u001B[1;32m-> 1696\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m__getattr__\u001B[39m(\u001B[38;5;28mself\u001B[39m, name: \u001B[38;5;28mstr\u001B[39m) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m Any:\n\u001B[0;32m   1697\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124m_parameters\u001B[39m\u001B[38;5;124m'\u001B[39m \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m\u001B[38;5;18m__dict__\u001B[39m:\n\u001B[0;32m   1698\u001B[0m         _parameters \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m\u001B[38;5;18m__dict__\u001B[39m[\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m_parameters\u001B[39m\u001B[38;5;124m'\u001B[39m]\n",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m: "
     ]
    }
   ],
   "execution_count": 40
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "87bb1863227b2cb0"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
