{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "outputs": [],
   "source": [
    "#server ip:port\n",
    "#    http://114.213.253.204:8899"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [],
   "source": [
     "# Imports and global training configuration.\n",
     "import numpy as np\n",
     "import torch\n",
     "from torch import nn\n",
     "import utils\n",
     "import torch.nn.functional as F\n",
     "from torch.utils.data import DataLoader\n",
     "from torchvision import datasets, transforms\n",
     "# Prefer the first CUDA device when available; fall back to CPU.\n",
     "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
     "# NOTE(review): effectively \"train until interrupted\" -- the logged run below\n",
     "# was stopped manually with a KeyboardInterrupt around epoch 7000.\n",
     "epochs = 1000000\n",
     "batch_size = 10\n",
     "learning_rate = 1e-1  # SGD step size"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [],
   "source": [
     "import csv\n",
     "import os\n",
     "import utils\n",
     "import json\n",
     "from tqdm import tqdm\n",
     "\n",
     "def get_LLDs_dataset_casia(config):\n",
     "    \"\"\"Load openSMILE LLD features for the CASIA corpus from a JSON file.\n",
     "\n",
     "    Args:\n",
     "        config: object exposing ``opensmile_feature_json_path``, the path to a\n",
     "            JSON file mapping sample keys to feature vectors.\n",
     "\n",
     "    Returns:\n",
     "        (embeddings, label): parallel lists of feature vectors and int labels.\n",
     "    \"\"\"\n",
     "    #file_name=\"opensmile_features_casia.json\"\n",
     "    #feature_type=\"IS13_ComParE\"\n",
     "    file_name=config.opensmile_feature_json_path\n",
     "    with open(file_name, 'r', encoding='UTF-8') as f:\n",
     "        load_dict = json.load(f)\n",
     "    embeddings = []\n",
     "    label = []\n",
     "    for k, v in load_dict.items():\n",
     "        embeddings.append(v)\n",
     "        # NOTE(review): assumes the class label is the 5th-from-last character\n",
     "        # of the key (e.g. \"...<label>.wav\") -- confirm against the data files.\n",
     "        label.append(int(k[-5]))\n",
     "\n",
     "    return embeddings,label"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [],
   "source": [
    "class LinearNetwork(nn.Module):\n",
    "\n",
    "    def __init__(self):\n",
    "        super(LinearNetwork, self).__init__()\n",
    "\n",
    "        self.linear_relu_stack = nn.Sequential(\n",
    "            # nn.Linear(88,64),\n",
    "            # nn.ReLU(),\n",
    "            # nn.Dropout(0.6),\n",
    "            # nn.Linear(64, 6),\n",
    "            # nn.Softmax(dim=1)\n",
    "            nn.Linear(6373, 4096),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(p=0.5),\n",
    "            nn.Linear(4096, 512),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(p=0.5),\n",
    "            nn.Linear(512, 7),\n",
    "            nn.Softmax(dim=1)\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "\n",
    "        logits = self.linear_relu_stack(x)\n",
    "\n",
    "        return logits\n",
    "\n",
    "def train_loop(dataloader, model, loss_fn, optimizer):\n",
    "    for batch, (X, y) in enumerate(dataloader):\n",
    "        # Compute prediction and loss\n",
    "        X = X.to(device)\n",
    "        pred = model(X)\n",
    "        y = y.to(device)\n",
    "        pred = pred.to(device)\n",
    "        loss = loss_fn(pred, y)\n",
    "\n",
    "        # Backpropagation\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "\n",
    "def test_loop(dataloader, model, loss_fn):\n",
    "    size = len(dataloader.dataset)\n",
    "    num_batches = len(dataloader)\n",
    "    test_loss, correct = 0, 0\n",
    "    with torch.no_grad():\n",
    "        for X, y in dataloader:\n",
    "            X = X.to(device)\n",
    "            y = y.to(device)\n",
    "            pred = model(X).to(device)\n",
    "            test_loss += loss_fn(pred, y).item()\n",
    "            correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n",
    "\n",
    "    test_loss /= num_batches\n",
    "    correct /= size\n",
    "    print(f\"Test Error: \\n Accuracy: {(100 * correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\")\n",
    "\n",
    "def oooo():\n",
    "    # config_file = 'configs/LLD_config.yaml'\n",
    "    # config = utils.parse_opt(config_file)\n",
    "    # embeddings,label = get_LLDs_dataset_casia(config)\n",
    "    embeddings = torch.tensor(np.load(\"EMO_DB_embedding_IS13.npy\"),dtype=torch.float32)\n",
    "    label = torch.tensor(np.load(\"EMO_DB_label_IS13.npy\"),dtype=torch.long)\n",
    "\n",
    "    dataset = torch.utils.data.TensorDataset(F.normalize(embeddings,dim=0), label)\n",
    "    train, test = torch.utils.data.random_split(dataset, (380, 155))\n",
    "    train_dataloader = DataLoader(train, batch_size=batch_size, shuffle=True)\n",
    "    test_dataloader = DataLoader(test, batch_size=batch_size, shuffle=True)\n",
    "    # train_dataloader = DataLoader(dataset, batch_size, shuffle=True)\n",
    "    # test_dataloader = DataLoader(dataset, batch_size, shuffle=True)\n",
    "    model = LinearNetwork()\n",
    "    model.to(device)\n",
    "    # Initialize\n",
    "\n",
    "    loss_fn = nn.CrossEntropyLoss()\n",
    "    optimizer = torch.optim.SGD(model.parameters() , lr=learning_rate )\n",
    "\n",
    "    for t in range(epochs):\n",
    "        train_loop(dataloader=train_dataloader, model=model, loss_fn=loss_fn, optimizer=optimizer)\n",
    "        if t%100==0:\n",
    "            print(f\"Epoch {t }\\n-------------------------------\")\n",
    "            test_loop(dataloader=test_dataloader, model=model, loss_fn=loss_fn)\n",
    "\n",
    "    print(\"Done!\")"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 21.3%, Avg loss: 1.944827 \n",
      "\n",
      "Epoch 100\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 61.9%, Avg loss: 1.571585 \n",
      "\n",
      "Epoch 200\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 74.2%, Avg loss: 1.449938 \n",
      "\n",
      "Epoch 300\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 74.2%, Avg loss: 1.431277 \n",
      "\n",
      "Epoch 400\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 76.8%, Avg loss: 1.396120 \n",
      "\n",
      "Epoch 500\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 77.4%, Avg loss: 1.417322 \n",
      "\n",
      "Epoch 600\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.1%, Avg loss: 1.403364 \n",
      "\n",
      "Epoch 700\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 74.8%, Avg loss: 1.414740 \n",
      "\n",
      "Epoch 800\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 76.1%, Avg loss: 1.402006 \n",
      "\n",
      "Epoch 900\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 77.4%, Avg loss: 1.399636 \n",
      "\n",
      "Epoch 1000\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 77.4%, Avg loss: 1.397088 \n",
      "\n",
      "Epoch 1100\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 75.5%, Avg loss: 1.403540 \n",
      "\n",
      "Epoch 1200\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.7%, Avg loss: 1.387312 \n",
      "\n",
      "Epoch 1300\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.7%, Avg loss: 1.385494 \n",
      "\n",
      "Epoch 1400\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 79.4%, Avg loss: 1.367949 \n",
      "\n",
      "Epoch 1500\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 79.4%, Avg loss: 1.369283 \n",
      "\n",
      "Epoch 1600\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 81.9%, Avg loss: 1.355263 \n",
      "\n",
      "Epoch 1700\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 77.4%, Avg loss: 1.384930 \n",
      "\n",
      "Epoch 1800\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 79.4%, Avg loss: 1.379122 \n",
      "\n",
      "Epoch 1900\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.0%, Avg loss: 1.364285 \n",
      "\n",
      "Epoch 2000\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.6%, Avg loss: 1.358479 \n",
      "\n",
      "Epoch 2100\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 81.9%, Avg loss: 1.352157 \n",
      "\n",
      "Epoch 2200\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.7%, Avg loss: 1.371483 \n",
      "\n",
      "Epoch 2300\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 77.4%, Avg loss: 1.370896 \n",
      "\n",
      "Epoch 2400\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 81.3%, Avg loss: 1.354922 \n",
      "\n",
      "Epoch 2500\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 79.4%, Avg loss: 1.367600 \n",
      "\n",
      "Epoch 2600\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 81.3%, Avg loss: 1.359116 \n",
      "\n",
      "Epoch 2700\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.0%, Avg loss: 1.360426 \n",
      "\n",
      "Epoch 2800\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 79.4%, Avg loss: 1.373705 \n",
      "\n",
      "Epoch 2900\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 79.4%, Avg loss: 1.371758 \n",
      "\n",
      "Epoch 3000\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 81.3%, Avg loss: 1.348247 \n",
      "\n",
      "Epoch 3100\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 81.3%, Avg loss: 1.358493 \n",
      "\n",
      "Epoch 3200\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.0%, Avg loss: 1.374638 \n",
      "\n",
      "Epoch 3300\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.6%, Avg loss: 1.373296 \n",
      "\n",
      "Epoch 3400\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.6%, Avg loss: 1.348959 \n",
      "\n",
      "Epoch 3500\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.7%, Avg loss: 1.381160 \n",
      "\n",
      "Epoch 3600\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 81.9%, Avg loss: 1.345744 \n",
      "\n",
      "Epoch 3700\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 76.8%, Avg loss: 1.380665 \n",
      "\n",
      "Epoch 3800\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 79.4%, Avg loss: 1.373377 \n",
      "\n",
      "Epoch 3900\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.6%, Avg loss: 1.351218 \n",
      "\n",
      "Epoch 4000\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 79.4%, Avg loss: 1.372862 \n",
      "\n",
      "Epoch 4100\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.6%, Avg loss: 1.366876 \n",
      "\n",
      "Epoch 4200\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 81.9%, Avg loss: 1.353116 \n",
      "\n",
      "Epoch 4300\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.0%, Avg loss: 1.372782 \n",
      "\n",
      "Epoch 4400\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 79.4%, Avg loss: 1.369071 \n",
      "\n",
      "Epoch 4500\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.7%, Avg loss: 1.363501 \n",
      "\n",
      "Epoch 4600\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 77.4%, Avg loss: 1.364437 \n",
      "\n",
      "Epoch 4700\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.7%, Avg loss: 1.368572 \n",
      "\n",
      "Epoch 4800\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 83.9%, Avg loss: 1.324753 \n",
      "\n",
      "Epoch 4900\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.0%, Avg loss: 1.373903 \n",
      "\n",
      "Epoch 5000\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.0%, Avg loss: 1.361416 \n",
      "\n",
      "Epoch 5100\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.1%, Avg loss: 1.368402 \n",
      "\n",
      "Epoch 5200\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.6%, Avg loss: 1.354919 \n",
      "\n",
      "Epoch 5300\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.1%, Avg loss: 1.365501 \n",
      "\n",
      "Epoch 5400\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 81.3%, Avg loss: 1.345186 \n",
      "\n",
      "Epoch 5500\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 82.6%, Avg loss: 1.346216 \n",
      "\n",
      "Epoch 5600\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.0%, Avg loss: 1.358455 \n",
      "\n",
      "Epoch 5700\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.6%, Avg loss: 1.352186 \n",
      "\n",
      "Epoch 5800\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.0%, Avg loss: 1.353109 \n",
      "\n",
      "Epoch 5900\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 79.4%, Avg loss: 1.361568 \n",
      "\n",
      "Epoch 6000\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 79.4%, Avg loss: 1.359138 \n",
      "\n",
      "Epoch 6100\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.6%, Avg loss: 1.360371 \n",
      "\n",
      "Epoch 6200\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.7%, Avg loss: 1.373832 \n",
      "\n",
      "Epoch 6300\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.6%, Avg loss: 1.357781 \n",
      "\n",
      "Epoch 6400\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.7%, Avg loss: 1.374033 \n",
      "\n",
      "Epoch 6500\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.0%, Avg loss: 1.351468 \n",
      "\n",
      "Epoch 6600\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.7%, Avg loss: 1.367833 \n",
      "\n",
      "Epoch 6700\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.1%, Avg loss: 1.368994 \n",
      "\n",
      "Epoch 6800\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 78.1%, Avg loss: 1.362401 \n",
      "\n",
      "Epoch 6900\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 80.0%, Avg loss: 1.365579 \n",
      "\n",
      "Epoch 7000\n",
      "-------------------------------\n",
      "Test Error: \n",
      " Accuracy: 81.9%, Avg loss: 1.344829 \n",
      "\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[0;31mKeyboardInterrupt\u001B[0m                         Traceback (most recent call last)",
      "\u001B[0;32m/tmp/ipykernel_6927/1209020766.py\u001B[0m in \u001B[0;36m<module>\u001B[0;34m\u001B[0m\n\u001B[0;32m----> 1\u001B[0;31m \u001B[0moooo\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m      2\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;32m/tmp/ipykernel_6927/1034346290.py\u001B[0m in \u001B[0;36moooo\u001B[0;34m()\u001B[0m\n\u001B[1;32m     78\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m     79\u001B[0m     \u001B[0;32mfor\u001B[0m \u001B[0mt\u001B[0m \u001B[0;32min\u001B[0m \u001B[0mrange\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mepochs\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m---> 80\u001B[0;31m         \u001B[0mtrain_loop\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mdataloader\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mtrain_dataloader\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mmodel\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mmodel\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mloss_fn\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mloss_fn\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0moptimizer\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0moptimizer\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m     81\u001B[0m         \u001B[0;32mif\u001B[0m \u001B[0mt\u001B[0m\u001B[0;34m%\u001B[0m\u001B[0;36m100\u001B[0m\u001B[0;34m==\u001B[0m\u001B[0;36m0\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m     82\u001B[0m             \u001B[0mprint\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34mf\"Epoch {t }\\n-------------------------------\"\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;32m/tmp/ipykernel_6927/1034346290.py\u001B[0m in \u001B[0;36mtrain_loop\u001B[0;34m(dataloader, model, loss_fn, optimizer)\u001B[0m\n\u001B[1;32m     36\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m     37\u001B[0m         \u001B[0;31m# Backpropagation\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m---> 38\u001B[0;31m         \u001B[0moptimizer\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mzero_grad\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m     39\u001B[0m         \u001B[0mloss\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mbackward\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m     40\u001B[0m         \u001B[0moptimizer\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mstep\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;32m~/anaconda3/envs/torch/lib/python3.8/site-packages/torch/optim/optimizer.py\u001B[0m in \u001B[0;36mzero_grad\u001B[0;34m(self, set_to_none)\u001B[0m\n\u001B[1;32m    215\u001B[0m                             \u001B[0;32melse\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    216\u001B[0m                                 \u001B[0mp\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mgrad\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mrequires_grad_\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;32mFalse\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 217\u001B[0;31m                             \u001B[0mp\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mgrad\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mzero_\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m    218\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    219\u001B[0m     \u001B[0;32mdef\u001B[0m \u001B[0mstep\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mclosure\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;32m~/anaconda3/envs/torch/lib/python3.8/site-packages/torch/autograd/profiler.py\u001B[0m in \u001B[0;36m__exit__\u001B[0;34m(self, exc_type, exc_value, traceback)\u001B[0m\n\u001B[1;32m    619\u001B[0m     \u001B[0;32mdef\u001B[0m \u001B[0m__exit__\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mexc_type\u001B[0m\u001B[0;34m:\u001B[0m \u001B[0mAny\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mexc_value\u001B[0m\u001B[0;34m:\u001B[0m \u001B[0mAny\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mtraceback\u001B[0m\u001B[0;34m:\u001B[0m \u001B[0mAny\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    620\u001B[0m         \u001B[0;32mif\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mrun_callbacks_on_exit\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 621\u001B[0;31m             \u001B[0mtorch\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mops\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mprofiler\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_record_function_exit\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mhandle\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m    622\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    623\u001B[0m     \u001B[0;32mdef\u001B[0m \u001B[0m_call_end_callbacks_on_future\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mfut\u001B[0m\u001B[0;34m:\u001B[0m \u001B[0mFuture\u001B[0m\u001B[0;34m[\u001B[0m\u001B[0mAny\u001B[0m\u001B[0;34m]\u001B[0m\u001B[0;34m)\u001B[0m \u001B[0;34m->\u001B[0m \u001B[0mFuture\u001B[0m\u001B[0;34m[\u001B[0m\u001B[0mAny\u001B[0m\u001B[0;34m]\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;31mKeyboardInterrupt\u001B[0m: "
     ]
    }
   ],
   "source": [
     "# Launch training; interrupt the kernel to stop (epochs is effectively infinite).\n",
     "oooo()"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "python3",
   "language": "python",
   "display_name": "Python 3 (ipykernel)"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}