{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "先说一下`batch_first = True`这个东西\n",
    "\n",
    "https://pytorch.org/tutorials/beginner/nlp/sequence_models_tutorial.html?highlight=lstm\n",
    "\n",
    "mini-batch实际上是介于随机梯度下降（SGD）和批量梯度下降之间的一种训练模式。\n",
    "\n",
    "```\n",
    "Stochastic Gradient Descent(1 example)\n",
    "Mini-Batch Gradient Descent(pick m out of n)\n",
    "Batch Gradient Descent(average total n example)\n",
    "\n",
    "```\n",
    "它实际上应该叫batch_instance_first，也就是第一维度代表的是batch的索引，放在第一维度是符合人们的认知规律的。\n",
    "\n",
    "The semantics of the axes of these tensors is important. The first axis is the sequence itself, the second indexes instances in the mini-batch, and the third indexes elements of the input. We haven’t discussed mini-batching, so lets just ignore that and assume we will always have just 1 dimension on the second axis. If we want to run the sequence model over the sentence “The cow jumped”, our input should look like\n",
    "\n",
    "<a href='https://postimages.org/' target='_blank'><img src='https://i.postimg.cc/y8yqcmyk/image.png' border='0' alt='image'/></a>\n",
    "\n",
    "Except remember there is an additional 2nd dimension with size 1.\n",
    "\n",
    "> batch_first – If True, then the input and output tensors are provided as (batch, seq, feature). Default: False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "inputs\n",
      "\n",
      "[tensor([[ 0.5922, -0.6394,  0.5386],\n",
      "        [-0.0648, -1.9198,  0.0291],\n",
      "        [ 0.0398, -0.4481, -0.4246],\n",
      "        [-1.1099, -0.7322,  0.2759],\n",
      "        [ 1.2943, -1.6916, -0.1943]]), tensor([[-0.7331, -0.9197, -0.8914],\n",
      "        [-0.8615,  1.8490,  1.1459],\n",
      "        [-1.2556,  0.2283,  0.8894],\n",
      "        [ 0.2102,  0.4648, -0.6704],\n",
      "        [-1.4915, -0.1187,  0.4463]])]\n",
      "out\n",
      "\n",
      "tensor([[[-0.7331, -0.9197, -0.8914],\n",
      "         [-0.8615,  1.8490,  1.1459],\n",
      "         [-1.2556,  0.2283,  0.8894],\n",
      "         [ 0.2102,  0.4648, -0.6704],\n",
      "         [-1.4915, -0.1187,  0.4463]]])\n",
      "hidden\n",
      "\n",
      "tensor([[[ 1.2874, -0.1642,  2.7836]]])\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "seed = 6\n",
    "torch.manual_seed(seed)\n",
    "np.random.seed(seed)\n",
    "\n",
    "# https://pytorch.org/tutorials/beginner/nlp/sequence_models_tutorial.html\n",
    "# input_size is the feature dimension of one time step (the word-vector\n",
    "# size). Input dim is 3, hidden dim is 3; num_layers defaults to 1.\n",
    "# Fixed: the original used nn.LSTM(3, 2, ...), which contradicts both the\n",
    "# comment above and the (1, 1, 3) hidden/cell states initialized below.\n",
    "lstm = nn.LSTM(3, 3, batch_first=True)\n",
    "\n",
    "# Build `batch_size` sequences, each of length 5 with 3 features per step.\n",
    "batch_size = 2\n",
    "\n",
    "inputs = [torch.randn(5, 3) for _ in range(batch_size)]\n",
    "\n",
    "print(\"inputs\\n\",inputs,sep='\\n')\n",
    "\n",
    "# Initialize the hidden and cell states. Their shape is\n",
    "# (num_layers * num_directions, batch, hidden_size) -- note that\n",
    "# batch_first=True only changes the input/output layout, not h/c.\n",
    "(hidden,cell)=(torch.randn(1, 1, 3),\n",
    "          torch.randn(1, 1, 3))\n",
    "\n",
    "\n",
    "for i in inputs:\n",
    "    # Feed one whole sequence per call; afterwards `hidden` holds the\n",
    "    # last hidden state and `cell` the last cell state.\n",
    "    # view(1, 5, -1) reshapes to (batch=1, seq_len=5, input_size).\n",
    "    # Fixed: the original assigned the reshaped input straight to `out`\n",
    "    # without ever calling the lstm.\n",
    "    out, (hidden,cell) = lstm(i.view(1, 5, -1), (hidden,cell))\n",
    "\n",
    "# \"out\" gives access to the hidden state at every time step of the last\n",
    "# sequence; \"hidden\" lets you continue the sequence and backpropagate\n",
    "# later by passing it back into the lstm.\n",
    "\n",
    "print(\"out\\n\",out,sep='\\n')\n",
    "print(\"hidden\\n\",hidden,sep='\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['D:\\\\Anaconda3\\\\lib\\\\site-packages\\\\ipykernel_launcher.py', '-f', 'C:\\\\Users\\\\Frank\\\\AppData\\\\Roaming\\\\jupyter\\\\runtime\\\\kernel-d4b81c61-4e5a-49a4-8a67-958dd8c5ccd5.json']\n"
     ]
    }
   ],
   "source": [
    "# In Jupyter, sys.argv holds the kernel's own launch command line, so\n",
    "# it is non-empty -- inspect it below.\n",
    "import sys\n",
    "print(sys.argv)\n",
    "# Simulate assigning arguments by hand: parse_args(args=[]) discards the\n",
    "# kernel command-line entries printed above.\n",
    "# NOTE(review): `parser` is only defined in the cell below, so this line\n",
    "# fails under Restart & Run All unless that cell is executed first.\n",
    "args = parser.parse_args(args=[])\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Namespace(batch_size=1024, epochs=50, hidden_size=1, lr=0.001, num_layers=1, output_path='./model/', params=<class 'str'>, seed=6, weight_decay=0.0001)\n",
      "torch.Size([1, 20, 1])\n"
     ]
    }
   ],
   "source": [
    "import argparse\n",
    "\n",
    "parser = argparse.ArgumentParser(description='Matching')\n",
    "parser.add_argument('--seed', type=int, default=6)\n",
    "parser.add_argument('--lr', type=float, default=1e-3)\n",
    "parser.add_argument('--weight-decay', type=float, default=1e-4)\n",
    "parser.add_argument('--batch-size', type=int, default=1024)\n",
    "# Fixed: the original declared type=bool with default=str (the str class\n",
    "# itself, not a value); a plain string option is almost certainly meant.\n",
    "parser.add_argument('--params', type=str, default='')\n",
    "parser.add_argument('--epochs', type=int, default=50)\n",
    "parser.add_argument('--output-path', type=str, default='./model/')\n",
    "\n",
    "# Parse with an empty argv so the kernel's command line is ignored; doing\n",
    "# it here makes this cell runnable on its own under Restart & Run All.\n",
    "args = parser.parse_args(args=[])\n",
    "\n",
    "config = args\n",
    "config.hidden_size = hidden_size = 1\n",
    "config.num_layers = 1\n",
    "\n",
    "print(config)\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "bi = 1  # 1 for a unidirectional LSTM, 2 for bidirectional\n",
    "size = 20  # batch size for the initial states\n",
    "\n",
    "# (h_0, c_0), each shaped (num_layers * num_directions, batch, hidden_size)\n",
    "hidden = [torch.zeros(config.num_layers * bi,\n",
    "            size, hidden_size, device=device),\n",
    "            torch.zeros(config.num_layers * bi,\n",
    "            size, hidden_size, device=device)\n",
    "]\n",
    "\n",
    "print(hidden[0].size())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cpu\n"
     ]
    }
   ],
   "source": [
    "# `device` was chosen in the config cell above: cuda:0 if available,\n",
    "# otherwise the cpu.\n",
    "print(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LSTM(nn.Module):\n",
    "    \"\"\"LSTM encoder over pretrained word embeddings.\n",
    "\n",
    "    Args:\n",
    "        config: argparse Namespace; must provide input_size, hidden_size,\n",
    "            num_layers, n_out and dropout, and may provide bidirectional.\n",
    "        vocab: object exposing pretrained word vectors via vocab.vectors.\n",
    "        device: torch.device on which hidden states are allocated.\n",
    "    \"\"\"\n",
    "    def __init__(self, config, vocab, device):\n",
    "        super(LSTM, self).__init__()\n",
    "        self.config = config\n",
    "        self.device = device\n",
    "        # Fixed: the original read self.bidirectional and self.bi without\n",
    "        # ever assigning them, so constructing the module raised\n",
    "        # AttributeError. Default to a unidirectional LSTM.\n",
    "        self.bidirectional = getattr(config, 'bidirectional', False)\n",
    "        self.bi = 2 if self.bidirectional else 1\n",
    "        # Wrap the pretrained vectors as a lookup table (frozen by default).\n",
    "        self.embed = nn.Embedding.from_pretrained(vocab.vectors)\n",
    "        self.input_txt_lstm = nn.LSTM(\n",
    "            input_size=config.input_size,\n",
    "            hidden_size=config.hidden_size,\n",
    "            batch_first=True,\n",
    "            bidirectional=self.bidirectional,\n",
    "            num_layers=config.num_layers\n",
    "        )\n",
    "        self.projection = nn.Linear(config.hidden_size*4, config.n_out)\n",
    "        self.dropout = nn.Dropout(config.dropout)\n",
    "        # NOTE(review): init_xavier_uniform is not defined anywhere in this\n",
    "        # notebook -- it must be supplied elsewhere before instantiation.\n",
    "        self.apply(init_xavier_uniform)\n",
    "\n",
    "    def hidden(self, size, hidden_size):\n",
    "        \"\"\"Return zeroed (h_0, c_0), each shaped\n",
    "        (num_layers * num_directions, size, hidden_size).\"\"\"\n",
    "        hidden = (torch.zeros(self.config.num_layers * self.bi,\n",
    "            size, hidden_size, device=self.device),\n",
    "            torch.zeros(self.config.num_layers * self.bi,\n",
    "            size, hidden_size, device=self.device))\n",
    "        return hidden\n",
    "\n",
    "    def forward(self, record):\n",
    "        # Placeholder forward: just inspect the incoming batch.\n",
    "        # Fixed: the print was indented at the same level as `def`,\n",
    "        # which is an IndentationError.\n",
    "        print(record)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'_backend': <torch.nn.backends.thnn.THNNFunctionBackend object at 0x000002A1F1ABE128>, '_parameters': OrderedDict([('weight', Parameter containing:\n",
      "tensor([[1.0000, 2.3000, 3.0000],\n",
      "        [4.0000, 5.1000, 6.3000],\n",
      "        [3.5000, 2.1200, 6.6800]]))]), '_buffers': OrderedDict(), '_backward_hooks': OrderedDict(), '_forward_hooks': OrderedDict(), '_forward_pre_hooks': OrderedDict(), '_state_dict_hooks': OrderedDict(), '_load_state_dict_pre_hooks': OrderedDict(), '_modules': OrderedDict(), 'training': True, 'num_embeddings': 3, 'embedding_dim': 3, 'padding_idx': None, 'max_norm': None, 'norm_type': 2.0, 'scale_grad_by_freq': False, 'sparse': False}\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([[3.5000, 2.1200, 6.6800]])"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Demonstrates that nn.Embedding.from_pretrained() simply wraps the\n",
    "# given weight matrix as a lookup table (frozen by default).\n",
    "\n",
    "weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3],[3.5,2.12,6.68]])\n",
    "embedding = nn.Embedding.from_pretrained(weight)\n",
    "\n",
    "print(embedding.__dict__)\n",
    "\n",
    "# Look up the embedding for index 2 (the original comment said index 1).\n",
    "# Renamed from `input`, which shadowed the builtin of the same name.\n",
    "indices = torch.LongTensor([2])\n",
    "embedding(indices)\n",
    "\n",
    "# torch lookups require tensor indices, not plain ints:\n",
    "# embedding(2)\n",
    "# -> embedding(): argument 'indices' (position 2) must be Tensor, not int"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 接下来看一下forward 是做什么的"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "//Pytorch official document\n",
    "\n",
    "    forward(*input)[SOURCE]\n",
    "\n",
    "Defines the computation performed at every call.\n",
    "\n",
    "Should be overridden by all subclasses."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "-------\n",
    "\n",
    "### DFS 学习法登场hhhhhh\n",
    "\n",
    "这里牵扯到了一个计算的问题\n",
    "\n",
    "我们知道BP 是深度学习领域重获新生的灵魂之一，中间变量的梯度自然是链接 input 和 output 的桥梁，梯度在`pytorch` 当中是如何传递的呢？\n",
    "\n",
    "https://zhuanlan.zhihu.com/p/75054200\n",
    "\n",
    "> 实名感谢知乎\n",
    "\n",
    "> 对于中间变量z，hook 的使用方式为：z.register_hook(hook_fn)，其中 hook_fn为一个用户自定义的函数，其签名为：\n",
    "\n",
    ">hook_fn(grad) -> Tensor or None\n",
    "它的输入为变量 z 的梯度，输出为一个 Tensor 或者是 None （None 一般用于直接打印梯度）。反向传播时，梯度传播到变量 z，再继续向前传播之前，将会传入 hook_fn。如果 hook_fn的返回值是 None，那么梯度将不改变，继续向前传播，如果 hook_fn的返回值是 Tensor 类型，则该 Tensor 将取代 z 原有的梯度，向前传播。\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "啊啊啊 什么是Hook？\n",
    "//Winform编程当中听说可以使用Hook实现全局键盘监听（就类似于QQ浏览器截图的 `Ctrl + Shift + Z`） \n",
    "\n",
    ">In computer programming, the term hooking covers a range of techniques used to alter or augment the behaviour of an operating system, of applications, or of other software components by intercepting function calls or messages or events passed between software components. Code that handles such intercepted function calls, events or messages is called a hook. Hooking is used for many purposes, including debugging and extending functionality.\n",
    "\n",
    "> Wiki\n",
    "\n",
    "\n",
    "```\n",
    "register_forward_hook(hook)[SOURCE]\n",
    "Registers a forward hook on the module.\n",
    "\n",
    "The hook will be called every time after forward() has computed an output. It should have the following signature:\n",
    "\n",
    "hook(module, input, output) -> None or modified output\n",
    "The hook can modify the output. It can modify the input inplace but it will not have effect on forward since this is called after forward() is called.\n",
    "```\n",
    "---------\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "看一下model(input)调用的时候做了什么\n",
    "不要太纠结细节\n",
    "\n",
    "```\n",
    "def __call__(self, *input, **kwargs):\n",
    "        for hook in self._forward_pre_hooks.values():\n",
    "\n",
    "            #钩子是一个函数对象\n",
    "            result = hook(self, input)\n",
    "            if result is not None:\n",
    "                if not isinstance(result, tuple):\n",
    "                    result = (result,)\n",
    "                # 可以改变输入\n",
    "                input = result\n",
    "        if torch._C._get_tracing_state():\n",
    "            result = self._slow_forward(*input, **kwargs)\n",
    "        else:\n",
    "            # 实际这样执行了\n",
    "            result = self.forward(*input, **kwargs)\n",
    "        for hook in self._forward_hooks.values():\n",
    "            hook_result = hook(self, input, result)\n",
    "            if hook_result is not None:\n",
    "                result = hook_result\n",
    "        if len(self._backward_hooks) > 0:\n",
    "            var = result\n",
    "            while not isinstance(var, torch.Tensor):\n",
    "                if isinstance(var, dict):\n",
    "                    var = next((v for v in var.values() if isinstance(v, torch.Tensor)))\n",
    "                else:\n",
    "                    var = var[0]\n",
    "            grad_fn = var.grad_fn\n",
    "            if grad_fn is not None:\n",
    "                for hook in self._backward_hooks.values():\n",
    "                    wrapper = functools.partial(hook, self)\n",
    "                    functools.update_wrapper(wrapper, hook)\n",
    "                    grad_fn.register_hook(wrapper)\n",
    "        return result\n",
    "```\n",
    "\n",
    "看一下Pytorch 官方文档的一个应用 \n",
    "\n",
    "https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html\n",
    "\n",
    "``` python\n",
    "\n",
    "def train(category_tensor, input_line_tensor, target_line_tensor):\n",
    "    target_line_tensor.unsqueeze_(-1)\n",
    "\n",
    "    hidden = rnn.initHidden()\n",
    "\n",
    "    rnn.zero_grad()\n",
    "\n",
    "    loss = 0\n",
    "\n",
    "    for i in range(input_line_tensor.size(0)):\n",
    "        # 注意这里的调用方法\n",
    "        output, hidden = rnn(category_tensor, input_line_tensor[i], hidden)\n",
    "        l = criterion(output, target_line_tensor[i])\n",
    "        loss += l\n",
    "\n",
    "    loss.backward()\n",
    "\n",
    "    for p in rnn.parameters():\n",
    "        p.data.add_(-learning_rate, p.grad.data)\n",
    "\n",
    "    return output, loss.item() / input_line_tensor.size(0)\n",
    "    \n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "其实模型的定义到此已经基本结束了\n",
    "\n",
    "毕竟前馈的工作就是forward 完成的，换句话说，forward 定义一定要完整，要产生完整的输出，以供之后的BP 使用。\n",
    "\n",
    "实际上BP 才叫机器学习当中的“学习”\n",
    "\n",
    "学习也应如此，不BackPropagate去吸取教训，而只是不断地接受input 再forward。看似你在前进，实际只是自嗨罢了。\n",
    "\n",
    "------\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "https://pytorch.org/tutorials/beginner/transformer_tutorial.html\n",
    "这是用到了torchText，在另一篇里面分析一下它的用法\n",
    "\n",
    "\n",
    "我们先看一下这个进行字母预测的官方示例\n",
    "https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html#training\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "#### Run the model\n",
    "\n",
    "CrossEntropyLoss is applied to track the loss and SGD implements the stochastic gradient descent method as the optimizer. The initial learning rate is set to 5.0. StepLR is applied to adjust the learning rate through epochs. During training, we use the nn.utils.clip_grad_norm_ function to scale all the gradients together to prevent them from exploding.\n",
    "\n",
    "```python\n",
    "# 【Loss损失函数】\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "lr = 5.0 # learning rate\n",
    "# 【optimizer优化器】\n",
    "optimizer = torch.optim.SGD(model.parameters(), lr=lr)\n",
    "# https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate\n",
    "scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)\n",
    "\n",
    "import time\n",
    "def train():\n",
    "    model.train() # Turn on the train mode\n",
    "\n",
    "    total_loss = 0.\n",
    "\n",
    "    start_time = time.time()\n",
    "\n",
    "    ntokens = len(TEXT.vocab.stoi)\n",
    "    \n",
    "    for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):\n",
    "        data, targets = get_batch(train_data, i)\n",
    "        # Clears the gradients of all optimized torch.Tensor s.\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        # 【前馈】forward\n",
    "        output = model(data)\n",
    "        # 【计算损失】\n",
    "        loss = criterion(output.view(-1, ntokens), targets)\n",
    "        # 【通过损失】\n",
    "        # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.backward\n",
    "        loss.backward()\n",
    "\n",
    "        # 【防止梯度爆炸】Clips gradient norm of an iterable of parameters.\n",
    "        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)\n",
    "\n",
    "        # https://pytorch.org/docs/stable/optim.html#taking-an-optimization-step\n",
    "        # 【https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.step】源码在此\n",
    "        optimizer.step()\n",
    "\n",
    "        total_loss += loss.item()\n",
    "        log_interval = 200\n",
    "        if batch % log_interval == 0 and batch > 0:\n",
    "            cur_loss = total_loss / log_interval\n",
    "            elapsed = time.time() - start_time\n",
    "            print('| epoch {:3d} | {:5d}/{:5d} batches | '\n",
    "                  'lr {:02.2f} | ms/batch {:5.2f} | '\n",
    "                  'loss {:5.2f} | ppl {:8.2f}'.format(\n",
    "                    epoch, batch, len(train_data) // bptt, scheduler.get_lr()[0],\n",
    "                    elapsed * 1000 / log_interval,\n",
    "                    cur_loss, math.exp(cur_loss)))\n",
    "            total_loss = 0\n",
    "            start_time = time.time()\n",
    "\n",
    "def evaluate(eval_model, data_source):\n",
    "    eval_model.eval() # Turn on the evaluation mode\n",
    "    total_loss = 0.\n",
    "    ntokens = len(TEXT.vocab.stoi)\n",
    "    with torch.no_grad():\n",
    "        for i in range(0, data_source.size(0) - 1, bptt):\n",
    "            data, targets = get_batch(data_source, i)\n",
    "            output = eval_model(data)\n",
    "            output_flat = output.view(-1, ntokens)\n",
    "            total_loss += len(data) * criterion(output_flat, targets).item()\n",
    "    return total_loss / (len(data_source) - 1)\n",
    "\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([ 0.7695,  0.2592, -0.3814], requires_grad=True)\n",
      "tensor([4, 3, 0])\n"
     ]
    },
    {
     "ename": "RuntimeError",
     "evalue": "Dimension out of range (expected to be in range of [-1, 0], but got 1)",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mRuntimeError\u001b[0m                              Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-58-479c16cc014c>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      8\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlabel\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      9\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 10\u001b[1;33m \u001b[0mloss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcriterion\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpred\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mlabel\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     11\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m    487\u001b[0m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    488\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 489\u001b[1;33m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    490\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    491\u001b[0m             \u001b[0mhook_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\lib\\site-packages\\torch\\nn\\modules\\loss.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input, target)\u001b[0m\n\u001b[0;32m    902\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtarget\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    903\u001b[0m         return F.cross_entropy(input, target, weight=self.weight,\n\u001b[1;32m--> 904\u001b[1;33m                                ignore_index=self.ignore_index, reduction=self.reduction)\n\u001b[0m\u001b[0;32m    905\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    906\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\lib\\site-packages\\torch\\nn\\functional.py\u001b[0m in \u001b[0;36mcross_entropy\u001b[1;34m(input, target, weight, size_average, ignore_index, reduce, reduction)\u001b[0m\n\u001b[0;32m   1968\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0msize_average\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m \u001b[1;32mor\u001b[0m \u001b[0mreduce\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1969\u001b[0m         \u001b[0mreduction\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_Reduction\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlegacy_get_string\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msize_average\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mreduce\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1970\u001b[1;33m     \u001b[1;32mreturn\u001b[0m \u001b[0mnll_loss\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlog_softmax\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtarget\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mweight\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mignore_index\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mreduction\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1971\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1972\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\lib\\site-packages\\torch\\nn\\functional.py\u001b[0m in \u001b[0;36mlog_softmax\u001b[1;34m(input, dim, _stacklevel, dtype)\u001b[0m\n\u001b[0;32m   1293\u001b[0m         \u001b[0mdim\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mjit\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_unwrap_optional\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdim\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1294\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mdtype\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1295\u001b[1;33m         \u001b[0mret\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlog_softmax\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdim\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1296\u001b[0m     \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1297\u001b[0m         \u001b[0m_dtype\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mjit\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_unwrap_optional\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mRuntimeError\u001b[0m: Dimension out of range (expected to be in range of [-1, 0], but got 1)"
     ]
    }
   ],
   "source": [
    "criterion = nn.CrossEntropyLoss()\n",
    "\n",
    "# CrossEntropyLoss expects (N, C) logits; this 1-D (N,) prediction is\n",
    "# deliberately wrong to show the resulting error.\n",
    "pred = torch.randn(3, requires_grad=True)\n",
    "label = torch.empty(3, dtype=torch.long).random_(5)\n",
    "\n",
    "print(pred)\n",
    "print(label)\n",
    "\n",
    "# Catch and display the expected failure so the notebook still runs\n",
    "# top to bottom under Restart & Run All.\n",
    "try:\n",
    "    loss = criterion(pred,label)\n",
    "    print(loss)\n",
    "except RuntimeError as err:\n",
    "    print('RuntimeError:', err)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss\n",
    "\n",
    "错误原因其实很简单，交叉熵需要传入一个参数\n",
    "\n",
    "<a href='https://postimages.org/' target='_blank'><img src='https://i.postimg.cc/BZB0xR47/image.png' border='0' alt='image'/></a>\n",
    "\n",
    "\n",
    "loss = criterion(output.view(-1, ntokens), targets)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[-0.6485, -1.9115, -1.0582,  0.2153,  0.5563],\n",
      "        [-0.6648, -0.6191, -0.9170,  0.5365, -2.0950],\n",
      "        [-0.1783,  0.2811,  0.7539,  1.4375, -0.2072]], requires_grad=True)\n",
      "tensor([4, 2, 1])\n",
      "tensor(1.6290, grad_fn=<NllLossBackward>)\n",
      "tensor([[ 0.0435,  0.0123,  0.0289,  0.1033, -0.1881],\n",
      "        [ 0.0522,  0.0546, -0.2928,  0.1735,  0.0125],\n",
      "        [ 0.0300, -0.2859,  0.0761,  0.1507,  0.0291]])\n"
     ]
    }
   ],
   "source": [
    "# Correct usage: logits shaped (N, C) and integer class targets in\n",
    "# [0, C); after backward(), the gradient lands in logits.grad.\n",
    "logits = torch.randn(3, 5, requires_grad=True)\n",
    "targets = torch.empty(3, dtype=torch.long).random_(5)\n",
    "\n",
    "print(logits)\n",
    "print(targets)\n",
    "\n",
    "loss = criterion(logits, targets)\n",
    "print(loss)\n",
    "loss.backward()\n",
    "print(logits.grad)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "file_extension": ".py",
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  },
  "mimetype": "text/x-python",
  "name": "python",
  "nbconvert_exporter": "python",
  "pygments_lexer": "ipython3",
  "version": 3
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
