{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "################################################################################\n",
      "### WARNING, path does not exist: KALDI_ROOT=/mnt/matylda5/iveselyk/Tools/kaldi-trunk\n",
      "###          (please add 'export KALDI_ROOT=<your_path>' in your $HOME/.profile)\n",
      "###          (or run as: KALDI_ROOT=<your_path> python <your_script>.py)\n",
      "################################################################################\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import random\n",
    "from io import open\n",
    "import unicodedata\n",
    "import string\n",
    "import re\n",
    "\n",
    "import torch\n",
    "import torchaudio\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from torch import nn\n",
    "from torch import optim\n",
    "import torch.nn.functional as F\n",
    "from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n",
    "from pathlib import Path\n",
    "import kaldi_io\n",
    "import sys\n",
    "import gc\n",
    "import json\n",
    "import time\n",
    "from data_4 import AudioDataLoader, AudioDataset, pad_list\n",
    "\n",
    "%matplotlib inline\n",
    "\n",
    "print_use = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Configuration handed to AudioDataset/AudioDataLoader (from data_4;\n",
     "# exact filtering semantics live in that module -- TODO confirm).\n",
     "train_json = \"data.json\"  # training-set json (espnet-style, keyed by 'utts')\n",
     "test_json = \"data_test.json\"  # test-set json\n",
     "batch_size = 32  # batch size given to AudioDataset (the loaders below use batch_size=1)\n",
     "maxlen_in = 100000  # input-length limit passed to AudioDataset\n",
     "maxlen_out = 30  # output-length limit passed to AudioDataset\n",
     "num_workers = 4"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## 加载数据 (Load data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open(train_json, 'rb') as f:\n",
    "            data = json.load(f)['utts']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "sorted_data = sorted(data.items(), key=lambda data: int(\n",
    "            data[1]['input']['shape'][0]), reverse=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "tr_dataset = AudioDataset(train_json, batch_size,\n",
    "                              maxlen_in, maxlen_out)\n",
    "\n",
    "tr_loader = AudioDataLoader(tr_dataset, batch_size=1, num_workers=num_workers)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "te_dataset = AudioDataset(test_json, batch_size,\n",
    "                              maxlen_in, maxlen_out)\n",
    "te_loader = AudioDataLoader(te_dataset, batch_size=1, num_workers=num_workers)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "char_list = []\n",
    "char_list_path = \"train_chars.txt\"\n",
    "with open(char_list_path, \"r\") as f:\n",
    "    for line in f:\n",
    "        data = line.split()\n",
    "        char_list.append(data[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# 模型搭建 (Model construction)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Special token ids and device selection.\n",
     "MAX_LENGTH= 200  # NOTE(review): not referenced anywhere in this notebook -- confirm before relying on it\n",
     "SOS_token = 0\n",
     "EOS_token = 1\n",
     "# NOTE(review): set *after* importing torch; this only works because torch\n",
     "# initializes CUDA lazily -- keep it before any .cuda() call.\n",
     "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"2\"\n",
     "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Attention"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "class DotProductAttention(nn.Module):\n",
    "    r\"\"\"Dot product attention.\n",
    "    Given a set of vector values, and a vector query, attention is a technique\n",
    "    to compute a weighted sum of the values, dependent on the query.\n",
    "\n",
    "    NOTE: Here we use the terminology in Stanford cs224n-2018-lecture11.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(DotProductAttention, self).__init__()\n",
    "        # TODO: move this out of this class?\n",
    "        # self.linear_out = nn.Linear(dim*2, dim)\n",
    "\n",
    "    def forward(self, queries, values):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            queries: N x To x H\n",
    "            values : N x Ti x H\n",
    "\n",
    "        Returns:\n",
    "            output: N x To x H\n",
    "            attention_distribution: N x To x Ti\n",
    "        \"\"\"\n",
    "        batch_size = queries.size(0)\n",
    "        hidden_size = queries.size(2)\n",
    "        input_lengths = values.size(1)\n",
    "        # (N, To, H) * (N, H, Ti) -> (N, To, Ti)\n",
    "        attention_scores = torch.bmm(queries, values.transpose(1, 2))\n",
    "        attention_distribution = F.softmax(\n",
    "            attention_scores.view(-1, input_lengths), dim=1).view(batch_size, -1, input_lengths)\n",
    "        # (N, To, Ti) * (N, Ti, H) -> (N, To, H)\n",
    "        attention_output = torch.bmm(attention_distribution, values)\n",
    "        # # concat -> (N, To, 2*H)\n",
    "        # concated = torch.cat((attention_output, queries), dim=2)\n",
    "        # # TODO: Move this out of this class?\n",
    "        # # output -> (N, To, H)\n",
    "        # output = torch.tanh(self.linear_out(\n",
    "        #     concated.view(-1, 2*hidden_size))).view(batch_size, -1, hidden_size)\n",
    "\n",
    "        return attention_output, attention_distribution"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "### 金字塔BLSTM (Pyramidal BLSTM)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "class pyramidalBLSTM(nn.Module):\n",
    "    def __init__(self, input_size, hidden_size, num_layers, dropout=0.0, bidirectional=True):\n",
    "        super(pyramidalBLSTM, self).__init__()\n",
    "        self.input_size = input_size\n",
    "        self.hidden_size = hidden_size\n",
    "        self.num_layers = num_layers\n",
    "        self.bidirectional = bidirectional\n",
    "        self.lstm1 = nn.LSTM(input_size, hidden_size, \n",
    "                           batch_first=True,\n",
    "                           dropout=dropout,\n",
    "                           bidirectional=bidirectional)\n",
    "        self.lstm2 = nn.LSTM(hidden_size*4, hidden_size,\n",
    "                           batch_first=True,\n",
    "                           dropout=dropout,\n",
    "                           bidirectional=bidirectional)\n",
    "        self.lstm3 = nn.LSTM(hidden_size*4, hidden_size,\n",
    "                           batch_first=True,\n",
    "                           dropout=dropout,\n",
    "                           bidirectional=bidirectional)\n",
    "\n",
    "    def forward(self, padded_input, input_lengths):\n",
    "        \n",
    "#         print(\"pyramidalBLSTM.padded_input.shape:\", padded_input.shape)\n",
    "        total_length = padded_input.size(1)\n",
    "#         print(\"total_length:\",total_length)\n",
    "        packed_input = pack_padded_sequence(padded_input, input_lengths,\n",
    "                                            batch_first=True)\n",
    "\n",
    "\n",
    "#         print(\"packed_input.shape\",packed_input.data.shape)\n",
    "        packed_output1, hidden1 = self.lstm1(packed_input)\n",
    "#         print(\"packed_output1.shape\",packed_output1.data.shape)\n",
    "\n",
    "        #两步拼一步\n",
    "\n",
    "        padded_output1, _ = pad_packed_sequence(packed_output1,\n",
    "                                        batch_first=True,\n",
    "                                        total_length=total_length)\n",
    "#         print(\"padded_output1.shape:\",padded_output1.shape)\n",
    "        padded_input2 = padded_output1.reshape(padded_output1.shape[0],padded_output1.shape[1]//2,padded_output1.shape[2]*2)\n",
    "        \n",
    "        packed_input2 = pack_padded_sequence(padded_input2, input_lengths//2,\n",
    "                                            batch_first=True)\n",
    "        packed_output2, hidden2 = self.lstm2(packed_input2)\n",
    "        padded_output2, _ = pad_packed_sequence(packed_output2,\n",
    "                                        batch_first=True,\n",
    "                                        total_length=total_length//2)\n",
    "        \n",
    "        padded_input3 = padded_output2.reshape(padded_output2.shape[0],padded_output2.shape[1]//2,padded_output2.shape[2]*2)\n",
    "        packed_input3 = pack_padded_sequence(padded_input3, input_lengths//4,\n",
    "                                            batch_first=True)\n",
    "        \n",
    "        packed_output3, hidden3 = self.lstm2(packed_input3)  \n",
    "        output, _ = pad_packed_sequence(packed_output3,\n",
    "                                        batch_first=True,\n",
    "                                        total_length=total_length//4)\n",
    "\n",
    "        \n",
    "        \n",
    "        return output, (hidden1, hidden2, hidden3)\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Scratch cell: small tensor used while prototyping the pyramid reshape below.\n",
     "a = [[[1,2,3],[4,5,6]],[[7,8,9],[10,11,12]]]\n",
     "a = torch.tensor(a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([32, 2236, 512])"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Scratch cell: dummy batch matching encoder-output dimensions, for shape checks.\n",
     "a = torch.rand([32, 2236, 512])\n",
     "a.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "# a.reshape(a.shape[0],a.shape[1]//2,a.shape[2]*2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## 模型构建 (Build the model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Encoder(nn.Module):\n",
    "    def __init__(self, input_size, hidden_size, dropout=0.0):\n",
    "        super(Encoder, self).__init__()\n",
    "        self.hidden_size = hidden_size\n",
    "        self.first = True\n",
    "\n",
    "        self.pyramidalBLSTM = pyramidalBLSTM(input_size, hidden_size, 1, dropout=dropout)\n",
    "\n",
    "    def forward(self, input, input_lengths):\n",
    "\n",
    "        output, hidden = self.pyramidalBLSTM(input, input_lengths)\n",
    "\n",
    "        return output, hidden\n",
    "\n",
    "    def initHidden(self):\n",
    "        return torch.zeros(1, 1, self.hidden_size, device=device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
     "class Decoder(nn.Module):\n",
     "    \"\"\"Speller: attention-based LSTM decoder producing character logits.\n",
     "\n",
     "    Embeds the previous token, runs a stack of LSTMCells whose first layer\n",
     "    also consumes the attention context, and maps [state; context] to\n",
     "    vocabulary logits through a Tanh MLP.\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, vocab_size, embedding_dim, sos_id, eos_id, hidden_size,\n",
     "                 num_layers, bidirectional_encoder=True):\n",
     "        super(Decoder, self).__init__()\n",
     "        # Hyper parameters\n",
     "        # embedding + output\n",
     "        self.vocab_size = vocab_size\n",
     "        self.embedding_dim = embedding_dim\n",
     "        self.sos_id = sos_id  # Start of Sentence\n",
     "        self.eos_id = eos_id  # End of Sentence\n",
     "        # rnn\n",
     "        self.hidden_size = hidden_size\n",
     "        self.num_layers = num_layers\n",
     "        self.bidirectional_encoder = bidirectional_encoder  # useless now\n",
     "        self.encoder_hidden_size = hidden_size  # must be equal now\n",
     "        # Components\n",
     "        self.embedding = nn.Embedding(self.vocab_size, self.embedding_dim)\n",
     "        self.rnn = nn.ModuleList()\n",
     "        # First cell sees [embedded token; attention context].\n",
     "        self.rnn += [nn.LSTMCell(self.embedding_dim +\n",
     "                                 self.encoder_hidden_size, self.hidden_size)]\n",
     "        for l in range(1, self.num_layers):\n",
     "            self.rnn += [nn.LSTMCell(self.hidden_size, self.hidden_size)]\n",
     "        self.attention = DotProductAttention()\n",
     "        # Maps [decoder state; context] -> vocab logits.\n",
     "        self.mlp = nn.Sequential(\n",
     "            nn.Linear(self.encoder_hidden_size + self.hidden_size,\n",
     "                      self.hidden_size),\n",
     "            nn.Tanh(),\n",
     "            nn.Linear(self.hidden_size, self.vocab_size))\n",
    "\n",
    "    def zero_state(self, encoder_padded_outputs, H=None):\n",
    "        N = encoder_padded_outputs.size(0)\n",
    "        H = self.hidden_size if H == None else H\n",
    "        return encoder_padded_outputs.new_zeros(N, H)\n",
    "\n",
     "    def forward(self, padded_input, encoder_padded_outputs):\n",
     "        \"\"\"Teacher-forced training pass; returns the mean cross-entropy loss.\n",
     "\n",
     "        Args:\n",
     "            padded_input: N x To target token ids, padded with IGNORE_ID.\n",
     "            encoder_padded_outputs: N x Ti x H encoder states.\n",
     "\n",
     "        Returns:\n",
     "            Scalar cross-entropy loss, averaged over non-ignored positions.\n",
     "        \"\"\"\n",
     "        # *********Get Input and Output\n",
     "        # from espnet/Decoder.forward()\n",
     "        # TODO: need to make more smart way\n",
     "        # NOTE(review): IGNORE_ID is a global not defined in any visible cell --\n",
     "        # confirm it matches the padding id produced by the data loader.\n",
     "        ys = [y[y != IGNORE_ID] for y in padded_input]  # parse padded ys\n",
     "        # prepare input and output word sequences with sos/eos IDs\n",
     "        eos = ys[0].new([self.eos_id])\n",
     "        sos = ys[0].new([self.sos_id])\n",
     "        ys_in = [torch.cat([sos, y], dim=0) for y in ys]\n",
     "        ys_out = [torch.cat([y, eos], dim=0) for y in ys]\n",
     "        # padding for ys with -1\n",
     "        # pys: utt x olen\n",
     "        ys_in_pad = pad_list(ys_in, self.eos_id)\n",
     "        ys_out_pad = pad_list(ys_out, IGNORE_ID)\n",
     "        # print(\"ys_in_pad\", ys_in_pad.size())\n",
     "        assert ys_in_pad.size() == ys_out_pad.size()\n",
     "        batch_size = ys_in_pad.size(0)\n",
     "        output_length = ys_in_pad.size(1)\n",
     "        # max_length = ys_in_pad.size(1) - 1  # TODO: should minus 1(sos)?\n",
     "\n",
     "        # *********Init decoder rnn\n",
     "        h_list = [self.zero_state(encoder_padded_outputs)]\n",
     "        c_list = [self.zero_state(encoder_padded_outputs)]\n",
     "        for l in range(1, self.num_layers):\n",
     "            h_list.append(self.zero_state(encoder_padded_outputs))\n",
     "            c_list.append(self.zero_state(encoder_padded_outputs))\n",
     "        att_c = self.zero_state(encoder_padded_outputs,\n",
     "                                H=encoder_padded_outputs.size(2))\n",
     "        y_all = []\n",
     "\n",
     "        # **********LAS: 1. decoder rnn 2. attention 3. concate and MLP\n",
     "        embedded = self.embedding(ys_in_pad)\n",
     "        for t in range(output_length):\n",
     "            # step 1. decoder RNN: s_i = RNN(s_i−1,y_i−1,c_i−1)\n",
     "            rnn_input = torch.cat((embedded[:, t, :], att_c), dim=1)\n",
     "            h_list[0], c_list[0] = self.rnn[0](\n",
     "                rnn_input, (h_list[0], c_list[0]))\n",
     "            for l in range(1, self.num_layers):\n",
     "                h_list[l], c_list[l] = self.rnn[l](\n",
     "                    h_list[l-1], (h_list[l], c_list[l]))\n",
     "            rnn_output = h_list[-1]  # below unsqueeze: (N x H) -> (N x 1 x H)\n",
     "            # step 2. attention: c_i = AttentionContext(s_i,h)\n",
     "            att_c, att_w = self.attention(rnn_output.unsqueeze(dim=1),\n",
     "                                          encoder_padded_outputs)\n",
     "            att_c = att_c.squeeze(dim=1)\n",
     "            # step 3. concate s_i and c_i, and input to MLP\n",
     "            mlp_input = torch.cat((rnn_output, att_c), dim=1)\n",
     "            predicted_y_t = self.mlp(mlp_input)\n",
     "            y_all.append(predicted_y_t)\n",
     "\n",
     "        y_all = torch.stack(y_all, dim=1)  # N x To x C\n",
     "        # **********Cross Entropy Loss\n",
     "        # F.cross_entropy = NLL(log_softmax(input), target))\n",
     "        y_all = y_all.view(batch_size * output_length, self.vocab_size)\n",
     "        ce_loss = F.cross_entropy(y_all, ys_out_pad.view(-1),\n",
     "                                  ignore_index=IGNORE_ID,\n",
     "                                  reduction='mean')\n",
     "\n",
     "        return ce_loss\n",
    "\n",
    "       \n",
    "\n",
     "    def recognize_beam(self, encoder_outputs, char_list, args):\n",
     "        \"\"\"Beam search decoding for one utterance.\n",
     "\n",
     "        Args:\n",
     "            encoder_outputs: T x H encoder states for a single utterance.\n",
     "            char_list: index -> character mapping (used for progress prints).\n",
     "            args: needs args.beam_size, args.nbest and args.decode_max_len\n",
     "                (0 means: cap decoding length at the encoder length T).\n",
     "\n",
     "        Returns:\n",
     "            nbest_hyps: up to args.nbest ended hypotheses, best score first.\n",
     "        \"\"\"\n",
     "        # search params\n",
     "        beam = args.beam_size\n",
     "        nbest = args.nbest\n",
     "        if args.decode_max_len == 0:\n",
     "            maxlen = encoder_outputs.size(0)\n",
     "        else:\n",
     "            maxlen = args.decode_max_len\n",
     "\n",
     "        # *********Init decoder rnn\n",
     "        h_list = [self.zero_state(encoder_outputs.unsqueeze(0))]\n",
     "        c_list = [self.zero_state(encoder_outputs.unsqueeze(0))]\n",
     "        for l in range(1, self.num_layers):\n",
     "            h_list.append(self.zero_state(encoder_outputs.unsqueeze(0)))\n",
     "            c_list.append(self.zero_state(encoder_outputs.unsqueeze(0)))\n",
     "        att_c = self.zero_state(encoder_outputs.unsqueeze(0),\n",
     "                                H=encoder_outputs.unsqueeze(0).size(2))\n",
     "        # prepare sos\n",
     "        y = self.sos_id\n",
     "        vy = encoder_outputs.new_zeros(1).long()\n",
     "\n",
     "        hyp = {'score': 0.0, 'yseq': [y], 'c_prev': c_list, 'h_prev': h_list,\n",
     "               'a_prev': att_c}\n",
     "        hyps = [hyp]\n",
     "        ended_hyps = []\n",
     "\n",
     "        for i in range(maxlen):\n",
     "            hyps_best_kept = []\n",
     "            for hyp in hyps:\n",
     "                # vy.unsqueeze(1)\n",
     "                vy[0] = hyp['yseq'][i]\n",
     "                embedded = self.embedding(vy)\n",
     "                # embedded.unsqueeze(0)\n",
     "                # step 1. decoder RNN: s_i = RNN(s_i−1,y_i−1,c_i−1)\n",
     "                rnn_input = torch.cat((embedded, hyp['a_prev']), dim=1)\n",
     "                h_list[0], c_list[0] = self.rnn[0](\n",
     "                    rnn_input, (hyp['h_prev'][0], hyp['c_prev'][0]))\n",
     "                for l in range(1, self.num_layers):\n",
     "                    h_list[l], c_list[l] = self.rnn[l](\n",
     "                        h_list[l-1], (hyp['h_prev'][l], hyp['c_prev'][l]))\n",
     "                rnn_output = h_list[-1]\n",
     "                # step 2. attention: c_i = AttentionContext(s_i,h)\n",
     "                # below unsqueeze: (N x H) -> (N x 1 x H)\n",
     "                att_c, att_w = self.attention(rnn_output.unsqueeze(dim=1),\n",
     "                                              encoder_outputs.unsqueeze(0))\n",
     "                att_c = att_c.squeeze(dim=1)\n",
     "                # step 3. concate s_i and c_i, and input to MLP\n",
     "                mlp_input = torch.cat((rnn_output, att_c), dim=1)\n",
     "                predicted_y_t = self.mlp(mlp_input)\n",
     "                local_scores = F.log_softmax(predicted_y_t, dim=1)\n",
     "                # topk scores\n",
     "                local_best_scores, local_best_ids = torch.topk(\n",
     "                    local_scores, beam, dim=1)\n",
     "\n",
     "                for j in range(beam):\n",
     "                    new_hyp = {}\n",
     "                    # NOTE(review): list slices are shallow copies; safe here only\n",
     "                    # because each LSTMCell call rebinds h_list/c_list entries to\n",
     "                    # fresh tensors rather than mutating them in place.\n",
     "                    new_hyp['h_prev'] = h_list[:]\n",
     "                    new_hyp['c_prev'] = c_list[:]\n",
     "                    new_hyp['a_prev'] = att_c[:]\n",
     "                    new_hyp['score'] = hyp['score'] + local_best_scores[0, j]\n",
     "                    new_hyp['yseq'] = [0] * (1 + len(hyp['yseq']))\n",
     "                    new_hyp['yseq'][:len(hyp['yseq'])] = hyp['yseq']\n",
     "                    new_hyp['yseq'][len(hyp['yseq'])] = int(\n",
     "                        local_best_ids[0, j])\n",
     "                    # will be (2 x beam) hyps at most\n",
     "                    hyps_best_kept.append(new_hyp)\n",
     "\n",
     "                hyps_best_kept = sorted(hyps_best_kept,\n",
     "                                        key=lambda x: x['score'],\n",
     "                                        reverse=True)[:beam]\n",
     "            # end for hyp in hyps\n",
     "            hyps = hyps_best_kept\n",
     "\n",
     "            # add eos in the final loop to avoid that there are no ended hyps\n",
     "            if i == maxlen - 1:\n",
     "                for hyp in hyps:\n",
     "                    hyp['yseq'].append(self.eos_id)\n",
     "\n",
     "            # add ended hypothes to a final list, and removed them from current hypothes\n",
     "            # (this will be a probmlem, number of hyps < beam)\n",
     "            remained_hyps = []\n",
     "            for hyp in hyps:\n",
     "                if hyp['yseq'][-1] == self.eos_id:\n",
     "                    # hyp['score'] += (i + 1) * penalty\n",
     "                    ended_hyps.append(hyp)\n",
     "                else:\n",
     "                    remained_hyps.append(hyp)\n",
     "\n",
     "            hyps = remained_hyps\n",
     "            if len(hyps) > 0:\n",
     "                print('remeined hypothes: ' + str(len(hyps)))\n",
     "            else:\n",
     "                print('no hypothesis. Finish decoding.')\n",
     "                break\n",
     "\n",
     "            # NOTE(review): printing every partial hypothesis each step is noisy;\n",
     "            # consider gating these prints behind a verbosity flag.\n",
     "            for hyp in hyps:\n",
     "                print('hypo: ' + ''.join([char_list[int(x)]\n",
     "                                          for x in hyp['yseq'][1:]]))\n",
     "        # end for i in range(maxlen)\n",
     "        nbest_hyps = sorted(ended_hyps, key=lambda x: x['score'], reverse=True)[\n",
     "            :min(len(ended_hyps), nbest)]\n",
     "        return nbest_hyps"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
     "class Seq2Seq(nn.Module):\n",
     "    \"\"\"Sequence-to-Sequence architecture with configurable encoder and decoder.\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, encoder, decoder):\n",
     "        super(Seq2Seq, self).__init__()\n",
     "        self.encoder = encoder\n",
     "        self.decoder = decoder\n",
     "\n",
     "    def forward(self, padded_input, input_lengths, padded_target):\n",
     "        \"\"\"Encode the padded batch, then return the decoder's training loss.\n",
     "\n",
     "        Args:\n",
     "            padded_input: N x Ti x D\n",
     "            padded_targets: N x To\n",
     "\n",
     "        Returns:\n",
     "            Scalar loss from the decoder.\n",
     "        \"\"\"\n",
     "        encoder_padded_outputs, _ = self.encoder(padded_input , input_lengths)\n",
     "        loss = self.decoder(padded_target, encoder_padded_outputs)\n",
     "        return loss\n",
     "    \n",
     "    def recognize(self, input, input_lengths, char_list, args):\n",
     "        \"\"\"Sequence-to-Sequence beam search, decode one utterence now.\n",
     "        Args:\n",
     "            input: T x D\n",
     "            char_list: list of characters\n",
     "            args: args.beam\n",
     "\n",
     "        Returns:\n",
     "            nbest_hyps: ended hypotheses from Decoder.recognize_beam.\n",
     "        \"\"\"\n",
     "        encoder_outputs, _ = self.encoder(input, input_lengths)\n",
     "#         print(\"encoder_outputs\", encoder_outputs.squeeze(1).shape)\n",
     "        \n",
     "        # squeeze(0): the decoder's beam search expects a single T x H utterance.\n",
     "        nbest_hyps = self.decoder.recognize_beam(encoder_outputs.squeeze(0), char_list, args)\n",
     "        return nbest_hyps\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## 单步训练 (Per-step training helpers)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "import time\n",
    "import math\n",
    "\n",
    "\n",
    "def asMinutes(s):\n",
    "    m = math.floor(s / 60)\n",
    "    s -= m * 60\n",
    "    return '%dm %ds' % (m, s)\n",
    "\n",
    "\n",
    "def timeSince(since, percent):\n",
    "    now = time.time()\n",
    "    s = now - since\n",
    "    es = s / (percent)\n",
    "    rs = es - s\n",
    "    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## 训练迭代 (Training loop)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [],
   "source": [
    "def trainIters(model, epoch, optimizier, print_every=10, plot_every=10, learning_rate=0.01):\n",
    "    log = open('train.log', 'w')\n",
    "    start = time.time()\n",
    "    n_iters = len(tr_dataset)\n",
    "    plot_losses = []\n",
    "    print_loss_total = 0  # Reset every print_every\n",
    "    plot_loss_total = 0  # Reset every plot_every\n",
    "\n",
    "    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)\n",
    "    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)\n",
    "#     training_pairs = random.choices(a, k=n_iters)\n",
    "    \n",
    "    criterion = nn.NLLLoss()\n",
    "\n",
    "    for e in range(epoch):\n",
    "        for i, (data) in enumerate(tr_loader):\n",
    "            padded_input, input_lengths, padded_target = data\n",
    "            padded_input, input_lengths, padded_target = data\n",
    "            padded_input = padded_input.cuda()\n",
    "            input_lengths = input_lengths.cuda()\n",
    "            padded_target = padded_target.cuda()\n",
    "    #         print(\"padded_input:\",padded_input.shape)\n",
    "            loss = model(padded_input, input_lengths, padded_target)\n",
    "    #         print(loss) #.requires_grad\n",
    "            print_loss_total += float(loss)\n",
    "            plot_loss_total += float(loss)\n",
    "\n",
    "            optimizier.zero_grad()\n",
    "            loss.backward()\n",
    "\n",
    "            optimizier.step()\n",
    "\n",
    "            if (i+1) % print_every == 0:\n",
    "                print_loss_avg = print_loss_total / print_every\n",
    "                print_loss_total = 0\n",
    "                txt = 'Epoch %d | Iter %d | %s (%d %d%%) %.4f' % (e+1, i+1, timeSince(start, (e *n_iters +i+1) / (n_iters*epoch)),\n",
    "                                             (i+1), (e *n_iters +i+1) / (n_iters*epoch) * 100, print_loss_avg)\n",
    "                print(txt)\n",
    "                log.write(txt + \"\\n\")\n",
    "                log.flush()\n",
    "            if i+1 % plot_every == 0:\n",
    "                plot_loss_avg = plot_loss_total / plot_every\n",
    "                plot_losses.append(plot_loss_avg)\n",
    "                plot_loss_total = 0\n",
    "\n",
    "    log.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Seq2Seq(\n",
      "  (encoder): Encoder(\n",
      "    (pyramidalBLSTM): pyramidalBLSTM(\n",
      "      (lstm1): LSTM(40, 256, batch_first=True, bidirectional=True)\n",
      "      (lstm2): LSTM(1024, 256, batch_first=True, bidirectional=True)\n",
      "      (lstm3): LSTM(1024, 256, batch_first=True, bidirectional=True)\n",
      "    )\n",
      "  )\n",
      "  (decoder): Decoder(\n",
      "    (embedding): Embedding(4520, 512)\n",
      "    (rnn): ModuleList(\n",
      "      (0): LSTMCell(1024, 512)\n",
      "    )\n",
      "    (attention): DotProductAttention()\n",
      "    (mlp): Sequential(\n",
      "      (0): Linear(in_features=1024, out_features=512, bias=True)\n",
      "      (1): Tanh()\n",
      "      (2): Linear(in_features=512, out_features=4520, bias=True)\n",
      "    )\n",
      "  )\n",
      ")\n",
      "Epoch 1 | Iter 20 | 0m 13s (- 6268m 11s) (20 0%) 6.9429\n",
      "Epoch 1 | Iter 40 | 0m 25s (- 5678m 34s) (40 0%) 6.3361\n",
      "Epoch 1 | Iter 60 | 0m 35s (- 5365m 26s) (60 0%) 6.2632\n",
      "Epoch 1 | Iter 80 | 0m 45s (- 5153m 45s) (80 0%) 6.3083\n",
      "Epoch 1 | Iter 100 | 0m 54s (- 4889m 12s) (100 0%) 6.3281\n",
      "Epoch 1 | Iter 120 | 1m 2s (- 4717m 53s) (120 0%) 6.2642\n",
      "Epoch 1 | Iter 140 | 1m 10s (- 4542m 40s) (140 0%) 6.3263\n",
      "Epoch 1 | Iter 160 | 1m 18s (- 4417m 43s) (160 0%) 6.3302\n",
      "Epoch 1 | Iter 180 | 1m 25s (- 4299m 33s) (180 0%) 6.3798\n",
      "Epoch 1 | Iter 200 | 1m 33s (- 4200m 55s) (200 0%) 6.3181\n",
      "Epoch 1 | Iter 220 | 1m 39s (- 4102m 33s) (220 0%) 6.1868\n",
      "Epoch 1 | Iter 240 | 1m 47s (- 4025m 21s) (240 0%) 6.2024\n",
      "Epoch 1 | Iter 260 | 1m 53s (- 3946m 38s) (260 0%) 6.0450\n",
      "Epoch 1 | Iter 280 | 2m 0s (- 3898m 31s) (280 0%) 5.8340\n",
      "Epoch 1 | Iter 300 | 2m 7s (- 3832m 20s) (300 0%) 5.6983\n",
      "Epoch 1 | Iter 320 | 2m 14s (- 3783m 6s) (320 0%) 5.4910\n",
      "Epoch 1 | Iter 340 | 2m 20s (- 3738m 18s) (340 0%) 5.3786\n",
      "Epoch 1 | Iter 360 | 2m 27s (- 3690m 3s) (360 0%) 5.2308\n",
      "Epoch 1 | Iter 380 | 2m 33s (- 3640m 55s) (380 0%) 5.1301\n",
      "Epoch 1 | Iter 400 | 2m 39s (- 3601m 37s) (400 0%) 5.0991\n",
      "Epoch 1 | Iter 420 | 2m 45s (- 3561m 58s) (420 0%) 5.0022\n",
      "Epoch 1 | Iter 440 | 2m 51s (- 3519m 7s) (440 0%) 4.9150\n",
      "Epoch 1 | Iter 460 | 2m 57s (- 3480m 36s) (460 0%) 4.8528\n",
      "Epoch 1 | Iter 480 | 3m 3s (- 3454m 3s) (480 0%) 4.7925\n",
      "Epoch 1 | Iter 500 | 3m 9s (- 3424m 13s) (500 0%) 4.7765\n",
      "Epoch 1 | Iter 520 | 3m 15s (- 3396m 29s) (520 0%) 4.7272\n",
      "Epoch 1 | Iter 540 | 3m 21s (- 3368m 34s) (540 0%) 4.6519\n",
      "Epoch 1 | Iter 560 | 3m 27s (- 3347m 52s) (560 0%) 4.6599\n",
      "Epoch 1 | Iter 580 | 3m 33s (- 3314m 27s) (580 0%) 4.6121\n",
      "Epoch 1 | Iter 600 | 3m 39s (- 3293m 56s) (600 0%) 4.5738\n",
      "Epoch 1 | Iter 620 | 3m 44s (- 3262m 21s) (620 0%) 4.5128\n",
      "Epoch 1 | Iter 640 | 3m 49s (- 3238m 49s) (640 0%) 4.5230\n",
      "Epoch 1 | Iter 660 | 3m 55s (- 3214m 51s) (660 0%) 4.5172\n",
      "Epoch 1 | Iter 680 | 4m 1s (- 3201m 23s) (680 0%) 4.4876\n",
      "Epoch 1 | Iter 700 | 4m 7s (- 3184m 20s) (700 0%) 4.3636\n",
      "Epoch 1 | Iter 720 | 4m 12s (- 3162m 43s) (720 0%) 4.3868\n",
      "Epoch 1 | Iter 740 | 4m 18s (- 3146m 58s) (740 0%) 4.2905\n",
      "Epoch 1 | Iter 760 | 4m 23s (- 3129m 18s) (760 0%) 4.3410\n",
      "Epoch 1 | Iter 780 | 4m 29s (- 3111m 10s) (780 0%) 4.2550\n",
      "Epoch 1 | Iter 800 | 4m 34s (- 3096m 1s) (800 0%) 4.3042\n",
      "Epoch 1 | Iter 820 | 4m 40s (- 3082m 11s) (820 0%) 4.2398\n",
      "Epoch 1 | Iter 840 | 4m 45s (- 3062m 1s) (840 0%) 4.2083\n",
      "Epoch 1 | Iter 860 | 4m 50s (- 3047m 2s) (860 0%) 4.2757\n",
      "Epoch 1 | Iter 880 | 4m 56s (- 3036m 5s) (880 0%) 4.2084\n",
      "Epoch 1 | Iter 900 | 5m 1s (- 3022m 20s) (900 0%) 4.1374\n",
      "Epoch 1 | Iter 920 | 5m 6s (- 3005m 46s) (920 0%) 4.1546\n",
      "Epoch 1 | Iter 940 | 5m 11s (- 2990m 20s) (940 0%) 4.1271\n",
      "Epoch 1 | Iter 960 | 5m 17s (- 2977m 28s) (960 0%) 4.1153\n",
      "Epoch 1 | Iter 980 | 5m 23s (- 2973m 4s) (980 0%) 4.0786\n",
      "Epoch 1 | Iter 1000 | 5m 28s (- 2961m 2s) (1000 0%) 4.0798\n",
      "Epoch 1 | Iter 1020 | 5m 34s (- 2951m 51s) (1020 0%) 4.0067\n",
      "Epoch 1 | Iter 1040 | 5m 39s (- 2945m 8s) (1040 0%) 4.0735\n",
      "Epoch 1 | Iter 1060 | 5m 45s (- 2936m 18s) (1060 0%) 4.0012\n",
      "Epoch 1 | Iter 1080 | 5m 50s (- 2927m 46s) (1080 0%) 4.0324\n",
      "Epoch 1 | Iter 1100 | 5m 55s (- 2916m 29s) (1100 0%) 3.9460\n",
      "Epoch 1 | Iter 1120 | 6m 1s (- 2908m 20s) (1120 0%) 3.9689\n",
      "Epoch 1 | Iter 1140 | 6m 6s (- 2896m 41s) (1140 0%) 3.9058\n",
      "Epoch 1 | Iter 1160 | 6m 11s (- 2884m 53s) (1160 0%) 3.8986\n",
      "Epoch 1 | Iter 1180 | 6m 16s (- 2877m 51s) (1180 0%) 3.9296\n",
      "Epoch 1 | Iter 1200 | 6m 22s (- 2869m 2s) (1200 0%) 3.9781\n",
      "Epoch 1 | Iter 1220 | 6m 27s (- 2862m 33s) (1220 0%) 3.9089\n",
      "Epoch 1 | Iter 1240 | 6m 32s (- 2855m 23s) (1240 0%) 3.7973\n",
      "Epoch 1 | Iter 1260 | 6m 38s (- 2846m 22s) (1260 0%) 3.8280\n",
      "Epoch 1 | Iter 1280 | 6m 43s (- 2838m 34s) (1280 0%) 3.9213\n",
      "Epoch 1 | Iter 1300 | 6m 48s (- 2828m 38s) (1300 0%) 3.8387\n",
      "Epoch 1 | Iter 1320 | 6m 53s (- 2821m 3s) (1320 0%) 3.8204\n",
      "Epoch 1 | Iter 1340 | 6m 58s (- 2814m 8s) (1340 0%) 3.7935\n",
      "Epoch 1 | Iter 1360 | 7m 3s (- 2803m 0s) (1360 0%) 3.7239\n",
      "Epoch 1 | Iter 1380 | 7m 7s (- 2792m 41s) (1380 0%) 3.7632\n",
      "Epoch 1 | Iter 1400 | 7m 12s (- 2786m 0s) (1400 0%) 3.7718\n",
      "Epoch 1 | Iter 1420 | 7m 17s (- 2775m 24s) (1420 0%) 3.7688\n",
      "Epoch 1 | Iter 1440 | 7m 22s (- 2767m 21s) (1440 0%) 3.7276\n",
      "Epoch 1 | Iter 1460 | 7m 27s (- 2758m 52s) (1460 0%) 3.7389\n",
      "Epoch 1 | Iter 1480 | 7m 31s (- 2748m 53s) (1480 0%) 3.7207\n",
      "Epoch 1 | Iter 1500 | 7m 36s (- 2738m 54s) (1500 0%) 3.6703\n",
      "Epoch 1 | Iter 1520 | 7m 41s (- 2731m 35s) (1520 0%) 3.6806\n",
      "Epoch 1 | Iter 1540 | 7m 46s (- 2726m 10s) (1540 0%) 3.7306\n",
      "Epoch 1 | Iter 1560 | 7m 51s (- 2721m 36s) (1560 0%) 3.7949\n",
      "Epoch 1 | Iter 1580 | 7m 56s (- 2717m 13s) (1580 0%) 3.5974\n",
      "Epoch 1 | Iter 1600 | 8m 2s (- 2716m 58s) (1600 0%) 3.6297\n",
      "Epoch 1 | Iter 1620 | 8m 8s (- 2714m 41s) (1620 0%) 3.6875\n",
      "Epoch 1 | Iter 1640 | 8m 13s (- 2711m 7s) (1640 0%) 3.6477\n",
      "Epoch 1 | Iter 1660 | 8m 19s (- 2707m 5s) (1660 0%) 3.5514\n",
      "Epoch 1 | Iter 1680 | 8m 24s (- 2703m 7s) (1680 0%) 3.6067\n",
      "Epoch 1 | Iter 1700 | 8m 29s (- 2697m 10s) (1700 0%) 3.6475\n",
      "Epoch 1 | Iter 1720 | 8m 34s (- 2691m 57s) (1720 0%) 3.6208\n",
      "Epoch 1 | Iter 1740 | 8m 39s (- 2686m 44s) (1740 0%) 3.6731\n",
      "Epoch 1 | Iter 1760 | 8m 44s (- 2680m 48s) (1760 0%) 3.5218\n",
      "Epoch 1 | Iter 1780 | 8m 49s (- 2676m 40s) (1780 0%) 3.5334\n",
      "Epoch 1 | Iter 1800 | 8m 54s (- 2673m 21s) (1800 0%) 3.6144\n",
      "Epoch 1 | Iter 1820 | 8m 59s (- 2669m 32s) (1820 0%) 3.4771\n",
      "Epoch 1 | Iter 1840 | 9m 4s (- 2664m 12s) (1840 0%) 3.5092\n",
      "Epoch 1 | Iter 1860 | 9m 9s (- 2657m 10s) (1860 0%) 3.5269\n",
      "Epoch 1 | Iter 1880 | 9m 13s (- 2651m 36s) (1880 0%) 3.5645\n",
      "Epoch 1 | Iter 1900 | 9m 18s (- 2646m 10s) (1900 0%) 3.4601\n",
      "Epoch 1 | Iter 1920 | 9m 23s (- 2640m 42s) (1920 0%) 3.4663\n",
      "Epoch 1 | Iter 1940 | 9m 27s (- 2633m 33s) (1940 0%) 3.5211\n",
      "Epoch 1 | Iter 1960 | 9m 31s (- 2621m 52s) (1960 0%) 3.4670\n",
      "Epoch 1 | Iter 1980 | 9m 35s (- 2615m 0s) (1980 0%) 3.5134\n",
      "Epoch 1 | Iter 2000 | 9m 40s (- 2610m 38s) (2000 0%) 3.4441\n",
      "Epoch 1 | Iter 2020 | 9m 45s (- 2606m 32s) (2020 0%) 3.5230\n",
      "Epoch 1 | Iter 2040 | 9m 49s (- 2601m 35s) (2040 0%) 3.3031\n",
      "Epoch 1 | Iter 2060 | 9m 54s (- 2596m 32s) (2060 0%) 3.2948\n",
      "Epoch 1 | Iter 2080 | 9m 59s (- 2591m 42s) (2080 0%) 3.4281\n",
      "Epoch 1 | Iter 2100 | 10m 4s (- 2588m 16s) (2100 0%) 3.5081\n",
      "Epoch 1 | Iter 2120 | 10m 8s (- 2584m 3s) (2120 0%) 3.4299\n",
      "Epoch 1 | Iter 2140 | 10m 14s (- 2581m 34s) (2140 0%) 3.4260\n",
      "Epoch 1 | Iter 2160 | 10m 19s (- 2577m 54s) (2160 0%) 3.4022\n",
      "Epoch 1 | Iter 2180 | 10m 23s (- 2573m 28s) (2180 0%) 3.2716\n",
      "Epoch 1 | Iter 2200 | 10m 28s (- 2569m 10s) (2200 0%) 3.3578\n",
      "Epoch 1 | Iter 2220 | 10m 33s (- 2564m 57s) (2220 0%) 3.4790\n",
      "Epoch 1 | Iter 2240 | 10m 37s (- 2561m 33s) (2240 0%) 3.3173\n",
      "Epoch 1 | Iter 2260 | 10m 42s (- 2557m 46s) (2260 0%) 3.4104\n",
      "Epoch 1 | Iter 2280 | 10m 47s (- 2553m 22s) (2280 0%) 3.3074\n",
      "Epoch 1 | Iter 2300 | 10m 51s (- 2549m 21s) (2300 0%) 3.4241\n",
      "Epoch 1 | Iter 2320 | 10m 56s (- 2544m 38s) (2320 0%) 3.2312\n",
      "Epoch 1 | Iter 2340 | 11m 0s (- 2539m 8s) (2340 0%) 3.2560\n",
      "Epoch 1 | Iter 2360 | 11m 5s (- 2534m 1s) (2360 0%) 3.3627\n",
      "Epoch 1 | Iter 2380 | 11m 9s (- 2529m 53s) (2380 0%) 3.4043\n",
      "Epoch 1 | Iter 2400 | 11m 14s (- 2525m 55s) (2400 0%) 3.2436\n",
      "Epoch 1 | Iter 2420 | 11m 18s (- 2521m 2s) (2420 0%) 3.3054\n",
      "Epoch 1 | Iter 2440 | 11m 22s (- 2516m 41s) (2440 0%) 3.2587\n",
      "Epoch 1 | Iter 2460 | 11m 27s (- 2510m 47s) (2460 0%) 3.2495\n",
      "Epoch 1 | Iter 2480 | 11m 31s (- 2506m 38s) (2480 0%) 3.1809\n",
      "Epoch 1 | Iter 2500 | 11m 35s (- 2502m 26s) (2500 0%) 3.3542\n",
      "Epoch 1 | Iter 2520 | 11m 40s (- 2498m 2s) (2520 0%) 3.2570\n",
      "Epoch 1 | Iter 2540 | 11m 44s (- 2493m 54s) (2540 0%) 3.2482\n",
      "Epoch 1 | Iter 2560 | 11m 48s (- 2487m 57s) (2560 0%) 3.2671\n",
      "Epoch 1 | Iter 2580 | 11m 52s (- 2483m 32s) (2580 0%) 3.2114\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1 | Iter 2600 | 11m 57s (- 2478m 53s) (2600 0%) 3.1231\n",
      "Epoch 1 | Iter 2620 | 12m 1s (- 2474m 35s) (2620 0%) 3.2000\n",
      "Epoch 1 | Iter 2640 | 12m 5s (- 2470m 5s) (2640 0%) 3.2016\n",
      "Epoch 1 | Iter 2660 | 12m 9s (- 2465m 51s) (2660 0%) 3.2841\n",
      "Epoch 1 | Iter 2680 | 12m 14s (- 2461m 52s) (2680 0%) 3.2082\n",
      "Epoch 1 | Iter 2700 | 12m 18s (- 2458m 8s) (2700 0%) 3.1423\n",
      "Epoch 1 | Iter 2720 | 12m 22s (- 2453m 53s) (2720 0%) 3.2390\n",
      "Epoch 1 | Iter 2740 | 12m 26s (- 2449m 15s) (2740 0%) 2.9885\n",
      "Epoch 1 | Iter 2760 | 12m 31s (- 2446m 7s) (2760 0%) 3.1719\n",
      "Epoch 1 | Iter 2780 | 12m 35s (- 2442m 53s) (2780 0%) 3.1430\n",
      "Epoch 1 | Iter 2800 | 12m 40s (- 2439m 39s) (2800 0%) 3.1908\n",
      "Epoch 1 | Iter 2820 | 12m 44s (- 2434m 56s) (2820 0%) 3.0825\n",
      "Epoch 1 | Iter 2840 | 12m 48s (- 2430m 9s) (2840 0%) 3.2142\n"
     ]
    }
   ],
   "source": [
    "# Hyperparameters and model construction for the attention seq2seq ASR model.\n",
    "# Encoder input feature dimension (per-frame acoustic feature size).\n",
    "input_size = 40\n",
    "\n",
    "hidden_size = 256\n",
    "vocab_size = len(char_list)\n",
    "embedding_dim = 512\n",
    "# Special token ids (assumes char_list puts <sos> at 0 and <eos> at 1 -- confirm).\n",
    "sos_id = 0\n",
    "eos_id = 1\n",
    "learning_rate = 1e-3\n",
    "# NOTE(review): momentum is unused -- Adam takes no momentum argument; see the\n",
    "# commented-out keyword below, apparently left over from an SGD configuration.\n",
    "momentum = 0\n",
    "l2 = 1e-5\n",
    "\n",
    "# Sentinel label id; presumably the padding label ignored by the training loss\n",
    "# (not referenced in this cell -- used by code defined elsewhere).\n",
    "IGNORE_ID=-1\n",
    "\n",
    "encoder = Encoder(input_size, hidden_size, dropout=0.0)\n",
    "# Decoder hidden size is hidden_size*2, presumably because the encoder is\n",
    "# bidirectional (bidirectional_encoder=True) and its two directions are concatenated.\n",
    "decoder = Decoder(vocab_size, embedding_dim, sos_id, eos_id, hidden_size*2,\n",
    "                 num_layers=1, bidirectional_encoder=True)\n",
    "\n",
    "model = Seq2Seq(encoder, decoder)\n",
    "print(model)\n",
    "model.cuda()\n",
    "\n",
    "# NB: 'optimizier' is a typo, but the name is kept unchanged in case later\n",
    "# cells (or a resumed session) reference it.\n",
    "optimizier = torch.optim.Adam(model.parameters(),\n",
    "                                     lr=learning_rate,\n",
    "#                                      momentum=momentum,\n",
    "                                     weight_decay=l2)\n",
    "# Train for 30 epochs, logging every 20 iterations (see output above).\n",
    "trainIters(model, 30,optimizier, print_every=20)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Args(object):\n",
    "    \"\"\"Bag of beam-search decoding options consumed by model.recognize().\n",
    "\n",
    "    beam_size      -- number of hypotheses kept alive at each decoding step\n",
    "    nbest          -- how many final hypotheses to report\n",
    "    decode_max_len -- maximum number of output tokens to generate\n",
    "    \"\"\"\n",
    "    def __init__(self, beam_size, nbest, decode_max_len):\n",
    "        self.beam_size = beam_size\n",
    "        self.nbest = nbest\n",
    "        self.decode_max_len = decode_max_len"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Default decoding options: beam width 30, 1-best output, at most 15 tokens.\n",
    "args = Args(30, 1, 15)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([800])\n",
      "input_tensor: torch.Size([1, 800, 240])\n",
      "remeined hypothes: 30\n",
      "hypo: 温\n",
      "hypo: 跟\n",
      "hypo: 分\n",
      "hypo: 奔\n",
      "hypo: 文\n",
      "hypo: 纷\n",
      "hypo: 很\n",
      "hypo: 问\n",
      "hypo: 芬\n",
      "hypo: 郑\n",
      "hypo: 根\n",
      "hypo: 尊\n",
      "hypo: 微\n",
      "hypo: 昆\n",
      "hypo: 目\n",
      "hypo: 奋\n",
      "hypo: 封\n",
      "hypo: 更\n",
      "hypo: 深\n",
      "hypo: 稳\n",
      "hypo: 翁\n",
      "hypo: 伦\n",
      "hypo: 恩\n",
      "hypo: 愤\n",
      "hypo: 荆\n",
      "hypo: 惠\n",
      "hypo: 闷\n",
      "hypo: 闻\n",
      "hypo: 喷\n",
      "hypo: 樊\n",
      "remeined hypothes: 30\n",
      "hypo: 温州\n",
      "hypo: 跟周\n",
      "hypo: 跟踪\n",
      "hypo: 跟州\n",
      "hypo: 分钟\n",
      "hypo: 奔周\n",
      "hypo: 温周\n",
      "hypo: 跟洲\n",
      "hypo: 跟着\n",
      "hypo: 跟征\n",
      "hypo: 文章\n",
      "hypo: 跟真\n",
      "hypo: 很多\n",
      "hypo: 跟妆\n",
      "hypo: 纷周\n",
      "hypo: 芬州\n",
      "hypo: 分州\n",
      "hypo: 问真\n",
      "hypo: 郑州\n",
      "hypo: 温洲\n",
      "hypo: 根州\n",
      "hypo: 文周\n",
      "hypo: 跟植\n",
      "hypo: 跟卓\n",
      "hypo: 尊州\n",
      "hypo: 跟诸\n",
      "hypo: 跟求\n",
      "hypo: 跟筹\n",
      "hypo: 分支\n",
      "hypo: 分征\n",
      "remeined hypothes: 30\n",
      "hypo: 温州光\n",
      "hypo: 跟周公\n",
      "hypo: 跟踪光\n",
      "hypo: 跟周光\n",
      "hypo: 跟州光\n",
      "hypo: 跟踪公\n",
      "hypo: 分钟关\n",
      "hypo: 跟踪关\n",
      "hypo: 温州公\n",
      "hypo: 温州高\n",
      "hypo: 跟着光\n",
      "hypo: 跟踪高\n",
      "hypo: 跟洲光\n",
      "hypo: 奔周公\n",
      "hypo: 分钟光\n",
      "hypo: 温周光\n",
      "hypo: 跟征光\n",
      "hypo: 奔周高\n",
      "hypo: 温周公\n",
      "hypo: 奔周光\n",
      "hypo: 跟州公\n",
      "hypo: 分钟高\n",
      "hypo: 分钟公\n",
      "hypo: 温州钢\n",
      "hypo: 跟洲公\n",
      "hypo: 跟周高\n",
      "hypo: 跟真光\n",
      "hypo: 跟周康\n",
      "hypo: 跟周瓜\n",
      "hypo: 跟真公\n",
      "remeined hypothes: 30\n",
      "hypo: 温州光速\n",
      "hypo: 跟周公诉\n",
      "hypo: 温州光塑\n",
      "hypo: 温州光束\n",
      "hypo: 跟踪公诉\n",
      "hypo: 温州光树\n",
      "hypo: 跟踪关系\n",
      "hypo: 温州高速\n",
      "hypo: 温州公诉\n",
      "hypo: 温州光素\n",
      "hypo: 跟周光宿\n",
      "hypo: 跟周光诉\n",
      "hypo: 分钟关注\n",
      "hypo: 温州光诉\n",
      "hypo: 分钟关系\n",
      "hypo: 跟踪光素\n",
      "hypo: 跟踪光束\n",
      "hypo: 跟踪光速\n",
      "hypo: 跟州光速\n",
      "hypo: 跟踪光宿\n",
      "hypo: 跟周光素\n",
      "hypo: 跟踪高速\n",
      "hypo: 奔周公诉\n",
      "hypo: 跟踪光塑\n",
      "hypo: 温州光宿\n",
      "hypo: 温州光伏\n",
      "hypo: 跟州光束\n",
      "hypo: 跟踪光诉\n",
      "hypo: 奔周高速\n",
      "hypo: 温州光度\n",
      "remeined hypothes: 30\n",
      "hypo: 温州光速车\n",
      "hypo: 跟周公诉车\n",
      "hypo: 温州光塑车\n",
      "hypo: 跟踪公诉车\n",
      "hypo: 温州光束车\n",
      "hypo: 温州光速出\n",
      "hypo: 温州公诉车\n",
      "hypo: 温州光树车\n",
      "hypo: 温州光速戳\n",
      "hypo: 跟踪关系车\n",
      "hypo: 温州光素车\n",
      "hypo: 温州高速车\n",
      "hypo: 分钟关系车\n",
      "hypo: 温州光速说\n",
      "hypo: 温州光速抽\n",
      "hypo: 温州光速冲\n",
      "hypo: 温州光诉车\n",
      "hypo: 跟周公诉出\n",
      "hypo: 跟周光宿车\n",
      "hypo: 温州光宿车\n",
      "hypo: 奔周公诉车\n",
      "hypo: 温州光伏车\n",
      "hypo: 跟踪光宿车\n",
      "hypo: 分钟关注称\n",
      "hypo: 温州光速周\n",
      "hypo: 跟踪光素车\n",
      "hypo: 温州光速春\n",
      "hypo: 温州光速处\n",
      "hypo: 温州光速撤\n",
      "hypo: 跟周光诉车\n",
      "remeined hypothes: 30\n",
      "hypo: 温州光速车火\n",
      "hypo: 温州光速车或\n",
      "hypo: 跟周公诉车祸\n",
      "hypo: 温州光塑车或\n",
      "hypo: 跟周公诉车或\n",
      "hypo: 跟踪公诉车或\n",
      "hypo: 温州光塑车火\n",
      "hypo: 温州光束车火\n",
      "hypo: 温州光速车祸\n",
      "hypo: 跟周公诉车火\n",
      "hypo: 跟周公诉车伙\n",
      "hypo: 温州光速戳火\n",
      "hypo: 跟踪关系车或\n",
      "hypo: 分钟关系车或\n",
      "hypo: 温州光树车或\n",
      "hypo: 温州光速出火\n",
      "hypo: 跟踪公诉车祸\n",
      "hypo: 温州公诉车祸\n",
      "hypo: 温州高速车火\n",
      "hypo: 温州光树车火\n",
      "hypo: 温州公诉车火\n",
      "hypo: 温州光素车火\n",
      "hypo: 温州光素车或\n",
      "hypo: 温州光束车祸\n",
      "hypo: 温州光速冲我\n",
      "hypo: 温州光束车或\n",
      "hypo: 跟踪光宿车或\n",
      "hypo: 温州光速说或\n",
      "hypo: 温州光速出货\n",
      "hypo: 温州光速出获\n",
      "remeined hypothes: 30\n",
      "hypo: 温州光速车或九\n",
      "hypo: 温州光速车火九\n",
      "hypo: 温州光塑车或九\n",
      "hypo: 跟周公诉车或九\n",
      "hypo: 跟周公诉车祸九\n",
      "hypo: 跟踪公诉车或九\n",
      "hypo: 温州光树车或九\n",
      "hypo: 跟踪关系车或九\n",
      "hypo: 分钟关系车或九\n",
      "hypo: 温州光束车火九\n",
      "hypo: 温州光速车祸九\n",
      "hypo: 温州光速出火九\n",
      "hypo: 温州光速车火爆\n",
      "hypo: 温州光速戳火九\n",
      "hypo: 温州光塑车火九\n",
      "hypo: 温州公诉车祸九\n",
      "hypo: 温州光束车或九\n",
      "hypo: 温州光速说或九\n",
      "hypo: 温州光速出获九\n",
      "hypo: 温州光速出货九\n",
      "hypo: 温州光素车或九\n",
      "hypo: 温州光速车火午\n",
      "hypo: 跟踪公诉车祸九\n",
      "hypo: 温州光束车祸九\n",
      "hypo: 跟周公诉车火九\n",
      "hypo: 跟踪光宿车或九\n",
      "hypo: 温州高速车火九\n",
      "hypo: 跟周公诉车火救\n",
      "hypo: 跟周公诉车伙救\n",
      "hypo: 温州光速车火了\n",
      "remeined hypothes: 18\n",
      "hypo: 温州光速车火九二\n",
      "hypo: 温州光速车或九二\n",
      "hypo: 跟周公诉车祸九二\n",
      "hypo: 温州光速车祸九二\n",
      "hypo: 温州光塑车或九二\n",
      "hypo: 温州光速出火九二\n",
      "hypo: 温州光束车火九二\n",
      "hypo: 温州公诉车祸九二\n",
      "hypo: 温州光速戳火九二\n",
      "hypo: 温州光塑车火九二\n",
      "hypo: 温州光速出货九二\n",
      "hypo: 温州光束车祸九二\n",
      "hypo: 温州高速车火九二\n",
      "hypo: 跟踪公诉车祸九二\n",
      "hypo: 跟踪公诉车或九二\n",
      "hypo: 跟周公诉车或九二\n",
      "hypo: 温州光树车或九二\n",
      "hypo: 温州光速车火午二\n",
      "remeined hypothes: 30\n",
      "hypo: 温州光速车火九二八\n",
      "hypo: 温州光速车或九二八\n",
      "hypo: 跟周公诉车祸九二八\n",
      "hypo: 温州光速车祸九二八\n",
      "hypo: 温州光塑车或九二八\n",
      "hypo: 温州光速出火九二八\n",
      "hypo: 温州光束车火九二八\n",
      "hypo: 温州公诉车祸九二八\n",
      "hypo: 温州光速出货九二八\n",
      "hypo: 温州光速戳火九二八\n",
      "hypo: 温州光塑车火九二八\n",
      "hypo: 温州光束车祸九二八\n",
      "hypo: 跟踪公诉车祸九二八\n",
      "hypo: 温州高速车火九二八\n",
      "hypo: 跟踪公诉车或九二八\n",
      "hypo: 跟周公诉车或九二八\n",
      "hypo: 温州光树车或九二八\n",
      "hypo: 温州光速车火午二八\n",
      "hypo: 温州光速车火九二磅\n",
      "hypo: 温州光速车火九二邦\n",
      "hypo: 温州光速车或九二磅\n",
      "hypo: 温州光速车火九二光\n",
      "hypo: 温州光速车火九二包\n",
      "hypo: 跟周公诉车祸九二磅\n",
      "hypo: 温州光速车火九二爆\n",
      "hypo: 温州光塑车或九二磅\n",
      "hypo: 温州光速车祸九二磅\n",
      "hypo: 温州光速车火午二方\n",
      "hypo: 温州光束车火九二磅\n",
      "hypo: 温州光速出火九二磅\n",
      "remeined hypothes: 30\n",
      "hypo: 温州光速车火九二八三\n",
      "hypo: 温州光速车或九二八三\n",
      "hypo: 跟周公诉车祸九二八三\n",
      "hypo: 温州光速车祸九二八三\n",
      "hypo: 温州光塑车或九二八三\n",
      "hypo: 温州公诉车祸九二八三\n",
      "hypo: 温州光速出货九二八三\n",
      "hypo: 温州光速出火九二八三\n",
      "hypo: 温州光速车火九二八大\n",
      "hypo: 温州光束车火九二八三\n",
      "hypo: 温州光速戳火九二八三\n",
      "hypo: 温州光束车祸九二八三\n",
      "hypo: 温州光塑车火九二八三\n",
      "hypo: 跟踪公诉车祸九二八三\n",
      "hypo: 跟踪公诉车或九二八三\n",
      "hypo: 跟周公诉车或九二八三\n",
      "hypo: 温州光速车或九二八大\n",
      "hypo: 温州光树车或九二八三\n",
      "hypo: 温州高速车火九二八三\n",
      "hypo: 温州光速车火九二八一\n",
      "hypo: 温州光速车火九二八当\n",
      "hypo: 跟周公诉车祸九二八大\n",
      "hypo: 温州光塑车或九二八大\n",
      "hypo: 温州光速车火午二八三\n",
      "hypo: 温州光速车祸九二八大\n",
      "hypo: 温州光速车火九二八弹\n",
      "hypo: 温州光速出火九二八大\n",
      "hypo: 温州光束车火九二八大\n",
      "hypo: 温州光速车火九二八塌\n",
      "hypo: 温州公诉车祸九二八大\n",
      "remeined hypothes: 30\n",
      "hypo: 温州光速车火九二八三米\n",
      "hypo: 温州光速车或九二八三米\n",
      "hypo: 温州光速车祸九二八三米\n",
      "hypo: 温州光速车火九二八三鸣\n",
      "hypo: 跟周公诉车祸九二八三米\n",
      "hypo: 温州光速车或九二八三鸣\n",
      "hypo: 温州光塑车或九二八三米\n",
      "hypo: 跟周公诉车祸九二八三名\n",
      "hypo: 温州公诉车祸九二八三米\n",
      "hypo: 温州光束车祸九二八三米\n",
      "hypo: 温州光速车火九二八大鸣\n",
      "hypo: 温州光束车火九二八三米\n",
      "hypo: 温州光速车火九二八大米\n",
      "hypo: 温州光速出货九二八三米\n",
      "hypo: 跟周公诉车祸九二八三鸣\n",
      "hypo: 温州光速车火九二八三名\n",
      "hypo: 温州光塑车火九二八三米\n",
      "hypo: 温州公诉车祸九二八三名\n",
      "hypo: 跟踪公诉车祸九二八三米\n",
      "hypo: 温州光速车祸九二八三鸣\n",
      "hypo: 温州光速车或九二八三名\n",
      "hypo: 温州光塑车或九二八三鸣\n",
      "hypo: 温州光速戳火九二八三米\n",
      "hypo: 温州光树车或九二八三米\n",
      "hypo: 温州光速出火九二八三鸣\n",
      "hypo: 温州光速出火九二八三米\n",
      "hypo: 跟踪公诉车或九二八三米\n",
      "hypo: 温州光速车或九二八大鸣\n",
      "hypo: 温州光速戳火九二八三鸣\n",
      "hypo: 温州光速车火九二八三年\n",
      "remeined hypothes: 30\n",
      "hypo: 温州光速车或九二八三米糟\n",
      "hypo: 温州光速车火九二八三米糟\n",
      "hypo: 温州光速车火九二八三米遭\n",
      "hypo: 温州光速车或九二八三米招\n",
      "hypo: 跟周公诉车祸九二八三名遭\n",
      "hypo: 温州光速车火九二八三名遭\n",
      "hypo: 温州光速车或九二八三米遭\n",
      "hypo: 温州公诉车祸九二八三名遭\n",
      "hypo: 温州光速车祸九二八三米糟\n",
      "hypo: 跟周公诉车祸九二八三米招\n",
      "hypo: 温州光塑车或九二八三米糟\n",
      "hypo: 温州光速车火九二八三米招\n",
      "hypo: 温州光速车或九二八三名遭\n",
      "hypo: 温州光速车火九二八三鸣河\n",
      "hypo: 跟周公诉车祸九二八三米糟\n",
      "hypo: 跟周公诉车祸九二八三米遭\n",
      "hypo: 跟周公诉车祸九二八三鸣交\n",
      "hypo: 温州光速车祸九二八三米招\n",
      "hypo: 温州光速车或九二八三鸣交\n",
      "hypo: 温州光速车或九二八三米高\n",
      "hypo: 温州光速车祸九二八三米遭\n",
      "hypo: 跟周公诉车祸九二八三名招\n",
      "hypo: 温州光塑车或九二八三米招\n",
      "hypo: 温州光速车火九二八三米高\n",
      "hypo: 温州光塑车或九二八三米遭\n",
      "hypo: 温州公诉车祸九二八三米遭\n",
      "hypo: 温州光速车祸九二八三米高\n",
      "hypo: 温州光速车或九二八三名招\n",
      "hypo: 温州光束车祸九二八三米糟\n",
      "hypo: 跟踪公诉车祸九二八三米招\n",
      "remeined hypothes: 27\n",
      "hypo: 温州光速车或九二八三米糟糕\n",
      "hypo: 温州光速车火九二八三米糟糕\n",
      "hypo: 跟周公诉车祸九二八三名招呼\n",
      "hypo: 温州光塑车或九二八三米糟糕\n",
      "hypo: 温州光速车祸九二八三米糟糕\n",
      "hypo: 温州光速车或九二八三鸣交通\n",
      "hypo: 跟周公诉车祸九二八三鸣交通\n",
      "hypo: 温州光速车或九二八三名招呼\n",
      "hypo: 温州光速车或九二八三米招呼\n",
      "hypo: 跟周公诉车祸九二八三米糟糕\n",
      "hypo: 跟周公诉车祸九二八三名遭虹\n",
      "hypo: 跟周公诉车祸九二八三鸣交互\n",
      "hypo: 温州光速车或九二八三鸣交互\n",
      "hypo: 温州光速车火九二八三米遭疯\n",
      "hypo: 温州光速车火九二八三鸣河西\n",
      "hypo: 温州光速车火九二八三名遭疯\n",
      "hypo: 温州光速车或九二八三米招局\n",
      "hypo: 跟周公诉车祸九二八三米招呼\n",
      "hypo: 温州光速车或九二八三米遭疯\n",
      "hypo: 温州光速车或九二八三米招西\n",
      "hypo: 温州光速车火九二八三名遭虹\n",
      "hypo: 温州光速车火九二八三米遭弹\n",
      "hypo: 温州光速车或九二八三米糟弹\n",
      "hypo: 温州光束车祸九二八三米糟糕\n",
      "hypo: 跟周公诉车祸九二八三名遭公\n",
      "hypo: 温州光速车或九二八三米高通\n",
      "hypo: 温州光速车火九二八三米糟弹\n",
      "remeined hypothes: 23\n",
      "hypo: 温州光速车火九二八三名遭疯抢\n",
      "hypo: 温州光速车火九二八三米遭疯抢\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥\n",
      "hypo: 温州光速车或九二八三米遭疯抢\n",
      "hypo: 跟周公诉车祸九二八三名遭公抢\n",
      "hypo: 温州光速车火九二八三名遭虹桥\n",
      "hypo: 温州光速车或九二八三米招局遭\n",
      "hypo: 跟周公诉车祸九二八三名招呼将\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢\n",
      "hypo: 跟周公诉车祸九二八三名招呼枪\n",
      "hypo: 温州光速车火九二八三鸣河西边\n",
      "hypo: 跟周公诉车祸九二八三名招呼吁\n",
      "hypo: 温州光速车或九二八三名招呼吁\n",
      "hypo: 温州光速车或九二八三米高通宵\n",
      "hypo: 温州光速车或九二八三鸣交通宵\n",
      "hypo: 跟周公诉车祸九二八三鸣交通宵\n",
      "hypo: 温州光速车或九二八三名招呼将\n",
      "hypo: 温州光速车火九二八三米遭弹枪\n",
      "hypo: 跟周公诉车祸九二八三鸣交互式\n",
      "hypo: 跟周公诉车祸九二八三鸣交通枪\n",
      "hypo: 温州光速车或九二八三鸣交互式\n",
      "hypo: 温州光速车或九二八三米招呼吁\n",
      "hypo: 温州光速车或九二八三米招呼将\n",
      "remeined hypothes: 25\n",
      "hypo: 温州光速车火九二八三名遭疯抢续\n",
      "hypo: 温州光速车火九二八三米遭疯抢续\n",
      "hypo: 温州光速车或九二八三米遭疯抢续\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市\n",
      "hypo: 跟周公诉车祸九二八三名遭公抢续\n",
      "hypo: 温州光速车火九二八三名遭虹桥市\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢续\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市\n",
      "hypo: 温州光速车或九二八三米高通宵蓄\n",
      "hypo: 跟周公诉车祸九二八三鸣交通枪示\n",
      "hypo: 跟周公诉车祸九二八三鸣交通宵序\n",
      "hypo: 温州光速车火九二八三鸣河西边路\n",
      "hypo: 温州光速车或九二八三米招呼将大\n",
      "hypo: 温州光速车火九二八三米遭疯抢市\n",
      "hypo: 温州光速车或九二八三鸣交通宵序\n",
      "hypo: 温州光速车或九二八三鸣交通宵蓄\n",
      "hypo: 温州光速车或九二八三米高通宵序\n",
      "hypo: 温州光速车火九二八三米遭弹枪续\n",
      "hypo: 温州光速车或九二八三名招呼将陷\n",
      "hypo: 温州光速车或九二八三米招局遭遇\n",
      "hypo: 温州光速车火九二八三米遭弹枪示\n",
      "hypo: 跟周公诉车祸九二八三鸣交通宵室\n",
      "hypo: 跟周公诉车祸九二八三名招呼将陷\n",
      "hypo: 温州光速车或九二八三鸣交通宵室\n",
      "hypo: 温州光速车或九二八三米遭疯抢市\n",
      "remeined hypothes: 15\n",
      "hypo: 温州光速车火九二八三名遭疯抢续航\n",
      "hypo: 跟周公诉车祸九二八三名遭公抢续航\n",
      "hypo: 温州光速车火九二八三米遭疯抢续一\n",
      "hypo: 温州光速车火九二八三名遭疯抢续一\n",
      "hypo: 温州光速车或九二八三米招呼将大鸣\n",
      "hypo: 温州光速车或九二八三米遭疯抢续一\n",
      "hypo: 跟周公诉车祸九二八三名遭公抢续一\n",
      "hypo: 温州光速车火九二八三鸣河西边路边\n",
      "hypo: 温州光速车或九二八三名招呼将陷入\n",
      "hypo: 温州光速车或九二八三米遭疯抢续航\n",
      "hypo: 跟周公诉车祸九二八三名招呼将陷入\n",
      "hypo: 温州光速车火九二八三米遭疯抢续航\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域\n",
      "remeined hypothes: 24\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭\n",
      "hypo: 温州光速车火九二八三米遭疯抢续一遭\n",
      "hypo: 温州光速车火九二八三名遭疯抢续一遭\n",
      "hypo: 温州光速车或九二八三米遭疯抢续一遭\n",
      "hypo: 温州光速车火九二八三鸣河西边路边家\n",
      "hypo: 跟周公诉车祸九二八三名遭公抢续一遭\n",
      "hypo: 跟周公诉车祸九二八三名遭公抢续航班\n",
      "hypo: 温州光速车或九二八三米招呼将大鸣遭\n",
      "hypo: 温州光速车火九二八三名遭疯抢续一击\n",
      "hypo: 温州光速车火九二八三名遭疯抢续航班\n",
      "hypo: 温州光速车火九二八三名遭疯抢续航下\n",
      "hypo: 跟周公诉车祸九二八三名遭公抢续航线\n",
      "hypo: 温州光速车或九二八三米遭疯抢续一击\n",
      "hypo: 温州光速车火九二八三名遭疯抢续航线\n",
      "hypo: 温州光速车或九二八三米遭疯抢续航线\n",
      "hypo: 温州光速车或九二八三名招呼将陷入二\n",
      "hypo: 温州光速车火九二八三米遭疯抢续一击\n",
      "hypo: 跟周公诉车祸九二八三名遭公抢续一家\n",
      "hypo: 温州光速车火九二八三米遭疯抢续航线\n",
      "hypo: 温州光速车火九二八三米遭疯抢续航班\n",
      "hypo: 温州光速车或九二八三米遭疯抢续航班\n",
      "hypo: 跟周公诉车祸九二八三名遭公抢续一击\n",
      "remeined hypothes: 18\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭疯\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭疯\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭疯\n",
      "hypo: 温州光速车火九二八三米遭疯抢续一遭疯\n",
      "hypo: 温州光速车或九二八三米遭疯抢续一遭疯\n",
      "hypo: 温州光速车或九二八三名招呼将陷入二包\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭公\n",
      "hypo: 温州光速车火九二八三名遭疯抢续一遭疯\n",
      "hypo: 温州光速车火九二八三鸣河西边路边家西\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭弹\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭响\n",
      "hypo: 温州光速车火九二八三鸣河西边路边家路\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭铜\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭响\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭响\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭弹\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭昏\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭遇\n",
      "remeined hypothes: 30\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭疯抢\n",
      "hypo: 温州光速车火九二八三米遭疯抢续一遭疯抢\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭疯抢\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭疯抢\n",
      "hypo: 温州光速车或九二八三米遭疯抢续一遭疯抢\n",
      "hypo: 温州光速车火九二八三名遭疯抢续一遭疯抢\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭公抢\n",
      "hypo: 温州光速车或九二八三名招呼将陷入二包大\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭弹强\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭昏抢\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭弹强\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭铜枪\n",
      "hypo: 温州光速车火九二八三鸣河西边路边家路一\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭疯狂\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭公顷\n",
      "hypo: 温州光速车火九二八三鸣河西边路边家西安\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭铜器\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭遇到\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭疯狂\n",
      "hypo: 温州光速车火九二八三鸣河西边路边家西路\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭疯狂\n",
      "hypo: 温州光速车火九二八三鸣河西边路边家西边\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭弹枪\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭响堵\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭响泥\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭响堵\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭响泥\n",
      "hypo: 温州光速车或九二八三名招呼将陷入二包当\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭响堵\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭弹枪\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "remeined hypothes: 24\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭疯抢续\n",
      "hypo: 温州光速车火九二八三米遭疯抢续一遭疯抢续\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭疯抢续\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭疯抢续\n",
      "hypo: 温州光速车或九二八三米遭疯抢续一遭疯抢续\n",
      "hypo: 温州光速车火九二八三名遭疯抢续一遭疯抢续\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭公抢续\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭昏抢续\n",
      "hypo: 温州光速车或九二八三名招呼将陷入二包大米\n",
      "hypo: 温州光速车或九二八三名招呼将陷入二包大名\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭弹强续\n",
      "hypo: 温州光速车或九二八三名招呼将陷入二包大鸣\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭铜枪示\n",
      "hypo: 温州光速车火九二八三鸣河西边路边家路一遭\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭弹强续\n",
      "hypo: 温州光速车火九二八三鸣河西边路边家西路边\n",
      "hypo: 跟周公诉车祸九二八三名遭虹桥市域遭公顷续\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭弹强蓄\n",
      "hypo: 温州光速车或九二八三名招呼将陷入二包当名\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭弹枪续\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭疯抢市\n",
      "hypo: 温州光速车火九二八三名遭虹桥市域遭弹强市\n",
      "hypo: 跟周公诉车祸九二八三名遭虹抢市域遭遇到虹\n",
      "hypo: 温州光速车火九二八三鸣河西边路边家西安邦\n",
      "no hypothesis. Finish decoding.\n",
      "正解： 温州高速车祸九二包大米遭哄抢续带头者被拘\n"
     ]
    }
   ],
   "source": [
    "# Decode a single held-out utterance with beam search and print the n-best\n",
    "# hypotheses, then show the reference transcript for comparison.\n",
    "args = Args(30, 1, 21)\n",
    "b = 2\n",
    "n = 1\n",
    "sample = te_dataset[b][n][1]\n",
    "# Load the Kaldi feature matrix (frames x feature_dim) for this utterance.\n",
    "input_tensor = torch.tensor(kaldi_io.read_mat(sample[\"input\"][0][\"feat\"]))\n",
    "# Truncate to a multiple of 4 frames (presumably the encoder subsamples by 4\n",
    "# -- confirm) and cap the length at MAX_LENGTH*4 frames.\n",
    "a = input_tensor.shape[0]//4*4\n",
    "input_tensor = input_tensor[0:(a if a < MAX_LENGTH*4 else MAX_LENGTH*4 ),:]\n",
    "\n",
    "# g = actual number of frames fed to the model.\n",
    "g = input_tensor.shape[0]\n",
    "print(torch.tensor([g]))\n",
    "\n",
    "# Add the batch dimension: (1, frames, feature_dim).\n",
    "input_tensor = input_tensor.unsqueeze(0)\n",
    "print(\"input_tensor:\",input_tensor.shape)\n",
    "# NOTE(review): 'b' is reused here -- it was the dataset index above and now\n",
    "# holds whatever recognize() returns (the hypothesis list).\n",
    "b = model.recognize(input_tensor.to(device),torch.tensor([g]), char_list, args)\n",
    "print(\"正解：\", sample[\"output\"][0][\"text\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Report how much CUDA memory the caching allocator currently reserves, in MB.\n",
    "# torch.cuda.memory_cached() is deprecated; memory_reserved() is its rename\n",
    "# (same value, no DeprecationWarning).\n",
    "torch.cuda.memory_reserved()/1000000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch/debug: peek at one element of 'dataset' (defined in an earlier cell).\n",
    "dataset[2]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch/debug: peek at one entry of 'data_list' (defined in an earlier cell).\n",
    "data_list[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch/debug: print the raw token values of one transcript.\n",
    "# The .item() call suggests 'corpus' holds 0-d tensors -- confirm in data_4.py.\n",
    "for ch in data_list[1][\"corpus\"]:\n",
    "    print(ch.item())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch: small example tensor of shape (1, 4, 3) for view/indexing experiments below.\n",
    "d = torch.tensor([[[1,2,3],[4,5,6],[7,8,9],[10,11,12]]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch/debug: look up the token mapped to id 1 (presumably <eos> -- confirm).\n",
    "lang.index2word[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch: reshape the (1, 4, 3) example tensor to (4, 1, 3).\n",
    "d.view((4,1,3))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch: drop the leading batch dimension of the example tensor -> shape (4, 3).\n",
    "d[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
