{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import glob"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch.autograd import Variable\n",
    "import torch.nn.functional as F\n",
    "from torch import optim\n",
    "from torch.utils.data import Dataset, DataLoader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torchaudio\n",
    "import librosa\n",
    "from torchaudio import transforms\n",
    "from model import WaveNet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import IPython.display\n",
    "import numpy as np\n",
    "import random"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(32825,)"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Sanity check: load one VCTK utterance as a mono 16 kHz waveform.\n",
     "test_audio, _ = librosa.load('./VCTK/p225/p225_001.wav', sr=16000, mono=True)\n",
     "test_audio.shape  # 1-D numpy array of samples"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'torch.Tensor'>\n",
      "torch.Size([32825, 1])\n"
     ]
    }
   ],
   "source": [
     "# Convert the waveform to a torch tensor shaped (n_samples, 1).\n",
     "test_wav_tensor = torch.from_numpy(test_audio).unsqueeze(1)\n",
     "print(type(test_wav_tensor))\n",
     "print(test_wav_tensor.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class VCTK(Dataset):\n",
    "    def __init__(self, path='./VCTK/', speaker='p225', transform=None,\n",
    "                 sr=16000, top_db=10):\n",
    "        self.wav_list = glob.glob(path + speaker + '/*.wav')\n",
    "        self.wav_ids = sorted([f.split('/')[-1] for f in glob.glob(path + '*')])\n",
    "        self.transform = transform\n",
    "        self.sr = sr\n",
    "        self.top_db = top_db\n",
    "        \n",
    "    def __getitem__(self, index):\n",
    "        f = self.wav_list[index]  # example, './VCTK/p225/p225_001.wav'\n",
    "        audio, _ = librosa.load(f, sr=self.sr, mono=True)\n",
    "        \"\"\"librosa.load返回(ndarray, sr), \n",
    "        sr, sampling rate, 即每秒采集多少个样本, \n",
    "        mono, 单声道,\n",
    "        所以audio是ndarray, shape (n,)\"\"\"\n",
    "        \n",
    "        audio, _ = librosa.effects.trim(audio, top_db=self.top_db, frame_length=2048)\n",
    "        \"\"\"去掉无声部分\n",
    "        Trim, leading and trailing silence from an audio signal.\n",
    "        top_db, The threshold (in decibels) below reference to consider as silence.\n",
    "        frame_length, The number of samples per analysis frame\"\"\"\n",
    "        \n",
    "        audio = np.clip(audio, -1, 1)  # 设定audio中值的上下限, 超过上下限转化为上下限值\n",
    "        \n",
    "        wav_tensor = torch.from_numpy(audio).unsqueeze(1)\n",
    "        \"\"\"torch.unsqueeze, 返回一个新的张量, 在指定位置插入维度 1.\n",
    "        所以wav_tensor, shape (n, 1)\"\"\"\n",
    "        \n",
    "        wav_id = f.split('/')[3]  # example, 'p255_001.wav'\n",
    "        if self.transform is not None:\n",
    "            wav_tensor = self.transform(wav_tensor)\n",
    "            \n",
    "        return wav_tensor\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.wav_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "t = transforms.Compose([transforms.MuLawEncoding(), transforms.LC2CL()])\n",
    "\"\"\"MuLawEncoding, 转化[-1, 1]的值至[0, 255]\n",
    "LC2CL, Permute a 2d tensor from samples (n x c) to (c x n)\n",
    "\"\"\"\n",
    "\n",
    "def collate_fn_(batch_data, max_len=40000):\n",
    "    # 如果audio_len超过40000, 则随机截取一段40000的片段\n",
    "    audio = batch_data[0]\n",
    "    audio_len = audio.size(1)\n",
    "    if audio_len > max_len:\n",
    "        idx = random.randint(0, audio_len - max_len)\n",
    "        return audio[:, idx:(idx + max_len)]\n",
    "    else:\n",
    "        return audio"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'collate_fn, merges a list of samples to form a mini-batch.\\ndata in training_data, shape (1, n), the first \"1\" is the batch size.\\n'"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "vctk = VCTK(speaker='p225', transform=t, sr=16000)\n",
    "# vctk, shape (1, n), range [0, 255]\n",
    "\n",
    "training_data = DataLoader(vctk, batch_size=1, shuffle=True, collate_fn=collate_fn_)\n",
    "\"\"\"collate_fn, merges a list of samples to form a mini-batch.\n",
    "data in training_data, shape (1, n), the first \"1\" is the batch size.\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([1, 18944])"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "iter(training_data).next().shape  # n比原音频小是因为去掉了无声部分"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
     "model = WaveNet()  # architecture defined in the local model.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Adam optimizer; note eps=1e-4 is larger than the 1e-8 default.\n",
     "train_step = optim.Adam(model.parameters(), lr=2e-3, eps=1e-4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
     "scheduler = optim.lr_scheduler.MultiStepLR(train_step, milestones=[50, 150, 250], gamma=0.5)\n",
     "# MultiStepLR multiplies lr by gamma at each milestone epoch:\n",
     "# lr = 2e-3            if epoch < 50\n",
     "# lr = 2e-3 * 0.5      if 50 <= epoch < 150\n",
     "# lr = 2e-3 * 0.25     if 150 <= epoch < 250\n",
     "# lr = 2e-3 * 0.125    if epoch >= 250"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 18943, 32])\n",
      "torch.Size([1, 32, 18943])\n"
     ]
    }
   ],
   "source": [
    "import torch.nn as nn\n",
    "\n",
    "\n",
    "test_pre = nn.Embedding(256, 32)  # in_depth=256, res_channels=32\n",
    "test_data = iter(training_data).next()\n",
    "test_x = test_data[:, :-1]\n",
    "test_pre_out = test_pre(test_x)\n",
    "print(test_pre_out.shape)\n",
    "test_pre_out = test_pre_out.transpose(1, 2)\n",
    "print(test_pre_out.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 5.5691237449646\n",
      "1 5.527717590332031\n",
      "2 5.486761093139648\n",
      "3 5.4462409019470215\n",
      "4 5.405642986297607\n",
      "5 5.368988990783691\n",
      "6 5.342849254608154\n",
      "7 5.32550048828125\n",
      "8 5.308935642242432\n",
      "9 5.290231227874756\n",
      "10 5.271295070648193\n",
      "11 5.253927707672119\n",
      "12 5.237802505493164\n",
      "13 5.221366882324219\n",
      "14 5.203128814697266\n",
      "15 5.182265281677246\n",
      "16 5.158449649810791\n",
      "17 5.131231784820557\n",
      "18 5.100296497344971\n",
      "19 5.065127372741699\n",
      "20 5.0242462158203125\n",
      "21 4.975534915924072\n",
      "22 4.917816638946533\n",
      "23 4.850963115692139\n",
      "24 4.781714916229248\n",
      "25 4.917372226715088\n",
      "26 5.341699123382568\n",
      "27 5.038628101348877\n",
      "28 4.877258777618408\n",
      "29 4.788681507110596\n",
      "30 4.773149490356445\n",
      "31 4.692018032073975\n",
      "32 4.610084533691406\n",
      "33 4.561102390289307\n",
      "34 4.510151386260986\n",
      "35 4.446015357971191\n",
      "36 4.3676557540893555\n",
      "37 4.2941436767578125\n",
      "38 4.23598051071167\n",
      "39 4.179023265838623\n",
      "40 4.121458530426025\n",
      "41 4.067913055419922\n",
      "42 4.014148235321045\n",
      "43 3.954289436340332\n",
      "44 3.903211832046509\n",
      "45 3.8437442779541016\n",
      "46 3.7834410667419434\n",
      "47 3.7275497913360596\n",
      "48 3.672804594039917\n",
      "49 3.6160576343536377\n",
      "50 3.5580854415893555\n",
      "51 3.516829490661621\n",
      "52 3.484809160232544\n",
      "53 3.44490647315979\n",
      "54 3.415836811065674\n",
      "55 3.3771026134490967\n",
      "56 3.3379244804382324\n",
      "57 3.3000950813293457\n",
      "58 3.26019549369812\n",
      "59 3.2228822708129883\n",
      "60 3.178501605987549\n",
      "61 3.136929988861084\n",
      "62 3.09566068649292\n",
      "63 3.0492279529571533\n",
      "64 3.004485607147217\n",
      "65 2.9590985774993896\n",
      "66 2.9100663661956787\n",
      "67 2.8614256381988525\n",
      "68 2.812385082244873\n",
      "69 2.761094093322754\n",
      "70 2.7081050872802734\n",
      "71 2.6534535884857178\n",
      "72 2.599346399307251\n",
      "73 2.544528007507324\n",
      "74 2.4884793758392334\n",
      "75 2.4341275691986084\n",
      "76 2.382848024368286\n",
      "77 2.3342061042785645\n",
      "78 2.2774834632873535\n",
      "79 2.201362133026123\n",
      "80 2.1234612464904785\n",
      "81 2.0690994262695312\n",
      "82 2.016566276550293\n",
      "83 1.9433528184890747\n",
      "84 1.8766793012619019\n",
      "85 1.829817771911621\n",
      "86 1.7738401889801025\n",
      "87 1.6835302114486694\n",
      "88 1.6089677810668945\n",
      "89 1.5513824224472046\n",
      "90 1.4812474250793457\n",
      "91 1.4110114574432373\n",
      "92 1.3578834533691406\n",
      "93 1.3016383647918701\n",
      "94 1.2285727262496948\n",
      "95 1.1752665042877197\n",
      "96 1.1312263011932373\n",
      "97 1.0357335805892944\n",
      "98 0.9719963669776917\n",
      "99 0.9378964900970459\n",
      "100 0.8593854308128357\n",
      "101 0.8040611743927002\n",
      "102 0.7552483677864075\n",
      "103 0.6845027804374695\n",
      "104 0.6508575081825256\n",
      "105 0.5916467308998108\n",
      "106 0.5455920696258545\n",
      "107 0.5054912567138672\n",
      "108 0.4536572992801666\n",
      "109 0.42217954993247986\n",
      "110 0.3789195716381073\n",
      "111 0.3494807481765747\n",
      "112 0.31361714005470276\n",
      "113 0.28700628876686096\n",
      "114 0.2582753598690033\n",
      "115 0.2351953089237213\n",
      "116 0.211981400847435\n",
      "117 0.1931275725364685\n",
      "118 0.17398886382579803\n",
      "119 0.15919293463230133\n",
      "120 0.1438109576702118\n",
      "121 0.1318906992673874\n",
      "122 0.12008847296237946\n",
      "123 0.11003131419420242\n",
      "124 0.10134376585483551\n",
      "125 0.0929187759757042\n",
      "126 0.08613543957471848\n",
      "127 0.07967767864465714\n",
      "128 0.073842354118824\n",
      "129 0.06893665343523026\n",
      "130 0.06422941386699677\n",
      "131 0.06006031110882759\n",
      "132 0.056445274502038956\n",
      "133 0.053016457706689835\n",
      "134 0.049917399883270264\n",
      "135 0.04720735549926758\n",
      "136 0.04468487575650215\n",
      "137 0.042329080402851105\n",
      "138 0.040238603949546814\n",
      "139 0.03835633769631386\n",
      "140 0.0365694984793663\n",
      "141 0.034913551062345505\n",
      "142 0.033443864434957504\n",
      "143 0.03208986669778824\n",
      "144 0.030792711302638054\n",
      "145 0.029597550630569458\n",
      "146 0.02852318249642849\n",
      "147 0.027516720816493034\n",
      "148 0.026553519070148468\n",
      "149 0.025661960244178772\n",
      "150 0.024848774075508118\n",
      "151 0.024460697546601295\n",
      "152 0.02408132702112198\n",
      "153 0.02371748723089695\n",
      "154 0.02337554097175598\n",
      "155 0.023054732009768486\n",
      "156 0.022748656570911407\n",
      "157 0.02245054580271244\n",
      "158 0.022158607840538025\n",
      "159 0.02187538892030716\n",
      "160 0.021603871136903763\n",
      "161 0.021344734355807304\n",
      "162 0.02109525352716446\n",
      "163 0.020852388814091682\n",
      "164 0.020614467561244965\n",
      "165 0.020381994545459747\n",
      "166 0.02015652135014534\n",
      "167 0.01993827149271965\n",
      "168 0.01972665823996067\n",
      "169 0.01952029950916767\n",
      "170 0.01931801065802574\n",
      "171 0.019119685515761375\n",
      "172 0.018925802782177925\n",
      "173 0.018736429512500763\n",
      "174 0.01855163462460041\n",
      "175 0.018370725214481354\n",
      "176 0.018193310126662254\n",
      "177 0.0180191807448864\n",
      "178 0.017848171293735504\n",
      "179 0.017680369317531586\n",
      "180 0.017515840008854866\n",
      "181 0.017354171723127365\n",
      "182 0.01719539426267147\n",
      "183 0.01703922264277935\n",
      "184 0.016885600984096527\n",
      "185 0.01673448085784912\n",
      "186 0.016585858538746834\n",
      "187 0.016439499333500862\n",
      "188 0.01629558764398098\n",
      "189 0.016153767704963684\n",
      "190 0.016014039516448975\n",
      "191 0.015876470133662224\n",
      "192 0.015740826725959778\n",
      "193 0.015607256442308426\n",
      "194 0.015475503169000149\n",
      "195 0.015345661900937557\n",
      "196 0.015217579901218414\n",
      "197 0.015091230161488056\n",
      "198 0.014966619201004505\n",
      "199 0.014843696728348732\n",
      "200 0.01472239289432764\n",
      "201 0.01460267510265112\n",
      "202 0.014484591782093048\n",
      "203 0.014367983676493168\n",
      "204 0.01425287127494812\n",
      "205 0.014139257371425629\n",
      "206 0.014027048833668232\n",
      "207 0.013916224241256714\n",
      "208 0.013806809671223164\n",
      "209 0.01369878463447094\n",
      "210 0.013592052273452282\n",
      "211 0.013486628420650959\n",
      "212 0.013382445089519024\n",
      "213 0.013279535807669163\n",
      "214 0.013177910819649696\n",
      "215 0.013077445328235626\n",
      "216 0.012978200800716877\n",
      "217 0.012880085967481136\n",
      "218 0.012783143669366837\n",
      "219 0.012687362730503082\n",
      "220 0.012592695653438568\n",
      "221 0.012499055825173855\n",
      "222 0.012406573630869389\n",
      "223 0.012315125204622746\n",
      "224 0.012224677950143814\n",
      "225 0.012135316617786884\n",
      "226 0.012046875432133675\n",
      "227 0.011959454044699669\n",
      "228 0.011873125098645687\n",
      "229 0.011787612922489643\n",
      "230 0.011703072115778923\n",
      "231 0.011619477532804012\n",
      "232 0.01153682079166174\n",
      "233 0.011455059051513672\n",
      "234 0.011374136433005333\n",
      "235 0.011294141411781311\n",
      "236 0.011214998550713062\n",
      "237 0.011136685498058796\n",
      "238 0.011059220880270004\n",
      "239 0.010982614010572433\n",
      "240 0.010906790383160114\n",
      "241 0.010831722058355808\n",
      "242 0.0107575049623847\n",
      "243 0.010684042237699032\n",
      "244 0.01061137206852436\n",
      "245 0.010539463721215725\n",
      "246 0.010468298569321632\n",
      "247 0.010397844016551971\n",
      "248 0.010328135453164577\n",
      "249 0.010259145870804787\n",
      "250 0.010190869681537151\n",
      "251 0.010157021693885326\n",
      "252 0.010123394429683685\n",
      "253 0.010089992545545101\n",
      "254 0.010056796483695507\n",
      "255 0.010023842565715313\n",
      "256 0.00999104231595993\n",
      "257 0.009958469308912754\n",
      "258 0.009926014579832554\n",
      "259 0.009893780574202538\n",
      "260 0.009861686266958714\n",
      "261 0.009829802438616753\n",
      "262 0.009798048995435238\n",
      "263 0.009766437113285065\n",
      "264 0.00973503477871418\n",
      "265 0.00970373023301363\n",
      "266 0.009672543033957481\n",
      "267 0.009641553275287151\n",
      "268 0.009610682725906372\n",
      "269 0.009579946286976337\n",
      "270 0.009549307636916637\n",
      "271 0.009518882259726524\n",
      "272 0.009488523937761784\n",
      "273 0.009458320215344429\n",
      "274 0.009428289718925953\n",
      "275 0.009398326277732849\n",
      "276 0.009368509985506535\n",
      "277 0.009338791482150555\n",
      "278 0.009309216402471066\n",
      "279 0.009279755875468254\n",
      "280 0.009250420145690441\n",
      "281 0.009221173822879791\n",
      "282 0.009192080236971378\n",
      "283 0.009163105860352516\n",
      "284 0.009134260937571526\n",
      "285 0.009105521254241467\n",
      "286 0.009076838381588459\n",
      "287 0.009048355743288994\n",
      "288 0.009019981138408184\n",
      "289 0.008991637267172337\n",
      "290 0.008963487111032009\n",
      "291 0.008935420773923397\n",
      "292 0.008907451294362545\n",
      "293 0.008879638276994228\n",
      "294 0.008851924911141396\n",
      "295 0.008824272081255913\n",
      "296 0.008796759881079197\n",
      "297 0.008769333362579346\n",
      "298 0.00874201487749815\n",
      "299 0.008714849129319191\n"
     ]
    }
   ],
   "source": [
    "for epoch in range(300):\n",
    "    loss_ = []\n",
    "    scheduler.step()  # update lr\n",
    "    for data in training_data:\n",
    "        data = Variable(data)  # 转化data为可以计算Gradient的Tensor, shape (1, n)\n",
    "        x = data[:, :-1]  # 去掉data最后1位, 因为后续标签数据y整体后错1位\n",
    "        \n",
    "        logits = model(x)\n",
    "        # shape (1, 256, m)\n",
    "        \n",
    "        y = data[:, -logits.size(2):]  # 截取data尾部和logits长度相等的内容, shape (1, m)\n",
    "        \n",
    "        loss = F.cross_entropy(logits.transpose(1, 2).contiguous().view(-1, 256) ,y.view(-1))\n",
    "        '''logits返回预测结果值出现在[0, 255]的对应值, 未进行softmax激活.\n",
    "        logits, shape (1, 256, m) -> (1, m, 256) -> (m, 256).\n",
    "        .view(-1, 256)可以替换为.squeeze() ?\n",
    "        --------\n",
    "        \n",
    "        contiguous()\n",
    "        调用view之前最好先contiguous\n",
    "        x.contiguous().view() \n",
    "\n",
    "        因为view需要tensor的内存是整块的 \n",
    "\n",
    "        contiguous：view只能用在contiguous的variable上。如果在view之前用了transpose, permute等，需要用contiguous()来返回一个contiguous copy。 \n",
    "        一种可能的解释是： \n",
    "        有些tensor并不是占用一整块内存，而是由不同的数据块组成，而tensor的view()操作依赖于内存是整块的，这时只需要执行contiguous()这个函数，把tensor变成在内存中连续分布的形式。 \n",
    "        判断是否contiguous用torch.Tensor.is_contiguous()函数。\n",
    "        --------\n",
    "        \n",
    "        y.view(-1), 转化y为1d, 长度由元素个数决定\n",
    "        logits.transpose(1, 2), 调换logits的1， 2两个dimension\n",
    "        \n",
    "        '''\n",
    "        \n",
    "        train_step.zero_grad()  # 初始化optimizer中的grandient\n",
    "        loss.backward()  # 求解grandient, backward计算基于loss function\n",
    "        train_step.step()  # 更新参数\n",
    "        loss_.append(loss.item())\n",
    "    \n",
    "    print(epoch, np.mean(loss_))\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "torch.save(me"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
