{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "3fef32c2",
   "metadata": {},
   "source": [
    "# **Phoneme Classification**"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e8a53c74",
   "metadata": {},
   "source": [
    "## The DARPA TIMIT Acoustic-Phonetic Continuous Speech Corpus (TIMIT)\n",
    "The TIMIT corpus of read speech was designed to provide speech data for the acquisition of acoustic-phonetic knowledge and for the development and evaluation of automatic speech recognition systems.\n",
    "\n",
    "This homework is a multiclass classification task: \n",
    "we will train a deep neural network classifier to predict the phoneme of each frame from the TIMIT speech corpus."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "1cac388f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.utils.data import Dataset, DataLoader, random_split\n",
    "\n",
    "import gc"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a1ad7819",
   "metadata": {},
   "source": [
    "## Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "d10f5d84",
   "metadata": {},
   "outputs": [],
   "source": [
    "def same_seeds(seed):\n",
    "    torch.manual_seed(seed)\n",
    "    if torch.cuda.is_available():\n",
    "        torch.cuda.manual_seed(seed)\n",
    "        torch.cuda.manual_seed_all(seed)  \n",
    "    np.random.seed(seed)  \n",
    "    torch.backends.cudnn.benchmark = False\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "\n",
    "def train_valid_split(train_data, train_label_data, valid_ratio):\n",
    "    '''Split provided training data into training set and validation set'''\n",
    "    percent = int(train_data.shape[0] * (1 - valid_ratio))\n",
    "    train_x, train_y, val_x, val_y = train_data[:percent], train_label_data[:percent], train_data[percent:], train_label_data[percent:]\n",
    "    return train_x, train_y, val_x, val_y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "739dec86",
   "metadata": {},
   "outputs": [],
   "source": [
    "class TIMITDataset(Dataset):\n",
    "    def __init__(self, X, y=None):\n",
    "        self.data = torch.from_numpy(X).float()\n",
    "        if y is not None:\n",
    "            y = y.astype(np.int)\n",
    "            self.label = torch.LongTensor(y)\n",
    "        else:\n",
    "            self.label = None\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        if self.label is not None:\n",
    "            return self.data[idx], self.label[idx]\n",
    "        else:\n",
    "            return self.data[idx]\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "bb3764f2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading data ...\n",
      "Size of training data: (1229932, 429)\n",
      "Size of testing data: (451552, 429)\n"
     ]
    }
   ],
   "source": [
    "print('Loading data ...')\n",
    "\n",
    "# the preprocessed TIMIT arrays live alongside the notebook\n",
    "data_root = './timit_11/'\n",
    "train = np.load(data_root + 'train_11.npy')\n",
    "train_label = np.load(data_root + 'train_label_11.npy')\n",
    "test = np.load(data_root + 'test_11.npy')\n",
    "\n",
    "print(f'Size of training data: {train.shape}')\n",
    "print(f'Size of testing data: {test.shape}')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "37e05455",
   "metadata": {},
   "source": [
    "## Create Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "c0d88202",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Classifier(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Classifier, self).__init__()\n",
    "        self.layer1 = nn.Linear(429, 1024)\n",
    "        self.layer2 = nn.Linear(1024, 512)\n",
    "        self.layer3 = nn.Linear(512, 128)\n",
    "        self.out = nn.Linear(128, 39) \n",
    "\n",
    "        self.act_fn = nn.Sigmoid()\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.layer1(x)\n",
    "        x = self.act_fn(x)\n",
    "\n",
    "        x = self.layer2(x)\n",
    "        x = self.act_fn(x)\n",
    "\n",
    "        x = self.layer3(x)\n",
    "        x = self.act_fn(x)\n",
    "\n",
    "        x = self.out(x)\n",
    "        \n",
    "        return x"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "73465e59",
   "metadata": {},
   "source": [
    "## Training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "03aa7aea",
   "metadata": {},
   "outputs": [],
   "source": [
    "# start training\n",
    "def trainer(train_loader, valid_loader, config, device):\n",
    "    '''Train a Classifier with Adam + cross-entropy.\n",
    "\n",
    "    config keys read: learning_rate, num_epoch, train_set_len, val_set_len,\n",
    "    model_path. When val_set_len > 0 the best-validation-accuracy model is\n",
    "    checkpointed; when val_set_len == 0 validation is skipped and the\n",
    "    last-epoch model is saved instead.\n",
    "    '''\n",
    "    model = Classifier().to(device)\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = torch.optim.Adam(model.parameters(), lr=config['learning_rate'])\n",
    "\n",
    "    best_acc = 0.0\n",
    "    for epoch in range(config['num_epoch']):\n",
    "        train_acc = 0.0\n",
    "        train_loss = 0.0\n",
    "        val_acc = 0.0\n",
    "        val_loss = 0.0\n",
    "\n",
    "        # training\n",
    "        model.train() # set the model to training mode\n",
    "        for i, data in enumerate(train_loader):\n",
    "            inputs, labels = data\n",
    "            inputs, labels = inputs.to(device), labels.to(device)\n",
    "            optimizer.zero_grad()\n",
    "            outputs = model(inputs)\n",
    "            batch_loss = criterion(outputs, labels)\n",
    "            _, train_pred = torch.max(outputs, 1) # get the index of the class with the highest probability\n",
    "            batch_loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            train_acc += (train_pred.cpu() == labels.cpu()).sum().item()\n",
    "            train_loss += batch_loss.item()\n",
    "\n",
    "        # validation -- guarded: the original divided by val_set_len and\n",
    "        # len(valid_loader) unconditionally, which raises ZeroDivisionError\n",
    "        # in the no-validation case this function explicitly supports below.\n",
    "        if config['val_set_len'] > 0:\n",
    "            model.eval() # set the model to evaluation mode\n",
    "            with torch.no_grad():\n",
    "                for i, data in enumerate(valid_loader):\n",
    "                    inputs, labels = data\n",
    "                    inputs, labels = inputs.to(device), labels.to(device)\n",
    "                    outputs = model(inputs)\n",
    "                    batch_loss = criterion(outputs, labels)\n",
    "                    _, val_pred = torch.max(outputs, 1)\n",
    "\n",
    "                    val_acc += (val_pred.cpu() == labels.cpu()).sum().item() # get the index of the class with the highest probability\n",
    "                    val_loss += batch_loss.item()\n",
    "\n",
    "                print('[{:03d}/{:03d}] Train Acc: {:3.6f} Loss: {:3.6f} | Val Acc: {:3.6f} loss: {:3.6f}'.format(\n",
    "                    epoch + 1, config['num_epoch'], train_acc/config['train_set_len'], train_loss/len(train_loader), val_acc/config['val_set_len'], val_loss/len(valid_loader)\n",
    "                ))\n",
    "\n",
    "                # if the model improves, save a checkpoint at this epoch\n",
    "                if val_acc > best_acc:\n",
    "                    best_acc = val_acc\n",
    "                    torch.save(model.state_dict(), config['model_path'])\n",
    "                    print('saving model with acc {:.3f}'.format(best_acc/config['val_set_len']))\n",
    "        else:\n",
    "            print('[{:03d}/{:03d}] Train Acc: {:3.6f} Loss: {:3.6f}'.format(\n",
    "                epoch + 1, config['num_epoch'], train_acc/config['train_set_len'], train_loss/len(train_loader)\n",
    "            ))\n",
    "\n",
    "    # if not validating, save the last epoch\n",
    "    if config['val_set_len'] == 0:\n",
    "        torch.save(model.state_dict(), config['model_path'])\n",
    "        print('saving model at last epoch')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "b0e2f3f0",
   "metadata": {},
   "outputs": [],
   "source": [
    "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
    "config = {\n",
    "    'val_ratio': 0.2, \n",
    "    'batch_size': 64, \n",
    "    'num_epoch': 20,\n",
    "    'learning_rate': 0.0001,\n",
    "    'model_path': './model.ckpt',\n",
    "    'seed': 5201314\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "d84a2242",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DEVICE: cuda\n",
      "Size of training set: (983945, 429)\n",
      "Size of validation set: (245987, 429)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Dearest0\\AppData\\Local\\Temp\\ipykernel_4984\\43596008.py:5: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  y = y.astype(np.int)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[001/020] Train Acc: 0.466612 Loss: 1.813197 | Val Acc: 0.566920 loss: 1.436640\n",
      "saving model with acc 0.567\n",
      "[002/020] Train Acc: 0.593293 Loss: 1.335053 | Val Acc: 0.630684 loss: 1.209483\n",
      "saving model with acc 0.631\n",
      "[003/020] Train Acc: 0.644113 Loss: 1.155062 | Val Acc: 0.657710 loss: 1.104951\n",
      "saving model with acc 0.658\n",
      "[004/020] Train Acc: 0.672913 Loss: 1.051337 | Val Acc: 0.675889 loss: 1.039122\n",
      "saving model with acc 0.676\n",
      "[005/020] Train Acc: 0.691091 Loss: 0.983191 | Val Acc: 0.683833 loss: 1.006717\n",
      "saving model with acc 0.684\n",
      "[006/020] Train Acc: 0.705226 Loss: 0.932951 | Val Acc: 0.689134 loss: 0.984606\n",
      "saving model with acc 0.689\n",
      "[007/020] Train Acc: 0.716287 Loss: 0.893109 | Val Acc: 0.693346 loss: 0.966599\n",
      "saving model with acc 0.693\n",
      "[008/020] Train Acc: 0.725886 Loss: 0.859650 | Val Acc: 0.697561 loss: 0.953749\n",
      "saving model with acc 0.698\n",
      "[009/020] Train Acc: 0.733735 Loss: 0.830983 | Val Acc: 0.698447 loss: 0.952048\n",
      "saving model with acc 0.698\n",
      "[010/020] Train Acc: 0.741057 Loss: 0.805346 | Val Acc: 0.701159 loss: 0.941104\n",
      "saving model with acc 0.701\n",
      "[011/020] Train Acc: 0.747212 Loss: 0.782964 | Val Acc: 0.696988 loss: 0.954500\n",
      "[012/020] Train Acc: 0.753200 Loss: 0.761935 | Val Acc: 0.702444 loss: 0.938562\n",
      "saving model with acc 0.702\n",
      "[013/020] Train Acc: 0.759004 Loss: 0.742970 | Val Acc: 0.700736 loss: 0.944304\n",
      "[014/020] Train Acc: 0.764096 Loss: 0.725172 | Val Acc: 0.701899 loss: 0.938265\n",
      "[015/020] Train Acc: 0.768766 Loss: 0.708538 | Val Acc: 0.700289 loss: 0.948108\n",
      "[016/020] Train Acc: 0.773918 Loss: 0.692637 | Val Acc: 0.702444 loss: 0.941743\n",
      "[017/020] Train Acc: 0.778544 Loss: 0.678052 | Val Acc: 0.702765 loss: 0.945989\n",
      "saving model with acc 0.703\n",
      "[018/020] Train Acc: 0.782595 Loss: 0.663849 | Val Acc: 0.700622 loss: 0.954213\n",
      "[019/020] Train Acc: 0.786519 Loss: 0.650362 | Val Acc: 0.701025 loss: 0.958437\n",
      "[020/020] Train Acc: 0.790660 Loss: 0.637638 | Val Acc: 0.700143 loss: 0.967776\n"
     ]
    }
   ],
   "source": [
    "# fix random seed for reproducibility\n",
    "same_seeds(config['seed'])\n",
    "\n",
    "print(f'DEVICE: {device}')\n",
    "\n",
    "# chronological (unshuffled) hold-out split\n",
    "train_x, train_y, val_x, val_y = train_valid_split(train, train_label, config['val_ratio'])\n",
    "print(f'Size of training set: {train_x.shape}')\n",
    "print(f'Size of validation set: {val_x.shape}')\n",
    "\n",
    "train_set = TIMITDataset(train_x, train_y)\n",
    "val_set = TIMITDataset(val_x, val_y)\n",
    "config['train_set_len'] = len(train_set)\n",
    "config['val_set_len'] = len(val_set)\n",
    "train_loader = DataLoader(train_set, batch_size=config['batch_size'], shuffle=True)  # only shuffle the training data\n",
    "val_loader = DataLoader(val_set, batch_size=config['batch_size'], shuffle=False)\n",
    "\n",
    "# drop the raw numpy copies; the datasets hold tensor copies of the data\n",
    "del train, train_label, train_x, train_y, val_x, val_y\n",
    "gc.collect()\n",
    "\n",
    "trainer(train_loader, val_loader, config, device)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f27602d7",
   "metadata": {},
   "source": [
    "## Testing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "4e58838d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<All keys matched successfully>"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# create testing dataset -- y=None, so __getitem__ yields features only\n",
    "test_set = TIMITDataset(test, None)\n",
    "# keep shuffle=False so row i of the submission matches test frame i\n",
    "test_loader = DataLoader(test_set, batch_size=config['batch_size'], shuffle=False)\n",
    "\n",
    "# create model and load weights from the checkpoint saved by trainer()\n",
    "model = Classifier().to(device)\n",
    "model.load_state_dict(torch.load(config['model_path']))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3bd05ffc",
   "metadata": {},
   "source": [
    "## Model Prediction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "cc624cc0",
   "metadata": {},
   "outputs": [],
   "source": [
    "predict = []\n",
    "model.eval()  # set the model to evaluation mode\n",
    "with torch.no_grad():\n",
    "    for data in test_loader:\n",
    "        inputs = data.to(device)\n",
    "        outputs = model(inputs)\n",
    "        # index of the class with the highest score for every frame\n",
    "        test_pred = outputs.argmax(dim=1)\n",
    "        predict.extend(test_pred.cpu().numpy())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "a3c4fe3e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# write the submission: one (Id, Class) row per test frame\n",
    "with open('prediction.csv', 'w') as f:\n",
    "    f.write('Id,Class\\n')\n",
    "    for i, y in enumerate(predict):\n",
    "        f.write(f'{i},{y}\\n')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
