{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Target vocabulary size of the extended model: the TOTAL number of text\n",
    "# symbols (original + newly added). Every symbol-dependent layer resized by\n",
    "# the later cells gets this many rows.\n",
    "extend_to = None #<= CHANGE THIS. The original is 178 symbols\n",
    "\n",
    "#⚠️ Ensure the total number of symbols in meldataset.py matches your extend_to!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# load packages\n",
    "%cd ..\n",
    "import yaml\n",
    "import torch\n",
    "from torch import nn\n",
    "import os\n",
    "\n",
    "from models import *\n",
    "from utils import *\n",
    "\n",
    "def represent_list_flow(dumper, data):\n",
    "    \"\"\"YAML representer that serializes Python lists in flow style ([a, b]).\"\"\"\n",
    "    return dumper.represent_sequence(\n",
    "        yaml.resolver.BaseResolver.DEFAULT_SEQUENCE_TAG,\n",
    "        data,\n",
    "        flow_style=True\n",
    "    )\n",
    "yaml.SafeDumper.add_representer(list, represent_list_flow)\n",
    "\n",
    "# Build the model from the training config. Do not swallow failures here:\n",
    "# every later cell uses `model` and `config`, so continuing after an error\n",
    "# would only defer the crash to a confusing NameError downstream.\n",
    "try:\n",
    "    config = yaml.safe_load(open(\"./Configs/config.yaml\"))\n",
    "    model_params = recursive_munch(config['model_params'])\n",
    "    model = build_model(model_params)\n",
    "except Exception as e:\n",
    "    print(e)\n",
    "    raise\n",
    "device = 'cpu'  # all weight surgery below is done on CPU"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from collections import OrderedDict\n",
    "\n",
    "# Only these sub-modules contain (or feed) the symbol-dependent layers;\n",
    "# everything else is dropped from both the checkpoint and the model.\n",
    "keys_to_keep = {'predictor', 'decoder', 'text_encoder', 'style_encoder', 'text_aligner', 'pitch_extractor', 'mpd', 'msd'}\n",
    "# NOTE(review): torch.load unpickles arbitrary objects -- only load checkpoints you trust.\n",
    "params_whole = torch.load(\"Models/Finetune/base_model.pth\", map_location='cpu')\n",
    "params = params_whole['net']\n",
    "params = {key: value for key, value in params.items() if key in keys_to_keep}\n",
    "\n",
    "for key in list(model.keys()):\n",
    "    if key not in keys_to_keep:\n",
    "        del model[key]\n",
    "\n",
    "for key in model:\n",
    "    if key in params:\n",
    "        print('%s loaded' % key)\n",
    "        try:\n",
    "            model[key].load_state_dict(params[key])\n",
    "        except RuntimeError:\n",
    "            # Checkpoint was saved from a DataParallel/DDP wrapper: strip the\n",
    "            # leading 'module.' from every key and retry (non-strict).\n",
    "            state_dict = params[key]\n",
    "            new_state_dict = OrderedDict()\n",
    "            for k, v in state_dict.items():\n",
    "                name = k[7:] # remove `module.`\n",
    "                new_state_dict[name] = v\n",
    "            # load params\n",
    "            model[key].load_state_dict(new_state_dict, strict=False)\n",
    "\n",
    "assert extend_to is not None, \"Set extend_to in the first cell before running this one\"\n",
    "\n",
    "# All layers whose row count equals the number of text symbols.\n",
    "symbol_layers = [\n",
    "    model['text_encoder'].embedding,\n",
    "    model['text_aligner'].ctc_linear[2].linear_layer,\n",
    "    model['text_aligner'].asr_s2s.embedding,\n",
    "    model['text_aligner'].asr_s2s.project_to_n_symbols\n",
    "]\n",
    "print(\"\\nOld shape:\")\n",
    "for module in symbol_layers:\n",
    "    print(module, module.weight.shape)\n",
    "\n",
    "for module in symbol_layers:\n",
    "    # New weight matrix: old rows copied verbatim, extra rows randomly\n",
    "    # initialized (mean=0, std=0.01) so new symbols start near zero.\n",
    "    new_shape = (extend_to, module.weight.shape[1])\n",
    "    new_weight = torch.randn(new_shape) * 0.01 #init mean=0, std=0.01\n",
    "    with torch.no_grad():\n",
    "        new_weight[:module.weight.size(0), :] = module.weight.detach().clone()\n",
    "    new_param = nn.Parameter(new_weight, requires_grad=True)\n",
    "\n",
    "    # Keep the module's metadata consistent with the resized tensor.\n",
    "    if isinstance(module, nn.Embedding):\n",
    "        module.num_embeddings = extend_to\n",
    "\n",
    "    if isinstance(module, nn.Linear):\n",
    "        module.out_features = extend_to\n",
    "        # Extend the bias too: old entries kept, new entries zero.\n",
    "        old_bias = module.bias.detach()\n",
    "        old_dim = old_bias.shape[0]\n",
    "        new_bias = torch.zeros(extend_to)\n",
    "        new_bias[:old_dim] = old_bias.clone()\n",
    "        module.bias.data = new_bias\n",
    "\n",
    "    module.weight = new_param\n",
    "\n",
    "print(\"\\nNew shape:\")\n",
    "for module in symbol_layers:\n",
    "    print(module, module.weight.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "save_path = \"./Extend/New_Weights\"\n",
    "\n",
    "# makedirs (unlike mkdir) also creates the missing './Extend' parent, and\n",
    "# exist_ok=True keeps re-runs of this cell from failing.\n",
    "os.makedirs(save_path, exist_ok=True)\n",
    "\n",
    "# Save the resized weights together with the optimizer/bookkeeping state\n",
    "# copied from the base checkpoint, so training can resume from this file.\n",
    "state = {\n",
    "    'net':  {key: model[key].state_dict() for key in model}, \n",
    "    'optimizer': params_whole['optimizer'],\n",
    "    'iters': params_whole['iters'],\n",
    "    'val_loss': params_whole['val_loss'],\n",
    "    'epoch': params_whole['epoch'],\n",
    "}\n",
    "torch.save(state, os.path.join(save_path, 'extended.pth'))\n",
    "\n",
    "# Save a matching config with the new token count in both places it appears.\n",
    "config['model_params']['ASR_params']['n_token'] = extend_to\n",
    "config['model_params']['n_token'] = extend_to\n",
    "with open(os.path.join(save_path, 'config.yaml'), 'w') as outfile:\n",
    "    yaml.safe_dump(config, outfile, \n",
    "                    default_flow_style=False,\n",
    "                    sort_keys=False,\n",
    "                    indent=4)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
