{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fd820b47-fc68-44be-b9fa-e1b63b4c8789",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run standalone inference via the project's CLI script (alternative to the\n",
    "# in-notebook pipeline in the cells below).\n",
    "# NOTE(review): this uses checkpoint G_700000 while the in-notebook cells load\n",
    "# G_20000 — confirm which checkpoint is intended.\n",
    "!python vits_infer.py --config ./configs/bert_vits.json --model logs/bert_vits/G_700000.pth\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "73073301-a6ae-4e14-b90f-3ef7b08bde3c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports for the in-notebook inference pipeline.\n",
    "# NOTE(review): `sys` and `argparse` appear unused anywhere in this notebook\n",
    "# (likely carried over from vits_infer.py) — candidates for removal.\n",
    "import os\n",
    "import sys\n",
    "import numpy as np\n",
    "\n",
    "import torch\n",
    "import utils\n",
    "import argparse\n",
    "\n",
    "from scipy.io import wavfile\n",
    "from text.symbols import symbols\n",
    "from text import cleaned_text_to_sequence\n",
    "from vits_pinyin import VITS_PinYin"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "a14db732-49ed-4208-aeb2-f1609f965d0f",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-06-13 19:50:40,606 WETEXT INFO found existing fst: /root/miniconda3/lib/python3.8/site-packages/tn/zh_tn_tagger.fst\n",
      "2024-06-13 19:50:40,606 WETEXT INFO found existing fst: /root/miniconda3/lib/python3.8/site-packages/tn/zh_tn_tagger.fst\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:wetext-zh_normalizer:found existing fst: /root/miniconda3/lib/python3.8/site-packages/tn/zh_tn_tagger.fst\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-06-13 19:50:40,608 WETEXT INFO                     /root/miniconda3/lib/python3.8/site-packages/tn/zh_tn_verbalizer.fst\n",
      "2024-06-13 19:50:40,608 WETEXT INFO                     /root/miniconda3/lib/python3.8/site-packages/tn/zh_tn_verbalizer.fst\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:wetext-zh_normalizer:                    /root/miniconda3/lib/python3.8/site-packages/tn/zh_tn_verbalizer.fst\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-06-13 19:50:40,609 WETEXT INFO skip building fst for zh_normalizer ...\n",
      "2024-06-13 19:50:40,609 WETEXT INFO skip building fst for zh_normalizer ...\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:wetext-zh_normalizer:skip building fst for zh_normalizer ...\n"
     ]
    }
   ],
   "source": [
    "# Paths to the model configuration and generator checkpoint.\n",
    "config_path = './configs/bert_vits.json'\n",
    "model_path = './logs/bert_vits/G_20000.pth'\n",
    "\n",
    "def save_wav(wav, path, rate):\n",
    "    \"\"\"Peak-normalize audio to 60% of int16 full scale and write a PCM16 WAV.\n",
    "\n",
    "    Non-mutating: the original version scaled `wav` in place with `*=`,\n",
    "    silently altering the caller's array; this computes a scaled copy instead.\n",
    "    \"\"\"\n",
    "    # max(0.01, ...) guards against division by ~zero for (near-)silent audio.\n",
    "    wav = wav * (32767 / max(0.01, np.max(np.abs(wav))) * 0.6)\n",
    "    wavfile.write(path, rate, wav.astype(np.int16))\n",
    "\n",
    "# Device configuration: use the GPU when available.\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "# Pinyin front end: text -> phonemes plus BERT-based prosody embeddings.\n",
    "tts_front = VITS_PinYin(\"./bert\", device)\n",
    "\n",
    "# Load hyperparameters / configuration.\n",
    "hps = utils.get_hparams_from_file(config_path)\n",
    "\n",
    "# Build the generator (class name comes from the config) and load weights.\n",
    "net_g = utils.load_class(hps.train.eval_class)(\n",
    "    len(symbols),\n",
    "    hps.data.filter_length // 2 + 1,\n",
    "    hps.train.segment_size // hps.data.hop_length,\n",
    "    **hps.model)\n",
    "\n",
    "utils.load_model(model_path, net_g)\n",
    "net_g.eval()\n",
    "net_g.to(device)\n",
    "\n",
    "os.makedirs(\"./vits_infer_out/\", exist_ok=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "d39791b1-29e0-4a49-a6a1-59c85a3a48ba",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Input text to synthesize (Chinese for \"hello\").\n",
    "item = \"你好\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "34e57f88-8029-434a-b088-b12408e98a88",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Front end: convert the Chinese text to a phoneme string and per-character\n",
    "# prosody embeddings, then run the generator without tracking gradients.\n",
    "phonemes, char_embeds = tts_front.chinese_to_phonemes(item)\n",
    "input_ids = cleaned_text_to_sequence(phonemes)\n",
    "with torch.no_grad():\n",
    "    x_tst = torch.LongTensor(input_ids).unsqueeze(0).to(device)\n",
    "    x_tst_lengths = torch.LongTensor([len(input_ids)]).to(device)\n",
    "    x_tst_prosody = torch.FloatTensor(char_embeds).unsqueeze(0).to(device)\n",
    "    # noise_scale controls sampling variability; length_scale scales durations.\n",
    "    audio = net_g.infer(x_tst, x_tst_lengths, x_tst_prosody, noise_scale=0.5,\n",
    "                        length_scale=1)[0][0, 0].data.cpu().float().numpy()\n",
    "# Plain string literal: the original used an f-string with no placeholders (F541).\n",
    "save_wav(audio, \"./vits_infer_out/bert_vits.wav\", hps.data.sampling_rate)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "83738d66-b9be-4215-84b8-941dfdd66ae1",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
