{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "9EqJBfYhSvIt"
      },
      "outputs": [],
      "source": [
        "# %pip (not !pip) installs into the kernel's own environment\n",
        "%pip install -q pysrt\n",
        "%pip install -q pysubs2"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "KSY40g3JSZDp"
      },
      "source": [
        "### 1.根据字幕切割出分段音频"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "43RQDWgNSlFz"
      },
      "outputs": [],
      "source": [
        "import os\n",
        "import re\n",
        "import subprocess\n",
        "from collections import Counter\n",
        "\n",
        "import chardet\n",
        "import pysrt\n",
        "import pysubs2\n",
        "from tqdm import tqdm"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "zoXRJDUkTQJA"
      },
      "source": [
        "#### 常用函数\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "huizrd80TENm"
      },
      "outputs": [],
      "source": [
        "def get_subdir(directory):\n",
        "    \"\"\"Recursively collect every subdirectory under `directory`, sorted.\"\"\"\n",
        "    subdirectories = []\n",
        "    for dirpath, dirnames, files in os.walk(directory):\n",
        "        for dirname in dirnames:\n",
        "            subdirectories.append(os.path.join(dirpath, dirname))\n",
        "    subdirectories.sort()\n",
        "    return subdirectories\n",
        "\n",
        "def get_filename(directory,format=None):\n",
        "    \"\"\"Recursively list files under `directory` as [name, full_path] pairs.\n",
        "\n",
        "    If `format` is given (e.g. '.wav'), only files whose name ends with it\n",
        "    are returned. The result is sorted by file name.\n",
        "    \"\"\"\n",
        "    file_list = []\n",
        "    for root, dirs, files in os.walk(directory):\n",
        "        for file in files:\n",
        "            if format:\n",
        "                if file.endswith(format):\n",
        "                    file_path = os.path.join(root, file)\n",
        "                    file_list.append([file,file_path])\n",
        "            else:\n",
        "                file_path = os.path.join(root, file)\n",
        "                file_list.append([file, file_path])\n",
        "    file_list.sort()\n",
        "    return file_list\n",
        "\n",
        "\n",
        "# Get first-level subdirectories only (non-recursive)\n",
        "def get_first_subdir(directory):\n",
        "    \"\"\"Return the sorted immediate subdirectories of `directory`.\"\"\"\n",
        "    subdirectories = []\n",
        "    for name in os.listdir(directory):\n",
        "        if os.path.isdir(os.path.join(directory, name)):\n",
        "            subdirectories.append(os.path.join(directory, name))\n",
        "    subdirectories.sort()\n",
        "    return subdirectories"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "XULhgXcLTH6H"
      },
      "outputs": [],
      "source": [
        "def detect_encoding(file_name):\n",
        "    \"\"\"Detect the text encoding of a file with chardet (reads the whole file).\"\"\"\n",
        "    with open(file_name, 'rb') as file:\n",
        "        result = chardet.detect(file.read())\n",
        "    return result['encoding']\n",
        "\n",
        "def most_common_element(lst,num=1):\n",
        "    \"\"\"Return the `num` most frequent elements of `lst` as (value, count) pairs.\"\"\"\n",
        "    counter = Counter(lst)\n",
        "    most = counter.most_common(num)\n",
        "    return most\n",
        "\n",
        "\n",
        "def make_filename_safe(filename):\n",
        "    \"\"\"Sanitize `filename` so it can be used as a file name.\"\"\"\n",
        "    # Replace characters that are illegal in file names with underscores\n",
        "    filename = re.sub(r'[\\\\/:*?\"<>|]', '_', filename)\n",
        "    # Collapse runs of whitespace into a single space\n",
        "    filename = re.sub(r'\\s+', ' ', filename)\n",
        "    # Strip leading and trailing whitespace\n",
        "    filename = filename.strip()\n",
        "    return filename\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "vimRKLB1TXXp"
      },
      "source": [
        "#### VideoSegmentation"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "OePQQ39ARtdv"
      },
      "outputs": [],
      "source": [
        "class VideoSegmentation:\n",
        "    \"\"\"Cut per-subtitle audio clips out of each video using FFmpeg.\n",
        "\n",
        "    For every video in `video_lis_pth`, the subtitle file with the same\n",
        "    base name in `subtitle_dir` is parsed (.srt or .ass) and one WAV clip\n",
        "    is extracted per subtitle line into `<audio_out_dir>/<video>/voice/`.\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, video_lis_pth, audio_out_dir, subtitle_dir):\n",
        "        self.video_lis_pth = video_lis_pth  # folder containing the source videos\n",
        "        self.audio_out_dir = audio_out_dir  # root folder for the extracted audio\n",
        "        self.subtitle_dir = subtitle_dir    # folder with the matching subtitle files\n",
        "\n",
        "    def _cut_audio(self, video_path, start_time, end_time, audio_output):\n",
        "        # FFmpeg: seek, drop video (-vn), write 16-bit PCM WAV.\n",
        "        # (Writing mp3 directly was reported not to work here.)\n",
        "        command = ['ffmpeg', '-ss', str(start_time), '-to', str(end_time),\n",
        "                   '-i', f'{video_path}', '-vn', '-c:a', 'pcm_s16le',\n",
        "                   audio_output, '-loglevel', 'quiet']\n",
        "        subprocess.run(command)\n",
        "\n",
        "    def process(self):\n",
        "        video_lis = get_filename(self.video_lis_pth)\n",
        "\n",
        "        style = ''\n",
        "        sub_format = ''\n",
        "        encoding = None\n",
        "        voice_dir = 'voice'\n",
        "        for file, pth in tqdm(video_lis, desc='Processing Videos'):\n",
        "            filename, ext = os.path.splitext(file)\n",
        "            # Create the per-video output folders.\n",
        "            # BUG FIX: the paths previously used a literal '(unknown)'\n",
        "            # instead of the video's base name (`filename` was computed\n",
        "            # but never used).\n",
        "            os.makedirs(f'{self.audio_out_dir}/{filename}', exist_ok=True)\n",
        "            os.makedirs(f'{self.audio_out_dir}/{filename}/{voice_dir}', exist_ok=True)\n",
        "\n",
        "            if not self.subtitle_dir:\n",
        "                continue\n",
        "\n",
        "            if not sub_format:\n",
        "                # Probe one subtitle file to learn the extension and encoding\n",
        "                # shared by the whole subtitle folder.\n",
        "                one_subtitle_file = os.path.join(self.subtitle_dir, os.listdir(self.subtitle_dir)[0])\n",
        "                _, sub_format = os.path.splitext(one_subtitle_file)\n",
        "                encoding = detect_encoding(one_subtitle_file)\n",
        "\n",
        "            # Subtitle file matching the current video (same base name).\n",
        "            cur_sub_file = f'{self.subtitle_dir}/{filename}{sub_format}'\n",
        "\n",
        "            if sub_format == '.srt':\n",
        "                srt_file = pysrt.open(cur_sub_file, encoding=encoding)\n",
        "                for index, subtitle in enumerate(srt_file):\n",
        "                    start_time = subtitle.start.to_time()\n",
        "                    end_time = subtitle.end.to_time()\n",
        "                    audio_output = (f'{self.audio_out_dir}/{filename}/{voice_dir}/'\n",
        "                                    f'{index}_{make_filename_safe(subtitle.text)}.wav')\n",
        "                    self._cut_audio(pth, start_time, end_time, audio_output)\n",
        "            elif sub_format == '.ass':\n",
        "                subs = pysubs2.load(cur_sub_file, encoding=encoding)\n",
        "                if not style:\n",
        "                    # Assume the dialogue style is the most frequent style.\n",
        "                    style_lis = [sub.style for sub in subs]\n",
        "                    most_1 = most_common_element(style_lis)\n",
        "                    style = most_1[0][0]\n",
        "\n",
        "                new_subs = [sub for sub in subs if sub.style == style]\n",
        "                for index, subtitle in enumerate(new_subs):\n",
        "                    # pysubs2 times are in milliseconds; FFmpeg takes seconds.\n",
        "                    start_time = subtitle.start / 1000\n",
        "                    end_time = subtitle.end / 1000\n",
        "                    audio_output = (f'{self.audio_out_dir}/{filename}/{voice_dir}/'\n",
        "                                    f'{index}_{make_filename_safe(subtitle.text)}.wav')\n",
        "                    self._cut_audio(pth, start_time, end_time, audio_output)\n",
        "        # NOTE: the original called exit() here, which kills the Jupyter\n",
        "        # kernel; simply returning is correct in a notebook.\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "zN29Mm7oT54t"
      },
      "source": [
        "#### 自定义config参数"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "n77bOc_hT9md"
      },
      "outputs": [],
      "source": [
        "# Edit the dictionary values below. Folders marked ** must already contain the corresponding files.\n",
        "video_config = {\"video_lis_pth\": \"/mnt/sda/baidu_disk/凉宫春日/lg_video/video\",  # ** folder containing the source videos (must be prepared)\n",
        "                }\n",
        "audio_config = {\n",
        "    \"audio_model_pth\": '/mnt/sda/huggingface_weight/voicemodel/',\n",
        "    # ** model weight path — download with: git clone https://huggingface.co/scixing/voicemodel\n",
        "    \"audio_roles_dir\": '/mnt/sda/baidu_disk/lg/scixing/roles',  # ** role audio folders, classified by hand\n",
        "    \"audio_out_dir\": \"/mnt/sda/baidu_disk/凉宫春日/lg_video/audio\",  # output path for audio cut from the videos\n",
        "}\n",
        "\n",
        "srt_config = {\n",
        "    \"subtitle_dir\": \"/mnt/sda/baidu_disk/lg/zim/Subtitle_SC_SRT\",  # ** subtitles matching the videos; file names must match the video names\n",
        "    \"srt_out_dir\": \"./srt_predict_out\",\n",
        "    # 1. output path for the predicted speaker roles  2. after manual review, rename the file to add 'annotate' (a.txt -> a.annotate.txt) to enable incremental training\n",
        "\n",
        "}\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "9PgLSO5OTlib"
      },
      "source": [
        "#### 运行1 音频提取分割"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "_LQVJAZTSWsh"
      },
      "outputs": [],
      "source": [
        "# video_segmentor = VideoSegmentation(video_config['video_lis_pth'],\n",
        "#                                         audio_config['audio_out_dir'],\n",
        "#                                         srt_config['subtitle_dir'])\n",
        "# video_segmentor.process()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "oIWUNrcukmGw"
      },
      "outputs": [],
      "source": [
        "# !git clone https://huggingface.co/scixing/voicemodel"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "45LvR5uOnt1w"
      },
      "source": [
        "### 2.音频特征提取\n",
        "Audio Feature Extraction  "
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "I_I36FzElbVv"
      },
      "source": []
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "JT45f9PCiA-g"
      },
      "outputs": [],
      "source": [
        "\n",
        "import os\n",
        "import numpy as np\n",
        "import torch\n",
        "import pickle\n",
        "import torch.nn as nn\n",
        "import torch.nn.functional as F\n",
        "from torch.nn import Parameter\n",
        "import random\n",
        "import sys\n",
        "from datetime import datetime\n",
        "import librosa\n",
        "from torch.utils import data\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "TTwWayPi2qMV"
      },
      "source": [
        "#### Audio模型定义辅助函数"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "uTiSNi9UwIK2"
      },
      "outputs": [],
      "source": [
        "def SE_Res2Block(channels, kernel_size, stride, padding, dilation, scale):\n",
        "    \"\"\"One ECAPA-TDNN SE-Res2Block: 1x1 conv -> Res2 conv -> 1x1 conv -> SE.\"\"\"\n",
        "    return nn.Sequential(\n",
        "        Conv1dReluBn(channels, channels, kernel_size=1, stride=1, padding=0),\n",
        "        Res2Conv1dReluBn(channels, kernel_size, stride, padding, dilation, scale=scale),\n",
        "        Conv1dReluBn(channels, channels, kernel_size=1, stride=1, padding=0),\n",
        "        SE_Connect(channels)\n",
        "    )\n",
        "\n",
        "\n",
        "def load_audio(audio_path,\n",
        "               feature_method='melspectrogram',\n",
        "               mode='train',\n",
        "               sr=16000,\n",
        "               chunk_duration=3,\n",
        "               min_duration=0.5,\n",
        "               augmentors=None):\n",
        "    \"\"\"\n",
        "    Load and preprocess one audio file into a normalized feature matrix.\n",
        "    :param audio_path: path of the audio file\n",
        "    :param feature_method: feature type: 'melspectrogram' or 'spectrogram'\n",
        "    :param mode: processing mode: 'train', 'eval' or 'infer'\n",
        "    :param sr: target sample rate\n",
        "    :param chunk_duration: audio length (seconds) used for training/evaluation\n",
        "    :param min_duration: minimum audio length (seconds) accepted for training\n",
        "    :param augmentors: dict of augmentation callables; the 'specaug' entry is\n",
        "        applied to the feature matrix, all others to the raw waveform\n",
        "    :return: normalized feature matrix, shape (freq_bins, frames)\n",
        "    \"\"\"\n",
        "    # Read the waveform, resampling to `sr`\n",
        "    wav, sr_ret = librosa.load(audio_path, sr=sr)\n",
        "    num_wav_samples = wav.shape[0]\n",
        "    # Clips that are too short are not useful for training\n",
        "    if mode == 'train':\n",
        "        if num_wav_samples < int(min_duration * sr):\n",
        "            raise Exception(f'音频长度小于{min_duration}s，实际长度为：{(num_wav_samples / sr):.2f}s')\n",
        "    # Pad (by wrapping) clips shorter than the required chunk length\n",
        "    num_chunk_samples = int(chunk_duration * sr)\n",
        "    if num_wav_samples <= num_chunk_samples:\n",
        "        shortage = num_chunk_samples - num_wav_samples\n",
        "        wav = np.pad(wav, (0, shortage), 'wrap')\n",
        "    # Crop the chunk that will actually be used\n",
        "    if mode == 'train':\n",
        "        # Random crop\n",
        "        num_wav_samples = wav.shape[0]\n",
        "        num_chunk_samples = int(chunk_duration * sr)\n",
        "        if num_wav_samples > num_chunk_samples + 1:\n",
        "            start = random.randint(0, num_wav_samples - num_chunk_samples - 1)\n",
        "            stop = start + num_chunk_samples\n",
        "            wav = wav[start:stop]\n",
        "            # Randomly zero the head / trim the tail of full-length crops\n",
        "            if random.random() > 0.5:\n",
        "                wav[:random.randint(1, sr // 4)] = 0\n",
        "                wav = wav[:-random.randint(1, sr // 4)]\n",
        "        # Waveform-level data augmentation\n",
        "        if augmentors is not None:\n",
        "            for key, augmentor in augmentors.items():\n",
        "                if key == 'specaug': continue\n",
        "                wav = augmentor(wav)\n",
        "    elif mode == 'eval':\n",
        "        # To avoid running out of (GPU) memory, only keep the fixed-length head\n",
        "        num_wav_samples = wav.shape[0]\n",
        "        num_chunk_samples = int(chunk_duration * sr)\n",
        "        if num_wav_samples > num_chunk_samples + 1:\n",
        "            wav = wav[:num_chunk_samples]\n",
        "    # Compute the audio features\n",
        "    if feature_method == 'melspectrogram':\n",
        "        # Mel spectrogram\n",
        "        features = librosa.feature.melspectrogram(y=wav, sr=sr, n_fft=400, n_mels=80, hop_length=160, win_length=400)\n",
        "    elif feature_method == 'spectrogram':\n",
        "        # Magnitude spectrogram\n",
        "        linear = librosa.stft(wav, n_fft=400, win_length=400, hop_length=160)\n",
        "        features, _ = librosa.magphase(linear)\n",
        "    else:\n",
        "        raise Exception(f'预处理方法 {feature_method} 不存在！')\n",
        "    features = librosa.power_to_db(features, ref=1.0, amin=1e-10, top_db=None)\n",
        "    # Feature-level data augmentation (SpecAugment)\n",
        "    if mode == 'train' and augmentors is not None:\n",
        "        for key, augmentor in augmentors.items():\n",
        "            if key == 'specaug':\n",
        "                features = augmentor(features)\n",
        "    # Normalize to zero mean / unit variance along axis 0\n",
        "    mean = np.mean(features, 0, keepdims=True)\n",
        "    std = np.std(features, 0, keepdims=True)\n",
        "    features = (features - mean) / (std + 1e-5)\n",
        "    return features\n",
        "\n",
        "\n",
        "class Res2Conv1dReluBn(nn.Module):\n",
        "    \"\"\"Res2Net-style grouped Conv1d block: the channel dimension is split\n",
        "    into `scale` groups; each group (except the last) goes through its own\n",
        "    conv -> relu -> bn branch, with a running sum feeding the next branch.\n",
        "    \"\"\"\n",
        "    def __init__(self, channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False, scale=4):\n",
        "        super().__init__()\n",
        "        assert channels % scale == 0, \"{} % {} != 0\".format(channels, scale)\n",
        "        self.scale = scale\n",
        "        self.width = channels // scale  # channels per group\n",
        "        self.nums = scale if scale == 1 else scale - 1  # number of conv branches\n",
        "\n",
        "        self.convs = []\n",
        "        self.bns = []\n",
        "        for i in range(self.nums):\n",
        "            self.convs.append(nn.Conv1d(self.width, self.width, kernel_size, stride, padding, dilation, bias=bias))\n",
        "            self.bns.append(nn.BatchNorm1d(self.width))\n",
        "        self.convs = nn.ModuleList(self.convs)\n",
        "        self.bns = nn.ModuleList(self.bns)\n",
        "\n",
        "    def forward(self, x):\n",
        "        out = []\n",
        "        spx = torch.split(x, self.width, 1)\n",
        "        for i in range(self.nums):\n",
        "            if i == 0:\n",
        "                sp = spx[i]\n",
        "            else:\n",
        "                sp = sp + spx[i]\n",
        "            # Order: conv -> relu -> bn\n",
        "            sp = self.convs[i](sp)\n",
        "            sp = self.bns[i](F.relu(sp))\n",
        "            out.append(sp)\n",
        "        if self.scale != 1:\n",
        "            # The last group passes through unchanged\n",
        "            out.append(spx[self.nums])\n",
        "        out = torch.cat(out, dim=1)\n",
        "        return out\n",
        "\n",
        "\n",
        "class Conv1dReluBn(nn.Module):\n",
        "    \"\"\"A Conv1d layer followed by ReLU activation and batch normalization.\"\"\"\n",
        "\n",
        "    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):\n",
        "        super().__init__()\n",
        "        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias)\n",
        "        self.bn = nn.BatchNorm1d(out_channels)\n",
        "\n",
        "    def forward(self, x):\n",
        "        # Order: conv -> relu -> bn, matching the other blocks in this cell\n",
        "        activated = F.relu(self.conv(x))\n",
        "        return self.bn(activated)\n",
        "\n",
        "\n",
        "class SE_Connect(nn.Module):\n",
        "    \"\"\"Squeeze-and-Excitation: rescale each channel by a learned gate\n",
        "    computed from the time-averaged channel activations.\n",
        "    \"\"\"\n",
        "    def __init__(self, channels, s=2):\n",
        "        super().__init__()\n",
        "        assert channels % s == 0, \"{} % {} != 0\".format(channels, s)\n",
        "        self.linear1 = nn.Linear(channels, channels // s)  # squeeze\n",
        "        self.linear2 = nn.Linear(channels // s, channels)  # excite\n",
        "\n",
        "    def forward(self, x):\n",
        "        out = x.mean(dim=2)  # global average over the time axis\n",
        "        out = F.relu(self.linear1(out))\n",
        "        out = torch.sigmoid(self.linear2(out))  # per-channel gate in (0, 1)\n",
        "        out = x * out.unsqueeze(2)\n",
        "        return out\n",
        "\n",
        "\n",
        "class AttentiveStatsPool(nn.Module):\n",
        "    \"\"\"Attentive statistics pooling: attention-weighted mean and std over\n",
        "    time, concatenated to a fixed-size vector of length 2 * in_dim.\n",
        "    \"\"\"\n",
        "    def __init__(self, in_dim, bottleneck_dim):\n",
        "        super().__init__()\n",
        "        # Use Conv1d with stride == 1 rather than Linear, then we don't need to transpose inputs.\n",
        "        self.linear1 = nn.Conv1d(in_dim, bottleneck_dim, kernel_size=1)  # equals W and b in the paper\n",
        "        self.linear2 = nn.Conv1d(bottleneck_dim, in_dim, kernel_size=1)  # equals V and k in the paper\n",
        "\n",
        "    def forward(self, x):\n",
        "        # DON'T use ReLU here! In experiments, I find ReLU hard to converge.\n",
        "        alpha = torch.tanh(self.linear1(x))\n",
        "        alpha = torch.softmax(self.linear2(alpha), dim=2)  # attention weights over time\n",
        "        mean = torch.sum(alpha * x, dim=2)\n",
        "        residuals = torch.sum(alpha * x ** 2, dim=2) - mean ** 2\n",
        "        std = torch.sqrt(residuals.clamp(min=1e-9))  # clamp for numerical safety\n",
        "        return torch.cat([mean, std], dim=1)\n",
        "\n",
        "\n",
        "class EcapaTdnn(nn.Module):\n",
        "    \"\"\"ECAPA-TDNN speaker-embedding backbone.\n",
        "\n",
        "    Maps features of shape (batch, input_size, time) to an embedding of\n",
        "    shape (batch, embd_dim).\n",
        "    \"\"\"\n",
        "    def __init__(self, input_size=80, channels=512, embd_dim=192):\n",
        "        super().__init__()\n",
        "        self.layer1 = Conv1dReluBn(input_size, channels, kernel_size=5, padding=2, dilation=1)\n",
        "        self.layer2 = SE_Res2Block(channels, kernel_size=3, stride=1, padding=2, dilation=2, scale=8)\n",
        "        self.layer3 = SE_Res2Block(channels, kernel_size=3, stride=1, padding=3, dilation=3, scale=8)\n",
        "        self.layer4 = SE_Res2Block(channels, kernel_size=3, stride=1, padding=4, dilation=4, scale=8)\n",
        "\n",
        "        cat_channels = channels * 3  # layer2-4 outputs are concatenated\n",
        "        out_channels = cat_channels * 2  # pooling returns mean and std\n",
        "        self.emb_size = embd_dim\n",
        "        self.conv = nn.Conv1d(cat_channels, cat_channels, kernel_size=1)\n",
        "        self.pooling = AttentiveStatsPool(cat_channels, 128)\n",
        "        self.bn1 = nn.BatchNorm1d(out_channels)\n",
        "        self.linear = nn.Linear(out_channels, embd_dim)\n",
        "        self.bn2 = nn.BatchNorm1d(embd_dim)\n",
        "\n",
        "    def forward(self, x):\n",
        "        # Hierarchical residual connections between the SE-Res2 blocks\n",
        "        out1 = self.layer1(x)\n",
        "        out2 = self.layer2(out1) + out1\n",
        "        out3 = self.layer3(out1 + out2) + out1 + out2\n",
        "        out4 = self.layer4(out1 + out2 + out3) + out1 + out2 + out3\n",
        "\n",
        "        out = torch.cat([out2, out3, out4], dim=1)\n",
        "        out = F.relu(self.conv(out))\n",
        "        out = self.bn1(self.pooling(out))  # pool over time -> fixed-size vector\n",
        "        out = self.bn2(self.linear(out))\n",
        "        return out\n",
        "\n",
        "\n",
        "class SpeakerIdetification(nn.Module):\n",
        "    def __init__(\n",
        "            self,\n",
        "            backbone,\n",
        "            num_class=1,\n",
        "            lin_blocks=0,\n",
        "            lin_neurons=192,\n",
        "            dropout=0.1, ):\n",
        "        \"\"\"Speaker identification model: a speaker embedding backbone,\n",
        "        optional BatchNorm+Linear blocks, and a cosine classification layer.\n",
        "\n",
        "        Args:\n",
        "            backbone (nn.Module): speaker embedding backbone; must expose `emb_size`\n",
        "            num_class (int): number of speaker classes in the training dataset\n",
        "            lin_blocks (int, optional): number of BatchNorm+Linear blocks between the embedding and the final layer. Defaults to 0.\n",
        "            lin_neurons (int, optional): output dimension of the intermediate linear layers. Defaults to 192.\n",
        "            dropout (float, optional): dropout factor on the embedding. Defaults to 0.1.\n",
        "        \"\"\"\n",
        "        super(SpeakerIdetification, self).__init__()\n",
        "        # The backbone output is the speaker embedding.\n",
        "        self.backbone = backbone\n",
        "        if dropout > 0:\n",
        "            self.dropout = nn.Dropout(dropout)\n",
        "        else:\n",
        "            self.dropout = None\n",
        "\n",
        "        # Construct the speaker classifier head.\n",
        "        # BUG FIX: the blocks were previously kept in a plain Python list,\n",
        "        # so their parameters were invisible to .to(device), state_dict()\n",
        "        # and optimizers whenever lin_blocks > 0. nn.ModuleList registers\n",
        "        # them properly (backward compatible: old checkpoints contained no\n",
        "        # keys for these unregistered blocks).\n",
        "        input_size = self.backbone.emb_size\n",
        "        blocks = []\n",
        "        for i in range(lin_blocks):\n",
        "            blocks.extend([\n",
        "                nn.BatchNorm1d(input_size),\n",
        "                nn.Linear(in_features=input_size, out_features=lin_neurons),\n",
        "            ])\n",
        "            input_size = lin_neurons\n",
        "        self.blocks = nn.ModuleList(blocks)\n",
        "\n",
        "        # The final cosine-similarity classification layer.\n",
        "        self.weight = Parameter(torch.FloatTensor(num_class, input_size), requires_grad=True)\n",
        "        nn.init.xavier_normal_(self.weight, gain=1)\n",
        "\n",
        "    def forward(self, x):\n",
        "        \"\"\"Forward pass: backbone embedding, optional dropout and linear\n",
        "        blocks, then cosine logits against the class weight matrix.\n",
        "\n",
        "        Args:\n",
        "            x (torch.Tensor): input audio features, shape (batch, dimension, times)\n",
        "\n",
        "        Returns:\n",
        "            torch.Tensor: logits of the feats, shape (batch, num_class)\n",
        "        \"\"\"\n",
        "        # x.shape: (N, C, L)\n",
        "        x = self.backbone(x)  # (N, emb_size)\n",
        "        if self.dropout is not None:\n",
        "            x = self.dropout(x)\n",
        "\n",
        "        for fc in self.blocks:\n",
        "            x = fc(x)\n",
        "\n",
        "        # Cosine similarity between normalized embeddings and class weights.\n",
        "        logits = F.linear(F.normalize(x), F.normalize(self.weight, dim=-1))\n",
        "\n",
        "        return logits\n",
        "\n",
        "\n",
        "# Data loader\n",
        "class CustomDataset(data.Dataset):\n",
        "    \"\"\"\n",
        "    Load and preprocess audio listed in a tab-separated 'path<TAB>label' file.\n",
        "    :param data_list_path: path of the data list file (None for pure inference)\n",
        "    :param feature_method: feature type: 'melspectrogram' or 'spectrogram'\n",
        "    :param mode: processing mode: 'train', 'eval' or 'infer'\n",
        "    :param sr: target sample rate\n",
        "    :param chunk_duration: audio length (seconds) used for training/evaluation\n",
        "    :param min_duration: minimum audio length (seconds) accepted for training\n",
        "    :param augmentors: dict of data-augmentation callables\n",
        "    :return:\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, data_list_path,\n",
        "                 feature_method='melspectrogram',\n",
        "                 mode='train',\n",
        "                 sr=16000,\n",
        "                 chunk_duration=3,\n",
        "                 min_duration=0.5,\n",
        "                 augmentors=None):\n",
        "        super(CustomDataset, self).__init__()\n",
        "        # No data list is needed when only predicting\n",
        "        if data_list_path is not None:\n",
        "            with open(data_list_path, 'r') as f:\n",
        "                self.lines = f.readlines()\n",
        "        self.feature_method = feature_method\n",
        "        self.mode = mode\n",
        "        self.sr = sr\n",
        "        self.chunk_duration = chunk_duration\n",
        "        self.min_duration = min_duration\n",
        "        self.augmentors = augmentors\n",
        "\n",
        "    def __getitem__(self, idx):\n",
        "        try:\n",
        "            audio_path, label = self.lines[idx].replace('\\n', '').split('\\t')\n",
        "            # Load and preprocess the audio clip\n",
        "            features = load_audio(audio_path, feature_method=self.feature_method, mode=self.mode, sr=self.sr,\n",
        "                                  chunk_duration=self.chunk_duration, min_duration=self.min_duration,\n",
        "                                  augmentors=self.augmentors)\n",
        "            return features, np.array(int(label), dtype=np.int64)\n",
        "        except Exception as ex:\n",
        "            # On failure, log the error and retry with a random other sample\n",
        "            print(f\"[{datetime.now()}] 数据: {self.lines[idx]} 出错，错误信息: {ex}\", file=sys.stderr)\n",
        "            rnd_idx = np.random.randint(self.__len__())\n",
        "            return self.__getitem__(rnd_idx)\n",
        "\n",
        "    def __len__(self):\n",
        "        return len(self.lines)\n",
        "\n",
        "    @property\n",
        "    def input_size(self):\n",
        "        # Feature dimension fed to the model (mel bins or STFT bins)\n",
        "        if self.feature_method == 'melspectrogram':\n",
        "            return 80\n",
        "        elif self.feature_method == 'spectrogram':\n",
        "            return 201\n",
        "        else:\n",
        "            raise Exception(f'预处理方法 {self.feature_method} 不存在！')\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "JLiuN00s2lJr"
      },
      "source": [
        "#### AudioFeatureExtraction"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "pgwnzX6p1Z8m"
      },
      "outputs": [],
      "source": [
        "class AudioFeatureExtraction:\n",
        "    \"\"\"Extract speaker-embedding features for every cut audio clip.\n",
        "\n",
        "    Loads an EcapaTdnn-based speaker model from `model_local_pth` and, for\n",
        "    each clip under `<subdir>/voice/`, writes the embedding to\n",
        "    `<subdir>/feature/<file>.pkl`.\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, model_local_pth, audio_duration=3, feature_method='melspectrogram', ):\n",
        "        self.use_model = ''\n",
        "        self.audio_duration = audio_duration\n",
        "        self.feature_method = feature_method\n",
        "        self.resume = model_local_pth\n",
        "        self.model = None\n",
        "        self.device = None\n",
        "        self.load_model()\n",
        "\n",
        "    def load_model(self):\n",
        "        \"\"\"Build the model and load weights from `<resume>/<use_model>/model.pth`.\"\"\"\n",
        "        dataset = CustomDataset(data_list_path=None, feature_method=self.feature_method)\n",
        "        ecapa_tdnn = EcapaTdnn(input_size=dataset.input_size)\n",
        "        self.model = SpeakerIdetification(backbone=ecapa_tdnn)\n",
        "        # Fall back to CPU when no GPU is available (the original always\n",
        "        # required CUDA and crashed on CPU-only machines).\n",
        "        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
        "        self.model.to(self.device)\n",
        "\n",
        "        # Load the checkpoint, dropping any weights whose shape no longer matches.\n",
        "        model_path = os.path.join(self.resume, self.use_model, 'model.pth')\n",
        "        model_dict = self.model.state_dict()\n",
        "        param_state_dict = torch.load(model_path, map_location=self.device)\n",
        "        for name, weight in model_dict.items():\n",
        "            if name in param_state_dict.keys():\n",
        "                if list(weight.shape) != list(param_state_dict[name].shape):\n",
        "                    param_state_dict.pop(name, None)\n",
        "        self.model.load_state_dict(param_state_dict, strict=False)\n",
        "        print(f\"成功加载模型参数和优化方法参数：{model_path}\")\n",
        "        self.model.eval()\n",
        "\n",
        "    def infer(self, audio_path):\n",
        "        \"\"\"Return the speaker embedding (numpy, shape (1, emb_size)) for one clip.\"\"\"\n",
        "        data = load_audio(audio_path, mode='infer', feature_method=self.feature_method,\n",
        "                          chunk_duration=self.audio_duration)\n",
        "        data = data[np.newaxis, :]\n",
        "        data = torch.tensor(data, dtype=torch.float32, device=self.device)\n",
        "        # no_grad: inference only — the original built an autograd graph for\n",
        "        # every clip, wasting memory.\n",
        "        with torch.no_grad():\n",
        "            feature = self.model.backbone(data)\n",
        "        return feature.data.cpu().numpy()\n",
        "\n",
        "    def extract_features(self, root_dir):\n",
        "        \"\"\"Embed every `<subdir>/voice` clip under `root_dir` into `<subdir>/feature`.\"\"\"\n",
        "        sub_dirs = get_subdir(root_dir)\n",
        "\n",
        "        for dir in sub_dirs:\n",
        "            voice_files = get_filename(os.path.join(dir, 'voice'))\n",
        "            if not voice_files:\n",
        "                continue\n",
        "            # Create the output folder once per directory, not once per file.\n",
        "            new_dir = os.path.join(dir, 'feature')\n",
        "            os.makedirs(new_dir, exist_ok=True)\n",
        "            for file, pth in voice_files:\n",
        "                feature = self.infer(pth)[0]\n",
        "                with open(f\"{new_dir}/{file}.pkl\", \"wb\") as f:\n",
        "                    pickle.dump(feature, f)\n",
        "        print('音频特征提取完成')"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "sr4PIZVA2yr7"
      },
      "source": [
        "#### 运行2 音频embedding生成"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "dEtTsba823EL"
      },
      "outputs": [],
      "source": [
        "# 模型参数在第一部分 自定义config参数\n",
        "# audio_feature_extractor = AudioFeatureExtraction(audio_config['audio_model_pth'])\n",
        "# audio_feature_extractor.extract_features(audio_config['audio_out_dir'])"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "s-JM9J4N6nDK"
      },
      "source": [
        "### 3.识别台本角色"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "nHgJU3Sp-Bvm"
      },
      "source": [
        "#### 导包"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "AWLkvOdT6OPK"
      },
      "outputs": [],
      "source": [
        "from sklearn.neighbors import KNeighborsClassifier\n",
        "from sklearn.model_selection import cross_val_score\n",
        "from scipy.spatial.distance import cosine"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "lA3hf-vj96-i"
      },
      "source": [
        "#### My_Classifier"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "S749oqiV9q7v"
      },
      "outputs": [],
      "source": [
        "class KNN_Classifier_Custom:\n",
        "    def __init__(self, feature, labels,n_neighbors):\n",
        "        self.feature = feature\n",
        "        self.labels = labels\n",
        "\n",
        "    def predict(self, x):\n",
        "        min_dist = float('inf')\n",
        "        predicted_label = None\n",
        "\n",
        "        for i, f in enumerate(self.feature):\n",
        "            dist = cosine(x, f)\n",
        "            if dist < min_dist:\n",
        "                min_dist = dist\n",
        "                predicted_label = self.labels[i]\n",
        "\n",
        "        return predicted_label, min_dist\n",
        "\n",
        "\n",
        "class KNN_Classifier:\n",
        "    \"\"\"k-nearest-neighbour classifier (cosine metric) backed by\n",
        "    sklearn's KNeighborsClassifier.\"\"\"\n",
        "    def __init__(self, feature, labels,n_neighbors=3):\n",
        "        self.feature = feature\n",
        "        self.labels = labels\n",
        "        self.classifier = KNeighborsClassifier(n_neighbors=n_neighbors, metric='cosine')\n",
        "        self.classifier.fit(self.feature, self.labels)\n",
        "\n",
        "    def predict(self, x):\n",
        "        \"\"\"Return (predicted label, distance to the nearest neighbour).\"\"\"\n",
        "        query = x.reshape(1, -1)\n",
        "\n",
        "        # Majority label among the k nearest neighbours.\n",
        "        label = self.classifier.predict(query)[0]\n",
        "\n",
        "        # Distances to the k nearest neighbours; report the smallest.\n",
        "        distances, _ = self.classifier.kneighbors(query)\n",
        "\n",
        "        return label, distances[0].min()\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "v1KykX3V-AAP"
      },
      "source": [
        "#### AudioClassification"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "IdQDjE5s9oFN"
      },
      "outputs": [],
      "source": [
        "class AudioClassification:\n",
        "    \"\"\"Predict a speaking role for every extracted audio clip.\n",
        "\n",
        "    Builds a labelled feature set from the manually classified role audio\n",
        "    in ``audio_roles_dir`` and writes one predicted script per episode\n",
        "    into ``srt_out_dir``.\n",
        "    \"\"\"\n",
        "    def __init__(self, audio_roles_dir, srt_out_dir, audio_out_dir):\n",
        "        self.audio_roles_dir = audio_roles_dir\n",
        "        self.srt_out_dir = srt_out_dir\n",
        "\n",
        "        # One sub-directory per episode under the audio output root.\n",
        "        self.audio_first_dir = get_first_subdir(audio_out_dir)\n",
        "\n",
        "        self.candidate_path = self.audio_first_dir[:]\n",
        "        self.roles, self.roles_list = self.get_roles_list()\n",
        "        self.features, self.labels = self.get_features()\n",
        "\n",
        "    def get_roles_list(self):\n",
        "        \"\"\"Return (role names, per-role lists of labelled audio file names).\n",
        "\n",
        "        Example:\n",
        "            roles\n",
        "                春日 谷口 长门 新川先生 多丸裕 朝比奈 朝仓 ...\n",
        "            roles_list[i]\n",
        "                ['209_或者这座岛有没有被当地人称为「什么什么岛」的传闻？.wav',\n",
        "                 '106_我要喝100%果汁.wav', '121_你睡什么觉啊  笨蛋.wav', ...]\n",
        "        \"\"\"\n",
        "        roles = os.listdir(self.audio_roles_dir)\n",
        "\n",
        "        roles_list = []\n",
        "        for role in roles:\n",
        "            sub_list = os.listdir(os.path.join(self.audio_roles_dir, role))\n",
        "            roles_list.append(sub_list)\n",
        "\n",
        "        return roles, roles_list\n",
        "\n",
        "    def _find_feature_fname(self, file):\n",
        "        \"\"\"Locate the pickled feature of clip ``file`` in the episode dirs.\n",
        "\n",
        "        Returns the feature path, or None when no episode's voice/ dir\n",
        "        contains the clip.\n",
        "        \"\"\"\n",
        "        for candidate in self.candidate_path:\n",
        "            if os.path.exists(os.path.join(candidate, 'voice', file)):\n",
        "                return os.path.join(candidate, 'feature', file) + '.pkl'\n",
        "        return None\n",
        "\n",
        "    def get_features(self):\n",
        "        \"\"\"Load the pickled feature of every labelled clip.\n",
        "\n",
        "        Returns an (N, D) numpy matrix and a parallel list of role\n",
        "        labels. Clips whose audio or feature file cannot be found are\n",
        "        skipped silently.\n",
        "        \"\"\"\n",
        "        features = []\n",
        "        labels = []\n",
        "        dim = 0\n",
        "\n",
        "        for role in self.roles:\n",
        "            print(role, end='')\n",
        "            for file in self.roles_list[self.roles.index(role)]:\n",
        "                feature_fname = self._find_feature_fname(file)\n",
        "                if feature_fname is None or not os.path.exists(feature_fname):\n",
        "                    continue\n",
        "\n",
        "                with open(feature_fname, 'rb') as f:\n",
        "                    feature = pickle.load(f)\n",
        "\n",
        "                # Stack rows into an (N, D) matrix; the first row fixes D.\n",
        "                if dim == 0:\n",
        "                    features = feature\n",
        "                    dim = feature.shape[0]\n",
        "                else:\n",
        "                    features = np.vstack((features, feature))\n",
        "\n",
        "                labels.append(role)\n",
        "\n",
        "        return features, labels\n",
        "\n",
        "    def knn_test(self):\n",
        "        \"\"\"Report 5-fold cross-validated 1-NN (cosine) accuracy on the\n",
        "        labelled feature set, as a sanity check of the labelling.\"\"\"\n",
        "        k = 1\n",
        "        knn = KNeighborsClassifier(n_neighbors=k, metric='cosine')\n",
        "\n",
        "        features = np.array(self.features)\n",
        "        labels = np.array(self.labels)\n",
        "\n",
        "        cv_accuracy = cross_val_score(knn, features, labels, cv=5)\n",
        "\n",
        "        for fold, accuracy in enumerate(cv_accuracy, 1):\n",
        "            print(f\"Fold {fold}: {accuracy}\")\n",
        "\n",
        "        mean_accuracy = np.mean(cv_accuracy)\n",
        "        print(f\"Average Accuracy: {mean_accuracy}\")\n",
        "\n",
        "    def gather_feature_label(self, roles, roles_list):\n",
        "        \"\"\"Like get_features, but for an explicit (roles, roles_list)\n",
        "        selection; warns about every clip whose feature is missing.\"\"\"\n",
        "        features = []\n",
        "        labels = []\n",
        "        dim = 0\n",
        "\n",
        "        for role in roles:\n",
        "            print(role, end=' ')\n",
        "\n",
        "            for file in roles_list[roles.index(role)]:\n",
        "                feature_fname = self._find_feature_fname(file)\n",
        "\n",
        "                if feature_fname is None:\n",
        "                    print('warning!', file, 'not found')\n",
        "                    continue\n",
        "\n",
        "                if not os.path.exists(feature_fname):\n",
        "                    print('warning!', feature_fname, 'not found')\n",
        "                    continue\n",
        "\n",
        "                with open(feature_fname, 'rb') as f:\n",
        "                    feature = pickle.load(f)\n",
        "\n",
        "                if dim == 0:\n",
        "                    features = feature\n",
        "                    dim = feature.shape[0]\n",
        "                else:\n",
        "                    features = np.vstack((features, feature))\n",
        "\n",
        "                labels.append(role)\n",
        "\n",
        "        return features, labels\n",
        "\n",
        "    def get_feat_sel(self, roles, roles_list):\n",
        "        \"\"\"Gather features/labels for the given roles. Each role's file\n",
        "        list is shuffled in place so a future slice would pick a random\n",
        "        subset (currently the full list is used).\"\"\"\n",
        "        roles_sel = []\n",
        "        roles_list_sel = []\n",
        "        for role in roles[:]:\n",
        "            wav_list = roles_list[roles.index(role)]\n",
        "\n",
        "            random.shuffle(wav_list)\n",
        "\n",
        "            roles_sel.append(role)\n",
        "            roles_list_sel.append(wav_list)\n",
        "\n",
        "        feat_sel, label_sel = self.gather_feature_label(roles_sel, roles_list_sel)\n",
        "        return feat_sel, label_sel\n",
        "\n",
        "    def get_sel_predict(self):\n",
        "        \"\"\"Re-predict every labelled sample with the current classifier.\n",
        "\n",
        "        Returns (distances of correct predictions, distances of wrong\n",
        "        ones), useful for choosing the decision thresholds.\n",
        "        \"\"\"\n",
        "        correct_dists = []\n",
        "        wrong_dists = []\n",
        "\n",
        "        for i in range(len(self.labels)):\n",
        "            feat = self.features[i, :]\n",
        "            label = self.labels[i]\n",
        "\n",
        "            predicted_label, distance = self.my_classifier.predict(feat)\n",
        "            if label == predicted_label:\n",
        "                correct_dists.append(distance)\n",
        "            else:\n",
        "                wrong_dists.append(distance)\n",
        "\n",
        "        return correct_dists, wrong_dists\n",
        "\n",
        "    def create_classifier(self, class_name, features, labels, n_neighbors=None):\n",
        "        \"\"\"Instantiate a classifier class (e.g. 'KNN_Classifier') by its\n",
        "        global name.\"\"\"\n",
        "        classifier_class = globals()[class_name](features, labels, n_neighbors)\n",
        "        return classifier_class\n",
        "\n",
        "    def get_pridict(self, class_name, n_neighbors=3, mark=''):\n",
        "        \"\"\"Predict the role of every clip of every episode and write one\n",
        "        script file per episode into ``srt_out_dir``.\n",
        "\n",
        "        ``mark``, when given, is appended to the output file name:\n",
        "        a.txt -> a_mark.txt.\n",
        "        \"\"\"\n",
        "        # Refresh the labelled data so incremental additions are picked up.\n",
        "        self.roles, self.roles_list = self.get_roles_list()\n",
        "        self.feat_sel, self.label_sel = self.get_feat_sel(self.roles, self.roles_list)\n",
        "        self.my_classifier = self.create_classifier(class_name, self.feat_sel, self.label_sel, n_neighbors)\n",
        "\n",
        "        # Below threshold_certain the label is trusted; below\n",
        "        # threshold_doubt it is marked as uncertain; otherwise the line\n",
        "        # is left unlabelled.\n",
        "        threshold_certain = 0.4\n",
        "        threshold_doubt = 0.6\n",
        "        os.makedirs(self.srt_out_dir, exist_ok=True)\n",
        "        for feature_folder in self.candidate_path[:]:\n",
        "            name = feature_folder.split('/')[-1]\n",
        "            if mark:\n",
        "                save_name = os.path.join(self.srt_out_dir, f'{name}_{mark}.txt')\n",
        "            else:\n",
        "                save_name = os.path.join(self.srt_out_dir, f'{name}.txt')\n",
        "            feature_folder = os.path.join(feature_folder, \"feature\")\n",
        "\n",
        "            file_list = os.listdir(feature_folder)\n",
        "\n",
        "            # Feature files are named '<index>_<text>.wav.pkl'; sort by index.\n",
        "            file_list.sort(key=lambda x: int(x.split('_')[0]))\n",
        "            with open(save_name, \"w\", encoding=\"utf-8\") as f_out:\n",
        "                for file in file_list:\n",
        "                    id_str = ''.join(file.split('_')[1:])\n",
        "                    full_file_name = os.path.join(feature_folder, file)\n",
        "\n",
        "                    with open(full_file_name, 'rb') as f:\n",
        "                        feature = pickle.load(f)\n",
        "\n",
        "                    predicted_label, distance = self.my_classifier.predict(feature)\n",
        "\n",
        "                    role_name = ''\n",
        "\n",
        "                    if distance < threshold_certain:\n",
        "                        role_name = predicted_label\n",
        "                    elif distance < threshold_doubt:\n",
        "                        role_name = '(可能)' + predicted_label\n",
        "\n",
        "                    # id_str[:-8] strips the trailing '.wav.pkl'.\n",
        "                    output_str = role_name + ':「' + id_str[:-8] + '」'\n",
        "                    f_out.write(output_str + \"\\n\")"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "2U7z1UIT-IFI"
      },
      "source": [
        "#### 运行3 台本识别"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "rU_KoIxT-Ger"
      },
      "outputs": [],
      "source": [
        "audio_classification = AudioClassification(audio_config['audio_roles_dir'],\n",
        "                                               srt_config['srt_out_dir'],\n",
        "                                               audio_config['audio_out_dir'])\n",
        "class_name = ['KNN_Classifier_Custom', 'KNN_Classifier']\n",
        "n_neighbors = 3  # k used by the KNN classifier\n",
        "mark = ''  # appended to the output file name: a.txt -> a_mark.txt; set e.g. '01', '02'... to distinguish runs after dataset updates\n",
        "# audio_classification.get_pridict(class_name[1], n_neighbors, mark)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "32GPvMPN-jhi",
        "outputId": "1f891854-10e4-4c39-d710-e1d3118dc26b"
      },
      "outputs": [
        {
          "ename": "NameError",
          "evalue": "ignored",
          "output_type": "error",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
            "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
            "\u001b[0;32m<ipython-input-19-cade2f7cc6f7>\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0maudio_classification\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_pridict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
            "\u001b[0;31mNameError\u001b[0m: name 'audio_classification' is not defined"
          ]
        }
      ],
      "source": [
        "# get_pridict requires class_name (it has no default); pass the configured values.\n",
        "audio_classification.get_pridict(class_name[1], n_neighbors, mark)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "AJgyCWWy_EmO"
      },
      "outputs": [],
      "source": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "eTwmkNTyLBsY"
      },
      "source": [
        "###  4.增量识别"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "lTeDesXLfKeA"
      },
      "source": [
        "#### 导包"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "bPm3o0u3fPWo"
      },
      "outputs": [],
      "source": [
        "import shutil"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "sHRmKSRffSmq"
      },
      "source": [
        "#### IncrementData"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "rYcf8x6tfDwo"
      },
      "outputs": [],
      "source": [
        "class IncrementData():\n",
        "    \"\"\"Grow the labelled role-audio set from human-annotated scripts.\n",
        "\n",
        "    For every '*annotate.txt' script in ``srt_out_dir``, matches the\n",
        "    annotated lines back to the extracted audio clips and copies each\n",
        "    uniquely matched clip into its role folder under ``audio_roles_dir``.\n",
        "    \"\"\"\n",
        "    def __init__(self, audio_out_dir, audio_roles_dir, srt_out_dir):\n",
        "        self.audio_out_dir = audio_out_dir\n",
        "        self.audio_roles_dir = audio_roles_dir\n",
        "        self.srt_out_dir = srt_out_dir\n",
        "\n",
        "    def static_origin(self):\n",
        "        \"\"\"Return the file names currently collected across all role dirs.\"\"\"\n",
        "        stattics_dic = {}\n",
        "        role_lis = get_first_subdir(self.audio_roles_dir)\n",
        "        for sub_dir in role_lis:\n",
        "            role = sub_dir.split('/')[-1]\n",
        "            stattics_dic[role] = [item[0] for item in get_filename(sub_dir)]\n",
        "\n",
        "        origin_lis = [item for sub_lis in list(stattics_dic.values()) for item in sub_lis]\n",
        "        return origin_lis\n",
        "\n",
        "    def process(self):\n",
        "        \"\"\"Copy every uniquely matched, newly annotated clip into its\n",
        "        role folder, printing one line per copied file.\"\"\"\n",
        "        golden_res = get_filename(self.srt_out_dir, 'annotate.txt')\n",
        "        i = 0\n",
        "        for file, pth in golden_res[:]:\n",
        "            srt_lis = read_tolist(pth)\n",
        "            file_name = '_'.join(file.split('_')[:2])\n",
        "            # Map subtitle text -> list of annotated role(s).\n",
        "            annote_dic = {}\n",
        "            for line in srt_lis:\n",
        "                role, text = line.split(\":「\")\n",
        "                text = text[:-1]  # drop the trailing 」\n",
        "                if text not in annote_dic:\n",
        "                    annote_dic[text] = [role]\n",
        "                else:\n",
        "                    annote_dic[text].append(role)\n",
        "            # Subtitles and audio clips are not one-to-one, so only texts\n",
        "            # occurring exactly once can be matched safely.\n",
        "            real_dic = {k: v[0] for k, v in annote_dic.items() if len(v) == 1}\n",
        "            corres_dir = os.path.join(self.audio_out_dir, f'{file_name}/voice')\n",
        "            audio_lis = get_filename(corres_dir)\n",
        "\n",
        "            # Map clip text -> [name, path], keeping only unique texts\n",
        "            # (same reasoning as above).\n",
        "            audio_dic = {}\n",
        "            for aud_name, aud_pth in audio_lis:\n",
        "                file_text = os.path.splitext(aud_name)[0]\n",
        "                audio_text = ''.join(file_text.split('_')[1:])\n",
        "\n",
        "                if audio_text not in audio_dic:\n",
        "                    audio_dic[audio_text] = [[aud_name, aud_pth]]\n",
        "                else:\n",
        "                    audio_dic[audio_text].append([aud_name, aud_pth])\n",
        "            new_audio_dic = {k: v[0] for k, v in audio_dic.items() if len(v) == 1}\n",
        "\n",
        "            for audio_text, value in new_audio_dic.items():\n",
        "                aud_name, aud_pth = value\n",
        "\n",
        "                if audio_text in real_dic:\n",
        "                    role = real_dic[audio_text]\n",
        "                    new_aud_dir = os.path.join(self.audio_roles_dir, role)\n",
        "                    os.makedirs(new_aud_dir, exist_ok=True)\n",
        "                    new_aud_pth = os.path.join(new_aud_dir, aud_name)\n",
        "\n",
        "                    # Copy only clips not collected before.\n",
        "                    if not os.path.exists(new_aud_pth):\n",
        "                        shutil.copy(aud_pth, new_aud_pth)\n",
        "                        i += 1\n",
        "                        print(f'{role} + 1 {aud_name},{i}')"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Lmam_AW3foxp"
      },
      "source": [
        "#### 运行4 音频特征增加\n",
        "根据标注的脚本→找到对应的音频文件→复制到标注文件夹（可以考虑直接剪切）"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "KPcqey0FgQyj"
      },
      "outputs": [],
      "source": [
        "# Customize the dict values below. Folders marked ** must already exist with the required files.\n",
        "video_config = {\"video_lis_pth\": \"/mnt/sda/baidu_disk/凉宫春日/lg_video/video\",  # **folder containing the source videos (must be prepared)\n",
        "                }\n",
        "audio_config = {\n",
        "    \"audio_model_pth\": '/mnt/sda/huggingface_weight/voicemodel/',\n",
        "    # **model weight path; download with: git clone https://huggingface.co/scixing/voicemodel\n",
        "    \"audio_roles_dir\": '/mnt/sda/baidu_disk/lg/scixing/roles',  # **manually classified role audio path\n",
        "    \"audio_out_dir\": \"/mnt/sda/baidu_disk/凉宫春日/lg_video/audio\",  # output path of the audio split from the videos\n",
        "}\n",
        "\n",
        "srt_config = {\n",
        "    \"subtitle_dir\": \"/mnt/sda/baidu_disk/lg/zim/Subtitle_SC_SRT\",  # **subtitles matching the videos; video and subtitle file names must match\n",
        "    \"srt_out_dir\": \"./srt_predict_out\",\n",
        "    # 1. Output path of the predicted role scripts. 2. After manual review, rename a file by inserting 'annotate' (a.txt -> a.annotate.txt) to enable incremental training.\n",
        "\n",
        "}\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Xprlyh_qhglq"
      },
      "source": []
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "NOo0g3TSfnMq"
      },
      "outputs": [],
      "source": [
        "# Fold the human-annotated '*annotate.txt' scripts back into the role audio folders.\n",
        "audio_increment = IncrementData(audio_config['audio_out_dir'],\n",
        "                                    audio_config['audio_roles_dir'],\n",
        "                                    srt_config['srt_out_dir'])\n",
        "\n",
        "audio_increment.process()"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "### 5. 再预测"
      ],
      "metadata": {
        "id": "Oj9Rlxqv1XVp"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# Re-run prediction with the grown dataset; output files get a '_retrain' suffix.\n",
        "audio_classification.get_pridict(class_name[1], n_neighbors, mark='retrain')"
      ],
      "metadata": {
        "id": "Qb7w_MuH1XwS"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "FSjL6uIzSi9w"
      },
      "source": []
    }
  ],
  "metadata": {
    "accelerator": "GPU",
    "colab": {
      "machine_shape": "hm",
      "toc_visible": true,
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}