{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "1d882fed-e432-4cf8-b2d2-9f081cf96f0d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'train': ['sentence', 'accents', 'audio'], 'validation': ['sentence', 'accents', 'audio'], 'test': ['sentence', 'accents', 'audio']}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Filter: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████| 100/100 [00:00<00:00, 1495.62 examples/s]\n",
      "Filter: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████| 100/100 [00:00<00:00, 1591.57 examples/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集最终大小: 100\n",
      "验证集最终大小: 100\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "Map: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████| 100/100 [00:03<00:00, 29.64 examples/s]\n",
      "Map: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████| 100/100 [00:03<00:00, 28.67 examples/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "trainable params: 442,368 || all params: 242,177,280 || trainable%: 0.1827\n",
      "验证集样本示例: "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "IOPub data rate exceeded.\n",
      "The Jupyter server will temporarily stop sending output\n",
      "to the client in order to avoid crashing it.\n",
      "To change this limit, set the config variable\n",
      "`--ServerApp.iopub_data_rate_limit`.\n",
      "\n",
      "Current values:\n",
      "ServerApp.iopub_data_rate_limit=1000000.0 (bytes/sec)\n",
      "ServerApp.rate_limit_window=3.0 (secs)\n",
      "\n",
      "You're using a WhisperTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\n",
      "/usr/local/lib/python3.10/dist-packages/torch/_dynamo/eval_frame.py:838: UserWarning: torch.utils.checkpoint: the use_reentrant parameter should be passed explicitly. In version 2.5 we will raise an exception if use_reentrant is not passed. use_reentrant=False is recommended, but if you need to preserve the current default behavior, you can pass use_reentrant=True. Refer to docs for more details on the differences between the two variants.\n",
      "  return fn(*args, **kwargs)\n",
      "/usr/local/lib/python3.10/dist-packages/torch/utils/checkpoint.py:86: UserWarning: None of the inputs have requires_grad=True. Gradients will be None\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='13' max='13' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [13/13 01:47, Epoch 1/1]\n",
       "    </div>\n",
       "    <table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       " <tr style=\"text-align: left;\">\n",
       "      <th>Step</th>\n",
       "      <th>Training Loss</th>\n",
       "      <th>Validation Loss</th>\n",
       "      <th>Wer</th>\n",
       "      <th>Cer</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>10</td>\n",
       "      <td>1.978300</td>\n",
       "      <td>1.303198</td>\n",
       "      <td>0.910000</td>\n",
       "      <td>0.198113</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table><p>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='13' max='13' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [13/13 01:05]\n",
       "    </div>\n",
       "    "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最终评估结果 - WER: 0.91%, CER: 0.20\n",
      "模型和processor已保存到 models/whisper-small-asr-int8\n"
     ]
    }
   ],
   "source": [
    "from transformers import AutoFeatureExtractor, AutoTokenizer, AutoProcessor, AutoModelForSpeechSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer, BitsAndBytesConfig\n",
    "from datasets import DatasetDict, Audio, Dataset, load_dataset\n",
    "from dataclasses import dataclass\n",
    "from typing import Any, Dict, List, Union\n",
    "import numpy as np\n",
    "import json\n",
    "import os\n",
    "import pandas as pd\n",
    "import torch\n",
    "import evaluate  # 新增：用于计算评估指标\n",
    "from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training\n",
    "\n",
     "@dataclass\n",
     "class DataCollatorSpeechSeq2SeqWithPadding:\n",
     "    \"\"\"Pads audio features and token labels into a uniform batch for Whisper fine-tuning.\n",
     "\n",
     "    Audio inputs and text labels need different padding, so they are padded\n",
     "    separately: input features via the feature extractor, labels via the\n",
     "    tokenizer. Padded label positions are replaced with -100 so the\n",
     "    cross-entropy loss ignores them.\n",
     "    \"\"\"\n",
     "    processor: Any  # a WhisperProcessor exposing .feature_extractor and .tokenizer\n",
     "\n",
     "    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:\n",
     "        # Pad log-mel input features to a uniform tensor.\n",
     "        input_features = [{\"input_features\": feature[\"input_features\"]} for feature in features]\n",
     "        batch = self.processor.feature_extractor.pad(input_features, return_tensors=\"pt\")\n",
     "\n",
     "        # Pad tokenized transcripts separately with the tokenizer.\n",
     "        label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features]\n",
     "        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors=\"pt\")\n",
     "\n",
     "        # Mask padding positions with -100 so the loss ignores them.\n",
     "        labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100)\n",
     "        # If every sequence starts with BOS, drop it: the model prepends it again itself.\n",
     "        if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item():\n",
     "            labels = labels[:, 1:]\n",
     "\n",
     "        batch[\"labels\"] = labels\n",
     "        return batch\n",
    "\n",
    "def main():\n",
    "    # model_name_or_path = \"openai/whisper-large-v2\"\n",
    "    # model_dir = \"models/whisper-large-v2-asr-int8\"\n",
    "    # 使用openai/whisper-large-v2会OOM，故采用openai/whisper-small\n",
    "    model_name_or_path = \"openai/whisper-small\"  \n",
    "    model_dir = \"models/whisper-small-asr-int8\"\n",
    "    language = \"Chinese (China)\"\n",
    "    # language = \"zh\" # 或 language = \"chinese\"\n",
    "    language_abbr = \"zh-CN\"\n",
    "    task = \"transcribe\"\n",
    "    dataset_base_path = \"data/datasets/common_voice/common_voice_11_0/\"\n",
    "    # dataset_name = \"mozilla-foundation/common_voice_11_0\"\n",
    "    batch_size=8\n",
    "    # 加载并验证数据集\n",
    "    def load_dataset_splits():\n",
    "        splits = {\n",
    "            \"train\": os.path.join(dataset_base_path, \"transcript/zh-CN/dev.tsv\"),\n",
    "            \"validation\": os.path.join(dataset_base_path, \"transcript/zh-CN/dev.tsv\"),\n",
    "            \"test\": os.path.join(dataset_base_path, \"transcript/zh-CN/test.tsv\")\n",
    "        }\n",
    "        for split, path in splits.items():\n",
    "            if not os.path.exists(path):\n",
    "                raise FileNotFoundError(f\"{split} 数据集文件不存在: {path}\")\n",
    "\n",
    "        datasets = DatasetDict()\n",
    "        for split, path in splits.items():\n",
    "            df = pd.read_csv(path, sep=\"\\t\", quoting=3)\n",
    "            datasets[split] = Dataset.from_pandas(df)\n",
    "            datasets[split] = datasets[split].select(range(100))\n",
    "        return datasets\n",
    "\n",
     "    # NOTE(review): unused helper — nothing in this cell calls it. Filters a\n",
     "    # sequence down to non-negative int token ids before decoding.\n",
     "    def filter_valid_token_ids(token_ids):\n",
     "        return [token_id for token_id in token_ids if isinstance(token_id, int) and token_id >= 0]\n",
     "    # NOTE(review): unused helper — nothing in this cell calls it. Ensures every\n",
     "    # token id falls inside the model's vocabulary range, raising ValueError otherwise.\n",
     "    def check_token_ids_range(token_ids, vocab_size):\n",
     "        for token_id in token_ids:\n",
     "            if not (0 <= token_id < vocab_size):\n",
     "                raise ValueError(f\"Token ID {token_id} is out of range [0, {vocab_size})\")\n",
    "\n",
    "    # 计算WER指标（计算WER（词错误率）和CER（字错误率））\n",
    "    def compute_metrics(pred):\n",
    "        wer_metric = evaluate.load(\"wer\")\n",
    "        cer_metric = evaluate.load(\"cer\")\n",
    "    \n",
    "        pred_ids = pred.predictions\n",
    "        label_ids = pred.label_ids\n",
    "    \n",
    "        # 确保 pred_ids 和 label_ids 是 numpy 数组\n",
    "        if not isinstance(pred_ids, np.ndarray):\n",
    "            pred_ids = np.array(pred_ids)\n",
    "        if not isinstance(label_ids, np.ndarray):\n",
    "            label_ids = np.array(label_ids)\n",
    "    \n",
    "        # 过滤无效 token ID（如 -100）\n",
    "        def filter_invalid_token_ids(token_ids):\n",
    "            return [token_id for token_id in token_ids if isinstance(token_id, (int, np.integer)) and token_id >= 0]\n",
    "    \n",
    "        # 处理预测结果（如果是 logits，取 argmax）\n",
    "        if len(pred_ids.shape) == 3:  # [batch_size, seq_len, vocab_size]\n",
    "            pred_ids = np.argmax(pred_ids, axis=-1)\n",
    "        \n",
    "        # 过滤无效 token ID\n",
    "        pred_ids = [filter_invalid_token_ids(seq) for seq in pred_ids]\n",
    "        label_ids = [filter_invalid_token_ids(seq) for seq in label_ids]\n",
    "    \n",
    "        # 检查是否为空\n",
    "        if len(pred_ids) == 0 or len(label_ids) == 0:\n",
    "            return {\"wer\": 1.0, \"cer\": 1.0}\n",
    "    \n",
    "        # 解码\n",
    "        processor = AutoProcessor.from_pretrained(model_name_or_path)\n",
    "        pred_str = processor.batch_decode(pred_ids, skip_special_tokens=True)\n",
    "        label_str = processor.batch_decode(label_ids, skip_special_tokens=True)\n",
    "    \n",
    "        # 计算 WER 和 CER\n",
    "        wer = wer_metric.compute(predictions=pred_str, references=label_str)\n",
    "        cer = cer_metric.compute(predictions=pred_str, references=label_str)\n",
    "    \n",
    "        return {\"wer\": wer, \"cer\": cer}\n",
    "\n",
    "    feature_extractor = AutoFeatureExtractor.from_pretrained(model_name_or_path)\n",
    "    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, language=language, task=task)\n",
    "    processor = AutoProcessor.from_pretrained(model_name_or_path, language=language, task=task)\n",
    "\n",
    "     # 直接联网一直有问题，所以改由本地加载数据集\n",
    "    # common_voice = DatasetDict()\n",
    "    # common_voice[\"train\"] = load_dataset(dataset_name, language_abbr, split=\"train\")\n",
    "    # common_voice[\"validation\"] = load_dataset(dataset_name, language_abbr, split=\"validation\")\n",
    "\n",
    "    common_voice = load_dataset_splits()\n",
    "    for split in common_voice.keys():\n",
    "        dataset = common_voice[split]\n",
    "        if split in [\"train\", \"validation\"]:\n",
    "            audio_base_path = os.path.join(dataset_base_path, \"audio/zh-CN/dev/zh-CN_dev_0/\")\n",
    "        else:\n",
    "            audio_base_path = os.path.join(dataset_base_path, \"audio/zh-CN/test/zh-CN_test_0/\")\n",
    "\n",
    "        audio_paths = []\n",
    "        for path in dataset[\"path\"]:\n",
    "            full_path = os.path.join(audio_base_path, path)\n",
    "            audio_paths.append(full_path if os.path.exists(full_path) else None)\n",
    "\n",
    "        common_voice[split] = dataset.add_column(\"audio\", audio_paths)\n",
    "\n",
    "    common_voice = common_voice.remove_columns(\n",
    "        [\"age\", \"client_id\", \"down_votes\", \"gender\", \"locale\", \"path\", \"segment\", \"up_votes\"]\n",
    "    )\n",
    "    common_voice = common_voice.cast_column(\"audio\", Audio(sampling_rate=16000))\n",
    "\n",
     "    def prepare_dataset(batch):\n",
     "        \"\"\"Map one example to Whisper model inputs.\n",
     "\n",
     "        Produces log-mel input features from the audio and label token ids from\n",
     "        the transcript. NOTE(review): returning None from a non-batched\n",
     "        datasets.map() does not drop the example — verify that invalid rows are\n",
     "        actually filtered out before training.\n",
     "        \"\"\"\n",
     "        audio = batch[\"audio\"]\n",
     "        # Skip examples whose audio failed to load or is empty.\n",
     "        if audio[\"array\"] is None or len(audio[\"array\"]) == 0:\n",
     "            return None\n",
     "        \n",
     "        # Extract log-mel input features from the raw waveform.\n",
     "        input_features = feature_extractor(\n",
     "            audio[\"array\"], sampling_rate=audio[\"sampling_rate\"]\n",
     "        ).input_features[0]\n",
     "        \n",
     "        # Encode the transcript into label token ids.\n",
     "        labels = tokenizer(batch[\"sentence\"]).input_ids\n",
     "        \n",
     "        if input_features is None or len(labels) == 0:\n",
     "            return None\n",
     "        \n",
     "        return {\n",
     "            \"input_features\": input_features,\n",
     "            \"labels\": labels,\n",
     "        }\n",
    "        \n",
    "    print(common_voice.column_names) # 打印数据集的字段信息\n",
    "    # print(common_voice[\"train\"][:10])\n",
    "    small_common_voice = DatasetDict()\n",
    "    # small_common_voice[\"train\"] = common_voice[\"train\"].shuffle(seed=16).select(range(640))\n",
    "    # small_common_voice[\"validation\"] = common_voice[\"validation\"].shuffle(seed=16).select(range(320))\n",
    "    # 修改原来的数据准备代码\n",
    "    # 动态选择数据量（不超过实际大小）\n",
    "    train_size = min(640, len(common_voice[\"train\"]))\n",
    "    val_size = min(320, len(common_voice[\"validation\"]))\n",
    "    small_common_voice[\"train\"] = (\n",
    "        common_voice[\"train\"]\n",
    "        .filter(lambda x: len(x[\"sentence\"]) > 0)  # 先过滤有效数据\n",
    "        .shuffle(seed=16)\n",
    "        .select(range(train_size))  # 再随机选择\n",
    "    )\n",
    "    small_common_voice[\"validation\"] = (\n",
    "        common_voice[\"validation\"]\n",
    "        .filter(lambda x: len(x[\"sentence\"]) > 0)  # 先过滤有效数据\n",
    "        .shuffle(seed=16)\n",
    "        .select(range(val_size))  # 再随机选择\n",
    "    )\n",
    "    print(\"训练集最终大小:\", len(small_common_voice[\"train\"]))\n",
    "    print(\"验证集最终大小:\", len(small_common_voice[\"validation\"]))\n",
    "    tokenized_common_voice = small_common_voice.map(prepare_dataset)\n",
    "\n",
    "    data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)\n",
    "    # model = AutoModelForSpeechSeq2Seq.from_pretrained(model_name_or_path, load_in_8bit=True, device_map=\"auto\")\n",
    "    # 配置 4-bit 量化\n",
    "    bnb_config = BitsAndBytesConfig(\n",
    "        load_in_4bit=True,\n",
    "        bnb_4bit_use_double_quant=True,  # 可选：双重量化，进一步压缩模型\n",
    "        bnb_4bit_quant_type=\"nf4\",       # 量化类型（推荐 nf4）\n",
    "        bnb_4bit_compute_dtype=torch.bfloat16  # 计算时使用的数据类型\n",
    "    )\n",
    "    \n",
    "    model = AutoModelForSpeechSeq2Seq.from_pretrained(\n",
    "        model_name_or_path,\n",
    "        quantization_config=bnb_config,  # 传入量化配置\n",
    "        device_map=\"auto\"\n",
    "    )\n",
    "    model.config.forced_decoder_ids = None\n",
    "    model.config.suppress_tokens = []\n",
    "\n",
    "    # model = prepare_model_for_int8_training(model) # 新版已弃用，改由prepare_model_for_kbit_training\n",
    "    model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True) # use_gradient_checkpointing=True添加梯度检测\n",
    "\n",
    "    \n",
    "    # from peft import LoraConfig, PeftModel, LoraModel, LoraConfig, get_peft_model\n",
    "    config = LoraConfig(\n",
    "        r=4, lora_alpha=64, target_modules=[\"q_proj\", \"v_proj\"],\n",
    "        lora_dropout=0.05, bias=\"none\"\n",
    "    )\n",
    "    peft_model = get_peft_model(model, config)\n",
    "    peft_model.print_trainable_parameters()\n",
    "\n",
    "  \n",
    "\n",
    "    # 修改训练参数：更频繁的评估（将eval_strategy从\"epoch\"改为\"steps\"，并设置eval_steps=10。每10个训练step评估一次，更及时监控性能）\n",
    "    training_args = Seq2SeqTrainingArguments(\n",
    "        output_dir=model_dir,\n",
    "        per_device_train_batch_size=batch_size,\n",
    "        learning_rate=1e-3,\n",
    "        num_train_epochs=1,\n",
    "        eval_strategy=\"steps\",  # 改为按step评估\n",
    "        eval_steps=10,         # 每10个step评估一次\n",
    "        per_device_eval_batch_size=batch_size,\n",
    "        generation_max_length=128,\n",
    "        logging_steps=5,       # 更频繁的日志输出\n",
    "        remove_unused_columns=False,\n",
    "        label_names=[\"labels\"],\n",
    "        dataloader_pin_memory=False,\n",
    "        load_best_model_at_end=True,  # 新增：保存最佳模型\n",
    "        metric_for_best_model=\"wer\",  # 根据WER选择最佳模型\n",
    "        greater_is_better=False,      # WER越小越好\n",
    "        predict_with_generate=True,  # 关键：确保生成预测结果\n",
    "    )\n",
    "    print(\"验证集样本示例:\", tokenized_common_voice[\"validation\"][0])\n",
    "    trainer = Seq2SeqTrainer(\n",
    "        args=training_args,\n",
    "        model=peft_model,\n",
    "        train_dataset=tokenized_common_voice[\"train\"],\n",
    "        eval_dataset=tokenized_common_voice[\"validation\"],\n",
    "        data_collator=data_collator,\n",
    "        compute_metrics=compute_metrics,  # 新增：传入评估函数\n",
    "        processing_class=processor.tokenizer,    # 避免保存时的警告\n",
    "    )\n",
    "    peft_model.config.use_cache = False\n",
    "\n",
    "    print(\"开始训练（AMD GPU兼容模式）\")\n",
    "    trainer.train()\n",
    "\n",
    "    # 最终评估\n",
    "    eval_results = trainer.evaluate()\n",
    "    # print(eval_results)\n",
    "    print(f\"最终评估结果 - WER: {eval_results['eval_wer']:.2f}%, CER: {eval_results['eval_cer']:.2f}\")\n",
    "\n",
    "    trainer.save_model(model_dir)\n",
    "    # 新增：保存processor（关键修复！处理上面代码保存模型时缺失preprocessor_config.json问题）\n",
    "    processor.save_pretrained(model_dir)\n",
    "    print(f\"模型和processor已保存到 {model_dir}\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "f5a3af64-abfb-4c3d-8f4f-3215596ce704",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.10/dist-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "Filter: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████| 100/100 [00:00<00:00, 437.93 examples/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "测试集最终大小: 100\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Map: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████| 100/100 [00:03<00:00, 30.32 examples/s]\n",
      "You're using a WhisperTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='13' max='13' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [13/13 00:20]\n",
       "    </div>\n",
       "    "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最终评估结果 - WER: 1.04%, CER: 2.31\n"
     ]
    }
   ],
   "source": [
    "# 使用测试集评估保存后的模型\n",
    "from transformers import AutoFeatureExtractor, AutoTokenizer, AutoProcessor, AutoModelForSpeechSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer\n",
    "from datasets import DatasetDict, Audio, Dataset\n",
    "from dataclasses import dataclass\n",
    "from typing import Any, Dict, List, Union\n",
    "import numpy as np\n",
    "import os\n",
    "import pandas as pd\n",
    "import torch\n",
    "import evaluate  # 新增：用于计算WER\n",
    "\n",
     "@dataclass\n",
     "class DataCollatorSpeechSeq2SeqWithPadding:\n",
     "    \"\"\"Pads audio features and token labels into a uniform batch.\n",
     "\n",
     "    NOTE(review): duplicated verbatim from the training cell above — consider\n",
     "    moving it into a shared .py module imported by both cells.\n",
     "    \"\"\"\n",
     "    processor: Any  # a WhisperProcessor exposing .feature_extractor and .tokenizer\n",
     "\n",
     "    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:\n",
     "        # Pad log-mel input features and tokenized labels separately.\n",
     "        input_features = [{\"input_features\": feature[\"input_features\"]} for feature in features]\n",
     "        batch = self.processor.feature_extractor.pad(input_features, return_tensors=\"pt\")\n",
     "\n",
     "        label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features]\n",
     "        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors=\"pt\")\n",
     "\n",
     "        # Replace padding with -100 so the loss ignores those positions.\n",
     "        labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100)\n",
     "        # Strip a leading BOS if every row has one; the model re-adds it itself.\n",
     "        if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item():\n",
     "            labels = labels[:, 1:]\n",
     "\n",
     "        batch[\"labels\"] = labels\n",
     "        return batch\n",
    "\n",
    "def main():\n",
    "    model_name_or_path = \"models/whisper-small-asr-int8\"\n",
    "    language = \"zh\" # 或 language = \"chinese\"\n",
    "    # language_abbr = \"zh-CN\"\n",
    "    task = \"transcribe\"\n",
    "    dataset_base_path = \"data/datasets/common_voice/common_voice_11_0/\"\n",
    "\n",
    "    def load_dataset_splits():\n",
    "        splits = {\n",
    "            \"test\": os.path.join(dataset_base_path, \"transcript/zh-CN/test.tsv\")\n",
    "        }\n",
    "        for split, path in splits.items():\n",
    "            if not os.path.exists(path):\n",
    "                raise FileNotFoundError(f\"{split} 数据集文件不存在: {path}\")\n",
    "\n",
    "        datasets = DatasetDict()\n",
    "        for split, path in splits.items():\n",
    "            df = pd.read_csv(path, sep=\"\\t\", quoting=3)\n",
    "            datasets[split] = Dataset.from_pandas(df)\n",
    "            datasets[split] = datasets[split].select(range(100))\n",
    "        return datasets\n",
    "\n",
    "    # 计算WER指标（计算WER（词错误率）和CER（字错误率））\n",
    "    def compute_metrics(pred):\n",
    "        wer_metric = evaluate.load(\"wer\")\n",
    "        cer_metric = evaluate.load(\"cer\")\n",
    "    \n",
    "        pred_ids = pred.predictions\n",
    "        label_ids = pred.label_ids\n",
    "    \n",
    "        # 确保 pred_ids 和 label_ids 是列表\n",
    "        if isinstance(pred_ids, tuple):\n",
    "            pred_ids = pred_ids[0]  # 如果是 (logits, ...) 元组，取第一个元素\n",
    "        if isinstance(label_ids, tuple):\n",
    "            label_ids = label_ids[0]\n",
    "    \n",
    "        # 处理预测结果（如果是 logits，取 argmax）\n",
    "        if len(pred_ids.shape) == 3:  # [batch_size, seq_len, vocab_size]\n",
    "            pred_ids = np.argmax(pred_ids, axis=-1)\n",
    "    \n",
    "        # 确保 pred_ids 和 label_ids 是列表的列表\n",
    "        if isinstance(pred_ids, np.ndarray):\n",
    "            pred_ids = pred_ids.tolist()\n",
    "        if isinstance(label_ids, np.ndarray):\n",
    "            label_ids = label_ids.tolist()\n",
    "    \n",
    "        # 过滤无效 token ID（如 -100）\n",
    "        def filter_invalid_token_ids(token_ids):\n",
    "            return [token_id for token_id in token_ids if isinstance(token_id, (int, np.integer)) and token_id >= 0]\n",
    "    \n",
    "        pred_ids = [filter_invalid_token_ids(seq) for seq in pred_ids]\n",
    "        label_ids = [filter_invalid_token_ids(seq) for seq in label_ids]\n",
    "    \n",
    "        # 检查是否为空\n",
    "        if len(pred_ids) == 0 or len(label_ids) == 0:\n",
    "            return {\"wer\": 1.0, \"cer\": 1.0}\n",
    "    \n",
    "        # 解码\n",
    "        processor = AutoProcessor.from_pretrained(model_name_or_path)\n",
    "        pred_str = processor.batch_decode(pred_ids, skip_special_tokens=True)\n",
    "        label_str = processor.batch_decode(label_ids, skip_special_tokens=True)\n",
    "    \n",
    "        # 计算 WER 和 CER\n",
    "        wer = wer_metric.compute(predictions=pred_str, references=label_str)\n",
    "        cer = cer_metric.compute(predictions=pred_str, references=label_str)\n",
    "    \n",
    "        return {\"wer\": wer, \"cer\": cer}\n",
    "\n",
    "    def prepare_dataset(batch):\n",
    "        audio = batch[\"audio\"]\n",
    "        if audio[\"array\"] is None or len(audio[\"array\"]) == 0:\n",
    "            return None\n",
    "        \n",
    "        # 提取输入特征\n",
    "        input_features = feature_extractor(\n",
    "            audio[\"array\"], sampling_rate=audio[\"sampling_rate\"]\n",
    "        ).input_features[0]\n",
    "        \n",
    "        # 编码标签\n",
    "        labels = tokenizer(batch[\"sentence\"]).input_ids\n",
    "        \n",
    "        if input_features is None or len(labels) == 0:\n",
    "            return None\n",
    "        \n",
    "        return {\n",
    "            \"input_features\": input_features,\n",
    "            \"labels\": labels,\n",
    "        }\n",
    "        \n",
    "    batch_size = 8\n",
    "\n",
    "    # 加载数据集\n",
    "    common_voice = load_dataset_splits()\n",
    "    for split in common_voice.keys():\n",
    "        dataset = common_voice[split]\n",
    "        if split in [\"train\", \"validation\"]:\n",
    "            audio_base_path = os.path.join(dataset_base_path, \"audio/zh-CN/dev/zh-CN_dev_0/\")\n",
    "        else:\n",
    "            audio_base_path = os.path.join(dataset_base_path, \"audio/zh-CN/test/zh-CN_test_0/\")\n",
    "\n",
    "        audio_paths = []\n",
    "        for path in dataset[\"path\"]:\n",
    "            full_path = os.path.join(audio_base_path, path)\n",
    "            audio_paths.append(full_path if os.path.exists(full_path) else None)\n",
    "\n",
    "        common_voice[split] = dataset.add_column(\"audio\", audio_paths)\n",
    "\n",
    "    feature_extractor = AutoFeatureExtractor.from_pretrained(model_name_or_path)\n",
    "    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, language=language, task=task)\n",
    "    processor = AutoProcessor.from_pretrained(model_name_or_path, language=language, task=task)\n",
    "\n",
    "    common_voice = common_voice.remove_columns(\n",
    "        [\"age\", \"client_id\", \"down_votes\", \"gender\", \"locale\", \"path\", \"segment\", \"up_votes\"]\n",
    "    )\n",
    "    common_voice = common_voice.cast_column(\"audio\", Audio(sampling_rate=16000))\n",
    "\n",
    "\n",
    "    small_common_voice = DatasetDict()\n",
    "    test_size = min(320, len(common_voice[\"test\"]))\n",
    "    small_common_voice[\"test\"] = (\n",
    "        common_voice[\"test\"]\n",
    "        .filter(lambda x: len(x[\"sentence\"]) > 0)  # 先过滤有效数据\n",
    "        .shuffle(seed=16)\n",
    "        .select(range(test_size))  # 再随机选择\n",
    "    )\n",
    "    print(\"测试集最终大小:\", len(small_common_voice[\"test\"]))\n",
    "    tokenized_common_voice = small_common_voice.map(prepare_dataset)\n",
    "\n",
    "    data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)\n",
    "    model = AutoModelForSpeechSeq2Seq.from_pretrained(model_name_or_path, device_map=\"auto\")\n",
    "\n",
    "    # 修改训练参数：更频繁的评估（将eval_strategy从\"epoch\"改为\"steps\"，并设置eval_steps=10。每10个训练step评估一次，更及时监控性能）\n",
    "    training_args = Seq2SeqTrainingArguments(\n",
    "        output_dir=\"D:/tmp/train/output\",\n",
    "        per_device_train_batch_size=batch_size,\n",
    "        learning_rate=1e-3,\n",
    "        num_train_epochs=1,\n",
    "        eval_strategy=\"steps\",  # 改为按step评估\n",
    "        eval_steps=10,         # 每10个step评估一次\n",
    "        per_device_eval_batch_size=batch_size,\n",
    "        generation_max_length=128,\n",
    "        logging_steps=5,       # 更频繁的日志输出\n",
    "        remove_unused_columns=False,\n",
    "        label_names=[\"labels\"],\n",
    "        dataloader_pin_memory=False,\n",
    "        load_best_model_at_end=True,  # 新增：保存最佳模型\n",
    "        metric_for_best_model=\"wer\",  # 根据WER选择最佳模型\n",
    "        greater_is_better=False,      # WER越小越好\n",
    "    )\n",
    "\n",
    "    trainer = Seq2SeqTrainer(\n",
    "        args=training_args,\n",
    "        model=model,\n",
    "        eval_dataset=tokenized_common_voice[\"test\"],\n",
    "        data_collator=data_collator,\n",
    "        compute_metrics=compute_metrics,  # 新增：传入评估函数\n",
    "        processing_class=processor.tokenizer,    # 避免保存时的警告\n",
    "    )\n",
    "    # 最终评估\n",
    "    eval_results = trainer.evaluate()\n",
    "    # print(eval_results)\n",
    "    print(f\"最终评估结果 - WER: {eval_results['eval_wer']:.2f}%, CER: {eval_results['eval_cer']:.2f}\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3796854b-0f26-48de-9e2c-024199cba640",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
