# -*- coding:utf-8 -*-
from typing import List

import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import asr_utils  # 同级目录
from pyctcdecode import build_ctcdecoder
from asr_config import Config
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
import editdistance
import os
import glob
import swanlab
# SECURITY(review): hard-coded API key committed to source — rotate this key and
# load it from the environment (e.g. os.environ["SWANLAB_API_KEY"]) instead.
swanlab.login (api_key='xfyf3UHaFK0bOJQ64l3K8')
# 导入 Transformer 编码器
from asr_transformer import Encoder, pos_sinusoid_embedding # 确保 transformer.py 在同一目录下

# Network hyper-parameters
b_stddev = 0.046875  # std-dev for bias initialisation
h_stddev = 0.046875  # std-dev for weight initialisation

n_hidden = 1024
n_hidden_1 = 1024
n_hidden_2 = 1024
n_hidden_3 = 2 * 1024  # 1024 (expansion layer)
n_hidden_4 = 1024      # added layer
n_hidden_5 = 1024
n_hidden_6 = 1024     # added layer
n_cell_dim = 1024 # no longer feeds an LSTM in this Transformer version; kept as a dimension reference

learning_rate = 0.00005  # learning rate
# NOTE(review): despite the "keep" in the name, this is used below as
# p = 1 - keep_dropout_rate, i.e. dropout probability 0.9 — confirm intent.
keep_dropout_rate = 0.1  # dropout probability
relu_clip = 20  # clipping ceiling for the ReLU activation

n_input = 39  # number of MFCC coefficients per frame
n_context = 12  # number of context frames on each side of every time step
batch_size = 16  # samples per batch

# Transformer encoder parameters
d_model = n_hidden_5  # Transformer input dim; matches the feed-forward stack's output dim
nhead = 8             # number of attention heads
num_encoder_layers = 6 # number of encoder layers
d_ff = 2048           # feed-forward network dim inside the encoder
max_seq_len = 2048    # maximum sequence length, used by the positional encoding

# Load the train/test wav file paths together with their transcript labels.
(train_wav_files, train_text_labels), (test_wav_files, test_text_labels) = asr_utils.get_wavs_lables()

# Print a one-line summary of the network dimensions (label/value pairs,
# flattened so the console output matches the original multi-arg print).
_dim_summary = (
    ('MFCC特征维度：', n_input),
    ('上下文数：', n_context),
    ('输入维度：', n_input + (2 * n_input * n_context)),  # actual network input width
    ('隐藏层维度：', n_hidden),
    ('Transformer编码器维度：', d_model),
    ('Transformer编码器层数：', num_encoder_layers),
    ('最终输出维度：', n_hidden_6),
)
print(*(part for pair in _dim_summary for part in pair))
class ASR(nn.Module):
    """CTC-based speech recogniser.

    Architecture: a five-layer feed-forward front-end, a Transformer encoder
    (replacing the LSTM of the original model), and a two-layer output head.
    Trained with CTC loss; decoded with a pyctcdecode beam search backed by a
    3-gram language model (a greedy decoder is available as a fallback).
    """

    def __init__(self, wav_files, text_labels, words_size, words, word_num_map, is_training=True):
        """
        :param wav_files: training audio file paths
        :param text_labels: transcript text for each audio file
        :param words_size: vocabulary size (output dim is words_size + 1; +1 for the CTC blank)
        :param words: index -> character lookup table
        :param word_num_map: character -> index lookup table
        :param is_training: train/test mode flag (kept for API compatibility;
            forward() actually relies on nn.Module's built-in ``self.training``)
        """
        super(ASR, self).__init__()
        self.conf = Config()            # configuration file
        self.wav_files = wav_files      # audio file paths
        self.text_labels = text_labels  # text transcripts
        self.words_size = words_size
        self.words = words
        self.word_num_map = word_num_map

        self.is_training = is_training

        # Build the network layers
        self._build_layers()

    def _build_layers(self):
        """Build the deeper network: five front-end FC layers, a Transformer
        encoder in place of the original LSTM, and the output FC stack."""
        # Front-end fully-connected stack
        input_size = n_input + (2 * n_input * n_context)
        self.layer1 = nn.Linear(input_size, n_hidden_1)
        self.ln1 = nn.LayerNorm(n_hidden_1)

        self.layer2 = nn.Linear(n_hidden_1, n_hidden_2)
        self.ln2 = nn.LayerNorm(n_hidden_2)

        self.layer3 = nn.Linear(n_hidden_2, n_hidden_3)
        self.ln3 = nn.LayerNorm(n_hidden_3)

        self.layer4 = nn.Linear(n_hidden_3, n_hidden_4)
        self.ln4 = nn.LayerNorm(n_hidden_4)

        self.layer5 = nn.Linear(n_hidden_4, n_hidden_5)
        self.ln5 = nn.LayerNorm(n_hidden_5)

        # Transformer encoder replacing the LSTM
        self.encoder = Encoder(
            dropout_emb=keep_dropout_rate,  # embedding dropout
            dropout_posffn=0.5,             # position-wise FFN dropout
            dropout_attn=0.1,               # attention dropout
            num_layers=num_encoder_layers,
            enc_dim=d_model,
            num_heads=nhead,
            dff=d_ff,
            tgt_len=max_seq_len,
        )

        # Output stack; the encoder output dim is d_model (== n_hidden_5)
        self.layer6 = nn.Linear(d_model, n_hidden_6)
        self.ln6 = nn.LayerNorm(n_hidden_6)
        self.layer7 = nn.Linear(n_hidden_6, self.words_size + 1)  # CTC output layer

        # NOTE(review): with keep_dropout_rate = 0.1 this is p = 0.9 (90% of
        # units dropped) — confirm whether a "keep" probability was intended.
        # This module is also never used in forward() (which calls F.dropout
        # directly); kept for structure compatibility.
        self.dropout = nn.Dropout(1 - keep_dropout_rate)

        # Initialise weights
        self._initialize_weights()

        # Optimizer (recreated in init_session(); kept here so the module is
        # usable without an explicit session init)
        self.optimizer = optim.AdamW(self.parameters(), lr=learning_rate, weight_decay=1e-5)

    def _initialize_weights(self):
        """Initialise every Linear layer with a small normal distribution,
        mirroring the original TensorFlow initialisation.

        NOTE: the original code selected between h_stddev and b_stddev per
        layer, but both constants equal 0.046875, so a single std is used here
        (behaviour unchanged). This loop also re-initialises the Linear layers
        inside the Transformer encoder — presumably intentional; verify against
        asr_transformer's own initialisation.
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, std=h_stddev)
                if m.bias is not None:
                    nn.init.normal_(m.bias, std=b_stddev)

    def forward(self, input_tensor, seq_length):
        """Run the network.

        :param input_tensor: float tensor of shape [batch, max_time, features]
        :param seq_length: int tensor [batch] with the valid frame count per utterance
        :return: logits of shape [max_time, batch, words_size + 1]
            (time-major, as required by nn.CTCLoss)
        """
        batch_size, max_time_steps, features = input_tensor.size()

        # Fold time into the batch dim so the FC stack sees [batch * time, features]
        batch_x = input_tensor.view(-1, features)

        # Front-end stack: Linear -> LayerNorm -> clipped ReLU -> dropout.
        # NOTE(review): p = 1 - keep_dropout_rate = 0.9 — see module constants.
        layer_1 = F.dropout(torch.clamp(F.relu(self.ln1(self.layer1(batch_x))), 0, relu_clip),
                            p=1 - keep_dropout_rate, training=self.training)
        layer_2 = F.dropout(torch.clamp(F.relu(self.ln2(self.layer2(layer_1))), 0, relu_clip),
                            p=1 - keep_dropout_rate, training=self.training)
        layer_3 = F.dropout(torch.clamp(F.relu(self.ln3(self.layer3(layer_2))), 0, relu_clip),
                            p=1 - keep_dropout_rate, training=self.training)
        layer_4 = F.dropout(torch.clamp(F.relu(self.ln4(self.layer4(layer_3))), 0, relu_clip),
                            p=1 - keep_dropout_rate, training=self.training)
        layer_5 = F.dropout(torch.clamp(F.relu(self.ln5(self.layer5(layer_4))), 0, relu_clip),
                            p=1 - keep_dropout_rate, training=self.training)

        # Back to 3-D for the encoder: [batch, max_time, n_hidden_5]
        encoder_input = layer_5.view(batch_size, max_time_steps, -1)

        # 1. Build a 2-D padding mask from seq_length (True = valid frame)
        max_len = encoder_input.size(1)
        mask = torch.arange(max_len, device=encoder_input.device)[None, :] < \
            seq_length.to(encoder_input.device)[:, None]
        # 2. Expand to a 3-D attention mask [batch, max_len, max_len] to match
        #    the Transformer's internal assertion
        mask = mask.unsqueeze(1).repeat(1, max_len, 1)

        # Transformer encoder over the padded batch
        encoder_output = self.encoder(encoder_input, seq_length, mask=mask)

        # Flatten the encoder output for the output FC stack
        outputs = encoder_output.contiguous().view(-1, d_model)

        # Output stack
        layer_6 = F.dropout(torch.clamp(F.relu(self.ln6(self.layer6(outputs))), 0, relu_clip),
                            p=1 - keep_dropout_rate, training=self.training)
        layer_7 = self.layer7(layer_6)

        # [batch, max_time, classes] -> time-major [max_time, batch, classes]
        logits = layer_7.view(batch_size, max_time_steps, self.words_size + 1)
        logits = logits.transpose(0, 1)

        return logits

    def compute_loss(self, logits, texts, seq_length):
        """Mean CTC loss over a batch.

        :param logits: time-major model output [T, B, C]
        :param texts: list of 1-D LongTensors (one label sequence per utterance),
            or a single tensor
        :param seq_length: valid input length per utterance [B]
        :return: scalar loss tensor
        """
        log_probs = F.log_softmax(logits, dim=2)

        if isinstance(texts, list) and len(texts) > 0:
            # Concatenate all label sequences; CTCLoss accepts the flattened form
            targets = torch.cat(texts, dim=0)
            target_lengths = torch.LongTensor([len(text) for text in texts]).to(targets.device)
        else:
            # texts is already a tensor
            targets = texts
            target_lengths = torch.LongTensor([len(texts)]).to(targets.device)

        # Keep the input lengths on the same device as the targets
        input_lengths = seq_length
        if input_lengths.device != targets.device:
            input_lengths = input_lengths.to(targets.device)

        # blank index 0; zero_infinity guards against inf losses on short inputs
        loss_fn = nn.CTCLoss(blank=0, reduction='mean', zero_infinity=True)
        loss = loss_fn(log_probs, targets, input_lengths, target_lengths)
        return loss

    def init_session(self):
        """Prepare for training/inference: choose the device, (re)build the
        optimizer and LR scheduler, resume from the newest checkpoint when one
        exists, and initialise swanlab logging."""
        self.savedir = self.conf.get("FILE_DATA").savedir
        # PyTorch needs no explicit session; just pick the device
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.to(self.device)

        # Optimizer plus loss-driven learning-rate decay
        self.optimizer = optim.AdamW(self.parameters(), lr=learning_rate, weight_decay=1e-5)
        self.scheduler = ReduceLROnPlateau(
            self.optimizer,
            mode='min',    # minimise the validation loss
            factor=0.6,    # LR decay factor
            patience=6,    # epochs to wait before decaying
            verbose=True,
            min_lr=1e-7,
            eps=1e-8       # minimal significant LR change
        )

        # Epoch to resume from (0 = train from scratch)
        self.startepo = 0

        # Try to resume from the newest checkpoint
        if os.path.exists(self.savedir):
            model_files = glob.glob(os.path.join(self.savedir, "Transformer_epoch_*.pth"))
            if model_files:
                # Sort by epoch number and take the newest checkpoint
                model_files.sort(key=lambda x: int(x.split('_')[-1].split('.')[0]))
                latest_model = model_files[-1]

                try:
                    checkpoint = torch.load(latest_model, map_location=self.device)
                    self.load_state_dict(checkpoint['model_state_dict'])
                    self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                    self.startepo = checkpoint['epoch'] + 1

                    print(
                        f"√ 成功从 {latest_model} 加载模型，继续从epoch {self.startepo} 开始训练，学习率为：{self.optimizer.param_groups[0]['lr']}")
                except Exception as e:
                    print(f"加载模型失败: {e}")
            else:
                print("未找到预训练模型，从头开始训练")
        else:
            print(f"保存目录 {self.savedir} 不存在，从头开始训练")
            # Create the save directory
            os.makedirs(self.savedir, exist_ok=True)
        try:
            swanlab.init(
                project="asr_trans",
                workspace="1228508641",
                resume=True,
                id="x7ypjvpxx24ar8510dpz4",  # 21-character experiment id
                config={
                    "learning_rate": learning_rate,
                    "batch_size": batch_size,
                    "n_input": n_input,
                    "n_context": n_context,
                    "n_hidden": n_hidden,
                    "n_cell_dim": n_cell_dim,
                    "keep_dropout_rate": keep_dropout_rate,
                }
            )
        except Exception as e:
            print(f"初始化SwanLab失败: {e}")

    def train_model(self):
        """Full training loop: resume from a checkpoint if available, iterate
        epochs over the training set, log to swanlab, evaluate after every
        epoch (the test loss drives the LR scheduler), and checkpoint every
        10 epochs."""
        epochs = 300  # total number of epochs
        self.init_session()
        section = '\n{0:=^40}\n'
        print(section.format(f'开始训练,训练集大小：{len(self.wav_files)}'))

        train_start = time.time()
        for epoch in range(epochs):
            epoch_start = time.time()
            # Skip epochs already covered by a resumed checkpoint
            if epoch < self.startepo:
                continue

            print("第：", epoch, " 次迭代，一共要迭代 ", epochs, "次")
            n_batches_epoch = int(np.ceil(len(self.text_labels) / batch_size))
            print("在本次迭代中一共循环： ", n_batches_epoch, "每次取：", batch_size)

            train_cost = 0
            # NOTE: train_err is only refreshed every 50 batches below, so the
            # epoch summary may report a stale (or zero) error rate.
            train_err = 0
            next_idx = 0

            for batch in range(n_batches_epoch):
                torch.cuda.empty_cache()  # release cached GPU memory
                # Fetch the next batch of features and labels
                next_idx, audio_features, audio_features_len, sparse_labels, wav_files = asr_utils.next_batch(
                    next_idx,
                    batch_size,
                    n_input,
                    n_context,
                    self.text_labels,
                    self.wav_files,
                    self.word_num_map)

                # To tensors on the training device
                audio_features_tensor = torch.FloatTensor(audio_features).to(self.device)
                audio_features_len_tensor = torch.IntTensor(audio_features_len).to(self.device)

                # Regroup the sparse (indices, values, shape) labels into one
                # dense index list per utterance
                texts = []
                for i in range(len(sparse_labels[1])):  # iterate over values
                    batch_idx = sparse_labels[0][i][0]  # utterance index within the batch
                    if batch_idx >= len(texts):
                        texts.extend([[] for _ in range(batch_idx - len(texts) + 1)])
                    texts[batch_idx].append(sparse_labels[1][i])

                # List of label tensors, one per utterance
                texts_tensor = [torch.LongTensor(text).to(self.device) for text in texts]

                # Forward pass
                logits = self.forward(audio_features_tensor, audio_features_len_tensor)

                # CTC loss
                batch_cost = self.compute_loss(logits, texts_tensor, audio_features_len_tensor)
                train_cost += batch_cost.item()

                # Backward pass and parameter update
                self.optimizer.zero_grad()
                batch_cost.backward()
                self.optimizer.step()

                if (batch + 1) % 50 == 0:  # report loss / error rate every 50 batches

                    # Decode the predictions
                    decoded = self.decode_ctc(logits, audio_features_len_tensor)

                    # Character error rate via edit distance
                    train_err = self.calculate_edit_distance(decoded, sparse_labels)
                    # Running mean loss for this epoch so far
                    batch_loss = train_cost / (batch + 1)
                    print('循环次数:', batch, '损失: ', batch_loss, '错误率: ', train_err)
                    # Show one example decode
                    dense_labels = asr_utils.trans_tuple_to_texts_ch(sparse_labels, self.words)
                    for orig in dense_labels[:2]:
                        if len(decoded) > 0:
                            decoded_str = asr_utils.trans_array_to_text_ch(decoded[0], self.words)
                            print('语音原始文本: {}'.format(orig))
                            print('识别出的文本:  {}'.format(decoded_str))
                        break
                    try:
                        swanlab.log({
                            'train/batch_loss': batch_loss,
                            'train/batch_error_rate': train_err,
                        })
                    except Exception as e:
                        print(f"记录训练指标失败: {e}")  # ignore swanlab logging errors
            epoch_duration = time.time() - epoch_start

            log = '迭代次数 {}/{}, 训练损失: {:.4f}, 错误率: {:.4f}, time: {:.2f} sec'
            print(log.format(epoch, epochs, train_cost / n_batches_epoch, train_err, epoch_duration))
            # Log epoch-level training metrics to swanlab
            try:
                swanlab.log({
                    "train/loss": train_cost / n_batches_epoch,
                    "train/error_rate": train_err,
                    "train/epoch": epoch,
                    "train/learning_rate": self.optimizer.param_groups[0]['lr'],
                    "train/duration": epoch_duration
                })
            except Exception as e:
                print(f"记录训练指标失败: {e}")  # ignore swanlab logging errors

            # Evaluate after each epoch; the test loss drives the LR scheduler
            test_loss = self.test_model()
            self.scheduler.step(test_loss)

            # Print the current learning rate
            current_lr = self.optimizer.param_groups[0]['lr']
            print(f'== 当前学习率: {current_lr} ==')
            if epoch % 10 == 0:  # checkpoint every 10 epochs
                try:
                    # os.path.join keeps this robust when savedir lacks a
                    # trailing separator (the loader above already uses it)
                    save_path = os.path.join(self.savedir, f"Transformer_epoch_{epoch}.pth")
                    torch.save({
                        'epoch': epoch,
                        'model_state_dict': self.state_dict(),
                        'optimizer_state_dict': self.optimizer.state_dict(),
                        'loss': train_cost,
                    }, save_path)
                    print(f'√ 模型已保存: {save_path}')
                except Exception as e:
                    print(f"保存模型时发生错误: {e}")

        train_duration = time.time() - train_start
        print('训练完成，总时长: {:.2f} min'.format(train_duration / 60))
        # Close the swanlab session
        try:
            swanlab.finish()
        except Exception as e:
            print(e)

    def decode_ctc(self, logits, seq_lengths):
        """CTC-decode a batch of outputs (analogue of tf.nn.ctc_beam_search_decoder).

        :param logits: model output, shape [T, B, C]
        :param seq_lengths: valid length of each sequence, shape [B]
        :return: list of decoded sequences, each a list of character indices
        """
        with torch.no_grad():
            log_probs = F.log_softmax(logits, dim=2)
            decoded_list = []

            # Decode each utterance independently
            for i in range(log_probs.size(1)):  # iterate over the batch
                seq_len = seq_lengths[i].item()

                # Slice out one sequence's valid frames and move to CPU
                seq_log_probs = log_probs[:seq_len, i, :].cpu()  # [T, C]

                # Beam search with LM (a greedy alternative exists in
                # _ctc_greedy_decode)
                decoded_seq = self._ctc_beam_search_decode(seq_log_probs)

                decoded_list.append(decoded_seq)

            return decoded_list

    def _extract_unigrams_from_lm(self, lm_path):
        """Parse the 1-gram section of an ARPA language-model file and return
        its single-character unigrams (falls back to the vocabulary on error)."""
        unigrams = []
        try:
            with open(lm_path, 'r', encoding='utf-8') as f:
                # Scan until the \1-grams: section begins
                in_1grams = False
                for line in f:
                    line = line.strip()
                    if line == '\\1-grams:':
                        in_1grams = True
                        continue
                    elif line.startswith('\\2-grams:') or line.startswith('\\3-grams:'):
                        break
                    elif in_1grams and line and not line.startswith('\\'):
                        # 1-gram line format: logprob word [backoff]
                        parts = line.split()
                        if len(parts) >= 2:
                            word = parts[1]
                            # Skip special markers
                            if word not in ['<s>', '</s>', 'SIL']:
                                # Character-level model: keep single characters only
                                if len(word) == 1:
                                    unigrams.append(word)
        except Exception as e:
            print(f"提取unigrams时出错: {e}")
            # Fall back to the vocabulary
            unigrams = list(self.words)

        return unigrams

    def _ctc_beam_search_decode(self, log_probs, beam_width=10):
        """Beam-search CTC decoding backed by a 3-gram language model.

        :param log_probs: log probabilities of one sequence, shape [T, C], on CPU
        :param beam_width: beam size for the search
        :return: decoded character indices (list of int)
        """
        vocab_list = self.words  # index -> character vocabulary
        if not hasattr(self, 'decoder'):  # build the decoder only once
            # Unigrams extracted from the language model
            unigrams = self._extract_unigrams_from_lm("lm_word/word.3gram.lm")

            # Make sure every vocabulary character appears in the unigrams
            for char in vocab_list:
                if char not in unigrams:
                    unigrams.append(char)
            # And the required special tokens
            for token in ['<unk>']:
                if token in vocab_list and token not in unigrams:
                    unigrams.append(token)
            print('unigrams:', unigrams)
            # Build the LM-aware CTC decoder with pyctcdecode (once)
            self.decoder = build_ctcdecoder(
                vocab_list,
                kenlm_model_path="lm_word/word.3gram.lm",
                unigrams=unigrams,
                alpha=0.1,              # low language-model weight
                beta=4.0,               # length bonus
                unk_score_offset=-0.5,  # unknown-word penalty
            )

        # pyctcdecode works on numpy arrays
        log_probs_np = log_probs.numpy()

        # Beam-search decode to a string
        beam_result = self.decoder.decode(log_probs_np, beam_width=beam_width)

        # Map characters back to indices
        result = []
        for char in beam_result:
            if char in self.word_num_map:
                result.append(self.word_num_map[char])
            elif char == '<blank>':
                result.append(0)

        return result

    def _ctc_greedy_decode(self, log_probs):
        """Greedy (best-path) CTC decoding.

        :param log_probs: log probabilities, shape [T, C]
        :return: decoded character indices (list of int)
        """
        greedy_decoder = GreedyCTCDecoder(self.words)

        # Greedy decode straight to text
        decoded_text = greedy_decoder(log_probs)

        # Map characters back to indices; the greedy decoder has already
        # removed blanks and collapsed repeats
        result = []
        for char in decoded_text:
            if char in self.word_num_map:
                result.append(self.word_num_map[char])

        return result

    def calculate_edit_distance(self, decoded_sequences, sparse_labels):
        """Character error rate (CER) via edit distance, mirroring
        tf.edit_distance from the original model.

        :param decoded_sequences: decoded index sequences from the model
        :param sparse_labels: sparse label tuple (indices, values, shape)
        :return: CER clamped to [0, 1]
        """
        total_distance = 0
        total_length = 0

        # Rebuild the reference texts from the sparse labels
        dense_labels = asr_utils.trans_tuple_to_texts_ch(sparse_labels, self.words)

        for decoded_seq, orig_text in zip(decoded_sequences, dense_labels):
            # Decoded indices -> text
            if isinstance(decoded_seq, torch.Tensor):
                decoded_text = asr_utils.trans_array_to_text_ch(decoded_seq.cpu().numpy(), self.words)
            else:
                # Plain-list decode result
                decoded_text = asr_utils.trans_array_to_text_ch(decoded_seq, self.words)

            # Accumulate edit distance against reference length
            total_distance += editdistance.eval(orig_text, decoded_text)
            total_length += len(orig_text)

        if total_length > 0:
            cer = total_distance / total_length
            return min(cer, 1.0)  # cap at 1.0
        else:
            return 0.0

    def test_model(self):
        """Evaluate the model on the test set and return the mean CTC loss
        (consumed by the LR scheduler in train_model)."""
        # Make sure the session is initialised
        if not hasattr(self, 'device'):
            self.init_session()
        # Evaluation mode (disables dropout)
        self.eval()
        # Point the data members at the test set for the duration of the test
        self.wav_files = test_wav_files
        self.text_labels = test_text_labels
        print(f'\n==开始测试。测试集大小: {len(self.wav_files)}==\n')
        # NOTE(review): evaluation starts at offset 20 into the test set — confirm intent
        next_idx = 20

        total_loss = 0  # accumulated test loss
        total_cer = 0   # accumulated character error rate
        test_count = 1000  # number of single-utterance batches to evaluate

        for index in range(test_count):
            next_idx, audio_features, audio_features_len, sparse_labels, wav_files = asr_utils.next_batch(
                next_idx,
                1,
                n_input,
                n_context,
                self.text_labels,
                self.wav_files,
                self.word_num_map)

            print('-读入测试集: ', wav_files[0], end='  ')

            # To tensors on the device
            audio_features_tensor = torch.FloatTensor(audio_features).to(self.device)
            audio_features_len_tensor = torch.IntTensor(audio_features_len).to(self.device)

            # Regroup sparse labels into per-utterance index lists
            texts = []
            for i in range(len(sparse_labels[1])):  # iterate over values
                batch_idx = sparse_labels[0][i][0]  # utterance index within the batch
                if batch_idx >= len(texts):
                    texts.extend([[] for _ in range(batch_idx - len(texts) + 1)])
                texts[batch_idx].append(sparse_labels[1][i])

            texts_tensor = [torch.LongTensor(text).to(self.device) for text in texts]

            with torch.no_grad():  # no gradients during evaluation
                logits = self.forward(audio_features_tensor, audio_features_len_tensor)

                # Loss for monitoring only; no parameter update
                loss = self.compute_loss(logits, texts_tensor, audio_features_len_tensor)
                total_loss += loss.item()

            # Decode
            decoded = self.decode_ctc(logits, audio_features_len_tensor)

            # Character error rate
            cer = self.calculate_edit_distance(decoded, sparse_labels)
            total_cer += cer
            print('-测试损失: {:.4f}, 字符错误率: {:.4f}'.format(loss.item(), cer))
            # Log per-utterance test metrics to swanlab
            try:
                swanlab.log({
                    "test/batch_loss": loss.item(),
                    "test/batch_error_rate": cer,
                })
            except Exception as e:
                print(e)  # ignore swanlab logging errors
            if index < 1000:  # show the example decode (bound never triggers with test_count == 1000)
                dense_labels = asr_utils.trans_tuple_to_texts_ch(sparse_labels, self.words)
                for orig in dense_labels[:1]:
                    if len(decoded) > 0:
                        decoded_str = asr_utils.trans_array_to_text_ch(decoded[0], self.words)
                        print('-测试集语音原始文本: {}'.format(orig), end='---')
                        print('-测试集识别出的文本:  {}\n'.format(decoded_str))

                    break
        # Mean metrics over the whole run
        print('-平均测试损失: {:.4f}, 平均字符错误率: {:.4f}'.format(
            total_loss / test_count, total_cer / test_count))
        # Log aggregate test metrics to swanlab
        try:
            swanlab.log({
                "test/loss": total_loss / test_count,
                "test/error_rate": total_cer / test_count,
            })
        except Exception as e:
            print(e)  # ignore swanlab logging errors
        # Restore training mode and the training data
        self.train()
        self.wav_files = train_wav_files
        self.text_labels = train_text_labels
        print(f'恢复训练模式。训练集大小: {len(self.wav_files)}')

        return total_loss / test_count  # mean loss for the LR scheduler

    def test_target_wav_file(self, wav_files, txt_labels):
        """Recognise one target audio file and print the result.

        :param wav_files: list of audio file paths
        :param txt_labels: matching transcript list
        """
        print('读入测试语音文件: ', wav_files[0])
        print('开始识别语音数据......')
        self.init_session()  # initialise device / checkpoint / logging
        # May need adjusting to the actual utils.get_audio_mfcc_features implementation
        audio_features, audio_features_len, text_vector, text_vector_len = asr_utils.get_audio_mfcc_features(
            None,
            wav_files,
            n_input,
            n_context,
            self.word_num_map,
            txt_labels)

        # To tensors on the device
        audio_features_tensor = torch.FloatTensor(audio_features).to(self.device)
        audio_features_len_tensor = torch.IntTensor(audio_features_len).to(self.device)

        # Forward pass
        logits = self.forward(audio_features_tensor, audio_features_len_tensor)

        # Decode
        decoded = self.decode_ctc(logits, audio_features_len_tensor)

        # Render the decoded result as text
        if len(decoded) > 0:
            decoded_str = asr_utils.trans_array_to_text_ch(decoded[0], self.words)
            print('语音原始文本: {}'.format(txt_labels[0]))
            print('识别出来的文本:  {}'.format(decoded_str))
        else:
            print('语音原始文本: {}'.format(txt_labels[0]))
            print('识别出来的文本:  {}'.format("识别失败"))

    def build_train(self):
        """Entry point: run the training loop."""
        self.train_model()

    def build_test(self):
        """Entry point: evaluate on the test set."""
        self.test_model()

    def build_target_wav_file_test(self, wav_files, txt_labels):
        """Entry point: recognise the given audio files."""
        self.test_target_wav_file(wav_files, txt_labels)


class GreedyCTCDecoder(torch.nn.Module):
    """Best-path (greedy) CTC decoder: per-frame argmax, collapse consecutive
    repeats, drop blanks, and map the surviving indices to label characters."""

    def __init__(self, labels, blank=0):
        """
        :param labels: index -> character lookup table
        :param blank: index of the CTC blank symbol
        """
        super().__init__()
        self.labels = labels
        self.blank = blank

    def forward(self, emission: torch.Tensor) -> str:
        """Given a sequence emission over labels, get the best path.

        Args:
          emission (Tensor): Logit tensor of shape `[num_seq, num_label]`.
            A 3-D tensor with a leading singleton dim (`[1, T, C]`) is
            squeezed down to 2-D first.

        Returns:
          str: The resulting transcript as a single string.
        """
        # Accept a 3-D tensor by removing a leading batch dim of size 1.
        # (squeeze(0) only removes dim 0 when it equals 1.)
        if emission.dim() == 3:
            emission = emission.squeeze(0)

        indices = torch.argmax(emission, dim=-1)  # [T]

        # Standard CTC collapse: drop blanks and consecutive duplicates
        decoded_indices = []
        prev_index = -1
        for index in indices:
            if index != self.blank and index != prev_index:
                decoded_indices.append(index.item())
            prev_index = index

        # Map indices to characters, ignoring anything outside the table
        decoded_chars = []
        for idx in decoded_indices:
            if 0 <= idx < len(self.labels):
                decoded_chars.append(self.labels[idx])

        # Join into the final transcript string
        return "".join(decoded_chars)

