import os
import json
import math
import random
from pathlib import Path
import numpy as np
from tqdm import tqdm
import csv
import yaml
import argparse
import datetime
import matplotlib
matplotlib.use('Agg') # 使用 Agg 后端，避免 GUI 依赖
import matplotlib.pyplot as plt

import paddle
from paddle.io import Dataset, DataLoader
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.optimizer import AdamW
from paddle.optimizer.lr import LambdaDecay
from paddle.optimizer.lr import LRScheduler
from paddle.nn.functional import pairwise_distance, cosine_similarity

# Fraction of each speaker's utterances used for training; the remaining
# (1 - train_split) tail is held out for validation.
train_split = 0.9

class myDataset(Dataset):
    """Speaker-classification dataset yielding (mel segment, speaker id) pairs.

    Each speaker's utterance list is split by the module-level `train_split`
    ratio: the leading fraction feeds the training set, the trailing fraction
    the validation set, so the two splits never overlap.

    Args:
        meta_dir: Directory containing mapping.json and the metadata file.
        meta_file: Name of the metadata JSON file (must hold a "speakers" dict).
        data_dir: Directory containing the preprocessed .npy mel features.
        segment_len: Number of frames per training segment.
        is_train: Select the training split (True) or validation split (False).
    """

    def __init__(self, meta_dir, meta_file, data_dir, segment_len=128, is_train=True):
        self.is_train = is_train
        self.data_dir = data_dir
        self.segment_len = segment_len

        # Load mapping from speaker name to ID; the context manager closes
        # the file promptly instead of leaking the handle.
        mapping_path = Path(meta_dir) / "mapping.json"
        with mapping_path.open() as f:
            mapping = json.load(f)
        self.speaker2id = mapping["speaker2id"]

        # Load per-speaker utterance metadata (same leak fix as above).
        metadata_path = Path(meta_dir) / meta_file
        with metadata_path.open() as f:
            metadata = json.load(f)["speakers"]

        # Total number of speakers (= number of classes).
        self.speaker_num = len(metadata)
        self.data = []
        for speaker, utterance_list in metadata.items():
            # Deterministic head/tail split keyed on train_split.
            split_idx = int(len(utterance_list) * train_split)
            selected = utterance_list[:split_idx] if is_train else utterance_list[split_idx:]
            for utterance in selected:
                # Features were re-exported as .npy; metadata still says .pt.
                self.data.append(
                    [
                        utterance["feature_path"].replace(".pt", ".npy"),
                        self.speaker2id[speaker],
                    ]
                )

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        feat_path, speaker = self.data[index]
        # Load the preprocessed mel-spectrogram features.
        mel = np.load(os.path.join(self.data_dir, feat_path), allow_pickle=True)

        # Crop a random window of segment_len frames; shorter utterances are
        # returned whole and padded later by the collate function.
        if len(mel) > self.segment_len:
            start = random.randint(0, len(mel) - self.segment_len)
            mel = paddle.to_tensor(mel[start : start + self.segment_len])
        else:
            mel = paddle.to_tensor(mel)
        # Wrap the label in a tensor for the loss computation.
        speaker = paddle.to_tensor([speaker])
        return mel, speaker

    def get_speaker_number(self):
        """Return the number of distinct speakers (classes)."""
        return self.speaker_num

class myMetricLearningDataset(Dataset):
    """Triplet-sampling dataset for metric learning.

    `__getitem__(i)` treats speaker `i` as the anchor, draws two distinct
    utterances from that speaker (anchor + positive) and one utterance from a
    different, randomly chosen speaker (negative).

    Args:
        meta_dir: Directory containing mapping.json and the metadata file.
        meta_file: Name of the metadata JSON file (must hold a "speakers" dict).
        data_dir: Directory containing the preprocessed .npy mel features.
        segment_len: Number of frames per sampled segment.
        is_train: Select the training split (True) or validation split (False).
    """

    def __init__(self, meta_dir, meta_file, data_dir, segment_len=128, is_train=True):
        self.is_train = is_train
        self.data_dir = data_dir
        self.segment_len = segment_len

        # Load mapping from speaker name to ID; the context manager closes
        # the file promptly instead of leaking the handle.
        mapping_path = Path(meta_dir) / "mapping.json"
        with mapping_path.open() as f:
            mapping = json.load(f)
        self.speaker2id = mapping["speaker2id"]

        # Load per-speaker utterance metadata (same leak fix as above).
        metadata_path = Path(meta_dir) / meta_file
        with metadata_path.open() as f:
            metadata = json.load(f)["speakers"]

        # Total number of speakers.
        self.speaker_num = len(metadata)
        # speaker id -> list of feature paths for that speaker's split.
        self.speaker2feat = {}
        for speaker, utterance_list in metadata.items():
            split_idx = int(len(utterance_list) * train_split)
            selected = utterance_list[:split_idx] if is_train else utterance_list[split_idx:]
            # NOTE: __getitem__ needs at least 2 utterances per speaker in
            # this split for random.sample(..., 2) to succeed.
            self.speaker2feat[self.speaker2id[speaker]] = [
                utterance["feature_path"].replace(".pt", ".npy")
                for utterance in selected
            ]

    def __len__(self):
        # One "item" per speaker; each access samples a fresh triplet.
        return self.speaker_num

    def __getitem__(self, index):
        anchor_speaker = index
        # Rejection-sample a different speaker for the negative example.
        negative_speaker = random.randint(0, self.speaker_num - 1)
        while negative_speaker == anchor_speaker:
            negative_speaker = random.randint(0, self.speaker_num - 1)

        # Two distinct utterances from the anchor speaker, one from the negative.
        anchor_utterances = random.sample(self.speaker2feat[anchor_speaker], 2)
        negative_utterance = random.choice(self.speaker2feat[negative_speaker])

        # Load (and randomly crop) the mel features for all three utterances.
        anchor_mel = self.get_mel_by_feat(anchor_utterances[0])
        positive_mel = self.get_mel_by_feat(anchor_utterances[1])
        negative_mel = self.get_mel_by_feat(negative_utterance)

        # The positive shares the anchor's speaker id by construction.
        return (
            anchor_mel,
            positive_mel,
            negative_mel,
            paddle.to_tensor([anchor_speaker]),
            paddle.to_tensor([anchor_speaker]),
            paddle.to_tensor([negative_speaker]),
        )

    def get_speaker_number(self):
        """Return the number of distinct speakers."""
        return self.speaker_num

    def get_mel_by_feat(self, feat_path):
        """Load one mel feature file and crop a random segment_len window."""
        mel = np.load(os.path.join(self.data_dir, feat_path), allow_pickle=True)

        # Shorter utterances are returned whole; padding happens at collate time.
        if len(mel) > self.segment_len:
            start = random.randint(0, len(mel) - self.segment_len)
            mel = paddle.to_tensor(mel[start : start + self.segment_len])
        else:
            mel = paddle.to_tensor(mel)

        return mel

def pad_sequence(sequences, padding_value=0):
    """Right-pad a non-empty list of (length, ...) tensors to a common length.

    Args:
        sequences: Non-empty list of tensors whose trailing dims all match.
        padding_value: Fill value written into the padded region.

    Returns:
        float32 tensor of shape (len(sequences), max_len, *trailing_dims).
    """
    trailing_dims = tuple(sequences[0].shape[1:])
    max_len = max(s.shape[0] for s in sequences)
    out_dims = (len(sequences), max_len) + trailing_dims
    # paddle.full allocates and fills in one step; the previous
    # paddle.empty(...).fill_(...) relied on a chained in-place call.
    out_tensor = paddle.full(out_dims, padding_value, dtype="float32")
    for i, tensor in enumerate(sequences):
        length = tensor.shape[0]
        # Index assignment avoids keeping extra references to the tensor.
        out_tensor[i, :length, ...] = tensor
    return out_tensor

def collate_batch(batch):
    """Collate (mel, speaker) pairs into a padded mel tensor and a label tensor."""
    mels, speakers = zip(*batch)
    # Pad every mel in the batch to the longest length; -20 is the fill
    # value used for silence in log-mel space.
    padded = pad_sequence(mels, padding_value=-20)
    # padded: (batch size, length, 40)
    labels = paddle.to_tensor(speakers, dtype="int64")
    return padded, labels

def collate_metric_learning_batch(batch):
    """Collate triplet samples into padded mel tensors and label tensors.

    Input items are 6-tuples (anchor_mel, positive_mel, negative_mel,
    anchor_spk, positive_spk, negative_spk); the output keeps that order.
    """
    columns = list(zip(*batch))
    # First three columns are mel sequences: pad each group to its max length.
    padded_mels = [pad_sequence(col, padding_value=-20) for col in columns[:3]]
    # mel: (batch size, length, 40)
    # Last three columns are speaker ids: stack each group into an int64 tensor.
    label_tensors = [paddle.to_tensor(col, dtype="int64") for col in columns[3:]]
    return tuple(padded_mels + label_tensors)

def get_dataloader(meta_dir, meta_file, data_dir, batch_size):
    """Build the train/valid dataloaders for classifier training.

    Returns:
        (train_loader, valid_loader, speaker_num)
    """
    train_set = myDataset(meta_dir, meta_file, data_dir)
    valid_set = myDataset(meta_dir, meta_file, data_dir, is_train=False)
    n_speakers = train_set.get_speaker_number()
    loader_train = DataLoader(
        train_set,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=collate_batch,
    )
    loader_valid = DataLoader(valid_set, batch_size=batch_size, collate_fn=collate_batch)
    return loader_train, loader_valid, n_speakers

def get_metric_learning_dataloader(meta_dir, meta_file, data_dir, batch_size):
    """Build dataloaders for metric learning.

    The training loader yields triplets; validation reuses the plain
    classification dataset with the standard collate function.

    Returns:
        (train_loader, valid_loader, speaker_num)
    """
    train_set = myMetricLearningDataset(meta_dir, meta_file, data_dir)
    valid_set = myDataset(meta_dir, meta_file, data_dir, is_train=False)
    n_speakers = train_set.get_speaker_number()
    loader_train = DataLoader(
        train_set,
        shuffle=True,
        batch_size=batch_size,
        collate_fn=collate_metric_learning_batch,
    )
    loader_valid = DataLoader(valid_set, batch_size=batch_size, collate_fn=collate_batch)
    return loader_train, loader_valid, n_speakers

# Sinusoidal positional-encoding layer
class PositionalEncoding(nn.Layer):
    """Sinusoidal positional encoding (PE) as used in Transformer models."""

    def __init__(self, d_model, max_len=7000):
        """
        Initialize the positional-encoding table.

        Args:
            d_model (int): Model feature dimension (embedding dim).
            max_len (int): Maximum sequence length to precompute.
        """
        super(PositionalEncoding, self).__init__()

        # Precompute the full encoding table once, in log space.
        pe = paddle.zeros([max_len, d_model])  # [max_len, d_model]
        # Position indices, shape [max_len, 1].
        position = paddle.arange(0, max_len, dtype=paddle.float32).unsqueeze(1)
        # Divisor term, shape [d_model / 2]: 1 / 10000^(2i / d_model).
        div_term = paddle.exp(
            paddle.arange(0, d_model, 2, dtype=paddle.float32) * -(math.log(10000.0) / d_model)
        )
        # Even feature indices get sine, odd indices get cosine.
        pe[:, 0::2] = paddle.sin(position * div_term)
        pe[:, 1::2] = paddle.cos(position * div_term)
        # pe final shape: [max_len, d_model]

        # Register as a buffer: part of layer state, not a trainable
        # parameter; persistable=False keeps it out of the saved state dict.
        self.register_buffer("pe", pe, persistable=False)

    def forward(self, x):
        """
        Add positional encodings to the input.

        Args:
            x: Input tensor, shape [batch_size, seq_len, d_model].
        Returns:
            Tensor of the same shape with positional encodings added.
        """
        # Slice the precomputed table to the actual sequence length;
        # assumes seq_len <= max_len.
        seq_len = x.shape[1]
        pe_slice = self.pe[:seq_len, :]  # [seq_len, d_model]

        # unsqueeze(0) adds a batch dimension ([1, seq_len, d_model]) so the
        # addition broadcasts over the batch.
        x = x + pe_slice.unsqueeze(0)

        return x

# TODO 2: Adjust transformer parameters
class Classifier(nn.Layer):
    """Transformer-encoder speaker classifier: prenet -> encoder -> mean-pool -> MLP head."""

    def __init__(self, d_model=80, nhead=2, num_layers=1, dropout=0.1, n_spks=600, use_positional_encoding=False):
        super().__init__()
        # Lift the 40-dim mel features into the model dimension.
        self.prenet = nn.Linear(40, d_model)

        # Optional sinusoidal positional encoding before the encoder.
        self.pos_encoder = PositionalEncoding(d_model) if use_positional_encoding else None

        # Transformer encoder stack.
        self.encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model, nhead=nhead, dim_feedforward=512, dropout=dropout
        )
        self.encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers)

        # Two-layer classification head projecting to speaker logits.
        self.pred_layer = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.ReLU(),
            nn.Linear(d_model, n_spks),
        )

    def forward(self, mels):
        """
        Args:
          mels: (batch size, length, 40)
        Returns:
          out: (batch size, n_spks)
        """
        hidden = self.prenet(mels)  # (batch size, length, d_model)
        if self.pos_encoder is not None:
            hidden = self.pos_encoder(hidden)
        encoded = self.encoder(hidden)
        # Average over the time axis to get one vector per utterance.
        pooled = encoded.mean(axis=1)
        return self.pred_layer(pooled)
    
# TODO 4: Implement Conformer model
class ConformerBlock(nn.Layer):
    """One Conformer block: half-step FFN -> self-attention -> convolution -> half-step FFN.

    Input and output shapes are both [batch_size, sequence_length, d_model].
    """
    def __init__(self, d_model, nhead, d_ffn, kernel_size=32, dropout=0.1, use_positional_encoding=False):
        super().__init__()
        # Feed-forward module 1 (applied with a 1/2 residual weight in forward).
        # NOTE(review): the Conformer paper uses Swish here; confirm SELU is intentional.
        self.ff1 = nn.Sequential(
            nn.LayerNorm(d_model),
            nn.Linear(d_model, d_ffn),
            nn.SELU(),
            nn.Dropout(dropout),
            nn.Linear(d_ffn, d_model),
            nn.Dropout(dropout)
        )
        
        # Optional positional encoding injected right before attention.
        if use_positional_encoding:
            self.pos_encoder = PositionalEncoding(d_model)
        else:
            self.pos_encoder = None
        
        # Multi-head self-attention module (pre-norm residual).
        self.mha_norm = nn.LayerNorm([d_model])
        
        self.mha = nn.MultiHeadAttention(embed_dim= d_model, num_heads=nhead, dropout=dropout)
        self.mha_dropout = nn.Dropout(dropout)
        
        # Convolution module; operates on [batch, channels, length] after the
        # transpose in forward().
        self.conv_norm = nn.LayerNorm([d_model])
        self.conv = nn.Sequential(
            nn.Conv1D(d_model, d_model, 1),  # pointwise conv feeding the GLU gate
            nn.GLU(axis=1),  # gates along the channel axis, halving d_model
            # Depthwise conv (groups == channels).
            # NOTE(review): with an even kernel_size (default 32) this padding
            # shortens the sequence by one frame, which would break the
            # residual add in forward(); callers pass the odd value 31.
            nn.Conv1D(d_model//2, d_model//2, kernel_size, padding=(kernel_size-1)//2, groups=d_model//2),  # Depthwise
            nn.BatchNorm1D(d_model//2),
            nn.SELU(),
            nn.Conv1D(d_model//2, d_model, 1),  # Pointwise projection back to d_model
            nn.Dropout(dropout)
        )
        
        # Feed-forward module 2 (also half-step residual).
        self.ff2 = nn.Sequential(
            nn.LayerNorm(d_model),
            nn.Linear(d_model, d_ffn),
            nn.SELU(),
            nn.Dropout(dropout),
            nn.Linear(d_ffn, d_model),
            nn.Dropout(dropout)
        )
        
        # Final layer norm applied after all residual branches.
        self.norm = nn.LayerNorm(d_model)
    
    def forward(self, x):
        """
        Args:
            x: [batch_size, sequence_length, d_model]
        Returns:
            Tensor of the same shape.
        """
        # FFN module 1: half-step (0.5-weighted) residual.
        x = x + 0.5 * self.ff1(x)
        
        # Self-attention module (pre-norm residual).
        residual = x
        x = self.mha_norm(x)
        # Attention input: [batch_size, sequence_length, d_model]
        if self.pos_encoder is not None:
            x = self.pos_encoder(x)
        # Query-only call; key and value default to the query (self-attention).
        x = self.mha(x)
        x = self.mha_dropout(x)
        x = residual + x
        # Convolution module: transpose to channel-first layout for Conv1D.
        residual = x
        x = self.conv_norm(x)
        x = x.transpose([0, 2, 1])  # [batch_size, d_model, length]
        x = self.conv(x)
        x = x.transpose([0, 2, 1])  # [batch_size, sequence_length, d_model]
        x = residual + x
        # FFN module 2: half-step residual.
        x = x + 0.5 * self.ff2(x)
        
        # Final layer norm.
        x = self.norm(x)
        return x

class ClassifierConformer(nn.Layer):
    """Conformer speaker classifier: prenet -> N Conformer blocks -> mean-pool -> MLP head."""

    def __init__(self, d_model=80, nhead=2, num_layers=1, dropout=0.1, n_spks=600,
                 use_positional_encoding=False, d_ffn=256, kernel_size=31):
        super().__init__()
        # Lift the 40-dim mel features into the model dimension.
        self.prenet = nn.Linear(40, d_model)

        # Stack of identical Conformer blocks.
        blocks = [
            ConformerBlock(d_model, nhead, d_ffn, kernel_size, dropout, use_positional_encoding)
            for _ in range(num_layers)
        ]
        self.conformer_blocks = nn.LayerList(blocks)

        # Two-layer classification head projecting to speaker logits.
        self.pred_layer = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.ReLU(),
            nn.Linear(d_model, n_spks),
        )

    def forward(self, mels):
        """
        Args:
            mels: [batch_size, length, 40]
        Returns:
            logits: [batch_size, n_spks]
        """
        hidden = self.prenet(mels)  # [batch_size, length, d_model]
        for block in self.conformer_blocks:
            hidden = block(hidden)
        # Average over the time axis to get one vector per utterance.
        pooled = hidden.mean(axis=1)
        return self.pred_layer(pooled)

def get_cosine_schedule_with_warmup(
    learning_rate,
    num_warmup_steps: int,
    num_training_steps: int,
    num_cycles: float = 0.5,
    last_epoch: int = -1,
):
    """Build an LR schedule: linear warmup, then cosine decay.

    Args:
        learning_rate: Peak learning rate reached at the end of warmup.
        num_warmup_steps: Steps over which the LR ramps linearly from 0.
        num_training_steps: Total number of training steps.
        num_cycles: Number of cosine half-cycles over the decay phase.
        last_epoch: Passed through to LambdaDecay for resuming.

    Returns:
        A paddle LambdaDecay scheduler.
    """
    def lr_lambda(current_step):
        # Warmup phase: linear ramp from 0 to 1.
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Decay phase: cosine from 1 down to 0 (clamped at 0).
        decay_steps = float(max(1, num_training_steps - num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / decay_steps
        cosine = 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))
        return max(0.0, cosine)

    return LambdaDecay(learning_rate, lr_lambda, last_epoch)

def model_fn(batch, model, criterion):
    """Run one classification forward pass and return the batch loss."""
    mels, labels = batch
    # Collapse the (batch, 1) label tensor to (batch,).
    labels = paddle.squeeze(labels)

    logits = model(mels)
    return criterion(logits, labels)

# TODO 3: Implement triplet loss
def model_fn_metric_learning(batch, model, distance_metric, margin=0.2):
    """Forward a triplet batch through the model and return the triplet loss."""
    (anchor_mel, positive_mel, negative_mel,
     anchor_speaker, positive_speaker, negative_speaker) = batch
    anchor_speaker = paddle.squeeze(anchor_speaker)
    positive_speaker = paddle.squeeze(positive_speaker)
    negative_speaker = paddle.squeeze(negative_speaker)

    # Embed all three utterance groups with the shared model.
    anchor_outs, positive_outs, negative_outs = (
        model(m) for m in (anchor_mel, positive_mel, negative_mel)
    )

    # Distances from the anchor to its positive and negative counterparts.
    pos_dist = distance_metric(anchor_outs, positive_outs)
    neg_dist = distance_metric(anchor_outs, negative_outs)

    # Triplet hinge: max(0, d(a,p) - d(a,n) + margin), averaged over the batch.
    # Positives are pulled together, negatives pushed beyond the margin.
    return paddle.clip(pos_dist - neg_dist + margin, min=0.0).mean()

def valid(valid_loader, model):
    """Run one pass over the validation set; return mean per-batch accuracy."""
    model.eval()
    total_accuracy = 0.0
    progress = tqdm(total=len(valid_loader.dataset), ncols=0, desc="Valid", unit=" uttr")

    for batch_idx, batch in enumerate(valid_loader):
        with paddle.no_grad():
            mels, labels = batch
            labels = paddle.squeeze(labels)
            logits = model(mels)
            predictions = logits.argmax(1)
            batch_accuracy = paddle.mean(paddle.cast((predictions == labels), dtype="float32"))
            total_accuracy += batch_accuracy.item()

        progress.update(valid_loader.batch_size)
        progress.set_postfix(accuracy=f"{total_accuracy / (batch_idx + 1):.2f}")

    progress.close()
    # Restore training mode for the caller's training loop.
    model.train()
    return total_accuracy / len(valid_loader)

def valid_metric_learning(train_loader, valid_loader, model):
    """Validate an embedding model by 1-nearest-neighbor classification.

    Embeds the whole training set, then labels each validation utterance with
    the label of its nearest training embedding (Euclidean distance).

    Args:
        train_loader: Loader over the reference (training) set.
        valid_loader: Loader over the validation set.
        model: Embedding model; model(mels) -> (batch, embed_dim).

    Returns:
        float: Mean per-batch validation accuracy.
    """
    model.eval()
    running_accuracy = 0.0

    # Pass 1: embed the entire training set to build the reference index.
    train_embeddings = []
    train_labels = []
    # NOTE(review): desc says "Valid" but this pass runs over the training set.
    pbar = tqdm(total=len(train_loader.dataset), ncols=0, desc="Valid", unit=" uttr")
    for i, batch in enumerate(train_loader):
        with paddle.no_grad():
            mels, labels = batch
            labels = paddle.squeeze(labels)
            outs = model(mels)
            train_embeddings.append(outs)
            train_labels.append(labels)
        pbar.update(train_loader.batch_size)
    pbar.close()
    train_embeddings = paddle.concat(train_embeddings, axis=0)
    train_labels = paddle.concat(train_labels, axis=0)

    # Pass 2: nearest-neighbor lookup for every validation embedding.
    pbar = tqdm(total=len(valid_loader.dataset), ncols=0, desc="Valid", unit=" uttr")
    for i, batch in enumerate(valid_loader):
        with paddle.no_grad():
            mels, labels = batch
            labels = paddle.squeeze(labels)
            outs = model(mels)
            # Pairwise distances to the reference set.
            # distances: (batch size, train set size)
            distances = paddle.cdist(outs, train_embeddings)
            # Index of the closest training embedding per validation sample.
            nearest_idx = paddle.argmin(distances, axis=1)
            # Gather the neighbors' labels as predictions.
            preds = train_labels[nearest_idx]
            accuracy = paddle.mean(paddle.cast((preds == labels), dtype="float32")) 
            running_accuracy += accuracy.item()

        pbar.update(valid_loader.batch_size)
        pbar.set_postfix(accuracy=f"{running_accuracy / (i+1):.2f}")

    pbar.close()
    model.train()
    return running_accuracy / len(valid_loader)

class InferenceDataset(Dataset):
    """Test-time dataset yielding (feature_path, full-length mel) pairs.

    Args:
        meta_file: Path to the test metadata JSON (with an "utterances" list).
        data_dir: Directory containing the preprocessed .npy mel features.
        segment_len: Kept for interface compatibility; inference always uses
            the full utterance, so no segmentation is applied.
    """

    def __init__(self, meta_file, data_dir, segment_len=128):
        testdata_path = Path(meta_file)
        # Context manager closes the metadata file instead of leaking it.
        with testdata_path.open() as f:
            metadata = json.load(f)
        self.data_dir = data_dir
        self.data = metadata["utterances"]
        self.segment_len = segment_len

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        utterance = self.data[index]
        # Features were re-exported as .npy; metadata still references .pt.
        feat_path = utterance["feature_path"].replace(".pt", ".npy")
        mel = np.load(os.path.join(self.data_dir, feat_path), allow_pickle=True)
        # No random cropping at inference time: return the whole utterance.
        mel = paddle.to_tensor(mel)
        return feat_path, mel

def inference_collate_batch(batch):
    """Collate inference items into (feature paths, stacked mel tensor)."""
    paths = tuple(item[0] for item in batch)
    mels = [item[1] for item in batch]
    # Stacking (rather than padding) assumes equal-length mels per batch,
    # which holds for the batch_size=1 inference loader.
    return paths, paddle.stack(mels)

def train_model(config, config_path=None, resume=False):
    """Train the speaker model; supports resuming from a checkpoint.

    Args:
        config (dict): Parsed configuration with paths and hyper-parameters.
        config_path (str, optional): Where the config came from (logging only).
        resume (bool): If True, try to restore model/optimizer/scheduler
            state from the checkpoint at ckpt_path.

    Returns:
        float: Best validation accuracy observed during training.
    """
    print(f"[Train Info]: 使用配置: {config_path if config_path else '未提供路径'}, resume={resume}")
    
    # Read settings from the config (with defaults where provided).
    meta_dir = config["meta_dir"]
    meta_file = config["meta_file"]
    data_dir = config["data_dir"]
    save_path = config["save_path"]
    ckpt_path = config.get("ckpt_path", save_path.replace('.ckpt', '_ckpt.pdparams')) # Checkpoint for optimizer etc.
    best_model_path = save_path # Path for the best model state dict
    batch_size = config["batch_size"]
    valid_steps = config["valid_steps"]
    warmup_steps = config["warmup_steps"]
    save_steps = config["save_steps"]
    total_steps = config["total_steps"]
    do_metric_learning = config.get("do_metric_learning", False)
    model_type = config.get("model_type", "transformer")
    d_model = config.get("d_model", 80)
    nhead = config.get("nhead", 2)
    nlayers = config.get("nlayers", 1)
    dropout = config.get("dropout", 0.1)
    learning_rate = config.get("learning_rate", 1e-3) # Use learning_rate from config
    use_positional_encoding = config.get("use_positional_encoding", False)
    # Select the device: first CUDA GPU when available, otherwise CPU.
    try:
        paddle.device.set_device('gpu:0' if paddle.device.cuda.device_count() != 0 else 'cpu')
    except:  # NOTE(review): bare except hides real errors; prefer "except Exception"
        print("GPU not available, using CPU for training")
        paddle.device.set_device('cpu')

    # Build the classification data loaders.
    train_loader, valid_loader, speaker_num = get_dataloader(
        meta_dir, meta_file, data_dir, batch_size
    )
    # Metric learning loader (only initialized if needed)
    train_loader_metric_learning = None
    if do_metric_learning:
        train_loader_metric_learning, _, _ = get_metric_learning_dataloader(
            meta_dir, meta_file, data_dir, batch_size
        )
        train_iterator = iter(train_loader_metric_learning)
    else:
        train_iterator = iter(train_loader)
    print("[Info]: 数据加载完成!", flush=True)

    # Build the model according to model_type.
    if model_type == "transformer":
        model = Classifier(d_model=d_model, nhead=nhead, num_layers=nlayers, dropout=dropout, n_spks=speaker_num, use_positional_encoding=use_positional_encoding) 
    elif model_type == "conformer":
        # Conformer-specific parameters.
        ffn_dim = config.get("ffn_dim", 256)
        conv_kernel_size = config.get("conv_kernel_size", 31)
        model = ClassifierConformer(d_model=d_model, nhead=nhead, num_layers=nlayers, dropout=dropout, n_spks=speaker_num, \
                                    use_positional_encoding=use_positional_encoding, d_ffn=ffn_dim, kernel_size=conv_kernel_size)
    else:
        raise ValueError(f"未知的模型类型: {model_type}")

    # Loss, distance metric (metric learning), LR schedule and optimizer.
    criterion = nn.CrossEntropyLoss()
    distance_metric = nn.PairwiseDistance(p=2)
    scheduler = get_cosine_schedule_with_warmup(learning_rate, warmup_steps, total_steps) # Use configured LR
    optimizer = AdamW(parameters=model.parameters(), learning_rate=scheduler)
    print("[Info]: 模型和优化器创建完成!", flush=True)
    
    start_step = 0
    best_accuracy = -1.0
    best_state_dict = None
    
    # --- Resume-from-checkpoint logic ---
    if resume:
        if os.path.exists(ckpt_path):
            print(f"[Info]: 发现检查点文件，尝试从 '{ckpt_path}' 恢复训练...")
            try:
                checkpoint = paddle.load(ckpt_path)
                model.set_state_dict(checkpoint['model_state_dict'])
                optimizer.set_state_dict(checkpoint['optimizer_state_dict'])
                scheduler.set_state_dict(checkpoint['scheduler_state_dict']) # restore scheduler state
                start_step = checkpoint['step'] + 1 # resume from the next step
                best_accuracy = checkpoint.get('best_accuracy', -1.0) # previous best accuracy
                print(f"[Info]: 成功从 step {start_step - 1} 恢复训练，最佳准确率: {best_accuracy:.4f}")
            except Exception as e:
                print(f"[Warning]: 加载检查点 '{ckpt_path}' 失败: {e}. 将开始新的训练。")
                start_step = 0
                best_accuracy = -1.0
        else:
            print(f"[Warning]: 未找到检查点文件 '{ckpt_path}'。无法恢复，将开始新的训练。")
            start_step = 0
            best_accuracy = -1.0
    # --- End of resume logic ---

    # Metric history collected for the plots produced after training.
    valid_accuracies = []
    valid_losses = []
    validation_steps = []

    # When resuming, the scheduler state was restored above, so the learning
    # rate is already consistent with start_step.
    if start_step > 0:
        print(f"[Info]: 快进到 step {start_step}...")
        # Fast-forwarding the data iterator precisely would require a
        # resumable dataloader (saving/loading iterator state). Instead the
        # iterator simply restarts from the beginning, which is accepted as
        # an approximation here; only the LR schedule is exactly resumed.
        print(f"[Info]: 数据迭代器将从头开始。学习率已根据 step {start_step} 设置。")

    pbar = tqdm(total=total_steps, initial=start_step, ncols=0, desc="Train", unit=" step")

    for step in range(start_step, total_steps):
        # Fetch the next batch; restart the epoch iterator when exhausted.
        try:
            batch = next(train_iterator)
        except StopIteration:
            if do_metric_learning:
                train_iterator = iter(train_loader_metric_learning)
            else:
                train_iterator = iter(train_loader)
            batch = next(train_iterator)
            
        model.train() # make sure we are in training mode (valid() switches to eval)
        # Compute the loss for this batch.
        if do_metric_learning:
            loss = model_fn_metric_learning(batch, model, distance_metric)
        else:
            loss = model_fn(batch, model, criterion)
        batch_loss = loss.item()

        # Backprop and parameter update.
        optimizer.clear_grad()
        loss.backward()
        optimizer.step()
        scheduler.step() # advance the learning-rate schedule
        
        # Progress-bar logging.
        pbar.update(1)
        pbar.set_postfix(
            loss=f"{batch_loss:.4f}", # current batch loss
            lr=f"{optimizer.get_lr():.1e}", # current learning rate
            best_acc=f"{best_accuracy:.4f}" # best validation accuracy so far
        )

        # Periodic validation (also runs on the final step).
        if (step + 1) % valid_steps == 0 or step == total_steps - 1:
            current_lr = optimizer.get_lr()
            pbar.write(f"\nStep {step + 1}/{total_steps}, LR: {current_lr:.2e} - Running Validation...")

            if do_metric_learning:
                # valid_metric_learning builds its reference embeddings from
                # the original classification train_loader.
                valid_accuracy = valid_metric_learning(train_loader, valid_loader, model)
            else:
                valid_accuracy = valid(valid_loader, model)
            
            # Record validation metrics for plotting.
            valid_accuracies.append(valid_accuracy)
            valid_losses.append(batch_loss) # batch loss right before validation
            validation_steps.append(step + 1)
            
            pbar.write(f"Step {step + 1}: Validation Accuracy: {valid_accuracy:.4f}, Batch Loss: {batch_loss:.4f}")

            # Keep the best model weights.
            if valid_accuracy > best_accuracy:
                pbar.write(f"[!] New best accuracy: {valid_accuracy:.4f} (previous: {best_accuracy:.4f}). Saving best model to '{best_model_path}'")
                best_accuracy = valid_accuracy
                best_state_dict = model.state_dict()
                # Make sure the target directory exists.
                save_dir = os.path.dirname(best_model_path)
                if save_dir and not os.path.exists(save_dir):
                    os.makedirs(save_dir, exist_ok=True)
                paddle.save(best_state_dict, best_model_path)

        # Periodic checkpoint (model + optimizer + scheduler state).
        if (step + 1) % save_steps == 0 or step == total_steps - 1: # also checkpoint on the final step
            if best_state_dict is not None: # only after at least one validation
                checkpoint_data = {
                    'step': step,
                    'model_state_dict': model.state_dict(), # current weights, not the best ones
                    'optimizer_state_dict': optimizer.state_dict(),
                    'scheduler_state_dict': scheduler.state_dict(),
                    'best_accuracy': best_accuracy,
                    'config': config # keep the config alongside the weights
                }
                # Make sure the checkpoint directory exists.
                ckpt_dir = os.path.dirname(ckpt_path)
                if ckpt_dir and not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir, exist_ok=True)
                paddle.save(checkpoint_data, ckpt_path)
                pbar.write(f"[i] Checkpoint saved to '{ckpt_path}' at step {step + 1}")

    pbar.close()
    
    # Training finished.
    print("--- Training Finished ---")
    print(f"Final best validation accuracy: {best_accuracy:.4f}")
    
    # --- Plot validation metrics ---
    save_dir = os.path.dirname(best_model_path) 
    if not save_dir: save_dir = '.'
    elif not os.path.exists(save_dir): # make sure the directory exists
         os.makedirs(save_dir, exist_ok=True)
         
    model_name_base = Path(best_model_path).stem # file name without extension
    
    if validation_steps: # only plot when validation actually ran
        # Accuracy curve.
        plt.figure(figsize=(10, 5))
        plt.plot(validation_steps, valid_accuracies, marker='o', linestyle='-', label='Validation Accuracy')
        plt.title(f'Validation Accuracy over Training Steps ({model_name_base})')
        plt.xlabel('Training Step')
        plt.ylabel('Accuracy')
        plt.grid(True)
        plt.legend()
        acc_plot_path = os.path.join(save_dir, f'{model_name_base}_accuracy.png')
        try:
            plt.savefig(acc_plot_path)
            print(f"[Info]: 验证集准确率曲线图已保存至 {acc_plot_path}")
        except Exception as e:
            print(f"[Error]: 保存准确率曲线图失败: {e}")
        plt.close() # release figure memory

        # Loss curve (batch loss recorded at each validation step).
        plt.figure(figsize=(10, 5))
        plt.plot(validation_steps, valid_losses, marker='x', linestyle='--', color='r', label='Batch Loss (at Validation)')
        plt.title(f'Batch Loss at Validation Steps ({model_name_base})')
        plt.xlabel('Training Step')
        plt.ylabel('Loss')
        plt.grid(True)
        plt.legend()
        loss_plot_path = os.path.join(save_dir, f'{model_name_base}_loss.png')
        try:
            plt.savefig(loss_plot_path)
            print(f"[Info]: 验证时批次损失曲线图已保存至 {loss_plot_path}")
        except Exception as e:
            print(f"[Error]: 保存损失曲线图失败: {e}")
        plt.close() # release figure memory
    else:
        print("[Info]: 未执行验证步骤，跳过绘制曲线图。")
    # --- End of plotting ---

    return best_accuracy

def test_model(config, config_path=None, is_private=False):
    """Run inference with the best model stored at config["save_path"].

    Loads the trained classifier, predicts a speaker for every utterance in
    the chosen test split (public or private) and writes the predictions to
    a CSV file with header ``Id,Category``.

    Args:
        config: parsed YAML configuration dict.
        config_path: path of the config file; used only for logging.
        is_private: when True, run on the private test split, otherwise on
            the public split.

    Returns:
        Path of the CSV file the predictions were written to.

    Raises:
        FileNotFoundError: if the model file or the test metadata is missing.
        ValueError: on an unknown ``model_type``.
    """
    print(f"[Test Info]: 使用配置: {config_path if config_path else '未提供路径'}, is_private={is_private}")
    # Parameter settings
    meta_dir = config["meta_dir"]
    # Training-set metadata; needed to build the reference embeddings when
    # metric learning is enabled. Bug fix: assign it for BOTH splits — it was
    # previously set only in the public branch, so a private-set run with
    # do_metric_learning crashed with a NameError.
    meta_file_train = config["meta_file"]
    if is_private:
        meta_file = config["private_meta_file"]
        output_path = config["private_output_path"]
        print(f"[Test Info]: 正在测试私有集: {meta_file}")
    else:
        meta_file = config["public_meta_file"]
        output_path = config["public_output_path"]
        print(f"[Test Info]: 正在测试公共集: {meta_file}")

    data_dir = config["data_dir"]
    model_path = config["save_path"]  # best-model checkpoint to load
    batch_size = config["batch_size"]
    do_metric_learning = config.get("do_metric_learning", False)
    model_type = config.get("model_type", "transformer")
    d_model = config.get("d_model", 80)
    nhead = config.get("nhead", 4)
    nlayers = config.get("nlayers", 1)
    dropout = config.get("dropout", 0.1)
    use_positional_encoding = config.get("use_positional_encoding", False)

    # Fail early if the trained model is missing.
    if not os.path.exists(model_path):
        print(f"[Error]: 模型文件未找到: {model_path}")
        print("请先运行训练模式 (--train 或 --all)。")
        raise FileNotFoundError(f"模型文件未找到: {model_path}")

    # Load speaker-id mapping.
    mapping_path = Path(meta_dir) / "mapping.json"
    mapping = json.load(mapping_path.open())

    # Create test dataloader.
    try:
        test_dataset = InferenceDataset(meta_file, data_dir)
    except FileNotFoundError:
        print(f"[Error]: 测试元数据文件未找到: {meta_file}")
        raise

    test_loader = DataLoader(
        test_dataset,
        batch_size=1,  # inference one utterance at a time
        shuffle=False,
        drop_last=False,
        # num_workers=8,  # can hang on some systems; keep single-process
        collate_fn=inference_collate_batch,
    )
    print("[Info]: 测试数据加载完成!", flush=True)

    # Create the model and load the trained weights.
    speaker_num = len(mapping["id2speaker"])
    if model_type == "transformer":
        model = Classifier(d_model=d_model, nhead=nhead, num_layers=nlayers, n_spks=speaker_num, dropout=dropout, use_positional_encoding=use_positional_encoding)
    elif model_type == "conformer":
        # Conformer-specific hyperparameters.
        ffn_dim = config.get("ffn_dim", 256)
        conv_kernel_size = config.get("conv_kernel_size", 31)
        model = ClassifierConformer(d_model=d_model, nhead=nhead, num_layers=nlayers, n_spks=speaker_num, dropout=dropout,
                                    use_positional_encoding=use_positional_encoding, d_ffn=ffn_dim, kernel_size=conv_kernel_size)
    else:
        raise ValueError(f"未知的模型类型: {model_type}")

    print(f"[Info]: 正在从 '{model_path}' 加载模型状态...")
    model.set_state_dict(paddle.load(model_path))
    model.eval()
    print("[Info]: 模型创建和加载完成!", flush=True)

    # Optionally visualize the positional encodings of the loaded model.
    if use_positional_encoding:
        if model_type == "transformer" and hasattr(model, "pos_encoder"):
            print("[Info]: 正在绘制Transformer模型的位置编码...")
            plot_position_encoding(model.pos_encoder, "pos_encoding_transformer.png")
        elif model_type == "conformer":
            for i, block in enumerate(model.conformer_blocks):
                if hasattr(block, "pos_encoder") and block.pos_encoder is not None:
                    print(f"[Info]: 正在绘制Conformer模型第{i+1}层的位置编码...")
                    plot_position_encoding(block.pos_encoder, f"pos_encoding_conformer_block_{i+1}.png")

    # Metric learning: precompute an embedding for every training utterance so
    # that test utterances can be classified by nearest neighbour.
    train_embeddings = None
    train_labels = None
    if do_metric_learning:
        print("[Info][Metric Learning]: 开始准备训练集嵌入向量...")
        train_dataset = myDataset(meta_dir, meta_file_train, data_dir, is_train=True)
        train_embed_loader = DataLoader(
            train_dataset,
            batch_size=batch_size * 2,  # larger batches speed up embedding generation
            shuffle=False,  # order does not matter here
            collate_fn=collate_batch,
        )

        train_embeddings_list = []
        train_labels_list = []
        pbar_embed = tqdm(total=len(train_embed_loader.dataset), ncols=0, desc="Generating Train Embeddings", unit=" uttr")
        for batch in train_embed_loader:
            with paddle.no_grad():
                mels, labels = batch
                labels = paddle.squeeze(labels)
                outs = model(mels)  # embedding vectors
                train_embeddings_list.append(outs)
                train_labels_list.append(labels)
            pbar_embed.update(len(mels))
        pbar_embed.close()

        # Concatenate embeddings and labels from all batches.
        train_embeddings = paddle.concat(train_embeddings_list, axis=0)
        train_labels = paddle.concat(train_labels_list, axis=0)
        print(f"[Info][Metric Learning]: 训练集嵌入向量准备完毕 ({len(train_embeddings)} samples)。")

    # Make predictions and collect the CSV rows.
    results = [["Id", "Category"]]
    print("[Info]: 开始推理...")
    pbar_test = tqdm(total=len(test_loader), ncols=0, desc="Testing", unit=" uttr")
    for feat_paths, mels in test_loader:
        with paddle.no_grad():
            outs = model(mels)  # embedding (metric learning) or logits
            if do_metric_learning:
                # Distance of the test embedding to every training embedding;
                # shape (1, train_set_size) because the test batch size is 1.
                distances = paddle.cdist(outs, train_embeddings)
                # Label of the nearest training sample is the prediction.
                nearest_idx = paddle.argmin(distances, axis=1)
                preds = train_labels[nearest_idx].cpu().numpy()
            else:
                preds = outs.argmax(1).cpu().numpy()
            for feat_path, pred in zip(feat_paths, preds):
                # feat_path already ends in .npy; .get guards against ids
                # missing from the mapping.
                results.append([feat_path, mapping["id2speaker"].get(str(pred), "UnknownSpeaker")])
        pbar_test.update(1)
    pbar_test.close()

    # Ensure the output directory exists before writing.
    output_dir = os.path.dirname(output_path)
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir, exist_ok=True)

    with open(output_path, "w", newline="") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(results)
    print(f"[Info]: 推理完成，结果已保存至: {output_path}")

    return output_path

def evaluate_public_test(config, config_path=None):
    """Score the prediction CSV for the public test split.

    Compares the CSV at config["public_output_path"] (produced by
    ``test_model``) against the ground truth in config["public_meta_file"].

    Returns:
        Accuracy as a float in [0, 1]; 0.0 when no usable rows are found.

    Raises:
        FileNotFoundError: if the prediction CSV or the answer file is missing.
    """
    print(f"[Eval Info]: 使用配置: {config_path if config_path else '未提供路径'}")
    output_path = config["public_output_path"]
    meta_dir = config["meta_dir"]
    public_meta_file = config["public_meta_file"]

    # Fail fast when either required input file is absent.
    if not os.path.exists(output_path):
        print(f"[Error]: 预测结果文件未找到: {output_path}")
        print("请先运行测试模式 (--test 或 --all)。")
        raise FileNotFoundError(f"预测结果文件未找到: {output_path}")
    if not os.path.exists(public_meta_file):
        print(f"[Error]: 公共测试集元数据文件未找到: {public_meta_file}")
        raise FileNotFoundError(f"公共测试集元数据文件未找到: {public_meta_file}")

    print(f"[Info]: 正在从 '{public_meta_file}' 加载答案...")
    with open(public_meta_file) as answer_file:
        ground_truth = json.load(answer_file)
    # Keys use the .npy suffix because test_model writes .npy feature paths.
    answer_key = {
        utt["feature_path"].replace(".pt", ".npy"): utt["id"]
        for utt in ground_truth["utterances"]
    }

    print(f"[Info]: 正在从 '{output_path}' 读取预测结果并评估...")
    correct, wrong, evaluated = 0, 0, 0
    with open(output_path) as prediction_file:
        rows = csv.reader(prediction_file)
        header = next(rows)  # consume the header row
        if header != ['Id', 'Category']:
            print(f"[Warning]: CSV文件 '{output_path}' 的表头格式不符合预期 ('Id,Category')")
            # Continue anyway, assuming two columns per row.

        for row in rows:
            if len(row) != 2:
                print(f"[Warning]: 跳过格式错误的行: {row}")
                continue
            utt_id, predicted = row
            evaluated += 1
            if utt_id in answer_key:
                if answer_key[utt_id] == predicted:
                    correct += 1
                else:
                    wrong += 1
            else:
                # Predictions without a matching answer count as errors.
                print(f"[Warning]: 在答案文件中未找到预测ID: {utt_id}")
                wrong += 1

    if evaluated == 0:
        print("[Error]: 预测文件为空或格式错误，无法计算准确率。")
        return 0.0

    accuracy = correct / float(evaluated)
    print(
        f"公共测试集准确率: {accuracy:.4f} ({correct}/{evaluated})"
    )
    return accuracy

def generate_report(results_list, output_file):
    """Generate a Markdown report from a list of experiment result dicts.

    Each entry is expected to carry the keys written by
    ``save_experiment_result`` (config_file, timestamp, model_type, d_model,
    nhead, nlayers, do_metric_learning, validation_accuracy,
    public_test_accuracy); missing keys are rendered as 'N/A'.

    Bug fix: the input list is no longer sorted in place, so the caller's
    list order is preserved.

    Args:
        results_list: list of experiment result dicts (may be empty).
        output_file: path of the Markdown file to write.
    """
    if not results_list:
        print("[Warning]: 实验结果列表为空，无法生成报告。")
        # Still emit a stub report so downstream steps find the file.
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write("# 说话人分类实验结果报告\n\n无实验结果可报告。")
        return

    def _fmt(value):
        # Render numeric accuracies with 4 decimals; pass 'N/A' strings through.
        return value if isinstance(value, str) else f'{value:.4f}'

    def _avg(values):
        # Mean of a non-empty list, otherwise the literal 'N/A'.
        return sum(values) / len(values) if values else 'N/A'

    def _val_accs(rows):
        # Validation accuracies that are actually present.
        return [r.get('validation_accuracy') for r in rows
                if r.get('validation_accuracy') is not None]

    def _test_accs(rows):
        # Public-test accuracies that are actually present.
        return [r.get('public_test_accuracy') for r in rows
                if r.get('public_test_accuracy') is not None]

    # Partition results once; reused by several sections below.
    transformer_rows = [r for r in results_list if r.get('model_type') == 'transformer']
    conformer_rows = [r for r in results_list if r.get('model_type') == 'conformer']
    # Truthiness test (instead of `== True`) for consistency with the rest
    # of this function, which uses r.get('do_metric_learning', False).
    triplet_rows = [r for r in results_list if r.get('do_metric_learning', False)]
    normal_rows = [r for r in results_list if not r.get('do_metric_learning', False)]

    # 1. Basic header and introduction.
    markdown_string = "# 说话人分类实验结果报告\n\n"
    markdown_string += "## 实验概述\n\n"

    total_exps = len(results_list)
    val_acc_available_exps = sum(1 for r in results_list if 'validation_accuracy' in r)

    markdown_string += f"- 总实验次数: **{total_exps}**\n"
    markdown_string += f"- Transformer模型实验: **{len(transformer_rows)}**\n"
    markdown_string += f"- Conformer模型实验: **{len(conformer_rows)}**\n"
    markdown_string += f"- 使用Triplet Loss的实验: **{len(triplet_rows)}**\n"
    markdown_string += f"- 包含验证集准确率的实验: **{val_acc_available_exps}**\n\n"

    # 2. Full results table.
    markdown_string += "## 详细实验结果\n\n"
    markdown_string += "| 配置文件 | 时间戳 | 模型类型 | d_model | nhead | nlayers | 度量学习 | 验证集准确率 | 测试集准确率 |\n"
    markdown_string += "|:---|:---|:---|:---|:---|:---|:---|:---|:---|\n"

    # Sort a copy by timestamp so the caller's list is not mutated.
    for result in sorted(results_list, key=lambda x: x.get('timestamp', '')):
        config_file = result.get('config_file', 'N/A')
        timestamp = result.get('timestamp', 'N/A')
        if len(timestamp) > 19:  # truncate to just date and time
            timestamp = timestamp[:19]
        model_type = result.get('model_type', 'N/A')
        d_model = result.get('d_model', 'N/A')
        nhead = result.get('nhead', 'N/A')
        nlayers = result.get('nlayers', 'N/A')
        metric_learning = "是" if result.get('do_metric_learning', False) else "否"
        val_acc_value = result.get('validation_accuracy')
        val_acc = f"{val_acc_value:.4f}" if val_acc_value is not None else "N/A"
        test_acc_value = result.get('public_test_accuracy')
        test_acc = f"{test_acc_value:.4f}" if test_acc_value is not None else "N/A"
        markdown_string += f"| {config_file} | {timestamp} | {model_type} | {d_model} | {nhead} | {nlayers} | {metric_learning} | {val_acc} | {test_acc} |\n"

    # 3. Best results section.
    markdown_string += "\n## 最佳实验结果\n\n"

    results_with_val_acc = [r for r in results_list if r.get('validation_accuracy') is not None]
    if results_with_val_acc:
        best_val = max(results_with_val_acc, key=lambda x: x.get('validation_accuracy', 0))
        best_val_acc = best_val.get('validation_accuracy', 0)
        best_val_config = best_val.get('config_file', 'N/A')
        best_val_model = best_val.get('model_type', 'N/A')
        best_val_metric = "是" if best_val.get('do_metric_learning', False) else "否"
        markdown_string += f"- **最佳验证集准确率**: {best_val_acc:.4f} (配置: {best_val_config}, 模型: {best_val_model}, 度量学习: {best_val_metric})\n"
    else:
        markdown_string += "- **最佳验证集准确率**: N/A (无可用数据)\n"

    results_with_test_acc = [r for r in results_list if r.get('public_test_accuracy') is not None]
    if results_with_test_acc:
        best_test = max(results_with_test_acc, key=lambda x: x.get('public_test_accuracy', 0))
        best_test_acc = best_test.get('public_test_accuracy', 0)
        best_test_config = best_test.get('config_file', 'N/A')
        best_test_model = best_test.get('model_type', 'N/A')
        best_test_metric = "是" if best_test.get('do_metric_learning', False) else "否"
        markdown_string += f"- **最佳测试集准确率**: {best_test_acc:.4f} (配置: {best_test_config}, 模型: {best_test_model}, 度量学习: {best_test_metric})\n\n"
    else:
        markdown_string += "- **最佳测试集准确率**: N/A (无可用数据)\n\n"

    # 4. Comparison analysis: averages by model type and by training scheme.
    markdown_string += "## 模型比较\n\n"

    markdown_string += "### 按模型类型的平均性能\n\n"
    markdown_string += "| 模型类型 | 验证集准确率 | 测试集准确率 |\n"
    markdown_string += "|:---|:---|:---|\n"
    markdown_string += f"| Transformer | {_fmt(_avg(_val_accs(transformer_rows)))} | {_fmt(_avg(_test_accs(transformer_rows)))} |\n"
    markdown_string += f"| Conformer | {_fmt(_avg(_val_accs(conformer_rows)))} | {_fmt(_avg(_test_accs(conformer_rows)))} |\n\n"

    markdown_string += "### 度量学习的影响\n\n"
    markdown_string += "| 训练方式 | 验证集准确率 | 测试集准确率 |\n"
    markdown_string += "|:---|:---|:---|\n"
    markdown_string += f"| 交叉熵损失 | {_fmt(_avg(_val_accs(normal_rows)))} | {_fmt(_avg(_test_accs(normal_rows)))} |\n"
    markdown_string += f"| Triplet Loss | {_fmt(_avg(_val_accs(triplet_rows)))} | {_fmt(_avg(_test_accs(triplet_rows)))} |\n\n"

    # 5. Ablation experiment results table: best run per method.
    markdown_string += "## 消融实验结果\n\n"
    markdown_string += "| 方法 | 验证集准确率 | 测试集准确率 |\n"
    markdown_string += "|:---|:---|:---|\n"

    t_normal = [r for r in transformer_rows if not r.get('do_metric_learning', False) and r.get('public_test_accuracy') is not None]
    t_triplet = [r for r in transformer_rows if r.get('do_metric_learning', False) and r.get('public_test_accuracy') is not None]
    c_normal = [r for r in conformer_rows if not r.get('do_metric_learning', False) and r.get('public_test_accuracy') is not None]
    c_triplet = [r for r in conformer_rows if r.get('do_metric_learning', False) and r.get('public_test_accuracy') is not None]

    def get_best_accuracies(results):
        # Accuracy pair of the run with the highest test accuracy; 'N/A' if none.
        if not results:
            return 'N/A', 'N/A'
        best_test_run = max(results, key=lambda x: x.get('public_test_accuracy', 0))
        val_acc = best_test_run.get('validation_accuracy')
        test_acc = best_test_run.get('public_test_accuracy')
        val_str = f"{val_acc:.4f}" if val_acc is not None else 'N/A'
        test_str = f"{test_acc:.4f}" if test_acc is not None else 'N/A'
        return val_str, test_str

    # Row order matches the original report layout.
    for label, rows in (("Transformer (交叉熵损失)", t_normal),
                        ("Conformer (交叉熵损失)", c_normal),
                        ("Transformer (Triplet Loss)", t_triplet),
                        ("Conformer (Triplet Loss)", c_triplet)):
        val_str, test_str = get_best_accuracies(rows)
        markdown_string += f"| {label} | {val_str} | {test_str} |\n"

    # 6. Conclusion: which method achieved the best test accuracy overall.
    markdown_string += "\n## 结论\n\n"

    methods = [(t_normal, "Transformer (交叉熵损失)"),
               (c_normal, "Conformer (交叉熵损失)"),
               (t_triplet, "Transformer (Triplet Loss)"),
               (c_triplet, "Conformer (Triplet Loss)")]

    valid_methods = [(get_best_accuracies(rows)[1], name) for rows, name in methods if rows]
    valid_methods_with_scores = [(float(acc), name) for acc, name in valid_methods if acc != 'N/A']

    if valid_methods_with_scores:
        best_method_acc, best_method_name = max(valid_methods_with_scores, key=lambda x: x[0])

        markdown_string += f"根据实验结果，**{best_method_name}** 方法在测试集上获得了最高的准确率 **{best_method_acc:.4f}**。\n\n"

        # Optional insight lines keyed off the winning method's name.
        if best_method_name.startswith("Conformer"):
            markdown_string += "- Conformer模型可能通过结合CNN和Transformer的优势，表现出更好的性能。\n"
        if best_method_name.endswith("Triplet Loss)"):
            markdown_string += "- Triplet Loss可能通过直接优化特征空间中的距离关系，提高了模型的分类能力。\n"
    else:
        markdown_string += "当前实验数据不足或测试准确率缺失，无法得出明确的最佳方法结论。\n"

    markdown_string += "\n*报告生成时间: " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "*"

    with open(output_file, 'w', encoding='utf-8') as f:
        f.write(markdown_string)
    print(f"[Info]: 增强版报告已生成至 {output_file}")

def load_config(config_path):
    """Read a YAML configuration file and return its contents as a dict."""
    with open(config_path, 'r', encoding='utf-8') as handle:
        return yaml.safe_load(handle)

def save_experiment_result(result_data, results_file='experiment_results.json'):
    """Append one experiment result to a JSON results file.

    Loads the existing list from *results_file* (starting fresh when the
    file is missing, unreadable, or does not contain a list), appends
    *result_data*, and writes the list back.

    Returns:
        The updated list of results, or None when writing failed.
    """
    existing = []
    if os.path.exists(results_file):
        try:
            with open(results_file, 'r', encoding='utf-8') as handle:
                existing = json.load(handle)
        except json.JSONDecodeError:
            print(f"Warning: Could not decode JSON from '{results_file}'. Initializing new results.")
            existing = []
        except Exception as e:
            print(f"Warning: Error loading results file '{results_file}': {e}. Initializing new results.")
            existing = []
        else:
            if not isinstance(existing, list):
                print(f"Warning: Existing results file '{results_file}' does not contain a list. Initializing new results.")
                existing = []

    existing.append(result_data)

    # Persist the updated list; report failure with None.
    try:
        with open(results_file, 'w', encoding='utf-8') as handle:
            json.dump(existing, handle, indent=4)
    except Exception as e:
        print(f"[Error]: 保存实验结果到 '{results_file}' 时出错: {e}")
        return None
    print(f"[Info]: 实验结果已更新至 {results_file}")
    return existing

def main():
    """Command-line entry point: train, resume, test, evaluate, or report.

    Exactly one mode flag is required. All modes except --report take a YAML
    config path as the flag's value.
    """
    parser = argparse.ArgumentParser(description="说话人分类训练、测试、评估和报告脚本")

    # Operating modes are mutually exclusive and exactly one is required.
    mode_group = parser.add_mutually_exclusive_group(required=True)
    mode_group.add_argument('--train', type=str, metavar='<config>.yaml', help='根据配置文件开始新的训练')
    mode_group.add_argument('--resume', type=str, metavar='<config>.yaml', help='根据配置文件从checkpoint恢复训练')
    mode_group.add_argument('--test', type=str, metavar='<config>.yaml', help='根据配置文件加载模型并在公共测试集上测试，保存结果到CSV')
    mode_group.add_argument('--eva', type=str, metavar='<config>.yaml', help='根据配置文件加载模型，评估并将结果更新到 experiment_results.json')
    mode_group.add_argument('--all', type=str, metavar='<config>.yaml', help='顺序执行 train, test, eva, report 全流程')
    mode_group.add_argument('--report', action='store_true', help='从 experiment_results.json 生成报告 (独立于其他模式)')

    args = parser.parse_args()

    results_file = 'experiment_results.json'
    report_file = 'report.md'

    # --- Report generation (standalone; needs no config file) ---
    if args.report:
        if os.path.exists(results_file):
            try:
                with open(results_file, 'r', encoding='utf-8') as f:
                    results_list = json.load(f)
                if not isinstance(results_list, list):
                    print(f"[Warning]: 实验结果文件 {results_file} 不包含列表。")
                    results_list = []
                print(f"[Info]: 从 {results_file} 加载了 {len(results_list)} 条实验结果用于生成报告")
                generate_report(results_list, report_file)
                # generate_report prints its own success message.
            except Exception as e:
                print(f"[Error]: 处理报告生成时出错: {e}")
        else:
            print(f"[Error]: 未找到实验结果文件 {results_file}，无法生成报告")
        return  # report mode ends here

    # --- Every other mode requires a config file ---
    config_path = args.train or args.resume or args.test or args.eva or args.all
    if not config_path:
        # Unreachable in practice: the group is required and --report returned above.
        print("[Error]: 缺少配置文件参数。")
        parser.print_help()
        return

    if not os.path.exists(config_path):
        print(f"[Error]: 配置文件不存在: {config_path}")
        return

    # Load the configuration.
    try:
        config = load_config(config_path)
        print(f"[Info]: 已加载配置: {config_path}")
    except Exception as e:
        print(f"[Error]: 加载配置文件时出错 {config_path}: {e}")
        return

    # --- Dispatch on the chosen mode ---

    best_validation_accuracy = None  # set by --train, --resume, --all
    public_test_accuracy = None      # set by --eva, --all

    # Training mode.
    if args.train:
        print("--- 开始新的训练 ---")
        best_validation_accuracy = train_model(config, config_path=config_path, resume=False)
        print("--- 训练完成 ---")
        # Evaluation/testing can optionally follow here; currently train only.

    # Resume-training mode.
    elif args.resume:
        print("--- 从检查点恢复训练 ---")
        best_validation_accuracy = train_model(config, config_path=config_path, resume=True)
        print("--- 恢复训练完成 ---")

    # Testing mode.
    elif args.test:
        print("--- 开始在公共测试集上测试模型 ---")
        # Bug fix: --test targets the PUBLIC test set (see the help text and
        # the --all flow below), so is_private must be False; it was
        # mistakenly True, silently testing the private set instead.
        output_csv_path = test_model(config, config_path=config_path, is_private=False)
        print(f"--- 测试完成，结果已保存至: {output_csv_path} ---")
        # To run the private set instead: test_model(config, config_path=config_path, is_private=True)

    # Evaluation mode.
    elif args.eva:
        print("--- 开始评估模型（公共测试集） ---")
        public_test_accuracy = evaluate_public_test(config, config_path=config_path)
        print("--- 评估完成 ---")

        # Result record without a validation accuracy (no training was run).
        current_result = {
            "config_file": config_path,
            "timestamp": datetime.datetime.now().isoformat(),
            "model_type": config.get('model_type', 'Unknown'),
            "d_model": config.get('d_model', 'N/A'),
            "nhead": config.get('nhead', 'N/A'),
            "nlayers": config.get('nlayers', 'N/A'),
            "do_metric_learning": config.get('do_metric_learning', False),
            # "validation_accuracy" intentionally omitted
            "public_test_accuracy": public_test_accuracy,
            "use_positional_encoding": config.get('use_positional_encoding', False)
        }
        save_experiment_result(current_result, results_file)
        # Print a summary of this evaluation.
        print("\n本次评估结果:")
        print("==========================")
        print(f"Config File: {config_path}")
        print(f"Public test accuracy: {public_test_accuracy:.4f}")
        print("==========================")

    # Full-pipeline mode.
    elif args.all:
        print("--- 开始执行全流程 (train -> test -> eva -> report) ---")
        all_results_list = []  # collected for the final report

        # 1. Train.
        print("\n[全流程 Step 1/4]: 开始新的训练...")
        try:
            best_validation_accuracy = train_model(config, config_path=config_path, resume=False)
            print(f"[全流程 Step 1/4]: 训练完成。最佳验证集准确率: {best_validation_accuracy:.4f}")
        except Exception as e:
            print(f"[Error][全流程 Step 1/4]: 训练过程中发生错误: {e}")
            print("--- 全流程提前终止 ---")
            return

        # 2. Test on the public set.
        print("\n[全流程 Step 2/4]: 开始在公共测试集上测试...")
        try:
            output_csv_path = test_model(config, config_path=config_path, is_private=False)
            print(f"[全流程 Step 2/4]: 测试完成。结果已保存至: {output_csv_path}")
        except Exception as e:
            print(f"[Error][全流程 Step 2/4]: 测试过程中发生错误: {e}")
            print("--- 全流程提前终止 ---")
            return

        # 3. Evaluate on the public set and persist the result.
        print("\n[全流程 Step 3/4]: 开始评估模型（公共测试集）并保存结果...")
        try:
            public_test_accuracy = evaluate_public_test(config, config_path=config_path)
            print(f"[全流程 Step 3/4]: 评估完成。公共测试集准确率: {public_test_accuracy:.4f}")

            # Full-pipeline record includes both validation and test accuracy.
            current_result = {
                "config_file": config_path,
                "timestamp": datetime.datetime.now().isoformat(),
                "model_type": config.get('model_type', 'Unknown'),
                "d_model": config.get('d_model', 'N/A'),
                "nhead": config.get('nhead', 'N/A'),
                "nlayers": config.get('nlayers', 'N/A'),
                "do_metric_learning": config.get('do_metric_learning', False),
                "validation_accuracy": best_validation_accuracy,
                "public_test_accuracy": public_test_accuracy
            }
            updated_results = save_experiment_result(current_result, results_file)
            if updated_results is None:
                print("[Error][全流程 Step 3/4]: 保存实验结果失败。")
                # Even on save failure, try to report from prior results.
                if os.path.exists(results_file):
                    with open(results_file, 'r', encoding='utf-8') as f_read:
                        all_results_list = json.load(f_read)
            else:
                all_results_list = updated_results

        except Exception as e:
            print(f"[Error][全流程 Step 3/4]: 评估或保存结果过程中发生错误: {e}")
            # Fall back to whatever results already exist on disk.
            if os.path.exists(results_file):
                try:
                    with open(results_file, 'r', encoding='utf-8') as f_read:
                        all_results_list = json.load(f_read)
                except Exception as read_e:
                    print(f"[Error]: 读取现有结果文件也失败: {read_e}")
                    all_results_list = []  # report will be empty
            else:
                all_results_list = []

        # 4. Report.
        print("\n[全流程 Step 4/4]: 生成实验报告...")
        if all_results_list:
            try:
                generate_report(all_results_list, report_file)
                # generate_report prints its own success message.
            except Exception as e:
                print(f"[Error][全流程 Step 4/4]: 生成报告时发生错误: {e}")
        else:
            print("[Warning][全流程 Step 4/4]: 没有可用的实验结果来生成报告。")

        print("--- 全流程执行完毕 ---")

    else:
        # Unreachable in practice (argparse enforces exactly one mode).
        print("[Error]: 未知的操作模式或内部逻辑错误。")
        parser.print_help()

def plot_position_encoding(pos_encoder, filename=None, max_positions=100):
    """Visualize positional-encoding parameters as a heat map.

    Args:
        pos_encoder: PositionalEncoding instance exposing a ``pe`` tensor
            (assumed layout: positions x dimensions — TODO confirm against
            the PositionalEncoding definition).
        filename: path to save the image to; when None the figure is shown
            interactively instead.
        max_positions: number of leading positions to plot.
    """
    # Font fallbacks so the Chinese axis labels render correctly.
    plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans', 'Arial Unicode MS', 'Arial']
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs properly
    plt.figure(figsize=(12, 8))

    # Positional-encoding table as a numpy array.
    pe = pos_encoder.pe.numpy()

    # Limit the number of positions for a readable plot.
    pe = pe[:max_positions, :]

    # Heat map of encoding values.
    plt.imshow(pe, aspect='auto', cmap='viridis')
    plt.colorbar(label='编码值')
    plt.xlabel('维度')
    plt.ylabel('位置')
    plt.title('位置编码参数可视化')

    # Save or show the figure.
    if filename:
        plt.savefig(filename)
        # Bug fix: report the actual output path (the message previously
        # printed a literal "(unknown)" placeholder instead of the filename).
        print(f"位置编码图已保存至: {filename}")
    else:
        plt.show()

    plt.close()

# Script entry point: parse command-line arguments and run the chosen mode.
if __name__ == "__main__":
    main()
