import argparse
import yaml
import json
import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm

import paddle
from paddle.io import DataLoader
import paddle.nn as nn
from paddle.optimizer import AdamW
import paddle.vision.transforms as T
from paddle.vision.datasets import Cifar10

# Matplotlib configuration so Chinese (CJK) text renders in plot titles/labels.
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans', 'Arial Unicode MS', 'Arial']  # candidate fonts, tried in order
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with non-ASCII fonts

# --- Configuration helpers ---
def load_config(config_path):
    """Read a YAML configuration file and return its contents as a dict."""
    with open(config_path, 'r', encoding='utf-8') as cfg_file:
        return yaml.safe_load(cfg_file)

def save_config(config, config_path):
    """Write *config* to *config_path* as UTF-8 YAML, keeping non-ASCII text readable."""
    with open(config_path, 'w', encoding='utf-8') as cfg_file:
        yaml.dump(config, cfg_file, allow_unicode=True)

# --- Dataset and dataloaders ---
def get_dataloaders(config):
    """Build CIFAR-10 train/test DataLoaders described by *config*.

    Reads ``img_size`` (default 32), ``batch_size``, optional ``data_path``
    and ``num_workers`` (default 2).  When ``data_path`` is given, the
    dataset is read from that file and downloading is disabled; otherwise
    the dataset is downloaded on demand.
    """
    img_size = config.get('img_size', 32)
    batch_size = config['batch_size']
    data_path = config.get('data_path', None)
    num_workers = config.get('num_workers', 2)

    transform = T.Compose([
        T.Resize((img_size, img_size)),
        T.ToTensor(),
        T.Normalize(
            mean=[0.4914, 0.4822, 0.4465],
            std=[0.2023, 0.1994, 0.2010],
            to_rgb=True,
        ),
    ])

    # Keyword arguments shared by the train and test splits; download only
    # when no local data file was supplied.
    dataset_kwargs = {
        "transform": transform,
        "backend": 'cv2',
    }
    if data_path:
        dataset_kwargs["data_file"] = data_path
        dataset_kwargs["download"] = False
    else:
        dataset_kwargs["download"] = True

    cifar10_train = Cifar10(mode="train", **dataset_kwargs)
    cifar10_test = Cifar10(mode="test", **dataset_kwargs)

    train_loader = DataLoader(cifar10_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    test_loader = DataLoader(cifar10_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_loader, test_loader

# --- Model definitions ---
class Encoder(nn.Layer):
    """Five-layer strided-conv encoder.

    Each stride-2 convolution halves the spatial resolution, so a 32x32
    input ends up as a (out_dim, 1, 1) feature map.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        widths = [in_dim, 64, 128, 256, 512]
        stages = []
        for c_in, c_out in zip(widths[:-1], widths[1:]):
            stages += [
                nn.Conv2D(c_in, c_out, kernel_size=4, stride=2, padding=1),
                nn.BatchNorm2D(c_out),
                nn.ReLU(),
            ]
        # Final projection to the latent dimension: no norm or activation.
        stages.append(nn.Conv2D(512, out_dim, kernel_size=4, stride=2, padding=1))
        self.layers = nn.Sequential(*stages)

    def forward(self, images):
        return self.layers(images)

class ResidualEncoder(nn.Layer):
    """ResNet-style encoder: a stem conv, four residual stages each followed
    by 2x max-pool downsampling, then a conv + adaptive average pool that
    produces a (out_dim, 1, 1) feature map."""

    @staticmethod
    def _residual_branch(c_in, c_out):
        """conv-bn-relu-conv-bn main branch of a residual stage."""
        return nn.Sequential(
            nn.Conv2D(c_in, c_out, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2D(c_out),
            nn.ReLU(),
            nn.Conv2D(c_out, c_out, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2D(c_out),
        )

    @staticmethod
    def _projection_shortcut(c_in, c_out):
        """1x1-conv shortcut used when the channel count changes."""
        return nn.Sequential(
            nn.Conv2D(c_in, c_out, kernel_size=1, stride=1),
            nn.BatchNorm2D(c_out),
        )

    def __init__(self, in_dim, out_dim):
        super().__init__()

        # Stem block: no residual connection because the input channel count
        # differs from the stage width.
        self.conv1 = nn.Sequential(
            nn.Conv2D(in_dim, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2D(64),
            nn.ReLU(),
        )

        # Stage 1: identity shortcut (64 -> 64).
        self.res_block1 = self._residual_branch(64, 64)
        self.downsample1 = nn.MaxPool2D(kernel_size=2, stride=2)

        # Stages 2-4 widen the channels, so each needs a projection shortcut.
        # NOTE: attribute names and creation order are kept identical to the
        # original so saved state_dicts remain loadable.
        self.shortcut2 = self._projection_shortcut(64, 128)
        self.res_block2 = self._residual_branch(64, 128)
        self.downsample2 = nn.MaxPool2D(kernel_size=2, stride=2)

        self.shortcut3 = self._projection_shortcut(128, 256)
        self.res_block3 = self._residual_branch(128, 256)
        self.downsample3 = nn.MaxPool2D(kernel_size=2, stride=2)

        self.shortcut4 = self._projection_shortcut(256, 512)
        self.res_block4 = self._residual_branch(256, 512)
        self.downsample4 = nn.MaxPool2D(kernel_size=2, stride=2)

        # Latent projection followed by adaptive pooling to a 1x1 map.
        self.final_conv = nn.Conv2D(512, out_dim, kernel_size=3, stride=1, padding=1)
        self.pool = nn.AdaptiveAvgPool2D((1, 1))

        self.relu = nn.ReLU()  # applied after every residual addition

    def forward(self, x):
        x = self.conv1(x)

        # Stage 1 uses the input itself as the shortcut.
        x = self.downsample1(self.relu(x + self.res_block1(x)))

        # Stages 2-4 project the shortcut to the wider channel count.
        for shortcut, branch, downsample in (
            (self.shortcut2, self.res_block2, self.downsample2),
            (self.shortcut3, self.res_block3, self.downsample3),
            (self.shortcut4, self.res_block4, self.downsample4),
        ):
            x = downsample(self.relu(branch(x) + shortcut(x)))

        return self.pool(self.final_conv(x))

class Decoder(nn.Layer):
    """Transposed-conv decoder.

    Expands a (in_dim, 1, 1) latent back to an image with *out_dim*
    channels, doubling the spatial resolution at each stage and squashing
    the output into [-1, 1] with tanh.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        widths = [512, 256, 128, 64]
        stages = [nn.BatchNorm2D(in_dim), nn.ReLU()]
        prev = in_dim
        for w in widths:
            stages += [
                nn.Conv2DTranspose(prev, w, kernel_size=4, stride=2, padding=1),
                nn.BatchNorm2D(w),
                nn.ReLU(),
            ]
            prev = w
        # Final upsampling stage maps to the image channel count.
        stages += [
            nn.Conv2DTranspose(64, out_dim, kernel_size=4, stride=2, padding=1),
            nn.Tanh(),
        ]
        self.layers = nn.Sequential(*stages)

    def forward(self, latent):
        return self.layers(latent)

class Classifier(nn.Layer):
    """Linear classification head over a (N, in_dim, 1, 1) feature map.

    Flattens to (N, in_dim), normalizes with BatchNorm1D, and applies a
    single linear layer producing *out_dim* logits.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Flatten(),
            nn.BatchNorm1D(in_dim),
            nn.Linear(in_dim, out_dim),
        )

    def forward(self, latent):
        return self.layers(latent)

###########################################################################
# PureClassifier: supervised baseline combining the Encoder and Classifier
# defined above; `forward` returns the predicted class logits.
###########################################################################
class PureClassifier(nn.Layer):
    """Plain supervised baseline: Encoder followed by a linear Classifier."""

    def __init__(self, in_dim, hidden_dim, out_dim):
        super().__init__()
        self.encoder = Encoder(in_dim, hidden_dim)
        self.classifier = Classifier(hidden_dim, out_dim)

    def forward(self, images):
        # Encode to a (N, hidden_dim, 1, 1) latent, then emit class logits.
        return self.classifier(self.encoder(images))
    

class ContextEncoder(nn.Layer):
    """Context-encoder self-supervised model.

    Shares one Encoder between two heads: a Decoder that reconstructs the
    image from a partially masked input, and a Classifier for the
    downstream task.
    """

    def __init__(self, in_dim, hidden_dim, num_classes):
        super().__init__()
        self.encoder = Encoder(in_dim, hidden_dim)
        self.decoder = Decoder(hidden_dim, in_dim)  # decodes back to image channels
        self.classifier = Classifier(hidden_dim, num_classes)

    def forward(self, images, task="classification"):
        if task == "reconstruction":
            # `images` is the masked context (1 - M_hat) * x; the encoder sees
            # only the context and the decoder predicts the full image.
            return self.decoder(self.encoder(images))
        if task == "classification":
            # `images` is the original, unmasked input.
            return self.classifier(self.encoder(images))
        raise ValueError(f"Unknown task: {task}")

    def mask_images(self, images, mask_ratio=0.3):
        """Zero out one random square per image covering ~mask_ratio of its area.

        Returns:
            (masked_images, m_hat): `m_hat` is 1 inside the masked square (the
            region to reconstruct) and 0 over the visible context; both have
            the same (B, C, H, W) shape as *images*.
        """
        b, c, h, w = images.shape

        # Side length of a square whose area is mask_ratio * H * W,
        # clamped into the valid range [1, min(H, W)].
        target_area = int(h * w * mask_ratio)
        side = int(paddle.sqrt(paddle.to_tensor(target_area, dtype='float32')).item())
        side = min(max(1, side), min(h, w))

        m_hat = paddle.zeros_like(images)
        for i in range(b):
            # Independent random placement of the square for each sample.
            top = paddle.randint(0, h - side + 1, shape=[1]).item()
            left = paddle.randint(0, w - side + 1, shape=[1]).item()
            m_hat[i, :, top:top + side, left:left + side] = 1.0

        # Visible context: original pixels outside the mask, zeros inside.
        masked_images = images * (1.0 - m_hat)

        return masked_images, m_hat


# --- Validation ---
def validation(model, data_loader, task="classification"):
    """Evaluate top-1 classification accuracy of *model* on *data_loader*.

    Switches the model to eval mode for the pass and restores train mode
    before returning.  Returns the accuracy as a plain Python float.
    """
    model.eval()
    correct = 0
    total = 0

    with paddle.no_grad():
        for batch in data_loader:
            images, labels = batch

            if isinstance(model, (ContextEncoder, RotationPrediction)):
                # Models that accept a task argument always get the requested task.
                logits = model(images, task=task)
            elif task == "classification":
                logits = model(images)
            elif hasattr(model, 'forward_classification'):
                logits = model(images, task="classification")
            else:
                logits = model(images)

            _, predicted = paddle.topk(logits, k=1)
            total += labels.shape[0]
            # .item() keeps `correct` a Python int.  The original used
            # .sum().numpy(), which made `correct` (and the returned
            # accuracy) a NumPy array — breaking json.dump in test_model
            # and numeric formatting downstream.
            correct += (predicted.flatten() == labels).sum().item()

    accuracy = correct / total
    model.train()
    return accuracy

# --- Plotting ---
def plot_curves(history, save_path, model_name):
    """Plot loss and accuracy curves from *history* and save them as a PNG.

    *history* holds per-epoch lists under 'train_loss' and, optionally,
    'val_loss' / 'val_accuracy'.  The figure is written to
    ``<save_path>/<model_name>_training_curves.png``.
    """
    epochs = range(1, len(history['train_loss']) + 1)

    plt.figure(figsize=(12, 5))

    # Left panel: training loss, plus validation loss when recorded.
    plt.subplot(1, 2, 1)
    plt.plot(epochs, history['train_loss'], 'bo-', label='训练损失')
    if history.get('val_loss'):  # pretraining-only runs may lack val_loss
        plt.plot(epochs, history['val_loss'], 'ro-', label='验证损失')
    plt.title(f'{model_name} - 损失曲线')
    plt.xlabel('轮次')
    plt.ylabel('损失')
    plt.legend()
    plt.grid(True)

    # Right panel: validation accuracy, or a placeholder message when the
    # run (e.g. pretraining) produced none.
    plt.subplot(1, 2, 2)
    if history.get('val_accuracy'):
        plt.plot(epochs, history['val_accuracy'], 'go-', label='验证准确率')
        plt.title(f'{model_name} - 验证准确率曲线')
        plt.xlabel('轮次')
        plt.ylabel('准确率')
        plt.legend()
        plt.grid(True)
    else:
        plt.text(0.5, 0.5, '无验证准确率数据', horizontalalignment='center', verticalalignment='center', transform=plt.gca().transAxes)

    plt.tight_layout()
    plot_filename = os.path.join(save_path, f"{model_name}_training_curves.png")
    plt.savefig(plot_filename)
    plt.close()
    print(f"训练曲线图已保存到: {plot_filename}")


# --- Training ---
def train_model(model, train_loader, test_loader, config, model_name, start_epoch=0):
    """Supervised training loop with per-epoch validation and checkpointing.

    Trains for config['epochs'] epochs with AdamW + cross-entropy, validates
    after each epoch, keeps the best-accuracy checkpoint (plus its optimizer
    state) under config['save_path'], and plots the curves at the end.

    Returns the best validation accuracy observed.
    """
    epochs = config['epochs']
    lr = config['learning_rate']
    save_path = config.get('save_path', 'checkpoints')
    os.makedirs(save_path, exist_ok=True)

    optimizer = AdamW(parameters=model.parameters(), learning_rate=lr)
    criterion = nn.CrossEntropyLoss()

    best_accuracy = 0.0
    history = {'train_loss': [], 'val_accuracy': []}

    for epoch in range(start_epoch, epochs):
        model.train()
        epoch_loss = 0
        progress_bar = tqdm(train_loader, desc=f"Epoch {epoch+1}/{epochs} [训练]")
        for batch_id, (images, labels) in enumerate(progress_bar):
            optimizer.clear_grad()
            logits = model(images) # Assuming simple classification for now
            loss = criterion(logits, labels)
            loss.backward()
            optimizer.step()
            
            epoch_loss += loss.item()
            progress_bar.set_postfix(loss=loss.item())

        avg_epoch_loss = epoch_loss / len(train_loader)
        history['train_loss'].append(avg_epoch_loss)
        
        accuracy = validation(model, test_loader)
        history['val_accuracy'].append(accuracy)
        
        print(f"Epoch {epoch+1}/{epochs}, 平均训练损失: {avg_epoch_loss:.4f}, 验证准确率: {accuracy:.4f}")

        if accuracy > best_accuracy:
            best_accuracy = accuracy
            checkpoint_path = os.path.join(save_path, f"{model_name}_best.pdparams")
            paddle.save(model.state_dict(), checkpoint_path)
            paddle.save(optimizer.state_dict(), os.path.join(save_path, f"{model_name}_optimizer_best.pdopt"))
            print(f"最佳模型已保存到: {checkpoint_path} (准确率: {best_accuracy:.4f})")
        
        if epoch % 10 == 0:
            # Periodic checkpoint every 10 epochs (NOTE: the filename uses the
            # 1-based epoch number, so epoch 0 is saved as "_epoch_1").
            epoch_checkpoint_path = os.path.join(save_path, f"{model_name}_epoch_{epoch+1}.pdparams")
            paddle.save(model.state_dict(), epoch_checkpoint_path)
            paddle.save(optimizer.state_dict(), os.path.join(save_path, f"{model_name}_optimizer_epoch_{epoch+1}.pdopt"))

    print(f"训练完成。最佳验证准确率: {best_accuracy:.4f}")
    plot_curves(history, save_path, model_name)
    return best_accuracy

# --- Testing ---
def test_model(model, test_loader, config, model_name):
    """Load the best (or last-epoch) checkpoint for *model_name*, evaluate
    it on *test_loader*, and record the accuracy in the results JSON file.

    Returns the test accuracy, or 0.0 when no checkpoint could be found.
    """
    save_path = config.get('save_path', 'checkpoints')
    model_path = os.path.join(save_path, f"{model_name}_best.pdparams")
    if not os.path.exists(model_path):
        # Fall back to the last-epoch checkpoint (e.g. after a resumed run
        # finished without improving on the best accuracy).
        epochs = config['epochs']
        model_path = os.path.join(save_path, f"{model_name}_epoch_{epochs}.pdparams")
        if not os.path.exists(model_path):
            print(f"错误: 未找到模型权重文件: {model_path} 或 ..._best.pdparams")
            return 0.0

    model_state_dict = paddle.load(model_path)
    model.set_state_dict(model_state_dict)
    print(f"模型权重已从 {model_path} 加载")

    accuracy = validation(model, test_loader)
    print(f"模型 {model_name} 在测试集上的准确率: {accuracy:.4f}")

    results_file = config.get('results_file', 'experiment_results.json')
    try:
        with open(results_file, 'r') as f:
            results = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        # Missing or corrupt results file: start a fresh results dict
        # (the original crashed on a corrupt file).
        results = {}

    # float() guards against NumPy scalars/arrays from validation(), which
    # json.dump cannot serialize.
    results[model_name] = float(accuracy)
    with open(results_file, 'w') as f:
        json.dump(results, f, indent=4)
    print(f"测试结果已保存到: {results_file}")
    return accuracy

# --- Report generation ---
def generate_report(config):
    """Render the accuracies in the results JSON as a markdown table.

    Reads config['results_file'] (default 'experiment_results.json') and
    writes config['report_file'] (default 'report.md').  Rows without a
    recorded accuracy get an empty table cell.
    """
    results_file = config.get('results_file', 'experiment_results.json')
    report_file = config.get('report_file', 'report.md')

    try:
        with open(results_file, 'r', encoding='utf-8') as f:
            results = json.load(f)
    except FileNotFoundError:
        print(f"错误: 结果文件 {results_file} 未找到。请先运行测试。")
        return

    # Display order of the report rows.
    method_order = [
        "Classification w/o Self-Supervised Learning",
        "Classification with Context Encoder",
        "Classification with Rotation Prediction",
        "Classification with SimCLR",
        "Classification with Context Encoder Fine-Tune All",
        "Classification with Rotation Prediction Fine-Tune All",
        "Classification with SimCLR Fine-Tune All"
    ]

    # Internal checkpoint name -> human-readable report row.
    model_name_map = {
        "PureClassifier": "Classification w/o Self-Supervised Learning",
        "ContextEncoder_classifier": "Classification with Context Encoder",
        "ContextEncoderFineTuneAll_classifier": "Classification with Context Encoder Fine-Tune All",
        "RotationPrediction_classifier": "Classification with Rotation Prediction",
        "RotationPredictionFineTuneAll_classifier": "Classification with Rotation Prediction Fine-Tune All",
        "SimCLR_classifier": "Classification with SimCLR",
        "SimCLRFineTuneAll_classifier": "Classification with SimCLR Fine-Tune All"
    }

    lines = ["# 实验结果报告\n\n", "| 方法 | 测试准确率 |\n", "|---|---|\n"]

    for report_name in method_order:
        found = False
        row_value = None
        # Prefer a result stored under an internal name mapped to this row.
        for internal_key, mapped_name in model_name_map.items():
            if mapped_name == report_name and internal_key in results:
                row_value = results[internal_key]
                found = True
                break
        # Fall back to a result stored directly under the report name.
        if not found and report_name in results:
            row_value = results[report_name]
            found = True

        if found:
            lines.append(f"| {report_name} | {row_value:.4f} |\n")
        else:
            lines.append(f"| {report_name} |  |\n")

    with open(report_file, 'w', encoding='utf-8') as f:
        f.write("".join(lines))
    print(f"报告已生成: {report_file}")

# --- Helper function for ContextEncoder training ---
def train_context_encoder_model(model, train_loader, test_loader_for_eval, config, base_model_name):
    """Two-phase ContextEncoder training.

    Phase 1 pretrains on masked-region reconstruction (summed MSE over the
    masked pixels, normalized per masked element); phase 2 fine-tunes for
    classification, optionally with the pretrained encoder frozen
    (config['frozen_pretrain']).  Checkpoints go under config['save_path'].

    Returns the best fine-tuning validation accuracy.
    """
    pretrain_epochs = config.get('pretrain_epochs', 10)
    mask_ratio = config.get('mask_ratio', 0.3)
    lr = config['learning_rate']
    save_path = config.get('save_path', 'checkpoints')
    frozen_pretrain = config.get('frozen_pretrain', False)  # freeze encoder during fine-tuning; default False
    os.makedirs(save_path, exist_ok=True)

    # Naming conventions
    pretrained_model_file_name = f"{base_model_name}_pretrained"
    classifier_model_file_name = f"{base_model_name}_classifier"

    # Phase 1: Pretraining (Reconstruction)
    print(f"--- 开始 ContextEncoder 预训练 ({pretrained_model_file_name}) ---")
    pretrain_optimizer = AdamW(parameters=model.parameters(), learning_rate=lr)
    # Summed L2 (MSE) criterion for reconstruction; divided by the masked
    # element count below so the loss is a per-masked-pixel average.
    reconstruction_criterion = nn.MSELoss(reduction='sum') 

    pretrain_history = {'pretrain_loss': []}

    for epoch in range(pretrain_epochs):
        model.train()
        epoch_loss = 0
        progress_bar = tqdm(train_loader, desc=f"Pretrain Epoch {epoch+1}/{pretrain_epochs}")
        for images, _ in progress_bar:
            pretrain_optimizer.clear_grad()
            input_to_F, m_hat = model.mask_images(images, mask_ratio)
            reconstructed_output = model(input_to_F, task="reconstruction")
            
            # Squared-error sum restricted to the masked region:
            #   target inside the mask:         images * m_hat
            #   reconstruction inside the mask: reconstructed_output * m_hat
            loss_sum_sq = reconstruction_criterion(reconstructed_output * m_hat, images * m_hat)
            
            num_masked_elements = paddle.sum(m_hat)
            
            # Skip the update entirely if nothing was masked (degenerate case).
            if num_masked_elements.item() > 0:
                reconstruction_loss = loss_sum_sq / num_masked_elements
                reconstruction_loss.backward()
                pretrain_optimizer.step()
                epoch_loss += reconstruction_loss.item()
                progress_bar.set_postfix(loss=reconstruction_loss.item())
        
        avg_epoch_loss = epoch_loss / len(train_loader)
        pretrain_history['pretrain_loss'].append(avg_epoch_loss)
        print(f"Pretrain Epoch {epoch+1}/{pretrain_epochs}, 平均重建损失: {avg_epoch_loss:.4f}")

    pretrained_checkpoint_path = os.path.join(save_path, f"{pretrained_model_file_name}.pdparams")
    paddle.save(model.state_dict(), pretrained_checkpoint_path)
    print(f"预训练模型已保存到: {pretrained_checkpoint_path}")
    # plot_curves(pretrain_history, save_path, pretrained_model_file_name) # Optional: plot pretrain loss
    print(f"--- ContextEncoder 预训练完成 ({pretrained_model_file_name}) ---")

    # Phase 2: Fine-tuning (Classification)
    print(f"--- 开始 ContextEncoder 分类微调 ({classifier_model_file_name}) ---")
    
    # When frozen_pretrain is set, freeze the pretrained encoder parameters.
    if frozen_pretrain:
        print("冻结预训练encoder参数，只训练分类器")
        for param in model.encoder.parameters():
            param.stop_gradient = True
        # Optimize the classifier head only.
        finetune_optimizer = AdamW(parameters=model.classifier.parameters(), learning_rate=lr)
    else:
        # Optimize all parameters.
        finetune_optimizer = AdamW(parameters=model.parameters(), learning_rate=lr)
    
    classification_criterion = nn.CrossEntropyLoss()
    
    best_accuracy = 0.0
    finetune_epochs = config['epochs'] 
    history = {'train_loss': [], 'val_accuracy': []}

    for epoch in range(finetune_epochs):
        model.train()
        epoch_class_loss = 0
        progress_bar_ft = tqdm(train_loader, desc=f"Finetune Epoch {epoch+1}/{finetune_epochs} [训练]")
        for images, labels in progress_bar_ft:
            finetune_optimizer.clear_grad()
            logits = model(images, task="classification")
            loss = classification_criterion(logits, labels)
            loss.backward()
            finetune_optimizer.step()
            epoch_class_loss += loss.item()
            progress_bar_ft.set_postfix(loss=loss.item())
        
        avg_epoch_class_loss = epoch_class_loss / len(train_loader)
        history['train_loss'].append(avg_epoch_class_loss)
        
        accuracy = validation(model, test_loader_for_eval)
        history['val_accuracy'].append(accuracy)
        
        print(f"Finetune Epoch {epoch+1}/{finetune_epochs}, 平均训练损失: {avg_epoch_class_loss:.4f}, 验证准确率: {accuracy:.4f}")

        if accuracy > best_accuracy:
            best_accuracy = accuracy
            checkpoint_path = os.path.join(save_path, f"{classifier_model_file_name}_best.pdparams")
            paddle.save(model.state_dict(), checkpoint_path)
            paddle.save(finetune_optimizer.state_dict(), os.path.join(save_path, f"{classifier_model_file_name}_optimizer_best.pdopt"))
            print(f"最佳分类模型已保存到: {checkpoint_path} (准确率: {best_accuracy:.4f})")

        if epoch % 10 == 0:
            # Periodic checkpoint every 10 epochs (1-based epoch number in the filename).
            epoch_checkpoint_path = os.path.join(save_path, f"{classifier_model_file_name}_epoch_{epoch+1}.pdparams")
            paddle.save(model.state_dict(), epoch_checkpoint_path)
            paddle.save(finetune_optimizer.state_dict(), os.path.join(save_path, f"{classifier_model_file_name}_optimizer_epoch_{epoch+1}.pdopt"))
    
    # Un-freeze the encoder again if it was frozen above.
    if frozen_pretrain:
        for param in model.encoder.parameters():
            param.stop_gradient = False
    
    print(f"ContextEncoder 微调完成。最佳验证准确率: {best_accuracy:.4f}")
    plot_curves(history, save_path, classifier_model_file_name)
    return best_accuracy

class RotationPrediction(nn.Layer):
    """Rotation-prediction self-supervised model.

    One shared Encoder feeds two heads: `classifier` for the downstream
    *num_classes* task and `rotation_classifier`, a 4-way head predicting
    which of {0, 90, 180, 270} degrees the input was rotated by.
    """

    def __init__(self, in_dim, hidden_dim, num_classes):
        super().__init__()
        self.encoder = Encoder(in_dim, hidden_dim)
        self.classifier = Classifier(hidden_dim, num_classes)
        # 4-way head over the rotation pretext labels (0/90/180/270 degrees).
        self.rotation_classifier = Classifier(hidden_dim, 4)

    def forward(self, images, task="classification"):
        if task == "classification":
            head = self.classifier
        elif task == "rotation_prediction":
            head = self.rotation_classifier
        else:
            raise ValueError(f"Unknown task: {task}")
        return head(self.encoder(images))

    def rotate_images(self, images, angle=0):
        """Rotate a (B, C, H, W) batch counterclockwise by `angle` degrees.

        Only 0, 90, 180 and 270 are supported; any other angle raises
        ValueError.
        """
        if angle == 0:
            return images
        if angle == 90:
            # CCW 90: swap the H/W axes, then flip the new H axis.
            return paddle.transpose(images, perm=[0, 1, 3, 2]).flip(axis=2)
        if angle == 180:
            # CCW 180: flip both spatial axes.
            return images.flip(axis=[2, 3])
        if angle == 270:
            # CCW 270: swap the H/W axes, then flip the new W axis.
            return paddle.transpose(images, perm=[0, 1, 3, 2]).flip(axis=3)
        raise ValueError(f"Unsupported rotation angle: {angle}")

def train_rotation_encoder_model(model, train_loader, test_loader_for_eval, config, base_model_name):
    """Two-phase RotationPrediction training.

    Phase 1 pretrains on the rotation pretext task: each batch is expanded
    4x with rotated copies (0/90/180/270 degrees) labeled 0-3 and trained
    with cross-entropy.  Phase 2 fine-tunes for classification, optionally
    with the pretrained encoder frozen (config['frozen_pretrain']).
    Checkpoints go under config['save_path'].

    Returns the best fine-tuning validation accuracy.
    """
    pretrain_epochs = config.get('pretrain_epochs', 10)
    lr = config['learning_rate']
    save_path = config.get('save_path', 'checkpoints')
    frozen_pretrain = config.get('frozen_pretrain', False)  # freeze encoder during fine-tuning; default False
    os.makedirs(save_path, exist_ok=True)

    # Naming conventions
    pretrained_model_file_name = f"{base_model_name}_pretrained"
    classifier_model_file_name = f"{base_model_name}_classifier"

    # Phase 1: self-supervised pretraining (rotation prediction)
    print(f"--- 开始 RotationPrediction 预训练 ({pretrained_model_file_name}) ---")
    pretrain_optimizer = AdamW(parameters=model.parameters(), learning_rate=lr)
    # Cross-entropy over the 4 rotation-angle classes.
    rotation_criterion = nn.CrossEntropyLoss()

    pretrain_history = {'pretrain_loss': []}

    for epoch in range(pretrain_epochs):
        model.train()
        epoch_loss = 0
        progress_bar = tqdm(train_loader, desc=f"Pretrain Epoch {epoch+1}/{pretrain_epochs}")
        for images, _ in progress_bar:
            pretrain_optimizer.clear_grad()
            
            # Build the rotated image batch and its pretext labels.
            batch_size = images.shape[0]
            all_images = []
            all_labels = []
            
            # One rotated copy of the batch per angle (0, 90, 180, 270).
            for rot_idx, angle in enumerate([0, 90, 180, 270]):
                rotated_images = model.rotate_images(images, angle)
                all_images.append(rotated_images)
                # The pretext label is the angle index (0, 1, 2, 3).
                all_labels.append(paddle.ones([batch_size], dtype='int64') * rot_idx)
            
            # Concatenate into a single 4x-sized batch.
            all_images = paddle.concat(all_images, axis=0)
            all_labels = paddle.concat(all_labels, axis=0)
            
            # Rotation-prediction forward pass and loss.
            rotation_logits = model(all_images, task="rotation_prediction")
            rotation_loss = rotation_criterion(rotation_logits, all_labels)
            
            rotation_loss.backward()
            pretrain_optimizer.step()
            epoch_loss += rotation_loss.item()
            progress_bar.set_postfix(loss=rotation_loss.item())
        
        avg_epoch_loss = epoch_loss / len(train_loader)
        pretrain_history['pretrain_loss'].append(avg_epoch_loss)
        print(f"Pretrain Epoch {epoch+1}/{pretrain_epochs}, 平均旋转预测损失: {avg_epoch_loss:.4f}")

    pretrained_checkpoint_path = os.path.join(save_path, f"{pretrained_model_file_name}.pdparams")
    paddle.save(model.state_dict(), pretrained_checkpoint_path)
    print(f"预训练模型已保存到: {pretrained_checkpoint_path}")
    print(f"--- RotationPrediction 预训练完成 ({pretrained_model_file_name}) ---")

    # Phase 2: fine-tuning (classification)
    print(f"--- 开始 RotationPrediction 分类微调 ({classifier_model_file_name}) ---")
    
    # When frozen_pretrain is set, freeze the pretrained encoder parameters.
    if frozen_pretrain:
        print("冻结预训练encoder参数，只训练分类器")
        for param in model.encoder.parameters():
            param.stop_gradient = True
        # Optimize the classifier head only.
        finetune_optimizer = AdamW(parameters=model.classifier.parameters(), learning_rate=lr)
    else:
        # Optimize all parameters.
        finetune_optimizer = AdamW(parameters=model.parameters(), learning_rate=lr)
    
    classification_criterion = nn.CrossEntropyLoss()
    
    best_accuracy = 0.0
    finetune_epochs = config['epochs'] 
    history = {'train_loss': [], 'val_accuracy': []}

    for epoch in range(finetune_epochs):
        model.train()
        epoch_class_loss = 0
        progress_bar_ft = tqdm(train_loader, desc=f"Finetune Epoch {epoch+1}/{finetune_epochs} [训练]")
        for images, labels in progress_bar_ft:
            finetune_optimizer.clear_grad()
            logits = model(images, task="classification")
            loss = classification_criterion(logits, labels)
            loss.backward()
            finetune_optimizer.step()
            epoch_class_loss += loss.item()
            progress_bar_ft.set_postfix(loss=loss.item())
        
        avg_epoch_class_loss = epoch_class_loss / len(train_loader)
        history['train_loss'].append(avg_epoch_class_loss)
        
        accuracy = validation(model, test_loader_for_eval, task="classification")
        history['val_accuracy'].append(accuracy)
        
        print(f"Finetune Epoch {epoch+1}/{finetune_epochs}, 平均训练损失: {avg_epoch_class_loss:.4f}, 验证准确率: {accuracy:.4f}")

        if accuracy > best_accuracy:
            best_accuracy = accuracy
            checkpoint_path = os.path.join(save_path, f"{classifier_model_file_name}_best.pdparams")
            paddle.save(model.state_dict(), checkpoint_path)
            paddle.save(finetune_optimizer.state_dict(), os.path.join(save_path, f"{classifier_model_file_name}_optimizer_best.pdopt"))
            print(f"最佳分类模型已保存到: {checkpoint_path} (准确率: {best_accuracy:.4f})")
        if epoch % 10 == 0:
            # Periodic checkpoint every 10 epochs (1-based epoch number in the filename).
            epoch_checkpoint_path = os.path.join(save_path, f"{classifier_model_file_name}_epoch_{epoch+1}.pdparams")
            paddle.save(model.state_dict(), epoch_checkpoint_path)
            paddle.save(finetune_optimizer.state_dict(), os.path.join(save_path, f"{classifier_model_file_name}_optimizer_epoch_{epoch+1}.pdopt"))
    
    # Un-freeze the encoder again if it was frozen above.
    if frozen_pretrain:
        for param in model.encoder.parameters():
            param.stop_gradient = False
    
    print(f"RotationPrediction 微调完成。最佳验证准确率: {best_accuracy:.4f}")
    plot_curves(history, save_path, classifier_model_file_name)
    return best_accuracy

class SimCLR(nn.Layer):
    """SimCLR self-supervised model (https://arxiv.org/abs/2002.05709).

    A shared encoder feeds two heads:
      - a projection MLP used only during contrastive pretraining, and
      - a linear classifier used for downstream CIFAR-10 classification.
    """

    def __init__(self, in_dim, hidden_dim, projection_dim, num_classes, projector_layers=2, use_bn=True):
        """
        Args:
            in_dim: number of input image channels.
            hidden_dim: encoder output / classifier input feature dimension.
            projection_dim: output dimension of the contrastive projection head.
            num_classes: number of classification classes.
            projector_layers: number of linear layers in the projection head (min 2).
            use_bn: whether to insert BatchNorm in the projection head.
        """
        super().__init__()
        # Use ResidualEncoder instead of the plain Encoder for better performance.
        self.encoder = ResidualEncoder(in_dim, hidden_dim)
        # self.encoder = Encoder(in_dim, hidden_dim)

        # Build the projection head dynamically according to the configuration.
        self.projector = self._build_projector(hidden_dim, projection_dim, projector_layers, use_bn)
        self.classifier = Classifier(hidden_dim, num_classes)

    def _build_projector(self, hidden_dim, projection_dim, layers=2, use_bn=True):
        """
        Build the projection head MLP; depth and BatchNorm usage are configurable.

        Args:
            hidden_dim: input feature dimension.
            projection_dim: output (projection) dimension.
            layers: number of linear layers in the MLP (minimum 2).
            use_bn: whether to add BatchNorm after each hidden linear layer.

        Returns:
            nn.Sequential: the configured projection head.
        """
        modules = [nn.Flatten()]
        
        # At least 2 layers are required (input layer and output layer).
        layers = max(2, layers)
        
        # Every layer except the last one is followed by a ReLU activation.
        for i in range(layers - 1):
            if i == 0:
                # First layer: hidden_dim -> hidden_dim
                modules.append(nn.Linear(hidden_dim, hidden_dim))
            else:
                # Intermediate layers: hidden_dim -> hidden_dim
                modules.append(nn.Linear(hidden_dim, hidden_dim))
                
            # Optionally add BatchNorm depending on use_bn.
            if use_bn:
                modules.append(nn.BatchNorm1D(hidden_dim))
                
            modules.append(nn.ReLU())
        
        # The final layer has neither ReLU nor BatchNorm (per the SimCLR design).
        modules.append(nn.Linear(hidden_dim, projection_dim))
        
        return nn.Sequential(*modules)

    def forward(self, images, task="classification"):
        """Run the encoder, then either the projector ("SimCLR") or the
        classifier ("classification"); raises ValueError for any other task."""
        latent = self.encoder(images)
        
        if task == "SimCLR":
            out = self.projector(latent)
            return out
        elif task == "classification":
            logits = self.classifier(latent)
            return logits
        else:
            raise ValueError(f"Unknown task: {task}")

    def augment_images(self, images, img_size=32):
        """Apply a random augmentation pipeline to a batch of images.

        Each call samples fresh random transforms, so calling this twice on
        the same batch yields two different augmented views.
        """
        # Compose random transforms; randomness is re-sampled on every call.
        transform = T.Compose([
            T.RandomCrop(size=(img_size, img_size)),
            T.RandomHorizontalFlip(),
            T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),
            T.RandomErasing(prob=0.4),        # random erasing
        ])
        # Apply the batched random transforms (e.g. shape [N, 3, 32, 32]).
        augmented_images = transform(images)
        # Apply random rotation per-sample so each image gets its own angle.
        for i in range(augmented_images.shape[0]):
            rotation_transform = T.RandomRotation(degrees=30)
            augmented_images[i] = rotation_transform(augmented_images[i].squeeze(0))
        return augmented_images

    ###########################################################################
    # TODO:
    #   Finish contrastive_loss below.
    #   This function compute contrastive loss between images and augmented images.
    # HINT:
    #   You can refer to the SimCLR paper(https://arxiv.org/abs/2002.05709) for details.
    ###########################################################################
    def contrastive_loss(self, features, augmented_features, temperature=0.1):
        """NT-Xent (normalized temperature-scaled cross-entropy) loss.

        Args:
            features: projections of the original views, shape (N, D).
            augmented_features: projections of the augmented views, shape (N, D).
            temperature: softmax temperature tau.

        Returns:
            Scalar mean contrastive loss over all 2N anchors.
        """
        batch_size = features.shape[0] # N
        
        # 1. L2-normalize the features so dot products are cosine similarities.
        features_norm = paddle.nn.functional.normalize(features, p=2, axis=1)
        augmented_features_norm = paddle.nn.functional.normalize(augmented_features, p=2, axis=1)
        
        # 2. Stack both views into a single batch.
        # representations shape: (2N, D)
        representations = paddle.concat([features_norm, augmented_features_norm], axis=0)
        
        # 3. Pairwise cosine similarities.
        # similarity_matrix shape: (2N, 2N)
        similarity_matrix = paddle.matmul(representations, representations, transpose_y=True)
        
        # 4. Exclude the diagonal (each sample compared with itself): self-similarity
        # must not appear in the softmax denominator.
        
        # Scale similarities by the temperature.
        logits = similarity_matrix / temperature
        
        # Boolean mask marking all off-diagonal entries.
        mask = paddle.ones_like(logits) - paddle.eye(2 * batch_size)
        mask = paddle.cast(mask, 'bool')
        
        # Extract the off-diagonal entries. Paddle has no direct boolean
        # indexing here, so use nonzero + gather_nd instead.
        non_diag_indices = paddle.nonzero(mask)
        non_diag_values = paddle.gather_nd(logits, non_diag_indices)
        
        # Reshape to (2N, 2N-1): each row holds all similarities except self.
        logits = non_diag_values.reshape([2 * batch_size, 2 * batch_size - 1])
        
        # 5. Positive-pair labels after the diagonal removal:
        # For row i (0 <= i < N, an original view) the positive sits at raw
        # column i+N; removing the diagonal element at column i shifts it left
        # by one, so the label is i+N-1.
        # For row j (N <= j < 2N, an augmented view) the positive sits at raw
        # column j-N, which is left of the diagonal, so its index is unshifted.
        labels = paddle.concat([
            paddle.arange(batch_size-1, 2 * batch_size-1, dtype='int64'), # positives for the first N rows
            paddle.arange(0, batch_size, dtype='int64')             # positives for the last N rows
        ])
        # 6. Cross-entropy over the masked, temperature-scaled similarities:
        # logits[i, labels[i]] is the positive-pair similarity and the
        # log-sum-exp runs over all k != i since the diagonal was removed.
        loss = paddle.nn.functional.cross_entropy(input=logits, label=labels, reduction='mean')
        
        return loss

# --- Train the SimCLR model (pretrain + finetune) ---
def train_SimCLR_model(model, train_loader, test_loader_for_eval, config, base_model_name):
    """Two-phase SimCLR training: contrastive pretraining, then classification finetuning.

    Args:
        model: a SimCLR instance (encoder + projector + classifier).
        train_loader: training dataloader (labels are ignored during pretraining).
        test_loader_for_eval: dataloader used for validation accuracy during finetuning.
        config: dict of hyperparameters (epochs, learning_rate, temperature, ...).
        base_model_name: base name used for checkpoint files.

    Returns:
        float: best validation accuracy reached during finetuning.
    """
    pretrain_epochs = config.get('pretrain_epochs', 50) # SimCLR usually needs more pretraining epochs
    lr = config.get('learning_rate', 0.001) # SimCLR may benefit from a larger LR + LARS; AdamW is used here
    temperature = config.get('temperature', 0.1)
    save_path = config.get('save_path', 'checkpoints')
    frozen_pretrain = config.get('frozen_pretrain', False)
    img_size = config.get('img_size', 32)
    os.makedirs(save_path, exist_ok=True)

    pretrained_model_file_name = f"{base_model_name}_pretrained"
    classifier_model_file_name = f"{base_model_name}_classifier"

    # Phase 1: SimCLR self-supervised pretraining
    print(f"--- 开始 SimCLR 预训练 ({pretrained_model_file_name}) ---")
    # For SimCLR the optimizer covers all parameters (encoder + projector).
    pretrain_optimizer = AdamW(parameters=model.parameters(), learning_rate=lr, weight_decay=0.0001)
    
    pretrain_history = {'pretrain_loss': []}

    for epoch in range(pretrain_epochs):
        model.train()
        epoch_loss = 0
        progress_bar = tqdm(train_loader, desc=f"SimCLR Pretrain Epoch {epoch+1}/{pretrain_epochs}")
        for images, _ in progress_bar:
            pretrain_optimizer.clear_grad()
            
            # 1. Generate two augmented views of the same images. Each call to
            #    augment_images samples fresh random transforms, so two calls
            #    yield two distinct views as required by SimCLR.
            images_v1 = model.augment_images(images, img_size=img_size)
            images_v2 = model.augment_images(images, img_size=img_size)

            # 2. Project both views through encoder + projector.
            features_v1 = model(images_v1, task="SimCLR") # (N, projection_dim)
            features_v2 = model(images_v2, task="SimCLR") # (N, projection_dim)
            
            # 3. NT-Xent contrastive loss between the two views.
            loss = model.contrastive_loss(features_v1, features_v2, temperature=temperature)
            
            loss.backward()
            pretrain_optimizer.step()
            
            epoch_loss += loss.item()
            progress_bar.set_postfix(loss=loss.item())
            
        avg_epoch_loss = epoch_loss / len(train_loader)
        pretrain_history['pretrain_loss'].append(avg_epoch_loss)
        print(f"SimCLR Pretrain Epoch {epoch+1}/{pretrain_epochs}, 平均对比损失: {avg_epoch_loss:.4f}")

    pretrained_checkpoint_path = os.path.join(save_path, f"{pretrained_model_file_name}.pdparams")
    paddle.save(model.state_dict(), pretrained_checkpoint_path)
    print(f"SimCLR 预训练模型已保存到: {pretrained_checkpoint_path}")
    # plot_curves(pretrain_history, save_path, pretrained_model_file_name) # optional: plot pretraining loss
    print(f"--- SimCLR 预训练完成 ({pretrained_model_file_name}) ---")

    # Phase 2: classification finetuning
    print(f"--- 开始 SimCLR 分类微调 ({classifier_model_file_name}) ---")
    
    if frozen_pretrain:
        print("冻结预训练 encoder 参数，只训练分类器")
        for param in model.encoder.parameters():
            param.stop_gradient = True
        # Only optimize the classifier head (linear evaluation protocol).
        finetune_optimizer = AdamW(parameters=model.classifier.parameters(), learning_rate=lr)
    else:
        print("微调整个模型（encoder + 分类器）")
        # Without freezing, a smaller LR is usually safer for full finetuning.
        finetune_optimizer = AdamW(parameters=model.parameters(), learning_rate=lr / 10 if lr > 1e-4 else lr)

    classification_criterion = nn.CrossEntropyLoss()
    
    best_accuracy = 0.0
    finetune_epochs = config.get('epochs', 10) # number of finetuning epochs
    history = {'train_loss': [], 'val_accuracy': []}

    for epoch in range(finetune_epochs):
        model.train() # ensure the model is in training mode
        epoch_class_loss = 0
        progress_bar_ft = tqdm(train_loader, desc=f"SimCLR Finetune Epoch {epoch+1}/{finetune_epochs} [训练]")
        for images, labels in progress_bar_ft:
            finetune_optimizer.clear_grad()
            # Classify the original (non-augmented) images.
            logits = model(images, task="classification")
            loss = classification_criterion(logits, labels)
            loss.backward()
            finetune_optimizer.step()
            epoch_class_loss += loss.item()
            progress_bar_ft.set_postfix(loss=loss.item())
            
        avg_epoch_class_loss = epoch_class_loss / len(train_loader)
        history['train_loss'].append(avg_epoch_class_loss)
        
        accuracy = validation(model, test_loader_for_eval, task="classification") # evaluate the classification head
        history['val_accuracy'].append(accuracy)
        
        print(f"SimCLR Finetune Epoch {epoch+1}/{finetune_epochs}, 平均训练损失: {avg_epoch_class_loss:.4f}, 验证准确率: {accuracy:.4f}")

        if accuracy > best_accuracy:
            best_accuracy = accuracy
            checkpoint_path = os.path.join(save_path, f"{classifier_model_file_name}_best.pdparams")
            paddle.save(model.state_dict(), checkpoint_path)
            paddle.save(finetune_optimizer.state_dict(), os.path.join(save_path, f"{classifier_model_file_name}_optimizer_best.pdopt"))
            print(f"最佳分类模型已保存到: {checkpoint_path} (准确率: {best_accuracy:.4f})")
        if epoch % 10 == 0:
            epoch_checkpoint_path = os.path.join(save_path, f"{classifier_model_file_name}_epoch_{epoch+1}.pdparams")
            paddle.save(model.state_dict(), epoch_checkpoint_path)
            paddle.save(finetune_optimizer.state_dict(), os.path.join(save_path, f"{classifier_model_file_name}_optimizer_epoch_{epoch+1}.pdopt"))

    # BUG FIX: restore encoder trainability if it was frozen above, mirroring
    # train_rotation_encoder_model; previously the encoder stayed frozen after
    # this function returned, silently breaking any further training.
    if frozen_pretrain:
        for param in model.encoder.parameters():
            param.stop_gradient = False

    plot_curves(history, save_path, classifier_model_file_name) # plot finetuning curves
    print(f"--- SimCLR 分类微调完成 ({classifier_model_file_name}) ---")
    return best_accuracy

# --- Main entry point ---
def main():
    """Parse CLI arguments, build the configured model, and dispatch to the
    requested action (train / test / report / full pipeline).

    Raises:
        ValueError: if the config specifies an unknown model type or the
            model fails to initialize.
    """
    parser = argparse.ArgumentParser(description="自监督学习 CIFAR-10 分类")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--train', type=str, metavar='<config>.yaml', help='从头开始训练模型')
    group.add_argument('--test', type=str, metavar='<config>.yaml', help='测试模型并在测试集上评估')
    group.add_argument('--report', action='store_true', help='生成实验结果报告')
    group.add_argument('--all', type=str, metavar='<config>.yaml', help='执行训练、测试和报告完整流程')
    args = parser.parse_args()

    config_file_path = None
    action_type = None
    config = None

    if args.train:
        config_file_path = args.train
        action_type = "train"
    elif args.test:
        config_file_path = args.test
        action_type = "test"
    elif args.all:
        config_file_path = args.all
        action_type = "all"
    elif args.report: # standalone report, no config file required
        print("--- 开始生成报告 (独立模式) ---")
        generate_report({}) # pass an empty dict so generate_report uses defaults
        print("--- 报告生成完成 ---")
        return # exit after the standalone report
    else:
        parser.print_help() # should be unreachable: the group is required=True
        return

    # From here on, config_file_path must be set (train / test / all).
    if not config_file_path:
        parser.error(f"Action '{action_type}' requires a config file path.") # should not happen
        return

    config = load_config(config_file_path)

    # Common setup shared by all config-driven actions.
    model_type = config.get('model_type', 'PureClassifier')
    model_name = config.get('model_name', model_type) # used for checkpoint/report file names
    img_channels = config.get('img_channels', 3)
    hidden_dim = config.get('hidden_dim', 256)
    num_classes = config.get('num_classes', 10)
    
    model = None
    if model_type == 'PureClassifier':
        model = PureClassifier(img_channels, hidden_dim, num_classes)
    elif model_type == 'ContextEncoder':
        model = ContextEncoder(img_channels, hidden_dim, num_classes)
    elif model_type == 'RotationPrediction':
        model = RotationPrediction(img_channels, hidden_dim, num_classes)
    elif model_type == 'SimCLR':
        projection_dim = config.get('projection_dim', 128)
        projector_layers = config.get('projector_layers', 2)  # projection-head depth, default 2
        use_batch_norm = config.get('use_batch_norm', True)  # whether the projector uses BatchNorm
        model = SimCLR(
            img_channels, 
            hidden_dim, 
            projection_dim, 
            num_classes,
            projector_layers=projector_layers,
            use_bn=use_batch_norm
        )
    else:
        raise ValueError(f"未知的模型类型: {model_type} in config: {config_file_path}")

    if model is None:
        raise ValueError(f"模型未能初始化: {model_type}")

    # Dispatch on the requested action.
    if action_type == "all":
        print(f"执行完整流程: 训练 -> 测试 -> 报告 (配置: {config_file_path})")
        train_loader, test_loader_for_eval = get_dataloaders(config) 

        if model_type == 'ContextEncoder':
            # ContextEncoder: two-phase training handled by a dedicated function.
            train_context_encoder_model(model, train_loader, test_loader_for_eval, config, model_name)
            print(f"--- ContextEncoder 完整训练流程完成 ({model_name}) ---")
        elif model_type == 'RotationPrediction':
            train_rotation_encoder_model(model, train_loader, test_loader_for_eval, config, model_name)
            print(f"--- RotationPrediction 完整训练流程完成 ({model_name}) ---")
        elif model_type == 'SimCLR':
            train_SimCLR_model(model, train_loader, test_loader_for_eval, config, model_name)
            print(f"--- SimCLR 完整训练流程完成 ({model_name}) ---")
        else: 
            print(f"--- 开始训练 ({model_name}) ---")
            train_model(model, train_loader, test_loader_for_eval, config, model_name, start_epoch=0)
            print(f"--- 训练完成 ({model_name}) ---")
        
        # 2. Test the trained model.
        print(f"--- 开始测试 ({model_name if model_type != 'ContextEncoder' else f'{model_name}_classifier'}) ---")
        # Self-supervised models are tested via their *_classifier checkpoint.
        test_model_name = f"{model_name}_classifier" if model_type != 'PureClassifier' else model_name
        _, test_loader_for_test = get_dataloaders(config) 
        test_model(model, test_loader_for_test, config, test_model_name)
        print(f"--- 测试完成 ({test_model_name}) ---")
        
        # 3. Report (uses the config loaded for --all).
        print("--- 开始生成报告 ---")
        generate_report(config)
        print("--- 报告生成完成 ---")

    elif action_type == "train": 
        if model_type == 'ContextEncoder':
            print(f"--- 开始 ContextEncoder 训练 (预训练 + 微调) ({model_name}, 配置: {config_file_path}) ---")
            train_loader, test_loader_for_eval = get_dataloaders(config)
            train_context_encoder_model(model, train_loader, test_loader_for_eval, config, model_name)
        elif model_type == 'RotationPrediction':
            print(f"--- 开始 RotationPrediction 训练 ({model_name}, 配置: {config_file_path}) ---")
            # BUG FIX: this branch previously used train_loader/test_loader_for_eval
            # without ever creating them, raising NameError at runtime.
            train_loader, test_loader_for_eval = get_dataloaders(config)
            train_rotation_encoder_model(model, train_loader, test_loader_for_eval, config, model_name)
        elif model_type == 'SimCLR':
            print(f"--- 开始 SimCLR 训练 ({model_name}, 配置: {config_file_path}) ---")
            # BUG FIX: same missing-dataloader issue as the RotationPrediction branch.
            train_loader, test_loader_for_eval = get_dataloaders(config)
            train_SimCLR_model(model, train_loader, test_loader_for_eval, config, model_name)
        # Generic training path for the plain supervised baseline.
        elif model_type == 'PureClassifier': 
            print(f"--- 开始 {action_type} ({model_name}, 配置: {config_file_path}) ---")
            train_loader, test_loader_for_eval = get_dataloaders(config) 
            start_epoch = 0
            train_model(model, train_loader, test_loader_for_eval, config, model_name, start_epoch=start_epoch)
        
        # Use model_name for the final message; specific names are used internally for saving.
        print(f"--- {action_type} 完成 ({model_name}) ---")

    elif action_type == "test":
        print(f"--- 开始测试 ({model_name if model_type != 'ContextEncoder' else f'{model_name}_classifier'}, 配置: {config_file_path}) ---")
        _, test_loader = get_dataloaders(config)
        test_model_name = f"{model_name}_classifier" if model_type != 'PureClassifier' else model_name
        test_model(model, test_loader, config, test_model_name)
        print(f"--- 测试完成 ({test_model_name}) ---")

if __name__ == '__main__':
    main()
