"""
CN-CLIP PyTorch Lightning模块
基于现有的WenwuDataset，简化微调实验流程
"""

import os
import sys
import json
from pathlib import Path
from typing import Dict, Any, Optional, List, Tuple
from datetime import datetime

import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from pytorch_lightning.utilities.types import STEP_OUTPUT, OptimizerLRScheduler
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor, EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger

import numpy as np
from cn_clip.clip import load_from_name, tokenize

# Standardized dataset helpers and the shared evaluation module
sys.path.append(str(Path(__file__).parent.parent))
from dataset.dataset import get_train_set, get_val_set, get_test_set
from common.eval import compute_clip_retrieval_metrics, print_metrics


class CNClipLightning(pl.LightningModule):
    """
    PyTorch Lightning module wrapping a pretrained CN-CLIP model.

    Features:
    - multiple fine-tuning strategies (full, frozen components)
    - automatic retrieval-metric evaluation
    - automatic persistence of experiment artifacts
    - reproducibility helpers
    """

    def __init__(
        self,
        model_name: str = "ViT-H-14",
        learning_rate: float = 5e-7,
        weight_decay: float = 0.01,
        freeze_strategy: str = "none",  # "none" or '+'-joined component names (see _freeze_component)
        experiment_name: Optional[str] = None,
        data_scale: float = 1.0,
        save_predictions: bool = True,
        **kwargs
    ):
        """Load the pretrained CN-CLIP model and prepare it for fine-tuning.

        Args:
            model_name: CN-CLIP checkpoint identifier, e.g. "ViT-H-14".
            learning_rate: base learning rate for AdamW.
            weight_decay: AdamW weight decay.
            freeze_strategy: which components to freeze; components may be
                combined with '+', e.g. "visual+text".
            experiment_name: when given, an experiment directory is created
                and configs/results are written there.
            data_scale: fraction of training data (stored in hparams only).
            save_predictions: whether predictions should be saved (stored
                in hparams only).
        """
        super().__init__()
        self.save_hyperparameters()

        # Load the pretrained weights on CPU; Lightning moves the module
        # to the target device later.
        print(f"Loading model: {model_name}")
        self.clip_model, self.preprocess = load_from_name(
            model_name,
            device="cpu",
            download_root="./pretrained_weights",
        )

        # Freeze the requested parts of the network.
        self._apply_freeze_strategy(freeze_strategy)

        # Buffer for per-step validation outputs, consumed at epoch end.
        self.val_step_outputs = []

        # Optional experiment directory for configs/results/logs.
        self.experiment_dir = (
            self._create_experiment_dir(experiment_name) if experiment_name else None
        )

        # Force FP32 attention to avoid NaNs (mirrors the legacy
        # legacy_archive/train_helpers.py tackle() trick).
        self._apply_numerical_stability()

        # Report which parameters will actually be trained.
        self.print_trainable_parameters()
    
    def _apply_freeze_strategy(self, strategy: str):
        """Freeze model components according to *strategy*.

        The strategy string is either "none" (train everything) or one or
        more component names joined by '+', each delegated to
        _freeze_component.
        """
        # Start from a fully trainable model.
        for p in self.clip_model.parameters():
            p.requires_grad = True

        if strategy == "none":
            print("Strategy: 全量微调 (Full fine-tuning)")
            return

        # Each '+'-separated token names one component to freeze.
        frozen_modules = []
        for component in (part.strip() for part in strategy.split('+')):
            n_frozen = self._freeze_component(component)
            if n_frozen > 0:
                frozen_modules.append(f"{component}({n_frozen:,}参数)")

        if frozen_modules:
            print(f"Applied freeze strategy: {' + '.join(frozen_modules)}")
        else:
            print(f"Warning: Unknown freeze strategy '{strategy}', using full fine-tuning")
    
    def _freeze_component(self, component: str) -> int:
        """Freeze one named component and return how many parameters froze.

        Unknown component names are ignored and yield 0.
        """
        def _freeze(params) -> int:
            # Disable gradients on every parameter and count elements.
            count = 0
            for p in params:
                p.requires_grad = False
                count += p.numel()
            return count

        frozen_count = 0

        if component == "all":
            # Freeze everything (testing only).
            frozen_count = _freeze(self.clip_model.parameters())

        elif component == "visual":
            # Freeze the vision encoder.
            frozen_count = _freeze(self.clip_model.visual.parameters())

        elif component == "text":
            # Freeze the text encoder.
            frozen_count = _freeze(self.clip_model.bert.parameters())

        elif component == "both-encoders":
            # Freeze both encoders.
            frozen_count = _freeze(self.clip_model.visual.parameters())
            frozen_count += _freeze(self.clip_model.bert.parameters())

        elif component == "visual-backbone":
            # Freeze the visual backbone; keep only the final projection
            # (parameters named 'proj' / '*.proj') trainable.
            for name, p in self.clip_model.visual.named_parameters():
                if name == 'proj' or name.endswith('.proj'):
                    continue
                p.requires_grad = False
                frozen_count += p.numel()

        elif component == "text-backbone":
            # Freeze the text backbone; keep BERT's pooler (its final
            # projection) trainable.
            for name, p in self.clip_model.bert.named_parameters():
                if 'pooler' not in name.lower():
                    p.requires_grad = False
                    frozen_count += p.numel()

        elif component == "backbones":
            # Freeze both backbones, keeping the projection layers.
            frozen_count = self._freeze_component("visual-backbone")
            frozen_count += self._freeze_component("text-backbone")

        elif component == "visual-early-layers":
            # Freeze the first 50% of visual transformer blocks.
            frozen_count = self._freeze_early_layers("visual")

        elif component == "text-early-layers":
            # Freeze the first 50% of BERT encoder layers.
            frozen_count = self._freeze_early_layers("text")

        elif component == "projections":
            # Freeze only projection-like parameters (incl. logit_scale).
            targets = ('projection', 'proj', 'pooler', 'logit_scale')
            for name, p in self.clip_model.named_parameters():
                if any(t in name.lower() for t in targets):
                    p.requires_grad = False
                    frozen_count += p.numel()

        return frozen_count
    
    def _freeze_early_layers(self, encoder_type: str) -> int:
        """Freeze the first half of the given encoder's layer stack.

        Args:
            encoder_type: "visual" (ViT resblocks) or "text" (BERT layers).

        Returns:
            Number of parameters that were frozen (0 when the encoder's
            layer stack cannot be located).
        """
        # Resolve the transformer layer stack for the chosen encoder.
        if encoder_type == "visual" and hasattr(self.clip_model.visual, 'transformer'):
            layers = self.clip_model.visual.transformer.resblocks
        elif encoder_type == "text" and hasattr(self.clip_model.bert, 'encoder'):
            layers = self.clip_model.bert.encoder.layer
        else:
            return 0

        frozen_count = 0
        # Freeze the first 50% of the layers (integer division).
        for block in list(layers)[:len(layers) // 2]:
            for p in block.parameters():
                p.requires_grad = False
                frozen_count += p.numel()
        return frozen_count
    
    def _apply_numerical_stability(self):
        """
        Force all MultiheadAttention modules to compute in FP32
        (ported from legacy_archive/train_helpers.py's tackle function).

        Mixed-precision attention is the main source of NaNs here; running
        q/k/v through attention in FP32 and casting the output back keeps
        the rest of the network untouched.

        Fix: guarded against double-patching — previously a second call
        would rebind ``_orig_forward`` to the already-patched forward,
        which recurses forever on the next attention call.
        """
        import types

        def fp32_mha_forward(self, q, k, v, **kw):
            """MultiheadAttention forward that upcasts inputs to FP32."""
            out_dtype = q.dtype
            # Run attention in FP32, then cast the output back to the
            # caller's dtype; attention weights are returned unchanged.
            attn_out, attn_weights = self._orig_forward(
                q.float(), k.float(), v.float(), **kw
            )
            return attn_out.to(out_dtype), attn_weights

        mha_count = 0
        for module in self.clip_model.modules():
            if isinstance(module, torch.nn.MultiheadAttention):
                # Idempotency guard: only patch modules that still expose
                # their original forward.
                if not hasattr(module, '_orig_forward'):
                    module._orig_forward = module.forward  # back up original
                    module.forward = types.MethodType(fp32_mha_forward, module)
                mha_count += 1

        print(f"✅ 数值稳定性处理已应用：{mha_count} 个 MultiheadAttention 模块")
    
    def _create_experiment_dir(self, experiment_name: str) -> Path:
        """Create the experiment directory tree and persist config/system info.

        Returns the experiment root under codebase/cnclip_finetune/experiments.
        """
        # The experiment name is used verbatim (no date prefix).
        exp_dir = Path("codebase/cnclip_finetune/experiments") / experiment_name
        exp_dir.mkdir(parents=True, exist_ok=True)

        # Standard sub-directory layout.
        for sub in ("config", "results", "logs", "commands", "raw_outputs"):
            (exp_dir / sub).mkdir(exist_ok=True)

        # Persist the hyperparameters for reproducibility.
        with open(exp_dir / "config" / "model_config.json", 'w', encoding='utf-8') as f:
            json.dump(self.hparams, f, indent=2, ensure_ascii=False, default=str)

        # Persist environment information alongside the logs.
        cuda_ok = torch.cuda.is_available()
        system_info = {
            "python_version": sys.version,
            "pytorch_version": torch.__version__,
            "cuda_version": torch.version.cuda if cuda_ok else None,
            "gpu_name": torch.cuda.get_device_name() if cuda_ok else None,
            "timestamp": datetime.now().isoformat(),
        }
        with open(exp_dir / "logs" / "system_info.json", 'w', encoding='utf-8') as f:
            json.dump(system_info, f, indent=2, ensure_ascii=False)

        print(f"Created experiment directory: {exp_dir}")
        return exp_dir
    
    def forward(self, images: torch.Tensor, text_tokens: torch.Tensor) -> Dict[str, torch.Tensor]:
        """Encode a batch and return features plus pairwise logits.

        The underlying CN-CLIP model returns L2-normalized features and an
        already-exponentiated logit_scale, so the similarity matrix is a
        plain scaled dot product (mirrors the legacy get_loss path).
        """
        image_features, text_features, logit_scale = self.clip_model(images, text_tokens)

        # [B, B] image->text similarity logits; the text view is the transpose.
        image_logits = logit_scale * image_features @ text_features.T
        return {
            'logits_per_image': image_logits,
            'logits_per_text': image_logits.T,
            'image_features': image_features,
            'text_features': text_features,
            'logit_scale': logit_scale,
        }
    
    def _get_loss(self, logits_per_image: torch.Tensor,
                  logits_per_text: torch.Tensor) -> torch.Tensor:
        """Symmetric CLIP contrastive loss with numerical-stability guards.

        Returns a constant fallback loss (0.7) whenever NaN/Inf would
        otherwise poison the backward pass.
        """
        n = logits_per_image.size(0)
        labels = torch.arange(n, device=logits_per_image.device)

        def _is_bad(t: torch.Tensor) -> bool:
            # True when the tensor contains any NaN or Inf entry.
            return bool(torch.isnan(t).any() or torch.isinf(t).any())

        # Guard both logit matrices before computing cross-entropy.
        if _is_bad(logits_per_image):
            print("⚠️ logits_per_image 包含异常值，使用默认损失")
            return torch.tensor(0.7, device=logits_per_image.device, requires_grad=True)
        if _is_bad(logits_per_text):
            print("⚠️ logits_per_text 包含异常值，使用默认损失")
            return torch.tensor(0.7, device=logits_per_text.device, requires_grad=True)

        # Aggressively clamp logits to keep softmax from overflowing.
        loss_i = F.cross_entropy(torch.clamp(logits_per_image, -50, 50), labels)
        loss_t = F.cross_entropy(torch.clamp(logits_per_text, -50, 50), labels)

        # Final sanity check on the averaged loss.
        total_loss = (loss_i + loss_t) / 2
        if _is_bad(total_loss):
            print("⚠️ 最终损失包含异常值，使用默认损失")
            return torch.tensor(0.7, device=total_loss.device, requires_grad=True)
        return total_loss
    
    def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
        """One training step: contrastive loss plus rich telemetry."""
        images, text_tokens, img_ids = batch

        outputs = self(images, text_tokens)
        logits_img = outputs['logits_per_image']
        logits_txt = outputs['logits_per_text']
        loss = self._get_loss(logits_img, logits_txt)

        n = images.size(0)
        labels = torch.arange(n, device=images.device)

        # In-batch retrieval accuracy in both directions.
        img_acc = (logits_img.argmax(dim=-1) == labels).float().mean()
        txt_acc = (logits_txt.argmax(dim=-1) == labels).float().mean()

        # Per-direction losses (unclamped) to inspect the I2T/T2I split.
        loss_i2t = F.cross_entropy(logits_img, labels)
        loss_t2i = F.cross_entropy(logits_txt, labels)

        # Raw similarities: diagonal = positives, off-diagonal = negatives.
        similarities = logits_img / outputs['logit_scale']
        diag = torch.diag(similarities)
        pos_sim = diag.mean()
        neg_sim = (similarities.sum() - diag.sum()) / (n * (n - 1))

        # Top-3 retrieval accuracy for a finer-grained view.
        top3_img = (logits_img.topk(3, dim=1)[1] == labels.unsqueeze(1)).any(dim=1).float().mean()
        top3_txt = (logits_txt.topk(3, dim=1)[1] == labels.unsqueeze(1)).any(dim=1).float().mean()

        # Feature spread — mode collapse shows up as std -> 0.
        img_feat_std = outputs['image_features'].std(dim=0).mean()
        txt_feat_std = outputs['text_features'].std(dim=0).mean()

        self.log('train_loss', loss, prog_bar=True, on_step=True, on_epoch=True)

        # Telemetry table: (name, value, prog_bar, on_step, on_epoch).
        for name, value, prog, step, epoch in (
            ('train_img_acc', img_acc, True, False, True),
            ('train_txt_acc', txt_acc, True, False, True),
            ('logit_scale', outputs['logit_scale'], False, True, False),
            ('loss_i2t', loss_i2t, False, True, True),
            ('loss_t2i', loss_t2i, False, True, True),
            ('pos_sim', pos_sim, False, True, True),
            ('neg_sim', neg_sim, False, True, True),
            ('sim_gap', pos_sim - neg_sim, True, False, True),
            ('top3_img_acc', top3_img, False, False, True),
            ('top3_txt_acc', top3_txt, False, False, True),
            ('img_feat_std', img_feat_std, False, True, True),
            ('txt_feat_std', txt_feat_std, False, True, True),
        ):
            self.log(name, value, prog_bar=prog, on_step=step, on_epoch=epoch)

        return loss
    
    def validation_step(self, batch, batch_idx) -> Optional[STEP_OUTPUT]:
        """One validation step; stash features for epoch-end retrieval eval."""
        images, text_tokens, img_ids = batch

        outputs = self(images, text_tokens)
        loss = self._get_loss(outputs['logits_per_image'], outputs['logits_per_text'])

        # Move everything to CPU so a whole epoch of features fits in host memory.
        self.val_step_outputs.append({
            'image_features': outputs['image_features'].cpu(),
            'text_features': outputs['text_features'].cpu(),
            'img_ids': img_ids.cpu(),
            'loss': loss.cpu(),
        })
        return loss
    
    def on_validation_epoch_end(self) -> None:
        """Aggregate validation outputs, compute retrieval metrics, log and save.

        Fix: the epoch loss is now weighted by batch size instead of being a
        plain mean of per-batch means, so a smaller trailing batch no longer
        skews val_loss.
        """
        if len(self.val_step_outputs) == 0:
            return

        # Drop any batch whose features or loss contain NaN.
        valid_outputs = []
        nan_count = 0
        for i, output in enumerate(self.val_step_outputs):
            has_nan = (torch.isnan(output['image_features']).any() or
                      torch.isnan(output['text_features']).any() or
                      torch.isnan(output['loss']))
            if has_nan:
                nan_count += 1
                print(f"⚠️  Step {i}: 检测到 NaN，跳过此批次")
            else:
                valid_outputs.append(output)

        if len(valid_outputs) == 0:
            print("❌ 所有验证输出都包含 NaN，跳过评估")
            self.val_step_outputs.clear()
            return

        if nan_count > 0:
            print(f"⚠️  总计 {nan_count}/{len(self.val_step_outputs)} 批次包含 NaN")

        # Concatenate the surviving batches.
        all_image_features = torch.cat([x['image_features'] for x in valid_outputs], dim=0)
        all_text_features = torch.cat([x['text_features'] for x in valid_outputs], dim=0)
        all_img_ids = torch.cat([x['img_ids'] for x in valid_outputs], dim=0)

        # Batch-size-weighted mean loss: each stored loss is already a
        # per-sample mean, so weight it by its number of samples.
        batch_sizes = torch.tensor(
            [x['image_features'].size(0) for x in valid_outputs], dtype=torch.float32
        )
        all_losses = torch.stack([x['loss'] for x in valid_outputs]).float()
        avg_loss = (all_losses * batch_sizes).sum() / batch_sizes.sum()

        # Cross-modal retrieval metrics over the whole validation set.
        retrieval_metrics = self._compute_retrieval_metrics(
            all_image_features, all_text_features, all_img_ids
        )

        self.log('val_loss', avg_loss, prog_bar=True)
        for metric_name, metric_value in retrieval_metrics.items():
            self.log(f'val_{metric_name}', metric_value, prog_bar=False)

        # current_epoch is correct here: validation runs at epoch end.
        current_epoch = self.trainer.current_epoch
        print(f"\n[Epoch {current_epoch} 验证结果]")
        print(f"Val Loss: {avg_loss:.4f}")
        print(f"I2T R@1: {retrieval_metrics['i2t_r1']:.3f}, R@5: {retrieval_metrics['i2t_r5']:.3f}")
        print(f"T2I R@1: {retrieval_metrics['t2i_r1']:.3f}, R@5: {retrieval_metrics['t2i_r5']:.3f}")
        print(f"Mean R@1: {retrieval_metrics['mean_r1']:.3f}, Mean R@5: {retrieval_metrics['mean_r5']:.3f}")

        # Persist per-epoch metrics when an experiment dir is configured.
        if self.experiment_dir:
            results_file = self.experiment_dir / "results" / f"val_epoch_{current_epoch}.json"
            results = {
                'epoch': current_epoch,
                'val_loss': float(avg_loss),
                'retrieval_metrics': {k: float(v) for k, v in retrieval_metrics.items()}
            }
            with open(results_file, 'w', encoding='utf-8') as f:
                json.dump(results, f, indent=2, ensure_ascii=False)

        # Reset the per-step buffer for the next epoch.
        self.val_step_outputs.clear()
    
    def _compute_retrieval_metrics(
        self,
        image_features: torch.Tensor,
        text_features: torch.Tensor,
        img_ids: torch.Tensor
    ) -> Dict[str, float]:
        """Delegate retrieval-metric computation to the shared eval module.

        Args:
            image_features: [N, D] image embeddings.
            text_features: [N, D] text embeddings.
            img_ids: [N] image identifiers (currently unused by the metric).

        Returns:
            Recall@K metrics converted to a plain dict.
        """
        eval_kwargs = dict(
            image_features=image_features,
            text_features=text_features,
            k_values=[1, 5, 10],
            temperature=0.07,
            normalize=True,
        )
        return compute_clip_retrieval_metrics(**eval_kwargs).to_dict()
    
    def configure_optimizers(self) -> OptimizerLRScheduler:
        """AdamW over trainable parameters plus per-epoch cosine LR decay."""
        base_lr = self.hparams.learning_rate

        # Only optimize parameters left trainable by the freeze strategy.
        optimizer = torch.optim.AdamW(
            (p for p in self.clip_model.parameters() if p.requires_grad),
            lr=base_lr,
            weight_decay=self.hparams.weight_decay,
            eps=1e-8,
        )

        # Anneal down to 1% of the base LR over the whole run.
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=self.trainer.max_epochs,
            eta_min=base_lr * 0.01,
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {"scheduler": scheduler, "interval": "epoch"},
        }
    
    def on_before_optimizer_step(self, optimizer):
        """Log the global gradient norm and neutralize NaN/Inf gradients."""
        # Global L2 norm across all parameter gradients (no clipping, to
        # match the legacy training recipe). NaN grads intentionally
        # propagate into the logged norm as a health signal.
        with torch.no_grad():
            sq_sum = sum(
                p.grad.norm().item() ** 2
                for p in self.parameters()
                if p.grad is not None
            )
        self.log('grad_norm', sq_sum ** 0.5, prog_bar=False, on_step=True, on_epoch=False)

        # Zero out any gradient containing NaN/Inf so the optimizer step
        # cannot corrupt the weights.
        nan_count = 0
        for name, param in self.named_parameters():
            grad = param.grad
            if grad is None:
                continue
            if torch.isnan(grad).any() or torch.isinf(grad).any():
                print(f"⚠️  检测到异常梯度在参数: {name}")
                grad.data.zero_()
                nan_count += 1

        # Record how many parameters had their gradients reset.
        if nan_count > 0:
            self.log('nan_grad_count', float(nan_count), prog_bar=False, on_step=True, on_epoch=False)
    
    def print_trainable_parameters(self):
        """Print a summary of trainable vs. frozen parameter counts."""
        # Single pass over the parameters for both totals.
        total_params = 0
        trainable_params = 0
        for p in self.clip_model.parameters():
            total_params += p.numel()
            if p.requires_grad:
                trainable_params += p.numel()
        frozen_params = total_params - trainable_params

        sep = '=' * 50
        print(f"\n{sep}")
        print(f"📊 参数统计 (Strategy: {self.hparams.freeze_strategy})")
        print(f"{sep}")
        print(f"🟢 可训练参数: {trainable_params:,} ({100.0 * trainable_params / total_params:.1f}%)")
        print(f"🔒 冻结参数:   {frozen_params:,} ({100.0 * frozen_params / total_params:.1f}%)")
        print(f"📦 总参数:     {total_params:,}")

        # Per-module breakdown.
        self._print_module_stats()
        print(f"{sep}\n")
    
    def _print_module_stats(self):
        """Print trainable/total parameter counts per major sub-module."""
        def _counts(named_params):
            # Return (trainable, total) element counts for the parameters.
            total = trainable = 0
            for _name, p in named_params:
                total += p.numel()
                if p.requires_grad:
                    trainable += p.numel()
            return trainable, total

        visual_trainable, visual_total = _counts(self.clip_model.visual.named_parameters())
        text_trainable, text_total = _counts(self.clip_model.bert.named_parameters())

        # Everything outside the two encoders: projections, logit_scale, etc.
        other_trainable, other_total = _counts(
            (name, p) for name, p in self.clip_model.named_parameters()
            if not any(module in name for module in ['visual.', 'bert.'])
        )

        print(f"┌─ 📷 视觉编码器: {visual_trainable:,}/{visual_total:,} ({100.0 * visual_trainable / visual_total:.1f}%)")
        print(f"├─ 📝 文本编码器: {text_trainable:,}/{text_total:,} ({100.0 * text_trainable / text_total:.1f}%)")
        print(f"└─ 🔗 投影层等:   {other_trainable:,}/{other_total:,} ({100.0 * other_trainable / other_total:.1f}%)")
    
    def save_final_results(self):
        """Persist the final model weights into the experiment directory."""
        if not self.experiment_dir:
            # No experiment directory configured — nothing to save.
            return

        model_path = self.experiment_dir / "results" / "final_model.pt"
        checkpoint = {
            'model_state_dict': self.clip_model.state_dict(),
            'hyperparameters': self.hparams,
            'epoch': self.current_epoch,
        }
        torch.save(checkpoint, model_path)
        print(f"Final model saved to: {model_path}")


def create_data_module(
    train_set,
    val_set,
    batch_size: int = 32,
    num_workers: int = 4,
) -> pl.LightningDataModule:
    """Build a LightningDataModule over pre-constructed train/val datasets.

    Args:
        train_set: dataset yielding (image_tensor, caption_str, img_id).
        val_set: validation dataset with the same item format.
        batch_size: per-dataloader batch size.
        num_workers: dataloader worker count; persistent workers are
            enabled whenever this is > 0.

    Returns:
        A ready-to-use pl.LightningDataModule.
    """

    def collate_fn(batch):
        """Stack images, tokenize captions, and tensorize image ids."""
        images, captions, img_ids = zip(*batch)

        # Stack image tensors into a single batch tensor.
        images = torch.stack(images, dim=0)

        # Tokenize with the module-level CN-CLIP tokenizer (imported at
        # file top); the previous per-call import was redundant.
        text_tokens = tokenize(list(captions), context_length=77)

        # Image ids as a long tensor.
        img_ids = torch.tensor(img_ids, dtype=torch.long)

        return images, text_tokens, img_ids

    class WenwuDataModule(pl.LightningDataModule):
        """Thin DataModule wrapping the externally created datasets."""

        def setup(self, stage: Optional[str] = None):
            # Datasets are built by the caller; just attach them.
            self.train_dataset = train_set
            self.val_dataset = val_set

        def train_dataloader(self):
            return torch.utils.data.DataLoader(
                self.train_dataset,
                batch_size=batch_size,
                shuffle=True,
                num_workers=num_workers,
                pin_memory=True,
                persistent_workers=num_workers > 0,
                collate_fn=collate_fn,
            )

        def val_dataloader(self):
            return torch.utils.data.DataLoader(
                self.val_dataset,
                batch_size=batch_size,
                shuffle=False,
                num_workers=num_workers,
                pin_memory=True,
                persistent_workers=num_workers > 0,
                collate_fn=collate_fn,
            )

    return WenwuDataModule()