import os
import argparse
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR
from torchvision import transforms
try:
    from torchvision.transforms import RandAugment
except Exception:
    RandAugment = None
from PIL import Image
from sklearn.metrics import accuracy_score, f1_score
from transformers import AutoTokenizer
import time
from tqdm import tqdm
import warnings
import logging
import json
from datetime import datetime
import matplotlib
matplotlib.use('Agg')  # 设置后端
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import seaborn as sns
from collections import defaultdict

# Silence sklearn's UserWarning spam (e.g. undefined-metric warnings).
warnings.filterwarnings('ignore', category=UserWarning, module='sklearn')

# Force CJK-capable fonts globally so Chinese plot labels render correctly.
plt.rcParams['font.sans-serif'] = ['Noto Sans CJK SC', 'SimHei', 'Microsoft YaHei', 'Arial Unicode MS', 'DejaVu Sans']
matplotlib.rcParams['font.family'] = ['Noto Sans CJK SC', 'SimHei', 'Microsoft YaHei', 'Arial Unicode MS', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

# Best-effort font-cache rebuild so newly installed fonts are picked up.
# NOTE: fm._rebuild() is a private API removed in matplotlib >= 3.6, hence the
# defensive guard. Catch Exception (not a bare except) so KeyboardInterrupt
# and SystemExit still propagate.
try:
    fm._rebuild()
except Exception:
    pass

from moe_classifier import MultiModalMoEClassifier


# ImageNet channel-wise normalization statistics (RGB order), used by all
# torchvision-style transforms built below.
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)


class TrainingLogger:
    """Records per-epoch training metrics to the console, a .log file and JSON.

    One named logger is created per experiment. Handlers left over from a
    previous instance with the same name are removed and closed, so repeated
    runs neither duplicate output nor leak file handles.
    """

    def __init__(self, log_dir: str, experiment_name: str = None):
        self.log_dir = log_dir
        os.makedirs(log_dir, exist_ok=True)

        # Default to a timestamped experiment name.
        if experiment_name is None:
            experiment_name = f"experiment_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        self.experiment_name = experiment_name

        # Log file path.
        log_file = os.path.join(log_dir, f"{experiment_name}.log")

        # Configure the named logger.
        self.logger = logging.getLogger(experiment_name)
        self.logger.setLevel(logging.INFO)
        # Do not bubble records up to the root logger; otherwise every message
        # is printed twice when the root logger has its own handlers.
        self.logger.propagate = False

        # Remove AND close handlers from a previous run with the same name
        # (removing alone leaks the old log-file handle).
        for handler in self.logger.handlers[:]:
            self.logger.removeHandler(handler)
            handler.close()

        # File handler.
        file_handler = logging.FileHandler(log_file, encoding='utf-8')
        file_handler.setLevel(logging.INFO)

        # Console handler.
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)

        # Shared formatter.
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'
        )
        file_handler.setFormatter(formatter)
        console_handler.setFormatter(formatter)

        self.logger.addHandler(file_handler)
        self.logger.addHandler(console_handler)

        # In-memory metric history: metric name -> list of per-epoch values.
        self.metrics = defaultdict(list)
        self.epoch_times = []

        # Metrics are mirrored into this JSON file after every epoch.
        self.json_log_file = os.path.join(log_dir, f"{experiment_name}_metrics.json")

    def log_epoch(self, epoch: int, train_loss: float, train_acc: float, train_f1: float,
                  val_acc: float, val_f1: float, lr: float, epoch_time: float, val_loss: float = None):
        """Record one epoch's metrics in memory, in the log file and in JSON.

        NOTE: ``val_loss`` is only appended when provided; supplying it for
        some epochs but not others leaves the 'val_loss' list shorter than
        (and misaligned with) 'epoch'.
        """

        # Accumulate in memory.
        self.metrics['epoch'].append(epoch)
        self.metrics['train_loss'].append(train_loss)
        self.metrics['train_acc'].append(train_acc)
        self.metrics['train_f1'].append(train_f1)
        self.metrics['val_acc'].append(val_acc)
        self.metrics['val_f1'].append(val_f1)
        self.metrics['learning_rate'].append(lr)
        self.epoch_times.append(epoch_time)

        if val_loss is not None:
            self.metrics['val_loss'].append(val_loss)

        # Human-readable one-line summary.
        log_msg = (f"Epoch {epoch:03d} | "
                  f"Train Loss: {train_loss:.4f} | "
                  f"Train Acc: {train_acc:.4f} | "
                  f"Train F1: {train_f1:.4f} | "
                  f"Val Acc: {val_acc:.4f} | "
                  f"Val F1: {val_f1:.4f} | "
                  f"LR: {lr:.2e} | "
                  f"Time: {epoch_time:.1f}s")

        if val_loss is not None:
            log_msg += f" | Val Loss: {val_loss:.4f}"

        self.logger.info(log_msg)

        # Persist the full history after every epoch so a crash loses nothing.
        self._save_metrics_json()

    def log_info(self, message: str):
        """Log a free-form informational message."""
        self.logger.info(message)

    def log_config(self, config: dict):
        """Log the training configuration and dump it to a JSON file."""
        self.logger.info("=" * 80)
        self.logger.info("🚀 训练配置信息")
        self.logger.info("=" * 80)

        for key, value in config.items():
            self.logger.info(f"{key}: {value}")

        self.logger.info("=" * 80)

        # Mirror the configuration to JSON next to the log file.
        config_file = os.path.join(self.log_dir, f"{self.experiment_name}_config.json")
        with open(config_file, 'w', encoding='utf-8') as f:
            json.dump(config, f, indent=2, ensure_ascii=False)

    def _save_metrics_json(self):
        """Write the in-memory metrics (plus epoch times) to the JSON file."""
        metrics_data = dict(self.metrics)
        metrics_data['epoch_times'] = self.epoch_times

        with open(self.json_log_file, 'w', encoding='utf-8') as f:
            json.dump(metrics_data, f, indent=2, ensure_ascii=False)

    def get_metrics(self):
        """Return the in-memory metric history as a plain dict of lists."""
        return dict(self.metrics)


class TrainingVisualizer:
    """Renders training-curve plots and a markdown summary report.

    Figures are written under ``<log_dir>/plots``. CJK-capable fonts are
    re-applied before every plot because other code may reset rcParams.
    """

    # Preferred CJK-capable fonts, in priority order.
    _CJK_FONTS = ['Noto Sans CJK SC', 'SimHei', 'Microsoft YaHei', 'Arial Unicode MS']

    def __init__(self, log_dir: str, experiment_name: str):
        self.log_dir = log_dir
        self.experiment_name = experiment_name
        self.plots_dir = os.path.join(log_dir, "plots")
        os.makedirs(self.plots_dir, exist_ok=True)

        # Global plot styling.
        plt.style.use('seaborn-v0_8')
        sns.set_palette("husl")

        self._force_cjk_rcparams()

    def _force_cjk_rcparams(self):
        """(Re)apply rcParams so CJK text and the minus sign render correctly."""
        plt.rcParams['font.sans-serif'] = self._CJK_FONTS + ['DejaVu Sans']
        plt.rcParams['axes.unicode_minus'] = False

    def _resolve_chinese_font(self):
        """Return FontProperties for the first resolvable CJK font.

        Falls back to matplotlib's default font when none of the preferred
        fonts can be found. (Previously this lookup was copy-pasted into
        every plotting method, with bare ``except:`` clauses.)
        """
        for font_name in self._CJK_FONTS:
            try:
                return fm.FontProperties(fname=fm.findfont(fm.FontProperties(family=[font_name])))
            except Exception:  # font lookup can fail in several ways; try next
                continue
        return fm.FontProperties()

    def plot_training_curves(self, metrics: dict, save_plots: bool = True):
        """Draw a 2x2 grid of loss / accuracy / F1 / learning-rate curves.

        Returns the matplotlib Figure, or None when no metrics are available.
        """

        if not metrics or 'epoch' not in metrics:
            print("⚠️  没有可用的训练指标数据")
            return

        self._force_cjk_rcparams()
        chinese_font = self._resolve_chinese_font()

        epochs = metrics['epoch']

        fig, axes = plt.subplots(2, 2, figsize=(15, 12))
        fig.suptitle(f'训练曲线 - {self.experiment_name}', fontsize=16, fontweight='bold', fontproperties=chinese_font)

        # 1. Loss curves.
        ax1 = axes[0, 0]
        if 'train_loss' in metrics:
            ax1.plot(epochs, metrics['train_loss'], 'b-', label='训练损失', linewidth=2, marker='o', markersize=4)
        if 'val_loss' in metrics:
            ax1.plot(epochs, metrics['val_loss'], 'r-', label='验证损失', linewidth=2, marker='s', markersize=4)
        ax1.set_title('损失变化曲线', fontsize=14, fontweight='bold', fontproperties=chinese_font)
        ax1.set_xlabel('Epoch', fontsize=12)
        ax1.set_ylabel('Loss', fontsize=12)
        ax1.legend(fontsize=10, prop=chinese_font)
        ax1.grid(True, alpha=0.3)

        # 2. Accuracy curves.
        ax2 = axes[0, 1]
        if 'train_acc' in metrics:
            ax2.plot(epochs, metrics['train_acc'], 'g-', label='训练准确率', linewidth=2, marker='o', markersize=4)
        if 'val_acc' in metrics:
            ax2.plot(epochs, metrics['val_acc'], 'orange', label='验证准确率', linewidth=2, marker='s', markersize=4)
        ax2.set_title('准确率变化曲线', fontsize=14, fontweight='bold', fontproperties=chinese_font)
        ax2.set_xlabel('Epoch', fontsize=12)
        ax2.set_ylabel('Accuracy', fontsize=12)
        ax2.legend(fontsize=10, prop=chinese_font)
        ax2.grid(True, alpha=0.3)

        # 3. F1-score curves.
        ax3 = axes[1, 0]
        if 'train_f1' in metrics:
            ax3.plot(epochs, metrics['train_f1'], 'purple', label='训练F1', linewidth=2, marker='o', markersize=4)
        if 'val_f1' in metrics:
            ax3.plot(epochs, metrics['val_f1'], 'brown', label='验证F1', linewidth=2, marker='s', markersize=4)
        ax3.set_title('F1分数变化曲线', fontsize=14, fontweight='bold', fontproperties=chinese_font)
        ax3.set_xlabel('Epoch', fontsize=12)
        ax3.set_ylabel('F1 Score', fontsize=12)
        ax3.legend(fontsize=10, prop=chinese_font)
        ax3.grid(True, alpha=0.3)

        # 4. Learning-rate curve.
        ax4 = axes[1, 1]
        if 'learning_rate' in metrics:
            ax4.plot(epochs, metrics['learning_rate'], 'red', label='学习率', linewidth=2, marker='d', markersize=4)
            ax4.set_title('学习率变化曲线', fontsize=14, fontweight='bold', fontproperties=chinese_font)
            ax4.set_xlabel('Epoch', fontsize=12)
            ax4.set_ylabel('Learning Rate', fontsize=12)
            ax4.set_yscale('log')  # LR spans orders of magnitude under cosine decay
            ax4.legend(fontsize=10, prop=chinese_font)
            ax4.grid(True, alpha=0.3)

        plt.tight_layout()

        if save_plots:
            plot_path = os.path.join(self.plots_dir, f"{self.experiment_name}_training_curves.png")
            plt.savefig(plot_path, dpi=300, bbox_inches='tight', facecolor='white')
            print(f"📊 训练曲线已保存至: {plot_path}")

        plt.show()  # no-op under the Agg backend set at import time
        return fig

    def plot_loss_comparison(self, metrics: dict, save_plots: bool = True):
        """Plot loss comparison chart (train vs. validation)."""

        if not metrics or 'epoch' not in metrics:
            return

        self._force_cjk_rcparams()
        chinese_font = self._resolve_chinese_font()

        epochs = metrics['epoch']

        plt.figure(figsize=(10, 6))

        if 'train_loss' in metrics:
            plt.plot(epochs, metrics['train_loss'], 'b-', label='训练损失', linewidth=2, marker='o', markersize=5)
        if 'val_loss' in metrics:
            plt.plot(epochs, metrics['val_loss'], 'r-', label='验证损失', linewidth=2, marker='s', markersize=5)

        plt.title(f'训练与验证损失对比 - {self.experiment_name}', fontsize=16, fontweight='bold', fontproperties=chinese_font)
        plt.xlabel('Epoch', fontsize=14)
        plt.ylabel('Loss', fontsize=14)
        plt.legend(fontsize=12, prop=chinese_font)
        plt.grid(True, alpha=0.3)

        if save_plots:
            plot_path = os.path.join(self.plots_dir, f"{self.experiment_name}_loss_comparison.png")
            plt.savefig(plot_path, dpi=300, bbox_inches='tight', facecolor='white')
            print(f"📊 损失对比图已保存至: {plot_path}")

        plt.show()
        # The figure is not returned, so close it to avoid accumulating open
        # figures (matplotlib warns after 20) across repeated calls.
        plt.close()

    def plot_accuracy_comparison(self, metrics: dict, save_plots: bool = True):
        """Plot accuracy comparison chart (train vs. validation)."""

        if not metrics or 'epoch' not in metrics:
            return

        self._force_cjk_rcparams()
        chinese_font = self._resolve_chinese_font()

        epochs = metrics['epoch']

        plt.figure(figsize=(10, 6))

        if 'train_acc' in metrics:
            plt.plot(epochs, metrics['train_acc'], 'g-', label='训练准确率', linewidth=2, marker='o', markersize=5)
        if 'val_acc' in metrics:
            plt.plot(epochs, metrics['val_acc'], 'orange', label='验证准确率', linewidth=2, marker='s', markersize=5)

        plt.title(f'训练与验证准确率对比 - {self.experiment_name}', fontsize=16, fontweight='bold', fontproperties=chinese_font)
        plt.xlabel('Epoch', fontsize=14)
        plt.ylabel('Accuracy', fontsize=14)
        plt.legend(fontsize=12, prop=chinese_font)
        plt.grid(True, alpha=0.3)

        if save_plots:
            plot_path = os.path.join(self.plots_dir, f"{self.experiment_name}_accuracy_comparison.png")
            plt.savefig(plot_path, dpi=300, bbox_inches='tight', facecolor='white')
            print(f"📊 准确率对比图已保存至: {plot_path}")

        plt.show()
        # See plot_loss_comparison: close the anonymous figure.
        plt.close()

    def create_summary_report(self, metrics: dict, config: dict, total_time: float):
        """Write a markdown summary (best metrics, timing) and return its text."""

        if not metrics or 'epoch' not in metrics:
            return

        # Best validation metrics and the (1-based) epochs they occurred at.
        best_val_acc = max(metrics['val_acc']) if 'val_acc' in metrics else 0
        best_val_f1 = max(metrics['val_f1']) if 'val_f1' in metrics else 0
        best_val_acc_epoch = metrics['val_acc'].index(best_val_acc) + 1 if 'val_acc' in metrics else 0
        best_val_f1_epoch = metrics['val_f1'].index(best_val_f1) + 1 if 'val_f1' in metrics else 0

        # Average epoch time. NOTE(review): TrainingLogger.get_metrics() does
        # not include 'epoch_times' (it only appears in the JSON file), so
        # this is usually 0; the guard also avoids np.mean([]) -> nan.
        epoch_times = metrics.get('epoch_times') or []
        avg_epoch_time = float(np.mean(epoch_times)) if epoch_times else 0.0

        def _cfg(key, zh_key):
            # main() builds the config dict with Chinese keys; accept either
            # the English or the Chinese key so the report isn't all 'N/A'.
            return config.get(key, config.get(zh_key, 'N/A'))

        report = f"""
# 训练总结报告 - {self.experiment_name}

## 训练配置
- 模型类别数: {_cfg('num_classes', '模型类别数')}
- 批次大小: {_cfg('batch_size', '批次大小')}
- 学习率: {_cfg('lr', '学习率')}
- 训练轮数: {_cfg('epochs', '训练轮数')}
- 总训练时间: {total_time/3600:.2f} 小时

## 最佳性能指标
- 最佳验证准确率: {best_val_acc:.4f} (第 {best_val_acc_epoch} 轮)
- 最佳验证F1分数: {best_val_f1:.4f} (第 {best_val_f1_epoch} 轮)

## 训练统计
- 总训练轮数: {len(metrics['epoch'])}
- 平均每轮时间: {avg_epoch_time:.1f} 秒

生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
"""

        # Persist the report next to the logs.
        report_path = os.path.join(self.log_dir, f"{self.experiment_name}_summary.md")
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(report)

        print(f"📋 训练总结报告已保存至: {report_path}")
        return report


class DiseaseTextImageDataset(Dataset):
    """Paired image+text classification dataset backed by a CSV manifest.

    The CSV must contain the columns ``image`` (path relative to
    ``image_root``), ``text`` and ``label`` (integer class id).

    Args:
        csv_path: path to the manifest CSV.
        image_root: directory that image paths in the CSV are relative to.
        tokenizer: HuggingFace-style tokenizer applied to the text field.
        image_size: square side length images are resized/cropped to.
        max_len: token-level truncation/padding length.
        is_train: enables augmentation when True.
        augment: 'none' | 'basic' | 'strong' policy (only consulted when
            ``is_train`` is True; anything else falls back to eval transforms).
        rand_magnitude: RandAugment magnitude for the 'strong' policy.
        jitter: ColorJitter strength for the 'strong' policy.

    Raises:
        ValueError: when the CSV lacks any of the required columns.
    """

    def __init__(self, csv_path: str, image_root: str, tokenizer, image_size: int = 224, max_len: int = 64,
                 is_train: bool = False, augment: str = 'basic', rand_magnitude: int = 7, jitter: float = 0.3):
        super().__init__()
        self.df = pd.read_csv(csv_path)
        # Validate the manifest schema explicitly: a plain `assert` would be
        # silently stripped when Python runs with -O.
        if not {'image', 'text', 'label'}.issubset(set(self.df.columns)):
            raise ValueError("CSV必须包含列: image, text, label")
        self.image_root = image_root
        self.tokenizer = tokenizer
        self.max_len = max_len
        # Augmentation policy.
        if is_train and augment == 'strong':
            # Strong: random crop/flip + optional RandAugment + color jitter.
            aug_list = [
                transforms.RandomResizedCrop(image_size, scale=(0.6, 1.0)),
                transforms.RandomHorizontalFlip(p=0.5),
            ]
            if RandAugment is not None:  # older torchvision may lack RandAugment
                aug_list.append(RandAugment(magnitude=rand_magnitude))
            aug_list.extend([
                transforms.ColorJitter(brightness=jitter, contrast=jitter, saturation=jitter, hue=min(0.1, jitter)),
                transforms.ToTensor(),
                transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
            ])
            self.transform = transforms.Compose(aug_list)
        elif is_train and augment == 'basic':
            # Basic: deterministic resize + random horizontal flip.
            self.transform = transforms.Compose([
                transforms.Resize((image_size, image_size)),
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.ToTensor(),
                transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
            ])
        else:
            # Evaluation / 'none': deterministic resize + normalize only.
            self.transform = transforms.Compose([
                transforms.Resize((image_size, image_size)),
                transforms.ToTensor(),
                transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
            ])

    def __len__(self):
        """Number of rows in the manifest."""
        return len(self.df)

    def __getitem__(self, idx):
        """Return a dict with keys: image, input_ids, attention_mask, label."""
        row = self.df.iloc[idx]
        img_path = os.path.join(self.image_root, row['image'])
        text = str(row['text'])
        label = int(row['label'])

        image = Image.open(img_path).convert('RGB')
        image = self.transform(image)

        tokens = self.tokenizer(
            text,
            truncation=True,
            padding='max_length',
            max_length=self.max_len,
            return_tensors='pt'
        )

        item = {
            'image': image,
            # squeeze(0): the tokenizer returns a batch dim of 1 for one text.
            'input_ids': tokens['input_ids'].squeeze(0),
            'attention_mask': tokens['attention_mask'].squeeze(0),
            'label': torch.tensor(label, dtype=torch.long)
        }
        return item


def count_parameters(model):
    """Return ``(total, trainable)`` parameter counts for *model*."""
    total = 0
    trainable = 0
    for param in model.parameters():
        n = param.numel()
        total += n
        if param.requires_grad:
            trainable += n
    return total, trainable


def train_one_epoch(model, dataloader, optimizer, device, epoch, lb_coeff=0.01,
                    gate_monitor: bool = False, gate_report_dir: str = None):
    """Run one training epoch.

    Args:
        model: multimodal MoE model; forward returns (logits, gate_probs, lb_loss).
        dataloader: yields dicts with 'image', 'input_ids', 'attention_mask', 'label'.
        optimizer: optimizer over the model's trainable parameters.
        device: device tensors are moved to.
        epoch: 1-based epoch number (used in progress bar / gate report).
        lb_coeff: weight of the load-balancing loss term.
        gate_monitor: when True, accumulate gate-usage statistics.
        gate_report_dir: directory to dump the gate statistics JSON into.

    Returns:
        (avg_loss, accuracy, macro_f1) over the whole epoch.
    """
    model.train()
    ce_loss = nn.CrossEntropyLoss()
    total_loss = 0.0
    all_preds, all_labels = [], []
    # Running correct-count so the progress-bar accuracy is O(1) per batch.
    # (Previously accuracy_score was recomputed over the full accumulated
    # prediction list every batch — quadratic over the epoch.)
    running_correct = 0
    # Gate-monitoring accumulators, lazily initialised on the first batch.
    sum_gate_probs = None
    top1_counts = None
    topk_counts = None
    total_samples = 0

    pbar = tqdm(dataloader, desc=f'Epoch {epoch:03d} [Train]', leave=False)

    for batch_idx, batch in enumerate(pbar):
        images = batch['image'].to(device)
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['label'].to(device)

        optimizer.zero_grad()
        logits, gate_probs, lb_loss = model(images, input_ids, attention_mask)
        # Optional auxiliary visual-classification loss.
        aux_loss = torch.zeros((), device=device)
        if getattr(model, 'use_visual_aux', False):
            aux_loss = model.compute_visual_aux_loss(images, labels)
        loss = ce_loss(logits, labels) + lb_coeff * lb_loss + aux_loss
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()

        total_loss += loss.item() * images.size(0)
        preds = logits.argmax(dim=-1).detach().cpu().numpy()
        labels_np = labels.detach().cpu().numpy()
        all_preds.extend(list(preds))
        all_labels.extend(list(labels_np))
        running_correct += int((preds == labels_np).sum())

        # Accumulate gate-usage statistics.
        if gate_monitor:
            with torch.no_grad():
                bs = images.size(0)
                total_samples += bs
                num_experts = gate_probs.size(-1)
                if sum_gate_probs is None:
                    sum_gate_probs = gate_probs.sum(dim=0)
                    top1_counts = torch.zeros(num_experts, device=gate_probs.device)
                    topk_counts = torch.zeros(num_experts, device=gate_probs.device)
                else:
                    sum_gate_probs += gate_probs.sum(dim=0)
                # Vectorised per-expert counting (replaces Python loops over experts).
                top1 = gate_probs.argmax(dim=-1)
                top1_counts += torch.bincount(top1, minlength=num_experts).to(top1_counts.dtype)
                k = getattr(model, 'top_k', 1)
                if k > 1:
                    # torch.topk yields distinct indices per row, so a flat
                    # bincount equals the per-expert "selected at least once" count.
                    topk_idx = torch.topk(gate_probs, k=k, dim=-1).indices
                    topk_counts += torch.bincount(topk_idx.reshape(-1), minlength=num_experts).to(topk_counts.dtype)

        # Progress-bar update with running accuracy.
        current_acc = running_correct / len(all_labels)
        pbar.set_postfix({
            'Loss': f'{loss.item():.4f}',
            'Acc': f'{current_acc:.4f}',
            'LR': f'{optimizer.param_groups[0]["lr"]:.2e}'
        })

    avg_loss = total_loss / len(dataloader.dataset)
    acc = accuracy_score(all_labels, all_preds)
    f1_macro = f1_score(all_labels, all_preds, average='macro')
    # Dump gate-usage statistics when requested.
    if gate_monitor and sum_gate_probs is not None and total_samples > 0:
        mean_probs = (sum_gate_probs / total_samples).detach().cpu().numpy().tolist()
        top1_rates = (top1_counts / total_samples).detach().cpu().numpy().tolist()
        if topk_counts is not None:
            topk_rates = (topk_counts / total_samples).detach().cpu().numpy().tolist()
        else:
            topk_rates = None
        gate_stats = {
            'epoch': epoch,
            'mean_gate_probs': mean_probs,
            'top1_selected_rate': top1_rates,
            'topk_selected_rate': topk_rates,
            'num_experts': int(top1_counts.numel()),
            'top_k': int(getattr(model, 'top_k', 1)),
        }
        if gate_report_dir is not None:
            os.makedirs(gate_report_dir, exist_ok=True)
            ts = datetime.now().strftime('%Y%m%d_%H%M%S')
            out_path = os.path.join(gate_report_dir, f"gate_usage_epoch{epoch}_{ts}.json")
            with open(out_path, 'w', encoding='utf-8') as f:
                json.dump(gate_stats, f, indent=2, ensure_ascii=False)
            print(f"🧭 门控统计已保存: {out_path}")
    return avg_loss, acc, f1_macro


@torch.no_grad()
def evaluate(model, dataloader, device, epoch=None):
    """Evaluate the model; returns (accuracy, macro_f1) over the loader."""
    model.eval()
    all_preds, all_labels = [], []
    # Running correct-count so the progress-bar accuracy is O(1) per batch
    # (previously accuracy_score was recomputed over all accumulated
    # predictions every batch — quadratic over the evaluation set).
    running_correct = 0

    desc = f'Epoch {epoch:03d} [Val]' if epoch is not None else 'Evaluating'
    pbar = tqdm(dataloader, desc=desc, leave=False)

    for batch in pbar:
        images = batch['image'].to(device)
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['label'].to(device)

        logits, _, _ = model(images, input_ids, attention_mask)
        preds = logits.argmax(dim=-1).detach().cpu().numpy()
        labels_np = labels.detach().cpu().numpy()
        all_preds.extend(list(preds))
        all_labels.extend(list(labels_np))
        running_correct += int((preds == labels_np).sum())

        # Progress-bar update with running accuracy.
        pbar.set_postfix({'Acc': f'{running_correct / len(all_labels):.4f}'})

    acc = accuracy_score(all_labels, all_preds)
    f1_macro = f1_score(all_labels, all_preds, average='macro')
    return acc, f1_macro


def print_system_info(device, args):
    """Print a banner describing the hardware and training hyper-parameters."""
    banner = "=" * 80
    print(banner)
    print("🚀 训练配置信息")
    print(banner)

    # Hardware summary.
    if torch.cuda.is_available():
        print(f"🔥 GPU设备: {torch.cuda.get_device_name(device)}")
        print(f"📊 GPU内存: {torch.cuda.get_device_properties(device).total_memory / 1024**3:.1f} GB")

        # When a physical GPU was pinned via --gpu_id it is remapped, so show both.
        if args.gpu_id is not None:
            print(f"🎯 物理GPU ID: {args.gpu_id} (映射为 {device})")
        else:
            print(f"🎯 当前GPU: {device}")
        print(f"💾 可用GPU数量: {torch.cuda.device_count()}")
    else:
        print("⚠️  使用CPU训练")

    # Hyper-parameter summary.
    for line in (
        f"📁 训练数据: {args.train_csv}",
        f"📁 验证数据: {args.val_csv}",
        f"🖼️  图像路径: {args.image_root}",
        f"🏷️  类别数量: {args.num_classes}",
        f"📦 批次大小: {args.batch_size}",
        f"🔄 训练轮数: {args.epochs}",
        f"📈 学习率: {args.lr}",
        f"⚖️  权重衰减: {args.weight_decay}",
        f"🎨 图像大小: {args.image_size}",
        f"📝 文本长度: {args.max_len}",
        f"🔧 专家数量: {args.num_experts}",
        f"🎯 Top-K: {args.top_k}",
        f"💡 特征维度: {args.feature_dim}",
    ):
        print(line)
    print(banner)


def main():
    """CLI entry point: parse args, build datasets and model, run training.

    Side effects: may set CUDA_VISIBLE_DEVICES; creates the output/log
    directories; writes checkpoints, JSON metric logs, plot images and a
    markdown summary report under --output_dir.
    """
    parser = argparse.ArgumentParser(description='多模态MoE分类器训练脚本')
    parser.add_argument('--train_csv', type=str, default='data/train.csv', help='训练数据CSV文件路径')
    parser.add_argument('--val_csv', type=str, default='data/val.csv', help='验证数据CSV文件路径')
    parser.add_argument('--image_root', type=str, default='data/images', help='图像根目录')
    parser.add_argument('--num_classes', type=int, required=True, help='分类类别数量')
    parser.add_argument('--img_backbone', type=str, default='resnet50', help='图像骨干网络')
    parser.add_argument('--text_model', type=str, default='distilbert-base-uncased', help='文本模型')
    parser.add_argument('--feature_dim', type=int, default=512, help='特征维度')
    parser.add_argument('--num_experts', type=int, default=3, help='专家网络数量')
    parser.add_argument('--top_k', type=int, default=2, help='Top-K专家选择')
    parser.add_argument('--train_text_encoder', action='store_true', help='是否训练文本编码器')
    parser.add_argument('--batch_size', type=int, default=32, help='批次大小')
    parser.add_argument('--epochs', type=int, default=20, help='训练轮数')
    parser.add_argument('--lr', type=float, default=3e-4, help='学习率')
    parser.add_argument('--weight_decay', type=float, default=1e-4, help='权重衰减')
    parser.add_argument('--image_size', type=int, default=224, help='图像尺寸')
    parser.add_argument('--max_len', type=int, default=64, help='文本最大长度')
    parser.add_argument('--lb_coeff', type=float, default=0.01, help='负载均衡系数')
    parser.add_argument('--output_dir', type=str, default='checkpoints', help='输出目录')
    parser.add_argument('--gpu_id', type=int, default=None, help='指定GPU ID (如: 0, 1, 2, 3)')
    parser.add_argument('--num_workers', type=int, default=4, help='数据加载器工作进程数')
    # Added: data-augmentation and gating/visual options.
    parser.add_argument('--augment_train', type=str, default='basic', choices=['none','basic','strong'], help='训练集增强策略')
    parser.add_argument('--rand_magnitude', type=int, default=7, help='RandAugment幅度(5-9推荐)')
    parser.add_argument('--jitter', type=float, default=0.3, help='ColorJitter强度')
    parser.add_argument('--visual_pretrained', action='store_true', help='使用预训练视觉骨干')
    parser.add_argument('--use_visual_aux', action='store_true', help='启用视觉辅助分类头')
    parser.add_argument('--visual_aux_weight', type=float, default=0.4, help='视觉辅助损失权重')
    parser.add_argument('--gating_strategy', type=str, default='softmax_topk', choices=['softmax_topk','prob_norm','softmax_temp','power'], help='门控融合权重策略')
    parser.add_argument('--gating_temperature', type=float, default=1.0, help='Softmax温度')
    parser.add_argument('--gating_power_alpha', type=float, default=1.0, help='幂次加权alpha')
    parser.add_argument('--second_prob_threshold', type=float, default=0.0, help='Top-2次优阈值，低于则退化Top-1')
    parser.add_argument('--gate_monitor', action='store_true', help='记录门控概率与Top-K选择率')
    parser.add_argument('--visual_mandatory_epochs', type=int, default=0, help='训练初期视觉必选的轮数(衰减至0)')
    parser.add_argument('--visual_mandatory_init_strength', type=float, default=0.5, help='视觉必选初始权重(0-1)')

    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)

    # GPU selection.
    if args.gpu_id is not None:
        # NOTE(review): masking devices via CUDA_VISIBLE_DEVICES only takes
        # effect if CUDA has not already been initialised in this process —
        # confirm no earlier torch.cuda call has run.
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
        # Re-initialise the CUDA context; after masking, the chosen physical
        # GPU is visible as cuda:0.
        if torch.cuda.is_available():
            torch.cuda.init()
            device = torch.device('cuda:0')
        else:
            device = torch.device('cpu')
    else:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Print the system / hyper-parameter banner.
    print_system_info(device, args)

    # Load datasets.
    print("📚 加载数据集...")
    tokenizer = AutoTokenizer.from_pretrained(args.text_model)
    train_ds = DiseaseTextImageDataset(args.train_csv, args.image_root, tokenizer,
                                       image_size=args.image_size, max_len=args.max_len,
                                       is_train=True, augment=args.augment_train,
                                       rand_magnitude=args.rand_magnitude, jitter=args.jitter)
    val_ds = DiseaseTextImageDataset(args.val_csv, args.image_root, tokenizer,
                                     image_size=args.image_size, max_len=args.max_len,
                                     is_train=False, augment='basic')

    print(f"✅ 训练集样本数: {len(train_ds)}")
    print(f"✅ 验证集样本数: {len(val_ds)}")

    train_loader = DataLoader(train_ds, batch_size=args.batch_size, shuffle=True, 
                            num_workers=args.num_workers, pin_memory=True)
    val_loader = DataLoader(val_ds, batch_size=args.batch_size, shuffle=False, 
                          num_workers=args.num_workers, pin_memory=True)

    # Build the model.
    print("🏗️  构建模型...")
    model = MultiModalMoEClassifier(
        num_classes=args.num_classes,
        img_backbone=args.img_backbone,
        text_model=args.text_model,
        feature_dim=args.feature_dim,
        num_experts=args.num_experts,
        top_k=args.top_k,
        train_text_encoder=args.train_text_encoder,
        visual_pretrained=args.visual_pretrained,
        use_visual_aux=args.use_visual_aux,
        visual_aux_weight=args.visual_aux_weight,
        gating_strategy=args.gating_strategy,
        gating_temperature=args.gating_temperature,
        gating_power_alpha=args.gating_power_alpha,
        second_prob_threshold=args.second_prob_threshold,
    ).to(device)

    # Parameter statistics (size estimate assumes 4 bytes / float32 param).
    total_params, trainable_params = count_parameters(model)
    print(f"📊 模型总参数: {total_params:,}")
    print(f"📊 可训练参数: {trainable_params:,}")
    print(f"📊 参数大小: {total_params * 4 / 1024**2:.1f} MB")

    # Optimizer over the trainable parameters only.
    optimizer = optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), 
                          lr=args.lr, weight_decay=args.weight_decay)
    
    # LR schedule: cosine annealing down to 1% of the initial learning rate.
    scheduler = CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=args.lr * 0.01)
    
    # Initialise logger and visualizer.
    experiment_name = f"MoE_cls_{args.num_classes}classes_{datetime.now().strftime('%m%d_%H%M')}"
    log_dir = os.path.join(args.output_dir, "logs")
    
    logger = TrainingLogger(log_dir, experiment_name)
    visualizer = TrainingVisualizer(log_dir, experiment_name)
    
    # Record the training configuration (keys are intentionally Chinese; the
    # dict is dumped verbatim to <experiment>_config.json).
    config = {
        "实验名称": experiment_name,
        "模型类别数": args.num_classes,
        "图像骨干网络": args.img_backbone,
        "文本模型": args.text_model,
        "特征维度": args.feature_dim,
        "专家网络数量": args.num_experts,
        "Top-K专家": args.top_k,
        "批次大小": args.batch_size,
        "训练轮数": args.epochs,
        "学习率": args.lr,
        "权重衰减": args.weight_decay,
        "图像尺寸": args.image_size,
        "文本最大长度": args.max_len,
        "负载均衡系数": args.lb_coeff,
        "训练集样本数": len(train_ds),
        "验证集样本数": len(val_ds),
        "GPU设备": str(device),
        "模型总参数": total_params,
        "可训练参数": trainable_params
    }
    
    logger.log_config(config)
    logger.log_info("🎯 开始训练...")
    
    best_f1 = 0.0
    start_time = time.time()
    
    try:
        for epoch in range(1, args.epochs + 1):
            epoch_start = time.time()
            
            try:
                # Linear decay of the "visual mandatory" gating strength over
                # the first --visual_mandatory_epochs epochs.
                if args.visual_mandatory_epochs > 0:
                    strength = max(0.0, args.visual_mandatory_init_strength * (1.0 - (epoch-1) / max(1, args.visual_mandatory_epochs)))
                    # Written onto the model; consumed inside its forward().
                    model.visual_mandatory_strength = float(strength)
                # Train for one epoch.
                train_loss, train_acc, train_f1 = train_one_epoch(
                    model, train_loader, optimizer, device, epoch, lb_coeff=args.lb_coeff,
                    gate_monitor=args.gate_monitor, gate_report_dir=os.path.join(log_dir, 'gate_logs'))
                
                # Validate.
                val_acc, val_f1 = evaluate(model, val_loader, device, epoch)
                
                # Read the LR before stepping the scheduler so the logged
                # value is the one actually used during this epoch.
                current_lr = optimizer.param_groups[0]['lr']
                scheduler.step()
                
                epoch_time = time.time() - epoch_start
                
                # Log this epoch's metrics (console, .log file and JSON).
                logger.log_epoch(
                    epoch=epoch,
                    train_loss=train_loss,
                    train_acc=train_acc,
                    train_f1=train_f1,
                    val_acc=val_acc,
                    val_f1=val_f1,
                    lr=current_lr,
                    epoch_time=epoch_time
                )

                # Checkpoint whenever validation macro-F1 improves.
                if val_f1 > best_f1:
                    best_f1 = val_f1
                    ckpt_path = os.path.join(args.output_dir, f"best_epoch{epoch}_f1{best_f1:.4f}.pt")
                    torch.save({
                        'model': model.state_dict(), 
                        'args': vars(args),
                        'epoch': epoch,
                        'best_f1': best_f1,
                        'optimizer': optimizer.state_dict(),
                        'scheduler': scheduler.state_dict()
                    }, ckpt_path)
                    logger.log_info(f"💾 保存最佳模型: {ckpt_path}")
                
                # Refresh training-curve plots every 5 epochs and on the last.
                if epoch % 5 == 0 or epoch == args.epochs:
                    metrics = logger.get_metrics()
                    visualizer.plot_training_curves(metrics, save_plots=True)
                    
            except KeyboardInterrupt:
                logger.log_info(f"⚠️  训练在第 {epoch} 轮被用户中断")
                # Save the current state before re-raising the interrupt.
                interrupt_ckpt = os.path.join(args.output_dir, f"interrupted_epoch{epoch}.pt")
                torch.save({
                    'model': model.state_dict(), 
                    'args': vars(args),
                    'epoch': epoch,
                    'best_f1': best_f1,
                    'optimizer': optimizer.state_dict(),
                    'scheduler': scheduler.state_dict()
                }, interrupt_ckpt)
                logger.log_info(f"💾 已保存中断时的模型: {interrupt_ckpt}")
                
                # Emit plots and a summary for the partially-completed run.
                metrics = logger.get_metrics()
                if metrics:
                    visualizer.plot_training_curves(metrics, save_plots=True)
                    visualizer.create_summary_report(metrics, config, time.time() - start_time)
                raise
                
    except KeyboardInterrupt:
        # Outer handler: the inner one has already checkpointed; just report
        # elapsed time and the best score, then exit cleanly.
        logger.log_info("🛑 训练被用户中断，正在清理...")
        total_time = time.time() - start_time
        logger.log_info(f"⏱️  已训练时间: {total_time/3600:.2f} 小时")
        logger.log_info(f"🏆 当前最佳验证F1: {best_f1:.4f}")
        return

    # Save the final model (regardless of whether it is the best one).
    final_ckpt = os.path.join(args.output_dir, "final.pt")
    torch.save({
        'model': model.state_dict(), 
        'args': vars(args),
        'epoch': args.epochs,
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict()
    }, final_ckpt)
    
    total_time = time.time() - start_time
    
    # Final report and visualisations.
    metrics = logger.get_metrics()
    visualizer.plot_training_curves(metrics, save_plots=True)
    visualizer.plot_loss_comparison(metrics, save_plots=True)
    visualizer.plot_accuracy_comparison(metrics, save_plots=True)
    visualizer.create_summary_report(metrics, config, total_time)
    
    logger.log_info("=" * 80)
    logger.log_info("🎉 训练完成!")
    logger.log_info(f"⏱️  总训练时间: {total_time/3600:.2f} 小时")
    logger.log_info(f"🏆 最佳验证F1: {best_f1:.4f}")
    logger.log_info(f"💾 最终模型保存至: {final_ckpt}")
    logger.log_info("=" * 80)


# Script entry point.
if __name__ == '__main__':
    main()