import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from torch.optim.lr_scheduler import _LRScheduler
import os
import numpy as np
from typing import Tuple, Dict, Any, Optional, List
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

from src.config_loader import ConfigLoader
from src.logger import Logger
from src.model_registry import ModelRegistry  # 新增: 模型注册表


class ModelManager:
    """Model management component: builds, trains, saves and loads models.

    Supports single- and multi-task classification models (classes resolved
    through ``ModelRegistry`` by the configured architecture type) and
    optional distributed data-parallel (DDP) training.
    """

    def __init__(self, config: "ConfigLoader", data_processor, logger):
        self.config = config
        self.data_processor = data_processor
        self.logger = logger
        # The distributed flag must be set before _get_device(), which reads it.
        self.is_distributed = config.get('training.distributed', False)
        self.device = self._get_device()
        self.model = None
        self.ddp_model = None  # DDP wrapper around self.model when distributed
        self.optimizer = None
        self.scheduler = None
        self.criterion = None  # kept for backward compatibility (unused)
        # Per-task loss functions, populated by train(); fix: __init__ used to
        # declare only the never-assigned `criterion` while train() set this.
        self.criterions = None
        self.best_score = 0.0

        # Safe defaults so rank checks work even in non-distributed runs.
        self.rank = 0
        self.world_size = 1

        # Multi-task support: task names and (later) their output dimensions.
        self.tasks = config.get('data.tasks', ["price_range"])
        self.task_output_dims = {}

        # Per-epoch training/validation history.
        self.history = {
            'train_loss': [], 'train_acc': [],
            'val_loss': [], 'val_acc': [], 'val_auc': []
        }

        if self.is_distributed:
            self._init_distributed()

    def _init_distributed(self) -> None:
        """Initialize the distributed process group (NCCL backend).

        Sets ``self.rank`` and ``self.world_size``. Logs and re-raises any
        initialization failure.
        """
        try:
            dist.init_process_group(backend='nccl')
            self.rank = dist.get_rank()
            self.world_size = dist.get_world_size()
            self.logger.info(f"分布式训练初始化完成 - 进程: {self.rank}/{self.world_size}")
        except Exception as e:
            self.logger.error(f"分布式训练初始化失败: {str(e)}")
            raise

    def _get_device(self) -> torch.device:
        """Select the compute device from config ('auto' → CUDA if available).

        In distributed CUDA runs the device is pinned to LOCAL_RANK so each
        process owns exactly one GPU.
        """
        device_config = self.config.get('system.device', 'auto')
        if device_config == 'auto':
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        else:
            device = torch.device(device_config)

        if self.is_distributed and device.type == 'cuda':
            local_rank = int(os.environ.get('LOCAL_RANK', 0))
            device = torch.device(f'cuda:{local_rank}')
            torch.cuda.set_device(device)

        self.logger.info(f"使用计算设备: {device}")
        return device

    def build_model(self, input_dim: int, output_dims: Dict[str, int]) -> None:
        """Build the model architecture; supports multiple model types and tasks.

        Args:
            input_dim: number of input features.
            output_dims: mapping of task name -> number of output classes.
        """
        model_type = self.config.get('model.architecture.type', 'price_mlp')
        hidden_dims = self.config.get('model.architecture.hidden_dims', [256, 192, 128, 64, 48, 32])
        dropout_rate = self.config.get('model.architecture.dropout_rate', 0.25)
        attention = self.config.get('model.architecture.attention', True)

        # Remember per-task output dims for later use (e.g. load_model).
        self.task_output_dims = output_dims

        # Resolve the concrete model class from the registry.
        model_class = ModelRegistry.get_model(model_type)

        if model_type == 'multi_task_model':
            # Multi-task model takes the whole task->dim mapping.
            self.model = model_class(
                input_dim=input_dim,
                output_dims=output_dims,
                hidden_dims=hidden_dims,
                dropout_rate=dropout_rate,
                attention=attention
            ).to(self.device)
        else:
            # Single-task model: use the first task's output dimension.
            primary_task = next(iter(output_dims.keys()))
            self.model = model_class(
                input_dim=input_dim,
                output_dim=output_dims[primary_task],
                hidden_dims=hidden_dims,
                dropout_rate=dropout_rate,
                attention=attention
            ).to(self.device)

        # Wrap for distributed data parallelism.
        if self.is_distributed:
            self.ddp_model = DDP(self.model, device_ids=[self.device])

        self.logger.info(
            f"模型构建完成 - 类型: {model_type}, 输入维度: {input_dim}, "
            f"输出维度: {output_dims}, 分布式: {self.is_distributed}"
        )

    def _get_optimizer(self) -> optim.Optimizer:
        """Instantiate the optimizer named in config over the model's parameters.

        Raises:
            ValueError: if the configured optimizer type is not in torch.optim.
        """
        opt_type = self.config.get('training.optimizer.type', 'AdamW')
        opt_params = self.config.get('training.optimizer.params', {})
        self.logger.info(f"优化器参数: {opt_params}")

        optimizer_cls = getattr(optim, opt_type, None)
        if not optimizer_cls:
            raise ValueError(f"不支持的优化器类型: {opt_type}")

        # In distributed mode, optimize the DDP-wrapped model's parameters.
        model = self.ddp_model if self.is_distributed else self.model
        return optimizer_cls(model.parameters(), **opt_params)

    def _get_scheduler(self) -> Optional[_LRScheduler]:
        """Instantiate the LR scheduler from config, or None if not configured
        or the configured type is unknown (logged as a warning)."""
        sched_type = self.config.get('training.scheduler.type')
        if not sched_type:
            return None

        sched_params = self.config.get('training.scheduler.params', {})
        scheduler_cls = getattr(optim.lr_scheduler, sched_type, None)
        if not scheduler_cls:
            self.logger.warning(f"不支持的调度器类型: {sched_type}，将不使用调度器")
            return None

        return scheduler_cls(self.optimizer, **sched_params)

    def _get_criterion(self, class_weights: Optional[Dict[str, np.ndarray]] = None) -> Dict[str, nn.Module]:
        """Build per-task CrossEntropyLoss criteria.

        Args:
            class_weights: mapping task -> class-weight array, a bare ndarray
                (single-task shorthand), or None for unweighted losses.

        Returns:
            Mapping task name -> nn.CrossEntropyLoss.
        """
        criterions = {}

        # Normalize single-task shorthand. Fix: the old `not class_weights`
        # check raised ValueError for multi-element numpy arrays (ambiguous
        # truth value); test identity/type explicitly instead.
        if class_weights is None or isinstance(class_weights, np.ndarray):
            class_weights = {self.tasks[0]: class_weights}

        for task in self.tasks:
            weights = class_weights.get(task)
            if weights is not None:
                weights_tensor = torch.FloatTensor(weights).to(self.device)
                criterions[task] = nn.CrossEntropyLoss(weight=weights_tensor)
            else:
                criterions[task] = nn.CrossEntropyLoss()

        return criterions

    def train(self, train_set: TensorDataset, val_set: TensorDataset,
              class_weights: Optional[Dict[str, np.ndarray]] = None,
              train_sampler: Optional[Any] = None) -> Tuple[float, float]:
        """Train the model with validation, checkpointing and early stopping.

        Args:
            train_set: training dataset.
            val_set: validation dataset (evaluated in full by every process).
            class_weights: optional per-task class weights (see _get_criterion).
            train_sampler: optional sampler (e.g. DistributedSampler).

        Returns:
            Tuple of (best composite score, validation AUC at the epoch with
            the highest validation accuracy).
        """
        # Training components.
        self.optimizer = self._get_optimizer()
        self.scheduler = self._get_scheduler()
        self.criterions = self._get_criterion(class_weights)

        # Data loaders; shuffling is delegated to the sampler when present.
        batch_size = self.config.get('training.batch_size', 64)
        train_loader = DataLoader(
            train_set,
            batch_size=batch_size,
            shuffle=(train_sampler is None),
            sampler=train_sampler
        )
        val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)

        # Training hyper-parameters.
        epochs = self.config.get('training.epochs', 200)
        early_stopping = self.config.get('training.early_stopping.enable', True)
        patience = self.config.get('training.early_stopping.patience', 25)
        counter = 0

        # Use the DDP wrapper when distributed, the raw model otherwise.
        model = self.ddp_model if self.is_distributed else self.model

        for epoch in range(1, epochs + 1):
            # Reshuffle shards per epoch in distributed mode.
            if train_sampler:
                train_sampler.set_epoch(epoch)

            # Training phase.
            train_metrics = self._train_epoch(train_loader, model)
            self.history['train_loss'].append(train_metrics['loss'])
            self.history['train_acc'].append(train_metrics['acc'])

            # Validation phase.
            val_metrics = self._validate(val_loader, model)
            self.history['val_loss'].append(val_metrics['loss'])
            self.history['val_acc'].append(val_metrics['acc'])
            self.history['val_auc'].append(val_metrics['auc'])

            # Only the main process logs.
            if not self.is_distributed or self.rank == 0:
                self.logger.info(
                    f"Epoch {epoch}/{epochs} - "
                    f"训练损失: {train_metrics['loss']:.4f}, 训练准确率: {train_metrics['acc']:.4f} - "
                    f"验证损失: {val_metrics['loss']:.4f}, 验证准确率: {val_metrics['acc']:.4f}, 验证AUC: {val_metrics['auc']:.4f}"
                )

            # Fix: early-stopping bookkeeping must run on EVERY rank so all
            # processes take the same `break`. Previously only rank 0 could
            # break, leaving other ranks hung in the next epoch's collectives.
            # (All ranks validate the full val set, so metrics agree.)
            current_score = self._calculate_score(val_metrics)
            if current_score > self.best_score:
                self.best_score = current_score
                self.save_model()  # no-op on non-zero ranks
                counter = 0
            elif early_stopping:
                counter += 1
                if counter >= patience:
                    if not self.is_distributed or self.rank == 0:
                        self.logger.info(f"早停触发于第 {epoch} 轮")
                    break

            # Step the LR scheduler once per epoch.
            if self.scheduler:
                self.scheduler.step()

        # Tear down the distributed environment.
        if self.is_distributed:
            dist.destroy_process_group()

        # Guard the degenerate case of zero completed epochs.
        if not self.history['val_acc']:
            return self.best_score, 0.0
        best_acc_epoch = int(np.argmax(self.history['val_acc']))
        return self.best_score, self.history['val_auc'][best_acc_epoch]

    def _train_epoch(self, dataloader: DataLoader, model: nn.Module) -> Dict[str, float]:
        """Run one training epoch.

        Returns:
            Dict with sample-weighted mean 'loss' and 'acc' over the epoch.
        """
        model.train()
        total_loss = 0.0
        correct = 0
        total = 0

        for x, y in dataloader:
            x, y = x.to(self.device), y.to(self.device)

            # NOTE: data-augmentation hook, currently disabled:
            # x = self.data_processor.data_augmentation(x)

            self.optimizer.zero_grad()

            outputs = model(x)
            if isinstance(outputs, dict):
                # Multi-task: average the per-task losses.
                # NOTE(review): all task heads are scored against the same `y`
                # here — assumes shared labels across tasks; confirm upstream.
                loss = 0.0
                for task, out in outputs.items():
                    task_loss = self.criterions[task](out, y)
                    loss += task_loss
                loss = loss / len(outputs)
                # Accuracy is computed on the primary (first) task only.
                primary_task = next(iter(outputs.keys()))
                _, predicted = torch.max(outputs[primary_task].data, 1)
            else:
                # Single-task training.
                loss = self.criterions[self.tasks[0]](outputs, y)
                _, predicted = torch.max(outputs.data, 1)

            loss.backward()
            self.optimizer.step()

            # Accumulate sample-weighted loss and accuracy counts.
            total_loss += loss.item() * x.size(0)
            total += y.size(0)
            correct += (predicted == y).sum().item()

        return {
            'loss': total_loss / total,
            'acc': correct / total
        }

    def _validate(self, dataloader: DataLoader, model: nn.Module) -> Dict[str, float]:
        """Evaluate the model on a validation loader.

        Returns:
            Dict with mean 'loss', 'acc' and macro-averaged one-vs-rest 'auc'
            (positive-class AUC for binary tasks).
        """
        model.eval()
        total_loss = 0.0
        correct = 0
        total = 0
        all_probs = []
        all_labels = []

        with torch.no_grad():
            for x, y in dataloader:
                x, y = x.to(self.device), y.to(self.device)
                outputs = model(x)

                if isinstance(outputs, dict):
                    # Multi-task: average per-task losses; metrics use the
                    # primary (first) task's logits.
                    loss = 0.0
                    for task, out in outputs.items():
                        task_loss = self.criterions[task](out, y)
                        loss += task_loss
                    loss = loss / len(outputs)

                    primary_task = next(iter(outputs.keys()))
                    logits = outputs[primary_task]
                    probs = torch.softmax(logits, dim=1).cpu().numpy()
                else:
                    # Single-task validation.
                    logits = outputs
                    loss = self.criterions[self.tasks[0]](logits, y)
                    probs = torch.softmax(logits, dim=1).cpu().numpy()

                total_loss += loss.item() * x.size(0)
                _, predicted = torch.max(logits.data, 1)
                total += y.size(0)
                correct += (predicted == y).sum().item()

                # Collect probabilities and labels for AUC.
                all_probs.extend(probs)
                all_labels.extend(y.cpu().numpy())

        # Compute AUC. Fix: for binary classification sklearn expects the
        # 1-D positive-class scores, not a two-column probability matrix.
        from sklearn.metrics import roc_auc_score
        probs_arr = np.asarray(all_probs)
        labels_arr = np.asarray(all_labels)
        if probs_arr.ndim == 2 and probs_arr.shape[1] == 2:
            auc = roc_auc_score(labels_arr, probs_arr[:, 1])
        else:
            auc = roc_auc_score(
                labels_arr,
                probs_arr,
                multi_class='ovr',
                average='macro'
            )

        return {
            'loss': total_loss / total,
            'acc': correct / total,
            'auc': auc
        }

    def _calculate_score(self, val_metrics: Dict[str, float]) -> float:
        """Compute the composite model-selection score.

        Weighted blend of accuracy, AUC and the F1 of classes 1 and 2
        (weights are currently fixed; could be made configurable).
        """
        return (0.4 * val_metrics['acc'] +
                0.2 * val_metrics['auc'] +
                0.2 * self._get_class_f1(val_metrics, 1) +
                0.2 * self._get_class_f1(val_metrics, 2))

    def _get_class_f1(self, val_metrics: Dict[str, float], class_id: int) -> float:
        """Return the F1 score for a specific class id.

        Falls back to 0.5 when the metric is absent (per-class F1 is not
        currently produced by _validate).
        """
        return val_metrics.get(f'f1_{class_id}', 0.5)

    def save_model(self) -> None:
        """Save the (unwrapped) model's weights; main process only."""
        if self.is_distributed and self.rank != 0:
            return

        model_name = self.config.get('model.name', 'model')
        model_dir = self.config.get('system.artifacts.model_dir')
        # Fix: ensure the target directory exists before torch.save.
        os.makedirs(model_dir, exist_ok=True)
        model_path = os.path.join(model_dir, f'{model_name}_best.pt')

        # Save the raw model (never the DDP wrapper) so checkpoints load
        # without the 'module.' prefix.
        torch.save(self.model.state_dict(), model_path)
        self.logger.info(f"最佳模型已保存至: {model_path}")

    def load_model(self, input_dim: int, output_dims: Dict[str, int]) -> None:
        """Rebuild the architecture and load the best checkpoint.

        Raises:
            FileNotFoundError: if the checkpoint file does not exist.
        """
        self.build_model(input_dim, output_dims)
        model_name = self.config.get('model.name', 'model')
        model_path = os.path.join(
            self.config.get('system.artifacts.model_dir'),
            f'{model_name}_best.pt'
        )

        if os.path.exists(model_path):
            self.model.load_state_dict(torch.load(model_path, map_location=self.device))
            self.logger.info(f"模型已从 {model_path} 加载")
        else:
            raise FileNotFoundError(f"模型文件不存在: {model_path}")