import torch
import torch.nn as nn
import os
import logging
from network import get_model
from data.data_loader import get_num_classes


def get_device():
    """Return the preferred torch device: CUDA when available, else CPU.

    Also prints which device was selected (GPU name when on CUDA).
    """
    if not torch.cuda.is_available():
        print("使用CPU")
        return torch.device('cpu')
    print(f"使用GPU: {torch.cuda.get_device_name()}")
    return torch.device('cuda')


def initialize_model(config):
    """Build the model described by ``config["model"]`` and optionally load weights.

    Args:
        config: configuration dict; reads ``model.name``, ``model.pretrained``,
            ``model.pretrained_path`` and (via ``get_num_classes``) the data section.

    Returns:
        The constructed model, with weights loaded from
        ``model.pretrained_path`` when that path is set and exists on disk.
    """
    model_cfg = config["model"]
    model = get_model(
        model_name=model_cfg["name"],
        n_channels=3,  # RGB input
        n_classes=get_num_classes(config),
        pretrained=model_cfg["pretrained"],
    )

    weights_path = model_cfg["pretrained_path"]
    if weights_path and os.path.exists(weights_path):
        load_pretrained_weights(model, weights_path)

    return model


def load_pretrained_weights(model, pretrained_path):
    """Load weights from *pretrained_path* into *model*, best effort.

    Accepts either a full checkpoint dict (with a ``model_state_dict`` key)
    or a bare state dict. Failures are printed, not raised, so training can
    proceed with freshly initialized weights.
    """
    try:
        state = torch.load(pretrained_path, map_location='cpu')
        # Support both checkpoint dicts and raw state dicts.
        weights = state['model_state_dict'] if 'model_state_dict' in state else state
        model.load_state_dict(weights)
        print(f"成功加载预训练权重: {pretrained_path}")
    except Exception as err:
        print(f"加载预训练权重失败: {err}")


def save_model(model, optimizer, epoch, loss, save_path, is_best=False):
    """Save a training checkpoint (model + optimizer state).

    Args:
        model: model whose ``state_dict`` is stored.
        optimizer: optimizer whose ``state_dict`` is stored for resuming.
        epoch: current epoch number (used by ``load_checkpoint`` to resume).
        loss: loss value associated with this checkpoint.
        save_path: destination file path, e.g. ``.../checkpoint.pth``.
        is_best: when True, additionally write a ``*_best.pth`` copy.
    """
    checkpoint = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'loss': loss,
    }

    torch.save(checkpoint, save_path)

    if is_best:
        # Derive the best-model path via splitext instead of str.replace:
        # replace('.pth', ...) would also rewrite a '.pth' substring that
        # happens to occur in a directory component of the path.
        base, ext = os.path.splitext(save_path)
        best_path = f"{base}_best{ext}"
        torch.save(checkpoint, best_path)
        print(f"保存最佳模型: {best_path}")

    print(f"保存模型检查点: {save_path}")


def load_checkpoint(model, optimizer, checkpoint_path):
    """Restore model and optimizer state to resume interrupted training.

    Returns:
        ``(start_epoch, best_loss)`` on success; ``(0, inf)`` when the file
        is missing or cannot be loaded.
    """
    if not os.path.exists(checkpoint_path):
        print(f"检查点文件不存在: {checkpoint_path}")
        return 0, float('inf')

    try:
        state = torch.load(checkpoint_path, map_location='cpu')
        model.load_state_dict(state['model_state_dict'])
        optimizer.load_state_dict(state['optimizer_state_dict'])
        resume_epoch = state['epoch'] + 1
        best_loss = state.get('loss', float('inf'))
    except Exception as err:
        print(f"加载检查点失败: {err}")
        return 0, float('inf')

    print(f"成功加载检查点: {checkpoint_path}")
    print(f"从第 {resume_epoch} 轮开始训练")
    return resume_epoch, best_loss


def load_model_for_inference(model_path, config):
    """Build a model from *config*, load *model_path* weights, prepare for eval.

    Returns:
        ``(model, device)`` with the model in eval mode on the selected
        device; ``(None, device)`` when loading fails.
    """
    model = initialize_model(config)
    device = get_device()

    try:
        state = torch.load(model_path, map_location=device)
        # Support both checkpoint dicts and raw state dicts.
        weights = state['model_state_dict'] if 'model_state_dict' in state else state
        model.load_state_dict(weights)
        model.to(device)
        model.eval()
    except Exception as err:
        print(f"加载推理模型失败: {err}")
        return None, device

    print(f"成功加载推理模型: {model_path}")
    return model, device


def count_parameters(model):
    """Print and return the model's parameter counts.

    Returns:
        ``(total_params, trainable_params)`` — trainable counts only
        parameters with ``requires_grad`` set.
    """
    total = 0
    trainable = 0
    for param in model.parameters():
        numel = param.numel()
        total += numel
        if param.requires_grad:
            trainable += numel

    print(f"总参数量: {total:,}")
    print(f"可训练参数量: {trainable:,}")

    return total, trainable


def get_optimizer(model, config):
    """Create the optimizer named in ``config["training"]["optimizer"]``.

    Supported (case-insensitive): ``adam``, ``sgd`` (momentum 0.9), ``adamw``.

    Raises:
        ValueError: for an unrecognized optimizer name.
    """
    name = config["training"]["optimizer"].lower()
    lr = config["training"]["learning_rate"]

    if name == 'adam':
        return torch.optim.Adam(model.parameters(), lr=lr)
    if name == 'sgd':
        return torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    if name == 'adamw':
        return torch.optim.AdamW(model.parameters(), lr=lr)

    raise ValueError(f"不支持的优化器: {name}")


def get_loss_function(config):
    """Create the loss named in ``config["training"]["loss_function"]``.

    Supported (case-insensitive): CrossEntropyLoss, BCELoss,
    BCEWithLogitsLoss, DiceLoss, CrossEntropyDiceLoss.

    Raises:
        ValueError: for an unrecognized loss name.
    """
    name = config["training"]["loss_function"].lower()

    if name == 'crossentropyloss':
        return nn.CrossEntropyLoss()
    if name == 'bceloss':
        return nn.BCELoss()
    if name == 'bcewithlogitsloss':
        return nn.BCEWithLogitsLoss()
    if name == 'diceloss':
        return DiceLoss()
    if name == 'crossentropydiceloss':
        return CrossEntropyDiceLoss()

    raise ValueError(f"不支持的损失函数: {name}")


class DiceLoss(nn.Module):
    """Dice损失函数"""
    
    def __init__(self, smooth=1e-6):
        super(DiceLoss, self).__init__()
        self.smooth = smooth
    
    def forward(self, pred, target):
        # 将预测结果转换为概率
        pred = torch.softmax(pred, dim=1)
        
        # 将目标转换为one-hot编码
        target_one_hot = torch.zeros_like(pred)
        target_one_hot.scatter_(1, target.unsqueeze(1), 1)
        
        # 计算Dice系数
        intersection = (pred * target_one_hot).sum(dim=(2, 3))
        union = pred.sum(dim=(2, 3)) + target_one_hot.sum(dim=(2, 3))
        
        dice = (2. * intersection + self.smooth) / (union + self.smooth)
        dice_loss = 1 - dice.mean()
        
        return dice_loss


class CrossEntropyDiceLoss(nn.Module):
    """Weighted sum of cross-entropy and Dice losses.

    ``loss = ce_weight * CE(pred, target) + dice_weight * Dice(pred, target)``
    """

    def __init__(self, ce_weight=1.0, dice_weight=1.0, smooth=1e-6):
        super(CrossEntropyDiceLoss, self).__init__()
        self.ce_weight = ce_weight
        self.dice_weight = dice_weight
        self.ce_loss = nn.CrossEntropyLoss()
        self.dice_loss = DiceLoss(smooth=smooth)

    def forward(self, pred, target):
        weighted_ce = self.ce_weight * self.ce_loss(pred, target)
        weighted_dice = self.dice_weight * self.dice_loss(pred, target)
        return weighted_ce + weighted_dice


def setup_logging(log_dir):
    """Configure root logging to write to *log_dir*/training.log and stderr.

    Creates *log_dir* if needed and returns this module's logger.
    """
    os.makedirs(log_dir, exist_ok=True)
    log_path = os.path.join(log_dir, 'training.log')

    handlers = [
        logging.FileHandler(log_path),
        logging.StreamHandler(),
    ]
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=handlers,
    )

    return logging.getLogger(__name__)


def create_model_save_dirs(config):
    """Ensure checkpoint and log directories from *config* exist.

    Returns:
        ``(save_dir, log_dir)`` as read from ``config["training"]``.
    """
    training_cfg = config["training"]
    save_dir = training_cfg["save_dir"]
    log_dir = training_cfg["log_dir"]

    for directory in (save_dir, log_dir):
        os.makedirs(directory, exist_ok=True)

    return save_dir, log_dir


if __name__ == "__main__":
    # Smoke test of the model utility helpers in this module.
    import yaml

    # Minimal configuration used only by this self-test.
    test_config = {
        "model": {
            "name": "unet",
            "pretrained": False,
            "pretrained_path": "",
        },
        "data": {
            "num_classes": 2,
        },
        "training": {
            "optimizer": "Adam",
            "learning_rate": 0.001,
            "loss_function": "CrossEntropyDiceLoss",
            "save_dir": "./test_models",
            "log_dir": "./test_logs",
        },
    }

    print("--- 测试模型工具函数 ---")

    # Device detection.
    device = get_device()

    # Model construction.
    model = initialize_model(test_config)
    print(f"模型类型: {type(model).__name__}")

    # Parameter counting.
    count_parameters(model)

    # Optimizer factory.
    optimizer = get_optimizer(model, test_config)
    print(f"优化器类型: {type(optimizer).__name__}")

    # Loss-function factory.
    criterion = get_loss_function(test_config)
    print(f"损失函数类型: {type(criterion).__name__}")

    # Output-directory creation.
    save_dir, log_dir = create_model_save_dirs(test_config)
    print(f"保存目录: {save_dir}")
    print(f"日志目录: {log_dir}")

    print("模型工具函数测试完成！")
