import torch
import torch.nn as nn
import torch.distributed as dist
from model.loss import *  # Import all loss functions
from model.training_strategy.optimizer import get_optimizer
from model.training_strategy.scheduler import get_scheduler
from dataset.augmentation.classification_preset_train import ClassificationPresetTrain
from dataset.augmentation.classification_preset_eval import ClassificationPresetEval
from train import Trainer
import yaml
from dataset.dataset_dataloader import create_dataloader
import datetime
import os
import logging
import math

def load_augmentation_config(file_path):
    """Read the augmentation strategy definitions from a YAML file.

    Args:
        file_path: path to the YAML configuration file.

    Returns:
        The parsed configuration (typically a dict), as produced by
        ``yaml.safe_load``.
    """
    with open(file_path, 'r') as handle:
        return yaml.safe_load(handle)

def get_class_by_name(module_name, class_name):
    """Dynamically import and return attribute *class_name* from *module_name*.

    Args:
        module_name: dotted module path, e.g. ``'model.loss'``.
        class_name: name of the class (or any attribute) inside that module.

    Returns:
        The resolved attribute (usually a class).

    Raises:
        ModuleNotFoundError: if the module cannot be imported.
        AttributeError: if the module has no such attribute.
    """
    # importlib.import_module is the documented replacement for the
    # __import__(..., fromlist=[...]) idiom and returns the leaf module.
    import importlib
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

def get_model_class_by_name(module_base, class_name):
    """Import a model class that lives in a module named after itself.

    Resolves ``<module_base>.<class_name>`` and returns the ``class_name``
    attribute from it, e.g. ``get_model_class_by_name('model.backbone',
    'ResNet50')`` imports ``model.backbone.ResNet50`` and returns its
    ``ResNet50`` class.

    Args:
        module_base: dotted package path that contains the model modules.
        class_name: model class name; also the name of its module file.

    Returns:
        The model class.

    Raises:
        ModuleNotFoundError: if the module cannot be imported.
        AttributeError: if the module has no such attribute.
    """
    # importlib.import_module is the documented replacement for the
    # __import__(..., fromlist=[...]) idiom and returns the leaf module.
    import importlib
    module = importlib.import_module(f'{module_base}.{class_name}')
    return getattr(module, class_name)

def initialize_weights(model, init_type='xavier_uniform'):
    """Initialize the weights of *model* in place.

    Conv/Linear weights are initialized according to *init_type* and their
    biases zeroed.  BatchNorm weights are drawn from N(1.0, 0.02) and
    LayerNorm weights set to 1.0 (for Transformers), with biases zeroed.

    Args:
        model: the ``nn.Module`` whose parameters are (re-)initialized.
        init_type: one of 'xavier_uniform', 'xavier_normal',
            'kaiming_uniform', 'kaiming_normal', 'normal', 'uniform'.

    Raises:
        NotImplementedError: if *init_type* is not a supported method
            (raised when the first Conv/Linear module is visited).
    """
    def init_func(m):
        classname = m.__class__.__name__

        if hasattr(m, 'weight') and m.weight is not None and ('Conv' in classname or 'Linear' in classname):
            # nn.init functions operate in-place under no_grad, so work on
            # the Parameter directly instead of the deprecated `.data`.
            if init_type == 'xavier_uniform':
                nn.init.xavier_uniform_(m.weight)
            elif init_type == 'xavier_normal':
                nn.init.xavier_normal_(m.weight)
            elif init_type == 'kaiming_uniform':
                nn.init.kaiming_uniform_(m.weight, mode='fan_out', nonlinearity='relu')
            elif init_type == 'kaiming_normal':
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif init_type == 'normal':
                nn.init.normal_(m.weight, 0.0, 0.01)  # small std for stability
            elif init_type == 'uniform':
                nn.init.uniform_(m.weight, -0.05, 0.05)  # small range for stability
            else:
                raise NotImplementedError(f'Initialization method {init_type} is not implemented')

            # Zero the bias (may be absent, e.g. bias=False layers).
            if getattr(m, 'bias', None) is not None:
                nn.init.constant_(m.bias, 0.0)

        # BatchNorm layers: weight/bias are None when affine=False, so guard.
        elif 'BatchNorm' in classname:
            if m.weight is not None:
                nn.init.normal_(m.weight, 1.0, 0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0.0)

        # LayerNorm (used by Transformers): guard elementwise_affine=False.
        elif 'LayerNorm' in classname:
            if m.weight is not None:
                nn.init.constant_(m.weight, 1.0)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0.0)

    print(f"Initializing model weights with {init_type} method...")
    logging.info(f"Initializing model weights with {init_type} method...")
    model.apply(init_func)

def _strip_ddp_prefix(state_dict):
    """Remove the 'module.' prefix that DDP adds to parameter names, if present."""
    if not any(key.startswith('module.') for key in state_dict):
        return state_dict
    print("🔧 检测到DDP权重格式，移除 'module.' 前缀...")
    new_state_dict = {}
    for key, value in state_dict.items():
        if key.startswith('module.'):
            new_state_dict[key[len('module.'):]] = value
        else:
            new_state_dict[key] = value
    print(f"✅ 已移除DDP前缀，权重键数量: {len(new_state_dict)}")
    return new_state_dict


def _load_pretrained_weights(model, args):
    """Load pre-trained weights into *model*, dropping the classifier head.

    The final fully connected layer is removed so the freshly sized
    ``num_classes`` output layer keeps its own initialization.

    Returns:
        True if the weights were loaded, False if loading failed.
    """
    try:
        # map_location='cpu' keeps loading device-agnostic; the caller moves
        # the model to the target device afterwards.
        state_dict = torch.load(args.pre_trained_weights_path, map_location='cpu')

        # Remove the last layer weights based on model type.
        if 'fc.weight' in state_dict:
            state_dict.pop('fc.weight', None)
            state_dict.pop('fc.bias', None)
        elif 'head.weight' in state_dict:  # For SwinTransformer
            state_dict.pop('head.weight', None)
            state_dict.pop('head.bias', None)

        model.load_state_dict(state_dict, strict=False)
        print(f"Loaded pre-trained weights from {args.pre_trained_weights_path}, ignoring the last layer.")
        logging.info(f"Loaded pre-trained weights from {args.pre_trained_weights_path}, ignoring the last layer.")
        return True
    except Exception as e:
        print(f"Error loading pre-trained weights from {args.pre_trained_weights_path}: {str(e)}")
        logging.error(f"Error loading pre-trained weights from {args.pre_trained_weights_path}: {str(e)}")
        return False


def _load_previous_weights(model, args):
    """Resume from a previous training checkpoint with flexible key matching.

    Handles DDP 'module.' prefixes and skips checkpoint parameters whose
    shapes do not match the current model.

    Returns:
        True if the weights were loaded, False if loading failed.
    """
    try:
        state_dict = torch.load(args.load_previous_weight_path, map_location='cpu')
        state_dict = _strip_ddp_prefix(state_dict)

        model_state_dict = model.state_dict()

        # Keep only keys present in the model with a matching shape.
        filtered_state_dict = {}
        missing_keys = []
        unexpected_keys = []
        for key, value in state_dict.items():
            if key in model_state_dict:
                if model_state_dict[key].shape == value.shape:
                    filtered_state_dict[key] = value
                else:
                    print(f"⚠️ 形状不匹配，跳过: {key} - 权重: {value.shape}, 模型: {model_state_dict[key].shape}")
                    missing_keys.append(key)
            else:
                unexpected_keys.append(key)

        # Model parameters the checkpoint does not provide at all.
        for key in model_state_dict.keys():
            if key not in state_dict:
                missing_keys.append(key)

        model.load_state_dict(filtered_state_dict, strict=False)

        print(f"✅ 成功加载权重: {len(filtered_state_dict)}/{len(model_state_dict)} 个参数")
        if missing_keys:
            print(f"⚠️ 缺失的键: {len(missing_keys)} 个")
        if unexpected_keys:
            print(f"⚠️ 多余的键: {len(unexpected_keys)} 个")

        print(f"Loaded previous training weights from {args.load_previous_weight_path}.")
        logging.info(f"Loaded previous training weights from {args.load_previous_weight_path}.")
        return True
    except Exception as e:
        print(f"Error loading previous training weights from {args.load_previous_weight_path}: {str(e)}")
        logging.error(f"Error loading previous training weights from {args.load_previous_weight_path}: {str(e)}")
        return False


def setup_and_train(args):
    """Build dataloaders, model, loss, optimizer and scheduler from *args*,
    then run training via ``Trainer``.

    Args:
        args: parsed config namespace; expected attributes include
            ``batch_size``, ``augmentation_config``, ``model_name``,
            ``num_classes``, ``pre_trained_weights_path``,
            ``load_previous_weight_path``, ``loss_function``, ``data_dir``,
            ``log_dir`` and optionally ``weight_init_type``.
            ``args.log_dir`` is mutated to point at the run subdirectory.
    """
    # Create DataLoaders; create_dataloader receives the augmentation config
    # path itself, so the config is not parsed separately here.
    train_loader, val_loader = create_dataloader(
        args,
        batch_size=args.batch_size,
        augmentation_config_path=args.augmentation_config
    )

    # Instantiate the selected backbone with the task's output size.
    model_class = get_model_class_by_name('model.backbone', args.model_name)
    model = model_class(num_classes=args.num_classes)

    # Weight loading: pre-trained weights take precedence over resuming from a
    # previous checkpoint; if neither loads, fall back to fresh initialization.
    weights_loaded = False
    if args.pre_trained_weights_path is not None and args.pre_trained_weights_path != "None":
        weights_loaded = _load_pretrained_weights(model, args)
    elif args.load_previous_weight_path is not None and args.load_previous_weight_path != "None":
        weights_loaded = _load_previous_weights(model, args)

    if not weights_loaded:
        # Default to kaiming_normal, which suits ReLU-based networks.
        init_type = getattr(args, 'weight_init_type', 'kaiming_normal')
        initialize_weights(model, init_type)

    # Move model to GPU when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)

    # Loss function is resolved by name from model.loss.
    criterion_class = get_class_by_name('model.loss', args.loss_function)
    criterion = criterion_class(config=args)  # Pass args as config

    optimizer = get_optimizer(model, args)
    scheduler = get_scheduler(optimizer, args)

    # Unique log directory: <dataset>_<model>_<MMDDHHMM>.
    current_time = datetime.datetime.now().strftime("%m%d%H%M")
    dataset_name = os.path.basename(args.data_dir).split('_')[0]  # e.g., web400
    log_dir = os.path.join(args.log_dir, f"{dataset_name}_{args.model_name}_{current_time}")
    os.makedirs(log_dir, exist_ok=True)

    # Update args.log_dir to point to the new subdirectory for downstream use.
    args.log_dir = log_dir

    # Initialize trainer and start training.
    trainer = Trainer(args, criterion=criterion, optimizer=optimizer, scheduler=scheduler,
                      model=model, train_loader=train_loader, val_loader=val_loader, log_dir=log_dir)
    trainer.train()

