import torch
import torch.onnx
import argparse
import os
import yaml
import importlib
from pathlib import Path
from typing import Dict, Any, Tuple, Optional

def load_experiment_config(experiment_dir: str) -> Dict[str, Any]:
    """Load the experiment configuration YAML from *experiment_dir*.

    Args:
        experiment_dir: Directory expected to contain ``experiment_config.yaml``.

    Returns:
        The parsed configuration dict. ``{}`` when the file is missing
        or empty (a warning is printed in the missing case).
    """
    config_path = Path(experiment_dir) / "experiment_config.yaml"
    if not config_path.exists():
        print(f"Warning: Config file not found: {config_path.absolute()}")
        return {}

    with open(config_path, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)

    # yaml.safe_load returns None for an empty file; honor the Dict contract.
    return config or {}

def create_model(model_type: str, model_config: Dict[str, Any]) -> torch.nn.Module:
    """Instantiate the requested network architecture.

    Args:
        model_type: Architecture identifier (matched case-insensitively).
        model_config: Hyper-parameter dict; missing entries use defaults.

    Returns:
        The freshly constructed (untrained) model.

    Raises:
        ValueError: If *model_type* is not a supported identifier.
    """
    kind = model_type.lower()

    if kind == 'myunet':
        # Fixed architecture; the per-model hyper-parameters are not used here.
        from model.myunet import HighResolutionModel
        net = HighResolutionModel()
        print("Using HighResolutionModel (Enhanced U-Net)")
        return net

    if kind in ('aynet', 'model_aynet', 'model_ynet', 'model_ynet_conv', 'model_ynet_dascat', 'model_ynet_rdcat'):
        # Gather hyper-parameters (with defaults); the dynamic loader filters
        # them against the target class's constructor signature.
        return _create_dynamic_model(kind, {
            'in_channels': model_config.get('in_channels', 3),
            'up_mode': model_config.get('up_mode', 'upsample'),
            'merge_mode': model_config.get('merge_mode', 'concat'),
            'enable_segmentation': model_config.get('enable_segmentation', False),
            'encoder_mode': model_config.get('encoder_mode', 'patch'),
            'enable_cam': model_config.get('enable_cam', False),
            'seg_use_multiscale': model_config.get('seg_use_multiscale', False),
            'seg_use_dropout': model_config.get('seg_use_dropout', False),
            'seg_dropout_rate': model_config.get('seg_dropout_rate', 0.1),
        })

    supported_types = ['aynet', 'model_aynet', 'myunet', 'model_ynet', 'model_ynet_conv', 'model_ynet_dascat', 'model_ynet_rdcat']
    raise ValueError(f"Unsupported model type: {kind}. Supported types: {supported_types}")

def _create_dynamic_model(model_type: str, model_kwargs: Dict[str, Any]) -> torch.nn.Module:
    """创建动态导入的模型"""
    try:
        module_name = f"model.{model_type}"
        model_module = importlib.import_module(module_name)
        
        if 'aynet' in model_type:
            ModelClass = getattr(model_module, 'AYNet')
        else:
            ModelClass = getattr(model_module, 'YNet')
        
        import inspect
        model_signature = inspect.signature(ModelClass.__init__)
        model_params = set(model_signature.parameters.keys())
        
        filtered_kwargs = {k: v for k, v in model_kwargs.items() if k in model_params}
        
        model = ModelClass(**filtered_kwargs)
        
        param_info = ", ".join([f"{k}={v}" for k, v in filtered_kwargs.items() if k != 'in_channels'])
        print(f"Using {ModelClass.__name__} from {module_name} with {param_info}")
        
        return model
        
    except ImportError as e:
        raise ImportError(f"Failed to import model {model_type}: {e}")
    except AttributeError as e:
        raise AttributeError(f"Model class not found in {model_type}: {e}")

def load_model_from_checkpoint(model: torch.nn.Module, checkpoint_path: str, device: torch.device) -> torch.nn.Module:
    """Load weights from a wrapper-style checkpoint into *model*.

    The checkpoint's ``state_dict`` is expected to prefix every model
    parameter with ``"model."`` (e.g. a LightningModule wrapping the net
    as ``self.model`` — TODO confirm against the training code); that
    prefix is stripped before loading. Keys unknown to *model* are
    dropped with a warning, and a non-strict load is attempted if the
    strict one fails.

    Args:
        model: Instantiated network to receive the weights.
        checkpoint_path: Path to the ``.pt``/``.pth`` checkpoint file.
        device: Device tensors are mapped to while loading.

    Returns:
        The same *model* instance, with weights loaded.

    Raises:
        KeyError: If the checkpoint has no ``state_dict`` entry.
        ValueError: If no keys carry the ``model.`` prefix.
    """
    # NOTE(review): torch.load unpickles arbitrary objects — only load trusted checkpoints.
    checkpoint = torch.load(checkpoint_path, map_location=device)

    if 'state_dict' not in checkpoint:
        raise KeyError(f"Checkpoint missing 'state_dict' key. Available keys: {list(checkpoint.keys())}")

    # Strip the wrapper prefix; slice length derived from the prefix itself
    # instead of a hard-coded 6 so the two can never drift apart.
    prefix = 'model.'
    state_dict = {
        k[len(prefix):]: v
        for k, v in checkpoint['state_dict'].items()
        if k.startswith(prefix)
    }

    if not state_dict:
        raise ValueError("No model parameters found in checkpoint with 'model.' prefix")

    # Keep only parameters the target model actually declares.
    model_keys = set(model.state_dict().keys())
    filtered_state_dict = {k: v for k, v in state_dict.items() if k in model_keys}

    unexpected_keys = set(state_dict.keys()) - model_keys
    missing_keys = model_keys - set(filtered_state_dict.keys())

    if unexpected_keys:
        print(f"Warning: Found {len(unexpected_keys)} unexpected keys in checkpoint (will be ignored)")

    if missing_keys:
        print(f"Warning: Found {len(missing_keys)} missing keys in model")

    try:
        # Strict load raises when the model declares keys the checkpoint lacks.
        model.load_state_dict(filtered_state_dict, strict=True)
        print("Model loaded successfully")
    except RuntimeError as e:
        print(f"Warning: Loading with strict=False due to: {e}")
        model.load_state_dict(filtered_state_dict, strict=False)

    return model

def export_model_to_onnx(model_path, output_path=None, input_shapes=None, experiment_dir=None, model_type=None):
    """
    Export a PyTorch checkpoint to ONNX for inspection or deployment.

    Args:
        model_path (str): Path to the PyTorch model file (.pth or .pt) or checkpoint
        output_path (str): Output path for ONNX file (optional)
        input_shapes (dict): Dictionary of input tensor shapes
        experiment_dir (str): Experiment directory path for loading config
        model_type (str): Model type if not using experiment config
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    try:
        # Pull the experiment config (if any) so hyper-parameters match training.
        config = load_experiment_config(experiment_dir) if experiment_dir else {}
        model_config = config.get('model', {})

        # An explicitly passed model_type wins; otherwise fall back to config/default.
        if not model_type:
            model_type = model_config.get('model_type', 'myunet')
        print(f"Loading model type: {model_type}")

        # Build the architecture, then restore the trained weights on top.
        net = create_model(model_type, model_config)
        net.to(device)
        net.eval()
        net = load_model_from_checkpoint(net, model_path, device)

        if input_shapes is None:
            input_shapes = {
                'sensor_input': (1, 1, 2560, 64),
                'image_input': (1, 1, 256, 128)
            }

        # One random example tensor per declared input; batch axis stays dynamic.
        input_names = list(input_shapes.keys())
        example_tensors = [torch.randn(shape).to(device) for shape in input_shapes.values()]
        dynamic_axes = {name: {0: 'batch_size'} for name in input_names}

        if output_path is None:
            output_path = f"{Path(model_path).stem}_{model_type}.onnx"

        # Segmentation-enabled models expose two outputs instead of one.
        if model_config.get('enable_segmentation', False):
            output_names = ['main_output', 'segmentation_output']
            dynamic_axes['main_output'] = {0: 'batch_size'}
            dynamic_axes['segmentation_output'] = {0: 'batch_size'}
        else:
            output_names = ['output']
            dynamic_axes['output'] = {0: 'batch_size'}

        torch.onnx.export(
            net,
            tuple(example_tensors) if len(example_tensors) > 1 else example_tensors[0],
            output_path,
            export_params=True,
            opset_version=11,
            do_constant_folding=True,
            input_names=input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            verbose=True
        )

        print(f"Model successfully exported to: {output_path}")
        print(f"Input shapes: {input_shapes}")
        print(f"You can now view the network structure in Netron by opening: {output_path}")

    except Exception as e:
        # Best-effort CLI: report the failure with a traceback instead of raising.
        print(f"Error exporting model: {str(e)}")
        import traceback
        traceback.print_exc()

def main():
    """Command-line entry point: parse arguments and run the ONNX export."""
    parser = argparse.ArgumentParser(description='Export PyTorch model to ONNX format')
    parser.add_argument('model_path', type=str, help='Path to PyTorch model checkpoint file')
    parser.add_argument('--output', '-o', type=str, default=None,
                        help='Output ONNX file path (default: model_name_modeltype.onnx)')
    parser.add_argument('--experiment_dir', type=str, default=None,
                        help='Experiment directory path to load config from')
    parser.add_argument('--model_type', type=str, default=None,
                        choices=['aynet', 'model_aynet', 'myunet', 'model_ynet', 'model_ynet_conv', 'model_ynet_dascat', 'model_ynet_rdcat'],
                        help='Model type (required if not using experiment_dir)')
    parser.add_argument('--sensor_shape', type=str, default='1,1,2560,64',
                        help='Sensor input shape as comma-separated values (default: 1,1,2560,64)')
    parser.add_argument('--image_shape', type=str, default='1,1,256,128',
                        help='Image input shape as comma-separated values (default: 1,1,256,128)')
    args = parser.parse_args()

    # Bail out early when the checkpoint path does not exist.
    if not os.path.exists(args.model_path):
        print(f"Error: Model file not found: {args.model_path}")
        return

    def parse_shape(spec):
        # "1,1,2560,64" -> (1, 1, 2560, 64)
        return tuple(int(dim) for dim in spec.split(','))

    export_model_to_onnx(
        args.model_path,
        args.output,
        {
            'sensor_input': parse_shape(args.sensor_shape),
            'image_input': parse_shape(args.image_shape)
        },
        args.experiment_dir,
        args.model_type
    )

if __name__ == "__main__":
    main()
