import os
import argparse
import torch
from net import create_lightweight_pretrained_model

def load_model(model_path, device):
    """Load the trained model from a checkpoint file.

    Args:
        model_path: Path to the saved .pth checkpoint. May hold either a
            training checkpoint dict containing a 'model_state_dict' key,
            or a bare state dict saved directly from the model.
        device: torch.device the model should be moved onto.

    Returns:
        The model in evaluation mode, placed on `device`.
    """
    print(f"Loading model from {model_path}...")

    # Architecture hyperparameters must match those used during training,
    # otherwise load_state_dict() will fail on shape mismatches.
    num_classes = 7
    model = create_lightweight_pretrained_model(num_classes=num_classes, width_mult=0.5, use_pretrained=False)

    # map_location keeps GPU-trained checkpoints loadable on CPU-only hosts.
    checkpoint = torch.load(model_path, map_location=device)

    # Accept both full training checkpoints and bare state dicts, instead of
    # raising an opaque KeyError on the latter.
    if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
        state_dict = checkpoint['model_state_dict']
    else:
        state_dict = checkpoint
    model.load_state_dict(state_dict)

    # Inference-only: freeze dropout/batch-norm behavior, then move to device.
    model.eval()
    model.to(device)

    print("Model loaded successfully!")
    return model

def export_onnx(model, save_path, input_shape=(1, 3, 112, 112), device="cpu"):
    """Export PyTorch model to ONNX format."""
    # Create dummy input tensor
    dummy_input = torch.randn(input_shape, device=device)
    
    # Export the model
    print(f"Exporting model to ONNX format...")
    torch.onnx.export(
        model,                     # PyTorch model
        dummy_input,               # Input tensor
        save_path,                 # Output file path
        export_params=True,        # Store the trained weights
        opset_version=12,          # ONNX version
        do_constant_folding=True,  # Optimization
        input_names=['input'],     # Input tensor name
        output_names=['output'],   # Output tensor name
        dynamic_axes={
            'input': {0: 'batch_size'},  # Variable batch size
            'output': {0: 'batch_size'}
        }
    )
    print(f"ONNX model exported to: {save_path}")

def main():
    """CLI entry point: parse arguments, load the checkpoint, export to ONNX.

    The .onnx file is written next to the input checkpoint, with the same
    base name. Exits with an argparse usage error on a missing model file
    or a malformed --input_shape value.
    """
    parser = argparse.ArgumentParser(description="Export PyTorch model to ONNX format")
    parser.add_argument('--model', type=str, required=True, help='Path to the .pth model file')
    parser.add_argument('--input_shape', type=str, default='1,3,112,112',
                        help='Input shape in format batch,channels,height,width (default: 1,3,112,112)')

    args = parser.parse_args()

    # Fail fast with a clear CLI error instead of a deep traceback later.
    if not os.path.isfile(args.model):
        parser.error(f"model file not found: {args.model}")
    try:
        input_shape = tuple(map(int, args.input_shape.split(',')))
    except ValueError:
        parser.error(f"invalid --input_shape '{args.input_shape}': expected comma-separated integers")
    if not input_shape or any(dim <= 0 for dim in input_shape):
        parser.error(f"invalid --input_shape '{args.input_shape}': all dimensions must be positive")

    # Prefer GPU when available; the dummy trace input must share this device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Output path: same directory as the checkpoint, with a .onnx extension.
    model_dir = os.path.dirname(args.model)
    model_name = os.path.splitext(os.path.basename(args.model))[0]
    onnx_path = os.path.join(model_dir, f"{model_name}.onnx")

    model = load_model(args.model, device)

    export_onnx(model, onnx_path, input_shape, device)

    print("Export completed successfully!")
    print(f"Input shape: {input_shape}")
    print(f"Model saved to: {onnx_path}")

if __name__ == "__main__":
    main()
