from omnidet.models.detection_decoder import YoloDecoder, YOLOLayer
import torch
from torchvision import transforms
from PIL import Image
import os
import cv2
import numpy as np
from omnidet.models.resnet import ResnetEncoder
import argparse
import json
from pathlib import Path
import torchvision.transforms as T
import yaml
from omnidet.utils import Tupperware
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
# from ..data_loader.woodscape_loader import WoodScapeRawDataset
# from torch.utils.data import DataLoader
from omnidet.train_utils.detection_utils import non_max_suppression
import torch
import torch.onnx
from torch.autograd import Variable

def inputs_to_device(self, inputs):
    """Move every value in *inputs* onto ``self.device`` in place.

    Args:
        self: any object exposing a ``device`` attribute (torch.device).
        inputs: dict whose values support ``.to(device)`` (e.g. tensors);
            the dict is mutated in place and nothing is returned.
    """
    # Re-assigning existing keys while iterating is safe: the dict's
    # size never changes.
    for name in inputs:
        inputs[name] = inputs[name].to(self.device)

def printj(dic):
    """Pretty-print *dic* to stdout as 4-space-indented JSON.

    Returns None (the return value of ``print``), matching the original
    one-liner's contract.
    """
    text = json.dumps(dic, indent=4)
    return print(text)

def collect_args() -> argparse.Namespace:
    """Parse command-line arguments for the export script.

    Returns:
        argparse.Namespace with a single ``config`` entry naming the YAML
        parameter file; defaults to ``../data/params.yaml`` relative to
        this script's directory.
    """
    default_config = Path(__file__).parent / "../data/params.yaml"
    parser = argparse.ArgumentParser()
    parser.add_argument('--config',
                        help="Config file",
                        type=str,
                        default=default_config)
    return parser.parse_args()

def collect_tupperware() -> Tupperware:
    """Load the YAML config named on the command line into a Tupperware.

    Reads ``--config`` via :func:`collect_args`, parses it with
    ``yaml.safe_load``, pretty-prints the result for inspection, and
    returns it wrapped in a ``Tupperware``.

    Returns:
        Tupperware wrapping the parsed parameter dict.
    """
    config = collect_args()
    # BUGFIX: use a context manager so the config file handle is closed
    # deterministically (the original relied on the GC to close it).
    with open(config.config) as f:
        params = yaml.safe_load(f)
    args = Tupperware(params)
    printj(args)
    return args

def main():
    """Export the OmniDet detection network (ResNet encoder + YOLO decoder) to ONNX."""
    args = collect_tupperware()
    # Hard-coded checkpoint locations override whatever the YAML config says.
    args.model_checkpoint_encoder = '/home/li/深度学习/鱼眼相机数据集相关资料/模型/OmniDet_Boilerplate_Weights-20250102T112055Z-001/OmniDet_Boilerplate_Weights/res18/encoder.pth'
    args.model_checkpoint_detection = '/home/li/深度学习/鱼眼相机数据集相关资料/模型/OmniDet_Boilerplate_Weights-20250102T112055Z-001/OmniDet_Boilerplate_Weights/res18/detection.pth'
    feed_width = args.input_width
    feed_height = args.input_height
    # CPU is sufficient (and the most portable target) for a one-off export.
    device = torch.device("cpu")

    # --- Init detection model (18-layer ResNet backbone) ---
    encoder = ResnetEncoder(num_layers=18, pretrained=False).to(device)
    decoder = YoloDecoder(encoder.num_ch_enc, args).to(device)

    checkpoint_encoder = torch.load(args.model_checkpoint_encoder, map_location=device, weights_only=True)
    checkpoint_detection = torch.load(args.model_checkpoint_detection, map_location=device, weights_only=True)

    # Adaptive key loading: keep only checkpoint entries whose names exist in
    # the current encoder so minor architecture mismatches don't abort the load.
    model_dict_encoder = encoder.state_dict()
    pretrained_dict_encoder = {k: v for k, v in checkpoint_encoder.items() if k in model_dict_encoder}
    model_dict_encoder.update(pretrained_dict_encoder)
    encoder.load_state_dict(model_dict_encoder)
    decoder.load_state_dict(checkpoint_detection)

    # Switch both halves to inference mode before tracing.
    encoder.eval()
    decoder.eval()

    class CombinedModel(torch.nn.Module):
        """Chain encoder and decoder so ONNX export sees a single forward pass."""

        def __init__(self, encoder, decoder):
            super().__init__()
            self.encoder = encoder
            self.decoder = decoder

        def forward(self, x):
            encoded = self.encoder(x)
            # img_dim is captured from the enclosing scope (config resolution).
            return self.decoder(encoded, img_dim=[feed_width, feed_height])

    combined_model = CombinedModel(encoder, decoder).to(device)

    # BUGFIX: trace with the configured resolution instead of a hard-coded
    # (1, 3, 288, 544); if params.yaml specifies a different input size the
    # exported graph would otherwise disagree with the decoder's img_dim.
    dummy_input = torch.randn(1, 3, feed_height, feed_width).to(device)

    # Export the traced model to ONNX.
    onnx_path = "fishcapture_detection_model.onnx"
    torch.onnx.export(combined_model,
                      dummy_input,
                      onnx_path,
                      export_params=True,
                      opset_version=12,  # widely supported opset
                      input_names=["input"],
                      output_names=["output"])

    print(f"ONNX model has been saved to {onnx_path}")


if __name__ == "__main__":
    main()
