import os
import sys
import torch
import torch.nn as nn
from mmengine import Config
from mmdet.registry import MODELS
from mmdet.utils import register_all_modules
# Register every mmdet module with the default scope so that MODELS.build()
# can resolve model types named in the config files.
register_all_modules()

# Make the directory two levels above this file importable so the local
# `_base_` package can be found.
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
sys.path.insert(0, parent_path)
from _base_.model_arch import ModelParser

def printModelStructure(cfgfile):
    """Build the model described by ``cfgfile`` and print its parsed
    architecture as JSON.

    Args:
        cfgfile (str): Path to an mmengine config file containing a
            ``model`` section.
    """
    cfg = Config.fromfile(cfgfile)
    model = MODELS.build(cfg.model)
    # Parse the model's repr() into a structured description and dump it.
    model_parser = ModelParser()
    model_info = model_parser.parse(str(model))
    print(model_parser.to_json(model_info))

def count_parameters_by_layer(model):
    """Map each leaf module's name to its trainable parameter count.

    Composite modules (those with children) are skipped so every parameter
    is attributed exactly once; leaves with no trainable parameters are
    omitted from the result.
    """
    layer_params = {}
    for name, module in model.named_modules():
        # Skip composite modules — their parameters belong to their leaves.
        if next(module.children(), None) is not None:
            continue
        trainable = sum(p.numel() for p in module.parameters() if p.requires_grad)
        if trainable:
            layer_params[name] = trainable
    return layer_params

def printModelSummary(cfgfile):
    """Build the model from ``cfgfile`` and print per-layer and total
    parameter counts.

    Args:
        cfgfile (str): Path to an mmengine config file containing a
            ``model`` section.
    """
    cfg = Config.fromfile(cfgfile)

    # Instantiate the model and put it into inference mode.
    model = MODELS.build(cfg.model)
    model.eval()

    # Prefer GPU when one is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)

    # Per-layer breakdown (leaf modules with trainable parameters only).
    print("Parameters by layer:")
    for layer_name, num_params in count_parameters_by_layer(model).items():
        print(f"{layer_name}: {num_params}")

    # Totals over every parameter tensor in the model.
    all_params = list(model.parameters())
    total_params = sum(p.numel() for p in all_params)
    print(f'Total parameters: {total_params}')

    trainable_params = sum(p.numel() for p in all_params if p.requires_grad)
    print(f'Trainable parameters: {trainable_params}')

def count_parameters(module):
    """Return ``(total, trainable)`` parameter counts for ``module``."""
    total = trainable = 0
    # Single pass: accumulate both totals together.
    for param in module.parameters():
        n = param.numel()
        total += n
        if param.requires_grad:
            trainable += n
    return total, trainable

def print_tensor_shape(module, input, output):
    """Forward hook that logs the input and output shapes of ``module``.

    Intended for ``nn.Module.register_forward_hook``; assumes ``input`` is a
    sequence of tensors and ``output`` is a single tensor.
    """
    cls_name = module.__class__.__name__
    input_shapes = [x.shape for x in input]
    print(f'{cls_name} 输入尺寸: {input_shapes}')
    print(f'{cls_name} 输出尺寸: {output.shape}')

def estimate_model_size(config_file):
    """Build the model described by ``config_file`` (weights are not
    loaded), attach shape-printing hooks to conv/linear/pool layers, and
    print a per-layer parameter breakdown plus overall totals.

    Args:
        config_file (str): Path to an mmengine config file containing a
            ``model`` section.
    """
    cfg = Config.fromfile(config_file)

    # Build the model from the config only; no checkpoint is loaded.
    model = MODELS.build(cfg.model)

    # Log input/output tensor shapes whenever one of these layer types runs.
    hooked_types = (nn.Conv2d, nn.Linear, nn.MaxPool2d)
    for _, module in model.named_modules():
        if isinstance(module, hooked_types):
            module.register_forward_hook(print_tensor_shape)

    # Collect (total, trainable) counts per leaf module.
    layer_params = {}
    for name, module in model.named_modules():
        # Composite modules are skipped; their children are counted instead.
        if next(module.children(), None) is not None:
            continue
        counts = count_parameters(module)
        if counts[0] > 0:
            layer_params[name] = counts

    total_params = sum(total for total, _ in layer_params.values())
    total_trainable_params = sum(tr for _, tr in layer_params.values())

    # Report.
    print("Estimated parameters by layer:")
    for name, (num_params, num_trainable_params) in layer_params.items():
        print(f"{name}:")
        print(f"  Total parameters: {num_params:,}")
        print(f"  Trainable parameters: {num_trainable_params:,}")

    print(f"\nEstimated total parameters: {total_params}")
    print(f"Estimated total trainable parameters: {total_trainable_params}")

if __name__ == "__main__":
    # Require one CLI argument: the model config file path. Without this
    # check a missing argument raises an opaque IndexError.
    if len(sys.argv) < 2:
        sys.exit(f"usage: {sys.argv[0]} <config_file>")
    printModelStructure(sys.argv[1])
    # Alternative reports (enable manually as needed):
    # printModelSummary(sys.argv[1])
    # estimate_model_size(sys.argv[1])