import torch
import torch.nn as nn
from typing import Dict, List, Tuple, Union


def conv3x3(in_channels: int, out_channels: int, stride: int = 1) -> nn.Conv2d:
    """Build a 3x3 convolution with 1-pixel padding and no bias term.

    Args:
        in_channels: Number of input feature channels.
        out_channels: Number of output feature channels.
        stride: Convolution stride (default 1).

    Returns:
        A configured ``nn.Conv2d`` module.
    """
    layer = nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return layer


def model_summary(model: nn.Module, input_size: Union[Tuple, List[Tuple]]) -> None:
    """Print a layer-by-layer summary of *model*.

    Runs a dummy forward pass (batch size 2) with randomly generated
    input(s), records each hooked module's input/output shapes and
    parameter counts via forward hooks, then prints a formatted table.

    Args:
        model: The module to summarize.
        input_size: A single per-sample input shape (without the batch
            dimension), or a list/tuple of such shapes for a model whose
            ``forward`` takes multiple tensor arguments.

    Returns:
        None; the summary is written to stdout.
    """

    def register_hook(module):
        def hook(module, input, output):
            class_name = str(module.__class__).split(".")[-1].split("'")[0]
            m_key = f"{class_name}-{len(summary) + 1}"

            summary[m_key] = {}
            summary[m_key]["input_shape"] = list(input[0].size())

            # Some modules return a dict of tensors; record each entry's shape.
            if isinstance(output, dict):
                summary[m_key]["output_shape"] = {k: list(v.size()) for k, v in output.items()}
            else:
                summary[m_key]["output_shape"] = list(output.size())

            # Store plain ints (numel), not 0-dim LongTensors from torch.prod,
            # so downstream accumulation/formatting uses ordinary arithmetic.
            params = 0
            if hasattr(module, "weight") and hasattr(module.weight, "size"):
                params += module.weight.numel()
                summary[m_key]["trainable"] = module.weight.requires_grad
            if hasattr(module, "bias") and hasattr(module.bias, "size"):
                params += module.bias.numel()
            summary[m_key]["nb_params"] = params

        # Skip pure containers; their children are hooked individually.
        if not isinstance(module, (nn.Sequential, nn.ModuleList)):
            hooks.append(module.register_forward_hook(hook))

    summary: Dict[str, Dict] = {}
    hooks: List = []

    model.apply(register_hook)

    # Build dummy input(s) with batch size 2.
    multi_input = isinstance(input_size[0], (list, tuple))
    if multi_input:
        x = [torch.rand(2, *shape) for shape in input_size]
    else:
        x = torch.rand(2, *input_size)

    try:
        # No gradients are needed for a shape-probing forward pass.
        with torch.no_grad():
            if multi_input:
                # Multi-input models expect each tensor as a separate argument,
                # not a single list (the original passed the list as-is).
                model(*x)
            else:
                model(x)
    finally:
        # Always detach hooks, even if the forward pass raises; otherwise
        # leaked hooks keep mutating `summary` on later forward calls.
        for h in hooks:
            h.remove()

    # Print the collected summary table.
    _print_model_summary(summary)


def _print_model_summary(summary: Dict) -> None:
    """Render the collected layer summary as a formatted table on stdout.

    Args:
        summary: Mapping of layer keys to dicts containing at least
            ``output_shape`` and ``nb_params`` (and optionally ``trainable``).
    """
    divider = "=" * 80
    print(divider)
    print(f"{'Layer (type)':<30} {'Output Shape':<25} {'Param #':<15}")
    print(divider)

    total_params = 0
    trainable_params = 0

    for name, info in summary.items():
        shape_text = str(info["output_shape"])
        # Truncate long shape strings so the columns stay aligned.
        if len(shape_text) > 25:
            shape_text = shape_text[:22] + "..."

        total_params += info["nb_params"]
        if info.get("trainable"):
            trainable_params += info["nb_params"]

        print(f"{name:<30} {shape_text:<25} {info['nb_params']:>15,}")

    print(divider)
    print(f"Total params: {total_params:,}")
    print(f"Trainable params: {trainable_params:,}")
    print(f"Non-trainable params: {total_params - trainable_params:,}")
    print(divider)
