# ------------------#
# Attention modules from two domains (NLP and CBAM/vision)
# ------------------#
import torch
from ._nlp_attentions import DotProductAttention, AdditiveAttention, MultiHeadAttention
from ._cbam_attentions import CBAM, ResBlock_CBAM, ChannelAttentionModule, SpatialAttentionModule


def get_total_parameters(model):
    """Return the total number of parameters in *model*.

    ``torch.nn.Module.parameters()`` already yields the parameters of all
    nested submodules, so a single pass handles both a single layer and a
    ``Sequential`` container identically — the original type check against
    the private path ``torch.nn.modules.container.Sequential`` and the
    duplicated counting loop are unnecessary. No forward pass happens here,
    so ``torch.no_grad()`` is not needed either.

    Args:
        model: any ``torch.nn.Module`` instance (layer or container).

    Returns:
        int: total element count across all parameter tensors
        (0 for a module with no parameters).
    """
    # numel() gives the element count of one parameter tensor.
    return sum(param.numel() for param in model.parameters())


if __name__ == '__main__':
    # No standalone behavior: this module is meant to be imported for its
    # re-exported attention classes and the parameter-counting helper.
    pass