import gc
import warnings

import sklearn.exceptions
import torch
from thop import profile, clever_format

from models.CLAttention import create_model
from models.LightX3ECG import LightX3ECG
from models.lwCETModel import lwCET
from models.models import ecgTransForm
from utils import to_device

# Silence sklearn's UndefinedMetricWarning (emitted when a metric is computed
# for a class with no predicted samples) and generic FutureWarnings, to keep
# the profiling output readable.
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)

# NOTE(review): these project imports sit after the warning filters,
# presumably so that any import-time warnings are also suppressed — confirm.
from configs.data_configs import get_dataset_class
from configs.hparams import get_hparams_class

if __name__ == "__main__":
    # Profile a trained checkpoint: report FLOPs and parameter count for a
    # single-sample forward pass using thop.
    test_model_name = "../experiments_logs/Brn/lightx3Ecg-268_2025_11_03_11_47/checkpoint_best.pt"
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    dataset_class = get_dataset_class('mit4')
    hparams_class = get_hparams_class("supervised")

    # Alternative architectures kept for quick swapping during experiments:
    # model = ecgTransForm(configs=dataset_class(), hparams=hparams_class().train_params)
    # model = lwCET(configs=dataset_class(), hparams=hparams_class().train_params)
    # model = create_model(
    #     model_type="cnn_lstm_attention",
    #     input_channels=1,
    #     seq_length=268,
    #     hidden_size=128,
    #     num_layers=2,
    #     num_classes=4,
    #     dropout=0.3
    # )
    model = LightX3ECG(268, 4)
    chkpoint = torch.load(test_model_name, map_location=device)
    model.load_state_dict(chkpoint['model'])
    model = model.to(device)
    # Fix: switch to inference mode before profiling — thop.profile runs a
    # real forward pass, and dropout/batch-norm must not be in training mode.
    model.eval()

    # Dummy ECG input: (batch=1, channels=1, seq_len=268) — matches the
    # LightX3ECG(268, 4) construction above.
    # (Removed unused inputData2 = torch.randn(1, 52): it was allocated and
    # moved to the device but never passed to the model.)
    inputData1 = torch.randn(1, 1, 268)
    inputData1 = to_device(inputData1, device)

    with torch.no_grad():
        flops, params = profile(model, inputs=(inputData1,))
        print(f"FLOPs: {flops / 1e6:.2f} M")
        print(f"参数量: {params / 1e6:.2f} M")
        # clever_format returns human-readable strings (e.g. "12.34M").
        f, t = clever_format([flops, params], "%.2f")
        print(f, t)

    # Consistent with the commented variant below: release references and
    # cached GPU memory once profiling is done.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

# if __name__ == '__main__':
#     # feature_extractor_test = CNN1DClassifier(1, 4)
#     test_model_name = "../prune/final_pruned_model.pth"
#     device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
#     model = torch.load(test_model_name, map_location=device, weights_only=False)
#     model.eval()
#
#     # 3. 计算参数量
#     total_params = sum(p.numel() for p in model.parameters())
#     trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
#     print(f"总参数量: {total_params / 1e6:.2f} M")
#     print(f"可训练参数量: {trainable_params / 1e6:.2f} M")
#
#     inputData1 = torch.randn(1, 1, 268)
#     inputData1 = to_device(inputData1, device)
#
#     # inputData2 = torch.randn(1, 52)
#     # inputData2 = to_device(inputData2, device)
#     with torch.no_grad():
#         flops, params = profile(model, inputs=(inputData1,))
#         print(f"FLOPs: {flops  / 1e6:.2f} M")
#         print(f"参数量: {params / 1e6:.2f} M")
#         f, t = clever_format([flops, params], "%.2f")
#         print(f, t)
#
#     gc.collect()
#     torch.cuda.empty_cache()
