import os

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.metrics import classification_report, accuracy_score
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
import torch_pruning as tp

from dataloader import get_class_weight
# from models.lwCETModel import lwCET
from models.models import ecgTransForm
from myutils.dataset import SignalDataset
from myutils.modelUtils import get_configs
from utils import to_device


def main():
    """Iteratively prune a pretrained ecgTransForm model with torch_pruning.

    Pipeline: load and split the MIT data, load the pretrained checkpoint,
    then alternate structured magnitude pruning and fine-tuning rounds,
    saving a checkpoint after each round and the final pruned model at the end.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # 1. Load the fine-tuning data.
    data_path = '../data/mit/alldata/data_4.csv'
    batch_size = 128
    train_data1 = pd.read_csv(data_path, header=None).values
    # Stratified split on the label column (column 0): 70% train, then the
    # remaining 30% into ~10% validation / ~20% test.
    raw_train, raw_valid, _, _ = train_test_split(train_data1, list(train_data1[:, 0]), test_size=0.3,
                                                  random_state=1, stratify=list(train_data1[:, 0]))
    raw_valid, raw_test, _, _ = train_test_split(raw_valid, list(raw_valid[:, 0]), test_size=0.66,
                                                 random_state=1, stratify=list(raw_valid[:, 0]))
    print(train_data1.shape)
    print(raw_train.shape)
    print(raw_valid.shape)
    print(raw_test.shape)

    train_data = SignalDataset(raw_train)
    valid_data = SignalDataset(raw_valid)
    test_data = SignalDataset(raw_test)
    train_dl = DataLoader(dataset=train_data,
                          batch_size=batch_size,
                          num_workers=2,
                          shuffle=True)
    val_dl = DataLoader(dataset=valid_data,
                        batch_size=batch_size,
                        num_workers=2,
                        shuffle=True)
    test_dl = DataLoader(dataset=test_data,
                         batch_size=batch_size,
                         num_workers=2,
                         shuffle=True)

    # Class-weighted cross-entropy to counter class imbalance; the weights
    # come from the training-set label distribution.
    cw_dict = get_class_weight(train_data._label)
    weights = [float(value) for value in cw_dict.values()]
    weights_array = np.array(weights).astype(np.float32)  # ensure float32 dtype
    weights_tensor = torch.tensor(weights_array).to(device)
    cross_entropy = torch.nn.CrossEntropyLoss(weight=weights_tensor)

    # 2. Load the pretrained model checkpoint.
    exp_log_dir = '../experiments_logs/Brn/ecg-trans-268_2025_10_09_21_20'
    test_model_name = os.path.join(exp_log_dir, "checkpoint_best.pt")
    dataset_configs, hparams_class1 = get_configs()
    hparams = hparams_class1.train_params
    # Unpruned reference model, kept only for the parameter-count comparison.
    original_model = ecgTransForm(dataset_configs, hparams).eval()
    model = ecgTransForm(dataset_configs, hparams).eval()
    model.load_state_dict(torch.load(test_model_name, device)['model'])
    model = model.to(device)

    # Example input used by torch_pruning to trace the dependency graph.
    # Presumably a batch of raw ECG signals shaped (batch, 1, 268) — matches
    # the unsqueeze(1) applied to training batches below.
    example_inputs = torch.randn(128, 1, 268)

    # 3. Collect layers that must never be pruned: the 4-class classifier head.
    ignored_layers = []
    for module in model.modules():
        if isinstance(module, nn.Linear) and module.out_features == 4:
            ignored_layers.append(module)

    # 4. Pruning schedule.
    pruning_ratio = 0.2   # target overall pruning ratio
    num_iterations = 2    # number of prune/fine-tune rounds
    fine_tune_epochs = 1  # fine-tuning epochs after each round

    # Baseline accuracy before any pruning.
    eval(device, model, test_dl)
    # Iterative prune -> fine-tune loop.
    for iteration in range(num_iterations):
        print(f"\n=== 开始第 {iteration + 1}/{num_iterations} 轮剪枝 ===")
        model.to('cpu')
        # NOTE(review): each round's ratio is applied to the already-pruned
        # model, so the cumulative ratio slightly exceeds pruning_ratio
        # (e.g. 0.1 then 0.2 removes ~28%, not 20%) — confirm this is intended.
        current_ratio = pruning_ratio * (iteration + 1) / num_iterations

        pruner = tp.pruner.MagnitudePruner(
            model,
            example_inputs=example_inputs,
            importance=tp.importance.MagnitudeImportance(p=2),  # L2-norm importance
            global_pruning=True,  # rank channel importance across all layers
            pruning_ratio=current_ratio,
            ignored_layers=ignored_layers,  # layers excluded from pruning
        )
        pruner.step()  # physically remove the selected channels

        # Structured pruning removes channels outright, so counting zeroed
        # weights would always report ~0% sparsity; report the parameter
        # reduction against the unpruned reference model instead.
        remaining_params = count_parameters(model)
        reference_params = count_parameters(original_model)
        sparsity = 100.0 * (1.0 - remaining_params / reference_params)
        print(f"当前稀疏度: {sparsity:.2f}%")

        print(f"开始微调 ({fine_tune_epochs} 轮)...")
        model.to(device)
        model.train()
        # Rebuild the optimizer AFTER every pruning step: torch_pruning
        # replaces module parameters with smaller tensors, so an optimizer
        # created before pruning would keep updating the stale, detached
        # parameter objects and the pruned model would never actually train.
        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
        for epoch in range(fine_tune_epochs):
            running_loss = 0.0
            for step, batches in enumerate(train_dl):
                batches = to_device(batches, device)

                data = batches[0].float()
                labels = batches[2].long()
                optimizer.zero_grad()
                data = data.unsqueeze(1)  # (B, L) -> (B, 1, L) channel dim
                outputs = model(data)

                loss = cross_entropy(outputs, labels)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()

                if step % 100 == 99:  # log every 100 batches
                    print(f"  批次 {step + 1}, 损失: {running_loss / 100:.4f}")
                    running_loss = 0.0
        eval(device, model, test_dl)
        # Save an intermediate checkpoint for this round.
        torch.save(model.state_dict(), f"pruned_model_iter_{iteration + 1}.pth")

    # Save the final pruned model, both as a state dict and as a full module
    # (the full module is needed because the pruned architecture differs from
    # the original class definition).
    torch.save(model.state_dict(), "final_pruned_model_state_dict.pth")
    torch.save(model, "final_pruned_model.pth")
    print("✅ 迭代剪枝完成！")

    print(f"剪枝前参数量: {count_parameters(original_model)}")
    print(f"剪枝后参数量: {count_parameters(model)}")


def eval(device, model, val_dl):
    """Evaluate *model* on *val_dl* and print accuracy and macro F1-score.

    Args:
        device: torch device to move batches/model outputs to.
        model: a model whose forward accepts a (B, 1, L) float tensor and
            returns per-class logits.
        val_dl: iterable of batches; each batch is indexable with the signal
            at [0] and the integer labels at [2] (index [1] is unused here).

    NOTE(review): the name shadows the builtin ``eval``; kept unchanged
    because callers in this file depend on it.
    """
    model.eval()
    # Collect per-batch predictions in lists and concatenate once at the end;
    # repeatedly calling np.append would copy the whole array every batch.
    pred_chunks = []
    true_chunks = []
    with torch.no_grad():
        for batches in val_dl:
            batches = to_device(batches, device)
            data = batches[0].float()
            labels = batches[2].long()

            data = data.unsqueeze(1)  # (B, L) -> (B, 1, L) channel dim
            outputs = model(data)
            # argmax over the class dimension gives the predicted label.
            pred_chunks.append(outputs.max(1)[1].cpu().numpy())
            true_chunks.append(labels.data.cpu().numpy())

        pred_labels = np.concatenate(pred_chunks).astype(int)
        true_labels = np.concatenate(true_chunks).astype(int)

        r = classification_report(true_labels, pred_labels, digits=4, output_dict=True,
                                  target_names=['N', 'S', 'V', 'F'])
        df = pd.DataFrame(r)
        accuracy = accuracy_score(true_labels, pred_labels)
        print(f"\n最终剪枝模型准确率: {accuracy * 100:.2f}%")
        print(f"\n最终剪枝模型F1: {df['macro avg']['f1-score'] * 100:.2f}%")


# 计算剪枝前后的参数量
# Count trainable parameters, used to compare model size before/after pruning.
def count_parameters(models):
    """Return the total number of trainable (requires_grad) parameters."""
    total = 0
    for param in models.parameters():
        if param.requires_grad:
            total += param.numel()
    return total


if __name__ == '__main__':
    # Required on Windows when the script is frozen into an executable and
    # uses multiprocessing (the DataLoader workers); a no-op elsewhere.
    torch.multiprocessing.freeze_support()
    main()
