import os
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import pandas as pd
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import joblib
from torch.nn.utils import prune
import time
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

# Non-interactive backend: figures are only written to disk, never displayed.
matplotlib.use('Agg')
# NOTE(review): hard-codes GPU index 2 — confirm this matches the target machine.
device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Fix all RNG seeds for reproducible runs.
def set_seed(seed=42):
    """Seed Python, NumPy and PyTorch RNGs and force deterministic cuDNN.

    Args:
        seed: Seed value shared by all random number generators.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Seed every visible GPU, not just the current one (safe no-op on CPU).
    torch.cuda.manual_seed_all(seed)
    # Trade cuDNN autotuning speed for run-to-run determinism.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Structured (channel-wise) pruning, finalized in place.
def apply_and_finalize_structured_pruning(model, amount=0.3):
    """Prune whole output channels of conv1..conv3 by L2 norm, then bake it in.

    Args:
        model: Module exposing ``conv1``, ``conv2`` and ``conv3`` Conv1d layers.
        amount: Fraction of output channels to zero out per layer.
    """
    # Uses the module-level `prune` import; the old function-local re-import
    # was redundant.
    layers = (model.conv1, model.conv2, model.conv3)
    # Step 1: mask the `amount` fraction of output channels (dim=0) with the
    # smallest L2 norm.
    for layer in layers:
        prune.ln_structured(layer, name='weight', amount=amount, n=2, dim=0)
    print(f"Structured pruning (amount={amount}) applied.")

    # Step 2: fold the masks into the weights so the pruning is permanent.
    # NOTE(review): prune.remove keeps the tensor shape — zeroed channels still
    # occupy memory until the model is rebuilt (see the slim-model builders).
    for layer in layers:
        prune.remove(layer, 'weight')
    print("Pruned weights finalized and masks removed (model permanently slimmed).")

# Persist model weights.
def save_model(model, path):
    """Write ``model``'s state_dict to ``path`` and report the location."""
    state = model.state_dict()
    torch.save(state, path)
    print(f"Model saved to {path}")

# Restore model weights.
def load_model(model, path):
    """Load a state_dict from ``path`` into ``model`` (in place).

    The checkpoint is mapped to CPU first so that a file saved on a GPU
    machine still loads when CUDA is unavailable; ``load_state_dict`` then
    copies the tensors onto whatever device ``model`` already lives on.
    """
    model.load_state_dict(torch.load(path, map_location='cpu'))
    print(f"Model loaded from {path}")

# Load and concatenate every .npy array in a folder.
def load_npy_from_folder(folder_path):
    """Concatenate all ``.npy`` arrays found directly in ``folder_path``.

    Files are read in sorted name order so the resulting row order is
    deterministic across machines (``os.listdir`` order is arbitrary).

    Raises:
        FileNotFoundError: if the folder contains no ``.npy`` files.
    """
    arrays = [
        np.load(os.path.join(folder_path, name))
        for name in sorted(os.listdir(folder_path))
        if name.endswith('.npy')
    ]
    if not arrays:
        raise FileNotFoundError(f"No .npy files found in {folder_path}")
    return np.concatenate(arrays, axis=0)

# Sliding-window sample construction.
def create_sliding_window(data, window_size=128, step_size=64):
    """Cut ``data`` into overlapping (features, target) windows.

    Column 0 is treated as the target signal and the remaining columns as
    features; each sample's target is the column-0 value of the window's
    last row.

    Args:
        data: 2-D array of shape (time, 1 + n_features).
        window_size: Rows per window.
        step_size: Stride between consecutive window starts.

    Returns:
        Tuple ``(X, y, indices)`` where ``X`` has shape
        (n_windows, window_size, n_features), ``y`` holds the targets and
        ``indices`` the original row index of each target.
    """
    X, y, indices = [], [], []
    # `+ 1` so the final complete window is included (the original exclusive
    # bound silently dropped the last valid window).
    for start in range(0, len(data) - window_size + 1, step_size):
        window = data[start:start + window_size]
        X.append(window[:, 1:])   # drop the target column from the features
        y.append(window[-1, 0])   # target = last frame of the window
        indices.append(start + window_size - 1)  # row of that target in `data`
    return np.array(X), np.array(y), np.array(indices)

# Undo scaler normalization, tolerating 1-D and single-column inputs.
def inverse_transform(scaler, data):
    """Inverse-transform ``data`` with ``scaler`` and return a flat array.

    A 1-D input is reshaped into a single column; a (n, 1) input is tiled
    out to the scaler's full feature width before being inverse-transformed.
    """
    arr = data
    if arr.ndim == 1:
        # Promote to a (n, 1) column vector.
        arr = arr.reshape(-1, 1)
    elif arr.ndim == 2 and arr.shape[1] == 1:
        # Replicate the single column across all scaler features so the
        # shape matches what the scaler was fitted on.
        arr = np.tile(arr, (1, scaler.scale_.shape[0]))
    return scaler.inverse_transform(arr).flatten()

# Load a fitted scaler from disk.
def load_scaler(save_dir):
    """Return the scaler pickled at ``save_dir``/scaler.pkl.

    Raises:
        FileNotFoundError: if no scaler file exists at that location.
    """
    scaler_path = os.path.join(save_dir, 'scaler.pkl')
    if not os.path.exists(scaler_path):
        raise FileNotFoundError("Scaler file not found.")
    scaler = joblib.load(scaler_path)
    print(f"Scaler loaded from {scaler_path}")
    return scaler

# Data preprocessing: windowing + standardization.
def preprocess_data(data, save_dir, is_train=True):
    """Window ``data`` and standardize features and targets.

    In training mode the scalers are fitted and pickled into ``save_dir``;
    otherwise the previously saved scalers are loaded and reused, so train
    and test share the same normalization.

    Returns:
        (X, y, y_scaler): float32 tensors of shape (samples, window,
        features) and (samples, 1), plus the *target* scaler so callers can
        invert predictions.
    """
    x_scaler_path = os.path.join(save_dir, 'x_scaler.pkl')
    y_scaler_path = os.path.join(save_dir, 'y_scaler.pkl')

    # Build windowed samples first; X stays (samples, window, features) and
    # is flattened to 2-D only for the scaler (the model permutes internally).
    X, y, indices = create_sliding_window(data)

    if is_train:
        # Fit and persist both scalers.
        x_scaler = StandardScaler().fit(X.reshape(-1, X.shape[-1]))
        joblib.dump(x_scaler, x_scaler_path)
        y = y.reshape(-1, 1)
        y_scaler = StandardScaler().fit(y)
        joblib.dump(y_scaler, y_scaler_path)
    else:
        # Reuse the scalers fitted on the training split.
        x_scaler = joblib.load(x_scaler_path)
        y_scaler = joblib.load(y_scaler_path)

    # Apply the normalization.
    X = x_scaler.transform(X.reshape(-1, X.shape[-1])).reshape(X.shape)
    y = y_scaler.transform(y.reshape(-1, 1))  # stays 2-D: (samples, 1)

    return (
        torch.tensor(X, dtype=torch.float32),
        torch.tensor(y, dtype=torch.float32),
        y_scaler,
    )

# Cosine-annealing LR schedule over the whole training run.
def cosine_annealing_scheduler(optimizer, epochs):
    """Return a CosineAnnealingLR scheduler whose period spans ``epochs``."""
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
    return scheduler

# Plot and save the training-loss curve.
def visualize_loss(losses, save_path):
    """Save a line plot of the per-epoch losses into ``save_path``.

    Args:
        losses: Sequence of per-epoch loss values.
        save_path: Directory the PNG is written into.
    """
    plt.figure(figsize=(24, 8))
    plt.plot(losses, label='Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training Loss')
    plt.legend()
    plt.savefig(os.path.join(save_path, 'loss_curve-剪枝0.3.png'))
    # Close the figure so repeated calls (grid search) don't leak figures;
    # the sibling visualize_predictions already does this.
    plt.close()

# Plot predicted vs. true series on a shared sample axis.
def visualize_predictions(y_true, y_pred, save_path, mode):
    """Save an overlay plot of true and predicted values.

    Args:
        y_true: Ground-truth values (any array-like; flattened internally).
        y_pred: Predicted values, same length as ``y_true``.
        save_path: Directory for the output PNG.
        mode: Tag embedded in the title and file name.
    """
    # Force both series to 1-D before plotting.
    truths = np.array(y_true).flatten()
    preds = np.array(y_pred).flatten()
    sample_ids = np.arange(len(truths))

    plt.figure(figsize=(24, 8))
    plt.plot(sample_ids, truths, color='blue', label='True Values', alpha=0.6)
    plt.plot(sample_ids, preds, color='red', label='Predicted Values', alpha=0.6)
    plt.xlabel('Sample')
    plt.ylabel('Value')
    plt.title(f'Prediction vs True ({mode})')
    plt.legend()
    plt.tight_layout()
    plt.savefig(os.path.join(save_path, f'{mode}_scatter_predictions.png'))
    plt.close()



# Round a count up to the next multiple of `base` (default 16).
def round_up_multiple(x, base=16):
    """Return the smallest multiple of ``base`` that is >= ``x``."""
    return -(-x // base) * base

#########################卷积核剪枝####################################
def get_kept_channels(weight_tensor, amount=0.3, n=2, dim=0):
    """Pick channels to keep by Ln norm, padded to a multiple of 16.

    Channels with the smallest norm (computed over every axis except
    ``dim``) are pruned; the kept index list is then padded by repeating
    existing indices until its length is a multiple of 16 (hardware-
    friendly channel counts).

    Args:
        weight_tensor: Convolution weight, channels along ``dim``.
        amount: Fraction of channels to prune.
        n: Norm order.
        dim: Channel axis.

    Returns:
        Sorted list of kept channel indices (may contain duplicates from
        the padding step).
    """
    reduce_dims = tuple(i for i in range(weight_tensor.dim()) if i != dim)
    norms = weight_tensor.norm(p=n, dim=reduce_dims)
    num_prune = int(amount * weight_tensor.shape[dim])
    _, prune_idx = torch.topk(norms, k=num_prune, largest=False)
    all_idx = set(range(weight_tensor.shape[dim]))
    keep_idx = sorted(all_idx - set(prune_idx.cpu().numpy()))

    # Pad up to the next multiple of 16. Rounding up can never shrink the
    # list, so the original truncation branch was unreachable and is gone.
    target_len = 16 * ((len(keep_idx) + 15) // 16)
    while len(keep_idx) < target_len:
        # Repeat existing indices; the loop handles the (unlikely) case
        # where one padding pass is not enough.
        keep_idx += keep_idx[:target_len - len(keep_idx)]

    return keep_idx


# Compact CNN rebuilt after filter pruning.
class SlimModelCNN(nn.Module):
    """Slimmed copy of ``model_cnn`` with pruned conv1/conv2 widths.

    Args:
        ch1: Number of kept output channels for conv1.
        ch2: Number of kept output channels for conv2.
    """

    def __init__(self, ch1, ch2):
        super(SlimModelCNN, self).__init__()
        # Same topology as model_cnn; only the channel widths differ.
        self.conv1 = nn.Conv1d(15, ch1, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool1d(2)
        self.conv2 = nn.Conv1d(ch1, ch2, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool1d(2)
        self.conv3 = nn.Conv1d(ch2, 1, kernel_size=3, padding=1)
        # conv4 mixes the 32 remaining time steps (128 / 2 / 2) into one value.
        self.conv4 = nn.Conv1d(32, 1, kernel_size=1)
        self.relu = nn.ReLU()

    def forward(self, x):
        # x: [batch, seq=128, features=15] -> [batch, 15, 128] for Conv1d.
        out = x.permute(0, 2, 1)
        out = self.pool1(self.relu(self.conv1(out)))
        out = self.pool2(self.relu(self.conv2(out)))
        out = self.conv3(out)
        # Treat the time axis as channels so conv4 collapses it.
        out = self.conv4(out.permute(0, 2, 1))
        return out.squeeze(1)


# Copy the surviving weights from the big model into the slim model.
def transfer_weights_model_cnn(original_model, slim_model, idx1, idx2):
    """Populate ``slim_model`` with the kept channels of ``original_model``.

    Args:
        original_model: Trained full-size model (conv1..conv4).
        slim_model: Freshly built SlimModelCNN with matching kept widths.
        idx1: Kept output-channel indices of conv1 (= conv2 input channels).
        idx2: Kept output-channel indices of conv2 (= conv3 input channels).
    """
    with torch.no_grad():
        # conv1: select kept output channels.
        slim_model.conv1.weight.copy_(original_model.conv1.weight[idx1])
        slim_model.conv1.bias.copy_(original_model.conv1.bias[idx1])
        # conv2: kept outputs (rows) and, of those rows, kept inputs (idx1).
        slim_model.conv2.weight.copy_(original_model.conv2.weight[idx2][:, idx1])
        slim_model.conv2.bias.copy_(original_model.conv2.bias[idx2])
        # conv3: single output channel; only its input channels are filtered.
        slim_model.conv3.weight.copy_(original_model.conv3.weight[:, idx2])
        slim_model.conv3.bias.copy_(original_model.conv3.bias)
        # conv4 is untouched by pruning; copy it wholesale.
        slim_model.conv4.load_state_dict(original_model.conv4.state_dict())

# --- Taylor-expansion importance scores used as the pruning criterion ---
def get_kept_filters_taylor(model, data_loader, criterion, amount, device):
    """Rank conv1/conv2 filters by first-order Taylor importance |grad * w|.

    Scores come from a single batch; the lowest-scoring filters are dropped.

    Args:
        model: Network exposing conv1/conv2.
        data_loader: Loader providing at least one (inputs, targets) batch.
        criterion: Loss used to obtain gradients.
        amount: Fraction of filters to prune per layer.
        device: Device to run the scoring batch on.

    Returns:
        (idx1, idx2): sorted kept-filter indices for conv1 and conv2.
    """
    model.eval()
    batch_x, batch_y = next(iter(data_loader))
    batch_x, batch_y = batch_x.to(device), batch_y.to(device)

    # Make sure gradients flow to the conv weights we score.
    model.conv1.weight.requires_grad_(True)
    model.conv2.weight.requires_grad_(True)

    criterion(model(batch_x), batch_y).backward()

    def layer_scores(layer):
        # First-order Taylor importance per output filter: sum |grad * w|.
        contrib = (layer.weight.grad * layer.weight).abs()
        return contrib.sum(dim=(1, 2)).detach().cpu().numpy()

    score1 = layer_scores(model.conv1)
    score2 = layer_scores(model.conv2)

    def keep_indices(score, keep_ratio):
        # Prune the (1 - keep_ratio) lowest-scoring filters.
        num_pruned = int(len(score) * (1 - keep_ratio))
        pruned = set(np.argsort(score)[:num_pruned])
        return sorted(set(range(len(score))) - pruned)

    idx1 = keep_indices(score1, 1 - amount)
    idx2 = keep_indices(score2, 1 - amount)

    return idx1, idx2
# --- end Taylor importance scoring ---

# Rebuild a physically smaller model and persist it.
def build_and_save_slim_model_cnn(model, save_path, amount=0.3):
    """Construct, populate and save a SlimModelCNN from ``model``.

    Returns:
        (slim_model, idx1, idx2) — the slim network plus the kept-channel
        indices needed to rebuild it at load time.
    """
    idx1 = get_kept_channels(model.conv1.weight, amount)
    idx2 = get_kept_channels(model.conv2.weight, amount)

    target_device = model.conv1.weight.device
    slim_model = SlimModelCNN(len(idx1), len(idx2)).to(target_device)
    transfer_weights_model_cnn(model, slim_model, idx1, idx2)

    torch.save(slim_model.state_dict(), save_path)
    print(f"Slim model saved to {save_path}")
    return slim_model, idx1, idx2


#########################卷积核剪枝####################################

###########################通道剪枝####################################

# Rank *input* channels of a conv weight and keep the strongest ones.
def get_kept_input_channels(weight_tensor, amount=0.3, n=2):
    """Return sorted input-channel indices to keep, ranked by Ln norm.

    The norm is reduced over the output-channel and kernel axes (0, 2),
    leaving one score per input channel (axis 1).
    """
    channel_norms = weight_tensor.norm(p=n, dim=(0, 2))
    num_pruned = int(amount * weight_tensor.shape[1])
    _, weakest = torch.topk(channel_norms, k=num_pruned, largest=False)
    survivors = set(range(weight_tensor.shape[1])) - set(weakest.cpu().numpy())
    return sorted(survivors)

class SlimModelCNN_ChannelPruned(nn.Module):
    """CNN whose conv2 consumes only a kept subset of conv1's outputs.

    Args:
        ch1: conv1 output channels (unchanged from the full model).
        kept_input_idx_conv2: Indices of conv1 outputs that conv2 keeps.
    """

    def __init__(self, ch1, kept_input_idx_conv2):
        super(SlimModelCNN_ChannelPruned, self).__init__()
        self.kept_idx = kept_input_idx_conv2
        self.conv1 = nn.Conv1d(15, ch1, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool1d(2)
        # conv2's input width shrinks to the number of surviving channels.
        self.conv2 = nn.Conv1d(len(kept_input_idx_conv2), 8, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool1d(2)
        self.conv3 = nn.Conv1d(8, 1, kernel_size=3, padding=1)
        self.conv4 = nn.Conv1d(32, 1, kernel_size=1)
        self.relu = nn.ReLU()

    def forward(self, x):
        # [batch, 128, 15] -> [batch, 15, 128]
        out = x.permute(0, 2, 1)
        out = self.pool1(self.relu(self.conv1(out)))
        # Keep only the surviving conv1 channels — the actual pruning step.
        out = out[:, self.kept_idx, :]
        out = self.pool2(self.relu(self.conv2(out)))
        out = self.conv3(out)
        # Time axis becomes channels so conv4 collapses it to one value.
        out = self.conv4(out.permute(0, 2, 1))
        return out.squeeze(1)

# Move weights into the channel-pruned model.
def transfer_weights_channel_pruned(model, slim_model, kept_input_idx):
    """Copy weights from ``model``; conv2 keeps only ``kept_input_idx`` inputs."""
    with torch.no_grad():
        slim_model.conv1.load_state_dict(model.conv1.state_dict())
        # Slice conv2's input-channel axis down to the survivors.
        slim_model.conv2.weight.copy_(model.conv2.weight[:, kept_input_idx, :])
        slim_model.conv2.bias.copy_(model.conv2.bias)
        # conv3 / conv4 are structurally unchanged.
        slim_model.conv3.load_state_dict(model.conv3.state_dict())
        slim_model.conv4.load_state_dict(model.conv4.state_dict())

# BN-gamma channel importance (network-slimming style criterion).
def get_kept_channels_bn_gamma(model, bn_layer_name='bn2', amount=0.3):
    """Keep the channels whose BatchNorm gamma has the largest magnitude.

    Args:
        model: Module containing a BatchNorm layer named ``bn_layer_name``.
        bn_layer_name: Dotted module name of the BatchNorm layer.
        amount: Fraction of channels (smallest |gamma|) to prune.
    """
    bn = dict(model.named_modules())[bn_layer_name]
    gammas = bn.weight.detach().abs().cpu().numpy()
    num_pruned = int(amount * len(gammas))
    pruned = set(np.argsort(gammas)[:num_pruned])
    return sorted(set(range(len(gammas))) - pruned)

# Build, populate and persist the channel-pruned model.
def build_and_save_channel_pruned_model(model, save_path, amount=0.3):
    """Create a SlimModelCNN_ChannelPruned from ``model`` and save it.

    Returns:
        (slim_model, kept_idx): the pruned network and the surviving conv2
        input-channel indices (needed to rebuild it at load time).
    """
    kept_idx = get_kept_channels_bn_gamma(model, bn_layer_name='bn2', amount=amount)
    # conv1 keeps its full width; only conv2's inputs are pruned.
    slim_model = SlimModelCNN_ChannelPruned(model.conv1.out_channels, kept_idx)
    slim_model = slim_model.to(model.conv1.weight.device)
    transfer_weights_channel_pruned(model, slim_model, kept_idx)

    torch.save(slim_model.state_dict(), save_path)
    print(f"Channel-pruned model saved to {save_path}")
    return slim_model, kept_idx


###########################通道剪枝####################################


###########################layer剪枝####################################

class SlimModelCNN_LayerPruned(nn.Module):
    """Depth-pruned CNN where conv2 and/or conv3 can be dropped entirely.

    conv4 is created lazily on the first forward pass because its input
    width (the post-pooling sequence length seen as channels) depends on
    which layers are enabled.

    NOTE(review): a state_dict saved *before* the first forward will not
    contain conv4, and an optimizer built before the first forward will not
    update it — run one dummy batch through the model first.
    """

    def __init__(self, use_conv2=True, use_conv3=True):
        super(SlimModelCNN_LayerPruned, self).__init__()
        self.use_conv2 = use_conv2
        self.use_conv3 = use_conv3

        self.conv1 = nn.Conv1d(15, 16, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool1d(2)
        if self.use_conv2:
            self.conv2 = nn.Conv1d(16, 8, kernel_size=3, padding=1)
            self.pool2 = nn.MaxPool1d(2)
        if self.use_conv3:
            # conv3's input width depends on whether conv2 survived.
            self.conv3 = nn.Conv1d(8 if self.use_conv2 else 16, 1, kernel_size=3, padding=1)

        self.final_channels = None  # resolved on first forward

        self.conv4 = None  # placeholder; built lazily in forward()

        self.relu = nn.ReLU()

    def forward(self, x):
        x = x.permute(0, 2, 1)
        x = self.conv1(x)
        x = self.relu(x)
        x = self.pool1(x)

        if self.use_conv2:
            x = self.conv2(x)
            x = self.relu(x)
            x = self.pool2(x)

        if self.use_conv3:
            x = self.conv3(x)

        # Swap time and channel axes so conv4 mixes across time steps,
        # matching model_cnn. (BUG FIX: the original permuted twice, which
        # was a no-op and left conv4 mixing across the wrong axis, producing
        # a per-time-step output instead of one value per window.)
        x = x.permute(0, 2, 1)

        # Build conv4 on first use, once the channel count is known.
        if self.conv4 is None:
            self.final_channels = x.size(1)
            self.conv4 = nn.Conv1d(self.final_channels, 1, kernel_size=1).to(x.device)

        x = self.conv4(x)
        x = x.squeeze(1)
        return x

def build_and_save_layer_pruned_model(use_conv2, use_conv3, save_path):
    """Instantiate a layer-pruned model and save it with its config flags.

    The flags are stored alongside the state_dict so evaluation code can
    rebuild the same architecture before loading the weights.
    """
    pruned = SlimModelCNN_LayerPruned(use_conv2=use_conv2, use_conv3=use_conv3).to(device)
    checkpoint = {
        'state_dict': pruned.state_dict(),
        'use_conv2': use_conv2,
        'use_conv3': use_conv3
    }
    torch.save(checkpoint, save_path)
    print(f"Layer-pruned model saved to {save_path}")
    return pruned

###########################layer剪枝####################################

class model_cnn(nn.Module):
    """Baseline 1-D CNN: three conv stages plus a time-mixing 1x1 conv.

    Input:  [batch, 128, 15] windows (seq_len=128, features=15).
    Output: [batch, 1] regression value per window.
    """

    def __init__(self):
        super(model_cnn, self).__init__()
        self.conv1 = nn.Conv1d(in_channels=15, out_channels=16, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm1d(16)  # its gamma also drives channel pruning
        self.pool1 = nn.MaxPool1d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv1d(in_channels=16, out_channels=8, kernel_size=3, stride=1, padding=1)
        self.pool2 = nn.MaxPool1d(kernel_size=2, stride=2)
        self.conv3 = nn.Conv1d(in_channels=8, out_channels=1, kernel_size=3, stride=1, padding=1)
        # After two poolings the 128-step sequence is 32 long; conv4 mixes
        # those 32 steps (seen as channels after the permute) into one value.
        self.conv4 = nn.Conv1d(in_channels=32, out_channels=1, kernel_size=1, stride=1)
        self.relu = nn.ReLU()

    def forward(self, x):
        # [batch, 128, 15] -> [batch, 15, 128]
        feats = x.permute(0, 2, 1)
        feats = self.pool1(self.relu(self.bn2(self.conv1(feats))))
        feats = self.pool2(self.relu(self.conv2(feats)))
        feats = self.conv3(feats)           # [batch, 1, 32]
        feats = feats.permute(0, 2, 1)      # [batch, 32, 1]
        feats = self.conv4(feats)           # [batch, 1, 1]
        return feats.squeeze(1)             # [batch, 1]
# Measure average single-sample inference latency.
def benchmark_model(model, input_shape, repeat=100):
    """Time ``repeat`` forward passes on one [1, 128, features] sample.

    Args:
        model: Network exposing ``conv1`` (used to infer the feature count
            and device). ``input_shape`` is kept for backward compatibility
            but ignored — the shape is derived from the model itself.
        repeat: Number of timed forward passes.

    Returns:
        Average latency in milliseconds.
    """
    model.eval()
    # Derive the device from the model instead of the module-level global,
    # so the benchmark works wherever the model actually lives.
    run_device = model.conv1.weight.device
    x = torch.randn(1, 128, model.conv1.in_channels, device=run_device)
    with torch.no_grad():
        # Warm-up pass so lazy initialization / autotuning don't skew timing.
        _ = model(x)
        if run_device.type == 'cuda':
            torch.cuda.synchronize(run_device)
        start = time.time()
        for _ in range(repeat):
            _ = model(x)
        if run_device.type == 'cuda':
            # CUDA kernels launch asynchronously; wait so the timing is real.
            torch.cuda.synchronize(run_device)
        end = time.time()
    avg_time_ms = (end - start) / repeat * 1000
    print(f"Avg inference time over {repeat} runs: {avg_time_ms:.2f} ms")
    return avg_time_ms


def train_model(model, train_loader, criterion, optimizer, scheduler, y_scaler, save_dir, epochs=20):
    """Train ``model`` and report fit metrics on the *training* set.

    Args:
        model: Network to optimize (batches are moved to the global ``device``).
        train_loader: DataLoader of (inputs, targets) batches.
        criterion: Loss function.
        optimizer: Optimizer stepping the model parameters.
        scheduler: LR scheduler, stepped once per epoch.
        y_scaler: Fitted target scaler, used to report metrics in real units.
        save_dir: Directory for the prediction plot.
        epochs: Number of training epochs.

    Returns:
        List of per-epoch average training losses.
    """
    model.train()
    losses = []
    for epoch in range(epochs):
        running_loss = 0.0
        for inputs, targets in tqdm(train_loader, desc=f"Epoch {epoch + 1}/{epochs}"):
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        scheduler.step()
        avg_loss = running_loss / len(train_loader)
        losses.append(avg_loss)
        print(f"Epoch [{epoch + 1}/{epochs}] - Loss: {avg_loss:.6f}")

    # Post-training evaluation.
    # NOTE(review): these metrics are computed on the (shuffled) *training*
    # loader, so they measure fit, not generalization.
    model.eval()
    all_preds = []
    all_targets = []
    with torch.no_grad():
        for inputs, targets in train_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs).cpu().numpy().squeeze()
            all_preds.extend(outputs)
            all_targets.extend(targets.cpu().numpy().squeeze())

        # Scalers expect 2-D input: (n_samples, 1).
        all_preds = np.array(all_preds).reshape(-1, 1)
        all_targets = np.array(all_targets).reshape(-1, 1)

        # Undo target normalization so metrics are in the original units.
        all_preds = y_scaler.inverse_transform(all_preds).flatten()
        all_targets = y_scaler.inverse_transform(all_targets).flatten()

        mse = mean_squared_error(all_targets, all_preds)
        rmse = np.sqrt(mse)
        mae = mean_absolute_error(all_targets, all_preds)
        r2 = r2_score(all_targets, all_preds)

        print(f"[Train Accuracy]")
        print(f"  MSE  = {mse:.6f}")
        print(f"  RMSE = {rmse:.6f}")
        print(f"  MAE  = {mae:.6f}")
        print(f"  R²   = {r2:.6f}")

        # NOTE(review): the plot tag hard-codes "剪枝0.3" even though callers
        # may use a different pruning amount — confirm this is intended.
        visualize_predictions(all_targets, all_preds, save_dir, f'train-剪枝0.3')
        print(f"Sample of Predicted Values (train): {all_preds[:7]}")
        print(f"Sample of True Values (train): {all_targets[:7]}")

    return losses



# def main():
#     save_dir = "/home/scuee_user06/myh/轻量化/code/model_pruning-512"
#     os.makedirs(save_dir, exist_ok=True)
#
#     batch_size = 128
#     learning_rate = 0.005
#     epochs = 512
#     mode = 'eval'  # 模式选择：'train' 或 'eval'
#     amount = 0.3
#     pruning_type = 'filter'  # 'filter', 'channel', 'layer'
#     use_conv2 = True
#     use_conv3 = False
#
#     train_data = load_npy_from_folder("/home/scuee_user06/myh/轻量化/飞行数据/processed_train-new")
#     test_data = load_npy_from_folder("/home/scuee_user06/myh/轻量化/飞行数据/processed_test-new")
#     # 训练阶段
#     if mode == 'train':
#         X_train, y_train, y_scaler = preprocess_data(train_data, save_dir, is_train=True)
#         X_test, y_test, _ = preprocess_data(test_data, save_dir, is_train=False)
#
#         model = model_cnn().to(device)
#         apply_and_finalize_structured_pruning(model, amount=amount)
#         optimizer = optim.Adam(model.parameters(), lr=learning_rate)
#         criterion = nn.MSELoss()
#         scheduler = cosine_annealing_scheduler(optimizer, epochs=epochs)
#         train_loader = DataLoader(data.TensorDataset(X_train, y_train), batch_size=batch_size, shuffle=True)
#
#         losses = train_model(model, train_loader, criterion, optimizer, scheduler, y_scaler, save_dir, epochs=epochs)
#         torch.save(model.state_dict(), os.path.join(save_dir, f"cnn_model-512-剪枝-{amount}.pth"))
#         total_params = sum(p.numel() for p in model.parameters())
#         print(f"Final pruned model has {total_params:,} parameters")
#
#         if pruning_type == 'filter':
#             slim_model, idx1, idx2 = build_and_save_slim_model_cnn(model, os.path.join(save_dir, f"cnn_model_slim-{amount}.pth"),
#                                                                    amount=amount)
#             np.save(os.path.join(save_dir, 'idx1.npy'), np.array(idx1))
#             np.save(os.path.join(save_dir, 'idx2.npy'), np.array(idx2))
#         elif pruning_type == 'channel':
#             slim_model, kept_input_idx = build_and_save_channel_pruned_model(model, os.path.join(save_dir,
#                                                                                                  f"cnn_model_channel_pruned-{amount}.pth"),
#                                                                              amount=amount)
#             np.save(os.path.join(save_dir, 'kept_input_idx.npy'), np.array(kept_input_idx))
#         elif pruning_type == 'layer':
#             slim_model = build_and_save_layer_pruned_model(use_conv2, use_conv3,
#                                                            os.path.join(save_dir, f"cnn_model_layer_pruned-{amount}.pth"))
#
#         visualize_loss(losses, save_dir)
#         benchmark_model(model, input_shape=None)  # input_shape 参数现在不再需要传
#
#
#     elif mode == 'eval':
#         if amount == 0.0:
#             # 不剪枝：加载完整模型
#             model = model_cnn().to(device)
#             load_model(model, os.path.join(save_dir, "cnn_model-512-剪枝前.pth"))
#         elif pruning_type == 'filter':
#             # 加载 filter-pruned 模型
#             idx1 = np.load(os.path.join(save_dir, 'idx1.npy'))
#             idx2 = np.load(os.path.join(save_dir, 'idx2.npy'))
#             model = SlimModelCNN(len(idx1), len(idx2)).to(device)
#             load_model(model, os.path.join(save_dir, f"cnn_model_slim-{amount}.pth"))
#         elif pruning_type == 'channel':
#             # 加载 channel-pruned 模型
#             kept_input_idx = np.load(os.path.join(save_dir, 'kept_input_idx.npy')).tolist()
#             model = SlimModelCNN_ChannelPruned(16, kept_input_idx).to(device)
#             load_model(model, os.path.join(save_dir, f"cnn_model_channel_pruned-{amount}.pth"))
#         elif pruning_type == 'layer':
#             checkpoint = torch.load(os.path.join(save_dir, f"cnn_model_layer_pruned-{amount}.pth"))
#             use_conv2 = checkpoint['use_conv2']
#             use_conv3 = checkpoint['use_conv3']
#             model = SlimModelCNN_LayerPruned(use_conv2=use_conv2, use_conv3=use_conv3).to(device)
#             model.load_state_dict(checkpoint['state_dict'])
#
#         else:
#             raise ValueError("Unknown pruning type")
#         model.eval()
#         # 加载scaler对象
#         y_scaler_path = os.path.join(save_dir, 'y_scaler.pkl')
#         y_scaler = joblib.load(y_scaler_path)
#         X_test, y_test, _ = preprocess_data(test_data, save_dir, is_train=False)
#         with torch.no_grad():
#             X_test = X_test.to(device)
#             # y_pred = model(X_test).cpu().numpy().squeeze()
#             y_pred = model(X_test).cpu().numpy()
#
#             # 如果输出是 [batch_size, seq_len, 1] 或 [batch_size, seq_len]，就取最后一个时间步或均值
#             if y_pred.ndim == 3:
#                 y_pred = y_pred[:, -1, 0]  # 取最后一个时间步的输出
#             elif y_pred.ndim == 2 and y_pred.shape[1] > 1:
#                 y_pred = y_pred[:, -1]  # 或者用 y_pred.mean(axis=1)
#             else:
#                 y_pred = y_pred.flatten()
#
#             y_true = y_test.cpu().numpy().squeeze()
#
#             # reshape 成二维再反归一化
#             y_pred = y_scaler.inverse_transform(y_pred.reshape(-1, 1)).flatten()
#             y_true = y_scaler.inverse_transform(y_true.reshape(-1, 1)).flatten()
#
#             mse = mean_squared_error(y_true, y_pred)
#             rmse = np.sqrt(mse)
#             mae = mean_absolute_error(y_true, y_pred)
#             r2 = r2_score(y_true, y_pred)
#
#             print(f"[Eval Accuracy]")
#             print(f"  MSE  = {mse:.6f}")
#             print(f"  RMSE = {rmse:.6f}")
#             print(f"  MAE  = {mae:.6f}")
#             print(f"  R²   = {r2:.6f}")
#
#             visualize_predictions(y_true, y_pred, save_dir, f'eval-剪枝0.3-{pruning_type}')
#             print(f"Sample of Predicted Values: {y_pred[:7]}")
#             print(f"Sample of True Values: {y_true[:7]}")
#
#             benchmark_model(model, input_shape=None)
#
#             df = pd.DataFrame({
#                 'True': y_true,
#                 'Pred': y_pred
#             })
#             df.to_csv(os.path.join(save_dir, 'predictions.csv'), index=False)
#
#
#     else:
#         print("Invalid mode: choose 'train' or 'eval'")


########################################循环遍历网格搜索########################################################

def run_experiment(pruning_type, amount, mode='train', use_conv2=True, use_conv3=False):
    """Run one train or eval pass for a given pruning strategy and amount.

    Args:
        pruning_type: One of 'filter', 'channel', 'layer'.
        amount: Pruning fraction; 0.0 selects the unpruned model in eval mode.
        mode: 'train' fits and saves models; 'eval' loads and scores them.
        use_conv2: Layer flag for the 'layer' pruning variant.
        use_conv3: Layer flag for the 'layer' pruning variant.
    """
    print(f"\n[RUNNING] pruning_type={pruning_type}, amount={amount:.2f}, mode={mode}")

    save_dir = "/home/scuee_user06/myh/轻量化/code/model_pruning-512"
    os.makedirs(save_dir, exist_ok=True)

    batch_size = 128
    learning_rate = 0.005
    epochs = 512

    train_data = load_npy_from_folder("/home/scuee_user06/myh/轻量化/飞行数据/processed_train-new")
    test_data = load_npy_from_folder("/home/scuee_user06/myh/轻量化/飞行数据/processed_test-new")
    # Training phase
    if mode == 'train':
        X_train, y_train, y_scaler = preprocess_data(train_data, save_dir, is_train=True)
        X_test, y_test, _ = preprocess_data(test_data, save_dir, is_train=False)

        model = model_cnn().to(device)
        apply_and_finalize_structured_pruning(model, amount=amount)
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        criterion = nn.MSELoss()
        scheduler = cosine_annealing_scheduler(optimizer, epochs=epochs)
        train_loader = DataLoader(data.TensorDataset(X_train, y_train), batch_size=batch_size, shuffle=True)

        losses = train_model(model, train_loader, criterion, optimizer, scheduler, y_scaler, save_dir, epochs=epochs)
        torch.save(model.state_dict(), os.path.join(save_dir, f"cnn_model-512-{pruning_type}-{amount:.2f}.pth"))
        total_params = sum(p.numel() for p in model.parameters())
        print(f"Final pruned model has {total_params:,} parameters")

        if pruning_type == 'filter':
            # NOTE(review): the slim model built here is never saved, but the
            # eval branch below loads "cnn_model_slim-{amount:.2f}.pth" — eval
            # of filter pruning will fail unless that file is produced
            # elsewhere (e.g. via build_and_save_slim_model_cnn).
            idx1, idx2 = get_kept_filters_taylor(model, train_loader, criterion, amount, device)
            slim_model = SlimModelCNN(len(idx1), len(idx2)).to(device)
            transfer_weights_model_cnn(model, slim_model, idx1, idx2)
            np.save(os.path.join(save_dir, f'idx1-{pruning_type}-{amount:.2f}.npy'), np.array(idx1))
            np.save(os.path.join(save_dir, f'idx2-{pruning_type}-{amount:.2f}.npy'), np.array(idx2))
        elif pruning_type == 'channel':
            slim_model, kept_input_idx = build_and_save_channel_pruned_model(model, os.path.join(save_dir,
                                                                                                 f"cnn_model_channel_pruned-{amount:.2f}.pth"),
                                                                             amount=amount)
            np.save(os.path.join(save_dir, f'kept_input_idx-{pruning_type}-{amount:.2f}.npy'), np.array(kept_input_idx))
        elif pruning_type == 'layer':
            # NOTE(review): this replaces the trained model with a freshly
            # initialized layer-pruned one — no weights are transferred.
            model = SlimModelCNN_LayerPruned(use_conv2, use_conv3).to(device)
            torch.save({
                'state_dict': model.state_dict(),
                'use_conv2': use_conv2,
                'use_conv3': use_conv3
            }, os.path.join(save_dir, f"cnn_model_layer_pruned-{amount:.2f}.pth"))

        visualize_loss(losses, save_dir)
        benchmark_model(model, input_shape=None)  # input_shape no longer needs to be passed


    elif mode == 'eval':
        if amount == 0.0:
            # No pruning: load the full model.
            model = model_cnn().to(device)
            load_model(model, os.path.join(save_dir, "cnn_model-512-剪枝前.pth"))
        elif pruning_type == 'filter':
            # Load the filter-pruned model.
            idx1 = np.load(os.path.join(save_dir, f'idx1-{pruning_type}-{amount:.2f}.npy'))
            idx2 = np.load(os.path.join(save_dir, f'idx2-{pruning_type}-{amount:.2f}.npy'))
            model = SlimModelCNN(len(idx1), len(idx2)).to(device)
            load_model(model, os.path.join(save_dir, f"cnn_model_slim-{amount:.2f}.pth"))
        elif pruning_type == 'channel':
            # Load the channel-pruned model.
            kept_input_idx = np.load(os.path.join(save_dir, f'kept_input_idx-{pruning_type}-{amount:.2f}.npy')).tolist()
            model = SlimModelCNN_ChannelPruned(16, kept_input_idx).to(device)
            load_model(model, os.path.join(save_dir, f"cnn_model_channel_pruned-{amount:.2f}.pth"))
        elif pruning_type == 'layer':
            checkpoint = torch.load(os.path.join(save_dir, f"cnn_model_layer_pruned-{amount:.2f}.pth"))
            use_conv2 = checkpoint['use_conv2']
            use_conv3 = checkpoint['use_conv3']
            model = SlimModelCNN_LayerPruned(use_conv2=use_conv2, use_conv3=use_conv3).to(device)
            model.load_state_dict(checkpoint['state_dict'])

        else:
            raise ValueError("Unknown pruning type")
        model.eval()
        # Load the fitted target scaler.
        y_scaler_path = os.path.join(save_dir, 'y_scaler.pkl')
        y_scaler = joblib.load(y_scaler_path)
        X_test, y_test, _ = preprocess_data(test_data, save_dir, is_train=False)
        with torch.no_grad():
            X_test = X_test.to(device)
            # y_pred = model(X_test).cpu().numpy().squeeze()
            y_pred = model(X_test).cpu().numpy()

            # If the output is [batch, seq, 1] or [batch, seq], take the last
            # time step (or a mean) so predictions become one value per sample.
            if y_pred.ndim == 3:
                y_pred = y_pred[:, -1, 0]  # last time step's output
            elif y_pred.ndim == 2 and y_pred.shape[1] > 1:
                y_pred = y_pred[:, -1]  # or alternatively y_pred.mean(axis=1)
            else:
                y_pred = y_pred.flatten()

            y_true = y_test.cpu().numpy().squeeze()

            # Reshape to 2-D before inverse normalization.
            y_pred = y_scaler.inverse_transform(y_pred.reshape(-1, 1)).flatten()
            y_true = y_scaler.inverse_transform(y_true.reshape(-1, 1)).flatten()

            mse = mean_squared_error(y_true, y_pred)
            rmse = np.sqrt(mse)
            mae = mean_absolute_error(y_true, y_pred)
            r2 = r2_score(y_true, y_pred)

            print(f"[Eval Accuracy]")
            print(f"  MSE  = {mse:.6f}")
            print(f"  RMSE = {rmse:.6f}")
            print(f"  MAE  = {mae:.6f}")
            print(f"  R²   = {r2:.6f}")

            visualize_predictions(y_true, y_pred, save_dir, f'eval-{pruning_type}-{amount:.2f}')
            print(f"Sample of Predicted Values: {y_pred[:7]}")
            print(f"Sample of True Values: {y_true[:7]}")

            # NOTE(review): the benchmark runs twice — this first call's
            # result is discarded; likely only the second call is needed.
            benchmark_model(model, input_shape=None)
            # Measure inference time (returned value is in milliseconds).
            inference_time_ms = benchmark_model(model, input_shape=None)

            df = pd.DataFrame({
                'True': y_true,
                'Pred': y_pred
            })
            df.to_csv(os.path.join(save_dir, f'predictions-{pruning_type}-{amount:.2f}.csv'), index=False)

            # Log this experiment's metrics alongside all previous runs.
            result_log_path = os.path.join(save_dir, "all_eval_results.csv")
            result_row = {
                'pruning_type': pruning_type,
                'amount': round(amount, 2),
                'MSE': mse,
                'RMSE': rmse,
                'MAE': mae,
                'R2': r2,
                'InferenceTime(ms)': round(inference_time_ms, 2)
            }

            # Append to the log if it exists, otherwise create it.
            if os.path.exists(result_log_path):
                existing_df = pd.read_csv(result_log_path)
                updated_df = pd.concat([existing_df, pd.DataFrame([result_row])], ignore_index=True)
            else:
                updated_df = pd.DataFrame([result_row])

            # Persist the accumulated results.
            updated_df.to_csv(result_log_path, index=False)
            print(f"[Logged] Evaluation metrics appended to: {result_log_path}")



    else:
        print("Invalid mode: choose 'train' or 'eval'")

########################################循环遍历网格搜索########################################################


if __name__ == "__main__":
    set_seed(42)
    # Grid search over pruning strategies and pruning amounts.
    for pruning_type in ['filter']:  # options: 'filter', 'channel', 'layer'
        for amount in np.linspace(0.1, 0.5, 10):
            print(f"\n>>> Running: {pruning_type} pruning @ amount={amount:.2f}")
            run_experiment(pruning_type=pruning_type, amount=round(amount, 2), mode='train', use_conv2=True, use_conv3=False)
