import os
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import pandas as pd
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import joblib
from torch.nn.utils import prune
import time
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

# Use the Agg backend so figures can be saved without a display (headless server).
matplotlib.use('Agg')
# NOTE(review): GPU index 2 is hard-coded — assumes a multi-GPU host; falls back to CPU.
device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

def set_seed(seed=42):
    """Seed every RNG in play (python, numpy, torch CPU and all GPUs) and
    force deterministic cuDNN kernels so runs are reproducible.

    Args:
        seed: integer seed applied to all generators.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # manual_seed_all covers every visible GPU, not just the current one.
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def apply_and_finalize_structured_pruning(model, amount=0.3):
    """Apply L2 structured pruning along output channels (dim=0) to
    conv1-conv3, then remove the pruning reparametrization so the zeroed
    weights become permanent plain parameters.

    Args:
        model: module exposing `conv1`, `conv2`, `conv3` Conv1d layers.
        amount: fraction of output channels to zero per layer; <= 0 is a no-op.
    """
    # Skip entirely for amount <= 0: the masks would be identity anyway.
    if amount <= 0:
        print("Pruning amount <= 0; skipping structured pruning.")
        return

    layers = (model.conv1, model.conv2, model.conv3)

    # Step 1: attach per-channel L2 pruning masks.
    for layer in layers:
        prune.ln_structured(layer, name='weight', amount=amount, n=2, dim=0)
    print(f"Structured pruning (amount={amount}) applied.")

    # Step 2: bake the masks into the weights and drop the reparametrization.
    for layer in layers:
        prune.remove(layer, 'weight')
    print("Pruned weights finalized and masks removed (model permanently slimmed).")


def save_model(model, path):
    """Persist the model's state dict (weights only, not the module) to `path`."""
    state = model.state_dict()
    torch.save(state, path)
    print(f"Model saved to {path}")

def load_model(model, path):
    """Load a saved state dict into `model` in place.

    The checkpoint is first mapped to CPU so weights saved on any GPU
    (e.g. cuda:2) load on machines without that device; load_state_dict
    then copies the values onto whatever device the model already lives on.
    """
    state_dict = torch.load(path, map_location="cpu")
    model.load_state_dict(state_dict)
    print(f"Model loaded from {path}")

def load_npy_from_folder(folder_path):
    """Load every .npy file in `folder_path` and concatenate along axis 0.

    Files are read in sorted name order so the result is deterministic
    (os.listdir order is filesystem-dependent).

    Raises:
        FileNotFoundError: if the folder contains no .npy files (clearer
            than the ValueError np.concatenate would raise on an empty list).
    """
    all_data = []
    for file in sorted(os.listdir(folder_path)):
        if file.endswith('.npy'):
            all_data.append(np.load(os.path.join(folder_path, file)))
    if not all_data:
        raise FileNotFoundError(f"No .npy files found in {folder_path}")
    return np.concatenate(all_data, axis=0)


def create_sliding_window(data, window_size=128, step_size=64):
    """Slice `data` into overlapping windows for sequence regression.

    Column 0 holds the target; the remaining columns are features. Each
    sample is the feature block of one window, its label is the target at
    the window's last frame.

    Returns:
        (X, y, indices): X has shape (n_windows, window_size, n_features-1),
        y the last-frame targets, indices the row in `data` each y came from.
    """
    samples, labels, label_rows = [], [], []
    start = 0
    while start < len(data) - window_size:
        window = data[start:start + window_size]
        samples.append(window[:, 1:])      # drop the target column
        labels.append(window[-1, 0])       # target at the final frame
        label_rows.append(start + window_size - 1)
        start += step_size
    return np.array(samples), np.array(labels), np.array(label_rows)


def inverse_transform(scaler, data):
    """Invert a StandardScaler transform on `data` and return a flat array.

    NOTE(review): this helper appears unused in this file (train/eval call
    `y_scaler.inverse_transform` directly). The tiling branch below looks
    suspect: replicating a single column across all scaler features and then
    flattening yields n_samples * n_features values, not n_samples — confirm
    the intent before relying on it.
    """
    # Ensure 2-D shape (n_samples, n_features) as sklearn requires.
    if len(data.shape) == 1:
        data = data.reshape(-1, 1)
    elif len(data.shape) == 2 and data.shape[1] == 1:
        # Replicate the single column to match the width the scaler was fitted
        # on (e.g. (58201, 128) or (batch_size, 128) at training time).
        data = np.tile(data, (1, scaler.scale_.shape[0]))
    return scaler.inverse_transform(data).flatten()


def load_scaler(save_dir):
    """Load the fitted scaler persisted as '<save_dir>/scaler.pkl'.

    Raises:
        FileNotFoundError: if no scaler exists there; the message now
            includes the looked-up path for easier debugging.
    """
    scaler_path = os.path.join(save_dir, 'scaler.pkl')
    # Guard clause: fail fast with the offending path in the message.
    if not os.path.exists(scaler_path):
        raise FileNotFoundError(f"Scaler file not found: {scaler_path}")
    scaler = joblib.load(scaler_path)
    print(f"Scaler loaded from {scaler_path}")
    return scaler

def preprocess_data(data, save_dir, is_train=True):
    """Window `data`, standardize features and targets, and return tensors.

    In training mode the scalers are fitted on the windowed data and saved
    under `save_dir`; in eval mode the previously saved scalers are loaded
    and reused so train/test normalization matches.

    Returns:
        (X, y, y_scaler): float32 tensors with X of shape
        (samples, window, features), y of shape (samples, 1), and the
        *target* scaler (needed later to invert predictions).
    """
    x_scaler_path = os.path.join(save_dir, 'x_scaler.pkl')
    y_scaler_path = os.path.join(save_dir, 'y_scaler.pkl')
    x_scaler = StandardScaler()
    y_scaler = StandardScaler()

    # Build windowed samples; X keeps (samples, window, features) layout —
    # the model permutes to channels-first itself.
    X, y, indices = create_sliding_window(data)

    n_features = X.shape[-1]
    if is_train:
        # Fit on the flattened windows, then persist both scalers.
        x_scaler.fit(X.reshape(-1, n_features))
        joblib.dump(x_scaler, x_scaler_path)

        y = y.reshape(-1, 1)
        y_scaler.fit(y)
        joblib.dump(y_scaler, y_scaler_path)
    else:
        # Reuse the scalers fitted during training.
        x_scaler = joblib.load(x_scaler_path)
        y_scaler = joblib.load(y_scaler_path)

    # Normalize, restoring X's original 3-D shape afterwards.
    X = x_scaler.transform(X.reshape(-1, n_features)).reshape(X.shape)
    y = y_scaler.transform(y.reshape(-1, 1))  # stays 2-D

    return (
        torch.tensor(X, dtype=torch.float32),
        torch.tensor(y, dtype=torch.float32),
        y_scaler,  # deliberately the y-scaler, not the x-scaler
    )


def cosine_annealing_scheduler(optimizer, epochs):
    """Build a cosine-annealing LR schedule whose period spans `epochs` steps."""
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
    return scheduler

def visualize_loss(losses, save_path):
    """Plot the per-epoch training loss curve and save it under `save_path`.

    Args:
        losses: sequence of per-epoch loss values.
        save_path: directory in which the PNG is written.
    """
    plt.figure(figsize=(24, 8))
    plt.plot(losses, label='Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training Loss')
    plt.legend()
    plt.savefig(os.path.join(save_path, 'loss_curve-剪枝前.png'))
    # Close the figure so repeated calls don't leak open figures
    # (visualize_predictions already does this; this one didn't).
    plt.close()

def visualize_predictions(y_true, y_pred, save_path, mode):
    """Plot true vs. predicted series over sample index and save the figure
    as '<mode>_scatter_predictions.png' under `save_path`."""
    plt.figure(figsize=(24, 8))

    # Flatten to 1-D so plotting works regardless of incoming shape.
    true_vals = np.array(y_true).flatten()
    pred_vals = np.array(y_pred).flatten()

    # X axis is simply the sample index.
    sample_idx = np.arange(len(true_vals))

    plt.plot(sample_idx, true_vals, color='blue', label='True Values', alpha=0.6)
    plt.plot(sample_idx, pred_vals, color='red', label='Predicted Values', alpha=0.6)
    plt.xlabel('Sample')
    plt.ylabel('Value')
    plt.title(f'Prediction vs True ({mode})')
    plt.legend()
    plt.tight_layout()
    plt.savefig(os.path.join(save_path, f'{mode}_scatter_predictions.png'))
    plt.close()



# Round a channel count up to an aligned multiple (helper for pruning below)
def round_up_multiple(x, base=16):
    """Round `x` up to the nearest multiple of `base` (ceiling division)."""
    return -(-x // base) * base

######################### Filter (kernel) pruning — begin ####################################
def get_kept_channels(weight_tensor, amount=0.3, n=2, dim=0):
    """Pick the channel indices to keep for filter pruning.

    The `amount` fraction of channels with the smallest Ln norm (taken over
    every dim except `dim`) is dropped; the survivors are then padded, by
    cycling the kept indices, up to the next multiple of 16 so the slim
    conv layer gets a hardware-friendly channel count.

    Args:
        weight_tensor: conv weight, e.g. (out_ch, in_ch, kernel).
        amount: fraction of channels to prune.
        n: norm order used for ranking.
        dim: channel dimension to prune along (0 = output channels).

    Returns:
        Sorted-then-padded list of channel indices (may contain repeats).
    """
    reduce_dims = tuple(i for i in range(weight_tensor.dim()) if i != dim)
    norms = weight_tensor.norm(p=n, dim=reduce_dims)
    num_channels = weight_tensor.shape[dim]
    num_prune = int(amount * num_channels)
    _, prune_idx = torch.topk(norms, k=num_prune, largest=False)
    pruned = set(prune_idx.cpu().numpy())
    keep_idx = sorted(set(range(num_channels)) - pruned)

    # Pad up to the next multiple of 16 (round-up inlined so this block is
    # self-contained). BUGFIX: the previous single-slice padding
    # (`keep_idx[:missing]`) fell short whenever fewer than half the target
    # length remained (e.g. 2 kept -> padded to 4, not 16); cycle until full.
    # The old "trim if too long" branch was dead code: rounding up can never
    # produce a target below the current length.
    target_len = 16 * ((len(keep_idx) + 15) // 16)
    while 0 < len(keep_idx) < target_len:
        keep_idx += keep_idx[:target_len - len(keep_idx)]
    return keep_idx


# 小模型结构
class SlimModelCNN(nn.Module):
    """Filter-pruned variant of model_cnn.

    conv1/conv2 widths come from the kept-channel counts; layer names match
    model_cnn so pruned weights can be copied over by name.
    NOTE(review): conv4's 32 input channels imply an input sequence length
    of 128 (two halving pools) — confirm callers always pass length-128 windows.
    """

    def __init__(self, ch1, ch2):
        super(SlimModelCNN, self).__init__()
        self.conv1 = nn.Conv1d(15, ch1, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool1d(2)
        self.conv2 = nn.Conv1d(ch1, ch2, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool1d(2)
        self.conv3 = nn.Conv1d(ch2, 1, kernel_size=3, padding=1)
        self.conv4 = nn.Conv1d(32, 1, kernel_size=1)
        self.relu = nn.ReLU()

    def forward(self, x):
        # (batch, seq_len, features) -> channels-first for Conv1d.
        out = self.pool1(self.relu(self.conv1(x.permute(0, 2, 1))))
        out = self.pool2(self.relu(self.conv2(out)))
        out = self.conv3(out)
        # Swap back so conv4 mixes along the (downsampled) time axis.
        out = self.conv4(out.permute(0, 2, 1))
        return out.squeeze(1)


# 权重迁移函数
def transfer_weights_model_cnn(original_model, slim_model, idx1, idx2):
    """Copy the kept-channel weights from the original CNN into the slim one.

    Args:
        original_model / slim_model: modules exposing conv1..conv4.
        idx1: conv1 output channels kept (these become conv2's input channels).
        idx2: conv2 output channels kept (these become conv3's input channels).

    conv4 is untouched by filter pruning and is copied verbatim.
    """
    # Single no_grad scope — the original nested a second, redundant one.
    with torch.no_grad():
        slim_model.conv1.weight.copy_(original_model.conv1.weight[idx1])
        slim_model.conv1.bias.copy_(original_model.conv1.bias[idx1])

        slim_model.conv2.weight.copy_(original_model.conv2.weight[idx2][:, idx1])
        slim_model.conv2.bias.copy_(original_model.conv2.bias[idx2])

        # idx2 indexes conv2's kept outputs -> conv3's input channels.
        slim_model.conv3.weight.copy_(original_model.conv3.weight[:, idx2])
        slim_model.conv3.bias.copy_(original_model.conv3.bias)

        slim_model.conv4.load_state_dict(original_model.conv4.state_dict())


def build_and_save_slim_model_cnn(model, save_path, amount=0.3):
    """Build a filter-pruned copy of `model`, port the weights, and save it.

    Returns:
        (slim_model, idx1, idx2) — the kept-channel index lists are returned
        so callers can persist them for later evaluation.
    """
    idx1 = get_kept_channels(model.conv1.weight, amount)
    idx2 = get_kept_channels(model.conv2.weight, amount)

    # Keep the slim model on the same device as the original's weights.
    target_device = model.conv1.weight.device
    slim_model = SlimModelCNN(len(idx1), len(idx2)).to(target_device)
    transfer_weights_model_cnn(model, slim_model, idx1, idx2)

    torch.save(slim_model.state_dict(), save_path)
    print(f"Slim model saved to {save_path}")
    return slim_model, idx1, idx2

######################### Filter (kernel) pruning — end ####################################

########################### Channel pruning — begin ####################################

def get_kept_input_channels(weight_tensor, amount=0.3, n=2):
    """Return the sorted input-channel indices that survive channel pruning.

    Each input channel is scored by its Ln norm over the output-channel and
    kernel dimensions (dims 0 and 2); the `amount` fraction with the
    smallest norms is dropped.
    """
    in_channels = weight_tensor.shape[1]
    channel_norms = weight_tensor.norm(p=n, dim=(0, 2))
    k = int(amount * in_channels)
    _, drop = torch.topk(channel_norms, k=k, largest=False)
    dropped = set(drop.cpu().numpy())
    return sorted(i for i in range(in_channels) if i not in dropped)

class SlimModelCNN_ChannelPruned(nn.Module):
    """Variant of model_cnn with conv2's *input* channels pruned.

    conv1 stays full-width; after pool1 only the channels listed in
    `kept_input_idx_conv2` are forwarded into the narrower conv2. Layer
    names match model_cnn so weights transfer by name.
    """

    def __init__(self, ch1, kept_input_idx_conv2):
        super(SlimModelCNN_ChannelPruned, self).__init__()
        # Indices of conv1 outputs that conv2 still consumes.
        self.kept_idx = kept_input_idx_conv2
        self.conv1 = nn.Conv1d(15, ch1, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool1d(2)
        self.conv2 = nn.Conv1d(len(kept_input_idx_conv2), 8, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool1d(2)
        self.conv3 = nn.Conv1d(8, 1, kernel_size=3, padding=1)
        self.conv4 = nn.Conv1d(32, 1, kernel_size=1)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = x.permute(0, 2, 1)
        out = self.pool1(self.relu(self.conv1(out)))
        # Drop the pruned conv1 channels before conv2 — this is the pruning.
        out = out[:, self.kept_idx, :]
        out = self.pool2(self.relu(self.conv2(out)))
        out = self.conv3(out)
        out = self.conv4(out.permute(0, 2, 1))
        return out.squeeze(1)

def transfer_weights_channel_pruned(model, slim_model, kept_input_idx):
    """Port weights into the channel-pruned model.

    Only conv2 loses input channels; conv1, conv3 and conv4 are copied
    unchanged by name.
    """
    with torch.no_grad():
        for name in ('conv1', 'conv3', 'conv4'):
            getattr(slim_model, name).load_state_dict(getattr(model, name).state_dict())
        # conv2 keeps all output channels but only the selected input channels.
        slim_model.conv2.weight.copy_(model.conv2.weight[:, kept_input_idx, :])
        slim_model.conv2.bias.copy_(model.conv2.bias)

def build_and_save_channel_pruned_model(model, save_path, amount=0.3):
    """Build a conv2-input-channel-pruned copy of `model`, port weights,
    save its state dict, and return it with the kept-index list."""
    kept_idx = get_kept_input_channels(model.conv2.weight, amount)

    # conv1's width is unchanged by this pruning scheme.
    ch1 = model.conv1.out_channels
    target_device = model.conv1.weight.device
    slim_model = SlimModelCNN_ChannelPruned(ch1, kept_idx).to(target_device)
    transfer_weights_channel_pruned(model, slim_model, kept_idx)

    torch.save(slim_model.state_dict(), save_path)
    print(f"Channel-pruned model saved to {save_path}")
    return slim_model, kept_idx


########################### Channel pruning — end ####################################

class spatial_attention(nn.Module):
    '''Spatial (channel-wise) attention: a shared bottleneck MLP scores the
    max- and avg-pooled channel descriptors and the two scores are summed.
    Note: the sigmoid module is registered but not applied here — the caller
    applies its own gate.'''

    def __init__(self, spatial_dim, ratio):
        super(spatial_attention, self).__init__()
        self.spatial_dim = spatial_dim
        self.fc = nn.Sequential(
            nn.Linear(spatial_dim, spatial_dim//ratio, False),
            nn.ReLU(),
            nn.Linear(spatial_dim//ratio, spatial_dim, False)
        )
        self.max_pool = nn.AdaptiveMaxPool1d(1)
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.sigmoid = nn.Sigmoid()  # unused in forward; kept as-is

    def forward(self, x):
        batch, channels, _ = x.size()
        # Pool the time axis away, leaving one descriptor per channel.
        pooled_max = self.max_pool(x).view([batch, channels])
        pooled_avg = self.avg_pool(x).view([batch, channels])
        return self.fc(pooled_max) + self.fc(pooled_avg)


class temporal_attention(nn.Module):
    '''Temporal attention: a bias-free bottleneck MLP applied along the last
    (time) axis of the input.'''

    def __init__(self, temporal_dim, ratio):
        super(temporal_attention, self).__init__()
        hidden = temporal_dim//ratio
        self.fc = nn.Sequential(
            nn.Linear(temporal_dim, hidden, False),
            nn.ReLU(),
            nn.Linear(hidden, temporal_dim, False)
        )

    def forward(self, x):
        return self.fc(x)


class spatial_temporal_attention(nn.Module):
    '''Spatio-temporal attention gate: scales x by a clamped sigmoid gate.

    As currently wired, only the temporal branch feeds the gate; the spatial
    branch is still computed (and its sum variant kept commented out) to
    preserve the original's easy toggle between the two configurations.
    NOTE(review): the default ratio=0.5 would make spatial_dim//ratio a
    float, which nn.Linear rejects — instantiate with an integer ratio
    (model_cnn uses ratio=5).'''

    def __init__(self, spatial_dim, temporal_dim, ratio=0.5):
        super(spatial_temporal_attention, self).__init__()
        self.spatial_dim = spatial_dim
        self.temporal_dim = temporal_dim
        self.ratio = ratio
        self.spatial_attention = spatial_attention(self.spatial_dim, ratio)
        self.temporal_attention = temporal_attention(self.temporal_dim, ratio)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        batch, channels, steps = x.size()
        # Spatial scores broadcast over time so they could be added to the
        # temporal scores (currently unused).
        spatial_out = self.spatial_attention(x).unsqueeze(2).expand(batch, channels, steps)
        temporal_out = self.temporal_attention(x)
        # gate_logits = spatial_out + temporal_out  # spatial + temporal variant
        gate_logits = temporal_out  # temporal-only variant
        gate = self.sigmoid(gate_logits)
        gate = torch.clamp(gate, 0.01)  # attention sparsification floor
        return x * gate




class se_block(nn.Module):
    """Squeeze-and-Excitation: global average pool -> bottleneck MLP ->
    per-channel sigmoid gate multiplied onto the input."""

    def __init__(self, channel, ratio=5):
        super(se_block, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Sequential(
                nn.Linear(channel, channel // ratio, bias=False),
                nn.ReLU(inplace=True),
                nn.Linear(channel // ratio, channel, bias=False),
                nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels, _ = x.size()
        gate = self.avg_pool(x).view(batch, channels)
        gate = self.fc(gate).view(batch, channels, 1)
        return x * gate

class eca_block(nn.Module):
    """Efficient Channel Attention: a 1-D conv over the pooled channel
    descriptor, with kernel size derived adaptively from the channel count."""

    def __init__(self, channel, b=1, gamma=2):
        super(eca_block, self).__init__()
        # Adaptive kernel size, forced odd so the conv stays centered.
        k = int(abs((math.log(channel, 2) + b) / gamma))
        kernel_size = k if k % 2 else k + 1

        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size,
                              padding=(kernel_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        gate = self.avg_pool(x).permute(0, 2, 1)
        gate = self.sigmoid(self.conv(gate)).permute(0, 2, 1)
        # Floor the gate so no channel is fully suppressed.
        gate = torch.clamp(gate, min=0.01)
        return x * gate.expand_as(x)

class model_cnn(nn.Module):
    """Baseline 1-D CNN regressor for (batch, 128, 15) windows -> (batch, 1).

    The attention modules (sta / se / eca) are instantiated but not called in
    forward(); they are kept so existing checkpoints' state dicts still match.
    """

    def __init__(self):
        super(model_cnn, self).__init__()
        self.sta = spatial_temporal_attention(spatial_dim=32, temporal_dim=18, ratio=5)
        self.se = se_block(32)
        self.eca = eca_block(32)
        self.conv1 = nn.Conv1d(in_channels=15, out_channels=16, kernel_size=3, stride=1, padding=1)
        self.pool1 = nn.MaxPool1d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv1d(in_channels=16, out_channels=8, kernel_size=3, stride=1, padding=1)
        self.pool2 = nn.MaxPool1d(kernel_size=2, stride=2)
        self.conv3 = nn.Conv1d(in_channels=8, out_channels=1, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv1d(in_channels=32, out_channels=1, kernel_size=1, stride=1)
        self.relu = nn.ReLU()

    def forward(self, x):
        # (batch, 128, 15) -> channels-first (batch, 15, 128).
        out = self.pool1(self.relu(self.conv1(x.permute(0, 2, 1))))
        out = self.pool2(self.relu(self.conv2(out)))
        out = self.conv3(out)        # -> (batch, 1, 32)
        out = out.permute(0, 2, 1)   # -> (batch, 32, 1): conv4 mixes time steps
        out = self.conv4(out)        # -> (batch, 1, 1)
        return out.squeeze(1)        # -> (batch, 1)
def benchmark_model(model, input_shape, repeat=100):
    """Time single-batch inference and return the average latency in ms.

    Args:
        model: module to time; the input is placed on whatever device the
            model's parameters live on (the original used a global device,
            which could mismatch the model).
        input_shape: optional explicit input shape; when falsy, defaults to
            (1, 128, model.conv1.in_channels) to match this file's CNNs.
            (Previously this parameter was silently ignored.)
        repeat: number of timed forward passes.

    Returns:
        Average latency per run in milliseconds (also printed).
    """
    model.eval()
    try:
        run_device = next(model.parameters()).device
    except StopIteration:
        run_device = torch.device("cpu")  # parameter-less model
    if input_shape:
        x = torch.randn(*input_shape, device=run_device)
    else:
        # Default: (batch, seq_len, features) as expected by model_cnn & co.
        x = torch.randn(1, 128, model.conv1.in_channels, device=run_device)
    with torch.no_grad():
        start = time.time()
        for _ in range(repeat):
            _ = model(x)
        end = time.time()
    avg_ms = (end - start) / repeat * 1000
    print(f"Avg inference time over {repeat} runs: {avg_ms:.2f} ms")
    return avg_ms


def train_model(model, train_loader, criterion, optimizer, scheduler, y_scaler, save_dir, epochs=20):
    """Train `model`, then report regression metrics on the training data.

    Args:
        model: network producing (batch, 1) predictions; moved batches use the
            module-level `device`.
        train_loader: DataLoader of (inputs, targets); it is also re-iterated
            afterwards to compute the "[Train Accuracy]" metrics (note: if the
            loader shuffles, the metric pass sees a different order — harmless,
            since preds and targets are collected pairwise per batch).
        criterion: loss function.
        optimizer: optimizer stepped per batch.
        scheduler: LR scheduler stepped once per epoch.
        y_scaler: fitted target scaler used to invert normalization before
            computing MSE/RMSE/MAE/R².
        save_dir: directory for the prediction-vs-truth plot.
        epochs: number of passes over train_loader.

    Returns:
        List of average training losses, one entry per epoch.
    """
    model.train()
    losses = []
    for epoch in range(epochs):
        running_loss = 0.0
        for inputs, targets in tqdm(train_loader, desc=f"Epoch {epoch + 1}/{epochs}"):
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        scheduler.step()
        avg_loss = running_loss / len(train_loader)
        losses.append(avg_loss)
        print(f"Epoch [{epoch + 1}/{epochs}] - Loss: {avg_loss:.6f}")

    # Metric pass over the training data.
    model.eval()
    all_preds = []
    all_targets = []
    with torch.no_grad():
        for inputs, targets in train_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            # BUGFIX: reshape(-1) instead of squeeze() — squeeze() produces a
            # 0-d array when the final batch has a single sample, and
            # list.extend() cannot iterate a 0-d array.
            all_preds.extend(model(inputs).cpu().numpy().reshape(-1))
            all_targets.extend(targets.cpu().numpy().reshape(-1))

        # Scalers expect 2-D (n_samples, 1) input.
        all_preds = np.array(all_preds).reshape(-1, 1)
        all_targets = np.array(all_targets).reshape(-1, 1)

        # Invert target normalization with the y-scaler (not the x-scaler).
        all_preds = y_scaler.inverse_transform(all_preds).flatten()
        all_targets = y_scaler.inverse_transform(all_targets).flatten()

        mse = mean_squared_error(all_targets, all_preds)
        rmse = np.sqrt(mse)
        mae = mean_absolute_error(all_targets, all_preds)
        r2 = r2_score(all_targets, all_preds)

        print(f"[Train Accuracy]")
        print(f"  MSE  = {mse:.6f}")
        print(f"  RMSE = {rmse:.6f}")
        print(f"  MAE  = {mae:.6f}")
        print(f"  R²   = {r2:.6f}")

        visualize_predictions(all_targets, all_preds, save_dir, 'train-剪枝前')
        print(f"Sample of Predicted Values (train): {all_preds[:7]}")
        print(f"Sample of True Values (train): {all_targets[:7]}")

    return losses



def main():
    """End-to-end driver: load the flight data, then either train (with
    optional structured pruning and slim-model export) or evaluate a saved
    model. All configuration lives in the local constants below; data and
    output paths are hard-coded to this project's layout.
    """
    save_dir = "/home/scuee_user06/myh/轻量化/code/model_pruning-512"
    os.makedirs(save_dir, exist_ok=True)

    batch_size = 128
    learning_rate = 0.005
    epochs = 512
    mode = 'train'  # mode selection: 'train' or 'eval'
    amount = 0.0  # pruning fraction; 0.0 means no channels are actually zeroed
    pruning_type = 'channel'  # 'filter' or 'channel'

    train_data = load_npy_from_folder("/home/scuee_user06/myh/轻量化/飞行数据/processed_train-new")
    test_data = load_npy_from_folder("/home/scuee_user06/myh/轻量化/飞行数据/processed_test-new")
    # Training phase
    if mode == 'train':
        X_train, y_train, y_scaler = preprocess_data(train_data, save_dir, is_train=True)
        X_test, y_test, _ = preprocess_data(test_data, save_dir, is_train=False)

        model = model_cnn().to(device)
        apply_and_finalize_structured_pruning(model, amount=amount)
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        criterion = nn.MSELoss()
        scheduler = cosine_annealing_scheduler(optimizer, epochs=epochs)
        train_loader = DataLoader(data.TensorDataset(X_train, y_train), batch_size=batch_size, shuffle=True)

        losses = train_model(model, train_loader, criterion, optimizer, scheduler, y_scaler, save_dir, epochs=epochs)
        torch.save(model.state_dict(), os.path.join(save_dir, "cnn_model-512-剪枝前.pth"))
        total_params = sum(p.numel() for p in model.parameters())
        print(f"Final pruned model has {total_params:,} parameters")

        # Export a physically smaller model according to the chosen scheme,
        # saving the kept-channel indices needed to rebuild it at eval time.
        if pruning_type == 'filter':
            slim_model, idx1, idx2 = build_and_save_slim_model_cnn(model, os.path.join(save_dir, "cnn_model_slim.pth"),
                                                                   amount=amount)
            np.save(os.path.join(save_dir, 'idx1.npy'), np.array(idx1))
            np.save(os.path.join(save_dir, 'idx2.npy'), np.array(idx2))
        elif pruning_type == 'channel':
            slim_model, kept_input_idx = build_and_save_channel_pruned_model(model, os.path.join(save_dir,
                                                                                                 "cnn_model_channel_pruned.pth"),
                                                                             amount=amount)
            np.save(os.path.join(save_dir, 'kept_input_idx.npy'), np.array(kept_input_idx))

        visualize_loss(losses, save_dir)
        benchmark_model(model, input_shape=None)  # None -> benchmark builds its default input


    elif mode == 'eval':
        if amount == 0.0:
            # No pruning: load the full model
            model = model_cnn().to(device)
            load_model(model, os.path.join(save_dir, "cnn_model-512-剪枝前.pth"))
        elif pruning_type == 'filter':
            # Load the filter-pruned model
            idx1 = np.load(os.path.join(save_dir, 'idx1.npy'))
            idx2 = np.load(os.path.join(save_dir, 'idx2.npy'))
            model = SlimModelCNN(len(idx1), len(idx2)).to(device)
            load_model(model, os.path.join(save_dir, "cnn_model_slim.pth"))
        elif pruning_type == 'channel':
            # Load the channel-pruned model
            kept_input_idx = np.load(os.path.join(save_dir, 'kept_input_idx.npy')).tolist()
            model = SlimModelCNN_ChannelPruned(16, kept_input_idx).to(device)
            load_model(model, os.path.join(save_dir, "cnn_model_channel_pruned.pth"))
        else:
            raise ValueError("Unknown pruning type")
        model.eval()
        # Load the fitted target scaler
        y_scaler_path = os.path.join(save_dir, 'y_scaler.pkl')
        y_scaler = joblib.load(y_scaler_path)
        X_test, y_test, _ = preprocess_data(test_data, save_dir, is_train=False)
        with torch.no_grad():
            X_test = X_test.to(device)
            y_pred = model(X_test).cpu().numpy().squeeze()
            y_true = y_test.cpu().numpy().squeeze()

            # Reshape to 2-D before inverse-normalizing
            y_pred = y_scaler.inverse_transform(y_pred.reshape(-1, 1)).flatten()
            y_true = y_scaler.inverse_transform(y_true.reshape(-1, 1)).flatten()

            mse = mean_squared_error(y_true, y_pred)
            rmse = np.sqrt(mse)
            mae = mean_absolute_error(y_true, y_pred)
            r2 = r2_score(y_true, y_pred)

            print(f"[Eval Accuracy]")
            print(f"  MSE  = {mse:.6f}")
            print(f"  RMSE = {rmse:.6f}")
            print(f"  MAE  = {mae:.6f}")
            print(f"  R²   = {r2:.6f}")

            visualize_predictions(y_true, y_pred, save_dir, 'eval-剪枝前')
            print(f"Sample of Predicted Values: {y_pred[:7]}")
            print(f"Sample of True Values: {y_true[:7]}")

            benchmark_model(model, input_shape=None)

            df = pd.DataFrame({
                'True': y_true,
                'Pred': y_pred
            })
            df.to_csv(os.path.join(save_dir, 'predictions_剪枝前.csv'), index=False)


    else:
        print("Invalid mode: choose 'train' or 'eval'")


# Script entry point: fix all RNG seeds first so runs are reproducible.
if __name__ == "__main__":
    set_seed(42)
    main()
