import os

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from keras.callbacks import EarlyStopping
from keras.layers import Dense, Dropout, LSTM
from keras.models import Sequential
# NOTE(review): on modern Keras/TF the scikit-learn wrapper lives in the
# separate `scikeras` package; this import targets the classic Keras API.
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.model_selection import StratifiedKFold, cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler

# --- 1. 四元数工具函数 ---
def quat_multiply(q1, q2):
    """Hamilton product of two quaternions (batched; last axis is [w, x, y, z])."""
    a, b, c, d = (q1[..., k] for k in range(4))
    e, f, g, h = (q2[..., k] for k in range(4))
    return np.stack(
        [
            a * e - b * f - c * g - d * h,  # scalar part
            a * f + b * e + c * h - d * g,  # i component
            a * g - b * h + c * e + d * f,  # j component
            a * h + b * g - c * f + d * e,  # k component
        ],
        axis=-1,
    )


def quat_inverse(q):
    """Quaternion conjugate (batched).

    Negates only the vector (imaginary) part; for unit quaternions the
    conjugate equals the inverse. The input array is left untouched.
    """
    return np.concatenate([q[..., :1], -q[..., 1:]], axis=-1)


def quat_normalize(q):
    """Normalize quaternions to unit length (batched); zero-norm rows map to zeros."""
    norm = np.sqrt(np.sum(q * q, axis=-1, keepdims=True))
    result = np.zeros_like(q)
    # `where` guards against division by zero; guarded entries stay 0.
    np.divide(q, norm, out=result, where=norm != 0)
    return result


def quat_to_euler(q):
    """Convert quaternions to ZYX Euler angles (roll, pitch, yaw), batched.

    The pitch argument is clipped into [-1, 1], which yields exactly
    sign(sinp) * pi/2 at/beyond gimbal lock — same result as the explicit
    np.where guard, without evaluating arcsin out of range.
    """
    w, x, y, z = (q[..., k] for k in range(4))

    roll = np.arctan2(2.0 * (w * x + y * z), 1.0 - 2.0 * (x * x + y * y))
    pitch = np.arcsin(np.clip(2.0 * (w * y - z * x), -1.0, 1.0))
    yaw = np.arctan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))

    return np.stack([roll, pitch, yaw], axis=-1)


def quat_mean(q_array):
    """Approximate mean quaternion (Karcher-mean approximation).

    Averages component-wise and renormalizes — adequate when the rotations
    in `q_array` are close to each other. A zero-norm average maps to zeros.
    """
    avg = np.mean(q_array, axis=0)
    norm = np.linalg.norm(avg)
    if norm == 0:
        return np.zeros_like(avg)
    return avg / norm


def quat_cosine_similarity(q1, q2):
    """Cosine similarity between two (assumed unit) quaternions.

    The absolute value is taken because q and -q represent the same
    rotation; the denominator is 1 for normalized inputs.
    """
    return np.abs((q1 * q2).sum())


# --- 2. 动态分割算法 ---
def adaptive_segmentation(Q, window_size=10, change_threshold_percentile=85, sim_threshold=0.99, min_segment_length=10):
    """
    Adaptive segmentation driven by Euler-angle change rate and quaternion similarity.

    Args:
        Q: quaternion sequence, shape (N, 4).
        window_size: sliding-window size for smoothing the change-rate signal.
        change_threshold_percentile: percentile of the smoothed signal used as
            the change-rate split threshold.
        sim_threshold: cosine-similarity threshold against the previous
            segment's mean quaternion.
        min_segment_length: minimum allowed segment length.

    Returns:
        List of index arrays (indices into Q), one per segment.
    """
    # Sequences shorter than the minimum segment length are returned whole.
    if Q.shape[0] < min_segment_length:
        return [np.arange(Q.shape[0])]

    # Per-step Euler-angle change magnitude; prepending the first row keeps
    # the diff array the same length as Q.
    euler = quat_to_euler(Q)
    diff_euler = np.diff(euler, axis=0, prepend=euler[0:1])
    diff_norm = np.linalg.norm(diff_euler, axis=1)

    # Smooth the change rate with a moving average of `window_size` steps.
    if len(diff_norm) >= window_size:
        local_mean_change = np.convolve(diff_norm, np.ones(window_size) / window_size, mode='valid')
        # Edge-pad so the smoothed signal keeps the original length.
        local_mean_change_padded = np.pad(local_mean_change,
                                          (window_size // 2, window_size - window_size // 2 - 1),
                                          mode='edge')
    else:
        local_mean_change_padded = diff_norm

    # Change-rate threshold: a percentile of the smoothed signal itself.
    change_threshold = np.percentile(local_mean_change_padded, change_threshold_percentile)

    # Segmentation sweep.
    segments = []
    start_idx = 0
    prev_segment_mean_q = None  # mean quaternion of the previous segment

    for i in range(min_segment_length, Q.shape[0]):
        is_split_candidate = False

        # Condition 1: smoothed change rate exceeds the threshold.
        if local_mean_change_padded[i] > change_threshold:
            is_split_candidate = True

        # Condition 2: cosine similarity against the previous segment's mean.
        # NOTE(review): a split is forced when similarity is HIGH
        # (> sim_threshold), i.e. to break up overly long runs of
        # near-identical motion — confirm this inversion of the usual
        # "split on dissimilarity" rule is intentional.
        if not is_split_candidate and prev_segment_mean_q is not None and (i - start_idx) >= min_segment_length:
            current_segment_q = Q[start_idx:i + 1]
            current_mean_q = quat_mean(current_segment_q)
            similarity = quat_cosine_similarity(prev_segment_mean_q, current_mean_q)
            # Force a split when the similarity is too high.
            if similarity > sim_threshold:
                is_split_candidate = True

        # Split only once the candidate segment is long enough.
        if is_split_candidate and (i - start_idx) >= min_segment_length:
            segments.append(np.arange(start_idx, i))
            start_idx = i
            # Refresh the previous-segment mean quaternion.
            # NOTE(review): only the last `min_segment_length` samples before
            # the split point are averaged, not the whole previous segment —
            # confirm this approximation is intended.
            if start_idx > 0:
                prev_seg_q = Q[start_idx - min_segment_length:start_idx]
                prev_segment_mean_q = quat_mean(prev_seg_q)
            else:
                prev_segment_mean_q = None

    # Append the trailing segment.
    if start_idx < Q.shape[0]:
        segments.append(np.arange(start_idx, Q.shape[0]))

    return segments


# --- 3. 特征提取 ---
def extract_features(Q_segment):
    """Extract a feature vector for a single motion segment.

    Args:
        Q_segment: quaternion sequence of one segment, shape (N, 4).

    Returns:
        18-dim feature vector: 12 time-domain values (mean/std of the Euler
        angles and of their first differences) + 6 frequency-domain values
        (mean/std of the half-spectrum magnitude per Euler channel).
    """
    # Segments shorter than 2 samples cannot produce diff/FFT features.
    if Q_segment.shape[0] < 2:
        # BUG FIX: this used to return np.zeros(12), but the normal path
        # returns 18 values (12 time-domain + 6 frequency-domain); mixing
        # lengths would break np.array() stacking downstream.
        return np.zeros(18)

    # --- Time-domain features ---
    euler = quat_to_euler(Q_segment)

    mean_euler = np.mean(euler, axis=0)  # mean Euler angles
    std_euler = np.std(euler, axis=0)  # Euler-angle standard deviation

    diff_euler = np.diff(euler, axis=0)
    mean_diff_euler = np.mean(diff_euler, axis=0)  # mean of first differences
    std_diff_euler = np.std(diff_euler, axis=0)  # std of first differences

    time_features = np.concatenate([mean_euler, std_euler, mean_diff_euler, std_diff_euler])

    # --- Frequency-domain features ---
    # FFT per Euler channel; keep the first half of the magnitude spectrum
    # (the second half mirrors it for real-valued input).
    fft_vals = np.fft.fft(euler, axis=0)
    magnitude_spectrum = np.abs(fft_vals[:euler.shape[0] // 2, :])

    mean_freq = np.mean(magnitude_spectrum, axis=0)  # spectrum mean
    std_freq = np.std(magnitude_spectrum, axis=0)  # spectrum std

    freq_features = np.concatenate([mean_freq, std_freq])

    # --- Combine all features ---
    return np.concatenate([time_features, freq_features])


# --- 4. 数据加载与预处理 ---
def load_and_preprocess_data_with_segmentation(data):
    """Preprocess raw data: per-class dynamic segmentation + feature extraction.

    Args:
        data: 2-D array whose first 4 columns are quaternions (w, x, y, z)
            and whose 5th column is the integer action label. Fewer than 5
            columns triggers a fallback to simulated data.

    Returns:
        Tuple (features, labels): one feature vector and one label per
        extracted segment.

    Raises:
        ValueError: if `data` is not 2-D, or if no feature samples were
            produced.
    """
    print("开始数据预处理和动态分割...")

    # Validate dimensionality.
    if data.ndim != 2:
        raise ValueError("输入数据必须是2维数组")

    # Fall back to simulated data when the column count is insufficient.
    if data.shape[1] < 5:
        print(f"警告: 数据只有 {data.shape[1]} 列，但需要至少5列(4列四元数+1列标签)。使用模拟数据。")
        data = generate_sample_data(num_samples_per_class=50)

    Q_full = data[:, :4]  # first 4 columns: quaternion (w, x, y, z)
    labels_full = data[:, 4].astype(int)  # 5th column: action label

    segmented_features = []
    segmented_labels = []

    # All distinct action labels present in the data.
    unique_labels = np.unique(labels_full)
    print(f"发现 {len(unique_labels)} 个动作类别: {unique_labels}")

    # Process each action class independently.
    for label in unique_labels:
        # Row indices belonging to the current class.
        label_indices = np.where(labels_full == label)[0]

        if len(label_indices) == 0:
            continue

        # NOTE(review): this slice assumes each class occupies one contiguous
        # run of rows. If labels are interleaved (the simulated generator
        # partially shuffles rows), Q_block also picks up rows from other
        # classes — confirm the expected data layout.
        start_idx = label_indices[0]
        end_idx = label_indices[-1] + 1
        Q_block = Q_full[start_idx:end_idx]

        # Apply the adaptive segmentation algorithm.
        segments = adaptive_segmentation(Q_block,
                                         window_size=15,
                                         change_threshold_percentile=80,
                                         sim_threshold=0.995,
                                         min_segment_length=15)
        print(f"  类别 {label}: 原始长度 {len(Q_block)}, 分割为 {len(segments)} 个片段")

        # Extract one feature vector per segment.
        for seg_indices in segments:
            if len(seg_indices) >= 2:  # skip degenerate segments
                Q_segment = Q_block[seg_indices]
                try:
                    feature_vector = extract_features(Q_segment)
                    segmented_features.append(feature_vector)
                    segmented_labels.append(label)
                except Exception as e:
                    # Best-effort: log and skip segments whose feature
                    # extraction fails rather than aborting the whole run.
                    print(f"  警告: 提取特征时出错 (片段长度 {len(seg_indices)}): {e}")

    # Stack into numpy arrays.
    segmented_features = np.array(segmented_features)
    segmented_labels = np.array(segmented_labels)

    if segmented_features.size == 0:
        raise ValueError("预处理后没有生成任何特征样本。")

    print(f"预处理完成。共生成 {segmented_features.shape[0]} 个特征样本")
    return segmented_features, segmented_labels


# --- 5. LSTM模型构建 ---
# 改进后的ConvLSTM模型构建函数
def create_convlstm_model(input_shape, num_classes, class_weights=None):
    """Create and compile a ConvLSTM classification model.

    Args:
        input_shape: per-sample input shape (timesteps, height, width,
            channels). The caller already pads and reshapes the flat
            feature vectors into a square grid of this shape.
        num_classes: number of output classes.
        class_weights: optional class-weight dict; when provided, weighted
            accuracy is tracked as an extra metric.

    Returns:
        A compiled Keras model.
    """
    # Local import keeps the heavy layer imports out of module load.
    # BUG FIX: Dropout and Dense were used below but never imported.
    from keras.layers import ConvLSTM2D, Dense, Dropout, Flatten

    # BUG FIX: the previous version prepended a Reshape layer whose
    # input_shape was a 2-tuple while the caller feeds data already shaped
    # (samples, timesteps, height, width, channels); the model now consumes
    # `input_shape` directly.
    model = Sequential([
        # First ConvLSTM layer; return_sequences=True passes the full time
        # dimension on to the next ConvLSTM layer.
        ConvLSTM2D(filters=64, kernel_size=(3, 3),
                   padding='same', return_sequences=True,
                   activation='tanh', recurrent_activation='hard_sigmoid',
                   input_shape=input_shape),
        Dropout(0.3),

        # Second ConvLSTM layer collapses the time dimension.
        ConvLSTM2D(filters=32, kernel_size=(3, 3),
                   padding='same', return_sequences=False),
        Dropout(0.3),

        # Flatten the spatial feature maps.
        Flatten(),

        # Fully connected head.
        Dense(256, activation='relu'),
        Dropout(0.4),

        # Softmax output over the action classes.
        Dense(num_classes, activation='softmax')
    ])

    # sparse_categorical_crossentropy expects integer labels.
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'],
                  weighted_metrics=['accuracy'] if class_weights else None)

    return model


# Revised train/evaluate function.
# NOTE(review): a second function with this exact name is defined later in
# this file and shadows this one at import time — confirm which version is
# the intended entry point.
def train_evaluate_and_cv(X, y, class_names, n_splits=5):
    """Train, evaluate and cross-validate the ConvLSTM model (revised version).

    Args:
        X: 2-D feature matrix (samples, features).
        y: integer label vector.
        class_names: ordered class display names.
        n_splits: number of stratified CV folds.

    Returns:
        Tuple (model, history, cv_scores).
    """
    print("\n开始模型训练、评估和交叉验证...")

    # Stratified 80/20 train/test split.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, stratify=y, random_state=42)

    # Standardize: fit on the training set only, apply to both.
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_test_scaled = scaler.transform(X_test)

    # Balanced class weights to counter class imbalance.
    from sklearn.utils.class_weight import compute_class_weight
    unique_classes = np.unique(y_train)
    class_weights = compute_class_weight('balanced', classes=unique_classes, y=y_train)
    class_weight_dict = dict(zip(unique_classes, class_weights))
    print(f"类别权重: {class_weight_dict}")

    # Reshape the flat feature vectors into a square grid for ConvLSTM:
    # smallest square side that can hold all features.
    feature_dim = int(np.sqrt(X_train_scaled.shape[1]))
    if feature_dim * feature_dim < X_train_scaled.shape[1]:
        feature_dim += 1
    padding = feature_dim * feature_dim - X_train_scaled.shape[1]

    # Zero-pad so the feature count fills the square exactly.
    X_train_padded = np.pad(X_train_scaled, ((0, 0), (0, padding)), 'constant')
    X_test_padded = np.pad(X_test_scaled, ((0, 0), (0, padding)), 'constant')

    # Reshape to (samples, timesteps, height, width, channels).
    X_train_lstm = X_train_padded.reshape((X_train_padded.shape[0], 1, feature_dim, feature_dim, 1))
    X_test_lstm = X_test_padded.reshape((X_test_padded.shape[0], 1, feature_dim, feature_dim, 1))
    input_shape = (X_train_lstm.shape[1], X_train_lstm.shape[2], X_train_lstm.shape[3], X_train_lstm.shape[4])

    num_classes = len(class_names)

    # Model training.
    model = create_convlstm_model(input_shape, num_classes, class_weight_dict)

    # Early stopping: halt after 10 stagnant epochs, restore best weights.
    # NOTE(review): EarlyStopping is not imported at the top of this file —
    # it requires e.g. `from keras.callbacks import EarlyStopping`.
    early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)

    print("训练ConvLSTM模型...")
    history = model.fit(X_train_lstm, y_train,
                        validation_data=(X_test_lstm, y_test),
                        epochs=100,
                        batch_size=32,
                        callbacks=[early_stop],
                        class_weight=class_weight_dict,
                        verbose=1)

    # Evaluate on the held-out test set.
    y_pred_probs = model.predict(X_test_lstm)
    y_pred = np.argmax(y_pred_probs, axis=1)

    print("\n--- 分类报告 ---")
    print(classification_report(y_test, y_pred, target_names=class_names))

    # Cross-validation with a simpler stacked-LSTM architecture.
    print(f"\n执行 {n_splits} 折交叉验证...")

    def create_model_for_cv():
        """Build the stacked-LSTM model used for cross-validation."""
        cv_model = Sequential([
            LSTM(256, return_sequences=True, input_shape=(1, X.shape[1])),
            Dropout(0.4),
            LSTM(128),
            Dropout(0.3),
            Dense(64, activation='relu'),
            Dropout(0.2),
            Dense(num_classes, activation='softmax')
        ])
        cv_model.compile(optimizer='adam',
                         loss='sparse_categorical_crossentropy',
                         metrics=['accuracy'])
        return cv_model

    # NOTE(review): the scaler is fit on the FULL dataset before CV, which
    # leaks test-fold statistics into training — consider a sklearn Pipeline.
    scaler_cv = StandardScaler()
    X_scaled_cv = scaler_cv.fit_transform(X)
    X_scaled_cv = X_scaled_cv.reshape((X_scaled_cv.shape[0], 1, X_scaled_cv.shape[1]))

    # Keras -> scikit-learn adapter for cross_val_score.
    # NOTE(review): KerasClassifier is not imported at the top of this file —
    # it requires e.g. `from keras.wrappers.scikit_learn import KerasClassifier`.
    keras_model_wrapper = KerasClassifier(build_fn=create_model_for_cv,
                                          epochs=50,
                                          batch_size=32,
                                          class_weight=class_weight_dict,
                                          verbose=0)

    cv_scores = cross_val_score(keras_model_wrapper, X_scaled_cv, y,
                                cv=StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42),
                                scoring='accuracy')

    print(f"\n{n_splits} 折交叉验证准确率分数: {cv_scores}")
    print(f"平均准确率: {cv_scores.mean():.4f} (± {cv_scores.std() * 2:.4f})")

    plot_results(history, y_test, y_pred, class_names, cv_scores)

    return model, history, cv_scores

# --- 6. 模型训练与评估 ---
import tensorflow as tf



def create_lstm_model(input_shape, num_classes):
    """Build and compile a simple LSTM classifier.

    Args:
        input_shape (tuple): per-sample shape, e.g. (timesteps, features);
            the batch dimension is excluded.
        num_classes (int): number of target classes.

    Returns:
        tf.keras.Model: compiled model, ready for fit().
    """
    model = Sequential([
        # Single LSTM layer; only the final step's output is used.
        LSTM(128, input_shape=input_shape, return_sequences=False),
        # Dropout to curb overfitting.
        Dropout(0.5),
        # Softmax head producing per-class probabilities.
        Dense(num_classes, activation='softmax'),
    ])

    # sparse_categorical_crossentropy expects integer labels; optimizer,
    # loss and metrics can be tuned as needed.
    model.compile(
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy']
    )

    return model


# --- 示例：如何在 train_evaluate_and_cv 函数中调用 ---
# 假设 X_features.shape 为 (samples, timesteps, features)
# input_shape = (X_features.shape[1], X_features.shape[2])
# num_classes = len(class_names)
# model = create_lstm_model(input_shape, num_classes)
# model.summary() # 可用于查看模型结构


def train_evaluate_and_cv(X, y, class_names, n_splits=5):
    """Train, evaluate and cross-validate an LSTM classifier.

    NOTE(review): this definition shadows an earlier function of the same
    name defined above in this file — confirm the duplication is intentional.

    Args:
        X: 2-D feature matrix (samples, features).
        y: integer label vector.
        class_names: ordered class display names.
        n_splits: number of stratified CV folds.

    Returns:
        Tuple (model, history, cv_scores): the trained Keras model, its
        training history, and the per-fold CV accuracy scores.
    """
    print("\n开始模型训练、评估和交叉验证...")

    # --- Data split ---
    # Stratified 80/20 train/test split, preserving the class distribution.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, stratify=y, random_state=42)

    # --- Standardization ---
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)  # fit on the training set only
    X_test_scaled = scaler.transform(X_test)  # reuse the training statistics

    # --- Reshape for the LSTM ---
    # LSTM expects 3-D input (samples, timesteps, features); each sample is
    # treated as a single timestep here.
    X_train_lstm = X_train_scaled.reshape((X_train_scaled.shape[0], 1, X_train_scaled.shape[1]))
    X_test_lstm = X_test_scaled.reshape((X_test_scaled.shape[0], 1, X_test_scaled.shape[1]))
    input_shape = (X_train_lstm.shape[1], X_train_lstm.shape[2])

    # Number of target classes.
    num_classes = len(class_names)

    # --- Model training ---
    model = create_lstm_model(input_shape, num_classes)

    # Early stopping: halt after 10 epochs without val_loss improvement and
    # restore the best weights.
    # NOTE(review): EarlyStopping is not imported at the top of this file —
    # it requires e.g. `from keras.callbacks import EarlyStopping`.
    early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)

    print("训练LSTM模型...")
    history = model.fit(X_train_lstm, y_train,
                        validation_data=(X_test_lstm, y_test),
                        epochs=100,  # maximum epochs
                        batch_size=32,  # batch size
                        callbacks=[early_stop],
                        verbose=1)

    # --- Evaluation ---
    y_pred_probs = model.predict(X_test_lstm)
    y_pred = np.argmax(y_pred_probs, axis=1)  # arg-max class prediction

    print("\n--- 分类报告 ---")
    print(classification_report(y_test, y_pred, target_names=class_names))

    # --- Cross-validation ---
    print(f"\n执行 {n_splits} 折交叉验证...")

    def create_model_for_cv():
        """Build the simple DNN used for cross-validation."""
        cv_model = Sequential([
            Dense(64, activation='relu', input_shape=(X.shape[1],)),
            Dropout(0.3),
            Dense(32, activation='relu'),
            Dropout(0.3),
            Dense(num_classes, activation='softmax')
        ])
        cv_model.compile(optimizer='adam',
                         loss='sparse_categorical_crossentropy',
                         metrics=['accuracy'])
        return cv_model

    # NOTE(review): the scaler is fit on the FULL dataset before CV, which
    # leaks test-fold statistics into training — consider a sklearn Pipeline.
    scaler_cv = StandardScaler()
    X_scaled_cv = scaler_cv.fit_transform(X)

    # Keras -> scikit-learn adapter for cross_val_score.
    # NOTE(review): KerasClassifier is not imported at the top of this file —
    # it requires e.g. `from keras.wrappers.scikit_learn import KerasClassifier`.
    keras_model_wrapper = KerasClassifier(build_fn=create_model_for_cv,
                                          epochs=50,
                                          batch_size=32,
                                          verbose=0)

    # Stratified K-fold cross-validation.
    cv_scores = cross_val_score(keras_model_wrapper, X_scaled_cv, y,
                                cv=StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42),
                                scoring='accuracy')

    print(f"\n{n_splits} 折交叉验证准确率分数: {cv_scores}")
    print(f"平均准确率: {cv_scores.mean():.4f} (± {cv_scores.std() * 2:.4f})")

    # --- Visualization ---
    plot_results(history, y_test, y_pred, class_names, cv_scores)

    return model, history, cv_scores


def plot_results(history, y_true, y_pred, class_names, cv_scores):
    """Plot training curves, confusion matrix and cross-validation scores.

    Args:
        history: Keras History object returned by model.fit().
        y_true: ground-truth labels for the test set.
        y_pred: predicted labels for the test set.
        class_names: ordered class display names.
        cv_scores: array of per-fold cross-validation accuracies.
    """
    fig, ((ax_acc, ax_loss), (ax_cm, ax_cv)) = plt.subplots(2, 2, figsize=(15, 12))

    # 1. Train/validation accuracy curves.
    ax_acc.plot(history.history['accuracy'], label='训练准确率')
    ax_acc.plot(history.history['val_accuracy'], label='验证准确率')
    ax_acc.set_title('模型准确率')
    ax_acc.set_xlabel('训练轮次')
    ax_acc.set_ylabel('准确率')
    ax_acc.legend()

    # 2. Train/validation loss curves.
    ax_loss.plot(history.history['loss'], label='训练损失')
    ax_loss.plot(history.history['val_loss'], label='验证损失')
    ax_loss.set_title('模型损失')
    ax_loss.set_xlabel('训练轮次')
    ax_loss.set_ylabel('损失')
    ax_loss.legend()

    # 3. Confusion matrix heatmap.
    sns.heatmap(confusion_matrix(y_true, y_pred), annot=True, fmt='d', cmap='Blues',
                xticklabels=class_names, yticklabels=class_names, ax=ax_cm)
    ax_cm.set_title('混淆矩阵')
    ax_cm.set_xlabel('预测标签')
    ax_cm.set_ylabel('真实标签')

    # 4. Per-fold CV accuracies with the mean as a dashed reference line.
    folds = range(1, len(cv_scores) + 1)
    ax_cv.bar(folds, cv_scores, color='skyblue', edgecolor='black')
    ax_cv.set_title(f'{len(cv_scores)}折交叉验证分数')
    ax_cv.set_xlabel('折数')
    ax_cv.set_ylabel('准确率')
    ax_cv.set_xticks(folds)
    ax_cv.axhline(y=cv_scores.mean(), color='r', linestyle='--',
                  label=f'平均值: {cv_scores.mean():.4f}')
    ax_cv.legend()

    plt.tight_layout()
    plt.show()


# --- 7. 模拟数据生成 ---
def generate_sample_data(num_samples_per_class=100):
    """Generate simulated quaternion motion data for demonstration.

    Each sequence is a noisy unit quaternion drifting along one
    class-specific component, so the four action classes are weakly
    separable.

    Args:
        num_samples_per_class: number of sequences generated per class.

    Returns:
        Array of shape (total_rows, 5): columns 0-3 hold a unit quaternion
        (w, x, y, z) and column 4 the class id.
    """
    print("生成模拟四元数数据...")
    np.random.seed(42)  # reproducible demo data
    classes = ['拾取', '运输', '释放', '返回']
    # Per class: (quaternion component that drifts, drift rate).
    drift_spec = {'拾取': (3, 0.01), '运输': (1, 0.005), '释放': (2, 0.01), '返回': (0, 0.01)}
    rows = []

    for class_id, class_name in enumerate(classes):
        drift_axis, drift_rate = drift_spec[class_name]
        for _ in range(num_samples_per_class):
            # Random unit base orientation.
            base_q = np.random.randn(4)
            base_q /= np.linalg.norm(base_q)

            # Random sequence length in [50, 150).
            seq_len = np.random.randint(50, 150)

            for step in range(seq_len):
                # Gaussian noise plus a class-specific linear drift term.
                q = base_q + np.random.randn(4) * 0.05
                q[drift_axis] += drift_rate * (step / seq_len)
                q /= np.linalg.norm(q)  # renormalize to unit length
                rows.append(np.concatenate([q, [class_id]]))

    data = np.array(rows)
    # Partially shuffle the rows to mimic real-world ordering noise.
    np.random.shuffle(data[:len(data) // 2])
    print(f"共生成 {data.shape[0]} 行模拟数据。")
    return data


# --- Main program ---
if __name__ == "__main__":
    # Path to the whitespace-delimited data file expected to contain
    # 4 quaternion columns plus 1 label column per row.
    DATA_FILE_PATH = "/root/test.txt"

    # Load from file when it exists; otherwise fall back to simulated data.
    if DATA_FILE_PATH and os.path.exists(DATA_FILE_PATH):
        print(f"从文件 {DATA_FILE_PATH} 加载数据...")
        try:
            raw_data = np.loadtxt(DATA_FILE_PATH)
            print(f"OK成功加载数据，形状: {raw_data.shape}")
        except Exception as e:
            # Best-effort load: any parse/IO failure falls back to simulated data.
            print(f"加载数据文件失败: {e}")
            print("使用模拟数据代替。")
            raw_data = generate_sample_data(num_samples_per_class=50)
    else:
        print("未找到指定数据文件，使用模拟数据。")
        raw_data = generate_sample_data(num_samples_per_class=50)
    # NOTE(review): raw_data is never passed on to the preprocessing/training
    # pipeline within this file's visible scope — confirm the rest of the
    # pipeline is invoked elsewhere.