# main_1.py (modified version)
import numpy as np
import torch
from sklearn.model_selection import train_test_split

from auth_config import RAW_CSI_FEAT_DIM
from dataload_1 import load_raw_train_data, load_raw_test_data, filter_low_quality_samples
from mody_1 import shufflenet_enhanced
from  mody_cnnlstm import shufflenet_lstm_attention_improved
from train_1 import train_with_validation
from evaluate_1 import evaluate_detailed_classification,  semi_supervised_finetune, \
    split_labeled_data_by_ratio


def check_data_shape_compatibility(model, data_sample):
    """Probe whether a single sample can flow through *model*.

    Wraps ``data_sample`` into a batch of one, moves it to the model's
    device, and runs a no-grad forward pass.

    Args:
        model: a torch module with at least one parameter (used to infer the device).
        data_sample: one array-like sample, without the batch dimension.

    Returns:
        True if the forward pass succeeds, False otherwise (the error is printed).
    """
    try:
        target_device = next(model.parameters()).device
        batch = torch.tensor(data_sample).unsqueeze(0).float().to(target_device)
        print(f"测试样本形状: {batch.shape}，设备: {target_device}")

        with torch.no_grad():
            result = model(batch)

        print(f"模型输出形状: {result.shape}")
        print("数据形状与模型兼容！")
        return True
    except Exception as err:
        # Any failure (shape mismatch, dtype, device, ...) means "incompatible".
        print(f"数据形状与模型不兼容: {str(err)}")
        return False


if __name__ == "__main__":
    # --- Dynamic module switches ---
    ENABLE_QUALITY_FILTER = True  # prune low-quality samples
    ENABLE_CONFIDENCE_WEIGHT = True  # confidence-guided loss weighting
    ENABLE_TEMPORAL_CONSISTENCY = True  # temporal-consistency maintenance
    # Quality-filter parameter
    QUALITY_THRESHOLD = 0.3  # Pearson-correlation threshold

    # --- Data-type selection ---
    USE_RAW_CSI = True  # True: raw CSI; False: channel-fingerprint-removed features
    USE_DUT11_NOISE = False  # enable DUT11-generated multi-channel interference (core switch)
    # Experiment configuration
    DEVICE_LIST = [
        "DUT1",  "DUT5", "DUT11",
        "DUT13","DUT20"
    ]
    TRAIN_SCENARIOS = ["MOVE01_20250822"]
    TARGET_SCENARIO = "MOVE02_20250822"

    print(f"[调试] 准备执行条件判断，USE_RAW_CSI={USE_RAW_CSI}")
    if USE_RAW_CSI:
        ORIGINAL_FEAT_DIM = RAW_CSI_FEAT_DIM  # 50
        data_loader_train = load_raw_train_data
        data_loader_test = load_raw_test_data
        print(f": 使用csi")
    else:
        # BUGFIX: the original fell through with ORIGINAL_FEAT_DIM and the
        # loaders unbound, crashing with a NameError just below. Fail fast
        # with a clear message instead (no alternative loader is imported).
        raise ValueError(
            "USE_RAW_CSI=False is not supported: no data loader is configured "
            "for de-fingerprinted features"
        )
    print(f"[调试] 条件判断执行完毕，当前ORIGINAL_FEAT_DIM={ORIGINAL_FEAT_DIM}")
    # Sliding-window parameters
    WINDOW_SIZE = 10
    STEP = 1
    FEAT_DIM = ORIGINAL_FEAT_DIM * WINDOW_SIZE

    # Training hyper-parameters
    SOURCE_EPOCHS = 20
    SOURCE_BATCH = 16
    SOURCE_LR = 0.001
    VALIDATION_SPLIT = 0.2
    WEIGHT_DECAY = 3e-3
    EARLY_STOP_PATIENCE = 8

    # Unified device management
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # ===================== Step 1: load multi-scenario training data =====================
    print("Step 1: 加载多场景训练数据...")
    all_X_source = []
    all_y_source = []

    # Load each training scenario and accumulate the results.
    # CONSISTENCY FIX: call the loader selected above (data_loader_train)
    # instead of hard-coding load_raw_train_data — identical behavior when
    # USE_RAW_CSI is True, but the selection switch now actually takes effect.
    for scenario in TRAIN_SCENARIOS:
        print(f"正在加载场景 {scenario} 的数据...")
        X_scene, y_scene = data_loader_train(
            DEVICE_LIST,
            source_scenario=scenario,
            use_dut11_noise=USE_DUT11_NOISE,  # mix in DUT11 LOS/NLOS/MOV interference
            window_size=WINDOW_SIZE,
            step=STEP,
            enable_filter=ENABLE_QUALITY_FILTER,
            quality_threshold=QUALITY_THRESHOLD,
            enable_temporal_consistency=ENABLE_TEMPORAL_CONSISTENCY
        )
        all_X_source.append(X_scene)
        all_y_source.append(y_scene)

    # Merge all scenarios into one source-domain set
    X_source = np.concatenate(all_X_source, axis=0)
    y_source = np.concatenate(all_y_source, axis=0)
    print(f"多场景合并后源域数据形状: {X_source.shape}")

    # Reshape flat feature vectors into 2 channels for the CNN input
    if len(X_source.shape) == 2:
        try:
            X_source = X_source.reshape(-1, 2, FEAT_DIM // 2)
            print(f"调整后源域数据形状: {X_source.shape}")
        except ValueError as e:
            print(f"数据形状调整失败: {str(e)}")
            print(f"确保特征维度 {FEAT_DIM} 可以被2整除")
            exit(1)

    # Stratified train/validation split (keeps per-device class balance)
    X_train, X_val, y_train, y_val = train_test_split(
        X_source,
        y_source,
        test_size=VALIDATION_SPLIT,
        random_state=42,
        stratify=y_source
    )
    print(f"数据拆分完成：训练集{X_train.shape} | 验证集{X_val.shape}")

    # ===================== Step 2: build model and check compatibility =====================
    print("\nStep 2: 初始化模型并检查兼容性...")
    # Two candidate architectures for comparison; one is active at a time.
    base_model = shufflenet_enhanced(num_classes=len(DEVICE_LIST)).to(device)
    #base_model = shufflenet_lstm_attention_improved(num_classes=len(DEVICE_LIST)).to(device)

    if len(X_train) > 0:
        check_data_shape_compatibility(base_model, X_train[0])
    else:
        print("没有加载到源域数据，无法检查兼容性")
        exit(1)

    # ===================== Step 3: load target-domain test set =====================
    print("\nStep 3: 加载目标域测试集...")
    X_test, y_test, device_names = data_loader_test(
        DEVICE_LIST,
        target_scenario=TARGET_SCENARIO,
        window_size=WINDOW_SIZE,
        step=STEP
    )
    print(f"【预处理前】X_test样本数：{len(X_test)}，y_test样本数：{len(y_test)}")
    if ENABLE_QUALITY_FILTER:
        # Flatten to (N, features) for the quality filter, then restore the
        # 2-channel shape. Note: X_test must be rebuilt from the filtered
        # array — forgetting this was a bug in an earlier revision.
        X_test_flat = X_test.reshape(-1, X_test.shape[1] * X_test.shape[2])
        X_test_flat, y_test = filter_low_quality_samples(X_test_flat, y_test, QUALITY_THRESHOLD)
        X_test = X_test_flat.reshape(-1, 2, RAW_CSI_FEAT_DIM)
    elif len(X_test.shape) == 2:
        # Shape adjustment for the unfiltered path
        X_test = X_test.reshape(-1, 2, FEAT_DIM // 2)

    # ROBUSTNESS FIX: `assert` is stripped under `python -O`; use an explicit
    # check so the sample-count invariant is always enforced.
    if len(X_test) != len(y_test):
        raise RuntimeError(
            f"预处理后样本数不一致！X_test：{len(X_test)}，y_test：{len(y_test)}"
        )
    print(f"【预处理后】X_test样本数：{len(X_test)}，y_test样本数：{len(y_test)}")

    # ===================== Step 4: train the model =====================
    print("\nStep 4: 训练模型...")
    base_model = train_with_validation(
        model=base_model,
        X_train=X_train,
        y_train=y_train,
        X_val=X_val,
        y_val=y_val,
        X_test=X_test,
        y_test=y_test,
        num_epochs=SOURCE_EPOCHS,
        batch_size=SOURCE_BATCH,
        lr=SOURCE_LR,
        weight_decay=WEIGHT_DECAY,
        patience=EARLY_STOP_PATIENCE,
        save_path="best_multi_scene_model.pth",
        enable_confidence_weight=ENABLE_CONFIDENCE_WEIGHT  # confidence-weight switch
    )

    print("\nStep 4.5: 目标域测试阶段在线微调...")
    # Split target-domain data into a small labeled subset and the rest unlabeled.
    # NOTE(review): label_ratio=1 labels ALL test samples, which contradicts the
    # "5%" comment in earlier revisions — confirm the intended ratio.
    X_labeled, y_labeled, X_unlabeled = split_labeled_data_by_ratio(
        X_test=X_test,
        y_test=y_test,
        label_ratio=1,
        min_label_per_class=2  # at least 2 labels per device
    )
    # Lightweight semi-supervised fine-tuning on the target domain
    base_model = semi_supervised_finetune(
        model=base_model,  # model pre-trained on the source domain
        X_labeled=X_labeled,
        y_labeled=y_labeled,
        X_unlabeled=X_unlabeled,
        epochs=1,  # few epochs to avoid overfitting
        lr=3e-5,  # small LR to avoid catastrophic forgetting of source knowledge
        conf_thresh=0.6  # pseudo-label confidence threshold
    )

    # ===================== Step 5: evaluate model performance =====================
    print("\nStep 5: 评估模型性能...")
    class_acc, overall_acc, error_details = evaluate_detailed_classification(
        model=base_model,
        X_test=X_test,
        y_test=y_test,
        device_list=DEVICE_LIST
    )

    # Print per-device and overall results
    print(f"\n{'=' * 80}")
    print(f"【多场景训练模型 - 设备分类准确率】")
    print(f"{'=' * 80}")
    print(f"{'设备名':<10} | {'总样本数':<10} | {'正确样本数':<12} | {'分类准确率':<15}")
    print(f"{'-' * 80}")
    for res in class_acc:
        print(
            f"{res['device_name']:<10} | "
            f"{res['total_samples']:<10} | "
            f"{res['correct_samples']:<12} | "
            f"{res['classification_acc']:.2f}%"
        )
    print(f"\n多场景训练模型 - 整体跨域分类准确率：{overall_acc:.2f}%")
    print(f"{'=' * 80}")