from gc import callbacks
import tensorflow as tf
import numpy as np
from models import DeepFM
import pandas as pd
from sklearn.model_selection import KFold  # 替换train_test_split为KFold
from sklearn.metrics import roc_curve, auc
import os  # 添加os模块用于创建目录
from tensorflow.keras.callbacks import EarlyStopping  # 导入早停回调

# ====================== 1. 准备测试数据 ======================
# 定义特征列（与模型训练时保持一致）
# 密集特征
# Dense (continuous) feature columns — the order must match the column
# layout used when the model was trained.
_DENSE_FEATURE_NAMES = (
    'user_user_login_count_7d',
    'user_user_total_clicks',
    'user_user_click_ts_diff_min',
    'user_user_click_ts_diff_mean',
    'user_user_click_ts_diff_std',
    'user_user_click_ts_diff_max',
    'user_user_click_ts_hour_mean',
    'item_category_rank',
    'item_position_rank',
    'item_item_click_num_3d',
    'item_item_click_num_7d',
    'item_item_click_num_14d',
    'item_item_click_num_21d',
    'item_item_click_rank_3d',
    'item_item_click_rank_7d',
    'item_item_click_rank_14d',
    'item_item_click_rank_21d',
)
dense_features = [{'name': name} for name in _DENSE_FEATURE_NAMES]

# 离散特征
# Sparse (categorical) feature columns as (name, vocabulary size) pairs.
# Every sparse feature uses the same 8-dimensional embedding.
_SPARSE_FEATURE_SPECS = (
    ('month', 13),
    ('day_of_month', 32),
    ('day_of_week', 8),
    ('day_of_year', 367),
    ('is_weekend', 2),
    ('id_encoded', 50),
    ('user_id_encoded', 30),
    ('content_id_encoded', 30),
    ('last_login_day_of_year', 367),
    ('last_login_month', 13),
    ('last_login_day_of_week', 8),
    ('last_login_day_of_month', 32),
    ('last_login_is_weekend', 2),
    ('ip-1', 256),
    ('ip-2', 256),
    ('ip-3', 256),
    ('ip-4', 256),
    ('user_user_is_login_3d', 2),
)
sparse_features = [
    {'name': name, 'vocab_size': vocab, 'embed_dim': 8}
    for name, vocab in _SPARSE_FEATURE_SPECS
]

# ====================== 2. Load data and set up cross-validation ======================
df = pd.read_csv("/data/GuoCu_data/processed_data/toGBDT.csv")

# Name of the label column (adjust if the dataset schema differs).
label_column = 'label'

dense_feature_names = [feature['name'] for feature in dense_features]
sparse_feature_names = [feature['name'] for feature in sparse_features]

# Dense features as a numeric matrix.
dense_data = df[dense_feature_names].values

# Sparse features cast to integer ids for the embedding lookups.
sparse_data = df[sparse_feature_names].values.astype(int)

# Labels as a column vector (matches the model's per-sample sigmoid output).
labels = df[label_column].values.reshape(-1, 1)

# Concatenate dense and sparse columns into a single feature matrix.
# NOTE(review): the sparse ids get promoted to float here — presumably the
# DeepFM model slices this matrix and re-casts the sparse part internally;
# confirm against models.DeepFM.
all_features = np.hstack([dense_data, sparse_data])

# Five-fold cross-validation (replaces the earlier single train/test split).
kfold = KFold(n_splits=5, shuffle=True, random_state=42)

# Per-fold held-out AUCs, averaged after the loop.
fold_aucs = []

# Directory intended for saving trained models.
# TODO(review): nothing in this script ever writes a model into save_dir —
# either save the per-fold models or remove this.
save_dir = '/data/GuoCu_data/models/deepFM'
os.makedirs(save_dir, exist_ok=True)

# 进行五折交叉验证
# ====================== 3. Five-fold cross-validation loop ======================
# NOTE(review): KFold ignores the labels passed to split(); for an imbalanced
# binary label, StratifiedKFold would give more representative folds — confirm
# the class balance before switching.
for fold, (train_indices, test_indices) in enumerate(kfold.split(all_features, labels), 1):
    print(f'\n===== 第 {fold} 折 =====')

    # Slice this fold's train / held-out partitions.
    train_features_np = all_features[train_indices]
    test_features_np = all_features[test_indices]
    train_labels_np = labels[train_indices]
    test_labels_np = labels[test_indices]

    # Convert the training side to tensors for model.fit.
    train_features = tf.convert_to_tensor(train_features_np, dtype=tf.float32)
    test_features = tf.convert_to_tensor(test_features_np, dtype=tf.float32)
    train_labels = tf.convert_to_tensor(train_labels_np, dtype=tf.float32)

    # A fresh model per fold so no weights leak between folds.
    model = DeepFM(
        feature_columns=(dense_features, sparse_features),
        hidden_units=(128, 64),
        dropout_rate=0.2
    )

    # Exponentially decaying learning rate schedule.
    learning_rate_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=0.001,
        decay_steps=1000,
        decay_rate=0.96,
        staircase=True,  # decay in discrete steps every `decay_steps` batches
    )

    # BUG FIX: the metrics are now named explicitly. Without name='auc',
    # Keras uniquifies duplicate metric names across the whole session
    # ('auc', 'auc_1', 'auc_2', ...). Since a new model is compiled each
    # fold, from fold 2 onwards the logged key would be 'val_auc_1' etc.,
    # so EarlyStopping(monitor='val_auc') would silently stop working.
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate_schedule),
        loss=tf.keras.losses.BinaryCrossentropy(),
        metrics=[
            tf.keras.metrics.BinaryAccuracy(name='binary_accuracy'),
            tf.keras.metrics.AUC(name='auc'),
        ],
    )

    # Stop when validation AUC plateaus and roll back to the best weights.
    early_stopping = EarlyStopping(
        monitor='val_auc',
        patience=5,          # stop after 5 epochs without improvement
        mode='max',          # AUC: higher is better
        verbose=1,
        restore_best_weights=True,
    )

    model.fit(
        train_features,
        train_labels,
        batch_size=2,  # NOTE(review): unusually small batch size — confirm this is intended
        epochs=100,
        validation_split=0.1,
        verbose=2,
        callbacks=[early_stopping],
    )

    # Evaluate on the fold's held-out data.
    predictions = model.predict(test_features)

    # ROC AUC on the held-out fold (labels used directly as numpy — no need
    # to round-trip them through a tensor).
    fpr, tpr, _ = roc_curve(test_labels_np.flatten(), predictions.flatten())
    roc_auc = auc(fpr, tpr)
    fold_aucs.append(roc_auc)
    print(f'第 {fold} 折的AUC指标: {roc_auc}')

# Mean held-out AUC over the five folds.
average_auc = np.mean(fold_aucs)
print(f'\n五折交叉验证的平均AUC指标: {average_auc}')

