import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import pandas as pd
import numpy as np
import pickle
import logging
from datetime import datetime
"""
绘图依赖改为在函数内部按需导入，避免环境缺失导致脚本顶层导入失败。
"""

from src.models.deep_hybrid_model import DeepHybridModel
from src.training.hybrid_trainer import HybridTrainer

# Configure root logging for the whole training run.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

# Silence TensorFlow's C++ logs except errors.
# NOTE: the env var must be set BEFORE `import tensorflow` to take effect.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf

gpus = tf.config.list_physical_devices('GPU')
print("可用GPU列表:", gpus)
if gpus:
    try:
        # Enable on-demand GPU memory growth instead of grabbing all
        # GPU memory up front.
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        print("已启用GPU内存动态增长！")
    except RuntimeError as e:
        # set_memory_growth raises RuntimeError if the GPUs were already
        # initialized; report and continue with default allocation.
        print(e)
else:
    print("未检测到GPU，当前使用CPU。")


def plot_and_save_training_curves(history_obj, save_prefix: str):
    """Plot training curves from a Keras History and save them as PNG + CSV.

    Parameters:
    - history_obj: a ``keras.callbacks.History`` instance or its plain
      ``history`` dict (``{metric_name: [value per epoch]}``); ``None`` or an
      empty history is tolerated and simply skipped.
    - save_prefix: output file-name prefix, e.g.
      ``models/hybrid_model_20250101_010101``.

    Produces:
    - ``{save_prefix}_training_curves.png`` (skipped if matplotlib is missing)
    - ``{save_prefix}_training_history.csv`` (written whenever a non-empty
      history is available, even without matplotlib)
    """
    # Lazy-import matplotlib so a missing plotting environment degrades
    # gracefully to CSV-only output instead of crashing the training script.
    try:
        import importlib
        matplotlib = importlib.import_module('matplotlib')
        matplotlib.use('Agg')  # headless backend for server environments
        plt = importlib.import_module('matplotlib.pyplot')
    except Exception as e:
        print(f"⚠️ 未安装或无法加载绘图依赖(matplotlib/seaborn)，跳过绘图。错误: {e}")
        # Still try to persist the raw history as CSV for later analysis.
        hist_dict = getattr(history_obj, 'history', history_obj) or {}
        if isinstance(hist_dict, dict) and len(hist_dict) > 0:
            try:
                pd.DataFrame(hist_dict).to_csv(f"{save_prefix}_training_history.csv", index=False)
            except Exception:
                pass
        return

    # Accept either a History object or a bare dict.
    hist = getattr(history_obj, 'history', history_obj) or {}
    if not isinstance(hist, dict) or len(hist) == 0:
        print("⚠️ 未发现可用的训练历史，跳过绘图。")
        return

    # Save the CSV first so the data survives even if plotting fails below.
    try:
        pd.DataFrame(hist).to_csv(f"{save_prefix}_training_history.csv", index=False)
    except Exception as e:
        print(f"⚠️ 保存训练历史CSV失败: {e}")

    # seaborn is purely cosmetic here (theme only) — fix: do not let a
    # missing seaborn disable plotting entirely, as the old combined
    # import block did.
    try:
        sns = importlib.import_module('seaborn')
        sns.set_theme(style="whitegrid")
    except Exception:
        pass

    def _epochs(series):
        # X axis per series: 1..len(series). Computing it per series (rather
        # than once from len(hist['loss'])) fixes a length-mismatch crash in
        # ax.plot when a metric has a different epoch count or 'loss' is
        # absent from the history.
        return range(1, len(series) + 1)

    # Subplots: loss on the left, metrics (MAE/MSE/RMSE) on the right.
    fig, axes = plt.subplots(1, 2, figsize=(14, 5))

    # 1) Loss curves
    ax = axes[0]
    if 'loss' in hist:
        ax.plot(_epochs(hist['loss']), hist['loss'], label='train_loss', color='#1f77b4')
    if 'val_loss' in hist:
        ax.plot(_epochs(hist['val_loss']), hist['val_loss'], label='val_loss', color='#ff7f0e')
    ax.set_title('Loss over epochs')
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Loss')
    ax.legend()

    # 2) Metric curves (only if any are present in the history)
    ax = axes[1]
    plotted_any_metric = False
    for metric in ['mae', 'mse', 'rmse']:
        if metric in hist:
            ax.plot(_epochs(hist[metric]), hist[metric], label=f'train_{metric}')
            plotted_any_metric = True
        if f'val_{metric}' in hist:
            ax.plot(_epochs(hist[f'val_{metric}']), hist[f'val_{metric}'], label=f'val_{metric}')
            plotted_any_metric = True
    if plotted_any_metric:
        ax.set_title('Metrics over epochs')
        ax.set_xlabel('Epoch')
        ax.set_ylabel('Value')
        ax.legend()
    else:
        ax.axis('off')  # keep the layout tidy when no metrics were recorded

    plt.tight_layout()
    out_path = f"{save_prefix}_training_curves.png"
    try:
        fig.savefig(out_path, dpi=200)
        print(f"🖼️ 训练曲线已保存: {out_path}")
    except Exception as e:
        print(f"⚠️ 保存训练曲线失败: {e}")
    finally:
        plt.close(fig)  # always release the figure to avoid memory buildup

def main() -> None:
    """End-to-end training entry point for the deep hybrid recommender.

    Pipeline: load preprocessed features and interactions from
    ``data/processed/``, prepare (or load cached) train/validation splits,
    build the ``DeepHybridModel``, train it via ``HybridTrainer``, evaluate,
    then save the model, training info, and training curves under ``models/``.

    Raises:
        Exception: any error during the pipeline is logged with traceback
            and re-raised.
    """
    print("🚀 开始训练深度学习混合模型...")
    
    try:
        # 1. Load preprocessed data
        print("📥 加载预处理数据...")
        processed_dir = "data/processed/"
        
        # Feature tables produced by the preprocessing step.
        user_features = pd.read_pickle(f"{processed_dir}/user_features.pkl")
        movie_features_original = pd.read_pickle(f"{processed_dir}/movie_features.pkl")
        
        # ID mappings — presumably raw user/movie ids -> contiguous integer
        # indices for the embedding layers; verify against preprocessing.
        # NOTE(review): pickle.load assumes these files are trusted artifacts
        # produced by this project; never point it at untrusted input.
        with open(f"{processed_dir}/user_id_map.pkl", 'rb') as f:
            user_id_map = pickle.load(f)
        with open(f"{processed_dir}/movie_id_map.pkl", 'rb') as f:
            movie_id_map = pickle.load(f)
        
        # User-movie interaction (rating) records.
        interactions = pd.read_pickle(f"{processed_dir}/interaction_features.pkl")
        
        print(f"📊 数据加载完成:")
        print(f"   用户数: {len(user_id_map)}")
        print(f"   电影数: {len(movie_id_map)}")
        print(f"   交互记录数: {len(interactions)}")

        # 2. Prepare (or load cached) training data
        # Hyperparameters shared by both the model builder and the trainer.
        model_config = {
            'embedding_dim': 64,
            'hidden_layers': [512, 256, 128],
            'dropout_rates': [0.3, 0.3, 0.2],
            'learning_rate': 0.001,
            'batch_size': 2048,
            'epochs': 50,
            'use_attention': True,
            'use_batch_norm': True,
            'l2_reg': 0.0001
        }
        
        train_cache_path = f"{processed_dir}/train_data_cache.pkl"
        val_cache_path = f"{processed_dir}/val_data_cache.pkl"
        movie_features_cache_path = f"{processed_dir}/movie_features_cache.pkl"

        # Reuse cached splits only when all three cache files exist;
        # otherwise rebuild them (expensive) and write the caches for reuse.
        if os.path.exists(train_cache_path) and os.path.exists(val_cache_path) and os.path.exists(movie_features_cache_path):
            print("💡 加载缓存的训练/验证数据...")
            with open(train_cache_path, 'rb') as f:
                train_data = pickle.load(f)
            with open(val_cache_path, 'rb') as f:
                val_data = pickle.load(f)
            with open(movie_features_cache_path, 'rb') as f:
                movie_features = pickle.load(f)
        else:
            print("📋 缓存不存在，准备新的训练数据...")
            # A trainer with no model is used purely for data preparation.
            trainer_for_prep = HybridTrainer(None, config=model_config)
            train_data, val_data, movie_features = trainer_for_prep.prepare_training_data(
                ratings_df=interactions,
                user_features=user_features,
                movie_features=movie_features_original,
                user_id_map=user_id_map,
                movie_id_map=movie_id_map,
                test_size=0.2,
                sample_ratio=0.3  # train on a 30% sample of the data
            )
            print("💾 保存数据到缓存文件...")
            with open(train_cache_path, 'wb') as f:
                pickle.dump(train_data, f)
            with open(val_cache_path, 'wb') as f:
                pickle.dump(val_data, f)
            with open(movie_features_cache_path, 'wb') as f:
                pickle.dump(movie_features, f)

        # 3. Build the model
        print("🧠 构建深度学习混合模型...")
        n_users = len(user_id_map)
        n_movies = len(movie_id_map)
        # Feature dimensions taken from the DataFrame column counts.
        user_feature_dim = user_features.shape[1]
        movie_feature_dim = movie_features.shape[1]

        model_builder = DeepHybridModel(
            n_users=n_users,
            n_movies=n_movies,
            user_feature_dim=user_feature_dim,
            movie_feature_dim=movie_feature_dim,
            config=model_config
        )

        model = model_builder.build_model()
        model.summary()
        
        # 4. Create the trainer and start training
        trainer = HybridTrainer(model, config=model_config)
        print("🎯 开始模型训练...")
        history = trainer.train(
            train_data=train_data,
            val_data=val_data,
            epochs=model_config['epochs'],
            batch_size=model_config['batch_size']
        )
        
        # 5. Evaluate the model
        print("📈 评估模型性能...")
        if val_data is not None:
            # NOTE(review): `metrics` is never used afterwards — presumably
            # evaluate() logs/prints its results internally; confirm.
            metrics = trainer.evaluate(val_data)
        
        # 6. Save the model (timestamped so runs never overwrite each other)
        print("💾 保存模型...")
        model_dir = "models/"
        os.makedirs(model_dir, exist_ok=True)
        
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        model_path = f"{model_dir}hybrid_model_{timestamp}"
        
        model_builder.save_model(model_path)
        trainer.save_training_info(f"{model_path}_training_info.pkl")
        # 7. Plot and save the training curves and history CSV
        plot_and_save_training_curves(history, model_path)
        print("✅ 模型训练完成！")
        print(f"模型已保存到: {model_path}")
    except Exception as e:
        # Log with full traceback, then re-raise so the process exits non-zero.
        logging.error(f"训练过程中出错: {e}", exc_info=True)
        raise

if __name__ == "__main__":
    main()