import os
import pickle
import numpy as np
from scipy.sparse import load_npz
from implicit.als import AlternatingLeastSquares
from lightgbm import LGBMRanker
from sklearn.model_selection import train_test_split
from sklearn.metrics import ndcg_score
from config import Config
import pandas as pd


def train_als_model(interaction_matrix):
    """Train the collaborative-filtering model (ALS) and persist it.

    Args:
        interaction_matrix: scipy sparse user-item interaction matrix.

    Returns:
        The fitted AlternatingLeastSquares model (also pickled to
        Config.ALS_MODEL_FILE).
    """
    print("Training ALS model...")

    params = Config.ALS_PARAMS
    als_model = AlternatingLeastSquares(
        factors=params['factors'],
        regularization=params['regularization'],
        iterations=params['iterations'],
        random_state=params['random_state'],
    )

    # NOTE(review): implicit < 0.5 expects item-user orientation in fit(),
    # implicit >= 0.5 expects user-item — confirm the installed version
    # matches this transpose.
    als_model.fit(interaction_matrix.T)

    # Persist the fitted model.
    os.makedirs(Config.MODEL_DIR, exist_ok=True)
    with open(Config.ALS_MODEL_FILE, 'wb') as f:
        pickle.dump(als_model, f)

    print(f"ALS model saved to {Config.ALS_MODEL_FILE}")
    return als_model


def train_content_model(user_features, item_features, interactions):
    """Train the content-based ranking model (LightGBM LambdaRank).

    Args:
        user_features: DataFrame keyed by 'user_id'.
        item_features: DataFrame keyed by 'product_id'.
        interactions: DataFrame with 'user_id', 'product_id' and the
            relevance target 'weighted_action'.

    Returns:
        The fitted LGBMRanker (also pickled to Config.CONTENT_MODEL_FILE).
    """
    print("Training content-based model...")

    # Join interactions with user and item features.
    # NOTE(review): the default inner join silently drops interactions whose
    # user/item has no feature row — confirm this is intended.
    data = interactions.merge(user_features, on='user_id')
    data = data.merge(item_features, on='product_id')

    # BUG FIX: the original also dropped 'user_id' here and then tried to
    # merge on X_train['user_id'] below, which raised a KeyError. Keep
    # 'user_id' for grouping; it is removed again right before fitting.
    features = data.drop(['product_id', 'weighted_action'], axis=1)
    target = data['weighted_action']

    # BUG FIX: a ranker needs whole query groups (users) on one side of the
    # split, with each group's rows contiguous, and `group=` must be the
    # sequence of group SIZES. A random row-level train_test_split plus
    # per-row repeated counts satisfied neither, so we split by user.
    rng = np.random.RandomState(42)
    users = features['user_id'].unique()
    rng.shuffle(users)
    n_test = max(1, int(len(users) * 0.2))
    test_users = set(users[:n_test])
    test_mask = features['user_id'].isin(test_users)

    def _prepare(mask):
        # Sort by user so each query group occupies a contiguous row range,
        # then compute the per-user group sizes LightGBM expects.
        X = features[mask].sort_values('user_id')
        y = target.loc[X.index]
        group_sizes = X.groupby('user_id', sort=False).size().to_numpy()
        return X.drop('user_id', axis=1), y, group_sizes

    X_train, y_train, g_train = _prepare(~test_mask)
    X_test, y_test, g_test = _prepare(test_mask)

    model = LGBMRanker(
        objective="lambdarank",
        metric="ndcg",
        boosting_type="gbdt",
        n_estimators=200,
        importance_type='gain',
        random_state=42
    )

    # NOTE(review): lightgbm >= 4 removed `early_stopping_rounds`/`verbose`
    # from fit(); switch to callbacks=[lgb.early_stopping(20)] if upgrading.
    model.fit(
        X_train, y_train,
        group=g_train,
        eval_set=[(X_test, y_test)],
        eval_group=[g_test],
        eval_metric='ndcg',
        early_stopping_rounds=20,
        verbose=10
    )

    # Persist the fitted ranker.
    with open(Config.CONTENT_MODEL_FILE, 'wb') as f:
        pickle.dump(model, f)

    print(f"Content model saved to {Config.CONTENT_MODEL_FILE}")
    return model


def train_hybrid_model(als_model, content_model, user_features, item_features):
    """Bundle both models plus the feature tables into a hybrid artifact.

    The serving layer combines the predictions of the two models; here we
    only persist everything it needs as a single pickle.

    Returns:
        The hybrid-model dict (also pickled to Config.HYBRID_MODEL_FILE).
    """
    hybrid_model = dict(
        als=als_model,
        content=content_model,
        user_features=user_features,
        item_features=item_features,
    )

    with open(Config.HYBRID_MODEL_FILE, 'wb') as f:
        pickle.dump(hybrid_model, f)

    print(f"Hybrid model saved to {Config.HYBRID_MODEL_FILE}")
    return hybrid_model


def evaluate_model(model, test_data, interaction_matrix, user_id_map, item_id_map):
    """Evaluate the ALS model with a simple hit-rate on held-out interactions.

    Args:
        model: fitted model exposing `recommend(user, user_items, N=..., ...)`.
        test_data: DataFrame with raw 'user_id' and 'product_id' columns.
        interaction_matrix: sparse user-item matrix used for filtering.
        user_id_map: raw user id -> internal matrix index (assumed direction
            — TODO confirm against the preprocessing step).
        item_id_map: raw product id -> internal matrix index.

    Returns:
        Mean hit rate over evaluable users (0.0 if none are evaluable).
    """
    print("Evaluating model...")

    test_scores = []

    for user_id in test_data['user_id'].unique():
        # BUG FIX: raw ids must be translated to internal matrix indices;
        # the original ignored both id maps entirely.
        if user_id not in user_id_map:
            continue
        user_idx = user_id_map[user_id]

        user_test = test_data[test_data['user_id'] == user_id]
        true_items = {item_id_map[i] for i in user_test['product_id']
                      if i in item_id_map}
        if not true_items:
            continue

        # NOTE(review): implicit >= 0.5 expects the user's row
        # (interaction_matrix[user_idx]) as the second argument, not the
        # whole matrix — confirm against the installed version.
        recs = model.recommend(
            user_idx,
            interaction_matrix,
            N=len(true_items),
            filter_already_liked_items=True
        )

        # BUG FIX: old-API recommend returns (item, score) tuples, so the
        # original set intersection with item ids was always empty; the new
        # API returns an (ids, scores) pair. Normalize both to a set of ids.
        if isinstance(recs, tuple):
            rec_ids = set(np.asarray(recs[0]).tolist())
        else:
            rec_ids = {item for item, _ in recs}

        test_scores.append(1 if true_items & rec_ids else 0)

    # Guard against an empty score list (np.mean([]) would be NaN).
    mean_score = float(np.mean(test_scores)) if test_scores else 0.0
    print(f"Test set hit rate: {mean_score:.4f}")
    return mean_score


def train_models():
    """Train every model (ALS, content ranker, hybrid bundle) end to end.

    Loads preprocessed artifacts from Config paths, trains each model in
    turn, runs a quick evaluation, and returns the hybrid-model dict.
    """
    # Pre-computed feature tables and the sparse interaction matrix.
    user_features = pd.read_pickle(Config.USER_FEATURES_FILE)
    item_features = pd.read_pickle(Config.ITEM_FEATURES_FILE)
    interaction_matrix = load_npz(Config.INTERACTION_MATRIX_FILE)

    # Raw-id <-> matrix-index mappings produced by preprocessing.
    processed = Config.PROCESSED_DATA_DIR
    user_id_map = pd.read_pickle(os.path.join(processed, "user_id_map.pkl")).to_dict()
    item_id_map = pd.read_pickle(os.path.join(processed, "item_id_map.pkl")).to_dict()

    # Training interactions.
    interactions = pd.read_csv(os.path.join(processed, "train_interactions.csv"))

    # Train the three model artifacts in dependency order.
    als_model = train_als_model(interaction_matrix)
    content_model = train_content_model(user_features, item_features, interactions)
    hybrid_model = train_hybrid_model(als_model, content_model, user_features, item_features)

    # Quick sanity-check evaluation on the held-out interactions.
    test_data = pd.read_csv(os.path.join(processed, "test_interactions.csv"))
    evaluate_model(als_model, test_data, interaction_matrix, user_id_map, item_id_map)

    return hybrid_model