from datetime import datetime
from flask import Blueprint, request, jsonify
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, recall_score, f1_score, precision_score
from imblearn.over_sampling import SMOTE
import logging
from sqlalchemy import func, distinct, case
from exts import csrf
from models import CleanedUserBehavior, db

# Create the blueprint: all routes in this module live under /pr.
bp = Blueprint('pr', __name__, url_prefix='/pr')

# Configure module-level logging.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


# Load one user's behavior rows from the database (aggregated query).
def load_data_from_db(user_id):
    """Load the given user's rows from CleanedUserBehavior as a DataFrame.

    Rows are grouped on every selected column, so `behavior_count` counts
    exact duplicate (user, item, category, behavior, timestamp) rows.

    Raises:
        Exception: re-raised after logging if the query fails.
    """
    column_names = ['user_id', 'item_id', 'category_id', 'behavior_type',
                    'timestamp', 'behavior_count']
    # Columns that are both selected and grouped on.
    group_cols = (
        CleanedUserBehavior.user_id,
        CleanedUserBehavior.item_id,
        CleanedUserBehavior.category_id,
        CleanedUserBehavior.behavior_type,
        CleanedUserBehavior.timestamp,
    )
    try:
        rows = (
            db.session.query(
                *group_cols,
                func.count(CleanedUserBehavior.behavior_type).label('behavior_count'),
            )
            .filter(CleanedUserBehavior.user_id == user_id)  # restrict to this user
            .group_by(*group_cols)
            .all()
        )
        return pd.DataFrame(rows, columns=column_names)
    except Exception as e:
        logger.error(f"从数据库加载数据失败: {str(e)}")
        raise


def evaluate_data_quality(data):
    """Rate how usable a user's behavior history is for model training.

    Tiers, checked in order:
      * 'very_low'   - fewer than 10 rows
      * 'low'        - fewer than 50 rows
      * 'unbalanced' - only one distinct behavior_type
      * 'high'       - everything else
    """
    row_count = len(data)
    if row_count < 10:
        return 'very_low'
    if row_count < 50:
        return 'low'
    if len(data['behavior_type'].unique()) < 2:
        return 'unbalanced'
    return 'high'


# Data preprocessing
def preprocess_data(data, user_id, prediction_type=None):
    """Build features/labels for the requested prediction and split them.

    Args:
        data: DataFrame from load_data_from_db; must contain item_id,
            category_id, behavior_type and timestamp columns. The caller's
            DataFrame is NOT mutated (features are added to a copy).
        user_id: user being predicted; used only in error/log messages.
        prediction_type: 'churn_risk', 'repurchase', or None for the default
            multiclass behavior-type task.

    Returns:
        Tuple (X_train, X_val, X_test, y_train, y_val, y_test, data_quality).
        The first six entries are None when quality is 'very_low' or the
        label has a single class ('unbalanced').

    Raises:
        ValueError: if `data` is empty (unknown user).
    """
    if data.empty:
        raise ValueError(f"用户ID {user_id} 不存在于数据集中")

    data_quality = evaluate_data_quality(data)
    if data_quality == 'very_low':  # too little data to train anything
        logger.warning(f"用户 {user_id} 数据质量: {data_quality}")
        return None, None, None, None, None, None, data_quality

    # Work on a copy so the feature columns added below do not leak into the
    # caller's DataFrame (predict() reuses it for recommendations afterwards).
    data = data.copy()

    # Feature engineering: encode behavior_type as a numeric feature.
    behavior_mapping = {'pv': 0, 'cart': 1, 'fav': 2, 'buy': 3}
    data['behavior_type_num'] = data['behavior_type'].map(behavior_mapping)

    # Select features and label per prediction type.
    if prediction_type == 'churn_risk':
        # Churn risk: recency feature; label = no activity in the last 30 days.
        data['days_since_last_activity'] = (datetime.now() - pd.to_datetime(data['timestamp'])).dt.days
        X = data[['days_since_last_activity', 'behavior_type_num']]
        y = (data['days_since_last_activity'] > 30).astype(int)
    elif prediction_type == 'repurchase':
        # Repurchase: recency feature; label = the row is a purchase.
        # (A previous version also computed an average purchase interval here,
        # but it was never used in the features or label — removed.)
        data['days_since_last_activity'] = (datetime.now() - pd.to_datetime(data['timestamp'])).dt.days
        X = data[['days_since_last_activity', 'behavior_type_num']]
        y = (data['behavior_type'] == 'buy').astype(int)
    else:
        # Default: multiclass prediction of the behavior type itself.
        X = data[['item_id', 'category_id', 'behavior_type_num']]
        y = data['behavior_type_num']

    # A single label class cannot be learned; report as unbalanced.
    if len(np.unique(y)) < 2:
        return None, None, None, None, None, None, 'unbalanced'

    # Split strategy scales with the amount of data available.
    if len(data) >= 1000:
        # Large: 60% train / 20% validation / 20% test.
        X_train, X_temp, y_train, y_temp = train_test_split(
            X, y, test_size=0.4, random_state=42, stratify=y)
        X_val, X_test, y_val, y_test = train_test_split(
            X_temp, y_temp, test_size=0.5, random_state=42, stratify=y_temp)
    elif len(data) >= 100:
        # Medium: 70% train / 15% validation / 15% test.
        X_train, X_temp, y_train, y_temp = train_test_split(
            X, y, test_size=0.3, random_state=42, stratify=y)
        X_val, X_test, y_val, y_test = train_test_split(
            X_temp, y_temp, test_size=0.5, random_state=42, stratify=y_temp)
    else:
        # Small: 80% train / 20% test; reuse the test set as validation.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=42, stratify=y)
        X_val, y_val = X_test, y_test

    return X_train, X_val, X_test, y_train, y_val, y_test, data_quality


# Model training
def train_model(model_type, X_train, y_train):
    """Build the requested classifier, fit it on the training data, and
    return the fitted model.

    Supported model_type values: 'LogisticRegression', 'DecisionTree',
    'RandomForest'; anything else raises ValueError.
    """
    # Dispatch table: model name -> factory producing a fresh estimator.
    factories = {
        'LogisticRegression': lambda: LogisticRegression(
            solver='lbfgs', max_iter=1000, class_weight='balanced'),
        'DecisionTree': lambda: DecisionTreeClassifier(
            max_depth=5, random_state=42, class_weight='balanced'),
        'RandomForest': lambda: RandomForestClassifier(
            n_estimators=100, max_depth=5, random_state=42, class_weight='balanced'),
    }
    if model_type not in factories:
        raise ValueError(f"不支持的模型类型: {model_type}")

    model = factories[model_type]()
    model.fit(X_train, y_train)
    return model


def evaluate_model(model, X_test, y_test):
    """Score the fitted model on the held-out test set.

    Returns a dict with accuracy, precision, recall and f1_score. Binary
    problems use the second (larger) class label as the positive class;
    multiclass problems use weighted averaging.
    """
    y_pred = model.predict(X_test)

    classes = np.unique(y_test)
    logger.info(f"评估模型 - 唯一类别: {classes}")

    accuracy = accuracy_score(y_test, y_pred)

    # Shared keyword arguments for precision/recall/F1, chosen by class count.
    if len(classes) == 2:
        score_kwargs = {'pos_label': classes[1], 'zero_division': 0}
    else:
        score_kwargs = {'average': 'weighted', 'zero_division': 0}

    precision = precision_score(y_test, y_pred, **score_kwargs)
    recall = recall_score(y_test, y_pred, **score_kwargs)
    f1 = f1_score(y_test, y_pred, **score_kwargs)

    if len(classes) == 2:
        logger.info(f"二分类评估 - 使用正类标签: {classes[1]}")
    else:
        logger.info("多分类评估 - 使用加权平均")

    return {
        'accuracy': accuracy,
        'precision': precision,
        'recall': recall,
        'f1_score': f1
    }


# Personalized item recommendation

def recommend_items(user_id, data, prediction_type=None, prediction_result=None):
    """
    Generate personalized item recommendations from a user's behavior data.

    Blends a per-user score (behavior weight x 30-day exponential time decay)
    with category-level popularity, normalizes to a 0-100 score, and returns
    up to 6 items with purchase-rate stats and a placeholder image URL.
    Returns [] for churn-risk predictions, negative repurchase predictions,
    empty input, or any internal failure.

    NOTE(review): scoring columns are added to `data` in place — the caller's
    DataFrame is mutated.
    """
    try:
        # Empty input: nothing to recommend.
        if data.empty:
            logger.warning(f"用户 {user_id} 的数据为空")
            return []

        # Churn-risk predictions never carry recommendations.
        if prediction_type == 'churn_risk':
            logger.info(f"流失风险预测不推荐商品，用户ID: {user_id}")
            return []

        # Repurchase prediction with a negative result: skip recommendations.
        if prediction_type == 'repurchase' and not prediction_result:
            logger.info(f"复购预测结果为不回购，不推荐商品，用户ID: {user_id}")
            return []

        # Behavior weights (view 1, add-to-cart 3, favorite 4, purchase 5).
        behavior_weights = {
            'pv': 1,  # page view
            'cart': 3,  # add to cart
            'fav': 4,  # favorite
            'buy': 5  # purchase
        }

        # 1. Score each row by the weight of its behavior type.
        # NOTE(review): behavior types outside the mapping become NaN and
        # silently drop out of the aggregated scores — confirm intended.
        data['weight'] = data['behavior_type'].map(behavior_weights)

        # 2. Time-decay factor: recent behavior matters more (30-day
        # exponential decay).
        current_time = datetime.now()
        data['days_ago'] = (current_time - pd.to_datetime(data['timestamp'])).dt.days
        data['time_decay'] = np.exp(-data['days_ago'] / 30)  # 30-day decay constant

        # 3. Combined score = behavior weight x time decay.
        data['score'] = data['weight'] * data['time_decay']

        # 4. Aggregate the score per item.
        item_scores = data.groupby('item_id')['score'].sum().reset_index()

        # 5. The user's 3 most recently viewed categories.
        recent_categories = data[data['behavior_type'] == 'pv'] \
                                .sort_values('timestamp', ascending=False) \
                                ['category_id'].unique()[:3]  # 3 most recently viewed categories

        # 6. Popular items within those categories (ordered by interaction count).
        top_category_items = (
            db.session.query(
                CleanedUserBehavior.item_id,
                func.count(CleanedUserBehavior.item_id).label('popularity')
            )
            .filter(CleanedUserBehavior.category_id.in_(recent_categories))
            .group_by(CleanedUserBehavior.item_id)
            .order_by(func.count(CleanedUserBehavior.item_id).desc())
            .limit(20)
            .all()
        )

        # 7. Popular items as a DataFrame, ready to merge with personal scores.
        popular_items = pd.DataFrame(top_category_items, columns=['item_id', 'popularity'])

        # Blend personalized and popularity-based recommendations.
        if not item_scores.empty:
            # Outer join so items from either source survive.
            recommendations = pd.merge(
                item_scores,
                popular_items,
                on='item_id',
                how='outer'
            ).fillna(0)

            # Final score = personalized score + 0.3 * popularity.
            recommendations['final_score'] = recommendations['score'] + 0.3 * recommendations['popularity']

            # Normalize final scores to a 0-100 recommendation score.
            max_score = recommendations['final_score'].max()
            min_score = recommendations['final_score'].min()
            if max_score != min_score:
                recommendations['recommendation_score'] = (
                        (recommendations['final_score'] - min_score) /
                        (max_score - min_score) * 100
                ).round(1)
            else:
                recommendations['recommendation_score'] = 80.0  # default when all scores tie

            # Order by the combined score, best first.
            recommendations = recommendations.sort_values('final_score', ascending=False)

            # Keep the top 6 item ids and their normalized scores.
            recommended_item_ids = recommendations['item_id'].head(6).tolist()
            recommendation_scores = recommendations['recommendation_score'].head(6).tolist()
        else:
            # No personalized data: fall back to popular items only.
            recommended_item_ids = [item[0] for item in top_category_items][:6]
            recommendation_scores = [80.0] * len(recommended_item_ids)  # default 80% score

        # Fetch per-item detail stats from the database.
        # NOTE(review): one query per item (N+1 pattern); acceptable for <= 6 items.
        recommended_items_with_details = []
        for idx, item_id in enumerate(recommended_item_ids):
            # Distinct-user count and purchase-event count for the item.
            item_info = (
                db.session.query(
                    CleanedUserBehavior.item_id,
                    func.count(distinct(CleanedUserBehavior.user_id)).label('user_count'),
                    func.sum(case((CleanedUserBehavior.behavior_type == 'buy', 1), else_=0)).label('buy_count')
                )
                .filter(CleanedUserBehavior.item_id == item_id)
                .group_by(CleanedUserBehavior.item_id)
                .first()
            )

            if item_info:
                # Buy rate = purchase events / distinct users, as a percentage.
                # NOTE(review): buy_count counts events, not distinct buyers,
                # so this can exceed 100% — confirm intent.
                buy_rate = float(
                    item_info.buy_count / item_info.user_count * 100) if item_info.user_count > 0 else 0.0

                # Placeholder image URL (a crawler could supply real images).
                image_url = f"https://picsum.photos/200/150?random={item_id}"

                recommended_items_with_details.append({
                    'item_id': item_info.item_id,
                    'buy_rate': buy_rate,
                    'image_url': image_url,
                    'recommendation_score': recommendation_scores[idx]  # normalized 0-100 score
                })

        logger.info(f"为用户 {user_id} 生成 {len(recommended_items_with_details)} 条推荐")
        return recommended_items_with_details
    except Exception as e:
        logger.error(f"推荐商品失败: {str(e)}")
        return []


def get_recommendation_trust_factors(user_id):
    """Collect database signals quantifying recommendation trustworthiness.

    Returns a dict with:
      * similar_users       - other users sharing items with this user (capped at 100)
      * behavior_match      - % of the user's actions that are buy/fav
      * category_preference - % of the user's actions that are purchases
      * activity_score      - 100 minus 5 points per day since last activity (floored at 0)
    All zeros if any query fails.
    """
    try:
        # Items this user has interacted with.
        interacted_rows = db.session.query(
            distinct(CleanedUserBehavior.item_id)
        ).filter(
            CleanedUserBehavior.user_id == user_id
        ).all()
        interacted_item_ids = [row[0] for row in interacted_rows]

        # Other users who touched any of the same items.
        similar_users = db.session.query(
            func.count(distinct(CleanedUserBehavior.user_id))
        ).filter(
            CleanedUserBehavior.item_id.in_(interacted_item_ids),
            CleanedUserBehavior.user_id != user_id
        ).scalar() or 0

        # Total behavior count (defaulted to 1 to avoid division by zero).
        total_actions = db.session.query(
            func.count(CleanedUserBehavior.id)
        ).filter(
            CleanedUserBehavior.user_id == user_id
        ).scalar() or 1

        # High-intent behaviors: purchases and favorites.
        matched_behaviors = db.session.query(
            func.count(CleanedUserBehavior.id)
        ).filter(
            CleanedUserBehavior.user_id == user_id,
            CleanedUserBehavior.behavior_type.in_(['buy', 'fav'])
        ).scalar() or 0

        # Purchase events only.
        buy_count = db.session.query(
            func.count(CleanedUserBehavior.id)
        ).filter(
            CleanedUserBehavior.user_id == user_id,
            CleanedUserBehavior.behavior_type == 'buy'
        ).scalar() or 0

        # Most recent activity timestamp (may be None for an unknown user).
        last_activity = db.session.query(
            func.max(CleanedUserBehavior.timestamp)
        ).filter(
            CleanedUserBehavior.user_id == user_id
        ).scalar()

        # Derive the percentage/score metrics.
        behavior_match = (matched_behaviors / total_actions) * 100 if total_actions > 0 else 0
        category_preference = (buy_count / total_actions) * 100 if total_actions > 0 else 0
        activity_score = max(0, 100 - ((datetime.now() - last_activity).days * 5)) if last_activity else 0

        return {
            'similar_users': min(similar_users, 100),
            'behavior_match': behavior_match,
            'category_preference': category_preference,
            'activity_score': activity_score
        }

    except Exception as e:
        logger.error(f"获取推荐可信度数据失败: {str(e)}")
        return {
            'similar_users': 0,
            'behavior_match': 0,
            'category_preference': 0,
            'activity_score': 0
        }


# Predict user behavior
@bp.route('/predict', methods=['POST'])
@csrf.exempt
def predict():
    """POST /pr/predict

    Expects JSON with `user_id`, `model` ('LogisticRegression' |
    'DecisionTree' | 'RandomForest') and optional `prediction_type`
    ('churn_risk' | 'repurchase'). Trains and evaluates a model on the
    user's behavior history, then returns metrics, the prediction, its
    probability, recommended items and trust factors as JSON.
    """
    try:
        # Parse request parameters.
        request_data = request.get_json()
        user_id = request_data.get('user_id')
        model_type = request_data.get('model')
        prediction_type = request_data.get('prediction_type')

        if not user_id or not model_type:
            return jsonify({'success': False, 'message': '缺少 user_id 或 model 参数'})

        # Load the user's behavior history.
        data = load_data_from_db(user_id)

        # A single behavior class makes the default multiclass task
        # untrainable; fall back to recommendations only.
        unique_classes = data['behavior_type'].unique()
        if len(unique_classes) == 1 and prediction_type not in ['churn_risk', 'repurchase']:
            logger.warning(f"目标变量只有一个类别: {unique_classes[0]}，跳过模型训练")
            return jsonify({
                'success': True,
                'model': model_type,
                'prediction_type': prediction_type,
                'metrics': None,
                'recommended_items': recommend_items(user_id, data),
                'prediction_result': None,
                'prediction_prob': None,
                'data_quality': 'low'
            })

        # Preprocess; any quality below 'high' skips model training.
        # (This also covers the case where the splits are None, so the old
        # separate "X_train is None" branch was unreachable and is removed.)
        preprocess_result = preprocess_data(data, user_id, prediction_type)
        if preprocess_result[-1] != 'high':
            return jsonify({
                'success': True,
                'model': model_type,
                'prediction_type': prediction_type,
                'metrics': None,
                'recommended_items': recommend_items(user_id, data),
                'prediction_result': None,
                'prediction_prob': None,
                'data_quality': preprocess_result[-1]
            })
        X_train, X_val, X_test, y_train, y_val, y_test, _ = preprocess_result

        # Train and evaluate.
        model = train_model(model_type, X_train, y_train)
        metrics = evaluate_model(model, X_test, y_test)

        # Binary predictions (churn / repurchase): majority vote over the
        # full feature set, plus the mean positive-class probability.
        prediction_result = None
        prediction_prob = None
        if prediction_type in ['churn_risk', 'repurchase']:
            X = pd.concat([X_train, X_test])
            y_pred = model.predict(X)
            prediction_result = bool(y_pred.mean() > 0.5)
            if hasattr(model, 'predict_proba'):
                # BUGFIX: this endpoint previously returned
                # metrics.get('roc_auc') as prediction_prob, but
                # evaluate_model never produces a 'roc_auc' key, so the
                # probability was always None (and the computed
                # predict_proba result was discarded).
                prediction_prob = float(model.predict_proba(X)[:, 1].mean())

        # Personalized recommendations and trust-factor signals.
        recommended_items = recommend_items(user_id, data, prediction_type, prediction_result)
        trust_factors = get_recommendation_trust_factors(user_id)

        return jsonify({
            'success': True,
            'model': model_type,
            'prediction_type': prediction_type,
            'metrics': metrics,
            'recommended_items': recommended_items if recommended_items else [],  # always a list
            'prediction_result': prediction_result,
            'prediction_prob': prediction_prob,
            'trust_factors': {
                'similar_users': trust_factors.get('similar_users', 0),
                'behavior_match': trust_factors.get('behavior_match', 0),
                'category_preference': trust_factors.get('category_preference', 0),
                'activity_score': trust_factors.get('activity_score', 0)
            },
            # Reaching this point implies preprocessing reported 'high'.
            'data_quality': 'high'
        })

    except Exception as e:
        logger.error(f"预测失败: {str(e)}")
        return jsonify({
            'success': False,
            'message': str(e),
            'recommended_items': [],  # empty list on error
            'trust_factors': {  # neutral defaults on error
                'similar_users': 0,
                'behavior_match': 0,
                'category_preference': 0,
                'activity_score': 0
            }
        })
