import json
import time
from concurrent.futures import ThreadPoolExecutor
from io import StringIO

import mysql.connector
import numpy as np
import pandas as pd
from mysql.connector import Error
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from tqdm import tqdm


class PredictionModel:
    """Purchase-intention prediction and item recommendation model.

    Trains a gradient-boosting classifier (LightGBM, with an XGBoost
    fallback) on a sample of user-behavior data supplied by
    ``data_processor``, and caches intermediate features / ranking results
    in a MySQL ``data_cache`` table so repeated runs skip recomputation.
    """

    # Feature columns fed to the classifier. Defined once so the training
    # path and every scoring path use exactly the same columns.
    _FEATURE_COLUMNS = [
        'pv_count', 'cart_count', 'fav_count',
        'interact_count', 'time_interval', 'interact_freq',
        '转化率', '加购率', '收藏率'
    ]

    def __init__(self, data_processor, sample_ratio=0.1):
        """
        Args:
            data_processor: Object exposing ``data`` (a DataFrame assumed to
                contain user_id, item_id, category_id, behavior_type and
                datetime columns — confirm against the processor) and
                ``db_config`` (MySQL connection settings dict).
            sample_ratio: Fraction of the raw data sampled for training.
        """
        self.data_processor = data_processor
        self.data = data_processor.data
        self.sample_ratio = sample_ratio
        # Fixed random_state keeps the sample reproducible across runs.
        self.sample_data = self.data.sample(frac=sample_ratio, random_state=42)
        print(f"使用{sample_ratio * 100:.1f}%的数据进行训练，样本量: {len(self.sample_data)}")
        self.item_features = self._prepare_item_features()
        self.model = None  # trained classifier (lazily initialized)
        self.purchase_intention_ranking = None  # in-memory cache of the ranking result

    def _get_db_connection(self):
        """Open a MySQL connection from the processor's config.

        Returns:
            A live connection, or None on failure (error is printed).
        """
        try:
            connection = mysql.connector.connect(
                host=self.data_processor.db_config['host'],
                user=self.data_processor.db_config['user'],
                password=self.data_processor.db_config['password'],
                database=self.data_processor.db_config['database'],
                port=self.data_processor.db_config['port']
            )
            return connection
        except Error as e:
            print(f"数据库连接错误: {e}")
            return None

    @staticmethod
    def _serialize_cache_value(value):
        """Return (data_type, data_str) for a cacheable value.

        DataFrames become JSON records, dicts/lists become JSON text.
        Unsupported types yield (None, None) and are skipped by the caller.
        """
        if isinstance(value, pd.DataFrame):
            return "dataframe", value.to_json(orient='records')
        if isinstance(value, (dict, list)):
            return "json", json.dumps(value)
        return None, None

    def _cache_result(self, func_name, result):
        """Persist *result* into the ``data_cache`` table.

        Any previous rows for *func_name* are deleted first. Dict results
        are stored one row per key; DataFrame/list results as a single
        keyless row. Silently does nothing if no connection is available.
        """
        connection = self._get_db_connection()
        if not connection:
            return

        cursor = None  # guard: cursor() may raise before assignment
        try:
            cursor = connection.cursor()

            # Drop any stale cache entry for this function first.
            cursor.execute("DELETE FROM data_cache WHERE func_name = %s", (func_name,))

            if isinstance(result, dict):
                insert_query = """
                INSERT INTO data_cache (func_name, key_name, data_type, data)
                VALUES (%s, %s, %s, %s)
                """
                for key, value in result.items():
                    data_type, data_str = self._serialize_cache_value(value)
                    if data_type:
                        cursor.execute(insert_query, (func_name, key, data_type, data_str))
            else:
                # Single (non-dict) result. Also covers bare list results,
                # which the previous implementation silently dropped.
                data_type, data_str = self._serialize_cache_value(result)
                if data_type:
                    insert_query = """
                    INSERT INTO data_cache (func_name, data_type, data)
                    VALUES (%s, %s, %s)
                    """
                    cursor.execute(insert_query, (func_name, data_type, data_str))

            connection.commit()

        except Error as e:
            print(f"缓存数据到数据库失败: {e}")
            connection.rollback()
        finally:
            if connection.is_connected():
                if cursor is not None:
                    cursor.close()
                connection.close()

    def _load_cached_result(self, func_name):
        """Load a cached result for *func_name* from the ``data_cache`` table.

        Returns:
            The deserialized DataFrame / dict / list; a dict of them when
            the cache holds keyed rows; or None on a cache miss or any
            database error.
        """
        connection = self._get_db_connection()
        if not connection:
            return None

        def deserialize(row):
            # StringIO avoids the pandas >= 2.1 deprecation of passing a
            # literal JSON string; orient matches _serialize_cache_value.
            if row['data_type'] == 'dataframe':
                return pd.read_json(StringIO(row['data']), orient='records')
            if row['data_type'] == 'json':
                return json.loads(row['data'])
            return None

        cursor = None  # guard: cursor() may raise before assignment
        try:
            cursor = connection.cursor(dictionary=True)
            cursor.execute("SELECT * FROM data_cache WHERE func_name = %s", (func_name,))
            results = cursor.fetchall()

            if not results:
                return None

            # A single keyless row holds a plain (non-dict) cached object.
            if len(results) == 1 and results[0]['key_name'] is None:
                return deserialize(results[0])

            # Keyed rows: reassemble the original dict, skipping rows with
            # unknown data types (mirrors the serializer's skip behavior).
            result_dict = {
                row['key_name']: deserialize(row)
                for row in results
                if row['data_type'] in ('dataframe', 'json')
            }
            return result_dict

        except Error as e:
            print(f"从数据库加载缓存失败: {e}")
            return None
        finally:
            if connection.is_connected():
                if cursor is not None:
                    cursor.close()
                connection.close()

    def _prepare_item_features(self):
        """Aggregate per-item behavior counts and derived rates.

        Returns a DataFrame with one row per item_id: behavior counts,
        distinct-user count, category, and conversion/cart/favorite rates
        (each smoothed by +1 in the denominator to avoid division by zero).
        Results are cached in the database under 'item_features'.
        """
        cached_result = self._load_cached_result('item_features')
        if cached_result is not None:
            print("从数据库缓存中加载商品特征...")
            return cached_result

        start_time = time.time()
        print("正在准备商品特征...")

        item_stats = self.sample_data.groupby('item_id').agg(
            pv_count=('behavior_type', lambda x: (x == 'pv').sum()),
            buy_count=('behavior_type', lambda x: (x == 'buy').sum()),
            cart_count=('behavior_type', lambda x: (x == 'cart').sum()),
            fav_count=('behavior_type', lambda x: (x == 'fav').sum()),
            user_count=('user_id', 'nunique'),
            category_id=('category_id', 'first')
        ).reset_index()

        # +1 smoothing guards against items that were never viewed.
        item_stats['转化率'] = item_stats['buy_count'] / (item_stats['pv_count'] + 1)
        item_stats['加购率'] = item_stats['cart_count'] / (item_stats['pv_count'] + 1)
        item_stats['收藏率'] = item_stats['fav_count'] / (item_stats['pv_count'] + 1)

        print(f"商品特征准备完成，耗时: {time.time() - start_time:.2f}秒")
        self._cache_result('item_features', item_stats)  # persist to DB cache
        return item_stats

    def prepare_purchase_intention_data(self):
        """Build per (user_id, item_id) interaction features.

        Combines behavior counts, first/last interaction timestamps,
        interaction frequency, the binary label ``is_buy``, and the item's
        rate features. Cached under 'purchase_intention_features'.
        """
        cached_result = self._load_cached_result('purchase_intention_features')
        if cached_result is not None:
            print("从数据库缓存中加载用户-商品交互特征...")
            return cached_result

        start_time = time.time()
        print("正在准备用户-商品交互特征...")

        group_cols = ['user_id', 'item_id']
        features = self.sample_data.groupby(group_cols).agg(
            pv_count=('behavior_type', lambda x: (x == 'pv').sum()),
            buy_count=('behavior_type', lambda x: (x == 'buy').sum()),
            cart_count=('behavior_type', lambda x: (x == 'cart').sum()),
            fav_count=('behavior_type', lambda x: (x == 'fav').sum()),
            first_time=('datetime', lambda x: x.min().timestamp()),
            last_time=('datetime', lambda x: x.max().timestamp()),
            interact_count=('datetime', 'count')
        ).reset_index()

        # +1 avoids division by zero for single-interaction pairs.
        features['time_interval'] = features['last_time'] - features['first_time']
        features['interact_freq'] = features['interact_count'] / (features['time_interval'] + 1)
        features['is_buy'] = features['buy_count'] > 0  # classification label

        item_features = self.item_features[['item_id', '转化率', '加购率', '收藏率']]
        features = pd.merge(features, item_features, on='item_id', how='left')

        print(f"用户-商品特征准备完成，耗时: {time.time() - start_time:.2f}秒，样本量: {len(features)}")
        features = features.fillna(0)  # items missing from item_features after merge
        self._cache_result('purchase_intention_features', features)  # persist to DB cache
        return features

    def train_purchase_intention_model(self):
        """Train the model and produce each user's top-3 purchase-probability items.

        Returns a DataFrame with columns [user_id, item_id, 购买概率].
        Uses in-memory and DB caches; re-trains the model when only the
        ranking (but not the model) was recovered from cache.
        """
        # Cached ranking without a live model: retrain from scratch so
        # get_recommendations() keeps working.
        if self.purchase_intention_ranking is not None and self.model is None:
            print("缓存的排名结果存在但模型未加载，重新训练模型...")
            self.purchase_intention_ranking = None  # force the full training path

        if self.purchase_intention_ranking is not None:
            print("使用缓存的购买意向排名结果")
            return self.purchase_intention_ranking

        cached_result = self._load_cached_result('purchase_intention_ranking')
        if cached_result is not None:
            self.purchase_intention_ranking = cached_result
            # The ranking came from the DB cache; make sure a model exists too.
            if self.model is None:
                self._train_model()
            return cached_result

        start_time = time.time()
        print("开始训练购买意向预测模型...")

        features = self.prepare_purchase_intention_data()

        if len(features) > 1000000:
            # Cap training cost on very large interaction tables.
            features = features.sample(frac=0.5, random_state=42)
            print(f"数据量过大，进一步抽样至{len(features)}条记录")

        X = features[self._FEATURE_COLUMNS]
        y = features['is_buy']
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

        self._train_model(X_train, y_train, X_test, y_test)

        # Hold-out evaluation.
        y_prob = self.model.predict_proba(X_test)[:, 1]
        auc = roc_auc_score(y_test, y_prob)
        print(f"模型评估: AUC = {auc:.4f}")

        recommend_start = time.time()
        # .copy() avoids SettingWithCopyWarning on the (possibly sampled) frame.
        features_copy = features.copy()
        features_copy['购买概率'] = self.model.predict_proba(features_copy[self._FEATURE_COLUMNS])[:, 1]

        print("正在生成用户购买概率排行榜...")
        # Vectorized top-3 per user. rank(method='min') <= 3 reproduces
        # nlargest(3, keep='all') tie semantics (ties on the 3rd-best
        # probability are all kept) without scanning the frame once per
        # user, which was O(users x rows).
        within_user_rank = features_copy.groupby('user_id')['购买概率'].rank(
            method='min', ascending=False)
        top_recommendations = (
            features_copy.loc[within_user_rank <= 3, ['user_id', 'item_id', '购买概率']]
            .reset_index(drop=True)
        )

        print(f"推荐生成完成，耗时: {time.time() - recommend_start:.2f}秒")
        print(f"总耗时: {time.time() - start_time:.2f}秒")

        self.purchase_intention_ranking = top_recommendations
        self._cache_result('purchase_intention_ranking', top_recommendations)

        return top_recommendations

    def _train_model(self, X_train=None, y_train=None, X_test=None, y_test=None):
        """Fit the classifier, preparing train data if none is supplied.

        X_test/y_test are accepted for interface compatibility but are not
        used here; evaluation happens in train_purchase_intention_model().
        """
        if X_train is None or y_train is None:
            features = self.prepare_purchase_intention_data()
            X = features[self._FEATURE_COLUMNS]
            y = features['is_buy']
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

        # Prefer LightGBM; fall back to XGBoost when it is not installed.
        try:
            import lightgbm as lgb
            self.model = lgb.LGBMClassifier(
                n_estimators=50,
                learning_rate=0.1,
                max_depth=3,
                num_leaves=31,
                n_jobs=-1,
                random_state=42,
                verbosity=-1
            )
            print("使用LightGBM模型")
        except ImportError:
            import xgboost as xgb
            self.model = xgb.XGBClassifier(
                n_estimators=50,
                learning_rate=0.1,
                max_depth=3,
                n_jobs=-1,
                random_state=42,
                verbosity=0
            )
            print("使用XGBoost模型")

        model_start = time.time()
        self.model.fit(X_train, y_train)
        print(f"模型训练完成，耗时: {time.time() - model_start:.2f}秒")

    def build_recommendation_model(self):
        """Ensure a trained model exists and return it."""
        if self.model is None:
            self.train_purchase_intention_model()
        return self.model

    def get_recommendations(self, user_id, num_recs=10):
        """Return the top *num_recs* items for *user_id* by purchase probability.

        Returns an empty frame with the expected columns when the user has
        no interaction features.
        """
        # Single lazy build (the original checked self.model and then
        # called build_recommendation_model a second time anyway).
        model = self.build_recommendation_model()
        features = self.prepare_purchase_intention_data()

        # .copy() so the probability column is written to a real copy,
        # never a view of the cached feature frame.
        user_data = features[features['user_id'] == user_id].copy()
        if user_data.empty:
            return pd.DataFrame(columns=['user_id', 'item_id', '购买概率'])

        user_data.loc[:, '购买概率'] = model.predict_proba(user_data[self._FEATURE_COLUMNS])[:, 1]
        recommendations = user_data.nlargest(num_recs, '购买概率', keep='all')[['user_id', 'item_id', '购买概率']]
        return recommendations
