import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
from sklearn.metrics.pairwise import cosine_similarity
import lightgbm as lgb
import tensorflow as tf
from keras import layers, Model
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
import pickle
import warnings
from datetime import datetime, timedelta
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from collections import defaultdict, Counter
import random
from scipy import stats
import math

# Silence library warnings (sklearn/tensorflow/pandas emit noisy deprecation messages).
warnings.filterwarnings('ignore')


class ImprovedTieredRecommendationSystem:
    """
    Improved tiered anti-Matthew-effect recommendation system.

    Movies are partitioned into lifecycle pools (cold_start / test / growth /
    mature) plus an orthogonal long_tail pool; each pool gets its own model
    and a reserved share of recommendation traffic so that popular titles do
    not crowd out new and niche ones.
    """

    def __init__(self, sample_ratio=0.2, fast_mode=True):
        """Initialize encoders, tier pools, traffic quotas and metric stores."""
        # Core preprocessing components.
        self.user_encoder = LabelEncoder()
        self.movie_encoder = LabelEncoder()
        self.genre_columns = []
        self.sample_ratio = sample_ratio
        self.fast_mode = fast_mode

        # One model per lifecycle tier; populated by train_improved_models().
        self.cold_start_model = None
        self.growth_model = None
        self.mature_model = None
        self.long_tail_model = None

        # Cached feature tables, per-user profiles and similarity matrix.
        self.movie_features = None
        self.user_features = None
        self.user_preferences = {}
        self.movie_similarity_matrix = None

        # Movie tier membership; a movie may sit in a lifecycle pool and in
        # 'long_tail' at the same time.
        pool_names = ('cold_start', 'test', 'growth', 'mature', 'long_tail')
        self.movie_pools = {name: set() for name in pool_names}

        # Bookkeeping for movies currently in the test pool.
        self.test_pool_performance = {}
        self.test_pool_exposure = {}

        # Share of recommendation traffic reserved for each pool.
        self.traffic_allocation = {
            'cold_start': 0.08,
            'test': 0.12,
            'growth': 0.25,
            'mature': 0.35,
            'long_tail': 0.20,
        }

        # Knobs for diversity enforcement during final ranking.
        self.diversity_controller = {
            'max_same_genre': 2,
            'max_popular_ratio': 0.3,
            'min_long_tail_ratio': 0.25,
            'similarity_threshold': 0.7,
            'exploration_boost': 1.3,
        }

        # Containers filled in by the evaluation pipeline.
        self.evaluation_results = {}
        self.pool_performance = {}
        self.model_performance = {}
        self.anti_matthew_metrics = {}
        self.diversity_metrics = {}
        self.business_metrics = {}

    def load_and_prepare_data(self, file_path):
        """加载并预处理数据"""
        print("=== 加载数据 ===")
        df = pd.read_csv(file_path)

        if self.fast_mode and len(df) > 50000:
            print(f"快速模式：采样 {self.sample_ratio * 100}% 的数据")
            df = df.sample(frac=self.sample_ratio, random_state=42).reset_index(drop=True)

        # 类型列处理
        self.genre_columns = [
            '(no genres listed)', 'Action', 'Adventure', 'Animation', 'Children',
            'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir',
            'Horror', 'IMAX', 'Musical', 'Mystery', 'Romance', 'Sci-Fi',
            'Thriller', 'War', 'Western'
        ]

        for col in self.genre_columns:
            if col not in df.columns:
                df[col] = 0

        # ID编码
        df['user_id_encoded'] = self.user_encoder.fit_transform(df['userId'])
        df['movie_id_encoded'] = self.movie_encoder.fit_transform(df['movieId'])

        # 时间特征
        df['datetime'] = pd.to_datetime(df['timestamp'], unit='s')
        df['days_since_first'] = (df['datetime'] - df['datetime'].min()).dt.days

        print(f"数据处理完成：{len(df)} 条记录，{df['userId'].nunique()} 用户，{df['movieId'].nunique()} 电影")

        return df

    def extract_enhanced_features(self, df):
        """Compute per-movie and per-user statistics used by all models.

        Populates self.movie_features and self.user_features and builds the
        movie similarity matrix. Expects df to carry userId, movieId, rating,
        timestamp and the binary genre columns.
        """
        print("=== 提取增强特征 ===")

        # Per-movie aggregates over ratings, timestamps and distinct raters.
        movie_stats = df.groupby('movieId').agg({
            'rating': ['count', 'mean', 'std', 'min', 'max'],
            'timestamp': ['min', 'max'],
            'userId': 'nunique'
        }).reset_index()

        movie_stats.columns = ['movieId', 'rating_count', 'avg_rating', 'rating_std',
                               'min_rating', 'max_rating', 'first_rating_time', 'last_rating_time', 'unique_users']
        # std is NaN for single-rating movies (ddof=1); use 1.0 as neutral default.
        movie_stats['rating_std'] = movie_stats['rating_std'].fillna(1.0)

        # Derived signals: movie age (days), rating velocity, spread, reach.
        current_time = df['timestamp'].max()
        movie_stats['days_since_first_rating'] = (current_time - movie_stats['first_rating_time']) / (24 * 3600)
        movie_stats['rating_velocity'] = movie_stats['rating_count'] / (movie_stats['days_since_first_rating'] + 1)
        movie_stats['rating_range'] = movie_stats['max_rating'] - movie_stats['min_rating']
        movie_stats['user_penetration'] = movie_stats['unique_users'] / df['userId'].nunique()

        # Attach genre indicators (constant per movie, so first() suffices).
        movie_genre_info = df.groupby('movieId')[self.genre_columns].first().reset_index()
        movie_stats = movie_stats.merge(movie_genre_info, on='movieId')
        movie_stats['genre_count'] = movie_stats[self.genre_columns].sum(axis=1)

        # Composite quality: rating level, volume (log-damped), consistency, reach.
        movie_stats['quality_score'] = (
                movie_stats['avg_rating'] * 0.4 +
                np.log1p(movie_stats['rating_count']) * 0.3 +
                (5 - movie_stats['rating_std']) * 0.2 +
                movie_stats['user_penetration'] * 0.1
        )

        # Popularity tiers from rating-count quantiles (50% / 80% / 95%).
        popularity_quantiles = movie_stats['rating_count'].quantile([0.5, 0.8, 0.95])
        movie_stats['popularity_tier'] = pd.cut(
            movie_stats['rating_count'],
            bins=[-1, popularity_quantiles[0.5], popularity_quantiles[0.8], popularity_quantiles[0.95], np.inf],
            labels=['niche', 'moderate', 'popular', 'viral']
        )

        self.movie_features = movie_stats

        # Per-user aggregates mirroring the movie-side statistics.
        user_stats = df.groupby('userId').agg({
            'rating': ['count', 'mean', 'std', 'min', 'max'],
            'movieId': 'nunique',
            'timestamp': ['min', 'max']
        }).reset_index()

        user_stats.columns = ['userId', 'rating_count', 'avg_rating', 'rating_std',
                              'min_rating', 'max_rating', 'unique_movies', 'first_time', 'last_time']
        user_stats['rating_std'] = user_stats['rating_std'].fillna(1.0)
        user_stats['active_days'] = (user_stats['last_time'] - user_stats['first_time']) / (24 * 3600)
        user_stats['rating_frequency'] = user_stats['rating_count'] / (user_stats['active_days'] + 1)
        user_stats['rating_range'] = user_stats['max_rating'] - user_stats['min_rating']
        # Distinct movies per rating (1.0 means the user never re-rates a movie).
        user_stats['exploration_tendency'] = user_stats['unique_movies'] / user_stats['rating_count']

        self.user_features = user_stats

        # Movie-to-movie similarity, used later for diversity control.
        self._build_movie_similarity_matrix(df)

        print(f"增强特征提取完成：电影特征 {len(self.movie_features)} 个，用户特征 {len(self.user_features)} 个")

    def _build_movie_similarity_matrix(self, df):
        """Blend genre-based and rating-pattern similarity into one matrix."""
        print("构建电影相似度矩阵...")

        # Content similarity from the binary genre indicator vectors.
        by_genre = cosine_similarity(self.movie_features[self.genre_columns].values)

        # Collaborative similarity from the (movie x user) rating matrix.
        # NOTE(review): rows are assumed aligned because both movie_features
        # (groupby) and pivot_table sort by movieId — confirm if either changes.
        ratings_matrix = df.pivot_table(index='movieId', columns='userId', values='rating', fill_value=0)
        by_ratings = cosine_similarity(ratings_matrix.values)

        # Weighted blend favouring content similarity.
        self.movie_similarity_matrix = 0.7 * by_genre + 0.3 * by_ratings

        print("电影相似度矩阵构建完成")

    def analyze_and_categorize_movies_improved(self, df):
        """改进的电影分池策略"""
        print("=== 改进的电影分池分析 ===")

        for _, movie_row in self.movie_features.iterrows():
            movie_id = movie_row['movieId']
            rating_count = movie_row['rating_count']
            avg_rating = movie_row['avg_rating']
            quality_score = movie_row['quality_score']
            rating_velocity = movie_row['rating_velocity']
            popularity_tier = movie_row['popularity_tier']

            # 改进的分池逻辑
            if rating_count < 5:  # 降低冷启动阈值
                self.movie_pools['cold_start'].add(movie_id)
            elif rating_count < 25:  # 降低测试池阈值
                self.movie_pools['test'].add(movie_id)
                self.test_pool_performance[movie_id] = {
                    'avg_rating': avg_rating,
                    'rating_velocity': rating_velocity,
                    'quality_score': quality_score,
                    'exposure_count': 0,
                    'click_count': 0,
                    'entry_time': datetime.now(),
                    'promotion_score': 0.0
                }
            elif rating_count < 100:  # 降低成长池阈值
                self.movie_pools['growth'].add(movie_id)
            else:
                self.movie_pools['mature'].add(movie_id)

            # 更宽松的长尾池标准
            if (rating_count < 80 and avg_rating >= 3.8) or \
                    (rating_count < 40 and avg_rating >= 4.0) or \
                    (popularity_tier in ['niche', 'moderate'] and quality_score >= 3.8):
                self.movie_pools['long_tail'].add(movie_id)

        # 打印分池结果
        for pool_name, movies in self.movie_pools.items():
            print(f"{pool_name:12}: {len(movies):5} 个电影")

    def analyze_user_preferences_enhanced(self, df):
        """增强的用户偏好分析"""
        print("=== 增强的用户偏好分析 ===")

        for user_id in df['userId'].unique():
            user_data = df[df['userId'] == user_id]
            user_high_rated = user_data[user_data['rating'] >= 4.0]

            if len(user_high_rated) == 0:
                self.user_preferences[user_id] = self._default_user_preference()
                continue

            # 分析用户在各池的偏好
            pool_preferences = {}
            for pool_name, pool_movies in self.movie_pools.items():
                pool_ratings = user_high_rated[user_high_rated['movieId'].isin(pool_movies)]
                pool_preferences[pool_name] = len(pool_ratings) / len(user_high_rated) if len(
                    user_high_rated) > 0 else 0

            # 用户类型标签（更细致的分类）
            user_type = self._classify_user_type_enhanced(user_data, pool_preferences)

            # 偏好类型分析
            user_genres = user_high_rated[self.genre_columns].sum()
            preferred_genres = user_genres.sort_values(ascending=False).head(3).index.tolist()

            # 增强的探索倾向计算
            exploration_score = (
                    pool_preferences.get('test', 0) * 1.5 +
                    pool_preferences.get('long_tail', 0) * 1.2 +
                    pool_preferences.get('cold_start', 0) * 1.0
            )

            # 多样性偏好
            user_movie_genres = set()
            for _, rating_row in user_high_rated.iterrows():
                movie_genres = [g for g in self.genre_columns if rating_row.get(g, 0) == 1]
                user_movie_genres.update(movie_genres)

            diversity_preference = len(user_movie_genres) / len(self.genre_columns)

            self.user_preferences[user_id] = {
                'pool_preferences': pool_preferences,
                'preferred_genres': preferred_genres,
                'user_type': user_type,
                'exploration_score': min(1.0, exploration_score),
                'diversity_preference': diversity_preference,
                'avg_rating': user_high_rated['rating'].mean(),
                'rating_count': len(user_data),
                'quality_sensitivity': user_data['rating'].std(),
                'novelty_seeking': exploration_score * diversity_preference
            }

    def _classify_user_type_enhanced(self, user_data, pool_preferences):
        """增强的用户类型分类"""
        mature_pref = pool_preferences.get('mature', 0)
        long_tail_pref = pool_preferences.get('long_tail', 0)
        test_pref = pool_preferences.get('test', 0)
        cold_start_pref = pool_preferences.get('cold_start', 0)

        exploration_score = test_pref + long_tail_pref + cold_start_pref

        if mature_pref > 0.7:
            return 'mainstream'
        elif exploration_score > 0.4:
            return 'explorer'
        elif test_pref > 0.15:
            return 'early_adopter'
        elif long_tail_pref > 0.2:
            return 'niche_lover'
        else:
            return 'balanced'

    def _default_user_preference(self):
        """默认用户偏好"""
        return {
            'pool_preferences': {
                'cold_start': 0.08,
                'test': 0.12,
                'growth': 0.25,
                'mature': 0.35,
                'long_tail': 0.20
            },
            'preferred_genres': ['Drama', 'Comedy', 'Action'],
            'user_type': 'balanced',
            'exploration_score': 0.4,
            'diversity_preference': 0.5,
            'avg_rating': 3.5,
            'rating_count': 0,
            'quality_sensitivity': 1.0,
            'novelty_seeking': 0.3
        }

    # ==================== 改进的模型架构 ====================

    def build_improved_neural_model(self, user_vocab_size, movie_vocab_size, feature_dim,
                                    embedding_dim=64, model_type='enhanced'):
        """Build a two-tower Keras model scoring (user, movie) pairs in [0, 1].

        Inputs are a user id, a movie id, and a dense movie-feature vector of
        length feature_dim. model_type='lightweight' builds shallow towers
        (used for the cold-start pool); any other value builds the deeper
        'enhanced' variant. Returns a compiled Model (binary cross-entropy,
        AUC / precision / recall metrics).
        """

        # User tower: L2-regularized embedding flattened to a dense vector.
        user_input = layers.Input(shape=(), name='user_id')
        user_emb = layers.Embedding(user_vocab_size, embedding_dim,
                                    embeddings_regularizer=tf.keras.regularizers.l2(0.001))(user_input)
        user_vec = layers.Reshape((embedding_dim,))(user_emb)

        # Movie tower: embedding plus a dense side-feature vector.
        movie_input = layers.Input(shape=(), name='movie_id')
        movie_feature_input = layers.Input(shape=(feature_dim,), name='movie_features')

        movie_emb = layers.Embedding(movie_vocab_size, embedding_dim,
                                     embeddings_regularizer=tf.keras.regularizers.l2(0.001))(movie_input)
        movie_vec = layers.Reshape((embedding_dim,))(movie_emb)

        # Normalize and project the raw movie features before merging.
        movie_features_normalized = layers.BatchNormalization()(movie_feature_input)
        movie_features_dense = layers.Dense(embedding_dim // 2, activation='relu')(movie_features_normalized)

        # Merge embedding and projected features into one movie vector.
        movie_concat = layers.Concatenate()([movie_vec, movie_features_dense])

        if model_type == 'lightweight':
            # Lightweight towers (used for the cold-start pool).
            user_dense = layers.Dense(64, activation='relu')(user_vec)
            user_dense = layers.Dropout(0.2)(user_dense)
            user_tower = layers.Dense(32, activation='relu')(user_dense)

            movie_dense = layers.Dense(64, activation='relu')(movie_concat)
            movie_dense = layers.Dropout(0.2)(movie_dense)
            movie_tower = layers.Dense(32, activation='relu')(movie_dense)

        else:
            # Enhanced towers (used for the growth and mature pools).
            # Deeper user tower with batch norm and dropout.
            user_dense = layers.Dense(128, activation='relu')(user_vec)
            user_dense = layers.BatchNormalization()(user_dense)
            user_dense = layers.Dropout(0.3)(user_dense)
            user_dense = layers.Dense(64, activation='relu')(user_dense)
            user_tower = layers.Dense(32, activation='relu')(user_dense)

            # Deeper movie tower, mirroring the user side.
            movie_dense = layers.Dense(128, activation='relu')(movie_concat)
            movie_dense = layers.BatchNormalization()(movie_dense)
            movie_dense = layers.Dropout(0.3)(movie_dense)
            movie_dense = layers.Dense(64, activation='relu')(movie_dense)
            movie_tower = layers.Dense(32, activation='relu')(movie_dense)

        # Interaction layers between the two towers:
        # dot-product interaction,
        dot_product = layers.Dot(axes=-1)([user_tower, movie_tower])

        # element-wise product interaction,
        element_wise = layers.Multiply()([user_tower, movie_tower])

        # and plain concatenation of both towers plus the product.
        concat_interaction = layers.Concatenate()([user_tower, movie_tower, element_wise])

        # Final prediction head (depth depends on the model variant).
        if model_type == 'lightweight':
            final_layer = layers.Dense(16, activation='relu')(concat_interaction)
        else:
            final_layer = layers.Dense(64, activation='relu')(concat_interaction)
            final_layer = layers.Dropout(0.2)(final_layer)
            final_layer = layers.Dense(32, activation='relu')(final_layer)

        # Append the dot product as an extra scalar feature before the sigmoid.
        final_input = layers.Concatenate()([final_layer, layers.Reshape((1,))(dot_product)])
        output = layers.Dense(1, activation='sigmoid')(final_input)

        model = Model(inputs=[user_input, movie_input, movie_feature_input], outputs=output)

        # Variant-specific learning rates: lighter model trains faster.
        if model_type == 'lightweight':
            optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        else:
            optimizer = tf.keras.optimizers.Adam(learning_rate=0.0005)

        model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['AUC', 'Precision', 'Recall'])

        return model

    def build_improved_lgb_model(self):
        """构建改进的LightGBM模型"""
        params = {
            'objective': 'binary',
            'metric': 'auc',
            'boosting_type': 'gbdt',
            'num_leaves': 128,  # 增加复杂度
            'learning_rate': 0.03,  # 降低学习率
            'feature_fraction': 0.8,
            'bagging_fraction': 0.8,
            'bagging_freq': 5,
            'lambda_l1': 0.1,
            'lambda_l2': 0.1,
            'min_data_in_leaf': 10,  # 减少最小叶子节点数据
            'max_depth': 8,  # 增加深度
            'verbose': -1,
            'seed': 42
        }
        return params

    def prepare_enhanced_training_data(self, df, pool_name):
        """准备增强的训练数据"""
        print(f"准备 {pool_name} 池增强训练数据...")

        pool_movies = self.movie_pools[pool_name]
        if not pool_movies:
            return None, None, None

        # 过滤该池子的数据
        pool_df = df[df['movieId'].isin(pool_movies)]
        if len(pool_df) < 50:  # 降低最小数据要求
            print(f"{pool_name} 池数据量不足，跳过训练")
            return None, None, None

        # 更平衡的正负样本
        pos_samples = pool_df[pool_df['rating'] >= 4.0]
        mid_samples = pool_df[(pool_df['rating'] >= 3.0) & (pool_df['rating'] < 4.0)]

        # 增加样本数量
        sample_size = min(5000, len(pos_samples))
        if sample_size < len(pos_samples):
            pos_samples = pos_samples.sample(n=sample_size, random_state=42)

        # 负样本生成改进
        neg_samples = []
        users = pool_df['userId'].unique()
        all_movies = list(pool_movies)
        user_movie_pairs = set(zip(pool_df['userId'], pool_df['movieId']))

        # 生成更多负样本
        neg_count = len(pos_samples) * 2  # 1:2的正负比例
        attempts = 0
        max_attempts = neg_count * 10

        while len(neg_samples) < neg_count and attempts < max_attempts:
            user = np.random.choice(users)
            movie = np.random.choice(all_movies)
            if (user, movie) not in user_movie_pairs:
                neg_samples.append({'userId': user, 'movieId': movie, 'rating': 0})
            attempts += 1

        neg_df = pd.DataFrame(neg_samples[:neg_count])

        # 合并数据
        pos_samples_clean = pos_samples[['userId', 'movieId']].copy()
        pos_samples_clean['label'] = 1
        neg_df['label'] = 0
        neg_samples_clean = neg_df[['userId', 'movieId', 'label']]

        train_data = pd.concat([pos_samples_clean, neg_samples_clean], ignore_index=True)

        # 编码
        train_data['user_id_encoded'] = self.user_encoder.transform(train_data['userId'])
        train_data['movie_id_encoded'] = self.movie_encoder.transform(train_data['movieId'])

        # 增强特征
        movie_features_subset = self.movie_features[self.movie_features['movieId'].isin(pool_movies)]
        feature_cols = ['avg_rating', 'rating_count', 'quality_score', 'rating_velocity',
                        'genre_count', 'user_penetration', 'rating_range']
        train_data = train_data.merge(
            movie_features_subset[['movieId'] + feature_cols],
            on='movieId',
            how='left'
        ).fillna(0)

        X = train_data[['user_id_encoded', 'movie_id_encoded'] + feature_cols]
        y = train_data['label']

        return X, y, feature_cols

    def train_improved_models(self, df):
        """Train the per-pool models (cold-start NN, growth NN, long-tail LGBM).

        Bug fix: each pool now uses the feature_cols returned by its own
        prepare_enhanced_training_data call. Previously the growth and
        long-tail branches reused the cold-start pool's feature_cols, which
        crashed (len(None) / indexing with None) whenever the cold-start
        pool had been skipped for lack of data.
        """
        print("=== 训练改进的分层模型 ===")

        vocab_sizes = {
            'user': df['userId'].nunique() + 1,
            'movie': df['movieId'].nunique() + 1
        }

        def make_inputs(X, feature_cols):
            # Split a feature frame into the three model input arrays.
            return [
                X['user_id_encoded'].values,
                X['movie_id_encoded'].values,
                X[feature_cols].values
            ]

        # 1. Cold-start pool: lightweight network.
        X_cold, y_cold, cold_cols = self.prepare_enhanced_training_data(df, 'cold_start')
        if X_cold is not None:
            print("训练改进的冷启动模型...")
            self.cold_start_model = self.build_improved_neural_model(
                vocab_sizes['user'], vocab_sizes['movie'], len(cold_cols),
                embedding_dim=32, model_type='lightweight'
            )

            X_train, X_test, y_train, y_test = train_test_split(X_cold, y_cold, test_size=0.2, random_state=42)
            train_input = make_inputs(X_train, cold_cols)
            test_input = make_inputs(X_test, cold_cols)

            callbacks = [
                EarlyStopping(monitor='val_auc', patience=5, restore_best_weights=True, mode='max'),
                ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, min_lr=0.0001)
            ]

            self.cold_start_model.fit(
                train_input, y_train,
                validation_data=(test_input, y_test),
                epochs=20, batch_size=256, verbose=1,
                callbacks=callbacks
            )

            self._evaluate_neural_model(self.cold_start_model, test_input, y_test, 'cold_start')

        # 2. Growth pool: deeper network.
        X_growth, y_growth, growth_cols = self.prepare_enhanced_training_data(df, 'growth')
        if X_growth is not None:
            print("训练改进的成长期模型...")
            self.growth_model = self.build_improved_neural_model(
                vocab_sizes['user'], vocab_sizes['movie'], len(growth_cols),
                embedding_dim=64, model_type='enhanced'
            )

            X_train, X_test, y_train, y_test = train_test_split(X_growth, y_growth, test_size=0.2, random_state=42)
            train_input = make_inputs(X_train, growth_cols)
            test_input = make_inputs(X_test, growth_cols)

            callbacks = [
                EarlyStopping(monitor='val_auc', patience=8, restore_best_weights=True, mode='max'),
                ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=4, min_lr=0.0001)
            ]

            self.growth_model.fit(
                train_input, y_train,
                validation_data=(test_input, y_test),
                epochs=30, batch_size=512, verbose=1,
                callbacks=callbacks
            )

            self._evaluate_neural_model(self.growth_model, test_input, y_test, 'growth')

        # 3. Mature pool reuses the growth model (None if growth was skipped).
        self.mature_model = self.growth_model

        # 4. Long-tail pool: weighted LightGBM to protect under-exposed titles.
        X_longtail, y_longtail, tail_cols = self.prepare_enhanced_training_data(df, 'long_tail')
        if X_longtail is not None:
            print("训练改进的长尾保护模型...")

            # Up-weight positive long-tail interactions.
            sample_weights = np.where(y_longtail == 1, 2.0, 1.0)

            X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(
                X_longtail[tail_cols], y_longtail, sample_weights, test_size=0.2, random_state=42
            )

            lgb_params = self.build_improved_lgb_model()
            train_data = lgb.Dataset(X_train, label=y_train, weight=w_train)
            val_data = lgb.Dataset(X_test, label=y_test, weight=w_test)

            self.long_tail_model = lgb.train(
                lgb_params, train_data, valid_sets=[val_data],
                num_boost_round=200, callbacks=[lgb.early_stopping(15), lgb.log_evaluation(20)]
            )

            self._evaluate_lgb_model(self.long_tail_model, X_test, y_test, 'long_tail')

        print("改进的分层模型训练完成")

    # ==================== 改进的候选生成和排序 ====================

    def enhanced_candidate_generation(self, user_id, total_candidates=500):  # 增加候选数量
        """增强的候选生成"""
        candidates_by_pool = {}
        user_pref = self.user_preferences.get(user_id, self._default_user_preference())

        for pool_name, pool_movies in self.movie_pools.items():
            if not pool_movies:
                continue

            # 基础配额
            base_quota = int(total_candidates * self.traffic_allocation[pool_name])

            # 根据用户偏好调整
            user_pool_pref = user_pref['pool_preferences'].get(pool_name, self.traffic_allocation[pool_name])
            adjustment_factor = (user_pool_pref / self.traffic_allocation[pool_name]) * 0.5 + 0.75
            adjusted_quota = int(base_quota * adjustment_factor)

            # 为探索型用户增加长尾和测试池配额
            if user_pref['user_type'] in ['explorer', 'early_adopter', 'niche_lover']:
                if pool_name in ['long_tail', 'test', 'cold_start']:
                    adjusted_quota = int(adjusted_quota * 1.3)
                elif pool_name == 'mature':
                    adjusted_quota = int(adjusted_quota * 0.8)

            pool_candidates = self._generate_enhanced_pool_candidates(
                user_id, pool_name, pool_movies, adjusted_quota, user_pref
            )
            candidates_by_pool[pool_name] = pool_candidates

        all_candidates = []
        for pool_candidates in candidates_by_pool.values():
            all_candidates.extend(pool_candidates)

        return all_candidates, candidates_by_pool

    def _generate_enhanced_pool_candidates(self, user_id, pool_name, pool_movies, quota, user_pref):
        """增强的池候选生成"""
        pool_movies_list = list(pool_movies)
        candidates = []

        if pool_name == 'long_tail':
            # 长尾池：多样化策略
            candidates = self._select_diversified_long_tail(pool_movies_list, quota, user_pref)

        elif pool_name == 'test':
            # 测试池：潜力挖掘
            candidates = self._select_potential_test_movies(pool_movies_list, quota, user_pref)

        elif pool_name == 'cold_start':
            # 冷启动池：高质量优先
            candidates = self._select_quality_cold_start(pool_movies_list, quota, user_pref)

        else:
            # 成长池和成熟池：平衡策略
            candidates = self._select_balanced_candidates(pool_movies_list, quota, user_pref)

        return candidates

    def _select_diversified_long_tail(self, movies, quota, user_pref):
        """选择多样化的长尾电影"""
        if not movies:
            return []

        candidates = []
        preferred_genres = user_pref.get('preferred_genres', [])

        # 30%来自用户偏好类型
        genre_quota = int(quota * 0.3)
        quality_quota = int(quota * 0.4)
        random_quota = quota - genre_quota - quality_quota

        # 偏好类型长尾
        genre_candidates = []
        for movie_id in movies:
            movie_data = self.movie_features[self.movie_features['movieId'] == movie_id]
            if not movie_data.empty:
                movie_row = movie_data.iloc[0]
                movie_genres = [g for g in self.genre_columns if movie_row.get(g, 0) == 1]
                if any(genre in preferred_genres for genre in movie_genres):
                    genre_candidates.append((movie_id, movie_row['quality_score']))

        genre_candidates.sort(key=lambda x: x[1], reverse=True)
        candidates.extend([m for m, _ in genre_candidates[:genre_quota]])

        # 高质量长尾
        remaining_movies = [m for m in movies if m not in candidates]
        quality_candidates = []
        for movie_id in remaining_movies:
            movie_data = self.movie_features[self.movie_features['movieId'] == movie_id]
            if not movie_data.empty:
                quality_score = movie_data.iloc[0]['quality_score']
                quality_candidates.append((movie_id, quality_score))

        quality_candidates.sort(key=lambda x: x[1], reverse=True)
        candidates.extend([m for m, _ in quality_candidates[:quality_quota]])

        # 随机探索
        remaining_movies = [m for m in movies if m not in candidates]
        if remaining_movies and random_quota > 0:
            random_selected = random.sample(remaining_movies, min(random_quota, len(remaining_movies)))
            candidates.extend(random_selected)

        return candidates[:quota]

    def _select_potential_test_movies(self, movies, quota, user_pref):
        """选择有潜力的测试电影"""
        candidates_with_score = []

        for movie_id in movies:
            if movie_id in self.test_pool_performance:
                perf = self.test_pool_performance[movie_id]
                # 计算潜力分数
                potential_score = (
                        perf['avg_rating'] * 0.4 +
                        perf['quality_score'] * 0.3 +
                        min(perf['rating_velocity'] * 10, 1.0) * 0.3
                )
                candidates_with_score.append((movie_id, potential_score))

        # 添加其他测试池电影
        for movie_id in movies:
            if movie_id not in [c[0] for c in candidates_with_score]:
                movie_data = self.movie_features[self.movie_features['movieId'] == movie_id]
                if not movie_data.empty:
                    quality_score = movie_data.iloc[0]['quality_score']
                    candidates_with_score.append((movie_id, quality_score))

        candidates_with_score.sort(key=lambda x: x[1], reverse=True)
        return [movie_id for movie_id, _ in candidates_with_score[:quota]]

    def _select_quality_cold_start(self, movies, quota, user_pref):
        """选择高质量冷启动电影"""
        candidates_with_quality = []

        for movie_id in movies:
            movie_data = self.movie_features[self.movie_features['movieId'] == movie_id]
            if not movie_data.empty:
                movie_row = movie_data.iloc[0]
                # 冷启动电影的质量评估
                quality_score = (
                        movie_row['avg_rating'] * 0.5 +
                        movie_row['quality_score'] * 0.3 +
                        movie_row['genre_count'] * 0.2  # 类型丰富度
                )
                candidates_with_quality.append((movie_id, quality_score))

        candidates_with_quality.sort(key=lambda x: x[1], reverse=True)
        return [movie_id for movie_id, _ in candidates_with_quality[:quota]]

    def _select_balanced_candidates(self, movies, quota, user_pref):
        """选择平衡的候选"""
        # 简单的随机选择，可以根据需要进一步优化
        return random.sample(movies, min(quota, len(movies)))

    def enhanced_diversify_recommendations(self, candidates_with_scores, user_id, target_count=10):
        """Assemble the final recommendation list under traffic-allocation and diversity constraints.

        Args:
            candidates_with_scores: list of (movie_id, score) pairs.
            user_id: target user, used to look up the preference profile.
            target_count: desired number of recommendations.

        Returns:
            list of (movie_id, score, pool_name) triples, at most `target_count` long.
        """
        # NOTE(review): dict.get evaluates its default eagerly, so
        # _default_user_preference() is built even for known users — confirm
        # whether that construction is cheap enough to leave as-is.
        user_pref = self.user_preferences.get(user_id, self._default_user_preference())

        # Group candidates by the movie pool they belong to (first match wins).
        pool_candidates = {pool: [] for pool in self.movie_pools.keys()}
        for movie_id, score in candidates_with_scores:
            for pool_name, pool_movies in self.movie_pools.items():
                if movie_id in pool_movies:
                    pool_candidates[pool_name].append((movie_id, score))
                    break

        final_recommendations = []
        selected_genres = Counter()

        # Enforce the per-pool traffic-allocation quotas.
        for pool_name, ratio in self.traffic_allocation.items():
            pool_quota = max(1, int(target_count * ratio))

            # Explorers / early adopters get extra exploratory slots and fewer mature ones.
            if user_pref['user_type'] in ['explorer', 'early_adopter']:
                if pool_name in ['long_tail', 'test']:
                    pool_quota = min(target_count - len(final_recommendations), int(pool_quota * 1.2))
                elif pool_name == 'mature':
                    pool_quota = max(1, int(pool_quota * 0.8))

            # Fill this pool's quota greedily from its highest-scoring candidates.
            pool_items = sorted(pool_candidates[pool_name], key=lambda x: x[1], reverse=True)
            selected_count = 0

            for movie_id, score in pool_items:
                if selected_count >= pool_quota or len(final_recommendations) >= target_count:
                    break

                # Only admit movies that pass the genre/similarity diversity check.
                if self._enhanced_diversity_check(movie_id, final_recommendations, selected_genres, user_pref):
                    final_recommendations.append((movie_id, score, pool_name))
                    selected_count += 1

                    # Track genre counts so later picks respect the per-genre cap.
                    movie_data = self.movie_features[self.movie_features['movieId'] == movie_id]
                    if not movie_data.empty:
                        movie_row = movie_data.iloc[0]
                        for genre in self.genre_columns:
                            if movie_row.get(genre, 0) == 1:
                                selected_genres[genre] += 1

        # Backfill any remaining slots with the best unused candidates overall.
        remaining_slots = target_count - len(final_recommendations)
        if remaining_slots > 0:
            used_movies = {item[0] for item in final_recommendations}
            all_remaining = [(m, s) for m, s in candidates_with_scores if m not in used_movies]
            all_remaining.sort(key=lambda x: x[1], reverse=True)

            for movie_id, score in all_remaining:
                if len(final_recommendations) >= target_count:
                    break
                if self._enhanced_diversity_check(movie_id, final_recommendations, selected_genres, user_pref):
                    pool_name = self._get_movie_pool(movie_id)
                    final_recommendations.append((movie_id, score, pool_name))

        return final_recommendations[:target_count]

    def _enhanced_diversity_check(self, movie_id, current_recommendations, selected_genres, user_pref):
        """增强的多样性检查"""
        movie_data = self.movie_features[self.movie_features['movieId'] == movie_id]
        if movie_data.empty:
            return True

        movie_row = movie_data.iloc[0]
        movie_genres = [col for col in self.genre_columns if movie_row.get(col, 0) == 1]

        # 类型多样性检查
        max_same_genre = self.diversity_controller['max_same_genre']
        if user_pref['diversity_preference'] > 0.7:  # 高多样性偏好用户
            max_same_genre = max(1, max_same_genre - 1)

        for genre in movie_genres:
            if selected_genres[genre] >= max_same_genre:
                return False

        # 相似度检查
        current_movie_ids = [rec[0] for rec in current_recommendations]
        if current_movie_ids:
            try:
                movie_idx = list(self.movie_features['movieId']).index(movie_id)
                for current_movie_id in current_movie_ids[-3:]:  # 只检查最近的3个
                    current_idx = list(self.movie_features['movieId']).index(current_movie_id)
                    if (hasattr(self, 'movie_similarity_matrix') and
                            self.movie_similarity_matrix[movie_idx][current_idx] > self.diversity_controller[
                                'similarity_threshold']):
                        return False
            except (ValueError, IndexError):
                pass  # 如果找不到索引，跳过相似度检查

        return True

    def _get_movie_pool(self, movie_id):
        """获取电影所属池子"""
        for pool_name, pool_movies in self.movie_pools.items():
            if movie_id in pool_movies:
                return pool_name
        return 'unknown'

    # ==================== Improved recommendation generation ====================

    def improved_recommend(self, user_id, top_k=10):
        """Generate top-k recommendations: candidates -> tiered ranking -> diversity."""
        # Stage 1: pull a large candidate set, grouped by movie pool.
        all_candidates, candidates_by_pool = self.enhanced_candidate_generation(user_id, total_candidates=500)
        if not all_candidates:
            return []

        # Stage 2: score each pool's candidates with its dedicated model.
        scored = self.tiered_ranking(user_id, candidates_by_pool)

        # Stage 3: enforce diversity and traffic allocation on the final list.
        picks = self.enhanced_diversify_recommendations(scored, user_id, top_k)
        return [movie_id for movie_id, _score, _pool in picks]

    def tiered_ranking(self, user_id, candidates_by_pool):
        """Score each pool's candidates with the model trained for that pool.

        Pools without a trained model fall back to a neutral 0.5 score; every
        pool's scores then go through the pool-specific adjustments.

        Args:
            user_id: raw user id.
            candidates_by_pool: dict mapping pool name -> list of movie ids.

        Returns:
            list of (movie_id, adjusted_score) tuples over all pools.
        """
        try:
            user_encoded = self.user_encoder.transform([user_id])[0]
        except Exception:
            # Narrowed from a bare `except:` (which would also swallow
            # KeyboardInterrupt/SystemExit). Unseen users get a default encoding.
            user_encoded = 0

        all_scores = []

        for pool_name, pool_candidates in candidates_by_pool.items():
            if not pool_candidates:
                continue

            # Route to the model trained for this pool (long-tail uses LightGBM).
            if pool_name == 'cold_start' and self.cold_start_model:
                scores = self._score_with_neural_model(user_encoded, pool_candidates, self.cold_start_model)
            elif pool_name == 'growth' and self.growth_model:
                scores = self._score_with_neural_model(user_encoded, pool_candidates, self.growth_model)
            elif pool_name == 'mature' and self.mature_model:
                scores = self._score_with_neural_model(user_encoded, pool_candidates, self.mature_model)
            elif pool_name == 'long_tail' and self.long_tail_model:
                scores = self._score_with_lgb_model(user_id, pool_candidates, self.long_tail_model)
            else:
                scores = [0.5] * len(pool_candidates)  # neutral prior when no model exists

            # Apply pool-specific score adjustments (boosts, personalization).
            adjusted_scores = self._apply_enhanced_score_adjustments(
                user_id, pool_candidates, scores, pool_name
            )

            all_scores.extend(zip(pool_candidates, adjusted_scores))

        return all_scores

    def _apply_enhanced_score_adjustments(self, user_id, candidates, scores, pool_name):
        """增强的分数调整"""
        user_pref = self.user_preferences.get(user_id, self._default_user_preference())
        adjusted_scores = []

        for movie_id, score in zip(candidates, scores):
            # 基础调整
            if pool_name == 'test':
                score = self.gradual_traffic_expansion(movie_id, score)
            elif pool_name == 'long_tail':
                score = min(1.0, score * self.diversity_controller['exploration_boost'])
            elif pool_name == 'cold_start':
                score = min(1.0, score * 1.1)

            # 个性化调整
            if user_pref['user_type'] == 'explorer':
                if pool_name in ['long_tail', 'test', 'cold_start']:
                    score = min(1.0, score * 1.15)  # 探索者给予探索性内容更高分数
            elif user_pref['user_type'] == 'mainstream':
                if pool_name == 'mature':
                    score = min(1.0, score * 1.1)  # 主流用户偏好成熟内容

            # 质量调整
            movie_data = self.movie_features[self.movie_features['movieId'] == movie_id]
            if not movie_data.empty:
                quality_score = movie_data.iloc[0]['quality_score']
                if quality_score > 4.5:  # 高质量内容
                    score = min(1.0, score * 1.05)

            adjusted_scores.append(score)

        return adjusted_scores

    def _score_with_neural_model(self, user_encoded, candidates, model):
        """使用神经网络模型打分"""
        if not candidates:
            return []

        try:
            movie_features_list = []
            valid_candidates = []

            for movie_id in candidates:
                try:
                    movie_encoded = self.movie_encoder.transform([movie_id])[0]
                    movie_data = self.movie_features[self.movie_features['movieId'] == movie_id]

                    if not movie_data.empty:
                        movie_row = movie_data.iloc[0]
                        feature_cols = ['avg_rating', 'rating_count', 'quality_score', 'rating_velocity',
                                        'genre_count', 'user_penetration', 'rating_range']
                        features = [movie_row.get(col, 0) for col in feature_cols]
                        movie_features_list.append(features)
                        valid_candidates.append(movie_encoded)
                except:
                    continue

            if not valid_candidates:
                return [0.5] * len(candidates)

            user_ids = np.array([user_encoded] * len(valid_candidates))
            movie_ids = np.array(valid_candidates)
            movie_features = np.array(movie_features_list)

            scores = model.predict([user_ids, movie_ids, movie_features], verbose=0)
            return scores.flatten().tolist()

        except Exception as e:
            print(f"神经网络打分出错: {e}")
            return [0.5] * len(candidates)

    def _score_with_lgb_model(self, user_id, candidates, model):
        """使用LightGBM模型打分"""
        if not candidates:
            return []

        try:
            features_list = []
            for movie_id in candidates:
                movie_data = self.movie_features[self.movie_features['movieId'] == movie_id]
                if not movie_data.empty:
                    movie_row = movie_data.iloc[0]
                    feature_cols = ['avg_rating', 'rating_count', 'quality_score', 'rating_velocity',
                                    'genre_count', 'user_penetration', 'rating_range']
                    features = [movie_row.get(col, 0) for col in feature_cols]
                    features_list.append(features)
                else:
                    features_list.append([3.5, 10, 3.5, 0.1, 2, 0.01, 1.0])

            scores = model.predict(np.array(features_list))
            return scores.tolist()

        except Exception as e:
            print(f"LightGBM打分出错: {e}")
            return [0.5] * len(candidates)

    # ==================== 辅助方法 ====================

    def gradual_traffic_expansion(self, movie_id, base_score):
        """Scale a test-pool movie's score by a traffic weight earned from its track record."""
        perf = self.test_pool_performance.get(movie_id)
        if perf is None:
            return base_score  # not in the test pool: leave the score untouched

        exposure = perf['exposure_count']
        if exposure < 10:
            # Fresh movie: start with a modest share of traffic.
            weight = 0.2
        elif perf.get('ctr', 0) > 0.1:
            # Strong click-through: boost the score, capped at 2x.
            weight = min(2.0, 1.0 + perf['ctr'] * 15)
        else:
            # Mediocre performance: decay toward a 0.2 floor while exposures lag.
            weight = max(0.2, 1.0 - (50 - exposure) * 0.005)

        return base_score * weight

    def update_test_pool_performance(self, movie_id, user_interaction):
        """Record an interaction for a test-pool movie and promote/demote it if warranted."""
        perf = self.test_pool_performance.get(movie_id)
        if perf is None:
            return

        perf['exposure_count'] += 1
        if user_interaction.get('clicked', False):
            perf['click_count'] += 1
        perf['ctr'] = perf['click_count'] / max(perf['exposure_count'], 1)

        # Pool transitions only start once the movie has enough exposure.
        if perf['exposure_count'] < 30:
            return

        if perf['ctr'] >= 0.03 and perf['avg_rating'] >= 3.8:
            # Promotion: decent CTR and rating move the movie into the growth pool.
            self.movie_pools['test'].discard(movie_id)
            self.movie_pools['growth'].add(movie_id)
            print(f"电影 {movie_id} 从测试池晋级到成长池")
        elif perf['exposure_count'] >= 80 and perf['ctr'] < 0.01:
            # Demotion: sustained exposure with very poor CTR goes back to cold start.
            self.movie_pools['test'].discard(movie_id)
            self.movie_pools['cold_start'].add(movie_id)

    # ==================== 评估方法（使用原有的评估框架） ====================

    def _evaluate_neural_model(self, model, X_test, y_test, model_name):
        """Evaluate a neural ranking model on held-out data and record its metrics."""
        print(f"\n=== 评估改进的 {model_name} 模型 ===")

        # Probabilities feed AUC/NDCG; a 0.5 cutoff yields hard labels.
        y_pred_proba = model.predict(X_test, verbose=0).flatten()
        y_pred = (y_pred_proba > 0.5).astype(int)

        metrics = {
            'accuracy': accuracy_score(y_test, y_pred),
            'precision': precision_score(y_test, y_pred, zero_division=0),
            'recall': recall_score(y_test, y_pred, zero_division=0),
            'f1_score': f1_score(y_test, y_pred, zero_division=0),
            'auc': roc_auc_score(y_test, y_pred_proba),
            'ndcg@5': self._calculate_ndcg(y_test, y_pred_proba, k=5),
            'ndcg@10': self._calculate_ndcg(y_test, y_pred_proba, k=10),
            'model_type': 'improved_neural_network',
        }
        self.model_performance[model_name] = metrics

        print(f"准确率: {metrics['accuracy']:.4f}")
        print(f"精确率: {metrics['precision']:.4f}")
        print(f"召回率: {metrics['recall']:.4f}")
        print(f"F1分数: {metrics['f1_score']:.4f}")
        print(f"AUC: {metrics['auc']:.4f}")
        print(f"NDCG@5: {metrics['ndcg@5']:.4f}")
        print(f"NDCG@10: {metrics['ndcg@10']:.4f}")

    def _evaluate_lgb_model(self, model, X_test, y_test, model_name):
        """Evaluate the LightGBM model on held-out data and record its metrics."""
        print(f"\n=== 评估改进的 {model_name} 模型 ===")

        # LightGBM emits probabilities directly; threshold at 0.5 for labels.
        y_pred_proba = model.predict(X_test)
        y_pred = (y_pred_proba > 0.5).astype(int)

        metrics = {
            'accuracy': accuracy_score(y_test, y_pred),
            'precision': precision_score(y_test, y_pred, zero_division=0),
            'recall': recall_score(y_test, y_pred, zero_division=0),
            'f1_score': f1_score(y_test, y_pred, zero_division=0),
            'auc': roc_auc_score(y_test, y_pred_proba),
            'ndcg@5': self._calculate_ndcg(y_test, y_pred_proba, k=5),
            'ndcg@10': self._calculate_ndcg(y_test, y_pred_proba, k=10),
            'feature_importance': model.feature_importance(),
            'model_type': 'improved_lightgbm',
        }
        self.model_performance[model_name] = metrics

        print(f"准确率: {metrics['accuracy']:.4f}")
        print(f"精确率: {metrics['precision']:.4f}")
        print(f"召回率: {metrics['recall']:.4f}")
        print(f"F1分数: {metrics['f1_score']:.4f}")
        print(f"AUC: {metrics['auc']:.4f}")
        print(f"NDCG@5: {metrics['ndcg@5']:.4f}")
        print(f"NDCG@10: {metrics['ndcg@10']:.4f}")

    def _calculate_ndcg(self, y_true, y_score, k=10):
        """计算NDCG@K"""

        def dcg_at_k(r, k):
            r = np.asfarray(r)[:k]
            if r.size:
                return np.sum(r / np.log2(np.arange(2, r.size + 2)))
            return 0.

        y_true = np.array(y_true)
        y_score = np.array(y_score)

        if len(y_true) == 0 or np.sum(y_true) == 0:
            return 0.0

        order = np.argsort(y_score)[::-1]
        y_true_sorted = y_true[order[:k]]

        ideal_sorted = np.sort(y_true)[::-1][:k]
        dcg_max = dcg_at_k(ideal_sorted, k)

        if dcg_max == 0:
            return 0.0

        actual_dcg = dcg_at_k(y_true_sorted, k)
        ndcg = actual_dcg / dcg_max

        return max(0.0, min(1.0, ndcg))

    # ==================== Full training pipeline ====================

    def train_improved_system(self, file_path):
        """Run the end-to-end training pipeline and return the prepared dataframe."""
        started_at = datetime.now()
        print("=== 开始训练改进的分层推荐系统 ===")

        # Pipeline: load -> feature extraction -> movie pooling -> user
        # preference profiling -> per-pool model training.
        df = self.load_and_prepare_data(file_path)
        self.extract_enhanced_features(df)
        self.analyze_and_categorize_movies_improved(df)
        self.analyze_user_preferences_enhanced(df)
        self.train_improved_models(df)

        elapsed = datetime.now() - started_at
        print(f"\n=== 改进系统训练完成，总耗时: {elapsed} ===")

        return df

    def get_movie_info(self, movie_id, df):
        """Return a descriptive dict for a movie: genres, stats, tier, pool, long-tail quality.

        Args:
            movie_id: the movie to describe.
            df: prepared dataframe with one-hot genre columns and a 'movieId' column.

        Returns:
            dict with keys movieId, genres, avg_rating, rating_count,
            quality_score, tier, pool, long_tail_quality. Unknown movies get
            placeholder values.
        """
        movie_data = df[df['movieId'] == movie_id]
        if movie_data.empty:
            # Movie absent from the dataframe: return a neutral placeholder.
            # NOTE(review): this branch omits the 'quality_score' key that the
            # normal return includes — confirm callers tolerate the difference.
            return {
                'movieId': movie_id,
                'genres': [],
                'avg_rating': 0,
                'rating_count': 0,
                'tier': 'unknown',
                'long_tail_quality': 'none',
                'pool': 'unknown'
            }

        movie_row = movie_data.iloc[0]
        genres = [genre for genre in self.genre_columns if movie_row.get(genre, 0) == 1]

        # Determine the movie's popularity tier (hot / mid / tail).
        if hasattr(self, 'hot_movies') and movie_id in self.hot_movies:
            tier = 'hot'
            pool = 'mature'  # hot movies usually live in the mature pool
            long_tail_quality = 'none'
        elif hasattr(self, 'mid_movies') and movie_id in self.mid_movies:
            tier = 'mid'
            pool = 'growth'  # mid-tier movies usually live in the growth pool
            long_tail_quality = 'none'
        elif hasattr(self, 'tail_movies') and movie_id in self.tail_movies:
            tier = 'tail'
            pool = 'long_tail'
            # Classify long-tail quality from the repository buckets.
            if hasattr(self, 'long_tail_repository'):
                if movie_id in self.long_tail_repository.get('high_quality', []):
                    long_tail_quality = 'high'
                elif movie_id in self.long_tail_repository.get('medium_quality', []):
                    long_tail_quality = 'medium'
                elif movie_id in self.long_tail_repository.get('discovery', []):
                    long_tail_quality = 'discovery'
                else:
                    long_tail_quality = 'unknown'
            else:
                long_tail_quality = 'unknown'
        else:
            tier = 'unknown'
            pool = 'unknown'
            long_tail_quality = 'none'

        # The actual pool assignment (if tracked) overrides the tier-based guess.
        if hasattr(self, 'movie_pools'):
            for pool_name, pool_movies in self.movie_pools.items():
                if movie_id in pool_movies:
                    pool = pool_name
                    break

        # Pull rating statistics from the feature table when available.
        avg_rating = 0
        rating_count = 0
        quality_score = 0

        if hasattr(self, 'movie_features') and self.movie_features is not None:
            movie_features = self.movie_features[self.movie_features['movieId'] == movie_id]
            if not movie_features.empty:
                feature_row = movie_features.iloc[0]
                avg_rating = float(feature_row.get('avg_rating', 0))
                rating_count = int(feature_row.get('rating_count', 0))
                quality_score = float(feature_row.get('quality_score', 0))

        return {
            'movieId': movie_id,
            'genres': genres,
            'avg_rating': avg_rating,
            'rating_count': rating_count,
            'quality_score': quality_score,
            'tier': tier,
            'pool': pool,
            'long_tail_quality': long_tail_quality
        }


# ==================== Example usage ====================
if __name__ == "__main__":
    # Initialize the improved system (sample 80% of the data, fast mode on).
    improved_rec_system = ImprovedTieredRecommendationSystem(sample_ratio=0.8, fast_mode=True)

    # Train the full pipeline from the merged ratings file.
    file_path = "data/merged_df.csv"

    try:
        df = improved_rec_system.train_improved_system(file_path)

        print("\n" + "=" * 80)
        print("                    改进效果验证")
        print("=" * 80)

        # Generate recommendations for a few users and inspect the improvement.
        test_users = [1, 2, 3, 4, 5]

        for user_id in test_users:
            print(f"\n为用户 {user_id} 生成改进的推荐:")
            recommendations = improved_rec_system.improved_recommend(user_id, top_k=10)

            # Analyze how the recommendations are distributed.
            pool_distribution = Counter()
            genre_distribution = Counter()

            for movie_id in recommendations:
                # Pool distribution.
                pool = improved_rec_system._get_movie_pool(movie_id)
                pool_distribution[pool] += 1

                # Genre distribution.
                movie_info = improved_rec_system.get_movie_info(movie_id, df)
                for genre in movie_info['genres']:
                    genre_distribution[genre] += 1

            print(f"池子分布: {dict(pool_distribution)}")
            print(f"类型分布: {dict(genre_distribution.most_common(3))}")

            # Key health metrics: exploration share and genre diversity.
            # NOTE(review): these divisions assume recommendations and
            # genre_columns are non-empty — improved_recommend can return [];
            # confirm before running against sparse data.
            long_tail_count = pool_distribution.get('long_tail', 0) + pool_distribution.get('test', 0)
            exploration_ratio = long_tail_count / len(recommendations)
            genre_diversity = len(genre_distribution) / len(improved_rec_system.genre_columns)

            print(f"探索性内容比例: {exploration_ratio:.2%}")
            print(f"类型多样性: {genre_diversity:.3f}")

            if exploration_ratio >= 0.25:
                print("✅ 探索性内容比例达标")
            else:
                print("⚠️  探索性内容比例仍需提升")

    except FileNotFoundError:
        print(f"文件 {file_path} 不存在，请检查文件路径")
    except Exception as e:
        print(f"运行出错: {e}")
        import traceback

        traceback.print_exc()