# 智能新闻推荐系统
import math
import json
import time
import logging
import numpy as np
from datetime import datetime, timedelta
from collections import defaultdict, Counter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import jieba
import mysql.connector

logger = logging.getLogger(__name__)

class IntelligentNewsRecommender:
    """智能新闻推荐系统
    
    集成多种推荐算法：
    1. 协同过滤（用户相似度）
    2. 内容相似度推荐
    3. 热度衰减机制
    4. 多样性优化
    5. 实时热点推荐
    """
    
    def __init__(self, db_config):
        """Initialize the recommender with DB settings and empty caches.

        Args:
            db_config: keyword arguments accepted by mysql.connector.connect().
        """
        self.db_config = db_config

        # Vectorizer used for content-based similarity work.
        self.tfidf_vectorizer = TfidfVectorizer(
            max_features=5000,
            stop_words=None,
            ngram_range=(1, 2),
        )
        self.news_vectors = None
        self.news_ids = None

        # In-memory similarity caches, refreshed after `cache_ttl` seconds.
        self.user_similarity_cache = {}
        self.content_similarity_cache = {}
        self.last_cache_update = 0
        self.cache_ttl = 3600  # cache for one hour

        # Per-user recommendation totals, cached so pagination totals
        # do not fluctuate between page requests.
        self.user_recommendation_totals = {}
        
    def get_db_connection(self):
        """Open and return a new MySQL connection from the stored config."""
        params = dict(self.db_config)
        return mysql.connector.connect(**params)
    
    def get_recommendations(self, user_id, page=1, per_page=6, include_explain=False):
        """Return a page of blended news recommendations for a user.

        Pools candidates from collaborative filtering, content similarity,
        category preference, trending and diversity strategies, then merges,
        dedupes and ranks them. New users fall back to trending; any error
        degrades to a simple fallback recommendation.

        Args:
            user_id: user identifier.
            page: 1-based page number.
            per_page: items per page (default 6).
            include_explain: attach a recommendation explanation when True.

        Returns:
            dict: {"status", "recommendations", "pagination"[, "explain"]}.
        """
        try:
            user_behavior = self._get_user_behavior(user_id)

            if not user_behavior['has_behavior']:
                # New user / no history: serve popularity-based results.
                return self._get_trending_recommendations(user_id, page, per_page, include_explain)

            # Candidate pool: list of (news_row, strategy_tag) tuples.
            # Per-strategy limits are fractions of per_page.
            recommendations = []

            # 1. Collaborative filtering - may be empty with few users.
            collab_recs = self._collaborative_filtering(user_id, limit=int(per_page * 0.3))
            recommendations.extend([(r, 'collaborative') for r in collab_recs])

            # 2. Content similarity (higher weight).
            content_recs = self._content_based_filtering(user_id, limit=int(per_page * 0.5))
            recommendations.extend([(r, 'content') for r in content_recs])

            # 3. Preferred-category recommendations.
            category_recs = self._category_based_filtering(user_id, limit=int(per_page * 0.4))
            recommendations.extend([(r, 'category') for r in category_recs])

            # 4. Trending news (higher weight).
            trending_recs = self._get_trending_news(user_id, limit=int(per_page * 0.4))
            recommendations.extend([(r, 'trending') for r in trending_recs])

            # 5. Diversity picks from less-read categories.
            diverse_recs = self._diversify_recommendations(user_id, limit=int(per_page * 0.3))
            recommendations.extend([(r, 'diverse') for r in diverse_recs])

            # Adaptive top-up: when the pool is below 80% of one page,
            # backfill with extra trending items not already pooled.
            total_recs = len(recommendations)
            if total_recs < per_page * 0.8:
                logger.info("推荐数量不足(%s)，启动补充推荐", total_recs)

                extra_trending = self._get_trending_news(user_id, limit=per_page)
                # Drop items already present in the pool.
                existing_ids = {r[0]['id'] for r in recommendations if r[0]}
                extra_trending = [r for r in extra_trending if r['id'] not in existing_ids]
                recommendations.extend([(r, 'trending_extra') for r in extra_trending[:per_page - total_recs]])

            # Dedupe, score, sort and slice out the requested page.
            final_recs = self._merge_and_rank_recommendations(recommendations, user_behavior, page, per_page)

            # Total is cached per user so pagination stays stable.
            total = self._get_cached_recommendation_total(user_id, len(final_recs), page, per_page)

            result = {
                "status": "success",
                "recommendations": final_recs,
                "pagination": {
                    "total": total,
                    "page": page,
                    "per_page": per_page,
                    "pages": (total + per_page - 1) // per_page
                }
            }

            if include_explain:
                result["explain"] = self._generate_explanation(recommendations, user_behavior)

            return result

        except Exception as e:
            # Degrade to a simple recommendation instead of failing the request;
            # logger.exception records the full traceback for diagnosis.
            logger.exception("智能推荐错误: %s", e)
            return self._fallback_recommendations(user_id, page, per_page)
    
    def _get_user_behavior(self, user_id):
        """Collect the user's recent behavior signals from the database.

        Three aggregates are gathered:
          * favorites per news category over the last 90 days,
          * reading stats per category over the last 30 days,
          * the user's 3 most active hours (favorite timestamps, 30 days).

        Returns:
            dict with keys 'has_behavior', 'favorite_categories',
            'reading_behavior', 'active_hours', 'total_favorites',
            'total_reads'; on any DB error only {'has_behavior': False},
            which callers use to fall back to non-personalized results.
        """
        try:
            with self.get_db_connection() as conn:
                with conn.cursor(dictionary=True) as cursor:
                    # Favorite counts per category, most favorited first.
                    cursor.execute("""
                        SELECT n.category, COUNT(*) as count, MAX(uf.created_at) as last_action
                        FROM user_favorites uf
                        JOIN news n ON uf.news_id = n.id
                        WHERE uf.user_id = %s AND uf.created_at >= NOW() - INTERVAL 90 DAY
                        GROUP BY n.category
                        ORDER BY count DESC
                    """, (user_id,))
                    favorite_categories = cursor.fetchall()
                    
                    # Reading stats per category (requires the reading-behavior
                    # table to exist; failures fall through to the except).
                    # NOTE(review): bare `category` here presumably resolves to
                    # n.category — confirm it is unambiguous in the schema.
                    cursor.execute("""
                        SELECT category, COUNT(*) as views, AVG(read_duration) as avg_duration
                        FROM user_reading_behavior urb
                        JOIN news n ON urb.news_id = n.id
                        WHERE urb.user_id = %s AND urb.created_at >= NOW() - INTERVAL 30 DAY
                        GROUP BY category
                        ORDER BY views DESC
                    """, (user_id,))
                    reading_behavior = cursor.fetchall()
                    
                    # Top-3 most active hours of day, by favoriting activity.
                    cursor.execute("""
                        SELECT HOUR(created_at) as hour, COUNT(*) as activity
                        FROM user_favorites
                        WHERE user_id = %s AND created_at >= NOW() - INTERVAL 30 DAY
                        GROUP BY HOUR(created_at)
                        ORDER BY activity DESC
                        LIMIT 3
                    """, (user_id,))
                    active_hours = cursor.fetchall()
                    
                    return {
                        'has_behavior': len(favorite_categories) > 0 or len(reading_behavior) > 0,
                        'favorite_categories': favorite_categories,
                        'reading_behavior': reading_behavior,
                        'active_hours': [h['hour'] for h in active_hours],
                        'total_favorites': sum(c['count'] for c in favorite_categories),
                        'total_reads': sum(r['views'] for r in reading_behavior)
                    }
        except Exception as e:
            logger.warning(f"获取用户行为数据失败: {e}")
            return {'has_behavior': False}
    
    def _collaborative_filtering(self, user_id, limit=10):
        """User-based collaborative filtering.

        Finds users with similar favoriting patterns, then recommends recent
        (60-day) news those users favorited that this user has not.
        Returns [] when there are fewer than two users, no similar users, or
        on any DB error.
        """
        try:
            # Collaborative filtering needs at least two distinct users.
            with self.get_db_connection() as conn:
                with conn.cursor() as cursor:
                    cursor.execute("SELECT COUNT(DISTINCT user_id) as user_count FROM user_favorites")
                    user_count = cursor.fetchone()[0]
                    
                    # Fewer than 2 users: nothing to collaborate with.
                    if user_count < 2:
                        logger.info(f"用户数量不足({user_count})，跳过协同过滤推荐")
                        return []
            
            # Refresh the user-similarity cache once its TTL has expired.
            if (time.time() - self.last_cache_update) > self.cache_ttl:
                self._update_user_similarity_cache()
            
            # Rank other users by category-profile similarity.
            similar_users = self._find_similar_users(user_id, top_k=20)
            
            if not similar_users:
                logger.info("未找到相似用户，跳过协同过滤推荐")
                return []
            
            # Fetch news favorited by the similar users, excluding anything
            # this user already favorited; rank by how many similar users
            # favorited each item, then by hotness.
            similar_user_ids = [u['user_id'] for u in similar_users]
            
            with self.get_db_connection() as conn:
                with conn.cursor(dictionary=True) as cursor:
                    # The f-string only expands %s placeholder markers; the
                    # values themselves are still bound safely by the driver.
                    placeholders = ','.join(['%s'] * len(similar_user_ids))
                    # NOTE(review): `SELECT n.*` with `GROUP BY n.id` relies on
                    # MySQL running without ONLY_FULL_GROUP_BY (or on n.id
                    # being the primary key, MySQL 5.7+) — confirm.
                    cursor.execute(f"""
                        SELECT n.*, COUNT(*) as recommendation_score,
                               (SELECT name FROM (
                                   SELECT 1 as id, '中国新闻网' as name UNION ALL
                                   SELECT 2, '央视新闻' UNION ALL
                                   SELECT 3, '新华网' UNION ALL
                                   SELECT 4, '中国日报' UNION ALL
                                   SELECT 5, '环球时报' UNION ALL
                                   SELECT 6, '虎嗅网' UNION ALL
                                   SELECT 7, 'IT之家' UNION ALL
                                   SELECT 11, '人民网' UNION ALL
                                   SELECT 12, '第一财经' UNION ALL
                                   SELECT 13, '知乎热榜' UNION ALL
                                   SELECT 14, '路透社' UNION ALL
                                   SELECT 15, '36氪' UNION ALL
                                   SELECT 16, '澎湃新闻'
                               ) as sources WHERE sources.id = n.source_id) as source_name
                        FROM user_favorites uf
                        JOIN news n ON uf.news_id = n.id
                        WHERE uf.user_id IN ({placeholders})
                          AND n.id NOT IN (
                              SELECT news_id FROM user_favorites WHERE user_id = %s
                          )
                          AND (n.published_date >= NOW() - INTERVAL 60 DAY 
                               OR n.created_at >= NOW() - INTERVAL 60 DAY)
                        GROUP BY n.id
                        ORDER BY recommendation_score DESC, n.hot_score DESC
                        LIMIT %s
                    """, similar_user_ids + [user_id, limit])
                    
                    return cursor.fetchall()
        except Exception as e:
            logger.warning(f"协同过滤推荐失败: {e}")
            return []
    
    def _content_based_filtering(self, user_id, limit=10):
        """Recommend news textually similar to the user's recent favorites.

        Builds an interest profile from the user's 10 most recent favorites
        (title + first 200 chars of content), scores up to 200 recent
        candidate articles by text similarity, and returns the top `limit`.
        Returns [] when the user has no favorites or on any error.
        """
        try:
            # The most recent favorites form the interest profile.
            with self.get_db_connection() as conn:
                with conn.cursor(dictionary=True) as cursor:
                    cursor.execute("""
                        SELECT n.id, n.title, n.content, n.category
                        FROM user_favorites uf
                        JOIN news n ON uf.news_id = n.id
                        WHERE uf.user_id = %s
                        ORDER BY uf.created_at DESC
                        LIMIT 10
                    """, (user_id,))
                    user_news = cursor.fetchall()
            
            if not user_news:
                return []
            
            # Interest vector: concatenated titles + content snippets, tokenized.
            user_text = ' '.join([n['title'] + ' ' + (n['content'] or '')[:200] 
                                 for n in user_news])
            user_text = self._preprocess_text(user_text)
            
            # Candidate pool: recent (45-day) news the user has not favorited.
            with self.get_db_connection() as conn:
                with conn.cursor(dictionary=True) as cursor:
                    cursor.execute("""
                        SELECT n.*, 
                               (SELECT name FROM (
                                   SELECT 1 as id, '中国新闻网' as name UNION ALL
                                   SELECT 2, '央视新闻' UNION ALL
                                   SELECT 3, '新华网' UNION ALL
                                   SELECT 4, '中国日报' UNION ALL
                                   SELECT 5, '环球时报' UNION ALL
                                   SELECT 6, '虎嗅网' UNION ALL
                                   SELECT 7, 'IT之家' UNION ALL
                                   SELECT 11, '人民网' UNION ALL
                                   SELECT 12, '第一财经' UNION ALL
                                   SELECT 13, '知乎热榜' UNION ALL
                                   SELECT 14, '路透社' UNION ALL
                                   SELECT 15, '36氪' UNION ALL
                                   SELECT 16, '澎湃新闻'
                               ) as sources WHERE sources.id = n.source_id) as source_name
                        FROM news n
                        WHERE n.id NOT IN (
                            SELECT news_id FROM user_favorites WHERE user_id = %s
                        )
                        AND (n.published_date >= NOW() - INTERVAL 45 DAY 
                             OR n.created_at >= NOW() - INTERVAL 45 DAY)
                        ORDER BY n.created_at DESC
                        LIMIT 200
                    """, (user_id,))
                    candidate_news = cursor.fetchall()
            
            # Score each candidate against the interest profile.
            # NOTE(review): this fits a fresh pairwise TF-IDF per candidate
            # (up to 200 fits per call) — acceptable for small pools, but a
            # single corpus-wide vectorization would be far cheaper.
            similarities = []
            for news in candidate_news:
                news_text = self._preprocess_text(
                    news['title'] + ' ' + (news['content'] or '')[:200]
                )
                similarity = self._calculate_text_similarity(user_text, news_text)
                similarities.append((news, similarity))
            
            # Highest similarity first; return only the article rows.
            similarities.sort(key=lambda x: x[1], reverse=True)
            return [item[0] for item in similarities[:limit]]
            
        except Exception as e:
            logger.warning(f"内容推荐失败: {e}")
            return []
    
    def _category_based_filtering(self, user_id, limit=10):
        """Recommend hot recent news from the user's preferred categories.

        Uses the user's top-3 favorited categories and returns up to `limit`
        recent (30-day) unfavorited articles from them, hottest first.
        Returns [] when the user has no category preferences or on error.
        """
        try:
            # Derive preferred categories from favoriting history.
            user_behavior = self._get_user_behavior(user_id)
            preferred_categories = [c['category'] for c in user_behavior.get('favorite_categories', [])[:3]]  # top-3 favorite categories
            
            if not preferred_categories:
                return []
            
            with self.get_db_connection() as conn:
                with conn.cursor(dictionary=True) as cursor:
                    # Expand one %s marker per category; values remain
                    # driver-bound parameters.
                    placeholders = ','.join(['%s'] * len(preferred_categories))
                    cursor.execute(f"""
                        SELECT n.*, 
                               (SELECT name FROM (
                                   SELECT 1 as id, '中国新闻网' as name UNION ALL
                                   SELECT 2, '央视新闻' UNION ALL
                                   SELECT 3, '新华网' UNION ALL
                                   SELECT 4, '中国日报' UNION ALL
                                   SELECT 5, '环球时报' UNION ALL
                                   SELECT 6, '虎嗅网' UNION ALL
                                   SELECT 7, 'IT之家' UNION ALL
                                   SELECT 11, '人民网' UNION ALL
                                   SELECT 12, '第一财经' UNION ALL
                                   SELECT 13, '知乎热榜' UNION ALL
                                   SELECT 14, '路透社' UNION ALL
                                   SELECT 15, '36氪' UNION ALL
                                   SELECT 16, '澎湃新闻'
                               ) as sources WHERE sources.id = n.source_id) as source_name
                        FROM news n
                        WHERE n.category IN ({placeholders})
                          AND n.id NOT IN (
                              SELECT news_id FROM user_favorites WHERE user_id = %s
                          )
                          AND (n.published_date >= NOW() - INTERVAL 30 DAY 
                               OR n.created_at >= NOW() - INTERVAL 30 DAY)
                        ORDER BY n.hot_score DESC, n.created_at DESC
                        LIMIT %s
                    """, preferred_categories + [user_id, limit])
                    
                    return cursor.fetchall()
        except Exception as e:
            logger.warning(f"分类推荐失败: {e}")
            return []
    
    def _get_trending_news(self, user_id, limit=10):
        """Return hot recent news ranked by a time-decayed hotness score.

        The score is hot_score * exp(-0.1 * hours_since_publish), i.e. a
        roughly 10%-per-hour exponential decay. Only items from the last 21
        days that the user has not favorited are considered.
        NOTE(review): a NULL published_date makes decayed_score NULL, which
        MySQL sorts last under ORDER BY ... DESC — confirm that is intended.
        """
        try:
            with self.get_db_connection() as conn:
                with conn.cursor(dictionary=True) as cursor:
                    # Hot-score decay computed in SQL so ranking happens DB-side.
                    cursor.execute("""
                        SELECT n.*, 
                               (n.hot_score * EXP(-0.1 * TIMESTAMPDIFF(HOUR, n.published_date, NOW()))) as decayed_score,
                               (SELECT name FROM (
                                   SELECT 1 as id, '中国新闻网' as name UNION ALL
                                   SELECT 2, '央视新闻' UNION ALL
                                   SELECT 3, '新华网' UNION ALL
                                   SELECT 4, '中国日报' UNION ALL
                                   SELECT 5, '环球时报' UNION ALL
                                   SELECT 6, '虎嗅网' UNION ALL
                                   SELECT 7, 'IT之家' UNION ALL
                                   SELECT 11, '人民网' UNION ALL
                                   SELECT 12, '第一财经' UNION ALL
                                   SELECT 13, '知乎热榜' UNION ALL
                                   SELECT 14, '路透社' UNION ALL
                                   SELECT 15, '36氪' UNION ALL
                                   SELECT 16, '澎湃新闻'
                               ) as sources WHERE sources.id = n.source_id) as source_name
                        FROM news n
                        WHERE n.id NOT IN (
                            SELECT news_id FROM user_favorites WHERE user_id = %s
                        )
                        AND (n.published_date >= NOW() - INTERVAL 21 DAY 
                             OR n.created_at >= NOW() - INTERVAL 21 DAY)
                        ORDER BY decayed_score DESC
                        LIMIT %s
                    """, (user_id, limit))
                    
                    return cursor.fetchall()
        except Exception as e:
            logger.warning(f"热点推荐失败: {e}")
            return []
    
    def _diversify_recommendations(self, user_id, limit=5):
        """Diversity picks: hot, very recent news from categories the user
        rarely engages with.

        Takes the complement of the user's top-3 favorited categories within
        the known category universe and returns up to `limit` unfavorited
        items from the last 7 days, hottest first. Returns [] when every
        category is already preferred or on any error.
        """
        try:
            # The user's favorited categories, most favorited first.
            user_behavior = self._get_user_behavior(user_id)
            preferred_categories = [c['category'] for c in user_behavior.get('favorite_categories', [])]
            
            # Full category universe known to the system.
            all_categories = ['general', 'politics', 'economics', 'technology', 
                            'society', 'international', 'sports', 'entertainment', 'military']
            
            # Categories outside the user's top-3 favorites.
            less_preferred = [cat for cat in all_categories if cat not in preferred_categories[:3]]
            
            if not less_preferred:
                return []
            
            with self.get_db_connection() as conn:
                with conn.cursor(dictionary=True) as cursor:
                    # Expand one %s marker per category; values stay bound.
                    placeholders = ','.join(['%s'] * len(less_preferred))
                    cursor.execute(f"""
                        SELECT n.*, 
                               (SELECT name FROM (
                                   SELECT 1 as id, '中国新闻网' as name UNION ALL
                                   SELECT 2, '央视新闻' UNION ALL
                                   SELECT 3, '新华网' UNION ALL
                                   SELECT 4, '中国日报' UNION ALL
                                   SELECT 5, '环球时报' UNION ALL
                                   SELECT 6, '虎嗅网' UNION ALL
                                   SELECT 7, 'IT之家' UNION ALL
                                   SELECT 11, '人民网' UNION ALL
                                   SELECT 12, '第一财经' UNION ALL
                                   SELECT 13, '知乎热榜' UNION ALL
                                   SELECT 14, '路透社' UNION ALL
                                   SELECT 15, '36氪' UNION ALL
                                   SELECT 16, '澎湃新闻'
                               ) as sources WHERE sources.id = n.source_id) as source_name
                        FROM news n
                        WHERE n.category IN ({placeholders})
                          AND n.id NOT IN (
                              SELECT news_id FROM user_favorites WHERE user_id = %s
                          )
                          AND (n.published_date >= NOW() - INTERVAL 7 DAY 
                               OR n.created_at >= NOW() - INTERVAL 7 DAY)
                        ORDER BY n.hot_score DESC
                        LIMIT %s
                    """, less_preferred + [user_id, limit])
                    
                    return cursor.fetchall()
        except Exception as e:
            logger.warning(f"多样性推荐失败: {e}")
            return []
    
    def _find_similar_users(self, user_id, top_k=20):
        """Rank other users by cosine similarity of category-count profiles.

        Builds this user's 90-day favorite-category histogram, builds every
        other user's histogram (only (user, category) pairs with >= 3
        favorites, dropping weak signals), and returns up to `top_k` users
        with similarity above 0.05 as [{'user_id', 'similarity'}, ...].
        Returns [] when the user has no history or on any DB error.
        """
        try:
            with self.get_db_connection() as conn:
                with conn.cursor(dictionary=True) as cursor:
                    # This user's favorite-category distribution (90 days).
                    # NOTE(review): bare `category` in GROUP BY presumably
                    # resolves to n.category — confirm against the schema.
                    cursor.execute("""
                        SELECT category, COUNT(*) as count
                        FROM user_favorites uf
                        JOIN news n ON uf.news_id = n.id
                        WHERE uf.user_id = %s AND uf.created_at >= NOW() - INTERVAL 90 DAY
                        GROUP BY category
                    """, (user_id,))
                    user_categories = {row['category']: row['count'] for row in cursor.fetchall()}
                    
                    if not user_categories:
                        return []
                    
                    # Every other user's per-category counts; pairs with fewer
                    # than 3 favorites are filtered out as noise.
                    cursor.execute("""
                        SELECT uf.user_id, n.category, COUNT(*) as count
                        FROM user_favorites uf
                        JOIN news n ON uf.news_id = n.id
                        WHERE uf.user_id != %s AND uf.created_at >= NOW() - INTERVAL 90 DAY
                        GROUP BY uf.user_id, n.category
                        HAVING COUNT(*) >= 3
                    """, (user_id,))
                    
                    other_users = defaultdict(dict)
                    for row in cursor.fetchall():
                        other_users[row['user_id']][row['category']] = row['count']
                    
                    # Score each candidate user by cosine similarity.
                    similarities = []
                    for other_user_id, other_categories in other_users.items():
                        similarity = self._calculate_user_similarity(user_categories, other_categories)
                        if similarity > 0.05:  # deliberately low threshold for sparse data
                            similarities.append({
                                'user_id': other_user_id,
                                'similarity': similarity
                            })
                    
                    # Most similar first, capped at top_k.
                    similarities.sort(key=lambda x: x['similarity'], reverse=True)
                    return similarities[:top_k]
                    
        except Exception as e:
            logger.warning(f"查找相似用户失败: {e}")
            return []
    
    def _calculate_user_similarity(self, user1_categories, user2_categories):
        """计算两个用户的相似度"""
        all_categories = set(user1_categories.keys()) | set(user2_categories.keys())
        
        if not all_categories:
            return 0
        
        # 构建向量
        vector1 = [user1_categories.get(cat, 0) for cat in all_categories]
        vector2 = [user2_categories.get(cat, 0) for cat in all_categories]
        
        # 计算余弦相似度
        dot_product = sum(a * b for a, b in zip(vector1, vector2))
        magnitude1 = math.sqrt(sum(a * a for a in vector1))
        magnitude2 = math.sqrt(sum(b * b for b in vector2))
        
        if magnitude1 == 0 or magnitude2 == 0:
            return 0
        
        return dot_product / (magnitude1 * magnitude2)
    
    def _preprocess_text(self, text):
        """文本预处理"""
        if not text:
            return ""
        
        # 使用jieba分词
        words = jieba.cut(text)
        # 过滤停用词和短词
        filtered_words = [word.strip() for word in words if len(word.strip()) > 1]
        return ' '.join(filtered_words)
    
    def _calculate_text_similarity(self, text1, text2):
        """计算文本相似度"""
        try:
            # 使用TF-IDF计算相似度
            tfidf = TfidfVectorizer(max_features=1000)
            tfidf_matrix = tfidf.fit_transform([text1, text2])
            similarity_matrix = cosine_similarity(tfidf_matrix)
            return similarity_matrix[0][1]
        except:
            # 简单的词汇重叠度计算
            words1 = set(text1.split())
            words2 = set(text2.split())
            intersection = words1 & words2
            union = words1 | words2
            return len(intersection) / len(union) if union else 0
    
    def _merge_and_rank_recommendations(self, recommendations, user_behavior, page, per_page):
        """合并和排序推荐结果"""
        # 去重
        seen_ids = set()
        merged = []
        
        for news_item, source in recommendations:
            news_id = news_item['id']
            if news_id not in seen_ids:
                seen_ids.add(news_id)
                news_item['recommendation_source'] = source
                news_item['recommendation_score'] = self._calculate_final_score(
                    news_item, user_behavior, source
                )
                merged.append(news_item)
        
        # 按综合得分排序
        merged.sort(key=lambda x: x['recommendation_score'], reverse=True)
        
        # 分页
        start_idx = (page - 1) * per_page
        end_idx = start_idx + per_page
        
        # 获取当前页的结果
        page_results = merged[start_idx:end_idx]
        
        # 如果当前页结果不足，尝试获取更多推荐来填充
        if len(page_results) < per_page and page == 1:
            logger.info(f"第一页推荐不足({len(page_results)})，尝试获取更多推荐")
            # 这里可以添加更多的补充逻辑，但为了简化，暂时保持原样
        
        # 格式化时间戳
        for item in page_results:
            if item.get('published_date'):
                item['published_date'] = item['published_date'].isoformat()
            if item.get('created_at'):
                item['created_at'] = item['created_at'].isoformat()
        
        return page_results
    
    def _calculate_final_score(self, news_item, user_behavior, source):
        """计算最终推荐得分"""
        score = 0
        
        # 基础热度分
        base_score = news_item.get('hot_score', 0)
        score += base_score * 0.3
        
        # 时间衰减
        hours_ago = self._get_hours_since_publish(news_item)
        time_decay = math.exp(-0.05 * hours_ago)  # 每小时衰减5%
        score += time_decay * 20
        
        # 分类偏好加分
        news_category = news_item.get('category', 'general')
        for cat_info in user_behavior.get('favorite_categories', []):
            if cat_info['category'] == news_category:
                score += cat_info['count'] * 2
                break
        
        # 推荐源权重
        source_weights = {
            'collaborative': 1.2,
            'content': 1.0,
            'trending': 0.8,
            'diverse': 0.6
        }
        score *= source_weights.get(source, 1.0)
        
        return score
    
    def _get_hours_since_publish(self, news_item):
        """获取发布后的小时数"""
        try:
            if news_item.get('published_date'):
                if isinstance(news_item['published_date'], str):
                    publish_time = datetime.fromisoformat(news_item['published_date'])
                else:
                    publish_time = news_item['published_date']
            else:
                publish_time = news_item.get('created_at', datetime.now())
            
            if isinstance(publish_time, str):
                publish_time = datetime.fromisoformat(publish_time)
            
            delta = datetime.now() - publish_time
            return delta.total_seconds() / 3600
        except:
            return 0
    
    def _count_available_recommendations(self, user_id):
        """Count recent (30-day) news items the user has not yet favorited.

        Returns a conservative default of 100 when the query fails; failures
        are now logged instead of silently swallowed by a bare `except:`.
        """
        try:
            with self.get_db_connection() as conn:
                with conn.cursor() as cursor:
                    cursor.execute("""
                        SELECT COUNT(*) as total
                        FROM news n
                        WHERE n.id NOT IN (
                            SELECT news_id FROM user_favorites WHERE user_id = %s
                        )
                        AND (n.published_date >= NOW() - INTERVAL 30 DAY 
                             OR n.created_at >= NOW() - INTERVAL 30 DAY)
                    """, (user_id,))
                    result = cursor.fetchone()
                    return result[0] if result else 0
        except Exception as e:
            logger.warning(f"统计可推荐新闻总数失败: {e}")
            return 100  # conservative default when the count is unavailable
    
    def _get_cached_recommendation_total(self, user_id, current_results_count, page, per_page):
        """获取缓存的推荐总数，基于实际推荐结果计算"""
        cache_key = f"user_{user_id}_total"
        
        # 如果是第一页，重新计算并缓存总数
        if page == 1:
            if current_results_count < per_page:
                # 第一页就不满，说明推荐结果就这么多
                total = current_results_count
            else:
                # 第一页满了，通过实际生成推荐来计算总数
                total = self._calculate_actual_total_by_generation(user_id, per_page)
            
            # 缓存总数
            self.user_recommendation_totals[cache_key] = total
            return total
        else:
            # 非第一页，使用缓存的总数
            if cache_key in self.user_recommendation_totals:
                cached_total = self.user_recommendation_totals[cache_key]
                # 如果当前页面没有结果，说明已经到底了，更新总数
                if current_results_count == 0:
                    actual_total = (page - 1) * per_page
                    self.user_recommendation_totals[cache_key] = actual_total
                    return actual_total
                # 如果当前页面不满，更新总数为实际已展示的数量
                elif current_results_count < per_page:
                    actual_total = (page - 1) * per_page + current_results_count
                    self.user_recommendation_totals[cache_key] = actual_total
                    return actual_total
                return cached_total
            else:
                # 如果没有缓存，使用保守估算
                return (page - 1) * per_page + current_results_count
    
    def _calculate_actual_total_by_generation(self, user_id, per_page):
        """Estimate the true number of recommendable items by generating them.

        Dispatches to the intelligent-strategy probe for users with history
        and to the trending-only probe for cold-start users. Falls back to
        two pages' worth on any error.
        """
        try:
            behavior = self._get_user_behavior(user_id)
            if behavior['has_behavior']:
                # Users with history: probe the blended strategies.
                return self._calculate_intelligent_total_by_generation(user_id, per_page)
            # Cold start: probe trending results only.
            return self._calculate_trending_total_by_generation(user_id, per_page)
        except Exception as e:
            logger.warning(f"生成计算总数失败: {e}")
            return per_page * 2  # default: assume two pages exist
    
    def _calculate_trending_total_by_generation(self, user_id, per_page):
        """Count distinct trending items by probing with a growing window.

        _get_trending_news has no offset support, so each pass fetches a
        larger window (check_page * per_page) and dedupes against what was
        already seen. Previously a fixed `limit=per_page` returned the same
        top rows every pass, so the dedupe always emptied out and the count
        was capped at a single page. Falls back to two pages' worth on error.
        """
        try:
            all_recommendations = []
            max_pages = 10  # probe at most 10 pages deep

            for check_page in range(1, max_pages + 1):
                # Widen the window each pass so deeper items can appear.
                page_recs = self._get_trending_news(user_id, limit=per_page * check_page)
                if not page_recs:
                    break

                # Keep only items not seen in earlier passes.
                existing_ids = {r['id'] for r in all_recommendations}
                new_recs = [r for r in page_recs if r['id'] not in existing_ids]

                if not new_recs:
                    break

                all_recommendations.extend(new_recs)

                # Fewer new items than a page means the stream is exhausted.
                if len(new_recs) < per_page:
                    break

            return len(all_recommendations)

        except Exception as e:
            logger.warning(f"生成热门推荐总数失败: {e}")
            return per_page * 2  # default: assume two pages exist
    
    def _calculate_intelligent_total_by_generation(self, user_id, per_page):
        """Count how many distinct blended smart recommendations are available.

        Bug fix: the previous loop invoked every recommendation source with
        the same fixed per-page limits on each iteration, so each pass
        produced the same candidate set (the sources query the DB with fixed
        ORDER BY limits); the dedupe step then found no new items and the
        ``check_page`` loop exited after one pass, leaving it dead code. The
        unused ``user_behavior`` lookup has also been removed. Each source is
        now queried once with its limit scaled to cover ``max_pages`` pages,
        and the distinct news ids are counted.

        Args:
            user_id: ID of the user the recommendations are for.
            per_page: Page size; the total is capped at ``max_pages`` pages.

        Returns:
            int: Number of distinct recommendable items, capped at
            ``per_page * max_pages`` (falls back to two pages on error).
        """
        try:
            max_pages = 8  # cap the total at 8 pages
            budget = per_page * max_pages
            # Each source keeps its original share of a page (0.3/0.5/0.4/...)
            # but scaled so one round of queries covers all max_pages pages.
            sources = (
                (self._collaborative_filtering, 0.3),
                (self._content_based_filtering, 0.5),
                (self._category_based_filtering, 0.4),
                (self._get_trending_news, 0.4),
                (self._diversify_recommendations, 0.3),
            )
            seen_ids = set()
            for fetch, share in sources:
                for item in fetch(user_id, limit=int(budget * share)):
                    if item:  # sources may yield None placeholders
                        seen_ids.add(item['id'])
            return min(len(seen_ids), budget)
        except Exception as e:
            logger.warning(f"生成智能推荐总数失败: {e}")
            return per_page * 2  # 默认2页
    
    def _calculate_actual_recommendation_total(self, user_id):
        """Estimate the recommendation total from the algorithm's reach,
        not from the raw size of the news table.

        Returns:
            int: Estimated total; 24 (four pages) if anything fails.
        """
        try:
            behavior = self._get_user_behavior(user_id)
            if behavior['has_behavior']:
                # Users with history: conservative estimate keyed on how
                # broad their interests are.
                interest_count = len(behavior.get('favorite_categories', []))
                # Two or more interest categories → up to 8 pages; a single
                # narrow interest → 5 pages.
                return 48 if interest_count >= 2 else 30
            # New users: count recent hot items they have not favorited yet.
            with self.get_db_connection() as conn:
                with conn.cursor() as cursor:
                    cursor.execute("""
                            SELECT COUNT(*) as total
                            FROM news n
                            WHERE n.id NOT IN (
                                SELECT news_id FROM user_favorites WHERE user_id = %s
                            )
                            AND (n.published_date >= NOW() - INTERVAL 21 DAY 
                                 OR n.created_at >= NOW() - INTERVAL 21 DAY)
                            AND n.hot_score > 0
                        """, (user_id,))
                    row = cursor.fetchone()
                    # Cap new-user totals at 60 items (10 pages of 6).
                    return min(row[0], 60)
        except Exception as e:
            logger.warning(f"计算推荐总数失败: {e}")
            return 24  # 默认4页
    
    def _estimate_recommendation_total(self, user_id, current_results_count, page, per_page):
        """估算推荐结果的总数量
        
        对于智能推荐系统，我们不能简单地使用全部新闻数量，
        而是需要基于实际的推荐结果来估算总数。
        """
        # 如果是第一页且结果不满，说明推荐结果就这么多
        if page == 1 and current_results_count < per_page:
            return current_results_count
        
        # 如果第一页结果满了，估算可能还有更多页
        if page == 1 and current_results_count == per_page:
            # 估算可能有 5-8 页的推荐结果
            return per_page * 6  # 保守估算
        
        # 对于后续页面，基于当前结果数量进行动态估算
        if current_results_count == per_page:
            # 当前页面满了，可能还有下一页
            return (page + 1) * per_page
        else:
            # 当前页面不满，说明已经到最后一页
            return (page - 1) * per_page + current_results_count
    
    def _generate_explanation(self, recommendations, user_behavior):
        """生成推荐解释"""
        source_counts = Counter(source for _, source in recommendations)
        
        return {
            'total_recommendations': len(recommendations),
            'source_distribution': dict(source_counts),
            'user_favorite_categories': user_behavior.get('favorite_categories', []),
            'recommendation_strategy': '多策略融合推荐：协同过滤、内容相似度、热点推荐、多样性推荐'
        }
    
    def _get_trending_recommendations(self, user_id, page, per_page, include_explain):
        """Hot-news recommendations for new users without behavior data.

        Args:
            user_id: ID of the requesting user (used to exclude favorites).
            page: 1-based page number.
            per_page: Page size.
            include_explain: If True, attach a strategy explanation.

        Returns:
            dict: ``{"status", "recommendations", "pagination"[, "explain"]}``;
            on database errors, the fallback recommendation payload.
        """
        try:
            offset = (page - 1) * per_page
            with self.get_db_connection() as conn:
                with conn.cursor(dictionary=True) as cursor:
                    # The inline derived table maps source_id -> display name
                    # (there is no sources table to join against).
                    cursor.execute("""
                        SELECT n.*, 
                               (SELECT name FROM (
                                   SELECT 1 as id, '中国新闻网' as name UNION ALL
                                   SELECT 2, '央视新闻' UNION ALL
                                   SELECT 3, '新华网' UNION ALL
                                   SELECT 4, '中国日报' UNION ALL
                                   SELECT 5, '环球时报' UNION ALL
                                   SELECT 6, '虎嗅网' UNION ALL
                                   SELECT 7, 'IT之家' UNION ALL
                                   SELECT 11, '人民网' UNION ALL
                                   SELECT 12, '第一财经' UNION ALL
                                   SELECT 13, '知乎热榜' UNION ALL
                                   SELECT 14, '路透社' UNION ALL
                                   SELECT 15, '36氪' UNION ALL
                                   SELECT 16, '澎湃新闻'
                               ) as sources WHERE sources.id = n.source_id) as source_name
                        FROM news n
                        WHERE n.id NOT IN (
                            SELECT news_id FROM user_favorites WHERE user_id = %s
                        )
                        AND (n.published_date >= NOW() - INTERVAL 21 DAY 
                             OR n.created_at >= NOW() - INTERVAL 21 DAY)
                        ORDER BY n.hot_score DESC, n.published_date DESC
                        LIMIT %s OFFSET %s
                    """, (user_id, per_page, offset))
                    
                    recommendations = cursor.fetchall()
                    
                    # Estimate a sensible pagination total instead of using the
                    # full news-table count.
                    actual_count = len(recommendations)
                    if page == 1:
                        if actual_count < per_page:
                            # Short first page: that's everything there is.
                            total = actual_count
                        else:
                            # Full first page: assume several more pages,
                            # capped at 60 items (10 pages of 6).
                            total = min(actual_count * 8, 60)
                    elif actual_count < per_page:
                        # Bug fix: a short or empty later page marks the end of
                        # the feed. Previously we still reported an extra page
                        # here, so the UI advertised empty trailing pages.
                        total = (page - 1) * per_page + actual_count
                    else:
                        # Full later page: assume at least one more page.
                        total = page * per_page + per_page
                    
                    # Serialize datetimes for the JSON response.
                    for item in recommendations:
                        if item.get('published_date'):
                            item['published_date'] = item['published_date'].isoformat()
                        if item.get('created_at'):
                            item['created_at'] = item['created_at'].isoformat()
                    
                    result = {
                        "status": "success",
                        "recommendations": recommendations,
                        "pagination": {
                            "total": total,
                            "page": page,
                            "per_page": per_page,
                            # Guard total == 0 so we always report >= 1 page,
                            # consistent with _fallback_recommendations.
                            "pages": (total + per_page - 1) // per_page if total > 0 else 1
                        }
                    }
                    
                    if include_explain:
                        result["explain"] = {
                            'strategy': '新用户热门推荐',
                            'description': '基于全站热度的推荐，适用于新用户或无行为数据用户'
                        }
                    
                    return result
                    
        except Exception as e:
            logger.error(f"热门推荐失败: {e}")
            return self._fallback_recommendations(user_id, page, per_page)
    
    def _fallback_recommendations(self, user_id, page, per_page):
        """Degraded-mode recommendations: simply the latest news.

        Used when the smarter strategies fail; excludes the user's favorites
        and anything older than 30 days.

        Args:
            user_id: ID of the requesting user (used to exclude favorites).
            page: 1-based page number.
            per_page: Page size.

        Returns:
            dict: Success payload with recommendations and pagination, or an
            error payload if even this query fails.
        """
        try:
            offset = (page - 1) * per_page
            with self.get_db_connection() as conn:
                with conn.cursor(dictionary=True) as cursor:
                    # Inline derived table maps source_id -> display name
                    # (no sources table exists to join against).
                    cursor.execute("""
                        SELECT n.*, 
                               (SELECT name FROM (
                                   SELECT 1 as id, '中国新闻网' as name UNION ALL
                                   SELECT 2, '央视新闻' UNION ALL
                                   SELECT 3, '新华网' UNION ALL
                                   SELECT 4, '中国日报' UNION ALL
                                   SELECT 5, '环球时报' UNION ALL
                                   SELECT 6, '虎嗅网' UNION ALL
                                   SELECT 7, 'IT之家' UNION ALL
                                   SELECT 11, '人民网' UNION ALL
                                   SELECT 12, '第一财经' UNION ALL
                                   SELECT 13, '知乎热榜' UNION ALL
                                   SELECT 14, '路透社' UNION ALL
                                   SELECT 15, '36氪' UNION ALL
                                   SELECT 16, '澎湃新闻'
                               ) as sources WHERE sources.id = n.source_id) as source_name
                        FROM news n
                        WHERE n.id NOT IN (
                            SELECT news_id FROM user_favorites WHERE user_id = %s
                        )
                        AND (n.published_date >= NOW() - INTERVAL 30 DAY 
                               OR n.created_at >= NOW() - INTERVAL 30 DAY)
                        ORDER BY n.created_at DESC
                        LIMIT %s OFFSET %s
                    """, (user_id, per_page, offset))
                    
                    recommendations = cursor.fetchall()
                    
                    # Estimate a sensible pagination total instead of using the
                    # full news-table count.
                    actual_count = len(recommendations)
                    if page == 1:
                        if actual_count < per_page:
                            # Short first page: that's everything there is.
                            total = actual_count
                        else:
                            # Full first page: estimate up to 36 items (6 pages).
                            total = min(actual_count * 6, 36)
                    elif actual_count < per_page:
                        # Bug fix: a short or empty later page marks the end of
                        # the feed. Previously we still reported an extra page
                        # here, so the UI advertised empty trailing pages.
                        total = (page - 1) * per_page + actual_count
                    else:
                        # Full later page: assume at least one more page.
                        total = page * per_page + per_page
                    
                    # Serialize datetimes for the JSON response.
                    for item in recommendations:
                        if item.get('published_date'):
                            item['published_date'] = item['published_date'].isoformat()
                        if item.get('created_at'):
                            item['created_at'] = item['created_at'].isoformat()
                    
                    return {
                        "status": "success",
                        "recommendations": recommendations,
                        "pagination": {
                            "total": total,
                            "page": page,
                            "per_page": per_page,
                            "pages": (total + per_page - 1) // per_page if total > 0 else 1
                        }
                    }
                    
        except Exception as e:
            logger.error(f"降级推荐失败: {e}")
            return {"status": "error", "message": "推荐系统暂时不可用"}
    
    def _update_user_similarity_cache(self):
        """Refresh the cached user-similarity data.

        Currently only stamps the refresh time; the actual similarity
        recomputation is a placeholder to be implemented later.
        """
        try:
            self.last_cache_update = time.time()
            logger.info("用户相似度缓存已更新")
        except Exception as exc:
            logger.warning(f"更新用户相似度缓存失败: {exc}")
    
    def clear_user_recommendation_cache(self, user_id):
        """清除用户推荐缓存（用于刷新时）"""
        cache_key = f"user_{user_id}_total"
        if cache_key in self.user_recommendation_totals:
            del self.user_recommendation_totals[cache_key]
    
    def track_user_behavior(self, user_id, news_id, action, duration=None, extra_data=None):
        """Persist one user interaction (view, click, favorite, ...) to the DB.

        Failures are logged and swallowed so tracking never breaks the
        request path.

        Args:
            user_id: Acting user's ID.
            news_id: News item the action applies to.
            action: Behavior label stored in ``action``.
            duration: Optional reading duration in seconds.
            extra_data: Optional JSON-serializable payload; falsy values are
                stored as NULL.
        """
        try:
            # Serialize the optional payload; a falsy payload becomes NULL.
            payload = json.dumps(extra_data) if extra_data else None
            row = (user_id, news_id, action, duration, payload)
            with self.get_db_connection() as conn:
                with conn.cursor() as cursor:
                    cursor.execute("""
                        INSERT INTO user_reading_behavior 
                        (user_id, news_id, action, read_duration, extra_data, created_at)
                        VALUES (%s, %s, %s, %s, %s, NOW())
                    """, row)
                    conn.commit()
        except Exception as e:
            logger.warning(f"记录用户行为失败: {e}")