import os
import feedparser
import mysql.connector
import requests
import threading
import re
import schedule
from datetime import datetime, timedelta
from flask import Flask, jsonify, render_template, request, session, redirect, url_for
from flask_cors import CORS
from werkzeug.security import generate_password_hash, check_password_hash
import time
import logging
import json
import chardet  # 用于自动检测字符编码
from bs4 import BeautifulSoup  # 用于解析HTML内容
from direct_crawler import DirectNewsCrawler, DIRECT_NEWS_SOURCES  # 直接抓取模块
from intelligent_classifier import news_classifier  # 智能分类器
from intelligent_recommender import IntelligentNewsRecommender  # 智能推荐系统

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Intelligent recommender instance (initialized lazily elsewhere)
intelligent_recommender = None

# Explicitly set the template folder path
template_dir = os.path.abspath('templates')
app = Flask(__name__, template_folder=template_dir)
CORS(app)

# Secret key for session management (from env var; dev default if unset)
app.secret_key = os.environ.get('SECRET_KEY', 'dev-secret-key-change-me')
# Session timeout configuration (1 hour)
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=1)

# Database configuration — credentials are read from the environment when set,
# falling back to the original development defaults so existing setups keep
# working. Avoids committing real secrets to source control.
DB_CONFIG = {
    'host': os.environ.get('DB_HOST', 'localhost'),
    'user': os.environ.get('DB_USER', 'root'),
    'password': os.environ.get('DB_PASSWORD', '962464'),
    'database': os.environ.get('DB_NAME', 'news_aggregator'),
    'charset': 'utf8mb4',  # supports all Unicode characters
    'collation': 'utf8mb4_unicode_ci'
}

# DeepSeek API configuration — key comes from the environment when set.
DEEPSEEK_API_KEY = os.environ.get('DEEPSEEK_API_KEY', 'sk-02e5cd8120274a0daccc3a7c26bf38cd')
DEEPSEEK_API_URL = "https://api.deepseek.com/v1/chat/completions"

# News-source RSS addresses (only working Chinese-language sources kept)
NEWS_SOURCES = [
    # General Chinese news sources (working)
    {'id': 1, 'name': '中国新闻网', 'rss_url': 'https://www.chinanews.com/rss/scroll-news.xml', 'category': 'general'},
    {'id': 2, 'name': '央视新闻', 'rss_url': 'https://news.cctv.com/2019/07/gaiban/cmsdatainterface/page/news_1.jsonp?cb=news', 'category': 'general'},
    {'id': 3, 'name': '新华网', 'rss_url': 'http://www.news.cn/politics/news_politics.xml', 'category': 'general'},
    {'id': 4, 'name': '中国日报', 'rss_url': 'https://www.chinadaily.com.cn/rss/china_rss.xml', 'category': 'general'},
    {'id': 5, 'name': '环球时报', 'rss_url': 'https://www.globaltimes.cn/rss/outbrain.xml', 'category': 'general'},
    
    # Chinese technology news sources (working)
    {'id': 6, 'name': '虎嗅网', 'rss_url': 'https://www.huxiu.com/rss/0.xml', 'category': 'technology'},
    {'id': 7, 'name': 'IT之家', 'rss_url': 'https://www.ithome.com/rss/', 'category': 'technology'},
    
    # Dead RSS sources removed (kept here for reference):
    # {'id': 8, 'name': '新浪科技', 'rss_url': 'https://tech.sina.com.cn/rss/roll.xml', 'category': 'technology'}, # 404 error
    # {'id': 9, 'name': '网易科技', 'rss_url': 'https://tech.163.com/special/000944OI/tech_datalist.xml', 'category': 'technology'}, # 404 error
    # {'id': 10, 'name': '中关村在线', 'rss_url': 'https://www.zol.com.cn/rss/all.xml', 'category': 'technology'}, # 404 error
]

def get_db_connection():
    """Open and return a MySQL connection using DB_CONFIG.

    Logs the failure and re-raises so callers can decide how to recover.
    """
    try:
        return mysql.connector.connect(**DB_CONFIG)
    except Exception as e:
        logger.error(f"数据库连接错误: {str(e)}")
        raise

def clean_xml_content(content):
    """Clean raw XML text so feedparser can cope with common format problems.

    Args:
        content: Decoded XML/RSS text (may be empty or None).

    Returns:
        Cleaned text; falsy input is returned unchanged.
    """
    if not content:
        return content
    
    # Strip XML comments
    content = re.sub(r'<!--.*?-->', '', content, flags=re.DOTALL)
    # Expand malformed self-closing tags into open/close pairs
    content = re.sub(r'<(\w+)\s+([^>]+?)/\s*>', r'<\1 \2></\1>', content)
    # Drop invalid/control characters. Bug fix: the old whitelist also
    # stripped \t, \n and \r, collapsing the whole document onto one line;
    # keep legal XML whitespace.
    content = re.sub(r'[^\t\n\r\x20-\x7E\xA0-\xFF\u0100-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF]', '', content)
    
    return content

def detect_encoding(content):
    """Guess the character encoding of raw bytes.

    Trusts chardet when its confidence is high; otherwise trial-decodes a
    list of encodings common for Chinese content.
    """
    guess = chardet.detect(content)
    encoding = guess['encoding']
    confidence = guess['confidence']
    
    if encoding and confidence >= 0.7:
        return encoding
    
    # Low-confidence detection: try common encodings until one decodes cleanly.
    for candidate in ('utf-8', 'gbk', 'gb2312', 'iso-8859-1'):
        try:
            content.decode(candidate)
        except UnicodeDecodeError:
            continue
        else:
            return candidate
    return 'utf-8'  # final fallback

def fetch_rss_feed(url, timeout=15):
    """Fetch and parse an RSS feed (standard XML, plus a JSONP special case).

    Args:
        url: Feed URL to fetch.
        timeout: HTTP timeout in seconds.

    Returns:
        A list of feed entries — feedparser entries for XML feeds, or plain
        dicts (title/link/description/published) for the CCTV JSONP source.
        Returns an empty list on any failure.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
            'Accept': 'application/rss+xml, application/xml, text/xml, application/json;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Accept-Encoding': 'gzip, deflate'
        }
        
        response = requests.get(url, headers=headers, timeout=timeout, allow_redirects=True)
        
        if response.status_code == 404:
            logger.error(f"RSS地址不存在 (404): {url}")
            return []
        if response.status_code >= 400:
            logger.error(f"获取失败，状态码: {response.status_code}，地址: {url}")
            return []
            
        # Special case: the CCTV News "feed" is JSONP, not XML.
        if 'news.cctv.com' in url:
            try:
                # Auto-detect the byte encoding
                encoding = detect_encoding(response.content)
                # Decode with the detected encoding
                content = response.content.decode(encoding, errors='replace')
                
                # Unwrap the JSONP callback to get the JSON payload
                json_str = content.split('(', 1)[1].rsplit(')', 1)[0]
                data = json.loads(json_str)
                items = []
                for item in data.get('data', {}).get('list', []):
                    items.append({
                        'title': item.get('title', '').strip() if item.get('title') else '无标题',
                        'link': item.get('url', '').strip() if item.get('url') else '',
                        'description': item.get('brief', '').strip() if item.get('brief') else '',
                        'published': item.get('pubtime', '').strip() if item.get('pubtime') else ''
                    })
                return items
            except Exception as e:
                logger.error(f"解析央视新闻JSON失败: {str(e)}")
                return []
        
        # Standard XML RSS path
        encoding = detect_encoding(response.content)
        try:
            content = response.content.decode(encoding)
        except UnicodeDecodeError:
            # Detected encoding failed; trial-decode common fallbacks
            encodings = ['utf-8', 'gbk', 'gb2312', 'iso-8859-1']
            for enc in encodings:
                try:
                    content = response.content.decode(enc)
                    break
                except UnicodeDecodeError:
                    continue
            else:
                # Last resort: let requests pick the decoding
                content = response.text
                
        cleaned_content = clean_xml_content(content)
        if not (cleaned_content.startswith('<?xml') or '<rss' in cleaned_content or '<feed' in cleaned_content):
            logger.warning(f"不是有效的RSS/XML内容: {url}")
            return []
            
        feed = feedparser.parse(cleaned_content)
        if feed.bozo != 0:
            logger.warning(f"解析警告 {url}: {str(feed.bozo_exception)[:100]}")
        if not feed.entries:
            logger.warning(f"未找到新闻条目: {url}")
            return []
            
        return feed.entries
        
    except requests.exceptions.ConnectionError:
        logger.error(f"连接失败: {url}")
        return []
    except requests.exceptions.Timeout:
        logger.error(f"超时: {url}")
        return []
    except Exception as e:
        logger.error(f"获取RSS出错 {url}: {str(e)}")
        return []

def store_news_items(source, items):
    """Store feed entries in the database, skipping duplicates by link.

    Args:
        source: Source config dict with at least 'id' and 'name'.
        items: Iterable of entries — feedparser entries OR plain dicts
            (the CCTV JSON path produces plain dicts).

    Returns:
        Number of newly inserted rows; 0 on any error.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                # Keep lock waits short so a stuck crawl doesn't pile up
                cursor.execute("SET innodb_lock_wait_timeout = 5")
                new_count = 0
                
                for item in items:
                    # Deduplicate by link
                    cursor.execute("SELECT id FROM news WHERE link = %s", (item.get('link', ''),))
                    if cursor.fetchone():
                        continue
                        
                    # Normalize the news fields
                    title = item.get('title', '').strip() if item.get('title') else '无标题'
                    title = re.sub(r'[^\w\s\-\.,，。！!？?；;：:]', '', title)
                    
                    link = item.get('link', '').strip() if item.get('link') else ''
                    
                    content = item.get('description', '').strip()[:5000]
                    content = re.sub(r'[^\w\s\-\.,，。！!？?；;：:<>""\'\'\(\)\[\]]', '', content)
                    
                    published_date = None
                    
                    # Parse the publish time. Bug fix: items may be plain
                    # dicts (CCTV JSON path) where attribute access like
                    # item.published raises AttributeError, which the old
                    # bare except silently swallowed — use .get() and catch
                    # only strptime's parse errors.
                    published_raw = item.get('published', '')
                    if published_raw:
                        for fmt in ['%a, %d %b %Y %H:%M:%S %Z', '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d %H:%M:%S']:
                            try:
                                published_date = datetime.strptime(published_raw, fmt)
                                break
                            except (ValueError, TypeError):
                                continue
                    if not published_date:
                        published_date = datetime.now()
                    
                    # Classify via the intelligent classifier
                    intelligent_category = news_classifier.classify_news(
                        title=title,
                        content=content,
                        source_name=source.get('name', '')
                    )
                    
                    # Insert (includes the category column)
                    cursor.execute(
                        """INSERT INTO news (source_id, title, link, content, category, published_date, created_at)
                           VALUES (%s, %s, %s, %s, %s, %s, NOW())""",
                        (source['id'], title, link, content, intelligent_category, published_date)
                    )
                    new_count += 1
                
                conn.commit()
                logger.info(f"成功存储 {source['name']} 的 {new_count} 条新新闻")
                return new_count
    except Exception as e:
        logger.error(f"存储新闻错误: {e}")
        return 0

def fetch_and_store_all_news():
    """Crawl every configured source (RSS + direct) and persist new articles.

    Returns:
        Total number of newly stored articles across all sources.
    """
    logger.info("=== 开始执行新闻抓取任务 ===")
    total_new = 0
    
    # RSS sources first
    for source in NEWS_SOURCES:
        logger.info(f"正在抓取 {source['name']} ({source['rss_url']})")
        entries = fetch_rss_feed(source['rss_url'])
        
        if not entries:
            logger.warning(f"{source['name']} 未获取到新闻条目")
        else:
            total_new += store_news_items(source, entries)
            
        # Throttle so we do not hammer the feed servers
        time.sleep(3)
    
    # Then the direct-crawl sources
    total_new += fetch_and_store_direct_news()
    
    logger.info(f"=== 抓取任务完成，共新增 {total_new} 条新闻 ===")
    return total_new

def store_direct_news_items(source, items):
    """Store directly-crawled news items in the database, deduplicating by link.

    Args:
        source: Source config dict with at least 'id' and 'name'.
        items: List of crawled item dicts (title/link/summary/publish_time).

    Returns:
        Number of newly inserted rows; 0 on any error.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                # Keep lock waits short so a stuck crawl doesn't pile up
                cursor.execute("SET innodb_lock_wait_timeout = 5")
                new_count = 0
                
                for item in items:
                    # Deduplicate by link
                    cursor.execute("SELECT id FROM news WHERE link = %s", (item.get('link', ''),))
                    if cursor.fetchone():
                        continue
                    
                    # Normalize the news fields. Consistency fix: use the same
                    # whitelist as store_news_items (the old pattern had a
                    # redundant duplicated ':' in the character class).
                    title = item.get('title', '').strip() if item.get('title') else '无标题'
                    title = re.sub(r'[^\w\s\-\.,，。！!？?；;：:]', '', title)
                    
                    link = item.get('link', '').strip() if item.get('link') else ''
                    
                    # Directly crawled items may carry a richer summary
                    content = item.get('summary', '').strip()[:5000] if item.get('summary') else ''
                    if not content:
                        content = title  # fall back to the title when no summary
                    
                    published_date = item.get('publish_time', datetime.now())
                    if not isinstance(published_date, datetime):
                        published_date = datetime.now()
                    
                    # Classify via the intelligent classifier
                    intelligent_category = news_classifier.classify_news(
                        title=title,
                        content=content,
                        source_name=source.get('name', '')
                    )
                    
                    # Insert (includes the category column)
                    cursor.execute(
                        """INSERT INTO news (source_id, title, link, content, category, published_date, created_at)
                           VALUES (%s, %s, %s, %s, %s, %s, NOW())""",
                        (source['id'], title, link, content, intelligent_category, published_date)
                    )
                    new_count += 1
                
                conn.commit()
                logger.info(f"成功存储 {source['name']} 的 {new_count} 条新新闻")
                return new_count
    except Exception as e:
        logger.error(f"存储直接抓取新闻错误: {e}")
        return 0

def fetch_and_store_direct_news():
    """Run the direct (non-RSS) crawler and persist its results per source.

    Returns:
        Total number of newly stored articles.
    """
    logger.info("=== 开始执行直接抓取任务 ===")
    crawler = DirectNewsCrawler()
    total_new = 0
    
    # Group the crawled items by their source name
    grouped = {}
    for article in crawler.crawl_all_sources():
        grouped.setdefault(article.get('source', '未知来源'), []).append(article)
    
    # Store each configured source's batch
    for source_config in DIRECT_NEWS_SOURCES:
        batch = grouped.get(source_config['name'])
        if batch:
            total_new += store_direct_news_items(source_config, batch)
    
    logger.info(f"=== 直接抓取任务完成，共新增 {total_new} 条新闻 ===")
    return total_new

def extract_article_publish_time(soup, url):
    """Extract the article's real publish time from its page DOM.

    Args:
        soup: BeautifulSoup document for the article page.
        url: Article URL, used to prioritize site-specific selectors.

    Returns:
        datetime if a parseable time was found, otherwise None.

    Note: the redundant function-local imports of re/datetime were removed
    (both are already imported at module level), and the bare except was
    narrowed to the errors fromisoformat actually raises.
    """
    # Common time selectors/attributes, in rough priority order
    time_selectors = [
        # Structured data
        'time[datetime]',
        '[datetime]',
        
        # Common time-related class names
        '.publish-time', '.publish-date', '.pub-time', '.pub-date',
        '.article-time', '.article-date', '.post-time', '.post-date',
        '.news-time', '.news-date', '.date', '.time',
        '.created-time', '.update-time', '.source-time',
        
        # Class names common on Chinese news sites
        '.fabushijian', '.shijian', '.riqi', '.chubanshijian',
        '.publish', '.published', '.release-time',
        
        # ID selectors
        '#publish-time', '#publish-date', '#article-time', '#article-date',
        '#news-time', '#news-date', '#date', '#time'
    ]
    
    # Site-specific selectors take priority (inserted at the front)
    if 'chinanews.com' in url:
        time_selectors.insert(0, '.left-time')
        time_selectors.insert(0, '.article-date')
        time_selectors.insert(0, '.content-date')
    elif 'news.cctv.com' in url:
        time_selectors.insert(0, '.info')
        time_selectors.insert(0, '.date')
    elif 'xinhuanet.com' in url or 'news.cn' in url:
        time_selectors.insert(0, '.time')
        time_selectors.insert(0, '.source')
    elif 'chinadaily.com.cn' in url:
        time_selectors.insert(0, '.date')
        time_selectors.insert(0, '.publish')
    elif 'ithome.com' in url:
        time_selectors.insert(0, '.post-date')
        time_selectors.insert(0, '.meta')
    
    # Try structured/selector-based extraction first
    for selector in time_selectors:
        elements = soup.select(selector)
        for element in elements:
            # Check the datetime attribute (ISO-8601, possibly with a Z suffix)
            datetime_attr = element.get('datetime')
            if datetime_attr:
                try:
                    return datetime.fromisoformat(datetime_attr.replace('Z', '+00:00'))
                except (ValueError, TypeError):
                    continue
            
            # Check the content attribute (used by meta tags)
            content_attr = element.get('content')
            if content_attr:
                parsed_time = parse_time_string(content_attr)
                if parsed_time:
                    return parsed_time
            
            # Fall back to the element's visible text
            text_content = element.get_text(strip=True)
            if text_content:
                parsed_time = parse_time_string(text_content)
                if parsed_time:
                    return parsed_time
    
    # Nothing matched a selector: scan the whole page text for time patterns
    page_text = soup.get_text()
    return parse_time_from_page_text(page_text)

def parse_time_string(time_str):
    """Parse a timestamp string into a datetime; return None when unrecognized.

    Accepts ISO-like, Chinese (年/月/日), and slash-separated formats, and a
    month-day-only form that assumes the current year.
    """
    if not time_str or len(time_str) < 8:
        return None
    
    import re
    from datetime import datetime
    
    # Regexes that locate a timestamp inside arbitrary text, by priority
    candidate_patterns = [
        # ISO-like formats
        r'(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})',
        r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})',
        r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2})',
        r'(\d{4}-\d{2}-\d{2})',
        
        # Chinese date formats
        r'(\d{4}年\d{1,2}月\d{1,2}日\s*\d{1,2}:\d{2})',
        r'(\d{4}年\d{1,2}月\d{1,2}日)',
        
        # Other common formats
        r'(\d{4}/\d{1,2}/\d{1,2}\s+\d{1,2}:\d{2})',
        r'(\d{4}/\d{1,2}/\d{1,2})',
        r'(\d{1,2}/\d{1,2}/\d{4})',
        
        # Special cases (e.g. chinanews.com)
        r'(\d{4}年\d{2}月\d{2}日\s*\d{2}:\d{2})',
        r'(\d{2}-\d{2}\s+\d{2}:\d{2})',  # month-day plus time only
    ]
    
    # strptime formats to try against each regex hit
    known_formats = (
        '%Y-%m-%dT%H:%M:%S',
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d %H:%M',
        '%Y-%m-%d',
        '%Y年%m月%d日 %H:%M',
        '%Y年%m月%d日',
        '%Y/%m/%d %H:%M',
        '%Y/%m/%d',
        '%m/%d/%Y',
    )
    
    for pattern in candidate_patterns:
        hit = re.search(pattern, time_str)
        if not hit:
            continue
        candidate = hit.group(1)
        
        for fmt in known_formats:
            try:
                return datetime.strptime(candidate, fmt)
            except ValueError:
                pass
        
        # Month-day-only timestamps: assume the current year
        if re.match(r'\d{2}-\d{2}\s+\d{2}:\d{2}', candidate):
            try:
                return datetime.strptime(f"{datetime.now().year}-{candidate}", '%Y-%m-%d %H:%M')
            except ValueError:
                pass
    
    return None

def parse_time_from_page_text(page_text):
    """Scan free-form page text for a timestamp and return the first parseable one.

    Returns a datetime, or None if no candidate parses.
    """
    import re
    
    candidate_patterns = (
        r'(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2})',
        r'(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2})',
        r'(\d{4}年\d{1,2}月\d{1,2}日\s*\d{1,2}:\d{2})',
        r'(\d{4}年\d{1,2}月\d{1,2}日)',
    )
    
    for pattern in candidate_patterns:
        for candidate in re.findall(pattern, page_text):
            parsed = parse_time_string(candidate)
            if parsed is not None:
                return parsed
    
    return None

def fetch_full_content(url):
    """获取新闻原文的完整内容（精确提取正文）"""
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Accept-Encoding': 'gzip, deflate'
        }
        
        response = requests.get(url, headers=headers, timeout=10, allow_redirects=True)
        response.raise_for_status()
        
        # 自动检测编码
        encoding = detect_encoding(response.content)
        try:
            content = response.content.decode(encoding)
        except UnicodeDecodeError:
            content = response.text
        
        # 使用BeautifulSoup解析HTML
        soup = BeautifulSoup(content, 'html.parser')
        
        # 特殊处理：中新网图片页面
        if 'chinanews.com' in url and ('/tp/hd' in url or '/photo/' in url):
            try:
                # 提取图片页面的特殊结构
                title_elem = soup.find('h1', class_='page_title')
                title = title_elem.get_text(strip=True) if title_elem else ''
                
                # 提取时间和来源
                source_time_elem = soup.find('div', class_='source_time')
                image_publish_time = datetime.now()
                if source_time_elem:
                    time_text = source_time_elem.get_text(strip=True)
                    time_match = re.search(r'(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2})', time_text)
                    if time_match:
                        try:
                            image_publish_time = datetime.strptime(time_match.group(1), '%Y-%m-%d %H:%M:%S')
                        except:
                            pass
                
                # 提取主图片
                main_img_elem = soup.find('div', class_='current_img')
                main_img_html = ''
                if main_img_elem:
                    main_img = main_img_elem.find('img')
                    if main_img:
                        main_img_src = main_img.get('src', '')
                        if main_img_src.startswith('//'):
                            main_img_src = 'https:' + main_img_src
                        elif main_img_src.startswith('/'):
                            main_img_src = 'https://www.chinanews.com' + main_img_src
                        
                        img_alt = main_img.get('alt', title)
                        main_img_html = f'<div class="image-container" style="text-align: center; margin: 20px 0;"><img src="{main_img_src}" alt="{img_alt}" style="max-width: 100%; height: auto; border-radius: 8px; box-shadow: 0 4px 8px rgba(0,0,0,0.1);"></div>'
                
                # 提取图片描述
                desc_elem = soup.find('div', class_='desc')
                desc_html = ''
                if desc_elem:
                    desc_paragraphs = desc_elem.find_all('p')
                    for p in desc_paragraphs:
                        desc_text = p.get_text(strip=True)
                        if desc_text and not desc_text.startswith('【编辑'):
                            desc_html += f'<p style="margin: 10px 0; line-height: 1.6; color: #374151;">{desc_text}</p>'
                
                # 提取摄影者信息
                photographer_elem = soup.find('div', style=re.compile(r'text-align:center'))
                photographer_html = ''
                if photographer_elem:
                    photographer = photographer_elem.get_text(strip=True)
                    if photographer:
                        photographer_html = f'<p style="text-align: center; margin: 15px 0; font-style: italic; color: #6b7280; font-size: 0.9em;">{photographer}</p>'
                
                # 提取缩略图信息
                thumbnail_html = ''
                thumbnail_list = soup.find('ul', id='scrool_wrap')
                if thumbnail_list:
                    thumbnails = thumbnail_list.find_all('img')
                    if len(thumbnails) > 1:  # 如果有多张图片
                        thumbnail_html += '<div class="thumbnail-gallery" style="margin: 30px 0; padding: 20px; background-color: #f9fafb; border-radius: 12px;">'
                        thumbnail_html += f'<h3 style="margin-bottom: 15px; color: #1f2937; font-size: 1.1em;">图集共 {len(thumbnails)} 张图片</h3>'
                        thumbnail_html += '<div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(150px, 1fr)); gap: 15px;">'
                        
                        for i, thumb in enumerate(thumbnails, 1):
                            thumb_src = thumb.get('src', '')
                            if thumb_src.startswith('//'):
                                thumb_src = 'https:' + thumb_src
                            elif thumb_src.startswith('/'):
                                thumb_src = 'https://www.chinanews.com' + thumb_src
                            
                            thumb_alt = thumb.get('alt', f'{title} - 第{i}张')
                            thumbnail_html += f'<div style="text-align: center;"><img src="{thumb_src}" alt="{thumb_alt}" style="width: 100%; height: 100px; object-fit: cover; border-radius: 8px; transition: transform 0.3s ease;"><p style="margin-top: 8px; font-size: 0.8em; color: #6b7280;">第 {i} 张</p></div>'
                        
                        thumbnail_html += '</div></div>'
                
                # 构建完整的图片新闻HTML
                final_html = f'''
                <div class="chinanews-photo-article" style="max-width: 800px; margin: 0 auto; padding: 20px;">
                    <h1 style="font-size: 1.8em; font-weight: bold; color: #1f2937; margin-bottom: 20px; text-align: center; line-height: 1.4;">{title}</h1>
                    
                    {main_img_html}
                    
                    {photographer_html}
                    
                    <div class="article-content" style="margin: 25px 0;">
                        {desc_html}
                    </div>
                    
                    {thumbnail_html}
                    
                    <div class="source-info" style="margin-top: 30px; padding: 15px; background-color: #f0f8ff; border-radius: 8px; border-left: 4px solid #3b82f6;">
                        <p style="margin: 0; color: #6b7280; font-size: 0.9em;">
                            <strong>来源：</strong>中国新闻网 | <strong>发布时间：</strong>{image_publish_time.strftime('%Y-%m-%d %H:%M:%S')}
                        </p>
                    </div>
                </div>
                '''
                
                logger.info(f"成功提取中新网图片新闻内容，长度: {len(final_html)}")
                return {"content": final_html, "publish_time": image_publish_time}
                
            except Exception as e:
                logger.error(f"处理中新网图片页面失败: {e}")
                # 继续使用传统方法
        
        # 特殊处理：澎湃新闻使用Next.js，内容在JSON数据中
        if 'thepaper.cn' in url:
            # 查找页面中的JSON数据
            script_tag = soup.find('script', {'id': '__NEXT_DATA__'})
            if script_tag:
                try:
                    import json
                    json_data = json.loads(script_tag.string)
                    detail_data = json_data.get('props', {}).get('pageProps', {}).get('detailData', {})
                    content_detail = detail_data.get('contentDetail', {})
                    
                    if content_detail.get('content'):
                        # 从JSON中获取完整内容
                        article_content_html = content_detail['content']
                        
                        # 解析HTML内容
                        article_soup = BeautifulSoup(article_content_html, 'html.parser')
                        
                        # 获取发布时间
                        pub_time = content_detail.get('pubTime')
                        if pub_time:
                            try:
                                article_publish_time = datetime.strptime(pub_time, '%Y-%m-%d %H:%M')
                            except:
                                article_publish_time = datetime.now()
                        else:
                            article_publish_time = datetime.now()
                        
                        # 获取作者和编辑信息
                        author = content_detail.get('author', '')
                        image_editor = content_detail.get('imageEditor', '')
                        respon_editor = content_detail.get('responEditor', '')
                        
                        # 构建完整的HTML内容，包含所有段落
                        final_html = str(article_soup)
                        
                        # 在内容末尾添加编辑信息（模拟原页面结构）
                        if respon_editor or image_editor:
                            final_html += '<div class="editor-info" style="margin-top: 2em; padding: 1em; background-color: #f8f9fa; border-radius: 8px;">'
                            if respon_editor:
                                final_html += f'<p><strong>责任编辑：</strong>{respon_editor}</p>'
                            if image_editor:
                                final_html += f'<p><strong>图片编辑：</strong>{image_editor}</p>'
                            final_html += '</div>'
                        
                        # 添加版权信息
                        final_html += '<div class="copyright-info" style="margin-top: 1em; padding: 1em; background-color: #f0f8ff; border-radius: 8px; border-left: 4px solid #165DFF;">'
                        final_html += '<p style="margin: 0; color: #6b7280; font-size: 0.9em;">'
                        final_html += '<strong>澎湃新闻报料：</strong>021-962866<br>'
                        final_html += '<strong>版权声明：</strong>澎湃新闻，未经授权不得转载'
                        final_html += '</p></div>'
                        
                        logger.info(f"成功从澎湃新闻JSON数据中提取内容，长度: {len(final_html)}")
                        return {"content": final_html, "publish_time": article_publish_time}
                        
                except Exception as e:
                    logger.error(f"解析澎湃新闻JSON数据失败: {e}")
                    # 继续使用传统方法
        
        # 提取原文发布时间
        article_publish_time = extract_article_publish_time(soup, url)
        
        # 移除HTML注释（包括栏目导航注释）
        from bs4 import Comment
        comments = soup.find_all(string=lambda text: isinstance(text, Comment))
        for comment in comments:
            comment.extract()
        
        # 移除干扰元素（更全面）
        unwanted_tags = [
            'script', 'style', 'nav', 'header', 'footer', 'aside', 
            'iframe', 'form', 'button', 'input', 'select', 'textarea',
            'noscript', 'meta', 'link', 'title'
        ]
        for tag in soup(unwanted_tags):
            tag.decompose()
        
        # 移除常见的干扰内容区域（按类名和ID）
        unwanted_selectors = [
            # 广告相关
            '.ad', '.ads', '.advertisement', '.advert', '.sponsor',
            '#ad', '#ads', '[class*="ad_"]', '[id*="ad_"]',
            '.ad-container', '.ad-banner', '.ad-block',
            
            # 导航和菜单
            '.nav', '.navigation', '.menu', '.sidebar', '.side-bar',
            '.breadcrumb', '.breadcrumbs', '.crumb', '.crumbs',
            
            # 评论和互动
            '.comment', '.comments', '.discuss', '.discussion',
            '.reply', '.replies', '.feedback', '.review',
            '.social', '.share', '.sharing', '.like', '.follow',
            
            # 推荐和相关
            '.recommend', '.recommendation', '.related', '.similar',
            '.more', '.other', '.popular', '.hot', '.trending',
            '.tags', '.tag', '.category', '.categories',
            
            # 页面结构
            '.header', '.footer', '.top', '.bottom',
            '.left', '.right', '.sidebar-left', '.sidebar-right',
            
            # 其他干扰内容
            '.promo', '.promotion', '.banner', '.popup', '.modal',
            '.newsletter', '.subscription', '.subscribe',
            '.download', '.app-download', '.qrcode', '.qr-code',
            '.copyright', '.legal', '.privacy', '.terms',
            '.author-bio', '.author-info', '.byline-extra'
        ]
        
        for selector in unwanted_selectors:
            for element in soup.select(selector):
                element.decompose()
        
        # Remove elements whose text contains known boilerplate keywords
        # (share bars, QR codes, copyright footers, site navigation, per-site promos).
        # NOTE(review): several entries ('评论', '广告', '举报', '版权', '合作') are
        # common words that can occur inside legitimate article prose; a match
        # decomposes the whole parent element, which may delete real content — verify.
        unwanted_keywords = [
            '点击进入', '扫一扫', '二维码', '下载客户端',
            '更多精彩', '返回顶部', '分享到', '关注我们',
            '版权所有', '违法和不良信息', '举报电话',
            '友情链接', '广告服务', '合作伙伴',
            '联系我们', '站点地图', '免责声明',
            # Added: keywords specifically targeting news-section navigation
            '频道新闻精选', '精选新闻', '新闻精选', '栏目导航',
            '相关新闻', '推荐阅读', '热点新闻', '最新消息',
            '专题报道', '图片新闻', '视频新闻', '滚动新闻',
            '开始', '结束', 'start', 'end', '880*X',
            '加载更多', '下方按钮', '左侧内容', '右侧内容',
            '顶部导航', '底部导航', '侧边栏', '广告位',
            '推广信息', '商业推广', '赞助商', '合作方',
            '网站导航', '栏目列表', '分类导航', '频道导航',
            # ITHome promotional-content keywords
            '下载IT之家APP', '下载 IT之家 APP', '下载IT之家app',
            '安装IT之家APP', '获取IT之家APP', 'IT之家APP下载',
            'IT之家客户端', '软媒旗下网站', '软媒', 'QQ群',
            '相关文章', '相关推荐', 'APP下载', '客户端下载',
            # Yicai (China Business News) author/copyright keywords
            '第一财经版权', 'banquan@yicai.com', '文章作者',
            '责任编辑', '应稿作者', '责编', '投稿', '反馈',
            '联系编辑', '举报', '投诉', '版权归第一财经所有',
            '相关阅读', '推荐阅读', '热门阅读', '更多阅读',
            # Yicai-specific navigation and recommendation content
            '一财最热', '一财号', '第一财经', '财经头条', '今日推荐',
            '评论', 'comment', '回到顶部', '更多资讯',
            '广告', '推广', '赞助', '合作', '商务合作'
        ]
        
        # NOTE(review): `text=` is the pre-4.4 spelling of this argument; modern
        # BeautifulSoup prefers `string=` (behavior identical here).
        for keyword in unwanted_keywords:
            for element in soup.find_all(text=lambda text: text and keyword in text):
                if element.parent:
                    element.parent.decompose()
        
        # Try to locate the article body; selectors are ordered by priority and
        # the first candidate passing the quality checks below wins.
        article_selectors = [
            # Highest priority: semantic article tags
            'article',
            'main article',
            '[role="main"] article',
            
            # High priority: common article-content containers
            '.article-content',
            '.article-body',
            '.post-content',
            '.post-body',
            '.content-body',
            '.news-content',
            '.news-body',
            '.story-content',
            '.story-body',
            '.text-content',
            
            # Medium priority: generic content areas
            '.content',
            '.main-content',
            '.entry-content',
            '.post-entry',
            '.article-text',
            
            # Low priority: ID selectors
            '#content',
            '#article',
            '#main-content',
            '#post-content',
            
            # Last resort: main page regions
            'main',
            '.main',
            '[role="main"]'
        ]
        
        # Walk the selectors in priority order; keep the first candidate whose
        # text passes the quality gates, otherwise reset and try the next.
        article_content = None
        for selector in article_selectors:
            article_content = soup.select_one(selector)
            if article_content:
                # Quality gate: require a reasonable amount of plain text.
                text_content = article_content.get_text(strip=True)
                
                # Stricter content validation
                if len(text_content) > 200:  # at least 200 characters
                    # Reject candidates dominated by navigation keywords.
                    nav_keywords = ['频道', '栏目', '导航', '精选', '推荐', '热点']
                    nav_count = sum(1 for keyword in nav_keywords if keyword in text_content)
                    
                    # Check paragraph structure: a real article has multiple substantial paragraphs.
                    paragraphs = article_content.find_all(['p', 'div'])
                    paragraph_texts = [p.get_text(strip=True) for p in paragraphs if len(p.get_text(strip=True)) > 50]
                    
                    # Skip this candidate if it is navigation-heavy or has too few paragraphs.
                    if nav_count <= 2 and len(paragraph_texts) >= 2:
                        break
                    else:
                        article_content = None
                else:
                    article_content = None
        
        # If no selector produced acceptable content, fall back to a heuristic.
        if not article_content:
            # Pick the div carrying the most text (above a 200-char floor).
            all_divs = soup.find_all('div')
            best_div = None
            max_text_length = 0
            
            for div in all_divs:
                text_length = len(div.get_text(strip=True))
                if text_length > max_text_length and text_length > 200:
                    # Skip divs whose class/id suggests boilerplate chrome.
                    div_classes = ' '.join(div.get('class', []))
                    div_id = div.get('id', '')
                    
                    if not any(keyword in (div_classes + div_id).lower() 
                             for keyword in ['ad', 'nav', 'menu', 'sidebar', 'comment', 'footer', 'header']):
                        max_text_length = text_length
                        best_div = div
            
            article_content = best_div
        
        if article_content:
            # Second cleanup pass inside the selected content region only.
            for unwanted in article_content.select('.ad, .advertisement, .related, .comment, .share, .social'):
                unwanted.decompose()
            
            # chinanews.com special case: truncate the content before the comment section.
            if 'chinanews.com' in url:
                # Predicates identifying the comment-section boundary.
                # NOTE(review): tag.get_text() aggregates all descendant text, so the
                # first match in document order may be a large ancestor container —
                # deleting it plus its following siblings can remove real content; verify.
                comment_markers = [
                    lambda tag: tag.name and tag.get_text(strip=True) == '发表评论',
                    lambda tag: tag.name and '发表评论' in tag.get_text(),
                    lambda tag: tag.name and tag.get('class') and any('comment' in str(cls).lower() for cls in tag.get('class')),
                    lambda tag: tag.name and tag.get('id') and 'comment' in str(tag.get('id')).lower(),
                    lambda tag: tag.name and 'pinglun' in str(tag.get('class', [])).lower(),
                    lambda tag: tag.name and '网友评论' in tag.get_text(),
                    lambda tag: tag.name == 'h3' and '发表评论' in tag.get_text(),
                ]
                
                for marker_func in comment_markers:
                    comment_section = article_content.find(marker_func)
                    if comment_section:
                        # Delete the comment section and every following sibling.
                        current = comment_section
                        while current:
                            next_sibling = current.next_sibling
                            if hasattr(current, 'decompose'):
                                current.decompose()
                            current = next_sibling
                        break
            
            # ITHome special case: truncate the content before the APP-promotion area.
            if 'ithome.com' in url:
                # Predicates identifying the promo boundary.
                # NOTE(review): the broad class checks ('app', 'related') and the
                # get_text() substring checks can match large ancestor containers
                # and over-delete content — verify against live pages.
                app_markers = [
                    lambda tag: tag.name and '下载IT之家APP' in tag.get_text(),
                    lambda tag: tag.name and '下载 IT之家 APP' in tag.get_text(),
                    lambda tag: tag.name and 'IT之家APP下载' in tag.get_text(),
                    lambda tag: tag.name and '相关文章' in tag.get_text(),
                    lambda tag: tag.name and '相关推荐' in tag.get_text(),
                    lambda tag: tag.name and 'APP下载' in tag.get_text(),
                    lambda tag: tag.name and '软媒旗下网站' in tag.get_text(),
                    lambda tag: tag.name and tag.get('class') and any('download' in str(cls).lower() for cls in tag.get('class')),
                    lambda tag: tag.name and tag.get('class') and any('app' in str(cls).lower() for cls in tag.get('class')),
                    lambda tag: tag.name and tag.get('class') and any('related' in str(cls).lower() for cls in tag.get('class')),
                ]
                
                for marker_func in app_markers:
                    app_section = article_content.find(marker_func)
                    if app_section:
                        # Delete the promo area and every following sibling.
                        current = app_section
                        while current:
                            next_sibling = current.next_sibling
                            if hasattr(current, 'decompose'):
                                current.decompose()
                            current = next_sibling
                        break
            
            # Yicai special case: truncate the content before the author-info area.
            if 'yicai.com' in url:
                # Author-area predicates, ordered by priority.
                # NOTE(review): generic markers ('举报', '版权') via get_text() can
                # match the whole content container — over-deletion risk; verify.
                author_markers = [
                    lambda tag: tag.name and '相关阅读' in tag.get_text(),  # highest priority
                    lambda tag: tag.name and '作者：' in tag.get_text(),
                    lambda tag: tag.name and '作者:' in tag.get_text(),
                    lambda tag: tag.name and '文章作者' in tag.get_text(),
                    lambda tag: tag.name and '举报' in tag.get_text(),
                    lambda tag: tag.name and '版权' in tag.get_text(),
                    lambda tag: tag.name and '第一财经版权' in tag.get_text(),
                    lambda tag: tag.name and 'banquan@yicai.com' in tag.get_text(),
                    lambda tag: tag.name and tag.get('class') and any('author' in str(cls).lower() for cls in tag.get('class')),
                    lambda tag: tag.name and tag.get('class') and any('copyright' in str(cls).lower() for cls in tag.get('class')),
                    lambda tag: tag.name and tag.get('class') and any('report' in str(cls).lower() for cls in tag.get('class')),
                    lambda tag: tag.name and tag.get('class') and any('related' in str(cls).lower() for cls in tag.get('class')),
                ]
                
                for marker_func in author_markers:
                    author_section = article_content.find(marker_func)
                    if author_section:
                        # Delete the author-info area and everything after it.
                        current = author_section
                        parent = current.parent
                        
                        # Collect the matched element plus all of its following siblings.
                        elements_to_remove = []
                        found_target = False
                        
                        if parent:
                            for sibling in parent.children:
                                if hasattr(sibling, 'name') and sibling.name:  # real HTML elements only
                                    if sibling == current:
                                        found_target = True
                                    if found_target:
                                        elements_to_remove.append(sibling)
                        
                        # Delete the collected elements.
                        for elem in elements_to_remove:
                            if hasattr(elem, 'decompose'):
                                elem.decompose()
                        break
            
            # The Paper (thepaper.cn) special case: truncate before the author-info area.
            if 'thepaper.cn' in url:
                # Author-area predicates, ordered by priority.
                # NOTE(review): very broad markers ('登录', '收藏', '澎湃新闻', '举报')
                # checked with get_text() substring can match large ancestor
                # containers and wipe out the whole article — verify on live pages.
                author_markers = [
                    lambda tag: tag.name and '登录' in tag.get_text(),  # highest priority
                    lambda tag: tag.name and '作者：' in tag.get_text(),
                    lambda tag: tag.name and '作者:' in tag.get_text(),
                    lambda tag: tag.name and '文章作者' in tag.get_text(),
                    lambda tag: tag.name and '图片编辑' in tag.get_text(),
                    lambda tag: tag.name and '校对' in tag.get_text(),
                    lambda tag: tag.name and '来源：' in tag.get_text(),
                    lambda tag: tag.name and '澎湃新闻' in tag.get_text(),
                    lambda tag: tag.name and '收藏' in tag.get_text(),
                    lambda tag: tag.name and '版权' in tag.get_text(),
                    lambda tag: tag.name and '举报' in tag.get_text(),
                    lambda tag: tag.name and '021-962866' in tag.get_text(),
                    lambda tag: tag.name and '未经授权不得转载' in tag.get_text(),
                    lambda tag: tag.name and tag.get('class') and any('author' in str(cls).lower() for cls in tag.get('class')),
                    lambda tag: tag.name and tag.get('class') and any('copyright' in str(cls).lower() for cls in tag.get('class')),
                    lambda tag: tag.name and tag.get('class') and any('contact' in str(cls).lower() for cls in tag.get('class')),
                ]
                
                for marker_func in author_markers:
                    author_section = article_content.find(marker_func)
                    if author_section:
                        # Delete the author-info area and everything after it.
                        current = author_section
                        parent = current.parent
                        
                        # Collect the matched element plus all of its following siblings.
                        elements_to_remove = []
                        found_target = False
                        
                        if parent:
                            for sibling in parent.children:
                                if hasattr(sibling, 'name') and sibling.name:  # real HTML elements only
                                    if sibling == current:
                                        found_target = True
                                    if found_target:
                                        elements_to_remove.append(sibling)
                        
                        # Delete the collected elements.
                        for elem in elements_to_remove:
                            if hasattr(elem, 'decompose'):
                                elem.decompose()
                        break
            
            # Clean residual navigation content based on text features.
            # NOTE(review): the control flow below is broken — see the inline
            # NOTEs (unconditional `continue`, unreachable code, mis-indented
            # branches, and variables consumed after the loop with stale values).
            # This whole section needs a careful rewrite.
            elements_to_remove = []
            for element in article_content.find_all(text=True):
                if element.strip():
                    text = element.strip()
                    
                    # chinanews.com: comment-section markers inside text nodes.
                    if 'chinanews.com' in url and ('发表评论' in text or '网友评论' in text):
                        # Found the comment area: delete this element and all following siblings.
                        parent = element.parent
                        if parent:
                            # Walk all children of the grandparent (or parent) container.
                            found_comment = False
                            for child in list(parent.parent.children if parent.parent else parent.children):
                                if found_comment and hasattr(child, 'decompose'):
                                    child.decompose()
                                elif child == parent or (hasattr(child, 'get_text') and '发表评论' in child.get_text()):
                                    found_comment = True
                                    if hasattr(child, 'decompose'):
                                        child.decompose()
                        continue
                    
                    # yicai.com: author/copyright area markers.

                    # (duplicate comment retained from original)
                    if 'yicai.com' in url:
                        # Check "相关阅读" (related reading) first — highest priority.
                        if '相关阅读' in text:
                            # Found the related-reading area: delete it and all following siblings.
                            parent = element.parent
                            if parent and parent.parent:
                                # Collect the matching child and everything after it.
                                found_related = False
                                elements_to_remove = []
                                for child in list(parent.parent.children):
                                    if found_related and hasattr(child, 'decompose'):
                                        elements_to_remove.append(child)
                                    elif child == parent or (hasattr(child, 'get_text') and '相关阅读' in child.get_text()):
                                        found_related = True
                                        elements_to_remove.append(child)
                                # Perform the deletion.
                        # NOTE(review): this loop is indented OUTSIDE the
                        # `if '相关阅读'` block above, so it runs for every yicai
                        # text node and may decompose elements accumulated in the
                        # outer `elements_to_remove` mid-iteration — likely a bug.
                        for elem in elements_to_remove:
                            if hasattr(elem, 'decompose'):
                                elem.decompose()
                    # NOTE(review): unconditional `continue` — everything below it
                    # inside this `if element.strip():` body (the yicai keyword
                    # block) is unreachable dead code.
                    continue


  
                    # Other yicai author-related keywords (UNREACHABLE — see NOTE above).
                    yicai_author_keywords = [
                        '作者：', '作者:', '文章作者', '举报',
                        '版权', '第一财经版权', 'banquan@yicai.com',
                        '反馈', '投诉', '投稿', '联系编辑',
                        '应稿作者', '责任编辑', '责编'
                    ]
                    if any(keyword in text for keyword in yicai_author_keywords):
                        # Found the author-info area: delete it and all following siblings.
                        parent = element.parent
                        if parent:
                            # Walk all children of the grandparent (or parent) container.
                            found_author = False
                            for child in list(parent.parent.children if parent.parent else parent.children):
                                if found_author and hasattr(child, 'decompose'):
                                    child.decompose()
                                elif child == parent or (hasattr(child, 'get_text') and any(keyword in child.get_text() for keyword in yicai_author_keywords)):
                                    found_author = True
                                    if hasattr(child, 'decompose'):
                                        child.decompose()
                        continue
                    
                # NOTE(review): this branch sits OUTSIDE `if element.strip():`,
                # so it only runs for whitespace-only nodes (non-empty nodes hit
                # the `continue` above), and `text` here is the stale value from
                # an earlier iteration — NameError if no non-empty node came first.
                if 'ithome.com' in url:
                        ithome_app_keywords = [
                            '下载IT之家APP', '下载 IT之家 APP', 'IT之家APP下载',
                            '相关文章', '相关推荐', 'APP下载', '软媒旗下网站'
                        ]
                        if any(keyword in text for keyword in ithome_app_keywords):
                            # Found the app-promo area: delete it and all following siblings.
                            parent = element.parent
                            if parent:
                                # Walk all children of the grandparent (or parent) container.
                                found_app = False
                                for child in list(parent.parent.children if parent.parent else parent.children):
                                    if found_app and hasattr(child, 'decompose'):
                                        child.decompose()
                                    elif child == parent or (hasattr(child, 'get_text') and any(keyword in child.get_text() for keyword in ithome_app_keywords)):
                                        found_app = True
                                        if hasattr(child, 'decompose'):
                                            child.decompose()
                        continue
                    
                # Obvious navigation-text patterns (rebuilt each iteration but
                # only consumed AFTER the loop — see NOTE below).
                nav_patterns = [
                        r'.*频道.*精选.*',
                        r'.*栏目.*导航.*',
                        r'.*开始.*\d+.*',
                        r'.*start.*\d+.*',
                        r'.*加载更多.*下方按钮.*',
                        r'^[一-龥]{0,10}开始$',
                        r'^[一-龥]{0,10}结束$',
                        r'^\s*<!--.*-->\s*$',
                        # ITHome promo-content patterns
                        r'.*下载.*IT之家.*APP.*',
                        r'.*IT之家.*APP.*下载.*',
                        r'.*软媒旗下.*网站.*',
                        r'.*相关文章.*',
                        r'.*相关推荐.*',
                        r'.*APP下载.*',
                        r'.*客户端下载.*',
                        # Yicai author/copyright patterns
                        r'.*作者：.*',
                        r'.*作者:.*',
                        r'.*责任编辑.*',
                        r'.*第一财经版权.*',
                        r'.*banquan@yicai\.com.*',
                        r'.*举报.*',
                        r'.*投诉.*',
                        r'.*反馈.*'
                    ]
                    
            # NOTE(review): from here on we are OUTSIDE the for-loop above — this
            # filtering runs exactly ONCE against the stale `text` of the last
            # processed node, not once per node. Almost certainly a bug; it was
            # presumably meant to live inside the loop body.
            should_remove = False
            for pattern in nav_patterns:
                    if re.match(pattern, text, re.IGNORECASE):
                        should_remove = True
                        break
                
                # If the text is very short and contains a navigation keyword, drop it too.
            short_text_keywords = [
                    '频道', '栏目', '精选', '导航', '开始', '结束', 'APP下载', '相关文章', '软媒',
                    # Yicai-related keywords
                    '作者', '责编', '版权', '举报', '投诉', '反馈'
                ]
            if len(text) < 20 and any(keyword in text for keyword in short_text_keywords):
                    should_remove = True
                
            if should_remove and hasattr(element, 'parent') and element.parent:
                    elements_to_remove.append(element.parent)
            
            # Remove all flagged elements in one pass.
            for elem in elements_to_remove:
                if elem:
                    elem.decompose()
            
            # Process images: convert relative links to absolute, add lazy-loading support.
            # NOTE(review): this comment/import pair is duplicated again below;
            # everything from here to the second `elements_to_remove` loop looks
            # like an accidental paste of the keyword-filter logic above. It runs
            # ONCE with the stale `text`/`element` left over from the finished
            # loop, then repeats the post-loop filtering verbatim. Candidate for
            # outright deletion once the full function can be reviewed.
            from urllib.parse import urljoin, urlparse
            if 'ithome.com' in url:
                        ithome_app_keywords = [
                            '下载IT之家APP', '下载 IT之家 APP', 'IT之家APP下载',
                            '相关文章', '相关推荐', 'APP下载', '软媒旗下网站'
                        ]
                        if any(keyword in text for keyword in ithome_app_keywords):
                            # Found the app-promo area: delete it and all following siblings.
                            parent = element.parent
                            if parent:
                                # Walk all children of the grandparent (or parent) container.
                                found_app = False
                                for child in list(parent.parent.children if parent.parent else parent.children):
                                    if found_app and hasattr(child, 'decompose'):
                                        child.decompose()
                                    elif child == parent or (hasattr(child, 'get_text') and any(keyword in child.get_text() for keyword in ithome_app_keywords)):
                                        found_app = True
                                        if hasattr(child, 'decompose'):
                                            child.decompose()
                                    # Remove empty div tags.
                                    if hasattr(child, 'name') and child.name == 'div' and not child.get_text(strip=True):
                                        child.decompose()
            # Remove obvious navigation text (verbatim duplicate of the list above).
            nav_patterns = [
                        r'.*频道.*精选.*',
                        r'.*栏目.*导航.*',
                        r'.*开始.*\d+.*',
                        r'.*start.*\d+.*',
                        r'.*加载更多.*下方按钮.*',
                        r'^[一-龥]{0,10}开始$',
                        r'^[一-龥]{0,10}结束$',
                        r'^\s*<!--.*-->\s*$',
                        # ITHome promo-content patterns
                        r'.*下载.*IT之家.*APP.*',
                        r'.*IT之家.*APP.*下载.*',
                        r'.*软媒旗下.*网站.*',
                        r'.*相关文章.*',
                        r'.*相关推荐.*',
                        r'.*APP下载.*',
                        r'.*客户端下载.*',
                        # Yicai author/copyright patterns
                        r'.*作者：.*',
                        r'.*作者:.*',
                        r'.*责任编辑.*',
                        r'.*第一财经版权.*',
                        r'.*banquan@yicai\.com.*',
                        r'.*举报.*',
                        r'.*投诉.*',
                        r'.*反馈.*'
                    ]
                    
            should_remove = False
            for pattern in nav_patterns:
                if re.match(pattern, text, re.IGNORECASE):
                    should_remove = True
                    break
            
            # If the text is very short and contains a navigation keyword, drop it too.
            short_text_keywords = [
                '频道', '栏目', '精选', '导航', '开始', '结束', 'APP下载', '相关文章', '软媒',
                # Yicai-related keywords
                '作者', '责编', '版权', '举报', '投诉', '反馈'
            ]
            if len(text) < 20 and any(keyword in text for keyword in short_text_keywords):
                should_remove = True
            
            if should_remove and hasattr(element, 'parent') and element.parent:
                elements_to_remove.append(element.parent)
            
            # Remove all flagged elements in one pass.
            # NOTE(review): `elements_to_remove` may hold elements already
            # decomposed by the identical pass above — at best redundant; confirm
            # the installed bs4 version tolerates double decompose().
            for elem in elements_to_remove:
                if elem:
                    elem.decompose()
            
            # Process images: convert relative links to absolute, add lazy-loading support.
            # NOTE(review): urljoin/urlparse were already imported a few lines up;
            # this re-import is harmless but redundant.
            from urllib.parse import urljoin, urlparse
            base_domain = f"{urlparse(url).scheme}://{urlparse(url).netloc}"
            
            for img in article_content.find_all('img'):
                # Special-case ITHome lazy-loaded images (data-original attribute).
                if 'ithome.com' in url and img.get('data-original'):
                    img['src'] = urljoin(base_domain, img['data-original'])
                    del img['data-original']
                    # Strip lazy-load related classes/attributes.
                    if img.get('class'):
                        img['class'] = [cls for cls in img.get('class', []) if 'lazy' not in cls.lower()]
                        if not img['class']:  # drop the class attribute entirely if now empty
                            del img['class']
                    # Add native lazy-loading attributes.
                    img['loading'] = 'lazy'
                    img['decoding'] = 'async'
                # Handle the src attribute.
                elif img.get('src'):
                    # If this is the ITHome placeholder image and data-original exists, prefer data-original.
                    if 'ithome.com' in url and 'images/v2/t.png' in img.get('src', '') and img.get('data-original'):
                        img['src'] = urljoin(base_domain, img['data-original'])
                        del img['data-original']
                    else:
                        img['src'] = urljoin(base_domain, img['src'])
                    # Add native lazy-loading attributes.
                    img['loading'] = 'lazy'
                    img['decoding'] = 'async'
                # Handle data-src (images already set up for lazy loading).
                elif img.get('data-src'):
                    img['src'] = urljoin(base_domain, img['data-src'])
                    del img['data-src']
                    # Keep lazy-loading attributes.
                    img['loading'] = 'lazy'
                    img['decoding'] = 'async'
                else:
                    # Remove images with no usable src, data-src or data-original.
                    img.decompose()

            # Remove all iframes (ads or other embedded content).
            for iframe in article_content.find_all('iframe'):
                iframe.decompose()

            # Remove all video elements.
            for video in article_content.find_all('video'):
                video.decompose()

            # Remove all audio elements.
            for audio in article_content.find_all('audio'):
                audio.decompose()

            # Remove all canvas elements.
            for canvas in article_content.find_all('canvas'):
                canvas.decompose()

            # Remove all noscript elements.
            for noscript in article_content.find_all('noscript'):
                noscript.decompose()

            # Process links: convert relative hrefs to absolute.
            for a in article_content.find_all('a'):
                if a.get('href'):
                    a['href'] = urljoin(base_domain, a['href'])
                    # Mark cross-domain links as external.
                    if urlparse(a['href']).netloc != urlparse(url).netloc:
                        a['target'] = '_blank'
                        a['rel'] = 'noopener noreferrer'
                        # Add external-link styling class.
                        if 'class' in a.attrs:
                            a['class'] = a.get('class', []) + ['external-link']
                        else:
                            a['class'] = ['external-link']

            # Add a content-cleanup marker.
            # Decide whether a truncation notice is needed for the sites we truncate.
            if 'chinanews.com' in url or 'yicai.com' in url or 'ithome.com' in url:
                # Build the notice element.
                truncation_notice = soup.new_tag('div')
                truncation_notice['class'] = ['content-truncation-notice']
                truncation_notice['style'] = 'margin: 1em 0; padding: 0.8em; background-color: #f8f9fa; border-left: 4px solid #165DFF; color: #6b7280; font-style: italic;'
                
                if 'chinanews.com' in url:
                    truncation_notice.string = '[内容在此截断，已过滤推广内容]'
                elif 'yicai.com' in url:
                    truncation_notice.string = '[内容在此截断，已过滤作者和版权信息]'
                elif 'ithome.com' in url:
                    truncation_notice.string = '[内容在此截断，已过滤推广内容]'
                
                # Append the notice at the end of the content.
                # NOTE(review): this appends INSIDE the last top-level child, not
                # after it as a sibling — confirm that placement is intended.
                last_element = article_content.find_all(recursive=False)[-1] if article_content.find_all(recursive=False) else None
                if last_element:
                    last_element.append(truncation_notice)
                else:
                    article_content.append(truncation_notice)

            # Remove empty paragraphs/divs/spans/headings (keep those holding media).
            for tag in article_content.find_all(['p', 'div', 'span', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6']):
                if not tag.text.strip() and not tag.find_all(['img', 'video', 'audio', 'iframe']):
                    tag.decompose()

            # Clean empty links.
            for a in article_content.find_all('a'):
                if not a.text.strip():
                    a.unwrap()  # drop the empty <a>, keep its contents

            # Finally, serialize the cleaned content.
            cleaned_content = str(article_content)
            
            # Extra cleanup: collapse whitespace and newlines around tags.
            # NOTE(review): these substitutions also delete significant spaces
            # next to inline tags — fine for CJK prose, but they can join words
            # in Latin text; verify the rendered output.
            cleaned_content = re.sub(r'\s+<', '<', cleaned_content)
            cleaned_content = re.sub(r'>\s+', '>', cleaned_content)
            
            return cleaned_content
        
        # NOTE(review): DEAD CODE from here to the end of the visible chunk —
        # the identical `if article_content:` branch above always ends in
        # `return cleaned_content`, so this duplicated section can never
        # execute and should be deleted.
        if article_content:
            # Second cleanup pass inside the selected content region (duplicate).
            for unwanted in article_content.select('.ad, .advertisement, .related, .comment, .share, .social'):
                unwanted.decompose()
            
            # 处理图片：转换相对链接为绝对链接
            for img in article_content.find_all('img'):
                # 特殊处理IT之家的懒加载图片（data-original属性）
                if 'ithome.com' in url and img.get('data-original'):
                    img['src'] = urljoin(url, img['data-original'])
                    del img['data-original']
                    img['onerror'] = "this.style.display='none'"
                    img['style'] = "max-width: 100%; height: auto; margin: 10px 0;"
                elif img.get('src'):
                    # 检查是否为IT之家占位符图片，如果是且有data-original则优先使用data-original
                    if 'ithome.com' in url and 'images/v2/t.png' in img.get('src', '') and img.get('data-original'):
                        img['src'] = urljoin(url, img['data-original'])
                        del img['data-original']
                    else:
                        img['src'] = urljoin(url, img['src'])
                    img['onerror'] = "this.style.display='none'"
                    img['style'] = "max-width: 100%; height: auto; margin: 10px 0;"
                elif img.get('data-src'):
                    img['src'] = urljoin(url, img['data-src'])
                    del img['data-src']
                    img['onerror'] = "this.style.display='none'"
                    img['style'] = "max-width: 100%; height: auto; margin: 10px 0;"
                else:
                    # 移除没有有效图片源的图片
                    img.decompose()# ... existing code ...
                    continue

                    # 其他第一财经相关关键词
                    yicai_author_keywords = [
                        '作者：', '作者:', '文章作者', '举报',
                        '版权', '第一财经版权', 'banquan@yicai.com',
                        '反馈', '投诉', '投稿', '联系编辑',
                        '应稿作者', '责任编辑', '责编'
                    ]
                    if any(keyword in text for keyword in yicai_author_keywords):
                        # 找到作者信息区域，删除当前元素及其后面的所有兄弟元素
                        parent = element.parent
                        if parent:
                            # 获取父元素的所有子元素
                            found_author = False
                            for child in list(parent.parent.children if parent.parent else parent.children):
                                if found_author and hasattr(child, 'decompose'):
                                    child.decompose()
                                elif child == parent or (hasattr(child, 'get_text') and any(keyword in child.get_text() for keyword in yicai_author_keywords)):
                                    found_author = True
                                    if hasattr(child, 'decompose'):
                                        child.decompose()
                        continue
                    
                    if 'ithome.com' in url:
                        ithome_app_keywords = [
                            '下载IT之家APP', '下载 IT之家 APP', 'IT之家APP下载',
                            '相关文章', '相关推荐', 'APP下载', '软媒旗下网站'
                        ]
                        if any(keyword in text for keyword in ithome_app_keywords):
                            # 找到APP推广区域，删除当前元素及其后面的所有兄弟元素
                            parent = element.parent
                            if parent:
                                # 获取父元素的所有子元素
                                found_app = False
                                for child in list(parent.parent.children if parent.parent else parent.children):
                                    if found_app and hasattr(child, 'decompose'):
                                        child.decompose()
                                    elif child == parent or (hasattr(child, 'get_text') and any(keyword in child.get_text() for keyword in ithome_app_keywords)):
                                        found_app = True
                                        if hasattr(child, 'decompose'):
                                            child.decompose()
                        continue
# ... existing code ...
            
            # 处理链接
            for link in article_content.find_all('a'):
                if link.get('href'):
                    link['href'] = urljoin(url, link['href'])
                    link['target'] = '_blank'
            
            # 清理不必要的属性
            for tag in article_content.find_all():
                allowed_attrs = ['src', 'href', 'target', 'alt', 'title', 'style']
                tag.attrs = {k: v for k, v in tag.attrs.items() if k in allowed_attrs}
            
            # 获取最终的HTML内容
            full_html = str(article_content)
            
            # 针对中国新闻网，在评论区域前截断内容
            if 'chinanews.com' in url:
                comment_markers = ['发表评论', '网友评论', '评论区', '留言板']
                for marker in comment_markers:
                    marker_pos = full_html.find(marker)
                    if marker_pos != -1:
                        # 在评论标识前截断内容
                        full_html = full_html[:marker_pos]
                        # 添加截断说明
                        full_html += f'<p class="text-gray-500 italic mt-4">[内容在此截断，已过滤评论区域]</p>'
                        break
            
            # 针对IT之家，在APP推广区域前截断内容
            if 'ithome.com' in url:
                app_markers = [
                    '下载IT之家APP', '下载 IT之家 APP', '下载IT之家app',
                    '安装IT之家APP', '获取IT之家APP', 'IT之家APP下载',
                    '相关文章', '相关推荐', 'APP下载', '客户端下载',
                    '软媒旗下网站', '软媒', '广告', '推广'
                ]
                for marker in app_markers:
                    marker_pos = full_html.find(marker)
                    if marker_pos != -1:
                        # 在推广标识前截断内容
                        full_html = full_html[:marker_pos]
                        # 添加截断说明
                        full_html += f'<p class="text-gray-500 italic mt-4">[内容在此截断，已过滤推广内容]</p>'
                        break
            
            # 针对第一财经，在作者信息区域前截断内容
            if 'yicai.com' in url:
                # 按优先级排序的截断标识符，"相关阅读"优先级最高
                author_markers = [
                    # 第一优先级：相关阅读类
                    '相关阅读', '推荐阅读', '热门阅读', '更多阅读',
                    # 第二优先级：一财特有内容
                    '一财最热', '一财号', '财经头条',
                    # 第三优先级：作者信息
                    '作者：', '作者:', '文章作者', '责任编辑', '责编',
                    # 第四优先级：版权和投诉
                    '举报', '版权', '第一财经版权', 'banquan@yicai.com',
                    '反馈', '投诉', '投稿', '联系编辑', '应稿作者',
                    # 第五优先级：评论和交互
                    'comment', '评论', '<comment', '评论区',
                    # 最低优先级：其他广告内容
                    '今日推荐', '更多资讯', '回到顶部'
                ]
                
                # 找到最早出现的截断标识符
                earliest_pos = -1
                found_marker = None
                for marker in author_markers:
                    marker_pos = full_html.find(marker)
                    if marker_pos != -1 and (earliest_pos == -1 or marker_pos < earliest_pos):
                        earliest_pos = marker_pos
                        found_marker = marker
                
                if earliest_pos != -1:
                    # 在找到的标识符前截断所有内容
                    full_html = full_html[:earliest_pos]
                    # 添加截断说明，特别标明相关阅读已被截断
                    if found_marker in ['相关阅读', '推荐阅读', '热门阅读', '更多阅读']:
                        full_html += f'<p class="text-gray-500 italic mt-4">[内容在此截断，"{found_marker}"之后的所有内容已被删除]</p>'
                    elif found_marker in ['一财最热', '一财号', '财经头条']:
                        full_html += f'<p class="text-gray-500 italic mt-4">[内容在此截断，已过滤网站导航内容]</p>'
                    elif found_marker in ['comment', '评论', '<comment', '评论区']:
                        full_html += f'<p class="text-gray-500 italic mt-4">[内容在此截断，已过滤评论区域]</p>'
                    else:
                        full_html += f'<p class="text-gray-500 italic mt-4">[内容在此截断，已过滤作者和相关阅读内容]</p>'
            
            # 针对澎湃新闻，在作者信息区域前截断内容
            if 'thepaper.cn' in url:
                # 澎湃新闻的截断标识符（更加保守的策略，避免误截断正文）
                author_markers = [
                    # 第一优先级：明确的作者信息区域
                    '责任编辑：', '责编：', '责任编辑:', '责编:',
                    '澎湃新闻记者', '本文来自澎湃新闻',
                    
                    # 第二优先级：版权和投诉（通常在文章末尾）
                    '021-962866', '未经授权不得转载', '版权声明',
                    '投稿邮箱', '新闻热线', '举报',
                    
                    # 第三优先级：评论和交互（通常在文章末尾）
                    'comment', '评论', '<comment', '评论区', '发表评论',
                    
                    # 第四优先级：其他导航内容（最保守）
                    '相关推荐', '热门推荐', '今日推荐', '更多资讯', 
                    '回到顶部', '分享到微博', '分享到微信'
                ]
                
                # 找到最早出现的截断标识符
                earliest_pos = -1
                found_marker = None
                for marker in author_markers:
                    marker_pos = full_html.find(marker)
                    if marker_pos != -1 and (earliest_pos == -1 or marker_pos < earliest_pos):
                        earliest_pos = marker_pos
                        found_marker = marker
                
                # 只有在找到明确的截断标识符且位置在文章后半部分时才截断
                if earliest_pos != -1:
                    # 计算文章长度，只有在截断位置超过文章60%长度时才执行截断
                    article_length = len(full_html)
                    if earliest_pos > article_length * 0.6:  # 只在文章后60%位置才截断
                        # 在找到的标识符前截断所有内容
                        full_html = full_html[:earliest_pos]
                        # 添加截断说明
                        if found_marker in ['责任编辑：', '责编：', '责任编辑:', '责编:', '澎湃新闻记者', '本文来自澎湃新闻']:
                            full_html += f'<p class="text-gray-500 italic mt-4">[内容在此截断，已过滤作者信息]</p>'
                        elif found_marker in ['021-962866', '未经授权不得转载', '版权声明', '投稿邮箱', '新闻热线', '举报']:
                            full_html += f'<p class="text-gray-500 italic mt-4">[内容在此截断，已过滤版权和联系信息]</p>'
                        elif found_marker in ['comment', '评论', '<comment', '评论区', '发表评论']:
                            full_html += f'<p class="text-gray-500 italic mt-4">[内容在此截断，已过滤评论区域]</p>'
                        else:
                            full_html += f'<p class="text-gray-500 italic mt-4">[内容在此截断，已过滤推荐内容]</p>'
                    else:
                        # 如果截断位置太靠前，则不截断，保留完整内容
                        logger.info(f"澎湃新闻截断位置过早 ({earliest_pos}/{article_length})，保留完整内容")
            
            # 限制内容长度
            if len(full_html) > 50000:
                full_html = full_html[:50000] + "<p><em>[内容过长，已截取部分内容]</em></p>"
            
            # 最后检查：确保有有意义的内容
            text_content = BeautifulSoup(full_html, 'html.parser').get_text(strip=True)
            if len(text_content) < 100:
                return {"content": "<p>抱歉，无法提取到足够的文章内容</p>", "publish_time": article_publish_time}
            
            return {"content": full_html, "publish_time": article_publish_time}
        
        return {"content": "<p>无法获取文章内容</p>", "publish_time": article_publish_time}
        
    except requests.exceptions.RequestException as e:
        logger.error(f"获取原文失败: {url}, 错误: {str(e)}")
        return {"content": f"<p class='text-red-500'>获取原文失败: {str(e)[:50]}</p>", "publish_time": None}
    except Exception as e:
        logger.error(f"解析内容失败: {url}, 错误: {str(e)}")
        return {"content": f"<p class='text-red-500'>解析内容失败: {str(e)[:50]}</p>", "publish_time": None}

def generate_summary_text(content):
    """Generate a short summary for a news article via the DeepSeek chat API.

    Args:
        content: Full article text; only the first 2000 characters are sent.

    Returns:
        The generated summary string, or a human-readable Chinese error
        message when the API key is missing or the request fails.
    """
    if not DEEPSEEK_API_KEY:
        return "未配置API密钥，无法生成摘要"

    try:
        # Build the chat-completion request; the system prompt asks for a
        # concise summary of at most 100 characters.
        request_body = {
            "model": "deepseek-chat",
            "messages": [
                {"role": "system", "content": "请为以下新闻内容生成一段简洁的摘要，不超过100字。"},
                {"role": "user", "content": content[:2000]},
            ],
        }
        auth_headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {DEEPSEEK_API_KEY}",
        }

        resp = requests.post(DEEPSEEK_API_URL, json=request_body, headers=auth_headers, timeout=10)
        resp.raise_for_status()
        body = resp.json()
        return body['choices'][0]['message']['content']
    except Exception as e:
        # Any failure (network, HTTP status, unexpected payload) degrades
        # to an error string rather than propagating.
        logger.error(f"调用DeepSeek API错误: {e}")
        return f"生成摘要失败: {str(e)[:50]}"

def run_scheduled_tasks():
    """Background worker loop: fetch news now, then every 30 minutes.

    Performs one immediate fetch so fresh data is available at startup,
    registers the recurring job, and then drives the scheduler forever.
    Never returns; intended to run in a daemon thread.
    """
    # Initial fetch right away so the app starts with data.
    fetch_and_store_all_news()

    # Recurring job: refresh every 30 minutes.
    schedule.every(30).minutes.do(fetch_and_store_all_news)
    logger.info("定时任务已启动（每30分钟抓取一次新闻）")

    # Poll the scheduler once a minute, indefinitely.
    poll_interval_seconds = 60
    while True:
        schedule.run_pending()
        time.sleep(poll_interval_seconds)

# User authentication helpers
def create_user(username, email, password):
    """Register a new user, storing only a salted PBKDF2 password hash.

    Args:
        username: Desired unique username.
        email: Desired unique email address.
        password: Plaintext password (hashed before storage).

    Returns:
        {"status": "success"|"error", "message": <Chinese status text>}.

    NOTE(review): the duplicate checks below are not atomic with the
    INSERT; a concurrent request could slip through. UNIQUE constraints
    on username/email should back this up — confirm against the schema.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                # Reject duplicate usernames first, then duplicate emails.
                cursor.execute("SELECT id FROM users WHERE username = %s", (username,))
                if cursor.fetchone() is not None:
                    return {"status": "error", "message": "用户名已存在"}

                cursor.execute("SELECT id FROM users WHERE email = %s", (email,))
                if cursor.fetchone() is not None:
                    return {"status": "error", "message": "邮箱已被注册"}

                # Never store the plaintext password.
                password_hash = generate_password_hash(password, method='pbkdf2:sha256')

                cursor.execute(
                    """INSERT INTO users (username, email, password_hash, created_at)
                       VALUES (%s, %s, %s, NOW())""",
                    (username, email, password_hash)
                )
                conn.commit()
        return {"status": "success", "message": "注册成功"}
    except Exception as e:
        logger.error(f"创建用户错误: {e}")
        return {"status": "error", "message": str(e)[:50]}

def authenticate_user(username, password):
    """Validate a username/password pair against the users table.

    Args:
        username: Login name to look up.
        password: Plaintext password to verify against the stored hash.

    Returns:
        {"status": "success", "user": {id, username, email}} on success,
        otherwise {"status": "error", "message": <Chinese reason>}.

    NOTE(review): returning distinct messages for "unknown user" vs
    "wrong password" enables account enumeration; consider unifying them.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                cursor.execute("SELECT * FROM users WHERE username = %s", (username,))
                record = cursor.fetchone()

                if record is None:
                    return {"status": "error", "message": "用户名不存在"}
                if not check_password_hash(record['password_hash'], password):
                    return {"status": "error", "message": "密码错误"}

                # Expose only the non-sensitive columns to the caller.
                public_fields = {key: record[key] for key in ('id', 'username', 'email')}
                return {"status": "success", "user": public_fields}
    except Exception as e:
        logger.error(f"用户认证错误: {e}")
        return {"status": "error", "message": str(e)[:50]}

# User favorites features
def add_favorite_news(user_id, news_id):
    """Add a news item to a user's favorites and refresh its hot score.

    Idempotent: favoriting an already-favorited item reports success.
    On a new favorite, favorites_count is incremented and hot_score is
    recomputed as favorites_count * 3 + views.

    Args:
        user_id: id of the favoriting user.
        news_id: id of the news item.

    Returns:
        {"status": "success"|"error", "message": <Chinese status text>}.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                # Verify the news item exists.
                cursor.execute("SELECT id FROM news WHERE id = %s", (news_id,))
                if not cursor.fetchone():
                    return {"status": "error", "message": "新闻不存在"}

                # Already favorited -> treat as success (idempotent API).
                cursor.execute(
                    "SELECT id FROM user_favorites WHERE user_id = %s AND news_id = %s", 
                    (user_id, news_id)
                )
                if cursor.fetchone():
                    return {"status": "success", "message": "已收藏"}

                cursor.execute(
                    "INSERT INTO user_favorites (user_id, news_id, created_at) VALUES (%s, %s, NOW())",
                    (user_id, news_id)
                )
                # Keep favorites_count and hot_score in sync.
                # BUG FIX: MySQL evaluates SET assignments left to right,
                # so the hot_score expression already sees the incremented
                # favorites_count; the old "COALESCE(favorites_count,0)*3 + 3"
                # therefore over-counted the new favorite by 3.
                cursor.execute(
                    """
                    UPDATE news 
                    SET favorites_count = COALESCE(favorites_count, 0) + 1,
                        hot_score = favorites_count * 3 + COALESCE(views, 0)
                    WHERE id = %s
                    """,
                    (news_id,)
                )
                conn.commit()
                return {"status": "success", "message": "收藏成功"}
    except Exception as e:
        logger.error(f"添加收藏错误: {e}")
        return {"status": "error", "message": str(e)[:50]}

def remove_favorite_news(user_id, news_id):
    """Remove a news item from a user's favorites and refresh its hot score.

    Idempotent: un-favoriting an item that was never favorited reports
    success. On removal, favorites_count is decremented (floored at 0)
    and hot_score is recomputed as favorites_count * 3 + views.

    Args:
        user_id: id of the user.
        news_id: id of the news item.

    Returns:
        {"status": "success"|"error", "message": <Chinese status text>}.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                # Nothing to do if the favorite does not exist.
                cursor.execute(
                    "SELECT id FROM user_favorites WHERE user_id = %s AND news_id = %s", 
                    (user_id, news_id)
                )
                if not cursor.fetchone():
                    return {"status": "success", "message": "未收藏"}

                cursor.execute(
                    "DELETE FROM user_favorites WHERE user_id = %s AND news_id = %s",
                    (user_id, news_id)
                )
                # Keep favorites_count and hot_score in sync (never negative).
                # BUG FIX: MySQL evaluates SET assignments left to right, so
                # the old hot_score expression re-subtracted 1 from the
                # already-decremented favorites_count, under-counting by 3.
                cursor.execute(
                    """
                    UPDATE news 
                    SET favorites_count = GREATEST(COALESCE(favorites_count, 0) - 1, 0),
                        hot_score = favorites_count * 3 + COALESCE(views, 0)
                    WHERE id = %s
                    """,
                    (news_id,)
                )
                conn.commit()
                return {"status": "success", "message": "取消收藏成功"}
    except Exception as e:
        logger.error(f"取消收藏错误: {e}")
        return {"status": "error", "message": str(e)[:50]}

def get_user_favorites(user_id, page=1, per_page=20):
    """Return a paginated list of the news items a user has favorited.

    Args:
        user_id: id of the user whose favorites are listed.
        page: 1-based page number.
        per_page: number of items per page.

    Returns:
        On success: {"status": "success", "favorites": [...], "pagination":
        {total, page, per_page, pages}}, where each favorite row carries
        the news columns plus source_name, category, summary and
        favorited_at (datetimes serialized to ISO-8601 strings).
        On failure: {"status": "error", "message": <truncated error>}.
    """
    try:
        offset = (page - 1) * per_page
        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                # Total favorite count, for pagination metadata.
                cursor.execute(
                    "SELECT COUNT(*) as total FROM user_favorites WHERE user_id = %s", 
                    (user_id,)
                )
                total = cursor.fetchone()['total']
                
                # Fetch the favorited news, newest favorite first. The inline
                # UNION derived table maps source_id -> display name (mirrors
                # the NEWS_SOURCES list at the top of the file).
                cursor.execute("""
                    SELECT n.*, 
                           (SELECT name FROM (
                               SELECT 1 as id, '中国新闻网' as name UNION ALL
                               SELECT 2, '央视新闻' UNION ALL
                               SELECT 3, '新华网' UNION ALL
                               SELECT 4, '中国日报' UNION ALL
                               SELECT 5, '环球时报' UNION ALL
                               SELECT 6, '虎嗅网' UNION ALL
                               SELECT 7, 'IT之家' UNION ALL
                               SELECT 11, '人民网' UNION ALL
                               SELECT 12, '第一财经' UNION ALL
                               SELECT 13, '知乎热榜' UNION ALL
                               SELECT 14, '路透社' UNION ALL
                               SELECT 15, '36氪' UNION ALL
                               SELECT 16, '澎湃新闻'
                           ) as sources WHERE sources.id = n.source_id) as source_name,
                           COALESCE(n.category, 'general') as category,
                           ns.summary,
                           uf.created_at as favorited_at
                    FROM user_favorites uf
                    JOIN news n ON uf.news_id = n.id
                    LEFT JOIN news_summaries ns ON n.id = ns.news_id
                    WHERE uf.user_id = %s
                    ORDER BY uf.created_at DESC
                    LIMIT %s OFFSET %s
                """, (user_id, per_page, offset))
                
                favorites = cursor.fetchall()
                
                # Serialize datetime columns to ISO-8601 strings for JSON.
                for item in favorites:
                    if item['published_date']:
                        item['published_date'] = item['published_date'].isoformat()
                    if item['created_at']:
                        item['created_at'] = item['created_at'].isoformat()
                    if item['favorited_at']:
                        item['favorited_at'] = item['favorited_at'].isoformat()
                
                return {
                    "status": "success",
                    "favorites": favorites,
                    "pagination": {
                        "total": total,
                        "page": page,
                        "per_page": per_page,
                        "pages": (total + per_page - 1) // per_page
                    }
                }
    except Exception as e:
        logger.error(f"获取收藏列表错误: {e}")
        return {"status": "error", "message": str(e)[:50]}

def get_recommended_news(user_id, page=1, per_page=20, include_explain=False):
    """Entry point for personalized news recommendations.

    Delegates to the intelligent recommender (multi-strategy fusion:
    collaborative filtering on user similarity, content similarity,
    hot-score decay, diversity optimization, real-time trending) and
    falls back to the simple category-based algorithm whenever the
    recommender is unavailable or raises.
    """
    if intelligent_recommender is not None:
        try:
            return intelligent_recommender.get_recommendations(
                user_id, page, per_page, include_explain
            )
        except Exception as e:
            logger.error(f"智能推荐失败，降级到简单算法: {e}")
    # No recommender instance (or it just failed): simple fallback path.
    return get_recommended_news_fallback(user_id, page, per_page, include_explain)

def get_recommended_news_fallback(user_id, page=1, per_page=6, include_explain=False):
    """Simple category-preference recommender (degraded fallback path).

    Recommends recent news from the user's top-3 favorited categories,
    excluding items they already favorited; users with no favorites get
    generic hot news instead.

    Args:
        user_id: id of the user to recommend for.
        page: 1-based page number.
        per_page: items per page.
        include_explain: when True, attach an "explain" section describing
            the strategy and the category distribution used.

    Returns:
        {"status": "success", "recommendations": [...], "pagination": {...}}
        (plus "explain" when requested), or an error dict on failure.
    """
    try:
        offset = (page - 1) * per_page
        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                # Category distribution across the user's favorites.
                cursor.execute("""
                    SELECT COALESCE(n.category, 'general') AS category, COUNT(*) AS cnt
                    FROM user_favorites uf
                    JOIN news n ON uf.news_id = n.id
                    WHERE uf.user_id = %s
                    GROUP BY COALESCE(n.category, 'general')
                    ORDER BY cnt DESC
                """, (user_id,))
                category_counts = cursor.fetchall()
                preferred_categories = [row['category'] for row in category_counts[:3]]

                # Query candidate news for recommendation.
                if preferred_categories:
                    placeholders = ",".join(["%s"] * len(preferred_categories))
                    cursor.execute(f"""
                        SELECT n.*, 
                               (SELECT name FROM (
                                   SELECT 1 as id, '中国新闻网' as name UNION ALL
                                   SELECT 2, '央视新闻' UNION ALL
                                   SELECT 3, '新华网' UNION ALL
                                   SELECT 4, '中国日报' UNION ALL
                                   SELECT 5, '环球时报' UNION ALL
                                   SELECT 6, '虎嗅网' UNION ALL
                                   SELECT 7, 'IT之家' UNION ALL
                                   SELECT 11, '人民网' UNION ALL
                                   SELECT 12, '第一财经' UNION ALL
                                   SELECT 13, '知乎热榜' UNION ALL
                                   SELECT 14, '路透社' UNION ALL
                                   SELECT 15, '36氪' UNION ALL
                                   SELECT 16, '澎湃新闻'
                               ) as sources WHERE sources.id = n.source_id) as source_name,
                               COALESCE(n.category, 'general') as category
                        FROM news n
                        WHERE COALESCE(n.category, 'general') IN ({placeholders})
                          AND n.id NOT IN (
                              SELECT news_id FROM user_favorites WHERE user_id = %s
                          )
                          AND (n.published_date >= NOW() - INTERVAL 30 DAY 
                               OR n.created_at >= NOW() - INTERVAL 30 DAY)
                        ORDER BY n.hot_score DESC, n.published_date DESC
                        LIMIT %s OFFSET %s
                    """, preferred_categories + [user_id, per_page, offset])
                else:
                    # New user with no favorites: recommend hot news.
                    cursor.execute("""
                        SELECT n.*, 
                               (SELECT name FROM (
                                   SELECT 1 as id, '中国新闻网' as name UNION ALL
                                   SELECT 2, '央视新闻' UNION ALL
                                   SELECT 3, '新华网' UNION ALL
                                   SELECT 4, '中国日报' UNION ALL
                                   SELECT 5, '环球时报' UNION ALL
                                   SELECT 6, '虎嗅网' UNION ALL
                                   SELECT 7, 'IT之家' UNION ALL
                                   SELECT 11, '人民网' UNION ALL
                                   SELECT 12, '第一财经' UNION ALL
                                   SELECT 13, '知乎热榜' UNION ALL
                                   SELECT 14, '路透社' UNION ALL
                                   SELECT 15, '36氪' UNION ALL
                                   SELECT 16, '澎湃新闻'
                               ) as sources WHERE sources.id = n.source_id) as source_name,
                               COALESCE(n.category, 'general') as category
                        FROM news n
                        WHERE (n.published_date >= NOW() - INTERVAL 21 DAY 
                               OR n.created_at >= NOW() - INTERVAL 21 DAY)
                        ORDER BY n.hot_score DESC, n.published_date DESC
                        LIMIT %s OFFSET %s
                    """, (per_page, offset))
                
                recommendations = cursor.fetchall()
                
                # Serialize datetimes to ISO-8601 strings for JSON.
                for item in recommendations:
                    if item.get('published_date'):
                        item['published_date'] = item['published_date'].isoformat()
                    if item.get('created_at'):
                        item['created_at'] = item['created_at'].isoformat()
                
                # Report a reasonable estimated total instead of every
                # matching row in the database.
                actual_count = len(recommendations)
                if page == 1:
                    if actual_count < per_page:
                        total = actual_count  # first page not full: that's all there is
                    else:
                        # First page is full: estimate the total from preferences.
                        if preferred_categories:
                            # Has preferred categories -> likely more recommendations.
                            total = min(actual_count * 5, 30)  # cap at 30 items (5 pages)
                        else:
                            # New user on the hot-news path.
                            total = min(actual_count * 4, 24)  # cap at 24 items (4 pages)
                else:
                    # Beyond page 1: conservative estimate.
                    total = page * per_page + per_page
                
                response = {
                    "status": "success",
                    "recommendations": recommendations,
                    "pagination": {
                        "total": total,
                        "page": page,
                        "per_page": per_page,
                        "pages": (total + per_page - 1) // per_page
                    }
                }
                
                if include_explain:
                    response["explain"] = {
                        "strategy": "简单推荐算法",
                        "category_distribution": category_counts,
                        "used_categories": preferred_categories,
                        "description": "基于用户历史收藏的分类偏好进行推荐"
                    }
                
                return response
                
    except Exception as e:
        logger.error(f"简单推荐算法错误: {e}")
        return {"status": "error", "message": str(e)[:50]}

# Routes
@app.route('/api/search-news', methods=['GET'])
def search_news():
    """News search API with multi-dimension filtering (login not required).

    Query parameters:
        q / keyword: search keyword (either name accepted)
        scope: 'all' | 'title' | 'content'
        title_only: 'true' to match titles only (implied by scope=title)
        source: restrict to one source id
        category: restrict to one category
        date_from / date_to: inclusive created_at date range
        sort_by: 'relevance' (default) | 'date' | 'title'
        page / per_page: pagination; per_page capped at 50

    Returns JSON with the matching news, pagination metadata, keyword
    highlights, and search_info (timing, filters). Logged-in users also
    get an is_favorited flag per item.
    """
    try:
        # Accept both `q` and `keyword` for the search term.
        query = request.args.get('q', '').strip() or request.args.get('keyword', '').strip()
        scope = request.args.get('scope', 'all')  # all, title, content
        title_only = request.args.get('title_only', 'false').lower() == 'true' or scope == 'title'
        source = request.args.get('source', '')
        category = request.args.get('category', '')
        date_from = request.args.get('date_from', '')
        date_to = request.args.get('date_to', '')
        sort_by = request.args.get('sort_by', 'relevance')  # relevance, date, title
        page = request.args.get('page', 1, type=int)
        per_page = min(request.args.get('per_page', 20, type=int), 50)  # cap at 50 items

        # At least one meaningful search condition is required.
        if not query and not source and not category and not date_from and not date_to:
            return jsonify({"status": "error", "message": "请提供至少一个搜索条件"}), 400

        user_id = session.get('user_id')
        offset = (page - 1) * per_page

        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                # Build the WHERE clause; every user value is bound via %s.
                where_conditions = []
                query_params = []

                if query:
                    if title_only:
                        where_conditions.append("n.title LIKE %s")
                        query_params.append(f"%{query}%")
                    else:
                        where_conditions.append("(n.title LIKE %s OR n.content LIKE %s)")
                        query_params.extend([f"%{query}%", f"%{query}%"])

                if source:
                    where_conditions.append("n.source_id = %s")
                    query_params.append(source)

                if category:
                    where_conditions.append("COALESCE(n.category, 'general') = %s")
                    query_params.append(category)

                if date_from:
                    where_conditions.append("DATE(n.created_at) >= %s")
                    query_params.append(date_from)

                if date_to:
                    where_conditions.append("DATE(n.created_at) <= %s")
                    query_params.append(date_to)

                where_clause = "WHERE " + " AND ".join(where_conditions) if where_conditions else ""

                # Build the ORDER BY clause. SECURITY FIX: the relevance
                # branch previously interpolated the raw keyword directly
                # into the SQL text (an injection vector); it now binds it
                # as a parameter collected in order_params.
                order_params = []
                if sort_by == 'date':
                    order_clause = "ORDER BY n.created_at DESC"
                elif sort_by == 'title':
                    order_clause = "ORDER BY n.title ASC"
                else:  # relevance: title matches rank above content-only matches
                    if query:
                        order_clause = ("ORDER BY (CASE WHEN n.title LIKE %s THEN 2 ELSE 1 END) DESC, "
                                        "n.created_at DESC")
                        order_params.append(f"%{query}%")
                    else:
                        order_clause = "ORDER BY n.created_at DESC"

                # Total number of matching rows (WHERE params only).
                count_query = f"SELECT COUNT(*) as total FROM news n {where_clause}"
                cursor.execute(count_query, query_params)
                total = cursor.fetchone()['total']

                if total == 0:
                    return jsonify({
                        'status': 'success',
                        'news': [],
                        'pagination': {
                            'total': 0,
                            'page': page,
                            'per_page': per_page,
                            'pages': 0
                        },
                        'search_info': {
                            'query': query,
                            'total_results': 0,
                            'search_time': 0
                        }
                    })

                # time is imported at module level; no local re-import needed.
                start_time = time.time()

                if user_id:
                    # Logged-in user: join user_favorites for the favorite flag.
                    main_query = f"""
                        SELECT n.*, 
                               (SELECT name FROM (
                                   SELECT 1 as id, '中国新闻网' as name UNION ALL
                                   SELECT 2, '央视新闻' UNION ALL
                                   SELECT 3, '新华网' UNION ALL
                                   SELECT 4, '中国日报' UNION ALL
                                   SELECT 5, '环球时报' UNION ALL
                                   SELECT 6, '虎嗅网' UNION ALL
                                   SELECT 7, 'IT之家' UNION ALL
                                   SELECT 11, '人民网' UNION ALL
                                   SELECT 12, '第一财经' UNION ALL
                                   SELECT 13, '知乎热榜' UNION ALL
                                   SELECT 14, '路透社' UNION ALL
                                   SELECT 15, '36氪' UNION ALL
                                   SELECT 16, '澎湃新闻' UNION ALL
                                   SELECT 17, '中新网'
                               ) as sources WHERE sources.id = n.source_id) as source_name,
                               COALESCE(n.category, 'general') as category,
                               ns.summary,
                               CASE WHEN uf.news_id IS NOT NULL THEN 1 ELSE 0 END as is_favorited,
                               COALESCE(n.original_publish_time, n.published_date) as display_publish_time
                        FROM news n
                        LEFT JOIN news_summaries ns ON n.id = ns.news_id
                        LEFT JOIN user_favorites uf ON n.id = uf.news_id AND uf.user_id = %s
                        {where_clause}
                        {order_clause}
                        LIMIT %s OFFSET %s
                    """
                    # Placeholder order: JOIN (user_id), WHERE, ORDER BY, LIMIT/OFFSET.
                    query_params_with_pagination = [user_id] + query_params + order_params + [per_page, offset]
                else:
                    # Anonymous user: basic news info only.
                    main_query = f"""
                        SELECT n.*, 
                               (SELECT name FROM (
                                   SELECT 1 as id, '中国新闻网' as name UNION ALL
                                   SELECT 2, '央视新闻' UNION ALL
                                   SELECT 3, '新华网' UNION ALL
                                   SELECT 4, '中国日报' UNION ALL
                                   SELECT 5, '环球时报' UNION ALL
                                   SELECT 6, '虎嗅网' UNION ALL
                                   SELECT 7, 'IT之家' UNION ALL
                                   SELECT 11, '人民网' UNION ALL
                                   SELECT 12, '第一财经' UNION ALL
                                   SELECT 13, '知乎热榜' UNION ALL
                                   SELECT 14, '路透社' UNION ALL
                                   SELECT 15, '36氪' UNION ALL
                                   SELECT 16, '澎湃新闻' UNION ALL
                                   SELECT 17, '中新网'
                               ) as sources WHERE sources.id = n.source_id) as source_name,
                               COALESCE(n.category, 'general') as category,
                               ns.summary,
                               0 as is_favorited,
                               COALESCE(n.original_publish_time, n.published_date) as display_publish_time
                        FROM news n
                        LEFT JOIN news_summaries ns ON n.id = ns.news_id
                        {where_clause}
                        {order_clause}
                        LIMIT %s OFFSET %s
                    """
                    query_params_with_pagination = query_params + order_params + [per_page, offset]

                cursor.execute(main_query, query_params_with_pagination)
                news_items = cursor.fetchall()

                search_time = round((time.time() - start_time) * 1000, 2)  # milliseconds

                # Serialize datetimes and attach keyword highlights.
                for item in news_items:
                    if item['published_date']:
                        item['published_date'] = item['published_date'].isoformat()
                    if item['created_at']:
                        item['created_at'] = item['created_at'].isoformat()

                    if query:
                        item['highlight'] = {
                            'title': highlight_text(item['title'], query),
                            'content': highlight_text(item['content'] or '', query, max_length=200)
                        }

                # Best-effort search-history logging (logged-in users only).
                if user_id and query:
                    try:
                        cursor.execute(
                            "INSERT INTO search_history (user_id, query, results_count, created_at) VALUES (%s, %s, %s, NOW())",
                            (user_id, query, total)
                        )
                        conn.commit()
                    except Exception as e:
                        logger.warning(f"记录搜索历史失败: {e}")

                return jsonify({
                    'status': 'success',
                    'news': news_items,
                    'pagination': {
                        'total': total,
                        'page': page,
                        'per_page': per_page,
                        'pages': (total + per_page - 1) // per_page
                    },
                    'search_info': {
                        'query': query,
                        'total_results': total,
                        'search_time': search_time,
                        'sort_by': sort_by,
                        'filters': {
                            'source': source,
                            'category': category,
                            'date_from': date_from,
                            'date_to': date_to,
                            'title_only': title_only
                        }
                    }
                })

    except Exception as e:
        logger.error(f"新闻搜索错误: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500

def highlight_text(text, query, max_length=None):
    """Wrap occurrences of *query* in ``<mark>`` tags, optionally truncating.

    Args:
        text: Source text to highlight (may be empty or None).
        query: Search term; matched case-insensitively and literally
            (regex metacharacters are escaped).
        max_length: When given, the result is trimmed to roughly this many
            characters around the first match, with "..." marking cut text.

    Returns:
        The (possibly truncated) text with every match wrapped in
        ``<mark class="search-highlight">...</mark>``.
    """
    if not text or not query:
        return text[:max_length] if max_length else text

    # Match the query literally, ignoring case.
    pattern = re.compile(re.escape(query), re.IGNORECASE)
    match = pattern.search(text)
    original_len = len(text)  # length BEFORE truncation, for ellipsis checks

    if match and max_length:
        # Cut a window of max_length chars centered on the first match.
        start_pos = max(0, match.start() - max_length // 2)
        end_pos = min(original_len, start_pos + max_length)
        snippet = text[start_pos:end_pos]
        if start_pos > 0:
            snippet = "..." + snippet
        # Compare against the ORIGINAL length: the old code compared end_pos
        # with len() of the already-truncated string, so the trailing "..."
        # was never appended when text had been cut off at the end.
        if end_pos < original_len:
            snippet = snippet + "..."
        text = snippet
    elif max_length and original_len > max_length:
        # No match to center on: plain head truncation.  Only append "..."
        # when something was actually removed (the old code also added it
        # when the text was exactly max_length long).
        text = text[:max_length] + "..."

    # \g<0> re-inserts the matched text itself, preserving its original case;
    # the old code substituted the raw query string, which lowercased/retyped
    # whatever casing the article actually used.
    return pattern.sub(r'<mark class="search-highlight">\g<0></mark>', text)

@app.route('/api/search-suggestions', methods=['GET'])
def get_search_suggestions():
    """Suggest search terms based on frequent fragments of news titles."""
    try:
        query = request.args.get('q', '').strip()
        limit = min(request.args.get('limit', 10, type=int), 20)

        # Queries shorter than two characters yield no suggestions.
        if len(query) < 2:
            return jsonify({'status': 'success', 'suggestions': []})

        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                # Rank title fragments containing the query by how often they occur.
                cursor.execute("""
                        SELECT DISTINCT 
                            SUBSTRING_INDEX(SUBSTRING_INDEX(title, ' ', 3), ' ', -1) as suggestion,
                            COUNT(*) as frequency
                        FROM news 
                        WHERE title LIKE %s 
                        GROUP BY suggestion
                        HAVING LENGTH(suggestion) >= 2
                        ORDER BY frequency DESC, suggestion ASC
                        LIMIT %s
                    """, (f"%{query}%", limit))

                needle = query.lower()
                suggestions = [
                    {'text': row['suggestion'], 'frequency': row['frequency']}
                    for row in cursor.fetchall()
                    if needle in row['suggestion'].lower()
                ]

        return jsonify({
            'status': 'success',
            'suggestions': suggestions
        })

    except Exception as e:
        logger.error(f"获取搜索建议错误: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500

@app.route('/api/search-history', methods=['GET'])
def get_search_history():
    """Return the logged-in user's search history, one row per distinct query."""
    if 'user_id' not in session:
        return jsonify({"status": "error", "message": "请先登录"}), 401

    try:
        uid = session['user_id']
        limit = min(request.args.get('limit', 20, type=int), 50)

        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                cursor.execute("""
                    SELECT query, 
                           MAX(results_count) as results_count, 
                           MAX(created_at) as created_at,
                           COUNT(*) as search_count,
                           MAX(created_at) as last_searched
                    FROM search_history 
                    WHERE user_id = %s 
                    GROUP BY query
                    ORDER BY last_searched DESC, search_count DESC
                    LIMIT %s
                """, (uid, limit))

                rows = cursor.fetchall()

                # Datetimes are not JSON-serializable; convert to ISO strings.
                for row in rows:
                    for field in ('created_at', 'last_searched'):
                        if row[field]:
                            row[field] = row[field].isoformat()

                return jsonify({
                    'status': 'success',
                    'history': rows
                })

    except Exception as e:
        logger.error(f"获取搜索历史错误: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500

@app.route('/api/search-history', methods=['DELETE'])
def clear_search_history():
    """Delete every search-history row belonging to the logged-in user."""
    if 'user_id' not in session:
        return jsonify({"status": "error", "message": "请先登录"}), 401

    try:
        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                cursor.execute("DELETE FROM search_history WHERE user_id = %s", (session['user_id'],))
                removed = cursor.rowcount  # number of rows actually deleted
                conn.commit()

                return jsonify({
                    'status': 'success',
                    'message': f'已清空 {removed} 条搜索历史'
                })

    except Exception as e:
        logger.error(f"清空搜索历史错误: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500

@app.route('/api/popular-searches', methods=['GET'])
def get_popular_searches():
    """Trending search terms over a recent window (default 7 days, max 30)."""
    try:
        days = min(request.args.get('days', 7, type=int), 30)
        limit = min(request.args.get('limit', 10, type=int), 20)

        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                # Terms searched at least twice, ranked by volume then by reach.
                cursor.execute("""
                    SELECT query, 
                           COUNT(*) as search_count,
                           COUNT(DISTINCT user_id) as user_count,
                           AVG(results_count) as avg_results
                    FROM search_history 
                    WHERE created_at >= DATE_SUB(NOW(), INTERVAL %s DAY)
                      AND LENGTH(query) >= 2
                    GROUP BY query
                    HAVING search_count >= 2
                    ORDER BY search_count DESC, user_count DESC
                    LIMIT %s
                """, (days, limit))

                return jsonify({
                    'status': 'success',
                    'popular_searches': cursor.fetchall(),
                    'period_days': days
                })

    except Exception as e:
        logger.error(f"获取热门搜索错误: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500

# 初始化搜索历史表
def init_search_tables():
    """Create the search_history table if it does not already exist.

    Idempotent (CREATE TABLE IF NOT EXISTS); failures are logged as warnings
    rather than raised so startup can proceed without the table.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                # One row per executed search; user_id cascades with the users table.
                cursor.execute("""
                    CREATE TABLE IF NOT EXISTS search_history (
                        id INT AUTO_INCREMENT PRIMARY KEY,
                        user_id INT NOT NULL,
                        query VARCHAR(255) NOT NULL,
                        results_count INT DEFAULT 0,
                        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                        INDEX idx_user_id (user_id),
                        INDEX idx_query (query),
                        INDEX idx_created_at (created_at),
                        FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
                    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
                """)
                
                conn.commit()
                logger.info("搜索历史表初始化完成")
                
    except Exception as e:
        logger.warning(f"初始化搜索表失败: {e}")

# 初始化新闻热度相关列
def init_news_heat_columns():
    """Ensure the news table has the heat columns: views, favorites_count, hot_score."""
    # (column name, DDL that adds it) — each is added only when missing.
    heat_columns = (
        ('views', "ALTER TABLE news ADD COLUMN views INT NOT NULL DEFAULT 0"),
        ('favorites_count', "ALTER TABLE news ADD COLUMN favorites_count INT NOT NULL DEFAULT 0"),
        ('hot_score', "ALTER TABLE news ADD COLUMN hot_score INT NOT NULL DEFAULT 0"),
    )
    try:
        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                for column, add_column_sql in heat_columns:
                    cursor.execute(f"SHOW COLUMNS FROM news LIKE '{column}'")
                    if not cursor.fetchone():
                        cursor.execute(add_column_sql)
                        logger.info(f"已为 news 表添加 {column} 列")

                conn.commit()
    except Exception as e:
        logger.warning(f"初始化新闻热度字段失败: {e}")

@app.route('/api/news', methods=['GET'])
def get_news():
    """Paginated news listing.

    Query params:
        page: 1-based page number.
        per_page: items per page, capped at 50.
        category: 'all' (no filter) or a category key.
        sort_by: 'hot' ranks by hot_score, anything else by creation time.

    Logged-in users additionally receive a per-item `is_favorited` flag.
    """
    try:
        page = request.args.get('page', 1, type=int)
        per_page = min(request.args.get('per_page', 12, type=int), 50)  # 最多50条
        category = request.args.get('category', 'all')
        sort_by = request.args.get('sort_by', 'date')

        # None when the caller is anonymous.
        user_id = session.get('user_id')
        offset = (page - 1) * per_page

        # Inline id→name mapping; there is no sources table in the schema.
        sources_name_sql = """(SELECT name FROM (
                                   SELECT 1 as id, '中国新闻网' as name UNION ALL
                                   SELECT 2, '央视新闻' UNION ALL
                                   SELECT 3, '新华网' UNION ALL
                                   SELECT 4, '中国日报' UNION ALL
                                   SELECT 5, '环球时报' UNION ALL
                                   SELECT 6, '虎嗅网' UNION ALL
                                   SELECT 7, 'IT之家' UNION ALL
                                   SELECT 11, '人民网' UNION ALL
                                   SELECT 12, '第一财经' UNION ALL
                                   SELECT 13, '知乎热榜' UNION ALL
                                   SELECT 14, '路透社' UNION ALL
                                   SELECT 15, '36氪' UNION ALL
                                   SELECT 16, '澎湃新闻' UNION ALL
                                   SELECT 17, '中新网'
                               ) as sources WHERE sources.id = n.source_id)"""

        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                # Optional category filter (unset rows count as 'general').
                where_clause = ""
                query_params = []
                if category != 'all':
                    where_clause = "WHERE COALESCE(n.category, 'general') = %s"
                    query_params.append(category)

                # Total row count for pagination.
                cursor.execute(f"SELECT COUNT(*) as total FROM news n {where_clause}", query_params)
                total = cursor.fetchone()['total']

                if sort_by == 'hot':
                    order_by_clause = "ORDER BY COALESCE(n.hot_score, 0) DESC, n.created_at DESC"
                else:
                    order_by_clause = "ORDER BY n.created_at DESC"

                # Only the favorite flag and its join differ between logged-in
                # and anonymous users — build those pieces conditionally
                # instead of duplicating the entire query.
                if user_id:
                    favorite_select = "CASE WHEN uf.news_id IS NOT NULL THEN 1 ELSE 0 END as is_favorited"
                    favorite_join = "LEFT JOIN user_favorites uf ON n.id = uf.news_id AND uf.user_id = %s"
                    params = [user_id] + query_params + [per_page, offset]
                else:
                    favorite_select = "0 as is_favorited"
                    favorite_join = ""
                    params = query_params + [per_page, offset]

                main_query = f"""
                    SELECT n.*, 
                           {sources_name_sql} as source_name,
                           COALESCE(n.category, 'general') as category,
                           ns.summary,
                           {favorite_select},
                           COALESCE(n.original_publish_time, n.published_date) as display_publish_time,
                           COALESCE(n.views, 0) as views,
                           COALESCE(n.favorites_count, 0) as favorites_count,
                           COALESCE(n.hot_score, 0) as hot_score
                    FROM news n
                    LEFT JOIN news_summaries ns ON n.id = ns.news_id
                    {favorite_join}
                    {where_clause}
                    {order_by_clause}
                    LIMIT %s OFFSET %s
                """
                cursor.execute(main_query, params)
                news_items = cursor.fetchall()

                # Datetimes are not JSON-serializable; convert to ISO strings.
                for item in news_items:
                    for field in ('published_date', 'created_at', 'display_publish_time'):
                        if item[field]:
                            item[field] = item[field].isoformat()

                return jsonify({
                    'status': 'success',
                    'news': news_items,
                    'pagination': {
                        'total': total,
                        'page': page,
                        'per_page': per_page,
                        'pages': (total + per_page - 1) // per_page
                    }
                })

    except Exception as e:
        logger.error(f"获取新闻列表失败: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500



@app.route('/api/generate-summary', methods=['POST'])
def generate_summary_api():
    """Generate and persist an AI summary for one news item (login required).

    Tries to scrape the article's full text from its original link first and
    falls back to the stored `content` column whenever scraping fails or the
    extracted text is too short.  The summary is upserted into
    `news_summaries` so regeneration overwrites the previous one.
    """
    if 'user_id' not in session:
        return jsonify({"status": "error", "message": "请先登录"}), 401
        
    data = request.get_json()
    news_id = data.get('news_id')
    if not news_id:
        return jsonify({"status": "error", "message": "缺少新闻ID"}), 400
    
    try:
        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                # Load the article's link and stored (possibly truncated) content.
                cursor.execute("SELECT link, content FROM news WHERE id = %s", (news_id,))
                news = cursor.fetchone()
                if not news:
                    return jsonify({"status": "error", "message": "未找到新闻"}), 404
                
                # Prefer the full article text scraped from the source page.
                full_content_html = None
                if news['link']:
                    try:
                        result = fetch_full_content(news['link'])
                        # Newer fetch_full_content returns a dict carrying both
                        # the HTML content and an extracted publish time.
                        if isinstance(result, dict):
                            full_content_html = result.get('content', '')
                            publish_time = result.get('publish_time')
                            
                            # Persist the real publish time when the scraper found one.
                            if publish_time:
                                try:
                                    cursor.execute(
                                        "UPDATE news SET original_publish_time = %s WHERE id = %s",
                                        (publish_time, news_id)
                                    )
                                    conn.commit()
                                except Exception as e:
                                    logger.warning(f"更新原文发布时间失败: {e}")
                        else:
                            # Backward compatibility: older versions returned plain HTML.
                            full_content_html = result
                            
                        # Strip the HTML down to plain text for summarization.
                        # NOTE(review): the '<p class="text-red-500">' prefix looks like
                        # fetch_full_content's error marker — confirm against that helper.
                        if full_content_html and not full_content_html.startswith('<p class="text-red-500">'):
                            soup = BeautifulSoup(full_content_html, 'html.parser')
                            full_text = soup.get_text(strip=True)
                            if len(full_text) > 100:
                                content_for_summary = full_text
                            else:
                                # Extracted text too short to be the real body — fall back.
                                content_for_summary = news['content']
                        else:
                            content_for_summary = news['content']
                    except Exception as e:
                        logger.warning(f"获取完整内容失败，使用基本内容: {str(e)}")
                        content_for_summary = news['content']
                else:
                    content_for_summary = news['content']
                
                if not content_for_summary:
                    return jsonify({"status": "error", "message": "未找到可用的内容"}), 404
                
                # Produce the summary text from whichever content survived above.
                summary = generate_summary_text(content_for_summary)
                
                # Upsert so a second generation replaces the earlier summary.
                cursor.execute(
                    """INSERT INTO news_summaries (news_id, summary, created_at, updated_at) 
                       VALUES (%s, %s, NOW(), NOW()) 
                       ON DUPLICATE KEY UPDATE summary = %s, updated_at = NOW()""",
                    (news_id, summary, summary)
                )
                conn.commit()
                
                return jsonify({"status": "success", "summary": summary})
    except Exception as e:
        logger.error(f"生成摘要错误: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500

# 用户认证路由
@app.route('/api/register', methods=['POST'])
def register():
    """Register a new account from a JSON body {username, email, password}."""
    # silent=True + `or {}`: a missing or malformed JSON body becomes a clean
    # 400 response instead of an unhandled TypeError (HTTP 500).
    data = request.get_json(silent=True) or {}
    if not all(k in data for k in ('username', 'email', 'password')):
        return jsonify({"status": "error", "message": "缺少必要参数"}), 400
    
    result = create_user(data['username'], data['email'], data['password'])
    if result['status'] == 'error':
        return jsonify(result), 400
    return jsonify(result)

@app.route('/api/login', methods=['POST'])
def login():
    """Authenticate a user and establish a persistent session on success."""
    # silent=True + `or {}`: a missing or malformed JSON body becomes a clean
    # 400 response instead of an unhandled TypeError (HTTP 500).
    data = request.get_json(silent=True) or {}
    if not all(k in data for k in ('username', 'password')):
        return jsonify({"status": "error", "message": "缺少必要参数"}), 400
    
    result = authenticate_user(data['username'], data['password'])
    if result['status'] == 'success':
        # Persistent session: expires after PERMANENT_SESSION_LIFETIME (1 hour).
        session.permanent = True
        session['user_id'] = result['user']['id']
        session['username'] = result['user']['username']
    
    return jsonify(result)

@app.route('/api/logout', methods=['POST'])
def logout():
    """Log the current user out by discarding all session data."""
    session.clear()  # removes user_id/username and anything else stored
    return jsonify({"status": "success", "message": "已成功登出"})

@app.route('/api/user')
def get_current_user():
    """Return the identity of the logged-in user, or 401 when anonymous."""
    if 'user_id' not in session:
        return jsonify({"status": "error", "message": "未登录"}), 401

    return jsonify({
        "status": "success",
        "user": {
            "id": session['user_id'],
            "username": session['username']
        }
    })

# 收藏功能路由
@app.route('/api/favorites/add', methods=['POST'])
def add_favorite():
    """Add a news item to the logged-in user's favorites."""
    if 'user_id' not in session:
        return jsonify({"status": "error", "message": "请先登录"}), 401
    
    # silent=True + `or {}`: a missing or malformed JSON body becomes a clean
    # 400 response instead of an unhandled TypeError (HTTP 500).
    data = request.get_json(silent=True) or {}
    news_id = data.get('news_id')
    if not news_id:
        return jsonify({"status": "error", "message": "缺少新闻ID"}), 400
    
    result = add_favorite_news(session['user_id'], news_id)
    if result['status'] == 'error':
        return jsonify(result), 400
    return jsonify(result)

@app.route('/api/favorites/remove', methods=['POST'])
def remove_favorite():
    """Remove a news item from the logged-in user's favorites."""
    if 'user_id' not in session:
        return jsonify({"status": "error", "message": "请先登录"}), 401
    
    # silent=True + `or {}`: a missing or malformed JSON body becomes a clean
    # 400 response instead of an unhandled TypeError (HTTP 500).
    data = request.get_json(silent=True) or {}
    news_id = data.get('news_id')
    if not news_id:
        return jsonify({"status": "error", "message": "缺少新闻ID"}), 400
    
    result = remove_favorite_news(session['user_id'], news_id)
    if result['status'] == 'error':
        return jsonify(result), 400
    return jsonify(result)

@app.route('/api/favorites')
def get_favorites():
    """List the logged-in user's favorited news, paginated."""
    if 'user_id' not in session:
        return jsonify({"status": "error", "message": "请先登录"}), 401
    
    page = request.args.get('page', 1, type=int)
    # Cap per_page at 50 like the other list endpoints (news list, search
    # history) so a caller cannot request an unbounded result set.
    per_page = min(request.args.get('per_page', 20, type=int), 50)
    
    result = get_user_favorites(session['user_id'], page, per_page)
    if result['status'] == 'error':
        return jsonify(result), 500
    
    return jsonify(result)

@app.route('/api/recommendations')
def get_recommendations():
    """Personalized news recommendations for the logged-in user (login required)."""
    if 'user_id' not in session:
        return jsonify({"status": "error", "message": "请先登录"}), 401

    uid = session['user_id']
    page = request.args.get('page', 1, type=int)
    per_page = request.args.get('per_page', 6, type=int)
    explain = request.args.get('explain', 'false').lower() == 'true'
    refresh = request.args.get('refresh', 'false').lower() == 'true'

    # A refresh request invalidates any cached recommendations first.
    if refresh and intelligent_recommender:
        intelligent_recommender.clear_user_recommendation_cache(uid)

    result = get_recommended_news(uid, page, per_page, include_explain=explain)
    if result.get('status', 'success') == 'error':
        return jsonify(result), 500
    return jsonify(result)

@app.route('/api/track-behavior', methods=['POST'])
def track_user_behavior():
    """Record a user's interaction with a news item ('view', 'click', 'read', 'share')."""
    try:
        if 'user_id' not in session:
            return jsonify({"status": "error", "message": "需要登录"}), 401

        payload = request.get_json()
        if not payload:
            return jsonify({"status": "error", "message": "缺少请求数据"}), 400

        news_id = payload.get('news_id')
        action = payload.get('action')
        if not news_id or not action:
            return jsonify({"status": "error", "message": "缺少必要参数"}), 400

        # Delegate to the recommender when it has been initialized; duration
        # (seconds read) and extra_data are optional.
        if intelligent_recommender:
            intelligent_recommender.track_user_behavior(
                session['user_id'],
                news_id,
                action,
                payload.get('duration'),
                payload.get('extra_data'),
            )

        return jsonify({"status": "success", "message": "行为记录成功"})

    except Exception as e:
        logger.error(f"记录用户行为失败: {e}")
        return jsonify({"status": "error", "message": "记录失败"}), 500

@app.route('/api/news/full-content', methods=['POST'])
def get_full_content():
    """Fetch the full article text for a news item (open to anonymous users).

    Side effects: counts the request as a view (increments `views` and
    recomputes `hot_score` = favorites*3 + views), and persists the article's
    real publish time when the scraper extracts one.
    """
    # silent=True + `or {}`: a missing or malformed JSON body becomes a clean
    # 400 response instead of an unhandled TypeError (HTTP 500).
    data = request.get_json(silent=True) or {}
    news_id = data.get('news_id')
    if not news_id:
        return jsonify({"status": "error", "message": "缺少新闻ID"}), 400
    
    try:
        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                # Look up the original article URL.
                cursor.execute("SELECT link FROM news WHERE id = %s", (news_id,))
                news = cursor.fetchone()
                if not news or not news['link']:
                    return jsonify({"status": "error", "message": "未找到新闻链接"}), 404
                
                # Scrape the full content (and, when available, publish time).
                result = fetch_full_content(news['link'])
                
                if isinstance(result, dict):
                    full_content = result.get('content', '')
                    publish_time = result.get('publish_time')
                    
                    # Persist the real publish time when the scraper found one.
                    if publish_time:
                        try:
                            cursor.execute(
                                "UPDATE news SET original_publish_time = %s WHERE id = %s",
                                (publish_time, news_id)
                            )
                            conn.commit()
                            logger.info(f"更新新闻 {news_id} 的原文发布时间: {publish_time}")
                        except Exception as e:
                            logger.warning(f"更新原文发布时间失败: {e}")
                else:
                    # Backward compatibility: older fetch_full_content returned plain HTML.
                    full_content = result
                    publish_time = None
                
                # Count this as a view and refresh the heat score.  MySQL
                # evaluates SET assignments left to right, so `views` in the
                # hot_score expression already holds the incremented value;
                # the previous `COALESCE(views, 0) + 1` there double-counted
                # the new view.
                try:
                    cursor.execute(
                        """
                        UPDATE news 
                        SET views = COALESCE(views, 0) + 1,
                            hot_score = COALESCE(favorites_count, 0) * 3 + views
                        WHERE id = %s
                        """,
                        (news_id,)
                    )
                    conn.commit()
                except Exception as e:
                    logger.warning(f"更新新闻浏览量失败: {e}")
                
                return jsonify({
                    "status": "success", 
                    "full_content": full_content,
                    "source_url": news['link'],
                    "original_publish_time": publish_time.isoformat() if publish_time else None
                })
    except Exception as e:
        logger.error(f"获取完整内容错误: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500

@app.route('/api/crawl-direct', methods=['POST'])
def trigger_direct_crawl():
    """Manually kick off the direct-crawl pipeline and report how many items were added."""
    try:
        added = fetch_and_store_direct_news()
        return jsonify({
            "status": "success", 
            "message": f"直接抓取完成，新增 {added} 条新闻",
            "new_count": added
        })
    except Exception as e:
        logger.error(f"手动触发直接抓取失败: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500

@app.route('/api/reclassify-news', methods=['POST'])
def reclassify_existing_news():
    """Re-run the intelligent classifier over news still marked NULL/'general'.

    Optional JSON body: {"limit": N} — max rows processed per call
    (default 100).  Adds the `category` column on demand and returns the
    resulting category distribution with human-readable names.
    """
    try:
        # silent=True tolerates an empty or non-JSON request body.
        data = request.get_json(silent=True) or {}
        limit = data.get('limit', 100)  # process at most this many rows per call
        
        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                # Older schemas may predate the category column — add it on demand.
                try:
                    cursor.execute("SHOW COLUMNS FROM news LIKE 'category'")
                    if not cursor.fetchone():
                        cursor.execute("ALTER TABLE news ADD COLUMN category VARCHAR(50) DEFAULT 'general'")
                        logger.info("已添加category字段到news表")
                except Exception as e:
                    logger.warning(f"检查/添加category字段时出错: {e}")
                
                # Unclassified rows, newest first.  The inline id→name map now
                # matches the one used by get_news (source 17 '中新网' was
                # missing here, so those rows classified with no source hint).
                cursor.execute("""
                    SELECT id, title, content, source_id,
                           (SELECT name FROM (
                               SELECT 1 as id, '中国新闻网' as name UNION ALL
                               SELECT 2, '央视新闻' UNION ALL
                               SELECT 3, '新华网' UNION ALL
                               SELECT 4, '中国日报' UNION ALL
                               SELECT 5, '环球时报' UNION ALL
                               SELECT 6, '虎嗅网' UNION ALL
                               SELECT 7, 'IT之家' UNION ALL
                               SELECT 11, '人民网' UNION ALL
                               SELECT 12, '第一财经' UNION ALL
                               SELECT 13, '知乎热榜' UNION ALL
                               SELECT 14, '路透社' UNION ALL
                               SELECT 15, '36氪' UNION ALL
                               SELECT 16, '澎湃新闻' UNION ALL
                               SELECT 17, '中新网'
                           ) as sources WHERE sources.id = n.source_id) as source_name
                    FROM news n 
                    WHERE category IS NULL OR category = 'general'
                    ORDER BY created_at DESC 
                    LIMIT %s
                """, (limit,))
                
                news_items = cursor.fetchall()
                
                if not news_items:
                    return jsonify({
                        "status": "success",
                        "message": "没有需要重新分类的新闻",
                        "reclassified_count": 0
                    })
                
                reclassified_count = 0
                category_stats = {}
                
                for news_item in news_items:
                    # Classify from title + content, using the source as a hint.
                    new_category = news_classifier.classify_news(
                        title=news_item['title'] or '',
                        content=news_item['content'] or '',
                        source_name=news_item['source_name'] or ''
                    )
                    
                    cursor.execute(
                        "UPDATE news SET category = %s WHERE id = %s",
                        (new_category, news_item['id'])
                    )
                    
                    reclassified_count += 1
                    category_stats[new_category] = category_stats.get(new_category, 0) + 1
                
                conn.commit()
                
                return jsonify({
                    "status": "success",
                    "message": f"成功重新分类 {reclassified_count} 条新闻",
                    "reclassified_count": reclassified_count,
                    "category_distribution": {
                        news_classifier.get_category_name(k): v 
                        for k, v in category_stats.items()
                    }
                })
                
    except Exception as e:
        logger.error(f"重新分类新闻失败: {e}")
        return jsonify({"status": "error", "message": str(e)[:100]}), 500

@app.route('/api/category-stats', methods=['GET'])
def get_category_statistics():
    """Per-category news counts, including items added in the last 24 hours."""
    try:
        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                # Uncategorized rows are folded into 'general'.
                cursor.execute("""
                    SELECT 
                        COALESCE(category, 'general') as category,
                        COUNT(*) as count,
                        COUNT(CASE WHEN created_at >= DATE_SUB(NOW(), INTERVAL 24 HOUR) THEN 1 END) as recent_count
                    FROM news 
                    GROUP BY COALESCE(category, 'general')
                    ORDER BY count DESC
                """)
                
                # Attach the human-readable category name to each row.
                formatted_stats = [
                    {
                        'category': row['category'],
                        'category_name': news_classifier.get_category_name(row['category']),
                        'count': row['count'],
                        'recent_count': row['recent_count'],
                    }
                    for row in cursor.fetchall()
                ]
                
                return jsonify({
                    "status": "success",
                    "category_stats": formatted_stats,
                    "total_categories": len(formatted_stats)
                })
                
    except Exception as e:
        logger.error(f"获取分类统计失败: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500

@app.route('/api/sources-status', methods=['GET'])
def get_sources_status():
    """Report availability of every configured news source.

    RSS feeds are probed with a HEAD request; direct-crawl sources are
    listed without probing. Each entry is enriched with the number of
    articles stored in the last 24 hours and the latest update time.
    """
    try:
        # Probe each RSS feed with a lightweight HEAD request (5s timeout).
        rss_sources = []
        for source in NEWS_SOURCES:
            try:
                resp = requests.head(source['rss_url'], timeout=5)
                if resp.status_code == 200:
                    status, status_code = '正常', 'success'
                else:
                    status, status_code = f'异常({resp.status_code})', 'error'
            except Exception as e:
                status, status_code = f'错误: {str(e)[:30]}', 'error'

            rss_sources.append({
                'id': source['id'],
                'name': source['name'],
                'category': source['category'],
                'url': source['rss_url'],
                'type': 'RSS',
                'status': status,
                'status_code': status_code,
            })

        # Direct-crawl sources are not probed here; flag them as untested.
        direct_sources = [
            {
                'id': src['id'],
                'name': src['name'],
                'category': src['category'],
                'crawler': src['crawler'],
                'type': '直接抓取',
                'status': '可用（需测试）',
                'status_code': 'info',
            }
            for src in DIRECT_NEWS_SOURCES
        ]

        # Per-source article counts and last update over the past 24 hours.
        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                cursor.execute("""
                    SELECT source_id, COUNT(*) as count, MAX(created_at) as last_update
                    FROM news 
                    WHERE created_at >= DATE_SUB(NOW(), INTERVAL 24 HOUR)
                    GROUP BY source_id
                """)
                recent_stats = {row['source_id']: row for row in cursor.fetchall()}

        # Merge the recent-crawl statistics into every source entry.
        for entry in rss_sources + direct_sources:
            stats = recent_stats.get(entry['id'])
            if stats is not None:
                entry['recent_count'] = stats['count']
                entry['last_update'] = stats['last_update'].isoformat() if stats['last_update'] else None
            else:
                entry['recent_count'] = 0
                entry['last_update'] = None

        return jsonify({
            'status': 'success',
            'rss_sources': rss_sources,
            'direct_sources': direct_sources,
            'summary': {
                'total_sources': len(rss_sources) + len(direct_sources),
                'rss_sources_count': len(rss_sources),
                'direct_sources_count': len(direct_sources),
                'active_rss_sources': len([s for s in rss_sources if s['status_code'] == 'success']),
                'check_time': datetime.now().isoformat()
            }
        })
    except Exception as e:
        logger.error(f"获取源状态失败: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500

@app.route('/api/test-crawler', methods=['POST'])
def test_crawler():
    """Smoke-test the direct crawler by fetching only the Zhihu hot list."""
    try:
        fetched = DirectNewsCrawler().crawl_zhihu_hot()
        # Include up to three items so the caller can eyeball the payload.
        sample = fetched[:3] if fetched else []
        return jsonify({
            "status": "success",
            "message": f"测试成功，获取 {len(fetched)} 条知乎热榜新闻",
            "sample_news": sample,
        })
    except Exception as e:
        logger.error(f"测试抓取器失败: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500

@app.route('/api/init-db', methods=['POST'])
def init_database():
    """Initialize database tables (development use only).

    Creates the user_favorites table, the recommendation-system tables
    (from SQL script or a built-in fallback schema), adds the
    original_publish_time column to news if missing, then (re)builds the
    global intelligent recommender instance.

    Returns:
        JSON success message, or a 500 JSON error with a truncated reason.
    """
    global intelligent_recommender
    
    try:
        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                # Create the user-favorites table (idempotent via IF NOT EXISTS).
                cursor.execute("""
                    CREATE TABLE IF NOT EXISTS user_favorites (
                        id INT AUTO_INCREMENT PRIMARY KEY COMMENT '收藏记录ID',
                        user_id INT NOT NULL COMMENT '用户ID',
                        news_id INT NOT NULL COMMENT '新闻ID',
                        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP COMMENT '收藏时间',
                        INDEX idx_user_id (user_id),
                        INDEX idx_news_id (news_id),
                        INDEX idx_user_news (user_id, news_id),
                        UNIQUE KEY unique_user_news (user_id, news_id) COMMENT '防止重复收藏'
                    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='用户收藏新闻表'
                """)
                
                # Create recommendation-system tables; failures here are
                # non-fatal (logged as warnings) so init can still finish.
                try:
                    # Skip creation if the behavior table already exists.
                    cursor.execute("SHOW TABLES LIKE 'user_reading_behavior'")
                    if not cursor.fetchone():
                        logger.info("创建推荐系统相关表...")
                        # Prefer the full schema from the SQL script file.
                        try:
                            with open('create_recommendation_tables.sql', 'r', encoding='utf-8') as f:
                                sql_script = f.read()
                                # Execute statement-by-statement. NOTE(review):
                                # a naive split on ';' breaks if any statement
                                # contains a literal ';' in a string — confirm
                                # the script avoids that.
                                for statement in sql_script.split(';'):
                                    if statement.strip():
                                        cursor.execute(statement)
                                logger.info("推荐系统相关表创建完成")
                        except FileNotFoundError:
                            logger.warning("未找到create_recommendation_tables.sql文件，使用简单表结构")
                            # Fallback: minimal reading-behavior table so the
                            # recommender has something to write into.
                            cursor.execute("""
                                CREATE TABLE IF NOT EXISTS user_reading_behavior (
                                    id INT AUTO_INCREMENT PRIMARY KEY,
                                    user_id INT NOT NULL,
                                    news_id INT NOT NULL,
                                    action VARCHAR(50) NOT NULL COMMENT '行为类型: view, click, read, share',
                                    read_duration INT DEFAULT NULL COMMENT '阅读时长（秒）',
                                    extra_data JSON DEFAULT NULL,
                                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                                    INDEX idx_user_id (user_id),
                                    INDEX idx_news_id (news_id),
                                    INDEX idx_action (action),
                                    FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE,
                                    FOREIGN KEY (news_id) REFERENCES news(id) ON DELETE CASCADE
                                ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
                            """)
                except Exception as e:
                    logger.warning(f"创建推荐表失败: {e}")
                
                # Add the original-publish-time column if it does not exist.
                # There is no "IF NOT EXISTS" for columns here, so a duplicate
                # column is detected by matching the error message text.
                try:
                    cursor.execute("""
                        ALTER TABLE news 
                        ADD COLUMN original_publish_time DATETIME NULL COMMENT '原文真实发布时间'
                    """)
                    logger.info("成功添加 original_publish_time 字段")
                except Exception as e:
                    if "Duplicate column name" in str(e):
                        logger.info("original_publish_time 字段已存在")
                    else:
                        logger.warning(f"添加 original_publish_time 字段失败: {e}")
                
                conn.commit()
                
                # Rebuild the global recommender now that the tables exist;
                # failure is tolerated (the app falls back to simple logic).
                try:
                    intelligent_recommender = IntelligentNewsRecommender(DB_CONFIG)
                    logger.info("智能推荐系统初始化完成")
                except Exception as e:
                    logger.warning(f"智能推荐系统初始化失败: {e}")
                
                return jsonify({"status": "success", "message": "数据库表创建成功，智能推荐系统已初始化"})
    except Exception as e:
        logger.error(f"创建数据库表错误: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500


@app.route('/api/news', methods=['GET'])
def get_news_list():
    """获取新闻列表API — paginated news listing.

    Query params:
        page (int): 1-based page number, default 1.
        per_page (int): items per page, default 12, capped at 50.
        category (str): category filter; 'all' disables filtering.
        sort_by (str): 'hot' orders by hot_score, otherwise by created_at.

    Returns:
        JSON with the news rows (favorite flags included for logged-in
        users) plus pagination metadata, or a 500 JSON error.
    """
    # Inline id -> display-name mapping for news sources, shared by both
    # query variants (previously duplicated verbatim in each branch).
    # NOTE(review): this duplicates the NEWS_SOURCES config — keep in sync.
    source_name_sql = """(SELECT name FROM (
                               SELECT 1 as id, '中国新闻网' as name UNION ALL
                               SELECT 2, '央视新闻' UNION ALL
                               SELECT 3, '新华网' UNION ALL
                               SELECT 4, '中国日报' UNION ALL
                               SELECT 5, '环球时报' UNION ALL
                               SELECT 6, '虎嗅网' UNION ALL
                               SELECT 7, 'IT之家' UNION ALL
                               SELECT 11, '人民网' UNION ALL
                               SELECT 12, '第一财经' UNION ALL
                               SELECT 13, '知乎热榜' UNION ALL
                               SELECT 14, '路透社' UNION ALL
                               SELECT 15, '36氪' UNION ALL
                               SELECT 16, '澎湃新闻' UNION ALL
                               SELECT 17, '中新网'
                           ) as sources WHERE sources.id = n.source_id)"""
    try:
        # Pagination / filter / sort parameters.
        page = request.args.get('page', 1, type=int)
        per_page = min(request.args.get('per_page', 12, type=int), 50)  # 最多50条 (cap)
        category = request.args.get('category', 'all')
        sort_by = request.args.get('sort_by', 'date')

        # None when not logged in; drives the favorites join below.
        user_id = session.get('user_id')

        offset = (page - 1) * per_page

        with get_db_connection() as conn:
            with conn.cursor(dictionary=True) as cursor:
                # Optional category filter; NULL categories count as 'general'.
                where_clause = ""
                query_params = []
                if category != 'all':
                    where_clause = "WHERE COALESCE(n.category, 'general') = %s"
                    query_params.append(category)

                # Total matching rows, for pagination metadata.
                cursor.execute(f"SELECT COUNT(*) as total FROM news n {where_clause}", query_params)
                total = cursor.fetchone()['total']

                # Sort order: 'hot' uses hot_score with recency tiebreak.
                if sort_by == 'hot':
                    order_by_clause = "ORDER BY COALESCE(n.hot_score, 0) DESC, n.created_at DESC"
                else:
                    order_by_clause = "ORDER BY n.created_at DESC"

                if user_id:
                    # Logged-in: join user_favorites to flag favorited items.
                    favorite_select = "CASE WHEN uf.news_id IS NOT NULL THEN 1 ELSE 0 END as is_favorited"
                    favorite_join = "LEFT JOIN user_favorites uf ON n.id = uf.news_id AND uf.user_id = %s"
                    query_params_with_pagination = [user_id] + query_params + [per_page, offset]
                else:
                    # Anonymous: constant 0 favorite flag, no extra join.
                    favorite_select = "0 as is_favorited"
                    favorite_join = ""
                    query_params_with_pagination = query_params + [per_page, offset]

                main_query = f"""
                    SELECT n.*, 
                           {source_name_sql} as source_name,
                           COALESCE(n.category, 'general') as category,
                           ns.summary,
                           {favorite_select},
                           COALESCE(n.original_publish_time, n.published_date) as display_publish_time,
                           COALESCE(n.views, 0) as views,
                           COALESCE(n.favorites_count, 0) as favorites_count,
                           COALESCE(n.hot_score, 0) as hot_score
                    FROM news n
                    LEFT JOIN news_summaries ns ON n.id = ns.news_id
                    {favorite_join}
                    {where_clause}
                    {order_by_clause}
                    LIMIT %s OFFSET %s
                """

                cursor.execute(main_query, query_params_with_pagination)
                news_items = cursor.fetchall()

                # Serialize datetime columns to ISO-8601 for JSON transport.
                for item in news_items:
                    for field in ('published_date', 'created_at', 'display_publish_time'):
                        if item[field]:
                            item[field] = item[field].isoformat()

                return jsonify({
                    'status': 'success',
                    'news': news_items,
                    'pagination': {
                        'total': total,
                        'page': page,
                        'per_page': per_page,
                        'pages': (total + per_page - 1) // per_page
                    }
                })

    except Exception as e:
        # Fixed: previous message said "创建数据库表错误" (copy-pasted
        # from init_database), mislabeling failures of this endpoint.
        logger.error(f"获取新闻列表失败: {e}")
        return jsonify({"status": "error", "message": str(e)[:50]}), 500

@app.route('/')
def index():
    """Serve the single-page frontend."""
    return render_template('index.html')

@app.route('/favicon.ico')
def favicon():
    """返回favicon图标 — serve an inline SVG icon.

    Returns a small inline SVG (newspaper emoji on a blue square) so the
    browser's automatic /favicon.ico request does not 404. Removed the
    unused function-local imports of send_from_directory and os.
    """
    return '''<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100">
        <rect width="100" height="100" fill="#165DFF"/>
        <text x="50" y="70" font-size="60" text-anchor="middle" fill="white">📰</text>
    </svg>''', 200, {'Content-Type': 'image/svg+xml'}

# Start the scheduled-task thread and the web service.
if __name__ == '__main__':
    # Ensure the Flask template directory exists before serving.
    if not os.path.exists('templates'):
        os.makedirs('templates')

    # Removed: the old try/except `import chardet` + runtime pip-install
    # fallback was unreachable — chardet is imported unconditionally at the
    # top of this file, so a missing package would already have aborted.

    # Initialize search tables and news heat columns.
    init_search_tables()
    init_news_heat_columns()

    # Initialize the intelligent recommender; fall back to the simple
    # algorithm when construction fails (e.g. DB unavailable). The
    # previous explicit globals() write was redundant: this assignment is
    # already at module scope and rebinds the global directly.
    try:
        intelligent_recommender = IntelligentNewsRecommender(DB_CONFIG)
        logger.info("智能推荐系统初始化成功")
    except Exception as e:
        logger.warning(f"智能推荐系统初始化失败: {e}")
        logger.info("将使用简单推荐算法")

    print("🎆 新闻聚合平台已启动")
    print("💻 访问地址: http://127.0.0.1:5000")
    print("🔍 新功能: 完善的新闻检索、搜索历史、热门搜索")
    print("📊 智能分类: 支持政治、财经、科技等九大分类")
    print("🚫 内容过滤: 已对IT之家、第一财经等进行内容截断优化")

    # Run scheduled tasks in a background daemon thread so they do not
    # block the Flask server and die with the main process.
    scheduled_thread = threading.Thread(target=run_scheduled_tasks, daemon=True)
    scheduled_thread.start()

    # NOTE(review): debug=True should be disabled outside development.
    app.run(debug=True, use_reloader=False)