import sqlite3
import os
import re
import json
from datetime import datetime
from typing import Dict, List, Any, Optional

# NOTE: additional imports are declared at the top of the file.

def extract_article_metadata(content: str) -> Dict[str, Any]:
    """Extract lightweight metadata (keywords, abstract) from article text.

    Looks for "关键词:"/"Keywords:" and "摘要:"/"Abstract:" style marker
    lines and returns a dict with optional 'keywords' (list of strings)
    and 'abstract' (string) entries. Returns {} for empty input.
    """
    if not content:
        return {}

    metadata: Dict[str, Any] = {}

    # Keyword line, e.g. "关键词：AI、ML" or "Keywords: a, b".
    # BUGFIX: the original used [关键词|关键字] — a character CLASS matching any
    # single one of those characters (false positives such as "单词：..." or
    # "字: ..."); a non-capturing group is the correct alternation.
    keyword_patterns = [
        r'(?:关键词|关键字)[：:]\s*([^\n]+)',
        r'(?:Keywords?|keywords?)[：:]\s*([^\n]+)'
    ]

    for pattern in keyword_patterns:
        match = re.search(pattern, content, re.IGNORECASE)
        if match:
            keywords = match.group(1).strip()
            # Split on common CJK/Latin list separators and drop empties.
            metadata['keywords'] = [kw.strip() for kw in re.split(r'[,，、;/]', keywords) if kw.strip()]
            break

    # Abstract line, e.g. "摘要：..." or "Abstract: ...".
    # BUGFIX: same character-class -> alternation correction as above.
    abstract_patterns = [
        r'(?:摘要|概要)[：:]\s*([^\n]+)',
        r'(?:Abstract|Summary)[：:]\s*([^\n]+)'
    ]

    for pattern in abstract_patterns:
        match = re.search(pattern, content, re.IGNORECASE)
        if match:
            metadata['abstract'] = match.group(1).strip()
            break

    return metadata

def analyze_article_content(content: str) -> Dict[str, int]:
    """Compute basic size statistics for an article body.

    Returns a dict with character, word, sentence and paragraph counts;
    an empty dict when content is falsy.
    """
    if not content:
        return {}

    # Paragraphs are blank-line-separated, non-empty blocks.
    paragraphs = [block for block in content.split('\n\n') if block.strip()]

    return {
        'char_count': len(content),
        'char_count_no_spaces': len(re.sub(r'\s', '', content)),
        'word_count': len(re.findall(r'\w+', content)),
        'sentence_count': len(re.split(r'[.!?。！？]', content)) - 1,
        'paragraph_count': len(paragraphs),
    }

def extract_tags_from_content(content: str) -> List[str]:
    """Extract tags written as "#tag" or "[tag]" from article text.

    Returns a de-duplicated list of tag strings. BUGFIX: the original used
    list(set(...)), whose order is nondeterministic across runs (string hash
    randomization); the result is now deterministic — first occurrence wins,
    hashtags before bracket tags.
    """
    if not content:
        return []

    # "#tag" style tags (no whitespace or '#' inside the tag).
    hashtags = re.findall(r'#([^\s#]+)', content)
    # "[tag]" style tags.
    brackets = re.findall(r'\[([^\]]+)\]', content)

    # dict.fromkeys de-duplicates while preserving insertion order.
    return list(dict.fromkeys(hashtags + brackets))

def generate_content_summary(content: str, max_length: int = 200) -> str:
    """Produce a short plain-text summary of *content*.

    Whitespace is collapsed first; if the cleaned text fits within
    max_length it is returned whole, otherwise the text is truncated at a
    sentence boundary when possible, with "..." appended.
    """
    if not content:
        return ""

    normalized = re.sub(r'\s+', ' ', content.strip())
    if len(normalized) <= max_length:
        return normalized

    # Try to cut on sentence boundaries, reserving 3 chars for "...".
    pieces = re.split(r'[.!?。！？]', normalized)
    built = ""
    for piece in pieces:
        if len(built) + len(piece) <= max_length - 3:
            built += piece + "."
            continue
        # The current piece does not fit: either hard-truncate the very
        # first sentence, or terminate what we already have with an ellipsis.
        built = piece[:max_length - 3] + "..." if not built else built + "..."
        break
    return built if built else normalized[:max_length] + "..."

def categorize_article(title: str, content: str, categories: List[str]) -> List[str]:
    """Return the subset of *categories* whose name appears in the article.

    Matching is case-insensitive over the concatenated title + content.
    Returns [] when either title or content is empty.
    """
    if not content or not title:
        return []

    haystack = (title + " " + content).lower()
    return [label for label in categories if label.lower() in haystack]

def export_articles_to_json(articles: List[tuple], filename: str = "exported_articles.json"):
    """Export article rows to a JSON file.

    Parameters:
        articles: iterable of (title, link, date, source, content) tuples.
        filename: output path for the JSON document.

    Returns:
        True on success, False if writing failed.
    """
    export_data = []
    categories = ["科技", "经济", "政治", "文化", "体育", "娱乐"]  # sample category set

    for article in articles:
        title, link, date, source, content = article

        processed_article = {
            "title": title,
            "link": link,
            "date": date,
            "source": source,
            "has_content": bool(content and content.strip()),
            "content_length": len(content) if content else 0
        }

        if content and content.strip():
            # Enrich with derived analysis only when there is real content.
            processed_article.update({
                "metadata": extract_article_metadata(content),
                "stats": analyze_article_content(content),
                "tags": extract_tags_from_content(content),
                "summary": generate_content_summary(content),
                "categories": categorize_article(title, content, categories)
            })

        export_data.append(processed_article)

    try:
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(export_data, f, ensure_ascii=False, indent=2)
        # BUGFIX: the success message printed a literal "(unknown)" — the
        # f-string had no placeholder; report the actual output filename.
        print(f"✅ 文章数据已导出到 {filename}")
        return True
    except Exception as e:
        print(f"❌ 导出失败: {e}")
        return False

# NOTE: superseded later in this file by a paginated redefinition of the
# same name; this version loads and prints every article at once.
def read_articles_from_db(enhanced: bool = False):
    """Read all articles from the database and print them as a list.

    Parameters:
        enhanced: when True, also print per-article stats, tags and a summary.

    Returns:
        A list of (title, link, date, source, content) rows, or [] on error.
    """
    try:
        # Connect to the database.
        # NOTE(review): the second argument is an absolute Windows path, so
        # os.path.join discards the dirname(__file__) prefix entirely —
        # presumably intentional for this machine, but worth confirming.
        db_path = os.path.join(os.path.dirname(__file__), r'E:\新闻聚合与摘要平台\high-quality-info-aggregation-main\high-quality-info-aggregation-main\database\title_link.db')
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        
        # Fetch every article, newest first.
        cursor.execute('''
            SELECT title, link, date, source, content 
            FROM title_link 
            ORDER BY date DESC
        ''')
        
        articles = cursor.fetchall()
        conn.close()
        
        print(f"📊 数据库中共有 {len(articles)} 篇文章")
        print("=" * 60)
        
        # Print one entry per article with a content-presence marker.
        for i, (title, link, date, source, content) in enumerate(articles, 1):
            has_content = "✅" if content and content.strip() else "❌"
            print(f"{i:2d}. {has_content} {title}")
            print(f"     📅 {date} | 📰 {source}")
            print(f"     🔗 {link}")
            
            if content and content.strip():
                print(f"     📝 内容长度: {len(content)} 字符")
                print(f"     📄 内容预览: {content[:100]}...")
                
                # Extra per-article analysis when enhanced mode is on.
                if enhanced:
                    stats = analyze_article_content(content)
                    tags = extract_tags_from_content(content)
                    summary = generate_content_summary(content)
                    
                    print(f"     📊 字数统计: {stats['word_count']} 字")
                    print(f"     🏷️  标签: {', '.join(tags) if tags else '无'}")
                    print(f"     📋 内容摘要: {summary}")
            print("-" * 60)
        
        # Aggregate statistics over the whole result set.
        articles_with_content = sum(1 for _, _, _, _, content in articles if content and content.strip())
        print(f"\n📈 统计信息:")
        print(f"   总文章数: {len(articles)}")
        print(f"   有内容的文章: {articles_with_content}")
        print(f"   无内容的文章: {len(articles) - articles_with_content}")
        
        return articles
        
    except Exception as e:
        # Broad catch: report the failure and degrade to an empty list.
        print(f"❌ 读取数据库失败: {e}")
        return []

# First (non-cached) version; redefined later in this file with caching.
def read_specific_article(title_keyword, enhanced: bool = False):
    """Fuzzy-search articles by title keyword and print each match.

    Parameters:
        title_keyword: substring matched against the title via SQL LIKE.
        enhanced: when True, also print metadata, stats, tags, categories
            and a generated summary for each match.

    Returns:
        The list of matching rows, or None when nothing matched or on error.
    """
    try:
        # NOTE(review): os.path.join with an absolute second argument discards
        # the dirname(__file__) prefix — confirm the hard-coded path is intended.
        db_path = os.path.join(os.path.dirname(__file__), r'E:\新闻聚合与摘要平台\high-quality-info-aggregation-main\high-quality-info-aggregation-main\database\title_link.db')
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        
        # Fuzzy match via LIKE with the keyword wrapped in wildcards.
        cursor.execute('''
            SELECT title, link, date, source, content 
            FROM title_link 
            WHERE title LIKE ?
            ORDER BY date DESC
        ''', (f'%{title_keyword}%',))
        
        articles = cursor.fetchall()
        conn.close()
        
        if not articles:
            print(f"❌ 未找到标题包含 '{title_keyword}' 的文章")
            return None
        
        categories = ["科技", "经济", "政治", "文化", "体育", "娱乐"]  # sample category set
        
        for idx, (title, link, date, source, content) in enumerate(articles, 1):
            print(f"\n📰 文章 {idx}:")
            print(f"标题: {title}")
            print(f"链接: {link}")
            print(f"日期: {date}")
            print(f"来源: {source}")
            
            if enhanced and content:
                # Derived analysis shown only in enhanced mode.
                metadata = extract_article_metadata(content)
                stats = analyze_article_content(content)
                tags = extract_tags_from_content(content)
                article_categories = categorize_article(title, content, categories)
                
                if metadata:
                    print(f"元数据: {metadata}")
                print(f"统计信息: {stats}")
                if tags:
                    print(f"标签: {', '.join(tags)}")
                if article_categories:
                    print(f"分类: {', '.join(article_categories)}")
            
            print("=" * 60)
            print("文章内容:")
            print("=" * 60)
            if content and content.strip():
                # In enhanced mode, show the generated summary first.
                if enhanced:
                    summary = generate_content_summary(content)
                    print(f"📋 内容摘要: {summary}\n")
                
                # Page through the body 300 characters at a time.
                page_size = 300
                for i in range(0, len(content), page_size):
                    print(content[i:i+page_size])
                    if i + page_size < len(content):
                        input("按回车继续...")
            else:
                print("❌ 该文章暂无内容")
            print("-" * 60)
        return articles

    except Exception as e:
        print(f"❌ 读取文章失败: {e}")
        return None

# Interactive sub-menu built on the enhanced article-analysis helpers.
def interactive_enhanced_menu():
    """Interactive menu for enhanced features (analysis, export, filtering).

    Loads the full article list once up front, then loops on user input
    until the user chooses to return to the main menu.
    """
    articles = read_articles_from_db(enhanced=True)
    if not articles:
        return
    
    while True:
        print("\n" + "="*50)
        print("增强功能菜单:")
        print("1. 查看文章详细分析")
        print("2. 导出文章数据为JSON")
        print("3. 按标签筛选文章")
        print("4. 按分类筛选文章")
        print("5. 返回主菜单")
        print("="*50)
        
        choice = input("请选择功能 (1-5): ").strip()
        
        if choice == '1':
            # Detailed analysis of one article, selected by its list index.
            idx = input("请输入要分析的文章编号: ")
            try:
                idx = int(idx)
                if 1 <= idx <= len(articles):
                    title = articles[idx-1][0]
                    read_specific_article(title, enhanced=True)
                else:
                    print("编号超出范围")
            except ValueError:
                print("请输入有效的编号")
                
        elif choice == '2':
            # JSON export; ensure the filename carries a .json extension.
            filename = input("请输入导出文件名 (默认: exported_articles.json): ").strip()
            if not filename:
                filename = "exported_articles.json"
            if not filename.endswith('.json'):
                filename += '.json'
            export_articles_to_json(articles, filename)
            
        elif choice == '3':
            # Collect every tag across all articles, then filter by one.
            all_tags = set()
            for _, _, _, _, content in articles:
                if content:
                    tags = extract_tags_from_content(content)
                    all_tags.update(tags)
            
            if all_tags:
                print(f"可用标签: {', '.join(sorted(all_tags))}")
                selected_tag = input("请输入要筛选的标签: ").strip()
                if selected_tag:
                    filtered_articles = [
                        article for article in articles 
                        if selected_tag in extract_tags_from_content(article[4])
                    ]
                    if filtered_articles:
                        print(f"\n找到 {len(filtered_articles)} 篇包含标签 '{selected_tag}' 的文章:")
                        for i, (title, link, date, source, content) in enumerate(filtered_articles, 1):
                            print(f"{i}. {title}")
                    else:
                        print(f"未找到包含标签 '{selected_tag}' 的文章")
            else:
                print("未找到任何标签")
                
        elif choice == '4':
            # Filter by keyword-based category matching.
            categories = ["科技", "经济", "政治", "文化", "体育", "娱乐"]
            print(f"可用分类: {', '.join(categories)}")
            selected_category = input("请输入要筛选的分类: ").strip()
            if selected_category:
                filtered_articles = [
                    article for article in articles 
                    if selected_category in categorize_article(article[0], article[4], categories)
                ]
                if filtered_articles:
                    print(f"\n找到 {len(filtered_articles)} 篇分类为 '{selected_category}' 的文章:")
                    for i, (title, link, date, source, content) in enumerate(filtered_articles, 1):
                        print(f"{i}. {title}")
                else:
                    print(f"未找到分类为 '{selected_category}' 的文章")
                    
        elif choice == '5':
            break
        else:
            print("无效选择，请重新输入")

# Script entry point: dispatch on command-line arguments.
if __name__ == "__main__":
    import sys
    
    if len(sys.argv) > 1:
        if sys.argv[1] == "--enhanced":
            # Launch the enhanced interactive menu.
            interactive_enhanced_menu()
        else:
            # Treat the argument as a title keyword (fuzzy match).
            title = sys.argv[1]
            read_specific_article(title)
    else:
        # BUGFIX: the original called interactive_select_article(), which is
        # not defined anywhere in this file (NameError at runtime), and left
        # a stray indented "import sqlite3" from a bad paste. Fall back to
        # the interactive enhanced menu, which lists the articles and
        # supports selection as the original comment intended.
        interactive_enhanced_menu()
import os
import re
import json
from datetime import datetime
from typing import Dict, List, Any, Optional
import functools
import time
from contextlib import contextmanager

# Simple unbounded memoization decorator.
def memoize(func):
    """Cache results of *func*, keyed on the stringified call arguments.

    The cache grows without bound; use wrapper.clear_cache() to reset it
    and wrapper.cache_info() to inspect its current size.
    """
    results = {}

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Stringify so unhashable arguments can still be used as keys.
        cache_key = str(args) + str(sorted(kwargs.items()))
        try:
            return results[cache_key]
        except KeyError:
            value = func(*args, **kwargs)
            results[cache_key] = value
            return value

    # Expose cache maintenance hooks on the wrapper itself.
    wrapper.clear_cache = results.clear
    wrapper.cache_info = lambda: {"cache_size": len(results)}

    return wrapper

# Wall-clock timing decorator.
def timing_decorator(func):
    """Measure and print how long each call to *func* takes (seconds)."""
    @functools.wraps(func)
    def timed(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - started
        print(f"⏱️  {func.__name__} 执行时间: {elapsed:.4f} 秒")
        return result
    return timed

# Database connection context manager.
@contextmanager
def get_db_connection():
    """Yield a sqlite3 connection to the article database.

    Rows are returned as sqlite3.Row so columns are addressable by name.
    Rolls back on error and always closes the connection.
    """
    # NOTE(review): the second argument is an absolute Windows path, so
    # os.path.join ignores the dirname(__file__) prefix — confirm this
    # hard-coded path is intentional before deploying elsewhere.
    db_path = os.path.join(os.path.dirname(__file__), r'E:\新闻聚合与摘要平台\high-quality-info-aggregation-main\high-quality-info-aggregation-main\database\title_link.db')
    conn = None
    try:
        conn = sqlite3.connect(db_path)
        conn.row_factory = sqlite3.Row  # enable access by column name
        yield conn
    except Exception:
        if conn:
            conn.rollback()
        # BUGFIX: bare raise instead of "raise e" — re-raising the bound
        # name resets the traceback origin to this line; a bare raise
        # preserves the original traceback for the caller.
        raise
    finally:
        if conn:
            conn.close()

# Pattern for a safe ORDER BY clause: a single identifier plus an optional
# ASC/DESC keyword. ORDER BY terms cannot be bound as SQL parameters, so the
# clause is validated before interpolation to rule out SQL injection.
_ORDER_BY_RE = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*(\s+(?:ASC|DESC))?$', re.IGNORECASE)

def fetch_articles_paginated(page: int = 1, page_size: int = 50, order_by: str = "date DESC"):
    """Fetch one page of articles without loading the whole table.

    Parameters:
        page: 1-based page number.
        page_size: number of rows per page.
        order_by: ORDER BY clause ("column [ASC|DESC]"); validated against a
            whitelist pattern before being interpolated into the SQL.

    Returns:
        A list of (title, link, date, source, content) tuples; [] on error
        (including a rejected order_by clause).
    """
    try:
        # BUGFIX: order_by used to be interpolated unvalidated into the SQL
        # string. The ValueError is caught by the except below, so bad input
        # degrades to [] just like any other query failure.
        if not _ORDER_BY_RE.match(order_by):
            raise ValueError(f"invalid order_by clause: {order_by!r}")
        offset = (page - 1) * page_size
        with get_db_connection() as conn:
            cursor = conn.cursor()
            cursor.execute(f'''
                SELECT title, link, date, source, content 
                FROM title_link 
                ORDER BY {order_by}
                LIMIT ? OFFSET ?
            ''', (page_size, offset))
            
            articles = cursor.fetchall()
            # Convert sqlite3.Row objects to plain tuples for the callers.
            return [tuple(row) for row in articles]
    except Exception as e:
        print(f"❌ 分页查询失败: {e}")
        return []

# Row-count helper used by the paginated readers.
@timing_decorator
def get_articles_count():
    """Return the total number of rows in title_link, or 0 on error."""
    try:
        with get_db_connection() as conn:
            row = conn.cursor().execute('SELECT COUNT(*) FROM title_link').fetchone()
            return row[0]
    except Exception as e:
        print(f"❌ 获取文章总数失败: {e}")
        return 0

# Paginated reader: fetches one page at a time instead of the whole table.
@timing_decorator
def read_articles_from_db(enhanced: bool = False, page: int = 1, page_size: int = 50):
    """Read one page of articles from the database and print them.

    NOTE: this redefinition replaces the earlier non-paginated version of
    the same name once the module has fully loaded.

    Parameters:
        enhanced: when True, also print per-article stats, tags and summary.
        page: 1-based page number to display.
        page_size: number of articles per page.

    Returns:
        The list of rows for the requested page, or [] on error.
    """
    try:
        # Fetch only the requested page.
        articles = fetch_articles_paginated(page, page_size)
        
        if page == 1:  # only show the global count on the first page
            total_count = get_articles_count()
            print(f"📊 数据库中共有 {total_count} 篇文章")
            print("=" * 60)
        
        # Print one entry per article; numbering continues across pages.
        for i, (title, link, date, source, content) in enumerate(articles, 1 + (page-1) * page_size):
            has_content = "✅" if content and content.strip() else "❌"
            print(f"{i:2d}. {has_content} {title}")
            print(f"     📅 {date} | 📰 {source}")
            print(f"     🔗 {link}")
            
            if content and content.strip():
                print(f"     📝 内容长度: {len(content)} 字符")
                print(f"     📄 内容预览: {content[:100]}...")
                
                # Extra per-article analysis when enhanced mode is on.
                if enhanced:
                    stats = analyze_article_content(content)
                    tags = extract_tags_from_content(content)
                    summary = generate_content_summary(content)
                    
                    print(f"     📊 字数统计: {stats['word_count']} 字")
                    print(f"     🏷️  标签: {', '.join(tags) if tags else '无'}")
                    print(f"     📋 内容摘要: {summary}")
            print("-" * 60)
        
        if page == 1:  # per-page statistics, shown on the first page only
            # Count articles on this page that actually have content.
            articles_with_content = sum(1 for _, _, _, _, content in articles if content and content.strip())
            print(f"\n📈 当前页统计信息:")
            print(f"   当前页文章数: {len(articles)}")
            print(f"   有内容的文章: {articles_with_content}")
            print(f"   无内容的文章: {len(articles) - articles_with_content}")
        
        return articles
        
    except Exception as e:
        print(f"❌ 读取数据库失败: {e}")
        return []

# Simulated asynchronous preload (runs synchronously in this demo).
def load_articles_async(callback=None):
    """Preload the first 100 articles, simulating an asynchronous load.

    A real implementation would hand the work to a thread or event loop;
    here it simply runs inline. The optional *callback* receives the
    loaded article list before it is returned. Returns [] on failure.
    """
    print("🔄 正在后台加载文章数据...")

    try:
        # Preload the first 100 articles in one page.
        preloaded = fetch_articles_paginated(1, 100)
        if callback:
            callback(preloaded)
        return preloaded
    except Exception as e:
        print(f"❌ 异步加载失败: {e}")
        return []

# Memoized variants of the analysis helpers.
@memoize
def cached_analyze_article_content(content: str) -> Dict[str, int]:
    """Memoized wrapper around analyze_article_content."""
    stats = analyze_article_content(content)
    return stats

@memoize
def cached_extract_tags_from_content(content: str) -> List[str]:
    """Memoized wrapper around extract_tags_from_content."""
    tags = extract_tags_from_content(content)
    return tags

@memoize
def cached_generate_content_summary(content: str, max_length: int = 200) -> str:
    """Memoized wrapper around generate_content_summary."""
    text = generate_content_summary(content, max_length)
    return text

# Optimized listing: renders only the first page, using memoized helpers.
def read_articles_from_db_optimized(enhanced: bool = False, use_cache: bool = True):
    """Read and print the first page of articles (optimized variant).

    Parameters:
        enhanced: when True, also print per-article stats, tags and summary.
        use_cache: when True, route analysis through the memoized helpers.

    Returns:
        The list of rows for the first page, or [] on error.
    """
    try:
        # Total row count drives the page bookkeeping below.
        total_count = get_articles_count()
        
        # Ceiling division spelled out explicitly.
        page_size = 50
        all_articles = []
        total_pages = (total_count // page_size) + (1 if total_count % page_size > 0 else 0)
        
        print(f"📊 数据库中共有 {total_count} 篇文章，共 {total_pages} 页")
        
        # Only the first page is rendered, to keep the UI responsive.
        articles = fetch_articles_paginated(1, page_size)
        all_articles.extend(articles)
        
        print("=" * 60)
        print(f"显示第 1/{total_pages} 页:")
        
        # Print one entry per article.
        for i, (title, link, date, source, content) in enumerate(articles, 1):
            has_content = "✅" if content and content.strip() else "❌"
            print(f"{i:2d}. {has_content} {title}")
            print(f"     📅 {date} | 📰 {source}")
            print(f"     🔗 {link}")
            
            if content and content.strip():
                print(f"     📝 内容长度: {len(content)} 字符")
                print(f"     📄 内容预览: {content[:100]}...")
                
                # Extra per-article analysis when enhanced mode is on.
                if enhanced:
                    # Route through the memoized wrappers when caching is on.
                    analyze_func = cached_analyze_article_content if use_cache else analyze_article_content
                    extract_func = cached_extract_tags_from_content if use_cache else extract_tags_from_content
                    summary_func = cached_generate_content_summary if use_cache else generate_content_summary
                    
                    stats = analyze_func(content)
                    tags = extract_func(content)
                    summary = summary_func(content)
                    
                    print(f"     📊 字数统计: {stats['word_count']} 字")
                    print(f"     🏷️  标签: {', '.join(tags) if tags else '无'}")
                    print(f"     📋 内容摘要: {summary}")
            print("-" * 60)
        
        # First-page statistics.
        articles_with_content = sum(1 for _, _, _, _, content in articles if content and content.strip())
        print(f"\n📈 第1页统计信息:")
        print(f"   当前页文章数: {len(articles)}")
        print(f"   有内容的文章: {articles_with_content}")
        print(f"   无内容的文章: {len(articles) - articles_with_content}")
        
        if total_pages > 1:
            print(f"\n➡️  还有 {total_pages - 1} 页文章，使用 '查看更多' 功能查看")
        
        return articles
        
    except Exception as e:
        print(f"❌ 读取数据库失败: {e}")
        return []

# Batch generator to keep peak memory usage low during exports.
def batch_process_articles(articles: List[tuple], batch_size: int = 10):
    """Yield successive slices of *articles* of at most *batch_size* items."""
    start = 0
    while start < len(articles):
        yield articles[start:start + batch_size]
        start += batch_size

# Streaming JSON export: articles are processed in batches and written
# incrementally so the whole enriched document never sits in memory.
@timing_decorator
def export_articles_to_json_optimized(articles: List[tuple], filename: str = "exported_articles.json", batch_size: int = 50):
    """Export article rows to JSON, streaming batch by batch.

    Parameters:
        articles: iterable of (title, link, date, source, content) tuples.
        filename: output path for the JSON document.
        batch_size: number of rows processed per batch.

    Returns:
        True on success, False if writing failed.
    """
    categories = ["科技", "经济", "政治", "文化", "体育", "娱乐"]  # sample category set

    try:
        with open(filename, 'w', encoding='utf-8') as f:
            # Hand-rolled top-level JSON array so each element can be
            # serialized and written independently.
            f.write('[\n')
            first_item = True

            for batch in batch_process_articles(articles, batch_size):
                for article in batch:
                    if not first_item:
                        f.write(',\n')
                    first_item = False

                    title, link, date, source, content = article

                    processed_article = {
                        "title": title,
                        "link": link,
                        "date": date,
                        "source": source,
                        "has_content": bool(content and content.strip()),
                        "content_length": len(content) if content else 0
                    }

                    if content and content.strip():
                        # Enrich with derived analysis only for real content.
                        processed_article.update({
                            "metadata": extract_article_metadata(content),
                            "stats": analyze_article_content(content),
                            "tags": extract_tags_from_content(content),
                            "summary": generate_content_summary(content),
                            "categories": categorize_article(title, content, categories)
                        })

                    json.dump(processed_article, f, ensure_ascii=False, indent=2)

            f.write('\n]')

        # BUGFIX: the success message printed a literal "(unknown)" — the
        # f-string had no placeholder; report the actual output filename.
        print(f"✅ 文章数据已导出到 {filename}")
        return True
    except Exception as e:
        print(f"❌ 导出失败: {e}")
        return False

# Small LRU cache for article query results.
class ArticleCacheManager:
    """LRU cache for article rows, keyed by a string id.

    Attributes:
        cache_size: maximum number of cached entries.
        article_cache: mapping of article_id -> cached data.
        access_order: ids ordered from least- to most-recently used.
    """

    def __init__(self, cache_size: int = 100):
        self.cache_size = cache_size
        self.article_cache = {}
        self.access_order = []

    def add_to_cache(self, article_id: str, article_data: tuple):
        """Insert or refresh an entry, evicting the LRU one when full."""
        if article_id in self.article_cache:
            # Refresh recency for an existing entry.
            self.access_order.remove(article_id)
        elif len(self.article_cache) >= self.cache_size:
            # Evict the least-recently-used entry.
            evicted = self.access_order.pop(0)
            del self.article_cache[evicted]
        self.article_cache[article_id] = article_data
        self.access_order.append(article_id)

    def get_from_cache(self, article_id: str) -> Optional[tuple]:
        """Return the cached entry (refreshing recency) or None on a miss."""
        if article_id not in self.article_cache:
            return None
        self.access_order.remove(article_id)
        self.access_order.append(article_id)
        return self.article_cache[article_id]

    def clear_cache(self):
        """Drop every cached entry."""
        self.article_cache.clear()
        self.access_order.clear()

# Module-level cache manager shared by the read/search helpers below.
article_cache_manager = ArticleCacheManager()

# Cached title search: results are stored in the global ArticleCacheManager.
def _query_articles_by_title(title_keyword):
    """Run the fuzzy title query and return matching rows as plain tuples."""
    with get_db_connection() as conn:
        cursor = conn.cursor()
        cursor.execute('''
            SELECT title, link, date, source, content 
            FROM title_link 
            WHERE title LIKE ?
            ORDER BY date DESC
        ''', (f'%{title_keyword}%',))
        return [tuple(row) for row in cursor.fetchall()]

@timing_decorator
def read_specific_article(title_keyword, enhanced: bool = False, use_cache: bool = True):
    """Fuzzy-search articles by title keyword and print each match.

    Parameters:
        title_keyword: substring matched against the title via SQL LIKE.
        enhanced: when True, also print metadata, stats, tags, categories
            and a generated summary for each match.
        use_cache: when True, reuse/populate the module-level result cache
            and route analysis through the memoized helpers.

    Returns:
        The list of matching rows, or None when nothing matched or on error.
    """
    try:
        cache_key = f"title_search:{title_keyword}"
        if use_cache:
            cached_result = article_cache_manager.get_from_cache(cache_key)
            if cached_result:
                print("📦 从缓存中获取结果")
                # BUGFIX: the original cached only articles[0] and rebuilt the
                # result as [cached_result], so a cache hit silently dropped
                # every match but the first. The full result list is now
                # cached and restored intact.
                articles = cached_result
            else:
                articles = _query_articles_by_title(title_keyword)
                if articles:
                    article_cache_manager.add_to_cache(cache_key, articles)
        else:
            articles = _query_articles_by_title(title_keyword)
        
        if not articles:
            print(f"❌ 未找到标题包含 '{title_keyword}' 的文章")
            return None
        
        categories = ["科技", "经济", "政治", "文化", "体育", "娱乐"]  # sample category set
        
        for idx, (title, link, date, source, content) in enumerate(articles, 1):
            print(f"\n📰 文章 {idx}:")
            print(f"标题: {title}")
            print(f"链接: {link}")
            print(f"日期: {date}")
            print(f"来源: {source}")
            
            if enhanced and content:
                # Prefer the memoized analysis helpers when caching is on.
                analyze_func = cached_analyze_article_content if use_cache else analyze_article_content
                extract_func = cached_extract_tags_from_content if use_cache else extract_tags_from_content
                
                metadata = extract_article_metadata(content)
                stats = analyze_func(content)
                tags = extract_func(content)
                article_categories = categorize_article(title, content, categories)
                
                if metadata:
                    print(f"元数据: {metadata}")
                print(f"统计信息: {stats}")
                if tags:
                    print(f"标签: {', '.join(tags)}")
                if article_categories:
                    print(f"分类: {', '.join(article_categories)}")
            
            print("=" * 60)
            print("文章内容:")
            print("=" * 60)
            if content and content.strip():
                # In enhanced mode, show the generated summary first.
                if enhanced:
                    summary_func = cached_generate_content_summary if use_cache else generate_content_summary
                    summary = summary_func(content)
                    print(f"📋 内容摘要: {summary}\n")
                
                # Page through the body 300 characters at a time.
                page_size = 300
                for i in range(0, len(content), page_size):
                    print(content[i:i+page_size])
                    if i + page_size < len(content):
                        input("按回车继续...")
            else:
                print("❌ 该文章暂无内容")
            print("-" * 60)
        return articles

    except Exception as e:
        print(f"❌ 读取文章失败: {e}")
        return None

# Lightweight per-operation performance monitor.
class PerformanceMonitor:
    """Accumulates call counts and wall-clock timings per named operation."""

    def __init__(self):
        # operation name -> {'count', 'total_time', 'avg_time', 'start_time'}
        self.metrics = {}

    def start_timer(self, operation_name: str):
        """Begin timing one invocation of *operation_name*.

        BUGFIX: the original replaced the whole metrics entry here, wiping
        the 'total_time'/'avg_time' accumulated by previous start/end
        cycles; the entry is now updated in place so totals accumulate.
        """
        entry = self.metrics.setdefault(operation_name, {'count': 0})
        entry['count'] = entry.get('count', 0) + 1
        entry['start_time'] = time.time()

    def end_timer(self, operation_name: str):
        """Stop the running timer and fold the elapsed time into the stats."""
        entry = self.metrics.get(operation_name)
        if entry and 'start_time' in entry:
            elapsed = time.time() - entry.pop('start_time')
            entry['total_time'] = entry.get('total_time', 0) + elapsed
            entry['avg_time'] = entry['total_time'] / entry['count']

    def report(self):
        """Print a per-operation summary of counts and timings."""
        print("\n📈 性能报告:")
        print("=" * 50)
        for operation, data in self.metrics.items():
            print(f"{operation}:")
            print(f"  调用次数: {data.get('count', 0)}")
            print(f"  总耗时: {data.get('total_time', 0):.4f} 秒")
            print(f"  平均耗时: {data.get('avg_time', 0):.4f} 秒")
            print("-" * 30)

# Module-level performance monitor shared by the interactive menus.
perf_monitor = PerformanceMonitor()

# Interactive menu exposing the caching / performance tooling.
def interactive_performance_menu():
    """Interactive menu for cache inspection and performance tooling.

    Loops on user input until the user chooses to return to the parent menu.
    """
    while True:
        print("\n" + "="*50)
        print("🚀 性能优化菜单:")
        print("1. 查看缓存信息")
        print("2. 清空缓存")
        print("3. 查看性能报告")
        print("4. 使用优化版文章列表")
        print("5. 异步预加载文章")
        print("6. 返回上级菜单")
        print("="*50)
        
        choice = input("请选择功能 (1-6): ").strip()
        
        if choice == '1':
            # Show the contents of the article cache.
            cache_info = article_cache_manager.article_cache
            print(f"📦 当前缓存文章数: {len(cache_info)}")
            if cache_info:
                print("缓存的文章:")
                for i, (key, article) in enumerate(cache_info.items(), 1):
                    print(f"  {i}. {article[0][:50]}..." if article[0] else f"  {i}. [无标题]")
        
        elif choice == '2':
            # Clear the article cache…
            article_cache_manager.clear_cache()
            # …and the memoized helper caches.
            cached_analyze_article_content.clear_cache()
            cached_extract_tags_from_content.clear_cache()
            cached_generate_content_summary.clear_cache()
            print("✅ 缓存已清空")
        
        elif choice == '3':
            # Print accumulated timing metrics.
            perf_monitor.report()
        
        elif choice == '4':
            # Run the optimized listing, optionally with memoized analysis.
            use_cache = input("是否使用缓存提高性能？(y/n，默认y): ").strip().lower()
            use_cache = use_cache != 'n'
            read_articles_from_db_optimized(enhanced=True, use_cache=use_cache)
        
        elif choice == '5':
            # Simulated asynchronous preload of the first batch of articles.
            def on_load_complete(articles):
                print(f"✅ 预加载完成，加载了 {len(articles)} 篇文章")
            
            loaded_articles = load_articles_async(on_load_complete)
            print(f"📥 预加载文章数: {len(loaded_articles)}")
        
        elif choice == '6':
            break
        else:
            print("❌ 无效选择")

# Enhanced menu (v2): same features as the earlier version plus an entry into
# the performance sub-menu; operations are timed via perf_monitor.
def interactive_enhanced_menu():
    """Interactive menu for enhanced features, with performance tracking.

    NOTE: this redefinition replaces the earlier version of the same name
    once the module has fully loaded.
    """
    # Time the whole menu session.
    perf_monitor.start_timer("enhanced_menu")
    
    articles = read_articles_from_db_optimized(enhanced=True)
    if not articles:
        perf_monitor.end_timer("enhanced_menu")
        return
    
    while True:
        print("\n" + "="*50)
        print("增强功能菜单:")
        print("1. 查看文章详细分析")
        print("2. 导出文章数据为JSON")
        print("3. 按标签筛选文章")
        print("4. 按分类筛选文章")
        print("5. 性能优化选项")
        print("6. 返回主菜单")
        print("="*50)
        
        choice = input("请选择功能 (1-6): ").strip()
        
        if choice == '1':
            # Detailed analysis of one article, selected by its list index.
            perf_monitor.start_timer("article_analysis")
            idx = input("请输入要分析的文章编号: ")
            try:
                idx = int(idx)
                if 1 <= idx <= len(articles):
                    title = articles[idx-1][0]
                    read_specific_article(title, enhanced=True)
                else:
                    print("编号超出范围")
            except ValueError:
                print("请输入有效的编号")
            perf_monitor.end_timer("article_analysis")
                
        elif choice == '2':
            # JSON export; ensure the filename carries a .json extension.
            perf_monitor.start_timer("export_json")
            filename = input("请输入导出文件名 (默认: exported_articles.json): ").strip()
            if not filename:
                filename = "exported_articles.json"
            if not filename.endswith('.json'):
                filename += '.json'
            export_articles_to_json_optimized(articles, filename)
            perf_monitor.end_timer("export_json")
            
        elif choice == '3':
            # Collect every tag across all articles, then filter by one.
            perf_monitor.start_timer("filter_tags")
            all_tags = set()
            for _, _, _, _, content in articles:
                if content:
                    tags = extract_tags_from_content(content)
                    all_tags.update(tags)
            
            if all_tags:
                print(f"可用标签: {', '.join(sorted(all_tags))}")
                selected_tag = input("请输入要筛选的标签: ").strip()
                if selected_tag:
                    filtered_articles = [
                        article for article in articles 
                        if selected_tag in extract_tags_from_content(article[4])
                    ]
                    if filtered_articles:
                        print(f"\n找到 {len(filtered_articles)} 篇包含标签 '{selected_tag}' 的文章:")
                        for i, (title, link, date, source, content) in enumerate(filtered_articles, 1):
                            print(f"{i}. {title}")
                    else:
                        print(f"未找到包含标签 '{selected_tag}' 的文章")
            else:
                print("未找到任何标签")
            perf_monitor.end_timer("filter_tags")
                
        elif choice == '4':
            # Filter by keyword-based category matching.
            perf_monitor.start_timer("filter_categories")
            categories = ["科技", "经济", "政治", "文化", "体育", "娱乐"]
            print(f"可用分类: {', '.join(categories)}")
            selected_category = input("请输入要筛选的分类: ").strip()
            if selected_category:
                filtered_articles = [
                    article for article in articles 
                    if selected_category in categorize_article(article[0], article[4], categories)
                ]
                if filtered_articles:
                    print(f"\n找到 {len(filtered_articles)} 篇分类为 '{selected_category}' 的文章:")
                    for i, (title, link, date, source, content) in enumerate(filtered_articles, 1):
                        print(f"{i}. {title}")
                else:
                    print(f"未找到分类为 '{selected_category}' 的文章")
            perf_monitor.end_timer("filter_categories")
                    
        elif choice == '5':
            # Enter the performance-optimization sub-menu.
            interactive_performance_menu()
            
        elif choice == '6':
            break
        else:
            print("无效选择，请重新输入")
    
    perf_monitor.end_timer("enhanced_menu")