#!/usr/bin/env python3
"""
分析数据库中的法条内容，提取真实关键词
"""

from app import create_app
from app.services.civil_code_service import CivilCodeService
import re
from collections import Counter
import jieba
import jieba.posseg as pseg

def analyze_keywords():
    """Fetch all statute articles, mine keywords, and print search suggestions.

    Returns the suggestion list on success, or an empty list if anything
    in the pipeline fails (the error is printed with a traceback).
    """
    app = create_app()

    with app.app_context():
        try:
            service = CivilCodeService()

            print("正在获取法条数据...")

            # Collect articles part by part; a failure fetching one part
            # is reported but does not abort the whole run.
            collected = []
            for part in ("第一编", "第二编", "第三编", "第四编", "第五编", "第六编", "第七编"):
                try:
                    chunk = service.get_articles_by_chapter(part, per_page=200).get('articles', [])
                except Exception as e:
                    print(f"获取 {part} 失败: {e}")
                else:
                    collected.extend(chunk)
                    print(f"获取 {part}: {len(chunk)} 条")

            print(f"总共获取到 {len(collected)} 条法条")

            # Frequency stats over article titles.
            title_keywords = analyze_titles(collected)
            print(f"\n=== 标题关键词分析 ===")
            print("前20个高频标题关键词:")
            for keyword, count in title_keywords.most_common(20):
                print(f"  {keyword}: {count}")

            # Frequency stats over article bodies — first 100 articles only,
            # as a quick sample run.
            content_keywords = analyze_content(collected[:100])
            print(f"\n=== 内容关键词分析 ===")
            print("前20个高频内容关键词:")
            for keyword, count in content_keywords.most_common(20):
                print(f"  {keyword}: {count}")

            # Merge the two keyword pools into a ranked suggestion list.
            suggestions = generate_search_suggestions(title_keywords, content_keywords)
            print(f"\n=== 推荐搜索建议 ===")
            for i, suggestion in enumerate(suggestions, 1):
                print(f"  {i}. {suggestion}")

            return suggestions

        except Exception as e:
            print(f"分析失败: {str(e)}")
            import traceback
            traceback.print_exc()
            return []

def analyze_titles(articles):
    """Count how often known legal terms appear in article titles.

    Each article is a dict; only its 'title' value is read. Titles are
    segmented with jieba and matched against a fixed vocabulary of legal
    terms. Returns a Counter mapping term -> occurrence count.
    """
    # Fixed whitelist of legal vocabulary worth counting.
    legal_terms = {
        '合同', '物权', '婚姻', '继承', '侵权', '人格权', '债权', '所有权', 
        '担保', '抵押', '质押', '留置', '用益物权', '占有', '违约', '损害赔偿',
        '民事权利', '民事义务', '法人', '自然人', '监护', '代理', '诉讼时效'
    }

    counts = Counter()
    for article in articles:
        title = article.get('title', '')
        if not title:
            continue
        # Keep only multi-character tokens that are in the whitelist.
        counts.update(
            token for token in jieba.lcut(title)
            if len(token) >= 2 and token in legal_terms
        )
    return counts

def analyze_content(articles):
    """Count content-bearing words across article bodies.

    Each article is a dict; only its 'content' value is read. The text is
    POS-tagged with jieba, and only nouns (tags starting with 'n') and
    verbs (tags starting with 'v') of length >= 2 that are not stop words
    are counted. Returns a Counter mapping word -> occurrence count.
    """
    # Function words, numerals, and structural markers to discard.
    stop_words = {
        '的', '了', '在', '是', '有', '和', '或者', '以及', '但是', '如果', 
        '应当', '可以', '不得', '应该', '能够', '需要', '必须', '禁止',
        '第', '条', '款', '项', '编', '章', '节', '一', '二', '三', '四', '五',
        '本', '其', '该', '此', '所', '之', '于', '为', '从', '由', '对', '与'
    }

    counts = Counter()
    for article in articles:
        text = article.get('content', '')
        if not text:
            continue
        for word, tag in pseg.lcut(text):
            # Nouns and verbs only, at least two characters, not a stop word.
            if len(word) >= 2 and tag[:1] in ('n', 'v') and word not in stop_words:
                counts[word] += 1
    return counts

def generate_search_suggestions(title_keywords, content_keywords):
    """Build up to 12 search suggestions from two keyword Counters.

    Title keywords are weighted triple relative to content keywords.
    The list always starts with six curated legal concepts, then is
    topped up with the highest-frequency mined keywords (deduplicated).
    """
    # Merge the pools; a title hit is worth three content hits.
    weighted = Counter()
    weighted.update({word: count * 3 for word, count in title_keywords.items()})
    weighted.update(content_keywords)

    # Curated legal concepts that should always be offered first.
    important_terms = [
        '合同纠纷', '物权保护', '婚姻家庭', '继承遗产', '人格权', '侵权责任',
        '民事权利', '债权债务', '担保物权', '知识产权'
    ]

    suggestions = list(important_terms[:6])

    # Fill remaining slots from the top-15 mined keywords, skipping
    # anything already present, capped at 12 suggestions total.
    for word, _ in weighted.most_common(15):
        if len(suggestions) >= 12:
            break
        if word not in suggestions:
            suggestions.append(word)

    return suggestions[:12]

if __name__ == '__main__':
    # Preload jieba's dictionary up front so later tokenization calls are fast.
    jieba.initialize()
    suggestions = analyze_keywords()
