#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
文本特征提取器模块
负责从各种文本来源提取特征，为大模型分析提供支持
"""

import os
import re
import jieba
import jieba.analyse
import logging
import json
from typing import Dict, Any, List, Optional, Tuple
from datetime import datetime
from collections import Counter, defaultdict

# Logging setup.
# NOTE(review): calling basicConfig at import time configures the process-wide
# root logger as a side effect; library modules conventionally leave this to
# the application and only create their own named logger.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
)
logger = logging.getLogger(__name__)


class TextFeatureExtractor:
    """
    文本特征提取器
    从文本中提取关键词、情感倾向、主题特征等
    """
    
    def __init__(self, 
                 user_dict_path: str = None,
                 stopwords_path: str = None,
                 config_path: str = None):
        """Initialize the text feature extractor.

        Args:
            user_dict_path: path to a jieba user dictionary; falls back to the
                bundled ``data/user_dict.txt`` when omitted and present.
            stopwords_path: path to a stopword list; falls back to the bundled
                ``data/stopwords.txt`` when omitted and present.
            config_path: path to a JSON configuration file (optional).
        """
        # Configuration first — the other loaders consult it.
        self._config = self._load_config(config_path)

        base_dir = os.path.dirname(__file__)

        # User dictionary: explicit path wins, otherwise use the bundled
        # default only when it actually exists on disk.
        if not user_dict_path:
            candidate = os.path.join(base_dir, "data", "user_dict.txt")
            user_dict_path = candidate if os.path.exists(candidate) else None
        if user_dict_path:
            self._load_user_dict(user_dict_path)

        # Stopwords follow the same explicit-then-bundled-default policy.
        self._stopwords = set()
        if not stopwords_path:
            candidate = os.path.join(base_dir, "data", "stopwords.txt")
            stopwords_path = candidate if os.path.exists(candidate) else None
        if stopwords_path:
            self._load_stopwords(stopwords_path)

        # Sentiment lexicon and topic feature vocabulary.
        self._sentiment_words = self._load_sentiment_words()
        self._topic_features = self._load_topic_features()

        logger.info("文本特征提取器初始化完成")
    
    def _load_config(self, config_path: str = None) -> Dict[str, Any]:
        """Build the effective configuration.

        Starts from built-in defaults and overlays any JSON file found at
        *config_path*; read/parse errors are logged and the defaults kept.

        Args:
            config_path: optional path to a JSON configuration file.

        Returns:
            The merged configuration dictionary.
        """
        defaults: Dict[str, Any] = {
            "keyword_extract_method": "tfidf",  # alternatives: tfidf, textrank
            "top_k": 10,
            "punctuation_pattern": r"[\s\t\n\r\,\.\;\:\!\?\!\"\'\(\)\[\]\{\}\/\\\|\<\>\@\#\$\%\^\&\*\_\-\+\=\`\~]",
            "min_word_length": 2,
            "sentence_split_pattern": r"[。！？\n]+"
        }

        # No (usable) override file: keep the defaults as-is.
        if not (config_path and os.path.exists(config_path)):
            return defaults

        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                defaults.update(json.load(f))
            logger.info(f"配置文件加载成功: {config_path}")
        except Exception as e:
            logger.error(f"加载配置文件失败: {str(e)}")

        return defaults
    
    def _load_user_dict(self, dict_path: str):
        """Register a custom word dictionary with jieba.

        Failures are logged rather than raised, so a missing or malformed
        dictionary file does not prevent the extractor from starting.

        Args:
            dict_path: path to the jieba-format dictionary file.
        """
        try:
            jieba.load_userdict(dict_path)
            logger.info(f"用户词典加载成功: {dict_path}")
        except Exception as e:
            logger.error(f"加载用户词典失败: {str(e)}")
    
    def _load_stopwords(self, stopwords_path: str):
        """Populate ``self._stopwords`` from a newline-delimited file.

        Blank lines are ignored. Read errors are logged and leave the
        current stopword set untouched.

        Args:
            stopwords_path: path to the stopword file (UTF-8, one word per line).
        """
        try:
            with open(stopwords_path, 'r', encoding='utf-8') as f:
                stripped = (line.strip() for line in f)
                self._stopwords.update(word for word in stripped if word)
            logger.info(f"停用词表加载成功: {stopwords_path}, 共 {len(self._stopwords)} 个停用词")
        except Exception as e:
            logger.error(f"加载停用词表失败: {str(e)}")
    
    def _load_sentiment_words(self) -> Dict[str, Dict[str, float]]:
        """Load the sentiment lexicon.

        Builds a built-in Chinese lexicon (word -> signed weight; positive
        words carry weights > 0, negative words < 0) and overlays the
        optional ``data/sentiment_words.json`` file located next to this
        module. Override-file errors are logged and ignored.

        Returns:
            Mapping with "positive" and "negative" word-weight dictionaries.
        """
        # Built-in lexicon. The original literals repeated several keys
        # (优秀/完美/实用 in positive, 差/失望 in negative) with identical
        # weights; the duplicates are removed here — the resulting dict
        # is value-for-value unchanged.
        sentiment_words = {
            "positive": {
                "好": 1.0, "优秀": 1.0, "棒": 1.0, "赞": 1.0, "喜欢": 1.0,
                "精彩": 1.0, "完美": 1.0, "满意": 1.0, "开心": 1.0, "高兴": 1.0,
                "幸福": 1.0, "快乐": 1.0, "激动": 1.0, "惊喜": 1.0, "感谢": 1.0,
                "成功": 1.0, "进步": 1.0, "希望": 0.8, "期待": 0.8,
                "推荐": 0.9, "支持": 0.9, "很棒": 1.0, "超赞": 1.0,
                "给力": 1.0, "厉害": 0.9,
                "实用": 0.8, "值得": 0.8, "有效": 0.8
            },
            "negative": {
                "差": -1.0, "糟糕": -1.0, "不好": -1.0, "失望": -1.0,
                "难过": -1.0, "生气": -1.0, "伤心": -1.0, "失败": -1.0, "困难": -0.8,
                "问题": -0.7, "缺点": -0.9, "错误": -0.9, "麻烦": -0.8,
                "贵": -0.6, "贵了": -0.7, "贵价": -0.8, "不好用": -0.9,
                "没用": -1.0, "浪费": -0.9, "遗憾": -0.8, "讨厌": -1.0,
                "无语": -0.7, "难用": -0.9, "垃圾": -1.0
            }
        }
        
        # Optional project-local overrides merged on top of the defaults.
        custom_sentiment_path = os.path.join(os.path.dirname(__file__), "data", "sentiment_words.json")
        if os.path.exists(custom_sentiment_path):
            try:
                with open(custom_sentiment_path, 'r', encoding='utf-8') as f:
                    custom_words = json.load(f)
                for polarity in ("positive", "negative"):
                    if polarity in custom_words:
                        sentiment_words[polarity].update(custom_words[polarity])
                logger.info(f"自定义情感词表加载成功: {custom_sentiment_path}")
            except Exception as e:
                logger.warning(f"加载自定义情感词表失败: {str(e)}")
        
        return sentiment_words
    
    def _load_topic_features(self) -> Dict[str, List[str]]:
        """Load the topic feature vocabulary.

        Returns the built-in mapping from topic label to characteristic
        words, optionally extended/overridden by ``data/topic_features.json``
        located next to this module. Override-file errors are logged and
        ignored.

        Returns:
            Mapping of topic name to its feature word list.
        """
        topic_features = {
            "学业分享": ["学习", "考试", "复习", "作业", "课程", "成绩", "学校", "老师", "学生",
                       "高考", "中考", "考研", "专升本", "证书", "专业", "教材", "笔记",
                       "知识点", "解题", "方法"],
            "生活实用": ["生活", "实用", "技巧", "妙招", "小技巧", "方法", "家居", "做饭", "烹饪",
                       "收纳", "整理", "清洁", "保养", "省钱", "省时", "效率", "工具",
                       "推荐", "好物"],
            "情感表达": ["心情", "感受", "情感", "爱情", "友情", "亲情", "家人", "朋友", "爱人",
                       "快乐", "悲伤", "感动", "幸福", "痛苦", "开心", "难过", "思念",
                       "回忆", "陪伴"],
            "娱乐搞笑": ["搞笑", "幽默", "段子", "笑话", "哈哈", "笑死", "有趣", "好玩", "娱乐",
                       "游戏", "动漫", "追剧", "综艺", "明星", "偶像", "演唱会", "电影",
                       "音乐", "舞蹈"],
            "科普解说": ["知识", "科普", "科学", "原理", "机制", "解释", "说明", "教程", "讲解",
                       "为什么", "如何", "原理", "理论", "实验", "发现", "研究", "历史",
                       "文化", "背景"],
            "美食推荐": ["美食", "好吃", "餐厅", "推荐", "打卡", "探店", "做饭", "烹饪", "食谱",
                       "食材", "甜点", "饮品", "咖啡", "奶茶", "火锅", "烧烤", "小吃",
                       "特色", "味道"],
            "旅行出游": ["旅行", "旅游", "出游", "景点", "攻略", "打卡", "风景", "美景", "酒店",
                       "机票", "交通", "行程", "自驾", "露营", "徒步", "摄影", "拍照",
                       "假期", "周末"]
        }
        
        # Merge optional user-supplied topics on top of the defaults.
        override_path = os.path.join(os.path.dirname(__file__), "data", "topic_features.json")
        if os.path.exists(override_path):
            try:
                with open(override_path, 'r', encoding='utf-8') as f:
                    topic_features.update(json.load(f))
                logger.info(f"自定义主题特征词表加载成功: {override_path}")
            except Exception as e:
                logger.warning(f"加载自定义主题特征词表失败: {str(e)}")
        
        return topic_features
    
    def extract_features(self, 
                        text: str,
                        text_type: str = "general",
                        extract_keywords: bool = True,
                        extract_sentiment: bool = True,
                        extract_topics: bool = True,
                        top_k: int = None) -> Dict[str, Any]:
        """Extract features from a single piece of text.

        Args:
            text: input text.
            text_type: text category (general, title, comment, description).
            extract_keywords: whether to include keyword extraction.
            extract_sentiment: whether to include sentiment analysis.
            extract_topics: whether to include topic analysis.
            top_k: number of keywords to return (config ``top_k`` when falsy).

        Returns:
            Feature dict; ``{"success": False, "error": ...}`` on bad input.
        """
        # Reject non-string or empty input up front.
        if not isinstance(text, str) or not text:
            return {"success": False, "error": "输入文本为空或无效"}
        
        processed_text = self._preprocess_text(text)
        
        result: Dict[str, Any] = {
            "success": True,
            "original_text": text,
            "processed_text": processed_text,
            "text_length": len(text),
            "processed_length": len(processed_text),
            "timestamp": datetime.now().isoformat()
        }
        
        tokens = self._tokenize(processed_text)
        result["word_count"] = len(tokens)
        
        if extract_keywords:
            limit = top_k or self._config["top_k"]
            kw = self._extract_keywords(processed_text, tokens, limit, text_type)
            result["keywords"] = kw["keywords"]
            result["keyword_weights"] = kw["keyword_weights"]
        
        if extract_sentiment:
            result["sentiment"] = self._analyze_sentiment(processed_text, tokens)
        
        if extract_topics:
            result["topics"] = self._analyze_topics(tokens)
        
        # Surface statistics are always computed.
        result["statistics"] = self._extract_statistics(text, processed_text, tokens)
        
        return result
    
    def _preprocess_text(self, text: str) -> str:
        """Normalize raw text before tokenization.

        Collapses whitespace, optionally strips punctuation (config key
        ``remove_punctuation``, default on) and optionally lowercases
        (config key ``to_lowercase``, default on).

        Args:
            text: raw input text.

        Returns:
            The normalized text.
        """
        default_punct = r"[\s\t\n\r\,\.\;\:\!\?\!\"\'\(\)\[\]\{\}\/\\\|\<\>\@\#\$\%\^\&\*\_\-\+\=\`\~]"
        
        normalized = re.sub(r'\s+', ' ', text)
        
        if self._config.get("remove_punctuation", True):
            pattern = self._config.get("punctuation_pattern", default_punct)
            normalized = re.sub(pattern, ' ', normalized)
        
        if self._config.get("to_lowercase", True):
            normalized = normalized.lower()
        
        # Collapse again: punctuation removal may have left runs of spaces.
        return re.sub(r'\s+', ' ', normalized).strip()
    
    def _tokenize(self, text: str) -> List[str]:
        """Segment text and drop noise tokens.

        Uses jieba in accurate (non-full) mode, then discards stopwords,
        tokens shorter than ``min_word_length`` and whitespace-only tokens.

        Args:
            text: preprocessed text.

        Returns:
            Filtered token list, in original order.
        """
        min_length = self._config.get("min_word_length", 2)
        
        return [
            token
            for token in jieba.cut(text, cut_all=False)  # accurate mode
            if token not in self._stopwords
            and len(token) >= min_length
            and token.strip()
        ]
    
    def _extract_keywords(self, 
                        text: str,
                        words: List[str],
                        top_k: int,
                        text_type: str = "general") -> Dict[str, Any]:
        """Extract ranked keywords from *text*.

        Primary extraction uses jieba's TextRank or TF-IDF, selected by
        config key ``keyword_extract_method``. If the extractor returns
        fewer than ``min(5, top_k)`` keywords the list is topped up from
        raw token frequencies; if it raises, frequencies are used alone.

        Args:
            text: preprocessed text.
            words: pre-tokenized words (frequency fallback source).
            top_k: maximum number of keywords to return.
            text_type: caller-supplied text category (not used here).

        Returns:
            Dict with "keywords" (list of str) and "keyword_weights"
            (list of {"keyword", "weight"} dicts).
        """
        min_len = self._config.get("min_word_length", 2)
        keywords: List[str] = []
        keyword_weights: List[Dict[str, Any]] = []
        
        try:
            if self._config.get("keyword_extract_method", "tfidf") == "textrank":
                ranked = jieba.analyse.textrank(text, topK=top_k, withWeight=True)
            else:
                # TF-IDF is the default; allowPOS=() disables POS filtering.
                ranked = jieba.analyse.extract_tags(text, topK=top_k, withWeight=True, allowPOS=())
            
            for term, weight in ranked:
                if len(term) >= min_len:
                    keywords.append(term)
                    keyword_weights.append({"keyword": term, "weight": round(weight, 6)})
            
            # Top up from token frequency when the extractor came back thin.
            if len(keywords) < min(5, top_k) and words:
                for term, freq in Counter(words).most_common(top_k):
                    if term not in keywords:
                        keywords.append(term)
                        keyword_weights.append({"keyword": term, "weight": round(freq / len(words), 6)})
                    if len(keywords) >= top_k:
                        break
        
        except Exception as e:
            logger.error(f"提取关键词失败: {str(e)}")
            # Degraded mode: frequency ranking only.
            for term, freq in Counter(words).most_common(top_k):
                keywords.append(term)
                keyword_weights.append({"keyword": term, "weight": round(freq / len(words), 6)})
        
        return {"keywords": keywords, "keyword_weights": keyword_weights}
    
    def _analyze_sentiment(self, text: str, words: List[str]) -> Dict[str, Any]:
        """Score sentiment by lexicon lookup over the tokens.

        Positive weights accumulate as-is; negative weights accumulate as
        magnitudes. The normalized score is (pos - neg) / max(pos + neg, 1)
        and is mapped to a three-way label with a 0.1 neutrality band.

        Args:
            text: preprocessed text (unused; kept for interface parity).
            words: token list to score.

        Returns:
            Sentiment summary (label, scores, matched words, confidence).
        """
        pos_lexicon = self._sentiment_words["positive"]
        neg_lexicon = self._sentiment_words["negative"]
        
        positive_hits: List[Dict[str, Any]] = []
        negative_hits: List[Dict[str, Any]] = []
        pos_total = 0.0
        neg_total = 0.0
        
        for token in words:
            if token in pos_lexicon:
                weight = pos_lexicon[token]
                pos_total += weight
                positive_hits.append({"word": token, "score": weight})
            if token in neg_lexicon:
                weight = abs(neg_lexicon[token])  # store as magnitude
                neg_total += weight
                negative_hits.append({"word": token, "score": weight})
        
        net = pos_total - neg_total
        # Denominator clamped to 1 so zero-hit texts normalize to 0.
        normalized = net / max(pos_total + neg_total, 1.0)
        
        if abs(normalized) < 0.1:
            label = "中性"
        elif normalized > 0:
            label = "积极正向"
        else:
            label = "消极负向"
        
        return {
            "label": label,
            "score": round(net, 4),
            "normalized_score": round(normalized, 4),
            "confidence": round(min(abs(normalized) * 100, 100), 2),
            "positive_score": round(pos_total, 4),
            "negative_score": round(neg_total, 4),
            "positive_words": positive_hits,
            "negative_words": negative_hits,
            "total_sentiment_words": len(positive_hits) + len(negative_hits)
        }
    
    def _analyze_topics(self, words: List[str]) -> List[Dict[str, Any]]:
        """Score the token list against each topic's feature vocabulary.

        For every topic, each token occurrence found in the topic's feature
        list contributes ``freq_weight * position_weight * 100``, where
        freq_weight is the token's relative frequency and position_weight
        decays linearly with the token's FIRST occurrence position (this
        matches the original ``list.index`` semantics).

        Args:
            words: token list.

        Returns:
            Up to five topics sorted by score, each with raw/normalized
            scores, confidence and the matched words.
        """
        # No tokens -> no topics (original produced [] here too).
        if not words:
            return []
        
        word_freq = Counter(words)
        total_words = len(words)
        
        # First-occurrence index per token. The original called
        # words.index(word) inside the match loop — O(n) per hit and a
        # dead "if word in words else 0.5" branch — this map yields the
        # identical value in O(1).
        first_index: Dict[str, int] = {}
        for idx, token in enumerate(words):
            first_index.setdefault(token, idx)
        
        topic_scores: Dict[str, float] = {}
        topic_matched_words: Dict[str, List[Dict[str, Any]]] = {}
        
        for topic, feature_words in self._topic_features.items():
            feature_set = set(feature_words)  # O(1) membership tests
            score = 0.0
            matched: List[Dict[str, Any]] = []
            
            for token in words:
                if token in feature_set:
                    freq_weight = word_freq[token] / total_words
                    # Earlier first occurrence -> larger weight.
                    position_weight = 1.0 - (first_index[token] / total_words)
                    token_score = freq_weight * position_weight * 100
                    score += token_score
                    matched.append({"word": token, "score": round(token_score, 4)})
            
            if score > 0:
                topic_scores[topic] = score
                topic_matched_words[topic] = matched
        
        # Rank and keep the strongest five topics. max_score is hoisted —
        # the original recomputed max() on every loop iteration.
        ranked = sorted(topic_scores.items(), key=lambda item: item[1], reverse=True)
        max_score = max(topic_scores.values()) if topic_scores else 0.0
        
        topics_result: List[Dict[str, Any]] = []
        for topic, score in ranked[:5]:
            normalized = (score / max_score) * 100 if max_score > 0 else 0
            topics_result.append({
                "topic": topic,
                "score": round(score, 4),
                "normalized_score": round(normalized, 2),
                "confidence": round(min(normalized, 100), 2),
                "matched_words": topic_matched_words[topic],
                "match_count": len(topic_matched_words[topic])
            })
        
        return topics_result
    
    def _extract_statistics(self, 
                          original_text: str,
                          processed_text: str,
                          words: List[str]) -> Dict[str, Any]:
        """Compute surface statistics for a text.

        Args:
            original_text: raw input (used for sentence splitting).
            processed_text: normalized text (unused; kept for interface parity).
            words: token list.

        Returns:
            Counts, averages, lexical diversity and the ten most
            frequent words.
        """
        # Sentence segmentation on the configured terminator pattern.
        splitter = self._config.get("sentence_split_pattern", r"[。！？\n]+")
        sentences = [chunk.strip() for chunk in re.split(splitter, original_text) if chunk.strip()]
        
        word_freq = Counter(words)
        distinct = set(words)
        token_count = len(words)
        
        avg_word_length = (sum(map(len, words)) / token_count) if token_count else 0
        avg_sentence_length = (sum(map(len, sentences)) / len(sentences)) if sentences else 0
        diversity = (len(distinct) / token_count) if token_count else 0
        
        return {
            "sentence_count": len(sentences),
            "avg_sentence_length": round(avg_sentence_length, 2),
            "avg_word_length": round(avg_word_length, 2),
            "unique_word_count": len(distinct),
            "unique_word_ratio": round(diversity, 4),
            "word_diversity": round(diversity, 4),  # same value, kept for API compatibility
            "top_words": [{"word": w, "frequency": n} for w, n in word_freq.most_common(10)]
        }
    
    def extract_features_from_multiple_sources(self, 
                                             text_sources: Dict[str, str],
                                             **kwargs) -> Dict[str, Any]:
        """Extract and merge features across several text sources.

        Runs :meth:`extract_features` per source, then merges keywords
        (deduplicated, with provenance), sentiment (confidence-weighted)
        and topics (confidence-weighted) across all sources.

        Args:
            text_sources: mapping of source name -> text; falsy texts are skipped.
            **kwargs: forwarded to :meth:`extract_features`.

        Returns:
            Per-source features plus combined keyword/sentiment/topic views;
            ``{"success": False, "error": ...}`` on bad input.
        """
        if not text_sources or not isinstance(text_sources, dict):
            return {"success": False, "error": "文本来源为空或无效"}
        
        source_features: Dict[str, Any] = {}
        keyword_sources = defaultdict(set)  # keyword -> set of source names
        sentiment_entries: List[Dict[str, Any]] = []
        topics_by_name = defaultdict(list)  # topic name -> per-source entries
        
        for source_type, text in text_sources.items():
            if not text:
                continue
            features = self.extract_features(text, text_type=source_type, **kwargs)
            source_features[source_type] = features
            if not features.get("success"):
                continue
            
            # Keywords: record which sources produced each keyword.
            for keyword in features.get("keywords", []):
                keyword_sources[keyword].add(source_type)
            
            # Sentiment: keep normalized score + confidence per source.
            if "sentiment" in features:
                sentiment = features["sentiment"]
                sentiment_entries.append({
                    "score": sentiment["normalized_score"],
                    "confidence": sentiment["confidence"],
                    "source": source_type
                })
            
            # Topics: group entries by topic name for later merging.
            for topic in features.get("topics", []):
                topics_by_name[topic["topic"]].append({
                    "score": topic["normalized_score"],
                    "confidence": topic["confidence"],
                    "source": source_type,
                    "matched_words": topic["matched_words"]
                })
        
        return {
            "success": True,
            "source_features": source_features,
            "combined_keywords": [
                {"keyword": keyword, "sources": list(sources)}
                for keyword, sources in keyword_sources.items()
            ][:20],  # cap at 20
            "combined_sentiment": self._combine_sentiment_scores(sentiment_entries),
            "combined_topics": self._combine_topics(topics_by_name),
            "timestamp": datetime.now().isoformat()
        }
    
    def _combine_sentiment_scores(self, sentiment_scores: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Merge per-source sentiment entries into one verdict.

        Scores are averaged using each entry's confidence (0-100) as its
        weight; the combined confidence is the plain mean of confidences.

        Args:
            sentiment_scores: entries with "score", "confidence", "source".

        Returns:
            Combined label, score, confidence and source count.
        """
        if not sentiment_scores:
            return {"label": "中性", "score": 0.0, "confidence": 0.0, "source_count": 0}
        
        total_weight = 0.0
        weighted_sum = 0.0
        for entry in sentiment_scores:
            w = entry["confidence"] / 100.0
            total_weight += w
            weighted_sum += entry["score"] * w
        
        avg_score = weighted_sum / total_weight if total_weight > 0 else 0.0
        
        # Same three-way mapping (0.1 neutrality band) as single-text analysis.
        if abs(avg_score) < 0.1:
            label = "中性"
        elif avg_score > 0:
            label = "积极正向"
        else:
            label = "消极负向"
        
        mean_confidence = sum(e["confidence"] for e in sentiment_scores) / len(sentiment_scores)
        
        return {
            "label": label,
            "score": round(avg_score, 4),
            "confidence": round(mean_confidence, 2),
            "source_count": len(sentiment_scores)
        }
    
    def _combine_topics(self, topics_dict: Dict[str, List[Dict[str, Any]]]) -> List[Dict[str, Any]]:
        """Merge per-source topic entries and keep the top five.

        Each topic's score is a confidence-weighted average over its
        entries; the result is sorted by mean confidence, descending.

        Args:
            topics_dict: mapping of topic name -> per-source entry list.

        Returns:
            Up to five merged topic summaries, most confident first.
        """
        merged: List[Dict[str, Any]] = []
        
        for topic, entries in topics_dict.items():
            weighted_sum = 0.0
            total_weight = 0.0
            matched_words: List[Dict[str, Any]] = []
            sources = set()
            
            for entry in entries:
                w = entry["confidence"] / 100.0
                total_weight += w
                weighted_sum += entry["score"] * w
                matched_words.extend(entry.get("matched_words", []))
                sources.add(entry["source"])
            
            avg_score = weighted_sum / total_weight if total_weight > 0 else 0.0
            mean_confidence = sum(e["confidence"] for e in entries) / len(entries)
            
            merged.append({
                "topic": topic,
                "score": round(avg_score, 2),
                "confidence": round(mean_confidence, 2),
                "sources": list(sources),
                "matched_words_count": len(matched_words),
                "source_count": len(sources)
            })
        
        merged.sort(key=lambda item: item["confidence"], reverse=True)
        return merged[:5]


# Usage example / smoke test: runs only when the module is executed directly.
if __name__ == '__main__':
    try:
        print("文本特征提取器初始化中...")
        
        # Build an extractor with the default (bundled) resources.
        extractor = TextFeatureExtractor()
        
        print("\n文本特征提取器初始化成功!")
        print("\n测试文本特征提取:")
        
        # Sample text to analyze.
        sample_text = "这个视频分享了很多学习技巧，非常实用，我按照这些方法复习，成绩提高了不少！"
        
        # Run the full feature pipeline.
        features = extractor.extract_features(sample_text)
        
        # Print the results. NOTE(review): assumes features["success"] is
        # True — the sample text is non-empty so the guard cannot trigger.
        print(f"\n原始文本: {sample_text}")
        print(f"文本长度: {features['text_length']}")
        print(f"分词数量: {features['word_count']}")
        
        print("\n关键词:")
        for i, keyword in enumerate(features['keywords'], 1):
            print(f"  {i}. {keyword}")
        
        print("\n情感分析:")
        sentiment = features['sentiment']
        print(f"  情感倾向: {sentiment['label']}")
        print(f"  置信度: {sentiment['confidence']}%")
        print(f"  积极词汇: {len(sentiment['positive_words'])}")
        print(f"  消极词汇: {len(sentiment['negative_words'])}")
        
        print("\n主题分析:")
        for topic in features['topics']:
            print(f"  主题: {topic['topic']} (置信度: {topic['confidence']}%)")
        
        print("\n统计信息:")
        stats = features['statistics']
        print(f"  句子数量: {stats['sentence_count']}")
        print(f"  平均句长: {stats['avg_sentence_length']}")
        print(f"  词汇多样性: {stats['word_diversity']:.4f}")
        
    # NOTE(review): this catch-all also covers extraction/printing failures,
    # yet the message reads "initialization failed" — slightly misleading.
    except Exception as e:
        print(f"初始化失败: {str(e)}")