"""
内容质量评分器
"""
import re
import math
import logging
from typing import Dict, List, Tuple, Optional, Any
from collections import Counter
import numpy as np

# jieba延迟导入，避免启动时加载缓慢

from ..models.quality import (
    QualityDimension,
    ScenarioType,
    DimensionScore,
    ScoreResult,
    ScoreRequest
)

logger = logging.getLogger(__name__)


def _get_jieba():
    """Lazily import the jieba tokenizer so module import stays fast."""
    import jieba as jieba_module
    return jieba_module


def _get_jieba_posseg():
    """Lazily import jieba.posseg (part-of-speech tagging) on first use."""
    import jieba.posseg as posseg_module
    return posseg_module


class QualityScorer:
    """Content quality scorer: rates text across five quality dimensions."""

    def __init__(self):
        """Initialize the scorer with per-scenario default dimension weights."""
        # jieba initializes itself lazily on first use; nothing to warm up here.

        # Each row gives the weights for one scenario, ordered as:
        # (originality, readability, information density,
        #  emotional resonance, structure integrity). Every row sums to 1.0.
        scenario_weight_rows = {
            ScenarioType.NEWS: (0.15, 0.20, 0.30, 0.10, 0.25),
            ScenarioType.BLOG: (0.25, 0.25, 0.15, 0.20, 0.15),
            ScenarioType.TECHNICAL: (0.20, 0.15, 0.35, 0.05, 0.25),
            ScenarioType.CREATIVE: (0.35, 0.20, 0.10, 0.25, 0.10),
            ScenarioType.GENERAL: (0.20, 0.20, 0.20, 0.20, 0.20),
        }
        dimension_order = (
            QualityDimension.ORIGINALITY,
            QualityDimension.READABILITY,
            QualityDimension.INFORMATION_DENSITY,
            QualityDimension.EMOTIONAL_RESONANCE,
            QualityDimension.STRUCTURE_INTEGRITY,
        )

        # Expand the compact rows into {scenario: {dimension_value: weight}}.
        self.default_weights = {
            scenario: {
                dimension.value: weight
                for dimension, weight in zip(dimension_order, row)
            }
            for scenario, row in scenario_weight_rows.items()
        }
    
    async def score_content(
        self, 
        content: str,
        scenario: ScenarioType = ScenarioType.GENERAL,
        custom_weights: Optional[Dict[str, float]] = None
    ) -> ScoreResult:
        """
        Score a piece of content across all five quality dimensions.

        Args:
            content: Text to be scored.
            scenario: Scenario type used to select the default weight profile.
            custom_weights: Optional per-dimension weights; when given they
                fully replace the scenario defaults and must contain every
                dimension key.

        Returns:
            ScoreResult holding per-dimension scores, the weighted total and
            a human-readable explanation payload.
        """
        # Resolve the weight profile: explicit overrides win, otherwise the
        # scenario defaults (falling back to GENERAL for unknown scenarios).
        weights = custom_weights or self.default_weights.get(
            scenario,
            self.default_weights[ScenarioType.GENERAL]
        )

        # All five dimension scorers share the same contract —
        # (score, {'explanation', 'deductions'}) — so drive them from a table
        # instead of repeating the same assignment block five times.
        dimension_scorers = [
            (QualityDimension.ORIGINALITY, self.calculate_originality_score),
            (QualityDimension.READABILITY, self.calculate_readability_score),
            (QualityDimension.INFORMATION_DENSITY, self.calculate_information_density_score),
            (QualityDimension.EMOTIONAL_RESONANCE, self.calculate_emotional_resonance_score),
            (QualityDimension.STRUCTURE_INTEGRITY, self.calculate_structure_integrity_score),
        ]

        dimensions: Dict[str, DimensionScore] = {}
        for dimension, scorer in dimension_scorers:
            score, details = await scorer(content)
            dimensions[dimension.value] = DimensionScore(
                dimension=dimension,
                score=score,
                weight=weights[dimension.value],
                explanation=details['explanation'],
                deductions=details['deductions']
            )

        # Weighted total plus the aggregate explanation.
        total_score = self.calculate_total_score(dimensions)
        explanation = self.generate_score_explanation(dimensions, total_score)

        return ScoreResult(
            total_score=total_score,
            dimensions=dimensions,
            scenario=scenario,
            weights=weights,
            explanation=explanation
        )
    
    async def calculate_originality_score(self, content: str) -> Tuple[float, Dict]:
        """
        Score originality.

        Evaluates lexical diversity (type-token ratio), sentence-shape
        variation, reliance on stock phrases, and character-trigram
        uniqueness. Starts at 100, applies deductions, floors at 0.

        Returns:
            (score, {'explanation': str, 'deductions': [str, ...]})
        """
        score = 100.0
        deductions = []

        # Tokenize and split into sentences (terminal punctuation or newline).
        tokens = list(_get_jieba().cut(content))
        sentence_list = [piece.strip() for piece in re.split(r'[。！？\n]', content) if piece.strip()]

        # 1. Lexical diversity via the type-token ratio.
        if tokens:
            ttr = len(set(tokens)) / len(tokens)
            if ttr < 0.3:
                score -= 20
                deductions.append("词汇重复度过高 (-20)")
            elif ttr < 0.5:
                score -= 10
                deductions.append("词汇多样性不足 (-10)")

        # 2. Sentence-shape variety, measured by length variance.
        if len(sentence_list) > 3:
            length_variance = np.var([len(s) for s in sentence_list])
            if length_variance < 10:
                score -= 15
                deductions.append("句式过于单一 (-15)")
            elif length_variance < 50:
                score -= 8
                deductions.append("句式变化较少 (-8)")

        # 3. Overuse of stock transitional phrases.
        stock_phrases = (
            "众所周知", "毫无疑问", "不言而喻", "总而言之",
            "综上所述", "由此可见", "不难发现", "值得一提"
        )
        phrase_count = sum(1 for phrase in stock_phrases if phrase in content)
        if phrase_count > 5:
            score -= 10
            deductions.append(f"过多使用常见短语 ({phrase_count}个) (-10)")
        elif phrase_count > 3:
            score -= 5
            deductions.append(f"较多使用常见短语 ({phrase_count}个) (-5)")

        # 4. Character trigram uniqueness as a templating signal.
        if len(content) > 50:
            trigrams = [content[i:i + 3] for i in range(len(content) - 2)]
            uniqueness_ratio = len(set(trigrams)) / len(trigrams) if trigrams else 0
            if uniqueness_ratio < 0.6:
                score -= 10
                deductions.append("内容模式化程度高 (-10)")

        final_score = max(0, score)
        return final_score, {
            'explanation': f"原创性得分 {final_score:.1f}分，基于词汇多样性、句式变化和独特表达评估",
            'deductions': deductions
        }
    
    async def calculate_readability_score(self, content: str) -> Tuple[float, Dict]:
        """
        Calculate the readability score.

        Analyzes sentence length, paragraph structure, punctuation density,
        vocabulary complexity and connective usage. Starts at 100, applies
        deductions, floors at 0.

        Args:
            content: Text to evaluate.

        Returns:
            (score, {'explanation': str, 'deductions': [str, ...]})
        """
        score = 100.0
        deductions = []

        # Sentences split on terminal punctuation, paragraphs on newlines.
        sentences = re.split(r'[。！？]', content)
        sentences = [s.strip() for s in sentences if s.strip()]
        paragraphs = content.split('\n')
        paragraphs = [p.strip() for p in paragraphs if p.strip()]

        # 1. Average sentence length: both overly long and overly short
        # sentences hurt readability.
        if sentences:
            avg_sentence_length = sum(len(s) for s in sentences) / len(sentences)

            if avg_sentence_length > 60:
                score -= 20
                deductions.append(f"句子过长，平均{avg_sentence_length:.0f}字 (-20)")
            elif avg_sentence_length > 40:
                score -= 10
                deductions.append(f"句子偏长，平均{avg_sentence_length:.0f}字 (-10)")
            elif avg_sentence_length < 10:
                score -= 15
                deductions.append(f"句子过短，平均{avg_sentence_length:.0f}字 (-15)")

        # 2. Paragraph structure: long content needs paragraph breaks, and
        # paragraphs should not be too long on average.
        if len(paragraphs) == 1 and len(content) > 500:
            score -= 15
            deductions.append("缺少段落划分 (-15)")
        elif paragraphs:
            avg_para_length = len(content) / len(paragraphs)
            if avg_para_length > 500:
                score -= 10
                deductions.append("段落过长 (-10)")

        # 3. Punctuation density. BUGFIX: guard against empty content — the
        # original divided by len(content) unconditionally and raised
        # ZeroDivisionError for an empty string.
        if content:
            punctuation_ratio = len(re.findall(r'[，。！？；：、]', content)) / len(content)
            if punctuation_ratio < 0.02:
                score -= 10
                deductions.append("标点符号使用不足 (-10)")
            elif punctuation_ratio > 0.15:
                score -= 8
                deductions.append("标点符号过多 (-8)")

        # 4. Complex-vocabulary ratio (tokens longer than 4 characters).
        words = list(_get_jieba().cut(content))
        long_words = [w for w in words if len(w) > 4]
        if words:
            complex_ratio = len(long_words) / len(words)
            if complex_ratio > 0.3:
                score -= 10
                deductions.append("复杂词汇过多 (-10)")

        # 5. Connective usage: longer texts should use transition words.
        connectors = ["但是", "然而", "因此", "所以", "并且", "而且", "同时", "另外", "此外"]
        connector_count = sum(1 for c in connectors if c in content)
        if len(sentences) > 5 and connector_count < 2:
            score -= 8
            deductions.append("缺少连接词，影响流畅性 (-8)")

        explanation = f"可读性得分 {max(0, score):.1f}分，基于句子长度、段落结构和语言流畅性评估"

        return max(0, score), {
            'explanation': explanation,
            'deductions': deductions
        }
    
    async def calculate_information_density_score(self, content: str) -> Tuple[float, Dict]:
        """
        Calculate the information-density score.

        Evaluates content-word density, factual support (numbers, dates,
        percentages), keyword focus, argumentative depth and redundancy.
        Starts at 100, applies deductions, floors at 0.

        Args:
            content: Text to evaluate.

        Returns:
            (score, {'explanation': str, 'deductions': [str, ...]})
        """
        score = 100.0
        deductions = []

        # Tokenize and split into sentences.
        words = list(_get_jieba().cut(content))
        sentences = re.split(r'[。！？]', content)
        sentences = [s.strip() for s in sentences if s.strip()]

        # 1. Information density: ratio of content words (non-stopword,
        # multi-character tokens) to all tokens.
        if words:
            stop_words = {'的', '了', '是', '在', '和', '与', '或', '但', '而', '就', '也', '还', '有', '这', '那', '些', '个', '都'}
            content_words = [w for w in words if w not in stop_words and len(w) > 1]
            info_density = len(content_words) / len(words)

            if info_density < 0.3:
                score -= 20
                deductions.append("信息密度过低，空话较多 (-20)")
            elif info_density < 0.5:
                score -= 10
                deductions.append("信息密度偏低 (-10)")

        # 2. Factual support: numbers, percentages and dates, with an
        # expectation of roughly 3 facts per 1000 characters (minimum 2).
        numbers = re.findall(r'\d+', content)
        percentages = re.findall(r'\d+[%％]', content)
        dates = re.findall(r'\d{4}年|\d{1,2}月|\d{1,2}日', content)

        fact_indicators = len(numbers) + len(percentages) + len(dates)
        content_length_factor = len(content) / 1000
        expected_facts = max(2, int(content_length_factor * 3))

        if fact_indicators < expected_facts:
            score -= 15
            deductions.append(f"缺少数据支撑，仅{fact_indicators}处事实性信息 (-15)")

        # 3. Keyword focus: longer texts should have a recurring core topic
        # among their most frequent nouns.
        words_pos = _get_jieba_posseg().cut(content)
        nouns = [w.word for w in words_pos if w.flag.startswith('n') and len(w.word) > 1]

        if nouns:
            noun_freq = Counter(nouns)
            top_keywords = noun_freq.most_common(5)

            if top_keywords:
                max_freq = top_keywords[0][1]
                if max_freq < 3 and len(content) > 500:
                    score -= 10
                    deductions.append("缺少明确的核心主题 (-10)")

        # 4. Depth: share of long (>30 char) sentences as a complexity proxy.
        if sentences:
            complex_sentences = [s for s in sentences if len(s) > 30]
            complexity_ratio = len(complex_sentences) / len(sentences)

            if complexity_ratio < 0.2 and len(content) > 300:
                score -= 8
                deductions.append("内容深度不足，缺少复杂论述 (-8)")

        # 5. Redundancy: deduct (at most once) if any sentence pair is highly
        # similar. BUGFIX: the original used `if score < 90: break` as the
        # stop condition, which silently skipped this check whenever earlier
        # deductions had already lowered the score below 90, and could apply
        # the redundancy deduction several times otherwise. An explicit flag
        # enforces the intent: always check, deduct once.
        if len(sentences) > 3:
            redundancy_found = False
            for i, sent1 in enumerate(sentences[:-1]):
                for sent2 in sentences[i+1:]:
                    if len(sent1) > 10 and len(sent2) > 10:
                        # Cheap character-set overlap as a similarity proxy.
                        common = len(set(sent1) & set(sent2))
                        similarity = common / min(len(sent1), len(sent2))
                        if similarity > 0.8:
                            score -= 10
                            deductions.append("存在重复或高度相似的内容 (-10)")
                            redundancy_found = True
                            break
                if redundancy_found:
                    break

        explanation = f"信息量得分 {max(0, score):.1f}分，基于信息密度、数据支撑和内容深度评估"

        return max(0, score), {
            'explanation': explanation,
            'deductions': deductions
        }
    
    async def calculate_emotional_resonance_score(self, content: str) -> Tuple[float, Dict]:
        """
        Score emotional resonance.

        Looks at emotion vocabulary, expressive punctuation, personal voice,
        storytelling cues and calls to action. Starts at 100, applies
        deductions, floors at 0.

        Returns:
            (score, {'explanation': str, 'deductions': [str, ...]})
        """
        score = 100.0
        deductions = []

        # Small built-in sentiment lexicons.
        positive_words = {
            '喜欢', '快乐', '幸福', '美好', '优秀', '成功', '希望', '梦想', '热爱',
            '感动', '温暖', '激动', '振奋', '自豪', '满意', '欣慰', '珍惜', '感激'
        }
        negative_words = {
            '悲伤', '失望', '愤怒', '焦虑', '担心', '恐惧', '痛苦', '遗憾', '无奈',
            '困难', '挫折', '失败', '沮丧', '孤独', '迷茫', '压力', '疲惫', '绝望'
        }
        # Expressive markers with a weight per occurrence.
        emotional_markers = {
            '！': 2, '？': 1.5, '...': 1.5, '～': 1, '~': 1,
            '哈哈': 2, '呵呵': 1, '唉': 1.5, '啊': 1
        }

        tokens = list(_get_jieba().cut(content))
        token_total = len(tokens)
        text_length = len(content)

        # 1. Emotion-word ratio: both emotionally flat and over-saturated
        # text lose points.
        emotion_word_count = sum(tok in positive_words or tok in negative_words for tok in tokens)
        if token_total > 50:
            emotion_ratio = emotion_word_count / token_total
            if emotion_ratio < 0.01:
                score -= 25
                deductions.append("缺乏情感表达 (-25)")
            elif emotion_ratio < 0.03:
                score -= 15
                deductions.append("情感表达不足 (-15)")
            elif emotion_ratio > 0.15:
                score -= 10
                deductions.append("情感表达过度 (-10)")

        # 2. Weighted count of expressive punctuation and interjections.
        marker_score = sum(content.count(marker) * weight for marker, weight in emotional_markers.items())
        if marker_score == 0 and text_length > 200:
            score -= 10
            deductions.append("缺少情感标记符号 (-10)")
        elif marker_score > text_length / 50:
            score -= 8
            deductions.append("情感符号使用过多 (-8)")

        # 3. Personal voice via pronoun frequency.
        personal_pronouns = ['我', '我们', '你', '你们', '咱们', '大家']
        personal_count = sum(tok in personal_pronouns for tok in tokens)
        if token_total > 50 and personal_count / token_total < 0.005:
            score -= 12
            deductions.append("缺少个人化表达，过于客观 (-12)")

        # 4. Storytelling / concreteness cues in longer texts.
        story_indicators = ['记得', '曾经', '那天', '那时', '有一次', '例如', '比如']
        story_count = sum(1 for indicator in story_indicators if indicator in content)
        if text_length > 500 and story_count < 2:
            score -= 10
            deductions.append("缺少具体案例或故事 (-10)")

        # 5. Reader-engagement / call-to-action cues.
        call_to_action = ['希望', '期待', '让我们', '一起', '愿', '祝', '请', '欢迎']
        cta_count = sum(1 for cta in call_to_action if cta in content)
        if text_length > 300 and cta_count < 1:
            score -= 8
            deductions.append("缺少读者互动元素 (-8)")

        final_score = max(0, score)
        return final_score, {
            'explanation': f"情感共鸣得分 {final_score:.1f}分，基于情感表达、个人化和互动性评估",
            'deductions': deductions
        }
    
    async def calculate_structure_integrity_score(self, content: str) -> Tuple[float, Dict]:
        """
        Calculate the structural-integrity score.

        Checks opening/closing structure, paragraph segmentation, logical
        connectives, cross-paragraph topic consistency and (for long texts)
        section headings. Starts at 100, applies deductions, floors at 0.

        Args:
            content: Text to evaluate.

        Returns:
            (score, {'explanation': str, 'deductions': [str, ...]})
        """
        score = 100.0
        deductions = []

        # Paragraph and sentence segmentation.
        paragraphs = content.split('\n')
        paragraphs = [p.strip() for p in paragraphs if p.strip()]
        sentences = re.split(r'[。！？]', content)
        sentences = [s.strip() for s in sentences if s.strip()]

        # 1. Basic structure: a recognizable opening and, for longer texts,
        # a summarizing close.
        if len(content) > 200:
            first_para = paragraphs[0] if paragraphs else content[:100]
            if not any(kw in first_para for kw in ['本文', '今天', '最近', '我们', '随着', '在']):
                score -= 8
                deductions.append("开头不够明确 (-8)")

            last_para = paragraphs[-1] if paragraphs else content[-100:]
            conclusion_keywords = ['总之', '综上', '总结', '最后', '因此', '所以', '展望', '未来']
            if not any(kw in last_para for kw in conclusion_keywords) and len(content) > 500:
                score -= 10
                deductions.append("缺少总结或结尾 (-10)")

        # 2. Paragraph segmentation: presence, maximum length and balance.
        if len(paragraphs) == 1 and len(content) > 300:
            score -= 20
            deductions.append("没有段落划分 (-20)")
        elif paragraphs:
            para_lengths = [len(p) for p in paragraphs]
            if max(para_lengths) > 500:
                score -= 10
                deductions.append("存在过长段落 (-10)")

            # Coefficient of variation of paragraph lengths as a balance metric.
            if len(para_lengths) > 2:
                length_variance = np.var(para_lengths)
                mean_length = np.mean(para_lengths)
                cv = math.sqrt(length_variance) / mean_length if mean_length > 0 else 0
                if cv > 1.5:
                    score -= 8
                    deductions.append("段落长度不均衡 (-8)")

        # 3. Logical connectives (causal / adversative / progressive / ordinal).
        logical_connectors = {
            '因果': ['因为', '所以', '因此', '由于', '导致', '造成', '引起'],
            '转折': ['但是', '然而', '不过', '虽然', '尽管', '却', '反而'],
            '递进': ['而且', '并且', '另外', '此外', '同时', '不仅', '更'],
            '顺序': ['首先', '其次', '然后', '最后', '第一', '第二', '接着']
        }

        connector_count = 0
        for category, connectors in logical_connectors.items():
            for connector in connectors:
                connector_count += content.count(connector)

        if len(sentences) > 5 and connector_count < 3:
            score -= 12
            deductions.append("逻辑连接词不足，影响连贯性 (-12)")

        # 4. Topic consistency: noun-keyword overlap between adjacent paragraphs.
        # FIX: removed a dead `list(_get_jieba().cut(para))` call whose result
        # was never used — it ran an extra segmentation pass per paragraph.
        if len(paragraphs) > 2:
            para_keywords = []
            for para in paragraphs:
                words_pos = _get_jieba_posseg().cut(para)
                keywords = [w.word for w in words_pos if w.flag.startswith('n') and len(w.word) > 1]
                para_keywords.append(set(keywords[:10]))  # top 10 keywords per paragraph

            if len(para_keywords) > 1:
                total_overlap = 0
                comparisons = 0
                for i in range(len(para_keywords) - 1):
                    if para_keywords[i] and para_keywords[i + 1]:
                        overlap = len(para_keywords[i] & para_keywords[i + 1])
                        total_overlap += overlap
                        comparisons += 1

                if comparisons > 0:
                    avg_overlap = total_overlap / comparisons
                    if avg_overlap < 1:
                        score -= 10
                        deductions.append("段落间主题跳跃，缺乏连贯性 (-10)")

        # 5. Section headings for long articles (short standalone paragraphs
        # are treated as headings).
        if len(content) > 1000:
            short_paras = [p for p in paragraphs if 5 < len(p) < 20]
            if len(short_paras) < 2:
                score -= 8
                deductions.append("长文缺少章节或小标题划分 (-8)")

        explanation = f"结构完整性得分 {max(0, score):.1f}分，基于文章结构、逻辑连贯性和主题一致性评估"

        return max(0, score), {
            'explanation': explanation,
            'deductions': deductions
        }
    
    def calculate_total_score(self, dimensions: Dict[str, DimensionScore]) -> float:
        """
        Compute the weighted total score.

        Args:
            dimensions: Mapping of dimension name to its DimensionScore.

        Returns:
            Sum of score * weight over all dimensions, rounded to 2 decimals.
        """
        weighted_sum = sum(dim.score * dim.weight for dim in dimensions.values())
        return round(weighted_sum, 2)
    
    def generate_score_explanation(
        self, 
        dimensions: Dict[str, DimensionScore],
        total_score: float
    ) -> Dict[str, Any]:
        """
        Build a human-readable explanation payload for a score.

        Args:
            dimensions: Per-dimension scores.
            total_score: Weighted total score.

        Returns:
            Dict with grade, strengths, weaknesses, improvement suggestions
            and a per-dimension summary.
        """
        # Map the total score onto a coarse grade band.
        if total_score >= 85:
            grade, grade_desc = "优秀", "内容质量很高，各方面表现出色"
        elif total_score >= 70:
            grade, grade_desc = "良好", "内容质量较好，部分方面可以改进"
        elif total_score >= 60:
            grade, grade_desc = "及格", "内容质量一般，需要较多改进"
        else:
            grade, grade_desc = "需改进", "内容质量较差，需要大幅改进"

        # Rank dimensions from best to worst.
        ranked = sorted(
            dimensions.items(),
            key=lambda item: item[1].score,
            reverse=True
        )

        # The top two dimensions scoring at least 70 count as strengths.
        strengths = [
            {
                'dimension': name,
                'score': dim.score,
                'description': dim.explanation
            }
            for name, dim in ranked[:2]
            if dim.score >= 70
        ]

        # The bottom two dimensions scoring below 70 count as weaknesses.
        weaknesses = [
            {
                'dimension': name,
                'score': dim.score,
                'deductions': dim.deductions
            }
            for name, dim in ranked[-2:]
            if dim.score < 70
        ]

        # One canned suggestion per dimension that falls below 60.
        suggestion_texts = {
            QualityDimension.ORIGINALITY.value: "增加独特观点和创新表达",
            QualityDimension.READABILITY.value: "优化句子长度和段落结构",
            QualityDimension.INFORMATION_DENSITY.value: "增加具体数据和事实支撑",
            QualityDimension.EMOTIONAL_RESONANCE.value: "加强情感表达和读者互动",
            QualityDimension.STRUCTURE_INTEGRITY.value: "完善文章结构和逻辑连接",
        }
        suggestions = [
            suggestion_texts[name]
            for name, dim in dimensions.items()
            if dim.score < 60 and name in suggestion_texts
        ]

        return {
            'grade': grade,
            'grade_description': grade_desc,
            'total_score': total_score,
            'strengths': strengths,
            'weaknesses': weaknesses,
            'suggestions': suggestions,
            'dimension_summary': {
                name: {
                    'score': dim.score,
                    'weight': dim.weight,
                    'weighted_score': dim.score * dim.weight
                }
                for name, dim in dimensions.items()
            }
        }