"""
模型对比评分系统
实现多维度内容质量评分
"""

import re
from typing import Dict, Any, Optional
import jieba
import numpy as np
from textstat import flesch_reading_ease, flesch_kincaid_grade
import logging

logger = logging.getLogger(__name__)


class ComparisonScorer:
    """
    Multi-dimensional content scorer.

    Rates a piece of generated text on four dimensions -- quality,
    relevance, creativity and fluency -- each on a 0-1 scale, then
    combines them into a weighted overall score.
    """

    # Common Chinese stopwords removed before keyword matching in
    # _score_relevance.
    _STOPWORDS = frozenset({
        '的', '了', '是', '在', '我', '有', '和', '就', '不', '人', '都', '一',
        '这', '中', '大', '为', '上', '个', '地', '到', '出', '要', '以', '时',
        '会', '可', '也', '你', '对', '生', '能', '而', '子', '那', '得', '于',
        '着', '下', '自', '之', '年', '过', '发', '后', '作', '里', '用', '道',
        '行', '所', '然', '家', '种', '事', '成', '方', '多', '经', '么', '去',
        '法', '学', '如', '同', '现', '当', '没', '动', '面', '起', '看', '定',
        '天', '分', '还', '进', '好', '小', '部', '其', '些', '主', '样', '理',
        '心', '她', '本', '前', '开', '但', '因', '只', '从', '想', '实',
    })

    # Discourse connectors used as a cheap proxy for sentence coherence.
    _CONNECTORS = (
        '因此', '所以', '然而', '但是', '而且', '并且', '同时', '另外', '此外',
        '首先', '其次', '最后', '总之', '综上所述', '例如', '比如', '即',
        '也就是说',
    )

    # Obvious grammar-slip patterns: accidentally doubled function words.
    _GRAMMAR_ERROR_PATTERNS = (r'的的', r'了了', r'在在', r'是是')

    def __init__(self, weights: Optional[Dict[str, float]] = None):
        """
        Initialize the scorer.

        Args:
            weights: Per-dimension weights (keys: "quality", "relevance",
                "creativity", "fluency"). Defaults to equal weights.
                Weights are normalized so they sum to 1.
        """
        self.weights = self._normalize_weights(weights or {
            "quality": 0.25,
            "relevance": 0.25,
            "creativity": 0.25,
            "fluency": 0.25,
        })

    @staticmethod
    def _normalize_weights(weights: Dict[str, float]) -> Dict[str, float]:
        """Scale weights so they sum to 1; equal weights if the sum is <= 0."""
        total = sum(weights.values())
        if total > 0:
            return {k: v / total for k, v in weights.items()}
        # Degenerate configuration (zero or negative sum): fall back to
        # equal weights rather than keeping unusable values, which would
        # otherwise force every overall score to 0.
        count = len(weights) or 1
        return {k: 1.0 / count for k in weights}

    async def score_content(self,
                            content: str,
                            prompt: str,
                            reference: Optional[str] = None) -> Dict[str, float]:
        """
        Score a piece of content on every dimension.

        Args:
            content: The text to score.
            prompt: The original prompt the content was generated from.
            reference: Optional reference text for the creativity check.

        Returns:
            Per-dimension scores plus a weighted "overall" score, each
            rounded to two decimal places.
        """
        scores = {
            "quality": await self._score_quality(content),
            "relevance": await self._score_relevance(content, prompt),
            "creativity": await self._score_creativity(content, reference),
            "fluency": await self._score_fluency(content),
        }

        # Weighted combination; 0.25 is the fallback for a missing weight key.
        scores["overall"] = sum(
            scores[dim] * self.weights.get(dim, 0.25)
            for dim in ["quality", "relevance", "creativity", "fluency"]
        )

        return {k: round(v, 2) for k, v in scores.items()}

    async def _score_quality(self, content: str) -> float:
        """
        Score intrinsic content quality (0-1).

        Factors: overall length, paragraph structure, lexical diversity,
        and basic formatting (punctuation and line structure).

        Args:
            content: The text to evaluate.

        Returns:
            Quality score in [0, 1].
        """
        score = 0.0

        # Length: 100-5000 characters is considered ideal.
        length = len(content)
        if 100 <= length <= 5000:
            length_score = 1.0
        elif length < 100:
            length_score = length / 100
        else:
            # Linearly penalize overly long content, floor at 0.5.
            length_score = max(0.5, 1.0 - (length - 5000) / 10000)
        score += length_score * 0.3

        # Paragraph structure: 2-10 non-empty paragraphs is ideal.
        paragraph_count = len(
            [p for p in content.split('\n\n') if p.strip()]
        )
        if 2 <= paragraph_count <= 10:
            para_score = 1.0
        elif paragraph_count < 2:
            para_score = 0.5
        else:
            para_score = max(0.6, 1.0 - (paragraph_count - 10) / 20)
        score += para_score * 0.3

        # Lexical diversity: unique/total token ratio; 0.5 maps to full marks.
        words = jieba.lcut(content)
        if words:
            diversity = len(set(words)) / len(words)
            diversity_score = min(1.0, diversity * 2)
        else:
            diversity_score = 0
        score += diversity_score * 0.2

        # Formatting: presence of CJK punctuation and of line breaks.
        has_punctuation = bool(re.search(r'[。！？，、；：]', content))
        has_structure = bool(re.search(r'[\n\r]', content))
        format_score = (0.5 if has_punctuation else 0) + (0.5 if has_structure else 0)
        score += format_score * 0.2

        return min(1.0, score)

    async def _score_relevance(self, content: str, prompt: str) -> float:
        """
        Score how relevant the content is to the prompt (0-1).

        Combines keyword coverage with a simplified term-frequency
        cosine similarity.

        Args:
            content: Generated text.
            prompt: Original prompt.

        Returns:
            Relevance score in [0, 1].
        """
        # Tokenize and drop stopwords on both sides.
        prompt_words = set(jieba.lcut(prompt)) - self._STOPWORDS
        content_words = set(jieba.lcut(content)) - self._STOPWORDS

        if not prompt_words:
            # No meaningful keywords in the prompt: return a neutral score.
            return 0.5

        # Fraction of prompt keywords that appear in the content.
        coverage = len(prompt_words & content_words) / len(prompt_words)

        # Raw term frequencies via substring counts. NOTE(review): str.count
        # counts substring occurrences, not token occurrences — presumably
        # acceptable as an approximation here.
        prompt_freq = {word: prompt.count(word) for word in prompt_words}
        content_freq = {word: content.count(word) for word in content_words}

        # Cosine similarity over the combined vocabulary.
        vocabulary = set(prompt_freq) | set(content_freq)
        if not vocabulary:
            return coverage

        prompt_vec = [prompt_freq.get(w, 0) for w in vocabulary]
        content_vec = [content_freq.get(w, 0) for w in vocabulary]

        dot_product = sum(p * c for p, c in zip(prompt_vec, content_vec))
        prompt_norm = sum(p * p for p in prompt_vec) ** 0.5
        content_norm = sum(c * c for c in content_vec) ** 0.5

        if prompt_norm * content_norm > 0:
            cosine_sim = dot_product / (prompt_norm * content_norm)
        else:
            cosine_sim = 0

        # Blend: coverage dominates, cosine similarity refines.
        return min(1.0, coverage * 0.6 + cosine_sim * 0.4)

    async def _score_creativity(self, content: str, reference: Optional[str] = None) -> float:
        """
        Score content creativity (0-1).

        Uses lexical diversity, sentence-length variety, and either
        divergence from a reference text or the presence of special
        formatting elements.

        Args:
            content: Generated text.
            reference: Optional reference text to diverge from.

        Returns:
            Creativity score in [0, 1].
        """
        score = 0.0

        # Lexical diversity (weight 0.4).
        words = jieba.lcut(content)
        if words:
            lexical_diversity = len(set(words)) / len(words)
            score += min(1.0, lexical_diversity * 1.5) * 0.4

        # Sentence variety via length variance (weight 0.3);
        # a variance of 10-100 is considered ideal.
        sentences = [s for s in re.split(r'[。！？]', content) if s.strip()]
        if len(sentences) > 1:
            variance = np.var([len(s) for s in sentences])
            if 10 <= variance <= 100:
                sentence_score = 1.0
            elif variance < 10:
                sentence_score = variance / 10
            else:
                sentence_score = max(0.5, 1.0 - (variance - 100) / 200)
            score += sentence_score * 0.3
        else:
            # Zero or one sentence: half credit for this component.
            score += 0.15

        if reference:
            # Divergence from the reference: fraction of content tokens
            # that do not appear in the reference (weight 0.3).
            ref_words = set(jieba.lcut(reference))
            content_words = set(jieba.lcut(content))
            unique_to_content = content_words - ref_words
            if content_words:
                score += (len(unique_to_content) / len(content_words)) * 0.3
            else:
                score += 0.15
        else:
            # No reference: reward special formatting elements
            # (lists, code, block quotes) instead.
            has_list = bool(re.search(r'^\s*[-*•]\s', content, re.MULTILINE))
            has_code = bool(re.search(r'```|`[^`]+`', content))
            has_quote = bool(re.search(r'^>', content, re.MULTILINE))
            special_elements = sum([has_list, has_code, has_quote])
            score += min(1.0, special_elements * 0.1) * 0.3

        return min(1.0, score)

    async def _score_fluency(self, content: str) -> float:
        """
        Score content fluency (0-1).

        Combines connector-based coherence, a simple doubled-word grammar
        check, and average sentence length as a readability proxy.

        Args:
            content: The text to evaluate.

        Returns:
            Fluency score in [0, 1].
        """
        score = 0.0

        # Coherence: connectors per sentence transition (weight 0.4);
        # a ratio of 0.3-0.6 is considered ideal.
        connector_count = sum(1 for conn in self._CONNECTORS if conn in content)
        sentence_count = len(
            [s for s in re.split(r'[。！？]', content) if s.strip()]
        )
        if sentence_count > 1:
            connector_ratio = connector_count / (sentence_count - 1)
            if 0.3 <= connector_ratio <= 0.6:
                coherence_score = 1.0
            elif connector_ratio < 0.3:
                coherence_score = connector_ratio / 0.3
            else:
                coherence_score = max(0.5, 1.0 - (connector_ratio - 0.6) / 0.4)
        else:
            coherence_score = 0.5
        score += coherence_score * 0.4

        # Grammar: count doubled function words; allow one "error"
        # per 100 characters before penalizing (weight 0.3).
        grammar_errors = sum(
            len(re.findall(pattern, content))
            for pattern in self._GRAMMAR_ERROR_PATTERNS
        )
        expected_errors = len(content) / 100
        if grammar_errors <= expected_errors:
            grammar_score = 1.0
        else:
            grammar_score = max(0.3, 1.0 - (grammar_errors - expected_errors) / 10)
        score += grammar_score * 0.3

        # Readability: 15-25 characters per sentence is ideal (weight 0.3).
        avg_sentence_length = len(content) / sentence_count if sentence_count > 0 else 0
        if 15 <= avg_sentence_length <= 25:
            readability_score = 1.0
        elif avg_sentence_length < 15:
            readability_score = avg_sentence_length / 15
        else:
            readability_score = max(0.5, 1.0 - (avg_sentence_length - 25) / 25)
        score += readability_score * 0.3

        return min(1.0, score)

    def update_weights(self, weights: Dict[str, float]):
        """
        Replace the scoring weights.

        Args:
            weights: New per-dimension weights; normalized to sum to 1.
        """
        self.weights = self._normalize_weights(weights)