import jieba
import re
import numpy as np
from typing import Dict, List, Any, Optional, Tuple
from textblob import TextBlob
import logging
from collections import Counter
import json

logger = logging.getLogger(__name__)

class NLPAnalyzer:
    """NLP text analyzer for learning content and student assignments.

    Combines jieba Chinese word segmentation with small keyword lexicons to
    estimate topic, difficulty, sentiment and writing quality. Every score is
    a lightweight heuristic (keyword hits and surface statistics), not a
    trained model — treat the outputs as rough signals, not ground truth.
    """

    def __init__(self):
        # Pre-load jieba's dictionary so the first cut() call is not slow.
        jieba.initialize()

        # Subject keyword lexicon: Chinese terms signalling each subject.
        self.subject_keywords: Dict[str, List[str]] = {
            'math': ['数学', '代数', '几何', '微积分', '函数', '方程', '矩阵', '概率', '统计'],
            'physics': ['物理', '力学', '电磁学', '热学', '光学', '量子', '相对论', '能量', '动量'],
            'computer_science': ['编程', '算法', '数据结构', '计算机', '软件', '代码', '程序', '数据库'],
            'chemistry': ['化学', '分子', '原子', '反应', '元素', '化合物', '有机', '无机'],
            'biology': ['生物', '细胞', '基因', '蛋白质', '进化', '生态', '遗传', '分子生物学']
        }

        # Difficulty keyword lexicon, one entry list per level.
        self.difficulty_keywords: Dict[str, List[str]] = {
            'beginner': ['基础', '入门', '简单', '初级', '基本', '概念', '介绍'],
            'intermediate': ['中级', '中等', '进阶', '应用', '实践', '综合'],
            'advanced': ['高级', '复杂', '深入', '研究', '理论', '前沿', '专家']
        }

        # Sentiment lexicon for simple Chinese polarity counting.
        self.sentiment_words: Dict[str, List[str]] = {
            'positive': ['优秀', '很好', '不错', '满意', '喜欢', '有趣', '清晰', '易懂'],
            'negative': ['困难', '复杂', '困惑', '不满意', '不喜欢', '模糊', '难懂'],
            'neutral': ['一般', '普通', '正常', '标准', '常规']
        }

    def analyze_text(self, text: str, analysis_type: str = "comprehensive") -> Dict[str, Any]:
        """Run text analysis.

        Args:
            text: Text to analyze.
            analysis_type: One of "comprehensive", "sentiment", "difficulty",
                "topic". Unknown values fall back to "comprehensive".

        Returns:
            A flat analysis-result dict, or ``{"error": ...}`` for empty
            input or on any internal failure.
        """
        if not text or not text.strip():
            return {"error": "文本为空"}

        try:
            result: Dict[str, Any] = {
                "text_length": len(text),
                # NOTE: whitespace-based count — nearly meaningless for
                # Chinese; the jieba-based word_count merged in by
                # _basic_statistics (comprehensive mode) supersedes it.
                "word_count": len(text.split()),
                "char_count": len(text.replace(" ", "")),
                "analysis_type": analysis_type
            }

            # Dispatch table instead of an if/elif chain; missing keys
            # (including "comprehensive") fall back to the full analysis,
            # matching the documented behavior.
            dispatch = {
                "sentiment": self._sentiment_analysis,
                "difficulty": self._difficulty_analysis,
                "topic": self._topic_analysis,
            }
            analyzer = dispatch.get(analysis_type, self._comprehensive_analysis)
            result.update(analyzer(text))
            return result

        except Exception as e:
            # Lazy %-formatting so the message is only built when emitted.
            logger.error("文本分析失败: %s", e)
            return {"error": str(e)}

    def _comprehensive_analysis(self, text: str) -> Dict[str, Any]:
        """Merge every sub-analysis into one FLAT dict.

        NOTE: keys are flattened to the top level (there is no nested
        'quality_assessment' / 'basic_statistics' sub-dict), which is why
        _suggest_score and _generate_improvement_suggestions read flat keys.
        """
        result: Dict[str, Any] = {}
        result.update(self._basic_statistics(text))      # word/sentence stats
        result.update(self._topic_analysis(text))        # subject + concepts
        result.update(self._difficulty_analysis(text))   # level + complexity
        result.update(self._sentiment_analysis(text))    # polarity
        result.update(self._quality_assessment(text))    # structure/coherence
        return result

    def _basic_statistics(self, text: str) -> Dict[str, Any]:
        """Surface statistics: word, sentence and paragraph counts."""
        words = list(jieba.cut(text))
        word_freq = Counter(words)

        # Split on both Chinese and Latin sentence terminators, dropping
        # empty fragments produced by trailing punctuation.
        sentences = [s.strip() for s in re.split(r'[。！？.!?]', text) if s.strip()]
        paragraphs = [p.strip() for p in text.split('\n\n') if p.strip()]

        return {
            "word_count": len(words),
            "sentence_count": len(sentences),
            "paragraph_count": len(paragraphs),
            "avg_sentence_length": len(words) / len(sentences) if sentences else 0,
            "most_common_words": word_freq.most_common(10),
            "unique_words": len(set(words)),
            "lexical_diversity": len(set(words)) / len(words) if words else 0
        }

    def _topic_analysis(self, text: str) -> Dict[str, Any]:
        """Identify the dominant subject and extract key concepts."""
        words = list(jieba.cut(text.lower()))

        subject_scores: Dict[str, int] = {}
        for subject, keywords in self.subject_keywords.items():
            keyword_set = set(keywords)  # O(1) membership per token
            subject_scores[subject] = sum(1 for word in words if word in keyword_set)

        # BUG FIX: max() over an all-zero score dict used to return the
        # first subject ('math') for any text; report "unknown" instead
        # when no subject keyword matched at all.
        if subject_scores:
            best_subject, best_score = max(subject_scores.items(), key=lambda kv: kv[1])
            primary_subject = best_subject if best_score > 0 else "unknown"
        else:
            primary_subject = "unknown"

        return {
            "primary_subject": primary_subject,
            "subject_scores": subject_scores,
            "key_concepts": self._extract_key_concepts(text),
            "topic_keywords": self._extract_topic_keywords(text)
        }

    def _difficulty_analysis(self, text: str) -> Dict[str, Any]:
        """Estimate difficulty level from keyword hits plus complexity."""
        words = list(jieba.cut(text.lower()))

        difficulty_scores: Dict[str, int] = {}
        for level, keywords in self.difficulty_keywords.items():
            keyword_set = set(keywords)
            difficulty_scores[level] = sum(1 for word in words if word in keyword_set)

        # Level resolution: advanced wins only on a strict majority over
        # both other levels; ties fall through toward "beginner".
        if (difficulty_scores['advanced'] > difficulty_scores['intermediate']
                and difficulty_scores['advanced'] > difficulty_scores['beginner']):
            difficulty_level, difficulty_score = "advanced", 8.5
        elif difficulty_scores['intermediate'] > difficulty_scores['beginner']:
            difficulty_level, difficulty_score = "intermediate", 6.0
        else:
            difficulty_level, difficulty_score = "beginner", 3.5

        return {
            "difficulty_level": difficulty_level,
            "difficulty_score": difficulty_score,
            "difficulty_scores": difficulty_scores,
            "complexity_score": self._calculate_complexity(text),
            "readability": self._calculate_readability(text)
        }

    def _sentiment_analysis(self, text: str) -> Dict[str, Any]:
        """Lexicon-based Chinese sentiment plus TextBlob for ASCII content."""
        words = list(jieba.cut(text.lower()))

        sentiment_scores: Dict[str, int] = {}
        for sentiment, keywords in self.sentiment_words.items():
            keyword_set = set(keywords)
            sentiment_scores[sentiment] = sum(1 for word in words if word in keyword_set)

        # Map raw hit counts to a coarse [0, 1] polarity.
        if sentiment_scores['positive'] > sentiment_scores['negative']:
            primary_sentiment, sentiment_score = "positive", 0.7
        elif sentiment_scores['negative'] > sentiment_scores['positive']:
            primary_sentiment, sentiment_score = "negative", 0.3
        else:
            primary_sentiment, sentiment_score = "neutral", 0.5

        # Strip all non-ASCII, leaving any English text for TextBlob.
        # TextBlob polarity is in [-1, 1] while sentiment_score is in
        # [0, 1], so overall_sentiment mixes two scales — kept for
        # backward compatibility with existing consumers.
        english_text = re.sub(r'[^\x00-\x7F]+', '', text)
        if english_text.strip():
            english_sentiment = TextBlob(english_text).sentiment.polarity
        else:
            english_sentiment = 0.0

        return {
            "primary_sentiment": primary_sentiment,
            "sentiment_score": sentiment_score,
            "sentiment_scores": sentiment_scores,
            "english_sentiment": english_sentiment,
            "overall_sentiment": (sentiment_score + english_sentiment) / 2
        }

    def _quality_assessment(self, text: str) -> Dict[str, Any]:
        """Aggregate structure, coherence and expression into one quality score."""
        structure_score = self._assess_structure(text)
        coherence_score = self._assess_coherence(text)
        expression_score = self._assess_expression(text)

        # Unweighted mean of the three 0-10 sub-scores.
        overall_quality = (structure_score + coherence_score + expression_score) / 3

        return {
            "structure_score": structure_score,
            "coherence_score": coherence_score,
            "expression_score": expression_score,
            "overall_quality": overall_quality,
            "quality_level": self._get_quality_level(overall_quality)
        }

    def _extract_key_concepts(self, text: str) -> List[str]:
        """Extract up to 10 candidate terms (English phrases + Chinese runs)."""
        # English: runs of letters possibly joined by single spaces.
        english_terms = re.findall(r'[A-Za-z]+(?:\s+[A-Za-z]+)*', text)
        # Chinese: any run of two or more CJK ideographs (crude heuristic).
        chinese_terms = re.findall(r'[\u4e00-\u9fff]{2,}', text)

        # BUG FIX: de-duplicate with dict.fromkeys (order-preserving) instead
        # of set(), so tie ordering after the stable length sort is
        # deterministic across runs.
        unique_concepts = list(dict.fromkeys(english_terms + chinese_terms))
        unique_concepts.sort(key=len, reverse=True)
        return unique_concepts[:10]

    def _extract_topic_keywords(self, text: str) -> List[str]:
        """Return the 15 most frequent non-stopword tokens of length > 1."""
        words = list(jieba.cut(text))

        # Minimal Chinese stopword list; single characters are dropped too.
        stop_words = {'的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好', '自己', '这'}
        filtered_words = [word for word in words if len(word) > 1 and word not in stop_words]

        word_freq = Counter(filtered_words)
        return [word for word, _freq in word_freq.most_common(15)]

    def _calculate_complexity(self, text: str) -> float:
        """Score text complexity on [0, 10] from length, diversity and terms."""
        # Lengths of non-blank sentence fragments.
        segment_lengths = [len(s) for s in re.split(r'[。！？.!?]', text) if s.strip()]
        # BUG FIX: np.mean([]) returns nan (with a RuntimeWarning) when the
        # text has no non-blank fragments; guard with 0.0.
        avg_sentence_length = float(np.mean(segment_lengths)) if segment_lengths else 0.0

        words = list(jieba.cut(text))
        lexical_diversity = len(set(words)) / len(words) if words else 0

        # Tokens longer than 2 chars are treated as "professional terms" —
        # a crude proxy, kept from the original heuristic.
        professional_terms = sum(1 for word in words if len(word) > 2)
        term_density = professional_terms / len(words) if words else 0

        # Fixed weights chosen heuristically; clamp to [0, 10].
        complexity = (avg_sentence_length * 0.3 + lexical_diversity * 0.4 + term_density * 0.3) * 10
        return min(10.0, max(0.0, complexity))

    def _calculate_readability(self, text: str) -> float:
        """Score readability on [0, 10]; shorter sentences read easier."""
        # BUG FIX: empty split fragments (e.g. after trailing punctuation)
        # used to inflate the sentence count and thus the readability score;
        # filter them out as _basic_statistics does.
        sentences = [s for s in re.split(r'[。！？.!?]', text) if s.strip()]
        words = list(jieba.cut(text))

        if not sentences or not words:
            return 0.0

        avg_sentence_length = len(words) / len(sentences)
        readability = max(0.0, 10.0 - avg_sentence_length * 0.5)
        return min(10.0, readability)

    def _assess_structure(self, text: str) -> float:
        """Score structural completeness (paragraphs, sentences, headings) on [0, 10]."""
        # BUG FIX: filter blank fragments so trailing punctuation / extra
        # newlines don't inflate the counts (consistent with _basic_statistics).
        paragraphs = [p for p in text.split('\n\n') if p.strip()]
        sentences = [s for s in re.split(r'[。！？.!?]', text) if s.strip()]

        # 5 paragraphs / 10 sentences saturate their sub-scores.
        paragraph_score = min(1.0, len(paragraphs) / 5.0) if paragraphs else 0.0
        sentence_score = min(1.0, len(sentences) / 10.0) if sentences else 0.0

        # Numbered headings like "一、" or "1." at line start earn full credit.
        has_headings = bool(re.search(r'^[一二三四五六七八九十\d]+[、.．]', text, re.MULTILINE))
        heading_score = 1.0 if has_headings else 0.5

        return (paragraph_score + sentence_score + heading_score) / 3 * 10

    def _assess_coherence(self, text: str) -> float:
        """Score logical coherence on [0, 10] by counting distinct connectives."""
        connectors = ['因此', '所以', '但是', '然而', '而且', '另外', '首先', '其次', '最后', '总之']
        # Counts each connective at most once (presence, not frequency).
        connector_count = sum(1 for connector in connectors if connector in text)

        # 5 distinct connectives saturate the score.
        coherence_score = min(1.0, connector_count / 5.0)
        return coherence_score * 10

    def _assess_expression(self, text: str) -> float:
        """Score language expression on [0, 10] via a crude typo heuristic.

        NOTE(review): this counts every occurrence of the commonly-confused
        characters 得/地/的/再/在 as a potential error, so correct usage is
        penalized too — a real 的/地/得 checker needs grammatical context.
        """
        # Commonly confused character groups (correct -> often-misused forms).
        common_errors = {
            '的': ['得', '地'],
            '地': ['的', '得'],
            '得': ['的', '地'],
            '在': ['再'],
            '再': ['在']
        }

        # BUG FIX: guard empty input (the old code divided by len(text)).
        if not text:
            return 0.0

        # BUG FIX: the old nested loop counted 的/地/得 twice each (every
        # character appears in two groups' error lists); count each suspect
        # character exactly once.
        suspect_chars = {c for errors in common_errors.values() for c in errors}
        error_count = sum(text.count(c) for c in suspect_chars)

        # Error density scaled so ~10% suspect characters zeroes the score.
        expression_score = max(0.0, 1.0 - error_count / len(text) * 10)
        return expression_score * 10

    def _get_quality_level(self, score: float) -> str:
        """Map a 0-10 quality score to a Chinese quality-level label."""
        if score >= 8.0:
            return "优秀"
        elif score >= 6.0:
            return "良好"
        elif score >= 4.0:
            return "一般"
        else:
            return "需要改进"

    def analyze_assignment(self, content: str, expected_topic: Optional[str] = None) -> Dict[str, Any]:
        """Analyze a student assignment.

        Args:
            content: Assignment text.
            expected_topic: Expected topic for relevance scoring; when None,
                a neutral default relevance is used.

        Returns:
            Dict with the full content analysis plus assignment-specific
            relevance / completeness / originality scores, a suggested
            grade and improvement suggestions.
        """
        analysis = self.analyze_text(content, "comprehensive")

        return {
            "content_analysis": analysis,
            "relevance_score": self._calculate_relevance(content, expected_topic),
            "completeness_score": self._assess_completeness(content),
            "originality_score": self._assess_originality(content),
            "suggested_score": self._suggest_score(analysis),
            "improvement_suggestions": self._generate_improvement_suggestions(analysis)
        }

    def _calculate_relevance(self, content: str, expected_topic: Optional[str]) -> float:
        """Score topical relevance on [0, 1] by keyword overlap."""
        if not expected_topic:
            return 0.7  # neutral default when no topic is given

        topic_words = list(jieba.cut(expected_topic))
        # Set for O(1) membership instead of scanning a list per topic word.
        content_words = set(jieba.cut(content))

        matches = sum(1 for word in topic_words if word in content_words)
        relevance = matches / len(topic_words) if topic_words else 0.0
        return min(1.0, relevance)

    def _assess_completeness(self, content: str) -> float:
        """Score completeness on [0, 10]: intro, conclusion and length."""
        has_introduction = bool(re.search(r'引言|介绍|首先|开始', content))
        has_conclusion = bool(re.search(r'总结|结论|总之|最后', content))

        # 500 characters is treated as "complete" length.
        length_score = min(1.0, len(content) / 500.0)

        # bools coerce to 0/1 in the mean.
        completeness = (has_introduction + has_conclusion + length_score) / 3
        return completeness * 10

    def _assess_originality(self, content: str) -> float:
        """Score originality on [0, 10] (simplified heuristic).

        Checks only for surface markers of personal views, examples and
        analysis — not actual plagiarism detection.
        """
        personal_indicators = ['我认为', '我觉得', '我的观点', '在我看来']
        has_personal_view = any(indicator in content for indicator in personal_indicators)

        has_examples = bool(re.search(r'例如|比如|举例|实例', content))
        has_analysis = bool(re.search(r'分析|解释|说明|因为|所以', content))

        originality_score = (has_personal_view + has_examples + has_analysis) / 3
        return originality_score * 10

    def _suggest_score(self, analysis: Dict[str, Any]) -> float:
        """Suggest a 0-100 grade from a (flat) comprehensive analysis.

        BUG FIX: previously read analysis['quality_assessment'][...] and
        analysis['difficulty_analysis'][...], but _comprehensive_analysis
        flattens all keys to the top level, so both lookups always fell back
        to their 5.0 defaults and every assignment received the same score.
        """
        quality_score = analysis.get('overall_quality', 5.0)
        difficulty_score = analysis.get('difficulty_score', 5.0)

        base_score = quality_score * 10  # 0-10 quality -> percent scale

        # Reward harder content, discount trivially easy content.
        if difficulty_score > 7.0:
            base_score *= 1.1
        elif difficulty_score < 4.0:
            base_score *= 0.9

        return min(100.0, max(0.0, base_score))

    def _generate_improvement_suggestions(self, analysis: Dict[str, Any]) -> List[str]:
        """Generate improvement suggestions from a (flat) comprehensive analysis.

        BUG FIX: previously read the nested 'quality_assessment' and
        'basic_statistics' sub-dicts, which _comprehensive_analysis never
        creates — every threshold check saw the default and most suggestions
        could never fire. Reads the flat top-level keys instead.
        """
        suggestions: List[str] = []

        if analysis.get('structure_score', 0) < 6.0:
            suggestions.append("建议改善文章结构，增加段落划分和逻辑层次")

        if analysis.get('coherence_score', 0) < 6.0:
            suggestions.append("建议增加连接词，提高文章的逻辑连贯性")

        if analysis.get('expression_score', 0) < 6.0:
            suggestions.append("建议检查语言表达，避免错别字和语法错误")

        if analysis.get('word_count', 0) < 100:
            suggestions.append("建议增加内容篇幅，提供更详细的论述")

        if not suggestions:
            suggestions.append("内容质量良好，继续保持！")

        return suggestions