"""
自然语言处理增强模块
增强系统的自然语言理解和生成能力
"""

from typing import Dict, Any, List, Optional, Tuple
import re
import logging
from collections import Counter
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np

from src.utils.logging import get_logger

logger = get_logger(__name__)

class NLPSupport:
    """NLP support utilities.

    Provides keyword extraction, text similarity, extractive summarization,
    question-type classification, rule-based entity extraction and
    lexicon-based sentiment analysis for mixed Chinese/English text.
    """

    def __init__(self):
        """Initialize the TF-IDF vectorizer and the stop-word set."""
        self.vectorizer = TfidfVectorizer(max_features=1000, stop_words=None)
        self.stop_words = self._load_stop_words()
        logger.info("NLP支持模块初始化完成")

    def _load_stop_words(self) -> set:
        """Build the combined Chinese + English stop-word set."""
        # Common Chinese stop words.
        chinese_stopwords = {
            '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好', '自己', '这'
        }

        # Common English stop words.
        english_stopwords = {
            'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by', 'is', 'are', 'was', 'were', 'be', 'been', 'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could', 'should'
        }

        return chinese_stopwords.union(english_stopwords)

    def extract_keywords(self, text: str, top_k: int = 10) -> List[str]:
        """
        Extract the most frequent keywords from *text*.

        Args:
            text: Input text (Chinese and/or English).
            top_k: Maximum number of keywords to return.

        Returns:
            List[str]: Keywords ordered by descending frequency.
        """
        try:
            # Segment with jieba (handles Chinese; splits English on spaces).
            words = jieba.lcut(text)

            # Drop stop words and single-character tokens.
            filtered_words = [word for word in words if len(word) > 1 and word not in self.stop_words]

            # Rank by frequency.
            word_freq = Counter(filtered_words)
            return [word for word, _ in word_freq.most_common(top_k)]

        except Exception as e:
            logger.error(f"关键词提取失败: {e}")
            # Fallback: simple regex tokenization. dict.fromkeys keeps
            # first-occurrence order, so the result is deterministic
            # (the previous list(set(...)) was not).
            words = re.findall(r'\b\w+\b', text.lower())
            filtered_words = [word for word in words if len(word) > 2 and word not in self.stop_words]
            return list(dict.fromkeys(filtered_words))[:top_k]

    def calculate_similarity(self, text1: str, text2: str) -> float:
        """
        Compute the cosine similarity between two texts.

        Args:
            text1: First text.
            text2: Second text.

        Returns:
            float: Similarity score in [0, 1].
        """
        try:
            # Vectorize both texts. cosine_similarity accepts the sparse
            # TF-IDF matrix directly, so no dense conversion is needed.
            tfidf_matrix = self.vectorizer.fit_transform([text1, text2])
            similarity = cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]
            return float(similarity)

        except Exception as e:
            logger.error(f"相似度计算失败: {e}")
            # Fallback: Jaccard overlap of the top extracted keywords.
            keywords1 = set(self.extract_keywords(text1, 20))
            keywords2 = set(self.extract_keywords(text2, 20))

            if not keywords1 and not keywords2:
                # No keywords on either side: equal texts count as identical.
                return 1.0 if text1 == text2 else 0.0

            intersection = keywords1.intersection(keywords2)
            union = keywords1.union(keywords2)
            return len(intersection) / len(union) if union else 0.0

    def summarize_text(self, text: str, max_sentences: int = 3) -> str:
        """
        Produce an extractive summary of *text*.

        Sentences containing the most document keywords are selected and
        re-emitted in their original order.

        Args:
            text: Input text.
            max_sentences: Maximum number of sentences in the summary.

        Returns:
            str: Summary text.
        """
        try:
            # Split on Chinese and English sentence terminators.
            sentences = re.split(r'[。！？.!?]', text)
            sentences = [s.strip() for s in sentences if s.strip()]

            if len(sentences) <= max_sentences:
                return text

            # Score each sentence by how many document keywords it contains.
            keywords = self.extract_keywords(text, 20)
            keyword_set = set(keywords)

            scored = []
            for idx, sentence in enumerate(sentences):
                score = len(set(jieba.lcut(sentence)).intersection(keyword_set))
                scored.append((score, idx, sentence))

            # Highest score first; sorted() is stable, so ties keep document
            # order (the previous tuple sort tie-broke on sentence text).
            top = sorted(scored, key=lambda t: t[0], reverse=True)[:max_sentences]

            # Restore original order via the recorded index — robust against
            # duplicate sentences, unlike list.index().
            top.sort(key=lambda t: t[1])

            return '。'.join(sentence for _, _, sentence in top) + '。'

        except Exception as e:
            logger.error(f"文本摘要失败: {e}")
            # Fallback: plain truncation.
            return text[:500] + ('...' if len(text) > 500 else '')

    def classify_question_type(self, question: str) -> str:
        """
        Classify the type of a question by keyword matching.

        Args:
            question: Question text.

        Returns:
            str: One of 'definition', 'comparison', 'procedure', 'reason',
                 'list', 'evaluation', or 'general'.
        """
        question = question.lower().strip()

        # Keyword cues per question type. Order matters: the first matching
        # type wins.
        question_types = {
            'definition': ['什么是', '是什么', '定义', '概念', 'what is', 'define', 'definition'],
            'comparison': ['比较', '对比', '区别', '不同', 'similar to', 'difference', 'compare'],
            'procedure': ['如何', '怎么', '步骤', '方法', 'how to', 'procedure', 'steps'],
            'reason': ['为什么', '原因', '因为', 'why', 'reason', 'cause'],
            'list': ['有哪些', '列表', '列举', 'list', 'what are'],
            'evaluation': ['评价', '评估', '好么', '如何样', 'review', 'evaluate']
        }

        for qtype, keywords in question_types.items():
            if any(keyword in question for keyword in keywords):
                return qtype

        return 'general'  # Default when no cue matches.

    def extract_entities(self, text: str) -> Dict[str, List[str]]:
        """
        Extract entities using regexes and simple suffix heuristics.

        Args:
            text: Input text.

        Returns:
            Dict[str, List[str]]: Mapping of entity category to de-duplicated
            entity strings. Categories: person, location, organization,
            date, number, email, url.
        """
        entities = {
            'person': [],
            'location': [],
            'organization': [],
            'date': [],
            'number': [],
            'email': [],
            'url': []
        }

        try:
            # Dates in common Chinese and numeric formats.
            date_patterns = [
                r'\d{4}年\d{1,2}月\d{1,2}日',
                r'\d{4}-\d{1,2}-\d{1,2}',
                r'\d{1,2}/\d{1,2}/\d{4}',
                r'\d{1,2}月\d{1,2}日'
            ]
            for pattern in date_patterns:
                entities['date'].extend(re.findall(pattern, text))

            # Integers and decimals.
            entities['number'].extend(re.findall(r'\d+(?:\.\d+)?', text))

            # E-mail addresses.
            entities['email'].extend(re.findall(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b', text))

            # URLs.
            entities['url'].extend(re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text))

            # Suffix-based heuristics over jieba tokens.
            words = jieba.lcut(text)
            for i, word in enumerate(words):
                if len(word) >= 2:
                    if word.endswith(('公司', '集团', '企业', '银行', '学校', '大学')):
                        entities['organization'].append(word)
                    elif word.endswith(('省', '市', '县', '区', '国', '州', '岛')):
                        entities['location'].append(word)
                    elif re.match(r'^[A-Z][a-z]+$', word) and i > 0:
                        # Possible person name: single alphabetic token
                        # followed by a capitalized word.
                        prev_word = words[i - 1]
                        if len(prev_word) == 1 and prev_word.isalpha():
                            entities['person'].append(prev_word + word)

            # English full names ("First Last"). NOTE: the previous version
            # also treated any run of Chinese numerals as a person name,
            # which produced only false positives and has been removed.
            entities['person'].extend(re.findall(r'[A-Z][a-z]+\s+[A-Z][a-z]+', text))

        except Exception as e:
            logger.error(f"实体提取失败: {e}")

        # De-duplicate each category.
        for key in entities:
            entities[key] = list(set(entities[key]))

        return entities

    def sentiment_analysis(self, text: str) -> Dict[str, Any]:
        """
        Lexicon-based sentiment analysis.

        Args:
            text: Input text.

        Returns:
            Dict[str, Any]: Keys 'sentiment' ('positive'/'negative'/'neutral'),
            'score' (−1..1), 'positive_words_count', 'negative_words_count'
            and 'confidence' (share of sentiment-bearing tokens, 0..1).
        """
        # Minimal bilingual sentiment lexicons.
        positive_words = {'好', '棒', '优秀', '赞', '喜欢', '高兴', '满意', 'good', 'great', 'excellent', 'like', 'love', 'happy', '开心', '愉快', '兴奋'}
        negative_words = {'坏', '差', '糟糕', '讨厌', '生气', '不满', '失望', 'bad', 'terrible', 'hate', 'angry', 'disappointed', '难过', '沮丧', '愤怒'}

        words = jieba.lcut(text.lower())
        positive_count = sum(1 for word in words if word in positive_words)
        negative_count = sum(1 for word in words if word in negative_words)

        total_sentiment_words = positive_count + negative_count
        if total_sentiment_words == 0:
            sentiment_score = 0
            sentiment_label = 'neutral'
        else:
            sentiment_score = (positive_count - negative_count) / total_sentiment_words
            if sentiment_score > 0.1:
                sentiment_label = 'positive'
            elif sentiment_score < -0.1:
                sentiment_label = 'negative'
            else:
                sentiment_label = 'neutral'

        return {
            'sentiment': sentiment_label,
            'score': sentiment_score,
            'positive_words_count': positive_count,
            'negative_words_count': negative_count,
            # Guard against empty token lists (empty/whitespace input used
            # to raise ZeroDivisionError here).
            'confidence': min(total_sentiment_words / len(words), 1.0) if words else 0.0
        }


class EnhancedTextGenerator:
    """Enhanced text generator.

    Builds answers from a question plus a context passage by dispatching on
    the detected question type and reusing :class:`NLPSupport` for keyword
    extraction, summarization and sentiment analysis.
    """

    def __init__(self):
        """Create the underlying NLP support helper."""
        self.nlp_support = NLPSupport()
        logger.info("增强文本生成器初始化完成")

    def generate_answer(self, question: str, context: str, style: str = "formal") -> str:
        """
        Generate an answer for *question* from *context*.

        Args:
            question: The question text.
            context: Source text to answer from.
            style: Output style ("formal", "casual" or "technical").

        Returns:
            str: The generated answer.
        """
        try:
            # Dispatch on the detected question type.
            question_type = self.nlp_support.classify_question_type(question)

            # Keywords from the question guide sentence selection.
            # (Context keywords were previously computed here but never used.)
            question_keywords = self.nlp_support.extract_keywords(question)

            if question_type == 'definition':
                return self._generate_definition_answer(question, context, question_keywords)
            elif question_type == 'procedure':
                return self._generate_procedure_answer(question, context, question_keywords)
            elif question_type == 'comparison':
                return self._generate_comparison_answer(question, context, question_keywords)
            else:
                return self._generate_general_answer(question, context, question_keywords, style)

        except Exception as e:
            logger.error(f"答案生成失败: {e}")
            return f"基于您的问题'{question}'，我找到了相关信息，但无法生成详细答案。请稍后重试或联系支持。"

    def _generate_definition_answer(self, question: str, context: str, keywords: List[str]) -> str:
        """Generate a definition-style answer: prefer sentences that look like definitions."""
        sentences = re.split(r'[。！？.!?]', context)
        definition_sentences = []

        definition_keywords = ['是', '指', '定义', '概念', 'meaning', 'is', 'means']

        for sentence in sentences:
            if any(keyword in sentence.lower() for keyword in definition_keywords):
                definition_sentences.append(sentence)

        if definition_sentences:
            # Use the first definition-like sentence.
            return definition_sentences[0].strip() + '。'
        else:
            # Fall back to the first sentence containing a question keyword.
            for sentence in sentences:
                if any(keyword in sentence for keyword in keywords[:3]):
                    return sentence.strip() + '。'

            # Last resort: truncated context.
            return context[:200] + ('...' if len(context) > 200 else '')

    def _generate_procedure_answer(self, question: str, context: str, keywords: List[str]) -> str:
        """Generate a step-by-step answer from ordered lists or step cues."""
        # Prefer explicit numbered/bulleted lists if enough was found.
        ordered_info = self._extract_ordered_information(context)
        if len(ordered_info) > 50:
            return ordered_info

        sentences = re.split(r'[。！？.!?]', context)
        procedure_sentences = []

        # Cue words that typically introduce procedure steps.
        step_keywords = ['首先', '第一步', '然后', '接着', '下一步', '最后', 'step', 'first', 'then', 'next', 'finally', 'lastly']

        for sentence in sentences:
            if any(keyword in sentence.lower() for keyword in step_keywords):
                procedure_sentences.append(sentence.strip())

        if procedure_sentences:
            return '。'.join(procedure_sentences) + '。'
        else:
            # No step cues found: summarize the context instead.
            return self.nlp_support.summarize_text(context, 4)

    def _generate_comparison_answer(self, question: str, context: str, keywords: List[str]) -> str:
        """Generate a comparison-style answer from sentences with comparison cues."""
        sentences = re.split(r'[。！？.!?]', context)
        comparison_sentences = []

        comparison_keywords = ['比较', '对比', '区别', '不同', '优于', '劣于', '相比', 'than', 'compare', 'versus', 'vs']

        for sentence in sentences:
            if any(keyword in sentence.lower() for keyword in comparison_keywords):
                comparison_sentences.append(sentence.strip())

        if comparison_sentences:
            return '。'.join(comparison_sentences) + '。'
        else:
            # No comparison cues found: summarize the context instead.
            return self.nlp_support.summarize_text(context, 3)

    def _generate_general_answer(self, question: str, context: str, keywords: List[str], style: str) -> str:
        """Generate a generic answer: style prefix + two-sentence summary."""
        if style == "casual":
            prefix = "根据我了解的信息"
        elif style == "technical":
            prefix = "根据技术资料"
        else:
            prefix = "根据相关信息"

        summary = self.nlp_support.summarize_text(context, 2)
        return f"{prefix}，{summary}"

    def generate_contextual_answer(self, question: str, context: str, user_preferences: Optional[Dict[Any, Any]] = None) -> str:
        """
        Generate a personalized answer from context and user preferences.

        Args:
            question: The question text.
            context: Source text to answer from.
            user_preferences: Optional preference dict; the value at
                ``communication_style_preference.value`` selects the style.

        Returns:
            str: The generated answer.
        """
        try:
            question_type = self.nlp_support.classify_question_type(question)
            sentiment = self.nlp_support.sentiment_analysis(question)

            # Pick the answer style from user preferences (single lookup).
            style = "formal"
            if user_preferences:
                preferred = user_preferences.get('communication_style_preference', {}).get('value')
                if preferred == 'casual':
                    style = "casual"
                elif preferred == 'technical':
                    style = "technical"

            basic_answer = self.generate_answer(question, context, style)

            # Soften the opening for negatively-phrased questions.
            if sentiment['sentiment'] == 'negative':
                if not basic_answer.startswith("很抱歉"):
                    basic_answer = "很抱歉听到您的困扰。" + basic_answer

            # Balance the tone for evaluation-type questions.
            if question_type == 'evaluation':
                if sentiment['sentiment'] == 'positive':
                    basic_answer = "确实如此，" + basic_answer.lower()
                elif sentiment['sentiment'] == 'negative':
                    basic_answer = "我理解您的观点，不过" + basic_answer.lower()

            return basic_answer

        except Exception as e:
            logger.error(f"生成个性化答案失败: {e}")
            # Fall back to a plain formal answer.
            return self.generate_answer(question, context, "formal")

    def _extract_ordered_information(self, text: str) -> str:
        """Extract numbered or bulleted lines; fall back to truncated text."""
        lines = text.split('\n')
        ordered_lines = []

        for line in lines:
            line = line.strip()
            # Matches "1." / "2)" / "3、" prefixes and bullet characters.
            if line and (re.match(r'^\d+[\.\)、]', line) or re.match(r'^[•·\-]', line)):
                ordered_lines.append(line)

        if ordered_lines:
            return '\n'.join(ordered_lines)
        else:
            return text[:300] + ('...' if len(text) > 300 else '')


# Module-level singleton instances; external code should obtain them via the
# accessor functions rather than importing these names directly.
nlp_support = NLPSupport()
text_generator = EnhancedTextGenerator()


def get_nlp_support() -> NLPSupport:
    """Return the shared module-level :class:`NLPSupport` instance."""
    return nlp_support


def get_text_generator() -> EnhancedTextGenerator:
    """Return the shared module-level :class:`EnhancedTextGenerator` instance."""
    return text_generator