#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
情感分析器

提供中文文本的情感分析功能，包括正面、负面、中性判断。
"""

import logging
import re
from collections import Counter
from typing import Dict, List, Optional, Tuple

import jieba
import jieba.posseg as pseg

try:
    from snownlp import SnowNLP
    SNOWNLP_AVAILABLE = True
except ImportError:
    SNOWNLP_AVAILABLE = False
    logging.warning("SnowNLP未安装，将使用基础关键词匹配方法")

class SentimentAnalyzer:
    """Sentiment analyzer for Chinese review text.

    Combines up to three heuristics -- SnowNLP (when installed), keyword
    matching with degree-adverb / negation handling, and POS-tag based
    scoring -- into a single verdict: a sentiment label ("positive" /
    "negative" / "neutral"), a score on a 0-1 scale where 0.5 is neutral,
    and a 0-5 "threat level" ranking how damaging a negative review is.
    """

    # Neutral baseline on the 0-1 score scale.  Also used as the fallback
    # score for empty or failed inputs: the previous fallback of 0.0 made
    # such inputs look strongly negative to threshold-based callers like
    # get_negative_comments().
    NEUTRAL_SCORE = 0.5

    # Severe complaints (food safety, scams) that raise the threat level.
    # Class-level frozenset so it is not rebuilt on every call.
    SERIOUS_NEGATIVE_WORDS = frozenset({
        "食物中毒", "拉肚子", "有虫子", "有头发", "发霉", "变质",
        "黑店", "坑人", "宰客", "恶心", "垃圾"
    })

    def __init__(self):
        self.logger = logging.getLogger(__name__)

        # Negative keyword lexicon, grouped by complaint category.
        # NOTE(review): multi-word entries (e.g. "性价比低") only match if
        # jieba emits them as a single token -- verify against real input.
        self.negative_words = {
            # Food quality
            "难吃", "恶心", "垃圾", "差劲", "糟糕", "烂", "臭", "坏",
            "不好吃", "太难吃", "超难吃", "巨难吃",

            # Service
            "态度差", "服务差", "冷漠", "傲慢", "无礼", "粗鲁",
            "不耐烦", "爱理不理", "黑脸",

            # Environment
            "脏", "乱", "差", "嘈杂", "吵", "臭味", "异味",
            "不卫生", "太脏", "环境差",

            # Pricing
            "坑人", "宰客", "黑店", "天价", "太贵", "不值",
            "性价比低", "不划算", "坑钱",

            # Severe issues (food safety etc.)
            "食物中毒", "拉肚子", "吃坏肚子", "有虫子", "有头发",
            "过期", "变质", "发霉", "有异物",

            # Emotion / anti-recommendation
            "愤怒", "气愤", "生气", "郁闷", "失望", "后悔",
            "不推荐", "别去", "千万别", "劝退"
        }

        # Positive keyword lexicon, grouped the same way.
        self.positive_words = {
            # Taste
            "好吃", "美味", "香", "鲜", "甜", "爽", "嫩", "酥",
            "太好吃", "超好吃", "巨好吃", "很香", "很鲜",

            # Service
            "服务好", "态度好", "热情", "周到", "贴心", "专业",
            "有耐心", "很客气", "笑容满面",

            # Environment
            "干净", "整洁", "舒适", "优雅", "温馨", "安静",
            "环境好", "装修好", "氛围好",

            # Pricing
            "便宜", "实惠", "划算", "性价比高", "物美价廉",
            "良心价", "不贵", "合理",

            # Recommendation
            "推荐", "值得", "不错", "很好", "棒", "赞", "满意",
            "会再来", "还会来", "必须点赞"
        }

        # Degree adverbs and the multiplier each applies to the sentiment
        # word that follows it.
        self.degree_words = {
            "极": 2.0, "超": 1.8, "特别": 1.6, "非常": 1.5, "很": 1.3,
            "比较": 1.2, "还": 1.1, "稍微": 0.8, "有点": 0.9,
            "太": 1.8, "巨": 1.9, "贼": 1.7, "老": 1.4, "挺": 1.2
        }

        # Negation markers; a sentiment word preceded (within two tokens)
        # by one of these has its polarity flipped.
        self.negation_words = {"不", "没", "无", "非", "未", "别", "莫", "勿"}

        self.logger.info("情感分析器初始化完成")

    def _neutral_result(self, method: str) -> Dict:
        """Build the standard neutral fallback result tagged with *method*."""
        return {
            "sentiment": "neutral",
            "score": self.NEUTRAL_SCORE,
            "confidence": 0.0,
            "method": method,
            "keywords": [],
            "threat_level": 0
        }

    def analyze_sentiment(self, text: str) -> Dict:
        """
        Analyze the sentiment of one text.

        Args:
            text (str): Text to analyze.

        Returns:
            dict: Keys ``sentiment`` ("positive"/"negative"/"neutral"),
            ``score`` (0-1, 0.5 = neutral), ``confidence`` (0-1),
            ``method`` ("+"-joined method names), ``keywords``,
            ``threat_level`` (0-5) and ``details`` (per-method results).
        """
        if not text or not isinstance(text, str):
            # Neutral fallback; score sits at the 0.5 baseline so empty
            # input is never flagged as negative by score thresholds.
            return self._neutral_result("empty_text")

        cleaned_text = self._clean_text(text)

        # Run every available analysis method and collect their results.
        results = []
        if SNOWNLP_AVAILABLE:
            results.append(self._analyze_with_snownlp(cleaned_text))
        results.append(self._analyze_with_keywords(cleaned_text))
        results.append(self._analyze_with_grammar(cleaned_text))

        # Merge the per-method results into a single verdict.
        final_result = self._combine_results(results, text)

        self.logger.debug(f"文本情感分析完成: {text[:50]}... -> {final_result['sentiment']}")

        return final_result

    def batch_analyze(self, texts: List[str]) -> List[Dict]:
        """
        Analyze a list of texts, one result dict per input.

        Failures are caught per item and reported as a neutral result
        carrying an ``error`` key, so one bad text never aborts the batch.

        Args:
            texts (List[str]): Texts to analyze.

        Returns:
            List[Dict]: One analysis result per text, each with an
            ``index`` key giving its position in the input list.
        """
        results = []
        for i, text in enumerate(texts):
            try:
                result = self.analyze_sentiment(text)
                result['index'] = i
                results.append(result)
            except Exception as e:
                self.logger.error(f"分析第{i}条文本时出错: {e}")
                # Neutral fallback (score 0.5) so a failed item is not
                # later misread as a strongly negative comment.
                error_result = self._neutral_result("error")
                error_result['index'] = i
                error_result['error'] = str(e)
                results.append(error_result)

        return results

    def get_negative_comments(self, comments: List[Dict], threshold: float = 0.3) -> List[Dict]:
        """
        Filter a comment list down to negative comments.

        Args:
            comments (List[Dict]): Comment dicts; each must contain a
                'text' field (others are skipped).
            threshold (float): Score threshold; a score below it also
                counts as negative even when the label is not "negative".

        Returns:
            List[Dict]: Copies of the negative comments with the analysis
            attached under 'sentiment_analysis', sorted by descending
            threat level.
        """
        negative_comments = []

        for comment in comments:
            if not isinstance(comment, dict) or 'text' not in comment:
                continue

            sentiment_result = self.analyze_sentiment(comment['text'])

            # Negative by label, or by score falling below the threshold.
            if (sentiment_result['sentiment'] == 'negative' or
                    sentiment_result['score'] < threshold):
                # Copy so the caller's original dict is not mutated.
                negative_comment = comment.copy()
                negative_comment['sentiment_analysis'] = sentiment_result
                negative_comments.append(negative_comment)

        # Most threatening comments first.
        negative_comments.sort(
            key=lambda x: x['sentiment_analysis'].get('threat_level', 0),
            reverse=True
        )

        return negative_comments

    def _clean_text(self, text: str) -> str:
        """Strip emoji/punctuation, keep CJK/word/space chars, collapse runs of whitespace."""
        text = re.sub(r'[^\u4e00-\u9fff\w\s]', ' ', text)
        text = re.sub(r'\s+', ' ', text.strip())
        return text

    def _analyze_with_snownlp(self, text: str) -> Dict:
        """Score *text* with SnowNLP; 0-1 where higher is more positive."""
        try:
            s = SnowNLP(text)
            score = s.sentiments  # 0-1; closer to 1 means more positive

            if score >= 0.6:
                sentiment = "positive"
            elif score <= 0.4:
                sentiment = "negative"
            else:
                sentiment = "neutral"

            return {
                "method": "snownlp",
                "sentiment": sentiment,
                "score": score,
                # Distance from the 0.5 midpoint, rescaled to 0-1.
                "confidence": abs(score - 0.5) * 2
            }
        except Exception as e:
            self.logger.error(f"SnowNLP分析出错: {e}")
            return {
                "method": "snownlp_error",
                "sentiment": "neutral",
                "score": self.NEUTRAL_SCORE,
                "confidence": 0.0
            }

    def _analyze_with_keywords(self, text: str) -> Dict:
        """Lexicon-based scoring with degree-adverb weighting and negation flips."""
        words = jieba.lcut(text)

        positive_score = 0.0
        negative_score = 0.0
        matched_keywords = []

        for i, word in enumerate(words):
            # Degree adverb directly before the word scales its weight.
            degree = 1.0
            if i > 0 and words[i - 1] in self.degree_words:
                degree = self.degree_words[words[i - 1]]

            # A negation marker within the two preceding tokens flips polarity.
            negation = False
            if i > 0 and words[i - 1] in self.negation_words:
                negation = True
            elif i > 1 and words[i - 2] in self.negation_words:
                negation = True

            if word in self.positive_words:
                score = degree
                if negation:
                    # Negated positive counts fully as negative.
                    negative_score += score
                    matched_keywords.append(f"否定:{word}")
                else:
                    positive_score += score
                    matched_keywords.append(f"正面:{word}")

            elif word in self.negative_words:
                score = degree
                if negation:
                    # Negated negative becomes a weak positive.
                    positive_score += score * 0.5
                    matched_keywords.append(f"否定负面:{word}")
                else:
                    negative_score += score
                    matched_keywords.append(f"负面:{word}")

        # Net polarity in [-1, 1]: (pos - neg) over total matched weight.
        total_score = positive_score + negative_score
        if total_score > 0:
            final_score = (positive_score - negative_score) / total_score
        else:
            final_score = 0.0

        # Map [-1, 1] onto the shared 0-1 scale.
        normalized_score = (final_score + 1) / 2

        if normalized_score >= 0.6:
            sentiment = "positive"
        elif normalized_score <= 0.4:
            sentiment = "negative"
        else:
            sentiment = "neutral"

        return {
            "method": "keywords",
            "sentiment": sentiment,
            "score": normalized_score,
            # More matched weight -> more confidence, capped at 1.0.
            "confidence": min(1.0, total_score / 5),
            "keywords": matched_keywords,
            "positive_score": positive_score,
            "negative_score": negative_score
        }

    def _analyze_with_grammar(self, text: str) -> Dict:
        """POS-based scoring: adjectives weigh +/-1, verbs +/-0.5."""
        words = pseg.cut(text)

        emotion_score = 0.0
        emotion_words = []

        for word, flag in words:
            # Adjectives (jieba POS tags starting with 'a').
            if flag.startswith('a'):
                if word in self.negative_words:
                    emotion_score -= 1
                    emotion_words.append(f"负面形容词:{word}")
                elif word in self.positive_words:
                    emotion_score += 1
                    emotion_words.append(f"正面形容词:{word}")

            # Verbs (tags starting with 'v') count at half weight.
            elif flag.startswith('v'):
                if word in self.negative_words:
                    emotion_score -= 0.5
                    emotion_words.append(f"负面动词:{word}")
                elif word in self.positive_words:
                    emotion_score += 0.5
                    emotion_words.append(f"正面动词:{word}")

        # Map the raw score onto the 0-1 scale around the 0.5 baseline,
        # clamped so extreme counts cannot leave [0, 1].
        if emotion_score > 0:
            normalized_score = min(1.0, 0.5 + emotion_score * 0.1)
        elif emotion_score < 0:
            normalized_score = max(0.0, 0.5 + emotion_score * 0.1)
        else:
            normalized_score = 0.5

        if normalized_score >= 0.6:
            sentiment = "positive"
        elif normalized_score <= 0.4:
            sentiment = "negative"
        else:
            sentiment = "neutral"

        return {
            "method": "grammar",
            "sentiment": sentiment,
            "score": normalized_score,
            "confidence": min(1.0, abs(emotion_score) * 0.2),
            "emotion_words": emotion_words
        }

    def _combine_results(self, results: List[Dict], original_text: str) -> Dict:
        """Merge per-method results into one weighted verdict."""
        if not results:
            return self._neutral_result("no_method")

        # Fixed per-method weights; unknown methods are ignored.
        weights = {"snownlp": 0.4, "keywords": 0.4, "grammar": 0.2}

        weighted_score = 0.0
        total_weight = 0.0
        all_keywords = []
        methods_used = []

        for result in results:
            method = result.get("method", "unknown")
            if method in weights:
                weight = weights[method]
                weighted_score += result.get("score", self.NEUTRAL_SCORE) * weight
                total_weight += weight
                methods_used.append(method)

                if "keywords" in result:
                    all_keywords.extend(result["keywords"])

        if total_weight > 0:
            final_score = weighted_score / total_weight
        else:
            final_score = self.NEUTRAL_SCORE

        if final_score >= 0.6:
            sentiment = "positive"
        elif final_score <= 0.4:
            sentiment = "negative"
        else:
            sentiment = "neutral"

        # 0-5 severity estimate for downstream prioritisation.
        threat_level = self._calculate_threat_level(original_text, final_score, all_keywords)

        confidence = self._calculate_confidence(results, final_score)

        return {
            "sentiment": sentiment,
            "score": final_score,
            "confidence": confidence,
            "method": "+".join(methods_used),
            # Order-preserving dedup; list(set(...)) would give a
            # nondeterministic keyword order across runs.
            "keywords": list(dict.fromkeys(all_keywords)),
            "threat_level": threat_level,
            "details": results
        }

    def _calculate_threat_level(self, text: str, score: float, keywords: List[str]) -> int:
        """Estimate a 0-5 threat level from score, keywords and text length."""
        if score >= 0.6:  # Positive review: no threat.
            return 0

        threat_level = 1  # Baseline for any non-positive review.

        # Each matched severe keyword (food safety, scam) adds 2 levels.
        for keyword in keywords:
            # Keywords are tagged like "负面:恶心"; take the word part.
            keyword_text = keyword.split(":")[-1] if ":" in keyword else keyword
            if keyword_text in self.SERIOUS_NEGATIVE_WORDS:
                threat_level += 2

        # The lower the score, the higher the threat.
        if score <= 0.2:
            threat_level += 2
        elif score <= 0.3:
            threat_level += 1

        # Long complaints presumably carry more weight with readers.
        if len(text) > 100:
            threat_level += 1

        return min(5, threat_level)  # Clamp to the 0-5 scale.

    def _calculate_confidence(self, results: List[Dict], final_score: float) -> float:
        """Confidence = 0.6 * label agreement + 0.4 * score agreement."""
        if not results:
            return 0.0

        # Fraction of methods agreeing on the majority label.
        sentiments = [r.get("sentiment", "neutral") for r in results]
        consistency = sentiments.count(max(set(sentiments), key=sentiments.count)) / len(sentiments)

        # 1 minus the variance of method scores around the final score.
        scores = [r.get("score", self.NEUTRAL_SCORE) for r in results]
        score_variance = sum((s - final_score) ** 2 for s in scores) / len(scores)
        score_consistency = max(0, 1 - score_variance)

        confidence = (consistency * 0.6 + score_consistency * 0.4)

        return min(1.0, confidence)

    def _empty_statistics(self) -> Dict:
        """Statistics dict returned when there is nothing to aggregate."""
        return {
            "total": 0,
            "positive": 0,
            "negative": 0,
            "neutral": 0,
            "avg_score": 0.0,
            "threat_distribution": {},
            "top_keywords": []
        }

    def get_statistics(self, comments: List[Dict]) -> Dict:
        """
        Aggregate sentiment statistics over a list of comment dicts.

        Entries that are not dicts or lack a 'text' field are skipped.

        Returns:
            Dict: Counts per label, positive/negative rates, average
            score, threat-level distribution and the 10 most frequent
            matched keywords.
        """
        results = [
            self.analyze_sentiment(comment['text'])
            for comment in (comments or [])
            if isinstance(comment, dict) and 'text' in comment
        ]

        if not results:
            return self._empty_statistics()

        sentiment_counts = {"positive": 0, "negative": 0, "neutral": 0}
        total_score = 0.0
        all_keywords = []
        threat_levels = {}

        for result in results:
            sentiment = result.get("sentiment", "neutral")
            sentiment_counts[sentiment] += 1
            total_score += result.get("score", self.NEUTRAL_SCORE)
            all_keywords.extend(result.get("keywords", []))

            threat = result.get("threat_level", 0)
            threat_levels[threat] = threat_levels.get(threat, 0) + 1

        # Ten most frequent matched keywords across all comments.
        top_keywords = Counter(all_keywords).most_common(10)

        return {
            "total": len(results),
            "positive": sentiment_counts["positive"],
            "negative": sentiment_counts["negative"],
            "neutral": sentiment_counts["neutral"],
            "positive_rate": sentiment_counts["positive"] / len(results),
            "negative_rate": sentiment_counts["negative"] / len(results),
            "avg_score": total_score / len(results),
            "threat_distribution": threat_levels,
            "top_keywords": top_keywords
        }