"""
情感强度细化分析脚本 - 基于专用情感词典的高级情感分析
支持情感强度分级、程度副词识别、否定处理、专业术语分析
"""

import json
import os
import jieba
import re
import numpy as np
from collections import defaultdict, Counter
from datetime import datetime
import pandas as pd

class AdvancedSentimentAnalyzer:
    """Dictionary-based sentiment analyzer with intensity grading.

    Combines a domain sentiment lexicon with degree-adverb amplification,
    negation handling and router-specific aspect analysis to produce a
    nine-level sentiment-intensity classification per comment.
    """

    def __init__(self, sentiment_dict_path):
        """Initialize the analyzer.

        Args:
            sentiment_dict_path: Path to a JSON lexicon expected to contain
                keys such as "正面词汇"/"负面词汇" (phrase -> score),
                "程度副词" (adverb -> multiplier) and "否定词" (list of
                negation words). Loading failures yield an empty dict.
        """
        self.sentiment_dict = self._load_sentiment_dict(sentiment_dict_path)
        # Intensity labels mapped to half-open score intervals [lo, hi);
        # the closed endpoints +/-1.0 are handled in _get_emotion_level.
        self.emotion_levels = {
            "极强正面": (0.8, 1.0),
            "强正面": (0.6, 0.8),
            "中等正面": (0.4, 0.6),
            "弱正面": (0.2, 0.4),
            "中性": (-0.2, 0.2),
            "弱负面": (-0.4, -0.2),
            "中等负面": (-0.6, -0.4),
            "强负面": (-0.8, -0.6),
            "极强负面": (-1.0, -0.8)
        }

        # Router-domain aspect -> keyword list, used for aspect-level analysis.
        self.router_aspects = {
            "性能": ["信号", "网速", "速度", "延迟", "吞吐量", "带宽", "传输", "性能"],
            "稳定性": ["稳定", "掉线", "断线", "连接", "重启", "死机", "故障"],
            "覆盖": ["覆盖", "穿墙", "距离", "范围", "死角", "盲区"],
            "易用性": ["安装", "配置", "设置", "操作", "界面", "说明书", "使用"],
            "外观": ["外观", "设计", "颜色", "体积", "重量", "材质", "散热"],
            "价格": ["价格", "性价比", "便宜", "贵", "值得", "划算", "实惠"]
        }

    def _load_sentiment_dict(self, dict_path):
        """Load the JSON sentiment lexicon; return {} on any load error."""
        try:
            with open(dict_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        # OSError covers missing/unreadable files; ValueError covers
        # json.JSONDecodeError (its subclass) for malformed content.
        except (OSError, ValueError) as e:
            print(f"❌ 加载情感词典失败: {e}")
            return {}

    def _preprocess_text(self, text):
        """Strip everything except CJK, ASCII letters/digits and common punctuation."""
        if not text:
            return ""

        text = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9，。！？、；：""''（）【】]', '', text)
        return text.strip()

    def _detect_negation(self, words, pos):
        """Return True if the token at *pos* is within the scope of a negator.

        A negation word at index i covers positions i+1 .. i+3, so it is
        enough to look back at most 3 tokens instead of materializing the
        full scope list on every call.
        """
        negation_words = self.sentiment_dict.get("否定词", [])
        for i in range(max(0, pos - 3), pos):
            if words[i] in negation_words:
                return True
        return False

    def _calculate_intensity_modifier(self, words, pos):
        """Multiplier from degree adverbs near *pos* (2 before, 1 after), capped at 3.0."""
        degree_words = self.sentiment_dict.get("程度副词", {})
        modifier = 1.0

        # Degree adverbs usually precede the sentiment word: look back 2 tokens.
        for i in range(max(0, pos - 2), pos):
            if i < len(words) and words[i] in degree_words:
                modifier *= degree_words[words[i]]

        # Occasionally the adverb follows ("好得很"): look ahead 1 token.
        if pos + 1 < len(words) and words[pos + 1] in degree_words:
            modifier *= degree_words[words[pos + 1]]

        return min(3.0, modifier)  # cap runaway stacked modifiers

    def _extract_sentiment_phrases(self, text):
        """Find all lexicon phrases occurring in *text*, longest first.

        Returns a list of dicts with keys "phrase", "type" ("positive" /
        "negative"), "base_score" and "length".
        """
        found_phrases = []
        lexicons = (
            ("positive", self.sentiment_dict.get("正面词汇", {})),
            ("negative", self.sentiment_dict.get("负面词汇", {})),
        )

        for polarity, lexicon in lexicons:
            for phrase, score in lexicon.items():
                if phrase in text:
                    found_phrases.append({
                        "phrase": phrase,
                        "type": polarity,
                        "base_score": score,
                        "length": len(phrase)
                    })

        # Longer phrases first so the most specific match claims its tokens.
        found_phrases.sort(key=lambda x: x["length"], reverse=True)
        return found_phrases

    def _analyze_aspect_sentiment(self, text):
        """Average sentence-level sentiment per router aspect mentioned in *text*.

        NOTE: a clause containing several keywords of the same aspect is
        scored once per keyword, weighting multi-keyword clauses heavier
        (preserved from the original design).
        """
        aspect_sentiments = {}
        # Split into clauses once; reused for every aspect keyword.
        sentences = re.split(r'[。！？，；]', text)

        for aspect, keywords in self.router_aspects.items():
            aspect_score = 0
            aspect_count = 0

            for keyword in keywords:
                if keyword in text:
                    for sentence in sentences:
                        if keyword in sentence:
                            sentence_score = self._calculate_sentence_sentiment(sentence)
                            aspect_score += sentence_score
                            aspect_count += 1

            if aspect_count > 0:
                aspect_sentiments[aspect] = aspect_score / aspect_count

        return aspect_sentiments

    def _calculate_sentence_sentiment(self, sentence):
        """Average score of lexicon phrases in *sentence* after negation/degree adjustment.

        Longest phrases claim their token span first; shorter overlapping
        phrases are skipped so the same tokens are not scored twice.
        """
        if not sentence:
            return 0.0

        words = list(jieba.cut(sentence))
        total_score = 0.0
        match_count = 0
        used_positions = set()

        for phrase_info in self._extract_sentiment_phrases(sentence):
            phrase_words = list(jieba.cut(phrase_info["phrase"]))
            for i in range(len(words) - len(phrase_words) + 1):
                if words[i:i + len(phrase_words)] != phrase_words:
                    continue
                span = range(i, i + len(phrase_words))
                # Fix: honor used_positions (previously collected but never
                # read) so overlapping phrases are not double-counted.
                if any(j in used_positions for j in span):
                    continue

                is_negated = any(self._detect_negation(words, j) for j in span)
                modifier = self._calculate_intensity_modifier(words, i)

                base_score = phrase_info["base_score"]
                if is_negated:
                    base_score *= -0.8  # negation dampens and flips, not a full reversal

                total_score += base_score * modifier
                match_count += 1
                used_positions.update(span)
                break  # score only the first occurrence of each phrase

        return total_score / max(1, match_count)

    def analyze_text_advanced(self, text):
        """Run the full analysis pipeline on one comment.

        Returns a dict with the overall score, intensity level, confidence,
        matched phrases, per-aspect sentiment and basic text statistics.
        Empty or fully-stripped input yields the neutral empty result.
        """
        if not text:
            return self._create_empty_result()

        processed_text = self._preprocess_text(text)
        if not processed_text:
            return self._create_empty_result()

        # Overall score over the whole (cleaned) text.
        overall_score = self._calculate_sentence_sentiment(processed_text)

        # Map the score onto the nine-level intensity scale.
        emotion_level = self._get_emotion_level(overall_score)

        # Phrase evidence backing the score.
        sentiment_phrases = self._extract_sentiment_phrases(processed_text)

        # Aspect-level (performance / stability / ...) breakdown.
        aspect_sentiments = self._analyze_aspect_sentiment(processed_text)

        # Confidence from phrase coverage and strength.
        confidence = self._calculate_confidence(sentiment_phrases, len(processed_text))

        return {
            "original_text": text,
            "processed_text": processed_text,
            "overall_score": round(overall_score, 4),
            "emotion_level": emotion_level,
            "confidence": round(confidence, 4),
            "sentiment_phrases": sentiment_phrases,
            "aspect_sentiments": aspect_sentiments,
            "text_length": len(processed_text),
            "phrase_count": len(sentiment_phrases)
        }

    def _get_emotion_level(self, score):
        """Map a score onto an intensity label.

        Fix: scores of exactly +/-1.0 — and scores pushed outside [-1, 1]
        by the intensity multiplier — previously fell through every
        half-open bracket and were mislabeled "中性". The score is now
        clamped to [-1, 1] and the extreme brackets accept their closed
        upper endpoint.
        """
        clamped = max(-1.0, min(1.0, score))
        for level, (min_score, max_score) in self.emotion_levels.items():
            if min_score <= clamped < max_score or (clamped == max_score == 1.0):
                return level
        return "中性"

    def _calculate_confidence(self, phrases, text_length):
        """Confidence in [0, 1] from phrase coverage ratio and mean phrase strength."""
        if not phrases or text_length == 0:
            return 0.0

        phrase_coverage = sum(len(p["phrase"]) for p in phrases) / text_length
        phrase_strength = np.mean([abs(p["base_score"]) for p in phrases])

        confidence = min(1.0, phrase_coverage * 2 + phrase_strength * 0.5)
        return confidence

    def _create_empty_result(self):
        """Neutral result for empty/unanalyzable input (same schema as a real result)."""
        return {
            "original_text": "",
            "processed_text": "",
            "overall_score": 0.0,
            "emotion_level": "中性",
            "confidence": 0.0,
            "sentiment_phrases": [],
            "aspect_sentiments": {},
            "text_length": 0,
            "phrase_count": 0
        }

    def batch_analyze(self, comments_data):
        """Analyze a list of comment dicts, returning enriched copies.

        Each returned dict is a shallow copy of the input comment with the
        full analysis under "advanced_sentiment" plus flattened
        convenience fields. Progress is printed every 100 comments.
        """
        print(f"🔍 开始高级情感分析 {len(comments_data)} 条评论...")

        results = []
        for i, comment in enumerate(comments_data):
            if i % 100 == 0:
                print(f"  进度: {i}/{len(comments_data)}")

            content = comment.get("content", "")
            analysis_result = self.analyze_text_advanced(content)

            # Merge the original comment fields with the analysis output.
            enhanced_comment = comment.copy()
            enhanced_comment.update({
                "advanced_sentiment": analysis_result,
                "emotion_intensity": analysis_result["emotion_level"],
                "advanced_score": analysis_result["overall_score"],
                "analysis_confidence": analysis_result["confidence"]
            })

            results.append(enhanced_comment)

        return results

class SentimentReportGenerator:
    """Builds a comprehensive JSON report from analyzed comment data.

    Expects each item to carry the "advanced_sentiment" dict produced by
    AdvancedSentimentAnalyzer.batch_analyze.
    """

    def __init__(self):
        # Reserved for incremental section assembly; currently unused.
        self.report_sections = []

    def generate_comprehensive_report(self, analyzed_data, output_path):
        """Compute all report sections, save them as JSON and print a summary.

        Args:
            analyzed_data: list of enriched comment dicts.
            output_path: destination path for the JSON report.

        Returns:
            The full report dict.
        """
        print("📊 生成综合情感分析报告...")

        # 1. Basic statistics
        basic_stats = self._calculate_basic_statistics(analyzed_data)

        # 2. Intensity distribution
        intensity_distribution = self._analyze_intensity_distribution(analyzed_data)

        # 3. Aspect-level sentiment
        aspect_analysis = self._analyze_aspect_sentiments(analyzed_data)

        # 4. High-quality comment identification
        quality_comments = self._identify_quality_comments(analyzed_data)

        # 5. Sentiment-phrase usage statistics
        phrase_statistics = self._analyze_phrase_usage(analyzed_data)

        # 6. Temporal trends (placeholder until timestamps are available)
        trend_analysis = self._analyze_temporal_trends(analyzed_data)

        report = {
            "报告信息": {
                "生成时间": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                "分析版本": "高级情感强度分析 v2.0",
                "数据规模": len(analyzed_data),
                "分析维度": ["整体情感", "情感强度", "方面分析", "短语识别", "质量评估"]
            },
            "基础统计": basic_stats,
            "情感强度分布": intensity_distribution,
            "方面情感分析": aspect_analysis,
            "高质量评论": quality_comments,
            "情感短语统计": phrase_statistics,
            "趋势分析": trend_analysis,
            "数据详情": self._generate_detailed_data(analyzed_data)
        }

        self._save_report(report, output_path)
        self._print_report_summary(report)

        return report

    def _calculate_basic_statistics(self, data):
        """Score/confidence summary statistics for the whole dataset."""
        scores = [item["advanced_sentiment"]["overall_score"] for item in data]
        confidences = [item["advanced_sentiment"]["confidence"] for item in data]
        # Guard against empty input so max()/np.mean() cannot blow up.
        scores = scores or [0.0]
        confidences = confidences or [0.0]

        return {
            "总评论数": len(data),
            "有效分析数": len([s for s in scores if s != 0]),
            "平均情感得分": round(np.mean(scores), 4),
            "情感得分标准差": round(np.std(scores), 4),
            "平均置信度": round(np.mean(confidences), 4),
            "高置信度评论数": len([c for c in confidences if c >= 0.7]),
            "得分分布": {
                "最高分": round(max(scores), 4),
                "最低分": round(min(scores), 4),
                "中位数": round(np.median(scores), 4),
                "75分位数": round(np.percentile(scores, 75), 4),
                "25分位数": round(np.percentile(scores, 25), 4)
            }
        }

    def _analyze_intensity_distribution(self, data):
        """Count/score breakdown per intensity level plus polarity ratios."""
        intensity_counts = Counter()
        intensity_scores = defaultdict(list)

        for item in data:
            level = item["advanced_sentiment"]["emotion_level"]
            score = item["advanced_sentiment"]["overall_score"]

            intensity_counts[level] += 1
            intensity_scores[level].append(score)

        total = len(data)
        distribution = {}

        for level, count in intensity_counts.items():
            avg_score = np.mean(intensity_scores[level]) if intensity_scores[level] else 0
            distribution[level] = {
                "数量": count,
                "比例": round(count / total * 100, 2),
                "平均得分": round(avg_score, 4),
                "得分范围": f"{min(intensity_scores[level]):.3f} ~ {max(intensity_scores[level]):.3f}" if intensity_scores[level] else "无"
            }

        return {
            "分布详情": distribution,
            "强度排序": sorted(intensity_counts.items(), key=lambda x: x[1], reverse=True),
            # Polarity ratios rely on the level labels containing 正面/负面.
            "正面情感比例": round(sum(count for level, count in intensity_counts.items()
                                 if "正面" in level) / total * 100, 2),
            "负面情感比例": round(sum(count for level, count in intensity_counts.items()
                                 if "负面" in level) / total * 100, 2)
        }

    def _analyze_aspect_sentiments(self, data):
        """Aggregate per-aspect scores across all comments."""
        aspect_scores = defaultdict(list)
        aspect_mentions = defaultdict(int)

        for item in data:
            aspects = item["advanced_sentiment"]["aspect_sentiments"]
            for aspect, score in aspects.items():
                aspect_scores[aspect].append(score)
                aspect_mentions[aspect] += 1

        aspect_analysis = {}
        for aspect in aspect_scores:
            scores = aspect_scores[aspect]
            aspect_analysis[aspect] = {
                "提及次数": aspect_mentions[aspect],
                "平均得分": round(np.mean(scores), 4),
                "得分标准差": round(np.std(scores), 4),
                "正面提及": len([s for s in scores if s > 0.2]),
                "负面提及": len([s for s in scores if s < -0.2]),
                "情感倾向": "正面" if np.mean(scores) > 0.2 else ("负面" if np.mean(scores) < -0.2 else "中性")
            }

        # Most-mentioned aspects first.
        sorted_aspects = sorted(aspect_analysis.items(),
                              key=lambda x: x[1]["提及次数"], reverse=True)

        return {
            "方面详情": dict(sorted_aspects),
            "最受关注方面": sorted_aspects[0][0] if sorted_aspects else "无",
            "最正面方面": max(aspect_analysis.items(),
                           key=lambda x: x[1]["平均得分"])[0] if aspect_analysis else "无",
            "最负面方面": min(aspect_analysis.items(),
                           key=lambda x: x[1]["平均得分"])[0] if aspect_analysis else "无"
        }

    def _identify_quality_comments(self, data):
        """Select comments that are confident, phrase-rich, long and clearly polarized."""
        quality_comments = []

        for item in data:
            sentiment = item["advanced_sentiment"]

            # Quality bar: high confidence + >=2 phrases + enough length
            # + a clearly non-neutral score.
            if (sentiment["confidence"] >= 0.6 and
                sentiment["phrase_count"] >= 2 and
                sentiment["text_length"] >= 10 and
                abs(sentiment["overall_score"]) >= 0.4):

                quality_comments.append({
                    "内容": sentiment["original_text"][:100] + "..." if len(sentiment["original_text"]) > 100 else sentiment["original_text"],
                    "情感等级": sentiment["emotion_level"],
                    "得分": sentiment["overall_score"],
                    "置信度": sentiment["confidence"],
                    "短语数": sentiment["phrase_count"],
                    "识别短语": [p["phrase"] for p in sentiment["sentiment_phrases"][:3]]
                })

        # Most confident first.
        quality_comments.sort(key=lambda x: x["置信度"], reverse=True)

        return {
            "高质量评论数": len(quality_comments),
            "质量比例": round(len(quality_comments) / len(data) * 100, 2),
            "代表性评论": quality_comments[:10],  # top 10 only
            "质量分布": {
                "极高质量": len([c for c in quality_comments if c["置信度"] >= 0.9]),
                "高质量": len([c for c in quality_comments if 0.7 <= c["置信度"] < 0.9]),
                "中等质量": len([c for c in quality_comments if 0.6 <= c["置信度"] < 0.7])
            }
        }

    def _analyze_phrase_usage(self, data):
        """Usage counts and polarity for the sentiment phrases matched across comments."""
        phrase_counter = Counter()
        phrase_scores = defaultdict(list)

        for item in data:
            phrases = item["advanced_sentiment"]["sentiment_phrases"]
            for phrase_info in phrases:
                phrase = phrase_info["phrase"]
                phrase_counter[phrase] += 1
                phrase_scores[phrase].append(phrase_info["base_score"])

        # Top 20 most frequent phrases.
        top_phrases = phrase_counter.most_common(20)

        phrase_analysis = {}
        for phrase, count in top_phrases:
            scores = phrase_scores[phrase]
            phrase_analysis[phrase] = {
                "使用次数": count,
                "平均强度": round(np.mean([abs(s) for s in scores]), 4),
                "情感类型": "正面" if np.mean(scores) > 0 else "负面"
            }

        return {
            "总短语类型数": len(phrase_counter),
            "总短语使用次数": sum(phrase_counter.values()),
            "平均每评论短语数": round(sum(phrase_counter.values()) / len(data), 2),
            "热门短语": phrase_analysis,
            "正面短语数": len([p for p, s in phrase_scores.items() if np.mean(s) > 0]),
            "负面短语数": len([p for p, s in phrase_scores.items() if np.mean(s) < 0])
        }

    def _analyze_temporal_trends(self, data):
        """Placeholder trend section; real analysis needs timestamp fields on comments."""
        return {
            "说明": "时间趋势分析需要评论时间数据",
            # Crude probe: any key containing the substring "time".
            "数据状态": "时间字段缺失" if not any("time" in str(item.keys()) for item in data) else "可分析",
            "建议": "添加评论时间字段以进行趋势分析"
        }

    def _generate_detailed_data(self, data):
        """Data-completeness and coverage summary."""
        return {
            "数据完整性": {
                "有内容评论": len([item for item in data if item.get("content", "").strip()]),
                "空评论": len([item for item in data if not item.get("content", "").strip()]),
                "成功分析": len([item for item in data if item["advanced_sentiment"]["overall_score"] != 0])
            },
            "分析覆盖率": round(len([item for item in data if item["advanced_sentiment"]["phrase_count"] > 0]) / len(data) * 100, 2),
            "数据质量评分": self._calculate_data_quality_score(data)
        }

    def _calculate_data_quality_score(self, data):
        """0-100 quality score: 40% weight on content length, 60% on confidence."""
        valid_content = len([item for item in data if len(item.get("content", "").strip()) >= 5])
        high_confidence = len([item for item in data if item["advanced_sentiment"]["confidence"] >= 0.5])

        content_score = (valid_content / len(data)) * 40
        confidence_score = (high_confidence / len(data)) * 60

        return round(content_score + confidence_score, 2)

    def _save_report(self, report, output_path):
        """Write the report as UTF-8 JSON, creating parent directories as needed."""
        try:
            directory = os.path.dirname(output_path)
            # Fix: os.makedirs("") raises FileNotFoundError when the path
            # has no directory component (bare filename in cwd).
            if directory:
                os.makedirs(directory, exist_ok=True)

            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(report, f, ensure_ascii=False, indent=2)

            print(f"✅ 综合报告已保存: {output_path}")

        except (OSError, TypeError, ValueError) as e:
            # Best-effort: log and continue so one bad path does not abort the run.
            print(f"❌ 保存报告失败: {e}")

    def _print_report_summary(self, report):
        """Print a human-readable digest of the main report sections."""
        print(f"\n{'='*60}")
        print(f"📊 情感分析综合报告摘要")
        print(f"{'='*60}")

        basic = report["基础统计"]
        intensity = report["情感强度分布"]
        aspect = report["方面情感分析"]
        quality = report["高质量评论"]

        print(f"📈 基础指标:")
        print(f"   总评论数: {basic['总评论数']}")
        print(f"   平均情感得分: {basic['平均情感得分']}")
        print(f"   平均置信度: {basic['平均置信度']}")
        print(f"   高置信度比例: {basic['高置信度评论数'] / basic['总评论数'] * 100:.1f}%")

        print(f"\n🎯 情感分布:")
        print(f"   正面情感: {intensity['正面情感比例']}%")
        print(f"   负面情感: {intensity['负面情感比例']}%")
        print(f"   中性情感: {100 - intensity['正面情感比例'] - intensity['负面情感比例']:.1f}%")

        print(f"\n🔍 方面分析:")
        if aspect["方面详情"]:
            top_aspects = list(aspect["方面详情"].items())[:3]
            for aspect_name, aspect_data in top_aspects:
                print(f"   {aspect_name}: {aspect_data['平均得分']:.3f} "
                      f"({aspect_data['提及次数']}次提及)")

        print(f"\n💎 质量评估:")
        print(f"   高质量评论: {quality['高质量评论数']} 条 ({quality['质量比例']}%)")
        print(f"   数据质量评分: {report['数据详情']['数据质量评分']}/100")

def main():
    """Entry point.

    Runs the advanced sentiment analysis over every ``*_sentiment.json``
    file in ``results/`` and writes, for each, an enhanced data file and a
    comprehensive report into ``advanced_analysis/``.
    """
    print("🚀 启动高级情感强度分析...")

    # Paths (relative to the working directory).
    sentiment_dict_path = "data/dictionaries/optimized_router_sentiment_dict.json"
    results_dir = "results"
    output_dir = "advanced_analysis"

    os.makedirs(output_dir, exist_ok=True)

    analyzer = AdvancedSentimentAnalyzer(sentiment_dict_path)
    report_generator = SentimentReportGenerator()

    # Guard: os.listdir would raise if the results directory is missing.
    if not os.path.isdir(results_dir):
        print(f"❌ 结果目录不存在: {results_dir}")
        return

    # Process every per-product sentiment result file.
    for filename in os.listdir(results_dir):
        if not filename.endswith('_sentiment.json'):
            continue

        print(f"\n{'='*60}")
        # Fix: report the actual file being processed (message previously
        # printed a hard-coded "(unknown)" placeholder).
        print(f"📁 处理文件: {filename}")

        input_path = os.path.join(results_dir, filename)

        try:
            with open(input_path, 'r', encoding='utf-8') as f:
                product_data = json.load(f)

            print(f"🔍 产品: {product_data.get('product_name', '未知')}")
            comments = product_data.get("comments", [])

            if not comments:
                print("⚠️ 无评论数据，跳过")
                continue

            # Run the per-comment advanced analysis.
            enhanced_comments = analyzer.batch_analyze(comments)

            # Attach enriched comments and run metadata to the product record.
            product_data["comments"] = enhanced_comments
            product_data["analysis_info"] = {
                "analysis_type": "高级情感强度分析",
                "analysis_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                "enhancement_features": ["情感强度分级", "方面分析", "短语识别", "置信度评估"]
            }

            # Comprehensive report alongside the enhanced data.
            report_filename = filename.replace('_sentiment.json', '_advanced_report.json')
            report_path = os.path.join(output_dir, report_filename)

            report_generator.generate_comprehensive_report(
                enhanced_comments, report_path)

            enhanced_filename = filename.replace('_sentiment.json', '_enhanced.json')
            enhanced_path = os.path.join(output_dir, enhanced_filename)

            with open(enhanced_path, 'w', encoding='utf-8') as f:
                json.dump(product_data, f, ensure_ascii=False, indent=2)

            print(f"✅ 高级分析完成，文件已保存:")
            print(f"   📊 分析报告: {report_path}")
            print(f"   📈 增强数据: {enhanced_path}")

        except Exception as e:
            # Boundary handler: log which file failed and keep going so one
            # bad file does not abort the whole batch.
            print(f"❌ 处理 {filename} 失败: {e}")
            continue

    print(f"\n🎉 所有文件处理完成！结果保存在 {output_dir} 目录")


if __name__ == "__main__":
    main()