"""
基于情感分析结果和关键词构建路由器领域专用情感词典 - 优化版
去重、交叉验证、智能筛选
"""

import json
import os
import jieba
import pandas as pd
from collections import Counter, defaultdict
import numpy as np
import re

class OptimizedSentimentDictionaryBuilder:
    """Build a router-domain sentiment dictionary from sentiment-analysis
    results and keyword-extraction output.

    Addresses two quality problems of naive dictionaries — duplicated
    entries and neutral technical terms misclassified as sentiment words —
    by de-duplicating, filtering against a neutral-term list, and
    cross-validating candidates on frequency, confidence, score
    consistency and usage context.
    """

    def __init__(self):
        # Neutral technical vocabulary: product/domain nouns that must
        # never be classified as sentiment-bearing words on their own.
        self.neutral_technical_terms = {
            "路由器", "路由", "wifi", "网络", "信号", "速度", "配置", "安装", "设置",
            "产品", "设备", "功能", "性能", "质量", "外观", "设计", "价格", "上网",
            "品牌", "型号", "版本", "系统", "软件", "硬件", "天线", "端口", "联网",
            "频段", "带宽", "传输", "连接", "覆盖", "穿墙", "散热", "材质", "手机",
            "重量", "体积", "颜色", "包装", "说明书", "保修", "售后", "客服", "电脑",
            "使用", "购买", "家里", "房间", "楼层", "距离", "位置", "地方", "时间",
            "视频", "游戏", "下载", "播放", "观看", "听歌", "看剧", "直播", "办公"
        }

        # Sentiment-modifier substrings used to recognize genuine emotional
        # expressions (a candidate keyword must embed one of them).
        self.emotion_patterns = {
            "positive": [
                "很", "非常", "超级", "特别", "相当", "挺", "蛮", "真的", "确实",
                "好", "棒", "赞", "强", "快", "稳", "广", "低", "高", "简单", "容易",
                "便宜", "实惠", "划算", "值得", "满意", "喜欢", "推荐"
            ],
            "negative": [
                "很", "非常", "超级", "特别", "相当", "太", "老", "总", "经常",
                "差", "烂", "慢", "弱", "不稳", "掉线", "断网", "卡", "贵", "坑",
                "难", "复杂", "麻烦", "不满", "失望", "后悔", "讨厌"
            ]
        }

    def load_keywords_data(self, keywords_json_path: str) -> dict:
        """Load the keyword-extraction summary JSON.

        Args:
            keywords_json_path: Path to a JSON file mapping CSV file names
                to lists of extracted keywords.

        Returns:
            The parsed mapping, or an empty dict if the file is missing
            or unreadable.
        """
        print(f"🔍 加载关键词文件: {keywords_json_path}")

        if not os.path.exists(keywords_json_path):
            print(f"❌ 文件不存在: {keywords_json_path}")
            return {}

        try:
            with open(keywords_json_path, 'r', encoding='utf-8') as f:
                content = f.read().strip()

            # Strip a UTF-8 BOM (left by some Windows editors) that would
            # otherwise make json.loads fail.
            if content.startswith('\ufeff'):
                content = content[1:]

            keywords_data = json.loads(content)
            print(f"✓ 成功加载 {len(keywords_data)} 个文件的关键词")
            return keywords_data

        except Exception as e:
            print(f"❌ 加载失败: {e}")
            return {}

    def analyze_keyword_sentiment_context(self, sentiment_results_dir, keywords_data):
        """Collect per-keyword sentiment statistics from analysis results.

        Scans every ``*_sentiment.json`` file under *sentiment_results_dir*
        and, for each comment whose confidence is at least 0.2, credits the
        comment's sentiment to every keyword (of the matching CSV file)
        that occurs in the comment text.

        Args:
            sentiment_results_dir: Directory of per-file sentiment results.
            keywords_data: Mapping of CSV file name -> keyword list, as
                returned by :meth:`load_keywords_data`.

        Returns:
            Mapping of keyword -> statistics dict (counts per sentiment,
            score lists, accumulated confidence and sample contexts).
        """
        print(f"📊 分析关键词情感分布...")

        if not os.path.exists(sentiment_results_dir):
            print(f"❌ 目录不存在: {sentiment_results_dir}")
            return {}

        sentiment_files = [f for f in os.listdir(sentiment_results_dir) if f.endswith('_sentiment.json')]
        print(f"📁 找到 {len(sentiment_files)} 个情感分析文件")

        keyword_sentiment_stats = defaultdict(lambda: {
            "positive": 0, "negative": 0, "neutral": 0,
            "positive_scores": [], "negative_scores": [], "neutral_scores": [],
            "total_confidence": 0, "count": 0, "contexts": []
        })

        for filename in sentiment_files:
            file_path = os.path.join(sentiment_results_dir, filename)

            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)

                # Sentiment files are named "<csv name>_sentiment.json";
                # recover the CSV name to look up its keyword list.
                csv_filename = filename.replace('_sentiment.json', '.csv')
                file_keywords = keywords_data.get(csv_filename, [])

                if not file_keywords:
                    continue

                for comment in data.get("comments", []):
                    content = comment.get("content", "")
                    sentiment_label = comment.get("sentiment_label", "中性")
                    sentiment_score = comment.get("sentiment_score", 0.5)
                    confidence = comment.get("confidence", 0)

                    # Map Chinese labels to internal keys; unknown labels
                    # fall back to neutral.
                    sentiment_mapping = {"正面": "positive", "负面": "negative", "中性": "neutral"}
                    sentiment = sentiment_mapping.get(sentiment_label, "neutral")

                    # Ignore very low-confidence predictions.
                    if confidence >= 0.2:
                        for keyword in file_keywords:
                            if keyword in content:
                                stats = keyword_sentiment_stats[keyword]
                                stats[sentiment] += 1
                                stats[f"{sentiment}_scores"].append(sentiment_score)
                                stats["total_confidence"] += confidence
                                stats["count"] += 1

                                # Keep up to 5 sample contexts per keyword
                                # for later quality validation.
                                if len(stats["contexts"]) < 5:
                                    stats["contexts"].append({
                                        "content": content[:100],
                                        "sentiment": sentiment,
                                        "score": sentiment_score
                                    })

            except Exception as e:
                # Bug fix: report the file that failed instead of the
                # literal placeholder "(unknown)".
                print(f"⚠️ 处理 {filename} 失败: {e}")
                continue

        print(f"✓ 分析完成，找到 {len(keyword_sentiment_stats)} 个有效关键词")
        return keyword_sentiment_stats

    def calculate_advanced_keyword_scores(self, keyword_sentiment_stats):
        """Turn raw per-keyword statistics into weighted sentiment scores.

        Keywords observed fewer than 3 times are dropped.  The direction
        combines the positive/negative count ratio (60%) with the bias of
        the average model score around 0.5 (40%); the result is damped by
        confidence and frequency weights so rare or uncertain keywords
        score closer to zero.

        Returns:
            Mapping of keyword -> score record (score, counts, averages,
            consistency and sample contexts).
        """
        print(f"🧮 计算关键词情感得分...")

        keyword_scores = {}

        for keyword, stats in keyword_sentiment_stats.items():
            total = stats["positive"] + stats["negative"] + stats["neutral"]

            if total >= 3:
                positive_ratio = stats["positive"] / total
                negative_ratio = stats["negative"] / total
                avg_confidence = stats["total_confidence"] / stats["count"] if stats["count"] > 0 else 0

                all_scores = (stats["positive_scores"] + stats["negative_scores"] + stats["neutral_scores"])

                if all_scores:
                    avg_sentiment_score = np.mean(all_scores)
                    score_std = np.std(all_scores) if len(all_scores) > 1 else 0

                    # Sentiment direction: label ratio blended with the
                    # model score mapped from [0, 1] to [-1, 1].
                    ratio_score = positive_ratio - negative_ratio
                    score_bias = (avg_sentiment_score - 0.5) * 2
                    combined_score = (ratio_score * 0.6 + score_bias * 0.4)

                    # Damping weights; high score variance lowers the
                    # consistency metric used later during validation.
                    confidence_weight = min(1.0, avg_confidence * 3)
                    frequency_weight = min(1.0, total / 10)
                    consistency = max(0, 1 - score_std) if score_std < 1 else 0

                    final_score = combined_score * confidence_weight * frequency_weight

                    keyword_scores[keyword] = {
                        "score": final_score,
                        "frequency": total,
                        "positive_count": stats["positive"],
                        "negative_count": stats["negative"],
                        "neutral_count": stats["neutral"],
                        "avg_confidence": avg_confidence,
                        "avg_sentiment_score": avg_sentiment_score,
                        "score_std": score_std,
                        "consistency": consistency,
                        "contexts": stats["contexts"]
                    }

        print(f"✓ 计算完成 {len(keyword_scores)} 个关键词")
        return keyword_scores

    def _is_genuine_sentiment_word(self, keyword: str) -> bool:
        """Return True if *keyword* plausibly expresses sentiment.

        Rejects neutral technical terms, pure digits, single characters
        and function words; accepts words that embed a sentiment modifier
        or a polarity indicator.
        """
        # Neutral technical vocabulary is never a sentiment word.
        if keyword in self.neutral_technical_terms:
            return False

        # Pure numbers and single characters carry no usable sentiment.
        if keyword.isdigit() or len(keyword) == 1:
            return False

        # Function words / stopwords.
        meaningless = {"的", "了", "是", "在", "有", "和", "就", "都", "要", "也", "会"}
        if keyword in meaningless:
            return False

        # Accept when a known sentiment modifier is embedded inside a
        # longer expression (e.g. "很" inside "信号很强").
        for emotion_type, patterns in self.emotion_patterns.items():
            for pattern in patterns:
                if pattern in keyword and len(keyword) > len(pattern):
                    return True

        # Accept compound expressions containing a polarity indicator.
        emotion_indicators = ["好", "差", "快", "慢", "强", "弱", "高", "低", "简单", "复杂", "容易", "困难"]
        for indicator in emotion_indicators:
            if indicator in keyword:
                return True

        return False

    def _cross_validate_sentiment_word(self, keyword, stats) -> bool:
        """Validate a candidate sentiment word against quality thresholds.

        A candidate passes only if it clears all six checks: sentiment
        strength, score consistency, frequency, confidence, dominance of
        a single sentiment, and context validation.
        """
        # 1. Sentiment strength.
        if abs(stats["score"]) < 0.4:
            return False

        # 2. Score consistency.
        if stats["consistency"] < 0.3:
            return False

        # 3. Frequency.
        if stats["frequency"] < 5:
            return False

        # 4. Confidence.
        if stats["avg_confidence"] < 0.3:
            return False

        # 5. One sentiment must dominate (at least 60% of mentions).
        total = stats["positive_count"] + stats["negative_count"] + stats["neutral_count"]
        dominant_sentiment = max(stats["positive_count"], stats["negative_count"], stats["neutral_count"])
        if dominant_sentiment / total < 0.6:
            return False

        # 6. Context check.
        if not self._validate_contexts(keyword, stats["contexts"]):
            return False

        return True

    def _validate_contexts(self, keyword, contexts) -> bool:
        """Check that the keyword's sample contexts express sentiment.

        At least half of the stored contexts must contain an explicit
        sentiment marker for the keyword to be trusted.
        """
        if not contexts:
            return False

        emotion_count = 0
        for context in contexts:
            content = context["content"]
            # Explicit sentiment markers to look for in the comment text.
            emotion_words = ["好", "差", "棒", "烂", "喜欢", "讨厌", "满意", "失望", "推荐", "不推荐"]
            if any(word in content for word in emotion_words):
                emotion_count += 1

        return emotion_count >= len(contexts) * 0.5

    def build_optimized_sentiment_dict(self, keyword_scores):
        """Assemble the final dictionary from curated and data-driven words.

        Curated words are inserted first; data-driven keywords are then
        added (strongest first) after de-duplication, neutral-term
        filtering and cross-validation.  A quality report is attached.

        Args:
            keyword_scores: Output of :meth:`calculate_advanced_keyword_scores`.

        Returns:
            The full dictionary structure ready for JSON serialization.
        """
        print(f"\n📚 构建优化版情感词典...")

        sentiment_dict = {
            "正面词汇": {},
            "负面词汇": {},
            "中性词汇": {},
            "程度副词": {
                "很": 1.5, "非常": 1.8, "超级": 2.0, "极其": 2.0, "十分": 1.6,
                "特别": 1.7, "相当": 1.4, "比较": 1.2, "还算": 1.1, "挺": 1.3,
                "蛮": 1.2, "真的": 1.4, "确实": 1.5, "实在": 1.6, "太": 1.9
            },
            "否定词": ["不", "没", "无", "非", "未", "别", "勿", "莫", "并非", "绝非"],
            "转折词": ["但是", "不过", "然而", "虽然", "尽管", "只是", "就是"],
            "关键词统计": {},
            "质量报告": {},
            "构建信息": {
                "版本": "3.0 优化版",
                "特性": ["去重", "交叉验证", "中性词过滤", "上下文验证"],
                "阈值": {
                    "最小频次": 5,
                    "最小置信度": 0.3,
                    "最小情感强度": 0.4,
                    "最小一致性": 0.3
                }
            }
        }

        # Curated base sentiment words (avoids a flood of identical 0.8
        # scores from the data-driven pass).
        curated_positive = {
            "完美": 0.95, "优秀": 0.9, "超级棒": 0.9, "非常好": 0.85,
            "很满意": 0.85, "强烈推荐": 0.9, "值得购买": 0.8, "物超所值": 0.85,
            "惊喜": 0.8, "给力": 0.8, "超值": 0.8, "实用": 0.7
        }

        curated_negative = {
            "垃圾": -0.95, "糟糕": -0.9, "很差": -0.85, "非常失望": -0.85,
            "很不满意": -0.85, "不推荐": -0.8, "不值得": -0.75, "太坑了": -0.9,
            "后悔": -0.8, "讨厌": -0.8, "无语": -0.7, "崩溃": -0.8
        }

        # Router-specific sentiment phrases (full expressions, never bare
        # technical nouns).
        router_positive = {
            "信号很强": 0.9, "网速超快": 0.9, "非常稳定": 0.85, "覆盖很广": 0.8,
            "穿墙能力强": 0.85, "延迟很低": 0.8, "连接稳定": 0.8, "不会掉线": 0.8,
            "安装简单": 0.75, "配置方便": 0.75, "操作简单": 0.7, "界面友好": 0.7,
            "做工精良": 0.8, "散热良好": 0.75, "运行安静": 0.7, "质量可靠": 0.8,
            "性价比高": 0.85, "价格实惠": 0.7, "很划算": 0.8
        }

        router_negative = {
            "信号很弱": -0.9, "网速很慢": -0.85, "经常断线": -0.9, "覆盖太差": -0.8,
            "穿墙很差": -0.8, "延迟很高": -0.8, "连接不稳": -0.85, "老掉线": -0.9,
            "安装困难": -0.75, "配置复杂": -0.75, "操作麻烦": -0.7, "界面难用": -0.7,
            "做工粗糙": -0.8, "发热严重": -0.8, "噪音很大": -0.75, "质量很差": -0.85,
            "性价比低": -0.8, "价格虚高": -0.75, "太贵了": -0.7
        }

        # Seed the dictionary with the curated vocabulary.
        sentiment_dict["正面词汇"].update(curated_positive)
        sentiment_dict["负面词汇"].update(curated_negative)
        sentiment_dict["正面词汇"].update(router_positive)
        sentiment_dict["负面词汇"].update(router_negative)

        # Fold in the data-driven keywords.
        print("🔍 处理统计关键词...")
        data_positive = 0
        data_negative = 0
        neutral_filtered = 0
        quality_rejected = 0

        # Strongest sentiment first, so the best candidates win ties.
        sorted_keywords = sorted(keyword_scores.items(),
                               key=lambda x: abs(x[1]["score"]), reverse=True)

        for keyword, stats in sorted_keywords:
            sentiment_dict["关键词统计"][keyword] = stats

            # De-duplication against curated entries.
            if (keyword in sentiment_dict["正面词汇"] or
                keyword in sentiment_dict["负面词汇"]):
                continue

            # Neutral-term filtering.
            if not self._is_genuine_sentiment_word(keyword):
                sentiment_dict["中性词汇"][keyword] = 0.0
                neutral_filtered += 1
                continue

            # Cross-validation against the quality thresholds.
            if not self._cross_validate_sentiment_word(keyword, stats):
                quality_rejected += 1
                continue

            # Admit the keyword, clamping scores to [-0.9, 0.9] so curated
            # extremes keep the top of the range.
            score = max(-0.9, min(0.9, stats["score"]))
            if score > 0:
                sentiment_dict["正面词汇"][keyword] = score
                data_positive += 1
            else:
                sentiment_dict["负面词汇"][keyword] = score
                data_negative += 1

        # Attach the quality report.
        quality_report = {
            "词典规模": {
                "正面词汇": len(sentiment_dict["正面词汇"]),
                "负面词汇": len(sentiment_dict["负面词汇"]),
                "中性词汇": len(sentiment_dict["中性词汇"])
            },
            "数据驱动词汇": {
                "正面": data_positive,
                "负面": data_negative
            },
            "过滤统计": {
                "中性词过滤": neutral_filtered,
                "质量拒绝": quality_rejected
            },
            "质量指标": self._calculate_quality_metrics(sentiment_dict, keyword_scores)
        }
        sentiment_dict["质量报告"] = quality_report

        print(f"✅ 优化版词典构建完成:")
        print(f"   📊 数据驱动正面词: {data_positive}")
        print(f"   📊 数据驱动负面词: {data_negative}")
        print(f"   ⚪ 中性词过滤: {neutral_filtered}")
        print(f"   ❌ 质量拒绝: {quality_rejected}")
        print(f"   📈 总正面词汇: {len(sentiment_dict['正面词汇'])}")
        print(f"   📉 总负面词汇: {len(sentiment_dict['负面词汇'])}")

        return sentiment_dict

    def _calculate_quality_metrics(self, sentiment_dict, keyword_scores):
        """Compute aggregate quality metrics over data-driven entries.

        Only dictionary words that also appear in *keyword_scores* (i.e.
        came from the data rather than the curated lists) contribute.
        """
        data_driven_words = []
        for word in sentiment_dict["正面词汇"]:
            if word in keyword_scores:
                data_driven_words.append(keyword_scores[word])
        for word in sentiment_dict["负面词汇"]:
            if word in keyword_scores:
                data_driven_words.append(keyword_scores[word])

        if not data_driven_words:
            return {}

        return {
            "平均频次": round(np.mean([w["frequency"] for w in data_driven_words]), 2),
            "平均置信度": round(np.mean([w["avg_confidence"] for w in data_driven_words]), 3),
            "平均一致性": round(np.mean([w["consistency"] for w in data_driven_words]), 3),
            "高质量词汇数": len([w for w in data_driven_words
                              if w["frequency"] >= 10 and w["avg_confidence"] >= 0.5])
        }

    def save_optimized_results(self, sentiment_dict, output_path: str) -> None:
        """Serialize the dictionary to JSON and print a summary report.

        Args:
            sentiment_dict: Output of :meth:`build_optimized_sentiment_dict`.
            output_path: Destination file; parent directories are created
                as needed.
        """
        try:
            # Bug fix: os.makedirs("") raises even with exist_ok=True, so
            # only create directories when the path actually contains one.
            parent_dir = os.path.dirname(output_path)
            if parent_dir:
                os.makedirs(parent_dir, exist_ok=True)

            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(sentiment_dict, f, ensure_ascii=False, indent=2)

            print(f"✓ 优化版情感词典已保存: {output_path}")

            # Detailed statistics from the attached quality report.
            report = sentiment_dict["质量报告"]
            print(f"\n📊 词典质量报告:")
            print(f"   正面词汇: {report['词典规模']['正面词汇']} 个")
            print(f"   负面词汇: {report['词典规模']['负面词汇']} 个")
            print(f"   中性词汇: {report['词典规模']['中性词汇']} 个")
            print(f"   数据驱动词汇: {report['数据驱动词汇']['正面']+report['数据驱动词汇']['负面']} 个")

            if "质量指标" in report and report["质量指标"]:
                metrics = report["质量指标"]
                print(f"   平均频次: {metrics.get('平均频次', 0)}")
                print(f"   平均置信度: {metrics.get('平均置信度', 0)}")
                print(f"   高质量词汇: {metrics.get('高质量词汇数', 0)} 个")

        except Exception as e:
            print(f"❌ 保存失败: {e}")

def main():
    """Entry point: run the full dictionary-construction pipeline."""
    print("🚀 构建情感词典...")

    builder = OptimizedSentimentDictionaryBuilder()

    # I/O locations used by the pipeline.
    keywords_json_path = "keywords_summary.json"
    sentiment_results_dir = "results"
    output_path = "data/dictionaries/optimized_router_sentiment_dict.json"

    # Step 1: keyword lists per source file; abort if unavailable.
    keywords_data = builder.load_keywords_data(keywords_json_path)
    if not keywords_data:
        return

    # Step 2: per-keyword sentiment distribution; abort if empty.
    keyword_sentiment_stats = builder.analyze_keyword_sentiment_context(
        sentiment_results_dir, keywords_data)
    if not keyword_sentiment_stats:
        return

    # Steps 3-5: score the keywords, build the dictionary, persist it.
    keyword_scores = builder.calculate_advanced_keyword_scores(keyword_sentiment_stats)
    sentiment_dict = builder.build_optimized_sentiment_dict(keyword_scores)
    builder.save_optimized_results(sentiment_dict, output_path)

    # Step 6: showcase data-driven words that clear the quality bar
    # (frequency >= 8 and average confidence >= 0.4).
    print(f"\n💎 高质量情感词汇示例:")

    high_quality_words = []
    for dict_key, emotion_type in (("正面词汇", "正面"), ("负面词汇", "负面")):
        for word, score in sentiment_dict[dict_key].items():
            stats = keyword_scores.get(word)
            if stats and stats["frequency"] >= 8 and stats["avg_confidence"] >= 0.4:
                high_quality_words.append((word, score, stats, emotion_type))

    # Most frequent first; show at most ten.
    high_quality_words.sort(key=lambda item: item[2]["frequency"], reverse=True)

    for word, score, stats, emotion_type in high_quality_words[:10]:
        emoji = "😊" if emotion_type == "正面" else "😞"
        print(f"   {emoji} {word}: {score:.3f} "
              f"(频次:{stats['frequency']}, 置信度:{stats['avg_confidence']:.3f})")

if __name__ == "__main__":
    # Run the pipeline only when executed as a script, not on import.
    main()