# -*- coding: utf-8 -*-
"""自然语言处理模块 - 情感分析与评论观点抽取"""
import requests
import json
import os
import time
from typing import List, Dict, Any
import random

# Baidu AIP credentials. Prefer environment variables so real secrets need not
# live in source control; the literals remain as a backward-compatible fallback.
API_KEY = os.getenv("BAIDU_API_KEY", "6g2fDuXYU1BhfX4tB9niYRHt")
SECRET_KEY = os.getenv("BAIDU_SECRET_KEY", "NxCbYpNLD7Ko4y3SnKr9nvZJClICb70v")

class NLPProcessor:
    """NLP processor built on the Baidu AI open platform REST API.

    Provides sentiment analysis, comment-opinion (tag) extraction and keyword
    extraction, plus helpers that run all three over a JSON file of crawled
    notes/comments and shape the results for visualization.

    All network calls are rate-limited (see ``_rate_limit``) and retried with
    exponential backoff when the API reports a QPS limit (error_code 18) or
    the HTTP request itself fails.
    """

    # Seconds to wait for any single HTTP request before it is treated as a
    # failure (and retried by the calling method where applicable). The
    # original code had no timeout, so a hung connection blocked forever.
    REQUEST_TIMEOUT = 10

    def __init__(self, api_key: str, secret_key: str):
        """
        :param api_key: Baidu AIP API key (AK)
        :param secret_key: Baidu AIP secret key (SK)
        """
        self.api_key = api_key
        self.secret_key = secret_key
        self.access_token = None       # OAuth token, fetched lazily and cached
        self.last_request_time = 0
        self.min_request_interval = 0.5  # minimum gap between requests (seconds)

        # Maps the "type" parameter of the comment_tag API to a readable name.
        self.comment_types = {
            1: "酒店",
            2: "KTV",
            3: "丽人",
            4: "美食餐饮",
            5: "旅游",
            6: "健康",
            7: "教育",
            8: "商业",
            9: "房产",
            10: "汽车",
            11: "生活",
            12: "购物",
            13: "3C"
        }

    def get_access_token(self) -> str:
        """Obtain (and cache) the OAuth access token from the AK/SK pair.

        :return: the access token string, or None if it could not be fetched.
        """
        if self.access_token:
            return self.access_token

        url = "https://aip.baidubce.com/oauth/2.0/token"
        params = {"grant_type": "client_credentials", "client_id": self.api_key, "client_secret": self.secret_key}
        try:
            response = requests.post(url, params=params, timeout=self.REQUEST_TIMEOUT)
            response_json = response.json()
            token = response_json.get("access_token")
            # Bug fix: the previous str(...) turned a missing token into the
            # literal string "None", which was then cached and appended to
            # every request URL. Keep the cache empty on failure instead so
            # the next call retries.
            if token:
                self.access_token = str(token)
                return self.access_token
            print(f"获取access_token失败: {response_json}")
            return None
        except Exception as e:
            print(f"获取access_token失败: {e}")
            return None

    def _rate_limit(self):
        """Sleep as needed so consecutive requests stay below the QPS limit."""
        current_time = time.time()
        elapsed = current_time - self.last_request_time
        if elapsed < self.min_request_interval:
            # Add jitter so requests are not strictly periodic.
            wait_time = self.min_request_interval - elapsed + random.uniform(0, 0.5)
            time.sleep(wait_time)
        self.last_request_time = time.time()

    @staticmethod
    def _neutral_sentiment(text: str) -> Dict[str, Any]:
        """Default (neutral) sentiment result used for empty input or failures."""
        return {
            "sentiment": 1,  # neutral
            "confidence": 0.0,
            "positive_prob": 0.33,
            "negative_prob": 0.33,
            "text": text
        }

    def analyze_sentiment(self, text: str, max_retries: int = 3) -> Dict[str, Any]:
        """Classify the sentiment of *text*, retrying on transient errors.

        :param text: text to analyse (truncated to 2000 chars per API limit)
        :param max_retries: maximum number of attempts
        :return: the first item of the API response with "text" added, or a
                 neutral default on empty input / persistent failure
        """
        if not text or len(text.strip()) == 0:
            return self._neutral_sentiment(text)

        retries = 0
        while retries < max_retries:
            try:
                # Respect the QPS limit before every request.
                self._rate_limit()

                url = f"https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?charset=UTF-8&access_token={self.get_access_token()}"

                # The API rejects overlong texts; truncate defensively.
                if len(text) > 2000:
                    text = text[:2000]

                payload = json.dumps({
                    "text": text
                }, ensure_ascii=False)
                headers = {
                    'Content-Type': 'application/json',
                    'Accept': 'application/json'
                }

                response = requests.post(url, headers=headers, data=payload.encode("utf-8"),
                                         timeout=self.REQUEST_TIMEOUT)
                result = response.json()

                if "error_code" in result:
                    if result["error_code"] == 18:  # QPS limit exceeded
                        print(f"情感分析QPS限制，正在重试({retries+1}/{max_retries})...")
                        retries += 1
                        time.sleep(min(10, 2 ** retries))  # exponential backoff
                        continue
                    else:
                        print(f"情感分析API错误: {result}")
                        return self._neutral_sentiment(text)

                if "items" in result and len(result["items"]) > 0:
                    item = result["items"][0]
                    item["text"] = text
                    return item
                else:
                    print(f"情感分析返回为空: {result}")
                    return self._neutral_sentiment(text)
            except Exception as e:
                print(f"情感分析请求异常: {e}")
                retries += 1
                if retries < max_retries:
                    time.sleep(min(10, 2 ** retries))
                else:
                    return self._neutral_sentiment(text)

        # All retries exhausted.
        print(f"情感分析达到最大重试次数({max_retries})，返回默认值")
        return self._neutral_sentiment(text)

    def extract_comment_tags(self, text: str, type_id: int = 4, max_retries: int = 3) -> List[Dict[str, Any]]:
        """Extract opinion tags (aspect + sentiment phrase) from a comment.

        :param text: comment text (truncated to 10240 chars per API limit)
        :param type_id: industry type for the comment_tag API, default 4 (food)
        :param max_retries: maximum number of attempts
        :return: list of tag items (annotated with type_id / type_name), or []
        """
        if not text or len(text.strip()) == 0:
            return []

        retries = 0
        while retries < max_retries:
            try:
                # Respect the QPS limit before every request.
                self._rate_limit()

                url = f"https://aip.baidubce.com/rpc/2.0/nlp/v2/comment_tag?charset=UTF-8&access_token={self.get_access_token()}"

                # The API rejects overlong texts; truncate defensively.
                if len(text) > 10240:
                    text = text[:10240]

                payload = json.dumps({
                    "text": text,
                    "type": type_id
                }, ensure_ascii=False)
                headers = {
                    'Content-Type': 'application/json',
                    'Accept': 'application/json'
                }

                response = requests.post(url, headers=headers, data=payload.encode("utf-8"),
                                         timeout=self.REQUEST_TIMEOUT)
                result = response.json()

                if "error_code" in result:
                    if result["error_code"] == 18:  # QPS limit exceeded
                        print(f"评论观点抽取QPS限制，正在重试({retries+1}/{max_retries})...")
                        retries += 1
                        time.sleep(min(10, 2 ** retries))  # exponential backoff
                        continue
                    else:
                        print(f"评论观点抽取API错误: {result}")
                        return []

                if "items" in result and len(result["items"]) > 0:
                    # Annotate each tag with the human-readable type name.
                    for item in result["items"]:
                        item["type_id"] = type_id
                        item["type_name"] = self.comment_types.get(type_id, "未知类型")
                    return result["items"]
                else:
                    print(f"评论观点抽取返回为空: {result}")
                    return []
            except Exception as e:
                print(f"评论观点抽取请求异常: {e}")
                retries += 1
                if retries < max_retries:
                    time.sleep(min(10, 2 ** retries))
                else:
                    return []

        # All retries exhausted.
        print(f"评论观点抽取达到最大重试次数({max_retries})，返回空列表")
        return []

    def extract_keywords(self, text_list: List[str], num: int = 10, max_retries: int = 3) -> List[Dict[str, Any]]:
        """Extract keywords from a batch of texts.

        :param text_list: texts to analyse (each truncated to 65535 chars)
        :param num: maximum number of keywords to extract per text
        :param max_retries: maximum number of attempts
        :return: the API's "results" list, or [] on failure / empty input
        """
        if not text_list or len(text_list) == 0:
            return []

        retries = 0
        while retries < max_retries:
            try:
                # Respect the QPS limit before every request.
                self._rate_limit()

                url = f"https://aip.baidubce.com/rpc/2.0/nlp/v1/txt_keywords_extraction?charset=UTF-8&access_token={self.get_access_token()}"

                # Truncate each text to the API's per-text limit.
                processed_texts = [t[:65535] for t in text_list]

                payload = json.dumps({
                    "text": processed_texts,
                    "num": num
                }, ensure_ascii=False)
                headers = {
                    'Content-Type': 'application/json',
                    'Accept': 'application/json'
                }

                response = requests.post(url, headers=headers, data=payload.encode("utf-8"),
                                         timeout=self.REQUEST_TIMEOUT)
                result = response.json()

                if "error_code" in result:
                    if result["error_code"] == 18:  # QPS limit exceeded
                        print(f"关键词提取QPS限制，正在重试({retries+1}/{max_retries})...")
                        retries += 1
                        time.sleep(min(10, 2 ** retries))
                        continue
                    else:
                        print(f"关键词提取API错误: {result}")
                        return []

                if "results" in result and len(result["results"]) > 0:
                    return result["results"]
                else:
                    print(f"关键词提取返回为空: {result}")
                    return []
            except Exception as e:
                print(f"关键词提取请求异常: {e}")
                retries += 1
                if retries < max_retries:
                    time.sleep(min(10, 2 ** retries))
                else:
                    return []

        # All retries exhausted.
        print(f"关键词提取达到最大重试次数({max_retries})，返回空列表")
        return []

    @staticmethod
    def _item_text(item: Dict[str, Any]) -> str:
        """Build the analyzable text for one record.

        A comment record carries a "content" field; a note record is joined
        as "title\\ndesc".
        """
        if 'content' in item:  # comment record
            return item.get('content', '')
        title = item.get("title", "")
        desc = item.get("desc", "")
        return f"{title}\n{desc}"

    def process_json_file(self, json_file_path: str, output_file_path: str = None) -> List[Dict[str, Any]]:
        """Run sentiment, opinion-tag and keyword analysis over a JSON file.

        :param json_file_path: path of the input JSON (list of note/comment dicts)
        :param output_file_path: where to write the augmented records; a
            default path is derived from the input name when omitted
        :return: the list of augmented records, or [] on failure
        """
        try:
            with open(json_file_path, 'r', encoding='utf-8') as f:
                data = json.load(f)

            results = []
            # Collect all non-empty texts first so keywords can be extracted
            # in batches; remember each text's original record index.
            all_texts = []
            text_to_index = []

            for i, item in enumerate(data):
                full_text = self._item_text(item)
                if full_text.strip():
                    all_texts.append(full_text)
                    text_to_index.append(i)

            # Extract keywords in batches of at most 20 texts per request.
            batch_size = 20
            all_keywords_results = {}

            for i in range(0, len(all_texts), batch_size):
                batch_texts = all_texts[i:i+batch_size]
                keywords = self.extract_keywords(batch_texts, num=5)
                # Map batch results back to original record indices; the API
                # may return fewer items than texts, so guard the index.
                for j, text_idx in enumerate(text_to_index[i:i+batch_size]):
                    if j < len(keywords):
                        all_keywords_results[text_idx] = keywords[j] if isinstance(keywords[j], list) else [keywords[j]]
                    else:
                        all_keywords_results[text_idx] = []

            # Augment every record with the three analyses.
            for i, item in enumerate(data):
                if i % 10 == 0:
                    print(f"正在处理第 {i} 条数据...")

                full_text = self._item_text(item)

                sentiment_result = self.analyze_sentiment(full_text)

                # Opinion tags; industry type defaults to 4 (food & dining).
                comment_type = 4
                comment_tags_result = self.extract_comment_tags(full_text, comment_type)

                keywords_result = all_keywords_results.get(i, [])

                processed_item = {
                    **item,
                    "full_text": full_text,
                    "sentiment_analysis": sentiment_result,
                    "comment_tags_analysis": comment_tags_result,
                    "keywords_analysis": keywords_result
                }
                results.append(processed_item)

            if not output_file_path:
                # TODO(review): hard-coded machine-specific default path —
                # should be configurable; kept for backward compatibility.
                base_name = os.path.basename(json_file_path)
                name_without_ext = os.path.splitext(base_name)[0]
                output_file_path = f"d:/redbook/MediaCrawler-1/data/xhs/json/{name_without_ext}_analyzed.json"

            with open(output_file_path, 'w', encoding='utf-8') as f:
                json.dump(results, f, ensure_ascii=False, indent=2)

            print(f"处理完成，结果已保存至: {output_file_path}")
            return results
        except Exception as e:
            print(f"处理JSON文件失败: {e}")
            return []

    def generate_visualization_data(self, analyzed_file_path: str) -> Dict[str, Any]:
        """Aggregate an analyzed JSON file into chart-ready data.

        :param analyzed_file_path: path of a file written by process_json_file
        :return: dict with top_tag_keywords, wordcloud_data and geo_data
                 (all empty lists on failure so the frontend shows "no data")
        """
        try:
            if not os.path.exists(analyzed_file_path):
                print(f"错误：分析结果文件不存在: {analyzed_file_path}")
                return {
                    "top_tag_keywords": [],
                    "wordcloud_data": [],
                    "geo_data": []
                }

            with open(analyzed_file_path, 'r', encoding='utf-8') as f:
                data = json.load(f)

            # Accumulators for keywords, opinion tags and IP-location counts.
            all_keywords = []
            keyword_scores = {}
            all_comment_tags = []
            prop_counts = {}
            ip_location_counts = {}

            for item in data:
                # Count records per IP location for the geo chart.
                ip_location = item.get("ip_location", "")
                if ip_location and ip_location != "":
                    ip_location_counts[ip_location] = ip_location_counts.get(ip_location, 0) + 1

                # Tally opinion-tag attribute words ("prop") for the Top-10 list.
                if "comment_tags_analysis" in item and item["comment_tags_analysis"]:
                    for tag in item["comment_tags_analysis"]:
                        all_comment_tags.append(tag)
                        prop = tag.get("prop", "")
                        if prop:
                            prop_counts[prop] = prop_counts.get(prop, 0) + 1

                # Accumulate keyword scores across records.
                if "keywords_analysis" in item and item["keywords_analysis"]:
                    for keyword_info in item["keywords_analysis"]:
                        # Only accept the Baidu API dict shape {"word", "score"}.
                        if isinstance(keyword_info, dict) and "word" in keyword_info and "score" in keyword_info:
                            word = keyword_info["word"]
                            score = keyword_info["score"]
                            all_keywords.append(word)
                            if word not in keyword_scores:
                                keyword_scores[word] = 0
                            keyword_scores[word] += score

            # Top-10 attribute words by frequency.
            top_tag_keywords = sorted(prop_counts.items(), key=lambda x: x[1], reverse=True)[:10]

            # Word-cloud entries: tag words first, then API keywords.
            wordcloud_data = []
            for word, freq in top_tag_keywords:
                wordcloud_data.append({
                    "name": word,
                    "value": freq,
                    "category": "标签"
                })

            if keyword_scores:
                top_api_keywords = sorted(keyword_scores.items(), key=lambda x: x[1], reverse=True)[:10]
                for word, score in top_api_keywords:
                    wordcloud_data.append({
                        "name": word,
                        "value": int(score * 100),  # scale float score for display
                        "category": "提取关键词"
                    })

            # Geo chart: top-10 IP locations by record count.
            geo_data = []
            if ip_location_counts:
                sorted_locations = sorted(ip_location_counts.items(), key=lambda x: x[1], reverse=True)
                for loc, count in sorted_locations[:10]:
                    geo_data.append({
                        "name": loc,
                        "value": count
                    })

            return {
                "top_tag_keywords": top_tag_keywords,
                "wordcloud_data": wordcloud_data,
                "geo_data": geo_data
            }
        except Exception as e:
            print(f"生成可视化数据失败: {e}")
            return {
                "top_tag_keywords": [],
                "wordcloud_data": [],
                "geo_data": []
            }
