"""
智谱AI敏感词检测工具类（增强版）
基于智谱AI GLM-4-Flash模型进行快速且准确的内容审核和敏感词检测
"""

import json
import asyncio
import hashlib
import time
import re
from typing import Dict, Any, Optional, Tuple, List
import logging
from zai import ZhipuAiClient

# Configure logging.
# NOTE(review): calling basicConfig at import time configures the root logger
# for the whole process — consider leaving this to the embedding application.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class ZhipuContentChecker:
    """
    Zhipu AI content checker (enhanced version).

    Detects sensitive content in text using a layered strategy:

    1. an in-memory TTL cache of previous verdicts;
    2. a local keyword / regex / combination pre-filter
       (``advanced_keyword_check``);
    3. a call to the GLM-4-Flash chat model for ambiguous cases.

    Verdicts are returned as ``(is_safe, detail)`` tuples. On most internal
    failures the checker "fails open" (treats content as safe) — see the
    review notes on the individual methods.
    """

    def __init__(self, api_key: str = "5cc102e5a21644b6b06abbda8b833af7.nhJhiTMlkdBGFDpQ"):
        """
        Initialize the Zhipu AI content checker.

        Args:
            api_key (str): Zhipu AI API key.
                NOTE(review): a live-looking credential is hardcoded as the
                default value — this is a secret leaked in source control.
                It should be loaded from an environment variable or secret
                store instead; flagged here because changing the default
                would alter behavior for existing no-argument callers.
        """
        self.api_key = api_key
        self.client = ZhipuAiClient(api_key=api_key)
        # Use the faster Flash-tier model for low-latency moderation.
        self.model = "glm-4-flash"

        # Local in-memory result cache: md5(content) -> {result, timestamp}.
        self.cache: Dict[str, Dict[str, Any]] = {}
        self.cache_ttl = 300  # entries expire after 5 minutes

        # Expanded sensitive-keyword library, grouped by category.
        # NOTE(review): several entries are single common characters/words
        # (e.g. "死", "打", "性", "狗"), so the pre-filter will flag a lot of
        # ordinary text; a single hit is deferred to the AI (see
        # advanced_keyword_check), which mitigates but does not remove the
        # false-positive cost.
        self.sensitive_keywords = {
            # Politically sensitive terms
            "political": [
                "政治", "政府", "领导人", "党", "革命", "政权", "独立", "分裂", 
                "台独", "港独", "藏独", "疆独", "民主", "专制", "独裁", "政变"
            ],
            # Discrimination and hate speech
            "discrimination": [
                "傻逼", "傻b", "sb", "煞笔", "沙比", "傻比", "白痴", "智障", "弱智",
                "死", "滚", "滚蛋", "去死", "该死", "找死", "作死", "等死",
                "日本人", "韩国人", "美国人", "中国人", "黑人", "白人", "黄种人",
                "歧视", "仇恨", "种族", "民族", "排外", "仇外", "反华", "反日",
                "垃圾", "废物", "人渣", "败类", "畜生", "禽兽", "狗", "猪","cnm",
                "狗日的"
            ],
            # Sexual / vulgar content
            "sexual": [
                "色情", "黄色", "淫秽", "性", "做爱", "性交", "强奸", "轮奸",
                "裸体", "裸照", "艳照", "三级", "成人", "情色", "激情", "床戏",
                "胸", "乳房", "阴道", "阴茎", "生殖器", "下体", "私处"
            ],
            # Violence and gore
            "violence": [
                "暴力", "血腥", "杀", "杀死", "杀害", "谋杀", "屠杀", "血",
                "砍", "刺", "捅", "打", "揍", "殴打", "虐待", "折磨",
                "爆炸", "炸弹", "恐怖", "袭击", "攻击", "武器", "枪", "刀"
            ],
            # Illegal activity
            "illegal": [
                "违法", "犯罪", "毒品", "吸毒", "贩毒", "海洛因", "冰毒", "大麻",
                "赌博", "赌", "博彩", "彩票", "六合彩", "赌场", "赌钱",
                "诈骗", "骗", "欺诈", "传销", "洗钱", "走私", "偷税", "逃税",
                "盗窃", "抢劫", "绑架", "勒索", "敲诈", "贿赂", "腐败"
            ],
            # Other inappropriate content
            "inappropriate": [
                "自杀", "自残", "抑郁", "轻生", "跳楼", "上吊", "割腕",
                "邪教", "迷信", "算命", "风水", "占卜", "神棍", "大师",
                "假货", "山寨", "盗版", "侵权", "抄袭", "剽窃"
            ]
        }

        # Flat list of every keyword, used for the quick fallback scan in
        # check_content's exception handler.
        self.all_keywords: List[str] = []
        for category, keywords in self.sensitive_keywords.items():
            self.all_keywords.extend(keywords)

        # Regex patterns for multi-word sensitive combinations.
        self.sensitive_patterns = [
            r".*人.*都.*是.*[傻煞沙][逼比笔b].*",  # ethnic-slur pattern ("X people are all ...")
            r".*[滚去].*死.*",  # hostile-attack pattern
            r".*[废垃圾].*物.*",  # insult pattern
            r".*[打揍砍杀].*[死光].*",  # violent-threat pattern
        ]

        logger.info("智谱AI内容检测器（增强版）初始化完成")

    def get_cache_key(self, content: str) -> str:
        """
        Build the cache key for a piece of content.

        Args:
            content (str): text being checked.

        Returns:
            str: md5 hex digest of the UTF-8 encoded content (used purely as
                a cache key, not for security).
        """
        return hashlib.md5(content.encode('utf-8')).hexdigest()

    def get_from_cache(self, content: str) -> Optional[Tuple[bool, str]]:
        """
        Look up a previous verdict for this content.

        Args:
            content (str): text being checked.

        Returns:
            Optional[Tuple[bool, str]]: the cached ``(is_safe, detail)``
                tuple, or None when absent or older than ``cache_ttl``.
        """
        cache_key = self.get_cache_key(content)
        if cache_key in self.cache:
            cached_data = self.cache[cache_key]
            if time.time() - cached_data['timestamp'] < self.cache_ttl:
                logger.debug(f"从缓存获取检测结果: {content[:30]}...")
                return cached_data['result']
            else:
                # Entry expired — drop it eagerly.
                del self.cache[cache_key]
        return None

    def save_to_cache(self, content: str, result: Tuple[bool, str]):
        """
        Store a verdict in the cache.

        Args:
            content (str): text that was checked.
            result (Tuple[bool, str]): ``(is_safe, detail)`` verdict.
        """
        cache_key = self.get_cache_key(content)
        self.cache[cache_key] = {
            'result': result,
            'timestamp': time.time()
        }

        # Bound the cache size to avoid unbounded memory growth.
        if len(self.cache) > 1000:
            # Evict the single oldest entry (O(n) scan over timestamps).
            oldest_key = min(self.cache.keys(), key=lambda k: self.cache[k]['timestamp'])
            del self.cache[oldest_key]

    def advanced_keyword_check(self, content: str) -> Optional[Tuple[bool, str, List[str]]]:
        """
        Multi-layer local pre-filter run before any AI call.

        Args:
            content (str): text being checked.

        Returns:
            Optional[Tuple[bool, str, List[str]]]: ``(is_safe, reason,
                detected_keywords)`` when a local verdict is conclusive, or
                None when the content should be escalated to the AI model
                (a single ambiguous keyword hit, or long keyword-free text).
        """
        content_lower = content.lower()
        content_clean = re.sub(r'[^\w\u4e00-\u9fff]', '', content)  # strip punctuation, keep word chars + CJK

        detected_keywords = []
        detected_categories = []

        # 1. Exact keyword matching against both raw and punctuation-stripped text.
        for category, keywords in self.sensitive_keywords.items():
            for keyword in keywords:
                if keyword in content_lower or keyword in content_clean.lower():
                    detected_keywords.append(keyword)
                    if category not in detected_categories:
                        detected_categories.append(category)

        # 2. Regex pattern matching for multi-word combinations.
        pattern_matches = []
        for pattern in self.sensitive_patterns:
            if re.search(pattern, content_lower):
                pattern_matches.append(pattern)
                detected_keywords.append("模式匹配")

        # 3. Combination detection (ethnic group term + insult term).
        if any(race in content_lower for race in ["日本人", "韩国人", "美国人", "中国人"]):
            if any(insult in content_lower for insult in ["傻逼", "傻b", "sb", "白痴", "智障"]):
                detected_keywords.append("种族歧视组合")
                # NOTE(review): unlike step 1 this append is unconditional, so
                # "discrimination" can appear twice in detected_categories.
                detected_categories.append("discrimination")

        # 4. Decide whether an AI confirmation is still needed.
        if detected_keywords:
            # Keywords found; AI may still be needed to judge context.
            if len(detected_keywords) >= 2 or pattern_matches or "种族歧视组合" in detected_keywords:
                # Multiple keywords or a pattern/combination hit: unsafe outright.
                reason = f"检测到敏感内容：{', '.join(detected_categories)}类别，关键词：{', '.join(detected_keywords[:5])}"
                logger.warning(f"多层检测发现高风险内容: {content[:30]}... -> {reason}")
                return False, reason, detected_keywords
            else:
                # Single keyword hit: defer to the AI for context.
                logger.debug(f"发现敏感词但需AI确认: {detected_keywords}")
                return None  # escalate to AI detection

        # 5. Very short content with no hits passes immediately.
        if len(content.strip()) < 5:
            return True, "内容过短，快速通过", []

        # 6. Long content without obvious keywords still goes to the AI.
        if len(content.strip()) > 50:
            return None  # escalate to AI detection

        # 7. Short content, no keywords: safe.
        return True, "快速检测：未发现敏感内容", []

    def create_enhanced_detection_prompt(self, content: str) -> str:
        """
        Build the moderation prompt sent to the chat model.

        Args:
            content (str): text being checked (interpolated verbatim into
                the prompt — presumably trusted not to contain prompt
                injection; TODO confirm upstream sanitization).

        Returns:
            str: the complete prompt, which instructs the model to answer
                with a single JSON object (is_safe / risk_level /
                detected_issues / reason).
        """
        prompt = f"""请严格检测以下内容是否包含敏感信息："{content}"

重点检测以下类型：
1. 种族歧视、民族仇恨、地域攻击
2. 人身攻击、侮辱谩骂、恶意中伤  
3. 政治敏感、分裂言论
4. 色情低俗、暴力血腥
5. 违法违规、危险行为
6. 仇恨言论、歧视性表达

特别注意：
- 对特定群体的攻击性言论（如"XX人都是XX"）
- 侮辱性词汇和恶意标签
- 煽动仇恨和对立的表达

返回JSON格式：
{{
    "is_safe": true/false,
    "risk_level": "safe/medium/high", 
    "detected_issues": ["具体问题类型"],
    "reason": "详细检测原因"
}}

仅返回JSON，无其他内容。"""
        return prompt

    def parse_detection_result(self, response_text: str) -> Tuple[bool, str]:
        """
        Parse the model's response into a verdict.

        Tolerates markdown code fences and surrounding prose around the JSON
        payload. Falls back to keyword scanning of the raw response when JSON
        parsing fails, and fails open (safe) on any other exception.

        Args:
            response_text (str): raw text returned by the model.

        Returns:
            Tuple[bool, str]: ``(is_safe, detail)`` where detail is a JSON
                string of risk_level / detected_issues / reason / source.
        """
        try:
            logger.debug(f"解析AI响应: {response_text[:100]}...")

            # Normalize whitespace first.
            response_text = response_text.strip()

            # Strip a ```json ... ``` or ``` ... ``` fence if present.
            if "```json" in response_text:
                start = response_text.find("```json") + 7
                end = response_text.find("```", start)
                if end != -1:
                    response_text = response_text[start:end].strip()
            elif "```" in response_text:
                start = response_text.find("```") + 3
                end = response_text.find("```", start)
                if end != -1:
                    response_text = response_text[start:end].strip()

            # If extra prose surrounds the JSON, cut out the outermost braces.
            if response_text.startswith('{') and response_text.endswith('}'):
                pass  # already bare JSON
            else:
                # Take the span from the first '{' to the last '}'.
                start = response_text.find('{')
                end = response_text.rfind('}') + 1
                if start != -1 and end > start:
                    response_text = response_text[start:end]

            # Parse the JSON payload.
            result = json.loads(response_text)

            is_safe = result.get("is_safe", True)
            risk_level = result.get("risk_level", "safe")
            detected_issues = result.get("detected_issues", [])
            reason = result.get("reason", "")

            # Repackage the interesting fields as the detail string.
            detail_info = {
                "risk_level": risk_level,
                "detected_issues": detected_issues,
                "reason": reason,
                "source": "zhipu_ai_enhanced"
            }

            logger.info(f"AI检测结果: 安全={is_safe}, 风险={risk_level}, 问题={detected_issues}")
            return is_safe, json.dumps(detail_info, ensure_ascii=False)

        except json.JSONDecodeError as e:
            logger.warning(f"JSON解析失败，使用备选方案: {e}")
            return self.fallback_keyword_detection(response_text)
        except Exception as e:
            logger.error(f"结果解析异常: {e}")
            # NOTE(review): fail-open — an unparseable response is treated as safe.
            return True, f"解析异常，默认安全: {str(e)}"

    def fallback_keyword_detection(self, response_text: str) -> Tuple[bool, str]:
        """
        Heuristic fallback used when the AI response is not valid JSON.

        Scans the raw response for safe/unsafe indicator words; ambiguous
        responses are judged conservatively as unsafe.

        Args:
            response_text (str): raw text returned by the model.

        Returns:
            Tuple[bool, str]: ``(is_safe, detail)`` verdict.
        """
        logger.warning("使用增强备选关键词检测")

        response_lower = response_text.lower()

        # Markers suggesting the model flagged a problem.
        unsafe_indicators = [
            "false", "不安全", "敏感", "违规", "high", "medium", "风险",
            "歧视", "仇恨", "攻击", "侮辱", "不当", "问题", "危险"
        ]
        safe_indicators = ["true", "安全", "safe", "正常", "无问题", "通过"]

        found_unsafe = any(indicator in response_lower for indicator in unsafe_indicators)
        found_safe = any(indicator in response_lower for indicator in safe_indicators)

        if found_unsafe and not found_safe:
            return False, f"备选检测：发现风险指标 - {response_text[:100]}"
        elif found_safe and not found_unsafe:
            return True, f"备选检测：确认安全 - {response_text[:100]}"
        else:
            # Mixed or no signals: err on the side of caution.
            return False, f"备选检测：模糊情况保守判断 - {response_text[:100]}"

    def check_content(self, content: str, timeout: int = 15) -> Tuple[bool, str]:
        """
        Check a piece of content for sensitive material (main entry point).

        Pipeline: empty check -> cache -> local multi-layer pre-filter ->
        AI model. Conclusive results are cached.

        Args:
            content (str): text to check.
            timeout (int): per-request timeout in seconds passed to the
                chat API (default 15).

        Returns:
            Tuple[bool, str]: ``(is_safe, detail)``. NOTE(review): API
                failures fail open unless a raw keyword match is found.
        """
        start_time = time.time()

        # Empty or whitespace-only content is trivially safe.
        if not content or not content.strip():
            logger.debug("内容为空，返回安全")
            return True, "内容为空"

        logger.info(f"开始检测内容: {content[:30]}...")

        # 1. Cache lookup.
        cached_result = self.get_from_cache(content)
        if cached_result:
            logger.info(f"使用缓存结果，耗时: {time.time() - start_time:.2f}秒")
            return cached_result

        # 2. Local multi-layer pre-filter; None means "escalate to AI".
        keyword_result = self.advanced_keyword_check(content)
        if keyword_result is not None:
            is_safe, reason, detected_words = keyword_result
            result = (is_safe, reason)
            self.save_to_cache(content, result)
            logger.info(f"多层检测完成，耗时: {time.time() - start_time:.2f}秒")
            return result

        # 3. AI detection with the enhanced prompt.
        try:
            prompt = self.create_enhanced_detection_prompt(content)

            logger.debug("调用智谱AI API进行深度检测...")
            response = self.client.chat.completions.create(
                model=self.model,
                messages=[{"role": "user", "content": prompt}],
                timeout=timeout,
                temperature=0.1,  # low randomness for consistent verdicts
                max_tokens=300    # room for a detailed JSON analysis
            )

            if response and response.choices and len(response.choices) > 0:
                response_content = response.choices[0].message.content
                result = self.parse_detection_result(response_content)

                # Cache the AI verdict too.
                self.save_to_cache(content, result)

                elapsed_time = time.time() - start_time
                logger.info(f"AI深度检测完成，耗时: {elapsed_time:.2f}秒")
                return result
            else:
                logger.error("AI响应为空")
                return True, "AI响应异常，默认安全"

        except Exception as e:
            logger.error(f"检测异常: {str(e)}")
            # On API failure, fall back to a conservative raw keyword scan.
            if any(keyword in content.lower() for keyword in self.all_keywords):
                return False, f"异常情况下的保守检测：发现潜在敏感词汇"
            return True, f"检测异常，默认安全: {str(e)}"

    def batch_check_content(self, contents: list, timeout: int = 30) -> Dict[str, Tuple[bool, str]]:
        """
        Check multiple pieces of content sequentially.

        The total timeout budget is divided evenly across items (minimum
        5 seconds each). NOTE(review): duplicate strings in ``contents``
        collapse to a single key in the result dict.

        Args:
            contents (list): texts to check.
            timeout (int): total timeout budget in seconds.

        Returns:
            Dict[str, Tuple[bool, str]]: content -> ``(is_safe, detail)``.
        """
        logger.info(f"开始批量检测 {len(contents)} 个内容")
        start_time = time.time()

        results = {}
        single_timeout = max(5, timeout // len(contents)) if contents else 5

        for i, content in enumerate(contents):
            try:
                result = self.check_content(content, single_timeout)
                results[content] = result

                # Log progress every 10 items.
                if (i + 1) % 10 == 0:
                    logger.info(f"批量检测进度: {i+1}/{len(contents)}")

            except Exception as e:
                logger.error(f"批量检测第{i+1}项失败: {str(e)}")
                results[content] = (True, f"检测失败: {str(e)}")

        elapsed_time = time.time() - start_time
        logger.info(f"批量检测完成，共处理 {len(results)} 项，总耗时: {elapsed_time:.2f}秒")
        return results

# Module-level singleton checker, lazily created by get_content_checker().
_content_checker: Optional[ZhipuContentChecker] = None

def get_content_checker() -> ZhipuContentChecker:
    """
    Return the shared content-checker instance.

    Creates the module-level ``ZhipuContentChecker`` singleton on first
    call and reuses it afterwards.

    Returns:
        ZhipuContentChecker: the lazily-initialized singleton.
    """
    global _content_checker
    checker = _content_checker
    if checker is None:
        checker = ZhipuContentChecker()
        _content_checker = checker
    return checker

async def check_sensitive_content(content: str) -> tuple[bool, str]:
    """
    Asynchronously check content for sensitive material (interface kept
    compatible with the original API).

    Offloads the synchronous checker to the default thread-pool executor so
    the blocking HTTP call does not stall the event loop.

    Args:
        content (str): text to check.

    Returns:
        tuple[bool, str]: ``(is_safe, detail)``. Internal failures are
            logged and treated as safe, matching the module's fail-open
            convention.
    """
    try:
        checker = get_content_checker()
        # asyncio.get_event_loop() is deprecated inside coroutines since
        # Python 3.10; get_running_loop() is the supported replacement and
        # is always valid here because this function runs on a loop.
        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(None, checker.check_content, content)
        return result
    except Exception as e:
        logger.error(f"异步敏感词检测失败: {str(e)}")
        return True, f"检测异常: {str(e)}"

def sync_check_sensitive_content(content: str) -> tuple[bool, str]:
    """
    Synchronously check content for sensitive material.

    Args:
        content (str): text to check.

    Returns:
        tuple[bool, str]: ``(is_safe, detail)``; internal failures are
            logged and reported as safe.
    """
    try:
        return get_content_checker().check_content(content)
    except Exception as e:
        logger.error(f"同步敏感词检测失败: {str(e)}")
        return True, f"检测异常: {str(e)}"

if __name__ == "__main__":
    # Smoke test: run the checker over a few benign sample strings, first
    # one-by-one and then through the batch API.
    test_contents = [
        "这是一个正常的测试内容",
        "今天天气很好，适合出门游玩",
        "我爱我的祖国，希望世界和平"
    ]

    checker = ZhipuContentChecker()

    def _report(text, verdict):
        """Print one verdict in the standard demo format."""
        safe, detail = verdict
        print(f"内容: {text}")
        print(f"检测结果: {'安全' if safe else '敏感'}")
        print(f"详细信息: {detail}")
        print("-" * 50)

    print("=== 单个内容检测测试 ===")
    for item in test_contents:
        _report(item, checker.check_content(item))

    print("\n=== 批量检测测试 ===")
    for item, verdict in checker.batch_check_content(test_contents).items():
        _report(item, verdict)