#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
大语言模型内容可靠性检测与过滤系统
AI Content Reliability Detection and Filtering System

功能：
1. 检测内容是否由AI生成
2. 通过RAG技术验证信息可靠性
3. 基于质量评分过滤引用信息
4. 缓解模型幻觉问题
"""

import os
import json
import requests
import hashlib
import time
from typing import Dict, List, Tuple, Optional
from dataclasses import dataclass
from enum import Enum
import logging
from datetime import datetime
import re

# Import project-local modules: knowledge base manager and source extractor
from rag_knowledge_base import KnowledgeBaseManager
from source_extractor import SourceExtractor

# Logging configuration: INFO level, timestamped format, duplicated to a
# UTF-8 log file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('ai_detector.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

class ContentType(Enum):
    """Classification label assigned to a piece of analyzed text.

    The string values are stable identifiers used in logs and results.
    """
    AI_GENERATED = "ai_generated"    # likely produced by a language model
    HUMAN_WRITTEN = "human_written"  # likely authored by a person
    UNCERTAIN = "uncertain"          # detector could not decide either way

@dataclass
class ContentAnalysis:
    """Aggregate result of one content-analysis pass through the pipeline."""
    content: str                   # original, unmodified input text
    ai_probability: float          # estimated probability the text is AI-generated
    content_type: ContentType      # classification derived from ai_probability
    reliability_score: float       # verification/quality score (higher = more reliable)
    verification_result: Dict      # raw details from RAG verification or quality filtering
    filtered_content: str          # content after unreliable/extreme sections were stripped
    removed_sections: List[str]    # descriptions of the sections that were removed

class AIContentDetector:
    """Detects whether a piece of text was generated by an AI model.

    Primary path: the ZeroGPT HTTP API. Fallback path (missing API key,
    non-200 response, or any request/parsing error): a lightweight
    heuristic counting phrases typical of machine-generated Chinese prose.
    """

    # Thresholds for mapping an AI probability onto a ContentType.
    _API_HIGH, _API_LOW = 0.8, 0.3              # used for API results
    _HEURISTIC_HIGH, _HEURISTIC_LOW = 0.6, 0.3  # used for the local heuristic

    # Phrases common in AI-generated text; compiled once at class creation
    # instead of re-compiling on every heuristic call.
    _AI_INDICATORS = [re.compile(p) for p in (
        r'根据.*分析',
        r'综上所述',
        r'总的来说',
        r'因此.*',
        r'基于.*研究',
        r'研究表明',
        r'数据.*显示',
        r'统计.*表明',
    )]

    def __init__(self, api_keys: Dict[str, str]):
        """
        Args:
            api_keys: mapping that may contain 'zerogpt' and 'openai' keys;
                a missing key disables the corresponding API path.
        """
        self.api_keys = api_keys
        self.zerogpt_api_key = api_keys.get('zerogpt')
        self.openai_api_key = api_keys.get('openai')

    @staticmethod
    def _classify(ai_probability: float, high: float, low: float) -> ContentType:
        """Map a probability to a ContentType using the given cutoffs."""
        if ai_probability > high:
            return ContentType.AI_GENERATED
        if ai_probability < low:
            return ContentType.HUMAN_WRITTEN
        return ContentType.UNCERTAIN

    def detect_ai_content(self, content: str) -> Tuple[float, ContentType]:
        """Detect whether *content* is AI-generated via the ZeroGPT API.

        Falls back to the local heuristic whenever the API cannot be used.

        Args:
            content: text to analyze.

        Returns:
            (ai_probability, content_type) tuple.
        """
        try:
            if not self.zerogpt_api_key:
                logger.warning("ZeroGPT API密钥未配置，使用模拟检测")
                return self._simulate_ai_detection(content)

            headers = {
                'Authorization': f'Bearer {self.zerogpt_api_key}',
                'Content-Type': 'application/json'
            }
            payload = {
                'input_text': content,
                'model': 'gpt-3.5-turbo'  # adjust the model here if needed
            }
            response = requests.post(
                'https://api.zerogpt.com/v1/detect',
                headers=headers,
                json=payload,
                timeout=30
            )

            if response.status_code == 200:
                result = response.json()
                ai_probability = result.get('ai_probability', 0.0)
                return ai_probability, self._classify(
                    ai_probability, self._API_HIGH, self._API_LOW)

            logger.error(f"ZeroGPT API请求失败: {response.status_code}")
            return self._simulate_ai_detection(content)

        except Exception as e:
            # Best-effort: a network/JSON failure must not break the pipeline.
            logger.error(f"AI内容检测失败: {str(e)}")
            return self._simulate_ai_detection(content)

    def _simulate_ai_detection(self, content: str) -> Tuple[float, ContentType]:
        """Heuristic fallback used when the ZeroGPT API is unavailable.

        Each matching indicator phrase adds 0.2 to the probability,
        capped at 0.9.
        """
        ai_count = sum(1 for pattern in self._AI_INDICATORS
                       if pattern.search(content))
        ai_probability = min(ai_count * 0.2, 0.9)
        return ai_probability, self._classify(
            ai_probability, self._HEURISTIC_HIGH, self._HEURISTIC_LOW)

class RAGVerifier:
    """Retrieval-augmented verification of factual claims.

    Claims are checked against a small JSON knowledge base on disk; a
    default knowledge base is written on first use.
    """

    def __init__(self, knowledge_base_path: str = "knowledge_base"):
        """
        Args:
            knowledge_base_path: directory that holds knowledge.json.
        """
        self.knowledge_base_path = knowledge_base_path
        self.knowledge_base = self._load_knowledge_base()

    def _load_knowledge_base(self) -> Dict:
        """Load knowledge.json, creating a default skeleton if missing.

        Returns:
            The knowledge-base dict, or an empty dict on any I/O error.
        """
        kb_file = os.path.join(self.knowledge_base_path, "knowledge.json")
        try:
            if os.path.exists(kb_file):
                with open(kb_file, 'r', encoding='utf-8') as f:
                    return json.load(f)

            # First run: persist a default knowledge-base structure.
            default_kb = {
                "academic_sources": {
                    "arxiv": "https://arxiv.org/",
                    "sciencedirect": "https://www.sciencedirect.com/",
                    "ieee": "https://ieeexplore.ieee.org/"
                },
                "government_sources": {
                    "gov_cn": "https://www.gov.cn/",
                    "moe": "http://www.moe.gov.cn/"
                },
                "fact_checking": {
                    "snopes": "https://www.snopes.com/",
                    "factcheck": "https://www.factcheck.org/"
                }
            }
            os.makedirs(self.knowledge_base_path, exist_ok=True)
            with open(kb_file, 'w', encoding='utf-8') as f:
                json.dump(default_kb, f, ensure_ascii=False, indent=2)
            return default_kb

        except Exception as e:
            logger.error(f"加载知识库失败: {str(e)}")
            return {}

    def verify_information(self, content: str, key_claims: List[str]) -> Dict:
        """Verify each claim against the knowledge base.

        Args:
            content: full text the claims came from (not used by the
                lookup itself; kept for interface stability).
            key_claims: list of claim sentences to check.

        Returns:
            Dict with verified/unverified/contradictory claim lists, an
            overall reliability_score (verified share minus contradicted
            share, so it may be negative), and a sources list.
        """
        verification_result = {
            "verified_claims": [],
            "unverified_claims": [],
            "contradictory_claims": [],
            "reliability_score": 0.0,
            "sources": []
        }
        try:
            # NOTE: a previous version also called _extract_key_information
            # here but never used the result; that dead call was removed.
            for claim in key_claims:
                hit = self._search_knowledge_base(claim)
                if not hit["found"]:
                    verification_result["unverified_claims"].append(claim)
                elif hit["contradicts"]:
                    verification_result["contradictory_claims"].append(claim)
                else:
                    verification_result["verified_claims"].append(claim)

            # Score = share of verified claims minus share of contradicted ones.
            total_claims = len(key_claims)
            if total_claims > 0:
                verified_ratio = len(verification_result["verified_claims"]) / total_claims
                contradictory_ratio = len(verification_result["contradictory_claims"]) / total_claims
                verification_result["reliability_score"] = verified_ratio - contradictory_ratio

            return verification_result

        except Exception as e:
            logger.error(f"信息验证失败: {str(e)}")
            return verification_result

    def _extract_key_information(self, content: str) -> List[str]:
        """Extract claim-like sentences (e.g. '研究表明…。') via regex."""
        key_patterns = [
            r'研究表明.*?[。！？]',
            r'数据显示.*?[。！？]',
            r'专家.*?表示.*?[。！？]',
            r'根据.*?报告.*?[。！？]',
            r'统计.*?显示.*?[。！？]'
        ]
        extracted = []
        for pattern in key_patterns:
            extracted.extend(re.findall(pattern, content))
        return extracted

    def _search_knowledge_base(self, claim: str) -> Dict:
        """Look up a single claim.

        NOTE(review): this is a placeholder keyword match that does not
        consult self.knowledge_base; a real implementation should perform
        semantic search over the loaded knowledge base.
        """
        search_result = {
            "found": False,
            "contradicts": False,
            "source": None
        }
        # Simulated lookup: any research/data/statistics keyword counts as found.
        if any(keyword in claim.lower() for keyword in ["研究", "数据", "统计"]):
            search_result["found"] = True
            search_result["source"] = "academic_sources"
        return search_result

class QualityFilter:
    """Scores content quality and strips extreme emotional/opinionated text."""

    def __init__(self):
        # Relative trust weights per source category. Currently informational
        # only; scoring below matches on domains instead.
        self.quality_weights = {
            "academic_paper": 1.0,
            "government_document": 0.9,
            "news_article": 0.7,
            "blog_post": 0.5,
            "social_media": 0.3
        }

        # Patterns flagging emotionally extreme phrasing.
        self.extreme_emotion_patterns = [
            r'[！]{2,}',  # runs of exclamation marks
            r'[？]{2,}',  # runs of question marks
            r'太.*了',
            r'非常.*',
            r'极其.*',
            r'绝对.*',
            r'完全.*'
        ]

        # Patterns flagging absolute/extreme opinion statements.
        self.extreme_opinion_patterns = [
            r'必须.*',
            r'应该.*',
            r'一定.*',
            r'肯定.*',
            r'绝对.*',
            r'完全.*'
        ]

        # Domains treated as authoritative when scoring sources.
        self.trusted_domains = [
            'arxiv.org',
            'sciencedirect.com',
            'ieeexplore.ieee.org',
            'gov.cn',
            'moe.gov.cn',
            'cnki.net',
            'wanfangdata.com.cn'
        ]

    def filter_content(self, content: str, sources: Optional[List[str]] = None) -> Tuple[str, List[str], float]:
        """Filter low-quality passages out of *content*.

        Args:
            content: text to filter.
            sources: optional list of source URLs backing the content.

        Returns:
            (filtered_content, removed_sections, quality_score) where
            removed_sections describes what was stripped and quality_score
            is clamped to [0.0, 1.0].
        """
        filtered_content = content
        removed_sections = []

        # 1. Strip long, emotionally extreme expressions.
        for pattern in self.extreme_emotion_patterns:
            for match in re.findall(pattern, content):
                if len(match) > 10:  # only remove substantial spans
                    filtered_content = filtered_content.replace(match, "")
                    removed_sections.append(f"极端情绪表达: {match}")

        # 2. Strip long, absolute opinion statements.
        for pattern in self.extreme_opinion_patterns:
            for match in re.findall(pattern, content):
                if len(match) > 15:  # only remove substantial spans
                    filtered_content = filtered_content.replace(match, "")
                    removed_sections.append(f"极端言论: {match}")

        # 3. Score what remains.
        quality_score = self._calculate_quality_score(filtered_content, sources)

        return filtered_content, removed_sections, quality_score

    def _calculate_quality_score(self, content: str, sources: Optional[List[str]] = None) -> float:
        """Compute a heuristic quality score, clamped to [0.0, 1.0]."""
        score = 0.5  # neutral baseline

        # Length adjustment: reward substantial text, penalize very short text.
        if len(content) > 1000:
            score += 0.1
        elif len(content) < 100:
            score -= 0.1

        # Source adjustment. FIX: check the specific high-weight domains
        # BEFORE the generic trusted list — previously 'gov.cn'/'arxiv.org'
        # were unreachable elif branches because both domains also appear in
        # trusted_domains, so their intended +0.3/+0.25 bonuses never applied.
        if sources:
            for source in sources:
                src = source.lower()
                if 'gov.cn' in src:
                    score += 0.3
                elif 'arxiv.org' in src:
                    score += 0.25
                elif any(domain in src for domain in self.trusted_domains):
                    score += 0.2

        # Structure adjustment: academic-style sections hint at rigor.
        if re.search(r'引言|摘要|结论|参考文献', content):
            score += 0.1

        # Objectivity adjustment: objective phrasing vs first-person opinion.
        objective_indicators = ['研究表明', '数据显示', '根据统计', '专家认为']
        subjective_indicators = ['我认为', '我觉得', '我相信', '我确信']

        objective_count = sum(1 for indicator in objective_indicators if indicator in content)
        subjective_count = sum(1 for indicator in subjective_indicators if indicator in content)

        if objective_count > subjective_count:
            score += 0.1
        else:
            score -= 0.1

        return min(max(score, 0.0), 1.0)

class ContentReliabilitySystem:
    """Top-level controller combining detection, verification and filtering.

    Pipeline: AI detection -> source extraction -> (RAG verification for
    AI-generated text | quality filtering otherwise) -> ContentAnalysis.
    """

    def __init__(self, api_keys: Dict[str, str], knowledge_base_path: str = "knowledge_base"):
        """
        Args:
            api_keys: API keys forwarded to AIContentDetector.
            knowledge_base_path: directory for the RAG knowledge base.
        """
        self.ai_detector = AIContentDetector(api_keys)
        self.rag_verifier = RAGVerifier(knowledge_base_path)
        self.quality_filter = QualityFilter()
        self.source_extractor = SourceExtractor()

    def process_content(self, content: str, sources: List[str] = None) -> ContentAnalysis:
        """Run the full reliability pipeline on *content*.

        Args:
            content: text to analyze.
            sources: optional list of source URLs backing the content.

        Returns:
            ContentAnalysis holding detection, verification and filtering
            results for the input.
        """
        logger.info("开始处理内容...")

        # 1. AI-content detection.
        ai_probability, content_type = self.ai_detector.detect_ai_content(content)
        logger.info(f"AI检测结果: 概率={ai_probability:.2f}, 类型={content_type.value}")

        # 2. Proactively extract sources cited inside the content itself.
        extracted_sources = self.source_extractor.extract_sources(content)
        logger.info(f"提取到 {len(extracted_sources['urls'])} 个URL, {len(extracted_sources['citations'])} 个引用")

        # 3. Choose a strategy based on the detection result.
        if content_type == ContentType.AI_GENERATED:
            # AI-generated content: verify key claims via RAG.
            key_claims = self._extract_key_claims(content)
            verification_result = self.rag_verifier.verify_information(content, key_claims)

            # Low reliability: strip the suspicious claims from the text.
            if verification_result["reliability_score"] < 0.3:
                filtered_content = self._remove_unreliable_sections(content, verification_result)
                # FIX: report everything that was actually removed — both
                # unverified AND contradictory claims (contradictory claims
                # were previously removed from the text but never reported).
                removed_sections = (verification_result.get("unverified_claims", [])
                                    + verification_result.get("contradictory_claims", []))
            else:
                filtered_content = content
                removed_sections = []
        else:
            # Human-written (or uncertain) content: quality filtering only.
            filtered_content, removed_sections, quality_score = self.quality_filter.filter_content(content, sources)
            verification_result = {"reliability_score": quality_score}

        # 4. Attach the extracted sources to the verification details.
        verification_result["extracted_sources"] = extracted_sources

        # 5. Assemble the final analysis record.
        analysis = ContentAnalysis(
            content=content,
            ai_probability=ai_probability,
            content_type=content_type,
            reliability_score=verification_result.get("reliability_score", 0.0),
            verification_result=verification_result,
            filtered_content=filtered_content,
            removed_sections=removed_sections
        )

        logger.info(f"内容处理完成，可靠性评分: {analysis.reliability_score:.2f}")
        return analysis

    def _extract_key_claims(self, content: str) -> List[str]:
        """Extract claim-like sentences (e.g. '研究表明…。') from the text."""
        claim_patterns = [
            r'研究表明.*?[。！？]',
            r'数据显示.*?[。！？]',
            r'专家.*?表示.*?[。！？]',
            r'根据.*?报告.*?[。！？]',
            r'统计.*?显示.*?[。！？]'
        ]
        claims = []
        for pattern in claim_patterns:
            claims.extend(re.findall(pattern, content))
        return claims

    def _remove_unreliable_sections(self, content: str, verification_result: Dict) -> str:
        """Strip unverified and contradictory claims out of the content."""
        filtered_content = content
        for claim in (verification_result.get("unverified_claims", [])
                      + verification_result.get("contradictory_claims", [])):
            filtered_content = filtered_content.replace(claim, "")
        return filtered_content

def main():
    """Demo entry point: run the reliability pipeline on sample content."""
    # API keys come from the environment; empty values make the detector
    # fall back to its local heuristic.
    api_keys = {
        'zerogpt': os.getenv('ZEROGPT_API_KEY', ''),
        'openai': os.getenv('OPENAI_API_KEY', '')
    }

    pipeline = ContentReliabilitySystem(api_keys)

    # Sample input mixing factual-sounding claims with subjective hype.
    sample_content = """
    根据最新研究数据显示，人工智能技术将在未来十年内彻底改变教育行业。
    专家表示，AI辅助教学将提高学生学习效率30%以上。
    统计表明，使用AI技术的学校在标准化测试中的表现明显优于传统学校。
    我认为这个技术太棒了！绝对会改变世界！
    """
    sample_sources = [
        "https://arxiv.org/abs/2023.12345",
        "https://www.moe.gov.cn/news/2023/ai_education.html"
    ]

    print("=== 大语言模型内容可靠性检测系统 ===")
    print(f"测试内容: {sample_content}")
    print(f"信息来源: {sample_sources}")
    print("\n开始分析...")

    analysis = pipeline.process_content(sample_content, sample_sources)

    print("\n=== 分析结果 ===")
    print(f"AI生成概率: {analysis.ai_probability:.2f}")
    print(f"内容类型: {analysis.content_type.value}")
    print(f"可靠性评分: {analysis.reliability_score:.2f}")
    print(f"移除的部分: {analysis.removed_sections}")
    print(f"\n过滤后的内容:\n{analysis.filtered_content}")

if __name__ == "__main__":
    main()