'''
案例3：内容安全监控系统
实时检测生成内容是否包含敏感信息
'''
import asyncio
import os
import re
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI

load_dotenv()


class ContentSafetyMonitor:
    """Stream LLM output while scanning every chunk for sensitive content.

    Keyword and regex checks run on each streamed chunk. A small overlap
    window of already-seen text is rescanned so that a keyword or phone
    number split across two chunks is still detected, while matches that
    end inside the overlap are suppressed to avoid double-counting.
    """

    # Compiled once at class-definition time; the original recompiled both
    # patterns on every streamed chunk inside check_safety().
    _PATTERNS = {
        '联系方式': re.compile(r'\b\d{11}\b|\b\d{3,4}-\d{7,8}\b'),  # mobile / landline numbers
        '网址': re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'),
    }

    # Longest token we need to bridge across a chunk boundary: landline
    # numbers are up to 13 chars, keywords are 2 chars. 16 is a safe margin.
    _OVERLAP = 16

    def __init__(self):
        # NOTE(review): assumes MODELSCOPE_API_KEY / MODELSCOPE_API_BASE are
        # present in the environment (load_dotenv runs at module import).
        self.model = ChatOpenAI(
            api_key=os.getenv("MODELSCOPE_API_KEY"),
            openai_api_base=os.getenv("MODELSCOPE_API_BASE"),
            model="deepseek-ai/DeepSeek-R1-0528",
            streaming=True,
            temperature=0.7,
        )

        # Substrings that trigger a sensitive-word warning.
        self.sensitive_keywords = [
            '暴力', '色情', '诈骗', '违法', '毒品',
            '赌博', '自杀', '仇恨', '歧视', '恐怖'
        ]
        # Warnings accumulated for the current generation; reset at the
        # start of each generate_with_safety_check() call.
        self.warnings = []

    async def generate_with_safety_check(self, prompt):
        """Stream a completion for *prompt*, echoing chunks as they arrive
        and printing a safety warning for each sensitive hit.

        Returns the full generated text.
        """
        print(f"📝 生成请求: {prompt}")
        print("=" * 50)
        print("🔍 安全监控已启动...")

        # Reset per-run state so stale warnings from a previous call cannot
        # leak into this run's report (the report enumerates self.warnings).
        self.warnings = []
        generated_content = ""
        warning_count = 0

        async for event in self.model.astream_events(prompt, version="v2"):
            if event['event'] == 'on_chat_model_stream':
                chunk = event['data']['chunk']
                if hasattr(chunk, 'content'):
                    content = chunk.content
                    # Keep a tail of already-seen text so a keyword or
                    # number split across two chunks is still caught.
                    tail = generated_content[-self._OVERLAP:]
                    generated_content += content

                    # Echo the chunk in real time.
                    print(content, end='', flush=True)

                    # Scan tail+chunk, but only report matches that end in
                    # the new chunk — the tail was already scanned.
                    warnings = self._find_warnings(tail + content, len(tail))
                    if warnings:
                        warning_count += len(warnings)
                        for warning in warnings:
                            print(f"\n🚨 安全警告: {warning}")
                            self.warnings.append(warning)

            elif event['event'] == 'on_chat_model_end':
                print(f"\n\n✅ 生成完成")
                self.generate_safety_report(generated_content, warning_count)

        return generated_content

    def check_safety(self, text):
        """Return the list of warning strings for matches anywhere in *text*.

        Public interface preserved; delegates to _find_warnings with no
        overlap offset.
        """
        return self._find_warnings(text, 0)

    def _find_warnings(self, text, new_from):
        """Scan *text*; return warnings for matches ending after *new_from*.

        *new_from* marks where previously-scanned text ends: matches that
        finish at or before it were reported by an earlier call. Each
        keyword/pattern is reported at most once per call, matching the
        original per-chunk behaviour.
        """
        warnings = []

        # Keyword detection (plain substring match).
        for keyword in self.sensitive_keywords:
            idx = text.find(keyword)
            while idx != -1:
                if idx + len(keyword) > new_from:
                    warnings.append(f"检测到敏感词: '{keyword}'")
                    break
                idx = text.find(keyword, idx + 1)

        # Pattern detection (phone numbers, URLs).
        for pattern_name, pattern in self._PATTERNS.items():
            for match in pattern.finditer(text):
                if match.end() > new_from:
                    warnings.append(f"检测到{pattern_name}信息")
                    break

        return warnings

    def generate_safety_report(self, content, warning_count):
        """Print a summary report for the finished generation."""
        print("\n" + "=" * 50)
        print("📋 安全监控报告:")
        print(f"总内容长度: {len(content)}字符")
        print(f"安全警告数: {warning_count}")

        if warning_count == 0:
            print("✅ 内容安全: 通过")
        else:
            print("⚠️ 内容安全: 需要审核")
            print("\n详细警告:")
            for i, warning in enumerate(self.warnings, 1):
                print(f"  {i}. {warning}")

# Run example 3.
async def run_safety_example():
    """Drive the monitor over a few test prompts, one generation each."""
    monitor = ContentSafetyMonitor()

    test_prompts = [
        "写一篇关于网络安全的教育文章",
        "描述一个健康的生活习惯",
        # Prompts likely to trigger warnings can be added here for testing.
    ]

    for prompt in test_prompts:
        print(f"\n{'=' * 60}")
        # Return value intentionally discarded: the monitor prints the
        # content as it streams (the original bound it to an unused local).
        await monitor.generate_with_safety_check(prompt)
        monitor.warnings.clear()  # reset the warning log between prompts

# Guard the entry point so importing this module does not start a network run.
if __name__ == "__main__":
    asyncio.run(run_safety_example())
