"""
使用示例 - 各种场景的实际应用
"""
import sys
sys.path.insert(0, '..')

# ============================================================================
# 示例 1: 基础验证 - Guardrails 验证器
# ============================================================================
def example_basic_validation():
    """Example 1: run a plain content-safety check with a Guardrails validator.

    Builds a Guard using the ML safety classifier in NOOP mode (failures are
    reported on the result, nothing is raised) and validates two sample texts.
    """
    banner = "=" * 70
    print(banner)
    print("示例 1: 基础内容验证")
    print(banner)

    from guardrails import Guard, OnFailAction
    from validators.ml_content_safety import MLContentSafety

    # NOOP: a violation only flips validation_passed; no exception is thrown.
    safety_guard = Guard().use(
        MLContentSafety(
            model_path="../models/safety_classifier",
            threshold=0.5,
            on_fail=OnFailAction.NOOP,
        )
    )

    samples = ("人工智能正在改变世界", "这个傻逼产品太垃圾了")

    for sample in samples:
        print(f"\n文本: {sample}")
        outcome = safety_guard.validate(sample)
        verdict = "✅ 安全" if outcome.validation_passed else "❌ 违规"
        print(f"结果: {verdict}")


# ============================================================================
# 示例 2: 集成到 LLM 工作流
# ============================================================================
def example_llm_workflow():
    """Example 2: gate mock LLM output with a validator before returning it.

    The Guard runs in EXCEPTION mode, so an unsafe response raises and the
    caller can regenerate or refuse instead of showing it to the user.
    """
    divider = "=" * 70
    print("\n" + divider)
    print("示例 2: 集成到 LLM 工作流")
    print(divider)

    from guardrails import Guard, OnFailAction
    from validators.ml_content_safety import MLContentSafety

    # Canned prompt→response table standing in for a real LLM call.
    canned_replies = {
        "介绍人工智能": "人工智能是计算机科学的一个分支...",
        "评价产品": "这个垃圾产品真是傻逼设计！",
    }

    def mock_llm_generate(prompt):
        """Return a canned response for *prompt* (empty string if unknown)."""
        return canned_replies.get(prompt, "")

    # EXCEPTION: a violation raises from guard.validate().
    guard = Guard().use(
        MLContentSafety(
            model_path="../models/safety_classifier",
            threshold=0.5,
            on_fail=OnFailAction.EXCEPTION,
        )
    )

    for prompt in ("介绍人工智能", "评价产品"):
        print(f"\n提示: {prompt}")

        llm_output = mock_llm_generate(prompt)
        print(f"LLM输出: {llm_output}")

        try:
            result = guard.validate(llm_output)
        except Exception as err:
            print(f"❌ 验证失败: {err}")
            print("🔄 需要重新生成或拒绝输出")
        else:
            print("✅ 验证通过，可以返回给用户")
            print(f"最终输出: {result.validated_output}")


# ============================================================================
# 示例 3: API 客户端集成
# ============================================================================
def example_api_integration():
    """Example 3: validate text through the HTTP API instead of in-process.

    Defines a small ``SafetyChecker`` client and exercises both the single
    and batch validation endpoints. Prints a hint if the API service at
    ``http://localhost:8000`` is not running.
    """
    print("\n" + "="*70)
    print("示例 3: API 客户端集成")
    print("="*70)

    import requests

    API_URL = "http://localhost:8000"

    class SafetyChecker:
        """Thin client for the content-safety validation API."""

        def __init__(self, api_url, threshold=0.5):
            self.api_url = api_url
            self.threshold = threshold

        def _post(self, endpoint, payload):
            """POST *payload* as JSON to *endpoint* and return the decoded body.

            Raises:
                Exception: on any non-200 HTTP status, with the status code
                    in the message (shared by both public methods).
            """
            response = requests.post(f"{self.api_url}{endpoint}", json=payload)
            if response.status_code == 200:
                return response.json()
            raise Exception(f"API 调用失败: {response.status_code}")

        def validate(self, text):
            """验证文本安全性 (validate a single text)."""
            return self._post(
                "/api/validate",
                {"text": text, "threshold": self.threshold},
            )

        def validate_batch(self, texts):
            """批量验证 (validate a list of texts in one request)."""
            return self._post(
                "/api/validate/batch",
                {"texts": texts, "threshold": self.threshold},
            )

    # Usage demo — requires the API service to be up.
    try:
        checker = SafetyChecker(API_URL)

        # Single validation
        result = checker.validate("这是一个测试文本")
        print(f"\n单个验证结果:")
        print(f"- 是否安全: {result['is_safe']}")
        print(f"- 置信度: {result['confidence']:.2%}")

        # Batch validation
        batch_results = checker.validate_batch([
            "人工智能很有趣",
            "这个傻逼设计",
            "今天天气不错"
        ])

        print(f"\n批量验证结果:")
        print(f"- 总数: {batch_results['summary']['total']}")
        print(f"- 安全: {batch_results['summary']['safe']}")
        print(f"- 危险: {batch_results['summary']['unsafe']}")

    except requests.exceptions.ConnectionError:
        print("❌ 无法连接到 API 服务")
        print("💡 请先启动服务: python api/main.py")


# ============================================================================
# 示例 4: 自定义阈值策略
# ============================================================================
def example_custom_threshold():
    """Example 4: show how the pass/fail verdict shifts with the threshold.

    Validates one borderline text at several thresholds, rebuilding the
    Guard each time, and prints the outcome per threshold.
    """
    bar = "=" * 70
    print("\n" + bar)
    print("示例 4: 自定义阈值策略")
    print(bar)

    from guardrails import Guard, OnFailAction
    from validators.ml_content_safety import MLContentSafety

    sample = "这个产品有点不太好用"

    for cutoff in (0.3, 0.5, 0.7, 0.9):
        # One fresh Guard per threshold so each run is independent.
        guard = Guard().use(
            MLContentSafety(
                model_path="../models/safety_classifier",
                threshold=cutoff,
                on_fail=OnFailAction.NOOP,
            )
        )

        outcome = guard.validate(sample)
        label = "✅ 通过" if outcome.validation_passed else "❌ 失败"

        print(f"\n阈值 {cutoff}:")
        print(f"- 验证结果: {label}")


# ============================================================================
# 示例 5: 实际应用场景 - 聊天机器人
# ============================================================================
def example_chatbot_integration():
    """Example 5: a chatbot whose every reply passes through a safety guard.

    ``SafeChatbot`` simulates LLM generation with a lookup table, validates
    the reply in EXCEPTION mode, and substitutes a refusal message when the
    validator rejects the content.
    """
    bar = "=" * 70
    print("\n" + bar)
    print("示例 5: 聊天机器人集成")
    print(bar)

    from guardrails import Guard, OnFailAction
    from validators.ml_content_safety import MLContentSafety

    class SafeChatbot:
        """Chatbot wrapper that safety-checks generated replies."""

        # Canned replies standing in for a real LLM backend.
        _CANNED = {
            "你好": "你好！有什么可以帮助你的吗？",
            "介绍AI": "人工智能是一门研究如何让计算机模拟人类智能的科学。",
            "骂人": "你这个傻逼，滚一边去！",
        }

        def __init__(self, model_path):
            self.guard = Guard().use(
                MLContentSafety(
                    model_path=model_path,
                    threshold=0.6,
                    on_fail=OnFailAction.EXCEPTION,
                )
            )

        def _generate_response(self, user_input):
            """Simulate LLM generation via the canned-reply table."""
            return self._CANNED.get(user_input, "我不太明白你的意思。")

        def chat(self, user_input):
            """Generate a reply, validate it, and report the outcome dict."""
            reply = self._generate_response(user_input)

            try:
                checked = self.guard.validate(reply)
            except Exception as err:
                # Validator rejected the content: return a safe refusal.
                return {
                    "success": False,
                    "response": "抱歉，我无法回答这个问题。",
                    "safe": False,
                    "error": str(err),
                }

            return {
                "success": True,
                "response": checked.validated_output,
                "safe": True,
            }

    # Usage demo
    bot = SafeChatbot("../models/safety_classifier")

    for user_input in ("你好", "介绍AI", "骂人"):
        print(f"\n用户: {user_input}")
        outcome = bot.chat(user_input)

        suffix = "" if outcome["success"] else " (内容被过滤)"
        print(f"机器人: {outcome['response']}{suffix}")


# ============================================================================
# 运行所有示例
# ============================================================================
if __name__ == "__main__":
    # Run every example in order; the API example waits for user confirmation
    # because it needs the HTTP service to be started separately.
    print("\n🎯 Guardrails AI 内容安全检测 - 使用示例\n")

    example_basic_validation()
    example_llm_workflow()

    input("\n按 Enter 继续 API 示例（需先启动 API 服务）...")
    example_api_integration()

    example_custom_threshold()
    example_chatbot_integration()

    closing = "=" * 70
    print("\n" + closing)
    print("✅ 所有示例运行完成！")
    print(closing)