import os
from openai import OpenAI
from langfuse import Langfuse
from dotenv import load_dotenv
import time

# Load environment variables from a local .env file
load_dotenv()

# Initialize the LangFuse client from env vars (host falls back to LangFuse Cloud)
langfuse = Langfuse(
    public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),
    secret_key=os.getenv("LANGFUSE_SECRET_KEY"),
    host=os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com")
)

# OpenAI-compatible client pointed at the Qwen (DashScope) endpoint
client = OpenAI(
    api_key=os.getenv("QWEN_API_KEY"),
    base_url=os.getenv("QWEN_API_BASE", "https://dashscope.aliyuncs.com/compatible-mode/v1")
)

def track_qwen_correct(prompt: str = "请解释Transformer架构"):
    """Track a Qwen chat call with a full span -> generation hierarchy.

    Args:
        prompt: User prompt sent to the model. Defaults to the original
            hard-coded question so existing callers are unchanged.

    Returns:
        The model's response text.

    Raises:
        Re-raises any exception from the LangFuse SDK or the model call
        after printing it.
    """
    try:
        # Create an explicit trace ID so both observations join one trace
        trace_id = langfuse.create_trace_id()
        print(f"Trace ID: {trace_id}")

        # Outer span; user_id is not a top-level observation field in this
        # API, so it is carried in metadata.
        with langfuse.start_as_current_observation(
                name="qwen-chat-process",
                as_type="span",
                trace_context={"trace_id": trace_id},
                metadata={"model": "qwen-max", "user_id": "user-123"},
                input=prompt
        ):
            # Nested generation records model name, parameters and usage
            with langfuse.start_as_current_observation(
                    name="qwen-chat-generation",
                    as_type="generation",
                    model="qwen-max",
                    model_parameters={
                        "temperature": 0.7,
                        "max_tokens": 1000
                    },
                    input=[{"role": "user", "content": prompt}]
            ) as generation:

                print("正在调用千问模型...")

                response = client.chat.completions.create(
                    model="qwen-max",
                    messages=[{"role": "user", "content": prompt}],
                    temperature=0.7,
                    max_tokens=1000
                )

                result = response.choices[0].message.content

                # Attach output and (when reported) token usage before the
                # context manager closes the generation.
                usage = getattr(response, "usage", None)
                generation.update(
                    output=result,
                    usage_details={
                        "promptTokens": usage.prompt_tokens,
                        "completionTokens": usage.completion_tokens,
                        "totalTokens": usage.total_tokens
                    } if usage else None,
                    metadata={"status": "completed"}
                )

                print("响应内容:")
                print(result)
                print(f"\n跟踪已发送到LangFuse, Trace ID: {trace_id}")

                return result

    except Exception as e:
        print(f"发生错误: {e}")
        raise
    finally:
        # Ensure buffered events reach LangFuse even on failure
        langfuse.flush()

def simple_tracked_qwen(prompt: str = "什么是深度学习？"):
    """Track a Qwen call with a single manually-managed generation.

    Args:
        prompt: User prompt sent to the model. Defaults to the original
            hard-coded question so existing callers are unchanged.

    Returns:
        The model's response text.

    Raises:
        Re-raises any exception from the LangFuse SDK or the model call
        after printing it.
    """
    observation = None
    try:
        trace_id = langfuse.create_trace_id()

        # Manually started observation: unlike the context-manager form,
        # this MUST be ended explicitly (see finally below).
        observation = langfuse.start_observation(
            name="qwen-simple",
            as_type="generation",
            trace_context={"trace_id": trace_id},
            model="qwen-max",
            input=prompt,
            metadata={"type": "simple-chat"}
        )

        response = client.chat.completions.create(
            model="qwen-max",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7
        )

        result = response.choices[0].message.content

        observation.update(
            output=result,
            metadata={"success": True}
        )

        print("简化跟踪结果:")
        print(result)
        return result

    except Exception as e:
        print(f"简化跟踪错误: {e}")
        raise
    finally:
        # Bug fix: the original never ended the observation, leaving it
        # open in LangFuse on both success and failure paths.
        if observation is not None:
            observation.end()
        langfuse.flush()

def minimal_working_tracking(prompt: str = "解释人工智能"):
    """Minimal working tracking: one generation around one model call.

    Args:
        prompt: User prompt sent to the model. Defaults to the original
            hard-coded question so existing callers are unchanged.

    Returns:
        The model's response text, or None if anything fails (errors are
        printed, not re-raised — this keeps the original contract).
    """
    observation = None
    try:
        # Manually started observation: must be ended explicitly
        observation = langfuse.start_observation(
            name="qwen-minimal",
            as_type="generation",
            model="qwen-max",
            input=prompt
        )

        response = client.chat.completions.create(
            model="qwen-max",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7
        )

        result = response.choices[0].message.content

        observation.update(output=result)

        print("最小跟踪结果:")
        print(result)
        return result

    except Exception as e:
        print(f"最小跟踪错误: {e}")
        return None
    finally:
        # Bug fix: the original never ended the observation, leaving it
        # open in LangFuse on both success and failure paths.
        if observation is not None:
            observation.end()
        langfuse.flush()

def using_context_manager(prompt: str = "请介绍自然语言处理"):
    """Track a Qwen call using nested context-managed observations.

    Args:
        prompt: User prompt sent to the model. Defaults to the original
            hard-coded question so existing callers are unchanged.

    Returns:
        The model's response text.

    Raises:
        Re-raises any exception from the LangFuse SDK or the model call
        after printing it.
    """
    try:
        trace_id = langfuse.create_trace_id()

        # Context managers end both observations automatically, even on error
        with langfuse.start_as_current_observation(
                name="qwen-context-manager",
                as_type="span",
                trace_context={"trace_id": trace_id},
                metadata={"approach": "context-manager"}
        ):
            # Generation nested under the current span
            with langfuse.start_as_current_observation(
                    name="qwen-generation",
                    as_type="generation",
                    model="qwen-max",
                    input=prompt
            ) as gen:

                response = client.chat.completions.create(
                    model="qwen-max",
                    messages=[{"role": "user", "content": prompt}]
                )

                result = response.choices[0].message.content
                gen.update(output=result)

                print("上下文管理器结果:")
                print(result)
                return result

    except Exception as e:
        print(f"上下文管理器错误: {e}")
        raise
    finally:
        langfuse.flush()

def simple_qwen_example():
    """Call the Qwen model once without any LangFuse tracking.

    Returns:
        The response text, or None if the call fails (the error is printed).
    """
    messages = [{"role": "user", "content": "你好，请介绍一下你自己"}]
    try:
        completion = client.chat.completions.create(
            model="qwen-max",
            messages=messages,
            temperature=0.7
        )
        return completion.choices[0].message.content
    except Exception as e:
        print(f"千问调用错误: {e}")
        return None

if __name__ == "__main__":
    print("=== 简单千问调用 ===")
    result = simple_qwen_example()
    if result:
        print(result)

    print("\n=== 最小跟踪版本 ===")
    try:
        minimal_working_tracking()
    except Exception as e:
        print(f"最小跟踪失败: {e}")

    print("\n=== 简化跟踪版本 ===")
    try:
        simple_tracked_qwen()
    except Exception as e:
        print(f"简化跟踪失败: {e}")

    print("\n=== 上下文管理器版本 ===")
    try:
        using_context_manager()
    except Exception as e:
        print(f"上下文管理器失败: {e}")

    print("\n=== 完整跟踪版本 ===")
    try:
        track_qwen_correct()
    except Exception as e:
        print(f"完整跟踪失败: {e}")