import os
from openai import OpenAI
import httpx

# Initialize the OpenAI client (OpenAI SDK v1.x style).
# A custom HTTP client is supplied to avoid issues with the `proxies`
# argument in some httpx/openai version combinations.
custom_http_client = httpx.Client()

# SECURITY: prefer the DASHSCOPE_API_KEY environment variable over a
# hard-coded key. The literal fallback preserves the original behavior,
# but the embedded key should be rotated and the fallback removed.
client = OpenAI(
    api_key=os.environ.get("DASHSCOPE_API_KEY", "sk-457cedeb65a14e0792fdbfefd5d3d5e8"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    http_client=custom_http_client
)

def get_completion(messages):
    """Send the chat history to the API and return a streaming completion.

    Args:
        messages: List of ``{"role": ..., "content": ...}`` chat messages.

    Returns:
        An iterator of streaming chat-completion chunks.
    """
    return client.chat.completions.create(
        model="deepseek-v3.2-exp",
        messages=messages,
        stream=True,
        # Ask the API to append a final usage chunk; without this the
        # token-consumption report in the stream consumer never fires.
        stream_options={"include_usage": True},
    )

def process_streaming_response(completion):
    """Consume a streaming completion, echoing it to stdout.

    Prints the model's reasoning deltas (if provided) and the final
    answer as chunks arrive, plus token usage when the API reports it.

    Args:
        completion: Iterable of streaming chat-completion chunks.

    Returns:
        The concatenated answer text (reasoning text is not included).
    """
    reasoning_content = ""  # accumulated reasoning/thinking text
    answer_content = ""     # accumulated final answer text
    is_answering = False    # True once the answer phase has started

    print("\n" + "=" * 20 + "思考过程" + "=" * 20 + "\n")

    for chunk in completion:
        # BUG FIX: usage arrives on a final chunk whose `choices` list is
        # empty, so it must be inspected BEFORE the empty-choices skip —
        # the original elif branch after the `continue` was unreachable.
        usage = getattr(chunk, "usage", None)
        if usage:
            print("\n" + "=" * 20 + "Token 消耗" + "=" * 20 + "\n")
            print(usage)

        if not chunk.choices:
            continue

        delta = chunk.choices[0].delta

        # Reasoning deltas stream first; answer deltas follow.
        if hasattr(delta, "reasoning_content") and delta.reasoning_content is not None:
            if not is_answering:
                print(delta.reasoning_content, end="", flush=True)
            reasoning_content += delta.reasoning_content
        elif hasattr(delta, "content") and delta.content:
            if not is_answering:
                # First answer delta: switch phases and print the header.
                print("\n" + "=" * 20 + "完整回复" + "=" * 20 + "\n")
                is_answering = True
            print(delta.content, end="", flush=True)
            answer_content += delta.content

    return answer_content

def main():
    """Run an interactive free-form Q&A loop against the model."""
    print("欢迎使用DeepSeek自由问答模式！")
    print("输入'退出'或'quit'结束对话。\n")

    history = []  # full conversation so far, in API message format

    while True:
        user_input = input("\n你: ")

        # Exit commands end the session immediately.
        if user_input.lower() in ('退出', 'quit', 'q'):
            print("感谢使用！再见！")
            return

        history.append({"role": "user", "content": user_input})

        try:
            stream = get_completion(history)
            answer = process_streaming_response(stream)
        except Exception as exc:
            print(f"发生错误: {exc}")
            # Drop the just-added user message so the failed turn does
            # not linger in the conversation history.
            history.pop()
        else:
            # Record the model's reply so later turns keep full context.
            history.append({"role": "assistant", "content": answer})

if __name__ == "__main__":
    main()