#!/anaconda3/envs/FEALPy/bin/python3.8
# -*- coding: utf-8 -*-
# File: chatgpt-api-sevice.py
# Author: Bryan SHEN
# E-mail: m18801919240_3@163.com
# Site: Shanghai, China
# Time: 2025/6/13 15:46
# File-Desp:

# chatgpt_client.py
import os
import time
import json
from typing import List, Literal, Generator
from dotenv import load_dotenv
from openai import OpenAI, RateLimitError, APITimeoutError, NotFoundError
from loguru import logger


# Scene = Literal["creative", "factual", "summary", "economy", "max", "ultra"]
Scene = Literal["creative", "factual", "summary", "economy"]
ReturnFormat = Literal["text", "dict", "json"]


class ChatGPTClient:
    """
    Lightweight wrapper around the OpenAI chat-completions API.

    Usage::

        client = ChatGPTClient()
        client.ask("Explain quantum entanglement in one sentence", scene="summary")
        for delta in client.ask("Write a short poem", stream=True, scene="creative"):
            logger.info(delta)
    """

    # Models currently unavailable to this account:
    # o3/o3-pro     - API access requires Tier 4/5 or a verified organization.
    # o4-mini-high  - web UI only; no callable API model ID is published.
    MODEL_MAP = {
        "creative": "gpt-4o",          # creativity / highest quality
        "factual" : "gpt-4o-mini",     # Q&A / fact checking
        "summary" : "gpt-3.5-turbo",   # summaries / extraction
        "economy" : "gpt-3.5-turbo-0125",   # cost-sensitive workloads
        # "max": "gpt-o3",
        # "ultra": "gpt-o3-pro"
    }

    def __init__(self, api_key: str = None, max_retries: int = 3, timeout: int = 30):
        """
        Parameters
        ----------
        api_key     : explicit OpenAI API key; falls back to the
                      OPENAI_API_KEY environment variable (a .env file is
                      loaded first via python-dotenv).
        max_retries : how many times `ask` re-tries on rate-limit/timeout.
        timeout     : per-request timeout in seconds, forwarded to the SDK.

        Raises
        ------
        ValueError  : when no API key can be resolved.
        """
        # Proxy env vars have been observed to break client construction
        # (older openai/httpx versions forwarded them as an unexpected
        # `proxies` kwarg), so they are cleared up front.
        for var in ('HTTP_PROXY', 'HTTPS_PROXY', 'http_proxy',
                    'https_proxy', 'ALL_PROXY', 'all_proxy'):
            os.environ.pop(var, None)

        load_dotenv()  # pick up OPENAI_API_KEY from a .env file, if present

        # SECURITY: the previous revision shipped a hard-coded API key as a
        # fallback.  Credentials must never live in source code - the key now
        # has to come from the caller or the environment.  Any key that was
        # committed here should be treated as compromised and revoked.
        final_api_key = api_key or os.getenv("OPENAI_API_KEY")
        if not final_api_key:
            raise ValueError(
                "No OpenAI API key found: pass api_key= or set OPENAI_API_KEY "
                "(e.g. in a .env file)."
            )

        # The former multi-step fallback chain (legacy module-level client,
        # then a stub that raised NotImplementedError on every call) only
        # deferred the failure to the first request; failing fast here with
        # the real cause is strictly more debuggable.
        self.client = OpenAI(api_key=final_api_key, timeout=timeout)
        self.max_retries = max_retries

    # -- core call --
    def ask(
        self,
        prompt: str,
        *,
        scene: Scene = "factual",
        model_override: str = None,
        temperature: float = 0.7,
        stream: bool = False,
        return_format: ReturnFormat = "text"
    ) -> "str | dict | Generator[str, None, None]":
        """
        Send `prompt` to the model selected by `scene` (or `model_override`).

        Parameters
        ----------
        prompt         : plain text, or a ready-made `messages` list.
        stream         : False -> full response; True -> per-token generator.
        return_format  : "text" | "dict" | "json" (non-stream only).

        Raises
        ------
        RuntimeError   : retries exhausted, or the model is unknown/locked.
        """
        if stream and return_format != "text":
            # Structured output cannot be assembled incrementally; silently
            # downgrade to a non-streaming call (original behavior).
            stream = False

        model = model_override or self.MODEL_MAP[scene]
        messages = self._normalize_prompt(prompt)

        for attempt in range(self.max_retries + 1):
            try:
                resp = self.client.chat.completions.create(
                    model=model,
                    messages=messages,
                    temperature=temperature,
                    stream=stream
                )

                # ---------- streaming ----------
                if stream:
                    return self._yield_stream(resp)

                # ---------- non-streaming ----------
                choice = resp.choices[0]
                if return_format == "text":
                    return choice.message.content

                answer_dict = {
                    "content": choice.message.content,
                    "model": model,
                    "finish_reason": choice.finish_reason,
                    "usage": {
                        "prompt_tokens": resp.usage.prompt_tokens,
                        "completion_tokens": resp.usage.completion_tokens,
                        "total_tokens": resp.usage.total_tokens,
                    },
                }
                if return_format == "dict":
                    return answer_dict
                return json.dumps(answer_dict, ensure_ascii=False)

            except (RateLimitError, APITimeoutError) as e:
                if attempt >= self.max_retries:
                    raise RuntimeError("OpenAI API 已超过最大重试次数") from e
                time.sleep(2 ** (attempt + 1))  # exponential back-off: 2s, 4s, 8s...
            except NotFoundError as e:
                raise RuntimeError(
                    "模型不存在或暂未开放，请检查 model 名称/权限"
                ) from e

    @staticmethod
    def _normalize_prompt(prompt):
        """Wrap a bare string into a messages list; pass lists through as-is."""
        if isinstance(prompt, list):
            return prompt
        return [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user",   "content": prompt},
        ]

    @staticmethod
    def _yield_stream(stream_resp):
        """Yield non-empty content deltas from a streaming response."""
        for chunk in stream_resp:
            delta = chunk.choices[0].delta.content or ""
            if delta:
                yield delta


if __name__ == "__main__":

    # 默认是 gpt-4o-mini
    gpt = ChatGPTClient()

    # # Type1, 流式输出
    # logger.info("\n>> 创造力场景（流式）")
    # for tok in gpt.ask("写一段科幻小说开头，中文", scene="creative", stream=True):
    #     logger.info(tok, end="", flush=True)

    # Type2, 完整输出
    # logger.info(">> 摘要场景（一次性返回）")
    # logger.info(gpt.ask("用一句话解释量子纠缠", scene="summary", stream=False))

    # logger.info(gpt.ask("从下面的title中提取出完整的手机型号，title='HUAWEI Pura 70 国家补贴500元 12GB+512GB 羽砂黑 北斗卫星消息版 第二代昆仑玻璃 华为鸿蒙智能手机' ", scene="factual", stream=False))
    # 输出：从给定的标题中，可以提取出的完整手机型号是：**HUAWEI Pura 70**。

def test_chatgpt_client():
    """Manual smoke test: run one extraction query and log the JSON result.

    Returns True on success, False on any failure (the error is logged).
    Not auto-discovered; call it explicitly to avoid accidental API spend.
    """
    try:
        client = ChatGPTClient()
        answer = client.ask(
            "从下面的title中提取出完整的手机型号，title='HUAWEI Pura 70 国家补贴500元 12GB+512GB 羽砂黑 北斗卫星消息版 第二代昆仑玻璃 华为鸿蒙智能手机' ",
            scene="factual",
            stream=False,
            return_format="json",
        )
        logger.info(answer)
        return True
    except Exception as e:
        logger.error(f"测试失败: {e}")
        return False

if __name__ == "__main__":
    # 只有直接运行此文件时才会执行测试
    # 导入时不会执行，避免意外API调用
    print("开始测试ChatGPT客户端...")
    test_chatgpt_client()



