# --- Legacy implementation based on the official google-generativeai SDK, kept for reference ---
# import os, json, uuid
# from types import SimpleNamespace
# from typing import Any, Dict, List

# import requests
# from google import generativeai as genai
# from google.generativeai import types, GenerationConfig

# from core.providers.llm.base import LLMProviderBase
# from core.utils.util import check_model_key
# from config.logger import setup_logging
# from google.generativeai.types import GenerateContentResponse
# from requests import RequestException

# log = setup_logging()
# TAG = __name__


# def test_proxy(proxy_url: str, test_url: str) -> bool:
#     try:
#         resp = requests.get(
#             test_url,
#             proxies={"http": proxy_url, "https": proxy_url},
#             timeout=5,  # avoid hanging indefinitely on an unresponsive proxy
#         )
#         return 200 <= resp.status_code < 400
#     except RequestException:
#         return False


# def setup_proxy_env(http_proxy: str | None, https_proxy: str | None):
#     """
#     Test the HTTP and HTTPS proxies separately and set the environment variables.
#     If the HTTPS proxy is unavailable but the HTTP one works, HTTPS_PROXY is pointed at the HTTP proxy as well.
#     """
#     test_http_url = "http://www.google.com"
#     test_https_url = "https://www.google.com"

#     ok_http = ok_https = False

#     if http_proxy:
#         ok_http = test_proxy(http_proxy, test_http_url)
#         if ok_http:
#             os.environ["HTTP_PROXY"] = http_proxy
#             log.bind(tag=TAG).info(f"配置提供的Gemini HTTPS代理连通成功: {http_proxy}")
#         else:
#             log.bind(tag=TAG).warning(f"配置提供的Gemini HTTP代理不可用: {http_proxy}")

#     if https_proxy:
#         ok_https = test_proxy(https_proxy, test_https_url)
#         if ok_https:
#             os.environ["HTTPS_PROXY"] = https_proxy
#             log.bind(tag=TAG).info(f"配置提供的Gemini HTTPS代理连通成功: {https_proxy}")
#         else:
#             log.bind(tag=TAG).warning(
#                 f"配置提供的Gemini HTTPS代理不可用: {https_proxy}"
#             )

#     # If https_proxy is unusable but http_proxy works and can reach HTTPS targets, reuse http_proxy as https_proxy
#     if ok_http and not ok_https:
#         if test_proxy(http_proxy, test_https_url):
#             os.environ["HTTPS_PROXY"] = http_proxy
#             ok_https = True
#             log.bind(tag=TAG).info(f"复用HTTP代理作为HTTPS代理: {http_proxy}")

#     if not ok_http and not ok_https:
#         log.bind(tag=TAG).error(
#             f"Gemini 代理设置失败: HTTP 和 HTTPS 代理都不可用，请检查配置"
#         )
#         raise RuntimeError("HTTP 和 HTTPS 代理都不可用，请检查配置")


# class LLMProvider(LLMProviderBase):
#     def __init__(self, cfg: Dict[str, Any]):
#         self.model_name = cfg.get("model_name", "gemini-2.0-flash")
#         self.api_key = cfg["api_key"]
#         http_proxy = cfg.get("http_proxy")
#         https_proxy = cfg.get("https_proxy")

#         model_key_msg = check_model_key("LLM", self.api_key)
#         if model_key_msg:
#             log.bind(tag=TAG).error(model_key_msg)

#         if http_proxy or https_proxy:
#             log.bind(tag=TAG).info(
#                 f"检测到Gemini代理配置，开始测试代理连通性和设置代理环境..."
#             )
#             setup_proxy_env(http_proxy, https_proxy)
#             log.bind(tag=TAG).info(
#                 f"Gemini 代理设置成功 - HTTP: {http_proxy}, HTTPS: {https_proxy}"
#             )
#         # Configure the API key
#         genai.configure(api_key=self.api_key)

#         # Request timeout in seconds
#         self.timeout = cfg.get("timeout", 120)  # default: 120 seconds

#         # Create the model instance
#         self.model = genai.GenerativeModel(self.model_name)

#         self.gen_cfg = GenerationConfig(
#             temperature=0.7,
#             top_p=0.9,
#             top_k=40,
#             max_output_tokens=2048,
#         )

#     @staticmethod
#     def _build_tools(funcs: List[Dict[str, Any]] | None):
#         if not funcs:
#             return None
#         return [
#             types.Tool(
#                 function_declarations=[
#                     types.FunctionDeclaration(
#                         name=f["function"]["name"],
#                         description=f["function"]["description"],
#                         parameters=f["function"]["parameters"],
#                     )
#                     for f in funcs
#                 ]
#             )
#         ]

#     # Per the Gemini docs there is no need to maintain a session id; the request is assembled directly from the dialogue
#     def response(self, session_id, dialogue, **kwargs):
#         yield from self._generate(dialogue, None)

#     def response_with_functions(self, session_id, dialogue, functions=None):
#         yield from self._generate(dialogue, self._build_tools(functions))

#     def _generate(self, dialogue, tools):
#         role_map = {"assistant": "model", "user": "user"}
#         contents: list = []
#         # Assemble the conversation
#         for m in dialogue:
#             r = m["role"]

#             if r == "assistant" and "tool_calls" in m:
#                 tc = m["tool_calls"][0]
#                 contents.append(
#                     {
#                         "role": "model",
#                         "parts": [
#                             {
#                                 "function_call": {
#                                     "name": tc["function"]["name"],
#                                     "args": json.loads(tc["function"]["arguments"]),
#                                 }
#                             }
#                         ],
#                     }
#                 )
#                 continue

#             if r == "tool":
#                 contents.append(
#                     {
#                         "role": "model",
#                         "parts": [{"text": str(m.get("content", ""))}],
#                     }
#                 )
#                 continue

#             contents.append(
#                 {
#                     "role": role_map.get(r, "user"),
#                     "parts": [{"text": str(m.get("content", ""))}],
#                 }
#             )

#         stream: GenerateContentResponse = self.model.generate_content(
#             contents=contents,
#             generation_config=self.gen_cfg,
#             tools=tools,
#             stream=True,
#             request_options={"timeout": self.timeout},  # generate_content expects request_options, not a bare timeout kwarg
#         )

#         try:
#             for chunk in stream:
#                 cand = chunk.candidates[0]
#                 for part in cand.content.parts:
#                     # a) Function call - usually only the final chunk carries the function call
#                     if getattr(part, "function_call", None):
#                         fc = part.function_call
#                         yield None, [
#                             SimpleNamespace(
#                                 id=uuid.uuid4().hex,
#                                 type="function",
#                                 function=SimpleNamespace(
#                                     name=fc.name,
#                                     arguments=json.dumps(
#                                         dict(fc.args), ensure_ascii=False
#                                     ),
#                                 ),
#                             )
#                         ]
#                         return
#                     # b) Plain text
#                     if getattr(part, "text", None):
#                         yield part.text if tools is None else (part.text, None)

#         finally:
#             if tools is not None:
#                 yield None, None  # function mode finished; emit a sentinel pair

#     # Close the stream. Reserved for a future conversation-interruption feature: the official docs
#     # recommend closing the previous stream when interrupting, which reduces quota billing and resource usage.
#     @staticmethod
#     def _safe_finish_stream(stream: GenerateContentResponse):
#         if hasattr(stream, "resolve"):
#             stream.resolve()  # Gemini SDK version ≥ 0.5.0
#         elif hasattr(stream, "close"):
#             stream.close()  # Gemini SDK version < 0.5.0
#         else:
#             for _ in stream:  # fallback: drain the stream
#                 pass
from typing import Any, Dict, List, Iterator, Union, Tuple, Optional
import re

import openai

# These dependencies come from the original environment; keep their imports.
from core.providers.llm.base import LLMProviderBase
from core.utils.util import check_model_key
from config.logger import setup_logging


log = setup_logging()
TAG = __name__


# --- Helper functions (from gemini_chat_caller.py) ---
def remove_markdown(text: str) -> str:
    """去除基本的 Markdown 格式（如星号、下划线和反引号）。"""
    return re.sub(r"[*_`]", "", text)
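
# A quick illustrative check of the stripping behavior (hypothetical input):
#   remove_markdown("**Hola**, `mundo`!")  ->  "Hola, mundo!"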


# --- LLM Provider implementation (via the OpenAI compatibility layer) ---
class LLMProvider(LLMProviderBase):
    def __init__(self, cfg: Dict[str, Any]):
        self.model_name = cfg.get("model_name", "gemini-2.5-flash-lite")
        self.api_key = cfg.get("api_key")

        # Proxy configuration handling has been removed.

        # Default Base URL taken from gemini_chat_caller.py
        self.base_url = cfg.get("api_url", "https://jeniya.cn/v1")

        model_key_msg = check_model_key("LLM", self.api_key)
        if model_key_msg:
            log.bind(tag=TAG).error(model_key_msg)

        # Configure the OpenAI-compatible client
        try:
            self.client = openai.OpenAI(
                api_key=self.api_key,
                base_url=self.base_url
            )
            log.bind(tag=TAG).info(f"Gemini LLM Provider (OpenAI Comp) {self.model_name}配置成功，Base URL: {self.base_url}")
        except Exception as e:
            log.bind(tag=TAG).error(f"Error initializing OpenAI client: {e}")
            raise

        # Request parameters
        self.timeout = cfg.get("timeout", 120)  # default: 120 seconds
        self.temperature = cfg.get("temperature", 0.7)
        self.max_output_tokens = cfg.get("max_output_tokens", 2048)

    @staticmethod
    def _convert_dialogue_to_messages(dialogue: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """将原始的 Dialogue 结构转换为 OpenAI 兼容的 Messages 结构。"""
        messages = []
        # Default to the system prompt from gemini_chat_caller.py unless the dialogue already contains one
        system_content = "Eres un asistente útil que siempre responde en español de manera natural y adecuada para un niño."
        has_system_message = False

        for m in dialogue:
            role = m["role"]
            content = m.get("content", "")

            # Roles in the original code: assistant, user, tool
            # The OpenAI compatibility layer uses: assistant, user, system (among others)
            if role == "system":
                # If the dialogue history already contains an explicit system message, use it
                messages.append({"role": "system", "content": content})
                has_system_message = True
            elif role == "assistant" or role == "model":
                # Skip the original code's complex function_call conversion; text only
                messages.append({"role": "assistant", "content": content})
            elif role == "user":
                messages.append({"role": "user", "content": content})
            # 忽略 "tool" 角色的转换，因为我们不再支持复杂的函数调用流程

        # Ensure a system prompt exists if the history did not provide one
        # (any system message appended above would have set has_system_message)
        if not has_system_message:
            messages.insert(0, {"role": "system", "content": system_content})

        return messages
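
    # Illustrative conversion (hypothetical dialogue) when no system message is present:
    #   [{"role": "user", "content": "hola"},
    #    {"role": "model", "content": "¡Hola!"}]
    # becomes
    #   [{"role": "system", "content": "<default Spanish system prompt>"},
    #    {"role": "user", "content": "hola"},
    #    {"role": "assistant", "content": "¡Hola!"}]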


    # response: simple streaming text output
    def response(self, session_id: Any, dialogue: List[Dict[str, Any]], **kwargs) -> Iterator[str]:
        # session_id and kwargs are ignored
        yield from self._generate(dialogue)

    # response_with_functions: the interface is kept, but it emits nothing, since
    # function calling is not implemented under the new OpenAI-compatible calling standard
    def response_with_functions(
        self,
        session_id: Any,
        dialogue: List[Dict[str, Any]],
        functions: Optional[List[Dict[str, Any]]] = None,
    ) -> Iterator[Union[str, Tuple[str, Optional[List[Dict[str, Any]]]]]]:
        log.bind(tag=TAG).warning(
            "Function calling (response_with_functions) is not implemented under the new OpenAI-compatible calling standard."
        )
        # Return an empty iterator; raise NotImplementedError instead if your environment requires it
        return iter(())
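
    # If the backend's OpenAI compatibility layer supports the standard `tools`
    # parameter, function calling could roughly be wired as sketched below
    # (an untested assumption for this Base URL, not an implemented code path):
    #
    #     stream = self.client.chat.completions.create(
    #         model=self.model_name,
    #         messages=self._convert_dialogue_to_messages(dialogue),
    #         tools=functions,  # OpenAI-style tool declarations
    #         stream=True,
    #     )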

    # _generate: issue the API call and process the stream
    def _generate(self, dialogue: List[Dict[str, Any]]) -> Iterator[str]:
        messages = self._convert_dialogue_to_messages(dialogue)

        try:
            # === Streaming output logic (based on gemini_chat_caller.py) ===
            stream_response = self.client.chat.completions.create(
                model=self.model_name,
                messages=messages,
                temperature=self.temperature,
                max_tokens=self.max_output_tokens,
                timeout=self.timeout,  # apply the configured timeout (previously set but never used)
                stream=True  # always stream
            )

            # Handle streaming output only
            for chunk in stream_response:
                # Some chunks (e.g. a trailing usage chunk) may carry no choices
                if not chunk.choices:
                    continue
                # Check whether the delta object carries any content
                content = chunk.choices[0].delta.content
                if content:
                    # Strip Markdown before yielding, following the gemini_chat_caller.py convention
                    cleaned_content = remove_markdown(content)
                    yield cleaned_content

        except Exception as e:
            log.bind(tag=TAG).error(f"API call failed: {e}")
            # Let the exception propagate to the caller
            raise
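

# A minimal usage sketch (hypothetical config values; the api_key is a placeholder):
#
#     provider = LLMProvider({
#         "api_key": "sk-...",
#         "model_name": "gemini-2.5-flash-lite",
#         "timeout": 60,
#     })
#     for piece in provider.response("session-1", [
#         {"role": "user", "content": "Cuéntame un cuento corto."},
#     ]):
#         print(piece, end="", flush=True)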