import os
import threading
from typing import Optional, Literal, Any
import dspy
import requests

try:
    from anthropic import RateLimitError
except ImportError:
    RateLimitError = None


class OpenAIModel(dspy.OpenAI):
    """A wrapper around ``dspy.OpenAI`` that sends completion requests to a
    custom HTTP endpoint (read from the ``API_URL`` environment variable)
    and accumulates prompt/completion token usage across calls.
    """

    def __init__(
        self,
        model: str,
        api_key: Optional[str],
        model_type: Optional[Literal["chat", "text"]] = None,
        **kwargs,
    ):
        """Initialize the wrapper.

        Args:
            model: Model identifier included in every request payload.
            api_key: API key forwarded to ``dspy.OpenAI``.
            model_type: ``"chat"`` or ``"text"``, forwarded to the parent
                class; ``None`` lets the parent pick its default.
            **kwargs: Additional generation parameters merged into
                ``self.kwargs`` (e.g. ``temperature``, ``max_tokens``).
        """
        # Merge api_key and the other parameters into self.kwargs.
        # NOTE(review): dspy.OpenAI.__init__ also populates self.kwargs, so
        # this pre-assignment may be replaced by the super() call below;
        # kept as-is for compatibility with the installed dspy version.
        self.kwargs = {
            "api_key": api_key,
            "model": model,
            "model_type": model_type,
            **kwargs,
        }

        super().__init__(model=model, api_key=api_key, model_type=model_type, **kwargs)
        # Lock guarding the token counters so log_usage() /
        # get_usage_and_reset() are safe when called from multiple threads.
        self._token_usage_lock = threading.Lock()
        self.prompt_tokens = 0
        self.completion_tokens = 0

    def log_usage(self, response):
        """Accumulate token counts from an API response's ``usage`` field.

        Args:
            response: Decoded JSON response dict; entries under ``usage``
                (``prompt_tokens`` / ``completion_tokens``) are added to the
                running totals. Responses without ``usage`` are ignored.
        """
        usage_data = response.get("usage")
        if usage_data:
            with self._token_usage_lock:
                self.prompt_tokens += usage_data.get("prompt_tokens", 0)
                self.completion_tokens += usage_data.get("completion_tokens", 0)

    def get_usage_and_reset(self):
        """Return the accumulated token usage keyed by model name and reset
        the counters to zero.

        The read-and-reset happens under the usage lock so a concurrent
        ``log_usage`` increment cannot be lost between the read and the
        reset (the original code read and zeroed the counters unlocked).

        Returns:
            A dict mapping the model (or engine) name to its
            ``prompt_tokens`` / ``completion_tokens`` totals.
        """
        with self._token_usage_lock:
            usage = {
                self.kwargs.get("model")
                or self.kwargs.get("engine"): {
                    "prompt_tokens": self.prompt_tokens,
                    "completion_tokens": self.completion_tokens,
                }
            }
            self.prompt_tokens = 0
            self.completion_tokens = 0

        return usage

    def request(self, prompt: str, **kwargs):
        """Override the parent request method to POST to a custom endpoint.

        The target URL comes from the ``API_URL`` environment variable and
        the bearer token from ``OPENAI_API_KEY``. Only ``max_tokens`` and
        ``temperature`` are forwarded from ``kwargs``; other generation
        options are currently ignored by this override.

        Args:
            prompt: The raw prompt, wrapped as a single chat message.
            **kwargs: Generation options; ``max_tokens`` (default 50) and
                ``temperature`` (default 0.7) are used.

        Returns:
            The decoded JSON response body as a dict.

        Raises:
            RuntimeError: If ``API_URL`` is unset or the HTTP request fails.
        """
        url = os.getenv("API_URL")
        if not url:
            # Fail with a clear message instead of letting requests raise an
            # opaque MissingSchema error on a None URL.
            raise RuntimeError("Request failed: API_URL environment variable is not set")
        api_key = os.getenv("OPENAI_API_KEY")
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }

        # Persist the outgoing prompt for offline inspection/debugging.
        from .storm_wiki.modules.savetemp import savetemp
        savetemp("lm/prompt.txt", prompt)

        data = {
            "model": self.kwargs.get("model"),
            # Wrap the raw prompt in the chat-completions message format.
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": kwargs.get("max_tokens", 50),
            "temperature": kwargs.get("temperature", 0.7),
        }
        try:
            # Proxies are disabled explicitly so local/custom endpoints are
            # reached directly regardless of environment proxy settings.
            response = requests.post(
                url, json=data, headers=headers, proxies={"http": None, "https": None}
            )
            response.raise_for_status()  # Raise on 4xx/5xx status codes.
            return response.json()
        except requests.exceptions.RequestException as e:
            # Chain the original exception so the root cause is preserved.
            raise RuntimeError(f"Request failed: {str(e)}") from e

    def __call__(
        self,
        prompt: str,
        only_completed: bool = True,
        return_sorted: bool = False,
        **kwargs,
    ) -> list[dict[str, Any]]:
        """Copied from dspy/dsp/modules/gpt3.py with the addition of tracking token usage."""

        assert only_completed, "for now"
        assert return_sorted is False, "for now"
        merged_kwargs = {**self.kwargs, **kwargs}
        # Route through the custom-endpoint request override above.
        response = self.request(prompt, **merged_kwargs)
        # Track token usage reported by the endpoint.
        self.log_usage(response)

        # NOTE(review): this assumes the endpoint returned a successful
        # chat-completions payload; an error body without "choices" raises
        # KeyError here — confirm the endpoint's error shape.
        choices = response["choices"]

        # Prefer choices that finished naturally over ones truncated by the
        # token limit, falling back to all choices if none completed.
        completed_choices = [c for c in choices if c["finish_reason"] != "length"]
        if only_completed and completed_choices:
            choices = completed_choices

        # Guard against a missing model name; the original raised TypeError
        # on ``"gpt" not in None``. Non-GPT models use the chat message
        # format; GPT models go through dspy's choice-text helper.
        model_name = self.kwargs.get("model") or ""
        if "gpt" not in model_name:
            completions = [c["message"]["content"] for c in choices]
        else:
            completions = [self._get_choice_text(c) for c in choices]

        # Unreachable while the assert above holds, but kept because asserts
        # are stripped under ``python -O``.
        if return_sorted and kwargs.get("n", 1) > 1:
            scored_completions = []

            for c in choices:
                tokens, logprobs = (
                    c["logprobs"]["tokens"],
                    c["logprobs"]["token_logprobs"],
                )

                if "<|endoftext|>" in tokens:
                    index = tokens.index("<|endoftext|>") + 1
                    tokens, logprobs = tokens[:index], logprobs[:index]

                avglog = sum(logprobs) / len(logprobs)
                scored_completions.append((avglog, self._get_choice_text(c)))

            scored_completions = sorted(scored_completions, reverse=True)
            completions = [c for _, c in scored_completions]

        return completions