import os
import httpx
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
import openai
from openai import APIError, RateLimitError, Timeout
from retry import retry

from pr_agent.config_loader import get_settings
from pr_agent.log import get_logger

OPENAI_RETRIES = 5


class OpenAIHandler(BaseAiHandler):
    """Async AI handler backed by the OpenAI chat-completions API (openai>=1.0 client).

    Reads credentials and endpoint from pr_agent settings, optionally routes
    traffic through the proxy named by the ``DEVCHAT_PROXY`` environment
    variable, and post-processes model output to undo common mangling of
    known key-field names and inline YAML block scalars.
    """

    def __init__(self):
        """Initialize the handler.

        Raises:
            ValueError: if required OpenAI settings are missing (surfaced by
                the base class as an ``AttributeError``).
        """
        try:
            super().__init__()
        except AttributeError as e:
            raise ValueError("OpenAI key is required") from e

    @property
    def deployment_id(self):
        """Return the OpenAI/Azure deployment ID from settings, or None if unset."""
        return get_settings().get("OPENAI.DEPLOYMENT_ID", None)

    @retry(exceptions=(APIError, Timeout, AttributeError, RateLimitError),
           tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
    async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
        """Run one chat completion and return ``(response_text, finish_reason)``.

        Args:
            model: model name, passed straight through to the API.
            system: system prompt.
            user: user prompt.
            temperature: sampling temperature.

        Raises:
            APIError, Timeout, RateLimitError: propagated (and retried by the
                ``@retry`` decorator) on API failures.
        """
        try:
            get_logger().info(f"System: {system}")
            get_logger().info(f"User: {user}")
            messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]

            chat_completion = await self._build_client().chat.completions.create(
                model=model,
                messages=messages,
                temperature=temperature,
            )

            resp = chat_completion.choices[0].message.content
            finish_reason = chat_completion.choices[0].finish_reason

            resp = self._fix_escaped_key_fields(resp)
            resp = self._fix_inline_block_scalars(resp)

            get_logger().info(f"AI response(fix):\n{resp}")
            return resp, finish_reason
        # In openai>=1.0 RateLimitError subclasses APIError, so it must be
        # caught before the generic (APIError, Timeout) clause or this branch
        # is unreachable.
        except RateLimitError as e:
            # f-string so the exception text actually reaches the log; the old
            # form `error("msg: ", e)` passed `e` as a %-format argument the
            # message never consumed, and the exception text was dropped.
            get_logger().error(f"Rate limit error during OpenAI inference: {e}")
            raise
        except (APIError, Timeout) as e:
            get_logger().error(f"Error during OpenAI inference: {e}")
            raise
        except Exception as e:
            get_logger().error(f"Unknown error during OpenAI inference: {e}")
            raise

    @staticmethod
    def _build_client():
        """Build an ``openai.AsyncOpenAI`` client, honoring ``DEVCHAT_PROXY``."""
        proxy_url = os.environ.get("DEVCHAT_PROXY", "")
        # httpx's `proxy=` kwarg takes a single proxy URL (httpx>=0.26 — TODO
        # confirm pinned version). The previous code passed a
        # {"https://": ..., "http://": ...} mapping to `proxy`, which httpx
        # rejects; both schemes used the same URL anyway.
        proxy_setting = {"proxy": proxy_url} if proxy_url else {}
        return openai.AsyncOpenAI(
            api_key=get_settings().openai.key,
            base_url=get_settings().openai.api_base,
            # trust_env=False: ignore HTTP(S)_PROXY env vars; only the explicit
            # DEVCHAT_PROXY setting is honored.
            http_client=httpx.AsyncClient(**proxy_setting, trust_env=False),
        )

    @staticmethod
    def _fix_escaped_key_fields(resp: str) -> str:
        """Undo model mangling of known key-field names.

        Repairs three common corruptions of the underscores in the key names
        used by pr_agent prompts: backslash-escaped (``relevant\\_file``),
        space after (``relevant_ file``), and space before (``relevant _file``).
        """
        key_fields = [
            "relevant_file",
            "code_suggestions",
            "suggestion_content",
            "existing_code",
            "improved_code",
            "one_sentence_summary",
            "relevant_lines_start",
            "relevant_lines_end",
            "suggestion_score",
            "changes_summary",
            "changes_title",
            "estimated_effort_to_review_",
            "relevant_tests",
            "possible_issues",
            "security_concerns",
            "can_be_split",
            "relevant_files",
        ]
        for key_field in key_fields:
            for mangled in (key_field.replace("_", "\\_"),
                            key_field.replace("_", "_ "),
                            key_field.replace("_", " _")):
                resp = resp.replace(mangled, key_field)
        return resp

    @staticmethod
    def _fix_inline_block_scalars(resp: str) -> str:
        """Collapse ``key: | value`` onto one line when the value is non-empty.

        A YAML block-scalar indicator (``|``) followed by inline text on the
        same line is invalid; rewrite it as a plain ``key: value`` scalar.
        """
        seq = ": | "
        lines = resp.split("\n")
        for index, line in enumerate(lines):
            pos = line.find(seq)
            if pos > 0 and line[pos + len(seq):].strip() != "":
                lines[index] = line[:pos + 1] + " " + line[pos + len(seq):]
        return "\n".join(lines)