import copy
import os
import pickle
import sys

import openai
from loguru import logger
from openai import OpenAI

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


class LLM:
    """Wrapper around an OpenAI-compatible chat-completions API (OpenRouter).

    Requests are dispatched per model family: Qwen "distill" models are
    consumed as a stream (their answer arrives interleaved with reasoning
    tokens), other Qwen models have "thinking" explicitly disabled, and all
    remaining models use a plain non-streaming call.
    """

    def __init__(self, model):
        """Build the API client for *model*; exits the process on failure.

        SECURITY FIX: the API key used to be hard-coded in this file (a
        leaked secret). It is now read from the OPENROUTER_API_KEY
        environment variable and must never live in source control.
        """
        try:
            # Populated externally when a local checkpoint/tokenizer is in
            # use; selects the prefix-completion strategy
            # (see get_response_with_prefix).
            self.tokenizer = None
            self.api_key = os.environ.get('OPENROUTER_API_KEY')
            if not self.api_key:
                raise ValueError('OPENROUTER_API_KEY environment variable is not set')
            self.base_url = "https://openrouter.ai/api/v1"
            self.temperature = 0.0
            self.top_p = 0.95
            self.max_tokens = 8000
            self.model = model
            self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)
        except Exception as e:
            logger.error("Error loading configuration: llm.key or llm.api, please check the configuration file.")
            logger.error(f'The exception is {e}')
            sys.exit(-1)

    def single_llm_call(self, messages):
        """Issue one chat-completion request and return the answer text.

        Returns None when the request fails or yields no usable content;
        all exceptions are swallowed and logged to stdout.
        """
        try:
            model_low = self.model.lower()
            # Qwen distill models must be checked before the generic Qwen
            # branch; they are streamed, and the reasoning tokens
            # (delta.reasoning_content) are intentionally discarded — only
            # the final answer (delta.content) is kept. (The original code
            # collected the reasoning into an unused variable; removed.)
            if 'distill' in model_low and 'qwen' in model_low:
                completion = self.client.chat.completions.create(
                    model=self.model,
                    messages=messages,
                    stream=True,
                )
                answer_parts = []
                for chunk in completion:
                    delta = chunk.choices[0].delta
                    if getattr(delta, "content", None):
                        answer_parts.append(delta.content)
                return "".join(answer_parts)
            # Non-distill Qwen: explicitly disable the "thinking" mode.
            elif 'qwen' in model_low:
                response = self.client.chat.completions.create(
                    model=self.model,
                    messages=messages,
                    temperature=self.temperature,
                    top_p=self.top_p,
                    max_tokens=self.max_tokens,
                    extra_body={"enable_thinking": False},
                    stream=False,
                )
            # All other models: plain non-streaming call.
            else:
                response = self.client.chat.completions.create(
                    model=self.model,
                    messages=messages,
                    temperature=self.temperature,
                    top_p=self.top_p,
                    max_tokens=self.max_tokens,
                )
        except Exception as e:
            print(f"API request failed: {str(e)}")
            return None

        # Normalize the non-streaming response to plain text.
        if response is None:
            return None
        if getattr(response, "choices", None):
            choice = response.choices[0]
            # Prefer chat-style message.content, fall back to legacy text.
            message = getattr(choice, "message", None)
            if message is not None and getattr(message, "content", None) is not None:
                return message.content
            if getattr(choice, "text", None) is not None:
                return choice.text
        return None

    def get_response(self, messages):
        """Get a completion for *messages*, retrying once on failure.

        BUG FIX: single_llm_call catches its own exceptions and returns
        None, so the previous try/except-based retry here was dead code.
        The retry now triggers on a None result instead.
        """
        result = self.single_llm_call(messages)
        if result is None:
            logger.error("First API call failed")
            logger.info("Retrying the API call once")
            result = self.single_llm_call(messages)
            if result is None:
                logger.error("Second API call also failed")
        return result

    def get_response_with_prefix_use_ckp(self, messages, prefix):
        """Complete from *prefix* by rendering the chat template locally.

        Requires self.tokenizer to be set. Returns prefix + continuation,
        or None on failure.
        """
        try:
            # Deep-copy so the caller's message list is never mutated
            # (was a pickle round-trip; copy.deepcopy is the idiom).
            new_messages = copy.deepcopy(messages)
            prompt = self.tokenizer.apply_chat_template(new_messages, tokenize=False)
            prompt += prefix
            # NOTE(review): this uses the module-level `openai` client, not
            # self.client — presumably pointed at a local completions
            # server; confirm this is intentional.
            response = openai.completions.create(
                model=self.model,
                prompt=prompt,
                temperature=self.temperature,
                top_p=self.top_p,
                max_tokens=self.max_tokens,
            )
            return prefix + response.choices[0].text
        except Exception as e:
            print(f"API request failed: {str(e)}")
            return None

    def get_response_with_prefix_use_api(self, messages, prefix):
        """Complete from *prefix* via the server-side `prefix` extension.

        Returns prefix + continuation, or None on failure.
        """
        try:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                extra_body={"prefix": prefix},
                stream=False,
                temperature=self.temperature,
                top_p=self.top_p,
                max_tokens=self.max_tokens,
            )
            return prefix + response.choices[0].message.content
        except Exception as e:
            print(f"API request failed: {str(e)}")
            return None

    def get_response_with_prefix(self, messages, prefix):
        """Prefix-constrained completion; local template when a tokenizer
        is available, otherwise the server-side prefix API."""
        if self.tokenizer:
            return self.get_response_with_prefix_use_ckp(messages, prefix)
        else:
            return self.get_response_with_prefix_use_api(messages, prefix)


if __name__ == '__main__':
    # BUG FIX: LLM() was called without the required `model` argument,
    # which raised a TypeError before any request was made. Pass an
    # OpenRouter model id explicitly.
    allm = LLM('qwen/qwen-2.5-72b-instruct')
    response = allm.get_response(messages=[
        {"role": "user", "content": "Give me a quick sort code."}
    ])
    print(response)