import pdb

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from transformers import pipeline

from openai import OpenAI

# Set OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"


class Reader:
    """RAG answer generator.

    Depending on construction, answers are produced either by a local
    4-bit-quantized HuggingFace text-generation pipeline ("local") or by an
    OpenAI-compatible chat-completions endpoint such as vLLM ("service").
    """

    def __init__(self, model_name, device, model_service_type):
        """
        Args:
            model_name: HF model id/path; also sent as the model name on
                service-mode chat-completion calls.
            device: device map for the local model (e.g. "cuda").
            model_service_type: "local" (in-process quantized pipeline) or
                "service" (OpenAI-compatible HTTP endpoint).

        Raises:
            ValueError: if model_service_type is not recognized. (Previously an
                unknown value silently left the instance half-initialized.)
        """
        self.device = device
        # Remember the model name so service-mode calls don't need a
        # hard-coded path (see rag_with_llm).
        self.model_name = model_name
        if model_service_type == "local":
            self.model, self.tokenizer = self._build_reader_llm(model_name)
        elif model_service_type == "service":
            self.llm_client = OpenAI(
                api_key=openai_api_key,
                base_url=openai_api_base,
            )
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        else:
            raise ValueError(f"unknown model_service_type: {model_service_type!r}")

    def _build_reader_llm(self, model_name):
        """Build the local reader model.

        Loads the causal LM with NF4 4-bit quantization (bfloat16 compute) and
        wraps it in a text-generation pipeline.

        Returns:
            (pipeline, tokenizer) tuple.
        """
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16,
        )
        # GPU path: device_map comes from the constructor (e.g. "cuda").
        model = AutoModelForCausalLM.from_pretrained(
            model_name, quantization_config=bnb_config, device_map=self.device
        )
        tokenizer = AutoTokenizer.from_pretrained(model_name)

        reader_model = pipeline(
            model=model,
            tokenizer=tokenizer,
            task="text-generation",
            do_sample=True,
            temperature=0.2,
            repetition_penalty=1.1,
            return_full_text=False,  # only return the newly generated text
            max_new_tokens=500,
        )
        return reader_model, tokenizer

    def rag_with_llm(self, retrieved_docs, user_question, model_service_type="service"):
        """Answer user_question using retrieved_docs as context.

        Args:
            retrieved_docs: objects exposing .page_content (e.g. LangChain
                Documents).
            user_question: the question to answer.
            model_service_type: "local" or "service"; must match how this
                instance was constructed.

        Returns:
            The generated answer string.

        Raises:
            ValueError: if model_service_type is not recognized. (Previously an
                unknown value crashed with UnboundLocalError on `answer`.)
        """
        prompt_in_chat_format = [
            {
                "role": "system",
                "content": """使用上下文中包含的信息，对问题给出准确的回答。
                只回答所问的问题，回答应简洁并与问题强相关。在相关时提供来源文档的编号。如果答案不能从上下文中推断出来，则不给出答案。用中文回答。""",
            },
            {
                "role": "user",
                "content": """Context:
        {context}
        ---
        以下是你需要回答的问题.

        Question: {question}""",
            },
        ]

        # We only need the text of each retrieved document.
        retrieved_docs_text = [doc.page_content for doc in retrieved_docs]
        context = "\nExtracted documents:\n"
        context += "".join(f"Document {i}:::\n{doc}" for i, doc in enumerate(retrieved_docs_text))

        if model_service_type == "local":
            # Render the chat template first, then fill {context}/{question}.
            # NOTE(review): str.format will raise if the rendered template
            # itself contains literal braces — confirm for the chosen tokenizer.
            rag_prompt_template = self.tokenizer.apply_chat_template(
                prompt_in_chat_format, tokenize=False, add_generation_prompt=True
            )
            final_prompt = rag_prompt_template.format(question=user_question, context=context)
            return self.model(final_prompt)[0]["generated_text"]

        if model_service_type == "service":
            # Only the user turn contains placeholders; fill them in place.
            for item in prompt_in_chat_format:
                if item["role"] == "user":
                    item["content"] = item["content"].format(question=user_question, context=context)
            resp = self.llm_client.chat.completions.create(
                # Was a hard-coded absolute model path; use the model this
                # instance was constructed with instead.
                model=self.model_name,
                messages=prompt_in_chat_format,
            )
            return resp.choices[0].message.content

        raise ValueError(f"unknown model_service_type: {model_service_type!r}")
