from torch import device
import prompts
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import BitsAndBytesConfig
import json
from concurrent.futures import ThreadPoolExecutor

class LLMReranker:
    """Rerank retrieved documents with an instruction-tuned LLM.

    The LLM assigns each retrieved text block a relevance score in [0.0, 1.0];
    this is blended with the original vector-similarity score (stored under the
    ``fassi_score`` metadata key of each document) into a weighted
    ``combined_score`` used for the final ordering.
    """

    def __init__(self, checkpoint="./Qwen/Qwen2.5-7B-Instruct"):
        # Load tokenizer + model once; the same instance serves all rerank calls.
        self.tokenizer, self.llm, self.device = self.set_up_llm(checkpoint)
        self.system_prompt_rerank_single_block = prompts.RerankingPrompt.system_prompt_rerank_single_block
        self.system_prompt_rerank_multiple_blocks = prompts.RerankingPrompt.system_prompt_rerank_multiple_blocks

    def set_up_llm(self, checkpoint="./Qwen/Qwen2.5-7B-Instruct"):
        """Load the tokenizer and an 8-bit quantized causal LM.

        Quantization settings are tunable; see BitsAndBytesConfig docs.

        Returns:
            (tokenizer, model, device) — device is taken from the loaded model
            so inputs can be moved to wherever ``device_map="auto"`` placed it.
        """
        bnb_config = BitsAndBytesConfig(
            load_in_8bit=True,                      # enable 8-bit weight quantization
            llm_int8_threshold=6.0,                 # recommended outlier threshold (tunable)
            llm_int8_skip_modules=None,
            llm_int8_enable_fp32_cpu_offload=True,  # offload oversized layers to CPU in fp32
        )

        tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(
            checkpoint,
            trust_remote_code=True,
            quantization_config=bnb_config,
            device_map="auto",
        )
        return tokenizer, model, model.device

    def _generate_json(self, system_prompt, user_prompt, max_new_tokens=512):
        """Run one chat completion and parse the model's answer as JSON.

        Shared generation pipeline for both rank methods (previously
        duplicated verbatim in each).

        Raises:
            json.JSONDecodeError: if the model output is not valid JSON even
                after stripping optional ``` fences.
        """
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]
        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )
        model_inputs = self.tokenizer([text], return_tensors="pt").to(self.device)

        generated_ids = self.llm.generate(
            **model_inputs,
            max_new_tokens=max_new_tokens,
        )
        # Drop the prompt tokens so only the newly generated tail is decoded.
        generated_ids = [
            output_ids[len(input_ids):]
            for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
        ]
        response = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

        # Chat models frequently wrap JSON in ``` / ```json fences; strip them
        # before parsing so a well-formed answer does not crash json.loads.
        response = response.strip()
        if response.startswith("```"):
            response = response.strip("`").strip()
            if response.startswith("json"):
                response = response[len("json"):].strip()
        return json.loads(response)

    def get_rank_for_single_block(self, query, retrieved_document):
        """Score the relevance of a (single-element) document list to *query*.

        Args:
            query: user query string.
            retrieved_document: list of documents exposing ``page_content``
                and ``metadata`` attributes.

        Returns:
            dict parsed from the LLM's JSON answer (expected to contain a
            ``relevance_score`` key).
        """
        retrieved_document = [
            {"page_content": doc.page_content, "metadata": doc.metadata}
            for doc in retrieved_document
        ]
        # BUG FIX: the original prompt contained literal "/n" where newlines
        # ("\n") were clearly intended; restored real newlines.
        user_prompt = (
            f"\n 这里被<>括起来的是查询: \n <{query}> \n\n"
            f" 这里被[]括起来的是检索到的文本块: \n [{retrieved_document}] \n\n "
        )
        return self._generate_json(self.system_prompt_rerank_single_block, user_prompt)

    def get_rank_for_multiple_blocks(self, query, retrieved_document):
        """Score the relevance of several documents to *query* in one call.

        Args:
            query: user query string.
            retrieved_document: list of documents exposing ``page_content``
                and ``metadata`` attributes.

        Returns:
            dict parsed from the LLM's JSON answer (expected to contain a
            ``block_rankings`` list with one entry per input document).
        """
        retrieved_document = [
            {"page_content": doc.page_content, "metadata": doc.metadata}
            for doc in retrieved_document
        ]
        # Present each block as structured XML-ish tags so the model can
        # match scores to block ids unambiguously.
        formatted_blocks = "\n\n".join([
                            f"<Block id='{i+1}'>\n"
                            f"<metadata>\n{text['metadata']}\n</metadata>\n"
                            f"<content>\n{text['page_content']}\n</content>\n"
                            f"</Block>"
                            for i, text in enumerate(retrieved_document)
                        ])

        user_prompt = f"""```text
                下面是一个用户查询，用尖括号括起来：
                <{query}>

                以下是从知识库中检索到的多个文本块，格式为结构化 XML 标签：
                [{formatted_blocks}]

                你需要根据每个 <Block> 中 <content> 的内容，与查询的相关性进行打分，并输出一个包含恰好 {len(retrieved_document)} 个元素的 JSON 结果，表示每个 block 的相关性得分，分值范围为 0.0 到 1.0。

                请严格按照格式返回，**只输出 JSON，不输出解释说明**。
        """
        return self._generate_json(self.system_prompt_rerank_multiple_blocks, user_prompt)

    def _score_single(self, query, batch, llm_weight, vector_weight):
        """Score a size-1 batch and return the scored document copy."""
        ranking = self.get_rank_for_single_block(query, batch)

        doc_with_score = batch[0].copy()
        doc_with_score.metadata["relevance_score"] = ranking["relevance_score"]
        # Weighted blend of LLM relevance and the stored vector score.
        doc_with_score.metadata["combined_score"] = round(
            llm_weight * ranking["relevance_score"] +
            vector_weight * doc_with_score.metadata['fassi_score'],
            4,
        )
        return doc_with_score

    def _score_batch(self, query, batch, llm_weight, vector_weight):
        """Score one batch of documents, padding missing rankings with 0.0."""
        # BUG FIX: the original passed the FULL document list here instead of
        # the current batch, so every batch re-scored all documents and the
        # zip below paired documents with the wrong scores.
        rankings = self.get_rank_for_multiple_blocks(query, batch)
        block_rankings = rankings.get('block_rankings', [])

        if len(block_rankings) < len(batch):
            print(f"\nWarning: Expected {len(batch)} rankings but got {len(block_rankings)}")
            for doc in batch[len(block_rankings):]:
                print(f"Missing ranking for document on page {doc.metadata.get('file_name', 'unknown')}:")
                print(f"Text preview: {doc.page_content[:100]}...\n")
            # Pad with neutral entries so every document gets a score.
            block_rankings.extend(
                {
                    "reason": "Default ranking due to missing LLM response",
                    "relevance_score": 0.0,
                }
                for _ in range(len(batch) - len(block_rankings))
            )

        results = []
        for doc, rank in zip(batch, block_rankings):
            doc_with_score = doc.copy()
            doc_with_score.metadata["relevance_score"] = rank["relevance_score"]
            doc_with_score.metadata["combined_score"] = round(
                llm_weight * rank["relevance_score"] +
                vector_weight * doc.metadata['fassi_score'],
                4,
            )
            results.append(doc_with_score)
        return results

    def rerank_documents(self, query: str, documents: list, documents_batch_size: int = 4, llm_weight: float = 0.7, top_k: int = 6):
        """Rerank *documents* against *query* and return the top_k best.

        combined_score = llm_weight * relevance_score
                       + (1 - llm_weight) * metadata['fassi_score']

        Args:
            query: user query string.
            documents: documents exposing ``page_content``/``metadata`` (with
                a ``fassi_score`` metadata entry — TODO confirm the key name
                upstream; looks like a misspelling of "faiss").
            documents_batch_size: how many documents to score per LLM call;
                1 uses the single-block prompt.
            llm_weight: weight of the LLM score in the blend.
            top_k: number of documents to return.

        Returns:
            Up to *top_k* scored document copies, sorted by combined_score
            descending.

        Note: batches are processed sequentially — HF `generate` on a single
        model instance does not benefit from thread-level parallelism.
        """
        doc_batches = [
            documents[i:i + documents_batch_size]
            for i in range(0, len(documents), documents_batch_size)
        ]
        vector_weight = 1 - llm_weight

        if documents_batch_size == 1:
            # BUG FIX: the original iterated raw documents but then indexed
            # doc[0] and passed a non-list to get_rank_for_single_block;
            # iterate the size-1 batches instead.
            all_results = [
                self._score_single(query, batch, llm_weight, vector_weight)
                for batch in doc_batches
            ]
        else:
            all_results = []
            for batch in doc_batches:
                all_results.extend(self._score_batch(query, batch, llm_weight, vector_weight))

        # Highest combined score first.
        all_results.sort(key=lambda x: x.metadata.get("combined_score", 0.0), reverse=True)
        return all_results[:top_k]