from transformers import AutoTokenizer, AutoModelForSequenceClassification
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain.llms.base import LLM
from langchain.prompts import PromptTemplate
from langchain.vectorstores.base import VectorStoreRetriever
from langchain.vectorstores.faiss import FAISS
from langchain.schema import Document
from langchain.chains.base import Chain
from langchain.schema.vectorstore import VectorStoreRetriever
from langchain.retrievers import EnsembleRetriever, BM25Retriever
from langchain.schema.runnable import RunnableConfig, RunnableLambda, ConfigurableField, RunnablePassthrough
from typing import Literal, List, Optional, Dict
from . import templates
from .document import PDFSplitter
import torch
import jieba
from collections import deque

    
    
def combine_docs(docs: List[Document]):
    """Merge retrieved documents into one context string.

    Each document's page content is concatenated in order, separated by a
    blank line, ready to be injected into a prompt template.
    """
    return '\n\n'.join(doc.page_content for doc in docs)


def jieba_tokenize(text: str) -> List[str]:
    """Word segmentation for the BM25 retriever.

    Uses jieba's search-engine mode, which additionally emits sub-words of
    long tokens to improve recall.
    """
    tokens = jieba.cut_for_search(text)
    return list(tokens)


class CustomRetriever(VectorStoreRetriever):
    """Vector-store retriever whose ``invoke`` keeps the original query.

    Instead of returning the bare document list, it returns
    ``{'input': question, 'docs': documents}`` so a downstream step
    (e.g. the reranker) can see both the question and the candidates.
    """

    def invoke(self, input: str, config: Optional[RunnableConfig] = None) -> Dict:
        # BUG FIX (annotation): the method returns a dict, not List[Document];
        # the old ``-> List[Document]`` annotation was misleading.
        config = config or {}
        docs = self.get_relevant_documents(
            input,
            callbacks=config.get("callbacks"),
            tags=config.get("tags"),
            metadata=config.get("metadata"),
            run_name=config.get("run_name"),
        )
        return {'input': input, 'docs': docs}
    
class MultiRetriever(EnsembleRetriever):
    """Multi-way (ensemble) retriever that keeps the original query.

    Delegates recall to ``EnsembleRetriever.invoke`` and wraps the result as
    ``{'input': question, 'docs': documents}`` for the downstream reranker.
    """

    def invoke(self, input: str, config: Optional[RunnableConfig] = None) -> Dict:
        # BUG FIX (annotation): the method returns a dict, not List[Document];
        # the old ``-> List[Document]`` annotation was misleading.
        docs = super().invoke(input, config)
        return {'input': input, 'docs': docs}


class Reranker():
    """Cross-encoder reranker for retrieved documents.

    Scores each (question, document) pair with a sequence-classification
    model (e.g. bge-reranker) and keeps the ``rerank_top_k`` best documents.
    """

    def __init__(self,
                 reranker_dir: str = 'plms/bge-reranker-base',
                 rerank_top_k: int = 5,
                 reranker_device: str = 'cuda:0',
                 reorder: bool = True):
        """
        Args:
            reranker_dir: local path of the HF cross-encoder model.
            rerank_top_k: number of documents to keep after reranking.
            reranker_device: device the model runs on, e.g. 'cpu' or 'cuda:0'.
            reorder: if True, place the highest-scoring documents at both
                ends of the returned list ("lost in the middle" mitigation).
        """
        self.rerank_top_k = rerank_top_k
        self.tokenizer = AutoTokenizer.from_pretrained(reranker_dir)
        self.model = AutoModelForSequenceClassification.from_pretrained(reranker_dir).to(reranker_device)
        self.model.eval()
        self.reorder = reorder

    def __call__(self, inputs: Dict, **kwargs) -> List[Document]:
        """Rerank the recalled documents against the question.

        Args:
            inputs: dict with keys ``'input'`` (the question string) and
                ``'docs'`` (the candidate documents).

        Returns:
            The top-k documents by score, optionally reordered so the best
            ones sit at both ends of the list.
        """
        question = inputs['input']
        docs = inputs['docs']
        with torch.inference_mode():
            batch = self.tokenizer(
                [[question, doc.page_content] for doc in docs],
                padding=True, truncation=True, return_tensors='pt', max_length=512,
            ).to(self.model.device)
            logits = self.model(**batch, return_dict=True).logits
            # BUG FIX: the original called .cuda() before .tolist(), which
            # fails (or forces a device move) when the reranker runs on CPU;
            # .tolist() works directly from any device.
            scores = logits.view(-1).float().tolist()
        # Sort by descending score; key= ensures only scores are compared,
        # so equal scores never fall back to comparing Document objects.
        ranked = sorted(zip(scores, docs), key=lambda pair: pair[0], reverse=True)
        docs = [doc for _, doc in ranked[:self.rerank_top_k]]
        if self.reorder:
            # Alternate the ranked docs to the two ends of the list so the
            # least relevant ones end up in the middle of the prompt.
            docs = docs[::2] + docs[1::2][::-1]
        return docs



def build_pdf_context_chain(pdf_path: str = 'assets/初赛训练数据集.pdf', 
                        embedder_dir: str = './plms/bge-large-zh', 
                        embedder_device: Literal['cpu', 'cuda:0'] = 'cuda:0',
                        pdf_split_type: Literal['chunk', 'heading2', 'block'] = 'block',
                        search_top_k: int = 10,
                        split_size: int = 256,
                        split_overlap: int = 50,
                        reranker_dir: str = 'plms/bge-reranker-base', 
                        rerank_top_k: int = 5, 
                        reranker_device: str = 'cuda:0',
                        reorder: bool = True) -> Chain:
    """Build the PDF retrieval chain: multi-way recall -> rerank -> merge.

    Args:
        pdf_path: path of the source PDF.
        embedder_dir: local path of the HF embedding model (e.g. bge-large-zh).
        embedder_device: device for the embedder, 'cpu' or 'cuda:0'.
        pdf_split_type: how to split the PDF — fixed-size 'chunk',
            'heading2' sections, or layout 'block'.
        search_top_k: number of documents each retriever recalls.
        split_size: chunk/block size used when splitting.
        split_overlap: overlap between adjacent chunks/blocks.
        reranker_dir: local path of the cross-encoder reranker model.
        rerank_top_k: number of documents kept after reranking.
        reranker_device: device for the reranker model.
        reorder: place the best-ranked documents at both ends of the context.

    Returns:
        A runnable chain mapping a question string to a merged context string.

    Raises:
        ValueError: if ``pdf_split_type`` is not one of the supported values.
    """
    pdf_splitter = PDFSplitter(pdf_path=pdf_path, tokenizer_dir=embedder_dir)
    if pdf_split_type == 'chunk':
        # NOTE(review): 'chunke_size' looks like a typo for 'chunk_size' —
        # confirm against PDFSplitter.split_by_chunks before renaming.
        docs = pdf_splitter.split_by_chunks(chunke_size=split_size, chunk_overlap=split_overlap)
    elif pdf_split_type == 'heading2':
        docs = pdf_splitter.split_by_heading2()
    elif pdf_split_type == 'block':
        docs = pdf_splitter.split_by_blocks(block_size=split_size, block_overlap=split_overlap)
    else:
        # BUG FIX: previously an unsupported split type fell through and
        # surfaced later as a confusing NameError on `docs`.
        raise ValueError(f"unsupported pdf_split_type: {pdf_split_type!r}")
    # Dense (vector) recall over the split documents.
    embedder = HuggingFaceBgeEmbeddings(
        model_name=embedder_dir,
        model_kwargs={'device': embedder_device},
        encode_kwargs={'normalize_embeddings': True},
    )
    vdb = FAISS.from_documents(documents=docs, embedding=embedder)
    vdb_retriever = vdb.as_retriever(search_kwargs={'k': search_top_k})
    # Sparse (BM25) recall with jieba word segmentation.
    bm25_retriever = BM25Retriever.from_documents(documents=docs, preprocess_func=jieba_tokenize)
    bm25_retriever.k = search_top_k
    # Fuse both recalls with equal weights, then rerank and merge into text.
    retriever = MultiRetriever(retrievers=[bm25_retriever, vdb_retriever], weights=[0.5, 0.5])
    reranker = Reranker(reranker_dir=reranker_dir,
                        rerank_top_k=rerank_top_k,
                        reranker_device=reranker_device,
                        reorder=reorder)
    return retriever | RunnableLambda(reranker) | RunnableLambda(combine_docs)