
"""
BM25文档召回
"""
from bm25s.tokenization import Tokenized
import jieba
from typing import List, Union
from tqdm.auto import tqdm
import bm25s
from models.document import Document


def tokenize(
    texts: Union[str, List[str]],
    return_ids: bool = True,
    show_progress: bool = True,
    leave: bool = False,
) -> Union[List[List[str]], Tokenized]:
    """Tokenize text(s) with jieba for use with bm25s.

    Args:
        texts: A single string or a list of strings to tokenize. A single
            string is treated as a one-document corpus.
        return_ids: If True, return a ``Tokenized`` object carrying
            per-document token-id lists plus the token->id vocab mapping;
            otherwise return the token strings themselves.
        show_progress: Whether to display a tqdm progress bar.
        leave: Whether to keep the progress bar after completion.

    Returns:
        ``Tokenized(ids=..., vocab=...)`` when ``return_ids`` is True,
        else a list of token-string lists, one per input text.
    """
    if isinstance(texts, str):
        texts = [texts]
    corpus_ids: List[List[int]] = []
    token_to_index: dict = {}
    for text in tqdm(
        texts, desc="Split strings", leave=leave, disable=not show_progress
    ):
        doc_ids = []
        for token in jieba.lcut(text):
            # Assign a fresh dense id the first time a token is seen;
            # dict insertion order keeps ids consecutive from 0.
            if token not in token_to_index:
                token_to_index[token] = len(token_to_index)
            doc_ids.append(token_to_index[token])
        corpus_ids.append(doc_ids)
    if return_ids:
        return Tokenized(ids=corpus_ids, vocab=token_to_index)
    # Build the id->token list only on this path; ids were assigned densely
    # in insertion order, so list position equals token id.
    reverse_dict = list(token_to_index.keys())
    for i, token_ids in enumerate(
        tqdm(
            corpus_ids,
            desc="Reconstructing token strings",
            leave=leave,
            disable=not show_progress,
        )
    ):
        corpus_ids[i] = [reverse_dict[token_id] for token_id in token_ids]
    return corpus_ids

bm25s.tokenize = tokenize

class BM25Retriever(object):
    """In-memory BM25 retriever over ``Document`` objects.

    Tokenization goes through ``bm25s.tokenize``, which this module
    replaces with the jieba-based tokenizer.
    """

    def __init__(self):
        # Underlying bm25s index; populated by from_documents().
        self.retriever = bm25s.BM25()
        # Maps corpus position -> Document, aligned with the index rows.
        self.documents: dict = dict()

    def from_documents(self, documents: List[Document]) -> None:
        """Build (or rebuild) the BM25 index from *documents*."""
        self.documents = {idx: doc for idx, doc in enumerate(documents)}
        texts = [doc.content for doc in documents]
        corpus_tokens = bm25s.tokenize(texts, show_progress=False)
        self.retriever.index(corpus_tokens, show_progress=False)

    def query(
        self, queries: List[str], topn: int = 10, score_threshold: float = 0.3
    ) -> List[Document]:
        """Return documents scoring >= *score_threshold* for any query.

        Results are the deduplicated union of the per-query top-``topn``
        hits; the returned order is not score-ranked.
        """
        # Guard: retrieving from an empty index (k would be 0) is invalid.
        if not self.documents:
            return []
        query_tokens = bm25s.tokenize(queries, show_progress=False)
        top_k = min(topn, len(self.documents))
        doc_ids, scores = self.retriever.retrieve(
            query_tokens, k=top_k, sorted=False, show_progress=False
        )
        matched = set()
        for row_ids, row_scores in zip(doc_ids, scores):
            matched.update(
                int(doc_id)
                for doc_id, score in zip(row_ids, row_scores)
                if score >= score_threshold
            )
        return [self.documents[idx] for idx in matched]

bm25 = BM25Retriever()


if __name__ == "__main__":
    corpus = [
        "今天天气晴朗，我的心情美美哒",
        "小明和小红一起上学",
        "我们来试一试吧",
        "我们一起学猫叫",
        "我和Faker五五开",
        "明天预计下雨，不能出去玩了",
    ]
    documents = [Document(id=f"id_{id}", content=text) for id, text in enumerate(corpus)]
    bm25.from_documents(documents)
    print(bm25.query(["明天天气怎么样", "学猫叫"]))