#!/usr/bin/env python
# coding: utf-8

import jieba
from typing import List
import pdfplumber
from langchain.retrievers import BM25Retriever
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter

from langchain.vectorstores import ElasticsearchStore
# NOTE(review): this instantiation runs at import time with no arguments and
# will raise unless defaults / environment variables supply a connection;
# `es` is unused in this chunk — confirm it is actually needed elsewhere.
es = ElasticsearchStore()

class DataProcess(object):
    """Split a reference PDF into LangChain Documents with header metadata."""

    def __init__(self, reference_path: str):
        # Path of the source PDF to parse.
        self.reference_path = reference_path
        # Accumulated split documents; gen_docs() appends on each call.
        self.docs = []

    # 分析数据 — parse the PDF and split its pages into chunks
    def gen_docs(self, chunk_size=512, chunk_overlap=200):
        """Parse the PDF page by page and append split chunks to self.docs.

        Pages whose extracted text is missing, shorter than 10 chars or
        longer than 1500 chars are skipped (likely covers, TOCs or scans —
        thresholds kept from the original code).
        """
        tmp_docs = []
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        # Use a context manager so the PDF file handle is always closed
        # (the original leaked it).
        with pdfplumber.open(self.reference_path) as pdf:
            for i, page in enumerate(pdf.pages):
                text = page.extract_text()
                # extract_text() returns None for image-only pages; the
                # original crashed on len(None) here.
                if text is None:
                    continue
                if 10 < len(text) < 1500:
                    header = self.__get_header(page)
                    if header is None:
                        header = '未知'
                    # Replace glyphs the downstream model may not recognize.
                    text = text.replace('■', '-').replace('•', '-')
                    tmp_docs.append(Document(
                        page_content=text,
                        metadata={"page_count": i, 'header': header}))
        self.docs += text_splitter.split_documents(tmp_docs)

    # 标题头 — best-effort extraction of the page header text
    def __get_header(self, page):
        """Return the page header text, or None if none can be determined.

        Returns None for table-of-contents pages and for pages whose words
        cannot be extracted; otherwise prefers a word in the header band
        (17 < top < 20 pts) and falls back to the first word on the page.
        """
        try:
            lines = page.extract_words()
        except Exception:  # narrowed from a bare except
            return None
        if not lines:
            return None
        for line in lines:
            # Skip table-of-contents pages entirely (dotted leader lines).
            if "目录" in line["text"] or ".........." in line["text"]:
                return None
            # Words positioned in the header band near the top of the page.
            if 17 < line["top"] < 20:
                return line["text"]
        # Fall back to the first word on the page.
        return lines[0]["text"]


class BM25(object):
    """BM25 retrieval over jieba-tokenized documents, mapping hits back to originals."""

    def __init__(self, docs: List[Document]):
        # 原文文档 — keep the originals so searches can return them verbatim.
        self.origin_docs = docs

        # 生成BM25的文档 — build the BM25 corpus from tokenized copies.
        bm25_docs = []
        for idx, doc in enumerate(docs):
            # .strip() already removes newlines, so the original
            # .strip("\n").strip() chain was redundant.
            text = doc.page_content.strip()
            # Prepend the header so title terms boost the BM25 score;
            # tolerate documents without a 'header' key (the original
            # raised KeyError on them).
            header = doc.metadata.get('header', '')
            tokens = header + ' ' + ' '.join(jieba.cut_for_search(text))
            # 把索引写入，用于查询时返回原文的文档 — store the index so a
            # hit on the tokenized copy maps back to the original document.
            bm25_docs.append(Document(page_content=tokens, metadata={'idx': idx}))
        # 生成数据库 — build the retriever over the tokenized corpus.
        self.retriever = BM25Retriever.from_documents(bm25_docs)

    def search_top_k(self, query, k):
        """Return the top-k original documents matching `query` via BM25."""
        self.retriever.k = k
        # Tokenize the query the same way the corpus was tokenized.
        query = " ".join(jieba.cut_for_search(query))
        docs = self.retriever.get_relevant_documents(query)
        # 返回原文 — map hits back to the original (un-tokenized) documents.
        return [self.origin_docs[doc.metadata['idx']] for doc in docs]


