import os

from tqdm import tqdm
from whoosh.fields import TEXT, SchemaClass
from jieba.analyse import ChineseAnalyzer
from whoosh.index import create_in, open_dir
from whoosh.qparser import QueryParser
import pandas as pd

# Shared jieba-based analyzer so every TEXT field in the schema segments
# Chinese text into searchable terms the same way.
analyzer = ChineseAnalyzer()


class ArticleSchema(SchemaClass):
    """Whoosh schema for the article index.

    All three fields are stored (so their values can be read back from
    search hits) and tokenized with the module-level jieba ChineseAnalyzer.
    """

    # title / content / author all share the same analyzer configuration.
    title = TEXT(stored=True, analyzer=analyzer)
    content = TEXT(stored=True, analyzer=analyzer)
    author = TEXT(stored=True, analyzer=analyzer)


class CreateDataAndSearch():
    """Build a Whoosh full-text index from a CSV vocabulary file and query it.

    The on-disk index lives in ``index_dir`` under the index name
    ``article_index``. Each non-null value of the CSV's ``voc`` column is
    indexed into all three schema fields (``title``, ``author``, ``content``).
    """

    def __init__(self, index_dir="indexdir", csv_path="max_voc_pandas.csv"):
        # Defaults preserve the original hard-coded locations, so existing
        # callers (which pass no arguments) are unaffected.
        self.index_dir = index_dir
        self.csv_path = csv_path

    def create_data_set(self):
        """Create (or overwrite) the index from the CSV's ``voc`` column.

        Rows whose ``voc`` value is NaN are skipped before indexing.
        """
        # exist_ok avoids the check-then-create race of exists()/mkdir().
        os.makedirs(self.index_dir, exist_ok=True)
        max_voc = pd.read_csv(self.csv_path)
        schema = ArticleSchema()

        ix = create_in(self.index_dir, schema, indexname='article_index')
        writer = ix.writer()
        # Each vocabulary entry fills all three fields with the same text.
        docs = max_voc.loc[~pd.isnull(max_voc["voc"]), "voc"].values.tolist()
        for doc in tqdm(docs):
            writer.add_document(title=doc, author=doc, content=doc)
        writer.commit()

    def search_by_data(self, text, limit=10):
        """Search the ``content`` field for *text*.

        Prints the top hit (or a no-results message) and returns the list of
        stored-field dicts for the matching documents (possibly empty).

        :param text: query string, parsed against the ``content`` field.
        :param limit: maximum number of hits to return (Whoosh default is 10).
        """
        ix = open_dir(self.index_dir, indexname='article_index')
        with ix.searcher() as searcher:
            query = QueryParser("content", ix.schema).parse(text)
            results = searcher.search(query, limit=limit)
            # Materialize stored fields while the searcher is still open;
            # Hit objects become invalid once the context manager exits.
            hits = [dict(hit) for hit in results]
        # Guard against zero hits: the original print(results[0]) raised
        # IndexError when nothing matched.
        if hits:
            print(hits[0])
        else:
            print("No results found for:", text)
        return hits
if __name__ == '__main__':
    # Run a sample query against an existing on-disk index; uncomment the
    # create_data_set() call below to (re)build the index first.
    searcher_obj = CreateDataAndSearch()
    # searcher_obj.create_data_set()
    searcher_obj.search_by_data("去")
