from __future__ import unicode_literals

import csv
import os
import shutil
import jieba
from progress.bar import Bar
from whoosh.analysis import Tokenizer, Token, LowercaseFilter, StopFilter, StemFilter
from whoosh.fields import *
from whoosh.index import create_in

from config import INDEX_DIR, DATA_FILE


class ChineseAnalyzer(Tokenizer):
    """Whoosh tokenizer that segments Chinese text with jieba (full mode).

    Yields one Token per jieba segment, with correct position and
    character-offset information so phrase search and highlighting work.
    """

    def __call__(self, text, **kargs):
        token = Token()
        pos = 0       # ordinal position of the token within the field
        cursor = 0    # earliest char offset the next segment can start at
        for w in jieba.cut(text, cut_all=True):
            # cut_all mode yields overlapping segments in non-decreasing
            # start order, so searching forward from the previous match's
            # start locates this segment's true character offset.
            idx = text.find(w, cursor)
            if idx < 0:
                # Defensive fallback; should not happen since every
                # segment is a substring of `text`.
                idx = cursor
            token.original = token.text = w
            token.pos = pos
            token.startchar = idx
            token.endchar = idx + len(w)
            yield token
            pos += 1
            cursor = idx


def build():
    """Build a fresh Whoosh full-text index from the CSV data file.

    Reads (code, name) pairs from DATA_FILE (GBK-encoded; the first two
    rows are headers and are skipped), then writes one document per pair
    into a brand-new index under INDEX_DIR.  Any existing index
    directory is removed first.
    """
    print("开始制作索引")
    # Start from a clean directory so no stale segment files linger.
    if os.path.exists(INDEX_DIR):
        shutil.rmtree(INDEX_DIR)
    os.makedirs(INDEX_DIR)

    data = []
    print("开始解析文件:", DATA_FILE)
    # newline='' is required by the csv module so the reader can handle
    # quoting and embedded newlines itself.
    with open(DATA_FILE, encoding='GBK', newline='') as f:
        reader = csv.reader(f)
        for index, row in enumerate(reader, start=1):
            # The first two rows are header/metadata lines; skip them.
            if index <= 2:
                continue
            code = row[0].strip()
            name = row[1].strip()
            data.append((code, name))

    total = len(data)
    print("数据读取成功，共", total, "条数据，即将开始制作索引")
    # Create the progress bar only after all status prints, so the bar's
    # in-place rendering is not clobbered by other output.
    bar = Bar('索引进度', max=total)
    analyzer = ChineseAnalyzer()

    # Index schema: `code` is stored verbatim for retrieval; `content`
    # is the searchable field, analyzed with the Chinese analyzer.
    schema = Schema(code=TEXT(stored=True), content=TEXT(stored=True, analyzer=analyzer))
    ix = create_in(INDEX_DIR, schema)
    writer = ix.writer()

    # Add one document per (code, name) pair.
    for code, name in data:
        writer.add_document(code=code, content=name)
        bar.next()
    # Commit all pending documents in a single transaction.
    writer.commit()
    ix.close()
    bar.finish()
    print("制作完成!")
