import jieba
from whoosh.analysis import Tokenizer, Token
import haystack


# https://github.com/fxsjy/jieba
# 中文分词使用配置步骤：
# vi blog/tokenizer.py
# cp site-packages/haystack/backends/whoosh_backend.py myapp/whoosh_backend_zh.py
# vi whoosh_backend_zh.py
#   from blog.tokenizer import ChineseAnalyzer
#   schema_fields(xx, ..., analyzer=ChineseAnalyzer(), xx)

# vi myweb/settings.py
# HAYSTACK_CONNECTIONS = {
#     'ENGINE': 'blog.whoosh_backend_zh.WhooshEngine',
# }

# python manage.py rebuild_index
class ChineseTokenizer(Tokenizer):
    """Whoosh tokenizer that segments Chinese text with jieba.

    Follows the Whoosh ``Tokenizer.__call__`` protocol: a single ``Token``
    instance is reused and yielded once per segmented word.
    """

    def __call__(self, value, positions=False, chars=False,
                 keeporiginal=False, removestops=True,
                 start_pos=0, start_char=0, mode='', **kwargs):
        token = Token(positions, chars, removestops=removestops, mode=mode,
                      **kwargs)
        # jieba.tokenize in 'search' mode segments like cut_for_search but
        # also reports each word's real (start, end) character offsets.
        # The previous value.find(word) approach always located the FIRST
        # occurrence, producing wrong offsets for repeated words.
        for pos, (word, start, end) in enumerate(
                jieba.tokenize(value, mode='search')):
            token.original = token.text = word
            token.boost = 1.0
            # Reset per-token state: the single Token object is reused, and
            # a downstream stop-word filter may have set .stopped on it.
            token.stopped = False
            if positions:
                # Whoosh positions are ordinal token indices offset by
                # start_pos, not character offsets.
                token.pos = start_pos + pos
            if chars:
                token.startchar = start_char + start
                token.endchar = start_char + end
            yield token  # yield one result per segmented word


def ChineseAnalyzer():
    """Factory for a jieba-backed analyzer usable in a Whoosh schema field."""
    analyzer = ChineseTokenizer()
    return analyzer

