import jieba
from whoosh.analysis import Tokenizer, Token
from whoosh.qparser import QueryParser
from whoosh.query import Term, And, Or, Not


class CustomChineseTokenizer(Tokenizer):
    """Whoosh tokenizer that segments Chinese text using jieba."""

    def __call__(self, value, positions=False, chars=False, keeporiginal=False, removestops=True, start_pos=0,
                 start_char=0, mode='', **kwargs):
        """Yield one whoosh ``Token`` per jieba segment of ``value``.

        Args:
            value: the text to tokenize.
            positions: if True, record a position (char offset, as in the
                original implementation) on each token.
            chars: if True, record start/end character offsets.
            start_pos / start_char: offsets added to every recorded position,
                as whoosh passes when tokenizing a later field chunk.

        Bug fix: the original used ``value.find(w)`` for every segment, which
        always returns the FIRST occurrence — repeated words all received the
        offsets of the first one. A running cursor makes each duplicate map
        to its own occurrence.
        """
        cursor = 0  # char offset in `value` where the next segment search starts
        for w in jieba.cut(value):
            t = Token(positions, chars, removestops=removestops, mode=mode, **kwargs)
            t.original = t.text = w
            t.boost = 1.0
            found = value.find(w, cursor)
            if found < 0:
                # Defensive: every jieba segment should be a substring of the
                # input; fall back to the cursor rather than crash.
                found = cursor
            if positions:
                t.pos = start_pos + found
            if chars:
                t.startchar = start_char + found
                t.endchar = start_char + found + len(w)
            cursor = found + len(w)
            yield t

class CustomQueryParser(QueryParser):
    """QueryParser supporting prefix operators inside a single term:

    - ``!foo``           -> NOT foo
    - ``&a，b，c``        -> a AND b AND c   (full-width comma separated)
    - ``|a，b，c``        -> a OR b OR c
    """

    def __init__(self, filename, schema):
        # `filename` is forwarded as whoosh's default field name; the
        # parameter name is kept as-is for backward compatibility with
        # existing callers.
        super().__init__(filename, schema)

    def _replace_term(self, q):
        """Rewrite a ``Term`` according to its prefix operator; other query
        nodes (and unrecognized terms) are returned unchanged.

        Bug fixes vs. original: ``str.startwith`` -> ``startswith`` and
        ``str.strop`` -> ``strip`` — both raised AttributeError at runtime.
        """
        if isinstance(q, Term):
            text = q.text
            if text.startswith("!"):
                return Not(Term(q.fieldname, text[1:]))
            if text.startswith("&") and '，' in text[1:]:
                # Each full-width-comma-separated part becomes its own Term.
                return And([Term(q.fieldname, part.strip()) for part in text[1:].split("，")])
            if text.startswith("|") and '，' in text[1:]:
                return Or([Term(q.fieldname, part.strip()) for part in text[1:].split("，")])
        return q