
import jieba
from zhon.hanzi import punctuation as zpunc
from utils.deal_punc import *

def word_ngrams(tokens, stop_words=None,ngram_range=(2,2),flag_full_stop=False):
    """Build a list of space-joined word n-grams from *tokens*.

    Tokens appearing in *stop_words* are removed first.  ``ngram_range``
    is an inclusive ``(min_n, max_n)`` pair; when ``min_n == 1`` the
    surviving unigrams are emitted as-is before the longer n-grams.
    ``flag_full_stop`` is accepted for interface compatibility and is
    currently unused.
    """
    # Drop stop words before any n-gram is formed.
    if stop_words is not None:
        tokens = [w for w in tokens if w not in stop_words]

    lo, hi = ngram_range
    # Pure-unigram request: the (filtered) token list already is the answer.
    if hi == 1:
        return tokens

    if lo == 1:
        # Unigrams need no joining — seed the output with a copy of them,
        # then start the sliding windows at length 2.
        grams = list(tokens)
        lo = 2
    else:
        grams = []

    total = len(tokens)
    join = " ".join  # hoist the bound method out of the loops

    # Slide a window of each requested size across the token sequence.
    for size in range(lo, min(hi, total) + 1):
        grams.extend(join(tokens[start:start + size])
                     for start in range(total - size + 1))

    return grams

if __name__=='__main__':
    # Demo: segment a Chinese sentence with jieba, then count word bigrams.
    from collections import Counter

    text="今年“3.15”之际，我的(股票)涨了10%。"
    # cut_all=False: precise mode — each character belongs to exactly one word.
    seg_list=list(jieba.cut(text,cut_all=False))
    print(seg_list)
    # Default ngram_range=(2,2) yields bigrams only.
    bigram_words=word_ngrams(tokens=seg_list)
    # Counter replaces the manual dict-increment loop; converting back to a
    # plain dict keeps the original printed representation (insertion order
    # is first-seen in both cases).
    diction=dict(Counter(bigram_words))
    print(diction)