# -*- coding: utf-8 -*-
'''
Created on May 13, 2017

@author: ZhuJiahui
'''
import time
import thulac
import re
from file_utils.file_reader import read_to_1d_list, read_to_1d_list_gbk
from global_info.global_nlp import GlobalNLP

def get_stopwords():
    '''
    Load the Chinese and English stopword lists and merge them into one set.

    :return: set of stopword strings drawn from all six GBK-encoded files
             under GlobalNLP.STOPWORDS_DIR
    '''
    stopword_files = [
        "mark_stop.txt",          # Chinese punctuation / marks
        "waste_content.txt",      # Chinese boilerplate content
        "cn_stopwords.txt",       # general Chinese stopwords
        "englishword.txt",        # English single characters / words
        "english_mark_stop.txt",  # English punctuation / marks
        "english_stopwords.txt",  # general English stopwords
    ]
    stopwords = set()
    for name in stopword_files:
        stopwords.update(read_to_1d_list_gbk(GlobalNLP.STOPWORDS_DIR + "/" + name))
    return stopwords

def cn_word_segment(thu, sentence, to_string=False):
    '''
    Segment a sentence and tag each word with its part of speech.

    :param thu: a thulac instance used for segmentation
    :param sentence: the input string
    :param to_string: when True, return the result as a single string;
                      otherwise return a list of "word<delim>tag" items
                      joined with GlobalNLP.CN_WORD_INNER_DELIMITER
    '''
    if to_string:
        return thu.cut(sentence, text=True)

    # thu.cut yields (word, tag) pairs; join each pair with the delimiter.
    return [
        word + GlobalNLP.CN_WORD_INNER_DELIMITER + tag
        for word, tag in thu.cut(sentence)
    ]

def cn_filtered_word_segment(thu, sentence, to_string=False):
    '''
    Segment a sentence and tag parts of speech after replacing noisy
    symbol characters (_ ~ = / + - * | \\) with spaces.

    NOTE(review): despite the name, this function does NOT remove
    stopwords itself (the old docstring advertised a ``stopwords_list``
    parameter that never existed); combine the result with
    get_stopwords() at the call site if stopword filtering is needed.

    :param thu: a thulac instance used for segmentation
    :param sentence: the input string
    :param to_string: when True, return the result as a single string;
                      otherwise return a list of "word<delim>tag" items
                      joined with GlobalNLP.CN_WORD_INNER_DELIMITER
    '''
    # Strip symbol noise before segmenting so thulac sees cleaner text.
    sentence = re.sub(r'[_~=/\+\-\*\|\\]', " ", sentence)

    if to_string:
        return thu.cut(sentence, text=True)

    return [item[0] + GlobalNLP.CN_WORD_INNER_DELIMITER + item[1]
            for item in thu.cut(sentence)]
    
def test1():
    '''
    Smoke/benchmark test: run cn_word_segment 100 times on a fixed
    sentence and print the elapsed wall-clock time.
    '''
    # BUG FIX: time.clock() was removed in Python 3.8;
    # time.perf_counter() is the recommended interval timer.
    start = time.perf_counter()
    thu = thulac.thulac(T2S=True, filt=True, deli=GlobalNLP.CN_WORD_INNER_DELIMITER)
    sentence = "南京市长江大桥好啦可以南京市长江大桥好啦可以"
    for _ in range(100):
        cn_word_segment(thu, sentence, to_string=True)
    print('Total time %f seconds' % (time.perf_counter() - start))
    
def test2():
    '''
    Smoke test: run cn_filtered_word_segment once on a fixed sentence,
    print the segmentation result and the elapsed wall-clock time.
    '''
    # BUG FIX: time.clock() was removed in Python 3.8;
    # time.perf_counter() is the recommended interval timer.
    start = time.perf_counter()
    thu = thulac.thulac(T2S=True, filt=True, deli=GlobalNLP.CN_WORD_INNER_DELIMITER)
    sentence = "南京市长江長江大桥好啦可以南京市长江大桥好啦可以"
    result = cn_filtered_word_segment(thu, sentence, to_string=True)
    print(result)
    print('Total time %f seconds' % (time.perf_counter() - start))

if __name__ == '__main__':
    # Manual smoke test entry point; test1() is the benchmark variant.
    # test1()
    test2()
    