#coding:utf-8

# Tokenizer: load the project's custom dictionary and stop-word list into
# jieba before any segmentation/tagging function is called.
import jieba
import jieba.analyse
import jieba.posseg as pseg
from config import config
home_dir =  config.get_home_dir()
# Paths are resolved relative to the configured project home directory.
dictPath = home_dir+'dict/dict.txt'
stopword = home_dir+'dict/stopword.dict'
jieba.load_userdict(dictPath)
jieba.analyse.set_stop_words(stopword) # argument is the path to the custom stop-word corpus

#分词
# Word segmentation
def segword(path, output):
    """Segment the text file at *path* with jieba and write the
    space-joined tokens to *output* as UTF-8.

    Parameters
    ----------
    path : str
        Path of the input text file (assumed UTF-8 encoded).
    output : str
        Path of the file the segmented result is written to.
    """
    # 'with' guarantees the handle is closed even on error (the original
    # leaked the input handle); the explicit encoding makes the script
    # independent of the platform default.
    with open(path, encoding='utf-8') as fin:
        text = fin.read()

    # Precise mode (cut_all=False): no overlapping cuts.
    seg_list = jieba.cut(text, cut_all=False)
    joined = " ".join(seg_list)

    # Write the str directly; encoding is handled by the text-mode handle
    # (writing bytes from .encode() to a text-mode file fails on Python 3).
    with open(output, 'w', encoding='utf-8') as fout:
        fout.write(joined)

#词性标注  数字提取：([0-9]+,)+([0-9])+\D{1,2}
# POS tagging.  Digit-extraction regex (for later use on the output):
# ([0-9]+,)+([0-9])+\D{1,2}
def pos_tag_word(path, tag_output):
    """Run jieba POS tagging over the file at *path* and write the
    space-joined ``word/flag`` pairs to *tag_output* as UTF-8.

    Parameters
    ----------
    path : str
        Path of the input text file (assumed UTF-8 encoded).
    tag_output : str
        Path of the file the tagged result is written to.
    """
    # Read with an explicit encoding and a closed-on-exit handle
    # (the original leaked the input handle).
    with open(path, encoding='utf-8') as fin:
        text = fin.read()

    tag_list = []
    for word, flag in pseg.cut(text):
        # Drop empty tokens and bare spaces.
        if word == "" or word == " ":
            continue
        if word == '\n':
            # Keep newlines untagged so the input's line structure survives.
            tag_list.append(word)
        else:
            tag_list.append(word + '/' + flag)

    # Write the str directly in text mode; .encode() to a text-mode file
    # raises TypeError on Python 3.
    with open(tag_output, 'w', encoding='utf-8') as fout:
        fout.write(" ".join(tag_list))

# --- script entry point -------------------------------------------------
# Word-segmentation input/output paths.
path = home_dir+'result/text_01/02.txt'
output = home_dir+'train_result.txt'
# POS-tagging paths (uncomment and call pos_tag_word instead):
# path = home_dir+'train'
# tag_output=home_dir+'train_result_tag.txt'

# Guarded so importing this module no longer triggers the segmentation
# run as a side effect; behavior when executed as a script is unchanged.
if __name__ == '__main__':
    segword(path, output)
