# -*- coding: utf-8 -*-

import jieba
import jieba.posseg as pseg
import os
# Use jieba's multi-process segmentation with 13 worker processes.
# NOTE(review): enable_parallel is POSIX-only and the worker count is
# hard-coded — presumably tuned to this machine's cores; confirm.
jieba.enable_parallel(13)
# POS-tag prefixes to DISCARD during segmentation (jieba/ICTCLAS tag
# set): t=time, q=quantifier, p=preposition, u=auxiliary, e=interjection,
# y=modal particle, o=onomatopoeia, w=punctuation, m=numeral.
flag_list = ['t','q','p','u','e','y','o','w','m']
def sentence_fenci(all_the_text):
    """Segment Chinese text with jieba POS tagging, keeping content words.

    A token is kept only when:
      * it is non-empty and has a non-empty POS flag,
      * its flag's first letter is not in ``flag_list`` (time words,
        quantifiers, prepositions, particles, punctuation, numerals, ...),
      * its first character is a CJK ideograph (U+4E00..U+9FA5), which
        drops latin tokens, digits and symbols.

    :param all_the_text: unicode string to segment.
    :return: list of kept words (unicode strings), in order.
    """
    kept = []
    for w in pseg.cut(all_the_text):
        flag = w.flag
        word = w.word
        # BUG FIX: the original lower bound was the literal u'/u4e00'
        # (wrong escape char — a six-character string "/u4e00"). Since
        # any character above '/' compares greater than that string, the
        # lower-bound test always passed and non-CJK tokens (latin,
        # digits) slipped through. u'\u4e00' restores the intended
        # CJK-range check, matching the u'\u9fa5' upper bound.
        if (len(word) > 0 and len(flag) > 0
                and flag[0] not in flag_list
                and u'\u4e00' <= word[0] <= u'\u9fa5'):
            kept.append(word)
    return kept


def file_fenci(src, dest, end_for_each_line=' '):
    """Segment every line of *src* and APPEND the results to *dest*.

    Each input line is decoded as UTF-8, segmented with
    ``sentence_fenci``, and the kept words are written space-joined,
    terminated by *end_for_each_line* (default a single space, so a whole
    file collapses onto one output line; pass '\\n' for line-per-line
    output). Lines that yield no words are skipped. A final "\\n" is
    always written after the last line.

    NOTE(review): the decode/encode pair targets Python 2 (``open``
    returns byte strings there); *dest* is opened in append mode on
    purpose so dir_fenci can accumulate many files into one output.

    :param src: path of the UTF-8 input text file.
    :param dest: path of the output file (appended to, not truncated).
    :param end_for_each_line: terminator written after each segmented line.
    """
    # Context managers close both handles even if segmentation raises
    # mid-file (the original leaked them on any exception).
    with open(src) as fin:
        with open(dest, "a") as fout:
            # Iterate lazily instead of readlines() — same order, but
            # the whole file is never held in memory at once.
            for line in fin:
                words = sentence_fenci(line.decode("utf-8"))
                if not words:
                    continue
                out = " ".join(words) + end_for_each_line
                fout.write(out.encode("utf-8"))
            fout.write("\n")


def dir_fenci(_dir, _outfile):
    """Segment every file directly under *_dir* into one output file.

    Relies on file_fenci opening *_outfile* in append mode, so all
    segmented files accumulate into the single output. Processing order
    follows os.listdir(), which is arbitrary.

    :param _dir: directory whose immediate entries are segmented.
    :param _outfile: path of the shared output file (appended to).
    """
    for filename in os.listdir(_dir):
        print(filename)
        # os.path.join instead of manual "/" concatenation: portable
        # across separators and tolerant of a trailing slash in _dir.
        file_fenci(os.path.join(_dir, filename), _outfile)


if __name__ == "__main__":

    # Earlier per-category runs, kept for reference.
    # NOTE(review): "process_dir" does not exist in this file — it looks
    # like an old name for dir_fenci; confirm before re-enabling.
    #process_dir("../dataset/raw/Auto", "../dataset/step1/Auto.txt")
    #process_dir("../dataset/raw/Culture", "../dataset/step1/Culture.txt")
    #process_dir("../dataset/raw/Economy", "../dataset/step1/Economy.txt")
    #process_dir("../dataset/raw/Medicine", "../dataset/step1/Medicine.txt")
    #process_dir("../dataset/raw/Military", "../dataset/step1/Military.txt")
    #process_dir("../dataset/raw/Sports", "../dataset/step1/Sports.txt")
    # Prepare the segmented corpus used to train the word-vector dict;
    # '\n' keeps one segmented line per input line.
    file_fenci("dataset/news-163.txt","dataset/news-163_fencied_utf8.txt",end_for_each_line='\n')
