#encoding=utf-8

from ctypes import *


class tokenizer:
    
    def __init__(self):
        self._stext=['、','“','”','，','。','《','》','：','；','!','‘','’','?','？','！','·',' ',''] #枚举标点符号包括空格
        self._stopword_list=[line for line in file('D:\\prog\\ICTCLAS50\\Sample\\stopword.txt')]
        self._stopword_list=map(lambda x: x.strip(),self._stopword_list) # 去掉行尾的空格


    def parseString(self,text):    
        atext_list=[]#存放要分词的文档
        rtext=[]#存放去标点符号和分词后的词
        participle = cdll.LoadLibrary('D:\\prog\\ICTCLAS50\\API\\ICTCLAS50.dll')
        participle.ICTCLAS_Init(c_char_p('D:\\prog\\ICTCLAS50\\API'))
        strlen = len(c_char_p(text).value)
        t = c_buffer(strlen*6)
        a = participle.ICTCLAS_ParagraphProcess(c_char_p(text),c_int(strlen),t,c_int(3),0)
        atext_list=t.value.split(' ')
        participle.ICTCLAS_Exit()
        rtext=[item for item in atext_list if item not in self._stext]
        result_list=[iword for iword in rtext if iword not in self._stopword_list]

        return result_list

    def parseDoc(self,doc,cut_doc):
        print 'segmenting',doc
        #tmp_write_file = 'F:\\IS10\\grad\\data\\tmpw.txt'
        participle = cdll.LoadLibrary('D:\\prog\\ICTCLAS50\\API\\ICTCLAS50.dll')
        participle.ICTCLAS_Init(c_char_p('D:\\prog\\ICTCLAS50\\API'))
        a = participle.ICTCLAS_FileProcess(doc,cut_doc,c_int(3),0)
        participle.ICTCLAS_Exit()
        #rtext=[item for item in atext_list if item not in self._stext]
        #result_list=[iword for iword in rtext if iword not in self._stopword_list]

        #return result_list

import re
def removeAt(infl, otfl):
    """Copy text file *infl* to *otfl*, replacing every @mention
    (an '@' up to and including the next whitespace character) with a
    single space.

    Bug fixed: the original called ``infile.read('utf-8')`` -- but
    ``file.read()`` takes a byte count, so that raised TypeError; and
    even a plain ``read()`` would have made the loop iterate characters,
    not lines.  Files are now closed deterministically and the regex is
    compiled once instead of per line.
    """
    at_pattern = re.compile(r'@.*?\s')  # non-greedy: stop at first whitespace
    with open(infl, 'r') as infile, open(otfl, 'w') as outfile:
        for line in infile:
            outfile.write(at_pattern.sub(' ', line))
        
#count frequency; [user new word dict/speed]-<new error,quit
#doc='F:\\IS10\\grad\\data\\3213963343'
import os
def getNotEmptyFileSet(data_dir='F:\\IS10\\grad\\data',
                       done_dir='F:\\IS10\\grad\\cata',
                       prefixes=('1', '2', '3')):
    """Return the set of filenames under *data_dir* that are non-empty,
    start with one of *prefixes*, and are not already present in
    *done_dir* (i.e. not yet processed).

    The previously hard-coded corpus paths are now default parameters,
    so existing callers are unaffected.  The cheap prefix test runs
    before the ``getsize`` stat call.
    """
    working_set = set()
    for root, dirs, files in os.walk(data_dir):
        for f in files:
            if f.startswith(prefixes) and os.path.getsize(os.path.join(root, f)) != 0:
                working_set.add(f)
        # Progress indicator: running count after each directory.
        # (Single-argument print(...) is valid in both Python 2 and 3.)
        print(len(working_set))
    done_set = set(os.listdir(done_dir))
    return working_set - done_set

def run():
    """Batch-segment the corpus: every file under the data directory whose
    name starts with 1/2/3 and sorts after '2815183601' (a resume point
    left over from a previously interrupted run) is segmented into the
    cata directory under the same name.
    """
    inpath = 'F:\\IS10\\grad\\data\\'
    outpath = 'F:\\IS10\\grad\\cata\\'
    # Build the tokenizer once: the original constructed a fresh instance
    # (re-reading the stopword file) for every single document.
    tok = tokenizer()
    for f in os.listdir('F:\\IS10\\grad\\data'):
        # String comparison against '2815183601' skips already-done files.
        if f.startswith(('2', '3', '1')) and f > '2815183601':
            tok.parseDoc(inpath + f, outpath + f)

# Script entry point: segment the whole corpus when run directly.
if __name__=='__main__':
    run()
