import io
import os
import sys

import jieba
from gensim.models import word2vec

def check2skip(filename, overwrite):
    """Return truthy when `filename` already exists and overwriting is
    disabled, i.e. the caller should skip regenerating that file.

    Parameters:
        filename  -- path of the output file to check
        overwrite -- 0 to keep an existing file, non-zero to force regeneration
    """
    # bool is a subclass of int, so True/False stay compatible with the
    # original 1/0 return values for callers comparing with == 1 / == 0.
    return overwrite == 0 and os.path.exists(filename)

class CleanText:
    """Filters stop words and punctuation out of a token list.

    Both resource files are read as UTF-8, one entry per line.  Bug fixes vs
    the original: file handles are closed via `with`, and the stop-word list
    is decoded to unicode just like the punctuation list, so non-ASCII stop
    words actually match the unicode tokens produced by jieba (the old code
    kept stop words as raw bytes).
    """

    def __init__(self, stopwords_file='stopwords.txt', punc_file="punctuation.txt"):
        # Stop words: one word per line, stored as a set for O(1) lookup.
        with io.open(stopwords_file, encoding='utf-8') as f:
            self.stopwords = set(line.strip() for line in f)
        # Punctuation: one symbol per line; tab and space count as punctuation too.
        with io.open(punc_file, encoding='utf-8') as f:
            punc = [line.strip() for line in f]
        self.punctuation = set(punc + [u'\t', u' '])

    def clean_stopwords(self, words):
        """Return `words` with every stop word removed (order preserved)."""
        return [w for w in words if w not in self.stopwords]

    def clean_punc(self, words):
        """Return `words` with every punctuation token removed (order preserved)."""
        return [w for w in words if w not in self.punctuation]


class Parse:
    """Loads raw text and segments it into words with jieba.

    Bug fixes vs the original: every file handle is closed via `with`, input
    is decoded and output encoded through io.open's UTF-8 codec instead of
    manual .encode('utf8') (which crashes on Python 3), and the Python-2-only
    print statement is replaced by the form that works on both 2 and 3.
    """

    def __init__(self, filename='', dict_file=''):
        # self.text stays '' when no corpus file is given; parse_text()
        # falls back on it (the empty-string sentinel is kept for callers).
        self.text = ''
        if filename != '':
            # One sentence per line, UTF-8.
            with io.open(filename, encoding='utf-8') as f:
                self.text = [line.strip() for line in f]
        if dict_file != '':
            # Optional user dictionary to improve jieba's segmentation.
            jieba.load_userdict(dict_file)
        self.wordslist = []

    def load(self, filename):
        """Append the token lists from a previously parsed file
        ('/'-separated tokens, one sentence per line) to self.wordslist."""
        with io.open(filename, encoding='utf-8') as f:
            for line in f:
                self.wordslist.append(line.strip().split('/'))

    def parse(self, sentence):
        """Segment one sentence; returns jieba's token list."""
        return jieba.lcut(sentence)

    def parse_text(self, resultfile, text=''):
        """Segment every sentence in `text` (default: self.text) and write the
        tokens to `resultfile`, '/'-joined, one sentence per line, UTF-8."""
        if text == '':
            text = self.text
        total = len(text)
        with io.open(resultfile, 'w', encoding='utf-8') as fw:
            for i, sentence in enumerate(text):
                if i % 1000 == 0:
                    # Progress ping every 1000 sentences.
                    print("parse: %d/%d ..." % (i, total))
                fw.write(u'/'.join(self.parse(sentence)) + u'\n')

class W2vector:
    """Thin wrapper around gensim word2vec training with preset hyper-parameters."""

    def __init__(self, feat_dim=300, min_word_count=40, num_workers=4, context=10, downsampling=1e-3):
        # Hyper-parameters are only stored here; they are used by train().
        self.dim = feat_dim                # embedding vector size
        self.min_count = min_word_count    # ignore words rarer than this
        self.num_workers = num_workers     # parallel training threads
        self.window_size = context         # context window size
        self.downsampling = downsampling   # frequent-word downsampling threshold
        self.model = ''                    # '' sentinel until train() sets a real model

    def train(self, sentences):
        """Train a Word2Vec model on an iterable of tokenised sentences.

        NOTE(review): `size=` is the pre-4.0 gensim keyword (gensim 4.x renamed
        it to `vector_size`) -- confirm against the installed gensim version.
        """
        self.model = word2vec.Word2Vec(sentences,
                                       workers=self.num_workers,
                                       size=self.dim,
                                       min_count=self.min_count,
                                       window=self.window_size,
                                       sample=self.downsampling)

    def save_model(self, filename):
        """Persist the trained model, creating the target directory if needed.

        Bug fix: the old code tested os.path.exists(filename) (the *file*,
        not its directory) and then unconditionally called makedirs on the
        joined path prefix -- that raised OSError when the directory already
        existed, and ValueError/OSError when filename had no directory part.
        """
        target_dir = os.path.dirname(filename)
        if target_dir and not os.path.exists(target_dir):
            os.makedirs(target_dir)
        self.model.save(filename)


if __name__ == '__main__':
    # Segment the test wiki dump and write the result next to the input,
    # with an "_parse" suffix appended to the file name.
    source_path = "data/zhwiki_test1000.txt"
    output_path = "%s_parse" % source_path
    wiki_parser = Parse(source_path)
    wiki_parser.parse_text(output_path)













