#!/usr/bin/env python
#coding=utf8

import os
import re
import jieba
import random
import chardet
from nanorm import *
set_db_name("data.db")  # bind the nanorm ORM to the local SQLite file
# word -> total occurrence count across all processed documents
frequency = {}
# word -> set of containing files while indexing; do_file() converts it
# to a fraction (documents containing the word / documents processed)
distribution = {}
# NOTE(review): appears unused — do_file assigns a local of the same name
sentences = []

class Sentence(Model):
    """One example sentence extracted from a source document.

    Fields:
      text  - the sentence text
      doc   - path of the document it came from
      words - the sentence's jieba segmentation, joined with '_'
    """
    text = CharField()
    doc = CharField()
    words = CharField()

    def __unicode__(self):
        # Pipe-separated debug representation (unicode).
        return u'Sentence|%d|%s|%s|%s' % (self.id, self.text, self.doc, self.words)

    def __str__(self):
        # Python 2: str is bytes, so encode the unicode form as UTF-8.
        return self.__unicode__().encode('utf8')

class Word(Model):
    """One row per distinct indexed word.

    Fields:
      last_doc   - name of the most recent document the word was seen in
      word       - the word itself
      count      - total occurrences across all documents
      file_count - number of distinct documents containing the word
    """
    last_doc = CharField()
    word = CharField()
    count = IntegerField()
    file_count = IntegerField()

    def __unicode__(self):
        # BUG FIX: the original label said 'Sentence' — a copy-paste slip
        # from the Sentence class that made debug output misleading.
        return u'Word|%d|%s|%s|%s' % (self.id, self.word, self.count, self.file_count)

    def __str__(self):
        # Python 2: str is bytes, so encode the unicode form as UTF-8.
        return self.__unicode__().encode('utf8')




def do_file(file_cup):
    print u"清空Sentence表数据%d条" % (len(Sentence.query().all()))
    print(Sentence.query().all())
    Sentence.query().delete()
    print u"清空Word表数据%d条" % (len(Word.query().all()))
    Word.query().delete()
    import types
    names=[]
    if type(file_cup) is types.TupleType:  
        print "the type of file_cup is Tuple"
        names = list(file_cup)
    else:
        names = os.listdir('docs')
    
    
    
    for name in names:
        if 'DS_Store' in name:
            continue
    
        
        if type(file_cup) is types.TupleType:
            fname = name
        else:
            fname = 'docs/' + name
        text = open(fname).read()
    
        print fname
    
        r = chardet.detect(text)
        print r
    
        text = text.decode(r['encoding'], 'replace')
        print('jieba切词')
        words = jieba.cut(text)
        print('jieba切词完毕')
        for word in words:
            word = word.strip()
            if not word:
                continue
            frequency[word] = frequency.get(word, 0) + 1
            distribution[word] = distribution.get(word, set())
            distribution[word].add(fname)
        print('处理频次完毕')
        auto_commit_close()
    
        text = re.sub(ur'[，。？！；,.?!;\t\n]', '|', text)
        sentences = text.split('|')
        for text in sentences:
            text = text.strip()
            print(u'待处理语句'+text)
            if len(text) < 12:
                continue
            #get_sents = Sentence.gets(text=text)
            #print(len(get_sents))
            
            if Sentence.gets(text=text):
                #print get_sents
                continue
            words = jieba.cut(text, cut_all=False)
            my_words = '' + '_'.join(words) + ''
            sentence = Sentence()
            sentence.text = text
            sentence.doc = fname
            sentence.words = my_words
            sentence.save()
            try:
                for tmp_w in my_words.split('_'):
                    #判断词汇是否已经出现
                    
                    if not re.match(u"[\u4e00-\u9fa5]+",tmp_w):
                        print u"此处有特殊符号"
                        continue
                    print(u'处理入words'+tmp_w)
                    if Word.get(word=tmp_w):
                        #数量加1
                        word_set = Word.get(word=tmp_w)
                        cu = get_cursor()
                        sql = "update `%s` set %s where id = %d" % ("Word", "count=count+1", word_set.id)
                        cu.execute(sql)
                        db_commit()
                        #判断文件增加否
                        #print(u"%s && %s"%(name,Word.get(word=tmp_w)))
                        if not Word.get(last_doc=name):
                            cu = get_cursor()
                            sql = "update `%s` set %s where id = %d" % ("Word", "file_count=file_count+1", word_set.id)
                            cu.execute(sql)
                            db_commit()
                        continue
                    mword = Word()
                    mword.word = tmp_w
                    mword.last_doc = name
                    mword.count = 1
                    mword.file_count = 1
                    mword.save()
                    print(u'成功处理入word'+tmp_w)
                print(u'处理入word'+text)
            except Exception as e:
                print e
            
        print('处理语句完毕')
        auto_commit_open()
    
    for word, fnames in distribution.items():
        distribution[word] = 1.0 * len(fnames) / len(names)


def generate(word):
    """Collect stats and example sentences for *word*.

    Returns a dict with the word's corpus frequency, its document
    distribution value, and a randomly shuffled list of matching
    Sentence rows.
    """
    # '%%' escapes a literal '%' for the string formatter, yielding the
    # SQL LIKE pattern '%_<word>_%' against the stored segmentation.
    matches = Sentence.gets(text='%%_%s_%%' % word, operator="like")
    random.shuffle(matches)
    return {
        'frequency': frequency.get(word, 0),
        'distribution': distribution.get(word, 0),
        'sentences': matches,
    }


if __name__ == '__main__':
    # Any non-tuple argument makes do_file() scan the docs/ directory.
    do_file(1)
    # Demo lookup: stats and example sentences for the word "自己".
    result = generate(u'自己')
    print result['frequency']
    print result['distribution']
    for sentence in result['sentences']:
        print sentence

