#-*-coding:utf-8-*-
import MySQLdb
import sys
import numpy
import time
from config import *
reload(sys)
sys.setdefaultencoding('utf-8')
# Database access helper class
class DB:
    """Thin MySQL helper exposing a lazily-created shared (singleton) instance.

    Use ``DB.get_instance()`` rather than constructing directly so the whole
    process shares one connection.
    """
    conn = None        # class-level default so __del__ is safe even if __init__ failed
    __instance = None  # singleton cache

    def __init__(self):
        # 'localhost' may resolve to a unix socket that MySQLdb cannot use
        # here; 127.0.0.1 forces a TCP connection.
        # FIXME(security): credentials are hard-coded; load them from config.
        self.conn = MySQLdb.connect(host="127.0.0.1", user="root",
                                    passwd="x9Cut#=(TRlJ", db="test",
                                    port=3306, charset='utf8')

    def getBySql(self, sql, *param):
        """Execute a SELECT with bound *param and return all rows.

        Returns a tuple of row tuples (possibly empty).
        """
        cursor = self.conn.cursor()
        try:
            cursor.execute(sql, param)
            # fetchall() is the portable way to read a result set; the old
            # fetchmany(cursor.execute(...)) relied on execute() returning
            # the row count, which DB-API 2.0 leaves undefined.
            result = cursor.fetchall()
        finally:
            cursor.close()  # always release the cursor, even on error
        # Commit ends the implicit transaction so subsequent reads on this
        # connection see fresh data under REPEATABLE READ.
        self.conn.commit()
        return result

    def getBySql_result_unique(self, sql, *param):
        """Execute a SELECT expected to yield one scalar and return it.

        Raises IndexError if the query returns no rows.
        """
        result = self.getBySql(sql, *param)
        return result[0][0]

    def setBySql(self, sql, *param):
        """Execute an INSERT/UPDATE/DELETE with bound *param and commit."""
        cursor = self.conn.cursor()
        try:
            cursor.execute(sql, param)
        finally:
            cursor.close()
        self.conn.commit()

    def __del__(self):
        # Destructor: best-effort close. Guard against conn still being the
        # class-level None (e.g. MySQLdb.connect raised in __init__).
        if self.conn is not None:
            self.conn.close()

    @classmethod
    def get_instance(cls):
        """Return the shared DB instance, creating it on first use."""
        if cls.__instance is None:
            cls.__instance = cls()
        return cls.__instance

if __name__ == '__main__':
    #截取时间 测试防止数据量过多
    endtime = '2017-11-1'
    # 读取数据库内容
    allCount = DB.get_instance().getBySql_result_unique('select COUNT(1) from chat WHERE status IN (2,3) AND utime<=%s',endtime)
    print "需要学习的总评论数:", allCount, "条"
    pageSize = 5000 #拿出5000条数据
    pageCount = int(allCount/pageSize)+1
    #pageCount = 2 #默认 垃圾评论取出最新5000条 正常评论取出最新5000条
    fw = open(study_file, 'w') #历史数据文件
    fwvoca = open(sms_vocalist_file, 'w') #语料库
    fwords = open(sms_word_file, 'w') #历史数据文件
    i=0
    from AdaboostNavieBayes import *
    start = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
    classCategory = []  # 类别标签，1表示是垃圾评论，0表示正常评论
    smsWords = []
    vocabularySet = set([]) #语料库
    vocabularyList = []
    for page in range(pageCount):
        '''
        startPo = 0
        if page == 0:
            status = 2 #正常评论
        else:
            status = 3 #垃圾评论
        '''
        startPo = page*pageSize
        status = '2,3'
        list = DB.get_instance().getBySql("select chat_content,status from chat WHERE status IN (%s) \
                      ORDER BY chat_id desc limit %s,%s",status,
                                          startPo, pageSize)

        for row in list:
            import re
            p = re.compile('\s+') #去除空格和换行
            strinfo = re.compile('\?') #去除？号这类没意义的字符
            new_string = re.sub(p, '', row[0])
            new_string = re.sub(strinfo, '', new_string)
            i = i + 1
            if new_string=='':
                continue
            if row[1] == 2:  # 2是审核通过正常留言
                classCategory.append(CATEGORY_HAMORUNKNOW)
                fw.write('ham::'+new_string+ SPLIT_NEXT_ROW)
            elif row[1] == 3:  # 3是审核不通过垃圾留言
                fw.write('spam::' + new_string + SPLIT_NEXT_ROW)
                classCategory.append(CATEGORY_SPAM)
            words = textParser(new_string)
            #smsWords.append(words)
            fwords.write(SPLIT_TAB.join(words)+SPLIT_NEXT_ROW)
            vocabularySet = vocabularySet | set(words)
            print "正在处理第%s条数据"%i
    for word in vocabularySet:
        vocabularyList.append(word)
    fwvoca.write(SPLIT_TAB.join(vocabularyList))
    fw.flush()
    fw.close()
    fwvoca.flush()
    fwvoca.close()
    np.savetxt(sms_category_file, classCategory, delimiter=SPLIT_TAB)
    #np.savetxt(sms_vocalist_file, vocabularyList, delimiter=SPLIT_TAB)
    #np.savetxt(sms_word_file, smsWords, delimiter=SPLIT_TAB)
    end = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
    print "基础数据新增完成"
    print "开始时间:%s" % start
    print "结束时间:%s" % end
    print "可参考数据数:%s" % i
