#!/home/kuangxiaofeng/miniconda3/bin/python
# -*- coding: utf-8 -*-
"""
Created on Tue Aug  1 12:17:53 2017
黑白名单分类器的对象库,增加ensemble方法的预测
@author: Steven
"""
import pandas as pd
import numpy as np
import re,jieba,os,pickle
from pyltp import Segmentor,Postagger
import jieba.posseg as pseg
import time as ttime
from scipy.sparse import csr_matrix,hstack

# Load the pyltp word-segmentation and POS-tagging models once at import
# time; the resulting `segmentor`/`postagger` objects are shared by
# struct_feature_extract below.
LTP_DATA_DIR = './Model/ltp_data'
pos_model_path = os.path.join(LTP_DATA_DIR,'pos.model')
cws_model_path = os.path.join(LTP_DATA_DIR,'cws.model')

segmentor = Segmentor()
segmentor.load(cws_model_path)
postagger = Postagger()
postagger.load(pos_model_path)

def batch_densearray_generator(X, batch_size):
    """Yield dense ``np.ndarray`` batches of consecutive rows of ``X``.

    Parameters
    ----------
    X : scipy.sparse matrix
        Sparse matrix whose rows are densified batch by batch (keeps
        peak memory bounded when feeding a model).
    batch_size : int
        Maximum number of rows per yielded batch; the last batch may be
        smaller.

    Yields
    ------
    np.ndarray
        Dense 2-D array of up to ``batch_size`` rows.
    """
    n_rows = X.shape[0]
    # Integer ceiling division replaces the float-valued np.ceil count.
    number_of_batches = -(-n_rows // batch_size)
    for counter in range(number_of_batches):
        start = batch_size * counter
        # Direct sparse slicing avoids materializing an explicit
        # row-index list for fancy indexing.
        X_batch = X[start:start + batch_size].todense()
        yield np.asarray(X_batch)

def whatstime():
    """Return the current local time as a log-prefix string.

    Format: ``[%Y-%b-%d  %H:%M:%S]: `` — used to prefix the console
    progress messages printed throughout this module.
    """
    return ttime.strftime("[%Y-%b-%d  %H:%M:%S]: ", ttime.localtime())

def struct_feature_extract(input_corpus):
    """Extract structural features for each signature in ``input_corpus``.

    For every line, jieba and pyltp (module-level ``segmentor`` /
    ``postagger``) are used to compute: cleaned string length, suffix
    length and suffix POS tag, a person-name-only flag, the count of
    geographic nouns, a non-word-character flag, the count of banned
    words, and a digits-only flag.

    Parameters
    ----------
    input_corpus : iterable of str
        Signature strings to featurize.

    Returns
    -------
    pd.DataFrame
        One row per input line with the feature columns listed above
        plus the original text in column ``signiture``.
    """
    print(whatstime() + '开始提取特征。。。')

    # Blacklist ("contains") words; rows whose third column is empty are
    # the active rules.
    banned_df = pd.read_excel('./Data/黑名单规则-打死不改版.xlsx',sheetname='黑名单规则-包含')
    filter_a = banned_df.iloc[:,2].isnull()
    banned_words = set(banned_df.ix[filter_a,0].tolist())
    del banned_df

    pseg_jieba_list = list()
    postags_ltp_list = list()

    str_len_list = list()
    suffix_len_list = list()
    suffix_postag_list = list()
    only_name_flag_list = list()
    contain_geo_cnt_list = list()
    non_word_flag_list = list()
    banned_word_cnt_list = list()
    only_num_flag_list = list()

    # jieba POS tags treated as "non-word" characters (punctuation and
    # unknown symbols); hoisted out of the per-line loop below.
    non_word_set = set(['x','xx','xu','w'])

    # Segmentation / POS tagging / signature length
    print(whatstime()+'正在词性标注...')
    for ii,eachline in enumerate(input_corpus):
        if ii%50000 == 0:
            print(whatstime() + '已完成 %d '%ii)

        # Signature length after collapsing letter runs to 'E' and digit
        # runs to 'N'.  BUGFIX: the second substitution must operate on
        # the already-cleaned string; previously it re-used the raw line
        # and silently discarded the 'E' substitution (the chained
        # replacements in BW_Classifier.feature_extract show the intent).
        eachline_clean = re.sub(r'[A-Za-z.]+', 'E', str(eachline))
        eachline_clean = re.sub(r'\d+', 'N', eachline_clean)
        str_len_list.append(len(eachline_clean))

        # Flag signatures made of digits only (no non-digit anywhere).
        if re.match(r'.*(\D).*',str(eachline)):
            only_num_flag_list.append(0)
        else:
            only_num_flag_list.append(1)

        try:
            words = pseg.cut(eachline)
            word_psegs = [(word,flag) for word,flag in words]
            words = [each[0] for each in word_psegs]
            psegs = [each[1] for each in word_psegs]
            pseg_jieba_list.append((words,psegs))

            words_ltp = segmentor.segment(eachline)
            words_ltp = [each for each in words_ltp]
            postags_ltp = postagger.postag(words_ltp)
            postags_ltp = [each for each in postags_ltp]
            postags_ltp_list.append(postags_ltp)
        except AttributeError:
            # Non-string entries (e.g. NaN) cannot be segmented; fall
            # back to a single empty token so the lists stay aligned.
            pseg_jieba_list.append(([''],['']))
            postags_ltp_list.append([''])

    print(whatstime()+'词性标注finished!')

    for ii,(words,psegs) in enumerate(pseg_jieba_list):
        # Suffix length: English ('eng') or numeral ('m') suffixes count
        # as length 1, other suffixes use their character length.
        if len(words) > 1:
            suffix_postag_list.append(psegs[-1])
            if (psegs[-1] == 'eng') | (psegs[-1] == 'm'):
                suffix_len_list.append(1)
            else:
                suffix_len_list.append(len(words[-1]))
        else:
            suffix_len_list.append(0)
            suffix_postag_list.append('')

        # Whole signature is a single pyltp person name ('nh').
        if len(postags_ltp_list[ii]) == 1 and postags_ltp_list[ii][0] == 'nh':
            only_name_flag_list.append(1)
        else:
            only_name_flag_list.append(0)

        # Count of geographic nouns ('ns') in the signature.
        contain_geo_cnt_list.append(sum(1 for flag in psegs if flag == 'ns'))

        # Whether any non-word (punctuation-like) POS tag appears.
        if non_word_set & set(psegs):
            non_word_flag_list.append(1)
        else:
            non_word_flag_list.append(0)

        # Number of distinct banned words contained in this line.
        banned_word_cnt_list.append(len(set(words) & banned_words))

    # Assemble all feature columns into the output sample frame.
    output_df = pd.DataFrame({'signiture':input_corpus,
                              'str_len':str_len_list,
                              'suffix_len':suffix_len_list,
                              'person_name_flag':only_name_flag_list,
                              'geography_flag':contain_geo_cnt_list,
                              'non_word_flag':non_word_flag_list,
                              'banned_word_flag':banned_word_cnt_list,
                              'only_num_flag':only_num_flag_list,
                              'suffix_postag':suffix_postag_list})
    print(whatstime()+'特征提取完毕！')

    return output_df

class BW_Classifier(object):
    """Black/white-list signature classifier.

    Combines hand-written regex / blacklist rules (``regex_rule_predict``)
    with a pickled ML pipeline (``feature_extract`` + ``ml_predict``).
    Label convention used by both paths: '-1' = blacklist, '1' = whitelist,
    '' = no rule fired (rule path only).
    """

    def __init__(self):
        # Load the pickled model bundle: the MLP classifier plus the
        # fitted preprocessing transformers it was trained with.
        with open('./Model/model_set_2.5.3.pkl','rb') as f:
            model_object = pickle.load(f)
        self.mlp = model_object['mlp']  # classifier exposing predict_classes/predict_proba (Keras-style)
        self.ohc_sp = model_object['ohc_sp']  # one-hot encoder applied to suffix-POS tag ids
        self.tfidf_model = model_object['tfidf_model']  # fitted tfidf vectorizer
        self.ch2_sfs = model_object['ch2_for_structure_feature_selection']  # NOTE(review): not used in the methods below
        self.le_sp = model_object['le_sp']  # label encoder for suffix POS tags (has .classes_)
        self.fenci_tokenizer = model_object['fenci_tokenizer']  # NOTE(review): not used in the methods below
        self.ch2_tfidf = model_object['ch2_tfidf']  # feature selector applied to the tfidf matrix
        # Per-row "signature is only a person name" flags; filled by
        # feature_extract and consumed by ml_predict.
        self.only_person_name = ['']

    def regex_rule_predict(self,input_sign_list):
        #------------------------------------------
        #   Strong-rule blacklist classification.
        #   Returns one label per input signature: '-1' if any blacklist
        #   rule matched, '' otherwise.
        #------------------------------------------
        df = pd.DataFrame({'原始签名':input_sign_list})
        # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        # Hand-written blacklist regex rules: street addresses, phone
        # numbers, WeChat/QQ solicitations, floor numbers, pure digits,
        # Korean/Japanese characters, very short strings, suspicious
        # suffixes, and non-bank "branch"/"committee" names.
        pattern_list = [r'.*(街.*号)',
                       r'.*(路.*号)',
                       r'.*(主叫电话)\D*(\d*)',
                        r'.*(预约电话)\D*(\d*)',
                        r'.*(咨询电话)\D*(\d*)',
                        r'.*(加.*微信)',
                        r'.*(加.*[qq|QQ])',
                        r'.*?([一二三四五六七八九十零\d]层)',
                        r'.*?([一二三四五六七八九十零\d]楼)',
                        r'.*?([一二三四五六七八九十零\d]种)',
                        r'(^\d\d*\d$)',
                        r'^.*([가-힝ぁ-ヶ゛゜ー]).*$',
                        r'^.{0,1}$',
                        r'.*(今日|每日|一起|我的).*(?<!网)$',
                        r'.*(什么).*(?<!值得买)$',
                        r'.+(?<!(城|超))市$',
                        r'.+(?<!([浏览]|电|神|乐))器$',
                        r'.+(?<!(网络))电话$',
                        r'^((?!(光大|兴业|中信|民生|广发|邮储|邮政|包商|农商|徽商|平安|浦发|行)).)*(支行|分行)$',
                        r'^((?!(居委|委员|村委|安委|居民委员|村民委员|安全管理委员)).)*(会)$'
                       ]

        # Blacklist words matched anywhere in the signature (rows whose
        # third column is empty are the active rules).
        banned_df = pd.read_excel('./Data/黑名单规则-打死不改版.xlsx',sheetname='黑名单规则-包含')
        filter_a = banned_df.iloc[:,2].isnull()
        banned_words = set(banned_df.ix[filter_a,0].tolist())
        del banned_df

        # Blacklist words that only count when the signature ends with them.
        banned_df = pd.read_excel('./Data/黑名单规则-打死不改版.xlsx',sheetname='黑名单规则-结尾')
        filter_a = banned_df.iloc[:,1].isnull()
        banned_end_words = set(banned_df.ix[filter_a,0].tolist())
        del banned_df

        # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        # Start matching.

        # "Contains" rules: one alternation over all banned words.
        pattern = r'.*(' + '|'.join(banned_words) + ')'
        abnormal_str = list()
        for each in df.原始签名:
            try:
                match_group = re.match(pattern,each)
                if match_group:
                    abnormal_str.append(match_group.groups()[0])
                else:
                    abnormal_str.append(np.nan)
            except TypeError:
                # Non-string entry (e.g. NaN) -- treat as no match.
                abnormal_str.append(np.nan)   
        df['abnormal_str_contain']=abnormal_str

        # Regex-rule matching: first pattern that matches a row wins.
        abnormal_str = [np.nan]*len(df)

        for pattern in pattern_list:
            for ii,each in enumerate(df.原始签名):
                # type(np.nan) is float, so a non-float entry means an
                # earlier pattern already matched this row -- skip it.
                if not(isinstance(abnormal_str[ii],type(np.nan))):
                    continue
                try:
                    match_group = re.match(pattern,each)
                    if match_group:
                        abnormal_str[ii]=match_group.group(0)
                    else:
                        abnormal_str[ii]=np.nan
                except TypeError:
                    abnormal_str[ii]=np.nan 
        
        df['abnormal_str_rule']=abnormal_str
        # Rule for signatures ending in 村 ("village"): blacklist unless
        # the ending is one of the whitelisted village-like suffixes.
        filter_1 = df.原始签名.str.endswith(r'村').fillna(False)
        filter_2 = df.原始签名.str.contains(r'(鱼村|渔村|稻香村|度假村|渡假村|杏花村)$').fillna(False)
        df.ix[filter_1&(~filter_2),'abnormal_str_rule'] = '结尾是村但不满足规则'
        # Rule for signatures ending in 区 ("district"): blacklist unless
        # it contains one of the whitelisted district-like words.
        filter_1 = df.原始签名.str.endswith(r'区').fillna(False)
        filter_2 = df.原始签名.str.contains(r'院区|园区|小区|校区|度假区|景区|智能社区|渡假区').fillna(False)
        df.ix[filter_1&(~filter_2),'abnormal_str_rule'] = '结尾是区但不满足规则'

        # "Ends-with" rules: banned words anchored at the end of the line.
        pattern = r'.*(' + '|'.join(banned_end_words) + ')$'
        abnormal_str = list()      
        for each in df.原始签名:
            try:
                match_group = re.match(pattern,each)
                if match_group:
                    abnormal_str.append(match_group.groups()[0])
                else:
                    abnormal_str.append(np.nan)
            except TypeError:
                abnormal_str.append(np.nan)   
        
        df['abnormal_str_endwith']=abnormal_str

        # Merge the three rule outputs: first non-null wins, in the
        # order contains -> regex rules -> ends-with.
        df['检测出的符合黑名单规则的字符'] = np.nan

        df.检测出的符合黑名单规则的字符.fillna(df.abnormal_str_contain,inplace=True)
        df.检测出的符合黑名单规则的字符.fillna(df.abnormal_str_rule,inplace=True)
        df.检测出的符合黑名单规则的字符.fillna(df.abnormal_str_endwith,inplace=True)

        # Rows where any rule fired are labelled '-1' (blacklist).
        df['预测是否黑名单']=['']*len(df)
        black_filter = df.检测出的符合黑名单规则的字符.isnull()
        df.ix[~black_filter,'预测是否黑名单']='-1'
        
        return df.预测是否黑名单.tolist()

    def feature_extract(self,input_sign_list):
        #--------------------------------------------
        #   Preprocessing for the ML whitelist model: builds and returns
        #   the sparse feature matrix (structural features + tfidf).
        #--------------------------------------------     
        df = pd.DataFrame({'原始签名':input_sign_list})
        
        #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        # Structural features.
        train_feature_df = struct_feature_extract(input_sign_list)
        # Map suffix POS tags never seen during training to '' so the
        # fitted label encoder does not raise on unknown classes.
        normal_posseg_set = set(self.le_sp.classes_)
        new_posseg_set = set(train_feature_df.suffix_postag.unique()) - normal_posseg_set
        
        train_feature_df.ix[train_feature_df.suffix_postag.isin(new_posseg_set),'suffix_postag'] = ''
        
        tag_ids =self.le_sp.transform(train_feature_df.suffix_postag)
        output_array = self.ohc_sp.transform(tag_ids.reshape(-1, 1)).toarray()        
        
        # Spread the one-hot suffix-POS columns into the feature frame.
        for ii in range(output_array.shape[1]):
            train_feature_df['suffix_postag_%d'%ii] = output_array[:,ii]

        #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        # tfidf features: collapse letters to 'E' and digits to 'N',
        # strip non-word characters, then jieba-segment.
        # NOTE(review): attribute-style assignment (df.原始签名_clean = ...)
        # creates a DataFrame *attribute*, not a column; it works here
        # because the value is only read back the same way.
        df.原始签名_clean = df.原始签名.str.replace(r'[A-Za-z]+', 'E')
        df.原始签名_clean = df.原始签名_clean.str.replace(r'\d+', 'N')
        df.原始签名_clean = df.原始签名_clean.str.replace(r'\W+','',flags=re.U)                
        
        segment = list()
        for each in df.原始签名_clean:
            try:
                segs = jieba.cut(each)
                segment.append(' '.join([each_word for each_word in segs]))
            except AttributeError:
                # Non-string entry -- use a blank document.
                segment.append(' ')
        x_tfidf = self.tfidf_model.transform(segment)
        x_tfidf = self.ch2_tfidf.transform(x_tfidf)
        
        # Combine structural and tfidf features into one sparse matrix.
        sel_feature_df = train_feature_df.drop(['signiture','suffix_postag'],axis=1)
        x_sel_feature = csr_matrix(sel_feature_df)
        x_syn = hstack([x_sel_feature,x_tfidf],format = 'csr')
        
        # Remember the person-name flags for ml_predict's override rule.
        self.only_person_name = train_feature_df.person_name_flag.tolist()
        return x_syn

    def ml_predict(self,input_sparse_metrix):
        #--------------------------------------------
        #   ML whitelist prediction on the matrix built by
        #   feature_extract. Returns (labels, probabilities) where
        #   labels are '1' (white) / '-1' (black).
        #--------------------------------------------
        result = np.array([]).reshape(0,1)
        result_proba = np.array([]).reshape(0,2)
        # Predict in dense batches of 1280 rows to bound memory use.
        for ii,each in enumerate(batch_densearray_generator(input_sparse_metrix,1280)):
            result = np.vstack([result,self.mlp.predict_classes(each).reshape(-1,1)])
            result_proba = np.vstack([result_proba,self.mlp.predict_proba(each).reshape(-1,2)])
        
        label_result = list()
        label_result_proba = list()
        for ii,each in enumerate(result):
            if each >0:
                if self.only_person_name[ii]!=1:
                    label_result.append('1')
                    label_result_proba.append(result_proba[ii,1])
                else:
                    # A signature that is only a person name is forced
                    # onto the blacklist regardless of the model output.
                    label_result.append('-1')
                    label_result_proba.append(1.0)
            else:
                label_result.append('-1')   
                label_result_proba.append(result_proba[ii,0])
        return label_result,label_result_proba
        
        
