#!/home/kuangxiaofeng/miniconda3/bin/python
# -*- coding: utf-8 -*-
"""
Created on Tue Aug  1 17:27:31 2017
签名行业分类器的对象库
@author: Steven
"""
import pickle
import pandas as pd
import numpy as np
from sklearn import preprocessing
import jieba,re
import time as ttime
import jieba.posseg as pseg
from scipy.sparse import csr_matrix,hstack

def whatstime():
    """Return the current local time formatted as a log-line prefix.

    The format is "[%Y-%b-%d  %H:%M:%S]: ",
    e.g. "[2017-Aug-01  17:27:31]: ".
    """
    return ttime.strftime("[%Y-%b-%d  %H:%M:%S]: ", ttime.localtime())

def batch_densearray_generator(X, batch_size):
    """Yield dense row batches of a sparse matrix, ``batch_size`` rows at a time.

    Each yielded value is a 2-D numpy array of at most ``batch_size`` rows,
    taken from ``X`` in order. For an empty matrix a single empty batch is
    still yielded (matches the ceil-based batch count).
    """
    total_batches = np.ceil(X.shape[0] / batch_size)
    row_ids = list(range(X.shape[0]))
    batch_no = 0
    while True:
        chunk_ids = row_ids[batch_no * batch_size:(batch_no + 1) * batch_size]
        dense_chunk = X[chunk_ids, :].todense()
        batch_no += 1
        yield np.array(dense_chunk)
        if batch_no >= total_batches:
            break

def feature_extract(input_corpus):
    """Extract structural features from a list of signature strings (v2.0).

    For each signature this computes:
      - str_len: character length of the raw signature
      - suffix_len: length of the last segmented word (1 when the last
        word's POS tag is 'eng' or 'm', 0 when segmentation yields a
        single word)
      - suffix_postag: jieba POS tag of the last word ('' for single-word
        signatures)
      - geography_flag: number of tokens tagged 'ns' (place names)

    Parameters
    ----------
    input_corpus : list of str
        Raw signature texts; non-string entries fall back to empty
        word/tag lists.

    Returns
    -------
    pandas.DataFrame
        Columns: signiture, str_len, suffix_len, geography_flag,
        suffix_postag.
    """
    print(whatstime()+ '开始提取特征。。。')
    pseg_jieba_list = list()

    str_len_list = list()
    suffix_len_list = list()
    suffix_postag_list = list()
    contain_geo_cnt_list = list()

    # Segment and POS-tag every signature with jieba.
    print(whatstime()+'正在词性标注...')
    for ii, eachline in enumerate(input_corpus):
        if ii % 50000 == 0:
            print(whatstime() + '已完成 %d '%ii)

        # NOTE(review): the original also built a cleaned copy of the line
        # (letters->'E', digits->'N', punctuation stripped) that was never
        # read anywhere; that dead code has been removed.
        str_len_list.append(len(str(eachline)))

        try:
            word_psegs = [(word, flag) for word, flag in pseg.cut(eachline)]
            words = [each[0] for each in word_psegs]
            psegs = [each[1] for each in word_psegs]
            pseg_jieba_list.append((words, psegs))
        except AttributeError:
            # Non-string input (e.g. NaN) cannot be segmented.
            pseg_jieba_list.append(([''], ['']))

    print(whatstime()+'词性标注finished!')

    for ii, (words, psegs) in enumerate(pseg_jieba_list):
        if len(words) > 1:
            # POS tag of the suffix (last segmented word).
            suffix_postag_list.append(psegs[-1])
            # English words and numerals count as a single character.
            if psegs[-1] == 'eng' or psegs[-1] == 'm':
                suffix_len_list.append(1)
            else:
                suffix_len_list.append(len(words[-1]))
        else:
            suffix_len_list.append(0)
            suffix_postag_list.append('')

        # Count of geographic-name tokens contained in the signature.
        geos = [flag for flag in psegs if flag == 'ns']
        contain_geo_cnt_list.append(len(geos))

    # Assemble the feature columns into a single sample DataFrame.
    output_df = pd.DataFrame({'signiture':input_corpus,
                              'str_len':str_len_list,
                              'suffix_len':suffix_len_list,
                              'geography_flag':contain_geo_cnt_list,
                              'suffix_postag':suffix_postag_list})
    print(whatstime()+'特征提取完毕！')
    return output_df

class IND_Classifier(object):
    """Industry classifier for SMS signatures.

    Combines strong hand-written regex rules (``regex_rule_predict``) with
    a Keras MLP over structural + tfidf features (``feature_extract`` /
    ``ml_predict``). Model artifacts are loaded from ./Model at
    construction time.
    """

    def __init__(self):
        # NOTE(review): pickle.load is unsafe on untrusted input; the model
        # bundle here is assumed to be a trusted local artifact.
        with open('./Model/model_set_R2.6.0.pkl','rb') as f:
            model_object = pickle.load(f)

        # Imported lazily so the module can be imported without keras until
        # a classifier instance is actually created.
        from keras.models import load_model

        self.mlp = load_model('./Model/mlp_INDClassifier_2.6.0.h5')
        self.le_y = model_object['le_y']            # label encoder for target classes
        self.ohc_y = model_object['ohc_y']          # one-hot encoder for target classes
        self.le_sp = model_object['le_sp']          # label encoder for suffix POS tags
        self.ohc_sp = model_object['ohc_sp']        # one-hot encoder for suffix POS tags
        self.stop_words = model_object['stop_words']
        self.tfidf_model = model_object['tfidf_model']
        self.ch2_tfidf = model_object['ch2_tfidf']  # chi2 feature selector over tfidf

    def regex_rule_predict(self,input_sign_list):
        """Classify signatures with strong hand-written regex rules.

        Rules come from an Excel sheet; each rule names keywords a signature
        must contain / must not contain, suffixes it must not end with, and
        a minimum character length. The first rule that matches wins.

        Parameters
        ----------
        input_sign_list : list of str

        Returns
        -------
        list of str
            One category string per input signature ('' when no rule
            matched).
        """
        df = pd.DataFrame({'原始签名':input_sign_list})
        # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        # Load the regex rule definitions for industry classification.
        filename = './Data/户部分类及企业关键词对应表-最终版1.xlsx'
        rule_df = pd.read_excel(filename)
        rule_df.columns = ['大类', '小类', '关系', '包含',
                           '且', '不包含', '且_1', '结尾不是', '最小字符']

        rule_df['分类'] = rule_df.大类 + '-' + rule_df.小类
        rule_df.fillna('',inplace=True)

        # Build one (contain, not-contain, not-end-with, min-length)
        # pattern tuple per rule category.
        pattern_dict = dict()
        for eachline in rule_df.itertuples():
            contain_str = '|'.join(eachline.包含.split('、'))
            not_contain_str = '|'.join(eachline.不包含.split('、'))
            not_end_with_str = '|'.join(eachline.结尾不是.split('、'))

            if len(contain_str) > 0:
                contain_regex_str = '(' + contain_str + ')'
            else:
                # An empty "contain" pattern can never produce a non-empty
                # match, so such rules never fire.
                contain_regex_str = ''

            if len(not_contain_str) > 0:
                # Tempered pattern: matches only when none of the forbidden
                # keywords occur anywhere in the signature.
                not_contain_regex_str = '^((?!' + not_contain_str + ').)*' + \
                                '((?!' + not_contain_str + ').)*$'
            else:
                not_contain_regex_str = ''

            if len(not_end_with_str) > 0:
                not_end_with_regex_str = '.*(' + not_end_with_str + ')$'
            else:
                not_end_with_regex_str = ''

            # Minimum-length requirement: numeric cells only; blank ('' after
            # fillna) means no constraint.
            if not(isinstance(eachline.最小字符,str)):
                less_str_regex_len = int(eachline.最小字符)
            else:
                less_str_regex_len = 0

            pattern_dict[eachline.分类] = (contain_regex_str,
                                         not_contain_regex_str,
                                         not_end_with_regex_str,
                                        less_str_regex_len)
        # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        # Apply the rules; first matching category wins.
        # NOTE(review): the original also accumulated a pub_names list that
        # was never returned; that dead code has been removed.
        category_strs = list()
        for eachline in df.itertuples():
            eachline_category = ''

            for each_class in pattern_dict:
                (contain_regex_str,
                 not_contain_regex_str,
                 not_end_with_regex_str,
                 less_str_regex_len) = pattern_dict[each_class]

                try:
                    # Must contain at least one required keyword
                    # (non-empty match).
                    contain_match_str = re.search(contain_regex_str,eachline.原始签名).group()
                    if len(contain_match_str) == 0:
                        contain_match_str = np.nan
                except AttributeError:
                    contain_match_str = np.nan

                try:
                    # Must not contain any forbidden keyword; a failed
                    # search vetoes the match.
                    re.search(not_contain_regex_str,eachline.原始签名).group()
                except AttributeError:
                    contain_match_str = np.nan

                try:
                    # Must not end with a forbidden suffix.
                    suffix_str = re.search(not_end_with_regex_str,eachline.原始签名).group()
                    if not(len(suffix_str) == 0):
                        contain_match_str = np.nan
                except AttributeError:
                    pass

                # Length constraint.
                len_flag = len(eachline.原始签名) >= less_str_regex_len

                if isinstance(contain_match_str,str) and len_flag:
                    eachline_category = each_class
                    break

            category_strs.append(eachline_category)

        return category_strs

    def feature_extract(self,input_sign_list,sel_KBest=False):
        """Build the sparse feature matrix consumed by the MLP model.

        Combines structural features (lengths, one-hot suffix POS tag,
        geography count) with tfidf features over cleaned, segmented text.

        Parameters
        ----------
        input_sign_list : list of str
        sel_KBest : bool, optional
            When True, reduce the tfidf block with the fitted chi2
            selector.

        Returns
        -------
        scipy.sparse.csr_matrix
        """
        df = pd.DataFrame({'原始签名':input_sign_list})

        #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        # Structural features. POS tags unseen at training time are mapped
        # to '' before label-encoding so transform() cannot fail on them.
        train_feature_df = feature_extract(df.原始签名.tolist())
        normal_posseg_set = set(self.le_sp.classes_)
        new_posseg_set = set(train_feature_df.suffix_postag.unique()) - normal_posseg_set

        # FIX: DataFrame.ix was removed from pandas; .loc is the
        # replacement for label-based indexing.
        train_feature_df.loc[train_feature_df.suffix_postag.isin(new_posseg_set),'suffix_postag'] = ''
        pos_id = self.le_sp.transform(train_feature_df.suffix_postag)
        output_array = self.ohc_sp.transform(pos_id.reshape(-1, 1)).toarray()

        for ii in range(output_array.shape[1]):
            train_feature_df['suffix_postag_%d'%ii] = output_array[:,ii]

        #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        # tfidf features over a normalized copy of the text: letter runs ->
        # 'E', digit runs -> 'N', non-word characters stripped.
        # FIX: assign via [] (attribute assignment creates an instance
        # attribute, not a DataFrame column) and pass regex=True explicitly
        # (the Series.str.replace default changed in pandas 2.0).
        df['原始签名_clean'] = df.原始签名.str.replace(r'[A-Za-z]+', 'E', regex=True)
        df['原始签名_clean'] = df['原始签名_clean'].str.replace(r'\d+', 'N', regex=True)
        df['原始签名_clean'] = df['原始签名_clean'].str.replace(r'\W+','',regex=True,flags=re.U)

        segment = list()
        for each in df['原始签名_clean']:
            try:
                segs = jieba.cut(each)
                segment.append(' '.join([each_word for each_word in segs]))
            except AttributeError:
                # Non-string entries cannot be segmented.
                segment.append(' ')

        x_tfidf = self.tfidf_model.transform(segment)
        if sel_KBest:
            x_tfidf = self.ch2_tfidf.transform(x_tfidf)

        # Drop the raw-text and raw-POS columns, keep numeric features, and
        # stack them next to the tfidf block as one csr matrix.
        sel_feature_df = train_feature_df.drop(['signiture','suffix_postag'],axis=1)
        x_sel_feature = csr_matrix(sel_feature_df)
        x_syn = hstack([x_sel_feature,x_tfidf],format = 'csr')

        return x_syn

    def ml_predict(self,input_sparse_metrix):
        """Predict class labels and max class probabilities with the MLP.

        Parameters
        ----------
        input_sparse_metrix : scipy.sparse matrix
            Feature matrix as produced by feature_extract().

        Returns
        -------
        (list, list of str)
            Decoded class labels, and the per-sample maximum probability
            formatted with '%.8f'.
        """
        result = list()
        result_proba = list()

        # Densify in batches of 1280 rows to keep memory bounded.
        for each in batch_densearray_generator(input_sparse_metrix,1280):
            # NOTE(review): predict_classes/predict_proba were removed in
            # tf.keras >= 2.6; this relies on the older Keras API the model
            # was saved with — confirm against the deployed keras version.
            result.extend(self.le_y.inverse_transform(self.mlp.predict_classes(each)).tolist())
            result_proba.extend(['%.8f'%each_r.max() for each_r in self.mlp.predict_proba(each)])

        return result,result_proba
