# -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 10:29:21 2018

@author: lizheng
"""



def sjcl(x): 
    '''
    Tokenize a list of strings (product names) with jieba.

    Parameters
    ----------
    x : list of str
        Raw text strings to segment.

    Returns
    -------
    list of str
        One space-joined token string per input element. Tokens are kept
        only if they match the CJK/latin/digit pattern and are not in the
        stopword list. A joined result of length 1 is doubled (original
        behavior, presumably so single characters survive vectorization
        — TODO confirm).
    '''
    import datetime
    print("开始分词",datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S'))
    import jieba
    import jieba.analyse
    #jieba.enable_parallel()  # parallel segmentation, Linux only
    jieba.load_userdict('/root/spbm/data/ciku.txt')
    # BUG FIX: the stopword file was opened without ever being closed;
    # a context manager releases the handle deterministically.
    with open('/root/spbm/data/stopword.txt') as f:
        stopwords = {line.rstrip() for line in f}

    import re
    # Keep tokens containing CJK characters, underscores, latin letters,
    # digits or hyphens.
    zhPattern = re.compile(u'[\u4e00-\u9fa5_a-zA-Z0-9-]+')

    train_X = []
    for fileContent in x:  # iterate values directly instead of range(len(x))
        segments = [seg for seg in jieba.cut(fileContent)
                    if zhPattern.search(seg) and seg not in stopwords]
        joined = " ".join(segments)
        if len(joined) == 1:
            train_X.append(joined + joined)
        else:
            train_X.append(joined)
    #jieba.disable_parallel()
    return train_X

def iter_minibatches(filename, minibatch_size):
    '''
    Generator yielding (X, y) minibatches from a CSV file.

    The whole CSV is loaded, its HWMC column is tokenized via sjcl,
    rows with empty tokenization are dropped, the frame is shuffled,
    and then (x, y) numpy-array pairs of at most `minibatch_size` rows
    are yielded.

    Parameters
    ----------
    filename : str
        Path to the training CSV (must contain HWMC and U_CODE columns).
    minibatch_size : int
        Maximum number of rows per yielded batch.

    Yields
    ------
    (numpy.ndarray, numpy.ndarray)
        Tokenized texts and their U_CODE labels.
    '''
    import pandas as pd
    import numpy as np
    from sklearn.utils import shuffle
    x = []
    y = []
    # BUG FIX: the file handle was closed only after the generator was
    # fully exhausted; a context manager scopes it to the read.
    with open(filename, 'rb') as csvfile:
        reader = pd.read_csv(csvfile
                             #,encoding = 'gb18030'
                             )
    # Tokenize product names
    #reader = reader.drop_duplicates(['U_NAME','U_CODE','HWMC'])
    reader['HWMC'] = sjcl(list(reader['HWMC'].astype(str)))
    # Replace blanks with NaN so they can be filtered out below.
    reader['HWMC'] = reader['HWMC'].apply(lambda s: np.NaN if str(s) == '' else s)
    reader = reader[reader['HWMC'].notnull()]
    reader.index = np.arange(len(reader))
    reader = shuffle(reader)
    reader.index = np.arange(len(reader))
    for line in reader.index:
        x.append(reader.HWMC[line])
        y.append(reader.U_CODE[line])
        if len(x) >= minibatch_size:
            yield np.array(x), np.array(y)  # hand batches out as numpy arrays
            x, y = [], []
    # BUG FIX: the original dropped the final partial batch (fewer than
    # minibatch_size rows); yield it so no training data is lost.
    if x:
        yield np.array(x), np.array(y)



def get_classes(filename):
    '''
    Return the unique U_CODE class labels contained in a CSV file.

    Parameters
    ----------
    filename : str
        Path to a CSV with a U_CODE column.

    Returns
    -------
    numpy.ndarray
        The distinct U_CODE values (order depends on the shuffle).
    '''
    import pandas as pd
    import numpy as np
    from sklearn.utils import shuffle
    frame = pd.read_csv(filename
                        #,encoding = 'gb18030'
                        )
    frame = shuffle(frame)
    unique_rows = frame.drop_duplicates('U_CODE')
    return np.array(unique_rows['U_CODE'])

def get_test(filename2):
    '''
    Load the test set: tokenized HWMC texts plus their U_CODE labels.

    Duplicate (U_NAME, U_CODE, HWMC) rows are removed before tokenizing.

    Parameters
    ----------
    filename2 : str
        Path to the test CSV.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Tokenized product-name strings and the matching U_CODE labels.
    '''
    import pandas as pd
    import numpy as np
    frame = pd.read_csv(filename2
                        #,encoding = 'gb18030'
                        )
    frame = frame.drop_duplicates(['U_NAME','U_CODE','HWMC'])
    texts = np.array(sjcl(list(frame['HWMC'].astype(str))))
    labels = np.array(frame['U_CODE'])
    return texts, labels


def get_hv(x_train):
    '''
    Hash-vectorize an iterable of token strings.

    Parameters
    ----------
    x_train : iterable of str
        Space-joined token strings.

    Returns
    -------
    scipy.sparse matrix
        2**20-dimensional non-negative hashed feature matrix.
    '''
    from sklearn.feature_extraction.text import HashingVectorizer
    vectorizer = HashingVectorizer(decode_error='ignore',
                                   n_features=2 ** 20,
                                   alternate_sign=False)
    return vectorizer.transform(x_train)
    
def sigmoid(x): 
    '''
    Map a decision-function score to a confidence value in (0, 1).

    Parameters
    ----------
    x : float
        Raw score (e.g. a decision-function margin).

    Returns
    -------
    float
        1 / (1 + exp(-x)) rounded to 2 decimal places.
    '''
    import math
    # BUG FIX: math.exp(-x) raises OverflowError for x below about -709.
    # Use the algebraically equivalent exp(x)/(1+exp(x)) on the negative
    # branch so both tails are numerically safe.
    if x >= 0:
        return round(1.0 / (1.0 + math.exp(-x)), 2)
    ex = math.exp(x)
    return round(ex / (1.0 + ex), 2)

def get_tfidf(x):
    '''
    TF-IDF feature extraction over a fixed, pickled vocabulary.
    Intended for small datasets (fits in memory in one pass).

    Parameters
    ----------
    x : iterable of str
        Space-joined token strings.

    Returns
    -------
    scipy.sparse matrix
        TF-IDF features restricted to the pickled vocabulary.
    '''
    from sklearn.feature_extraction.text import TfidfVectorizer
    import pickle
    # SECURITY NOTE(review): pickle.load executes arbitrary code if the
    # file is attacker-controlled; this assumes the vocabulary file is
    # trusted local data.
    # BUG FIX: the file was opened without being closed; use `with`.
    with open('/root/spbm/data/goods_vocabulary.txt', 'rb') as f:
        vocabulary = pickle.load(f)
    # Initialize TfidfVectorizer with the fixed vocabulary, then fit
    # (computes idf from x) and transform in one step.
    vectorizer = TfidfVectorizer(vocabulary=vocabulary)
    return vectorizer.fit_transform(x)


def fitby_linear_model(filename,filename2,model,size,modelsave_filename):
    '''
    Incrementally train a linear model via partial_fit over CSV minibatches,
    reporting test accuracy after each batch, then persist the model.

    Parameters
    ----------
    filename : str
        Path to the training CSV.
    filename2 : str
        Path to the test CSV.
    model : estimator
        A scikit-learn estimator supporting partial_fit.
    size : int
        Minibatch size for the training iterator.
    modelsave_filename : str
        Destination path for the joblib model dump.
    '''
    import datetime

    from sklearn import metrics
    # NOTE(review): sklearn.externals.joblib is deprecated and removed in
    # modern scikit-learn; switch to `import joblib` when upgrading.
    from sklearn.externals import joblib
    MD = model
    print("获取classes",datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S'))
    all_classes = get_classes(filename)

    x_test, y_test = get_test(filename2)
    # PERF FIX: the test set never changes, so vectorize it once instead
    # of re-hashing it on every minibatch iteration.
    x_test_hv = get_hv(x_test)
    minibatch_train_iterators = iter_minibatches(filename, size)
    for i, (X_train, y_train) in enumerate(minibatch_train_iterators):
        print("{} time".format(i))  # current iteration count
        # partial_fit requires the full class list on the first call.
        MD.partial_fit(get_hv(X_train), y_train, classes=all_classes)
        result = MD.predict(x_test_hv)
        # Report held-out accuracy after each incremental update.
        print(model,"score: %.4g" % metrics.accuracy_score(y_test,result))
    # BUG FIX: removed a final MD.predict(...) whose result was discarded
    # (dead computation before the dump).
    joblib.dump(MD, modelsave_filename, compress=('gzip', 3)) 
    
def prect(model_filename,data_filename,prectdata_savefilename):
    '''
    Predict SPBM codes for product names and write results to CSV.

    Parameters
    ----------
    model_filename : str
        Path to the joblib-dumped model.
    data_filename : str
        Path to the input CSV (must contain an HWMC column).
    prectdata_savefilename : str
        Output CSV path; gains 'proba', 'SPBM' and merged name columns.
    '''
    import pandas as pd 
    import numpy as np
    import datetime
    from sklearn.externals import joblib
    MD = joblib.load(model_filename)
    print("开始预测",datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S'))
    # BUG FIX: the input file was opened but never closed; a context
    # manager releases the handle as soon as the CSV is parsed.
    with open(data_filename, 'rb') as file:
        df_re = pd.read_csv(file)
    df_re = df_re.drop_duplicates('HWMC') 
    bm_mc = pd.read_csv('/root/data/spbm_bmmc.csv')
    df_test = sjcl(list(df_re['HWMC'].astype(str)))
    df_test_hv = get_hv(df_test)
    # Convert the max decision-function margin into a 0-1 confidence
    # via the sigmoid helper.
    df_re['proba'] =  list(map(sigmoid,np.amax(MD.decision_function(df_test_hv),axis=1)))
    df_re['SPBM'] = MD.predict( df_test_hv)
    # Left-join the code/name lookup table so unmatched codes survive.
    df_re = df_re.merge(bm_mc ,on = 'SPBM',how = 'left')
    df_re.to_csv(prectdata_savefilename,index = False,index_label = False,encoding = 'utf-8')
