# -*- coding: utf-8 -*-
"""
Created on Tue Mar 13 16:31:30 2018

@author: lizheng
"""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import jieba
import jieba.analyse
#jieba.enable_parallel()  # parallel word segmentation -- Linux only
jieba.load_userdict('/root/lizheng/ciku.txt')
from pyspark.sql import SparkSession
from sklearn.externals import joblib
from pyspark.ml import Pipeline #管道
from pyspark.ml.feature import OneHotEncoder
from pyspark.ml.feature import StringIndexer
from pyspark.ml.feature import Tokenizer
from pyspark.ml.feature import HashingTF 
from pyspark.ml.feature import IDF
from pyspark.ml import PipelineModel #模型保存
from pyspark.ml.classification import RandomForestClassifier,LinearSVC
from pyspark.sql import Row
from pyspark.sql.types import *
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from multiprocessing import Pool
import datetime
import pyspark.ml.evaluation as ev
SPARK_VERSION=2      
APP_NAME = 'lz_pyspark,' + str(SPARK_VERSION)
    
def sjcl(x): 
    """Segment a goods-name string with jieba and return its tokens
    space-joined (the whitespace-delimited format pyspark.ml's
    Tokenizer expects downstream).

    Only tokens containing CJK characters, ASCII letters or '_' are
    kept, and tokens found in the stop-word set are dropped.  When
    fewer than two tokens survive, the token list is doubled so the
    result never hashes as a single-term document.

    :param x: raw goods-name text (utf-8)
    :return: space-separated token string (possibly empty)
    """
    # Python 2 encoding hack: this function is pickled out to Spark
    # executor processes, so the default-encoding override must be
    # (re-)applied in the worker as well.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    # Build the large stop-word set and compile the token filter only
    # once per process, caching both on the function object.  The
    # original rebuilt the ~400-entry *list* and recompiled the regex
    # on every call, paying O(n) list membership per token; a frozenset
    # gives O(1) lookups with identical membership semantics.
    if not hasattr(sjcl, '_cache'):
        stopwords = frozenset(['\ufeff合计', '详见', '折扣', '行数', '清单', '明细', '一批', \
                 '--', '?', '“', '”', '》', '－－———', '》），', '）÷（１－', \
                 '”，', '）、', '＝（', ':', '→', '℃', '&', '*', '一一', '~~~~',\
                 '’', '.', '『', '.一', './', '--', '』', '＝″', '【', '［＊］',\
                 '｝＞', '［⑤］］', '［①Ｄ］', 'ｃ］', 'ｎｇ昉', '＊', '//', \
                 '［', '］', '［②ｅ］', '［②ｇ］', '＝｛', '}', '，也', '‘', 'Ａ', '［①⑥］', '［②Ｂ］', '［①ａ］', '［④ａ］', '［①③］', '［③ｈ］', '③］', '１．', '－－', '［②ｂ］', '’‘', '×××', '［①⑧］', '０：２', '＝［', '［⑤ｂ］', '［②ｃ］', '［④ｂ］', '［②③］', '［③ａ］', '［④ｃ］', '［①⑤］', '［①⑦］', '［①ｇ］', '∈［', '［①⑨］', '［①④］', '［①ｃ］', '［②ｆ］', '［②⑧］', '［②①］', '［①Ｃ］', '［③ｃ］', '［③ｇ］', '［②⑤］', '［②②］', '一.', '［①ｈ］', '.数', '［］', '［①Ｂ］', '数/', '［①ｉ］', '［③ｅ］', '［①①］', '［④ｄ］', '［④ｅ］', '［③ｂ］', '［⑤ａ］', '［①Ａ］', '［②⑧］', '［②⑦］', '［①ｄ］', '［②ｊ］', '〕〔', '］［', '://', '′∈', '［②④', '［⑤ｅ］', '１２％', 'ｂ］', '...', '...................', '…………………………………………………③', 'ＺＸＦＩＴＬ', '［③Ｆ］', '」', '［①ｏ］', '］∧′＝［', '∪φ∈', '′｜', '｛－', '②ｃ', '｝', '［③①］', 'Ｒ．Ｌ．', '［①Ｅ］', 'Ψ', '－［＊］－', '↑', '.日', '［②ｄ］', '［②', '［②⑦］', '［②②］', '［③ｅ］', '［①ｉ］', '［①Ｂ］', '［①ｈ］', '［①ｄ］', '［①ｇ］', '［①②］', '［②ａ］', 'ｆ］', '［⑩］', 'ａ］', '［①ｅ］', '［②ｈ］', '［②⑥］', '［③ｄ］', '［②⑩］', 'ｅ］', '〉', '】', '元／吨', '［②⑩］', '２．３％', '５：０', '［①］', '::', '［②］', '［③］', '［④］', '［⑤］', '［⑥］', '［⑦］', '［⑧］', '［⑨］', '……', '——', '?', '、', '。', '“', '”', '《', '》', '！', '，', '：', '；', '？', '．', ',', '．', "'", '?', '·', '———', '──', '?', '—', '<', '>', '（', '）', '〔', '〕', '[', ']', '(', ')', '-', '+', '～', '×', '／', '/', '①', '②', '③', '④', '⑤', '⑥', '⑦', '⑧', '⑨', '⑩', 'Ⅲ', 'В', '"', ';', '#', '@', 'γ', 'μ', 'φ', 'φ．', '×', 'Δ', '■', '▲', 'sub', 'exp', 'sup', 'sub', 'Lex', '＃', '％', '＆', '＇', '＋', '＋ξ', '＋＋', '－', '－β', '＜', '＜±', '＜Δ', '＜λ', '＜φ', '＜＜', '=', '＝', '＝☆', '＝－', '＞', '＞λ', '＿', '～±', '～＋', '［⑤ｆ］', '［⑤ｄ］', '［②ｉ］', '≈', '［②Ｇ］', '［①ｆ］', 'ＬＩ', '㈧', '［－', '......', '〉', '［③⑩］'])
        import re
        # Keep only tokens containing Chinese characters, letters or '_'.
        sjcl._cache = (stopwords, re.compile(u'[\u4e00-\u9fa5_a-zA-Z]+'))
    stopwords, zhPattern = sjcl._cache

    # Segment the goods name (precise mode) and filter the tokens.
    outstr = []
    for seg in jieba.cut(x, cut_all=False):
        if zhPattern.search(seg) and seg not in stopwords:
            outstr.append(seg)

    # Fewer than two surviving tokens: double the list so the output
    # always carries at least two terms (when non-empty).
    if len(outstr) >= 2:
        return " ".join(outstr)
    return " ".join(outstr + outstr)


def to_int(x):
    """Convert *x* (typically a numeric label string) to a Python int.

    :param x: value accepted by ``int()`` -- e.g. ``'42'`` or ``42``
    :return: the integer value of *x*
    """
    return int(x)



def row_data(x):
    """Tokenize a partition of (goods_name, label) rows.

    Runs :func:`sjcl` on the first column of every row and keeps the
    second column (the label) unchanged.

    :param x: iterable of indexable rows ``(goods_name, label, ...)``
    :return: list of ``(tokenized_name, label)`` tuples
    """
    processed = []
    for record in x:
        processed.append((sjcl(record[0]), record[1]))
    return processed

if __name__ == "__main__":
    # Train a goods-code text classifier: Hive table -> jieba tokens ->
    # TF features -> random forest, tuned by cross-validation.
    spark = SparkSession.builder.appName(APP_NAME).enableHiveSupport().getOrCreate()
    # Training rows are expected as (goods_name, label) -- confirm the
    # table schema matches the positional access in row_data.
    df = spark.sql('select * from goods_code.traindata')

    print("开始分词", datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S'))
    statime = datetime.datetime.now()
    # Tokenize each row; repartition(10) spreads the jieba work.
    data1 = df.rdd.repartition(10).mapPartitions(lambda x: row_data(x))
    data1.take(5)  # force evaluation so the timing below is meaningful
    endtime = datetime.datetime.now()
    print((endtime - statime).seconds)

    # HWMC = tokenized goods name, lab = raw label string.
    schema = StructType([StructField('HWMC', StringType(), True),
                         StructField('lab', StringType(), True)])
    data_temp = spark.createDataFrame(data1, schema)
    data_temp = data_temp.na.drop()
    # registerTempTable is deprecated since Spark 2.0.
    data_temp.createOrReplaceTempView("data_temp")

    spark.sql("select count(1) from  data_temp ").show()
    spark.sql("select * from  data_temp limit 10").show()

    # 90/10 train/test split with a fixed seed for reproducibility.
    data_train, data_test = data_temp.randomSplit([0.9, 0.1], seed=678)

    # Pipeline: label string -> index, name -> whitespace tokens ->
    # hashed term frequencies -> random forest.
    stringindexer = StringIndexer(inputCol="lab", outputCol="label")
    tokenizer = Tokenizer(inputCol="HWMC", outputCol="hwmc")
    hashingTF = HashingTF(numFeatures=2**5, inputCol=tokenizer.getOutputCol(),
                          outputCol="features")
    # NOTE(review): minInfoGain=2 is unusually high -- information gain
    # rarely exceeds ~1 for a single split, so trees may never split.
    # Confirm this threshold is intended.
    f = RandomForestClassifier(maxDepth=5, maxBins=32, minInstancesPerNode=5,
                               minInfoGain=2, maxMemoryInMB=1500, numTrees=50,
                               seed=42, featureSubsetStrategy='sqrt')
    pipeline = Pipeline(
            stages=[stringindexer,
                    tokenizer,
                    hashingTF, f])

    # Hyper-parameter grid searched by the cross-validator.
    paramGrid = ParamGridBuilder()\
        .addGrid(hashingTF.numFeatures, [2**5, 2**10, 2**15])\
        .addGrid(f.maxDepth, [5, 15, 20])\
        .addGrid(f.numTrees, [50, 100, 150, 200, 250])\
        .addGrid(f.minInstancesPerNode, [10, 15, 20, 50]).build()

    crossval = CrossValidator(estimator=pipeline,
                              estimatorParamMaps=paramGrid,
                              evaluator=MulticlassClassificationEvaluator())

    RFmodel = crossval.fit(data_train)
    result = RFmodel.transform(data_test)
    # BUG FIX: the original selected from an undefined name `prediction`;
    # the transformed test DataFrame is `result`.
    result1 = result.select('HWMC', 'lab', 'label', 'prediction')
    for row in result1.take(100):
        print(row)

    # BUG FIX: the original evaluated an undefined name `results` and
    # requested areaUnderROC/areaUnderPR, which are binary-classifier
    # metrics and would raise on MulticlassClassificationEvaluator.
    # Report multiclass F1 and accuracy instead.
    evaluator = MulticlassClassificationEvaluator()
    print(evaluator.evaluate(result,
         {evaluator.metricName: 'f1'}))
    print(evaluator.evaluate(result,
         {evaluator.metricName: 'accuracy'}))
