# -*- coding: utf-8 -*-
def sjcl(x):
    """Tokenize a product-name string with jieba and return a space-joined
    token string.

    Keeps only tokens that contain Chinese characters, ASCII letters or
    underscores, and drops tokens found in the stopword file. If fewer than
    two tokens survive, the token list is doubled before joining (original
    behaviour, presumably so single-token names still produce a usable
    bag-of-words downstream — TODO confirm).

    Parameters
    ----------
    x : str
        Raw product name.

    Returns
    -------
    str
        Space-separated tokens ("" when no token survives filtering).
    """
    import jieba
    import re
    # User dictionary improves segmentation of domain terms.
    jieba.load_userdict('/root/lizheng/ciku.txt')
    # Build the stopword set with a context manager so the file handle is
    # closed (the original left it open); a set gives O(1) membership tests.
    # NOTE(review): assumes the stopword file is UTF-8 — confirm on the host.
    with open('/root/lizheng/stopword.txt', encoding='utf-8') as f:
        stopwords = {line.rstrip() for line in f}
    # Match tokens containing Chinese characters, letters, or underscore.
    zh_pattern = re.compile(u'[\u4e00-\u9fa5_a-zA-Z]+')
    x = x.strip()
    # Precise-mode segmentation, then filter out non-word and stopword tokens.
    segs = jieba.cut(x, cut_all=False)
    outstr = [seg for seg in segs
              if zh_pattern.search(seg) and seg not in stopwords]
    if len(outstr) >= 2:
        out = " ".join(outstr)
    else:
        # Fewer than two tokens: duplicate the list (original behaviour kept).
        out = " ".join(outstr + outstr)
    return out


if __name__ == "__main__":
    # Train a text classifier on product names: read a CSV from HDFS,
    # tokenize names with sjcl(), hash tokens to features, and fit a
    # LinearSVC inside a Spark ML pipeline.
    import pandas as pd
    from pyspark import SparkConf, SparkContext
    from pyspark.sql import Row
    from pyspark.sql import SparkSession
    from pyspark.sql import SQLContext
    from pyspark.sql.types import *
    # Local mode; point setMaster at the cluster URL for distributed runs.
    conf = SparkConf().setMaster("local").setAppName("PySparkShell")
    spark = SparkSession.builder.appName("lz").getOrCreate()
    sc = SparkContext.getOrCreate()
    sqlContest = SQLContext(sc)
    from sklearn.linear_model import PassiveAggressiveClassifier
    from sklearn import metrics
    import numpy as np
    from sklearn.externals import joblib
    from sklearn.utils import shuffle
    from sklearn.model_selection import StratifiedShuffleSplit
    from pyspark.ml import Pipeline  # ML pipeline
    from pyspark.ml.feature import OneHotEncoder
    from pyspark.ml.feature import StringIndexer
    lines = sc.textFile('hdfs://centos4:8020/user/hive/traindata_6512677_3657.csv')
    header = lines.first()  # header row
    lines = lines.filter(lambda row: row != header)  # drop the header
    lines = lines.sample(False, 0.01, 666)  # 1% sample, fixed seed
    print(lines.count())
    parts = lines.map(lambda l: l.split(","))  # split each row on commas
    data = parts.map(lambda p: Row(sjcl(p[0]), str(p[1])))  # tokenize names
    print(data.count())
    schema = StructType([StructField('HWMC', StringType(), True),
                         StructField('lab', StringType(), True)])
    data_temp = sqlContest.createDataFrame(data, schema)
    data_temp = data_temp.na.drop()
    data_temp.registerTempTable("data_temp")
    print(data_temp.count())
    sqlContest.sql("select count(distinct lab) from  data_temp limit 100").show()
    # Index the string label into a numeric "indexed" column.
    stringindexer = StringIndexer(inputCol="lab", outputCol="indexed").fit(data_temp)
    data_temp = stringindexer.transform(data_temp)
    data_temp.registerTempTable("data_temp")
    sqlContest.sql("select * from  data_temp limit 100").show()
    # One-hot view of the label, kept for inspection only — a vector is not
    # a valid classifier label column.
    encoder = OneHotEncoder(inputCol="indexed", outputCol="label")
    data_temp = encoder.transform(data_temp)
    sqlContest.sql("select * from  data_temp ").show()
    # Split into training and test sets.
    data_train, data_test = data_temp.randomSplit([0.9, 0.1], seed=678)
    from pyspark.ml.feature import Tokenizer
    from pyspark.ml.feature import HashingTF
    from pyspark.ml.feature import IDF

    from pyspark.ml import PipelineModel  # model persistence
    from pyspark.ml.classification import RandomForestClassifier
    from pyspark.ml.classification import GBTClassifier  # binary only
    from pyspark.ml.classification import LinearSVC
    tokenizer = Tokenizer(inputCol="HWMC", outputCol="hwmc")
    hashingTF = HashingTF(numFeatures=2**18, inputCol=tokenizer.getOutputCol(), outputCol="features")
    # BUG FIX: LinearSVC must train on the numeric "indexed" column; its
    # default labelCol="label" would pick up the one-hot *vector* above.
    # NOTE: LinearSVC supports binary classification only — verify the data
    # has two classes, or switch back to a multiclass model.
    svm = LinearSVC(maxIter=5, regParam=0.01, labelCol="indexed")
    # BUG FIX: the original pipeline referenced `gbt`, which was commented
    # out (NameError), and re-applied stringindexer/encoder to data that
    # already carries their output columns (would fail with "output column
    # already exists"). data_train already has "indexed", so only the
    # feature stages and the model belong in the pipeline.
    pipline = Pipeline(stages=[tokenizer, hashingTF, svm])
    model = pipline.fit(data_train)
    prediction = model.transform(data_test)
    selected = prediction.select('HWMC', 'lab', 'prediction')
    for row in selected.take(100):
        print(row)



