package com.jnpc.spark
import org.apache.spark.ml.feature._
import org.apache.spark.mllib.classification.NaiveBayes
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.sql.{Row, SparkSession}

/**
  * Created by yangqiang on 2018/4/4 0004.
  */
object PMWorkGroup {

  /** One training example: numeric group label plus the raw text to classify. */
  case class RawDataRecord(pmgroup: Double, text: String)

  /**
    * Trains a multinomial Naive Bayes text classifier over tab-separated input
    * (field 1 = text, field 2 = group name) and prints its accuracy on a
    * held-out 20% split.
    *
    * @param args exactly two elements: input file path and partition count
    */
  def main(args: Array[String]): Unit = {

    if (args.length != 2) {
      println("Usage: PMWorkGroup <filepath> <threadsCount>  ")
      System.exit(1)
    }
    val Array(filepath, threadsCount) = args

    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("PMWorkGroup")
      .getOrCreate()
    try {
      val sc = spark.sparkContext
      sc.setLogLevel("WARN")
      import spark.implicits._

      // Split each tab-separated line into its fields.
      // NOTE(review): rows with fewer than 3 fields will throw below —
      // input is assumed clean; confirm upstream if that can't be guaranteed.
      val lines = sc.textFile(filepath, threadsCount.toInt).map(_.split("\t"))

      // Map each distinct group name to a stable numeric label
      // (sorted so the label assignment is deterministic across runs).
      val pmgroupMap = lines.map(_(2)).distinct.collect.sorted.zipWithIndex.toMap
      val srcRDD = lines.map { fields =>
        RawDataRecord(pmgroupMap(fields(2)).toDouble, fields(1))
      }

      // 80/20 train/test split.
      val splits = srcRDD.randomSplit(Array(0.8, 0.2))
      val trainingDF = splits(0).toDF()
      val testDF = splits(1).toDF()

      // Feature pipeline: text -> tokens -> hashed term frequencies -> TF-IDF.
      val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
      val hashingTF = new HashingTF()
        .setNumFeatures(650000)
        .setInputCol("words")
        .setOutputCol("rawFeatures")
      val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")

      val featurizedData = hashingTF.transform(tokenizer.transform(trainingDF))
      val idfModel = idf.fit(featurizedData)
      val rescaledData = idfModel.transform(featurizedData)

      // Convert a (pmgroup, features) Row into the mllib LabeledPoint the
      // NaiveBayes trainer expects; shared by the train and test paths.
      val rowToLabeledPoint = (row: Row) =>
        LabeledPoint(row.get(0).toString.toDouble, Vectors.parse(row.get(1).toString))

      val trainDataRdd =
        rescaledData.select($"pmgroup", $"features").rdd.map(rowToLabeledPoint)

      trainDataRdd.take(1).foreach(println)
      val model = NaiveBayes.train(trainDataRdd, lambda = 0.6, modelType = "multinomial")

      // Apply the SAME tokenizer/TF/IDF (fitted on training data) to the test set.
      val testrescaledData =
        idfModel.transform(hashingTF.transform(tokenizer.transform(testDF)))
      val testDataRdd =
        testrescaledData.select($"pmgroup", $"features").rdd.map(rowToLabeledPoint)

      // Use the trained model to classify the held-out test set.
      val testpredictionAndLabel = testDataRdd.map(p => (model.predict(p.features), p.label))

      // Accuracy = fraction of predictions matching the true label.
      val testaccuracy =
        1.0 * testpredictionAndLabel.filter(x => x._1 == x._2).count() / testDataRdd.count()
      println("output5：")
      println(testaccuracy)
    } finally {
      // Release Spark resources even if the job fails partway through.
      spark.stop()
    }
  }

}
