package com.xinqing.bigdata.ml.piepline

import org.apache.spark.SparkConf
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.feature.{HashingTF, Tokenizer}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.ml.linalg.{Vector, Vectors}

/**
  * @Author CHQ
  * @Date 2020/8/6 13:46
  * @Description Spark ML Pipeline example: trains a Tokenizer -> HashingTF ->
  *              LogisticRegression text classifier, persists and reloads the
  *              fitted model, then scores a small test set.
  */
object ClassificationPipeline {

  /**
    * Entry point: builds and fits a three-stage text-classification pipeline
    * (Tokenizer -> HashingTF -> LogisticRegression), saves both the fitted
    * model and the unfit pipeline, reloads the model, and prints predictions
    * for a small test set.
    */
  def main(args: Array[String]): Unit = {

    // Workaround for Hadoop on Windows: without winutils.exe configured,
    // Spark reports "null\bin\winutils.exe" errors.
    System.setProperty("hadoop.home.dir", "D:\\hadoop")

    // Build the SparkSession used to create DataFrames.
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("ClassificationPipeline Test")
    val sparkSession: SparkSession = SparkSession.builder().config(conf).getOrCreate()

    try {
      // Training data as (id, text, label). The "label" column is the target
      // the LogisticRegression stage learns to predict.
      val training: DataFrame = sparkSession.createDataFrame(Seq(
        (0L, "a b c d e spark", 1.0),
        (1L, "b d", 0.0),
        (2L, "spark f g h", 1.0),
        (3L, "hadoop mapreduce", 0.0)
      )).toDF("id", "text", "label")

      // Pipeline stage 1: split raw text into whitespace-separated words.
      val tokenizer: Tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
      // Pipeline stage 2: hash words into a 1000-dimension term-frequency vector.
      val hashingTF: HashingTF = new HashingTF().setInputCol(tokenizer.getOutputCol).setOutputCol("features").setNumFeatures(1000)
      // Pipeline stage 3: logistic regression classifier, capped at 20 iterations.
      val regression: LogisticRegression = new LogisticRegression().setMaxIter(20)

      // Assemble the stages into a single pipeline.
      val pipeline: Pipeline = new Pipeline().setStages(Array(tokenizer, hashingTF, regression))

      // Fit the pipeline on the training data.
      val model: PipelineModel = pipeline.fit(training)

      // Persist the fitted model and the unfit pipeline for later reuse.
      model.write.overwrite().save("/tmp/spark-logistic-regression-model")
      pipeline.write.overwrite().save("/tmp/unfit-lr-model")
      // Reload the fitted model to demonstrate round-trip persistence.
      val sameModel: PipelineModel = PipelineModel.load("/tmp/spark-logistic-regression-model")

      // Test data has no label column; the comments note the prediction we
      // expect the model to produce for each row.
      val test: DataFrame = sparkSession.createDataFrame(Seq(
        (4L, "spark i j k"), // expected prediction: 1.0
        (5L, "l m n "), // expected prediction: 0.0
        (6L, "spark hadoop spark"), // expected prediction: 1.0
        (7L, "apache hadoop") // expected prediction: 0.0
      )).toDF("id", "text")

      // Score the test set and print (id, text) -> (probability, prediction).
      model.transform(test)
        .select("id", "text", "probability", "prediction")
        .collect()
        .foreach({
          case Row(id: Long, text: String, probability: Vector, prediction: Double) => {
            println(s"($id,$text)   --->  ($probability,$prediction)")
          }
        })

      // Sample output:
      //(4,spark i j k)   --->  ([0.004784145917034348,0.9952158540829656],1.0)
      //(5,l m n )   --->  ([0.932844948773724,0.06715505122627598],0.0)
      // (6,spark hadoop spark)   --->  ([8.573931668269729E-4,0.9991426068331729],1.0)
      // (7,apache hadoop)   --->  ([0.9998604597299573,1.3954027004262252E-4],0.0)
    } finally {
      // Release Spark resources even if the job fails (the original leaked
      // the session by never calling stop()).
      sparkSession.stop()
    }
  }
}
