package com.shujia.spark.mllib

import org.apache.spark.SparkContext
import org.apache.spark.internal.Logging
import org.apache.spark.ml.classification.LogisticRegressionModel
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.slf4j.Logger

object Demo03PersonModelTransform extends Logging {

  val logger: Logger = log

  /**
   * Loads a previously trained [[LogisticRegressionModel]] from the path given
   * as the single command-line argument, then demonstrates prediction on a
   * single feature vector and on a small batch of CSV-formatted records.
   *
   * If a nativeIO error occurs (Windows), add a VM option to the run
   * configuration:
   * -Djava.library.path="D:\soft\hadoop-3.2.0\bin"
   * Note: the bin directory must contain hadoop.dll, winutils.exe and the
   * related native files.
   *
   * @param args expects exactly one argument: the saved model path
   */
  def main(args: Array[String]): Unit = {
    // Exactly one argument is required: the model path.
    // (Structured as if/else instead of an early `return`, which is a Scala anti-pattern.)
    if (args.length != 1) {
      logger.error("请检查参数的个数，需要一个参数：模型路径")
    } else {
      runTransform(args(0))
    }
  }

  /** Builds the SparkSession, loads the model, runs the predictions, and shuts Spark down. */
  private def runTransform(modelPath: String): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.replace("$", ""))
      .master("local") // run mode: local
      .getOrCreate()

    try {
      import spark.implicits._

      // Reload the model that was trained and saved earlier.
      val lrModel: LogisticRegressionModel = LogisticRegressionModel.load(modelPath)

      // Predict a single record.
      // Reference training row (libsvm format): 1 1:5.8 2:3.6 3:2.8 4:117.0 5:75.2 6:70.6 7:75
      val preRes: Double = lrModel.predict(Vectors.dense(5.9, 3.5, 2.9, 118.0, 74.2, 71.6, 75))
      println(s"预测结果为：$preRes")

      // Predict a batch: parallelize raw CSV lines into an RDD.
      val sc: SparkContext = spark.sparkContext
      val rdd: RDD[String] = sc.parallelize(List(
        "5.3,3.5,2.5,106.4,67.5,69.1,83",
        "5.9,4.9,3.0,135.0,82.8,79.5,64",
        "6.5,4.2,3.3,140.4,85.0,79.8,69",
        "5.4,4.0,2.0,135.6,88.6,70.1,72",
        "4.5,3.6,2.4,101.1,77.1,65.1,87",
        "4.7,3.8,2.8,98.7,69.3,65.5,77",
        "4.6,3.4,2.2,104.7,69.4,52.3,90",
        "4.5,3.7,3.0,113.9,73.5,72.2,79",
        "5.7,4.3,2.7,120.5,79.1,72.4,75",
        "5.8,5.0,3.7,148.7,90.1,76.2,65"
      ))

      // Each line becomes one dense vector in a "features" column — the
      // column name the model's transform() expects by default.
      val newDataDF: DataFrame = rdd
        .map(line => Tuple1(Vectors.dense(line.split(",").map(_.toDouble))))
        .toDF("features")

      lrModel
        .transform(newDataDF)
        .show()
    } finally {
      // Always release the SparkSession; the original version leaked it.
      spark.stop()
    }
  }

}
