package com.shujia.mllib

import org.apache.spark.ml.classification.LogisticRegressionModel
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Loads a previously trained logistic-regression model and demonstrates the
 * two prediction paths:
 *   1. `predict`   — score a single feature vector.
 *   2. `transform` — score a whole DataFrame of feature vectors.
 */
object Demo03PersonPredict {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo03PersonPredict")
      .master("local[*]")
      .config("spark.sql.shuffle.partitions", 8)
      .getOrCreate()

    // Hoisted here so toDF is available everywhere below.
    import spark.implicits._

    // Load the model saved by the training job.
    val logisticRegModel: LogisticRegressionModel = LogisticRegressionModel.load("Spark/data/mllib/person")

    // --- Single record: predict() ---
    // Sample input line (libsvm-like): 1 1:5.8 2:4.7 3:3.0 4:138.4 5:93.5 6:85.1 7:72
    val personVec: linalg.Vector = Vectors.dense(Array(5.8, 4.7, 3.0, 138.4, 93.5, 85.1, 72.0))

    val res: Double = logisticRegModel.predict(personVec)
    println(res)

    // --- Batch of records: transform() ---
    // Each line is read as a single string column named "person".
    val personDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("person String")
      .load("Spark/data/mllib/data/person.txt")

    personDF.show()

    // Turn each line into a dense feature vector.
    // Tokens look like "index:value"; filtering on ':' skips the optional
    // leading label token (the "1" in "1 1:5.8 ..."), which previously would
    // have thrown ArrayIndexOutOfBoundsException in split(":")(1).
    val personVecRDD: RDD[Tuple1[linalg.Vector]] = personDF
      .rdd
      .map(row => {
        val personStr: String = row.getAs[String]("person")
        val vecArr: Array[Double] = personStr
          .split(" ")
          .filter(_.contains(":"))
          .map(_.split(":")(1).toDouble)
        Tuple1(Vectors.dense(vecArr))
      })

    // Column must be named "features" — that is the model's default input column.
    val personVecDF: DataFrame = personVecRDD.toDF("features")

    val resDF: DataFrame = logisticRegModel.transform(personVecDF)
    resDF.show(false)

    // Release local-mode resources before the JVM exits.
    spark.stop()
  }

}
