package com.shujia.mllib

import org.apache.spark.ml.classification.LogisticRegressionModel
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo03PersonPredict {

  /**
   * Parses one sample in the space-separated "index:value" format
   * (e.g. "1:5.5 2:3.3 ... 7:97") into a dense feature vector the
   * model can consume. Only the value after each ':' is kept; the
   * index prefix is assumed to already be in ascending order.
   */
  private def parseFeatures(line: String): linalg.Vector =
    Vectors.dense(line.split(" ").map(_.split(":")(1).toDouble))

  /**
   * Loads a previously saved logistic-regression model and uses it to
   * predict (a) a single hand-written sample and (b) a small batch of
   * samples via a DataFrame.
   */
  def main(args: Array[String]): Unit = {
    // 1. Build the SparkSession.
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo03PersonPredict")
      .master("local[*]")
      .config("spark.sql.shuffle.partitions", "8")
      .getOrCreate()

    try {
      // 2. Load the persisted model from disk.
      val logisticRegressionModel: LogisticRegressionModel =
        LogisticRegressionModel.load("Spark/data/mllib/person")

      // 3a. Predict a single record: convert the raw string into a
      //     vector so the model can score it directly.
      val personData: String = "1:5.3 2:3.6 3:3.0 4:127.0 5:85.8 6:70.8 7:69"
      val personVec: linalg.Vector = parseFeatures(personData)

      val res: Double = logisticRegressionModel.predict(personVec)
      println(s"预测的结果为：$res")

      // 3b. Predict a batch of records: build a DataFrame with a
      //     "features" column and run the model's transform over it.
      val personDataList: List[String] = List[String](
        "1:5.3 2:3.5 3:2.5 4:106.4 5:67.5 6:69.1 7:83"
        , "1:5.9 2:3.9 3:3.0 4:135.0 5:82.8 6:79.5 7:64"
        , "1:6.5 2:4.2 3:3.3 4:140.4 5:85.0 6:79.8 7:69"
        , "1:5.4 2:4.0 3:3.0 4:135.6 5:88.6 6:70.1 7:72")
      import spark.implicits._

      val personListDF: DataFrame = spark
        .sparkContext
        .parallelize(personDataList)
        // Tuple1 gives the row a single column, renamed to "features",
        // which is the column name the model expects by default.
        .map(line => Tuple1(parseFeatures(line)))
        .toDF("features")

      val preResDF: DataFrame = logisticRegressionModel.transform(personListDF)
      preResDF.show()
    } finally {
      // Always release the session (and its underlying SparkContext),
      // even if loading or prediction throws.
      spark.stop()
    }
  }
}
