package com.shujia.mllib

import org.apache.spark.ml.classification.LogisticRegressionModel
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo03PersonPredict {

  /**
   * Loads a previously trained [[LogisticRegressionModel]] and demonstrates the
   * two ways to score data with it:
   *
   *   1. `predict`   — score one feature vector, returning the predicted label
   *   2. `transform` — score a whole DataFrame of feature vectors at once
   *
   * The model is expected at "spark/data/mllib/person" (produced by the
   * matching training job).
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[*]")
      .appName("Demo03PersonPredict")
      .getOrCreate()

    // Ensure the SparkSession is always stopped, even if model loading or
    // prediction throws — otherwise the local Spark UI/threads leak until
    // JVM shutdown.
    try {
      val lRModel: LogisticRegressionModel = LogisticRegressionModel.load("spark/data/mllib/person")

      // 1) Single-record prediction with predict().
      // Original libsvm record: 1:5.7 2:4.2 3:3.0 4:131.1 5:80.2 6:79.8 7:65
      // The model was trained on 10 features, so the trailing slots are padded
      // with zeros to match the expected vector size.
      val predictLabel: Double = lRModel.predict(Vectors.dense(5.7, 4.2, 3.0, 131.1, 80.2, 79.8, 65, 0, 0, 0))
      println(predictLabel)

      import spark.implicits._

      // 2) Batch prediction with transform().
      // Build a DataFrame whose single column is named "features" — the column
      // name the model reads by default. Sparse vectors of size 10 carry the
      // seven populated features; the remaining indices are implicitly zero.
      val df: DataFrame = spark.sparkContext.parallelize(List(
        Vectors.sparse(10, Array(0, 1, 2, 3, 4, 5, 6), Array(5.7, 4.2, 3.0, 131.1, 80.2, 79.8, 65))
        , Vectors.sparse(10, Array(0, 1, 2, 3, 4, 5, 6), Array(5.8, 4.2, 3.0, 131.1, 80.2, 79.8, 65))
        , Vectors.sparse(10, Array(0, 1, 2, 3, 4, 5, 6), Array(5.7, 4.3, 3.0, 131.1, 80.2, 79.8, 65))
        , Vectors.sparse(10, Array(0, 1, 2, 3, 4, 5, 6), Array(5.7, 4.2, 3.1, 131.1, 80.2, 79.8, 65))
        , Vectors.sparse(10, Array(0, 1, 2, 3, 4, 5, 6), Array(1.7, 3, 2.0, 100.2, 70.2, 70, 65))
      )).map(vec => Tuple1(vec))
        .toDF("features")

      // transform() appends rawPrediction / probability / prediction columns.
      val predictDF: DataFrame = lRModel.transform(df)
      predictDF.show()
    } finally {
      spark.stop()
    }
  }

}
