package com.shujia.spark.mllib

import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo3Person {

  /**
    * Demo: train a logistic-regression classifier on libsvm-formatted
    * body-metric data, report its coefficients, evaluate accuracy on a
    * held-out test split, and persist the fitted model to disk.
    */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("person")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._

    // BUG FIX: the original never stopped the SparkSession, leaking the
    // underlying SparkContext and its resources; try/finally guarantees
    // shutdown even if training or I/O fails.
    try {
      /**
        * Read libsvm-formatted data: one row per line, a numeric label
        * followed by a sparse feature vector.
        */
      val data: DataFrame = spark
        .read
        .format("libsvm")
        .load("data/人体指标.txt")

      data.printSchema()
      data.show()

      /**
        * Randomly split the data into a training set (~80%) and a test
        * set (~20%). NOTE(review): no seed is supplied, so the split —
        * and hence the reported accuracy — differs between runs.
        */
      val Array(train: DataFrame, test: DataFrame) = data.randomSplit(Array(0.8, 0.2))

      /**
        * Build the algorithm and fit the model on the training set.
        */
      val logisticRegression = new LogisticRegression()
      val model: LogisticRegressionModel = logisticRegression.fit(train)

      println(s"截距${model.intercept}")
      println(s"权重${model.coefficients}")

      /**
        * Evaluate the model on the test set.
        *
        * transform: batch prediction (adds a "prediction" column)
        * predict:   single-row prediction
        */
      val tranDF: DataFrame = model.transform(test)

      tranDF.show(1000, truncate = false)

      // Accuracy = correctly predicted rows / total test rows.
      val p: Double = tranDF.where($"label" === $"prediction").count().toDouble / tranDF.count()

      println(s"准确率：$p")

      /**
        * Save the fitted model, overwriting any previous version at the
        * same path.
        */
      model
        .write
        .overwrite()
        .save("data/person_model")
    } finally {
      spark.stop()
    }
  }

}
