package com.shujia.spark.mllib

import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel, LogisticRegressionTrainingSummary}
import org.apache.spark.sql.functions.{count, sum, when}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
  * Trains a multinomial logistic-regression classifier on body-metric data
  * stored in libsvm format, reports train/test quality metrics, and persists
  * the fitted model to disk.
  *
  * Side effects: reads "data/人体指标.txt", prints metrics to stdout, and
  * overwrites the saved model at "data/model".
  */
object Demo3PersonTrain {
  def main(args: Array[String]): Unit = {

    // NOTE(review): master is hard-coded to "local" — fine for a demo,
    // but submit-time configuration would be preferable in production.
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("train")
      .getOrCreate()

    try {

      /**
        * Read the training data (libsvm format: label index:value ...).
        */
      val data: DataFrame = spark
        .read
        .format("libsvm")
        .load("data/人体指标.txt")

      /**
        * 1. Split the data into a training set (80%) and a test set (20%).
        *    A fixed seed makes the split — and the reported metrics —
        *    reproducible across runs.
        */
      val array: Array[Dataset[Row]] = data.randomSplit(Array(0.8, 0.2), seed = 42L)
      val train: Dataset[Row] = array(0)
      val test: Dataset[Row] = array(1)

      /**
        * 2. Choose the algorithm: logistic regression with default
        *    hyper-parameters.
        */
      val lr = new LogisticRegression()

      /**
        * 3. Fit the model on the training data.
        */
      val model: LogisticRegressionModel = lr.fit(train)

      println(s"Multinomial coefficients: ${model.coefficientMatrix}")
      println(s"Multinomial intercepts: ${model.interceptVector}")

      /**
        * 4. Inspect the model's quality on the TRAINING set.
        */
      val summary: LogisticRegressionTrainingSummary = model.summary

      // Weighted recall across all classes
      println(summary.weightedRecall)
      // Weighted precision across all classes
      println(summary.weightedPrecision)

      /**
        * 5. Evaluate accuracy on the held-out TEST set:
        *    accuracy = (# rows where label == prediction) / (total rows).
        */
      val resultDF: DataFrame = model.transform(test)

      resultDF.show()
      import spark.implicits._

      resultDF
        .select(sum(when($"label" === $"prediction", 1).otherwise(0)) / count($"label"))
        .show()

      /**
        * 6. Persist the fitted model (overwriting any previous save).
        */
      model.write.overwrite().save("data/model")

    } finally {
      // Always release the Spark session, even if training fails.
      spark.stop()
    }
  }

}
