package com.shujia.spark.mllib

import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
  * Trains a logistic-regression classifier on libsvm-formatted body-metric data,
  * reports test-set accuracy, and persists the fitted model.
  *
  * Expected input schema (libsvm): label + sparse feature vector, e.g.
  * {{{
  * +-----+------------------------------------------------------+
  * |label|features                                              |
  * +-----+------------------------------------------------------+
  * |0.0  |(7,[0,1,2,3,4,5,6],[5.3,3.5,2.5,106.4,67.5,69.1,83.0])|
  * |1.0  |(7,[0,1,2,3,4,5,6],[5.9,3.9,3.0,135.0,82.8,79.5,64.0])|
  * }}}
  */
object Demo2TrainModel {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("source")
      .master("local")
      .config("spark.sql.shuffle.partitions", "1")
      .getOrCreate()

    // Implicit conversions — enables the $"col" column syntax below.
    import spark.implicits._
    // All Spark SQL functions (sum, when, count, ...).
    import org.apache.spark.sql.functions._

    // 1. Read the data. "libsvm" format yields (label, features) columns.
    val data: DataFrame = spark.read.format("libsvm").load("data/人体指标.txt")

    data.printSchema()
    data.show(false)

    // 2. Split into training and test sets (80/20).
    //    A fixed seed makes the split — and hence the reported accuracy —
    //    reproducible across runs.
    val splitDF: Array[Dataset[Row]] = data.randomSplit(Array(0.8, 0.2), seed = 42L)

    // Training set
    val trainData: Dataset[Row] = splitDF(0)
    // Test set (was misspelled "textData")
    val testData: Dataset[Row] = splitDF(1)

    /**
      * 3. Choose an algorithm based on the label:
      *    discrete label -> classification, continuous label -> regression.
      *    The label here is 0.0/1.0, so we use logistic regression.
      */

    // Build the estimator and set its hyper-parameters.
    val logisticRegression: LogisticRegression = new LogisticRegression()
      .setMaxIter(20)
      .setFitIntercept(true)

    // 4. Fit the model on the training set.
    //    Spark performs the optimization in a distributed fashion.
    val model: LogisticRegressionModel = logisticRegression.fit(trainData)

    // 5. Score the held-out test set; transform() appends a "prediction" column.
    val result: DataFrame = model.transform(testData)

    // 6. Accuracy = (# rows where label == prediction) / (# rows).
    result
      .select(sum(when($"label" === $"prediction", 1).otherwise(0)) / count($"label"))
      .show()

    // 7. Persist the model (local path here; an hdfs:// URI also works).
    //    overwrite() prevents "path already exists" failures on re-runs.
    model.write.overwrite().save("data/model")

    // Release the session's resources.
    spark.stop()
  }

}
