package com.timeriver.machine_learning.binaryclassification

import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
  * Logistic-regression training on the Wisconsin breast-cancer dataset.
  *
  * Input format (breast-cancer-wisconsin.data): CSV rows of
  * `sample-id, 9 numeric features, class` where class is 2 (benign) or 4 (malignant).
  * Rows containing "?" (missing values) are dropped.
  *
  * Usage: an optional first CLI argument overrides the default data-file path.
  */
object LogisticRegressionAlg {
  def main(args: Array[String]): Unit = {

    val session: SparkSession = SparkSession.builder()
      .appName("文本数据源进行逻辑回归预测")
      .master("local[4]")
      .getOrCreate()

    import session.implicits._

    /** 1. Read the data; the path may be overridden via the first CLI argument. */
    val path: String =
      if (args.nonEmpty) args(0)
      else "D:\\workspace\\gitee_space\\spark-ml-machine-learning\\data\\breast-cancer-wisconsin.data"

    val ds: Dataset[String] = session.read.textFile(path)

    val data: Dataset[LabeledPoint] = ds.map(_.trim)
      .filter(line => !(line.isEmpty || line.contains("?")))
      .map(line => {
        val array: Array[Double] = line.split(",").map(_.toDouble)
        // FIX: drop BOTH the id column (index 0) and the label column (last).
        // The original slice(1, array.size) leaked the label into the features,
        // which makes the classifier trivially (and uselessly) perfect.
        val features = Vectors.dense(array.slice(1, array.length - 1))
        // FIX: Spark ML binomial LogisticRegression requires labels in {0, 1};
        // the raw file encodes them as 2 (benign) / 4 (malignant).
        val label = if (array.last == 4.0) 1.0 else 0.0
        LabeledPoint(label, features)
      })

    /** 2. Train/test split (60/40, fixed seed for reproducibility). */
    val Array(trainData, testData) = data.randomSplit(Array(0.6, 0.4), 123)

    /** 3. Build the estimator and fit the model. */
    val regression: LogisticRegression = new LogisticRegression()
      .setMaxIter(50)
      .setFitIntercept(true)

    val model: LogisticRegressionModel = regression.fit(trainData)

    /** 4. Predict on the held-out set. */
    val pred: DataFrame = model.transform(testData)

    pred.show(5)

    /** 5. Build (score, label) pairs for evaluation.
      * FIX: BinaryClassificationMetrics expects a continuous score, not the hard
      * 0/1 prediction — a hard prediction collapses the ROC curve to one point.
      * Use the probability of the positive class (index 1) instead.
      */
    val value: RDD[(Double, Double)] = pred.rdd.map(row => {
      val positiveProb = row.getAs[Vector]("probability")(1)
      (positiveProb, row.getAs[Double]("label"))
    })

    val metrics = new BinaryClassificationMetrics(value)

    /** 6. Evaluate: print the ROC curve points and the area under it. */
    metrics.roc().foreach(println)
    println(s"areaUnderROC = ${metrics.areaUnderROC()}")

    /** 7. Stop the Spark session. */
    session.stop()
  }
}
