package com.timeriver.machine_learning.binaryclassification

import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.mllib.evaluation.{BinaryClassificationMetrics, RegressionMetrics}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
 * Binary classification of the Wisconsin breast-cancer dataset with Spark ML
 * logistic regression, evaluated via ROC/AUC and saved to disk.
 */
object LogisticRegressionPipeline {
  def main(args: Array[String]): Unit = {

    val session: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName("逻辑回归二分类")
      .getOrCreate()

    // Each row: sample id, 9 integer-valued features, class label (2 = benign, 4 = malignant).
    val ds: Dataset[String] = session.read
      .textFile("D:\\workspace\\gitee_space\\spark-ml-machine-learning\\data\\breast-cancer-wisconsin.data")

    ds.show(5, false)

    import session.implicits._

    val data: Dataset[LabeledPoint] = ds.map(_.trim)
      // Drop blank lines and rows containing missing values ("?").
      .filter(line => !(line.isEmpty || line.contains("?")))
      .map(line => {
        val array: Array[Double] = line.split(",").map(_.toDouble)
        // Binomial LogisticRegression requires labels in {0, 1}: map 4 (malignant) -> 1.0,
        // 2 (benign) -> 0.0 instead of feeding the raw 2/4 class codes.
        val label: Double = if (array.last == 4.0) 1.0 else 0.0
        // Features are columns 1..9 only. Column 0 is the sample id and the last column is
        // the label itself — the original slice(1, array.size) leaked the label into the
        // feature vector.
        LabeledPoint(label, Vectors.dense(array.slice(1, array.length - 1)))
      })

    // Fixed seed so the 70/30 split is reproducible across runs.
    val Array(train, test): Array[Dataset[LabeledPoint]] = data.randomSplit(Array(0.7, 0.3), 123)

    // elasticNetParam only has an effect when regParam > 0 (the default regParam = 0.0
    // disables regularization entirely), so set a small penalty explicitly.
    val regression: LogisticRegression = new LogisticRegression().setMaxIter(30)
      .setFitIntercept(true)
      .setRegParam(0.01)
      .setElasticNetParam(0.8)

    val pipeline: Pipeline = new Pipeline().setStages(Array(regression))

    val model: PipelineModel = pipeline.fit(train)

    val frame: DataFrame = model.transform(test)

    // BinaryClassificationMetrics expects (score, label) pairs. Use the positive-class
    // probability as the score: feeding the hard 0/1 "prediction" column instead produces
    // a degenerate, single-threshold ROC curve.
    val value: RDD[(Double, Double)] = frame.rdd.map(row => {
      val score: Double = row.getAs[org.apache.spark.ml.linalg.Vector]("probability")(1)
      (score, row.getAs[Double]("label"))
    })

    val metrics = new BinaryClassificationMetrics(value)

    /** 6. Model evaluation */
    metrics.roc().foreach(println)
    println(s"areaUnderROC = ${metrics.areaUnderROC()}")

    /** Save the model; overwrite() so re-runs don't fail on an existing output path. */
    model.write.overwrite().save("./model/logisticregression")

    session.stop()
  }
}
