package com.shujia.spark.mllib

import org.apache.spark.SparkContext
import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel, LogisticRegressionTrainingSummary}
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.{count, sum, when}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
  * Demo: binary classification of 2-D points with logistic regression.
  *
  * Reads "data/points.txt" where each line is "label x1,x2", lifts the
  * two raw features into three dimensions by adding the interaction term
  * x1 * x2 (the original 2-D data is not linearly separable), trains a
  * LogisticRegression model on an 80/20 split, and prints the accuracy
  * on the held-out test set.
  */
object Demo5Point {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("train")
      .getOrCreate()

    import spark.implicits._

    val sc: SparkContext = spark.sparkContext

    /**
      * 1. Load the raw data and build the feature DataFrame.
      */
    val data: RDD[String] = sc.textFile("data/points.txt")

    val pointDf: DataFrame = data.map(line => {
      // Line format: "<label> <x1>,<x2>" — label first, features comma-separated.
      val split: Array[String] = line.split(" ")
      val label: Double = split(0).toDouble

      val xs: String = split(1)
      val featuresArray: Array[Double] = xs.split(",").map(_.toDouble)
      val x1: Double = featuresArray(0)
      val x2: Double = featuresArray(1)

      // The 2-D data is not linearly separable; add the interaction term
      // x1 * x2 as a third dimension to make it separable.
      val x3: Double = x1 * x2

      // Pack the features into a dense vector, as required by Spark ML.
      val features: linalg.Vector = Vectors.dense(x1, x2, x3)
      (label, features)
    }).toDF("label", "features")

    // Random 80/20 split into training and test sets.
    val array: Array[Dataset[Row]] = pointDf.randomSplit(Array(0.8, 0.2))
    val train: Dataset[Row] = array(0)
    val test: Dataset[Row] = array(1)

    /**
      * 2. Choose the algorithm.
      */
    val lr = new LogisticRegression()

    /**
      * 3. Fit the model on the training data.
      */
    val model: LogisticRegressionModel = lr.fit(train)

    /**
      * 4. Evaluate on the held-out test set.
      */
    val resultDF: DataFrame = model.transform(test)

    resultDF.show()

    // Accuracy = (# rows where prediction matches label) / (total rows).
    resultDF
      .select(sum(when($"label" === $"prediction", 1).otherwise(0)) / count($"label"))
      .show()

    // Release the SparkSession (and its underlying SparkContext);
    // the original leaked it.
    spark.stop()
  }

}
