package com.shujia.mllib

import org.apache.spark.SparkContext
import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.{count, sum, when}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
  * Demo: binary classification with Spark ML LogisticRegression.
  *
  * Reads a whitespace/comma-delimited text file ("label x1,x2" per line),
  * adds an interaction feature (x1 * x2) to lift the data into a higher
  * dimension, trains a logistic-regression model on a 70/30 random split,
  * and prints the accuracy on the held-out 30%.
  */
object Demo7Feng {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local[8]")
      .appName("image")
      .getOrCreate()
    import spark.implicits._

    val sc: SparkContext = spark.sparkContext

    try {
      // Each input line looks like: "<label> <x1>,<x2>"
      val data: RDD[String] = sc.textFile("Spark/data/data.txt")

      val df: DataFrame = data.map(line => {
        val split: Array[String] = line.split(" ")
        val label: Double = split(0).toDouble

        val fsplit: Array[String] = split(1).split(",")
        val x1: Double = fsplit(0).toDouble
        val x2: Double = fsplit(1).toDouble

        // Dimension lifting: add the interaction term x1*x2 as a third
        // feature so a linear model can separate non-linear data.
        val x3: Double = x1 * x2

        val features: linalg.Vector = Vectors.dense(Array(x1, x2, x3))

        (label, features)
      })
        // Column names "label"/"features" are the defaults expected by
        // Spark ML estimators.
        .toDF("label", "features")

      // 70% training / 30% test random split.
      val split: Array[Dataset[Row]] = df.randomSplit(Array(0.7, 0.3))
      val train: Dataset[Row] = split(0)
      val test: Dataset[Row] = split(1)

      /**
        * Choose the algorithm: logistic regression with default
        * hyper-parameters.
        */
      val logisticRegression = new LogisticRegression()

      // Fit the model on the training split.
      val model: LogisticRegressionModel = logisticRegression.fit(train)

      // Apply the model to the test split; transform() appends a
      // "prediction" column (among others) to the DataFrame.
      val testDF: DataFrame = model.transform(test)

      // Accuracy = (# rows where label == prediction) / (# rows).
      testDF
        .select(
          (sum(when($"label" === $"prediction", 1.0).otherwise(0.0)) / count($"label"))
            .as("accuracy")
        )
        .show()
    } finally {
      // Release the SparkSession (and its SparkContext) even if the
      // pipeline above throws.
      spark.stop()
    }
  }

}
