package com.shujia.spark.mllib

import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * Demo: linearly inseparable 2-D points made separable by adding a third,
 * derived feature (x1 * x2), then classified with logistic regression.
 *
 * Reads "label,x1|x2" rows from data/points.txt, builds a 3-D feature
 * vector, trains on an 80/20 split and prints the test-set accuracy.
 */
object Demo7Point {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[7]")
      .appName("image")
      .config("spark.sql.shuffle.partitions", 8)
      .getOrCreate()

    // Ensure the SparkSession is released even when the job fails —
    // the original leaked it on any exception.
    try {
      import spark.implicits._
      import org.apache.spark.sql.functions._

      // Read the raw data: a DOUBLE label and a pipe-separated coordinate string.
      val pointDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", ",")
        .schema("label DOUBLE,x STRING")
        .load("data/points.txt")

      // Convert the "x1|x2" string into a dense vector.
      // A third dimension x3 = x1 * x2 is added so that data which is not
      // linearly separable in 2-D becomes separable in 3-D.
      // NOTE: rows whose x column does not contain exactly two numbers will
      // fail the Array(x1, x2) extractor with a MatchError at execution time.
      val dataToVector: UserDefinedFunction = udf((x: String) => {
        val Array(x1, x2) = x
          .split("\\|")
          .map(_.toDouble)

        val x3: Double = x1 * x2

        Vectors.dense(x1, x2, x3)
      })

      // Feature engineering: keep the label, derive the "features" column.
      val dataDF: DataFrame = pointDF
        .select($"label", dataToVector($"x") as "features")

      // Fixed seed makes the split — and therefore the reported accuracy —
      // reproducible across runs (the original was non-deterministic).
      val Array(train, test) = dataDF.randomSplit(Array(0.8, 0.2), seed = 42L)

      // Train the logistic-regression model and score the held-out test set.
      val lr = new LogisticRegression()
      val model: LogisticRegressionModel = lr.fit(train)
      val frame: DataFrame = model.transform(test)

      // Accuracy = correctly predicted rows / total test rows.
      // Guard against an empty test split, which previously produced NaN.
      val total: Long = frame.count()
      val acc: Double =
        if (total == 0L) 0.0
        else frame.where($"label" === $"prediction").count().toDouble / total

      println(s"准确率：$acc")
    } finally {
      spark.stop()
    }
  }
}
