package com.shujia.spark.mllib

import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.functions.count
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object Demo1Train {

  /**
    * Trains a logistic-regression model on a CSV training set
    * ("data/训练集.csv"), evaluates its accuracy on a held-out split,
    * and saves the fitted model to "data/model".
    *
    * Pipeline: read CSV -> parse columns to Doubles -> assemble a
    * (label, features) DataFrame -> 80/20 random split -> fit
    * LogisticRegression -> score test split -> print accuracy -> save model.
    */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local[4]")
      .appName("mllib")
      .getOrCreate()

    import spark.implicits._

    // Ensure the SparkSession is always released, even if the job fails.
    try {
      val data: DataFrame = spark.read
        .format("csv")
        .option("header", "true") // default is false; the file has a header row
        .load("data/训练集.csv")

      data.printSchema()
      data.show()

      /**
        * Expected schema (all columns arrive as strings from the CSV reader):
        * root
        * |-- USER_ID: string (nullable = true)
        * |-- FLOW: string (nullable = true)
        * |-- FLOW_LAST_ONE: string (nullable = true)
        * |-- FLOW_LAST_TWO: string (nullable = true)
        * |-- MONTH_FEE: string (nullable = true)
        * |-- MONTHS_3AVG: string (nullable = true)
        * |-- BINDEXP_DATE: string (nullable = true)
        * |-- PHONE_CHANGE: string (nullable = true)
        * |-- AGE: string (nullable = true)
        * |-- OPEN_DATE: string (nullable = true)
        * |-- REMOVE_TAG: string (nullable = true)
        */

      // Parse every column to Double and build (label, features) rows.
      // NOTE(review): getAs[String](...).toDouble will throw on null or
      // non-numeric cells — assumes the training CSV is fully clean; confirm.
      val data1: DataFrame = data.map(row => {
        val USER_ID: Double = row.getAs[String]("USER_ID").toDouble
        val FLOW: Double = row.getAs[String]("FLOW").toDouble
        val FLOW_LAST_ONE: Double = row.getAs[String]("FLOW_LAST_ONE").toDouble
        val FLOW_LAST_TWO: Double = row.getAs[String]("FLOW_LAST_TWO").toDouble
        val MONTH_FEE: Double = row.getAs[String]("MONTH_FEE").toDouble
        val MONTHS_3AVG: Double = row.getAs[String]("MONTHS_3AVG").toDouble
        val BINDEXP_DATE: Double = row.getAs[String]("BINDEXP_DATE").toDouble
        val PHONE_CHANGE: Double = row.getAs[String]("PHONE_CHANGE").toDouble
        val AGE: Double = row.getAs[String]("AGE").toDouble
        val OPEN_DATE: Double = row.getAs[String]("OPEN_DATE").toDouble

        val REMOVE_TAG: String = row.getAs[String]("REMOVE_TAG")

        // Target value (y): "A" => positive class (1.0), anything else => 0.0
        val label: Double = REMOVE_TAG match {
          case "A" => 1.0
          case _ => 0.0
        }

        // Feature values.
        // NOTE(review): USER_ID is an identifier, not a measurement — using it
        // as a feature likely leaks no signal or spurious signal; consider
        // dropping it (kept here to preserve existing behavior).
        val array: Array[Double] = Array(USER_ID, FLOW, FLOW_LAST_ONE, FLOW_LAST_TWO, MONTH_FEE, MONTHS_3AVG, BINDEXP_DATE, PHONE_CHANGE, AGE, OPEN_DATE)

        // Assemble the features into an ML vector.
        val features: linalg.Vector = Vectors.dense(array)
        (label, features)
      }).toDF("label", "features")

      data1.show(false)

      // 1. Split into training and test sets (80% / 20%, random).
      val split: Array[Dataset[Row]] = data1.randomSplit(Array(0.8, 0.2))
      // Training set
      val trainData: Dataset[Row] = split(0)
      // Test set
      val testData: Dataset[Row] = split(1)

      /**
        * Choose the algorithm: logistic regression
        * (expects "label" and "features" columns, which data1 provides).
        */
      val lr: LogisticRegression = new LogisticRegression()

      // Fit the model on the training set.
      // Training is distributed over Spark Core under the hood.
      val model: LogisticRegressionModel = lr.fit(trainData)

      // Score the test set with the trained model to judge accuracy.
      val resultDF: DataFrame = model.transform(testData)

      resultDF.show(1000, false)

      import org.apache.spark.sql.functions._

      // Accuracy = (# rows where label == prediction) / (# rows)
      resultDF
        .select(sum(when($"label" === $"prediction", 1).otherwise(0)) / count($"label"))
        .show()

      // Persist the model (overwriting any previous save at this path).
      model.write.overwrite().save("data/model")
    } finally {
      // Fix: the original never stopped the session, leaking the Spark context.
      spark.stop()
    }
  }

}
