package com.fwmagic.spark.ml.loss

import com.fwmagic.spark.util.SparkUtils
import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.ml.feature.{MinMaxScaler, MinMaxScalerModel}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * Trains a logistic-regression model that predicts customer churn
 * probability from behavioural features, evaluates its accuracy on a
 * held-out split, and persists the fitted model to disk.
 */
object LossProbabilityTrain {

  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkUtils.getSparkSession(this.getClass.getSimpleName)

    // 1. Load the labelled sample data.
    // Columns: label,gid,3_cs,15_cs,3_xf,15_xf,3_th,15_th,3_hp,15_hp,3_cp,15_cp,last_dl,last_xf
    val sampleDF: DataFrame = spark.read
      .options(Map("header" -> "true", "inferSchema" -> "true"))
      .csv("data/loss_predict/liushi_sample.csv")

    // Assemble the raw feature columns into a single ML vector column
    // via the project-provided arr2vec UDF.
    import com.fwmagic.spark.ml.utils.VectorUtils._
    spark.udf.register("arr2vec", arr2vec)

    val sampleVecs: DataFrame = sampleDF.selectExpr(
      "label",
      "arr2vec(array(3_cs,15_cs,3_xf,15_xf,3_th,15_th,3_hp,15_hp,3_cp,15_cp,last_dl,last_xf)) as vec"
    )

    // The vectorised frame is reused for both splits below.
    sampleVecs.cache()

    // 2. Split BEFORE fitting the scaler so the test rows never leak into
    //    the normalisation statistics (the original code fitted MinMaxScaler
    //    on the full data set, biasing the accuracy estimate).
    //    A fixed seed makes the split — and thus the metric — reproducible.
    val Array(trainRaw, testRaw) = sampleVecs.randomSplit(Array(0.8, 0.2), seed = 42L)

    // MinMax-scale the feature vector so features with very different value
    // ranges contribute comparably. Fit on the training split only.
    val scaler: MinMaxScaler = new MinMaxScaler()
      .setInputCol("vec")
      .setOutputCol("features")
    val scalerModel: MinMaxScalerModel = scaler.fit(trainRaw)

    val train: DataFrame = scalerModel.transform(trainRaw).drop("vec")
    val test: DataFrame = scalerModel.transform(testRaw).drop("vec")

    // 3. Train the logistic-regression classifier on the scaled training set.
    val logisticRegression: LogisticRegression = new LogisticRegression()
      .setLabelCol("label")
      .setFeaturesCol("features")
    val regressionModel: LogisticRegressionModel = logisticRegression.fit(train)

    // 4. Evaluate accuracy on the held-out test split.
    val predictions: DataFrame = regressionModel.transform(test)
    predictions.show(100, false)

    val total: Long = test.count()
    val correct: Long = predictions.where("label=prediction").count()
    // Guard against an empty test split (randomSplit can, in principle,
    // leave one side empty) to avoid division by zero.
    val accuracy: Double = if (total == 0L) 0.0 else correct.toDouble / total
    println(accuracy)

    // 5. Persist the trained model. overwrite() lets the job be re-run
    //    without failing because the output path already exists.
    regressionModel.write.overwrite().save("data/loss_predict/model")

    spark.close()
  }
}
