package cn.doitedu.ml

import cn.doitedu.commons.utils.SparkUtil
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.feature.MinMaxScaler
import org.apache.spark.ml.linalg.Vectors

import scala.collection.mutable
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.mllib.linalg.Matrix
/**
 * @Title: LossProbabilityPredict
 * @Package cn.doitedu.ml
 * @Description: 流失率风险预测 (user churn risk prediction)
 * @Author hunter@doitedu.cn
 * @date 2020/8/18 11:51
 */
object LossProbabilityPredict {

  /**
   * Entry point: trains a logistic-regression churn-risk model on a labeled
   * sample set, scores a test set, and prints two evaluations — ROC AUC and
   * the confusion matrix.
   *
   * Pipeline: load CSVs -> assemble feature vector -> min-max scale ->
   * logistic regression -> predict -> evaluate.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkUtil.getSparkSession("用户流失风险预测")
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Load the labeled training sample set
    val sample = spark.read.option("header", "true").option("inferSchema", "true").csv("portrait/testdata/loss_probability/sample")
    // Load the test set to be scored
    val test = spark.read.option("header", "true").option("inferSchema", "true").csv("portrait/testdata/loss_probability/test")

    // UDF: pack an array column of doubles into an ML dense vector
    val arr2Vec = udf((arr: mutable.WrappedArray[Double]) => {
      Vectors.dense(arr.toArray)
    })

    // Feature engineering: assemble the 12 behavioral metrics into one vector
    val sampleFeatures = sample.select(
      'label,
      'gid,
      arr2Vec(array($"3_cs", $"15_cs", $"3_xf", $"15_xf", $"3_th", $"15_th", $"3_hp", $"15_hp", $"3_cp", $"15_cp", $"last_dl", $"last_xf")) as "features")

    // Normalize the raw feature vectors.
    // Why: features with very different value ranges would otherwise dominate
    // the regression; min-max scaling maps each feature into [0, 1].
    // (Spark ML ships 4 normalizers: p-norm, min-max range scaling,
    // standard-deviation scaling, and max-absolute-value scaling.)
    val minMaxScaler = new MinMaxScaler()
      .setInputCol("features")
      .setOutputCol("scaled_features")
    val scaleModel = minMaxScaler.fit(sampleFeatures)
    val scaledFeatures = scaleModel.transform(sampleFeatures).drop("features")

    val testFeatures = test.select(
      'gid,
      arr2Vec(array($"3_cs", $"15_cs", $"3_xf", $"15_xf", $"3_th", $"15_th", $"3_hp", $"15_hp", $"3_cp", $"15_cp", $"last_dl", $"last_xf")) as "features", 'label)

    // BUG FIX: the test set must be scaled with the scaler FIT ON THE TRAINING
    // sample, not with a scaler re-fit on the test data itself. Re-fitting put
    // train and test features on different scales (data leakage + mismatched
    // inputs), which invalidated both the predictions and the evaluation below.
    val scaledTestFeatures = scaleModel.transform(testFeatures).drop("features")

    // Train a regularized logistic-regression model on the scaled sample
    val logisticRegression = new LogisticRegression()
      .setFeaturesCol("scaled_features")
      .setLabelCol("label")
      .setRegParam(0.1)

    val model = logisticRegression.fit(scaledFeatures)

    // Score the test set (adds rawPrediction / probability / prediction columns)
    val prediction = model.transform(scaledTestFeatures)

    prediction.show(10, false)

    // Evaluation 1: area under the ROC curve
    val ev = new BinaryClassificationEvaluator()
      .setLabelCol("label")
      .setMetricName("areaUnderROC")
    val roc: Double = ev.evaluate(prediction)
    println(roc)

    // Evaluation 2: confusion matrix.
    // MulticlassMetrics expects an RDD of (prediction, label) pairs.
    // Local renamed from `prediction` to `pred` to avoid shadowing the
    // DataFrame of the same name above.
    val rdd = prediction.rdd.map(row => {
      val label = row.getAs[Double]("label")
      val pred = row.getAs[Double]("prediction")
      (pred, label)
    })
    val matrix: Matrix = new MulticlassMetrics(rdd).confusionMatrix
    println(matrix)
    spark.close()
  }

}
