package MLlib2

import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.feature.{MinMaxScaler, MinMaxScalerModel}
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import util.SparkUtil

/**
 * 流失率预测算法模型训练
 * 所用算法：逻辑回归算法
 */
/**
 * Churn-probability model training.
 * Algorithm: logistic regression on MinMax-scaled behavioral features.
 *
 * Pipeline: load labelled sample + unlabelled prediction set from CSV,
 * assemble 12 behavioral columns into a feature vector, MinMax-scale,
 * fit a LogisticRegression model on the sample, then score both sets.
 */
object LossProbabilityTrain {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkUtil.getSparkSession(this.getClass.getSimpleName)
    import spark.implicits._
    // Labelled training sample and unlabelled records to be scored.
    val sample: DataFrame = spark.read.option("header", value = true).option("inferSchema", value = true).csv("userprofile/data/LossPredict/liushi_sample.csv")
    val test: DataFrame = spark.read.options(Map("header" -> "true", "inferSchema" -> "true")).csv("userprofile/data/LossPredict/liushi_to_predict.csv")

    // Assemble the training feature vectors. Because the raw features have very
    // different value ranges, they must be normalized before training.
    // Spark MLlib ships four normalizers (p-norm Normalizer, StandardScaler,
    // MinMaxScaler, MaxAbsScaler); MinMax is used here.
    // NOTE: the Row pattern is partial — rows whose columns are not all Double
    // (e.g. nulls or integer-inferred columns) would fail the match at runtime.
    val sampleVec: DataFrame = sample.rdd.map {
      case Row(label: Double, gid: Double, cs_3: Double, cs_15: Double, xf_3: Double, xf_15: Double, th_3: Double, th_15: Double, hp_3: Double, hp_15: Double, cp_3: Double, cp_15: Double, last_dl: Double, last_xf: Double) =>
        val features: linalg.Vector = Vectors.dense(Array(cs_3, cs_15, xf_3, xf_15, th_3, th_15, hp_3, hp_15, cp_3, cp_15, last_dl, last_xf))
        (features, label)
    }.toDF("features", "label")
    sampleVec.cache() // reused by fit, transform and show — cache to avoid recomputation
    val scaler: MinMaxScaler = new MinMaxScaler() // per-feature [min, max] -> [0, 1] rescaler
      .setInputCol("features")
      .setOutputCol("vector")
    val scalerModel: MinMaxScalerModel = scaler.fit(sampleVec)
    val sampleScaleVecs: DataFrame = scalerModel.transform(sampleVec).drop("features") // drop the raw, unscaled column
    sampleScaleVecs.cache()
    sampleScaleVecs.show(100, truncate = false)

    // Assemble the prediction-set feature vectors (no label column; gid inferred as Int).
    val testVec: DataFrame = test.rdd.map {
      case Row(gid: Int, cs_3: Double, cs_15: Double, xf_3: Double, xf_15: Double, th_3: Double, th_15: Double, hp_3: Double, hp_15: Double, cp_3: Double, cp_15: Double, last_dl: Double, last_xf: Double) =>
        val features: linalg.Vector = Vectors.dense(Array(cs_3, cs_15, xf_3, xf_15, th_3, th_15, hp_3, hp_15, cp_3, cp_15, last_dl, last_xf))
        (gid, features)
    }.toDF("guid", "features")
    // BUG FIX: scale the prediction set with the model fitted on the TRAINING
    // data. The original code re-fit the scaler on testVec, giving each dataset
    // its own per-feature min/max — identical raw values would then map to
    // different scaled values, feeding the classifier inconsistent inputs.
    val testScaleVecs: DataFrame = scalerModel.transform(testVec).drop("features")

    // Logistic-regression model trained on the scaled sample, then used to
    // score both the training sample (sanity check) and the prediction set.
    val regression: LogisticRegression = new LogisticRegression().setFeaturesCol("vector").setLabelCol("label")
    val regressionModel = regression.fit(sampleScaleVecs)
    val predictSample: DataFrame = regressionModel.transform(sampleScaleVecs)
    val predictTest: DataFrame = regressionModel.transform(testScaleVecs)
    predictSample.show(100, truncate = false)
    predictTest.show(100, truncate = false)

    spark.close()
  }
}
