package cn.doitedu.ml.doit13.losspre

import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.feature.MinMaxScaler
import cn.doitedu.commons.util.SparkUtil
/**
 * Training job for a customer-churn ("loss") prediction model.
 * Algorithm: logistic regression (Spark ML pipeline API).
 */
object LossprobabilityTrain {

  def main(args: Array[String]): Unit = {

    // Silence Spark's verbose INFO logging.
    Logger.getLogger("org").setLevel(Level.WARN)
    val spark = SparkUtil.getSparkSession(this.getClass.getSimpleName)
    import spark.implicits._

    // 1. Load the labeled sample data.
    val sample = spark.read
      .options(Map("header" -> "true", "inferSchema" -> "true"))
      .csv("userprofile/data/loss_predict/sample/liushi_sample.csv")

    // 2. Assemble the raw feature columns into a single vector column.
    // CSV schema: label,gid,3_cs,15_cs,3_xf,15_xf,3_th,15_th,3_hp,15_hp,3_cp,15_cp,last_dl,last_xf
    import cn.doitedu.ml.util.VecUtil._
    // FIX: the UDF was registered as "arr2vec" but invoked as "arr2Vec" below;
    // that only worked because Spark SQL name resolution is case-insensitive by
    // default. Use one consistent name so the code also survives
    // spark.sql.caseSensitive=true.
    spark.udf.register("arr2vec", arr2Vec)
    val sampleVecs = sample.selectExpr(
      "label",
      // FIX: backtick-quote column names that start with a digit (e.g. 3_cs) --
      // they are not valid unquoted identifiers in the Spark SQL parser.
      "arr2vec(array(`3_cs`,`15_cs`,`3_xf`,`15_xf`,`3_th`,`15_th`,`3_hp`,`15_hp`,`3_cp`,`15_cp`,`last_dl`,`last_xf`)) vec"
    )
    // Cached because it is consumed more than once (show, scaler fit, transform).
    sampleVecs.cache()
    sampleVecs.show(100, false)

    // 3. Feature scaling -- when the value ranges of the features differ widely,
    //    normalize the feature vector. Spark MLlib ships four normalizers:
    //    p-norm, standard-deviation (z-score), min-max range, and max-abs.
    val scaler = new MinMaxScaler()
      .setInputCol("vec")
      .setOutputCol("features")
    val scalerModel = scaler.fit(sampleVecs)
    val sampleScaleVecs = scalerModel.transform(sampleVecs).drop("vec")

    sampleScaleVecs.show(100, false)
    // FIX: pass an explicit seed so the train/test split -- and therefore the
    // trained model -- is reproducible across runs.
    val Array(train, test) = sampleScaleVecs.randomSplit(Array(0.8, 0.2), seed = 42L)

    // 4. Build the estimator and fit the logistic-regression model.
    val logistic = new LogisticRegression()
      .setFeaturesCol("features")
      .setLabelCol("label")

    val logisticModel = logistic.fit(train)

    // 5. Sanity-check the model on the held-out test split.
    //    NOTE(review): no metric (e.g. AUC via BinaryClassificationEvaluator) is
    //    computed here -- only a visual inspection of predictions; consider
    //    adding one before trusting the persisted model.
    val predict = logisticModel.transform(test)
    predict.show(100, false)

    // 6. Persist the model.
    // FIX: save() throws if the target path already exists; write.overwrite()
    // makes the job safely re-runnable.
    logisticModel.write.overwrite().save("userprofile/data/loss_predict/model")

    spark.close()
  }

}
