package cn.doitedu.ml.loss

import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable

/**
  * Churn-probability risk prediction: model training using logistic regression.
  */
object LossProbModelTrain {

  /**
    * Entry point: reads the labeled sample CSV, assembles feature vectors,
    * trains an L2-regularized logistic regression model on an 80/20
    * train/test split, and prints predictions for the held-out test set.
    */
  def main(args: Array[String]): Unit = {

    // Silence verbose Spark/Hadoop logging so training output stays readable.
    Logger.getLogger("org.apache").setLevel(Level.WARN)

    val spark = SparkSession
      .builder()
      .appName("流失概率风险预测模型训练")
      .master("local")
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    // UDF: convert an array<double> column into an ML dense vector —
    // the input format LogisticRegression's features column requires.
    val arr2Vec: UserDefinedFunction = udf((arr: mutable.WrappedArray[Double]) => {
      val vector: linalg.Vector = Vectors.dense(arr.toArray)
      vector
    })

    val sample = spark.read
      .option("header", true)
      .option("inferSchema", true)
      .csv("userprofile/data/loss_probability/sample")

    // CSV columns: label,gid,cs_3,cs_15,xf_3,xf_15,th_3,th_15,hp_3,hp_15,cp_3,cp_15,last_dl,last_xf
    val vecDF = sample.select(
      'label,
      arr2Vec(array('cs_3, 'cs_15, 'xf_3, 'xf_15, 'th_3, 'th_15,
        'hp_3, 'hp_15, 'cp_3, 'cp_15, 'last_dl, 'last_xf)) as "features"
    )

    val logisticRegression = new LogisticRegression()
      .setFeaturesCol("features")
      .setLabelCol("label")
      // L2 regularization: adds regParam * ||θ||² to the loss so that no
      // single feature's weight grows too large — the influence of each
      // feature stays relatively even, which helps prevent overfitting.
      .setRegParam(1.0)

    // Split the samples into (training set, test set).
    // A fixed seed makes the split — and therefore the trained model —
    // reproducible across runs; weights are normalized, so 0.8/0.2 == 8/2.
    val Array(trainSets, testSets) = vecDF.randomSplit(Array(0.8, 0.2), seed = 42L)

    // Cache both splits: each is counted below, and the training set is
    // scanned repeatedly during fit(); without caching the CSV would be
    // re-read and the UDF re-applied on every pass.
    trainSets.cache()
    testSets.cache()

    println("trainSets count:" + trainSets.count())
    println("testSets count:" + testSets.count())

    // Train the model on the training set.
    val model = logisticRegression.fit(trainSets)
    //model.save("userprofile/data/loss_probability/model")

    // Use the trained model to predict on the test set.
    val testPredict = model.transform(testSets)

    testPredict.show(100, false)

    spark.close()
  }
}
