package cn.doitedu.ml.lossprob

import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.feature.MinMaxScaler
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.SparkSession

import scala.collection.mutable

/**
 * Churn (loss) probability prediction.
 *
 * Pipeline: read a labeled training sample (CSV) -> assemble behavioral
 * columns into a feature vector -> MinMax-scale the features -> fit a
 * logistic regression model -> apply the SAME fitted scaler and the trained
 * model to the test set -> print predictions.
 */
object LossProbPredict {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("流失风险预测-逻辑回归").master("local").getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    val sample = spark.read.option("header", true).option("inferSchema", true).csv("user_portrait/data/loss_probability/sample")

    // Feature vectorization: turn an array column of doubles into an ML dense vector.
    val arr2vec = udf((arr: mutable.WrappedArray[Double]) => {
      Vectors.dense(arr.toArray)
    })

    // Assemble the 12 behavioral metrics (3-day / 15-day windows plus recency
    // columns) into a single "features" vector, keeping the label.
    val vecDF = sample.select(
      arr2vec(array(
        'cs_3,
        'cs_15,
        'xf_3,
        'xf_15,
        'th_3,
        'th_15,
        'hp_3,
        'hp_15,
        'cp_3,
        'cp_15,
        'last_dl,
        'last_xf
      )) as "features",
      'label)

    // Normalize the feature vector to [0, 1] per dimension.
    // The fitted scaler model captures the TRAINING min/max and must be reused
    // for the test set so both sets are scaled identically.
    val minMaxScaler = new MinMaxScaler()
      .setInputCol("features")
      .setOutputCol("vec")
    val scalerModel = minMaxScaler.fit(vecDF)
    val scaledFeatures = scalerModel.transform(vecDF)

    // Train the logistic regression model on the scaled features.
    // NOTE(review): regParam = 1.0 is very strong L2 regularization; confirm
    // this was tuned deliberately rather than left at a placeholder value.
    val regression = new LogisticRegression()
      .setFeaturesCol("vec")
      .setLabelCol("label")
      .setRegParam(1.0)
      .setMaxIter(100)
    val regressionModel = regression.fit(scaledFeatures)

    // Load the test set and assemble its features the same way as training.
    val test = spark.read.option("header", true).option("inferSchema", true).csv("user_portrait/data/loss_probability/test")
    val testVecDF = test.select(
      arr2vec(array(
        'cs_3,
        'cs_15,
        'xf_3,
        'xf_15,
        'th_3,
        'th_15,
        'hp_3,
        'hp_15,
        'cp_3,
        'cp_15,
        'last_dl,
        'last_xf
      )) as "features",
      'guid)

    // BUG FIX: the original code re-fit the MinMaxScaler on the test set
    // (minMaxScaler.fit(testVecDF)), which scales test features with a
    // different min/max range than the one the model was trained on and
    // silently corrupts predictions. Reuse the training-fitted scaler instead.
    val testScaled = scalerModel.transform(testVecDF)

    // Predict with the trained model; drop intermediate vector columns so the
    // output shows guid, rawPrediction, probability and prediction.
    val result = regressionModel.transform(testScaled).drop("features", "vec")
    result.show(100, false)

    spark.close()
  }

}
