package cn.doitedu.profile

import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-12-25
 * @desc 用户流失风险预测
 */
object LossProbability {

  // Feature columns used for BOTH training and prediction. Declared exactly once
  // so the two feature-extraction expressions can never drift apart — a silent
  // column-order mismatch between fit() and transform() would corrupt every score.
  private val featureCols: Seq[String] = Seq(
    "cs_3", "cs_15", "xf_3", "xf_15", "th_3", "th_15",
    "hp_3", "hp_15", "cp_3", "cp_15", "last_dl", "last_xf"
  )

  // SQL expression assembling the raw feature columns into a single "features"
  // vector column via the registered "vec" UDF.
  private val featuresExpr: String = s"vec(array(${featureCols.mkString(",")})) as features"

  /**
   * User churn-risk prediction: trains a logistic-regression model on a labelled
   * sample set, then scores an unlabelled test set and prints the predictions.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder()
      .appName("流失风险预测")
      .master("local")
      .getOrCreate()

    // Load the raw labelled sample data (CSV with a header row; column types inferred).
    val sample: DataFrame = spark.read
      .option("header", "true")
      .option("inferSchema", "true")
      .csv("data/loss/sample")

    // UDF turning a SQL array of doubles into an ml dense Vector.
    // NOTE: the parameter is Seq[Double], not Array[Double] — Spark hands
    // ArrayType values to Scala UDFs as Seq (WrappedArray); declaring Array
    // can raise ClassCastException at runtime on some Spark versions.
    val arr2Vector = (arr: Seq[Double]) => Vectors.dense(arr.toArray)
    spark.udf.register("vec", arr2Vector)

    // Vectorize the training features, keeping the label column alongside.
    val sampleVecs = sample.selectExpr("label", featuresExpr)

    // Logistic-regression estimator over the vectorized features.
    val regression = new LogisticRegression()
      .setFeaturesCol("features")
      .setLabelCol("label")

    // Fit the model: learns the regression coefficients from the sample set.
    val model: LogisticRegressionModel = regression.fit(sampleVecs)

    // Load the unlabelled data to score, and vectorize it with the SAME
    // feature expression used for training.
    val test: DataFrame = spark.read
      .option("header", "true")
      .option("inferSchema", "true")
      .csv("data/loss/test")
    val testVecs: DataFrame = test.selectExpr("guid", featuresExpr)

    // Score the test set with the trained model (adds rawPrediction /
    // probability / prediction columns).
    val predict: DataFrame = model.transform(testVecs)

    predict.show(100, truncate = false)

    spark.close()
  }
}
