package cn.doitedu.ml.loss

import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.stat.Correlation
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable

/**
 * Churn-probability prediction model: computes pairwise feature correlations
 * so that weakly-informative features can be filtered out before training.
 */
object LossProbModelFeatureCorr {

  def main(args: Array[String]): Unit = {
    // Silence Spark's internal chatter so the correlation matrix is readable.
    Logger.getLogger("org.apache").setLevel(Level.WARN)

    val spark = SparkSession
      .builder()
      .appName("流失概率风险预测特征相关度计算")
      .master("local")
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    // UDF that packs a numeric array column into an ML dense vector.
    // Note: Vector is an interface with two implementations,
    // DenseVector and SparseVector; we build the dense form here.
    val arr2Vec: UserDefinedFunction = udf { (arr: mutable.WrappedArray[Double]) =>
      Vectors.dense(arr.toArray): linalg.Vector
    }

    // Sample CSV schema:
    // label,gid,cs_3,cs_15,xf_3,xf_15,th_3,th_15,hp_3,hp_15,cp_3,cp_15,last_dl,last_xf
    val sample = spark.read
      .option("header", true)
      .option("inferSchema", true)
      .csv("userprofile/data/loss_probability/sample")

    // Gather the label plus every behavioral feature (gid is an id, excluded)
    // into one array column, then convert it to a "features" vector column.
    val featureArray =
      array('label, 'cs_3, 'cs_15, 'xf_3, 'xf_15, 'th_3, 'th_15,
            'hp_3, 'hp_15, 'cp_3, 'cp_15, 'last_dl, 'last_xf)
    val vec = sample.select(arr2Vec(featureArray) as "features")

    // Rule of thumb for |r|: <0.1 negligible, 0.1-0.3 weak,
    // 0.3-0.5 moderate, 0.5-1.0 strong correlation.
    val corr: DataFrame = Correlation.corr(vec, "features", "pearson")
    corr.show(1, false)

    spark.close()
  }

}
