package cn.doitedu.ml.loss

import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.SparkSession

import scala.collection.mutable

/**
 * Trains a churn ("loss") probability model: reads a labelled sample CSV,
 * assembles 12 behavioural-feature columns into an ML vector, fits a
 * logistic regression, and persists the model to disk.
 *
 * Side effects: reads `userprofile/data/loss_probability/sample/sample.csv`,
 * writes the fitted model under `userprofile/data/loss_probability/model`.
 */
object LossProbabilityTrainner {

  def main(args: Array[String]): Unit = {

    // Local-mode session for this batch training job.
    // Fix: appName was an empty string, which makes the job anonymous in the Spark UI.
    val spark = SparkSession.builder()
      .appName("LossProbabilityTrainner")
      .master("local")
      .getOrCreate()

    // Load the labelled training sample (first row is the header; let Spark infer column types).
    val sample = spark.read
      .option("header", "true")
      .option("inferSchema", "true")
      .csv("userprofile/data/loss_probability/sample/sample.csv")

    // UDF: convert an array<string> SQL column into a dense ML vector
    // (each element is parsed as a Double; will throw on non-numeric values).
    val arr2Vec = (arr: mutable.WrappedArray[String]) =>
      Vectors.dense(arr.map(_.toDouble).toArray)
    spark.udf.register("arr2vec", arr2Vec)

    // Assemble the 12 behavioural feature columns into a single "features"
    // vector column, keeping the "label" column for supervised training.
    val sampleVecs = sample.selectExpr(
      "arr2vec(array(cs_3,cs_15,xf_3,xf_15,th_3,th_15,hp_3,hp_15,cp_3,cp_15,last_dl,last_xf)) as features",
      "label"
    )

    // Fit a logistic regression on the vectorized sample.
    // NOTE(review): regParam = 1.0 is strong L2 regularization — confirm it was tuned
    // deliberately and not left at a placeholder value.
    val logisticRegression = new LogisticRegression()
      .setFeaturesCol("features")
      .setLabelCol("label")
      .setRegParam(1.0)

    val model = logisticRegression.fit(sampleVecs)

    // Fix: plain save() fails with "path already exists" on re-runs;
    // overwrite() makes the job idempotent.
    model.write.overwrite().save("userprofile/data/loss_probability/model")

    spark.stop()
  }

}
