package cn.doitedu.ml.loss

import org.apache.spark.ml.classification.LogisticRegressionModel
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.SparkSession

import scala.collection.mutable

object LossProbabilityPredict {

  /**
   * Batch-scores a CSV of user feature rows with a previously trained
   * logistic-regression churn (loss-probability) model and prints the result.
   *
   * @param args optional overrides:
   *             args(0) = path to the CSV to score (default: userprofile/data/loss_probability/test/test.csv)
   *             args(1) = path to the saved model   (default: userprofile/data/loss_probability/model)
   */
  def main(args: Array[String]): Unit = {
    // Paths may be supplied on the command line; defaults keep the original behavior.
    val testPath  = if (args.length > 0) args(0) else "userprofile/data/loss_probability/test/test.csv"
    val modelPath = if (args.length > 1) args(1) else "userprofile/data/loss_probability/model"

    val spark = SparkSession.builder()
      .appName("LossProbabilityPredict") // was "": an empty app name makes the job unidentifiable in the Spark UI
      .master("local")
      .getOrCreate()

    // Load the data set to be scored (header row + schema inference for numeric columns)
    val sample = spark.read
      .option("header", "true")
      .option("inferSchema", "true")
      .csv(testPath)

    // UDF: pack an SQL array of numeric strings into an ML dense feature Vector
    val arr2Vec = (arr: mutable.WrappedArray[String]) => Vectors.dense(arr.map(_.toDouble).toArray)
    spark.udf.register("arr2vec", arr2Vec)

    // Assemble the 12 feature columns into a single "features" vector, keep the user id
    val testVecs = sample.selectExpr(
      "arr2vec(array(cs_3,cs_15,xf_3,xf_15,th_3,th_15,hp_3,hp_15,cp_3,cp_15,last_dl,last_xf)) as features",
      "guid")

    // Load the previously trained logistic-regression model
    val model = LogisticRegressionModel.load(modelPath)

    // Score the data; transform appends prediction/probability columns
    val result = model.transform(testVecs)

    result.show(100, truncate = false)

    spark.close()
  }

}
