package MLlib

import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.types.DataTypes
import org.apache.spark.sql.{DataFrame, SparkSession}
import util.SparkUtil

import scala.collection.mutable

/**
 * Demo of Spark MLlib's built-in Naive Bayes classifier: predicts infidelity
 * ("chugui") as a classification task over categorical survey features
 * (job, income, age, sex) that are manually encoded to doubles.
 *
 * Reads a labeled training CSV, trains and persists a model, then reloads the
 * model and scores an unlabeled test CSV.
 */
object NaiveBayesChugui {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkUtil.getSparkSession(this.getClass.getSimpleName)
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Load the labeled training sample (CSV with a header row).
    val sample: DataFrame = spark.read.option("header", value = true).csv("userprofile/data/Bayes/sampleBayes.csv")

    // Encode each categorical column as a double so the features can be vectorized.
    // label: '出轨' (cheated) -> 0.0, '没出' (did not) -> 1.0; unmatched values become null.
    val frame: DataFrame = sample.selectExpr("name",
      "case label when '出轨' then 0.0 when '没出' then 1.0 end as label",
      "case job when '老师' then 0.0 when '程序员' then 1.0 else 2.0 end as job",
      "case income when '低' then 0.0 when '中' then 1.0 else 2.0 end as income",
      "case age when '青年' then 0.0 when '中年' then 1.0 else 2.0 end as age",
      "case sex when '男' then 0.0 when '女' then 1.0 end as sex"
    )

    // UDF that packs an array of doubles into an ML dense feature vector.
    val arr2vec: UserDefinedFunction = udf((arr: mutable.WrappedArray[Double]) => {
      Vectors.dense(arr.toArray)
    })

    // Assemble the (name, label, vec) training frame expected by the estimator.
    val res: DataFrame = frame.select('name, 'label, arr2vec(array('job.cast(DataTypes.DoubleType), 'income.cast(DataTypes.DoubleType), 'age.cast(DataTypes.DoubleType), 'sex.cast(DataTypes.DoubleType))).as("vec"))

    // Configure the Naive Bayes estimator.
    val bayes: NaiveBayes = new NaiveBayes()
      .setFeaturesCol("vec")
      .setLabelCol("label")
      // Laplace smoothing: avoids zero probabilities for feature/label combos
      // absent from the training data (the value 1.0 is a conventional default).
      .setSmoothing(1.0)

    // Train the model on the encoded sample and persist it.
    val model: NaiveBayesModel = bayes.fit(res)
    // overwrite() so a rerun does not fail with "path ... already exists".
    model.write.overwrite().save("userprofile/data/Bayes/bayesModel")

    // Reload the persisted model and score previously unseen data.
    val model1: NaiveBayesModel = NaiveBayesModel.load("userprofile/data/Bayes/bayesModel")
    val test: DataFrame = spark.read.option("header", value = true).csv("userprofile/data/Bayes/testBayes.csv")
    spark.udf.register("arr2vec", arr2vec)
    test.createTempView("test")

    // Apply the same categorical encoding as training, expressed in SQL
    // against the temp view (must stay in sync with the selectExpr above).
    val res2: DataFrame = spark.sql(
      """
        |select name,
        |arr2vec(
        |  array(
        |    cast(case job when '老师' then 0.0 when '程序员' then 1.0 else 2.0 end as double),
        |    cast(case income when '低' then 0.0 when '中' then 1.0 else 2.0 end as double),
        |    cast(case age when '青年' then 0.0 when '中年' then 1.0 else 2.0 end as double),
        |    cast(case sex when '男' then 0.0 when '女' then 1.0 end as double)
        |)) as vec
        |from test
        |""".stripMargin)
    res2.show(100, truncate = false)

    // transform() appends rawPrediction / probability / prediction columns.
    val result: DataFrame = model1.transform(res2)
    result.show(100, truncate = false)
    spark.close()
  }

}
