package cn.doitedu.ml.bayes

import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.SparkSession

import scala.collection.mutable

/**
 * 出轨率预测算法实战
 * 算法：朴素贝叶斯  naive bayes
 */
object CheatPredict {

  /**
   * Builds the SQL that encodes the raw categorical columns of `table` into
   * numeric features (job / income-bucket / age / sex → doubles).
   *
   * A single template is used for both the training and the prediction table
   * so the two encodings can never drift apart.
   *
   * @param table     name of the registered temp view to select from
   * @param withLabel whether to also emit the numeric `label` column
   *                  (training data has it, prediction data does not)
   * @return the SELECT statement as a string
   */
  private def digitizeSql(table: String, withLabel: Boolean): String = {
    // Label encoding: '出轨' (cheated) → 0.0, everything else → 1.0.
    // NOTE(review): 0.0 = cheated is counter-intuitive but matches the
    // original encoding; downstream interpretation must use the same mapping.
    val labelExpr =
      if (withLabel) ",\ncast(if(label='出轨',0.0,1.0) as double) as label"
      else ""
    s"""
       |select
       |name,
       |cast(
       |case
       |  when job='程序员' then 0.0
       |  when job='老师' then  1.0
       |  when job='公务员' then 2.0
       |end  as double) as job,
       |
       |cast(
       |case
       |  when income<10000 then 0.0
       |  when income>=10000 and income<20000 then  1.0
       |  when income>=20000 and income<30000 then  2.0
       |  when income>=30000 and income<40000 then  3.0
       |  else 4.0
       |end as double) as income,
       |
       |cast(
       |case
       |  when age='青年' then 0.0
       |  when age='中年' then  1.0
       |  when age='老年' then 2.0
       |end as double) as age,
       |
       |cast(
       |case
       |  when sex='男' then 0.0
       |  when sex='女' then  1.0
       |end as double) as sex$labelExpr
       |
       |from $table
       |""".stripMargin
  }

  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .master("local")
      .appName("出轨概率预测-朴素贝叶斯")
      .getOrCreate()

    // Load raw training samples (CSV with header row); income is cast to
    // double up front so the range bucketing in digitizeSql works numerically.
    val sampledata = spark.read.option("header", "true").csv("userprofile/data/chugui/sample")
    sampledata
      .selectExpr("name", "job", "cast(income as double) as income", "age", "sex", "label")
      .createTempView("sampledata")

    // Load the data to predict on — same columns, but no label.
    val testdata = spark.read.option("header", "true").csv("userprofile/data/chugui/test")
    testdata
      .selectExpr("name", "job", "cast(income as double) as income", "age", "sex")
      .createTempView("testdata")

    // Encode the raw categorical features as numeric features,
    // using the shared SQL template for both tables.
    val sampleFeatureDigital = spark.sql(digitizeSql("sampledata", withLabel = true))
    val testFeatureDigital = spark.sql(digitizeSql("testdata", withLabel = false))

    sampleFeatureDigital.show(100, false)
    testFeatureDigital.show(100, false)

    // Vectorize: pack the numeric feature columns into an ml Vector
    // (DenseVector) as required by the NaiveBayes estimator.
    sampleFeatureDigital.createTempView("sample")
    testFeatureDigital.createTempView("test")

    val arr2Vector = (arr: mutable.WrappedArray[Double]) => Vectors.dense(arr.toArray)
    spark.udf.register("arr2vector", arr2Vector)

    val sampleVectors = spark.sql(
      """
        |select
        |name,
        |arr2vector(array(job,income,age,sex)) as features,
        |label
        |from sample
        |
        |""".stripMargin)

    val testVectors = spark.sql(
      """
        |select
        |name,
        |arr2vector(array(job,income,age,sex)) as features
        |from test
        |
        |""".stripMargin)

    // Configure the estimator. NOTE(review): the prediction column is named
    // "cheat_possibility" but it actually holds the predicted class label
    // (0.0 = cheated under the encoding above), not a probability; the name
    // is kept for output-schema compatibility.
    val bayes = new NaiveBayes()
      .setLabelCol("label")
      .setFeaturesCol("features")
      .setSmoothing(0.1) // Laplace smoothing factor
      .setPredictionCol("cheat_possibility")

    // Train the model.
    val model = bayes.fit(sampleVectors)

    // Persist the model. overwrite() so re-running the job does not fail
    // with "path ... already exists" once the directory has been created.
    model.write.overwrite().save("userprofile/data/chugui/model")

    // Reload the model (demonstrates the save/load round trip).
    val model1 = NaiveBayesModel.load("userprofile/data/chugui/model")

    // Score the prediction set and display the result.
    val result = model1.transform(testVectors)
    result.show(100, false)

    spark.close()
  }

}
