package demo

import org.apache.log4j.{Level, Logger}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import util.SparkUtil

/**
 * Hand-written Naive Bayes implementation used to score celebrity
 * "出轨" (cheating) probability.
 *
 * Bayes' rule under the feature-independence assumption:
 *   P(C | F1..Fn) = P(C) * Π P(Fi | C) / Π P(Fi)
 *
 * Phase 1 estimates the class prior, per-feature marginals and
 * per-feature conditionals from the labelled sample CSV; phase 2
 * applies the model to the test CSV and prints (name, score).
 */
object BayesChuGui {

  /**
   * Fallback probability for a feature value that never occurred in the
   * training sample. The original code called `Option.get` and crashed with
   * NoSuchElementException on unseen values; a tiny non-zero fallback keeps
   * prediction total and also avoids a zero denominator.
   */
  private val UnseenFeatureProb = 1e-6

  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.WARN)
    val spark: SparkSession = SparkUtil.getSparkSession(this.getClass.getSimpleName)
    import spark.implicits._

    // ---- 1. Train: estimate prior, marginal and conditional probabilities ----
    // NOTE(review): assumes the CSV has header columns label, job, income, age, sex
    // — confirm against sampleBayes.csv.
    val sample: DataFrame = spark.read.option("header", value = true)
      .csv("userprofile/data/Bayes/sampleBayes.csv")
    sample.cache()
    sample.createTempView("sample")

    val sampleCnt: Long = sample.count()                          // |D|: whole sample space
    val chuguiCnt: Long = sample.where("label = '出轨'").count()  // |D_c|: positive class size

    // Class prior P(label). Spark SQL's `/` on counts yields Double.
    val labelProbMap: collection.Map[String, Double] = toProbMap(
      spark.sql(s"select label, count(1) / $sampleCnt as prob from sample group by label"),
      "label")

    val features = Seq("job", "income", "age", "sex")

    // Marginal P(feature = v) for every value of every feature.
    val marginalMaps: Map[String, collection.Map[String, Double]] =
      features.map { f =>
        f -> toProbMap(
          spark.sql(s"select $f, count(1) / $sampleCnt as prob from sample group by $f"),
          f)
      }.toMap

    // Conditional P(feature = v | label = '出轨'):
    // count rows matching the value AND the positive label, over the positive count.
    val conditionalMaps: Map[String, collection.Map[String, Double]] =
      features.map { f =>
        f -> toProbMap(
          spark.sql(
            s"select $f, count(if(label = '出轨', 1, null)) / $chuguiCnt as prob " +
              s"from sample group by $f"),
          f)
      }.toMap

    // Broadcast the small driver-side lookup tables to the executors once.
    val bcLabel: Broadcast[collection.Map[String, Double]] =
      spark.sparkContext.broadcast(labelProbMap)
    val bcMarginal: Broadcast[Map[String, collection.Map[String, Double]]] =
      spark.sparkContext.broadcast(marginalMaps)
    val bcConditional: Broadcast[Map[String, collection.Map[String, Double]]] =
      spark.sparkContext.broadcast(conditionalMaps)

    // ---- 2. Predict on unseen data with the trained model ----
    val test: DataFrame = spark.read.option("header", value = true)
      .csv("userprofile/data/Bayes/testBayes.csv")
    val predictions = test.rdd.map {
      // NOTE(review): a row with a null column will not match and throws
      // MatchError — same behavior as the original; confirm testBayes.csv is clean.
      case Row(name: String, job: String, income: String, age: String, sex: String) =>
        val observed = Seq("job" -> job, "income" -> income, "age" -> age, "sex" -> sex)
        val prior = bcLabel.value.getOrElse("出轨", UnseenFeatureProb)
        // Numerator: P(C) * Π P(Fi|C); unseen values fall back instead of crashing.
        val numerator = observed.foldLeft(prior) { case (acc, (f, v)) =>
          acc * bcConditional.value(f).getOrElse(v, UnseenFeatureProb)
        }
        // Denominator: Π P(Fi).
        val denominator = observed.foldLeft(1.0) { case (acc, (f, v)) =>
          acc * bcMarginal.value(f).getOrElse(v, UnseenFeatureProb)
        }
        (name, numerator / denominator)
    }.toDF("name", "prob")
    predictions.show(100)

    sample.unpersist()
    spark.close()
  }

  /**
   * Collects a (keyCol, prob) DataFrame into a driver-side lookup map.
   *
   * @param df     a DataFrame with a string key column and a Double `prob` column
   * @param keyCol name of the key column to index by
   * @return       map from feature/label value to its estimated probability
   */
  private def toProbMap(df: DataFrame, keyCol: String): collection.Map[String, Double] =
    df.rdd.map(row => (row.getAs[String](keyCol), row.getAs[Double]("prob"))).collectAsMap()
}
