package cn.doitedu.ml.gender

import org.apache.spark.ml.classification.NaiveBayes
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.SparkSession

import scala.collection.mutable

object GenderTagGenBayes {

  /**
   * Builds the feature-extraction SQL for a given source table.
   *
   * Renames the category/brand columns and discretizes the two continuous
   * behavior features (30-day purchase count and 30-day purchase amount)
   * into ordinal buckets, as required by the multinomial Naive Bayes model
   * (which expects non-negative, discrete-ish feature values).
   *
   * Shared by the training and test paths so the bucketing logic cannot
   * drift between them (it was previously duplicated verbatim).
   *
   * @param table     name of the registered temp view to select from
   * @param extraCols extra pass-through columns to keep right after `label`
   *                  (e.g. `guid` for the test set); empty for training
   * @return the SQL text to run through `spark.sql`
   */
  private def discretizeSql(table: String, extraCols: Seq[String] = Nil): String = {
    // label always comes first; extra identifier columns follow it.
    val passThrough = ("label" +: extraCols).mkString(",\n")
    s"""
       |select
       |$passThrough,
       |category1 as c1,
       |category2 as c2,
       |category3 as c3,
       |brand1 as b1,
       |brand2 as b2,
       |brand3 as b3,
       |case
       |  when day30_buy_cnts < 10 then 0.0
       |  when day30_buy_cnts >= 10 and day30_buy_cnts < 20 then 1.0
       |  when day30_buy_cnts >= 20 and day30_buy_cnts < 30 then 2.0
       |  else 3.0
       |end as buycnt_30,
       |case
       |  when day30_buy_amt < 100 then 0.0
       |  when day30_buy_amt >= 100 and day30_buy_amt < 200 then 1.0
       |  when day30_buy_amt >= 200 and day30_buy_amt < 500 then 2.0
       |  when day30_buy_amt >= 500 and day30_buy_amt < 1000 then 3.0
       |  else 4.0
       |end as buyamt30
       |from $table
       |""".stripMargin
  }

  /**
   * Trains a Naive Bayes gender classifier on discretized user-behavior
   * features, scores a held-out test set, and prints the AUC.
   *
   * Pipeline: read CSV samples -> discretize via SQL -> pack features into
   * a dense vector -> fit NaiveBayes -> transform test set -> evaluate.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("行为性别标签生成-朴素贝叶斯")
      .master("local")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // UDF: pack the discretized feature columns into a dense ML vector.
    val arr2vec = udf((arr: mutable.WrappedArray[Double]) => Vectors.dense(arr.toArray))
    // Single definition of the feature column set, shared by train and test
    // so the two vectors are guaranteed to have the same layout.
    val featureCols = array('c1, 'c2, 'c3, 'b1, 'b2, 'b3, 'buycnt_30, 'buyamt30)

    // ---- Training data: load, discretize, vectorize ----
    val sample = spark.read
      .option("header", true)
      .option("inferSchema", true)
      .csv("user_portrait/data/gender/sample")
    sample.createTempView("sample")

    val vecDF = spark.sql(discretizeSql("sample"))
      .select(arr2vec(featureCols) as "features", 'label)

    // ---- Train the model ----
    // Small smoothing constant guards against zero-probability buckets.
    val naiveBayes = new NaiveBayes()
      .setSmoothing(0.01)
      .setLabelCol("label")
      .setFeaturesCol("features")

    val model = naiveBayes.fit(vecDF)
    // TODO: persist the trained model (model.save(path))
    // TODO: load a previously persisted model instead of retraining

    // ---- Test data: same discretization, keeping guid for identification ----
    val test = spark.read
      .option("header", true)
      .option("inferSchema", true)
      .csv("user_portrait/data/gender/test")
    test.createTempView("test")

    val vecTestDF = spark.sql(discretizeSql("test", Seq("guid")))
      .select('guid, 'label, arr2vec(featureCols) as "features")

    // ---- Predict with the trained model ----
    val res = model.transform(vecTestDF).drop("features")
    res.show(100, false)

    // ---- Evaluate: area under the ROC curve ----
    // areaUnderROC is the evaluator's default metric; set explicitly for clarity.
    // AUC lies in [0.5, 1.0]; the closer to 1, the better the model.
    val evaluator = new BinaryClassificationEvaluator()
      .setLabelCol("label")
      .setMetricName("areaUnderROC")

    val auc: Double = evaluator.evaluate(res)
    println(auc)

    spark.close()
  }

}
