package cn.doitedu.ml.doit13.gender

import cn.doitedu.commons.util.SparkUtil
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.classification.NaiveBayes
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
import org.apache.spark.ml.feature.HashingTF

/**
 * 行为性别预测，模型训练
 * 算法： 朴素贝叶斯
 */
object ActionGenderModelTrain {

  def main(args: Array[String]): Unit = {

    Logger.getLogger("org").setLevel(Level.WARN)
    val spark = SparkUtil.getSparkSession(this.getClass.getSimpleName)
    import spark.implicits._

    // Load the labeled sample data (header row present; all columns read as strings).
    val sample = spark.read.option("header", true).csv("userprofile/data/gender/sample/sex_sample.csv")


    /** *
     * Feature engineering:
     * 1. Discretize the two numeric features (order count and purchase amount) into buckets:
     *    order count:
     *      <= 10          ==> ORDER_CNTS_A
     *      (10, 20]       ==> ORDER_CNTS_B
     *      > 20           ==> ORDER_CNTS_C
     *    purchase amount:
     *      <= 100         ==> ORDER_AMT_A
     *      (100, 500]     ==> ORDER_AMT_B
     *      (500, 1000]    ==> ORDER_AMT_C
     *      > 1000         ==> ORDER_AMT_D
     * 2. Hash-map all discrete features into a term-frequency vector.
     *
     * NOTE(review): the original buckets used inclusive BETWEEN ranges
     * (e.g. `between 0 and 10` then `between 11 and 20`), which left gaps:
     * a fractional value such as 10.5 or 100.5 fell through to the `else`
     * branch and was mis-binned into the highest bucket. The half-open
     * `<=` thresholds below close those gaps while preserving the original
     * behavior for integer values. Assumes counts/amounts are non-negative.
     */
    sample.createTempView("sample")

    sample.show(100, false)

    val vecs = spark.sql(
      """
        |select
        |cast(label as double) as label,
        |gid,
        |array(
        |category1,
        |category2,
        |category3,
        |brand1,
        |brand2,
        |brand3,
        |case
        | when cast(day30_buy_cnts as double) <= 10 then 'ORDER_CNTS_A'
        | when cast(day30_buy_cnts as double) <= 20 then 'ORDER_CNTS_B'
        | else 'ORDER_CNTS_C'
        |end,
        |
        |case
        | when cast(day30_buy_amt as double) <= 100 then 'ORDER_AMT_A'
        | when cast(day30_buy_amt as double) <= 500 then 'ORDER_AMT_B'
        | when cast(day30_buy_amt as double) <= 1000 then 'ORDER_AMT_C'
        | else 'ORDER_AMT_D'
        |end
        |) as features
        |from sample
        |
        |""".stripMargin)

    vecs.show(100, false)

    // Hash the discrete string features into a sparse TF vector.
    // A large feature space (1M) keeps hash collisions between distinct tokens unlikely.
    val tf = new HashingTF()
      .setNumFeatures(1000000)
      .setInputCol("features")
      .setOutputCol("tf")
    val hashVecs = tf.transform(vecs)

    // Random 80/20 train/test split.
    val Array(train, test) = hashVecs.randomSplit(Array(0.8, 0.2))

    // Train a Naive Bayes classifier with Laplace smoothing.
    val bayes = new NaiveBayes()
      .setFeaturesCol("tf")
      .setSmoothing(1.0)
      .setLabelCol("label")
      .setProbabilityCol("probs") // output column for class probabilities
      .setPredictionCol("prediction") // output column for the predicted class
    val model = bayes.fit(train)

    // Evaluate the model on the held-out test set.
    val predict = model.transform(test).select("gid", "label", "probs", "prediction")
    predict.show(100, false)

    // Multiclass evaluation metrics (confusion-matrix based).
    val evaluator = new MulticlassClassificationEvaluator()
      .setLabelCol("label")
      .setPredictionCol("prediction")

    // Cached because the evaluator scans `predict` once per metric below.
    predict.cache()
    // F1 is the harmonic mean of precision and recall: 2/F = 1/P + 1/R.
    // "f1" is the evaluator's default metric; set explicitly so the code
    // does not depend on call order relative to the setMetricName calls below.
    val f: Double = evaluator.setMetricName("f1").evaluate(predict)

    // Accuracy metric
    val accuracy = evaluator.setMetricName("accuracy").evaluate(predict)

    // Weighted precision metric (original comment mislabeled this as accuracy)
    val precision = evaluator.setMetricName("weightedPrecision").evaluate(predict)

    // Weighted recall metric
    val recall = evaluator.setMetricName("weightedRecall").evaluate(predict)

    println("F1 :   " + f)
    println("accuracy :   " + accuracy)
    println("precision :   " + precision)
    println("recall :   " + recall)

    spark.close()

  }


}
