package cn.doitedu.ml

import cn.doitedu.commons.utils.SparkUtil
import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.feature.MinMaxScaler
import org.apache.spark.ml.linalg.Vectors

import scala.collection.mutable

/**
 * @Title: ${file_name}
 * @Package ${package_name}
 * @Description: 行为性别预测
 * @Author hunter@doitedu.cn
 * @date 2020/8/1816:01     
 */
object GenderPredict {

  /**
   * Trains a Naive Bayes classifier to predict user gender from behavior
   * features, then scores a held-out test set.
   *
   * Pipeline: load CSV samples -> discretize the two continuous purchase
   * features -> assemble a dense feature vector -> min-max scale (scaler
   * fitted on TRAINING data only) -> train NaiveBayes -> predict on test.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkUtil.getSparkSession("行为性别预测")
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Load training samples and test data (CSV with header, schema inferred)
    val sample = spark.read.options(Map("header" -> "true", "inferSchema" -> "true")).csv("portrait/testdata/gender/sample")
    val test = spark.read.options(Map("header" -> "true", "inferSchema" -> "true")).csv("portrait/testdata/gender/test")

    // UDF: convert an array column into an ML dense vector
    val arr2Vec = udf((arr: mutable.WrappedArray[Double]) => {
      Vectors.dense(arr.toArray)
    })

    /**
     * Shared feature engineering for both train and test frames:
     * buckets the continuous columns day30_buy_cnts and day30_buy_amt into
     * ordinal levels 1.0-4.0, then packs all eight features into a single
     * "features" vector column. Extracted to a helper so train and test are
     * guaranteed to use identical transformations.
     */
    def buildFeatures(df: org.apache.spark.sql.DataFrame): org.apache.spark.sql.DataFrame = {
      df.selectExpr(
        "label",
        "gid",
        "category1",
        "category2",
        "category3",
        "brand1",
        "brand2",
        "brand3",
        "case when day30_buy_cnts <5 then 1.0 when day30_buy_cnts>=5 and day30_buy_cnts<10 then 2.0 when day30_buy_cnts>=10 and day30_buy_cnts<20 then 3.0 else 4.0 end as day30_buy_cnts",
        "case when day30_buy_amt <100 then 1.0 when day30_buy_amt>=100 and day30_buy_amt<500 then 2.0 when day30_buy_amt>=500 and day30_buy_amt<1000 then 3.0 else 4.0 end as day30_buy_amt"
      )
        .select('label, 'gid, arr2Vec(array('category1, 'category2, 'category3, 'brand1, 'brand2, 'brand3, 'day30_buy_cnts, 'day30_buy_amt)) as "features")
    }

    val sampleFeatures = buildFeatures(sample)

    // Fit the min-max scaler ONCE, on the training features only
    val minMaxScaler = new MinMaxScaler()
      .setInputCol("features")
      .setOutputCol("scaled_features")
    val scaledModel = minMaxScaler.fit(sampleFeatures)
    val scaledSampleFeatures = scaledModel.transform(sampleFeatures)

    // Train the Naive Bayes model on the scaled training features
    val naiveBayes = new NaiveBayes()
      .setFeaturesCol("scaled_features")
      .setLabelCol("label")
      .setSmoothing(1.0)
    val model = naiveBayes.fit(scaledSampleFeatures)

    // model.save("")
    // NaiveBayesModel.load("")

    // Build test features with the same transformation as training
    val testFeatures = buildFeatures(test)

    // FIX: scale the test set with the scaler model fitted on the TRAINING
    // data. The original code refit a new MinMaxScaler on the test set,
    // which applies different min/max ranges than the model was trained
    // with, skewing every prediction.
    val scaledTestFeatures = scaledModel.transform(testFeatures)

    // Predict gender on the test set and display the results
    val predict = model.transform(scaledTestFeatures)
    predict.show(100, false)

    spark.close()
  }

}
