package com.atguigu.userprofile.ml.train

import java.util.Properties

import com.atguigu.userprofile.ml.pipeline.MyPipeline
import com.atguigu.userprofile.util.MyPropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object BusiGenderTrainApp {

  /**
   * Trains a decision-tree gender model from users' category-1 browsing behavior.
   *
   * Pipeline: pull features + reference label via Hive SQL -> split train/test ->
   * train via [[MyPipeline]] -> print model/feature importances -> predict on the
   * test set -> evaluate -> save the model to the path configured in config.properties.
   *
   * Usage: BusiGenderTrainApp [partitionDate]
   *   partitionDate - dt partition (yyyy-MM-dd) of the behavior logs to train on;
   *                   defaults to "2021-05-16" (the previously hard-coded value,
   *                   so existing invocations behave identically).
   */
  def main(args: Array[String]): Unit = {

    // Partition date of the page-log / sku data; CLI-overridable, old value as default.
    val partitionDate: String = if (args.nonEmpty) args(0) else "2021-05-16"

    // NOTE(review): local[*] master is hard-coded — fine for dev, but should be
    // supplied by spark-submit in production.
    val sparkConf: SparkConf = new SparkConf().setAppName("busi_gender_train").setMaster("local[*]")
    val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    try {
      // Fetch features and the reference answer (gender label) together via SQL.
      println("通过SQL 把特征及参考答案 一起取")
      // Feature semantics:
      //  - top1_c1/top2_c1/top3_c1: the category1 ids ranked 1st/2nd/3rd by the user's
      //    view count. Because ct_rank is a row_number, each sum(if(...)) matches at
      //    most one row per user, so the "sum" effectively selects that c1 value.
      //  - male_c1_dur/female_c1_dur: total viewing time in heuristically male/female
      //    leaning category groups.
      //  - dt='9999-99-99' selects the "current" user-dimension partition
      //    (project convention — TODO confirm against dim_user_info load job).
      val querySql =
        s"""
           | with  user_cate as
           | (
           |     select  user_id ,category1_id c1 ,during_time from
           |      dwd_page_log pl join dim_sku_info ski
           |     on  pl.page_item=ski.id
           |     where  ski.dt='$partitionDate' and  pl.dt='$partitionDate'
           |     and page_id='good_detail' and  page_item_type='sku_id'
           |  ) ,
           |   user_gender as
           |  (
           |   select  id ,gender  from dim_user_info
           |    where dt='9999-99-99' and  gender<>''
           |  )
           |  select   user_id as uid , top1_c1,top2_c1,top3_c1 ,
           |  male_c1_dur,female_c1_dur ,ug.gender
           |  from
           |  (
           |  select user_id ,
           |  sum(if(ct_rank=1,c1,0)) top1_c1,
           |  sum(if(ct_rank=2,c1,0)) top2_c1,
           |  sum(if(ct_rank=3,c1,0)) top3_c1,
           |   sum( if(c1 in (3,4,16),sum_dur,0  ))  male_c1_dur ,
           |   sum( if(c1 in (8,12,15),sum_dur,0  ))  female_c1_dur
           |  from
           |  (
           |    select user_id ,c1 ,sum(during_time) sum_dur,count(*),
           |    row_number()over(partition by user_id order by count(*) desc ) ct_rank
           |    from user_cate
           |    group by user_id ,c1
           |     order by user_id
           |   ) user_c1
           |   group by user_id
           | )user_ct  join user_gender  ug on ug.id=user_ct.user_id
         """.stripMargin
      sparkSession.sql("use  gmall2021")
      val dataFrame: DataFrame = sparkSession.sql(querySql)

      // Split the data into a training set and a test set (80/20 random split).
      println("把数据拆分训练集和测试集")
      val Array(trainDataFrame, testDataFrame) = dataFrame.randomSplit(Array(0.8, 0.2))

      // Build the pipeline: label = gender, features = the five columns above.
      println("创建流水线")
      val myPipeline: MyPipeline = new MyPipeline().setLabelColumnName("gender")
        .setFeatureColumnNames(Array("male_c1_dur",
          "female_c1_dur", "top1_c1",
          "top2_c1", "top3_c1"))
        .setMaxDepth(7)
        .init()

      // Fit the model on the training set.
      println("开始训练")
      myPipeline.train(trainDataFrame)

      // Inspect the trained model: tree structure and per-feature importance.
      println("模型打印")
      println(myPipeline.getDecisionTreeString())
      println(myPipeline.getFeatureImportances())

      // Score the held-out test set.
      println("预测测试集")
      val predictedDataFrame: DataFrame = myPipeline.predict(testDataFrame)

      // Evaluate predictions against the true labels.
      println("评估")
      myPipeline.evaluate(predictedDataFrame)

      // Persist the model to the configured path for downstream scoring jobs.
      val properties: Properties =
        MyPropertiesUtil.load("config.properties")
      val modelPath: String =
        properties.getProperty("model.path")

      myPipeline.save(modelPath)
    } finally {
      // Release Spark resources even if any stage above fails.
      sparkSession.stop()
    }
  }

}
