package com.atguigu.userprofile.ml.train

import java.util.Properties

import com.atguigu.userprofile.common.util.MyPropertiesUtil
import com.atguigu.userprofile.ml.pipeline.MyPipeline
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object PredictGenderTrain {

  /**
   * Training stage for the gender-prediction model.
   *
   * Steps:
   *   1. Extract feature data from the data warehouse
   *   2. Extract the label (gender, used as ground truth)
   *   3. Initialize the pipeline object
   *   4. Train
   *   5. Predict on a held-out split (simulated prediction)
   *   6. Evaluate, iterate to optimize
   *   7. Persist the model
   *
   * Expected arguments: args(1) is the task date (partition key, e.g. yyyy-MM-dd)
   * used to select the `dwd_page_log` / `dim_sku_info` partitions.
   * NOTE(review): args(0) appears to be a task id consumed elsewhere — confirm
   * against the scheduler that submits this job.
   */
  def main(args: Array[String]): Unit = {
    // Fail fast with a clear message instead of ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "usage: PredictGenderTrain <taskId> <taskDate>")

    // 0  Spark environment.
    // NOTE(review): master is hard-coded to local[*]; for cluster submission the
    // master normally comes from spark-submit — confirm deployment expectations.
    val sparkConf: SparkConf = new SparkConf().setAppName("task_predict_gender_train").setMaster("local[*]")
    val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    try {
      val taskDate: String = args(1)

      // 1  Extract feature data from the warehouse.
      // 2  Extract the label (gender) by joining against dim_user_info.
      println("1提取数据")
      // Features per user: total viewing time on "male" category-1 ids (3,4,6),
      // total viewing time on "female" category-1 ids (8,12,15), and the top-3
      // most-visited category-1 ids (by visit count, via row_number).
      val sql =
        s"""
           |    with
           |   user_gender as
           |   (
           |   select id,gender  from dim_user_info  where gender<>'' and dt='9999-99-99'
           |   ) ,
           |   visit_c1 as
           |   (
           |        select  user_id,  category1_id,during_time  from dwd_page_log pl  ,dim_sku_info si
           |        where pl.dt='${taskDate}' and page_id='good_detail' and
           |        page_item_type='sku_id'  and  si.id=page_item and si.dt='${taskDate}'
           |   )
           |select
           |   user_visit.user_id,
           |    male_dur,
           |  female_dur,
           |  c1_top1,
           |  c1_top2,
           |  c1_top3,
           |  gender from
           |   (
           |   select user_id,
           |   sum(if(category1_id in (3,4,6) ,during_time,0)) male_dur,
           |   sum(if(category1_id in (8,12,15) ,during_time,0)) female_dur,
           |   sum(if(rk=1,category1_id,0))  c1_top1,
           |   sum(if(rk=2,category1_id,0)) c1_top2,
           |   sum(if(rk=3,category1_id,0)) c1_top3
           |   from
           |   (
           |     select  user_id,category1_id,count(*),sum(during_time) during_time ,row_number()over( partition by user_id  order by  count(*) desc   ) rk from
           |     visit_c1
           |     group by user_id,category1_id
           |   )user_c1_rk
           |   group by user_id
           |   ) user_visit  inner join   user_gender  on   user_gender.id=user_visit.user_id
           |
        """.stripMargin
      println(sql)

      // Only the warehouse db name is needed here; other properties (hdfs path,
      // profile db) belong to later task stages and were unused in this one.
      val properties: Properties = MyPropertiesUtil.load("config.properties")
      val dwName: String = properties.getProperty("data-warehouse.dbname")

      sparkSession.sql(s"use $dwName")
      val dataFrame: DataFrame = sparkSession.sql(sql)

      dataFrame.show(10000, false)

      // 3  Initialize the pipeline object (decision tree hyper-parameters).
      println("3  初始化流水线对象")
      val myPipeline: MyPipeline = new MyPipeline().setLabelColName("gender").setFeatureColName(Array("male_dur", "female_dur", "c1_top1", "c1_top2", "c1_top3"))
        .setMaxCategories(10)
        .setMaxDepth(5).setMaxBins(32).setMinInfoGain(0.01).setMinInstancesPerNode(3)
        .init()

      // 4  Train on an 80/20 random split.
      println("4   训练")
      val Array(trainDF, testDF) = dataFrame.randomSplit(Array(0.8, 0.2))

      myPipeline.train(trainDF)

      myPipeline.printTree()
      myPipeline.printFeatureWeight()

      // 5  Simulated prediction on the held-out 20%.
      println("5   模拟预测")
      val predictedDataFrame: DataFrame = myPipeline.predict(testDF)
      predictedDataFrame.show(1000, false)

      // 6  Evaluation report (iterate on hyper-parameters to optimize).
      println("6   评估 、反复优化")
      myPipeline.printEuvluateReport(predictedDataFrame)

      // 7  Persist the trained model.
      println("7 存储")
      val modelPath: String = properties.getProperty("model.gmall_gender.path")
      myPipeline.saveModel(modelPath)
    } finally {
      // Release the Spark session even if a stage fails.
      sparkSession.stop()
    }
  }

}
