package com.atguigu.userprofile.train

import java.util.Properties

import com.atguigu.userprofile.pipeline.MyPipeline
import com.atguigu.userprofile.util.MyPropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object BusiGenderTrain {

  // Training driver for the business-gender model. Steps:
  // 1. Extract the feature data
  // 2. Extract the label (the reference answer)
  // 3. Split the data into training and test sets
  // 4. Initialize the custom pipeline
  // 5. Train
  // 6. Predict on the held-out test set
  // 7. Evaluate the result
  // 8. Save the model
  //
  // Expected args: <taskId> <taskDate>
  def main(args: Array[String]): Unit = {

    // Fail fast with a clear usage message instead of an ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "usage: BusiGenderTrain <taskId> <taskDate>")
    val taskId = args(0) // currently unused; kept to preserve the positional argument contract
    val taskDate = args(1)

    // NOTE(review): setMaster("local[*]") hard-codes local mode and overrides any
    // --master passed to spark-submit — confirm this is intended outside of dev.
    val sparkConf: SparkConf = new SparkConf().setAppName("task_busi_gender_train_app").setMaster("local[*]")
    val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    try {
      // 1 & 2. Extract features and label from Hive.
      // Features: time spent on "male" vs "female" category-1 pages plus the user's
      // top-3 most-visited category-1 ids; label: the gender recorded in dim_user_info.
      // NOTE(review): taskDate is interpolated directly into the SQL string — it comes
      // from the job scheduler, but verify it cannot carry arbitrary input.
      println("1 、 提取提取特征数据 2 、提取label( 参考答案)")
      val selectSQL =
        s"""
           |with
           |user_info
           |as
           |( select id , gender  from dim_user_info where dt='9999-99-99'  and  gender is not null  )
           |,
           |visit_c1
           |as
           |(
           |select user_id ,during_time , category1_id  from dwd_page_log pl, dim_sku_info si
           | where  page_id='good_detail' and pl.dt='$taskDate'
           |  and  pl.page_item =si.id  and si.dt='$taskDate'
           |  )
           |select
           |user_info.id ,
           |male_dur,
           |female_dur,
           |top1_c1,
           |top2_c1,
           |top3_c1,
           |user_info.gender
           | from user_info  inner join
           |(
           | select  user_id ,
           | sum(if( rk=1 ,  category1_id,0) ) top1_c1,
           | sum(if( rk=2 ,  category1_id,0)) top2_c1,
           | sum(if( rk=3 ,  category1_id,0)) top3_c1 ,
           | sum(if (category1_id in (3,4,6 ) , during_time_sum ,0) ) male_dur,
           |  sum(if (category1_id in (8,12,15 ) , during_time_sum ,0) ) female_dur
           |  from  (
           |select  user_id,category1_id,sum(during_time) during_time_sum,count(*)
           |, row_number()over( partition by user_id order by count(*) desc ,sum(during_time) desc )  rk
           | from visit_c1
           |group by  user_id,category1_id
           |) visit_c1_rk
           |group by user_id
           |) user_visit   on  user_info.id =user_visit.user_id
           |
           |
         """.stripMargin
      // (The subquery's former ORDER BY before the outer GROUP BY was removed:
      // it forced an extra sort/shuffle and had no effect on the result.)
      sparkSession.sql("use gmall2021")
      val dataFrame: DataFrame = sparkSession.sql(selectSQL)

      // 3. Split into training (80%) and test (20%) sets.
      println("3、拆分数据测试集 和 训练集")
      val Array(trainDF, testDF) = dataFrame.randomSplit(Array(0.8, 0.2))

      // 4. Initialize the custom decision-tree pipeline with the feature columns above.
      println("4、初始化自定义流水线")
      val myPipeline: MyPipeline = new MyPipeline().setLabelColName("gender")
        .setFeatureColName(Array("male_dur", "female_dur", "top1_c1", "top2_c1", "top3_c1"))
        .setMaxCategories(20)
        .setMaxDepth(5)
        .setMinInstancesPerNode(3)
        .setMinInfoGain(0.02)
        .setMaxBins(20).init()

      // 5. Train, then print the fitted tree and feature weights for inspection.
      println("5、训练")
      myPipeline.train(trainDF)
      myPipeline.printTree()
      myPipeline.printFeatureWeight()

      // 6. Predict on the held-out test set.
      println("6、模拟预测")
      val predictedDF: DataFrame = myPipeline.predict(testDF)

      // 7. Evaluate the predictions.
      println("7、评估结果")
      myPipeline.printEvaluate(predictedDF)

      // 8. Persist the trained model to the path configured in config.properties.
      println("8、保存模型")
      val properties: Properties = MyPropertiesUtil.load("config.properties")
      val modelPath: String = properties.getProperty("model.path.busi_gender")
      myPipeline.saveModel(modelPath)
    } finally {
      // Release Spark resources even if any stage above throws.
      sparkSession.stop()
    }
  }

}
