package com.atguigu.userprofile.ml.train

import java.util.Properties

import com.atguigu.userprofile.ml.pipeline.MyPipeline
import com.atguigu.userprofile.util.MyPropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object BusiGenderTrain {

  /**
   * Trains a gender-prediction model from users' category-1 browsing behaviour.
   *
   * Steps:
   *   1. Extract features with SQL: each user's top-3 category-1 ids (by page-view
   *      count) plus total dwell time on male-/female-oriented categories, joined
   *      with the known gender label from `dim_user_info`.
   *   2. Initialise the ML pipeline.
   *   3. Split data into training / test sets (80 / 20).
   *   4. Train.
   *   5. Print the tree and feature weights, then evaluate on the test set.
   *   6. Save the fitted model to the path configured in `config.properties`.
   *
   * @param args args(0) = task id (currently unused — kept so the CLI contract
   *             stays stable), args(1) = task date: the Hive `dt` partition to read.
   */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage hint instead of an ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "expected arguments: <taskId> <taskDate>")

    // NOTE(review): master is hard-coded to local[*]; for cluster runs this should
    // come from spark-submit instead — confirm the intended deployment mode.
    val sparkConf: SparkConf = new SparkConf().setAppName("busi_gender_train_app").setMaster("local[*]")
    val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    val taskId: String = args(0) // unused for now; kept for CLI compatibility
    val taskDate: String = args(1)

    // NOTE(review): taskDate is interpolated straight into the SQL text below.
    // It comes from the command line, so its format should be validated upstream.
    val sql =
      s"""
         |  with  user_cate1
         | as
         | (
         | select user_id, category1_id, during_time from dwd_page_log pl
         | join dim_sku_info si  on    page_item=id
         | where page_id='good_detail' and page_item_type='sku_id' and  si.dt='$taskDate'
         |  and  pl.dt='$taskDate'
         |  ),
         |  user_gender
         |  as(
         |    select id ,gender  from dim_user_info where dt='9999-99-99' and   gender <>''  and gender is not null
         |  )
         |  select user_id, top1_c1 ,top2_c1 ,top3_c1 ,male_dur_time,female_dur_time,ug.gender
         |  from
         |  (
         |  select user_id,  sum(if(rk_ct=1,category1_id,0)) top1_c1,sum(if(rk_ct=2,category1_id,0))  top2_c1 ,sum(if(rk_ct=3,category1_id,0))  top3_c1 ,
         |   sum(if(category1_id in (3,4,16) ,sum_dur_time,0)) male_dur_time,
         |   sum(if(category1_id in (8,12,15),sum_dur_time,0)) female_dur_time
         |  from
         |  (
         |    select  user_id,category1_id, count(*) ct ,sum(during_time) sum_dur_time,
         |    row_number()over(partition by user_id  order by  count(*) desc  )  rk_ct
         |    from user_cate1
         |     group by user_id,category1_id
         | ) user_cate1_ct
         | where rk_ct<=3
         | group by user_id
         | )  user_feature
         |  join user_gender ug on ug.id = user_feature.user_id
       """.stripMargin

    try {
      // 1. Extract the feature/label dataset.
      println(sql)
      println("提数  sql")
      sparkSession.sql("use gmall2021")
      val dataFrame: DataFrame = sparkSession.sql(sql)

      // 2. Initialise the pipeline (decision tree: max depth 6, 32 bins,
      //    min 2 instances per node, min info gain 0.02; up to 20 categories
      //    treated as categorical features).
      println(" 初始化流水线")
      val myPipeline: MyPipeline = new MyPipeline()
        .setFeatureColNames(Array("top1_c1", "top2_c1", "top3_c1", "male_dur_time", "female_dur_time"))
        .setLabelColName("gender").setMaxCategories(20)
        .setMaxDepth(6).setMaxBins(32).setMinInstancesPerNode(2).setMinInfoGain(0.02).init()

      // 3. Split into training and test sets.
      println("  数据切分  训练集 测试集")
      val Array(trainDF, testDF) = dataFrame.randomSplit(Array(0.8, 0.2))

      // 4. Train.
      println(" 训练")
      trainDF.show(1000, false)
      myPipeline.train(trainDF)

      // 5. Print the tree and feature weights for inspection.
      // (was mislabelled " 初始化流水线" — copy-paste from step 2)
      println(" 打印树 观察特征权重")
      myPipeline.printFeatureWeight()
      myPipeline.printTree()

      // 6. Predict on the held-out test set.
      println(" 模拟预测")
      testDF.show(1000, false)
      val predictedDF: DataFrame = myPipeline.predict(testDF)

      // 7. Print the evaluation report.
      println(" 打印评估报告")
      myPipeline.printEvaluateReport(predictedDF)

      // 8. Persist the model to the configured path.
      println(" 保存模型")
      val properties: Properties = MyPropertiesUtil.load("config.properties")
      val modelPath: String = properties.getProperty("model.path")
      myPipeline.saveModel(modelPath)
    } finally {
      // Release Spark resources even if extraction or training fails.
      sparkSession.stop()
    }
  }

}
