package com.atguigu.userprofile.ml.app

import java.util.Properties

import com.atguigu.userprofile.bean.TagInfo
import com.atguigu.userprofile.dao.TagDAO
import com.atguigu.userprofile.ml.pipeline.MyPipeline
import com.atguigu.userprofile.util.MyPropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}

object BusiGenderPredictApp {

  /**
   * Entry point. Pipeline:
   *   1. Build a per-user feature set (top category-1 preferences and
   *      male/female-leaning viewing durations) for users whose gender is
   *      unknown (`gender is null`) on the given partition date.
   *   2. Load the previously trained decision-tree pipeline from the path
   *      configured in `config.properties` (`model.path`).
   *   3. Predict gender for those users, convert predictions back to the
   *      original label values, and write them into the tag table.
   *
   * Expected arguments: args(0) = taskId (tag task id), args(1) = taskDate
   * (partition date, e.g. 2021-06-18). Both default to "" when absent,
   * matching the original behavior for a zero-argument launch.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf()
      .setAppName("busi_gender_train")
      // TODO(review): hard-coded local master — remove before cluster
      // deployment and supply --master via spark-submit instead.
      .setMaster("local[*]")
    val sparkSession: SparkSession = SparkSession.builder()
      .config(sparkConf).enableHiveSupport().getOrCreate()

    // FIX: the original guarded only `args.length > 0` and then read args(1),
    // which throws ArrayIndexOutOfBoundsException when exactly one argument
    // is passed. Require both before reading either; otherwise keep the
    // original "" defaults.
    val (taskId, taskDate) =
      if (args.length >= 2) (args(0), args(1)) else ("", "")

    // Extract the feature data set via SQL:
    //  - user_cate: per-user category-1 page views with dwell time, from the
    //    detail-page log joined to the sku dimension for the task date.
    //  - user_gender: users whose gender is still unknown (these are the
    //    prediction targets; dt='9999-99-99' is the current full snapshot).
    //  - outer query: pivots each user's top-3 category-1 ids (by view count)
    //    plus total dwell time in male-leaning (3,4,16) and female-leaning
    //    (8,12,15) categories, restricted to the unknown-gender users.
    println("把特征数据集取出")
    val querySql =
      s"""
         | with  user_cate as
         | (
         |     select  user_id ,category1_id c1 ,during_time from
         |     dwd_page_log pl join dim_sku_info ski
         |     on  pl.page_item=ski.id
         |     where  ski.dt='$taskDate' and  pl.dt='$taskDate'
         |     and page_id='good_detail' and  page_item_type='sku_id'
         |  ) ,
         |   user_gender as
         |  (
         |   select  id ,gender  from dim_user_info
         |    where dt='9999-99-99' and  gender is null
         |  )
         |  select   user_id as uid , top1_c1,top2_c1,top3_c1 ,male_c1_dur,female_c1_dur
         |  from
         |  (
         |  select user_id ,
         |  sum(if(ct_rank=1,c1,0)) top1_c1,
         |  sum(if(ct_rank=2,c1,0)) top2_c1,
         |  sum(if(ct_rank=3,c1,0)) top3_c1,
         |  sum(sum_dur),
         |   sum( if(c1 in (3,4,16),sum_dur,0  ))  male_c1_dur ,
         |   sum( if(c1 in (8,12,15),sum_dur,0  ))  female_c1_dur
         |  from
         |  (
         |    select user_id ,c1 ,sum(during_time) sum_dur,count(*),
         |    row_number()over(partition by user_id order by count(*) desc ) ct_rank
         |    from user_cate
         |    group by user_id ,c1
         |     order by user_id
         |   ) user_c1
         |   group by user_id
         | )user_ct  join user_gender  ug on ug.id=user_ct.user_id
       """.stripMargin
    println(querySql)
    // TODO(review): database name is hard-coded here but read from
    // config (data-warehouse.dbname) in insertTag — unify when confirmed.
    sparkSession.sql("use gmall2021")
    val featureDataFrame: DataFrame = sparkSession.sql(querySql)

    // Load the trained pipeline model from the configured HDFS/local path.
    println("取出模型")
    val properties: Properties = MyPropertiesUtil.load("config.properties")
    val modelPath: String = properties.getProperty("model.path")
    val myPipeline: MyPipeline = new MyPipeline().load(modelPath)
    println(myPipeline.getDecisionTreeString())
    println(myPipeline.getFeatureImportances())

    // Run the prediction over the feature set.
    println("预测")
    val predictedDataFrame: DataFrame = myPipeline.predict(featureDataFrame)
    predictedDataFrame.show(100, false)
    println("预测行数：" + predictedDataFrame.count())

    // Map the numeric prediction column back to the original label values
    // ('F'/'M'), cache it (it is read twice: show + insert SQL), and expose
    // it to SQL. FIX: use createOrReplaceTempView instead of createTempView
    // so a reused session does not fail with "temp view already exists".
    println("转换为原值")
    val predictedWithOrginDF: DataFrame = myPipeline.convertOriginLabel(predictedDataFrame)
    predictedWithOrginDF.cache().createOrReplaceTempView("predicted_gender")
    predictedWithOrginDF.show(100, false)

    insertTag(taskId, taskDate, sparkSession)
  }

  /**
   * Creates (if needed) the dedicated gender-tag table and loads it for the
   * given partition date with:
   *   - predicted genders ('F'/'M' mapped to '女'/'男') for users whose
   *     gender was unknown, from the `predicted_gender` temp view registered
   *     by [[main]], union-ed with
   *   - the recorded gender for every user absent from the prediction set
   *     (the correlated count()=0 subquery keeps real values where no
   *     prediction exists).
   *
   * @param taskId       tag task id used to look up tag metadata (code/name)
   * @param taskDate     partition date to overwrite
   * @param sparkSession active Hive-enabled session; must already hold the
   *                     `predicted_gender` temp view
   */
  def insertTag(taskId: String, taskDate: String, sparkSession: SparkSession): Unit = {
    // 1. Create the tag table (one row per user: uid + tag value),
    //    partitioned by date and stored under the configured HDFS root.
    val tagInfo: TagInfo = TagDAO.getTagInfo(taskId)
    val properties: Properties = MyPropertiesUtil.load("config.properties")
    val hdfsStorePath: String = properties.getProperty("hdfs-store.path")
    val upDBName: String = properties.getProperty("user-profile.dbname")
    val dwDBName: String = properties.getProperty("data-warehouse.dbname")

    val createTableSQL =
      s"""
         |create  table if not exists ${tagInfo.tagCode.toLowerCase}
         |(   uid string ,
         |    tag_value  string
         |) comment '${tagInfo.tagName}'
         |  ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'
         |  PARTITIONED BY (  `dt`  string )
         |   location '$hdfsStorePath/$upDBName/${tagInfo.tagCode.toLowerCase}'
        """.stripMargin

    println(createTableSQL)

    // 2. Overwrite the date partition: predicted values for unknown-gender
    //    users, plus the stored gender for everyone not in the prediction set.
    val insertSQL =
      s"""
         |insert overwrite table ${tagInfo.tagCode.toLowerCase}  partition (dt='$taskDate')
         |    select  uid , case prediction_origin when 'F' then '女' when 'M' then '男' end as  gender  from predicted_gender pg
         |      union all
         |    select id  ,gender   from $dwDBName.dim_user_info ui
         |    where   dt ='9999-99-99'  and (select count(*) from predicted_gender pdg where ui.id =pdg.uid )=0
       """.stripMargin
    println(insertSQL)
    sparkSession.sql(s"use $upDBName")
    sparkSession.sql(createTableSQL)
    sparkSession.sql(insertSQL)
  }
}
