package com.atguigu.userprofile.app

import java.util.Properties

import com.atguigu.userprofile.bean.TagInfo
import com.atguigu.userprofile.dao.TagInfoDAO
import com.atguigu.userprofile.pipeline.MyPipeline
import com.atguigu.userprofile.util.MyPropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}

object BusiGenderApp {
  // Gender-prediction tagging job:
  //   1. Extract feature data from the warehouse
  //   2. Load the previously trained model
  //   3. Predict with the model
  //   4. Convert predictions back to the original label values
  //   5. Write the result into the per-tag Hive table

  def main(args: Array[String]): Unit = {
    // NOTE(review): master is hard-coded to local[*]; override via spark-submit for cluster runs.
    val sparkConf: SparkConf = new SparkConf().setAppName("task_busi_gender_app").setMaster("local[*]")
    val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    // args(0): task id used to look up tag metadata; args(1): partition date of the warehouse data
    val taskId = args(0)
    val taskDate = args(1)

    // 1. Extract feature data
    println("1 、 提取提取特征数据 ")
    // Per-user features built from good_detail page views joined to sku dimension:
    //   male_dur / female_dur : summed view duration on category-1 ids (3,4,6) / (8,12,15)
    //   top1_c1..top3_c1      : top-3 category-1 ids ranked by view count, then total duration
    // (The redundant ORDER BY inside the re-aggregated subquery was removed — it forced a
    //  useless sort before GROUP BY.)
    val selectSQL =
      s"""
         |with
         |user_info
         |as
         |( select id , gender  from dim_user_info where dt='9999-99-99'    )
         |,
         |visit_c1
         |as
         |(
         |select user_id ,during_time , category1_id  from dwd_page_log pl, dim_sku_info si
         | where  page_id='good_detail' and pl.dt='$taskDate'
         |  and  pl.page_item =si.id  and si.dt='$taskDate'
         |  )
         |select
         |user_info.id ,
         |male_dur,
         |female_dur,
         |top1_c1,
         |top2_c1,
         |top3_c1
         | from user_info  inner join
         |(
         | select  user_id ,
         | sum(if( rk=1 ,  category1_id,0) ) top1_c1,
         | sum(if( rk=2 ,  category1_id,0)) top2_c1,
         | sum(if( rk=3 ,  category1_id,0)) top3_c1 ,
         | sum(if (category1_id in (3,4,6 ) , during_time_sum ,0) ) male_dur,
         |  sum(if (category1_id in (8,12,15 ) , during_time_sum ,0) ) female_dur
         |  from  (
         |select  user_id,category1_id,sum(during_time) during_time_sum,count(*)
         |, row_number()over( partition by user_id order by count(*) desc ,sum(during_time) desc )  rk
         | from visit_c1
         |group by  user_id,category1_id
         |) visit_c1_rk
         |group by user_id
         |) user_visit   on  user_info.id =user_visit.user_id
       """.stripMargin
    sparkSession.sql("use gmall2021")
    val dataFrame: DataFrame = sparkSession.sql(selectSQL)

    // 2. Load the trained model from the configured HDFS path
    println("2、加载已有的模型 ")
    val properties: Properties = MyPropertiesUtil.load("config.properties")
    val modelPath: String = properties.getProperty("model.path.busi_gender")
    val myPipeline: MyPipeline = new MyPipeline().loadModel(modelPath)

    // 3. Predict with the model
    println("3、用模型进行预测 ")
    val predictedDataFrame: DataFrame = myPipeline.predict(dataFrame)

    // 4. Convert numeric predictions back to the original label values ('F'/'M')
    println("4、原值转换 ")
    val convertedDF: DataFrame = myPipeline.convertOrigin(predictedDataFrame)
    // cache: convertedDF is consumed twice (debug show below + insert in saveToTag)
    convertedDF.cache().show(1000, false)

    // 5. Write the tag table
    println("5、写入标签 ")
    saveToTag(convertedDF, taskId, taskDate, sparkSession)
  }

  /**
   * Writes the predicted gender tag into its own Hive tag table.
   *
   * One table per tag: the table name is the lower-cased tag code, the schema is
   * (uid string, tag_value string) partitioned by dt, stored under
   * {hdfs-store.path}/{user-profile.dbname}/{tableName}.
   *
   * @param convertedDF  prediction result; must expose columns `id` and `prediction_origin`
   * @param taskId       task id used to resolve tag metadata (code / display name)
   * @param taskDate     partition date whose data is overwritten
   * @param sparkSession active Hive-enabled session
   */
  def saveToTag(convertedDF: DataFrame, taskId: String, taskDate: String, sparkSession: SparkSession): Unit = {
    val tagInfo: TagInfo = TagInfoDAO.getTagInfoByTaskID(taskId)
    val tableName: String = tagInfo.tagCode.toLowerCase

    val properties: Properties = MyPropertiesUtil.load("config.properties")
    val hdfsStorePath: String = properties.getProperty("hdfs-store.path")
    val upDBname: String = properties.getProperty("user-profile.dbname")

    // "if not exists" keeps daily reruns of the task from failing once the table exists
    val createTableSQL =
      s"""
         |       create  table if not exists $upDBname.$tableName
         |     (uid string , tag_value string )
         |     partitioned by (dt string)
         |     comment '${tagInfo.tagName}'
         |      ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'
         |      location '$hdfsStorePath/$upDBname/$tableName'
       """.stripMargin
    sparkSession.sql(createTableSQL)

    // Expose the predictions to SQL; createOrReplaceTempView is idempotent within the
    // session (plain createTempView throws if the view name already exists).
    convertedDF.createOrReplaceTempView("prediction_table")

    // Map model output back to display values and overwrite the day's partition.
    val insertSQL =
      s"""
         |insert  overwrite table $upDBname.$tableName partition (dt='$taskDate')
         |select  id, case prediction_origin when 'F' then '女' when 'M' then '男' end tag_value from prediction_table
       """.stripMargin
    sparkSession.sql(insertSQL)
  }
}
