package com.atguigu.userprofile.ml.app

import java.util.Properties

import com.atguigu.userprofile.common.bean.TagInfo
import com.atguigu.userprofile.common.constants.ConstCode
import com.atguigu.userprofile.common.dao.TagInfoDAO
import com.atguigu.userprofile.common.util.MyPropertiesUtil
import com.atguigu.userprofile.ml.pipeline.MyPipeline
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}

object PredictGenderApp {

  /**
   * Gender-prediction batch job.
   *
   * Prediction stage:
   *   1. load the trained pipeline model from HDFS
   *   2. extract feature data from the data warehouse (Hive)
   *   3. predict with the pipeline
   *   4. convert the indexed prediction back to the original label value
   *   5. write the final result into the per-tag result table
   *
   * Expected arguments:
   *   args(0) = taskId   (used to look up the tag definition)
   *   args(1) = taskDate (business/partition date, yyyy-MM-dd)
   */
  def main(args: Array[String]): Unit = {

    val sparkConf: SparkConf = new SparkConf().setAppName("task_predict_gender_app") //.setMaster("local[*]")
    val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    // Fail fast with a clear message instead of ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "usage: PredictGenderApp <taskId> <taskDate>")
    val taskId: String = args(0)
    val taskDate: String = args(1)

    // 1. load the trained model
    println("1  load model")
    val properties: Properties = MyPropertiesUtil.load("config.properties")
    val modelPath: String = properties.getProperty("model.gmall_gender.path")
    val myPipeline: MyPipeline = new MyPipeline().loadModel(modelPath)

    // 2. extract feature data
    println("2  extract feature data")
    // BUGFIX: the page-log / sku partitions were hard-coded to '2021-05-16'
    // (leftover from development); use the task's business date so the job
    // reads the correct partition for any run date.
    // Features per user: time spent on "male" vs "female" category-1 pages,
    // plus the top-3 most-visited category-1 ids.
    val sql =
      s"""
         |with
         |user_gender as
         |(
         |  select id, gender from dim_user_info where dt='9999-99-99'
         |),
         |visit_c1 as
         |(
         |  select user_id, category1_id, during_time
         |  from dwd_page_log pl
         |  join dim_sku_info si on si.id = pl.page_item
         |  where pl.dt='$taskDate'
         |    and pl.page_id='good_detail'
         |    and pl.page_item_type='sku_id'
         |    and si.dt='$taskDate'
         |)
         |select
         |  user_visit.user_id,
         |  male_dur,
         |  female_dur,
         |  c1_top1,
         |  c1_top2,
         |  c1_top3
         |from
         |(
         |  select user_id,
         |    sum(if(category1_id in (3,4,6)   , during_time, 0)) male_dur,
         |    sum(if(category1_id in (8,12,15) , during_time, 0)) female_dur,
         |    sum(if(rk=1, category1_id, 0)) c1_top1,
         |    sum(if(rk=2, category1_id, 0)) c1_top2,
         |    sum(if(rk=3, category1_id, 0)) c1_top3
         |  from
         |  (
         |    select user_id, category1_id, count(*) visit_ct, sum(during_time) during_time,
         |           row_number() over (partition by user_id order by count(*) desc) rk
         |    from visit_c1
         |    group by user_id, category1_id
         |  ) user_c1_rk
         |  group by user_id
         |) user_visit
         |inner join user_gender on user_gender.id = user_visit.user_id
       """.stripMargin

    println(sql)
    val hdfsPath: String = properties.getProperty("hdfs-store.path")
    val dwName: String = properties.getProperty("data-warehouse.dbname")
    val upName: String = properties.getProperty("user-profile.dbname")

    sparkSession.sql(s"use $dwName")
    val dataFrame: DataFrame = sparkSession.sql(sql)

    // 3. predict
    println("3  predict")
    val predictedDF: DataFrame = myPipeline.predict(dataFrame)

    // 4. convert the indexed prediction back to the original label value
    println("4  convert prediction back to original label")
    val convertedDF: DataFrame = myPipeline.convertOrgin(predictedDF)
    // cache: the frame is consumed twice (show for inspection + insert below)
    convertedDF.cache().show(10000, false)

    // 5. write the tag table
    insertTag(taskId, taskDate, upName, hdfsPath, convertedDF, sparkSession)
  }

  /**
   * Creates the per-tag result table (if absent) and overwrites the task-date
   * partition with the predicted gender values.
   *
   * @param taskId      task id used to resolve the tag definition
   * @param taskDate    partition date (yyyy-MM-dd) to overwrite
   * @param upDBName    user-profile database name
   * @param hdfsPath    HDFS root under which tag tables are stored
   * @param convertedDF prediction result; must expose `user_id` and
   *                    `prediction_origin` ('F' / 'M') columns
   * @param sparkSession active session with Hive support enabled
   */
  def insertTag(taskId: String, taskDate: String, upDBName: String, hdfsPath: String, convertedDF: DataFrame, sparkSession: SparkSession): Unit = {

    val tagInfo: TagInfo = TagInfoDAO.getTagInfoByTask(taskId)
    // Tag code doubles as the Hive table name, hence the lower-casing.
    val tableName = tagInfo.tagCode.toLowerCase()

    // Map the tag's declared value type to a Hive column type.
    // BUGFIX: the original match was non-exhaustive and would throw a bare
    // MatchError on an unknown code; fail with an explicit message instead.
    val tagValueType = tagInfo.tagValueType match {
      case ConstCode.TAG_VALUE_TYPE_STRING  => "STRING"
      case ConstCode.TAG_VALUE_TYPE_LONG    => "BIGINT"
      case ConstCode.TAG_VALUE_TYPE_DECIMAL => "DECIMAL(16,2)"
      case ConstCode.TAG_VALUE_TYPE_DATE    => "STRING"
      case other =>
        throw new IllegalArgumentException(s"unsupported tag value type: $other (tag: ${tagInfo.tagCode})")
    }

    val createTableSQL =
      s"""
         | create table if not exists $upDBName.$tableName
         |( uid string ,tag_value $tagValueType )
         |comment '${tagInfo.tagName}'
         |partitioned by ( dt string)
         |ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'
         | location '$hdfsPath/$upDBName/$tableName'
       """.stripMargin

    println(createTableSQL)
    sparkSession.sql(createTableSQL)

    // createOrReplaceTempView: plain createTempView throws AnalysisException
    // if the view already exists in this session (e.g. on retry).
    convertedDF.createOrReplaceTempView("prediction_table")

    // BUGFIX: added the TABLE keyword — "insert overwrite <tbl>" without it is
    // rejected by Hive and by Spark SQL before 3.0.
    val insertSelectSQL =
      s"""
         | insert overwrite table $upDBName.$tableName partition(dt='$taskDate')
         | select user_id,case prediction_origin when 'F' then '女' when 'M' then '男' end tag_value
         | from prediction_table
       """.stripMargin
    println(insertSelectSQL)

    sparkSession.sql(insertSelectSQL)
  }

}
