package com.atguigu.userprofile.app

import com.atguigu.userprofile.common.bean.TagInfo
import com.atguigu.userprofile.common.dao.TagInfoDAO
import com.atguigu.userprofile.common.util.MyPropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

import java.util.Properties

object TaskMergeApp {

  // Task 2: merge all per-tag tables into one wide tag table.
  //   1. (Re)create the wide table: (uid string, <tag_code_1> string, <tag_code_2> string, ...)
  //   2. Populate it with a single INSERT ... SELECT: UNION ALL every tag table
  //      (adding a synthetic tag_code column) and PIVOT on that column.
  def main(args: Array[String]): Unit = {

    // args(0): task id (currently unused here; kept for scheduler compatibility)
    // args(1): business date in yyyy-MM-dd
    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "usage: TaskMergeApp <taskId> <busiDate(yyyy-MM-dd)>")
    val taskId = args(0)
    val busiDate = args(1)

    val sparkConf: SparkConf = new SparkConf().setAppName("task_merge_app") //.setMaster("local[*]")
    val sparkSession: SparkSession =
      SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    try {
      val properties: Properties = MyPropertiesUtil.load("config.properties")
      val hdfsPath: String = properties.getProperty("hdfs-store.path")
      val upDbName: String = properties.getProperty("user-profile.dbname")
      val dwDbName: String = properties.getProperty("data-warehouse.dbname")

      // Only tags that are switched on participate in the merge.
      val tagInfoList: List[TagInfo] = TagInfoDAO.getTagInfoWithOnList()
      // An empty list would otherwise produce malformed DDL like "(uid string , )".
      require(tagInfoList.nonEmpty, "no enabled tags found; nothing to merge")

      // Column definition list: "<tag_code> string,<tag_code> string,..."
      val tagCodeSQL: String =
        tagInfoList.map(tagInfo => s"${tagInfo.tagCode.toLowerCase} string").mkString(",")

      // One wide table per business date, e.g. user_tag_merge_20220520.
      val tableName = s"user_tag_merge_${busiDate.replace("-", "")}"

      // Drop-and-recreate so reruns for the same date are idempotent.
      val dropTableSQL = s" drop table if exists $upDbName.$tableName"
      println(dropTableSQL)
      sparkSession.sql(dropTableSQL)

      // Plain-text, tab-delimited table stored under the profile HDFS root.
      val createTableSQL =
        s"""
           |     create table if not exists  $upDbName.$tableName
           |        (uid string ,$tagCodeSQL )
           |    ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'
           |    location '$hdfsPath/$upDbName/$tableName'
           |""".stripMargin
      println(createTableSQL)
      sparkSession.sql(createTableSQL)

      // PIVOT plan:
      //   dimension column : uid
      //   aggregate column : tag_value (max() -- at most one value per uid per tag)
      //   pivot column     : tag_code, a synthetic column added per source table
      //
      // Step 1 -- UNION ALL every tag table, tagging each row with its tag code:
      //   select uid,tag_value,'<tag_code>' as tag_code from <tag_code> where dt='<busiDate>'
      val tagSQLList: List[String] = tagInfoList.map { tagInfo =>
        s"""
           |select uid,tag_value,'${tagInfo.tagCode.toLowerCase}' as tag_code
           |     from ${tagInfo.tagCode.toLowerCase} where dt='${busiDate}'
           |""".stripMargin
      }
      val unionAllSql: String = tagSQLList.mkString(" union all ")

      // Step 2 -- pivot the stacked rows so each tag code becomes a column.
      val pivotTagCodeSQL =
        tagInfoList.map(tagInfo => s"'${tagInfo.tagCode.toLowerCase}'").mkString(",")
      val pivotSQL =
        s"""
           |select * from ( $unionAllSql)
           |pivot ( max(tag_value)  for tag_code
           |    in ($pivotTagCodeSQL)   )
           |""".stripMargin

      // The `use` is still required: the UNION ALL subqueries reference the tag
      // tables unqualified, so they must resolve inside the profile database.
      sparkSession.sql(s"use $upDbName")

      // Qualify the target table explicitly rather than relying on the current
      // database set by `use` above.
      val insertSQL = s"insert overwrite table $upDbName.$tableName $pivotSQL"
      println(insertSQL)
      sparkSession.sql(insertSQL)
    } finally {
      // Always release the session, even when a SQL statement fails.
      sparkSession.stop()
    }
  }

}
