package com.atguigu.userprofile.app

import java.util.Properties

import com.atguigu.userprofile.bean.TagInfo
import com.atguigu.userprofile.dao.TagInfoDao
import com.atguigu.userprofile.util.MyPropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object TaskMergeApp {

  // Daily job that merges all per-tag tables into one wide "user tag" table:
  //   1. Load the list of enabled tag definitions from MySQL (tag_info).
  //   2. Because the wide table's columns differ per day, roll a fresh table
  //      whose name is suffixed with the task date, e.g. user_tag_merge_20210731,
  //      from a dynamically generated CREATE TABLE statement.
  //   3. Dynamically build a SQL statement that UNION ALLs every tag table and
  //      pivots rows to columns, then insert the result into the wide table.

  def main(args: Array[String]): Unit = {
    // args(0) = task id (part of the invocation contract, currently unused here),
    // args(1) = task date in yyyy-MM-dd form.
    require(args.length >= 2, "usage: TaskMergeApp <taskId> <taskDate:yyyy-MM-dd>")
    val taskId: String = args(0)
    val taskDate: String = args(1)

    // 0. Spark runtime environment (Hive support is required for the DDL below).
    val sparkConf: SparkConf = new SparkConf().setAppName("task_merge_app") //.setMaster("local[*]")
    // BUG FIX: the key previously contained a trailing space
    // ("spark.sql.shuffle.partitions ") — Spark matches config keys literally,
    // so the shuffle-partition setting was silently ignored.
    sparkConf.set("spark.sql.shuffle.partitions", "20")
    val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    // 1. All enabled tag definitions (mysql: tag_info).
    val tagInfoList: List[TagInfo] = TagInfoDao.getTagListWithOn()

    // 2. Build today's CREATE TABLE statement:
    //      create table user_tag_merge_$yyyymmdd (uid string, tag_code1 string, ...)
    //      comment '标签宽表'
    //      ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
    //      location '$hdfsPath/$upName/user_tag_merge_$yyyymmdd'
    val tagCodeTypeList: List[String] =
      tagInfoList.map(tagInfo => s"${tagInfo.tagCode.toLowerCase()} string")
    val colsSQL: String = tagCodeTypeList.mkString(",")
    val tableName = s"user_tag_merge_${taskDate.replace("-", "")}"

    val properties: Properties = MyPropertiesUtil.load("config.properties")
    val hdfsPath: String = properties.getProperty("hdfs-store.path")
    val upName: String = properties.getProperty("user-profile.dbname")

    // NOTE(review): on a re-run with a changed tag set the schema of an existing
    // table is NOT updated ("if not exists"); a DROP TABLE step would be needed
    // to regenerate the table when the column set changes — TODO confirm intent.
    val createTableSQL =
      s"""
         |      create table  if not exists $tableName
         |      ( uid string , $colsSQL   )
         |      comment  '标签宽表'
         |        ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'
         |        location  '$hdfsPath/$upName/$tableName'
       """.stripMargin

    println(createTableSQL)
    sparkSession.sql(s"use $upName")
    sparkSession.sql(createTableSQL)

    // 3. Combine all tag tables and pivot rows to columns:
    //    select * from (
    //      select uid, 'TG_X' as tag_code, tag_value from tg_x where dt='...'
    //      union all
    //      ...
    //    ) tv pivot ( concat_ws(',', collect_list(tag_value))
    //                 for tag_code in ('TG_X', ...) )
    val tagSQLList: List[String] = tagInfoList.map { tagInfo =>
      s"select uid, '${tagInfo.tagCode}' as  tag_code  ,tag_value  from  ${tagInfo.tagCode.toLowerCase()}  where dt='${taskDate}'"
    }
    val tagUnionSQL: String = tagSQLList.mkString(" union all ")

    // The pivot IN-list must match the tag_code literals emitted above exactly.
    val tagCodeList: List[String] = tagInfoList.map(tagInfo => s"'${tagInfo.tagCode}'")
    val tagCodeSQL: String = tagCodeList.mkString(",")

    val selectSQL =
      s"""
         |select /*+ REPARTITION(8) */ * from
         |    ( $tagUnionSQL )  tv  pivot (  concat_ws(',',  collect_list(tag_value)  )
         |    for  tag_code in (  $tagCodeSQL ))
         |
       """.stripMargin

    val insertSQL = s" insert overwrite table $tableName  $selectSQL"

    println(insertSQL)
    sparkSession.sql(insertSQL)
  }

}
