package com.atguigu.userprofile.app

import java.util.Properties

import com.atguigu.userprofile.common.bean.TagInfo
import com.atguigu.userprofile.common.dao.TagInfoDAO
import com.atguigu.userprofile.common.util.MyPropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object TaskMergeApp {

  //  Plan:
  //  1. CREATE the wide table. Because the set of enabled tags can change every
  //     day, the wide-table schema is unstable, so a fresh table is created per
  //     day, e.g. user_tag_merge_20210516.
  //  2. SELECT from the individual tag tables:
  //     - stitch the narrow tag tables into one tall table with UNION ALL;
  //     - turn the tall table into a wide table with PIVOT (rows -> columns).
  //  3. INSERT the pivoted result into the wide table.

  def main(args: Array[String]): Unit = {

    // Fail fast with a clear message instead of an ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "usage: TaskMergeApp <taskId> <taskDate(yyyy-MM-dd)>")
    val taskId: String = args(0)
    val taskDate: String = args(1)

    val sparkConf: SparkConf = new SparkConf().setAppName("task_merge_app") //.setMaster("local[*]")
    val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    try {
      // All currently enabled tags; each one is both a source table and a
      // column of the wide table.
      val tagInfoList: List[TagInfo] = TagInfoDAO.getTagInfoWithOn()

      // With no enabled tags the generated DDL/DML would be syntactically
      // invalid ("( uid string ,  )", empty pivot IN-list), so skip the run.
      if (tagInfoList.isEmpty) {
        println(s"no enabled tags found; skipping merge for $taskDate (task $taskId)")
      } else {
        mergeTags(sparkSession, taskDate, tagInfoList)
      }
    } finally {
      // Release the Spark application even when SQL generation/execution fails.
      sparkSession.stop()
    }
  }

  /**
   * Builds and runs the daily tag-merge SQL: recreates the dated wide table,
   * stacks every tag table into a tall table with UNION ALL, pivots tag codes
   * into columns, and overwrites the wide table with the result.
   *
   * @param sparkSession Hive-enabled session used to execute the SQL
   * @param taskDate     partition date of the source tag tables (yyyy-MM-dd)
   * @param tagInfoList  non-empty list of enabled tags to merge
   */
  private def mergeTags(sparkSession: SparkSession,
                        taskDate: String,
                        tagInfoList: List[TagInfo]): Unit = {

    // Wide-table name carries the date, e.g. user_tag_merge_20210516.
    val tableName = s"user_tag_merge_${taskDate.replace("-", "")}"

    // Column-definition fragment of the CREATE TABLE: "<tag_code> string,...".
    val tagCodeSQL: String =
      tagInfoList.map(tagInfo => s"${tagInfo.tagCode.toLowerCase} string").mkString(",")

    // Target database name and HDFS root for the wide table's location.
    val properties: Properties = MyPropertiesUtil.load("config.properties")
    val hdfsPath: String = properties.getProperty("hdfs-store.path")
    val upName: String = properties.getProperty("user-profile.dbname")

    //  1. Recreate the daily wide table. No partitions: each day already gets
    //     its own table, which effectively acts as one partition per day.
    val dropTableSQL = s"drop table if exists $tableName"
    println(dropTableSQL)
    sparkSession.sql(s"use $upName")
    sparkSession.sql(dropTableSQL)
    val createTableSQL =
      s"""
         |create  table  $tableName
         |( uid string , $tagCodeSQL )
         | comment '标签宽表'
         |ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'
         |location '$hdfsPath/$upName/$tableName'
       """.stripMargin
    println(createTableSQL)
    sparkSession.sql(createTableSQL)

    //  2.1 Tall table: one "select uid, '<tag_code>', tag_value" per tag
    //      table, glued together with UNION ALL.
    val unionSQL: String = tagInfoList.map { tagInfo =>
      s"""
         |select  uid ,'${tagInfo.tagCode}' tag_code, tag_value
         |  from ${tagInfo.tagCode.toLowerCase}
         |  where dt='$taskDate'
       """.stripMargin
    }.mkString(" union all ")
    println(unionSQL)

    //  2.2 Row-to-column: pivot the tall table so each tag_code becomes a
    //      column. The IN-list order matches tagCodeSQL, so the positional
    //      INSERT below lines the columns up correctly.
    val pivotColValues: String = tagInfoList.map(tagInfo => s"'${tagInfo.tagCode}'").mkString(",")
    val selectSQL =
      s"""
         |select * from
         |  ($unionSQL )usql
         |    pivot (max(tag_value)
         |     as tag_value   for tag_code in ($pivotColValues)  )
         |
       """.stripMargin
    println(selectSQL)

    //  3. Overwrite the wide table with the pivoted result.
    val insertSQL = s"insert overwrite table $tableName  $selectSQL"
    println(insertSQL)
    sparkSession.sql(insertSQL)
  }

}
