package com.atguigu.userprofile.app

import com.atguigu.userprofile.common.bean.TagInfo
import com.atguigu.userprofile.common.dao.{TagInfoDAO, TaskInfoDAO}
import com.atguigu.userprofile.common.util.PropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

import java.util.Properties

object TaskMerge {

  private val properties: Properties = PropertiesUtil.load("config.properties")
  val HDFS_PATH: String = properties.getProperty("hdfs-store.path")
  val UP_DBNAME: String = properties.getProperty("user-profile.dbname")
  val DW_DBNAME: String = properties.getProperty("data-warehouse.dbname")

  // Goal: merge the individual per-tag tables into a single wide tag table.
  // 1. Read the business date to compute from the CLI arguments.
  // 2. Query MySQL for every tag to merge; tag_code becomes the column name.
  // 3. Create the wide table dynamically — it cannot be created by hand
  //    because the column set can change from day to day.
  // 4. Build a PIVOT statement to turn tag rows into columns, then insert.
  def main(args: Array[String]): Unit = {
    // ROBUSTNESS FIX: fail with a clear message instead of an opaque
    // ArrayIndexOutOfBoundsException when arguments are missing.
    require(args.length >= 2, "usage: TaskMerge <taskId> <busiDate(yyyy-MM-dd)>")

    // 0. Execution environment.
    //    BUG FIX: the master was unconditionally hard-coded to local[*],
    //    which silently overrode the master chosen by spark-submit when the
    //    job ran on a cluster. Only default to local[*] when no master has
    //    been configured, so local development keeps working unchanged.
    val sparkConf: SparkConf = new SparkConf().setAppName("task_merge_app")
    if (!sparkConf.contains("spark.master")) sparkConf.setMaster("local[*]")
    val sparkSession: SparkSession =
      SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    try {
      // 1. Business date. taskId is accepted to keep the CLI contract stable,
      //    even though this job does not use it.
      val taskId: String = args(0)
      val busiDate: String = args(1)

      // 2. All tags to be merged; tag_code supplies the wide-table columns.
      val tagInfoList: List[TagInfo] = TagInfoDAO.getTagInfoList()

      // 3. (Re)create the wide table: no partitions, plain-text storage,
      //    tab-delimited, located under the configured HDFS path, e.g.
      //      create table if not exists up_tag_merge_20200614
      //      (uid string, <tag_code> string, ...)
      //      ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
      //      location '<HDFS_PATH>/<UP_DBNAME>/<tableName>'
      val tableName = s"up_tag_merge_${busiDate.replace("-", "")}"

      // One "<tag_code> string" column definition per tag, comma-joined.
      val tagColumnsSQL: String = tagInfoList
        .map(tagInfo => tagInfo.tagCode.toLowerCase + " string")
        .mkString(",")

      val createTableSQL =
        s"""
           |     create  table if not exists ${tableName}
           |     (uid string ,$tagColumnsSQL  )
           |        ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'
           |      location '${HDFS_PATH}/${UP_DBNAME}/${tableName}'
           |
           |""".stripMargin

      val dropTableSQL =
        s"""
           |drop table if exists  ${tableName}
           |
           |""".stripMargin

      sparkSession.sql(s"use $UP_DBNAME")
      println(dropTableSQL)
      sparkSession.sql(dropTableSQL)

      println(createTableSQL)
      sparkSession.sql(createTableSQL)

      // 4. PIVOT (rows -> columns):
      //      aggregate column: max(tag_value)
      //      dimension column: uid
      //      pivot column/values: tag_code
      //    The input is a tall UNION ALL over every tag table for the date:
      //      select uid,'<code>' as tag_code,tag_value from <code> where dt=...
      //      union all ...
      val unionSQL: String = tagInfoList
        .map(tagInfo =>
          s"select uid,'${tagInfo.tagCode}' as tag_code,tag_value from $UP_DBNAME.${tagInfo.tagCode.toLowerCase()} where dt='$busiDate'")
        .mkString(" union all ")

      // Quoted tag codes for the IN(...) list, in the same order as the wide
      // table's columns so the positional INSERT lines up correctly.
      val tagCodePivotSQL: String =
        tagInfoList.map(tagInfo => s"'${tagInfo.tagCode}'").mkString(",")

      val selectSQL =
        s"""
           |    select * from  ($unionSQL ) tg
           |    pivot ( max(tag_value) as tag_value   for tag_code in( $tagCodePivotSQL) )
           |
           |""".stripMargin

      println(selectSQL)
      val insertSQL = s"insert overwrite table $UP_DBNAME.$tableName  $selectSQL"
      println(insertSQL)
      sparkSession.sql(insertSQL)
    } finally {
      // RESOURCE FIX: the session was never stopped, leaking the Spark
      // context (and its UI/executors) on every run.
      sparkSession.stop()
    }
  }

}
