package com.atguigu.userprofile.app

import com.atguigu.userprofile.bean.TagInfo
import com.atguigu.userprofile.dao.TagInfoDao
import com.atguigu.userprofile.util.MyPropertiesUtil
import java.util.Properties
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

/**
 * @Author: cpw
 * @Date: 2021/8/2 16:30
 * @Version 1.0
 */
object TaskMergeApp {

    // Merges all per-tag tables for one date into a single wide table.
    //
    // Steps:
    //   1. Load the list of enabled tag definitions (MySQL `tag_info`).
    //   2. The wide table's columns differ from day to day, so a fresh table is
    //      created per run, suffixed with the date, e.g. user_tag_merge_20210731.
    //   3. Build one SQL that UNION ALLs every tag table and pivots rows into
    //      columns, then overwrite the wide table with the result.

    def main(args: Array[String]): Unit = {

        val taskId: String = args(0)   // currently unused; kept for CLI compatibility with other task apps
        val taskDate: String = args(1) // partition date, e.g. "2021-07-31"

        // 0. Spark environment; Hive support is required for the DDL/DML below.
        val sparkConf: SparkConf = new SparkConf().setAppName("task_merge_app") //.setMaster("local[*]")
        val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

        // 1. All enabled tag definitions.
        val tagInfoList: List[TagInfo] = TagInfoDao.getTagListWithOn()

        // 2. Dynamically generate the CREATE TABLE statement:
        //      create table user_tag_merge_YYYYMMDD
        //      (uid string, tag_code1 string, tag_code2 string, ...)
        //      location '$hdfsPath/$upName/user_tag_merge_YYYYMMDD'
        val tagCodeTypeList: List[String] = tagInfoList.map(tagInfo => s"${tagInfo.tagCode.toLowerCase()} string")
        val colsSQL: String = tagCodeTypeList.mkString(",")
        val tableName = s"user_tag_merge_${taskDate.replace("-","")}"

        val properties: Properties = MyPropertiesUtil.load("config.properties")
        val hdfsPath: String = properties.getProperty("hdfs-store.path")
        // NOTE(review): dropped unused `data-warehouse.dbname` lookup (dead local).
        val upName: String = properties.getProperty("user-profile.dbname")

        // FIX: `val` instead of `var` — the statement is never reassigned.
        val createTableSQL =
            s"""
              |  create table if not exists $tableName
              |  (uid string,$colsSQL)
              |  comment '标签宽表'
              |  row format delimited fields terminated by '\\t'
              |  location '$hdfsPath/$upName/$tableName'
              |""".stripMargin

        println(createTableSQL)
        sparkSession.sql(s"use $upName")
        sparkSession.sql(createTableSQL)

        // 3. Dynamically generate the merge SQL:
        //    combine every tag table with UNION ALL, then pivot rows to columns.
        val taskSQLList: List[String] = tagInfoList.map(tagInfo =>
            s"select uid,'${tagInfo.tagCode}' as tag_code, " +
                    s"tag_value from ${tagInfo.tagCode.toLowerCase()} where dt = '${taskDate}'")
        // FIX: pad the separator with spaces — the previous "union all " glued the
        // closing quote of the date literal to the keyword ('2021-07-31'union all).
        val tagUnionSQL: String = taskSQLList.mkString(" union all ")

        val tagCodeList: List[String] = tagInfoList.map(tagInfo => s"'${tagInfo.tagCode}'")
        val tagCodeSQL: String = tagCodeList.mkString(",")

        // PIVOT implicitly groups by the remaining column (uid); each tag_code
        // becomes a column holding the comma-joined tag values.
        val selectSQL =
            s"""
              |  select * from
              |     ($tagUnionSQL) tv pivot ( concat_ws(',', collect_list(tag_value) ) for
              |     tag_code in ($tagCodeSQL))
              |""".stripMargin

        // FIX: `table` keyword added — INSERT OVERWRITE TABLE is required by the
        // Hive / Spark 2.x SQL grammar (optional only from Spark 3 onward).
        val insertSQL = s"insert overwrite table $tableName $selectSQL"

        println(insertSQL)
        sparkSession.sql(insertSQL)

    }

}
