package com.atguigu.userprofile.app

import java.util.Properties

import com.atguigu.userprofile.bean.TagInfo
import com.atguigu.userprofile.dao.TagInfoDAO
import com.atguigu.userprofile.util.MyPropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object TaskMergeApp {

  /**
   * Daily tag-merge job.
   *
   * Steps:
   *   1. Drop + recreate the daily "wide" tag table
   *      `user_tag_merge_<yyyyMMdd>` (one string column per enabled tag),
   *      so re-running the task for the same date is idempotent.
   *   2. Union every per-tag daily table into a tall
   *      (uid, tag_code, tag_value) result set, pivot it to one row per uid,
   *      and `insert overwrite` it into the wide table.
   *
   * args(0) = task id (accepted for interface compatibility; not used below)
   * args(1) = task date in "yyyy-MM-dd" form (selects the `dt` partition of
   *           each tag table and dates the wide-table name)
   */
  def main(args: Array[String]): Unit = {

    // Fail fast with a readable message instead of ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "usage: TaskMergeApp <taskId> <taskDate yyyy-MM-dd>")
    val taskId: String = args(0) // kept for launcher compatibility; currently unused
    val taskDate: String = args(1)

    val sparkConf: SparkConf = new SparkConf().setAppName("task_merge_app") //.setMaster("local[*]")
    val sparkSession: SparkSession =
      SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    try {
      // Wide-table name carries the date, e.g. user_tag_merge_20240101.
      val tableName = "user_tag_merge_" + taskDate.replace("-", "")

      // All enabled tag definitions from the MySQL tag_info table. Each tag
      // becomes one column of the wide table, and its lower-cased code is also
      // the name of the per-tag daily source table.
      val tagInfoList: List[TagInfo] = TagInfoDAO.getTagInfoListWithOn()

      val properties: Properties = MyPropertiesUtil.load("config.properties")
      val hdfsStorePath: String = properties.getProperty("hdfs-store.path")
      val upDBname: String = properties.getProperty("user-profile.dbname")
      // NOTE(review): the original also read "data-warehouse.dbname" but never
      // used it; the per-tag source tables below resolve unqualified inside
      // upDBname (via `use` before the insert). Confirm that is intended.

      // 1. Recreate the wide table so the task is idempotent.
      val createTableSQL = buildCreateTableSQL(upDBname, tableName, hdfsStorePath, tagInfoList)
      println(createTableSQL)
      sparkSession.sql(s"drop table if exists  $upDBname.$tableName")
      sparkSession.sql(createTableSQL)

      // 2. Pivot the per-tag tables into the wide table.
      val insertSQL = buildInsertSQL(upDBname, tableName, taskDate, tagInfoList)
      println(insertSQL)
      sparkSession.sql(s"use  $upDBname")
      sparkSession.sql(insertSQL)
    } finally {
      // Release the Spark application whether or not a SQL statement failed.
      sparkSession.stop()
    }
  }

  /**
   * DDL for the wide table: `(uid string, <tag1> string, <tag2> string, ...)`
   * as a tab-delimited text table at the standard HDFS location.
   */
  private def buildCreateTableSQL(upDBname: String,
                                  tableName: String,
                                  hdfsStorePath: String,
                                  tagInfoList: List[TagInfo]): String = {
    // One string column per enabled tag, named by its lower-cased code.
    val tagListSQL: String =
      tagInfoList.map(tagInfo => s"${tagInfo.tagCode.toLowerCase}  string ").mkString(",")
    s"""
       |    create table if not exists $upDBname.$tableName
       |    ( uid string,  $tagListSQL )
       |    comment '标签宽表'
       |   ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'
       |    location '$hdfsStorePath/$upDBname/$tableName'
     """.stripMargin
  }

  /**
   * DML that overwrites the wide table: UNION ALL of every tag's daily
   * partition into a tall (uid, tag_code, tag_value) set, then PIVOT
   * (max(tag_value) per tag_code) to one row per uid.
   */
  private def buildInsertSQL(upDBname: String,
                             tableName: String,
                             taskDate: String,
                             tagInfoList: List[TagInfo]): String = {
    // Tall query: one SELECT per tag table, tagged with its code.
    val tagQuery: String = tagInfoList.map { tagInfo =>
      s"select uid , '${tagInfo.tagCode}' tag_code, tag_value    from  ${tagInfo.tagCode.toLowerCase} where dt='$taskDate'"
    }.mkString(" union all ")

    // Pivot value list: the tag codes, quoted.
    val tagCodeListSQL: String =
      tagInfoList.map(tagInfo => s"'${tagInfo.tagCode}'").mkString(",")

    val selectSQL =
      s"""
         |select * from
         |    (
         |     $tagQuery
         |   )  pivot (  max(tag_value ) as tag_value for tag_code in( $tagCodeListSQL) )
         |
         |
       """.stripMargin
    println(selectSQL)

    // insert overwrite is positional, so pivot output column names need not
    // match the wide table's column names — only the order (driven by
    // tagInfoList in both statements) must agree.
    s" insert overwrite table  $upDBname.$tableName $selectSQL"
  }

}
