package com.atguigu.userprofile.app

import com.atguigu.userprofile.bean.TagInfo
import com.atguigu.userprofile.dao.TagInfoDAO
import com.atguigu.userprofile.util.MyPropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

import java.util.Properties

object TaskMergeApp {

  // Merges all enabled single-tag tables into one wide table for a business date.
  //
  // 1. Query MySQL (tag_info) for the enabled tags; each tag_code doubles as the
  //    name of that tag's single-tag Hive table.
  // 2. (Re)create the wide table — one table per day, date used as suffix,
  //    unpartitioned, text format, with an explicit HDFS location.
  // 3. Run a single pivot SQL statement that unions all single-tag tables and
  //    rotates tag_code rows into wide-table columns:
  //      dimension column = uid, pivot column = tag_code, aggregate = max(tag_value).
  def main(args: Array[String]): Unit = {
    // args(0) = task id (currently unused here; kept for scheduler compatibility)
    // args(1) = business date, formatted yyyy-MM-dd
    require(args.length >= 2, "usage: TaskMergeApp <taskId> <busiDate(yyyy-MM-dd)>")
    val taskId: String = args(0)
    val busiDate: String = args(1)

    val sparkConf = new SparkConf().setAppName("task_merge_app") //.setMaster("local[*]")
    val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    // 1. Enabled tags from MySQL; tag_code (lower-cased) is both the single-tag
    //    table name and the wide-table column name.
    val tagInfoList: List[TagInfo] = TagInfoDAO.getTagInfoListWithOn()
    // Fail fast with a clear message: an empty tag list would otherwise produce
    // malformed SQL — "(uid string ,)" in the DDL and an empty pivot IN-list.
    require(tagInfoList.nonEmpty, "no enabled tags found in tag_info; nothing to merge")

    // Normalize once instead of calling toLowerCase in every later map.
    val tagCodes: List[String] = tagInfoList.map(_.tagCode.toLowerCase)

    // 2. Wide table: one per day, date (dashes stripped) as suffix.
    //    e.g. busiDate 2023-06-01 -> up_tag_merge_20230601
    val tableName = s"up_tag_merge_${busiDate.replace("-", "")}"
    // Column list for the DDL: tagCode1 string, tagCode2 string, ...
    val fieldNameSQL: String = tagCodes.map(code => s"$code string").mkString(",")

    val properties: Properties = MyPropertiesUtil.load("config.properties")
    val hdfsPath: String = properties.getProperty("hdfs-store.path")
    val upDbName: String = properties.getProperty("user-profile.dbname")

    sparkSession.sql(s"use $upDbName")
    // Drop-and-recreate keeps the daily table idempotent across reruns.
    val dropSQL = s"drop table if exists $upDbName.$tableName"
    println(dropSQL)
    sparkSession.sql(dropSQL)
    val createTableSQL =
      s"""
         | create table  if not exists $upDbName.$tableName
         | (uid string ,$fieldNameSQL)
         | ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'
         |  location '$hdfsPath/$upDbName/$tableName'
         |""".stripMargin
    println(createTableSQL)
    sparkSession.sql(createTableSQL)

    // 3. One SELECT per tag; the literal tag_code column feeds the pivot.
    //    select uid, '<code>' tag_code, tag_value from <db>.<code> where dt='<busiDate>'
    val tagSQLList: List[String] = tagCodes.map(code =>
      s"select uid,'$code' tag_code,tag_value from $upDbName.$code where dt='$busiDate' ")
    val unionSQL = tagSQLList.mkString(" union all ")

    // Quoted pivot IN-list: 'code1','code2',...
    val tagCodeSQL: String = tagCodes.map(code => s"'$code'").mkString(",")

    val pivotSQL =
      s"""
         |select * from (   $unionSQL )
         | pivot (  max(tag_value) tv   for  tag_code in (${tagCodeSQL}) )
         |""".stripMargin

    sparkSession.sql(s"use $upDbName")
    val insertSQL = s" insert overwrite table $upDbName.$tableName $pivotSQL"
    println(insertSQL)
    sparkSession.sql(insertSQL)
  }

}
