package com.atguigu.userprofile.app

import java.util.Properties

import com.atguigu.userprofile.bean.TagInfo
import com.atguigu.userprofile.dao.TagDAO
import com.atguigu.userprofile.util.MyPropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}

object TagMergeApp {

  // Merges the per-tag result tables into one dated "wide" tag table.
  // Steps:
  //   1. Query the tag-definition DB (MySQL) for the tags to merge.
  //   2. Create the wide table user_tag_merge_<yyyyMMdd> — its columns differ
  //      per day: uid plus one string column per tag code, stored as
  //      '\t'-delimited text under <hdfsStorePath>/<upDBName>/<tableName>.
  //   3. Union every tag table (uid, tag_code, tag_value) for the task date,
  //      pivot so each tag code becomes a column, and overwrite the wide table.

  def main(args: Array[String]): Unit = {

    val sparkConf: SparkConf = new SparkConf().setAppName("tag_merge_app")
    //  .setMaster("local[*]")  // enable for local debugging only
    val sparkSession: SparkSession = SparkSession.builder()
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()

    // 1. Tags that should be merged, read from the tag-definition DB (MySQL).
    val tagList: List[TagInfo] = TagDAO.getTagInfoOnTaskList()

    // Task date arrives as yyyy-MM-dd; the table suffix uses yyyyMMdd.
    // NOTE(review): the date is read from args(1) — confirm the submit script
    // passes it as the second argument.
    require(args.length > 1, "usage: TagMergeApp <arg0> <taskDate yyyy-MM-dd>")
    val taskDateOrigin: String = args(1)
    val taskDate: String = taskDateOrigin.replace("-", "")
    val tableName = s"user_tag_merge_$taskDate"

    val properties: Properties = MyPropertiesUtil.load("config.properties")
    val hdfsStorePath: String = properties.getProperty("hdfs-store.path")
    val upDBName: String = properties.getProperty("user-profile.dbname")

    // 2. Build the column list: one lower-cased tag code per column, all String.
    val colString: String = tagList.map(_.tagCode.toLowerCase() + " String").mkString(",")

    sparkSession.sql("use " + upDBName)
    // Drop first: yesterday's wide table has a different column set, so
    // "create if not exists" alone would silently keep the stale schema.
    sparkSession.sql(s" drop  table if  exists $tableName")
    val createSQL =
      s""" create  table if not exists $tableName ( uid String, $colString )
         | comment '标签宽表'
         | ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'
         | location '$hdfsStorePath/$upDBName/$tableName'
       """.stripMargin
    println(createSQL)
    sparkSession.sql(createSQL)

    // 3. Union all individual tag tables for the task date, e.g.:
    //    select uid, 'tg_person_base_gender' tag_code, tag_value
    //      from tg_person_base_gender where dt='2021-05-16'
    val tagUnionSql: String = tagList.map { tagInfo =>
      s"""select  uid, '${tagInfo.tagCode.toLowerCase}' tag_code ,tag_value  from ${tagInfo.tagCode.toLowerCase}  where dt='$taskDateOrigin'"""
    }.mkString(" union all ")

    val tagCodes: String = tagList.map("'" + _.tagCode.toLowerCase + "'").mkString(",")

    // Pivot tag_code rows into columns; multiple values per (uid, tag) are
    // joined with ','.
    val selectSql =
      s"""
         |select * from
         |    (    $tagUnionSql   )
         |     pivot (    concat_ws(',',collect_list(tag_value))  for  tag_code in ($tagCodes))
       """.stripMargin
    println(selectSql)

    // Write the pivoted result straight into the wide table. (The previous
    // version also built an unused DataFrame from selectSql — dead code,
    // removed.)
    val insertSQL = s"insert overwrite table $tableName  $selectSql"
    println(insertSQL)
    sparkSession.sql(insertSQL)

  }

}
