package com.hkbigdata.userprofile.app

import com.hkbigdata.userprofile.common.dao.TagInfoDAO
import com.hkbigdata.userprofile.common.util.MyPropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

/**
 * @author liuanbo
 * @created 2024-04-17-15:49
 * @see 2194550857@qq.com
 *
 */
/*      1  要组合哪些标签表？  show tables (不准确)
         最好 查询 tag_info  join  task_info
          获得启用状态的标签列表
      2  建立宽表
           表名     user_tag_merge_20210609  ......
           字段     uid ,  <tag_code1> ,<tag_code2>
         考虑到每天的标签数是不一样的，那么宽表的字段也不一样，所以不使用一张固定的表，而是每天建一张新表，每天的字段可以不同。

      3  合并数据进宽表
            利用  pivot 进行行转列操作。*/
object TaskTagMergeApp {

  /**
   * Hive DDL for the day's wide table: `uid` plus one string column per tag code.
   * A new table is created every day because the set of enabled tags (and thus
   * the column set) can differ from day to day.
   */
  private def buildCreateSql(db: String, tableName: String, tagCodes: Seq[String], hdfsPath: String): String = {
    val columns = tagCodes.map(code => s"`$code` string").mkString(", ")
    s"""
       |create table if not exists `$db`.`$tableName` ( uid string, $columns )
       |comment '$tableName'
       |ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'
       |LOCATION '$hdfsPath/$db/$tableName'
       |""".stripMargin
  }

  /**
   * Row-to-column merge: union the per-tag tables (each tag lives in its own
   * table named after the tag code, schema uid/tag_code/tag_value) and pivot
   * tag_code into columns, producing one row per uid.
   */
  private def buildInsertSql(db: String, tableName: String, tagCodes: Seq[String]): String = {
    // group by dedupes repeated (uid, tag_value) rows before the pivot
    val unionSql = tagCodes
      .map(code => s"select uid, '$code' as tag_code, tag_value from $code group by uid, tag_code, tag_value")
      .mkString(" union all ")
    val inList = tagCodes.map(code => s"'$code'").mkString(",")
    // INSERT OVERWRITE TABLE (with the TABLE keyword) is the syntax accepted by all Spark/Hive versions
    s"""insert overwrite table $db.$tableName
       |select * from ( $unionSql )
       |pivot ( concat_ws(',', collect_list(tag_value)) for tag_code in ($inList) )
       |""".stripMargin
  }

  /**
   * Entry point. Steps:
   *  1. fetch the enabled tag list (tag_info join task_info — `show tables` would be inaccurate),
   *  2. create the per-day wide table user_tag_merge_&lt;yyyyMMdd&gt;,
   *  3. pivot the individual tag tables into it.
   *
   * @param args args(0) = task id (scheduler calling convention, unused here),
   *             args(1) = business date in yyyy-MM-dd format
   */
  def main(args: Array[String]): Unit = {
    require(args.length >= 2, "usage: TaskTagMergeApp <taskId> <date yyyy-MM-dd>")
    val date = args(1) // args(0) is the task id — part of the scheduler contract but not used below
    println(date)

    val conf = new SparkConf().setAppName("task merge")
    // .setMaster("local[*]")  // uncomment for local debugging; on the cluster the master
    //                         // is injected by the scheduler (oozie / azkaban / airflow)
    val session = SparkSession.builder().config(conf).enableHiveSupport().getOrCreate()
    try {
      // 1. Enabled tag list.
      val tagInfoes = TagInfoDAO.getTaskTagMergeOntask()
      tagInfoes.foreach(println)
      // An empty tag list would generate invalid DDL "( uid string , )" and an empty pivot IN-list.
      require(tagInfoes.nonEmpty, s"no enabled tags for $date; refusing to build an empty merge table")

      val tagCodes = tagInfoes.map(_.tagCode.toLowerCase)

      val properties = MyPropertiesUtil.load("config.properties")
      val hdfsPath = properties.getProperty("hdfs-store.path")
      val userprofiledb = properties.getProperty("user-profile.dbname")

      // 2. Create the day's wide table.
      val tableName = s"user_tag_merge_${date.replace("-", "")}"
      val createSql = buildCreateSql(userprofiledb, tableName, tagCodes, hdfsPath)
      println(createSql)
      session.sql(createSql)

      // 3. Merge the tag data into the wide table via pivot.
      val insertSql = buildInsertSql(userprofiledb, tableName, tagCodes)
      println(insertSql)
      session.sql("use " + userprofiledb)
      session.sql(insertSql)
    } finally {
      session.stop() // release cluster resources even when a SQL statement fails
    }
  }
}
