package com.hkbigdata.userprofile.app

import com.hkbigdata.userprofile.common.constcode.ConstCode
import com.hkbigdata.userprofile.common.dao.{TagInfoDAO, TaskInfoDAO, TaskTagRuleDAO}
import com.hkbigdata.userprofile.common.util.MyPropertiesUtil
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

/**
 * Reads a tag task definition (tag metadata, SQL, value-mapping rules) by task id,
 * creates the per-tag Hive table if absent, and overwrites the partition for the
 * given business date with freshly computed tag values.
 *
 * @author liuanbo
 * @since 2024-04-12
 * @see 2194550857@qq.com
 */
object TaskTagSqlApp {

  /**
   * Entry point for the tag-building Spark job.
   *
   * Expected arguments (appended to spark-submit, e.g. `... xxx.jar 1 2021-06-08`):
   *  - args(0): task id
   *  - args(1): business date (normally the previous day)
   *
   * Steps:
   *  1. Load the task definition (tag info, task SQL, value-mapping rules) by task id.
   *  2. Create the per-tag Hive table if it does not exist (one tag == one table).
   *  3. Run the task SQL against the warehouse and overwrite the date partition
   *     of the tag table with the computed `(uid, tag_value)` rows.
   */
  def main(args: Array[String]): Unit = {
    System.setProperty("HADOOP_USER_NAME", "hkbigdata")
    val conf = new SparkConf().setAppName("task-sql")
    //  .setMaster("local[*]")  // enable only for local debugging
    val sparkSession = SparkSession.builder().config(conf).enableHiveSupport().getOrCreate()

    // Fail fast with a clear usage message instead of ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "Usage: TaskTagSqlApp <taskId> <businessDate>")
    val taskId = args(0)
    val date = args(1)
    println(taskId)
    println(date)

    // 1. Load the task definition from the metadata store.
    val tagInfo = TagInfoDAO.TagInfoDaoBytaskid(taskId)
    println(tagInfo)
    val taskInfo = TaskInfoDAO.TaskInfoDAObytaskid(taskId)
    val taskTagRules = TaskTagRuleDAO.TaskTagRuleDAOBytaskid(taskId)
    println(taskTagRules)

    // 2. Each tag is stored in its own table; create it if this is a new tag.
    //    create table if not exists $tagCode ( uid string , tag_value $tagValueType )
    //      comment '${tagInfo.tagName}' PARTITIONED BY (`dt` STRING)
    //      ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
    //      LOCATION '$hdfsPath/$userProfileDb/$tagCode'
    val tableName = tagInfo.tagCode.toLowerCase()
    val tagValueType = tagInfo.tagValueType match {
      case ConstCode.TAG_VALUE_TYPE_STRING  => "STRING"
      case ConstCode.TAG_VALUE_TYPE_LONG    => "BIGINT"
      case ConstCode.TAG_VALUE_TYPE_DECIMAL => "DECIMAL(16,2)"
      case ConstCode.TAG_VALUE_TYPE_DATE    => "STRING"
      case other =>
        // Explicit failure instead of an opaque scala.MatchError on unknown types.
        throw new IllegalArgumentException(s"Unsupported tag value type: $other")
    }

    val properties = MyPropertiesUtil.load("config.properties")
    val hdfsPath = properties.getProperty("hdfs-store.path")
    val warehouseDb = properties.getProperty("data-warehouse.dbname")
    val userProfileDb = properties.getProperty("user-profile.dbname")

    val createSQL =
      s"""
         |create table   if not exists `$userProfileDb`.`$tableName` ( uid string , tag_value $tagValueType )
         |     comment '${tagInfo.tagName}' PARTITIONED BY (`dt` STRING)
         |      ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'
         |      LOCATION    '$hdfsPath/$userProfileDb/$tableName'
         |""".stripMargin
    println(createSQL)
    // FIX: was hard-coded "use user_profile"; now uses the configured profile db
    // so config and code cannot silently diverge.
    sparkSession.sql(s"use $userProfileDb")
    sparkSession.sql(createSQL)

    // 3. Compute tag values from the warehouse and overwrite this date's partition.
    //    insert overwrite table $tagCode partition (dt='$date')
    //    select uid, case query_value when ... end as tag_value from ($sql)
    val taskSql = taskInfo.taskSql.replace("$dt", date)
    // Map raw query_value to sub-tag values via CASE WHEN when rules exist;
    // otherwise pass query_value through unchanged.
    // NOTE(review): rule values are interpolated into SQL unescaped — they come
    // from the internal metadata DB, but a quote in subTagValue would break the
    // statement; consider escaping if rules are ever user-editable.
    val caseWhenSql =
      if (taskTagRules.nonEmpty) {
        val whenClauses =
          taskTagRules.map(rule => s" when '${rule.queryValue}' then '${rule.subTagValue}' ")
        " case query_value " + whenClauses.mkString(" ") + " end  as  tag_value"
      } else {
        "query_value as tag_value"
      }
    val selectSql = s"select uid ,$caseWhenSql from ($taskSql) as ts"
    println(selectSql)

    // FIX: added the TABLE keyword — Hive requires `INSERT OVERWRITE TABLE`
    // for partitioned inserts (matches the template in the comment above).
    val insertSql =
      s"insert overwrite table $userProfileDb.$tableName partition(dt='$date') " + selectSql

    sparkSession.sql("use " + warehouseDb)
    sparkSession.sql(insertSql)

    // Release the application's cluster resources.
    sparkSession.stop()
  }
}
