package com.atguigu.userprofile.app

import java.util.Properties

import com.atguigu.userprofile.bean.TagInfo
import com.atguigu.userprofile.dao.TagInfoDAO
import com.atguigu.userprofile.util.{ClickhouseUtil, MyPropertiesUtil}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object TaskExportApp {
  // Exports the daily tag wide table from Hive into ClickHouse:
  //   1. (Re)create the ClickHouse wide table (recreated every day, mirroring
  //      the Hive wide-table schema: uid + one String column per enabled tag).
  //   2. Read the Hive wide table.
  //   3. Append its rows into the ClickHouse table over JDBC.
  //
  // args(0): task id (currently unused; kept for scheduler compatibility)
  // args(1): task date in yyyy-MM-dd format (selects the daily table)
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName("task_export_app") //.setMaster("local[*]")
    val sparkSession: SparkSession =
      SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    // Fail fast with a usable message instead of ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "usage: TaskExportApp <taskId> <taskDate(yyyy-MM-dd)>")
    val taskId: String = args(0)
    val taskDate: String = args(1)

    // Daily table name, e.g. user_tag_merge_20240101.
    val tableName = "user_tag_merge_" + taskDate.replace("-", "")

    // Fetch all *enabled* tag definitions (MySQL tag_info table) and build the
    // column list of the wide table: one String column per lower-cased tag code.
    val tagInfoList: List[TagInfo] = TagInfoDAO.getTagInfoListWithOn()
    val tagListSQL: String =
      tagInfoList.map(tagInfo => s"${tagInfo.tagCode.toLowerCase} String").mkString(",")

    val properties: Properties = MyPropertiesUtil.load("config.properties")
    val upDBname: String = properties.getProperty("user-profile.dbname")
    val clickhouseURL: String = properties.getProperty("clickhouse.url")

    // 1. (Re)create the ClickHouse wide table.
    //    Not partitioned; MergeTree primary key defaults to the ORDER BY key (uid).
    val createTableSQL =
      s"""
         |create table   $upDBname.$tableName
         |       ( uid String ,
         |         $tagListSQL
         |       )engine=MergeTree
         |     order by uid
       """.stripMargin

    println(createTableSQL)
    // "if exists" so the first run of a new day (table absent) does not fail.
    // Both DDL statements execute on the driver.
    ClickhouseUtil.executeSql(s"drop table if exists $upDBname.$tableName")
    ClickhouseUtil.executeSql(createTableSQL)

    // 2. Read the Hive wide table (same database/table name as the ClickHouse one).
    val dataFrame: DataFrame = sparkSession.sql(s"select * from $upDBname.$tableName")

    // 3. Write into ClickHouse; the JDBC writes run on the executors.
    dataFrame.write.mode(SaveMode.Append)
      .option("batchsize", "1000")
      .option("isolationLevel", "NONE") // ClickHouse has no transactions
      .option("numPartitions", "4")     // cap write concurrency
      .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
      .jdbc(clickhouseURL, tableName, new Properties())
  }

}
