package com.atguigu.userprofile.app

import java.util.Properties

import com.atguigu.userprofile.bean.TagInfo
import com.atguigu.userprofile.dao.TagInfoDao
import com.atguigu.userprofile.util.{ClickhouseUtil, MyPropertiesUtil}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object TaskExportCkApp {

  // Export job: pushes the merged tag wide table into ClickHouse.
  //
  // Pipeline:
  //   1. (Re)create a per-date target table in ClickHouse — one String column
  //      per enabled tag, MergeTree engine, ordered by uid.
  //   2. Read the wide table from Hive via Spark SQL.
  //   3. Append the resulting DataFrame into ClickHouse over JDBC.
  //
  // Launcher contract: args(0) = task id, args(1) = business date (yyyy-MM-dd).
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName("task_export_app") //.setMaster("local[*]")
    val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    try {
      val taskId: String = args(0) // kept to preserve the launcher's argument contract; unused below
      val taskDate: String = args(1)
      // Dated ClickHouse table name, e.g. user_tag_merge_20210601.
      val tableName = s"user_tag_merge_${taskDate.replace("-", "")}"

      val properties: Properties = MyPropertiesUtil.load("config.properties")
      val upName: String = properties.getProperty("user-profile.dbname")
      val clickhouseURL: String = properties.getProperty("clickhouse.url")

      // 1. Build the column list from all currently enabled tags:
      //    one "<tag_code> String" column per tag.
      val tagInfoList: List[TagInfo] = TagInfoDao.getTagListWithOn()
      val tagCodeSQL: String = tagInfoList
        .map(tagInfo => s"${tagInfo.tagCode.toLowerCase} String ")
        .mkString(",")

      // Drop-then-create so re-runs for the same date are idempotent.
      // primary key(uid) is omitted: ClickHouse defaults it to the ORDER BY key.
      val dropTableSQL = s"drop table if exists $upName.$tableName"
      val createTableSQL =
        s"""
           | create table $upName.$tableName(
           |  uid UInt64,
           |  $tagCodeSQL )
           |  engine=MergeTree
           |  order by uid
         """.stripMargin

      println(dropTableSQL)
      println(createTableSQL)
      ClickhouseUtil.executeSql(dropTableSQL)
      ClickhouseUtil.executeSql(createTableSQL)

      // 2. Read the wide table from Hive.
      // NOTE(review): this selects the *dated* table name, which was only created
      // in ClickHouse above. The Hive-side wide table is presumably the undated
      // base table filtered by a date partition (e.g. where dt='<taskDate>') —
      // confirm the Hive table name before relying on this query.
      val sql = s"select * from   $upName.$tableName"
      val dataFrame: DataFrame = sparkSession.sql(sql)

      // 3. Append the DataFrame into ClickHouse over JDBC.
      dataFrame.write.mode(SaveMode.Append)
        .option("batchsize", "200")        // batch inserts to cut round-trips
        .option("isolationLevel", "NONE")  // ClickHouse has no transactions
        .option("numPartitions", "12")     // parallel JDBC writers
        .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
        .jdbc(clickhouseURL, tableName, new Properties())
    } finally {
      // Release Spark application resources even if the export fails.
      sparkSession.stop()
    }
  }

}
