package com.hkbigdata.app

import com.hkbigdata.userprofile.common.bean.TagInfo
import com.hkbigdata.userprofile.common.dao.TagInfoDAO
import com.hkbigdata.userprofile.common.util.{MyClickHouseUtil, MyPropertiesUtil}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

import java.util.Properties

/**
 * @author Clown
 */

/**
 * 1. Create the target table in ClickHouse — one table per day.
 *
 *    Because Hive and ClickHouse are heterogeneous databases, the data
 *    cannot be moved with a single `insert ... select` statement.
 *
 * 2. Read the rows that need to be inserted from Hive.
 *
 * 3. Write them to ClickHouse: Spark writes into the target table via JDBC.
 */
object TakHiveExportCk {
  /**
   * Entry point. Exports one day's merged user-tag wide table from Hive into
   * a per-day ClickHouse table over JDBC.
   *
   * @param args args(0) = task id (currently unused, kept for CLI compatibility),
   *             args(1) = task date in yyyy-MM-dd form
   */
  def main(args: Array[String]): Unit = {
    /**
     * Create the table in ClickHouse — one table per day.
     *
     * Table name / column names come from the tag definition list
     * (mirrors the Hive wide-table definition):
     *   create table if not exists tableName (uid UInt64, tag String, ...)
     *   engine = MergeTree
     * No partitioning needed — the table itself is daily.
     *   primary key uid
     *   order by uid
     */
    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "Usage: TakHiveExportCk <taskId> <taskDate yyyy-MM-dd>")

    System.setProperty("HADOOP_USER_NAME", "hkbigdata")
    val conf: SparkConf = new SparkConf().setAppName("ck")
//          .setMaster("local[*]")
    val sparkSession: SparkSession = SparkSession.builder().config(conf).enableHiveSupport().getOrCreate()

    try {
      val taskId: String = args(0)
      val taskDate: String = args(1)

      // Daily table, e.g. user_tag_merge_20220101.
      val tableName: String = s"user_tag_merge_${taskDate.replace("-", "")}"
      val infoes: List[TagInfo] = TagInfoDAO.getTaskTagMergeOnTask()

      // Build the tag column DDL fragment: "<tag_code> String, <tag_code> String, ...".
      val columns: String = infoes.map((_: TagInfo).tagCode.toLowerCase() + " String").mkString(", ")
      println(columns)

      val createSql: String =
        s"""
           |create table if not exists ${tableName}  ( uid UInt64, $columns )
           |  engine = MergeTree
           |  primary key uid
           |  order by uid
           |""".stripMargin

      MyClickHouseUtil.executeSql(createSql)

      /**
       * config.properties, e.g.:
       * hdfs-store.path=hdfs://hadoop102:8020/user_profile
       * data-warehouse.dbname=gmall
       * user-profile.dbname=user_profile
       */
      val properties: Properties = MyPropertiesUtil.load("config.properties")
      val upDBName: String = properties.getProperty("user-profile.dbname")
      val clickHouseUrl: String = properties.getProperty("clickhouse.url")

      val dataFrame: DataFrame = sparkSession.sql(s"select * from $upDBName.$tableName")
      dataFrame.show(100, truncate = false) // debug preview of the rows being exported

      dataFrame.write
        .mode(SaveMode.Append)
        // NOTE: the Spark JDBC option is "batchsize" — the previous "batch" was silently ignored.
        .option("batchsize", "100")
        .option("isolationLevel", "NONE") // disable transactions (ClickHouse has none)
        .option("numPartitions", "4")  // write concurrency
        .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
        .jdbc(clickHouseUrl, tableName, new Properties())
    } finally {
      // Release the Spark application's resources even if the export fails.
      sparkSession.stop()
    }
  }
}
