package com.atguigu.userprofile.app

import com.atguigu.userprofile.bean
import com.atguigu.userprofile.dao.TagInfoDAO
import com.atguigu.userprofile.util.{MyClickhouseUtil, MyPropertiesUtil}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}

import java.util.Properties
import javax.servlet.jsp.tagext.TagInfo

object TaskExportCk {

  /**
   * Exports the merged user-profile tag wide table for one business date from
   * Hive into ClickHouse.
   *
   * Steps:
   *   1. Query the list of enabled tags — each tag code becomes one ClickHouse column.
   *   2. (Re)create the ClickHouse target table. Same convention as the Hive wide
   *      table: built by the program, one table per day.
   *   3. Read the Hive wide table as a DataFrame. (A driver-side java.util.List
   *      would not survive large exports — it would OOM; the DataFrame streams
   *      rows through the executors instead.)
   *   4. Write the DataFrame to ClickHouse over JDBC.
   *
   * @param args args(0) = task id (currently unused, kept for scheduler
   *             compatibility); args(1) = business date in "yyyy-MM-dd" form
   */
  def main(args: Array[String]): Unit = {
    // Robustness: fail fast with a readable message instead of an
    // ArrayIndexOutOfBoundsException when the scheduler passes too few args.
    require(args.length >= 2, s"usage: TaskExportCk <taskId> <busiDate>, got: [${args.mkString(" ")}]")
    val taskId: String = args(0) // reserved for the scheduler; not used by the export itself
    val busiDate: String = args(1)

    val sparkConf = new SparkConf().setAppName("task_export_ck_app") //.setMaster("local[*]")
    val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    try {
      // 1. Enabled tag list — drives the ClickHouse column set.
      val tagInfoList: List[bean.TagInfo] = TagInfoDAO.getTagInfoListWithOn()

      // 2. (Re)create the ClickHouse table for this date:
      //      create table up_tag_merge_<yyyyMMdd> (uid String, <tagCode String>...)
      //      engine = MergeTree, unpartitioned, order by uid
      //      (primary key may be omitted — it defaults to the order-by key).
      val tableName = s"up_tag_merge_${busiDate.replace("-", "")}"
      val fieldNameSQL: String =
        tagInfoList.map(tagInfo => s"${tagInfo.tagCode.toLowerCase} String").mkString(",")

      val dropTableSQL = s" drop table if exists $tableName"
      println(dropTableSQL)
      MyClickhouseUtil.executeSql(dropTableSQL)

      val createTableSQL =
        s"""
           |      create table $tableName
           |     (uid String , $fieldNameSQL )
           |     engine=  MergeTree
           |     order by  uid
           |""".stripMargin
      println(createTableSQL)
      MyClickhouseUtil.executeSql(createTableSQL)

      // 3. Read the Hive wide table. NOTE(review): assumes the Hive table shares
      //    the ClickHouse naming convention up_tag_merge_<yyyyMMdd> — confirm
      //    against the task that builds the wide table.
      val properties: Properties = MyPropertiesUtil.load("config.properties")
      val upDbName: String = properties.getProperty("user-profile.dbname")
      val dataFrame: DataFrame = sparkSession.sql(s"select * from $upDbName.$tableName")

      // 4. Write to ClickHouse via Spark's JDBC sink.
      val clickhouseUrl: String = properties.getProperty("clickhouse.url")
      dataFrame.write.mode(SaveMode.Append)
        .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
        .option("batchsize", "500")       // rows per JDBC batch insert
        .option("isolationLevel", "NONE") // ClickHouse has no transactions — disable them
        .option("numPartitions", "4")     // cap concurrent JDBC connections
        .jdbc(clickhouseUrl, tableName, new Properties())
    } finally {
      // Fix: the original never stopped the session, leaving the application
      // registered with the cluster manager until the JVM happened to exit.
      sparkSession.stop()
    }
  }

}
