package com.sugon.bt

import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable.ArrayBuffer


/**
  * Usage:
  * spark-submit --master yarn --driver-memory 6g --executor-memory 6g --num-executors 10 \
  *   --driver-class-path /path/to/clickhouse-jdbc-0.2.4.jar /path/to/app.jar dbName tableName desTableName
  */
object ClickHouseP {

  /**
    * Copies a ClickHouse table into a Hive table stored as ORC.
    *
    * Expected arguments (positional):
    *   args(0) — source database name
    *   args(1) — source table name
    *   args(2) — destination table suffix (written as ysk.ysk_my_&lt;suffix&gt;)
    *
    * @throws IllegalArgumentException if fewer than 3 arguments are supplied
    */
  def main(args: Array[String]): Unit = {

    if (args.length < 3) {
      throw new IllegalArgumentException("输入参数错误,参数依次为 '源库库名 源表 目标表'")
    }
    // Positional input parameters: source DB, source table, destination table suffix.
    val sourceDbName = args(0)
    val sourceTable = args(1)
    val desTable = args(2)

    val spark: SparkSession = SparkSession.builder()
      .appName("ClickHouseToHive_" + sourceTable)
      .enableHiveSupport()
      .getOrCreate()

    // JDBC endpoint of the ClickHouse node.
    // NOTE(review): host and port are hard-coded; consider promoting them to arguments.
    val url: String = s"jdbc:clickhouse://slave14:8123/$sourceDbName"
    val tableName: String = sourceTable

    // One predicate per Spark partition so the read is issued as 10 parallel
    // JDBC queries, bucketed by the digit found in S_B050012 from position 10.
    // (A previous revision partitioned by hard-coded date ranges instead.)
    // NOTE(review): the 2-arg substring(s, 10) returns the ENTIRE tail from
    // position 10, not a single character — any row whose tail is longer than
    // one character matches no predicate and is silently dropped. If a
    // one-character bucket was intended, use substring(S_B050012, 10, 1).
    // Confirm against the actual column format.
    val predicates: Array[String] =
      (0 until 10).map(i => s"substring(S_B050012,10)='$i'").toArray

    // Connection credentials (empty: relies on ClickHouse default / no auth).
    val prop = new java.util.Properties
    prop.setProperty("user", "")
    prop.setProperty("password", "")

    // Read the source table, one partition per predicate.
    val jdbcDF: DataFrame = spark.read.jdbc(url, tableName, predicates, prop)

    println("partitions" + jdbcDF.rdd.getNumPartitions)

    // Overwrite the Hive table as ORC. The original chained a zero-argument
    // partitionBy(), which declares no partition columns and was removed —
    // the resulting table is unpartitioned either way.
    jdbcDF.write.mode("overwrite").format("orc")
      .saveAsTable("ysk.ysk_my_" + desTable.toLowerCase)

  }

}
