package com.sugon.bt

import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Copies a ClickHouse table into a Hive ORC table.
  *
  * Submit with:
  *   spark-submit --master yarn --num-executors 3 SparkDemo-jar-with-dependencies.jar <srcDb> <srcTable> <destTable>
  *
  * Example of a partitioned JDBC read (for large tables):
  * {{{
  * val jdbcDF = spark.read.format("jdbc").options(
  *   Map("url" ->  "jdbc:mysql://localhost:3306/ontime?user=root&password=mysql",
  *   "dbtable" -> "ontime.ontime_sm",
  *   "fetchSize" -> "10000",
  *   "partitionColumn" -> "yeard", "lowerBound" -> "1988", "upperBound" -> "2015", "numPartitions" -> "48"
  *   )).load()
  * }}}
  */
object ClickHouseToHive {

  // Default ClickHouse JDBC endpoint (HTTP interface); overridable via args(3).
  private val DefaultJdbcUrlPrefix = "jdbc:clickhouse://slave14:8123/"

  /**
    * Entry point: reads one ClickHouse table over JDBC and appends it into a
    * Hive-managed ORC table named `ysk.ysk_my_<destTable>` (lower-cased).
    *
    * @param args positional arguments:
    *             args(0) — source database name
    *             args(1) — source table name
    *             args(2) — destination table name suffix
    *             args(3) — optional JDBC URL prefix (defaults to the slave14:8123 endpoint)
    * @throws IllegalArgumentException when fewer than 3 arguments are supplied
    */
  def main(args: Array[String]): Unit = {

    if (args.length < 3) {
      throw new IllegalArgumentException("输入参数错误,参数依次为 '源库库名 源表 目标表'")
    }
    // Positional input parameters.
    val sourceDbName = args(0)
    val sourceTable = args(1)
    val desTable = args(2)
    // Optional endpoint override — backward compatible with the 3-argument invocation.
    val jdbcUrlPrefix = if (args.length > 3) args(3) else DefaultJdbcUrlPrefix

    val spark: SparkSession = SparkSession.builder()
      .appName(s"ClickHouseToHive_$sourceTable")
      //      .master("local[*]")
      .enableHiveSupport()
      .getOrCreate()

    // Ensure the session is always stopped, even when the read or write fails,
    // so YARN resources are released (the original leaked the session on error).
    try {
      val df: DataFrame = spark.read
        .format("jdbc")
        .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
        .option("url", jdbcUrlPrefix + sourceDbName)
        .option("dbtable", sourceTable)
        .load()

      val targetTable = s"ysk.ysk_my_${desTable.toLowerCase}"
      println(targetTable + " begin start")

      // Append into the Hive ORC table; saveAsTable creates it on first run.
      df.write.format("orc").mode("append").saveAsTable(targetTable)
    } finally {
      spark.close()
    }
  }


  /**
    * write example (kept for reference):
    *
    * def insert(spark:SparkSession): Unit ={
    * // ClickHouse client configuration
    * val pro = new java.util.Properties
    *     pro.put("driver", "ru.yandex.clickhouse.ClickHouseDriver")
    * // create sample data
    * import spark.implicits._
    * val df = Seq(Person("yyy",19)).toDS
    * // write to ClickHouse
    *     df.write
    * .mode(SaveMode.Append)
    * .option("batchsize", "20000")
    * .option("isolationLevel", "NONE")
    * .option("numPartitions", "1")
    * .jdbc("jdbc:clickhouse://hadoop:8124/tutorial","test",pro)
    *     spark.stop()
    * }
    */

  /**
    * read example (kept for reference):
    *
    * def select(spark:SparkSession): Unit ={
    *     spark.read
    * .format("jdbc")
    * .option("driver","ru.yandex.clickhouse.ClickHouseDriver")
    * .option("url", "jdbc:clickhouse://hadoop:8124/tutorial")
    * .option("dbtable", "test")
    * .load().show()
    *     spark.stop()
    * }
    */
}
