package com.guchenbo.spark.sql


/**
 * @author guchenbo
 * @date 2021/6/25
 */
/**
 * Experiment driver that reads `model_manager.model_info` from MySQL over
 * Spark's JDBC data source and prints the result.
 */
object MysqlReader {

  def main(args: Array[String]): Unit = {
    val spark = SparkUtils.sparkSession("Spark Pi")
    read(spark)
  }

  // Deployment configuration; defaults target a local run. Mutable on purpose
  // so yarn() can flip them before the session is created.
  var master: String = "local"
  var jars: Array[String] = Array("")

  /** Switches the deployment configuration to a YARN cluster run. */
  def yarn(): Unit = {
    master = "yarn"
    jars = Array("hdfs:///user/turing/algorithm/prod/modelpaas/algoso/model-pass-data-exchange-1-0-3.jar")
  }

  /**
   * Loads `model_manager.model_info` via JDBC, range-partitioned on
   * `gmt_create`, and prints the resulting DataFrame with `df.show()`.
   *
   * @param spark the active Spark session used to build the JDBC reader
   */
  def read(spark: org.apache.spark.sql.SparkSession): Unit = {
    println("read table")
    // Kept for the commented-out transform experiment below (colConf).
    val src = "GMT_CREATE"
    val dbtype = "TIMESTAMP"
    //    val dbtype = "DATE"

    // Earlier experiments used nested union/group-by subqueries here; only the
    // plain table scan is live, so the dead intermediate assignments are gone.
    val sql = "select * from model_manager.model_info"

    // SECURITY NOTE(review): database host and credentials are hard-coded in
    // source. Move them to configuration / a secrets store before this code
    // leaves the experimentation stage.
    val df = spark.read
      .format("jdbc")
      .option("url", "jdbc:mysql://10.57.16.13:3306/model_manager_zhongyin?useSSL=false")
      .option("user", "model_paas_manager")
      .option("password", "Liu_0123456789")
      .option("driver", "com.mysql.jdbc.Driver")
      // Wrap the query as a derived table so it is accepted as `dbtable`.
      .option("dbtable", s"($sql) sub")
      .option("numPartitions", 5)
      .option("fetchsize", 1000)
      // Partition reads by the timestamp column; bounds only shape the
      // partition ranges, they do NOT filter rows.
      .option("partitionColumn", "gmt_create")
      .option("lowerBound", "2020-08-18 14:13:31")
      .option("upperBound", "2020-08-24 17:32:46")
      .load()

    df.show()
    //    val fns = df.schema.fieldNames
    //    var map: Map[String, Integer] = Map()
    //    for (i <- fns.indices) map += (fns(i) -> i)
    //    var col = ""
    //    var idx = map(col)
    //    val colConf = Map("transform" -> src, "transformType" -> dbtype)
    //    val writeMaps = Map(src -> colConf)
    //    val rdd = df.rdd.map(row => mapMethod(row, fns, writeMaps))
    //    val array = rdd.collect()
    //
    //    println("find data size " + array.length)
    //    println(array.mkString("Array(", ", ", ")"))
  }

  /**
   * Extracts a date/timestamp value from `row` for each column that has an
   * entry in `writeMaps`, trying the accessor matching the configured
   * `transformType` first and falling back to the other one on failure.
   *
   * @param row       the Spark row to read from
   * @param fns       field names of the row, positionally aligned with it
   * @param writeMaps per-column config; entries must contain "transformType"
   * @return the value of the LAST configured column encountered (each match
   *         overwrites the previous), or null if none matched / value was null
   */
  def mapMethod(row: org.apache.spark.sql.Row, fns: Array[String], writeMaps: Map[String, Map[String, String]]): java.util.Date = {
    import scala.util.control.NonFatal

    // null is retained (rather than Option) because callers rely on the
    // java.util.Date return type of this method.
    var date: java.util.Date = null

    for (i <- fns.indices) {
      val name = fns(i)
      if (writeMaps.contains(name)) {
        val conf = writeMaps(name)
        val dbt = conf("transformType")
        if (dbt.equalsIgnoreCase("date")) {
          // Fixed: catch NonFatal instead of Exception so fatal VM errors
          // propagate; log message is now a readable string, not a tuple.
          date =
            try row.getDate(i)
            catch {
              case NonFatal(e) =>
                println(s"getDate error then getTimestamp: $e")
                row.getTimestamp(i)
            }
        } else {
          date =
            try row.getTimestamp(i)
            catch {
              case NonFatal(e) =>
                println(s"getTimestamp error then getDate: $e")
                row.getDate(i)
            }
        }
        if (null == date) {
          println("date is null")
        }
      }
    }
    date
  }
}
