package com.zeta.edw.venus

import java.net.InetAddress
import java.sql.{Connection, DriverManager}
import java.text.SimpleDateFormat
import java.util.Properties

import org.apache.hive.service.cli.HiveSQLException
import org.apache.spark.SparkConf
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame, SparkSession}
import org.backuity.clist.Cli
import org.slf4j.{Logger, LoggerFactory}
import util.retry.blocking._

import scala.concurrent.duration._

/**
 * Base trait for Oracle-to-Hive extraction jobs.
 *
 * Concrete jobs implement [[process]] (and optionally the before/after hooks)
 * while this trait drives the common flow: parse CLI args, probe the Oracle
 * table schema with retries, read the data over JDBC, normalise column types
 * and names, and hand the resulting DataFrame to the job.
 */
trait App {
  val logger: Logger = LoggerFactory.getLogger(getClass)

  /** Retry policy for the Oracle schema probe: 3 attempts, 10s fixed back-off. */
  implicit val retryStrategy: FixedWaitRetryStrategy = RetryStrategy.fixedBackOff(
    retryDuration = 10.seconds, maxAttempts = 3
  )

  /** Oracle JDBC connection properties: driver class, fetch size, timeouts (ms). */
  lazy val jdbcProperties: Properties = {
    val props = new Properties()
    props.put("driver", "oracle.jdbc.OracleDriver")
    props.put("fetchsize", "10000")
    props.put("oracle.jdbc.ReadTimeout", s"${2 * 60 * 1000}") // 2 minutes
    props.put("oracle.net.CONNECT_TIMEOUT", s"${10 * 1000}") // 10 seconds
    props
  }

  /** Parses CLI args into `config` and, when valid, runs the extraction job. */
  def startApp(args: Array[String], config: Config): Unit = {
    Cli.parse(args).withCommand(config)(c => run(c))
  }

  /**
   * Job driver: probe the Oracle table schema (with retry), read the table,
   * and hand a type-converted DataFrame to [[process]]. The SparkSession is
   * always stopped, even when the job fails.
   */
  def run(config: Config): Unit = {

    beforeJob(config)

    implicit val spark: SparkSession = sparkSession(config)

    try {

      logger.info("get table schema from oracle - start")

      // "rownum = 1" restricts the probe to a single row: only the schema is needed.
      val schema = Retry(
        spark.read.option("user", config.jdbcUsername)
          .option("password", config.jdbcPassword)
          .jdbc(config.jdbcUrl, config.jdbcTable, Array("rownum = 1"), jdbcProperties).schema
      ) match {
        case Success(x) => x
        case Failure(e) => throw new Exception(e)
      }

      logger.info("get table schema from oracle - end")

      // Cached because the DataFrame is consumed twice: the emptiness probe and process().
      val df = readData(config, schema).repartition(config.readRePartition).cache()
      if (df.head(1).nonEmpty) {
        beforeProcess(config)
        process(config, df.select(df.schema.fields.map(convertColumn): _*))
        //afterProcess(config)
        // refreshMetadataToImpala(config)
      } else {
        logger.info(s"No data found: ${config.jdbcTable}")
      }

    } finally {
      spark.stop()
    }

  }

  /** Hook invoked before the SparkSession is created. Default: no-op. */
  def beforeJob(config: Config): Unit = {
    //do nothing
  }

  /**
   * Reads the configured Oracle table over JDBC, optionally partitioned by the
   * predicate returned from [[predicates]] (a null predicate yields a single
   * unpartitioned read).
   */
  def readData(config: Config, schema: StructType)(implicit spark: SparkSession): DataFrame = {
    spark.read.option("user", config.jdbcUsername)
      .option("password", config.jdbcPassword)
      .jdbc(config.jdbcUrl, config.jdbcTable, Array(predicates(config, schema)), jdbcProperties)
  }

  /** WHERE-clause predicate used to partition the JDBC read; override to customise. */
  def predicates(config: Config, schema: StructType): String = null

  /** Hook invoked after data is found but before [[process]]. Default: no-op. */
  def beforeProcess(config: Config): Unit = {
    // do nothing
  }

  /** Job-specific handling of the extracted, type-converted DataFrame. */
  def process(config: Config, df: DataFrame): Unit

  /** Hook invoked after [[process]] completes. Default: no-op. */
  def afterProcess(config: Config): Unit = {
    // do nothing
  }

  /**
   * Maps an Oracle column to a Hive-friendly Spark column:
   *  - DECIMAL(p, 0) becomes Int (p <= 10) or Long;
   *  - DECIMAL(p, s>0) becomes Float (p <= 14) or Double;
   *  - DATE becomes Timestamp (Oracle DATE carries a time component);
   *  - every column is aliased to its lower-case name.
   */
  def convertColumn(field: StructField): Column = {
    val name = field.name
    field.dataType match {
      case dt: DecimalType =>
        val target =
          if (dt.scale == 0) {
            if (dt.precision <= 10) IntegerType else LongType
          } else {
            if (dt.precision <= 14) FloatType else DoubleType
          }
        col(name).cast(target).as(name.toLowerCase())
      case _: DateType =>
        // Lower-case alias added for consistency with every other branch.
        col(name).cast(TimestampType).as(name.toLowerCase())
      case _ => col(name).as(name.toLowerCase())
    }
  }

  /** Builds a Hive-enabled SparkSession configured from `config`. */
  def sparkSession(config: Config): SparkSession = {
    val sparkConf: SparkConf = new SparkConf().setAppName(s"Extract ${config.jdbcTable}")
    // Master is only set when supplied (e.g. local runs); otherwise spark-submit decides.
    config.sparkMaster.foreach(master => sparkConf.setMaster(master))
    SparkSession.builder
      .config(sparkConf)
      .config("spark.sql.parquet.compression.codec", "snappy")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("hive.metastore.uris", config.hiveMetastoreUris)
      .config("fs.defaultFS", config.hdfsUrl)
      .enableHiveSupport()
      .getOrCreate()
  }

//  def refreshMetadataToImpala(config: Config): Unit = {
//    val conn = getImpalaConnection(config)
//    val stmt = conn.createStatement()
//    try {
//      stmt.execute(s"REFRESH ${config.hiveDatabase}.${config.hiveTable}")
//    } catch {
//      case ex: HiveSQLException =>
//        if (ex.getMessage.contains("Table does not exist")) {
//          stmt.execute(s"INVALIDATE METADATA ${config.hiveDatabase}.${config.hiveTable}")
//        } else {
//          throw new Exception(ex)
//        }
//      case ex: Exception => throw new Exception(ex)
//    } finally {
//      stmt.close()
//      conn.close()
//    }
//  }

//  private def getImpalaConnection(config: Config): Connection = {
//    Class.forName("org.apache.hive.jdbc.HiveDriver")
//    val hosts = config.impalaHosts.split(',').map(_.trim)
//    for (host <- hosts) {
//      try {
//        val isReachable = InetAddress.getByName(host).isReachable(2000)
//        if (isReachable) {
//          return DriverManager.getConnection(s"jdbc:hive2://$host:21050/;auth=noSasl", "impala", null)
//        } else {
//          logger.warn(s"host $host can not reachable")
//        }
//      } catch {
//        case e: Exception => logger.warn(e.getMessage)
//      }
//    }
//
//    throw new Exception(s"can not get impala jdbc connection on hosts ${config.impalaHosts}")
//  }
}
