package cn.getech.data.development.job

import java.sql.{Connection, DriverManager, ResultSet, ResultSetMetaData, Statement}
import java.util.Properties

import cn.getech.data.development.job.util.{DateTimeUtils, JdbcUtil, ParseDateTime, PropertiesUtils}
import org.apache.spark.SparkConf
import org.apache.spark.sql.types.{BooleanType, DateType, DecimalType, DoubleType, FloatType, IntegerType, LongType, StringType, StructField, StructType, TimestampType}
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import org.slf4j.LoggerFactory

import scala.collection.mutable
import scala.collection.mutable.ListBuffer

object ExportHiveToRDB {
  private val log = LoggerFactory.getLogger(this.getClass)
  private val properties: Properties = PropertiesUtils.getProperties("common.properties")
  var statement: Statement = null
  var connection: Connection = null


  /**
   * Entry point. args(0) = job id (selects the JSON config file on HDFS),
   * args(1) = the time-hour token substituted into date/time placeholders.
   */
  def main(args: Array[String]): Unit = {
    val jobid = args(0)
    val timehour = args(1)

    // Per-job config file written by the scheduling system.
    val hdfsPath = "hdfs:///bdp/jobconfig/4/jobid_" + jobid + ".json"

    val conf = new SparkConf().setAppName("ExportHiveToMysql")
    val spark = SparkSession.builder().config(conf).config("spark.yarn.maxAppAttempts",2).enableHiveSupport().getOrCreate()
    val row = spark.read.json(hdfsPath).first()
    val jobConfig = row.getValuesMap(row.schema.fieldNames)

    // Validate the required configuration entries before doing any work.
    val isCorrect: Boolean = checkJobConfig(jobConfig)
    if (!isCorrect) {
      throw new RuntimeException("please check the job config")
    }

    // Resolve job parameters; date/time placeholders are substituted here.
    val hiveQL = ParseDateTime.replaceDateTime(jobConfig.getOrElse("input_input_content", ""), timehour)
    val hiveUser = jobConfig.getOrElse("rangerUserName", "")
    val hivePass = jobConfig.getOrElse("rangerUserPassword", "")
    val hiveurl = properties.getProperty("hiveserver2_url")

    val jdbcUser = jobConfig.getOrElse("username", "")
    val jdbcPass = JdbcUtil.rsaDecrypt(jobConfig.getOrElse("password", ""))
    val jdbcHost = jobConfig.getOrElse("host", "")
    val jdbcPort = jobConfig.getOrElse("port", "3306")
    val dbType = jobConfig.getOrElse("type", "").toLowerCase
    val driver = jobConfig.getOrElse("driverClass", "")
    val dbName = jobConfig.getOrElse("dbname", "")
    val preSql = ParseDateTime.replaceDateTime(jobConfig.getOrElse("output_pre_statment", ""), timehour)
    val url = JdbcUtil.urlFormat(dbType, jdbcHost, jdbcPort, dbName)

    log.info("hiveQL === " + hiveQL)
    log.info("preSql === " + preSql)

    try {
      // Run the Hive query over HiveServer2 JDBC. Side effect: this opens the
      // object-level connection/statement closed in the finally block below.
      val resultSet: ResultSet = getHiveQueryResult(hiveurl, hiveUser, hivePass, hiveQL)
      log.info(s"success get hive query result  ${DateTimeUtils.getDate(System.currentTimeMillis())} ...")

      // Optional pre-statement against the target database (e.g. a cleanup DELETE).
      if (preSql != "") {
        JdbcUtil.runSQL(driver, preSql, url, jdbcUser, jdbcPass)
      }

      // Convert the result set to DataFrames and write them to the target table.
      resultSet2RDB(resultSet, spark, jobConfig)
    } finally {
      spark.stop()
      // Close each resource independently and on every exit path. The original
      // required BOTH to be non-null (so a failure after the connection was
      // opened but before the statement existed leaked the connection), only
      // ran on the success path, and logged the closes at ERROR level.
      if (statement != null) {
        statement.close()
        log.info("statment is closing...")
      }
      if (connection != null) {
        connection.close()
        log.info("connect is closing...")
      }
    }
  }

  /**
   * 检查配置文件必需项的是否完整
   * @param map 任务配置文件
   * @return boolean 是否必需项完整
   */
  /**
   * Verifies that every mandatory key is present in the job configuration.
   *
   * A key merely has to exist in the map; its value is not inspected.
   *
   * @param map job configuration parsed from the JSON config file
   * @return true when all required keys are present, false otherwise
   */
  def checkJobConfig(map: Map[String, String]): Boolean = {
    // Without any one of these entries the export job cannot run.
    val requiredKeys = Seq(
      "username",
      "dbname",
      "output_table_name",
      "type",
      "host",
      "driverClass",
      "password"
    )
    requiredKeys.forall(map.contains)
  }

  /**
   * 获取hiveserver2JdbcConnect
   * @param url hiveserver2url
   * @param user 登录hive的用户
   * @param passwd 登录hive的密码
   */
  /**
   * Opens a HiveServer2 JDBC connection and executes the given query.
   *
   * Side effect: the Connection and Statement are stored in the object-level
   * fields so that main() can close them once the export has finished; the
   * returned ResultSet stays bound to that open statement.
   *
   * @param url    HiveServer2 JDBC url
   * @param user   hive login user
   * @param passwd hive login password
   * @param hiveQL query to execute (trimmed before execution)
   * @return the query's ResultSet
   */
  def getHiveQueryResult(url: String, user: String, passwd: String, hiveQL: String): ResultSet = {
    // Driver class name comes from common.properties.
    Class.forName(properties.getProperty("hiveserver2_driver"))
    connection = DriverManager.getConnection(url, user, passwd)
    statement = connection.createStatement()
    statement.executeQuery(hiveQL.trim)
  }

  /**
   * 通过列名，列属性获取 StructField
   * @param name 列名
   * @param colType 列属性
   * @return StructField
   */
  /**
   * Maps a JDBC column class name to a Spark StructField.
   *
   * Hive can report qualified column names such as "alias.column"; only the
   * last dot-separated segment is kept as the field name.
   *
   * @param name    column name, possibly qualified with a table alias
   * @param colType fully-qualified Java class name from the JDBC metadata
   * @return nullable StructField carrying the matching Spark type
   * @throws IllegalArgumentException for an unsupported column class
   */
  def string2StructField(name: String, colType: String): StructField = {
    // Strip any "table." / "alias." qualifier (debug println removed).
    val realname = if (name.contains(".")) name.split("\\.").last else name

    colType match {
      case "java.lang.String"     => StructField(realname, StringType, true)
      case "java.lang.Integer"    => StructField(realname, IntegerType, true)
      case "java.lang.Long"       => StructField(realname, LongType, true)
      case "java.lang.Boolean"    => StructField(realname, BooleanType, true)
      case "java.lang.Double"     => StructField(realname, DoubleType, true)
      case "java.lang.Float"      => StructField(realname, FloatType, true)
      case "java.sql.Date"        => StructField(realname, DateType, true)
      // Times and timestamps are both stored as Spark TimestampType.
      case "java.sql.Time"        => StructField(realname, TimestampType, true)
      case "java.sql.Timestamp"   => StructField(realname, TimestampType, true)
      case "java.math.BigDecimal" => StructField(realname, DecimalType(10, 0), true)
      case other =>
        // Previously an unmapped type surfaced as a bare MatchError; fail with
        // an explicit message naming the offending column and type instead.
        throw new IllegalArgumentException(
          s"unsupported column type '$other' for column '$realname'")
    }
  }

  /**
   * 将查询到的结果转化成DataFrame 并写入RDB
   * @param resultSet 查询结果集
   * @param spark SparkSession
   * @return ArrayBuffer[DataFrame]
   */
  /**
   * Converts the Hive JDBC result set into Spark DataFrames in fixed-size
   * batches and writes each batch to the target relational table.
   *
   * Column values are fetched via reflection: the JDBC column class name is
   * turned into the matching ResultSet getter (getString, getInt, ...).
   *
   * @param resultSet hive query result set (consumed to the end)
   * @param spark     SparkSession used to build and write the DataFrames
   * @param map       job configuration (the same map main() reads)
   */
  def resultSet2RDB(resultSet: ResultSet,spark: SparkSession,map:Map[String,Nothing]): Unit ={
    // Grab the result metadata first: column count, names, Java class names.
    val rsMetaData: ResultSetMetaData = resultSet.getMetaData
    val rsClass: Class[_ <: ResultSet] = resultSet.getClass
    val jdbcPass = JdbcUtil.rsaDecrypt(map.getOrElse("password", ""))
    val jdbcUser = map.getOrElse("username", "")
    val driver = map.getOrElse("driverClass", "")
    val saveMode = map.getOrElse("output_write_model", "overwrite")
    val jdbcHost = map.getOrElse("host", "")
    val jdbcPort = map.getOrElse("port", "3306")
    val dbType = map.getOrElse("type", "").toLowerCase
    val dbName = map.getOrElse("dbname", "")
    val url = JdbcUtil.urlFormat(dbType, jdbcHost, jdbcPort, dbName)
    val targetTable = map.getOrElse("output_table_name", "")

    // JDBC connection properties for the target database.
    val prop = new Properties()
    prop.setProperty("user", jdbcUser)
    prop.setProperty("password", jdbcPass)
    prop.setProperty("driver",driver)
    // Probe the output table; the frame itself is never used afterwards.
    // NOTE(review): presumably this exists to fail fast when the table is
    // missing (spark.read.jdbc resolves the schema eagerly) — confirm intent.
    val dataFrame: DataFrame = spark.read.jdbc(url,"(select * from %s limit 1) m".format(targetTable),prop)

    // Parallel lists: simple type name per column (for getter dispatch) and
    // the StructField per column (for the DataFrame schema).
    val columnTypeList = new ListBuffer[String]
    val rowSchemaList = new ListBuffer[StructField]
    // The ResultSet getter is named getInt, not getInteger, so rewrite
    // "Integer" to "Int" before building the method name.
    for(i <- 1 to rsMetaData.getColumnCount){
      var temp:String = rsMetaData.getColumnClassName(i)
      temp = temp.substring(temp.lastIndexOf(".")+1)
      if("Integer".equals(temp)){
        temp = "Int"
      }
      columnTypeList += temp
      // Map the column's Java class name to the matching StructField.
      rowSchemaList += string2StructField(rsMetaData.getColumnName(i),rsMetaData.getColumnClassName(i))
    }
    // Schema for every DataFrame built from this result set.
    val structType = new StructType(rowSchemaList.toArray)

    var count: Int = 0
    // Rows accumulated for the current batch.
    val resultList = new java.util.ArrayList[Row]
    var batchCount = 0
    while (resultSet.next()) {
      count = count + 1
      val columnValueArr = new mutable.ArrayBuffer[AnyRef]
      // Fetch each column through the getter matching its type, looked up by
      // name with a single String parameter (get<Type>(columnName)).
      for (num <- 0 until columnTypeList.size) {
        val method = rsClass.getMethod("get"+columnTypeList(num),"aa".getClass)
        val columnValue: AnyRef = method.invoke(resultSet, rsMetaData.getColumnName(num + 1))

        //println(s" columnName:${rsMetaData.getColumnName(num + 1)} --- columnValue:${columnValue}")
        columnValueArr += columnValue
      }
      val row: Row = Row.fromSeq(columnValueArr)
      resultList.add(row)

      // Flush a batch every 50000 rows. (The original comment claimed a 20k
      // batch size; the code has always used 50000.)
      if(count >= 50000){
        val frame: DataFrame = spark.createDataFrame(resultList,structType)
        df2RDB(frame,targetTable,url,saveMode,prop,batchCount)
        count = 0
        batchCount += 1
        resultList.clear()
      }
    }

    // Write whatever is left over after the last full batch.
    if(resultList.size > 0){
      val frame: DataFrame = spark.createDataFrame(resultList,structType)
      df2RDB(frame,targetTable,url,saveMode,prop,batchCount)
      count = 0
      batchCount += 1
      resultList.clear()
    }
  }

  /**
   * 写入mysql
   * @param dataFrame dataframe
   * @param targetTable mysql table
   * @param url mysql jdbc
   * @param saveMode saveMode
   * @param prop mysql properties
   * @param batchCount write batch target
   */
  /**
   * Writes one DataFrame batch into the target table over JDBC.
   *
   * Only the very first batch of an "overwrite" job truncates the table; every
   * subsequent batch appends, so all batches accumulate into a single export.
   *
   * @param dataFrame   batch to write
   * @param targetTable target table name
   * @param url         JDBC url of the target database
   * @param saveMode    configured write mode (anything but "overwrite" appends)
   * @param prop        JDBC properties (user / password / driver)
   * @param batchCount  zero-based index of this batch within the export
   */
  def df2RDB(dataFrame: DataFrame,targetTable: String, url: String, saveMode: String, prop: Properties, batchCount: Int): Unit ={
    val firstOverwriteBatch = batchCount == 0 && saveMode.toLowerCase() == "overwrite"
    val sm = if (firstOverwriteBatch) SaveMode.Overwrite else SaveMode.Append

    // truncate=true makes Overwrite empty the existing table rather than drop
    // and recreate it; isolation level READ_COMMITTED for the batch inserts.
    dataFrame.write
      .mode(sm)
      .option("truncate","true")
      .option("isolationLevel","READ_COMMITTED")
      .jdbc(url, targetTable, prop)
  }
}