package com.kingsoft.dc.khaos.module.spark.sink

import java.sql.{Connection, Statement}
import java.util
import java.util.Properties
import com.alibaba.druid.pool.DruidDataSource
import com.alibaba.fastjson.JSON
import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.extender.meta.model.ds.{CosConnect}
import com.kingsoft.dc.khaos.extender.meta.utils.{RSAEncrypt}
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.constants.{CommonConstants, HiveConstants, MetaDataConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.sink.{ExtractFieldInfo, HiveSinkConfig}
import com.kingsoft.dc.khaos.module.spark.model.center.metric.SyncProcessDataMetric
import com.kingsoft.dc.khaos.module.spark.model.cos.CosAccessConfig
import com.kingsoft.dc.khaos.module.spark.model.{MetaDataEntity, RelationDataStatusInfo}
import com.kingsoft.dc.khaos.module.spark.util._
import com.kingsoft.dc.khaos.util.Logging
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame, Row, SaveMode, SparkSession}
import org.apache.spark.util.LongAccumulator
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import org.json4s.jackson.JsonMethods.{compact, render}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import scala.util.control.Breaks.{break, breakable}

/**
 *
 * Created by goosoog 2019/6/13.
 *
 */
class HiveSink extends SinkStrategy with Logging with Serializable {
  // Target Hive table metadata; resolved in init() before any write happens.
  private var meta: MetaDataEntity = _
  // Module-level configuration with the "module." prefix stripped; loaded in init().
  private var hiveProperties: Map[String, String] = _
  // Row count captured by getPartitionNums (used for reporting when the accumulator path is skipped).
  private var accumValue = 0l

  /**
   * Check write authorization, resolve the target table's metadata and load
   * the module-level configuration. Must run before sink() does any work.
   *
   * @param kc           khaos runtime context
   * @param hiveSinkConf parsed Hive sink configuration
   */
  def init(kc: KhaosContext, hiveSinkConf: HiveSinkConfig): Unit = {
    // Authorization check — invoked for its side effect; the returned value
    // was previously bound to an unused local (`checkResult`) and is dropped here.
    MetaUtils.checkWriteAuth(kc,
      hiveSinkConf.db_name,
      hiveSinkConf.table_name,
      hiveSinkConf.extender.auth.clazz,
      compact(render(hiveSinkConf.extender.auth.params)))

    // Fetch the target table metadata.
    meta = MetaUtils.getHiveMeta(kc,
      hiveSinkConf.db_name,
      hiveSinkConf.table_name,
      hiveSinkConf.extender.meta.clazz,
      compact(render(hiveSinkConf.extender.meta.params)),
      this)

    // Module configuration with the "module." prefix stripped.
    hiveProperties = kc.conf.getAllWithUnPrefix("module.").toMap

  }


  /** Write the incoming DataFrame to the target Hive table (internal Spark catalog or external cluster). */
  override def sink(kc: KhaosContext,
                    module_id: String,
                    config: JObject,
                    schema: Schema,
                    dataFrame: DataFrame): this.type = {
    implicit val formats = DefaultFormats
    val hiveSinkConfig = config.extract[HiveSinkConfig]
    init(kc, hiveSinkConfig)
    // schema handed over by the previous node
    //    val fromSchema = schema.toList()

    // Convert the dataFrame according to the sink-side schema.
    val tempDF = DataframeUtils.setDefaultValue(hiveSinkConfig.extract_fields, meta.getColumnEntiy, dataFrame)
    //    var finalData = DataframeUtils.convertDataType(hiveSinkConfig.extract_fields, tempDF)
    var finalData = DataframeUtils.convertDataType4Hive(hiveSinkConfig.extract_fields, tempDF)
    // Re-sort df columns: setDefaultValue may shuffle the column order.
    //    val cols: List[String] = hiveSinkConfig.extract_fields.map(_.field)
    //    finalData = finalData.select(cols.head, cols.tail: _*)
    finalData = DataframeUtils.sortDataCol(finalData, meta.getColumnEntiy)

    val dbName = hiveSinkConfig.db_name
    val tableName = hiveSinkConfig.table_name

    val hiveDataStatusInfo: RelationDataStatusInfo = new RelationDataStatusInfo
    val (tempData, accumulator) = DataframeUtils.calculateDataNum(kc, finalData, "HiveSink")
    val nums = tempData.rdd.getNumPartitions
    // When the partition count is at most `limit` (default 20), skip the size estimation.
    val limit = hiveProperties.getOrElse(HiveConstants.ESTIMATE_PARTITION_LIMIT, 20).toString.toInt
    // Small-file optimization: decide how many partitions to coalesce/repartition to.
    var partitions = 1 // repartition target count
    var resultData: DataFrame = null
    val advancedJsonString = compact(render(hiveSinkConfig.advanced_options))
    val dataSplit = JSON.parseObject(advancedJsonString).get(MetaDataConstants.DATA_SPLIT)
    if (dataSplit != null) {
      val o = JSON.parseObject(dataSplit.toString)
      if (o.getBoolean(MetaDataConstants.ON_OFF)) {
        // user explicitly configured the split count
        partitions = o.getInteger(MetaDataConstants.DATA_SPLIT_NUMS)
      } else {
        partitions = getPartitionNums(kc, tempData, nums, limit)
      }
    } else {
      partitions = getPartitionNums(kc, tempData, nums, limit)
    }

    // When the source data is empty (estimated partitions == 0), do not repartition.
    if (partitions > 0) {
      resultData = DataframeUtils.repartionDataframe(tempData, partitions)
    } else {
      resultData = tempData
    }

    val writeMode = hiveSinkConfig.write_option.get.trim.toLowerCase
    // HiveQL uses "insert into" for append; other modes (e.g. overwrite) keep their name.
    val hiveMode = if (writeMode.equals("append")) "into" else writeMode
    //    val (dynamicFields, tablePartitions) = getPartitionInfo(hiveSinkConfig.extract_fields)
    // Split partition fields from non-partition fields.
    val (dynamicFields, tablePartitions) = getFieldsInfo(hiveSinkConfig.extract_fields, meta.getColumnEntiy)

    if (meta.getDefaultDs) {

      // Internal (default) datasource: write via Spark SQL. Partitioned case first.
      if (tablePartitions.size > 0) {
        val tempTableName = "sparksql_temphivetable_" + System.currentTimeMillis() + "_" + Random.nextInt(100)
        resultData.createOrReplaceTempView(tempTableName)
        kc.sparkSession.sql(s"select ${dynamicFields.mkString(",")} from $tempTableName")

        val sql = s"insert $hiveMode table ${dbName}.${tableName} partition(${tablePartitions.mkString(",")}) select ${dynamicFields.mkString(",")} from $tempTableName"
        logInfo(s"==> hiveSink sql=$sql")
        kc.sparkSession.sql(sql)
      } else { // no partitions
        logInfo("==> hiveSink non-partitions")
        resultData.write.mode(writeMode).insertInto(s"${dbName}.${tableName}")
      }

      //      val partitions = meta.getTablePartition
      //      if (partitions.size > 0) {
      //        resultData.write.mode(hiveSinkConfig.write_option.get).format("Hive").partitionBy(partitions: _*).saveAsTable(s"${dbName}.${tableName}")  //overwrite会清空整个表的数据
      //      } else { //无分区
      //        resultData.write.mode(hiveSinkConfig.write_option.get).insertInto(s"${dbName}.${tableName}")
      //      }
    } else {
      // External Hive datasource: choose the transfer strategy.
      val syncMode = hiveSinkConfig.sync_mode.get
      syncMode match {
        case "jdbc" => {
          writeExternalHiveByJDBC(kc, resultData, dbName, tableName, dynamicFields, tablePartitions, writeMode, dataSplit, advancedJsonString)
        }
        case "externalTable" => {
          writeExternalHiveByTempCos(kc, resultData, dbName, tableName, dynamicFields, tablePartitions, writeMode, dataSplit, advancedJsonString)
        }
        case _ => logInfo(s"other write: $syncMode")
      }
    }

    // Report the data status (row count + overwrite flag).
    if (nums <= limit) {
      hiveDataStatusInfo.setDataNum(String.valueOf(accumulator.value))
      logInfo("累加器上报条数==>" + accumulator.value)
    } else {
      hiveDataStatusInfo.setDataNum(String.valueOf(accumValue))
      logInfo("couunt上报条数==>" + accumValue)
    }

    hiveDataStatusInfo.setCover(if (hiveSinkConfig.write_option.get == "overwrite") true else false)
    DataframeUtils.reportDataStatusRelation(kc, hiveDataStatusInfo, dbName, tableName, hiveSinkConfig.extender.meta.clazz,
      compact(render(hiveSinkConfig.extender.meta.params)))

    // Report throughput metrics to the operations center.
    val metric: SyncProcessDataMetric = CenterMetricUtils.buildSyncProcessDataMetric(kc)
    metric.setProcessDataLValue(hiveDataStatusInfo.getDataNum.toLong)
    CenterMetricUtils.reportSyncProcessData(metric, kc)

    this
  }

  /**
   * Assemble the HiveServer2 connection properties (basic JDBC settings plus
   * Druid pool sizing, the latter overridable through module.* configuration).
   *
   * @param dbName database to connect to
   * @return a Properties object consumed by HiveDataSourceUtil
   */
  def getHiveServerProps(dbName: String) = {
    val connect = meta.getDsHiveConnect
    // Cluster (ZooKeeper-discovery) mode is rejected; only Single/unset is supported.
    val url =
      if (connect.getConnectType != null && connect.getConnectType == "Cluster") {
        throw new Exception("不支持外部hive集群模式！")
        //          url = s"jdbc:hive2://${meta.getDsHiveConnect.getHost};serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=${meta.getDsHiveConnect.getZookeeperNamespace};principal=${meta.getDsHiveConnect.getMasterPrincipal}"
      } else {
        s"jdbc:hive2://${connect.getHost}:${connect.getPort}/${dbName}"
      }

    val props = new Properties()
    props.put("hive.driver", "org.apache.hive.jdbc.HiveDriver")
    props.put("hive.jdbc.url", url)
    props.put("hive.jdbc.username", connect.getUserName)
    props.put("hive.jdbc.password", RSAEncrypt.decryptByRSAPassWord(connect.getPassword))
    // Druid pool parameters with their defaults.
    props.put("hive.initialSize", hiveProperties.getOrElse(HiveConstants.HIVE_INITIALSIZE, "5"))
    props.put("hive.minIdle", hiveProperties.getOrElse(HiveConstants.HIVE_MINIDLE, "20"))
    props.put("hive.maxActive", hiveProperties.getOrElse(HiveConstants.HIVE_MAXACTIVE, "500"))
    props.put("hive.maxWait", hiveProperties.getOrElse(HiveConstants.HIVE_MAXWAIT, "60000"))
    props.put(HiveConstants.SUBMIT_THREAD_POOL, hiveProperties.getOrElse(HiveConstants.SUBMIT_THREAD_POOL, "5"))
    props
  }

  /**
   * Build the LOAD DATA statement that ingests staged COS files into a Hive table.
   *
   * @param dbTable         fully qualified "db.table"
   * @param location        path under the cosn: scheme (expected to start with "/bucket/...")
   * @param writeMode       "overwrite" replaces existing data; anything else appends
   * @param staticPartition static partition specs such as Array("dt='20200509'")
   * @return the LOAD DATA INPATH SQL string
   */
  def getSqlWithCosPath(dbTable: String, location: String, writeMode: String, staticPartition: Array[String]) = {
    val overwriteClause = if (writeMode == "overwrite") "OVERWRITE" else ""
    val partitionClause =
      if (staticPartition.length > 0) s"PARTITION ${staticPartition.mkString("(", ",", ")")}"
      else ""
    s"LOAD DATA INPATH 'cosn:/${location}' $overwriteClause INTO TABLE $dbTable $partitionClause"
  }

  /**
   * Load a COS-staged file into the external Hive table via a HiveServer2 JDBC connection.
   *
   * @param dbName          target database
   * @param tableName       target table
   * @param location        COS path (without scheme) of the staged ORC files
   * @param writeMode       "overwrite" replaces existing data, otherwise appends
   * @param staticPartition static partition specs, e.g. Array("dt='20200509'")
   */
  def addDataToHiveTable(dbName: String, tableName: String, location: String, writeMode: String, staticPartition: Array[String]): Unit = {
    val props: Properties = getHiveServerProps(dbName)
    val hiveDs: DruidDataSource = HiveDataSourceUtil.getHiveDataSource(props)
    // Hive JDBC handles (assigned inside try so finally can close them).
    var connection: Connection = null
    var statm: Statement = null
    val sql = getSqlWithCosPath(dbName + "." + tableName, location, writeMode, staticPartition)
    logInfo(s" load sql = $sql")
    try {
      // obtain a pooled Hive JDBC connection
      connection = hiveDs.getConnection
      statm = connection.createStatement()
      // execute the LOAD DATA statement
      statm.execute(sql)
      logInfo(s"load data successed !")
    }
    catch {
      case e: Throwable => {
        throw new Exception(e)
      }
    }
    finally {
      // release JDBC resources
      if (statm != null) statm.close()
      if (connection != null) connection.close()
    }
  }

  /**
   * Write to an external Hive table by staging the data as ORC files on COS
   * and issuing a LOAD DATA over HiveServer2 JDBC.
   *
   * Requires the Hive datasource's dynamicParam JSON to name a COS datasource.
   * Dynamic partitions are not supported in this mode.
   *
   * @param kc                 khaos runtime context
   * @param data               data to write
   * @param dbName             target database
   * @param tableName          target table
   * @param dynamicFields      non-partition columns (plus dynamic partition columns)
   * @param tablePartitions    partition specs from getFieldsInfo
   * @param writeMode          "overwrite" or append-style mode
   * @param dataSplit          user repartition options (unused here)
   * @param advancedJsonString raw advanced-options JSON (unused here)
   */
  def writeExternalHiveByTempCos(kc: KhaosContext,
                                 data: DataFrame,
                                 dbName: String,
                                 tableName: String,
                                 dynamicFields: Array[String],
                                 tablePartitions: Array[String],
                                 writeMode: String,
                                 dataSplit: Any,
                                 advancedJsonString: String): Unit = {
    logInfo("=>> cos temp mode ...")
    // Parse the custom JSON attached to the Hive datasource (names the COS datasource).
    val cosJson = meta.getDsHiveConnect.getDynamicParam
    if (StringUtils.isBlank(cosJson)) {
      throw new Exception("使用外部表加速方式请设置hive dynamicParam(cos dsinfo)")
    }
    val cosDsName = JSON.parseObject(cosJson).get(HiveConstants.HIVE_DS_PARAMS_COS_SOURCE_NAME)
    val cosBucketName = JSON.parseObject(cosJson).get(HiveConstants.HIVE_DS_PARAMS_COS_BUCKET_NAME)
    // Resolve the COS datasource details through the metadata service.
    val cosConnect = MetaUtils.getCosMetaByDsName(kc, cosDsName.toString)
    // Initialize COS access and register it on the Hadoop configuration.
    val cosConfig = getCosAccessConfig(kc, cosConnect, cosBucketName.toString)
    val fsConfig = addCosFileSystem(kc, cosConnect, cosConfig)

    // Stage the data on COS under a per-instance work directory.
    val workDir = "/" + kc.conf.getString(SchedulerConstants.JOB_INSTANCE_ID) // instance ids are unique and only one job instance runs at a time
    var location = workDir + "/" + tableName
    //    var location = "/" + cosConfig.getBucket + "/" + kc.conf.getString(SchedulerConstants.JOB_INSTANCE_ID) + "/" + tableName //实例id不会重复，同时也只会有一个作业实例在跑

    var staticPartition: Array[String] = Array[String]()
    var finalDF: DataFrame = kc.sparkSession.emptyDataFrame
    if (tablePartitions.size > 0) {
      // Split into static vs dynamic partitions; dynamic ones are unsupported here.
      val tps = tablePartitionsDemerger(tablePartitions)
      staticPartition = tps._1
      val dynamicPartition = tps._2
      if (dynamicPartition.length > 0) throw new Exception(s"use [外部表加速] can not support dynamic partition !")
      // Write files under the static-partition subdirectory.
      if (staticPartition.length > 0) {
        location = location + "/" + staticPartition.mkString("/").replaceAll("'", "")
      }
      // Project columns so static partition fields end up last.
      val temptableName = "sparksql_external_hive_temptable"
      logInfo(s"=>>sourceDF df fields:${data.schema.fieldNames.mkString("[", ",", "]")} ")
      data.createOrReplaceTempView(temptableName)
      val sql = s"select ${dynamicFields.mkString(",")} from $temptableName"
      logInfo(s"=>>select temphive sql=$sql")
      //      val sql = s"select ${dynamicFields.mkString(",") + "," + tablePartitions.mkString(",")} from $temptableName"
      finalDF = kc.sparkSession.sql(sql)
      logInfo(s"=>>finalDF df fields:${finalDF.schema.fieldNames.mkString("[", ",", "]")} ")
    } else {
      finalDF = data
    }
    logInfo(s"=>>hive cosfile load location = $location")
    // Cache the data as ORC on COS.
    cacheTempData(finalDF, location)
    // TODO: distinguish into vs overwrite here in a later iteration.

    location = getOrcFilePath(location)
    try {
      // LOAD DATA approach.
      addDataToHiveTable(dbName, tableName, "/" + cosConfig.getBucket + location, writeMode, staticPartition)
      // TODO: temp-table SQL insert approach in a later iteration.
    } catch {
      case e: Throwable => {
        throw new Exception("load data to hive failed！", e)
      }
    } finally {
      val isClean = hiveProperties.getOrElse(HiveConstants.HIVE_COS_TEMPDIR_CLEAN, "true").toBoolean
      // Clean up the staged temporary data (configurable).
      if (isClean) {
        HdfsUtils.deletePath(workDir, FileSystem.get(fsConfig))
      }
    }
  }

  /** Append the ORC part-file glob (e.g. part-00000-...-c000.snappy.orc) to a directory path. */
  def getOrcFilePath(location: String): String = s"$location/part-*.orc"

  /** Persist the DataFrame as ORC under tempPath. Overwrite mode clears any leftover data from earlier runs. */
  def cacheTempData(df: DataFrame, tempPath: String): Unit = {
    df.write
      .mode(SaveMode.Overwrite)
      .format("orc")
      .save(tempPath)
  }

  /**
   * Split partition specs into (static, dynamic) partitions.
   *
   * A spec containing '=' (e.g. dt='20200509') is static; a bare column name is
   * dynamic. A dynamic partition may not be the parent of a static one, i.e. no
   * static spec may directly follow a dynamic spec.
   *
   * Robustness fix: an empty input now yields two empty arrays instead of
   * throwing ArrayIndexOutOfBoundsException.
   *
   * @param tablePartitions partition specs in declaration order
   * @return (static partitions, dynamic partitions)
   */
  def tablePartitionsDemerger(tablePartitions: Array[String]): (Array[String], Array[String]) = {
    def isStatic(p: String): Boolean = p.contains("=")
    // Reject a dynamic partition that is the direct parent of a static one.
    for (pair <- tablePartitions.sliding(2) if pair.length == 2) {
      if (!isStatic(pair(0)) && isStatic(pair(1))) {
        throw new Exception("不支持动态分区作为静态分区的父分区！")
      }
    }
    val (staticPartition, dynamicPartition) = tablePartitions.partition(isStatic)
    (staticPartition, dynamicPartition)
  }

  /**
   * Register COS access settings on the Spark Hadoop configuration.
   *
   * @param kc         khaos runtime context
   * @param cosConnect COS datasource connection info (kept for interface stability)
   * @param cosConfig  resolved COS access configuration
   * @return the SparkContext hadoop configuration with the COS resource appended
   */
  def addCosFileSystem(kc: KhaosContext, cosConnect: CosConnect, cosConfig: CosAccessConfig): org.apache.hadoop.conf.Configuration = {
    val sparkContext = kc.sparkSession.sparkContext
    //    val cosconfig = getCosAccessConfig(kc,cosConnect,cosBucketName)
    val enrichedConf = HadoopCosUtils.appendCosHadoopConfigs(sparkContext.hadoopConfiguration, cosConfig)
    sparkContext.hadoopConfiguration.addResource(enrichedConf)
    sparkContext.hadoopConfiguration
  }

  /**
   * Build the COS access configuration from a COS datasource connection.
   *
   * The bucket name is suffixed with "-&lt;env&gt;-&lt;appId&gt;"; when no bucket name is
   * supplied the default "hivesinktemp" prefix is used.
   */
  def getCosAccessConfig(kc: KhaosContext, cosConnect: CosConnect, cosBucketName: String): CosAccessConfig = {
    val env = kc.conf.getString(SchedulerConstants.RUN_ENV)
    val appId = cosConnect.getAppId
    // Fall back to the temp-bucket prefix when no bucket is configured.
    val bucketPrefix = if (StringUtils.isBlank(cosBucketName)) "hivesinktemp" else cosBucketName

    val cosAccessConfig = new CosAccessConfig
    cosAccessConfig.setAccessKey(cosConnect.getAccessKeyId)
    cosAccessConfig.setSecretKey(cosConnect.getAccessKeySecret)
    cosAccessConfig.setRegion(cosConnect.getRegion)
    cosAccessConfig.setBucket(bucketPrefix + "-" + env + "-" + appId)
    cosAccessConfig.setEndPoint("cos." + cosConnect.getRegion + "." + cosConnect.getEndpoint)
    cosAccessConfig.setAppId(appId)
    logInfo(s"cos info:bucketName=>[${cosAccessConfig.getBucket}] appId=>[$appId] region=>[${cosAccessConfig.getRegion}] endpoint=>[${cosAccessConfig.getEndPoint}]")
    cosAccessConfig
  }


  /**
   * Write to an external Hive table through HiveServer2 JDBC batch inserts.
   *
   * Rows are buffered per Spark partition and flushed every `commitRecods`
   * rows as a multi-row INSERT.
   *
   * Fixes over the previous version:
   *  - the commit-records advanced option NPEd when absent (the raw lookup was
   *    .toString'ed before the null check, and the Int null-check was always true);
   *  - the user-requested repartition was computed but then ignored, because
   *    foreachPartition iterated `resultData.rdd` instead of `rddData`;
   *  - the Statement was closed twice (in the try body and again in finally).
   *
   * @param kc                 khaos runtime context
   * @param resultData         data to write
   * @param dbName             target database
   * @param tableName          target table
   * @param dynamicFields      columns to insert, in order
   * @param tablePartitions    partition specs from getFieldsInfo
   * @param writeMode          "overwrite" clears the target first, otherwise appends
   * @param dataSplit          optional user repartition options (JSON)
   * @param advancedJsonString raw advanced-options JSON (commit-records override)
   */
  def writeExternalHiveByJDBC(kc: KhaosContext,
                              resultData: DataFrame,
                              dbName: String,
                              tableName: String,
                              dynamicFields: Array[String],
                              tablePartitions: Array[String],
                              writeMode: String,
                              dataSplit: Any,
                              advancedJsonString: String): Unit = {
    logInfo(s"=>>> jdbc save mode...")

    // Rows per INSERT statement (also roughly the number of files produced).
    var commitRecods = hiveProperties.getOrElse(HiveConstants.COMMIT_RECODS, "5000").toString.toInt
    // User-supplied commit size from advanced options; may be absent (null).
    val crs = JSON.parseObject(advancedJsonString).get(MetaDataConstants.COMMIT_RECODS)
    if (crs != null) {
      commitRecods = crs.toString.toInt
    }

    log.info(s"==> commit records $commitRecods by per ...")
    val jdbcAcc: LongAccumulator = kc.sparkSession.sparkContext.longAccumulator("hiveSink_jdbc_get_connections")

    val props: Properties = getHiveServerProps(dbName)
    val bcProps: Broadcast[Properties] = kc.sparkSession.sparkContext.broadcast(props)

    // Overwrite: clear the target data (whole table or the given partitions) first.
    if ("overwrite".equalsIgnoreCase(writeMode)) {
      jdbcOverWriteTable(props, dbName, tableName, dynamicFields, tablePartitions)
      logInfo(s"==> successed clear data by jdbc！partition=${tablePartitions.mkString("[", ",", "]")}")
    }

    // Optional user-driven repartition of the source data.
    var rddData = resultData.rdd
    if (dataSplit != null) {
      val o = JSON.parseObject(dataSplit.toString)
      if (o.getBoolean(MetaDataConstants.ON_OFF)) { // split switch enabled
        val p = o.getInteger(MetaDataConstants.DATA_SPLIT_NUMS)
        rddData = rddData.repartition(p)
      }
    }

    logInfo(s"==> resultData partition size=${rddData.getNumPartitions}")
    // Iterate the (possibly repartitioned) rddData, not resultData.rdd.
    rddData.foreachPartition(insertExternalDsTable _)
    logInfo(s"==> accumulator hive connects=${jdbcAcc.value}")

    /**
     * Insert one partition of rows synchronously over a pooled JDBC connection.
     *
     * @param iter rows of this Spark partition
     */
    def insertExternalDsTable(iter: Iterator[Row]): Unit = {
      val props = bcProps.value
      val hiveDs: DruidDataSource = HiveDataSourceUtil.getHiveDataSource(props)
      jdbcAcc.add(1)

      // Hive JDBC handles (closed in finally).
      var connection: Connection = null
      var statm: Statement = null
      try {
        connection = hiveDs.getConnection
        statm = connection.createStatement()

        // Buffer rows and flush every commitRecods rows.
        val values = ArrayBuffer[String]()
        var count = 0
        for (row <- iter) {
          count = count + 1
          val value = ArrayBuffer[String]() // one row's column values
          for (field <- dynamicFields) {
            value += "\"" + row.getAs[String](field) + "\""
          }
          values += value.mkString(",")
          if (count == commitRecods) {
            val sql = buildHiveSql(s"${dbName}.${tableName}", dynamicFields, tablePartitions.mkString(","), values.toArray, "into", commitRecods)
            statm.executeUpdate(sql)
            logInfo(s"batch 成功插入${count}条数据...")
            values.clear()
            count = 0
          }
        }
        // Flush the remaining buffered rows.
        if (values.size > 0) {
          val sql = buildHiveSql(s"${dbName}.${tableName}", dynamicFields, tablePartitions.mkString(","), values.toArray, "into", commitRecods)
          statm.executeUpdate(sql)
          logInfo(s"成功插入${values.size}条数据......")
        }
      }
      catch {
        case e: Throwable => throw new Exception("插入数据失败！", e)
      }
      finally {
        // Close exactly once, here in finally.
        if (statm != null) statm.close()
        if (connection != null) connection.close()
      }
    }
  }

  /**
   * Delete the data of the table (or of the specified partitions) over JDBC,
   * used to implement overwrite semantics for external Hive.
   *
   * Non-partitioned tables are TRUNCATEd; partitioned tables are overwritten
   * with an empty SELECT (WHERE 1=0) from a temporary LIKE-table, which is
   * created up front and dropped afterwards.
   *
   * @param props           HiveServer2 connection properties
   * @param dbName          target database
   * @param tableName       target table
   * @param dynamicFields   columns selected in the empty-overwrite statement
   * @param tablePartitions partition specs to clear (empty = whole table)
   */
  def jdbcOverWriteTable(props: Properties,
                         dbName: String,
                         tableName: String,
                         dynamicFields: Array[String],
                         tablePartitions: Array[String]) = {
    val hiveDs: DruidDataSource = HiveDataSourceUtil.getHiveDataSource(props)
    // Hive JDBC handles (closed in finally).
    var connection: Connection = null
    var statm: Statement = null
    var sql = ""
    val tempHiveTableName = "di_temp_hive_table_" + System.currentTimeMillis()
    try {
      // obtain a pooled Hive JDBC connection
      connection = hiveDs.getConnection
      statm = connection.createStatement()

      // Create a temporary LIKE-table used as the empty source for the overwrite.
      statm.execute(s"CREATE TABLE IF NOT EXISTS ${dbName}.$tempHiveTableName LIKE ${dbName}.${tableName}")
      logInfo(s"create temp talbe : ${dbName}.$tempHiveTableName")

      // Build the delete statement.
      if (StringUtils.isBlank(tablePartitions.mkString(","))) {
        //      s"insert overwrite table $dbtable select $fields from $dbtable where 1=0"
        sql = s"TRUNCATE TABLE ${dbName}.${tableName}"
      } else {
        sql = s"INSERT OVERWRITE TABLE ${dbName}.${tableName} PARTITION(${tablePartitions.mkString(",")}) SELECT ${dynamicFields.mkString(",")} FROM ${dbName}.$tempHiveTableName WHERE 1=0"
      }
      logInfo(s"delete data sql=$sql")
      statm.executeUpdate(sql)
      statm.execute(s"DROP TABLE IF EXISTS ${dbName}.$tempHiveTableName")
      logInfo(s"drop temp talbe : ${dbName}.$tempHiveTableName")

    }
    catch {
      case e: Throwable => throw new Exception("delete failed！", e)
    }
    finally {
      // release JDBC resources
      if (statm != null) statm.close()
      if (connection != null) connection.close()
    }
  }

  /**
   * Build a multi-row INSERT statement for the buffered values.
   *
   * @param table        fully qualified target table ("db.table")
   * @param cols         column names (not referenced in the generated SQL; kept for interface stability)
   * @param partitions   comma-joined partition specs, may be blank
   * @param values       pre-rendered row value lists ("\"a\",\"b\",...")
   * @param writeMode    insert mode, e.g. "into"
   * @param commitRecods commit batch size (used for the buffer-size log)
   * @return the INSERT statement
   */
  def buildHiveSql(table: String, cols: Array[String], partitions: String, values: Array[String], writeMode: String, commitRecods: Int): String = {
    if (values.isEmpty) {
      throw new Exception("源表数据为空,取消插入操作!")
    }
    // Rough buffer estimate: (sample row length + parens + comma) * 2.
    val arrSize = commitRecods * (values(0).size + 3) * 2
    log.info(s"values arr size=" + arrSize)
    // Wrap every row in parentheses and join with commas: (row1),(row2),...
    val dynamicValues = values.map(v => s"($v)").mkString(",")
    if (StringUtils.isBlank(partitions)) {
      s"insert $writeMode table $table values$dynamicValues"
    } else {
      s"insert $writeMode table $table partition($partitions) values$dynamicValues"
    }
  }

  /**
   * Legacy partition assembly based solely on the front-end field list
   * (superseded by getFieldsInfo, which orders partitions by metadata).
   *
   * @param extract_fields fields parsed from the front-end JSON
   * @return (non-static-partition fields with dynamic partitions appended, partition specs)
   */
  def getPartitionInfo(extract_fields: List[ExtractFieldInfo]) = {
    val partitionArr = ArrayBuffer[String]()
    val dynamicFields = ArrayBuffer[String]() // ordinary columns + dynamic partition columns
    val dynamicPartitions = ArrayBuffer[String]()
    extract_fields.foreach { efi =>
      if (!efi.field_props.is_partition.get) {
        dynamicFields += efi.field
      } else {
        val partitionInfo = efi.field_props.partition_info.get.trim.toLowerCase
        if (MetaDataConstants.DYNAMIC_PARTITION.equals(partitionInfo)) {
          // dynamic partition values come from the source rows
          partitionArr += efi.field
          dynamicPartitions += efi.field
        } else if (MetaDataConstants.STATIC_PARTITION.equals(partitionInfo)) {
          // static partition, e.g. partitionName='20200509'
          partitionArr += efi.field + "='" + efi.field_props.default_value + "'"
        }
      }
    }
    // Partition columns are appended after the ordinary columns.
    dynamicFields ++= dynamicPartitions

    logInfo(s"==>partition info: ${partitionArr.mkString("[", ",", "]")}")
    (dynamicFields.toArray, partitionArr.toArray)
  }

  /**
   * Split fields into non-partition fields and partition specs; static
   * partitions are rendered as partitionName='20200509'.
   *
   * Partition order is taken from the data-management metadata rather than the
   * front-end JSON, because the JSON order may disagree with the table's real
   * partition layout and misalign the data. Ordinary fields are likewise
   * re-ordered to the metadata column order (fixes UI auto-sort shuffling).
   *
   * @param extract_fields fields parsed from the front-end JSON
   * @param dmMateList     column metadata from the data-management service
   * @return (non-static-partition fields in metadata order + dynamic partitions, partition specs)
   */
  def getFieldsInfo(extract_fields: List[ExtractFieldInfo], dmMateList: util.List[DmTableColumn]) = {
    val partitionArr = ArrayBuffer[String]() // partition specs
    val dynamicFields = ArrayBuffer[String]() // ordinary columns
    val dynamicPartitions = ArrayBuffer[String]()

    for (efi <- extract_fields) {
      if (!efi.field_props.is_partition.get) {
        dynamicFields.append(efi.field)
      }
    }

    // Resolve partition columns in the order given by the data-management metadata.
    val dmMetaTablepartition = meta.getTablePartition
    for (partitionName <- dmMetaTablepartition) {
      // Look up the front-end field info (dynamic/static flag, default value);
      // fail with a descriptive message instead of a bare NoSuchElementException
      // when the metadata partition is missing from the front-end field list.
      val partitionField = extract_fields
        .find(extractFieldInfo => partitionName.equals(extractFieldInfo.field))
        .getOrElse(throw new Exception(s"partition column [$partitionName] from metadata not found in extract_fields"))
      val partitionInfo = partitionField.field_props.partition_info.get.trim.toLowerCase
      if (MetaDataConstants.DYNAMIC_PARTITION.equals(partitionInfo)) {
        partitionArr.append(partitionName) // dynamic partition values come from the source rows
        dynamicPartitions.append(partitionName)
      } else if (MetaDataConstants.STATIC_PARTITION.equals(partitionInfo)) {
        partitionArr.append(partitionName + "='" + partitionField.field_props.default_value + "'") // e.g. partitionName='20200509'
      }
    }

    // Re-order ordinary fields to match the metadata column order.
    val targetDynamicFields = new ArrayBuffer[String]()
    for (i <- 0 until dmMateList.size()) {
      val field = dmMateList.get(i).getColName
      if (dynamicFields.contains(field)) {
        targetDynamicFields.append(field)
      }
    }
    // Partition columns are appended after the ordinary columns.
    targetDynamicFields ++= dynamicPartitions

    logInfo(s"==>partition info: ${partitionArr.mkString("[", ",", "]")}")
    (targetDynamicFields.toArray, partitionArr.toArray)
  }

  /**
   * Decide how many partitions to repartition the sink data to.
   *
   * @param kc    khaos runtime context
   * @param data  the data to be written
   * @param nums  current partition count of the data
   * @param limit threshold below which no estimation is done
   * @return the target partition count
   */
  def getPartitionNums(kc: KhaosContext, data: DataFrame, nums: Int, limit: Int): Int =
    if (nums <= limit) {
      // Few partitions already: keep as-is, skip the costly estimation.
      nums
    } else {
      // Record the row count (config override or an actual count) for later reporting,
      // then estimate a partition count from it.
      accumValue = kc.conf.getString(CommonConstants.DATA_COUNT_SET_NUMS, String.valueOf(data.count())).toLong
      logInfo("accumValue ==>" + accumValue)
      DataframeUtils.estimatePartitions(kc, data, meta.getColumnEntiy, accumValue)
    }
}
