package com.kingsoft.dc.khaos.module.spark.sink

import java.util

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.api.DmTableSplit
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.constants.SchedulerConstants
import com.kingsoft.dc.khaos.module.spark.metadata.sink.HAWQSinkConfig
import com.kingsoft.dc.khaos.module.spark.model.RelationDataStatusInfo
import com.kingsoft.dc.khaos.module.spark.request.model.{JdbcConnectEntity, StructFieldEntity}
import com.kingsoft.dc.khaos.module.spark.util.TableSplitUtils.StrategyValueEnum
import com.kingsoft.dc.khaos.module.spark.util._
import com.kingsoft.dc.khaos.util.Logging
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame}
import org.json4s.jackson.JsonMethods.{compact, render}
import org.json4s.{DefaultFormats, JsonAST}

import sys.process._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

/**
 * Sink strategy that writes a Spark DataFrame into a HAWQ (KDW_for_HAWQ) / MPP table.
 *
 * Pipeline: parse the sink config -> resolve connection/metadata -> stage the data
 * to a temporary HDFS directory as ORC -> create an external table over the staged
 * files and execute SQL to load it into the target (possibly split/partitioned)
 * internal table -> report row counts.
 *
 * NOTE(review): this class is stateful — `sink` populates many mutable fields that
 * later methods read; it is not safe to call concurrently or reuse across jobs.
 */
class HAWQSink extends SinkStrategy with Logging {
  // JDBC connection descriptor for the target HAWQ instance (built in initJdbcConnectInfo)
  private var connectEntity: JdbcConnectEntity = null
  // Job/runtime context; source of Spark session and job configuration
  private var _kc: KhaosContext = null
  // Parsed sink configuration extracted from the JSON config passed to sink()
  private var _hawq_Config: HAWQSinkConfig = null
  // Column metadata of the target HAWQ table
  private var _hawqColsInfo: util.List[DmTableColumn] = null

  // Physical address of the MPP database (filled from ds_config in initMetaData)
  private var _host: String = null
  private var _port: String = null
  private var _username: String = null
  private var _password: String = null
  private var _instansename: String = null
  // Double-quoted database / table identifiers, e.g. "mydb" (quotes included)
  private var dbName = ""
  private var tblName = ""

  // Real (split) physical table name -> DataFrame slice destined for it
  private var tblNameAndDF: mutable.HashMap[String, DataFrame] = null
  // Field metadata keyed by column name, produced by buildNewDataframeHAWQ
  private var _hawqMete: mutable.HashMap[String, StructFieldEntity] = null
  // HDFS filesystem handle used for staging-area cleanup
  private var fs: FileSystem = null
  // Temporary HDFS directory used to stage data before loading
  private val HDFS_WORK_DIR = "/di/.working_input"
  // HDFS namespace prefix under which the staging directory lives
  private val systemPath = "hdfs://hdfs-ha/kdw-cache"
  // Accumulated row-count status reported after a successful load
  private var mppDataStatusInfo: RelationDataStatusInfo = null
  //  External-table column types corresponding to ORC file types
  //  Type Mapping Supported
  //  External Data Type  |  Orc Data Type
  //  --------------------------------------------------------------------
  //  bool                |  boolean, bigint, int, smallint, tinyint, date
  //  int,int2,int4,int8  |  boolean, bigint, int, smallint, tinyint, date
  //  date                |  boolean, bigint, int, smallint, tinyint, date
  //  float4,float8       |  float,   double
  //  interval            |  string,  varchar
  //  time                |  string,  varchar
  //  timestamp           |  timestamp
  //  numeric             |  decimal
  //  char(n),varchar(n)  |  binary,  char, varchar, string
  //  bytea,text          |  binary,  char, varchar, string

  // Storage format used for the staged files on HDFS
  private val ORC_FORMAT = "orc"
  // Table-split metadata for the target table (null when the table is not split)
  private var tableSplit: DmTableSplit = null
  // Captured once per sink instance; used to build a unique external-table flag
  private val curr_time = System.currentTimeMillis()
  // Fully-qualified HDFS location of the staged ORC files (set in loadToHdfsAndHAWQ)
  private var _location: String = ""

  // Known MPP-compatible source engine identifiers
  object SourceMode {
    val HashData = "hashdata" // mpp
    val GreenPlum = "greenplum"
    val HAWQ = "hawq"
  }

  /**
   * Entry point: write `dataSet` into the configured HAWQ table.
   *
   * @param kc             runtime context (Spark session, job configuration)
   * @param module_id      id of the invoking module (unused in the visible code)
   * @param config         raw JSON sink configuration, extracted to [[HAWQSinkConfig]]
   * @param schema         inner schema descriptor (unused in the visible code)
   * @param dataSet        the DataFrame to be written
   * @param ds_config      datasource connection properties (host/port/username/password/instansename)
   * @param hawq_cols_info column metadata of the target HAWQ table
   * @return Unit in practice; declared as Any by the SinkStrategy contract
   */
  def sink(kc: KhaosContext,
           module_id: String,
           config: JsonAST.JObject,
           schema: Schema,
           dataSet: DataFrame,
           ds_config: Map[String, String],
           hawq_cols_info: util.List[DmTableColumn]): Any = {

    // Parse the config JSON into the typed sink configuration
    implicit val formats = DefaultFormats
    val hawqSinkConfig = config.extract[HAWQSinkConfig]
    this._hawq_Config = hawqSinkConfig
    this._hawqColsInfo = hawq_cols_info
    this._kc = kc
    // Wrap database and table names in double quotes for SQL identifiers
    dbName = s"""\"${_hawq_Config.db_name}\""""
    tblName = s"""\"${_hawq_Config.table_name}\""""

    // Initialize connection metadata, HDFS filesystem and JDBC info
    init(ds_config)
    log.info("KDW_for_HAWQ DataSource Synchronization!")
    // Per-job-instance staging directory, e.g. /di/.working_input/1820/*
    val workDir = s"${HDFS_WORK_DIR}/${_kc.conf.getString("job.inst.id")}"
    try {
      loadToHdfsAndHAWQ(dataSet, workDir)
    } catch {
      case e: Exception =>
        e.printStackTrace()
        throw e
    } finally {
      // Close the filesystem handle.
      // NOTE(review): FileSystem.get returns a JVM-wide cached instance by default;
      // closing it here may break other code sharing the same cache entry — confirm.
      if (fs != null) {
        log.info("关闭文件系统!")
        fs.close()
      }
    }

  }

  /**
   * Initialize all connection state: target metadata, HDFS filesystem, JDBC info.
   * Must be called before any load method; populates mutable fields.
   */
  def init(ds_config: Map[String, String]): Unit = {
    initMetaData(ds_config)
    initHdfsConfig
    initJdbcConnectInfo()
  }

  /**
   * Initialize the metadata (physical address / credentials) of the target table
   * directly from the provided datasource config map.
   *
   * NOTE(review): throws NoSuchElementException if any expected key
   * (host/port/username/password/instansename) is missing from ds_config.
   */
  def initMetaData(ds_config: Map[String, String]) = {
    log.info("初始化物理地址...")
    // Earlier implementation resolved the address via the metadata service; kept for reference:
    //    val dbname = _hawq_Config.db_name
    //    val tblname = _hawq_Config.table_name
    //    val className = _hawq_Config.extender.meta.clazz
    //    val mppMeta: MetaDataEntity = MetaUtils.getMPPMeta(_kc, dbname, tblname, className, compact(render(_hawq_Config.extender.auth.params)), this)
    //    val mppConnect = mppMeta.getDsMppConnect
    //    _mppColsInfo = mppMeta.getColumnEntiy
    //    _host = mppConnect.getHost
    //    _port = mppConnect.getPort
    //    _username = mppConnect.getUsername
    //    _password = mppConnect.getPassword
    //    _instansename = mppConnect.getInstanceName
    //    _dataSourceType = mppConnect.getSourceMode
    //
    //    if (mppMeta.tableEntiy.getTblType.equalsIgnoreCase(MetaDataConstants.VIEW)) {
    //      throw new Exception(s"暂不支持写入视图类型表 ${dbname}.${tblname}")
    //    }

    _host = ds_config("host")
    _port = ds_config("port")
    _username = ds_config("username")
    _password = ds_config("password")
    _instansename = ds_config("instansename")
  }

  /**
   * Initialize the HDFS filesystem handle used for intermediate data staging,
   * based on the Spark context's Hadoop configuration.
   */
  def initHdfsConfig(): Unit = {
    log.info("初始化hdfs文件系统...")
    fs = FileSystem.get(_kc.sparkSession.sparkContext.hadoopConfiguration)
  }

  /**
   * Build the JDBC connection entity for the target HAWQ instance
   * from the credentials resolved in initMetaData.
   */
  def initJdbcConnectInfo() = {
    log.info("初始化JDBC...")
    val url = getConnectUrl()
    val user = _username
    val password = _password
    val schema = _hawq_Config.db_name
    this.connectEntity = new JdbcConnectEntity(url,
      user,
      password,
      schema,
      _hawq_Config.table_name)
  }

  /**
   * Stage the data to HDFS as ORC, then load it into HAWQ.
   *
   * Handles table splitting (custom enum / datetime / business-suffix strategies),
   * default-value injection, per-split-table writes and row-count accumulation.
   *
   * @param data    DataFrame to import
   * @param workDir HDFS staging path (relative to `systemPath`)
   */
  def loadToHdfsAndHAWQ(data: DataFrame,
                        workDir: String): Unit = {
    try {
      _location = getLocation(workDir)

      // Resolve split-table metadata; null means the target is a plain table
      tableSplit = TableSplitUtils.getTableSplit(_kc,
        _hawq_Config.db_name,
        _hawq_Config.table_name,
        _hawq_Config.extender.meta.clazz,
        compact(render(_hawq_Config.extender.auth.params)))
      var splitValues: List[String] = null
      var newSplitValues: Map[String, String] = null
      var tblList: List[String] = null

      // Apply renames / default values to the extracted columns
      var colArr = new ArrayBuffer[Column]()
      val sdvData = DataframeUtils.setDefaultValue(_hawq_Config.extract_fields, _hawqColsInfo, data)
      for (elem <- sdvData.columns) {
        colArr += sdvData.col(elem)
      }
      // Determine the list of real (physical) split tables, per split strategy
      if (tableSplit != null) {
        logInfo("分表不为空")
        tableSplit.getStrategyType match {
          case TableSplitUtils.StrategyTypeEnum.CUSTOM_ENUM => {
            // NOTE(review): this `val` shadows the outer `splitValues` var, which stays
            // null in this branch — harmless here since only the local is used, but fragile.
            val splitValues = tableSplit.getStrategyValue.split(",").toList
            // Resolve the real split-table names
            tblList = TableSplitUtils.getRealTable(_kc,
              _hawq_Config.db_name,
              _hawq_Config.table_name,
              _hawq_Config.extender.meta.clazz,
              compact(render(_hawq_Config.extender.auth.params)), this, tableSplit, "in", splitValues)

          }
          case TableSplitUtils.StrategyTypeEnum.DATETIME => {
            // Collect the distinct values of the split column (driver-side collect)
            splitValues = sdvData.select(colArr: _*).select(col(tableSplit.getSplitColName).cast(StringType)).distinct().collect().map(row => {
              row.getAs[String](tableSplit.getSplitColName)
            }).toList.filter(_ != null)

            tableSplit.getStrategyValue match {
              // 2019-01-01 ==> 2019/201901/20190101 depending on the granularity
              case StrategyValueEnum.year => newSplitValues = splitValues.map(sv => (sv.replaceAll("-", "").substring(0, 4), sv)).toMap; splitValues = splitValues.map(_.replaceAll("-", "").substring(0, 4))
              case StrategyValueEnum.month => newSplitValues = splitValues.map(sv => (sv.replaceAll("-", "").substring(0, 6), sv)).toMap; splitValues = splitValues.map(_.replaceAll("-", "").substring(0, 6))
              case StrategyValueEnum.day => newSplitValues = splitValues.map(sv => (sv.replaceAll("-", "").substring(0, 8), sv)).toMap; splitValues = splitValues.map(_.replaceAll("-", "").substring(0, 8))
            }
            tblList = TableSplitUtils.getRealTable(_kc,
              _hawq_Config.db_name,
              _hawq_Config.table_name,
              _hawq_Config.extender.meta.clazz,
              compact(render(_hawq_Config.extender.auth.params)), this, tableSplit, "in", splitValues)
          }
          case TableSplitUtils.StrategyTypeEnum.BUSSINESS => {
            logInfo("=====业务分表=====")
            // Business-suffix split: the sub-table suffix must be explicitly configured
            var suffixValue = ""
            if (_hawq_Config.sub_table.on_off.trim.toLowerCase == "true") {
              suffixValue = _hawq_Config.sub_table.suffix
              // NOTE(review): `suffixValue == ""` is tested before `== null`; a null
              // suffix reaches the first comparison, which is safe for == but the
              // ordering is unusual — confirm suffix can never be null here.
              if (suffixValue == "" || suffixValue == null) {
                throw new IllegalArgumentException("未正确填写业务分表后缀!")
              }
            } else {
              throw new IllegalArgumentException("未开启业务分表开关!")
            }
            splitValues = List[String](suffixValue)
            logInfo("table_name===>" + _hawq_Config.table_name + "   db_name===>" + _hawq_Config.db_name)
            logInfo("tableSplit===>" + tableSplit + "   splitValues===>" + splitValues(0) + "   _hawq_Config.extender.meta.clazz===>" + _hawq_Config.extender.meta.clazz)
            tblList = TableSplitUtils.getRealTable(_kc,
              _hawq_Config.db_name,
              _hawq_Config.table_name,
              _hawq_Config.extender.meta.clazz,
              compact(render(_hawq_Config.extender.auth.params)), this, tableSplit, "=", splitValues)
          }
        }
      }

      // Build the target DataFrame (columns aligned with the HAWQ table) and field metadata
      val tuple = DataframeUtils.buildNewDataframeHAWQ(tblList: List[String],
        tableSplit,
        _hawq_Config.table_name,
        _hawqColsInfo,
        _hawq_Config.extract_fields,
        sdvData,
        connectEntity)

      val targetData = tuple._1
      _hawqMete = tuple._2
      log.info("===> " + targetData.schema.fields.mkString(","))
      // Map each real physical table to the DataFrame slice that belongs to it
      tblNameAndDF =
        TableSplitUtils.getSinkRealTable(_kc,
          _hawq_Config.db_name,
          _hawq_Config.table_name,
          this,
          _hawq_Config.extender.meta.clazz,
          compact(render(_hawq_Config.extender.meta.params)),
          targetData,
          _hawq_Config.sub_table)

      //      tblNameAndDF = tblNameAndDF.map(tp => (tp._1.toLowerCase(), tp._2))
      mppDataStatusInfo = new RelationDataStatusInfo
      // Write each split table's DataFrame to the staging location
      // NOTE(review): every iteration writes to the same `_location` with mode
      // "Overwrite" — with multiple split tables, later writes appear to replace
      // earlier ones; confirm whether multi-table splits are expected here.
      for (tblDF <- tblNameAndDF) {

        // NOTE(review): partition count is computed from `targetData`, not from the
        // per-table slice `tblDF._2` — confirm this is intentional.
        val numPartition = DataframeUtils.rePartitions(_kc, targetData, _hawqColsInfo)

        // Wrap the DataFrame with a row-counting accumulator for status reporting
        val (resultData, accumulator) = DataframeUtils.calculateDataNum(_kc, tblDF._2, "HAWQSink")

        val partitions = resultData.rdd.partitions.length
        // Intent: write with the larger of the two partition counts.
        // NOTE(review): `coalesce` can only *reduce* the number of partitions, so when
        // partitions < numPartition this branch is a no-op; `repartition` would be
        // needed to actually increase it — confirm intended behavior.
        if (partitions >= numPartition) {
          resultData.write.format(ORC_FORMAT).mode("Overwrite").save(s"${_location}")
        } else {
          resultData.coalesce(numPartition).write.format(ORC_FORMAT).mode("Overwrite").save(s"${_location}")
        }
        // Accumulate row counts across all split tables
        var numTemp: Long = 0
        if (mppDataStatusInfo.getDataNum == null) {
          numTemp = accumulator.value.toLong
        } else {
          numTemp = mppDataStatusInfo.getDataNum.toLong + accumulator.value.toLong
        }
        mppDataStatusInfo.setDataNum(numTemp.toString)
      }

      logInfo("缓存数据成功.")

      val fieldList = targetData.schema.fields.toList
      loadToMpp(workDir, fieldList)

    } catch {
      case e: Exception => {
        e.printStackTrace()
        log.info("缓存数据失败,失败信息:" + e.getMessage + "失败原因:" + e.getCause)
        throw new Exception("缓存数据失败,失败信息:" + e.getMessage + "失败原因:" + e.getCause)
      }
    } finally {
      // Remove the temporary .tmp files and the _SUCCESS marker from the staging dir
      fs.delete(new Path(s"${_location}/.tmp"), true)
      fs.delete(new Path(s"${_location}/_SUCCESS"), true)
      // chmod the workDir so other task instances are not blocked by permissions
      Process(s"hdfs dfs -chmod 777 ${systemPath}${workDir}").!!

    }
  }


  /**
   * Load the staged HDFS data into HAWQ via a temporary external table.
   *
   * @param workDir staging path
   * @param colList field (schema) information of the staged data
   */
  def loadToMpp(workDir: String,
                colList: List[StructField]): Unit = {
    val mppTmpExtTable = getMppTmpExtTableName()
    loadData(workDir, mppTmpExtTable, tblName, colList)
  }


  /**
   * Build the name of the temporary HAWQ external table:
   * the target table name suffixed with the job instance id, double-quoted.
   *
   * @return quoted external table name
   */
  def getMppTmpExtTableName() = {
    val jobId = _kc.conf.getString("job.inst.id")
    s"""\"${_hawq_Config.table_name}_${jobId}\""""
  }

  /**
   * Create the HDFS-backed HAWQ external table and run the SQL that copies
   * the staged data into the internal table(s).
   *
   * @param workDir    temporary HDFS staging directory
   * @param cosTblName external (staging) table name — unused in the visible body; confirm
   * @param mppTblName HAWQ internal table name — unused in the visible body; confirm
   * @param colList    field information of the staged data
   */
  def loadData(workDir: String,
               cosTblName: String,
               mppTblName: String,
               colList: List[StructField]): Unit = {
    logInfo("开始缓存加载数据至mpp...")
    val url = getConnectUrl()
    val user = _username
    val password = _password
    // Column-definition fragment for the external table DDL
    val extTblFieldDefinedStr: String = DataframeUtils.getHAWQExtTableFieldDefined(_hawq_Config.extract_fields)

    // Unique flag (job instance id + creation timestamp) to disambiguate external tables
    val externalFlag = _kc.conf.getString("job.inst.id") + "_" + curr_time
    val hdfsInfo = CosApiUtils.getHDFSInfoFromFile()
    var nameNodeAddress = ""
    // Pick the namenode address by run environment (test -> offline, otherwise online)
    val dev = _kc.conf.getString(SchedulerConstants.RUN_ENV)
    dev match {
      case "test" => {
        nameNodeAddress = hdfsInfo.getProperty("hdfs.namenode.address.offline")
      }
      case _ => {
        nameNodeAddress = hdfsInfo.getProperty("hdfs.namenode.address.online")
      }
    }
    val ip = "hdfs://" + nameNodeAddress
    val cluster_namespace = _kc.conf.getString("cluster.namespace")
    // Assemble the SQL (parameter semantics documented on MppUtils.getHAWQExternalTable)
    val externalTableDDL: String = MppUtils.getHAWQExternalTable(
      tblNameAndDF,
      dbName,
      extTblFieldDefinedStr,
      _location.replace(cluster_namespace, ""),
      DataframeUtils.getFieldConvertExps(tableSplit, tblNameAndDF, colList, connectEntity, _hawqMete),
      DataframeUtils.getTableFieldNames(colList),
      externalFlag, ip)

    logInfo("totalSQL ==> " + externalTableDDL)

    // SQL statements that copy the staged data
    val sqlList = List(externalTableDDL)
    logInfo(s"表路径信息:" + _location)
    // Grant access on the table directory
    Process(s"hdfs dfs -chmod 777 ${_location}").!!
    // Grant access on the staged ORC files for the table
    Process(s"hdfs dfs -chmod -R 777 ${ip + _location.replace(cluster_namespace, "")}/*.orc").!!
    val executeRs = MppUtils.executeSqls(url, user, password, sqlList)
    if (executeRs) {
      logInfo("加载缓存数据至KDW_for_HAWQ成功")
      // Report the accumulated data status (row counts) for this load
      DataframeUtils.reportDataStatusRelation(_kc, mppDataStatusInfo, _hawq_Config.db_name, _hawq_Config.table_name, _hawq_Config.extender.meta.clazz,
        compact(render(_hawq_Config.extender.meta.params)))
    } else {
      throw new Exception("加载缓存数据至KDW_for_HAWQ失败")
    }
  }

  /**
   * Build the physical HDFS staging location for this table's data:
   * `systemPath` + workDir + table name. Also chmods the whole `systemPath`
   * recursively as a side effect.
   *
   * @param workDir staging path (with or without a leading slash)
   * @return full staging location
   */
  def getLocation(workDir: String): String = {
    //    val data_dev_id = _kc.conf.getString(SchedulerConstants.DATA_DEV_ID).replace("@", "")
    var location: String = ""
    //        val systemPath = System.getenv("SPARK_YARN_STAGING_DIR")
    if (workDir.startsWith("/")) {
      location = s"${systemPath}${workDir}/${_hawq_Config.table_name}"
    } else {
      location = s"${systemPath}/${workDir}/${_hawq_Config.table_name}"
    }
    // NOTE(review): recursive chmod 777 over the entire systemPath is broad and
    // potentially expensive — confirm a narrower target would not suffice.
    Process(s"hdfs dfs -chmod -R 777 ${systemPath}").!!
    log.info(s"LOCATION: ${location}")
    location
  }

  /**
   * Build the JDBC URL for the MPP connection. The database part of the URL is
   * the instance name resolved from ds_config, not the configured db_name.
   *
   * @return PostgreSQL-protocol JDBC URL
   */
  def getConnectUrl(): String = {
    val host = _host
    val port = _port
    val dbName = _instansename
    val url = s"jdbc:postgresql://${host}:${port}/${dbName}"
    log.info("url: " + url)
    url
  }
}
