package com.kingsoft.dc.khaos.module.spark.sink

import java.util

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.constants.{DataTypeEnum, MetaDataConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.sink.HdfsConfig
import com.kingsoft.dc.khaos.module.spark.util._
import org.apache.spark.sql._
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import org.json4s.jackson.JsonMethods.{compact, parse, render}
import com.kingsoft.dc.khaos.module.spark.model.{MetaDataEntity, StructFieldEntity}
import com.kingsoft.dc.khaos.module.spark.model.center.metric.SyncProcessDataMetric
import com.kingsoft.dc.khaos.module.spark.model.exchangefile.ExchangeFileUtils
import com.kingsoft.dc.khaos.module.spark.model.exchangefile.control.ControlFile
import com.kingsoft.dc.khaos.module.spark.model.exchangefile.ddl.StructFileDDL
import com.kingsoft.dc.khaos.module.spark.model.hdfs.HdfsDataStatusInfo
import com.kingsoft.dc.khaos.module.spark.source.HdfsSource
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.functions.{col, lit}
import org.apache.spark.sql.types.{NumericType, StringType}
import org.apache.spark.util.LongAccumulator

import scala.collection.mutable.ArrayBuffer
import scala.util.Random

/**
 * Created by wuxiang on 2021/08/18.
 */
class HdfsSink extends SinkStrategy {
  // Mutable state wired up by init()/sink(); one instance handles one sink job.
  private var meta: MetaDataEntity = _
  private var sparkSession: SparkSession = _
  private var hdfsConf: HdfsConfig = _
  private var fs: FileSystem = _
  private var writeOption: String = ""
  private var custFilePath: String = ""
  private var hdfsDataStatusInfo: HdfsDataStatusInfo = null
  private var accu: LongAccumulator = _
  private var submitTime: String = null

  // Replacement values for nulls so generated delimited files keep a constant
  // number of separators per row (see replaceNa).
  val NA_FILL_NUMERIC = 0
  val NA_FILL_STRING = MetaDataConstants.NULL
  // Temporary working directory used to cache data before the final move.
  val WORKING_DIR = "/.working/di"

  /**
   * Checks read authorization, loads the HDFS table metadata and initializes
   * the Spark session / HDFS FileSystem handles used by the rest of the sink.
   *
   * @param kc         job context (Spark session, scheduler configuration)
   * @param HdfsConfig sink configuration; NOTE(review): the parameter name
   *                   shadows the `HdfsConfig` type — kept unchanged so any
   *                   named-argument callers keep working
   */
  def init(kc: KhaosContext, HdfsConfig: HdfsConfig) = {
    implicit val formats = DefaultFormats

    // Authorization check — invoked for its side effect only; the original
    // code bound the result to an unused local.
    MetaUtils.checkReadAuth(kc,
      HdfsConfig.db_name,
      HdfsConfig.table_name,
      HdfsConfig.extender.auth.clazz,
      compact(render(HdfsConfig.extender.auth.params)))

    // Fetch table metadata (columns, output path, file format, delimiter, ...).
    meta = MetaUtils.getHdfsMeta(kc,
      HdfsConfig.db_name,
      HdfsConfig.table_name,
      HdfsConfig.extender.meta.clazz,
      compact(render(HdfsConfig.extender.meta.params)),
      this)

    this.sparkSession = kc.sparkSession

    // Initialize the HDFS file system from the job's Hadoop configuration.
    fs = FileSystem.get(kc.sparkSession.sparkContext.hadoopConfiguration)
    this.submitTime = kc.conf.getString(SchedulerConstants.SUBMIT_TIME, "")
  }

  /**
   * Writes the DataFrame to HDFS: caches it under a per-job working
   * directory, moves it to the final output path, then reports data status
   * and sync metrics to the operations center.
   */
  override def sink(kc: KhaosContext,
                    module_id: String,
                    config: JObject,
                    schema: Schema,
                    dataFrame: DataFrame): this.type = {

    // Parse the module config JSON into an HdfsConfig.
    implicit val formats = DefaultFormats
    hdfsConf = parse(compact(render(config)), true).extract[HdfsConfig]
    init(kc, hdfsConf)

    writeOption = hdfsConf.write_option
    val jobId = kc.conf.getString(SchedulerConstants.JOB_INSTANCE_ID)
    val hdfsDynamicsPath = MetaUtils.getHdfsDynamicsPath(kc)

    // Per-job working directory so concurrent jobs cannot clobber each other.
    val workDir = s"${WORKING_DIR}/${jobId}"
    val outputDir = meta.getHdfsOutputPath
    custFilePath = hdfsConf.hdfs_sub_path.getOrElse("")

    // Apply field mapping and default values, then convert column types.
    val tempdf = DataframeUtils.setDefaultValue(hdfsConf.extract_fields, meta.getColumnEntiy, dataFrame)
    var df = DataframeUtils.convertDataType(hdfsConf.extract_fields, tempdf)

    // Re-sort columns: setDefaultValue may have changed the column order.
    df = DataframeUtils.sortDataCol(df, meta.getColumnEntiy)

    logInfo("开始缓存数据至临时目录...")
    val cacheSuccess = cacheData(df, hdfsDynamicsPath, workDir)
    if (cacheSuccess) {
      logInfo("缓存数据移动至最终存储目录...")
      moveDataToOutput(workDir, outputDir)
      deleteCacheData(workDir)

      // Report the data status (file location/name/size) upstream.
      DataframeUtils.reportDataStatusHdfs(kc,
        hdfsDataStatusInfo,
        hdfsConf.db_name,
        hdfsConf.table_name,
        hdfsConf.extender.meta.clazz,
        compact(render(hdfsConf.extender.meta.params)))

      // Report the processed-row count to the operations center.
      val metric: SyncProcessDataMetric = CenterMetricUtils.buildSyncProcessDataMetric(kc)
      metric.setProcessDataLValue(accu.value.toLong)
      CenterMetricUtils.reportSyncProcessData(metric, kc)
    }
    this
  }

  /**
   * Caches the DataFrame into a temporary working directory as a single csv
   * or orc file, merging with the existing target file in append mode, then
   * records file location/name/size for status reporting.
   *
   * @param data             data to cache
   * @param hdfsDynamicsPath dynamically generated file name / sub-path
   * @param workDir          temporary cache directory
   * @return true on success; failures raise instead of returning false
   */
  def cacheData(data: DataFrame, hdfsDynamicsPath: String, workDir: String): Boolean = {
    import org.apache.spark.sql.functions._
    import scala.util.control.NonFatal
    var successFlag = false
    var fileSize: String = ""
    var fileName: String = ""
    var outputFile = ""
    var oldFilePath = ""
    var fileLocation: String = ""
    hdfsDataStatusInfo = new HdfsDataStatusInfo
    try {
      var delimiter = meta.getHdfsFileDelimiter()
      var targetData = data

      logInfo(s"=>>> sink delimiter: $delimiter")

      val fileFormat = MetaUtils.getHdfsFileFormat(meta.getTableEntiy).trim.toLowerCase
      logInfo(s"=>>> fileformat: $fileFormat")
      logInfo("=>>> custFilePath:" + custFilePath)
      if (StringUtils.isNotBlank(custFilePath)) {
        if (!custFilePath.endsWith("/")) {
          // The custom path names a concrete file: write straight to it.
          outputFile = getOutputPath(custFilePath)
          oldFilePath = outputFile
        } else {
          // The custom path is a directory: generate a file name under it.
          // (The original third "unsupported path" branch was unreachable —
          // a non-blank path either ends with '/' or it does not.)
          outputFile = getOutputPath(custFilePath) + hdfsDynamicsPath
          oldFilePath =
            if ("orc".equals(fileFormat)) ExchangeFileUtils.genOrcFileName(outputFile)
            else ExchangeFileUtils.genDatFileName(outputFile)
        }
      } else {
        outputFile = getOutputPath(hdfsDynamicsPath)
        oldFilePath =
          if ("orc".equals(fileFormat)) ExchangeFileUtils.genOrcFileName(outputFile)
          else ExchangeFileUtils.genDatFileName(outputFile)
      }
      // Wrap the data with an accumulator so the row count can be reported.
      val (tempData, accumulator) = DataframeUtils.calculateDataNum(sparkSession, targetData, "HdfsSink")
      accu = accumulator
      logInfo(s"outputFile:$outputFile, oldFilePath:$oldFilePath")
      targetData = doAppend(fileFormat, oldFilePath, tempData)

      // Enterprise cloud 2.3 default: variable-length records.
      logInfo(s"=>>> 不定长导出文件 ...")
      if (StringUtils.isBlank(delimiter)) {
        delimiter = MetaDataConstants.DATA_SEPARATOR
      }

      fileFormat match {
        case "csv" => {
          logInfo("=>>>按csv格式写入中...")
          // Replace nulls so every row keeps the same number of separators.
          targetData = replaceNa(targetData)
          val colNames = targetData.columns
          val colArr = new Array[Column](colNames.size)
          for (index <- 0 until colNames.size) {
            colArr(index) = targetData.col(colNames(index))
          }
          // Concatenate all columns into one string column so the custom
          // delimiter is emitted verbatim (no csv quoting/escaping applied).
          val tempDF = targetData.select(concat_ws(delimiter, colArr: _*).cast(StringType).as("value"))
          tempDF.coalesce(1).write
            .option("ignoreLeadingWhiteSpace", false: Boolean)
            .option("ignoreTrailingWhiteSpace", false: Boolean)
            .format("csv")
            .mode("append")
            .save(s"${workDir}")
        }
        case "orc" => {
          logInfo("=>>>按orc格式写入中...")
          targetData.coalesce(1).write
            .option("quote", "")
            .option("ignoreLeadingWhiteSpace", false: Boolean)
            .option("ignoreTrailingWhiteSpace", false: Boolean)
            .format("orc")
            .mode("append")
            .save(s"${workDir}")
        }
        case _ => throw new Exception("=>>> 暂时不支持写入该数据类型！")
      }

      // BUGFIX: printSchema() returns Unit, so the old interpolation logged
      // "()"; treeString renders the schema as text.
      logInfo(s"=>>>targetData schema:${targetData.schema.treeString}")

      // Skip Spark bookkeeping files (those starting with "_").
      val fileList = HdfsUtils.listFiles(workDir, "_", fs)
      for (index <- 0 until fileList.size()) {
        val tmpCacheDatFile = workDir + "/" + fileList.get(index)
        fileLocation = genDataStatusLocation(oldFilePath)
        logInfo("===>fileLocation：" + fileLocation)
        logInfo("===>tmpCacheDatFile：" + tmpCacheDatFile)
        logInfo("===>outputDatFile：" + oldFilePath)

        if (writeOption.toLowerCase.equals(MetaDataConstants.APPEND)) {
          fileName = doOverWrite(tmpCacheDatFile, oldFilePath, oldFilePath)
        } else if (writeOption.toLowerCase.equals(MetaDataConstants.OVERWRITE)) {
          fileName = doOverWrite(tmpCacheDatFile, oldFilePath, fileLocation)
        }

        // No control/ddl files here; use the FS API to compute the data size.
        fileSize = String.valueOf(fs.getContentSummary(new Path(fileLocation)).getLength)

      }
      successFlag = true
      logInfo("缓存数据成功")
    } catch {
      // Use NonFatal so OOM/Interrupted etc. propagate untouched, and keep
      // the original exception as the cause instead of discarding its stack.
      case NonFatal(e) => {
        logError("缓存数据失败,错误信息:" + e.getMessage + ",错误引发原因:" + e.getCause, e)
        throw new Exception("缓存数据失败,错误信息:" + e.getMessage + ",错误引发原因:" + e.getCause, e)
      }
    }
    // 20200901: report base path + the sub-directory configured in the sync page.
    hdfsDataStatusInfo.setFileLocation(fileLocation)
    hdfsDataStatusInfo.setFileName(fileName.split("/").last)
    hdfsDataStatusInfo.setFileSize(fileSize)
    successFlag
  }

  /**
   * Joins the table's base output path with a file name, collapsing any "//"
   * produced by the concatenation.
   *
   * NOTE(review): replaceAll("//", "/") would also mangle a scheme prefix
   * such as "hdfs://host/..." — presumably getHdfsOutputPath is scheme-less;
   * confirm before reusing with fully-qualified URIs.
   *
   * @param fileName file name (may itself contain a sub-path)
   * @return normalized absolute output path
   */
  def getOutputPath(fileName: String): String = {
    val HdfsOutputPath = (meta.getHdfsOutputPath + "/" + fileName).replaceAll("//", "/")
    HdfsOutputPath
  }

  /**
   * In append mode, unions the existing file's rows in front of the new data.
   * A temporary sort column (old rows = 1, new rows = null, sorted desc)
   * guarantees the old content comes first; it is dropped before returning.
   *
   * @param fileFormat  "orc" or a delimited text format
   * @param oldFilePath path of the file that may already exist
   * @param data        newly produced data
   * @return the (possibly merged) DataFrame to write
   */
  def doAppend(fileFormat: String, oldFilePath: String, data: DataFrame) = {
    var targetData = data
    var oldDataFrame: DataFrame = readOldFile(fileFormat, oldFilePath, targetData)

    // Pick a sort-column name that does not collide with an existing column.
    var sortFieldName = "row_number_over" + Random.nextInt(10)
    while (targetData.columns.contains(sortFieldName)) {
      sortFieldName = sortFieldName + Random.nextInt(10)
    }
    if (null != oldDataFrame) {
      oldDataFrame = oldDataFrame.withColumn(sortFieldName, lit(1).cast(StringType))
      targetData = targetData.withColumn(sortFieldName, lit(null).cast(StringType))
      targetData = oldDataFrame.unionByName(targetData).orderBy(col(sortFieldName).desc)
    }
    targetData = targetData.drop(sortFieldName)
    targetData
  }

  /**
   * Replaces the file at fileLocation with the cached file by delete + rename.
   *
   * @param tmpCacheDatFile cached file in the working directory
   * @param outputFile      final file path (rename target)
   * @param fileLocation    path to delete before the rename
   * @return the final file path
   */
  def doOverWrite(tmpCacheDatFile: String, outputFile: String, fileLocation: String): String = {
    HdfsUtils.deletePath(fileLocation, fs)
    HdfsUtils.renameFile(tmpCacheDatFile, outputFile, fs)
    outputFile
  }

  /**
   * In append mode, reads the existing target file (if any) so it can be
   * merged ahead of the new data; delimited text is parsed against the table
   * metadata schema.
   *
   * @return the old data, or null when not in append mode / no file exists
   *         (null is part of the contract — doAppend checks for it)
   */
  def readOldFile(fileFormat: String, oldFilePath: String, data: DataFrame): DataFrame = {
    val source = new HdfsSource
    if (writeOption.toLowerCase.equals(MetaDataConstants.APPEND) && HdfsUtils.isExists(oldFilePath, fs)) {
      fileFormat match {
        case "orc" => sparkSession.read.format("orc").load(oldFilePath)
        case _ =>
          val delimiter = meta.getHdfsFileDelimiter
          // Character set of the existing file, from table metadata.
          val characterSet = MetaUtils.getTableCharacterSet(meta.getTableEntiy)
          val rddData = source.readCsvNotCheckNull(sparkSession, oldFilePath, delimiter, fileFormat, characterSet)
          // Collect every column name and type from the exchange interface.
          val columnArr = new ArrayBuffer[(String, String)]()
          val columnInfoMetaList: java.util.List[DmTableColumn] = meta.getColumnEntiy
          for (i <- 0 until columnInfoMetaList.size) {
            columnArr += (columnInfoMetaList.get(i).getColName -> columnInfoMetaList.get(i).getColType)
          }
          // Generate the schema based on the (name, type) pairs.
          val schema: org.apache.spark.sql.types.StructType = SparkJobHelper.dynamicBuildDFSchema(columnArr)

          val rowRDD = source.convert2Rows(schema, delimiter, rddData)
          sparkSession.createDataFrame(rowRDD, schema)
      }
    } else {
      null
    }
  }


  /**
   * Generates the control (xml) file describing a data file.
   *
   * @param datFileName  data file name
   * @param ctrlFileName control file name
   * @return the data file size as a string
   */
  def generateCtrlFile(datFileName: String, ctrlFileName: String): String = {
    val recordCount = sparkSession.sparkContext.textFile(datFileName).count()
    val contentSummary = fs.getContentSummary(new Path(datFileName))
    val fileSize = contentSummary.getLength
    val cf: ControlFile = new ControlFile(DataTypeEnum.TXT.toString,
      datFileName,
      String.valueOf(recordCount),
      String.valueOf(fileSize),
      submitTime,
      DateUtils.getNowTime())
    cf.writeXml(ctrlFileName, fs)
    String.valueOf(fileSize)
  }

  /**
   * Generates the DDL (xml) file describing a data file's structure.
   *
   * @param datFileName data file name
   * @param ddlFileName ddl file name
   */
  def generateDDLFile(datFileName: String, ddlFileName: String): Unit = {
    val ddl = new StructFileDDL()
    ddl.setFileName(datFileName)
    ddl.setFieldCount(String.valueOf(meta.getColumnEntiy.size))
    ddl.setFileVersion("1.0")
    ddl.setIsfixedlength("1")
    val fields = new util.ArrayList[StructFieldEntity]()
    import scala.collection.JavaConverters._
    val columnList = meta.getColumnEntiy
    columnList.asScala.foreach(col => {
      val structField = new StructFieldEntity(col.getColName, col.getColType)
      fields.add(structField)
    })
    ddl.setFielddescription(fields)
    ddl.writeXml(ddlFileName, fs)
  }

  /**
   * Moves cached files from the working directory to the output directory.
   *
   * @param cacheDir  cache directory
   * @param outputDir final output directory
   */
  def moveDataToOutput(cacheDir: String, outputDir: String): Unit = {
    HdfsUtils.moveFiles(cacheDir, outputDir, fs)
  }

  /**
   * Deletes the cache directory.
   *
   * @param cacheDir cache directory
   */
  def deleteCacheData(cacheDir: String): Unit = {
    HdfsUtils.deletePath(cacheDir, fs)
  }

  /**
   * Removes nulls from the DataFrame: non-numeric columns are cast to string,
   * then numeric nulls are filled with 0 and string nulls with the NULL
   * placeholder, so the delimited output keeps a constant separator count.
   *
   * @param data DataFrame to clean
   * @return DataFrame with nulls replaced
   */
  def replaceNa(data: DataFrame): DataFrame = {
    var targetData = data
    val colNames = targetData.columns
    val fields = targetData.schema.fields
    for (i <- 0 until colNames.length) {
      val field = fields(i)
      val fieldType = field.dataType
      val colName = colNames(i)
      if (!fieldType.isInstanceOf[NumericType]) {
        targetData = targetData.withColumn(colName, targetData.col(colName).cast(StringType))
      }
    }
    targetData = targetData.na.fill(NA_FILL_NUMERIC)
    targetData = targetData.na.fill(NA_FILL_STRING)
    targetData
  }

  /**
   * Derives the parent directory of a data file for status reporting.
   *
   * @param fileName base path plus the job-configured file name (which may
   *                 itself contain a sub-path), e.g. /aaa/bb.dat or bb.dat
   * @return the normalized directory part (no trailing slash)
   */
  def genDataStatusLocation(fileName: String): String = {
    // Strip the trailing file-name component.
    var fileLocation: String = fileName.split("/").dropRight(1).mkString("/")
    // Collapse "//" sequences that concatenation may have produced.
    fileLocation = fileLocation.replaceAll("///", "//").replaceAll("//", "/").stripSuffix("/")
    fileLocation
  }
}
