package com.kingsoft.dc.khaos.module.spark.sink

import java.util
import java.util.{Locale, TimeZone}

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.constants.{DataTypeEnum, MetaDataConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.sink.Ks3Config
import com.kingsoft.dc.khaos.module.spark.model.exchangefile.ExchangeFileUtils
import com.kingsoft.dc.khaos.module.spark.model.exchangefile.control.ControlFile
import com.kingsoft.dc.khaos.module.spark.model.exchangefile.ddl.StructFileDDL
import com.kingsoft.dc.khaos.module.spark.model.ks3.Ks3DataStatusInfo
import com.kingsoft.dc.khaos.module.spark.model.{MetaDataEntity, StructFieldEntity}
import com.kingsoft.dc.khaos.module.spark.util._
import com.kingsoft.dc.khaos.util.Logging
import org.apache.commons.lang3.StringUtils
import org.apache.commons.lang3.time.FastDateFormat
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{ContentSummary, FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.functions.{col, lit}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame, Row, SaveMode, SparkSession}
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.mutable.ArrayBuffer
import scala.util.Random

/**
  * Created by haorenhui on 2019/12/05.
  *
  * Sink strategy that exports a DataFrame to KS3 (Kingsoft object storage,
  * accessed through the Hadoop FileSystem API) as JSON, delimited text, or
  * fixed-length text. Data is first cached into a per-job working directory,
  * then moved to the final output path; a companion control (.xml) file and a
  * DDL file are generated for each data file, and a data-status record is
  * reported afterwards.
  */
class Ks3Sink extends SinkStrategy with Logging {

  // Default column separator for delimited output.
  final val SEPARATOR = MetaDataConstants.KS3_DATA_SEPARATOR
  private var sparkSession: SparkSession = _
  // Job submit time from the scheduler conf; written into the control file.
  private var submitTime: String = _
  // FileSystem handle backed by the KS3-augmented Hadoop configuration.
  private var ks3Fs: FileSystem = _
  // Target table metadata: columns, output path, file format, access config.
  private var meta: MetaDataEntity = _
  // Column metadata of the target table, used for column ordering and schemas.
  private var columnEntiy: util.List[DmTableColumn] = _
  // Whether to prepend a header row to CSV output (config flag "add_head").
  private var isAddHeader: Boolean = false
  // Write mode from config: append or overwrite.
  private var writeOption: String = ""
  // Accumulates name/location/size of the written file for status reporting.
  private var ks3DataStatusInfo: Ks3DataStatusInfo = _
  // User-specified output file name; empty means a generated name is used.
  private var custFileName: String = ""
  private var ks3Config: Ks3Config = _

  // Null-replacement values, so every output line keeps a stable delimiter count:
  // numeric columns are filled with 0, everything else with the NULL placeholder.
  val NA_FILL_NUMERIC = 0
  val NA_FILL_STRING: String = MetaDataConstants.NULL
  // Temporary directory prefix configured at job runtime.
  val JOB_TMP_DIR = "/di/tmp/job_"
  // Root working directory for per-job data caching.
  val WORKING_DIR = "/.working/di"


  /**
    * Entry point: parses the sink config, initialises state, and writes the
    * data out.
    *
    * @param kc        khaos runtime context (spark session, scheduler conf)
    * @param module_id id of this module instance (unused in this sink)
    * @param config    raw JSON config, extracted into a [[Ks3Config]]
    * @param schema    declared schema of the incoming data (unused in this sink)
    * @param dataFrame data to export
    * @return this sink, for chaining
    */
  override def sink(kc: KhaosContext,
                    module_id: String,
                    config: JObject,
                    schema: Schema,
                    dataFrame: DataFrame): this.type = {
    // Parse the JSON config into the typed Ks3Config model.
    implicit val formats: DefaultFormats = DefaultFormats
    ks3Config = parse(compact(render(config)), true: Boolean).extract[Ks3Config]
    init(kc, ks3Config)
    doWrite(kc, dataFrame)
    this
  }

  /**
    * Validates write permission, loads the table metadata, and initialises
    * the spark session, write options, and the KS3 file system.
    *
    * @throws Exception when the write-authorization check fails
    */
  def init(kc: KhaosContext, ks3Config: Ks3Config): Unit = {

    // Authorization check: may we write to the target db/table?
    val checkResult: Boolean = MetaUtils.checkWriteAuth(kc,
      ks3Config.db_name,
      ks3Config.table_name,
      ks3Config.extender.auth.clazz,
      compact(render(ks3Config.extender.auth.params)))

    if (!checkResult) {
      log.error(s"读取ks3 ${ks3Config.db_name}.${ks3Config.table_name}, 权限验证未通过")
      throw new Exception(s"读取ks3 ${ks3Config.db_name}.${ks3Config.table_name}, 权限验证未通过")
    }

    // Fetch the target table metadata.
    meta = MetaUtils.getKs3Meta(kc,
      ks3Config.db_name,
      ks3Config.table_name,
      ks3Config.extender.meta.clazz,
      compact(render(ks3Config.extender.meta.params)),
      this)

    columnEntiy = meta.getColumnEntiy
    this.sparkSession = kc.sparkSession
    this.submitTime = kc.conf.getString(SchedulerConstants.SUBMIT_TIME, "")
    // Whether to prepend a header row to the output.
    isAddHeader = if ("true".equals(ks3Config.add_head)) true else false
    // Write mode: append / overwrite.
    writeOption = ks3Config.write_option

    initKs3FileSystem()
  }

  /**
    * Initialises the KS3 file system handle.
    */
  def initKs3FileSystem(): Unit = {
    ks3Fs = FileSystem.get(addKs3FileSystem())
  }

  /**
    * Adds the KS3 access configuration to the Hadoop configuration of the
    * current SparkContext and returns that (mutated) configuration.
    */
  def addKs3FileSystem(): org.apache.hadoop.conf.Configuration = {
    val hadoopConf: Configuration = HadoopKs3Utils.appendKs3HadoopConfigs(sparkSession.sparkContext.hadoopConfiguration, meta.getKs3AccessConfig)
    sparkSession.sparkContext.hadoopConfiguration.addResource(hadoopConf)
    sparkSession.sparkContext.hadoopConfiguration
  }

  /**
    * Writes the data: applies field mapping and defaults, caches the result
    * under a per-job working directory, then moves it to the final output
    * path and reports the data status.
    */
  def doWrite(kc: KhaosContext, dataFrame: DataFrame): Unit = {

    val jobId: String = kc.conf.getString(SchedulerConstants.JOB_INSTANCE_ID)
    val objectKeySuffix: String = MetaUtils.getKs3DynamicsPath(kc)

    val workDir = s"$WORKING_DIR/$jobId"
    val outputDir: String = meta.getKs3OutputPath
    // File name specified by the user; when absent, objectKeySuffix is used.
    custFileName = ks3Config.file_name.getOrElse("")

    // Apply field mapping, default values, and type conversion.
    val tempDf: DataFrame = DataframeUtils.setDefaultValue(ks3Config.extract_fields, meta.getColumnEntiy, dataFrame)
    var df: DataFrame = DataframeUtils.convertDataType(ks3Config.extract_fields, tempDf)

    // Re-sort the columns: setDefaultValue may have disturbed the field order.
    df = DataframeUtils.sortDataCol(df, columnEntiy)

    logInfo("开始缓存数据至临时目录...")
    val cacheSuccess: Boolean = cacheData(df, objectKeySuffix, workDir)
    if (cacheSuccess) {
      logInfo("缓存数据移动至最终存储目录...")
      moveDataToOutput(workDir, outputDir)
      deleteCacheData(workDir)

      // Report the data status upstream.
      DataframeUtils.reportDataStatusKs3(kc,
        ks3DataStatusInfo,
        ks3Config.db_name, ks3Config.table_name,
        ks3Config.extender.meta.clazz,
        compact(render(ks3Config.extender.meta.params)))
    }
  }

  /**
    * Overwrites the target file: deletes the existing data, control, and DDL
    * files, then renames the cached temp file into place.
    *
    * @param tmpCacheDatFile temp directory / temp data file to move
    * @param outputFile      target output file name
    * @return the final data file path
    */
  def overWriteFile(tmpCacheDatFile: String, outputFile: String): String = {
    val outputDatFile: String = outputFile
    val ctrlFileName: String = ExchangeFileUtils.genControlFileName(outputFile)
    val ddlFileName: String = ExchangeFileUtils.genDDLFileName(outputFile)

    HdfsUtils.deletePath(outputDatFile, ks3Fs)
    HdfsUtils.deletePath(ctrlFileName, ks3Fs)
    HdfsUtils.deletePath(ddlFileName, ks3Fs)
    HdfsUtils.renameFile(tmpCacheDatFile, outputDatFile, ks3Fs)
    outputDatFile
  }

  /**
    * Appends the new data after the contents of an already-existing file.
    *
    * @param fileFormat  file format: csv/json
    * @param oldFilePath path of the pre-existing file
    * @param data        new data set
    * @return old rows followed by new rows (or `data` unchanged when no old file exists)
    */
  def doAppend(fileFormat: String, oldFilePath: String, data: DataFrame): DataFrame = {
    var targetData: DataFrame = data
    var oldDataFrame: DataFrame = readOldFile(fileFormat, oldFilePath, targetData)

    // Temporary column used only for ordering, so old rows come first; it is
    // dropped afterwards. Extra digits are appended until the name is unique.
    var sortFieldName: String = "row_number_over" + Random.nextInt(10)
    while (targetData.columns.contains(sortFieldName)) {
      sortFieldName = sortFieldName + Random.nextInt(10)
    }

    if (null != oldDataFrame) {
      // Old rows get "1", new rows get null; descending order puts "1" first.
      oldDataFrame = oldDataFrame.withColumn(sortFieldName, lit(1).cast(StringType))
      targetData = targetData.withColumn(sortFieldName, lit(null).cast(StringType))
      targetData = oldDataFrame.unionByName(targetData).orderBy(col(sortFieldName).desc)
    }
    targetData = targetData.drop(sortFieldName)
    targetData
  }

  /**
    * Caches the data into the temporary working directory and generates the
    * matching control and DDL files, populating [[ks3DataStatusInfo]] on the way.
    *
    * @param data              data to cache
    * @param objectKeyFileName last path segment of the object key, e.g. ks3://${bucket}/dir1/{objectKeySuffix}
    * @param workDir           temporary cache directory
    * @return true once caching succeeded (any failure rethrows instead)
    */
  def cacheData(data: DataFrame, objectKeyFileName: String, workDir: String): Boolean = {
    import org.apache.spark.sql.functions._
    var successFlag = false
    var fileSize: String = ""
    var fileName: String = ""
    var outputFile = ""
    var oldFilePath = ""
    // Status-report accumulator for this write.
    ks3DataStatusInfo = new Ks3DataStatusInfo
    try {
      var delimiter: String = meta.getKs3FileDelimiter
      logInfo(s"=>>> sink delimiter: $delimiter")
      val fileFormat: String = MetaUtils.getKs3FileFormat(meta.getTableEntiy).trim.toLowerCase

      var targetData: DataFrame = data
      // Did the user specify an explicit file name?
      if (StringUtils.isNotBlank(custFileName)) {
        outputFile = getOutputObject(custFileName)
        oldFilePath = outputFile
      } else {
        outputFile = getOutputObject(objectKeyFileName)
        oldFilePath = ExchangeFileUtils.genDatFileName(outputFile)
      }

      if ("json".equals(fileFormat)) {
        // Append mode: merge in the existing file first.
        if (writeOption.toLowerCase.equals(MetaDataConstants.APPEND)) {
          targetData = doAppend(fileFormat, oldFilePath, targetData)
        }
        targetData.coalesce(1).write.format("json").mode(SaveMode.Overwrite).save(s"$workDir")
      } else {
        // Replace nulls so every output line keeps the expected delimiter count.
        targetData = replaceNa(targetData)
        // Build the header line.
        val csvFileHeader: String = getFileHeader(targetData, delimiter)
        // Fixed-length export flag.
        val isField: Boolean = MetaUtils.isfixedLength4Ks3(meta.getTableEntiy)
        val columns: Array[String] = targetData.columns
        val colArr: Array[Column] = columns.map((col_name: String) => col(col_name).cast(StringType))
        if (isField) {
          // Fixed-length export.
          logInfo(s"=>>> 定长导出文件 ...")
          var tempDF: DataFrame = targetData.select(columns.map((col_name: String) => col(col_name).cast(StringType)): _*)
          tempDF = DataframeUtils.UnLoading(tempDF, meta.getColumnEntiy, delimiter) // result has a single column named "value"

          writeCSVFile(tempDF, fileFormat, oldFilePath, workDir, csvFileHeader)
        } else {
          // Variable-length export: join all columns into one "value" string.
          logInfo(s"=>>> 不定长导出文件 ...")
          if (StringUtils.isBlank(delimiter)) {
            delimiter = MetaDataConstants.KS3_DATA_SEPARATOR
          }
          var tempDF: DataFrame = targetData.select(concat_ws(delimiter, colArr: _*).cast(StringType).as("value"))
          writeCSVFile(tempDF, fileFormat, oldFilePath, workDir, csvFileHeader)
        }
      }

      // Generate the control (xml) and ddl files for every data file written.
      // NOTE(review): listFiles is called with "_" — presumably a filter that
      // excludes Spark's _SUCCESS/_temporary entries; confirm against HdfsUtils.
      val fileList: util.List[String] = HdfsUtils.listFiles(workDir, "_", ks3Fs)
      for (index <- 0 until fileList.size()) {
        val tmpCacheDatFile: String = workDir + "/" + fileList.get(index)
        // Resolve the final data file name.
        var outputDatFile = ""
        if (StringUtils.isNotBlank(custFileName)) {
          outputDatFile = outputFile
        } else if (fileFormat.equals("json")) {
          outputDatFile = ExchangeFileUtils.genJsonFileName(outputFile)
        } else {
          outputDatFile = ExchangeFileUtils.genDatFileName(outputFile)
        }

        // Delete any pre-existing data/xml/ddl files and move the cached file into place.
        fileName = overWriteFile(tmpCacheDatFile, outputDatFile)

        // Strip the extension so the control/ddl names share the data file's base name.
        var outputFile_bak: String = outputFile
        if (outputFile.endsWith(".csv")) {
          outputFile_bak = outputFile_bak.stripSuffix(".csv")
        } else if (outputFile.endsWith(".json")) {
          outputFile_bak = outputFile_bak.stripSuffix(".json")
        }
        // Control file.
        val ctrlFileName: String = ExchangeFileUtils.genControlFileName(outputFile_bak)
        fileSize = generateCtrlFile(outputDatFile, ctrlFileName)
        // DDL file.
        val ddlFileName: String = ExchangeFileUtils.genDDLFileName(outputFile_bak)
        generateDDLFile(outputDatFile, ddlFileName)

      }
      successFlag = true
      logInfo("缓存数据成功")
    } catch {
      // NOTE(review): catching Throwable also traps fatal errors (OOM etc.),
      // and printStackTrace duplicates the logError below; rethrowing as a
      // bare Exception loses the original type. Consider NonFatal(e) and
      // dropping printStackTrace.
      case e: Throwable =>
        e.printStackTrace()
        logError("缓存数据失败,错误信息:" + e.getMessage + ",错误引发原因:" + e.getCause, e)
        throw new Exception("缓存数据失败,错误信息:" + e.getMessage + ",错误引发原因:" + e.getCause, e)

    }
    // 20200901: report base path + the sub-directory configured on the sync page.
    val fileLocation: String = genDataStatusLocation(fileName)
    ks3DataStatusInfo.setFileLocation(fileLocation)
    ks3DataStatusInfo.setFileName(fileName.split("/").last)
    ks3DataStatusInfo.setFileSize(fileSize)
    successFlag
  }

  /**
    * Append mode: reads the contents of the old file when it exists.
    *
    * @return the old data, or null when no file exists at `oldFilePath`
    */
  def readOldFile(fileFormat: String, oldFilePath: String, data: DataFrame): DataFrame = {
    var oldData: DataFrame = null

    if (HdfsUtils.isExists(oldFilePath, ks3Fs)) {
      // Collect all field names and types from the exchange-interface metadata.
      val columnArr = new ArrayBuffer[(String, String)]()
      val columnInfoMetaList: java.util.List[DmTableColumn] = meta.getColumnEntiy
      for (i <- 0 until columnInfoMetaList.size) {
        columnArr += (columnInfoMetaList.get(i).getColName -> columnInfoMetaList.get(i).getColType)
      }
      var schema: StructType = SparkJobHelper.dynamicBuildDFSchema(columnArr)
      fileFormat match {
        case "json" =>
          oldData = sparkSession.read.format("json").schema(schema).load(oldFilePath)
          // Restore the expected column order.
          oldData = DataframeUtils.sortDataCol(oldData, columnEntiy)
        case "csv" =>
          schema = StructType(Array(StructField("value", StringType, nullable = true)))
          oldData = sparkSession.read.format("csv").schema(schema)
            .option("header", value = false) // do not treat the first line as a header
            .option("delimiter", "\n") // csv splits on comma by default; "\n" keeps each full line as one "value"
            .csv(oldFilePath)
        case _ => throw new Exception(s"=>>> 暂时不支持读取该数据类型 =>$fileFormat")
      }
    }

    oldData
  }


  /**
    * Removes nulls from the dataframe: numeric columns are filled with 0,
    * all other columns are cast to string and filled with the NULL placeholder.
    *
    * @param data dataframe to process
    * @return dataframe with nulls replaced
    */
  def replaceNa(data: DataFrame): DataFrame = {
    var targetData: DataFrame = data
    val colNames: Array[String] = targetData.columns
    val fields: Array[StructField] = targetData.schema.fields
    for (i <- colNames.indices) {
      val field = fields(i)
      val fieldType: DataType = field.dataType
      val colName = colNames(i)
      // Non-numeric columns are cast to string so na.fill(NA_FILL_STRING) applies.
      if (!fieldType.isInstanceOf[NumericType]) {
        targetData = targetData.withColumn(colName, targetData.col(colName).cast(StringType))
      }
    }
    targetData = targetData.na.fill(NA_FILL_NUMERIC)
    targetData = targetData.na.fill(NA_FILL_STRING)
    targetData
  }

  /**
    * Builds the full output object path, collapsing any "//" introduced by
    * the concatenation.
    *
    * NOTE(review): replaceAll("//", "/") would also mangle a scheme separator
    * such as "ks3://" if keyToPath leaves one in the string — confirm that
    * HadoopKs3Utils.keyToPath strips the scheme first.
    *
    * @param objectKeyFileName file name (last segment of the object key)
    * @return normalized output path
    */
  def getOutputObject(objectKeyFileName: String): String = {
    var objectName: String = meta.getKs3OutputPath + "/" + objectKeyFileName
    objectName = HadoopKs3Utils.keyToPath(objectName)
    objectName.replaceAll("//", "/")
  }

  /**
    * Generates the control file for a data file.
    *
    * Re-reads the written data file to count records, then writes an XML
    * control file containing type, name, record count, byte size, submit
    * time and write time.
    *
    * @param datFileName  data file name
    * @param ctrlFileName control file name
    * @return the data file size in bytes, as a string
    */
  def generateCtrlFile(datFileName: String, ctrlFileName: String): String = {
    val recordCount: Long = sparkSession.read.textFile(datFileName).count()
    val contentSummary: ContentSummary = ks3Fs.getContentSummary(new Path(datFileName))
    val fileSize: Long = contentSummary.getLength
    val cf: ControlFile = new ControlFile(DataTypeEnum.TXT.toString,
      datFileName,
      String.valueOf(recordCount), // record count of the data file
      String.valueOf(fileSize),
      submitTime,
      DateUtils.getNowTime())
    cf.writeXml(ctrlFileName, ks3Fs)
    String.valueOf(fileSize)
  }

  /**
    * Generates the DDL file describing the data file's structure
    * (field count, per-field name and type).
    *
    * @param datFileName data file name
    * @param ddlFileName ddl file name
    */
  def generateDDLFile(datFileName: String, ddlFileName: String): Unit = {
    val ddl = new StructFileDDL()
    ddl.setFileName(datFileName)
    ddl.setFieldCount(String.valueOf(meta.getColumnEntiy.size))
    ddl.setFileVersion("1.0")
    ddl.setIsfixedlength("1") // NOTE(review): hard-coded to "1" even for variable-length exports — confirm intended
    val fields = new util.ArrayList[StructFieldEntity]()
    import scala.collection.JavaConverters._
    val columnList: util.List[DmTableColumn] = meta.getColumnEntiy
    columnList.asScala.foreach(col => {
      val structField = new StructFieldEntity(col.getColName, col.getColType)
      fields.add(structField)
    })
    ddl.setFielddescription(fields)
    ddl.writeXml(ddlFileName, ks3Fs) // write the DDL as XML next to the data file
  }

  /**
    * Moves the cached files to the final output directory.
    *
    * @param cacheDir  temporary cache directory
    * @param outputDir final storage directory
    */
  def moveDataToOutput(cacheDir: String, outputDir: String): Unit = {
    HdfsUtils.moveFiles(cacheDir, outputDir, ks3Fs)
  }

  /**
    * Deletes the cached data.
    *
    * @param cacheDir cache directory
    */
  def deleteCacheData(cacheDir: String): Unit = {
    HdfsUtils.deletePath(cacheDir, ks3Fs)
  }

  /**
    * Writes the single-column ("value") dataframe as text, honouring the
    * append and add-header options. Append mode never writes a header.
    */
  def writeCSVFile(data: DataFrame, fileFormat: String, oldFilePath: String, workDir: String, csvFileHeader: String = ""): Unit = {
    var writeData: DataFrame = data
    if (writeOption.toLowerCase.equals(MetaDataConstants.APPEND)) {
      // Append: merge the old file first; header is not re-emitted.
      writeData = doAppend(fileFormat, oldFilePath, writeData)
      writeCSVFile(writeData, false: Boolean, workDir)
    } else {
      if (isAddHeader) {
        // Header trick: the CSV header row is the column name, so renaming
        // "value" to the pre-joined header string emits it as the first line.
        writeData = writeData.withColumnRenamed("value", csvFileHeader)
        writeCSVFile(writeData, true: Boolean, workDir)
      } else {
        writeCSVFile(writeData, false: Boolean, workDir)
      }
    }
  }

  /**
    * Low-level writer: each row is already one fully-joined line, so the
    * delimiter is set to "\n" and quoting is disabled to keep the writer
    * from re-splitting or escaping the content.
    */
  def writeCSVFile(data: DataFrame, header: Boolean, workDir: String): Unit = {
    data.coalesce(1).write
      .option("timestampFormat", "yyyy/MM/dd HH:mm:ss ZZ")
      .option("delimiter", "\n")
      .option("quote", "") // disable quote escaping
      .option("header", header)
      .option("ignoreLeadingWhiteSpace", false: Boolean)
      .option("ignoreTrailingWhiteSpace", false: Boolean)
      .mode(SaveMode.Overwrite).csv(s"$workDir")
  }

  /**
    * Derives the directory portion of the reported file location.
    *
    * @param fileName file name chosen for the job (may carry a sub-path), e.g. /aaa/bb.dat or bb.dat
    * @return the normalized directory part (file name stripped) for status reporting
    */
  def genDataStatusLocation(fileName: String): String = {
    // Prefix "/" so a bare file name still yields a valid (empty) directory part.
    val metaPathWithFileName: String = "/" + fileName
    // Drop the trailing file-name segment.
    var fileLocation: String = metaPathWithFileName.split("/").dropRight(1).mkString("/")
    // Collapse any "//" produced by the concatenation.
    fileLocation = fileLocation.replaceAll("///", "//").replaceAll("//", "/").stripSuffix("/")
    fileLocation
  }

  /**
    * Joins all column names into a single header line using the given delimiter.
    *
    * Note: assumes the dataframe has at least one column (colNames(0)).
    */
  def getFileHeader(targetData: DataFrame, delimiter: String): String = {
    val colNames: Array[String] = targetData.columns
    val csvFileHeader: StringBuilder = new StringBuilder(colNames(0))
    for (i <- 1 until colNames.length) {
      csvFileHeader.append(delimiter + colNames(i))
    }
    csvFileHeader.toString()
  }
}
