package com.kingsoft.dc.khaos.module.spark.source

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.{CommonConstants, MetaDataConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.source._
import com.kingsoft.dc.khaos.module.spark.model.MetaDataEntity
import com.kingsoft.dc.khaos.module.spark.util._
import com.kingsoft.dc.khaos.util.{KhaosConstants, Logging}
import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.LongAccumulator
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.mutable.ArrayBuffer

/**
 * Created by wuxiang on 2021/08/18.
 *
 * Source strategy that reads CSV or ORC files from HDFS. The table location,
 * delimiter, charset and column layout come from exchange-interface metadata;
 * rows are validated against a dynamically built schema and returned as a
 * DataFrame. Malformed rows either fail the job or are counted and skipped,
 * depending on the `ignore_abnormal` advanced option.
 */
class HdfsSource extends SourceStrategy with Logging with Serializable {
  // Table/column metadata resolved in init().
  private var meta: MetaDataEntity = _
  private var sparkSession: SparkSession = _
  // Parsed job configuration; assigned in source() before init() is called.
  private var hdfsConf: HdfsConfig = _
  // When true, malformed rows are counted in abnormalAccumulator and dropped
  // instead of failing the job.
  private var ignoreAbnormal: Boolean = _
  private var abnormalAccumulator: LongAccumulator = _
  // Caps noisy per-row warnings at 10 per JVM (driver or executor copy of this
  // object); the accumulator still records the full abnormal-row count.
  private var abnormalWarnCount = 0

  /**
   * Checks read authorisation, loads HDFS table metadata and caches job-level
   * state (SparkSession, abnormal-row accumulator, ignore flag).
   *
   * @param kc         job context supplying the SparkSession and shared values
   * @param HdfsConfig parsed source configuration (parameter name kept as-is
   *                   for backward compatibility with named-argument callers)
   */
  def init(kc: KhaosContext, HdfsConfig: HdfsConfig) = {
    implicit val formats = DefaultFormats

    // Authorisation check — must run (and may throw) before metadata access.
    MetaUtils.checkReadAuth(kc,
      HdfsConfig.db_name,
      HdfsConfig.table_name,
      HdfsConfig.extender.auth.clazz,
      compact(render(HdfsConfig.extender.auth.params)))

    // Load table metadata for the configured db/table.
    meta = MetaUtils.getHdfsMeta(kc,
      HdfsConfig.db_name,
      HdfsConfig.table_name,
      HdfsConfig.extender.meta.clazz,
      compact(render(HdfsConfig.extender.meta.params)),
      this)

    this.sparkSession = kc.sparkSession
    abnormalAccumulator = kc._valuesChannel.getValues[LongAccumulator](KhaosConstants.KHAOS_ABNORMALACCUMULATOR)
    // Read from the parameter (not the hdfsConf field) so init() does not
    // depend on source() having assigned the field first.
    ignoreAbnormal = HdfsConfig.advanced_options.ignore_abnormal
      .getOrElse(IgnoreAbnormal(on_off = Option(false)))
      .on_off.getOrElse(false)
  }


  /**
   * Data extraction entry point: parses the module config, resolves the input
   * path from metadata, builds the target schema and reads the file as a
   * DataFrame, applying the optional user filter expression.
   *
   * @throws Exception for file formats other than csv/orc
   */
  override def source(kc: KhaosContext,
                      module_id: String,
                      config: String,
                      dependence: Dependency): DataFrame = {

    implicit val formats = DefaultFormats

    hdfsConf = parse(config, true).extract[HdfsConfig]
    init(kc, hdfsConf)
    val fileFormat = MetaUtils.getFileFormat(meta.getTableEntiy).trim.toLowerCase

    // Resolve the input path, optionally narrowed to a user-specified sub path,
    // and collapse accidental double slashes.
    val inputPath = this.getHdfsInputPath(kc, hdfsConf.hdfs_sub_path.getOrElse(""))
      .replaceAll("//", "/")
    log.info(s"=>>> inputPath=$inputPath")

    val delimiter = setDefaultDelimiter(meta.getHdfsFileDelimiter)
    logInfo("=>>> delimiter: " + delimiter)

    // Build (column -> type) and (column -> length) pairs from the
    // exchange-interface column metadata.
    val columnInfoMetaList: java.util.List[DmTableColumn] = meta.getColumnEntiy
    val columnArr = new ArrayBuffer[(String, String)](columnInfoMetaList.size)
    val columnSizeArr = new ArrayBuffer[(String, String)](columnInfoMetaList.size)
    for (i <- 0 until columnInfoMetaList.size) {
      val col = columnInfoMetaList.get(i)
      columnArr += (col.getColName -> col.getColType)
      columnSizeArr += (col.getColName -> col.getLength)
    }

    // Sink-type flags, defined in khaos-core's JobScheduler.
    val estag = kc.conf.getBoolean("judge_essink_tag", false)
    val mpptag = kc.conf.getBoolean("judge_mppsink_tag", false)
    val phoenixtag = kc.conf.getBoolean("judge_phoenixsink_tag", false)

    // Fallback switches controlling decimal handling per sink type.
    val phoenixswitch = kc.conf.getBoolean(CommonConstants.PHOENIX_DECIMAL_SWITCH, false)
    val gpswitch = kc.conf.getBoolean(CommonConstants.GREENPLUM_DECIMAL_SWITCH, false)
    val hashdataswitch = kc.conf.getBoolean(CommonConstants.HASHDATA_DECIMAL_SWITCH, false)

    // Phoenix/Greenplum/HashData sinks all need the length-aware schema; the
    // three original branches built the identical schema, so they are merged.
    val schema: StructType =
      if (estag) {
        SparkJobHelper.dynamicBuildDFSchemaForES(columnArr)
      } else if ((phoenixtag && phoenixswitch) || (mpptag && (gpswitch || hashdataswitch))) {
        SparkJobHelper.dynamicBuildDFSchema(columnArr, columnSizeArr)
      } else {
        SparkJobHelper.dynamicBuildDFSchema(columnArr)
      }

    // File character set from table metadata.
    val characterSet = MetaUtils.getHdfsCharacterSet(meta.getTableEntiy)

    fileFormat match {
      case "csv" =>
        val rddData = readCsvNotCheckNull(sparkSession, inputPath, delimiter, fileFormat, characterSet)
        // TODO fixed-length read and character replacement are not implemented yet.
        // Blank lines are dropped before schema mapping.
        val rowRDD = convert2Rows(schema, delimiter, rddData.filter(!_.trim.equals("")))
        val hdfsFileDF = sparkSession.createDataFrame(rowRDD, schema)
        if (StringUtils.isBlank(hdfsConf.filter)) hdfsFileDF
        else hdfsFileDF.filter(hdfsConf.filter)

      case "orc" =>
        val orcDF = sparkSession.read.option("encoding", characterSet).orc(inputPath)
        if (StringUtils.isBlank(hdfsConf.filter)) orcDF
        else orcDF.filter(hdfsConf.filter)

      case _ => throw new Exception("=>>> 暂时不支持读取该数据类型！")
    }
  }


  /**
   * Builds the final input path, supporting a user-specified file/sub-directory
   * appended to the metadata base path.
   *
   * @param kc           job context (currently unused; kept for interface compatibility)
   * @param custFilePath optional user-supplied sub path ("" for the whole table path)
   * @return base path from metadata joined with the custom sub path
   */
  def getHdfsInputPath(kc: KhaosContext, custFilePath: String): String = {
    logInfo("===>custFilePath:" + custFilePath)
    meta.getHdfsInputPath + "/" + custFilePath
  }


  /**
   * Reads a text/CSV file as one raw string per line without null checking.
   * The file is read through the csv reader with "\n" as the delimiter so the
   * whole physical line survives as a single column (the real field delimiter
   * is applied later in convert2Rows).
   *
   * @param sparkSession active session
   * @param inputPath    HDFS path to read
   * @param delimiter    logical field delimiter (logged only; applied later)
   * @param fileFormat   unused; kept for interface compatibility
   * @param characterSet file encoding from metadata
   * @return RDD of raw lines
   */
  def readCsvNotCheckNull(sparkSession: SparkSession, inputPath: String, delimiter: String, fileFormat: String, characterSet: String): RDD[String] = {
    logInfo(s"=>>>readText input filepath: $inputPath")
    logInfo("===>characterSet: " + characterSet)
    logInfo("===>delimiter: " + delimiter)

    val rddData = sparkSession.read
      .format("csv")
      .option("delimiter", "\n") // keep each physical line whole; real split happens later
      .option("encoding", characterSet)
      .csv(inputPath)
      .rdd

    // NOTE: isEmpty() triggers a Spark job; accepted cost for an early warning.
    if (rddData.isEmpty()) {
      logWarning(s"=>>> hdfs源文件数据内容为空!")
    }
    rddData.map(_.mkString)
  }

  /**
   * Validates raw lines against the schema and converts them to Rows.
   *
   * BUGFIX: the original built the validated `rows` RDD and then mapped over
   * the unfiltered input `rdd`, so field-count/type validation (including the
   * abnormal accumulator and fail-fast exceptions) never actually ran. The
   * final split now operates on the validated rows.
   *
   * @param schema    target schema
   * @param delimiter field delimiter (literal, not a regex)
   * @param rdd       raw non-blank input lines
   * @return rows typed according to the schema
   */
  def convert2Rows(schema: StructType, delimiter: String, rdd: RDD[String]): RDD[Row] = {
    val transDelimiter = getTransDelimiter(delimiter)

    // Drop rows whose field count does not match the schema.
    var rows = filterRowData(rdd, schema, delimiter)
    // Drop rows whose values cannot be converted to the target types.
    rows = rows.filter(line => {
      val lineArr: Array[String] = line.split(transDelimiter, -1)
      checkDataConvertStatus(lineArr, schema, delimiter)
    })

    // Split validated rows. The look-ahead keeps delimiters that appear inside
    // double quotes intact; the delimiter itself is regex-quoted so characters
    // like "|" or "d" are treated literally. Per-row logging removed — it ran
    // once per record on the executors.
    val finalRdd = rows.map(line =>
      line.split(transDelimiter + "(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)", -1))

    DataTypeConvertUtils.rdd2Rows(schema, finalRdd, delimiter)
  }


  /**
   * Escapes the delimiter for literal use inside a regular expression.
   *
   * Uses Pattern.quote instead of prefixing every character with a backslash:
   * backslash-escaping alphanumerics (e.g. "d" -> "\d") would turn them into
   * regex character classes and silently corrupt the split.
   */
  def getTransDelimiter(delimiter: String): String = {
    import java.util.regex.Pattern
    Pattern.quote(delimiter)
  }

  /**
   * Returns the configured delimiter, falling back to the platform default
   * when the metadata value is blank.
   *
   * @param delimiter delimiter from table metadata (may be blank)
   * @return a non-blank delimiter
   */
  def setDefaultDelimiter(delimiter: String) = {
    if (StringUtils.isBlank(delimiter)) {
      MetaDataConstants.HDFS_DATA_SEPARATOR
    } else {
      delimiter
    }
  }


  /**
   * Filters out lines whose field count is below the schema size.
   *
   * @param rdd       raw input lines
   * @param schema    target schema
   * @param delimiter field delimiter (literal)
   * @return lines that pass the field-count check
   */
  def filterRowData(rdd: RDD[String], schema: StructType, delimiter: String) = {
    val transDelimiter = getTransDelimiter(delimiter)
    rdd.filter(line => {
      val lineArr: Array[String] = line.split(transDelimiter, -1)
      checkSchemaLength(lineArr, schema, delimiter)
    })
  }

  /**
   * Checks whether a split line carries at least as many fields as the schema.
   *
   * @return true when the field count is acceptable; false when the row should
   *         be dropped (only in ignore-abnormal mode)
   * @throws Exception when the row is malformed and ignoreAbnormal is off
   */
  def checkSchemaLength(lineArr: Array[String],
                        schema: StructType,
                        delimiter: String): Boolean = {
    if (lineArr.size < schema.size) { // fewer fields than the schema expects
      if (ignoreAbnormal) {
        // Class-level counter: the original used a per-call local, so the
        // 10-warning cap never limited anything.
        if (abnormalWarnCount < 10) {
          logWarning(s"数据映射失败！实际字段数=${lineArr.size} 期望字段数=${schema.size} 分隔符=${delimiter} 数据行=${lineArr.mkString}")
          abnormalWarnCount += 1
        }
        abnormalAccumulator.add(1)
        false
      } else {
        throw new Exception(s"数据映射失败！实际字段数=${lineArr.size} 期望字段数=${schema.size} 分隔符=${delimiter} 数据行=${lineArr.mkString}")
      }
    } else {
      true
    }
  }

  /**
   * Checks whether every field of a split line converts to its schema type.
   *
   * @return conversion status; abnormal rows are counted (ignore mode) or
   *         abort the job otherwise
   * @throws Exception when conversion fails and ignoreAbnormal is off
   */
  def checkDataConvertStatus(lineArr: Array[String],
                             schema: StructType,
                             delimiter: String): Boolean = {
    val status = DataTypeConvertUtils.checkDataLineAbnormal(schema, lineArr, delimiter)
    if (!status) {
      if (ignoreAbnormal) {
        // Same class-level warning cap as checkSchemaLength; the original's
        // local counter and doubly-nested !status check were dead logic.
        if (abnormalWarnCount < 10) {
          logWarning(s"=> 数据转换失败! data line=${lineArr.mkString(delimiter)}")
          abnormalWarnCount += 1
        }
        abnormalAccumulator.add(1)
      } else {
        throw new Exception(s"=> 数据转换失败! data line=${lineArr.mkString(delimiter)}")
      }
    }
    status
  }

  /**
   * Derives the module's output schema from the configured extract fields.
   *
   * @param kc         job context (unused here; required by the interface)
   * @param config     raw JSON module configuration
   * @param dependence upstream dependency (unused here)
   * @return one KhaosStructField per configured extract field
   */
  override def schema(kc: KhaosContext, config: String, dependence: Dependency): List[KhaosStructField] = {
    implicit val formats = DefaultFormats
    val info = parse(config, true).extract[HdfsConfig]
    val fieldSchema = ArrayBuffer[KhaosStructField]()
    for (ef <- info.extract_fields) {
      fieldSchema += KhaosStructField(ef.field, ef.data_type)
    }
    fieldSchema.toList
  }
}
