package com.kingsoft.dc.khaos.module.spark.source

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.{CommonConstants, MetaDataConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.source.{CosReplaceContent, CosReplaceContentInfo, CosSourceConfig, HiveSourceConfig, IgnoreAbnormal}
import com.kingsoft.dc.khaos.module.spark.model.MetaDataEntity
import com.kingsoft.dc.khaos.module.spark.util._
import com.kingsoft.dc.khaos.util.{KhaosConstants, Logging}
import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.trim
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame, Row, SparkSession}
import org.apache.spark.util.LongAccumulator
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.util.matching.Regex

/**
 * COS file source: reads delimited text (CSV) or JSON files from COS object
 * storage, maps the rows onto the table schema resolved from metadata, and
 * exposes the result as a DataFrame.
 *
 * create by goosoog 2019/6/20.
 */
class CosSource extends SourceStrategy with Logging with Serializable {

  // Default field separator, used when the table metadata does not define one.
  final val SEPARATOR = MetaDataConstants.DATA_SEPARATOR
  private var sparkSession: SparkSession = _ // assigned in init()
  private var meta: MetaDataEntity = _ // table metadata, resolved in init()
  private var ignoreAbnormal: Boolean = _ // when true, malformed rows are skipped and counted instead of failing the job
  private var abnormalAccumulator: LongAccumulator = _ // counts skipped malformed rows across executors

  // Fill-in values for missing data (numeric vs. string columns).
  // NOTE(review): not referenced anywhere in this file — confirm external users before removing.
  val NA_FILL_NUMERIC = 0
  val NA_FILL_STRING = MetaDataConstants.NULL

  /**
   * Initializes the source: verifies read authorization, resolves the table
   * metadata, registers the COS Hadoop file-system configuration, and reads
   * the fault-tolerance settings from the config.
   *
   * @param kc            job context carrying the SparkSession and shared values
   * @param cosSourceConf parsed source configuration
   */
  def init(kc: KhaosContext, cosSourceConf: CosSourceConfig): Unit = {

    // Authorization check. NOTE(review): the returned result is never inspected;
    // presumably checkReadAuth throws on failure — confirm.
    val checkResult = MetaUtils.checkReadAuth(kc,
      cosSourceConf.db_name,
      cosSourceConf.table_name,
      cosSourceConf.extender.auth.clazz,
      compact(render(cosSourceConf.extender.auth.params)))

    // Fetch table metadata (schema, file format, delimiter, COS paths, ...).
    meta = MetaUtils.getCosMeta(kc,
      cosSourceConf.db_name,
      cosSourceConf.table_name,
      cosSourceConf.extender.meta.clazz,
      compact(render(cosSourceConf.extender.meta.params)),
      this)

    this.sparkSession = kc.sparkSession
    initCosFileSystem

    // Added 2020-08-03: fault tolerance — accumulator counting skipped abnormal
    // rows, plus the on/off switch controlling whether bad rows are ignored.
    abnormalAccumulator = kc._valuesChannel.getValues[LongAccumulator](KhaosConstants.KHAOS_ABNORMALACCUMULATOR)
    ignoreAbnormal = cosSourceConf.advanced_options.ignore_abnormal.getOrElse(IgnoreAbnormal(on_off = Option(false))).on_off.getOrElse(false)
  }

  /**
   * Initializes the COS file system by registering the COS Hadoop
   * configuration on the active SparkContext.
   */
  def initCosFileSystem(): Unit = {
    addCosFileSystem()
    ()
  }

  /**
   * Resolves the effective field delimiter: returns the configured one when
   * present, otherwise falls back to the platform default separator.
   *
   * @param delimiter configured delimiter, possibly null/blank
   * @return the delimiter to use
   */
  def setDefaultDelimiter(delimiter: String) = {
    if (StringUtils.isNotBlank(delimiter)) delimiter
    else MetaDataConstants.DATA_SEPARATOR
  }

  /**
   * Splits each raw text line by the (regex-escaped) delimiter and converts
   * the resulting fields into Rows matching the given schema. A line with
   * fewer fields than the schema defines aborts the job.
   *
   * @param schema    target schema
   * @param delimiter raw field delimiter
   * @param rdd       raw text lines
   */
  def convert2Rows(schema: StructType, delimiter: String, rdd: RDD[String]) = {
    val escapedDelimiter = getTransDelimiter(delimiter)
    val fieldRdd = rdd.map { line =>
      val fields = line.split(escapedDelimiter, -1)
      if (fields.size < schema.size) {
        logInfo(s"=>>> line=${fields.mkString}")
        throw new Exception(s"数据映射失败！实际字段数=${fields.size} 期望字段数=${schema.size} 分隔符=${delimiter} 数据行=${line}")
      }
      fields
    }
    DataTypeConvertUtils.rdd2Rows(schema, fieldRdd, delimiter)
  }

  /**
   * Splits raw text lines into Rows matching the given schema, with fault
   * tolerance: when `ignoreAbnormal` is enabled, malformed lines (too few
   * fields, or fields that fail type pre-conversion) are dropped and counted
   * in `abnormalAccumulator`; otherwise the first malformed line fails the job.
   *
   * Fixes over the previous version:
   *  - the per-record `var count = 0` "log only the first 10" limiter was
   *    re-initialized for every line and therefore never limited anything; the
   *    dead counter is removed (a warning is still logged per rejected line —
   *    TODO: a real limiter would need per-partition state),
   *  - a redundant nested `if (!status)` (already inside the `!status` branch)
   *    is removed,
   *  - each line is split exactly once (flatMap) instead of once in the filter
   *    and again in the map.
   *
   * @param schema    target schema
   * @param delimiter raw field delimiter
   * @param rdd       raw text lines
   * @return rows for the surviving lines
   */
  def convert2RowsWithIgnoreAbnormal(schema: StructType, delimiter: String, rdd: RDD[String]): RDD[Row] = {
    val trans_delimiter = getTransDelimiter(delimiter)

    // Split once; keep only well-formed, convertible lines.
    val rows = rdd.flatMap { line =>
      val lineArr: Array[String] = line.split(trans_delimiter, -1)
      if (lineArr.size < schema.size) { // fewer fields than the schema defines
        if (ignoreAbnormal) {
          logWarning(s"数据映射失败！实际字段数=${lineArr.size} 期望字段数=${schema.size} 分隔符=${delimiter} 数据行=${line}")
          abnormalAccumulator.add(1)
          None
        } else {
          throw new Exception(s"数据映射失败！实际字段数=${lineArr.size} 期望字段数=${schema.size} 分隔符=${delimiter} 数据行=${line}")
        }
      } else if (!DataTypeConvertUtils.checkDataLineAbnormal(schema, lineArr, delimiter)) {
        if (ignoreAbnormal) {
          logWarning(s"=> 数据转换失败! data line=${lineArr.mkString(delimiter)}")
          abnormalAccumulator.add(1)
          None
        } else {
          throw new Exception(s"=> 数据转换失败! data line=${lineArr.mkString(delimiter)}")
        }
      } else {
        Some(lineArr)
      }
    }
    DataTypeConvertUtils.rdd2Rows(schema, rows, delimiter)
  }

  /**
   * Drops lines whose field count is below the schema size. The counting,
   * accumulator bookkeeping and throw-vs-skip policy are delegated to
   * checkSchemaLength.
   *
   * @param rdd       raw text lines
   * @param schema    target schema
   * @param delimiter raw field delimiter
   * @return the filtered RDD
   */
  def filterRowData(rdd: RDD[String], schema: StructType, delimiter: String) = {
    val escaped = getTransDelimiter(delimiter)
    rdd.filter { line =>
      checkSchemaLength(line.split(escaped, -1), schema, delimiter)
    }
  }

  /**
   * Applies the configured field-content replacement to CSV lines when the
   * feature is switched on; otherwise returns the RDD unchanged.
   *
   * Fixes over the previous version:
   *  - `replaceInfo.on_off.get` threw when the switch was absent; an absent
   *    switch now means "off",
   *  - `replaceInfo.detailed.get` was re-evaluated inside the map closure for
   *    every record; it is now resolved once on the driver and captured.
   *
   * @param rdd           raw text lines
   * @param delimiter     raw field delimiter
   * @param cosSourceConf source configuration carrying the replacement rules
   * @return the (possibly transformed) RDD
   */
  def checkAndReplaceCsvData(rdd: RDD[String], delimiter: String, cosSourceConf: CosSourceConfig) = {
    val replaceOpt = cosSourceConf.advanced_options.replace_content
    if (!replaceOpt.exists(_.on_off.getOrElse(false))) {
      rdd // replacement absent or switched off
    } else {
      val transDelimiter = getTransDelimiter(delimiter)
      // Resolved once on the driver; still throws if detailed is missing while
      // the switch is on (same contract as before).
      val replaceDetailed = replaceOpt.get.detailed.get
      rdd.map { line =>
        // Whole-line replacement is intentionally not implemented yet.
        val fieldArr: Array[String] = line.split(transDelimiter, -1)
        FieldsContentUtils.replaceFieldsContent(fieldArr, replaceDetailed, delimiter)
      }
    }
  }

  /**
   * Applies the configured field-content replacement to a JSON-sourced
   * DataFrame: each row is flattened to a delimited string, the replacement
   * rules are applied per field, and the result is parsed back into Rows of
   * the original schema.
   *
   * Fix over the previous version: `replaceInfo.detailed.get` was evaluated
   * inside the map closure for every row; it is now resolved once on the
   * driver and captured by the closure.
   *
   * NOTE(review): the synthetic delimiter "|@@|" will corrupt rows whose
   * field values contain that sequence — confirm this cannot occur.
   *
   * @param jsonDf      source DataFrame
   * @param columnArr   (column name -> column type) pairs from the metadata
   * @param replaceInfo replacement rules (detailed must be present)
   * @return replaced rows matching jsonDf's schema
   */
  def checkAndReplaceJsonData(jsonDf: DataFrame,
                              columnArr: ArrayBuffer[(String, String)],
                              replaceInfo: CosReplaceContent) = {
    val defaultDelimiter = "|@@|"
    val transDelimiter = getTransDelimiter(defaultDelimiter)

    logInfo(s"==>json df schema:${jsonDf.schema.fields.mkString("[", ",", "]")}")
    logInfo(s"==>json dm schema:${columnArr.map(_._1).mkString("[", ",", "]")}")

    // Resolved once on the driver.
    val replaceDetailed = replaceInfo.detailed.get
    val fieldCount = columnArr.size

    val rddStr = jsonDf.rdd.map { row =>
      // NOTE(review): row.get(i).toString NPEs on null field values, exactly as
      // before — confirm upstream guarantees non-null values.
      val fieldArr = Array.tabulate(fieldCount)(i => row.get(i).toString)
      FieldsContentUtils.replaceFieldsContent(fieldArr, replaceDetailed, defaultDelimiter)
    }
    // Split the replaced line back into fields and rebuild Rows.
    val finalRdd = rddStr.map(_.split(transDelimiter, -1))
    DataTypeConvertUtils.rdd2Rows(jsonDf.schema, finalRdd, defaultDelimiter)
  }

  /**
   * Applies the field-replacement feature to a JSON DataFrame when it is
   * enabled; otherwise returns the DataFrame unchanged.
   *
   * Fix over the previous version: `replaceInfo.on_off.get` threw when the
   * switch was absent; an absent switch now means "off". The null/None
   * handling is expressed as a single pattern match.
   *
   * @param jsonDf        source DataFrame
   * @param columnArr     (column name -> column type) pairs from the metadata
   * @param cosSourceConf source configuration carrying the replacement rules
   */
  def convertJsonRows(jsonDf: DataFrame, columnArr: ArrayBuffer[(String, String)], cosSourceConf: CosSourceConfig) = {
    cosSourceConf.advanced_options.replace_content match {
      case Some(info) if info.on_off.getOrElse(false) => // replacement switched on
        val rdd = checkAndReplaceJsonData(jsonDf, columnArr, info)
        sparkSession.createDataFrame(rdd, jsonDf.schema)
      case _ =>
        jsonDf
    }
  }

  /**
   * Full CSV conversion pipeline: drop under-length lines, apply the optional
   * content replacement, drop lines that fail type pre-conversion, then split
   * the survivors and map them onto the schema as Rows.
   *
   * @param schema        target schema
   * @param delimiter     raw field delimiter
   * @param rdd           raw text lines
   * @param cosSourceConf source configuration (replacement rules)
   * @return converted rows
   */
  def convert2Rows(schema: StructType, delimiter: String, rdd: RDD[String], cosSourceConf: CosSourceConfig): RDD[Row] = {
    val escaped = getTransDelimiter(delimiter)

    // 1. drop lines with too few fields
    val wellFormed = filterRowData(rdd, schema, delimiter)
    // 2. apply content replacement when enabled
    val replaced = checkAndReplaceCsvData(wellFormed, delimiter, cosSourceConf)
    // 3. drop lines whose values cannot be converted to the target types
    val convertible = replaced.filter { line =>
      checkDataConvertStatus(line.split(escaped, -1), schema, delimiter)
    }
    // 4. split and build Rows
    val finalRdd = convertible.map(_.split(escaped, -1))
    DataTypeConvertUtils.rdd2Rows(schema, finalRdd, delimiter)
  }

  /**
   * Checks that a split line has at least as many fields as the schema. For a
   * short line: with fault tolerance enabled the line is counted in the
   * accumulator and rejected (returns false); otherwise an exception aborts
   * the job.
   *
   * Fix over the previous version: the local `var count = 0` guard intended
   * to cap warning output was re-initialized on every call, so `count < 10`
   * was always true and the counter was dead code; it has been removed (the
   * warning is logged for every rejected line, exactly as before).
   *
   * @param lineArr   the already-split fields
   * @param schema    target schema
   * @param delimiter raw field delimiter (used only in messages)
   * @return true when the line has enough fields
   */
  def checkSchemaLength(lineArr: Array[String],
                        schema: StructType,
                        delimiter: String): Boolean = {
    if (lineArr.size >= schema.size) {
      true
    } else if (ignoreAbnormal) {
      logWarning(s"数据映射失败！实际字段数=${lineArr.size} 期望字段数=${schema.size} 分隔符=${delimiter} 数据行=${lineArr.mkString}")
      abnormalAccumulator.add(1)
      false
    } else {
      throw new Exception(s"数据映射失败！实际字段数=${lineArr.size} 期望字段数=${schema.size} 分隔符=${delimiter} 数据行=${lineArr.mkString}")
    }
  }

  /**
   * Pre-checks whether a split line converts cleanly to the schema's types.
   * On failure: with fault tolerance enabled the line is counted in the
   * accumulator and rejected (returns false); otherwise an exception aborts
   * the job.
   *
   * Fixes over the previous version: removed a redundant nested
   * `if (!status)` (already inside the `!status` branch) and a dead
   * `var count = 0` warning limiter that was re-initialized on every call
   * and therefore never limited anything.
   *
   * @param lineArr   the already-split fields
   * @param schema    target schema
   * @param delimiter raw field delimiter (used only in messages)
   * @return true when every field converts cleanly
   */
  def checkDataConvertStatus(lineArr: Array[String],
                             schema: StructType,
                             delimiter: String): Boolean = {
    val status = DataTypeConvertUtils.checkDataLineAbnormal(schema, lineArr, delimiter)
    if (!status) {
      if (ignoreAbnormal) {
        logWarning(s"=> 数据转换失败! data line=${lineArr.mkString(delimiter)}")
        abnormalAccumulator.add(1)
      } else {
        throw new Exception(s"=> 数据转换失败! data line=${lineArr.mkString(delimiter)}")
      }
    }
    status
  }


  /**
   * Fixed-width read logic:
   *  - with a delimiter configured, the delimiter always wins regardless of
   *    the fixed-width switch;
   *  - without a delimiter, fixed-width slicing is used when enabled,
   *    otherwise the default separator applies downstream.
   *
   * @param cosRDD    raw lines (blank lines are dropped)
   * @param delimiter configured delimiter, possibly blank
   * @param meta      table metadata (fixed-width flag and column widths)
   * @return lines, re-joined with the default separator when fixed-width sliced
   */
  def fixedRead(cosRDD: RDD[String],
                delimiter: String,
                meta: MetaDataEntity): RDD[String] = {
    val nonEmpty = cosRDD.filter(_.trim.nonEmpty) // drop blank lines
    if (!MetaUtils.isfixedLength(meta.getTableEntiy)) {
      logInfo("=>>>不定长读取......")
      nonEmpty
    } else {
      logInfo("=>>>定长读取......")
      if (StringUtils.isBlank(delimiter)) {
        // slice by column widths, then re-join with the default separator
        DataframeUtils.Loading(nonEmpty, meta.getColumnEntiy).map(_.mkString(MetaDataConstants.DATA_SEPARATOR))
      } else {
        nonEmpty // a delimiter is present — split by it downstream instead
      }
    }
  }

  /**
   * Fixed-width read logic (variant honoring the fixed-read-increment setting
   * in the source configuration):
   *  - with a delimiter configured, the delimiter always wins regardless of
   *    the fixed-width switch;
   *  - without a delimiter, fixed-width slicing via fixedLoading is used when
   *    enabled, otherwise the default separator applies downstream.
   *
   * @param cosRDD        raw lines (blank lines are dropped)
   * @param delimiter     configured delimiter, possibly blank
   * @param meta          table metadata (fixed-width flag and column widths)
   * @param cosSourceConf source configuration (fixed-read increment)
   * @return lines, re-joined with the default separator when fixed-width sliced
   */
  def fixedRead(cosRDD: RDD[String],
                delimiter: String,
                meta: MetaDataEntity,
                cosSourceConf: CosSourceConfig): RDD[String] = {
    val nonEmpty = cosRDD.filter(_.trim.nonEmpty) // drop blank lines
    if (!MetaUtils.isfixedLength(meta.getTableEntiy)) {
      logInfo("=>>>不定长读取......")
      nonEmpty
    } else {
      logInfo("=>>>定长读取......")
      if (StringUtils.isBlank(delimiter)) {
        // slice by column widths (plus optional increment), then re-join with the default separator
        fixedLoading(nonEmpty, meta.getColumnEntiy, cosSourceConf).map(_.mkString(MetaDataConstants.DATA_SEPARATOR))
      } else {
        nonEmpty // a delimiter is present — split by it downstream instead
      }
    }
  }

  /**
   * Fixed-width loading: slices every line into fields according to the
   * metadata column widths (plus an optional per-field increment) and, when
   * fault tolerance is on, drops lines that cannot be sliced.
   *
   * Fixes over the previous version:
   *  - removed the try/catch wrapping the RDD transformations: filter/map are
   *    lazy, so data errors surface on the executors at action time and could
   *    never be caught here — the catch block was dead and misleading,
   *  - removed the pointless `var arrRDD = emptyRDD` pre-initialization and
   *    the `columns.asScala.map(...)` used purely for its side effect.
   *
   * @param cosRDD        raw lines
   * @param columns       column metadata; a length like "16,2" (decimal
   *                      precision,scale) is widened to precision + 1 to
   *                      account for the decimal point
   * @param cosSourceConf source configuration (fixed-read increment)
   * @return one Array[String] of field values per surviving line
   */
  def fixedLoading(cosRDD: RDD[String],
                   columns: java.util.List[DmTableColumn],
                   cosSourceConf: CosSourceConfig): RDD[Array[String]] = {
    import scala.collection.JavaConverters._

    // Optional extra width added to every field (same .get contract as before:
    // a missing fixed_read_increment section still throws).
    val incrementConf = cosSourceConf.advanced_options.fixed_read_increment
    val increment =
      if (incrementConf.get.on_off.get) incrementConf.get.value.get.toInt
      else 0

    // Per-field widths, kept as strings because the slicing helpers expect them.
    val colSizeList = ListBuffer[String]()
    columns.asScala.foreach { column =>
      var length: String = column.getLength
      // "precision,scale" (e.g. decimal(16,2)) -> precision + 1 for the decimal point
      if (length.contains(",")) {
        val lengthArr: Array[String] = length.split(",")
        length = (lengthArr(0).toInt + 1).toString
      }
      colSizeList += length
    }

    val colCount = columns.size()
    val baseRDD =
      if (ignoreAbnormal) // fault tolerance on: drop lines that cannot be sliced
        cosRDD.filter(line => checkFixedDataLine(line, colSizeList, colCount, increment))
      else
        cosRDD
    baseRDD.map(line => substringFixedLine(line, colSizeList, colCount, increment))
  }

  /**
   * Slices one fixed-width line into its field values. Each field i occupies
   * colSizeList(i).toInt + increment characters.
   *
   * When the line is shorter than the total expected width an error is logged
   * and the subsequent substring throws StringIndexOutOfBoundsException —
   * checkFixedDataLine relies on that throw to reject the line.
   *
   * @param line        raw line
   * @param colSizeList per-field widths (as strings)
   * @param colSize     number of fields to produce
   * @param increment   extra width added to every field
   * @return the sliced field values
   */
  def substringFixedLine(line: String,
                         colSizeList: ListBuffer[String],
                         colSize: Int,
                         increment: Int): Array[String] = {
    val widths = colSizeList.map(_.toInt + increment)
    val expectLength = widths.sum
    if (line.size < expectLength) {
      logError(s"==>定长读取解析length失败！actual: ${line.size} expect: $expectLength line=$line")
    }
    val colArr = new Array[String](colSize)
    var offset = 0
    for ((width, i) <- widths.zipWithIndex) {
      colArr(i) = line.substring(offset, offset + width)
      offset += width
    }
    colArr
  }

  /**
   * Probes whether a line can be sliced into the expected fixed-width fields
   * by attempting the slice. On failure the abnormal-row accumulator is
   * incremented and a warning is logged.
   *
   * Fixes over the previous version:
   *  - the bare `case _ =>` caught every Throwable, including fatal JVM
   *    errors (OutOfMemoryError, etc.); it now catches only NonFatal
   *    exceptions,
   *  - removed a dead `var count = 0` warning limiter that was re-initialized
   *    on every call (`count < 10` was always true).
   *
   * @param line        raw line
   * @param colSizeList per-field widths (as strings)
   * @param colSize     number of fields expected
   * @param increment   extra width added to every field
   * @return true when the line slices cleanly
   */
  def checkFixedDataLine(line: String,
                         colSizeList: ListBuffer[String],
                         colSize: Int,
                         increment: Int) = {
    import scala.util.control.NonFatal
    val stats =
      try {
        substringFixedLine(line, colSizeList, colSize, increment)
        true
      } catch {
        case NonFatal(_) => false
      }
    if (!stats) {
      logWarning(s"=> 数据转换失败! 期望行长度分别为${colSizeList.mkString("[", ",", "]")}与实际不符！data line=${line} ")
      abnormalAccumulator.add(1)
    }
    stats
  }

  /**
   * Reads a text file line-by-line through the CSV reader ("\n" as the record
   * delimiter so the header-skipping option can be reused) and returns the
   * raw lines. An empty result logs an error under fault tolerance, otherwise
   * fails the job.
   *
   * @param sparkSession active session
   * @param inputPath    file or directory path
   * @param skipHeader   whether to drop the first (header) line
   * @param delimiter    unused here; lines are split later
   * @return raw text lines
   */
  def readTextFile(sparkSession: SparkSession, inputPath: String, skipHeader: Boolean, delimiter: String): RDD[String] = {
    logInfo(s"=>>> readText input filepath: $inputPath")
    val lines = sparkSession.read
      .format("csv")
      .option("header", skipHeader) // drop the header line when requested
      .option("delimiter", "\n") // newline as record delimiter: avoids the default comma split while still honoring header skipping
      .csv(inputPath)
      .rdd

    // NOTE: isEmpty() triggers a Spark job here, same as the original.
    if (lines.isEmpty()) {
      if (ignoreAbnormal) logError(s"源文件数据内容为空!")
      else throw new Exception(s"源文件数据内容为空!")
    }

    lines.map(_.mkString)
  }

  /**
   * Reads a text file line-by-line through the CSV reader with an explicit
   * character set. Unlike readTextFile, an empty result only logs a warning.
   *
   * @param sparkSession active session
   * @param inputPath    file or directory path
   * @param skipHeader   whether to drop the first (header) line
   * @param characterSet file encoding
   * @return raw text lines
   */
  def readTextFileNotCheckNull(sparkSession: SparkSession, inputPath: String, skipHeader: Boolean, characterSet: String): RDD[String] = {
    logInfo(s"=>>>readText input filepath: $inputPath")
    val lines = sparkSession.read
      .format("csv")
      .option("header", skipHeader) // drop the header line when requested
      .option("delimiter", "\n") // newline as record delimiter: avoids the default comma split while still honoring header skipping
      .option("encoding", characterSet) // explicit character set
      .csv(inputPath)
      .rdd

    if (lines.isEmpty()) {
      logWarning(s"=>>> cos源文件数据内容为空!")
    }

    lines.map(_.mkString)
  }

  /**
   * Loads a JSON file into a DataFrame with an inferred schema.
   *
   * @param sparkSession active session
   * @param inputPath    file or directory path
   */
  def readJsonFile(sparkSession: SparkSession, inputPath: String): DataFrame = {
    logInfo(s"=>>>readJson input filepath: $inputPath")
    sparkSession.read.format("json").load(inputPath)
  }

  /**
   * Loads a JSON file into a DataFrame using an explicit schema and character
   * set (no schema inference).
   *
   * @param sparkSession active session
   * @param inputPath    file or directory path
   * @param schema       schema to apply instead of inference
   * @param characterSet file encoding
   */
  def readJsonFileBySchema(sparkSession: SparkSession, inputPath: String, schema: StructType, characterSet: String): DataFrame = {
    logInfo(s"=>>>readJson input filepath: $inputPath")
    sparkSession.read
      .format("json")
      .option("encoding", characterSet) // explicit character set
      .schema(schema)
      .load(inputPath)
  }

  /**
   * Entry point: parses the JSON source configuration, runs auth/metadata
   * initialization, builds the target schema (with several sink-specific
   * adjustments), reads the COS file as csv or json, applies the
   * conversion/replacement/filter pipeline, and returns the DataFrame.
   *
   * @param kc         job context
   * @param module_id  module identifier (unused in this method)
   * @param config     JSON source configuration
   * @param dependence upstream dependency descriptor (unused in this method)
   * @return the loaded and converted DataFrame
   */
  override def source(kc: KhaosContext,
                      module_id: String,
                      config: String,
                      dependence: Dependency): DataFrame = {
    // json4s formats for config extraction
    implicit val formats = DefaultFormats

    // NOTE(review): dead code — cosSourceText is the non-blank literal
    // "cos_file", so isBlank is always false and this branch never runs (its
    // readTextFile call even passes an RDD-plus-string concatenation as the
    // delimiter argument). Kept verbatim; candidate for removal.
    val cosSourceText = "cos_file"
    if (StringUtils.isBlank(cosSourceText)) {
      val cosConfig = parse(config, true).extract[CosSourceConfig]
      val rddData = sparkSession.sparkContext.emptyRDD[String]
      readTextFile(sparkSession, "", false, rddData + "21202F2938212B3E22272626252E434D")
    }
    // parse the JSON config
    val cosSourceConf = parse(config, true).extract[CosSourceConfig]
    init(kc, cosSourceConf)
    val fileFormat = MetaUtils.getFileFormat(meta.getTableEntiy).trim.toLowerCase

    // Added in 930: allow the user to specify the file name to read
    var inputPath = ""
    if (StringUtils.isNotBlank(cosSourceConf.file_name.getOrElse(""))) {
      inputPath = this.getCosInputPath(kc, cosSourceConf.file_name.get)
    } else {
      inputPath = meta.getCosInputPath.replaceAll("//", "/")
    }
    logInfo(s"inputPath:$inputPath")
    // whether to skip the header line
    val skipHeader = if ("true".equalsIgnoreCase(cosSourceConf.skip_head)) true else false
    val delimiter = setDefaultDelimiter(meta.getCosFileDelimiter)

    logInfo(s"=>>> tech.check.tag=${kc.conf.getString(SchedulerConstants.TECHCHECKTAG)}")
    val isCheck = if ("true".equals(kc.conf.getString(SchedulerConstants.TECHCHECKTAG))) true else false

    // all field names and types declared by the exchange interface
    val columnInfoMetaList: java.util.List[DmTableColumn] = meta.getColumnEntiy
    val columnArr = new ArrayBuffer[(String, String)](columnInfoMetaList.size)
    val columnSizeArr = new ArrayBuffer[(String, String)](columnInfoMetaList.size)

    // build (field -> data type) and (field -> data length) mappings
    for (i <- 0 until columnInfoMetaList.size) {
      columnArr += (columnInfoMetaList.get(i).getColName -> columnInfoMetaList.get(i).getColType)
      //TODO added 2020-12-08: adapt to the ES data-precision issue
      columnSizeArr += (columnInfoMetaList.get(i).getColName -> columnInfoMetaList.get(i).getLength)
    }

    // Apply the schema to the RDD
    var schema: org.apache.spark.sql.types.StructType = null
    // when the technical-check flag is on, date/time/datetime types are all mapped to String
    if (isCheck) {
      logInfo("=>>> 技术检核开启,日期格式不做处理...")
      schema = SparkJobHelper.dynamicBuildDFSchemaForTechcheck(columnArr)
    } else {
      //      schema = SparkJobHelper.dynamicBuildDFSchema(columnArr)
      //TODO 2020/12/10 gaosong: pollutes the khaos framework — for cos->es, floating-point
      // fields are forced to string to avoid precision loss when writing to ES
      val estag = kc.conf.getBoolean("judge_essink_tag", false) // defined in khaos-core JobScheduler
      val mpptag = kc.conf.getBoolean("judge_mppsink_tag", false) // defined in khaos-core JobScheduler
      val phoenixtag = kc.conf.getBoolean("judge_phoenixsink_tag", false) // defined in khaos-core JobScheduler

      // fallback switches
      val phoenixswitch = kc.conf.getBoolean(CommonConstants.PHOENIX_DECIMAL_SWITCH,false)
      val gpswitch = kc.conf.getBoolean(CommonConstants.GREENPLUM_DECIMAL_SWITCH,false)
      val hashdataswitch = kc.conf.getBoolean(CommonConstants.HASHDATA_DECIMAL_SWITCH,false)
      //      val switch = kc.conf.getBoolean(CommonConstants.DECIMAL_SWITCH,false)

      if(estag){// ES sink: floating-point types are all mapped to string
        schema = SparkJobHelper.dynamicBuildDFSchemaForES(columnArr)
      }else if(phoenixtag && phoenixswitch){ //TODO 2021/1/27 gaosong: pollutes the framework — for cos->phoenix, float fields become decimal to avoid precision loss
        DataTypeConvertUtils.kc = kc
        schema = SparkJobHelper.dynamicBuildDFSchema(columnArr,columnSizeArr)
      }else if(mpptag && gpswitch){ //TODO 2021/1/27 gaosong: same decimal workaround for the Greenplum (mpp) sink
        DataTypeConvertUtils.kc = kc
        schema = SparkJobHelper.dynamicBuildDFSchema(columnArr,columnSizeArr)
      }else if(mpptag && hashdataswitch){ //TODO 2021/1/27 gaosong: same decimal workaround for the HashData (mpp) sink
        DataTypeConvertUtils.kc = kc
        schema = SparkJobHelper.dynamicBuildDFSchema(columnArr,columnSizeArr)
      }else{
        schema = SparkJobHelper.dynamicBuildDFSchema(columnArr)
      }
    }

    // dispatch on the supported file formats
    var rddData = sparkSession.sparkContext.emptyRDD[String]
    var jsonDF = sparkSession.emptyDataFrame
    val characterSet = MetaUtils.getTableCharacterSet(meta.getTableEntiy) // file character set
    fileFormat match {
      case "csv" => {
        //        rddData = readTextFile(sparkSession, inputPath, skipHeader, delimiter)

        rddData = readTextFileNotCheckNull(sparkSession, inputPath, skipHeader, characterSet)

        // fixed-width read (applies when enabled and no delimiter is configured)
        //        rddData = fixedRead(rddData, meta.getCosFileDelimiter, meta)
        rddData = fixedRead(rddData, meta.getCosFileDelimiter, meta, cosSourceConf)

        // map the RDD onto Rows
        //        val rowRDD = convert2Rows(schema, delimiter, rddData)
        //        val rowRDD = convert2RowsWithIgnoreAbnormal(schema, delimiter, rddData)
        // includes the optional character replacement
        val rowRDD = convert2Rows(schema, delimiter, rddData, cosSourceConf)

        if (StringUtils.isBlank(cosSourceConf.filter)) {
          val cosFileDF = sparkSession.createDataFrame(rowRDD, schema)
          cosFileDF
        } else {
          val cosFileDF = sparkSession.createDataFrame(rowRDD, schema).filter(cosSourceConf.filter)
          cosFileDF
        }
      }
      case "json" => {
        //        val schema = SparkJobHelper.dynamicBuildDFSchema(columnArr)
        //        jsonDF = readJsonFile(sparkSession, inputPath)
        jsonDF = readJsonFileBySchema(sparkSession, inputPath, schema,characterSet)

        // optional character replacement
        jsonDF = convertJsonRows(jsonDF, columnArr, cosSourceConf)

        // reorder the columns.
        // NOTE(review): columnArr.toMap.keys has no guaranteed ordering and is
        // recomputed inside the loop — the selected column order may differ
        // from the columnArr metadata order; confirm intended behavior.
        val colArr = new Array[Column](columnArr.toMap.keys.size)
        for (i <- 0 until columnArr.toMap.keys.size) {
          colArr(i) = jsonDF.col(columnArr.toMap.keys.toList(i))
        }
        if (!StringUtils.isBlank(cosSourceConf.filter)) {
          jsonDF = jsonDF.select(colArr: _*).filter(cosSourceConf.filter)
        } else {
          jsonDF = jsonDF.select(colArr: _*)
        }
        // with technical check on, cast every column to String
        if (isCheck) {
          for ((field, dataType) <- columnArr) {
            if (dataType.equalsIgnoreCase("TIME")) { // trim TIME values, otherwise writing to mysql fails
              jsonDF = jsonDF.withColumn(field, trim(jsonDF.col(field).cast(StringType)))
            } else {
              jsonDF = jsonDF.withColumn(field, jsonDF.col(field).cast(StringType))
            }
          }
        }
        jsonDF
      }
      case _ => throw new Exception("=>>> 暂时不支持读取该数据类型！")
    }
  }


  /**
   * Registers the COS access configuration on the SparkContext's Hadoop
   * configuration and returns that configuration.
   */
  def addCosFileSystem(): org.apache.hadoop.conf.Configuration = {
    val hadoopConf = sparkSession.sparkContext.hadoopConfiguration
    val cosConf = HadoopCosUtils.appendCosHadoopConfigs(hadoopConf, meta.getCosAccessConfig())
    hadoopConf.addResource(cosConf)
    hadoopConf
  }


  /**
   * Returns the declared output schema, taken from the extract_fields section
   * of the configuration.
   *
   * NOTE(review): the config is extracted as HiveSourceConfig even though this
   * is the COS source — presumably only the shared extract_fields layout is
   * needed; confirm.
   */
  override def schema(kc: KhaosContext, config: String, dependence: Dependency): List[KhaosStructField] = {
    implicit val formats = DefaultFormats
    val info = parse(config, true).extract[HiveSourceConfig]
    info.extract_fields.map(ef => KhaosStructField(ef.field, ef.data_type)).toList
  }

  /**
   * Added in 930: resolves the COS input path when the user specifies a file
   * name explicitly. When no override path is configured, the name is joined
   * onto the table's base directory (deduplicating the "/" at the seam);
   * otherwise the metadata-provided input path is returned.
   *
   * Fix over the previous version: the (dir ends with "/", name starts with
   * "/") case called fileName.stripSuffix("/"), which strips a TRAILING slash
   * and leaves the leading one in place — evidently a typo for stripPrefix.
   * The resulting double slash was silently masked by the final
   * replaceAll("//", "/"); the seam is now deduplicated correctly.
   *
   * @param kc       job context (carries the optional path override)
   * @param fileName user-specified file name, possibly with a leading "/"
   * @return the resolved input path
   */
  def getCosInputPath(kc: KhaosContext, fileName: String): String = {
    val cospath = kc.conf.getString(SchedulerConstants.COS_FILE_PATH, "")
    if (cospath == null || cospath.equals("") || cospath.equals("\"\"")) {
      val cosDir = MetaUtils.getCosFileDir(meta.getTableEntiy)
      val finalPath =
        if (cosDir.endsWith("/") && fileName.startsWith("/")) {
          // both sides provide the separator: drop the duplicate leading slash
          cosDir + fileName.stripPrefix("/")
        } else if (!cosDir.endsWith("/") && !fileName.startsWith("/")) {
          // neither side provides the separator
          cosDir + "/" + fileName
        } else {
          // exactly one side provides the separator
          cosDir + fileName
        }
      finalPath.replaceAll("//", "/")
    } else {
      meta.getCosInputPath
    }
  }

  /**
   * Escapes a literal delimiter for use as a String.split regex pattern.
   *
   * Fix over the previous version: each character was prefixed with a single
   * backslash, which is wrong for alphanumeric characters ("d" became the
   * regex digit class "\d", "w" the word class "\w", etc.). Pattern.quote
   * produces a \Q...\E literal region that is correct for every character,
   * including the symbol-only delimiters (e.g. "|@@|") the old code handled.
   *
   * @param delimiter literal delimiter string
   * @return a regex pattern matching the delimiter literally
   */
  def getTransDelimiter(delimiter: String): String = {
    java.util.regex.Pattern.quote(delimiter)
  }
}
