package com.kingsoft.dc.khaos.module.spark.source

import java.io.FileNotFoundException
import java.text.SimpleDateFormat
import java.util
import java.util.Calendar
import java.util.regex.{Matcher, Pattern}

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.{CommonConstants, Ks3Constants, MetaDataConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.source.{ExtractFieldInfo, IgnoreAbnormal, Ks3Config, Ks3ReplaceContent, Ks3ReplaceContentInfo}
import com.kingsoft.dc.khaos.module.spark.model.MetaDataEntity
import com.kingsoft.dc.khaos.module.spark.util._
import com.kingsoft.dc.khaos.util.{KhaosConstants, Logging}
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.trim
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.util.LongAccumulator
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.{immutable, mutable}
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.util.matching.Regex

/**
 * Created by haorenhui on 2019/12/03.
 */
class Ks3Source  extends SourceStrategy   with Logging  with Serializable {

    // Spark session, injected from the KhaosContext during init().
    private var sparkSession: SparkSession = _
    // Parsed KS3 source configuration (deserialized from the task JSON).
    private var ks3Config: Ks3Config = _
    // Field-extraction metadata declared in the task configuration.
    private var columnInfoMetaList: List[ExtractFieldInfo] = Nil
    // Table/connection metadata fetched from the metadata service in initMeta().
    private var meta: MetaDataEntity = _
    // Default column separator for KS3 data files.
    final val SEPARATOR = MetaDataConstants.KS3_DATA_SEPARATOR
    // Fault-tolerance switch and counter: when enabled, abnormal rows are
    // counted via the accumulator and dropped instead of failing the job.
    private var ignoreAbnormal: Boolean = _
    private var abnormalAccumulator: LongAccumulator = _

    /**
     * Entry point of the source strategy: initializes state from the task
     * config, then loads the KS3 data as a DataFrame.
     *
     * @param kc         khaos context
     * @param module_id  module identifier (unused here)
     * @param config     task JSON
     * @param dependence upstream dependency descriptor (unused here)
     * @return the loaded DataFrame
     */
    override def source(kc: KhaosContext, module_id: String, config: String, dependence: Dependency): DataFrame = {
        init(kc, config)
        doRead(kc)
    }

    /**
     * Parses the task JSON, verifies read permission, and prepares the
     * metadata and KS3 file system state needed by doRead().
     *
     * @param kc     khaos context
     * @param config task JSON string
     */
    def init(kc: KhaosContext, config: String): Unit = {
        implicit val formats: DefaultFormats = DefaultFormats
        // Deserialize the KS3 source configuration from JSON.
        ks3Config = parse(config, useBigDecimalForDouble = true).extract[Ks3Config]
        // Fault-tolerance switch; defaults to off when not configured.
        ignoreAbnormal = ks3Config.advanced_options.ignore_abnormal
          .getOrElse(IgnoreAbnormal(on_off = Option(false)))
          .on_off
          .getOrElse(false)
        // Permission check through the configured auth extender.
        val authorized: Boolean = MetaUtils.checkReadAuth(kc,
            ks3Config.db_name,
            ks3Config.table_name,
            ks3Config.extender.auth.clazz,
            compact(render(ks3Config.extender.auth.params)))
        if (!authorized) {
            log.error(s"读取ks3 ${ks3Config.db_name}.${ks3Config.table_name}, 权限验证未通过")
            throw new Exception(s"读取ks3 ${ks3Config.db_name}.${ks3Config.table_name}, 权限验证未通过")
        }

        columnInfoMetaList = ks3Config.extract_fields
        sparkSession = kc.sparkSession
        abnormalAccumulator = kc._valuesChannel.getValues[LongAccumulator](KhaosConstants.KHAOS_ABNORMALACCUMULATOR)
        initMeta(kc)
        initKs3FileSystem()
    }

    /**
     * Loads the KS3 table metadata through the configured meta extender and
     * fails fast when no connection information could be resolved.
     */
    def initMeta(kc: KhaosContext): Unit = {
        val metaParams: String = compact(render(ks3Config.extender.meta.params))
        meta = MetaUtils.getKs3Meta(kc,
            ks3Config.db_name,
            ks3Config.table_name,
            ks3Config.extender.meta.clazz,
            metaParams,
            this)
        if (meta.dsKs3Connect == null) {
            throw new Exception(s"ks3 ${ks3Config.db_name}.${ks3Config.table_name} 获取数据管理元数据失败!")
        }
    }

    /**
     * Registers the KS3 access settings on the shared Hadoop configuration.
     */
    def initKs3FileSystem(): Unit = {
        // Delegates to addKs3FileSystem; the returned Configuration is not needed here.
        addKs3FileSystem()
    }

    /**
     * Appends the KS3 access settings to the SparkContext's Hadoop
     * configuration and returns the updated shared configuration.
     */
    def addKs3FileSystem(): org.apache.hadoop.conf.Configuration = {
        val sharedConf: Configuration = sparkSession.sparkContext.hadoopConfiguration
        val ks3Conf: Configuration = HadoopKs3Utils.appendKs3HadoopConfigs(sharedConf, meta.getKs3AccessConfig)
        sharedConf.addResource(ks3Conf)
        sharedConf
    }


    /**
     * Reads the configured KS3 data and returns it as a DataFrame.
     *
     * Pipeline: build the target schema from the exchange-interface metadata
     * (with sink-specific precision workarounds), resolve the input path list
     * for the configured read-path mode, reject empty directories, then parse
     * the files as CSV or JSON and apply the optional filter expression.
     *
     * @param kc khaos context carrying job/scheduler configuration
     * @return the loaded (and optionally filtered) DataFrame
     */
    def doRead(kc:KhaosContext): DataFrame = {
        // All field names and types of the exchange interface (from metadata).
        val columnInfoMetaList: java.util.List[DmTableColumn] = meta.getColumnEntiy
        val columnArr = new ArrayBuffer[(String, String)]()
        val columnSizeArr = new ArrayBuffer[(String, String)](columnInfoMetaList.size)
        for (i <- 0 until columnInfoMetaList.size) {
            columnArr += (columnInfoMetaList.get(i).getColName -> columnInfoMetaList.get(i).getColType)
            //TODO v2.0 adapt data-precision handling for es/mpp/phoenix sinks
            columnSizeArr += (columnInfoMetaList.get(i).getColName -> columnInfoMetaList.get(i).getLength)
        }
        var schema: StructType = null
        // Whether the technical-check mode is enabled.
        val isCheck:Boolean = if ("true".equals(kc.conf.getString(SchedulerConstants.TECHCHECKTAG))) true else false
        logInfo(s"=>>> tech.check.tag=$isCheck")
        if(isCheck){
            // Technical check on: date formats are left untouched.
            logInfo("=>>> 技术检核开启,日期格式不做处理...")
            schema = SparkJobHelper.dynamicBuildDFSchemaForTechcheck(columnArr)
        }else{
            //TODO v2.0 ks3->es: force-cast ks3 floating-point fields to string to avoid precision loss when writing to ES
            val estag: Boolean = kc.conf.getBoolean("judge_essink_tag", defaultValue = false) //defined in khaos-core's JobScheduler class
            val mpptag: Boolean = kc.conf.getBoolean("judge_mppsink_tag", defaultValue = false)//defined in khaos-core's JobScheduler class
            val phoenixtag: Boolean = kc.conf.getBoolean("judge_phoenixsink_tag", defaultValue = false)//defined in khaos-core's JobScheduler class

            // Fallback switches for the per-sink decimal workarounds.
            val phoenixswitch: Boolean = kc.conf.getBoolean(CommonConstants.PHOENIX_DECIMAL_SWITCH,defaultValue = false)
            val gpswitch: Boolean = kc.conf.getBoolean(CommonConstants.GREENPLUM_DECIMAL_SWITCH,defaultValue = false)
            val hashdataswitch: Boolean = kc.conf.getBoolean(CommonConstants.HASHDATA_DECIMAL_SWITCH,defaultValue = false)

            if(estag){
                schema = SparkJobHelper.dynamicBuildDFSchemaForES(columnArr)
            }else if(phoenixtag && phoenixswitch){ //TODO v2.0 ks3->phoenix: floats become decimal, a temporary fix for precision loss on write
                schema = SparkJobHelper.dynamicBuildDFSchema(columnArr,columnSizeArr)
            }else if(mpptag && gpswitch){ //TODO v2.0 ks3->greenplum (mpp): floats become decimal, a temporary fix for precision loss on write
                schema = SparkJobHelper.dynamicBuildDFSchema(columnArr,columnSizeArr)
            }else if(mpptag && hashdataswitch){ //TODO v2.0 ks3->hashdata (mpp): floats become decimal, a temporary fix for precision loss on write
                schema = SparkJobHelper.dynamicBuildDFSchema(columnArr,columnSizeArr)
            }else{
                schema = SparkJobHelper.dynamicBuildDFSchema(columnArr)
            }
        }

        // Supports a user-specified file name, or a path inside the bucket, e.g. /testPath/test.csv
        var inputPathList: mutable.Buffer[String] = mutable.Buffer[String]()
        val path_type:String=ks3Config.read_path_mode.path_type.getOrElse(Ks3Constants.Ks3ReadPathMode.default)
        path_type match {
            case Ks3Constants.Ks3ReadPathMode.default => inputPathList.append(meta.getKs3InputPath.replaceAll("//", "/"))
            case Ks3Constants.Ks3ReadPathMode.file   =>  inputPathList.append(this.getKs3InputPath(kc, ks3Config.read_path_mode.file_name.getOrElse("")))
            case Ks3Constants.Ks3ReadPathMode.folder =>
                val bizDate: String = kc.conf.getString(SchedulerConstants.BIZ_DATE)
                val bizTime: String = kc.conf.getString(SchedulerConstants.BIZ_TIME)
                ks3Config.read_path_mode.folder_regex_range_type.getOrElse("") match {
                    // A time range: generate one path per step in the range.
                    case Ks3Constants.Ks3RegexRangeType.time =>
                        val range_ruler: String = ks3Config.read_path_mode.folder_regex_range_ruler.getOrElse("")
                        if(range_ruler == null ||range_ruler.equals(""))
                            throw new Exception(s"配置信息不完整,范围属性:time,范围规则:$range_ruler")
                        val range_num: String = ks3Config.read_path_mode.folder_regex_range_num.getOrElse("0")
                        if(range_num == null ||range_num.equals(""))
                            throw new Exception(s"配置信息不完整,范围属性:time,范围规则:$range_ruler,范围值:$range_num")
                        inputPathList = generateReadPathList4Time(ks3Config.read_path_mode.folder_regex_path.getOrElse(""),
                            range_ruler,
                            ks3Config.read_path_mode.folder_regex_range_num.getOrElse(0).toString,bizDate,bizTime).toBuffer
                    // No range: still substitute the date/time variables.
                    case Ks3Constants.Ks3RegexRangeType.none =>
                        inputPathList = generateReadPathList4Time(ks3Config.read_path_mode.folder_regex_path.getOrElse(""),
                            ks3Config.read_path_mode.folder_regex_range_ruler.getOrElse(""),"0",bizDate,bizTime).toBuffer
                }
                // Prepend the base path registered in data management.
                val ks3Dir:String = MetaUtils.getKs3FileDir(meta.getTableEntiy)
                inputPathList=inputPathList.map((inputpath: String) => {
                    val tempPath:String= "/" + ks3Dir + "/" + inputpath
                    tempPath.replaceAll("//","/")
                })

        }
        // Deduplicate (a mis-configured range can generate the same path twice).
        inputPathList=inputPathList.distinct
        log.info(s"ks3根据规则匹配到的路径=>inputPathList\n${inputPathList.mkString("\n")}")

        // Empty-directory validation.
        inputPathList.foreach((path: String) =>{
            // An empty directory fails the job.
            if(!filterEmptyFolder(sparkSession.sparkContext.hadoopConfiguration,path))
                throw new Exception(s"ks3当前读取目录为空==>$path")
        })

        /*log.info(s"检查路径是否可读,过滤不可读路径...")
        //探测路径是否可用
        inputPathList=inputPathList.filter(path=>{
            if(checkAndGlobPathIfNecessary(sparkSession.sparkContext.hadoopConfiguration,path,true:Boolean))
                true
            else
                false
        })
        log.info(s"ks3真正读取路径=>inputPathList\n${inputPathList.mkString("\n")}")*/

        // Whether to skip the header row.
        val skipHeader:Boolean = if ("true".equalsIgnoreCase(ks3Config.skip_head)) true else false

        var compression_type: String = ks3Config.compression_type.getOrElse(Ks3Constants.Ks3CompressionType.none)
        if(!compression_type.equals(Ks3Constants.Ks3CompressionType.none)){
            // Reading compressed files: only lzo is supported.
            compression_type match {
                case Ks3Constants.Ks3CompressionType.lzo =>
                    // Load the lzo codec settings.
                    addLzoConf()
                case _ => throw new Exception(s"ks3暂不支持此压缩类型读取=>$compression_type")
            }
        }

        var rddData:RDD[String] = sparkSession.sparkContext.emptyRDD[String]
        var resultData:DataFrame = sparkSession.createDataFrame(sparkSession.sparkContext.emptyRDD[Row],schema)

        // File format registered in metadata: csv or json.
        val fileFormat:String = MetaUtils.getKs3FileFormat(meta.getTableEntiy).trim.toLowerCase
        fileFormat match {
            case "csv" =>
                rddData = readTextFileNotCheckNull(sparkSession, inputPathList, skipHeader, meta.getKs3FileDelimiter)
                // NOTE(review): ks3RDD (blank lines removed) is only consumed by the
                // fixed-length/no-delimiter branch below; the delimiter branches parse
                // the unfiltered rddData — confirm this asymmetry is intended.
                var ks3RDD:RDD[String] = rddData.filter(!(_: String).trim.equals("")) //drop blank lines
                var rowRDD:RDD[Row] = null
                // Fixed-length flag from metadata.
                val isFixed:Boolean = MetaUtils.isfixedLength4Ks3(meta.getTableEntiy)
                var metaDelimiter: String = meta.getKs3FileDelimiter
                //metaDelimiter = getTransDelimiter(metaDelimiter)
                if (isFixed) { // fixed-length mode enabled
                    logInfo("=>>>定长开启......")
                    if (StringUtils.isBlank(metaDelimiter)) {
                        // No delimiter configured: read by fixed field lengths.
                        logInfo(s"=>>>分隔符未设置,定长读取......")
                        val loadingRDD: RDD[Array[String]] = DataframeUtils.Loading(ks3RDD, columnInfoMetaList)
                        rowRDD = convertLoadingData2Rows(schema,metaDelimiter,loadingRDD)
                    } else { // a configured delimiter takes precedence over fixed lengths
                        logInfo(s"=>>>分隔符已设置:$metaDelimiter,按照分隔符读取......")
                        rowRDD = convert2Rows(schema, metaDelimiter, rddData)
                    }
                } else { // variable-length mode
                    logInfo("=>>>不定长读取......")
                    rowRDD = convert2Rows(schema, metaDelimiter, rddData)
                }

                resultData = sparkSession.createDataFrame(rowRDD, schema)

            case "json" =>
                resultData = readJsonFile(sparkSession, inputPathList,schema)
                // Character replacement.
                resultData = convertJsonRows(resultData, columnArr, ks3Config)
                // Re-order the columns to match the metadata order.
                resultData=DataframeUtils.sortDataCol(resultData,columnInfoMetaList)
                // With technical check on, every column is cast to String.
                if (isCheck) {
                    for ((field, dataType) <- columnArr) {
                        if (dataType.equalsIgnoreCase("TIME")) { // trim TIME values, otherwise writing them to mysql fails
                            resultData = resultData.withColumn(field, trim(resultData.col(field).cast(StringType)))
                        } else {
                            resultData = resultData.withColumn(field, resultData.col(field).cast(StringType))
                        }
                    }
                }
            case _ => throw new Exception(s"ks3暂时不支持读取该数据类型！=>$fileFormat")
        }

        // Apply the optional filter expression.
        if (StringUtils.isNotBlank(ks3Config.filter)) {
            resultData = resultData.filter(ks3Config.filter)
        }

        resultData
    }

    /**
     * Returns the delimiter to use, substituting the platform default when
     * the supplied one is null or blank.
     *
     * @param delimiter candidate delimiter (may be null/blank)
     * @return the given delimiter, or MetaDataConstants.KS3_DATA_SEPARATOR when blank
     */
    def setDefaultDelimiter(delimiter: String): String =
        if (StringUtils.isBlank(delimiter)) MetaDataConstants.KS3_DATA_SEPARATOR else delimiter

    /**
     * Turns a literal delimiter string into a regex that matches it verbatim.
     *
     * Fix: the previous implementation prefixed every character with a
     * backslash, which produces invalid or wrong regexes when the delimiter
     * contains letters or digits (e.g. "\\q" throws PatternSyntaxException
     * and "\\d" becomes a digit character class). Pattern.quote escapes any
     * delimiter safely and behaves identically for punctuation delimiters.
     *
     * @param delimiter literal delimiter string
     * @return a regex pattern matching the delimiter literally
     */
    def getTransDelimiter(delimiter: String): String = Pattern.quote(delimiter)

    /**
     * Added in release 930 to support loading by a user-specified file name.
     *
     * When the scheduler hands over a KS3 file path, the path registered in
     * data management is used; otherwise the path is built from the registered
     * base directory plus the supplied file name.
     *
     * @param kc       khaos context
     * @param fileName file name from the task configuration
     * @return the resolved KS3 input path
     */
    def getKs3InputPath(kc: KhaosContext, fileName: String): String = {
        // File path handed over by the scheduler, if any.
        val scheduledPath: String = kc.conf.getString(SchedulerConstants.KS3_FILE_PATH, "")
        // null, empty, or the literal two-character string "" all count as "not passed".
        val schedulerProvidedPath: Boolean =
            !(scheduledPath == null || scheduledPath.equals("") || scheduledPath.equals("\"\""))
        if (schedulerProvidedPath) {
            // Scheduler supplied a path: use the registered data-management path.
            meta.getKs3InputPath
        } else {
            // No scheduler path: combine the registered base dir with the file name.
            val baseDir: String = MetaUtils.getKs3FileDir(meta.getTableEntiy)
            ("/" + baseDir + "/" + fileName).replaceAll("//", "/")
        }
    }

    /**
     * Reads text data line-by-line from a single path.
     * eg:"21202F2938212B3E22272626252E434D"
     *
     * The CSV reader is configured with "\n" as the delimiter so rows are NOT
     * actually split here; each input line comes back as a single-column Row.
     *
     * @param sparkSession spark session
     * @param inputPath    path to read
     * @param skipHeader   whether to skip the header row
     * @param delimiter    field delimiter (not applied at this stage)
     * @return one RDD element per input line
     */
    def readTextFileNotCheckNull(sparkSession: SparkSession, inputPath: String, skipHeader: Boolean, delimiter: String): RDD[String] = {
        logInfo(s"=>>>readText input filepath:==>$inputPath")
        val lines: RDD[Row] = sparkSession.read
          .format("csv")
          .option("header", skipHeader)
          .option("delimiter", "\n") // "\n" keeps each line intact instead of comma-splitting it
          .csv(inputPath)
          .rdd
        if (lines.isEmpty()) {
            logWarning(s"=>>> ks3源文件数据内容为空!")
        }
        lines.map((row: Row) => row.mkString)
    }

    /**
     * Reads text data line-by-line from a list of paths.
     * eg:"21202F2938212B3E22272626252E434D"
     *
     * The CSV reader is configured with "\n" as the delimiter so rows are NOT
     * actually split here; each input line comes back as a single-column Row.
     *
     * @param sparkSession  spark session
     * @param inputPathList paths to read
     * @param skipHeader    whether to skip the header row
     * @param delimiter     field delimiter (not applied at this stage)
     * @return one RDD element per input line
     */
    def readTextFileNotCheckNull(sparkSession: SparkSession, inputPathList: mutable.Buffer[String], skipHeader: Boolean, delimiter: String): RDD[String] = {
        logInfo(s"=>>>readText input filepath:==>${inputPathList.mkString(",")}")
        val lines: RDD[Row] = sparkSession.read
          .format("csv")
          .option("header", skipHeader)
          .option("delimiter", "\n") // "\n" keeps each line intact instead of comma-splitting it
          .csv(inputPathList: _*)
          .rdd
        if (lines.isEmpty()) {
            logWarning(s"=>>> ks3源文件数据内容为空!")
        }
        lines.map((row: Row) => row.mkString)
    }

    /**
     * Loads JSON data from a single path with the supplied schema applied.
     *
     * @param sparkSession spark session
     * @param inputPath    path to read
     * @param schema       schema applied to the JSON source
     */
    def readJsonFile(sparkSession: SparkSession, inputPath: String, schema: StructType): DataFrame = {
        logInfo(s"=>>>readJson input filepath:==>$inputPath")
        sparkSession.read.format("json").schema(schema).load(inputPath)
    }

    /**
     * Loads JSON data from a list of paths with the supplied schema applied.
     *
     * @param sparkSession  spark session
     * @param inputPathList paths to read
     * @param schema        schema applied to the JSON source
     */
    def readJsonFile(sparkSession: SparkSession, inputPathList: mutable.Buffer[String], schema: StructType): DataFrame = {
        logInfo(s"=>>>readJson input filepath:==>${inputPathList.mkString(",")}")
        sparkSession.read.format("json").schema(schema).load(inputPathList: _*)
    }

    /**
     * Converts delimited text lines into Rows matching the schema.
     *
     * Pipeline: drop lines with too few fields, apply the optional content
     * replacement, drop lines whose values cannot be converted, then split
     * into field arrays and hand off to DataTypeConvertUtils.
     *
     * @param schema    target schema
     * @param delimiter literal field delimiter
     * @param rdd       raw text lines
     */
    def convert2Rows(schema: StructType, delimiter: String, rdd: RDD[String]): RDD[Row] = {
        val splitRegex: String = getTransDelimiter(delimiter)
        // Drop lines whose field count does not satisfy the schema.
        val wellFormed: RDD[String] = filterRowData(rdd, schema, delimiter)
        // Apply character replacement when the feature is switched on.
        val replaced: RDD[String] = checkAndReplaceCsvData(wellFormed, delimiter, ks3Config)
        // Pre-convert to verify every value is parseable for its column type.
        val convertible: RDD[String] = replaced.filter((line: String) =>
            checkDataConvertStatus(line.split(splitRegex, -1), schema, delimiter)
        )
        // Split into field arrays and build Rows.
        val fieldArrays: RDD[Array[String]] = convertible.map((line: String) => line.split(splitRegex, -1))
        DataTypeConvertUtils.rdd2Rows(schema, fieldArrays, delimiter)
    }

    /**
     * Converts fixed-length records (already split into field arrays) into Rows.
     *
     * @param schema    target schema
     * @param delimiter delimiter, used here only for logging/error messages
     * @param rdd       pre-split field arrays
     */
    def convertLoadingData2Rows(schema: StructType, delimiter: String, rdd: RDD[Array[String]]): RDD[Row] = {
        // Drop records whose field count does not satisfy the schema.
        val wellFormed: RDD[Array[String]] = rdd.filter((fields: Array[String]) =>
            checkSchemaLength(fields, schema, delimiter)
        )
        // Apply character replacement when the feature is switched on.
        val replaced: RDD[Array[String]] = checkAndReplaceCsvLoadingData(wellFormed, delimiter, ks3Config)
        // Pre-convert to verify every value is parseable for its column type.
        val convertible: RDD[Array[String]] = replaced.filter((fields: Array[String]) =>
            checkDataConvertStatus(fields, schema, delimiter)
        )
        DataTypeConvertUtils.rdd2Rows(schema, convertible, delimiter)
    }

    /**
     * Keeps only lines whose field count satisfies the schema.
     *
     * @return lines that split into at least schema.size fields
     */
    def filterRowData(rdd: RDD[String], schema: StructType, delimiter: String): RDD[String] = {
        val splitRegex: String = getTransDelimiter(delimiter)
        rdd.filter((line: String) => {
            // A single-column schema takes the whole line; otherwise split on the delimiter.
            val fields: Array[String] =
                if (schema.size == 1) Array(line)
                else line.split(splitRegex, -1)
            checkSchemaLength(fields, schema, splitRegex)
        })
    }

    /**
     * Checks that a record carries at least as many fields as the schema.
     *
     * When the record is short and fault tolerance (ignoreAbnormal) is on,
     * the record is counted through abnormalAccumulator, a warning is logged
     * and false is returned so the caller can drop the row; with fault
     * tolerance off the job fails immediately.
     *
     * Fix: the previous per-call "count < 10" guard was dead code — the
     * counter was re-initialized to 0 on every invocation, so the guard was
     * always true and every short record was already being logged. Removing
     * it does not change behavior.
     *
     * @param lineArr   fields of one record
     * @param schema    expected schema
     * @param delimiter delimiter, used only in the log/error message
     * @return true when the record has enough fields, false when it should be dropped
     */
    def checkSchemaLength(lineArr: Array[String],
                          schema: StructType,
                          delimiter: String): Boolean = {
        if (lineArr.length >= schema.size) {
            true
        } else if (ignoreAbnormal) {
            logWarning(s"数据映射失败！实际字段数=${lineArr.length} 期望字段数=${schema.size} 分隔符=$delimiter 数据行=${lineArr.mkString}")
            abnormalAccumulator.add(1)
            false
        } else {
            throw new Exception(s"数据映射失败！实际字段数=${lineArr.length} 期望字段数=${schema.size} 分隔符=$delimiter 数据行=${lineArr.mkString}")
        }
    }

    /**
     * Applies the configured content replacement to delimited text lines.
     *
     * When advanced_options.replace_content is absent or switched off the
     * input RDD is returned untouched; otherwise each line is split, every
     * field is run through the replacement rules, and the fields are joined
     * back with the original delimiter.
     *
     * @return the (possibly replaced) lines
     */
    def checkAndReplaceCsvData(rdd: RDD[String], delimiter: String, ks3Config: Ks3Config): RDD[String] = {
        val replaceInfo: Ks3ReplaceContent = ks3Config.advanced_options.replace_content.orNull
        if (replaceInfo == null || !replaceInfo.on_off.get) {
            rdd
        } else {
            val splitRegex: String = getTransDelimiter(delimiter)
            rdd.map((line: String) => {
                val fields: Array[String] = line.split(splitRegex, -1)
                val rules: List[Ks3ReplaceContentInfo] = replaceInfo.detailed.get
                replaceFieldsContent(fields, rules, delimiter).mkString(delimiter)
            })
        }
    }

    /**
     * Applies the configured content replacement to fixed-length records.
     *
     * Bug fix: the result was previously initialized to null, so when
     * replace_content was absent or switched off this method returned null
     * and the caller (convertLoadingData2Rows) crashed with a
     * NullPointerException on the next transformation. It now returns the
     * input RDD unchanged in that case, mirroring checkAndReplaceCsvData.
     *
     * @return the (possibly replaced) records; never null
     */
    def checkAndReplaceCsvLoadingData(rdd: RDD[Array[String]], delimiter: String, ks3Config: Ks3Config): RDD[Array[String]] = {
        var rows: RDD[Array[String]] = rdd
        val replaceInfo: Ks3ReplaceContent = ks3Config.advanced_options.replace_content.orNull
        if (replaceInfo != null) {
            if (replaceInfo.on_off.get) { // replacement switch is on
                rows = rdd.map((fieldArr: Array[String]) => {
                    // Run every field through the configured replacement rules.
                    val replaceDetailed: List[Ks3ReplaceContentInfo] = replaceInfo.detailed.get
                    replaceFieldsContent(fieldArr, replaceDetailed, delimiter)
                })
            }
        }
        rows
    }

    /**
     * Runs every field of a record through the configured replacement rules.
     *
     * Each field is trimmed first. For rules of type "field": a blank field
     * is replaced by the target only when the rule's source is the literal
     * "{NULL}"; otherwise the rule's scope decides whether the source is
     * replaced everywhere, as a prefix, as a suffix, or at both ends.
     *
     * @param lineArr      fields of one record
     * @param replaceInfos replacement rules, applied in order
     * @param delimiter    unused here; kept for signature compatibility
     * @return the transformed fields
     */
    def replaceFieldsContent(lineArr: Array[String],
                             replaceInfos: List[Ks3ReplaceContentInfo],
                             delimiter: String): Array[String] = {
        lineArr.map((col: String) => {
            replaceInfos.foldLeft(col.trim) { (field: String, rule: Ks3ReplaceContentInfo) =>
                if (!rule.replace_type.equalsIgnoreCase("field")) {
                    field
                } else if (StringUtils.isBlank(field)) {
                    // Blank fields are only touched by the special "{NULL}" source.
                    if (rule.source.equals("{NULL}")) rule.target else field
                } else {
                    rule.scope match {
                        case "all" => field.replaceAllLiterally(rule.source, rule.target) // replace everywhere
                        case "pre" => StringHandleUtils.replacePrefix(field, rule.source, rule.target) // prefix only
                        case "suf" => StringHandleUtils.replaceSuffix(field, rule.source, rule.target) // suffix only
                        case "pre_suf" => StringHandleUtils.replacePrefixAndSuffix(field, rule.source, rule.target) // both ends
                    }
                }
            }
        })
    }

    /**
     * Verifies that every field of a record can be converted to its column type.
     *
     * With fault tolerance (ignoreAbnormal) on, a failing record is logged,
     * counted through abnormalAccumulator and false is returned so the caller
     * can drop it; with fault tolerance off the job fails immediately.
     *
     * Fix: the previous per-call "count < 10" guard and the duplicated
     * !status check were dead code — the counter restarted at 0 on every
     * call, so the guard was always true and every failing record was
     * already being logged. Removing both does not change behavior.
     *
     * @return true when the record converts cleanly, false when it should be dropped
     */
    def checkDataConvertStatus(lineArr: Array[String],
                               schema: StructType,
                               delimiter: String): Boolean = {
        val status: Boolean = DataTypeConvertUtils.checkDataLineAbnormal(schema, lineArr, delimiter)
        if (!status) {
            if (ignoreAbnormal) {
                logWarning(s"=> 数据转换失败! data line=${lineArr.mkString(delimiter)}")
                abnormalAccumulator.add(1)
            } else {
                throw new Exception(s"=> 数据转换失败! data line=${lineArr.mkString(delimiter)}")
            }
        }
        status
    }

    /**
     * Applies the configured content replacement to a JSON DataFrame.
     *
     * Returns the input unchanged when replace_content is absent or off;
     * otherwise rebuilds the DataFrame from the replaced rows with the
     * original schema.
     */
    def convertJsonRows(jsonDf: DataFrame, columnArr: ArrayBuffer[(String, String)], ks3Config: Ks3Config): DataFrame = {
        val replaceInfo: Ks3ReplaceContent = ks3Config.advanced_options.replace_content.orNull
        val replaceEnabled: Boolean = replaceInfo != null && replaceInfo.on_off.get
        if (!replaceEnabled) {
            jsonDf
        } else {
            val replacedRows: RDD[Row] = checkAndReplaceJsonData(jsonDf, columnArr, replaceInfo)
            sparkSession.createDataFrame(replacedRows, jsonDf.schema)
        }
    }

    /**
     * Runs the replacement rules over every row of a JSON DataFrame.
     *
     * Rows are flattened to strings joined by an internal delimiter ("|@@|"),
     * each field is run through the replacement rules, and the strings are
     * split back and converted into Rows against the DataFrame's schema.
     *
     * @return the replaced rows
     */
    def checkAndReplaceJsonData(jsonDf: DataFrame,
                                columnArr: ArrayBuffer[(String, String)],
                                replaceInfo: Ks3ReplaceContent): RDD[Row] = {
        // Internal delimiter used only for the intermediate string form.
        val defaultDelimiter = "|@@|"
        val transDelimiter: String = getTransDelimiter(defaultDelimiter)

        logInfo(s"==>json df schema:${jsonDf.schema.fields.mkString("[", ",", "]")}")
        logInfo(s"==>json dm schema:${columnArr.map((_: (String, String))._1).mkString("[", ",", "]")}")

        // Flatten each row to a delimited string with every field replaced.
        val replacedLines: RDD[String] = jsonDf.rdd.map((row: Row) => {
            val fields = new ArrayBuffer[String](columnArr.size)
            for (i <- columnArr.indices) {
                fields.append(row.get(i).toString)
            }
            val rules: List[Ks3ReplaceContentInfo] = replaceInfo.detailed.get
            replaceFieldsContent(fields.toArray, rules, defaultDelimiter).mkString(defaultDelimiter)
        })
        // Split the strings back into field arrays and rebuild Rows.
        val fieldArrays: RDD[Array[String]] = replacedLines.map((line: String) => line.split(transDelimiter, -1))
        DataTypeConvertUtils.rdd2Rows(jsonDf.schema, fieldArrays, defaultDelimiter)
    }

    /**
     * Expands a path pattern into one concrete path per date/time step in
     * the configured range.
     *
     * @param regex_path  path pattern containing ${bizDate,...}/${bizTime,...} placeholders
     * @param regex_ruler range unit (yyyy/MM/dd/HH/mm)
     * @param regex_num   number of steps back from the schedule time ("0" = only the schedule time)
     * @param bizDate     schedule date (yyyyMMdd)
     * @param bizTime     schedule time (HH:mm:ss)
     * @return one resolved read path per generated (date, time) pair
     */
    def generateReadPathList4Time(regex_path: String, regex_ruler: String, regex_num: String, bizDate: String, bizTime: String): List[String] = {
        generateDateTimeRange(regex_ruler, regex_num, bizDate, bizTime).map { case (date, time) =>
            generateReadPath4Time(regex_path, regex_ruler, regex_num, date, time)
        }
    }

    /**
     * Resolves one concrete read path from a path pattern for a single
     * (date, time) pair.
     *
     * Date placeholders (${bizDate,<pattern>}) and time placeholders
     * (${bizTime,<pattern>}) are located with Ks3Constants.dateRegex /
     * Ks3Constants.timeRegex, their pattern tokens (yyyy/yy/MM/dd, HH/mm)
     * are substituted with concrete values, and each placeholder occurrence
     * in the path is replaced by the formatted result.
     *
     * @param regex_path  path pattern containing the placeholders
     * @param regex_ruler range unit (unused in this method; kept for signature symmetry)
     * @param regex_num   range size (unused in this method; kept for signature symmetry)
     * @param bizDate     date to substitute (yyyyMMdd)
     * @param bizTime     time to substitute (HH:mm:ss)
     * @return the pattern with all date/time placeholders replaced
     */
    def generateReadPath4Time(regex_path:String,regex_ruler:String,regex_num:String,bizDate:String,bizTime:String): String ={
        var regex_path_bak: String = regex_path

        // Placeholder token -> concrete value (yyyy, yy, MM, dd, HH, mm).
        val dateAndTimeMap: Map[String, String] = generateDateTimeMap(bizDate,bizTime)

        var r: Regex = Ks3Constants.dateRegex.r
        var regexList: List[String] = r.findAllIn(regex_path_bak).toList
        regexList.foreach((regex_str: String) =>{
            // e.g. ${bizDate,yyyy-MM-dd}
            var str_bak:String=regex_str
            // Token substitution: ${bizDate,yyyy-MM-dd} -> ${bizDate,2020-01-01}
            var replceStr: String = str_bak.replace(Ks3Constants.Ks3RegexRangeTimeRuler.yyyy,dateAndTimeMap.getOrElse(Ks3Constants.Ks3RegexRangeTimeRuler.yyyy,""))
              .replace(Ks3Constants.Ks3RegexRangeTimeRuler.yy,dateAndTimeMap.getOrElse(Ks3Constants.Ks3RegexRangeTimeRuler.yy,""))
              .replace(Ks3Constants.Ks3RegexRangeTimeRuler.MM,dateAndTimeMap.getOrElse(Ks3Constants.Ks3RegexRangeTimeRuler.MM,""))
              .replace(Ks3Constants.Ks3RegexRangeTimeRuler.dd,dateAndTimeMap.getOrElse(Ks3Constants.Ks3RegexRangeTimeRuler.dd,""))
            // Extract the bare date value, e.g. 2020-01-01
            val pattern: Pattern = Pattern.compile(Ks3Constants.dateRegex2)
            val matcher: Matcher = pattern.matcher(replceStr)
            while(matcher.find()){
                replceStr=matcher.group(Ks3Constants.dateRegex3)
            }
            // Escape $ { } so the placeholder can serve as the regex in replaceFirst.
            str_bak = str_bak.map((ch: Char) =>
                if(ch.toString.equals("$") || ch.toString.equals("{") || ch.toString.equals("}"))
                    "\\"+ch
                else
                    ch
            ).mkString
            // ${bizDate,yyyy-MM-dd} ==> 2020-01-01
            regex_path_bak = regex_path_bak.replaceFirst(str_bak,replceStr)
        })
        r = Ks3Constants.timeRegex.r
        regexList= r.findAllIn(regex_path_bak).toList
        regexList.foreach((str: String) =>{
            // e.g. ${bizTime,HH-mm}
            var str_bak:String=str
            // Token substitution: ${bizTime,HH-mm} -> ${bizTime,08-00}
            var replceStr: String = str_bak.replace(Ks3Constants.Ks3RegexRangeTimeRuler.HH,dateAndTimeMap.getOrElse(Ks3Constants.Ks3RegexRangeTimeRuler.HH,""))
              .replace(Ks3Constants.Ks3RegexRangeTimeRuler.mm,dateAndTimeMap.getOrElse(Ks3Constants.Ks3RegexRangeTimeRuler.mm,""))
            // Extract the bare time value, e.g. 08-00
            val pattern: Pattern = Pattern.compile(Ks3Constants.timeRegex2)
            val matcher: Matcher = pattern.matcher(replceStr)
            while(matcher.find()){
                replceStr=matcher.group(Ks3Constants.timeRegex3)
            }
            // Escape $ { } so the placeholder can serve as the regex in replaceFirst.
            str_bak = str_bak.map((ch: Char) =>
                if(ch.toString.equals("$") || ch.toString.equals("{") || ch.toString.equals("}"))
                    "\\"+ch
                else
                    ch
            ).mkString
            // ${bizTime,HH-mm} ==> 08-00
            regex_path_bak = regex_path_bak.replaceFirst(str_bak,replceStr)
        })

        regex_path_bak
    }

    /**
     * Builds a lookup from time-ruler placeholder to its (zero-padded) value
     * for the given schedule date and time.
     *
     * @param bizDate schedule date in yyyyMMdd format
     * @param bizTime schedule time in HH:mm:ss format
     * @return placeholder -> formatted value for yyyy, yy, MM, dd, HH, mm
     */
    def generateDateTimeMap(bizDate: String, bizTime: String): Map[String, String] = {
        val parsed: java.util.Date = new SimpleDateFormat("yyyyMMdd HH:mm:ss").parse(bizDate + " " + bizTime)
        val cal: Calendar = Calendar.getInstance()
        cal.setTime(parsed)

        val yearStr: String = String.valueOf(cal.get(Calendar.YEAR))
        // Zero-pads a calendar component to two digits.
        def pad2(value: Int): String = String.format("%02d", Integer.valueOf(value))

        Map(
            Ks3Constants.Ks3RegexRangeTimeRuler.yyyy -> yearStr,
            Ks3Constants.Ks3RegexRangeTimeRuler.yy -> yearStr.substring(yearStr.length - 2),
            Ks3Constants.Ks3RegexRangeTimeRuler.MM -> pad2(cal.get(Calendar.MONTH) + 1),
            Ks3Constants.Ks3RegexRangeTimeRuler.dd -> pad2(cal.get(Calendar.DAY_OF_MONTH)),
            Ks3Constants.Ks3RegexRangeTimeRuler.HH -> pad2(cal.get(Calendar.HOUR_OF_DAY)),
            Ks3Constants.Ks3RegexRangeTimeRuler.mm -> pad2(cal.get(Calendar.MINUTE)))
    }

    /**
     * Generates the list of (date, time) pairs covered by the configured range.
     *
     * When regex_num is "0" only the schedule time itself is returned.
     * Otherwise the schedule time is stepped backwards regex_num times by the
     * unit named in regex_ruler (yyyy/MM/dd/HH/mm), collecting each step.
     *
     * Refactor: the five per-unit loops differed only in the Calendar field
     * being decremented, so they were collapsed into one loop driven by a
     * ruler -> Calendar-field lookup. An unknown ruler still fails with a
     * MatchError, and a non-numeric regex_num still fails on toInt, as before.
     *
     * @param regex_ruler range unit (yyyy/MM/dd/HH/mm)
     * @param regex_num   number of steps back ("0" = schedule time only)
     * @param bizDate     schedule date (yyyyMMdd)
     * @param bizTime     schedule time (HH:mm:ss)
     * @return (date, time) pairs formatted as yyyyMMdd and HH:mm:ss
     */
    def generateDateTimeRange(regex_ruler: String, regex_num: String, bizDate: String, bizTime: String): List[(String, String)] = {
        val format = new SimpleDateFormat("yyyyMMdd HH:mm:ss")
        val calendar: Calendar = Calendar.getInstance()
        calendar.setTime(format.parse(bizDate + " " + bizTime))

        val datetimeList: ListBuffer[String] = ListBuffer[String]()
        if (regex_num == "0") {
            // A zero range reads only the folder of the current schedule time.
            datetimeList.append(format.format(calendar.getTime))
        } else {
            // Map the ruler onto the Calendar field to decrement per step.
            val calendarField: Int = regex_ruler match {
                case Ks3Constants.Ks3RegexRangeTimeRuler.yyyy => Calendar.YEAR
                case Ks3Constants.Ks3RegexRangeTimeRuler.MM   => Calendar.MONTH
                case Ks3Constants.Ks3RegexRangeTimeRuler.dd   => Calendar.DAY_OF_MONTH
                case Ks3Constants.Ks3RegexRangeTimeRuler.HH   => Calendar.HOUR_OF_DAY
                case Ks3Constants.Ks3RegexRangeTimeRuler.mm   => Calendar.MINUTE
            }
            for (_ <- 1 to regex_num.toInt) {
                calendar.add(calendarField, -1)
                datetimeList.append(format.format(calendar.getTime))
            }
        }

        // Split "yyyyMMdd HH:mm:ss" into its date and time halves.
        datetimeList.map((str: String) => {
            val parts: Array[String] = str.split(" ")
            (parts(0), parts(1))
        }).toList
    }


    /**
     * Registers the LZO compression codecs on this job's Hadoop configuration
     * and returns that (shared, mutated) configuration object.
     */
    def addLzoConf(): org.apache.hadoop.conf.Configuration = {
        val lzoConf: Configuration = new Configuration
        lzoConf.set("io.compression.codecs", "org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzopCodec")
        lzoConf.set("io.compression.codec.lzo.class", "com.hadoop.compression.lzo.LzoCodec")
        // Merge the codec settings into the SparkContext's live Hadoop conf.
        val jobHadoopConf = sparkSession.sparkContext.hadoopConfiguration
        jobHadoopConf.addResource(lzoConf)
        jobHadoopConf
    }

    /**
     * Checks whether `path` is a usable input location.
     *
     * Returns false when the (possibly glob) path matches nothing, when
     * `checkFilesExist` is set and the first match does not exist, or when the
     * path is an existing directory with no entries. Filesystem errors during
     * the glob/exists check also yield false; errors while listing an existing
     * path are logged to stderr and do not affect the result.
     *
     * @param hadoopConf      Hadoop configuration used to resolve the filesystem
     * @param path            path or glob pattern to validate
     * @param checkFilesExist whether to additionally verify the first glob match exists
     * @return true if the path looks readable and non-empty
     */
    def checkAndGlobPathIfNecessary(hadoopConf: Configuration,
                                    path: String,
                                    checkFilesExist: Boolean): Boolean = {
        var is_available_path:Boolean=true
        val hdfsPath = new Path(path)
        val fs: FileSystem = hdfsPath.getFileSystem(hadoopConf)
        val qualified: Path = hdfsPath.makeQualified(fs.getUri, fs.getWorkingDirectory)
        val globPath: Seq[Path] = SparkHadoopUtil.get.globPathIfNecessary(fs, qualified)

        try{
            // Previously these branches threw exceptions only to catch them in
            // this same method; setting the flag directly is equivalent and
            // avoids exception-based control flow.
            if (globPath.isEmpty) {
                is_available_path=false
            } else if (checkFilesExist && !fs.exists(globPath.head)) {
                // Sufficient to check head of the globPath seq for non-glob scenario
                // Don't need to check once again if files exist in streaming mode
                is_available_path=false
            }
        }catch {
            // e.g. fs.exists failing — treat the path as unavailable.
            case e:Exception =>  is_available_path=false
        }

        try{
            // An existing directory with zero entries is treated as unavailable.
            val statuses: Array[FileStatus] = fs.listStatus(hdfsPath)
            if(fs.isDirectory(hdfsPath) && statuses.length == 0){
                is_available_path=false
            }
        }catch {
            // Path vanished (or never existed) — verdict already decided above.
            case e:FileNotFoundException => //do nothing
            case e:Exception =>  e.printStackTrace()
        }
        is_available_path
    }

    /**
     * 过滤空目录
     * /basePath/path1/  ==> 如果是空目录,则返回false
     * (Returns false only when `path` is an existing directory with no entries;
     * non-directories and listing errors yield true, errors are printed to stderr.)
     * */
    def filterEmptyFolder(hadoopConf: Configuration,
                          path: String
                         ): Boolean ={
        val folder = new Path(path)
        val fs: FileSystem = folder.getFileSystem(hadoopConf)

        try {
            // A non-directory passes; a directory passes only if it has entries.
            !fs.isDirectory(folder) || fs.listStatus(folder).nonEmpty
        } catch {
            case e: Exception =>
                e.printStackTrace()
                true
        }
    }

    // /basePath/path1/* or /basePath/path1/*.lzo ==> 如果没匹配到文件, 则返回false
    // (Expands the glob against the path's filesystem and reports whether it
    // matched anything.)
    def checkPathV1(hadoopConf: Configuration,
                    path: String): Boolean = {
        val candidate = new Path(path)
        val fs: FileSystem = candidate.getFileSystem(hadoopConf)
        val qualified: Path = candidate.makeQualified(fs.getUri, fs.getWorkingDirectory)
        SparkHadoopUtil.get.globPathIfNecessary(fs, qualified).nonEmpty
    }

    /**
     * Derives the output schema from the source config: one KhaosStructField
     * per configured extract field, in configuration order.
     */
    override def schema(kc: KhaosContext, config: String, dependence: Dependency): List[KhaosStructField] = {
        implicit val formats:DefaultFormats = DefaultFormats
        val info:Ks3Config = parse(config, useBigDecimalForDouble = true).extract[Ks3Config]
        // Map each configured extract field straight to its schema entry.
        info.extract_fields.map(ef => KhaosStructField(ef.field, ef.data_type))
    }

}
