package com.kingsoft.dc.khaos.module.spark.source

import java.util
import com.alibaba.fastjson.serializer.SerializerFeature
import com.alibaba.fastjson.JSON
import com.fasterxml.jackson.core.JsonParseException
import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.ds.RedisConnect
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.{RedisConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.source._
import com.kingsoft.dc.khaos.module.spark.model.MetaDataEntity
import com.kingsoft.dc.khaos.module.spark.util._
import com.kingsoft.dc.khaos.util.{KhaosConstants, Logging}
import com.redislabs.provider.redis._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.util.LongAccumulator
import org.apache.spark.SparkConf
import org.json4s.jackson.JsonMethods.parse
import org.json4s.{DefaultFormats, JValue}
import redis.clients.jedis.{Jedis, JedisCluster, JedisPool, JedisSentinelPool}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

/**
  * Created by haorenhui on 2020/02/10.
  */
class RedisSource  extends SourceStrategy  with Logging  with Serializable {

    // Parsed json job configuration for this source.
    var redisConfig:RedisSourceConfig=_

    //json config
    //value type enum: string,list,set,hash,zset
    private var value_type: String = _
    //write mode enum: standard (plain json), value2key (delimiter-separated), mix, uniqueValue
    private var write_mode: String = _
    //value delimiter used in value2key mode (regex-escaped, see getTransDelimiter)
    private var value_delimiter: String = _
    // Extract-field metadata from the job config (column name + data type).
    private var columnInfoMetaList: List[ExtractFieldInfo] = Nil
    // Output DataFrame schema derived from columnInfoMetaList.
    private var schema: org.apache.spark.sql.types.StructType = _
    //connection metadata resolved by initMeta
    private var redisNodes: String = _
    private var redisPassword: String = _
    private var masterName: String = _
    private var connectType: String = _

    private var session: SparkSession = _
    private var conf: SparkConf = _
    // Global counter of records skipped when ignore_abnormal is enabled.
    private var abnormalAccumulator: LongAccumulator = _
    private var ignore_abnormal: Boolean = false

    //redisProperties: tuning knobs loaded in initProperties ("module.redis.source.*")
    private var redisProperties: Map[String, String] = Map[String, String]()
    private var partitionNum:Int=3
    private var hashReadCount:Int=10000

    /** Entry point for data extraction: parse the job config, then read Redis into a DataFrame. */
    override def source(kc: KhaosContext,
                        module_id: String,
                        config: String,
                        dependence: Dependency): DataFrame = {
        init(kc, config)
        doRead(kc)
    }

    /**
      * Parse the job json config, resolve the output schema and value-mode
      * settings, then load tuning properties, connection metadata and the
      * spark-redis SparkConf (in that order — later steps read earlier fields).
      */
    def init(kc: KhaosContext, config: String): Unit = {
        implicit val formats:DefaultFormats = DefaultFormats
        redisConfig = parse(config, true:Boolean).extract[RedisSourceConfig]
        session = kc.sparkSession
        abnormalAccumulator = kc._valuesChannel.getValues[LongAccumulator](KhaosConstants.KHAOS_ABNORMALACCUMULATOR)
        columnInfoMetaList = redisConfig.extract_fields
        val valueMode: RedisValueMode = redisConfig.value_mode

        // NOTE(review): `.get` throws if type/write_mode are absent — assumed mandatory in the config.
        value_type = valueMode.`type`.get
        write_mode = valueMode.write_mode.get
        value_delimiter = getTransDelimiter(valueMode.value_delimiter.getOrElse(""))

        // Build the Spark schema from the configured extract fields (name -> data_type).
        val columnArr = new ArrayBuffer[(String, String)]()
        for (i <- columnInfoMetaList.indices) {
            columnArr += (columnInfoMetaList(i).field -> columnInfoMetaList(i).data_type)
        }
        schema = SparkJobHelper.dynamicBuildDFSchema(columnArr)

        // Normalize the stored value layout (json/csv/mix); any unknown mode falls back to mix.
        write_mode = write_mode match {
            case RedisConstants.RedisValueEnum.standard => RedisConstants.RedisValueEnum.standard
            case RedisConstants.RedisValueEnum.value2key => RedisConstants.RedisValueEnum.value2key
            case RedisConstants.RedisValueEnum.uniqueValue => RedisConstants.RedisValueEnum.uniqueValue
            case _ => RedisConstants.RedisValueEnum.mix
        }

        // Whether dirty records are counted and skipped instead of failing the job.
        ignore_abnormal = redisConfig.advanced_options.ignore_abnormal.getOrElse(IgnoreAbnormal(on_off = Option(false))).on_off.getOrElse(false)

        initProperties(kc)
        initMeta(kc)
        initRedisConnConf()
    }

    /**
      * Resolve redis connection metadata (connect type, node addresses,
      * sentinel master name, password) from the metadata service and cache it
      * in fields for later connection building.
      */
    def initMeta(kc: KhaosContext): Unit = {
        val PROJECT_ID: Int = kc.conf.getString(SchedulerConstants.PROJECT_ID).toInt
        var metaParamsMap: Map[String, Any] = redisConfig.extender.meta.params.values
        metaParamsMap=metaParamsMap.updated("project_id",PROJECT_ID)
        import org.json4s.DefaultFormats
        import org.json4s.native.Json
        val metaJson: String = Json(DefaultFormats).write(metaParamsMap)

        // Fetch the connection metadata for the configured db/table via the meta extender.
        val entity: MetaDataEntity = MetaUtils.getRedisMeta(kc,
            redisConfig.db_name.getOrElse(""),
            redisConfig.table_name,
            redisConfig.extender.meta.clazz,
            metaJson,
            this)
        val redisConnect: RedisConnect = entity.getDsRedisConnect
        connectType = redisConnect.getConnectType

        // Pick node addresses (and master name for sentinel) from the connection info.
        connectType match {
            case RedisConstants.RedisConnectEnum.cluster => redisNodes = redisConnect.getClusterNodes
            case RedisConstants.RedisConnectEnum.sentinel => redisNodes = redisConnect.getSentinelNodes; masterName = redisConnect.getMasterName
            case RedisConstants.RedisConnectEnum.masterSlave => redisNodes = redisConnect.getMasterNode
            case _ => throw new Exception(s"不支持的redis类型 $connectType")
        }
        if (redisConnect.getPassword != null)
            redisPassword = redisConnect.getPassword

    }

    /**
      * Load redis source tuning properties ("module.redis.source.*"): partition
      * count and per-batch hash read size. Any failure keeps the field defaults,
      * but the underlying exception is now logged instead of being swallowed.
      */
    def initProperties(kc: KhaosContext): Unit ={
        try {
            redisProperties = kc.conf.getAllWithPrefix("module.redis.source.").toMap
            log.info("redisSource redisProperties")
            redisProperties.foreach {case(k,v) =>log.info(k + "   " + v)}
            partitionNum = redisProperties.getOrElse(RedisConstants.SOURCE_PARTITION_NUMS,RedisConstants.DEFAULT_SOURCE_PARTITION_NUMS).toInt
            hashReadCount = redisProperties.getOrElse(RedisConstants.SOURCE_PER_HASHREAD_NUMS,RedisConstants.DEFAULT_SOURCE_PER_HASHREAD_NUMS).toInt
        } catch {
            case e: Exception =>
                // Fall back to the defaults, but keep the cause in the log for diagnosis.
                log.error("未读取到redis配置! 改用默认配置", e)
        }
    }

    /**
      * Scan Redis for keys matching the configured table_name pattern, read
      * their values according to value_type/write_mode and return a DataFrame.
      * Connections and pools are now released in finally blocks, so a failure
      * during the key scan no longer leaks them.
      */
    def doRead(kc:KhaosContext): DataFrame ={
        var data:DataFrame=session.createDataFrame(session.sparkContext.emptyRDD[Row],schema)
        try {
            // Fuzzy-match keys of the requested value type against table_name.
            var readKeys: Array[String] = Array[String]()
            log.info("redisSource 开始模糊匹配key ")
            connectType match {
                case RedisConstants.RedisConnectEnum.cluster =>
                    val conn: JedisCluster = RedisUtils.getClusterPool(redisNodes, redisPassword, redisProperties,this)
                    try {
                        readKeys = RedisUtils.getTypeKey(conn,redisConfig.table_name,value_type,redisProperties)
                    } finally {
                        RedisUtils.closeConn(conn)
                    }
                case RedisConstants.RedisConnectEnum.sentinel =>
                    val pool: JedisSentinelPool = RedisUtils.getSentinelPool(redisNodes, redisPassword,masterName, redisProperties,this)
                    try {
                        val conn: Jedis = pool.getResource
                        try {
                            readKeys = RedisUtils.getTypeKey(conn,redisConfig.table_name,value_type,redisProperties)
                        } finally {
                            RedisUtils.closeConn(conn)
                        }
                    } finally {
                        RedisUtils.closeConnPool(pool)
                    }
                case RedisConstants.RedisConnectEnum.masterSlave =>
                    val pool: JedisPool = RedisUtils.getSinglePool(redisNodes, redisPassword, redisProperties,this)
                    try {
                        val conn: Jedis = pool.getResource
                        try {
                            readKeys = RedisUtils.getTypeKey(conn,redisConfig.table_name,value_type,redisProperties)
                        } finally {
                            RedisUtils.closeConn(conn)
                        }
                    } finally {
                        RedisUtils.closeConnPool(pool)
                    }
                case _ => throw new Exception(s"不支持的redis类型 $connectType")
            }

            // delete_keys option: publish connection info and the matched keys on the
            // values channel so a downstream operator can clear them after the sync.
            val delete_keys: Boolean = redisConfig.advanced_options.delete_keys.getOrElse(DeleteKeys(on_off = Option(false))).on_off.getOrElse(false)
            if(delete_keys){
                log.info(s"redisSource 开启删除选项 ")
                kc._valuesChannel.emit(RedisConstants.isClearSyncData,"true")
                val connInfoMap: mutable.HashMap[String, String] = collection.mutable.HashMap[String,String]()
                connInfoMap.put(RedisConstants.clearSyncDataInfoConnectType,connectType)
                connInfoMap.put(RedisConstants.clearSyncDataInfoRedisNodes,redisNodes)
                if(redisPassword != null)
                    connInfoMap.put(RedisConstants.clearSyncDataInfoRedisPassword,redisPassword)
                if(masterName != null)
                    connInfoMap.put(RedisConstants.clearSyncDataInfoMasterName,masterName)
                // Cross-operator message passing.
                kc._valuesChannel.emit(RedisConstants.clearSyncDataInfo,connInfoMap)
                kc._valuesChannel.emit(RedisConstants.clearSyncDataInfoKeys,readKeys)
            }
            log.info("redisSource 开始读取数据 ")
            log.info(s"redisSource partitionNum $partitionNum ")
            // Dispatch on the redis value type; hash additionally splits on write_mode.
            value_type match {
                case RedisConstants.RedisKeyEnum.string => data=readString(readKeys,partitionNum)
                case RedisConstants.RedisKeyEnum.list => data=readList(readKeys,partitionNum)
                case RedisConstants.RedisKeyEnum.set => data=readSet(readKeys,partitionNum)
                case RedisConstants.RedisKeyEnum.hash =>
                    write_mode match {
                        // uniqueValue: hash inner keys are the column names.
                        case RedisConstants.RedisValueEnum.uniqueValue =>
                            data=readHash4UniqueKey(readKeys,partitionNum)
                        // standard / value2key / mix share the generic path.
                        case _ =>
                            data=readHash(readKeys,partitionNum)
                    }
                //case RedisValueEnum.zset => data=readZSet()
                case _ => throw new Exception(s"redis read fail, 不支持的value类型$value_type")
            }
        } catch {
            case e: Exception =>
                // Log once and rethrow with the cause attached (printStackTrace removed:
                // the rethrown exception already carries the full stack).
                log.error(s"redis read 读取失败,失败信息: ${e.getMessage}, 失败原因: ${e.getCause}")
                throw new Exception(s"redis read 读取失败,失败信息: ${e.getMessage}, 失败原因: ${e.getCause}",e)
        }
        if (!redisConfig.filter.isEmpty) {
            data = data.filter(redisConfig.filter)
        }
        data
    }

    /**
      * Build the SparkConf used by spark-redis (host, port, timeout, auth).
      * For sentinel the currently monitored master is resolved first; for
      * cluster/master-slave any one node is enough since spark-redis
      * auto-discovers the topology. The sentinel pool is now closed in a
      * finally block so a lookup failure cannot leak it.
      */
    def initRedisConnConf(): SparkConf ={
        conf=new SparkConf()
        val connectTimeout:Int = redisProperties.getOrElse(RedisConstants.SOURCE_CONNECTTIMEOUT,RedisConstants.DEFAULT_SOURCE_CONNECTTIMEOUT).toInt
        if(connectType.equals(RedisConstants.RedisConnectEnum.sentinel)){
            // Ask the sentinels for the current master address and point spark-redis at it.
            val pool: JedisSentinelPool = RedisUtils.getSentinelPool(redisNodes,redisPassword,masterName, redisProperties,this)
            try {
                conf.set("spark.redis.host", pool.getCurrentHostMaster.getHost)
                conf.set("spark.redis.port", pool.getCurrentHostMaster.getPort.toString)
                conf.set("spark.redis.timeout", connectTimeout.toString)
            } finally {
                RedisUtils.closeConnPool(pool)
            }
        }else if(connectType.equals(RedisConstants.RedisConnectEnum.cluster) || connectType.equals(RedisConstants.RedisConnectEnum.masterSlave)){
            // Take the first configured "host:port" node; the rest are auto-discovered.
            redisNodes.split(",").headOption.foreach { hp =>
                val hpArr: Array[String] = hp.split(":")
                conf.set("spark.redis.host", hpArr(0))
                conf.set("spark.redis.port", hpArr(1))  // defaults to 6379 if unset elsewhere
                conf.set("spark.redis.timeout", connectTimeout.toString)
            }
        }

        if(null != redisPassword && redisPassword.nonEmpty)
            conf.set("spark.redis.auth",redisPassword)  // password, when the instance requires auth

        conf
    }


    /** Read string-typed keys and convert each value to one record of the DataFrame. */
    def readString(key:Array[String],partitionNum:Int):DataFrame={
        val kvRDD: RDD[(String, String)] = session.sparkContext.fromRedisKV(key,partitionNum)(new RedisConfig(new RedisEndpoint(conf)))
        convertStringRDDtoDF(kvRDD.map(_._2))
    }

    /** Read list-typed keys; every list element becomes one record of the DataFrame. */
    def readList(key:Array[String],partitionNum:Int):DataFrame={
        val elements: RDD[String] = session.sparkContext.fromRedisList(key,partitionNum)(new RedisConfig(new RedisEndpoint(conf)))
        convertListRDDtoDF(elements)
    }

    /** Read set-typed keys; every set member becomes one record of the DataFrame. */
    def readSet(key:Array[String],partitionNum:Int):DataFrame={
        val members: RDD[String] = session.sparkContext.fromRedisSet(key,partitionNum)(new RedisConfig(new RedisEndpoint(conf)))
        convertSetRDDtoDF(members)
    }

    /** Read hash-typed keys; each (inner_key, value) entry feeds the hash conversion path. */
    def readHash(key:Array[String],partitionNum:Int):DataFrame={
        val entries: RDD[(String, String)] = session.sparkContext.fromRedisHash(key,partitionNum)(new RedisConfig(new RedisEndpoint(conf)))
        convertHashRDDtoDF(entries)
    }

    /**
      * Read hash keys in uniqueValue mode: every hash is serialized to a json
      * string (field -> value) on the driver and parsed through the standard
      * json path. Keys are processed in batches of hashReadCount.
      *
      * Fixes over the previous version: the three connection modes share one
      * batch helper instead of three duplicated bodies; connections/pools are
      * released in finally blocks; an unexpected connectType now raises a
      * descriptive exception instead of a bare MatchError.
      */
    def readHash4UniqueKey(keys:Array[String],partitionNum:Int):DataFrame={
        var data:DataFrame=session.createDataFrame(session.sparkContext.emptyRDD[Row],schema)
        log.info(s"redisSource hash read count $hashReadCount")
        // Batch the keys so each round of hgetAll calls stays bounded.
        val keyBatches: Seq[Seq[String]] = keys.grouped(hashReadCount).map(_.toSeq).toSeq

        // Reads one batch through the supplied hgetAll function, drops hashes
        // emptied by key expiry, converts the rest to rows and unions them in.
        def processBatch(batch: Seq[String], hgetAll: String => util.Map[String, String]): Unit = {
            val dataArr: Seq[String] = batch
                .map(key => JSON.toJSONString(hgetAll(key), SerializerFeature.WriteMapNullValue))
                .filter(!_.equals("{}")) // filter empty data possibly read from expired keys
            val rddData: RDD[String] = session.sparkContext.makeRDD[String](dataArr,partitionNum)
            val rowRDD:RDD[Row] = standardRDD2Rows(schema,convertStandardRDD(rddData,ignore_abnormal),ignore_abnormal)
            data = data.unionByName(session.createDataFrame(rowRDD,schema))
        }

        connectType match {
            case RedisConstants.RedisConnectEnum.cluster =>
                val cluster: JedisCluster = RedisUtils.getClusterPool(redisNodes,redisPassword, redisProperties,this)
                try {
                    keyBatches.foreach(batch => processBatch(batch, key => cluster.hgetAll(key)))
                } finally {
                    RedisUtils.closeConn(cluster)
                }
            case RedisConstants.RedisConnectEnum.sentinel =>
                val pool: JedisSentinelPool = RedisUtils.getSentinelPool(redisNodes,redisPassword,masterName, redisProperties,this)
                try {
                    keyBatches.foreach { batch =>
                        val conn: Jedis = pool.getResource
                        try {
                            processBatch(batch, key => conn.hgetAll(key))
                        } finally {
                            RedisUtils.closeConn(conn)
                        }
                    }
                } finally {
                    RedisUtils.closeConnPool(pool)
                }
            case RedisConstants.RedisConnectEnum.masterSlave =>
                val pool: JedisPool = RedisUtils.getSinglePool(redisNodes,redisPassword, redisProperties,this)
                try {
                    keyBatches.foreach { batch =>
                        val conn: Jedis = pool.getResource
                        try {
                            processBatch(batch, key => conn.hgetAll(key))
                        } finally {
                            RedisUtils.closeConn(conn)
                        }
                    }
                } finally {
                    RedisUtils.closeConnPool(pool)
                }
            case _ =>
                throw new Exception(s"不支持的redis类型 $connectType")
        }

        data
    }

    /**
      * Convert an RDD[String] of raw values to a DataFrame according to the
      * normalized write_mode (standard json / value2key csv / mix).
      * A write_mode with no matching branch (e.g. uniqueValue on a non-hash
      * value type) previously crashed with a bare scala.MatchError; it now
      * raises a descriptive exception.
      */
    def convertRDDtoDF(valueRDD: RDD[String]): DataFrame = {
        // Drop nulls that can appear when a key expires between scan and read.
        val filterRDD: RDD[String] = valueRDD.filter(value => if (value != null) true else false)
        val rowRDD: RDD[Row] = write_mode match {
            // standard mode (json object per record)
            case RedisConstants.RedisValueEnum.standard =>
                standardRDD2Rows(schema, convertStandardRDD(filterRDD, ignore_abnormal), ignore_abnormal)
            // csv-like mode (delimiter-separated values)
            case RedisConstants.RedisValueEnum.value2key =>
                value2KeyRDD2Rows(schema, convertValue2keyRDD(filterRDD, value_delimiter, ignore_abnormal), value_delimiter, ignore_abnormal)
            // mixed mode (a key may contain both json and csv lines)
            case RedisConstants.RedisValueEnum.mix =>
                value2KeyRDD2Rows(schema, convertMixRDD(filterRDD, value_delimiter), value_delimiter, ignore_abnormal)
            case other =>
                throw new Exception(s"redis read fail, unsupported write_mode [$other] for value type [$value_type]")
        }
        session.createDataFrame(rowRDD, schema)
    }

    /** Convert list-element values to a DataFrame via the shared generic path. */
    def convertListRDDtoDF(valueRDD: RDD[String]): DataFrame ={
        convertRDDtoDF(valueRDD)
    }

    /** Convert string-key values to a DataFrame via the shared generic path. */
    def convertStringRDDtoDF(valueRDD: RDD[String]): DataFrame ={
        convertRDDtoDF(valueRDD)
    }

    /** Convert set-member values to a DataFrame via the shared generic path. */
    def convertSetRDDtoDF(valueRDD: RDD[String]): DataFrame ={
        convertRDDtoDF(valueRDD)
    }

    /** Hash has a special uniqueValue mode (inner keys = column names) that needs separate handling. */
    def convertHashRDDtoDF(valueRDD: RDD[(String, String)]): DataFrame ={
        var data:DataFrame=session.createDataFrame(session.sparkContext.emptyRDD[Row],schema)
        write_mode match {
                // uniqueValue: the hash inner keys are the column names.
            case RedisConstants.RedisValueEnum.uniqueValue =>
                // Collect all (field -> value) entries to the driver into one map,
                // i.e. all hashes merge into a single row.
                // NOTE(review): assumes the combined entries fit in driver memory — confirm for large hashes.
                val map: Map[String, String] = valueRDD.collect().toMap
                // Wrap the map in a one-element RDD so standardRDD2Rows can be reused.
                val value: RDD[Map[String, String]] = session.sparkContext.makeRDD[Map[String,String]](Seq(map))
                val rowRDD:RDD[Row] = standardRDD2Rows(schema,value,ignore_abnormal)
                data = session.createDataFrame(rowRDD,schema)
                // standard / value2key / mix modes reuse the shared generic path on the values.
            case _ =>
                val value: RDD[String] = valueRDD.map(_._2)
                data = convertRDDtoDF(value)
        }
        data
    }

    /**
      * Parse each json line into a column-name -> value map.
      * When ignore_abnormal is set, unparsable lines are counted in the global
      * accumulator, replaced by null and filtered out at the end; otherwise the
      * first bad line aborts the job.
      */
    def convertStandardRDD(valueRDD: RDD[String],ignore_abnormal:Boolean=false): RDD[Map[String, String]] ={
        implicit val formats:DefaultFormats = DefaultFormats
        var rdd: RDD[Map[String, String]] = valueRDD.mapPartitions(iter=>{
            // Per-task abnormal counter; every bad line is also added to the global accumulator.
            var abnormalCount:Long=0
            val returnMap: Iterator[Map[String, String]] = iter.map(lineData => {
                var colNameAndValue: Map[String, String] = Map[String, String]()
                try {
                    // lineData is expected to be a json object string.
                    if(!JSON.isValid(lineData) || !JSON.isValidObject(lineData))
                        throw new Exception(s"数据JSON格式错误 data==>[$lineData]")
                    val value: JValue = parse(lineData, true: Boolean)
                    // NOTE(review): unchecked cast — assumes every json value is a string; verify against producers.
                    colNameAndValue = value.values.asInstanceOf[Map[String, String]]
                } catch {
                    case e: Exception =>
                        if (!ignore_abnormal) {
                            // Fail fast when abnormal data must not be ignored.
                            throw new Exception(s"redis read 解析json数据失败, line==>[$lineData]", e)
                        } else {
                            // Skip dirty data; log at most 10 samples per task to limit noise.
                            if (abnormalCount < 10)
                                logError(s"redis read 解析json数据失败,忽略异常开启,异常数据==>[$lineData]")
                            abnormalCount += 1
                            abnormalAccumulator.add(1)
                            colNameAndValue = null
                        }
                }
                colNameAndValue
            })
            returnMap
        })
        if(ignore_abnormal)
            rdd = rdd.filter(_ != null)
        rdd
    }

    /**
      * Split every line on the (regex-escaped) delimiter into a field array.
      * With a single-column schema or an empty delimiter the whole line is one
      * field. Unless ignore_abnormal is set, a line with fewer fields than the
      * schema aborts the job.
      */
    def convertValue2keyRDD(valueRDD: RDD[String],delimiter:String,ignore_abnormal:Boolean): RDD[Array[String]] ={
        if (schema.size == 1 || delimiter == "") {
            valueRDD.map(lineData => Array(lineData))
        } else {
            valueRDD.map { lineData =>
                val lineArr: Array[String] = lineData.split(delimiter, -1)
                if (!ignore_abnormal  && lineArr.length < schema.size) {
                    logInfo(s"=>>> line=${lineArr.mkString}")
                    throw new Exception(s"数据映射失败！实际字段数=${lineArr.length} 期望字段数=${schema.size} 分隔符=$delimiter 数据行=$lineData")
                }
                lineArr
            }
        }
    }

    /**
      * Mixed-format values: try json first; on a json parse failure fall back
      * to delimiter splitting; if both fail, raise an exception.
      */
    def convertMixRDD(valueRDD: RDD[String],delimiter:String): RDD[Array[String]] ={
        implicit val formats:DefaultFormats = DefaultFormats
        val colNames: List[String] = columnInfoMetaList.map(_.field)
        val rdd: RDD[Array[String]] = valueRDD.map(lineData => {

            var strings: Array[String] = new Array[String](colNames.length)
            try{
                val value: JValue = parse(lineData, true:Boolean)
                val colNameAndValue: Map[String, String] = value.values.asInstanceOf[Map[String, String]]
                for (index <- colNames.indices) {
                    // Fields configured in redisConfig but missing from the json are filled with NULL.
                    strings(index)=colNameAndValue.getOrElse(colNames(index),null)
                }
            }catch {
                // Only a json *parse* failure triggers the csv fallback.
                case e:JsonParseException=>
                    try{
                        if(schema.size == 1 || delimiter == ""){
                            strings = Array[String](lineData)
                        }else{
                            strings = lineData.split(delimiter, -1)
                        }

                    }catch{
                        case e:Exception =>
                            throw new Exception(s"redis read fail, 数据格式错误:$lineData",e)

                    }

                // Any other failure (e.g. the cast above) is treated as bad data.
                case e:Exception=>
                    throw new Exception(s"redis read fail, 数据格式错误:$lineData",e)

            }
            strings
        })
        rdd
    }

    /**
      * Convert a map-per-record RDD into Rows following `schema`. Columns
      * missing from a record become NULL. When ignore_abnormal is set, records
      * that fail conversion are counted in the accumulator and dropped;
      * otherwise the first failure aborts the job.
      */
    def standardRDD2Rows(schema: StructType, rdd: RDD[Map[String, String]],ignore_abnormal:Boolean=false): RDD[Row] = {
        var rowRDD:RDD[Row]=rdd.mapPartitions(iter=>{
            // Per-task abnormal counter; every bad record is also added to the global accumulator.
            var abnormalCount:Long=0
            val returnRows: Iterator[Row] = iter.map(map => {
                var row: Row = Row()
                try {
                    for (i <- schema.indices) {
                        val colType: DataType = schema(i).dataType
                        val colName: String = schema(i).name
                        // Fill NULL when the record has no value for this column.
                        // (Fixed: reuse the matched value instead of a redundant second map lookup.)
                        val realValue: String = map.get(colName) match {
                            case Some(v) => String.valueOf(v)
                            case None => null
                        }
                        row = RddConvertUtils.mergeRow(row, colType, realValue)
                    }
                } catch {
                    case e: Exception =>
                        if (ignore_abnormal) {
                            // Skip dirty data; log at most 10 samples per task to limit noise.
                            if (abnormalCount < 10)
                                logError(s"redis read 解析json数据失败,忽略异常开启,异常数据==>[${map.mkString(" ")}]")
                            abnormalCount += 1
                            abnormalAccumulator.add(1)
                            row = null // filtered out after the map
                        } else {
                            // Fail fast when abnormal data must not be ignored.
                            throw new Exception(s"=>>> 数据转换失败! line=${map.mkString(" ")}", e)
                        }

                }
                row
            })
            returnRows
        })
        if(ignore_abnormal)
            rowRDD = rowRDD.filter(_ != null) // drop nulls produced by ignored abnormal records
        rowRDD
    }

    /**
      * Convert a field-array-per-record RDD into Rows following `schema`.
      * Without ignore_abnormal, a record with fewer fields than the schema
      * aborts the job; with it, the out-of-bounds access on the short record
      * is caught below and the record is counted and dropped.
      */
    def value2KeyRDD2Rows(schema: StructType, rdd: RDD[Array[String]],delimiter:String = " ",ignore_abnormal:Boolean=false): RDD[Row] = {
        var rowRDD:RDD[Row]=rdd.mapPartitions(iter=>{
            // Per-task abnormal counter; every bad record is also added to the global accumulator.
            var abnormalCount:Long=0
            val returnRows: Iterator[Row] = iter.map(attributes => {
                var row: Row = Row()
                try {
                    for (i <- schema.indices) {
                        if (i >= attributes.length && !ignore_abnormal) {
                            // Padding short records with NULL was considered and deliberately dropped:
                            //row = Row.merge(row, Row(null))
                            logInfo(s"=>>> line=${attributes.mkString(delimiter)}")
                            throw new Exception(s"数据映射失败！实际字段数=${attributes.length} 期望字段数=${schema.size} 分隔符=$delimiter 数据行=${attributes.mkString(delimiter)}")
                        } else {
                            // With ignore_abnormal, a short record reaches attributes(i) and the
                            // resulting IndexOutOfBoundsException is handled in the catch below.
                            val colType: DataType = schema(i).dataType
                            row = RddConvertUtils.mergeRow(row, colType, attributes(i))
                        }
                    }
                } catch {
                    case e: Exception =>
                        if (ignore_abnormal) {
                            // Skip dirty data; log at most 10 samples per task to limit noise.
                            if (abnormalCount < 10)
                                logError(s"数据解析失败,忽略异常开启,异常数据==>[${attributes.mkString(delimiter)}]")
                            abnormalCount += 1
                            abnormalAccumulator.add(1)
                            row = null // filtered out after the map
                        } else {
                            throw new Exception(s"=>>> 数据转换失败! line=${attributes.mkString(delimiter)}", e)
                        }

                }
                row
            })
            returnRows
        })
        if(ignore_abnormal)
            rowRDD = rowRDD.filter(_ != null) // drop nulls produced by ignored abnormal records
        rowRDD
    }

    /** Escape every character of the delimiter with a backslash so it is safe as a split regex. */
    def getTransDelimiter(delimiter: String): String =
        delimiter.map(ch => "\\" + ch).mkString

    /** Derive the output field schema (name + data type) from the job json config. */
    override def schema(kc: KhaosContext, config: String, dependence: Dependency): List[KhaosStructField] = {
        implicit val formats:DefaultFormats = DefaultFormats
        val info:RedisSourceConfig = parse(config, true).extract[RedisSourceConfig]
        info.extract_fields.map(ef => KhaosStructField(ef.field, ef.data_type))
    }

}
