package com.kingsoft.dc.khaos.module.spark.source


import java.sql.{Date, Timestamp}
import java.{lang, util}

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.ds.HbaseConnect
import com.kingsoft.dc.khaos.extender.meta.model.table.DmTable
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.{ColumnType, HbaseConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.sink.HBaseRKOption
import com.kingsoft.dc.khaos.module.spark.metadata.source.{ExtractFieldInfo, HBaseConfig}
import com.kingsoft.dc.khaos.module.spark.model.MetaDataEntity
import com.kingsoft.dc.khaos.module.spark.util.{FileUtils, HbaseTableInputFormatKerberos, MetaUtils, SparkJobHelper}
import com.kingsoft.dc.khaos.util.Logging
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants}
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.SerializableWritable
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types._
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

/**
  * Spark source strategy that extracts an HBase table into a DataFrame.
  *
  * Responsibilities: permission check + metadata lookup, HBase connection setup
  * (including optional Kerberos login material shipped to executors), scan
  * filtering (rowkey range, timestamps, max versions), and conversion of raw
  * HBase `Result`s into typed Rows under a dynamically built schema.
  *
  * Created by haorenhui on 2019/06/28.
  */
class HbaseSource extends SourceStrategy with Logging with Serializable {

    private var sparkSession: SparkSession = _
    // SQL filter expression applied to the resulting DataFrame (may be null/empty)
    private var filter: String = _
    private var hbaseConfig: HBaseConfig = _
    // max number of cell versions to read; values > 1 switch to the multi-version path
    private var maxVersions: Int = 1
    private var columnInfoMetaList: List[ExtractFieldInfo] = Nil
    // rowkey composition rules from table metadata (ROWKEY_COLS param)
    private var rowkey_option: List[HBaseRKOption] = Nil
    // length of the random suffix appended to rowkeys (ROWKEY_RANDOM_NUMBER_LENGTH param)
    private var rowkey_option_num: Int = 0

    // HBase connection configuration (not serialized; shipped to executors via broadcast)
    @transient private var hbaseConf: Configuration = _
    private var broadcastedConf: Broadcast[SerializableWritable[Configuration]] = _
    private var HBASE_ZOOKEEPER_QUORUM: String = _
    private var HBASE_PRINCIPAL: String = _
    private var HBASE_KEYTABPATH: String = _
    private var HBASE_KRB5PATH: String = _
    private var USE_KERBEROS: Boolean = false
    // whether rowkey columns are also stored as regular columns; when false the
    // rowkey itself is split into column values (currently always true — see initMeta)
    private var ROWKEY_COL_IS_STORE: Boolean = true
    private var ROWKEY_DELIMETER: String = _

    private var hbaseProperties: Map[String, String] = Map[String, String]()

    /** Data extraction entry point: initialize from config, then read. */
    override def source(kc: KhaosContext,
                        module_id: String,
                        config: String,
                        dependence: Dependency): DataFrame = {
        init(kc, config, module_id)
        val data: DataFrame = doRead()
        data
    }

    /**
      * Validates read permission, loads table metadata, and builds the HBase scan
      * configuration: connection/Kerberos settings, rowkey range, version filters
      * and the column list. The finished Configuration is broadcast for executors.
      */
    def initMeta(kc: KhaosContext, module_id: String): Unit = {
        implicit val formats: DefaultFormats = DefaultFormats
        // permission check
        val checkResult: Boolean = MetaUtils.checkReadAuth(kc,
            hbaseConfig.db_name,
            hbaseConfig.table_name,
            hbaseConfig.extender.auth.clazz,
            compact(render(hbaseConfig.extender.auth.params)))

        if (!checkResult) {
            log.error(s"hbase reader fail, 权限验证未通过")
            throw new Exception(s"hbase reader fail, 权限验证未通过")
        }

        // fetch table metadata
        val entity: MetaDataEntity = MetaUtils.getHBaseMeta(kc,
            hbaseConfig.db_name,
            hbaseConfig.table_name,
            hbaseConfig.extender.meta.clazz,
            compact(render(hbaseConfig.extender.meta.params)),
            this)
        val connect: HbaseConnect = entity.dsHBaseConnect
        val dmTable: DmTable = entity.tableEntiy

        // read the rowkey composition rules from the table params
        dmTable.getParams.asScala.foreach(param => {
            param.get("pKey") match {
                case "ROWKEY_COLS" =>
                    rowkey_option = parse(param.get("pValue").toString, true: Boolean).extract[List[HBaseRKOption]]
                    // attach the declared data type of each rowkey column from the extract fields
                    rowkey_option.foreach((rk: HBaseRKOption) => {
                        for (i <- columnInfoMetaList.indices) {
                            val col: ExtractFieldInfo = columnInfoMetaList(i)
                            if (rk.colName.equals(col.field)) {
                                rk.data_type = Option(col.data_type)
                            }
                        }
                    })

                case "ROWKEY_RANDOM_NUMBER_LENGTH" =>
                    if (param.get("pValue").toString.nonEmpty)
                        rowkey_option_num = param.get("pValue").toString.toInt

                case _ =>
            }
        })

        // NOTE(review): the original code contained an unreachable `if (false)` branch
        // that would set ROWKEY_COL_IS_STORE = false and ROWKEY_DELIMETER = "\\:".
        // Only the always-taken assignments are kept; the rowkey-splitting paths in
        // mapRDD/flatMapRDD are therefore currently inactive until a real condition
        // is wired in.
        ROWKEY_COL_IS_STORE = true
        ROWKEY_DELIMETER = ""

        hbaseConf = HBaseConfiguration.create()
        // built-in hbase: pick up the cluster's hbase-site.xml (carries kerberos settings)
        if (entity.getDatasourceEntiy.isDefaultDs()) {
            hbaseConf.addResource(kc.conf.getString("env.HADOOP_CONF_DIR") + "/hbase-site.xml")
        }

        // external hbase: zookeeper quorum + znode parent from the datasource connect info
        HBASE_ZOOKEEPER_QUORUM = connect.getZkAddresses
        val zookeeper_znode_parent: String = connect.getZookeeperZnodeParent
        hbaseConf.set(HConstants.ZOOKEEPER_QUORUM, HBASE_ZOOKEEPER_QUORUM)
        hbaseConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, zookeeper_znode_parent)

        if (connect.getUseKrbs.toBoolean) {
            USE_KERBEROS = true

            HBASE_PRINCIPAL = connect.getPrincipal
            val keytabFile: String = connect.getKeytabFile
            val krb5File: String = connect.getKrb5File

            // default realm; overridden when HBASE_PRINCIPAL carries an explicit realm
            var realm = "HADOOP.COM"
            val prinsArr: Array[String] = HBASE_PRINCIPAL.split("@")
            if (prinsArr.length >= 2) {
                realm = prinsArr(1)
            }
            hbaseConf.set("hbase.security.authentication", "kerberos")
            hbaseConf.set("hadoop.security.authentication", "kerberos")
            hbaseConf.set("hbase.master.kerberos.principal", "hbase/_HOST@" + realm)
            hbaseConf.set("hbase.regionserver.kerberos.principal", "hbase/_HOST@" + realm)

            /*
            // project-level keytab/krb5 files — currently unused (no ACL at the storage layer)
            HBASE_PRINCIPAL = kc.conf.getString("proxy.user")
            HBASE_KEYTABPATH = System.getenv("SPARK_YARN_STAGING_DIR") + "/" + kc.conf.getString("proxy.keytab.location").split("/").last
            HBASE_KRB5PATH = kc.conf.getString("proxy.krb5.conf")
            */

            // materialize the base64-encoded keytab/krb5 contents (returned by the metadata
            // service) as files on HDFS so they can be distributed to executors below
            val keytabPath: String = System.getenv("SPARK_YARN_STAGING_DIR") + "/hbase_source_" + module_id + ".keytab"
            val krb5Path: String = System.getenv("SPARK_YARN_STAGING_DIR") + "/krb5_" + module_id + ".conf"

            FileUtils.decoderBase64File(keytabFile, keytabPath, FileSystem.get(sparkSession.sparkContext.hadoopConfiguration))
            FileUtils.decoderBase64File(krb5File, krb5Path, FileSystem.get(sparkSession.sparkContext.hadoopConfiguration))
            HBASE_KEYTABPATH = keytabPath
            HBASE_KRB5PATH = krb5Path

            log.info("hbase 开启kerberos")
            log.info("==>user " + HBASE_PRINCIPAL)
            log.info("==>keytab " + HBASE_KEYTABPATH)
            log.info("==>krb5 " + HBASE_KRB5PATH)

            // custom keys consumed by HbaseTableInputFormatKerberos on the executor side
            hbaseConf.set("hbase.tableformat.kerberos.principal", HBASE_PRINCIPAL)
            hbaseConf.set("hbase.tableformat.kerberos.keytabpath", HBASE_KEYTABPATH)
            hbaseConf.set("hbase.tableformat.kerberos.krb5path", HBASE_KRB5PATH)

            kc.sparkSession.sparkContext.addFile(HBASE_KRB5PATH)
            kc.sparkSession.sparkContext.addFile(HBASE_KEYTABPATH)
        }

        hbaseConf.set(HbaseTableInputFormatKerberos.INPUT_TABLE, hbaseConfig.db_name + ":" + hbaseConfig.table_name)

        // scan filter: rowkey range
        if (null != hbaseConfig.advanced_options && hbaseConfig.advanced_options.rowkey_range.nonEmpty) {
            if (hbaseConfig.advanced_options.rowkey_range.get.on_off.get) {
                val row_start: String = hbaseConfig.advanced_options.rowkey_range.get.row_start.get
                val row_end: String = hbaseConfig.advanced_options.rowkey_range.get.row_end.get
                if (null != row_start && !row_start.isEmpty)
                    hbaseConf.set(HbaseTableInputFormatKerberos.SCAN_ROW_START, row_start)
                if (null != row_end && !row_end.isEmpty)
                    hbaseConf.set(HbaseTableInputFormatKerberos.SCAN_ROW_STOP, row_end)
            }
        }
        // scan filter: data versions — precedence is exact timestamp > time range > max versions
        if (null != hbaseConfig.advanced_options && hbaseConfig.advanced_options.data_version.nonEmpty) {
            if (hbaseConfig.advanced_options.data_version.get.on_off.get) {
                val max_versions: Int = hbaseConfig.advanced_options.data_version.get.max_versions.getOrElse(1)
                val timestamp: String = hbaseConfig.advanced_options.data_version.get.timestamp.getOrElse("")
                val min_timestamp: String = hbaseConfig.advanced_options.data_version.get.min_timestamp.getOrElse("")
                val max_timestamp: String = hbaseConfig.advanced_options.data_version.get.max_timestamp.getOrElse("")

                if (null != timestamp && !timestamp.isEmpty) {
                    hbaseConf.set(HbaseTableInputFormatKerberos.SCAN_TIMESTAMP, timestamp)
                } else if (!"".equals(min_timestamp) && !"".equals(max_timestamp)) {
                    hbaseConf.set(HbaseTableInputFormatKerberos.SCAN_TIMERANGE_START, min_timestamp)
                    hbaseConf.set(HbaseTableInputFormatKerberos.SCAN_TIMERANGE_END, max_timestamp)
                } else if (max_versions != 1) {
                    hbaseConf.set(HbaseTableInputFormatKerberos.SCAN_MAXVERSIONS, max_versions.toString)
                    maxVersions = max_versions
                }
            }
        }

        // restrict the scan to the configured "family:qualifier" columns
        val col_list: List[String] = columnInfoMetaList.map(col => col.field)
        hbaseConf.set(HbaseTableInputFormatKerberos.SCAN_COLUMNS, col_list.mkString(" "))

        broadcastedConf = sparkSession.sparkContext.broadcast(new SerializableWritable(hbaseConf))
    }

    /**
      * Parses the module config JSON and initializes all extraction parameters.
      */
    def init(kc: KhaosContext, config: String, module_id: String): Unit = {
        implicit val formats: DefaultFormats = DefaultFormats
        hbaseConfig = parse(config, true: Boolean).extract[HBaseConfig]

        columnInfoMetaList = hbaseConfig.extract_fields
        filter = hbaseConfig.filter
        sparkSession = kc.sparkSession

        initMeta(kc, module_id)
        initProperties(kc)
    }

    /**
      * Applies extra raw HBase properties from the module configuration
      * (keys prefixed "module.hbase.source.original."), best-effort.
      */
    def initProperties(kc: KhaosContext): Unit = {
        try {
            hbaseProperties = kc.conf.getAllWithUnPrefix("module.hbase.source.original.").toMap
            log.info("hbaseSource hbaseProperties")
            hbaseProperties.foreach { case (k, v) => log.info(k + "   " + v) }
            hbaseProperties.foreach { case (k, v) => hbaseConf.set(k, v) }
        } catch {
            case e: Exception =>
                // best-effort: missing overrides are not fatal, but keep the cause in the log
                // (the original swallowed the exception detail entirely)
                log.error(s"未读取到hbase配置! 改用默认配置: ${e.getMessage}")
        }
    }

    /**
      * Reads HBase into a DataFrame: builds the RDD (Kerberos-aware when needed),
      * converts Results to Rows, applies the schema, type conversions, the optional
      * filter, and drops the internal rowkey column before returning.
      */
    def doRead(): DataFrame = {
        var data: DataFrame = null
        try {
            // HbaseTableInputFormatKerberos performs its own login on the executors
            val hbaseRDD: RDD[(ImmutableBytesWritable, Result)] =
                if (USE_KERBEROS) {
                    sparkSession.sparkContext.newAPIHadoopRDD(getConf,
                        classOf[HbaseTableInputFormatKerberos],
                        classOf[ImmutableBytesWritable],
                        classOf[Result])
                } else {
                    sparkSession.sparkContext.newAPIHadoopRDD(getConf,
                        classOf[TableInputFormat],
                        classOf[ImmutableBytesWritable],
                        classOf[Result])
                }

            // single-version and multi-version reads use different Result APIs
            val rdd: RDD[Row] = maxVersions match {
                case 1 => mapRDD(hbaseRDD)
                case _ => flatMapRDD(hbaseRDD)
            }
            val columnArr = new ArrayBuffer[(String, String)]()
            // prepend the rowkey column; it is dropped again before handing data downstream
            columnArr.append(("ROW", ColumnType.STRING))
            for (i <- columnInfoMetaList.indices) {
                columnArr += (columnInfoMetaList(i).field -> columnInfoMetaList(i).data_type)
            }
            val schema: StructType = dynamicBuildDFSchema(columnArr)
            data = sparkSession.createDataFrame(rdd, schema)
            // deduplicate when more than one version is read
            if (maxVersions > 1)
                data = data.distinct()
        } catch {
            case e: Exception =>
                e.printStackTrace()
                log.error(s"hbase reader read fail,失败信息:" + e.getMessage + "失败原因:" + e.getCause)
                throw new Exception(s"hbase reader read fail,失败信息:" + e.getMessage + "失败原因:" + e.getCause, e)
        }

        data = convertData(data)

        // optional filter condition; guard against a null filter from the parsed config
        if (null != filter && !filter.isEmpty) {
            data = data.where(filter)
        }
        // drop the internal rowkey column before handing data downstream
        data = data.drop("ROW")
        data
    }

    /** Builds one Row per HBase row; used when only the latest version is read (maxVersions = 1). */
    def mapRDD(hbaseRDD: RDD[(ImmutableBytesWritable, Result)]): RDD[Row] = {
        var rks: Array[String] = null
        var rkOptionNameList: List[String] = null
        var rkAndTypeMap: Map[String, Option[String]] = null
        if (!ROWKEY_COL_IS_STORE) {
            rkOptionNameList = rowkey_option.map(_.colName)
            rkAndTypeMap = rowkey_option.map(rko => (rko.colName, rko.data_type)).toMap
        }
        hbaseRDD.map(tp => {
            val result: Result = tp._2
            var rowkey: String = Bytes.toString(result.getRow)
            var row = Row()

            if (!ROWKEY_COL_IS_STORE) {
                // strip the random suffix (plus its delimiter) before splitting the rowkey
                if (rowkey_option_num != 0) {
                    rowkey = rowkey.substring(0, rowkey.length - rowkey_option_num - ROWKEY_DELIMETER.length)
                }
                rks = rowkey.split(ROWKEY_DELIMETER, -1)
            }

            row = Row.merge(row, Row(rowkey))

            for (i <- columnInfoMetaList.indices) {
                val column: ExtractFieldInfo = columnInfoMetaList(i)
                val field: String = column.field
                var value: Array[Byte] = null

                if (!ROWKEY_COL_IS_STORE && rkOptionNameList.contains(field)) {
                    // rowkey-derived column: re-encode the parsed rowkey segment as bytes
                    value = rkAndTypeMap(field).get match {
                        case ColumnType.STRING =>   Bytes.toBytes(rks(rkOptionNameList.indexOf(field)))
                        case ColumnType.NUMBER =>   Bytes.toBytes(java.lang.Long.valueOf(rks(rkOptionNameList.indexOf(field))))
                        case ColumnType.DECIMAL =>  Bytes.toBytes(java.lang.Double.valueOf(rks(rkOptionNameList.indexOf(field))))
                        case ColumnType.TIME =>     Bytes.toBytes(rks(rkOptionNameList.indexOf(field)))
                        case ColumnType.DATE =>     Bytes.toBytes(rks(rkOptionNameList.indexOf(field)))
                        case ColumnType.DATETIME => Bytes.toBytes(rks(rkOptionNameList.indexOf(field)))
                    }
                } else {
                    // regular column: field is "family:qualifier"
                    val nameArr: Array[String] = field.split(":", -1)
                    val family: Array[Byte] = Bytes.toBytes(nameArr(0).trim)
                    val qualifier: Array[Byte] = Bytes.toBytes(nameArr(1).trim)
                    value = result.getValue(family, qualifier)
                }

                row = mergeRow(row, column.data_type, value)
            }
            row
        })
    }

    /**
      * Builds one Row per (rowkey, timestamp) pair; used when reading historical
      * versions (maxVersions > 1). Cell values are grouped by timestamp so each
      * version becomes its own output row.
      */
    def flatMapRDD(hbaseRDD: RDD[(ImmutableBytesWritable, Result)]): RDD[Row] = {
        var rks: Array[String] = null
        var rkOptionNameList: List[String] = null
        var rkAndTypeMap: Map[String, Option[String]] = null
        if (!ROWKEY_COL_IS_STORE) {
            rkOptionNameList = rowkey_option.map(_.colName)
            rkAndTypeMap = rowkey_option.map(rko => (rko.colName, rko.data_type)).toMap
        }
        hbaseRDD.flatMap((tp: (ImmutableBytesWritable, Result)) => {
            val result: Result = tp._2
            var rowkey: String = Bytes.toString(result.getRow)

            if (!ROWKEY_COL_IS_STORE) {
                // strip the random suffix (plus its delimiter) before splitting the rowkey
                if (rowkey_option_num != 0) {
                    rowkey = rowkey.substring(0, rowkey.length - rowkey_option_num - ROWKEY_DELIMETER.length)
                }
                rks = rowkey.split(ROWKEY_DELIMETER, -1)
            }

            val array: ArrayBuffer[Row] = ArrayBuffer[Row]()

            // result.getMap is (family -> (qualifier -> (timestamp -> value)))
            val map: util.NavigableMap[Array[Byte], util.NavigableMap[Array[Byte], util.NavigableMap[lang.Long, Array[Byte]]]] = result.getMap
            val buffer: ArrayBuffer[(lang.Long, Map[String, Array[Byte]])] = ArrayBuffer[(lang.Long, Map[String, Array[Byte]])]()
            // flatten to one (timestamp, "family:qualifier" -> value) entry per cell version
            map.asScala.foreach(familyMap => {
                val family: String = Bytes.toString(familyMap._1)
                familyMap._2.asScala.foreach(qualifierMap => {
                    val qualifier: String = Bytes.toString(qualifierMap._1)
                    qualifierMap._2.asScala.foreach(valueMap => {
                        val timestamp: lang.Long = valueMap._1
                        val value: Array[Byte] = valueMap._2
                        buffer.append((timestamp, Map(family + ":" + qualifier -> value)))
                    })
                })
            })
            // group cell values by timestamp (one group per version)
            val multiple_values: Map[lang.Long, ArrayBuffer[(lang.Long, Map[String, Array[Byte]])]] = buffer.groupBy(tuple => tuple._1)
            val values: Iterable[ArrayBuffer[(lang.Long, Map[String, Array[Byte]])]] = multiple_values.values

            // collapse each timestamp group into a field -> value map
            val lineData: Iterable[mutable.HashMap[String, Array[Byte]]] = values.map(rowData => {
                val result: mutable.HashMap[String, Array[Byte]] = mutable.HashMap[String, Array[Byte]]()
                rowData.foreach(fieldAndValue => {
                    result += fieldAndValue._2.lastOption.get
                })
                result
            })
            // build one Row per version
            lineData.foreach(fieldAndValue => {
                var row = Row()
                row = Row.merge(row, Row(rowkey))

                for (i <- columnInfoMetaList.indices) {

                    val column = columnInfoMetaList(i)
                    val field: String = column.field

                    // a field missing for this version may still be derivable from the rowkey
                    val value: Array[Byte] = fieldAndValue.getOrElse(field, {
                        var resultValue: Array[Byte] = null
                        if (!ROWKEY_COL_IS_STORE && rkOptionNameList.contains(field)) {
                            resultValue = rkAndTypeMap(field).get match {
                                case ColumnType.STRING =>   Bytes.toBytes(rks(rkOptionNameList.indexOf(field)))
                                case ColumnType.NUMBER =>   Bytes.toBytes(java.lang.Long.valueOf(rks(rkOptionNameList.indexOf(field))))
                                case ColumnType.DECIMAL =>  Bytes.toBytes(java.lang.Double.valueOf(rks(rkOptionNameList.indexOf(field))))
                                case ColumnType.TIME =>     Bytes.toBytes(rks(rkOptionNameList.indexOf(field)))
                                case ColumnType.DATE =>     Bytes.toBytes(rks(rkOptionNameList.indexOf(field)))
                                case ColumnType.DATETIME => Bytes.toBytes(rks(rkOptionNameList.indexOf(field)))
                            }
                        }
                        resultValue
                    })

                    row = mergeRow(row, column.data_type, value)
                }

                array.append(row)
            })
            array
        })
    }

    /** Retrieves the hbaseConf from the broadcast variable (used on executors). */
    def getConf: Configuration = {
        var conf: Configuration = null
        try {
            conf = broadcastedConf.value.value
        }
        catch {
            case e: Exception =>
                e.printStackTrace()
                log.error(s"hbase reader Unable to getConfig from broadcast,失败信息: ${e.getMessage}, 失败原因: ${e.getCause}")
                throw new Exception(s"hbase reader Unable to getConfig from broadcast,失败信息: ${e.getMessage}, 失败原因: ${e.getCause}", e)

        }
        conf
    }

    /**
      * Casts DataFrame columns to their declared types (date / timestamp / decimal).
      *
      * @return the DataFrame with converted column types
      */
    def convertData(data: DataFrame): DataFrame = {
        var convertData: DataFrame = data
        for (i <- columnInfoMetaList.indices) {
            val entity: ExtractFieldInfo = columnInfoMetaList(i)
            val colName: String = entity.field
            val colType: String = entity.data_type

            colType match {
                case ColumnType.DATE => convertData = convertData.withColumn(colName, data.col(colName).cast(DateType))
                case ColumnType.DATETIME => convertData = convertData.withColumn(colName, data.col(colName).cast(TimestampType))
                case ColumnType.DECIMAL =>
                    // length ("precision,scale") is only required for decimal columns, so the
                    // Option is unwrapped here instead of unconditionally for every column
                    // (the original called entity.length.get for all types and would throw
                    // on non-decimal columns with no length)
                    val lengthArr: Array[String] = entity.length.get.split(",")
                    val realLen: Int = lengthArr(0).toInt
                    val decimals: Int = lengthArr(1).toInt
                    convertData = convertData.withColumn(colName, data.col(colName).cast(DecimalType(realLen, decimals)))
                case _ => convertData = convertData
            }
        }

        convertData
    }

    /**
      * Appends one decoded value to the Row, interpreting the raw HBase bytes
      * according to the declared column type. Null bytes become null cells.
      */
    def mergeRow(row: Row, data_type: String, value: Array[Byte]): Row = {
        var newRow: Row = row
        data_type match {
            case ColumnType.STRING =>
                if (null == value) {
                    newRow = Row.merge(newRow, Row(null))
                } else {
                    newRow = Row.merge(newRow, Row(Bytes.toString(value)))
                }
            case ColumnType.NUMBER =>
                if (null == value) {
                    newRow = Row.merge(newRow, Row(null))
                } else {
                    try {
                        newRow = Row.merge(newRow, Row(Bytes.toLong(value)))
                    } catch {
                        // 4-byte values fail Bytes.toLong; fall back to int and widen
                        case e: IllegalArgumentException =>
                            newRow = Row.merge(newRow, Row(Bytes.toInt(value).toLong))

                        case e: Exception =>
                            e.printStackTrace()
                            log.error(s"hbase 数据读取数值型数据类型转换失败失败信息: ${e.getMessage}, 失败原因: ${e.getCause}")
                            throw new Exception(s"hbase 数据读取数值型数据类型转换失败失败信息: ${e.getMessage}, 失败原因: ${e.getCause}", e)

                    }
                }
            case ColumnType.DECIMAL =>
                if (null == value) {
                    newRow = Row.merge(newRow, Row(null))
                } else {
                    newRow = Row.merge(newRow, Row(Bytes.toDouble(value)))
                }
            case ColumnType.TIME =>
                if (null == value) {
                    newRow = Row.merge(newRow, Row(null))
                } else {
                    newRow = Row.merge(newRow, Row(Bytes.toString(value)))
                }
            case ColumnType.DATE =>
                if (null == value) {
                    newRow = Row.merge(newRow, Row(null))
                } else {
                    newRow = Row.merge(newRow, Row(Date.valueOf(Bytes.toString(value))))
                }
            case ColumnType.DATETIME =>
                if (null == value) {
                    newRow = Row.merge(newRow, Row(null))
                } else {
                    newRow = Row.merge(newRow, Row(Timestamp.valueOf(Bytes.toString(value))))
                }
            case _ =>
                // unknown types fall back to string decoding
                if (null == value) {
                    newRow = Row.merge(newRow, Row(null))
                } else {
                    newRow = Row.merge(newRow, Row(Bytes.toString(value)))
                }
        }
        newRow
    }

    /**
      * Dynamically builds the DataFrame schema from (name, declared type) pairs.
      *
      * @return the StructType matching the Rows produced by mapRDD/flatMapRDD
      */
    def dynamicBuildDFSchema(columnArr: ArrayBuffer[(String, String)]): org.apache.spark.sql.types.StructType = {

        val fields: Array[StructField] = columnArr.map(
            col => {
                val colType: String = col._2.trim.toUpperCase
                colType match {
                    case ColumnType.STRING => StructField(col._1, StringType, nullable = true)
                    case ColumnType.NUMBER => StructField(col._1, LongType, nullable = true)
                    case ColumnType.DATE => StructField(col._1, DateType, nullable = true)
                    case ColumnType.DECIMAL => StructField(col._1, DoubleType, nullable = true)
                    case ColumnType.TIME => StructField(col._1, StringType, nullable = true)
                    case ColumnType.DATETIME => StructField(col._1, TimestampType, nullable = true)
                    case _ => StructField(col._1, NullType, nullable = true)
                }
            }).toArray
        val schema: StructType = org.apache.spark.sql.types.StructType(fields)
        schema
    }

    /**
      * Returns the output schema declared in the module config.
      *
      * Fix: fields are derived from the freshly parsed config instead of the
      * columnInfoMetaList state, which is only populated by init()/source() —
      * the original always returned an empty list when called standalone.
      */
    override def schema(dc: KhaosContext, config: String, dependence: Dependency): List[KhaosStructField] = {
        val fieldSchema: ArrayBuffer[KhaosStructField] = ArrayBuffer[KhaosStructField]()
        implicit val formats: DefaultFormats = DefaultFormats
        hbaseConfig = parse(config, true: Boolean).extract[HBaseConfig]
        for (ef <- hbaseConfig.extract_fields) {
            fieldSchema += KhaosStructField(ef.field, ef.data_type)
        }
        fieldSchema.toList
    }
}
