package com.kingsoft.dc.khaos.module.spark.source

import java.sql.{Date, Timestamp}

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.api.DmTableSplit
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.{ColumnType, MetaDataConstants, MppSqlTypes, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.source.{ExtractFieldInfo, ExtractInfo, HAWQSourceConfig, MppSourceConfig}
import com.kingsoft.dc.khaos.module.spark.request.model.{JdbcConnectEntity, StructFieldEntity}
import com.kingsoft.dc.khaos.module.spark.util._
import com.kingsoft.dc.khaos.util.Logging
import com.sun.xml.bind.v2.TODO
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row}
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

class HAWQSource extends Logging with Serializable {
  private var _connectEntity: JdbcConnectEntity = null
  private var _hawq_Config: HAWQSourceConfig = null
  private var _tableFields: List[ExtractFieldInfo] = null
  private var _kc: KhaosContext = null

  // Physical connection info for the MPP (HAWQ) database, populated by initMetaData()
  private var _host: String = null
  private var _port: String = null
  private var _username: String = null
  private var _password: String = null
  private var _instansename: String = null
  private var dbName = ""
  private var tblName = ""

  // Temporary staging directory on COS/HDFS for extracted data
  private var _fs: FileSystem = null
  private val HDFS_WORK_DIR = "/di/.working_output"
  // Timestamp captured at construction; used to make external table names and staging paths unique
  private val currTime = System.currentTimeMillis()
  private var _module_id = ""
  private var _location: String = ""

  // Table-split (sharding) state; tableSplit stays null when splitting is disabled
  private var tblNameList: scala.List[String] = scala.List[String]()
  private var tableSplit: DmTableSplit = null
  // Only the ORC format is supported for now
  private val FILE_FORMAT = "orc"

  // Known file format names
  object FormatEnum {
    val ORC_FORMAT = "orc"
    val CSV_FORMAT = "csv"
    val TEXT_FORMAT = "text"
    val PARQUET_FORMAT = "parquet"
  }

  // Data source type (original note said: uses ASCII characters); assigned only in commented-out code — TODO confirm still needed
  private var _dataSourceType: String = ""

  /**
   * Data extraction entry point: exports the HAWQ internal table into an
   * HDFS-backed external table via generated SQL, then reads the staged files
   * back as a DataFrame. The temporary external table is dropped in `finally`.
   *
   * @param kc         Khaos context (Spark session, job configuration)
   * @param module_id  id of this module instance (used in staging paths)
   * @param config     JSON string parsed into a [[HAWQSourceConfig]]
   * @param dependence upstream dependency (not used by this implementation)
   * @param ds_config  data-source connection properties (host/port/credentials/instance)
   * @return the extracted DataFrame, or null when the export to HDFS failed
   */
  def source(kc: KhaosContext,
             module_id: String,
             config: String,
             dependence: Dependency,
             ds_config: Map[String, String]): DataFrame = {
    // Parse the module configuration from JSON
    implicit val formats = DefaultFormats
    val hawqConfig: HAWQSourceConfig = parse(config, true).extract[HAWQSourceConfig]
    this._hawq_Config = hawqConfig
    this._kc = kc
    this._module_id = module_id
    // Quote the database and table names for use in generated SQL
    dbName = s"""\"${_hawq_Config.db_name}\""""
    tblName = s"""\"${_hawq_Config.table_name}\""""

    // Initialize connection info and the HDFS file system handle
    init(ds_config)
    var result: DataFrame = null
    log.info("KDW_for_HAWQ DataSource Synchronization!")
    try {
      // Enterprise Cloud 2.1: value-constraint rule for HAWQ business-check trial
      // runs where the input JSON contains multiple sources (kept for reference):
      //      val sourceInfo1 = parse(kc.conf.getString("1"), true).extract[ExtractInfo]
      //      var config: String = compact(render(sourceInfo1.strategy.config))
      //      if (sourceInfo1.id.equals("1") && sourceInfo1.name.equals("读取KDW_for_HAWQ数据") && sourceInfo1.clazz_type.equals("source")) {
      //        _tableFields = parse(config, true).extract[HAWQSourceConfig].extract_fields
      //      } else {
      //        _tableFields = _hawq_Config.extract_fields
      //      }
      val successFlag = read4HAWQ2HDFS()
      if (successFlag) {
        result = read4HDFS()
      } else {
        log.info("KDW_for_HAWQ 内部表导入HDFS外部表失败!")
      }
    } catch {
      case e: Exception =>
        // NOTE(review): printStackTrace is redundant here since the exception is rethrown and presumably logged upstream
        e.printStackTrace()
        throw new Exception("缓存数据失败,失败信息:" + e.getMessage + "失败原因:" + e.getCause)
    } finally {
      // Best-effort cleanup: the generated SQL already appends a DROP, but this
      // also covers the case where the batch failed part-way through.
      val dropExtTblSql = s"DROP EXTERNAL TABLE IF EXISTS ${dbName}.${getMppTmpExtTableName()}"
      log.info("开始删除外部表!")
      val successFlag = MppUtils.executeSql(getConnectUrl(), _username, _password, dropExtTblSql)
      if (successFlag) {
        log.info("成功删除外部表!")
      }
    }
    result
  }

  /**
   * Initializes metadata (connection address), the JDBC connect entity and the
   * HDFS file system handle.
   */
  def init(ds_config: Map[String, String]): Unit = {
    initMetaData(ds_config)
    initJdbcConnectInfo()
    _fs = FileSystem.get(_kc.sparkSession.sparkContext.hadoopConfiguration)
  }

  /**
   * Resolves the MPP physical address. Historically fetched from the metadata
   * service (see commented-out code); currently read directly from ds_config.
   * Throws NoSuchElementException if any expected key is missing.
   */
  def initMetaData(ds_config: Map[String, String]): Unit = {
    log.info("初始化物理地址...")
    //    val className = _hawq_Config.extender.meta.clazz
    //    val dbname = _hawq_Config.db_name
    //    val tblname = _hawq_Config.table_name
    //
    //    val mppConnect = MetaUtils.getMPPMeta(_kc,
    //      dbname,
    //      tblname,
    //      className,
    //      compact(render(_hawq_Config.extender.auth.params)), this).getDsMppConnect
    //
    //    _host = mppConnect.getHost
    //    _port = mppConnect.getPort
    //    _username = mppConnect.getUsername
    //    _password = mppConnect.getPassword
    //    _instansename = mppConnect.getInstanceName
    //    _dataSourceType = mppConnect.getSourceMode

    _host = ds_config("host")
    _port = ds_config("port")
    _username = ds_config("username")
    _password = ds_config("password")
    _instansename = ds_config("instansename")
  }

  /**
   * Initializes the JDBC connect entity from the resolved address and credentials.
   */
  def initJdbcConnectInfo(): Unit = {
    log.info("初始化JDBC")
    val url = getConnectUrl()
    val user = _username
    val password = _password

    this._connectEntity = new JdbcConnectEntity(url,
      user,
      password,
      dbName,
      tblName
    )
  }


  /**
   * Builds the JDBC connection URL (HAWQ speaks the PostgreSQL protocol).
   *
   * @return trimmed URL of the form jdbc:postgresql://host:port/instance
   */
  def getConnectUrl(): String = {
    val host = _host
    val port = _port
    // NOTE: the database part of the URL is the instance name, not _hawq_Config.db_name
    val dbName = _instansename
    val url = s"jdbc:postgresql://${host}:${port}/${dbName}"
    log.info("url: " + url)
    url.trim
  }

  /**
   * Builds the quoted name of the temporary external table. Uniqueness comes
   * from appending the job instance id and the construction-time timestamp.
   *
   * @return quoted table name: "table_jobInstId_timestamp"
   */
  def getMppTmpExtTableName(): String = {
    val idArr = _kc.conf.getString("job.inst.id")
    s"""\"${_hawq_Config.table_name}_${idArr}_${currTime}\""""
  }

  /**
   * Converts internal-table column metadata types into Spark schema types
   * (e.g. varchar -> StringType), sorted by column index.
   *
   * @param columnNameEntity column name -> field metadata (type, index)
   * @return list of (column index, (column name, Spark DataType)), e.g. (1,(id,StringType))
   */
  def fieldTypeToSchema(columnNameEntity: mutable.HashMap[String, StructFieldEntity]): List[(Int, (String, DataType))] = {
    var dfType: DataType = null
    val columnNameAndDataType = new collection.mutable.HashMap[Int, (String, DataType)]
    log.info("源表字段数: " + columnNameEntity.size)
    for (columFields <- columnNameEntity) {
      // column name
      val columuName = columFields._1
      // column type as reported by the source metadata
      val columnType = columFields._2.getFieldType
      // column position within the table
      val index = columFields._2.getFieldIndex
      columnType.toLowerCase match {
        case MppSqlTypes.CHAR | MppSqlTypes.VARCHAR | MppSqlTypes.LONGVARCHAR | MppSqlTypes.BPCHAR => {
          dfType = StringType
        }
        case MppSqlTypes.INT | MppSqlTypes.INT2 | MppSqlTypes.INT4 | MppSqlTypes.INT8 | MppSqlTypes.TINYINT | MppSqlTypes.SMALLINT | MppSqlTypes.INTEGER => {
          dfType = IntegerType
        }
        case MppSqlTypes.BOOLEAN => {
          dfType = BooleanType
        }
        case MppSqlTypes.FLOAT | MppSqlTypes.FLOAT8FLOAT | MppSqlTypes.FLOAT8 => {
          dfType = FloatType
        }
        case MppSqlTypes.DOUBLE => {
          dfType = DoubleType
        }
        case MppSqlTypes.DECIMAL | MppSqlTypes.NUMERIC => {
          // All decimals map to Double because MPP external-table data formats do not support decimal
          dfType = DoubleType
        }
        case MppSqlTypes.DATE | MppSqlTypes.DATETIME => {
          dfType = DateType
        }
        case MppSqlTypes.TIMESTAMP | MppSqlTypes.TIMESTAMPTZ => {
          dfType = TimestampType
        }
        case MppSqlTypes.TIME_WITH_TIMEZONE | MppSqlTypes.TIME_WITHOUT_TIMEZONE => {
          // DataFrames have no time type; map to string here and convert back at the sink
          dfType = StringType
        }
        case _ => {
          // NOTE(review): ${dfType} interpolates the PREVIOUS column's mapped type (or null), which is misleading in this message
          throw new Exception(s"can't convert type [$columnType] to [${dfType}]")
        }
      }
      // Keep the index so the StructType can be ordered to match the rowRDD column order/count
      columnNameAndDataType += index -> (columuName -> dfType)
    }
    val res: List[(Int, (String, DataType))] = columnNameAndDataType.toList.sortBy(_._1)
    log.info("构建有序DataType成功!")
    res
  }

  /**
   * Builds the SQL batch that maps the internal table to an HDFS external
   * table and copies the data into it. Handles both the non-split case and
   * the three table-split strategies (enum, datetime, business).
   *
   * @param connectEntity jdbc connect entity (currently unused here — TODO confirm)
   * @param dataformat    external-table data format (currently always orc)
   * @param location      HDFS staging location for the external table data
   * @param filter        optional WHERE-clause filter; empty/null means no filter
   * @return the generated SQL statements joined by ';' (create external table, insert, drop)
   */
  def getHAWQExtTableSql(connectEntity: JdbcConnectEntity,
                         dataformat: String,
                         location: String,
                         filter: String): String = {

    // Fetch the table-split status; null means splitting is disabled
    tableSplit = TableSplitUtils.getTableSplit(_kc,
      _hawq_Config.db_name,
      _hawq_Config.table_name,
      _hawq_Config.extender.meta.clazz,
      compact(render(_hawq_Config.extender.meta.params)))
    _tableFields = _hawq_Config.extract_fields
    val extTblFieldDefinedStr: String = getHAWQSourceExtTableFieldDefined(_tableFields)
    if (tableSplit == null) { // table splitting disabled

      var copySql = ""
      //      val createTableDDL = s"CREATE EXTERNAL TABLE ${dbName}.${getMppTmpExtTableName()} (LIKE ${dbName}.${getMppTableName()})LOCATION ('hdfs://${location}') FORMAT '${dataformat}'"
      val createTableDDL = s"CREATE EXTERNAL TABLE ${dbName}.${getMppTmpExtTableName()} (${extTblFieldDefinedStr}) LOCATION ('hdfs://${location}') FORMAT '${dataformat}'"
      // Scala's != is null-safe, so checking "" before null does not NPE
      if (filter != "" && filter != null) {
        copySql = s"INSERT INTO ${dbName}.${getMppTmpExtTableName} (SELECT * FROM ${dbName}.${tblName} where ${_hawq_Config.filter})"
      } else {
        copySql = s"INSERT INTO ${dbName}.${getMppTmpExtTableName} (SELECT * FROM ${dbName}.${tblName})"
      }
      val dropExtTblSql = s"DROP EXTERNAL TABLE IF EXISTS ${dbName}.${getMppTmpExtTableName}"
      createTableDDL + ";" + copySql + ";" + dropExtTblSql

    } else {

      // Splitting enabled: build one group of statements per real split table
      tableSplit.getStrategyType match {
        case TableSplitUtils.StrategyTypeEnum.CUSTOM_ENUM => { // enum-based split
          val splitValues = tableSplit.getStrategyValue.split(",").toList
          // Resolve the list of real split-table names
          tblNameList = TableSplitUtils.getRealTable(_kc,
            _hawq_Config.db_name,
            _hawq_Config.table_name,
            _hawq_Config.extender.meta.clazz,
            compact(render(_hawq_Config.extender.meta.params)), this, tableSplit, "in", splitValues)
          //          tblNameList = tblNameList.map(tblname => tblname.toLowerCase())
          val totalSQL = generateSqlStatements(
            dataformat,
            location,
            tblNameList,
            filter)
          totalSQL
        }
        case TableSplitUtils.StrategyTypeEnum.DATETIME => { // datetime-based split
          val jobBizDate: String = _kc.conf.getString(SchedulerConstants.BIZ_DATE)
          // Derive the split key from the business date at year/month/day granularity
          val splitTime = tableSplit.getStrategyValue match {
            case TableSplitUtils.StrategyValueEnum.year => jobBizDate.substring(0, 4)
            case TableSplitUtils.StrategyValueEnum.month => jobBizDate.substring(0, 6)
            case TableSplitUtils.StrategyValueEnum.day => jobBizDate
          }
          val splitValues: scala.List[String] = scala.List[String](splitTime)
          // Resolve the list of real split-table names
          tblNameList = TableSplitUtils.getRealTable(_kc,
            _hawq_Config.db_name,
            _hawq_Config.table_name,
            _hawq_Config.extender.meta.clazz,
            compact(render(_hawq_Config.extender.meta.params)), this, tableSplit, "=", splitValues)
          val totalSQL = generateSqlStatements(
            dataformat,
            location,
            tblNameList,
            filter)
          totalSQL
        }
        // business split: real table name = configured name + "___" + suffix
        case TableSplitUtils.StrategyTypeEnum.BUSSINESS => {
          var suffixValue = ""
          var realTable = _hawq_Config.table_name

          if (_hawq_Config.sub_table != None) {
            if (_hawq_Config.sub_table.get.on_off == "true") {
              suffixValue = _hawq_Config.sub_table.get.suffix.trim
              // "___" is the reserved separator between base name and suffix
              if (suffixValue.contains("___")) {
                throw new IllegalArgumentException("业务分表后缀禁止包含三个连续下划线!")
              }
              if (suffixValue != "" && suffixValue != null) {
                realTable = realTable + "___" + suffixValue
              } else {
                throw new IllegalArgumentException("未正确填写业务分表后缀!")
              }
            } else {
              throw new IllegalArgumentException("未开启业务分表开关!")
            }
          }
          // The single real table name for this split
          tblNameList = List[String](realTable)
          val totalSQL = generateSqlStatements(
            dataformat,
            location,
            tblNameList,
            filter)
          totalSQL
        }
      }
    }
  }

  /**
   * Generates the SQL batch for the table-split case: for each real table a
   * (create external table; insert; drop) triple, all joined by ';'.
   *
   * @param dataformat external-table data format
   * @param location   base HDFS staging location (per-table subdirectory appended)
   * @param tblList    real table names
   * @param filter     optional WHERE-clause filter; empty/null means no filter
   * @return the joined SQL statements
   */
  def generateSqlStatements(dataformat: String,
                            location: String,
                            tblList: scala.List[String],
                            filter: String): String = {
    val tblArr = new ArrayBuffer[String]
    var copySql = ""
    val extTblFieldDefinedStr: String = getHAWQSourceExtTableFieldDefined(_hawq_Config.extract_fields)
    for (tblName <- tblList) {
      // Avoid duplicate external table names by appending: innerTable_jobInstId_timestamp
      val tblNameEXT = s"""\"${tblName}_${_kc.conf.getString("job.inst.id")}_${currTime}\""""
      // NOTE(review): this DDL string is missing the closing ') FORMAT ...' suffix present in the non-split path — TODO confirm intended
      val createTableDDL = s"CREATE WRITABLE EXTERNAL TABLE ${dbName}.${tblNameEXT} (${extTblFieldDefinedStr}) LOCATION ('hdfs://${location + "/" + tblName}"
      var tbName = s"""\"${tblName}\""""
      if (filter != "" && filter != null) {
        copySql = s"INSERT INTO ${dbName}.${tblNameEXT} (SELECT * FROM ${dbName}.${tbName} where ${_hawq_Config.filter})"
      } else {
        copySql = s"INSERT INTO ${dbName}.${tblNameEXT} (SELECT * FROM ${dbName}.${tbName})"
      }
      val dropExtTblSql = s"DROP EXTERNAL TABLE IF EXISTS ${dbName}.${tblNameEXT}"
      tblArr += createTableDDL + ";" + copySql + ";" + dropExtTblSql
    }
    // All statements, joined into a single batch
    tblArr.mkString(";")
  }

  /**
   * Builds the physical staging location for the external table data.
   *
   * @param workDir default staging path (e.g. /di/.working_output)
   * @return full location, e.g. cosn://1-111111/di/working/
   */
  def getLocation(workDir: String): String = {
    val jobID = _kc.conf.getString(SchedulerConstants.JOB_INSTANCE_ID)
    var location: String = ""
    val hdfsInfo = CosApiUtils.getHDFSInfoFromFile()
    var nameNodeAddress = ""
    //    var nameNodePort = ""
    // Pick the offline/online namenode address based on the run environment
    val dev = _kc.conf.getString(SchedulerConstants.RUN_ENV)
    dev match {
      case "test" => {
        nameNodeAddress = hdfsInfo.getProperty("hdfs.namenode.address.offline")
        //        nameNodePort = hdfsInfo.getProperty("hdfs.namenode.port.offline")
      }
      case _ => {
        nameNodeAddress = hdfsInfo.getProperty("hdfs.namenode.address.online")
        //        nameNodePort = hdfsInfo.getProperty("hdfs.namenode.port.online")
      }
    }
    val cluster_namespace = _kc.conf.getString("cluster.namespace")
    //    val systemPath = System.getenv("SPARK_YARN_STAGING_DIR")
    val systemPath = "/tmp"

    val ip = nameNodeAddress // Enterprise Cloud 2.0 uses the domain name directly
    if (workDir.startsWith("/")) {
      //      location = s"${bucket}.${endpoint}${workDir}/${jobID}/${_module_id}/${currTime}"
      location = s"${ip}${systemPath.replace(cluster_namespace, "")}${workDir}/${jobID}/${_module_id}/${currTime}/"
    } else {
      location = s"${ip}/${systemPath.replace(cluster_namespace, "")}/${workDir}/${jobID}/${_module_id}/${currTime}/"
    }
    log.info(s"LOCATION: ${location}")
    location
  }

  /**
   * Exports the internal table's data into the HDFS external table by
   * executing the generated SQL batch.
   *
   * @return true when the SQL batch executed successfully
   */
  def read4HAWQ2HDFS() = {
    var flag = false
    val url = getConnectUrl()
    val user = _username
    val password = _password

    _location = getLocation(HDFS_WORK_DIR)
    val totalSQL = getHAWQExtTableSql(
      _connectEntity,
      FILE_FORMAT,
      _location,
      _hawq_Config.filter)

    // Submit all SQL statements as one batch
    log.info("totalSQL ==> " + totalSQL)
    val res = MppUtils.executeSql(url, user, password, totalSQL)
    if (res == true) {
      log.info("数据映射到外部表成功!")
      flag = true
    }
    flag
  }

  /**
   * Reads the staged external-table files from HDFS into a DataFrame
   * (kdw_for_hawq -> hdfs -> df). When the staging path does not exist, an
   * empty DataFrame with the source table's schema is returned instead.
   *
   * @return the DataFrame handed to downstream modules
   */
  def read4HDFS(): DataFrame = {
    log.info("开始读取KDW_for_HAWQ中的文件...")
    var hdfsReadPath = ""
    //    if (HDFS_WORK_DIR.endsWith("/")) {
    //      hdfsReadPath = HDFS_WORK_DIR + _kc.conf.getString("job.inst.id") + "/" + _module_id + "/" + currTime + "/*"
    //    } else {
    //      hdfsReadPath = HDFS_WORK_DIR + "/" + _kc.conf.getString("job.inst.id") + "/" + _module_id + "/" + currTime + "/*"
    //    }


    val cluster_namespace = _kc.conf.getString("cluster.namespace")
    val systemPath = System.getenv("SPARK_YARN_STAGING_DIR")
    val jobID = _kc.conf.getString(SchedulerConstants.JOB_INSTANCE_ID)

    // File format to read; currently always orc
    val type_format = FILE_FORMAT
    var resDataFrame: DataFrame = null
    // Must mirror the path layout produced by getLocation()
    hdfsReadPath = s"/tmp${HDFS_WORK_DIR}/${jobID}/${_module_id}/${currTime}/"
    log.info("hdfsReadPath: " + hdfsReadPath)
    type_format match {
      case FormatEnum.ORC_FORMAT => {
        //        println("hdfs exists: " + _fs.exists(new Path(hdfsReadPath)))
        if (_fs.exists(new Path(s"$hdfsReadPath"))) {
          resDataFrame = _kc.sparkSession.read.orc(cluster_namespace + hdfsReadPath)
          log.info("读取kdw_for_hawq数据成功!")
        } else {
          // Build an empty DataFrame that still carries the source schema
          var tableSchema: mutable.HashMap[String, StructFieldEntity] = null
          // Use the real (split) table's metadata when splitting is enabled
          if (tableSplit != null) {
            tableSchema = _connectEntity.getMppTrueTableSchema(tblNameList.head, 6000)
          } else {
            // schema of the source table itself
            tableSchema = _connectEntity.getMppTableSchema(6000)
          }
          // Column names with converted DataTypes, e.g. Map{(1,("id",IntegerType))}
          val tblNameAndDataType: List[(Int, (String, DataType))] = fieldTypeToSchema(tableSchema)
          tblNameAndDataType.foreach(i => logInfo(s"$i"))
          // Build the StructType fields in column order
          val schemaArr = new ArrayBuffer[StructField]()
          for (elem <- tblNameAndDataType) {
            schemaArr.append(StructField(elem._2._1, elem._2._2, true))
          }
          // 1. empty RDD
          val emptyRDD = _kc.sparkSession.emptyDataFrame.rdd
          // 2. schema
          val schema = StructType(schemaArr)
          // 3. empty DataFrame with schema
          resDataFrame = _kc.sparkSession.createDataFrame(emptyRDD, schema)
          log.warn("源表数据为空!!!")
        }
      }
      case FormatEnum.CSV_FORMAT
      => {
        val cosRDD: RDD[String] = _kc.sparkSession.sparkContext.textFile(hdfsReadPath)

        if (cosRDD.isEmpty()) {
          throw new Exception(s"没有读取到COS源文件数据......")
        }
        var tableSchema: mutable.HashMap[String, StructFieldEntity] = null

        // Use the real (split) table's metadata when splitting is enabled
        if (tableSplit != null) {
          tableSchema = _connectEntity.getMppTrueTableSchema(tblNameList.head, 6000)
        } else {
          // schema of the source table itself
          tableSchema = _connectEntity.getMppTableSchema(6000)
        }

        // Column names with converted DataTypes, e.g. Map{(1,("id",IntegerType))}
        val tblNameAndDataType: List[(Int, (String, DataType))] = fieldTypeToSchema(tableSchema)

        // Build the StructType fields in column order
        val schemaArr = new ArrayBuffer[StructField]()
        for (elem <- tblNameAndDataType) {
          schemaArr.append(StructField(elem._2._1, elem._2._2, true))
        }
        // StructType built from the column names and types, e.g.
        //Array(StructField("id", IntegerType, true), StructField("name", StringType, true), StructField("phone", DoubleType, true))
        val schema = StructType(schemaArr.toArray)


        // Split each line on the configured delimiter (hard-coded "," here) and coerce fields to the schema types
        val rowRDD = cosRDD.filter(!_.trim.equals(""))
          .map(_.split("\\" + ",", -1)).map(attributes => {
          var row: Row = Row()
          for (i <- 0 until schema.fields.length) {
            // metadata schema and file columns are assumed to match in order and count
            val colType = schema.fields.toList(i).dataType
            colType match {
              case StringType => {
                if (MetaDataConstants.NULL == attributes(i)) {
                  row = Row.merge(row, Row(null))
                } else {
                  row = Row.merge(row, Row(attributes(i)))
                }
              }
              case IntegerType => {
                if (MetaDataConstants.NULL == attributes(i)) {
                  // data contains the NULL marker
                  row = Row.merge(row, Row(null))
                } else {
                  row = Row.merge(row, Row(attributes(i).toInt))
                }
              }
              case DateType => {
                if (MetaDataConstants.NULL == attributes(i)) {
                  row = Row.merge(row, Row(null))
                } else {
                  //  row = Row.merge(row, Row(attributes(i)))
                  row = Row.merge(row, Row(Date.valueOf(attributes(i))))
                }
              }
              case TimestampType => {
                if (MetaDataConstants.NULL == attributes(i)) {
                  row = Row.merge(row, Row(null))
                } else {
                  // Strip a timezone offset suffix (e.g. "+08") before parsing
                  // NOTE(review): contains("\\+") looks for a literal backslash-plus, not "+" — TODO confirm intended
                  if (attributes(i).contains("\\+")) {
                    val colArr = attributes(i).split("\\+", -1)
                    if (colArr.length > 1) {
                      row = Row.merge(row, Row(Timestamp.valueOf(colArr(0))))
                    } else {
                      row = Row.merge(row, Row(Timestamp.valueOf(attributes(i))))
                    }
                  } else {
                    row = Row.merge(row, Row(Timestamp.valueOf(attributes(i))))
                  }
                }
              }
              case FloatType => {
                if (MetaDataConstants.NULL == attributes(i)) {
                  row = Row.merge(row, Row(null))
                } else {
                  row = Row.merge(row, Row(attributes(i).toFloat))
                }
              }
              case DoubleType => {
                if (MetaDataConstants.NULL == attributes(i)) {
                  row = Row.merge(row, Row(null))
                } else {
                  row = Row.merge(row, Row(attributes(i).
                    toDouble))
                }
              }
              case NullType => row = {
                Row.merge(row, Row(attributes(i)))
              }
              case _ => {
                row = Row.merge(row, Row(attributes(i)))
              }
            }
          }
          row
        })
        // Apply the schema to the RDD
        resDataFrame = _kc.sparkSession.createDataFrame(rowRDD, schema)
        log.info("创建DataFrame成功!")
      }
      case _ => {
        throw new IllegalArgumentException(s"暂不支持读取 ${type_format} 类型文件格式!")
      }
    }
    resDataFrame
  }

  /**
   * Returns the upstream schema derived from the configured extract fields.
   *
   * @param dc         Khaos context (unused)
   * @param config     JSON string parsed into a [[MppSourceConfig]]
   * @param dependence upstream dependency (unused)
   * @return one KhaosStructField per configured extract field
   */
  def schema(dc: KhaosContext,
             config: String,
             dependence: Dependency): List[KhaosStructField] = {
    val fieldSchema = ArrayBuffer[KhaosStructField]()
    implicit val formats = DefaultFormats
    val mppInfo = parse(config, true).extract[MppSourceConfig]
    val extrFields = mppInfo.extract_fields
    for (ef <- extrFields) {
      fieldSchema += KhaosStructField(ef.field, ef.data_type)
    }
    fieldSchema.toList
  }

  /**
   * Generates the MPP external-table column definitions from the configured
   * extract fields.
   *
   * @param extract_fields extract field list
   * @return comma-joined column definitions, e.g. a int, b varchar, c double precision
   */
  def getHAWQSourceExtTableFieldDefined(extract_fields: List[ExtractFieldInfo]): String = {
    val fieldList = new Array[String](extract_fields.length)
    // Defaults when a field carries no explicit length/precision
    var varcharLength = 50
    var p = 8
    var s = 4
    for (index <- extract_fields.indices) {
      val fieldType = extract_fields(index).data_type
      // decimal: kdw_for_hawq cannot map decimal straight to double precision (Spark reads the declared
      // type), so convert to numeric(p,s) using the field's configured precision/scale
      // varchar: Enterprise Cloud 2.2 — after the Spark upgrade, Spark cannot read varchar with length (-1),
      // so an explicit length is emitted
      if (fieldType.equalsIgnoreCase(ColumnType.DECIMAL)) {
        val length = extract_fields(index).length.get
        val strings = length.split(",")
        p = strings(0).trim.toInt
        s = strings(1).trim.toInt
      } else if (fieldType.equalsIgnoreCase(ColumnType.STRING) || fieldType.equalsIgnoreCase(ColumnType.TIME)) {
        varcharLength = extract_fields(index).length.getOrElse("50").toInt
      }
      var targetType = "varchar"
      fieldType match {
        case ColumnType.STRING => targetType = s"varchar(${varcharLength})"
        case ColumnType.NUMBER => targetType = "bigint"
        case ColumnType.DECIMAL => targetType = s"numeric(${p},${s})"
        case ColumnType.TIME => targetType = s"varchar(${varcharLength})"
        case ColumnType.DATETIME => targetType = "timestamp"
        case ColumnType.DATE => targetType = "date"
        case "SERIAL4" => targetType = "bigint"
        case "SERIAL8" => targetType = "bigint"
        case _ => targetType = "varchar"
      }
      fieldList(index) = s"""\"${extract_fields(index).field}\" ${targetType}"""
    }
    fieldList.mkString(",")
  }
}
