package com.kingsoft.dc.khaos.module.spark.sink

import java.util

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.constants.{DataTypeEnum, MetaDataConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.sink.CosSinkConfig
import com.kingsoft.dc.khaos.module.spark.model.center.metric.{CosProcessDataMetric, SyncProcessDataMetric}
import com.kingsoft.dc.khaos.module.spark.model.cos.CosDataStatusInfo
import com.kingsoft.dc.khaos.module.spark.model.exchangefile.ExchangeFileUtils
import com.kingsoft.dc.khaos.module.spark.model.exchangefile.control.ControlFile
import com.kingsoft.dc.khaos.module.spark.model.exchangefile.ddl.StructFileDDL
import com.kingsoft.dc.khaos.module.spark.model.{MetaDataEntity, RelationDataStatusInfo, StructFieldEntity}
import com.kingsoft.dc.khaos.module.spark.source.CosSource
import com.kingsoft.dc.khaos.module.spark.util._
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.functions.{col, concat_ws, lit}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.util.LongAccumulator
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.mutable.ArrayBuffer
import scala.util.Random

/**
  *
  * create by goosoog 2019/6/20.
  *
  */
class CosSink extends SinkStrategy {

  // Default field separator for delimited output files.
  final val SEPARATOR = MetaDataConstants.DATA_SEPARATOR
  // Mutable state below is populated by init()/sink() and shared by the helper methods.
  private var sparkSession: SparkSession = null
  // Job submit time, read from the scheduler configuration.
  private var submitTime: String = null
  // FileSystem handle configured with the COS credentials from the table metadata.
  private var cosFs: FileSystem = null
  // Table metadata (columns, output path, COS access configuration).
  private var meta: MetaDataEntity = null
  private var columnEntiy: util.List[DmTableColumn] = null
  // Whether a header row is prepended to the output file.
  private var isAddHeader: Boolean = false
  // Write mode (e.g. append vs overwrite) from the sink configuration.
  private var writeOption: String = ""
  // Data-status payload reported after a successful write.
  private var cosDataStatusInfo: CosDataStatusInfo = null
  // Optional user-supplied file name (added in release 930).
  private var custFileName: String = ""
  // Row-count accumulator filled while rendering the output.
  private var accu: LongAccumulator = _
  // Size in bytes of the pre-existing object in append mode (0 when absent).
  private var oldFileSize: Long = 0
  private var characterSet = ""

  // Fill values used to replace nulls in the COS file content fields.
  val NA_FILL_NUMERIC = 0
  val NA_FILL_STRING = MetaDataConstants.NULL
  // Temporary directory configured while the job runs.
  val JOB_TMP_DIR = "/di/tmp/job_"
  // Working directory used to cache data before promotion.
  val WORKING_DIR = "/.working/di"

  /**
    * Initializes the sink from the parsed COS sink configuration: runs the
    * write-authorization check, loads the table metadata, resolves the file
    * character set, opens the COS file system and caches the Spark session
    * and submit time for later use.
    *
    * @param kc          job context (Spark session + scheduler configuration)
    * @param cosSinkConf parsed COS sink configuration
    */
  def init(kc: KhaosContext, cosSinkConf: CosSinkConfig): Unit = {
    // Authorization check. NOTE(review): the result was previously bound to an
    // unused val; assuming checkWriteAuth enforces/aborts on failure itself —
    // confirm before relying on the return value.
    MetaUtils.checkWriteAuth(kc,
      cosSinkConf.db_name,
      cosSinkConf.table_name,
      cosSinkConf.extender.auth.clazz,
      compact(render(cosSinkConf.extender.auth.params)))

    // Load the table metadata (columns, COS access config, output path, ...).
    meta = MetaUtils.getCosMeta(kc,
      cosSinkConf.db_name,
      cosSinkConf.table_name,
      cosSinkConf.extender.meta.clazz,
      compact(render(cosSinkConf.extender.meta.params)),
      this)
    columnEntiy = meta.getColumnEntiy
    characterSet = MetaUtils.getTableCharacterSet(meta.getTableEntiy) // file character set
    this.sparkSession = kc.sparkSession
    // Side-effecting zero-arity method: call with parentheses.
    initCosFileSystem()
    this.submitTime = kc.conf.getString(SchedulerConstants.SUBMIT_TIME, "")
  }

  /**
    * Builds the Hadoop configuration carrying the COS credentials and opens
    * the corresponding FileSystem handle, caching it in `cosFs`.
    */
  def initCosFileSystem(): Unit = {
    val conf = addCosFileSystem()
    cosFs = FileSystem.get(conf)
  }

  /**
    * Merges the COS access configuration from the table metadata into the
    * Spark context's Hadoop configuration and returns that configuration.
    *
    * NOTE(review): appendCosHadoopConfigs presumably mutates and returns the
    * same Configuration instance, making the addResource call a self-add —
    * confirm against HadoopCosUtils before simplifying.
    */
  def addCosFileSystem(): org.apache.hadoop.conf.Configuration = {
    //    val hadoopConf = HadoopCosUtils.appendCosHadoopConfigs(sparkSession.sparkContext.hadoopConfiguration, cosConfig)
    val hadoopConfiguration = sparkSession.sparkContext.hadoopConfiguration
    val cosConfig = meta.getCosAccessConfig()
    val hadoopConf = HadoopCosUtils.appendCosHadoopConfigs(hadoopConfiguration, cosConfig)
    hadoopConf.setBoolean("dfs.support.append", true) // enable file append support
    sparkSession.sparkContext.hadoopConfiguration.addResource(hadoopConf)
    sparkSession.sparkContext.hadoopConfiguration
  }

  /**
    * Writes the incoming DataFrame to COS: parses the sink configuration,
    * applies column mapping/defaults, caches the rendered file into a working
    * directory, promotes it to the final output path and reports data status
    * plus operations-center metrics.
    *
    * @param kc        job context
    * @param module_id id of this sink module
    * @param config    raw JSON sink configuration
    * @param schema    upstream schema (column layout is taken from metadata instead)
    * @param dataFrame data to write
    */
  override def sink(kc: KhaosContext,
                    module_id: String,
                    config: JObject,
                    schema: Schema,
                    dataFrame: DataFrame): this.type = {

    // Parse the JSON configuration into the typed sink config.
    implicit val formats = DefaultFormats
    val cosSinkConf = parse(compact(render(config)), true).extract[CosSinkConfig]
    init(kc, cosSinkConf)

    isAddHeader = "true".equals(cosSinkConf.add_head)
    writeOption = cosSinkConf.write_option
    val jobId = kc.conf.getString(SchedulerConstants.JOB_INSTANCE_ID)
    val objectKeySuffix = MetaUtils.getCosDynamicsPath(kc)

    val workDir = s"${WORKING_DIR}/${jobId}"
    val outputDir = meta.getCosOutputPath
    custFileName = cosSinkConf.file_name.getOrElse("") // added in release 930: optional user-defined file name

    // Apply field mapping / default values, then convert column types.
    val tempdf = DataframeUtils.setDefaultValue(cosSinkConf.extract_fields, meta.getColumnEntiy, dataFrame)
    var df = DataframeUtils.convertDataType(cosSinkConf.extract_fields, tempdf)

    // Re-sort the columns: setDefaultValue may have reordered them.
    df = DataframeUtils.sortDataCol(df, meta.getColumnEntiy)

    logInfo("开始缓存数据至临时目录...")
    val cacheSuccess = cacheData(df, objectKeySuffix, workDir)
    if (cacheSuccess) {
      logInfo("缓存数据移动至最终存储目录...")
      moveDataToOutput(workDir, outputDir)
      deleteCacheData(workDir)

      // Report the data status of the written object.
      DataframeUtils.reportDataStatusCos(kc,
        cosDataStatusInfo,
        cosSinkConf.db_name, cosSinkConf.table_name,
        cosSinkConf.extender.meta.clazz,
        compact(render(cosSinkConf.extender.meta.params)))

      // Report operations-center metrics; a failure here must not fail the job.
      try {
        val metric: SyncProcessDataMetric = CenterMetricUtils.buildSyncProcessDataMetric(kc)
        if (writeOption.equals(MetaDataConstants.APPEND)) {
          // Append mode triggers two actions, so the accumulator counted twice.
          metric.setProcessDataLValue(accu.value.toLong / 2)
        } else {
          metric.setProcessDataLValue(accu.value.toLong)
        }

        CenterMetricUtils.reportSyncProcessData(metric, kc)
        val cosProcessDataMetric: CosProcessDataMetric = CenterMetricUtils.buildCosProcessDataMetric(kc)
        val nowFileSize: Long = cosDataStatusInfo.getFileSize.toLong
        log.info(s"nowFileSize is $nowFileSize")
        // When a previous object existed (append mode), report only the delta.
        var reportProcessSize: Long = 0
        if (oldFileSize != 0) {
          reportProcessSize = nowFileSize - oldFileSize
        } else {
          reportProcessSize = nowFileSize
        }
        cosProcessDataMetric.setProcessDataLValue(reportProcessSize)
        CenterMetricUtils.reportCosProcessData(cosProcessDataMetric, kc)
      } catch {
        case e: Exception =>
          // BUG FIX: the message string has no "{}" placeholder, so passing
          // e.getMessage as an argument silently dropped it. Pass the
          // exception itself so SLF4J logs the full stack trace.
          log.error("上报异常", e)
      }

    }
    this
  }

  /**
    * Prepends a header row (the field names joined by `delimiter`) to a
    * single-column DataFrame that already contains the rendered data lines.
    *
    * @param dataFrame original DataFrame supplying the schema for the header
    * @param tempDF    processed single-column ("value") DataFrame of data lines
    * @param delimiter field delimiter used to join the header names
    * @return header + data collapsed to a single partition
    */
  def addFileHeader(dataFrame: DataFrame, tempDF: DataFrame, delimiter: String): DataFrame = {
    // BUG FIX: removed the dead `rowArr` RDD the original built from
    // tempDF.rdd and never used (lazy, so it also did no work).
    val schemaString = dataFrame.schema.fieldNames.mkString(delimiter)
    val ss = dataFrame.sparkSession
    import ss.implicits._
    // Header as a one-row DataFrame with the same single "value" column.
    val schemaRdd = ss.sparkContext.makeRDD(Array(schemaString)).toDF("value").coalesce(1)
    logInfo(s"=>>> 添加表头 ...${schemaRdd.collect().mkString(delimiter)}")
    // NOTE(review): union row order is only stable here because both sides
    // are single-partition; confirm before changing the coalesce calls.
    (schemaRdd.union(tempDF)).coalesce(1)
  }

  /**
    * Prepends a header row by collecting the data rows onto the driver and
    * rebuilding a single-column, single-partition DataFrame.
    *
    * WARNING: collect() pulls the whole dataset into driver memory.
    *
    * @param dataFrame original DataFrame supplying the schema for the header
    * @param tempDF    processed single-column ("value") DataFrame of data lines
    * @param delimiter field delimiter used to join the header names
    */
  def addFileHeaderByRDD(dataFrame: DataFrame, tempDF: DataFrame, delimiter: String) = {
    val schemaString = dataFrame.schema.fieldNames.mkString(delimiter)
    // Header row first, then every collected data row.
    val withHeader: Seq[Row] = Row(schemaString) +: tempDF.rdd.collect.toSeq

    val spark = dataFrame.sparkSession
    logInfo(s"=>>> 添加表头 ...${schemaString}")
    val structType = StructType(Array(StructField("value", StringType, true)))
    val rdd = spark.sparkContext.makeRDD(withHeader, 1)
    spark.createDataFrame(rdd, structType).coalesce(1)
  }

  /**
    * Prepends a header row (the field names joined by `delimiter`) using a
    * flatMap over the data, without collecting to the driver.
    *
    * @param dataFrame original DataFrame supplying the schema for the header
    * @param tempDF    processed single-column DataFrame of data lines
    * @param delimiter field delimiter used to join the header names
    * @return header + data in a single partition
    */
  def addFileHeaderByDf(dataFrame: DataFrame, tempDF: DataFrame, delimiter: String) = {
    val schemaString = dataFrame.schema.fieldNames.mkString(delimiter)

    // BUG FIX: `flag` lives in the task closure and is deserialized fresh for
    // every partition, so the original inserted one header per partition.
    // Collapsing to a single partition FIRST guarantees exactly one header.
    val single = tempDF.coalesce(1)
    var flag: Boolean = true
    val value: Dataset[Row] = single.flatMap(line => {
      if (flag) {
        flag = false
        List(Row(schemaString), line)
      } else {
        List(line)
      }
    })(RowEncoder(tempDF.schema))
    value.coalesce(1)
  }

  /**
    * Overwrites the target object: deletes any previous data/control/ddl
    * files belonging to `outputFile`, then renames the cached temp file
    * into place.
    *
    * @param tmpCacheDatFile cached temporary data file to promote
    * @param outputFile      final output object path
    * @return the final data file path
    */
  def doOverWrite(tmpCacheDatFile: String, outputFile: String): String = {
    // Remove stale artifacts (data, control, ddl) before promoting the cache file.
    val staleFiles = Seq(
      outputFile,
      ExchangeFileUtils.genControlFileName(outputFile),
      ExchangeFileUtils.genDDLFileName(outputFile))
    staleFiles.foreach(path => HdfsUtils.deletePath(path, cosFs))
    HdfsUtils.renameFile(tmpCacheDatFile, outputFile, cosFs)
    outputFile
  }

  /**
    * Append mode: reads the previously written object (if any) and unions it
    * in front of the new data so old rows come first in the final file.
    *
    * @param fileFormat  file format ("json" or delimited text)
    * @param oldFilePath path of the previously written object
    * @param data        newly produced data
    * @return old + new data (old first), or the new data unchanged when no old file exists
    */
  def doAppend(fileFormat: String, oldFilePath: String, data: DataFrame) = {
    var targetData = data
    val tuple: (DataFrame, Boolean) = readOldFile(fileFormat, oldFilePath, targetData)
    var oldDataFrame: DataFrame = tuple._1
    val skipHeader: Boolean = tuple._2
    isAddHeader = skipHeader

    // Temporary column used only for ordering so old rows stay in front;
    // it is dropped again after the ordered union. The random suffix avoids
    // clashing with a real column name.
    var sortFieldName = "row_number_over" + Random.nextInt(10)
    while (targetData.columns.contains(sortFieldName)) {
      sortFieldName = sortFieldName + Random.nextInt(10)
    }
    if (null != oldDataFrame) {
      // Old rows get "1", new rows get null; descending order puts old first.
      oldDataFrame = oldDataFrame.withColumn(sortFieldName, lit(1).cast(StringType))
      targetData = targetData.withColumn(sortFieldName, lit(null).cast(StringType))
      // orderBy internally samples the data, triggering one extra action; the
      // final write triggers another (relevant to the accumulator double-count).
      targetData = oldDataFrame.unionByName(targetData).orderBy(col(sortFieldName).desc)
    }
    targetData = targetData.drop(sortFieldName)
    targetData
  }

  /**
    * Renders `data` into the working directory as a single file (fixed-length
    * text, delimited text, or json), promotes it to the final object path and
    * writes the matching control/ddl files. Populates `cosDataStatusInfo`
    * with the resulting file location, name and size.
    *
    * @param data              data to cache
    * @param objectKeyFileName last path segment of the object, e.g. cosn://${bucket}/dir1/{objectKeySuffix}
    * @param workDir           temporary cache directory
    * @return true on success (failures throw instead of returning false)
    */
  def cacheData(data: DataFrame, objectKeyFileName: String, workDir: String): Boolean = {
    import org.apache.spark.sql.functions._
    var successFlag = false
    var fileSize: String = ""
    var fileName: String = ""
    var outputFile = ""
    var oldFilePath = ""
    cosDataStatusInfo = new CosDataStatusInfo
    try {
      var delimiter = meta.getCosFileDelimiter()
      var targetData = data

      logInfo(s"=>>> sink delimiter: $delimiter")

      val fileFormat = MetaUtils.getFileFormat(meta.getTableEntiy).trim.toLowerCase

      // added 10/30: a user-supplied file name overrides the generated object name
      if (StringUtils.isNotBlank(custFileName)) {
        outputFile = getOutputObject(custFileName)
        oldFilePath = outputFile
      } else if (fileFormat.equals("json")){
        logInfo("equals json")
        outputFile = getOutputObject(objectKeyFileName)
        oldFilePath = ExchangeFileUtils.genJsonFileName(outputFile)
      } else{
        logInfo("equals csv")
        outputFile = getOutputObject(objectKeyFileName)
        oldFilePath = ExchangeFileUtils.genDatFileName(outputFile)
      }
      logInfo(s"outputFile:$outputFile, oldFilePath:$oldFilePath")
      // Wrap the data so rows are counted into `accu` while it is written.
      val (tempData, accumulator) = DataframeUtils.calculateDataNum(sparkSession, targetData, "CosSink")
      accu = accumulator

      targetData = doAppend(fileFormat, oldFilePath, tempData)

      // Fixed-length handling.
      val isField = MetaUtils.isfixedLength(meta.getTableEntiy)
      if (isField) {
        // Replace nulls so the generated file keeps the expected number of delimiters.
        targetData = replaceNa(targetData)

        // Fixed-length rendering.
        var tempDF = targetData
        val columns: Array[String] = targetData.columns
        tempDF = tempDF.select(columns.map(col_name => col(col_name).cast(StringType)): _*)
        tempDF = DataframeUtils.UnLoading(tempDF, meta.getColumnEntiy, delimiter) // result has a single column named "value"

        if (isAddHeader) {
          tempDF = addFileHeader(targetData, tempDF, delimiter)
        }
        logInfo(s"=>>> 定长导出文件 ...")
        tempDF.coalesce(1).write.format("text").mode("append").save(s"${workDir}")
      } else {
        // Variable-length rendering.
        logInfo(s"=>>> 不定长导出文件 ...")
        if (StringUtils.isBlank(delimiter)) {
          delimiter = MetaDataConstants.DATA_SEPARATOR
        }

        fileFormat match {
          case "csv" => {
            // Replace nulls so the generated file keeps the expected number of delimiters.
            targetData = replaceNa(targetData)
            val colNames = targetData.columns
            val colArr = new Array[Column](colNames.size)
            for (index <- 0 until colNames.size) {
              colArr(index) = targetData.col(colNames(index))
            }
            // Concatenate all columns into a single delimited "value" field.
            val tempDF = targetData.select(concat_ws(delimiter, colArr: _*).cast(StringType).as("value"))
            if (isAddHeader) {
              targetData = addFileHeaderByRDD(targetData, tempDF, delimiter)
              targetData.write.format("text").mode("append").save(s"${workDir}")
            } else {
              tempDF.coalesce(1).write.format("text").mode("append").save(s"${workDir}")
            }
          }
          case "json" => {
            targetData.coalesce(1).write.format("json").save(s"${workDir}")
          }
          case _ => throw new Exception("=>>> 暂时不支持写入该数据类型！")
        }
      }
      logInfo(s"=>>>targetData schema:${targetData.printSchema()}")
      // File-name filtering — presumably excludes names starting with "_"
      // (e.g. Spark's _SUCCESS); confirm HdfsUtils.listFiles semantics.
      val fileList = HdfsUtils.listFiles(workDir, "_", cosFs)
      for (index <- 0 until fileList.size()) {
        val tmpCacheDatFile = workDir + "/" + fileList.get(index)

        // Data file name for this cache file.
        var outputDatFile = ""
        // added 10/30: a user-supplied file name is used verbatim
        if (StringUtils.isNotBlank(custFileName)) {
          outputDatFile = outputFile
        } else if ("json".equals(fileFormat)) {
          outputDatFile = ExchangeFileUtils.genJsonFileName(outputFile)
        } else {
          outputDatFile = ExchangeFileUtils.genDatFileName(outputFile)
        }

        fileName = doOverWrite(tmpCacheDatFile, outputDatFile) // added 10/30

        // Control file.
        val ctrlFileName = ExchangeFileUtils.genControlFileName(outputFile)
        fileSize = generateCtrlFile(outputDatFile, ctrlFileName)
        // DDL file.
        val ddlFileName = ExchangeFileUtils.genDDLFileName(outputFile)
        generateDDLFile(outputDatFile, ddlFileName)

      }
      successFlag = true
      logInfo("缓存数据成功")
    } catch {
      case e: Throwable => {
        e.printStackTrace()
        logError("缓存数据失败,错误信息:" + e.getMessage + ",错误引发原因:" + e.getCause)
        throw new Exception("缓存数据失败,错误信息:" + e.getMessage + ",错误引发原因:" + e.getCause)
      }
    }
    // 2020-09-01: report base path + the sub-directory configured on the sync page.
    val fileLocation: String = genDataStatusLocation(fileName)
    cosDataStatusInfo.setFileLocation(fileLocation)
    cosDataStatusInfo.setFileName(fileName.split("/").last)
    cosDataStatusInfo.setFileSize(fileSize)
    successFlag
  }

  /**
    * Append mode: when the old object exists, reads its content back as a
    * DataFrame so it can be unioned in front of the new data. Also records
    * the old file size (for metric deltas) and detects whether the old file
    * already starts with a header row.
    *
    * @param fileFormat  "json" or delimited text
    * @param oldFilePath path of the previously written object
    * @param data        new data (used only for its header field names)
    * @return (old data, or null when absent; whether the old file has a header row)
    */
  def readOldFile(fileFormat: String, oldFilePath: String, data: DataFrame): (DataFrame, Boolean) = {
    var oldata: DataFrame = null
    var skipHeader: Boolean = isAddHeader
    val source = new CosSource
    if (writeOption.toLowerCase.equals(MetaDataConstants.APPEND)) {
      if (HdfsUtils.isExists(oldFilePath, cosFs)) {
        oldFileSize = getFileSize(oldFilePath)
        log.info(s"oldFileSize is $oldFileSize")
        fileFormat match {
          case "json" => oldata = sparkSession.read.format("json").load(oldFilePath)
          case _ => oldata = {
            val delimiter = meta.getCosFileDelimiter

            val csvRddData: RDD[String] = sparkSession.sparkContext.textFile(oldFilePath)

            val schemaString = data.schema.fieldNames.mkString(delimiter)
            var rddData: RDD[String] = null
            // Decide whether the first line of the old file is a header row
            // (spaces removed before comparing against the joined field names).
            if (csvRddData.isEmpty()) {
              skipHeader = false
            } else {
              val firstLine: String = csvRddData.first().replace(" ", "")
              if (firstLine.equals(schemaString)) {
                skipHeader = true
              } else {
                skipHeader = false
              }
            }

            rddData = source.readTextFileNotCheckNull(sparkSession, oldFilePath, skipHeader, characterSet)
            val value: RDD[String] = source.fixedRead(rddData, delimiter, meta)

            // Collect every field name and type declared on the exchange interface.
            val columnArr = new ArrayBuffer[(String, String)]()
            val columnInfoMetaList: java.util.List[DmTableColumn] = meta.getColumnEntiy
            for (i <- 0 until columnInfoMetaList.size) {
              columnArr += (columnInfoMetaList.get(i).getColName -> columnInfoMetaList.get(i).getColType)
            }
            //Generate the schema based on the string of schema
            val schema: org.apache.spark.sql.types.StructType = SparkJobHelper.dynamicBuildDFSchema(columnArr)

            val rowRDD = source.convert2Rows(schema, delimiter, value)
            sparkSession.createDataFrame(rowRDD, schema)
          }
        }
      }
    }
    (oldata, skipHeader)
  }


  /**
    * Normalizes nulls in a DataFrame: numeric columns are filled with 0,
    * every other column is cast to string and filled with the NULL marker.
    *
    * @param data DataFrame to clean
    * @return DataFrame with no null cells
    */
  def replaceNa(data: DataFrame): DataFrame = {
    // Cast every non-numeric column to string so the string fill below applies.
    val casted = data.schema.fields.foldLeft(data) { (df, field) =>
      if (field.dataType.isInstanceOf[NumericType]) df
      else df.withColumn(field.name, df.col(field.name).cast(StringType))
    }
    casted.na.fill(NA_FILL_NUMERIC).na.fill(NA_FILL_STRING)
  }

  /**
    * Joins the table's COS base path with a relative object name while
    * avoiding "//" in the result.
    *
    * @param objectKeyFileName object name (may itself start with "/")
    * @return normalized object path
    */
  def getOutputObject(objectKeyFileName: String): String = {
    val metaPath: String = meta.getCosOutputPath
    val baseEndsWithSlash = metaPath.endsWith("/")
    val nameStartsWithSlash = objectKeyFileName.startsWith("/")
    // NOTE(review): stripSuffix("/") in the double-slash branch looks like it
    // was meant to be stripPrefix("/"); the trailing replaceAll masks the
    // difference for most inputs — confirm before changing.
    val objectName =
      if (baseEndsWithSlash && nameStartsWithSlash) metaPath + objectKeyFileName.stripSuffix("/")
      else if (!baseEndsWithSlash && !nameStartsWithSlash) metaPath + "/" + objectKeyFileName
      else metaPath + objectKeyFileName
    HadoopCosUtils.keyToPath(objectName).replaceAll("//", "/")
  }

  /**
    * Writes the control (.ctrl) XML file describing the data file: record
    * count, byte size, submit time and generation time.
    *
    * @param datFileName  data file name
    * @param ctrlFileName control file name
    * @return the data file size in bytes, as a string
    */
  def generateCtrlFile(datFileName: String, ctrlFileName: String): String = {
    // Counting records forces a full read of the written object.
    val recordCount = sparkSession.sparkContext.textFile(datFileName).count()
    val fileSize = cosFs.getContentSummary(new Path(datFileName)).getLength
    val controlFile = new ControlFile(DataTypeEnum.TXT.toString,
      datFileName,
      String.valueOf(recordCount),
      String.valueOf(fileSize),
      submitTime,
      DateUtils.getNowTime())
    controlFile.writeXml(ctrlFileName, cosFs)
    String.valueOf(fileSize)
  }

  /**
    * Writes the DDL XML file describing the data file structure (field names
    * and types taken from the table metadata).
    *
    * @param datFileName data file name
    * @param ddlFileName ddl file name
    */
  def generateDDLFile(datFileName: String, ddlFileName: String): Unit = {
    import scala.collection.JavaConverters._
    val ddl = new StructFileDDL()
    ddl.setFileName(datFileName)
    ddl.setFieldCount(String.valueOf(meta.getColumnEntiy.size))
    ddl.setFileVersion("1.0")
    ddl.setIsfixedlength("1")
    // One StructFieldEntity per metadata column, in metadata order.
    val fields = new util.ArrayList[StructFieldEntity]()
    meta.getColumnEntiy.asScala.foreach { column =>
      fields.add(new StructFieldEntity(column.getColName, column.getColType))
    }
    ddl.setFielddescription(fields)
    ddl.writeXml(ddlFileName, cosFs)
  }

  /**
    * Moves the cached files from the working directory to the final output
    * directory.
    *
    * @param cacheDir  cache (working) directory
    * @param outputDir final output directory
    */
  def moveDataToOutput(cacheDir: String, outputDir: String): Unit = {
    HdfsUtils.moveFiles(cacheDir, outputDir, cosFs)
  }

  /**
    * Deletes the cache directory once its contents have been promoted.
    *
    * @param cacheDir cache directory
    */
  def deleteCacheData(cacheDir: String): Unit = {
    HdfsUtils.deletePath(cacheDir, cosFs)
  }

  /**
    * Derives the directory portion of a data-status location from a file
    * name that may contain sub-directories, e.g. "aaa/bb.dat" -> "/aaa" and
    * "bb.dat" -> "".
    *
    * @param fileName base path plus the job's file name, e.g. /aaa/bb.dat or bb.dat
    * @return normalized parent directory without a trailing "/"
    */
  def genDataStatusLocation(fileName: String): String = {
    // Anchor at the root, then drop the final (file-name) segment.
    val segments = ("/" + fileName).split("/")
    val parentDir = segments.dropRight(1).mkString("/")
    // Collapse slash runs produced by the concatenation, then trim the tail.
    parentDir.replaceAll("///", "//").replaceAll("//", "/").stripSuffix("/")
  }

  /**
    * Returns the size in bytes of the given file as reported by the COS file
    * system's content summary.
    *
    * @param datFileName file path
    * @return file length in bytes
    */
  def getFileSize(datFileName: String): Long =
    cosFs.getContentSummary(new Path(datFileName)).getLength
}
