package com.sugon.jgj

import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.sql.functions.lit

import scala.annotation.tailrec


object RepairData {

  /**
    * Repairs ORC data that is missing the `field_auto_timestamp` column.
    *
    * Steps:
    *   1. Parse arguments: source path, destination path, repair date, split count.
    *   2. Create a temporary exchange directory under the destination path.
    *   3. Read the source ORC files and add the missing column (constant `addDate`).
    *   4. Write the repaired data to the temporary directory, repartitioned.
    *   5. Move the part files into the destination with collision-free names, then clean up.
    *
    * Example:
    *   spark-submit --master yarn --num-executors 3 \
    *     SparkDemo-jar-with-dependencies.jar /admin/12/ysk/douhj0827/20* /cs 20200109023 4
    *
    * @throws IllegalArgumentException if fewer than 4 arguments are given, the repair
    *                                  date is shorter than 8 characters, or the split
    *                                  count is not a positive integer
    */
  def main(args: Array[String]): Unit = {
    if (args.length < 4) {
      throw new IllegalArgumentException("输入参数错误,参数依次为 '源路径地址 目标路径地址 修复日期 切分数量'")
    }

    // Input arguments.
    val sourcePath = args(0)
    val desPath = args(1)
    val addDate = args(2)
    val splitNums = args(3)

    // Fail fast with a clear message instead of a StringIndexOutOfBoundsException
    // later when addDate.substring(0, 8) is taken as the file-name prefix.
    require(addDate.length >= 8, s"修复日期参数长度不足8位: '$addDate'")
    // Validate the split count before the cluster job is launched.
    val numPartitions =
      try splitNums.toInt
      catch {
        case _: NumberFormatException =>
          throw new IllegalArgumentException(s"切分数量必须为整数: '$splitNums'")
      }
    require(numPartitions > 0, s"切分数量必须为正整数: '$splitNums'")

    // Hidden (dot-prefixed) temporary exchange directory under the destination.
    val tmpPath = s"${desPath}/.repair_tmp"
    val repairTmpDirPath = new Path(tmpPath)

    val spark = SparkSession.builder().appName("repair " + sourcePath + "_" + addDate + " _" + desPath)
      //      .master("local[*]")
      .enableHiveSupport()
      .getOrCreate()

    try {
      val hadoopConf = spark.sparkContext.hadoopConfiguration
      val fs = FileSystem.get(hadoopConf)
      if (!fs.exists(repairTmpDirPath)) {
        fs.mkdirs(repairTmpDirPath)
      }

      // Add the missing column as a constant and rewrite as ORC into the temp dir.
      val sourceDF: DataFrame = spark.read.orc(sourcePath)
      val repairDF: DataFrame = sourceDF.withColumn("field_auto_timestamp", lit(addDate))

      repairDF.repartition(numPartitions).write.format("orc").mode(SaveMode.Overwrite).save(tmpPath)

      // Move every part file (skipping the _SUCCESS marker) into the destination;
      // the file-name prefix is the first 8 characters of the repair date (yyyyMMdd).
      moveFiles(fs.listStatus(repairTmpDirPath)
        .filter(!_.getPath.toString.contains("_SUCCESS")).map(_.getPath.toString), tmpPath, desPath, fs, addDate.substring(0, 8))

      // Remove the temporary exchange directory recursively.
      fs.delete(repairTmpDirPath, true)
    } finally {
      // Always release the Spark session, even if the repair fails midway.
      spark.close()
    }
  }

  /**
    * Moves the given part files from `inputPath` to `outputPath`, renaming each to
    * `<inFileDatPrefix>_<n>_merge` where `n` is chosen so no existing file is overwritten.
    *
    * @param fileList        absolute paths of the files to move
    * @param inputPath       source directory prefix replaced in each file path
    * @param outputPath      destination directory prefix
    * @param fs              Hadoop file system used for mkdirs/exists/rename
    * @param inFileDatPrefix file-name prefix (e.g. yyyyMMdd); must not contain '_'
    *                        or the generated names become ambiguous
    * @throws IllegalStateException if a rename is refused by the file system —
    *                               previously this failure was silently ignored,
    *                               which could lose data without any signal
    */
  def moveFiles(fileList: Array[String], inputPath: String, outputPath: String, fs: FileSystem, inFileDatPrefix: String): Unit = {
    val destDir = new Path(outputPath)
    if (!fs.exists(destDir)) {
      // Create the destination directory on first use.
      fs.mkdirs(destDir)
    }
    var num = 0
    for (file <- fileList) {
      val originFileName = file.substring(file.lastIndexOf("/") + 1)
      // Defensive: callers may or may not have filtered the _SUCCESS marker already.
      if (originFileName != "_SUCCESS") {
        // Mirror the file's relative location under the destination directory.
        val replaced = file.replace(inputPath, outputPath)
        val destDirLocation = replaced.substring(0, replaced.lastIndexOf("/") + 1)
        // Advance past any pre-existing files with the same prefix.
        num = recursiveIncr(fs, destDirLocation, inFileDatPrefix, num)
        val destPath = new Path(destDirLocation + inFileDatPrefix + "_" + num + "_merge")
        num += 1
        // BUG FIX: rename's boolean result was ignored; a silent failure here
        // would drop the file without any error.
        if (!fs.rename(new Path(file), destPath)) {
          throw new IllegalStateException(s"Failed to move $file to $destPath")
        }
      }
    }
  }

  /**
    * Returns the smallest index >= `num` such that
    * `destLocationDir + inFileDatPrefix + "_" + index + "_merge"` does not exist,
    * so pre-existing files are never overwritten.
    *
    * BUG FIX: the previous version re-parsed the counter back out of the candidate
    * file name with indexOf("_")/lastIndexOf("_"), which silently breaks as soon as
    * `inFileDatPrefix` contains an underscore. Incrementing `num` directly is
    * equivalent in the valid case and robust in all cases. Also now guaranteed
    * tail-recursive via @tailrec.
    *
    * @param fs              Hadoop file system used for the existence checks
    * @param destLocationDir destination directory (with trailing '/')
    * @param inFileDatPrefix file-name prefix
    * @param num             first candidate index to try
    * @return the first free index
    */
  @tailrec
  def recursiveIncr(fs: FileSystem, destLocationDir: String, inFileDatPrefix: String, num: Int): Int = {
    val candidate = new Path(destLocationDir + inFileDatPrefix + "_" + num + "_merge")
    if (fs.exists(candidate)) recursiveIncr(fs, destLocationDir, inFileDatPrefix, num + 1)
    else num
  }


}
