package com.zyuc.spark.merge

import java.util
import java.util.concurrent.{Executors, TimeUnit}

import com.zyuc.java.hdfs.FileBox
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.log4j.Logger
import org.apache.spark.sql.functions.lit

import scala.collection.mutable

/**
  * Merges small ORC files under a set of HDFS input directories into larger files.
  *
  * Workflow: group each input directory's files into "boxes" of roughly
  * `boxVolume` bytes, stage each box under a per-batch temp directory, load the
  * staged files as DataFrames (one coalesced output file per box group), write
  * the union partitioned by input-path id, then move the merged files back to
  * their original directories and delete the batch working directory.
  *
  * Created by zhoucw on 18-9-3 (translated from the original 下午2:25 header).
  */
class MergeService {

  val logger = Logger.getLogger("MergeService")

  // Root directory under which the per-batch working directory is created.
  private var mergeRootPath: String = _
  // ${mergeRootPath}/${mergeBatchId}/temp - staging area for boxed input files.
  private var mergeTempPath: String = _
  // ${mergeRootPath}/${mergeBatchId}/data - Spark write target, partitioned by inputPathId.
  private var mergeDataPath: String = _
  private var mergeBatchId: String = _
  // Target byte size of one box, i.e. of one merged output file.
  private var boxVolume: Long = _
  // Files larger than this many bytes are left in place and not merged.
  private var filterFileSize: Long = _
  // inputPathId -> list of boxes (groups of small files) computed for that path.
  private var boxListsMap = mutable.Map[String, util.ArrayList[FileBox]]()
  private var threadNum: Int = _
  // inputPathId -> HDFS input directory.
  private var inputPathMap = mutable.Map[String, String]()
  private var fileSystem: FileSystem = _
  private var spark: SparkSession = _
  // Partition column used to route merged rows back to their source directory.
  private val INPUT_PATH_FIELD_NAME = "inputPathId"

  /**
    * @param fileSystem     HDFS file system handle
    * @param spark          active Spark session used to read/write ORC
    * @param mergeRootPath  root directory for merge working directories
    * @param mergeBatchId   unique batch id; ${mergeRootPath}/${mergeBatchId} must not already exist
    * @param inputPathMap   inputPathId -> input directory whose small files are merged
    * @param filterFileSize files larger than this many bytes are skipped
    * @param boxVolume      target byte size of one merged file; must exceed filterFileSize
    * @param threadNum      worker threads used to stage files and build DataFrames (1..100)
    */
  def this(fileSystem: FileSystem, spark: SparkSession,
           mergeRootPath: String, mergeBatchId: String,
           inputPathMap: mutable.Map[String, String],
           filterFileSize: Long, boxVolume: Long,
           threadNum: Int) {
    // An auxiliary constructor must begin by invoking another constructor;
    // the bare `this` of the original is not a valid self-invocation.
    this()

    this.spark = spark
    this.fileSystem = fileSystem

    this.mergeRootPath = mergeRootPath
    this.mergeTempPath = mergeRootPath + "/" + mergeBatchId + "/temp"
    this.mergeDataPath = mergeRootPath + "/" + mergeBatchId + "/data"
    this.mergeBatchId = mergeBatchId

    this.filterFileSize = filterFileSize
    this.boxVolume = boxVolume
    this.inputPathMap = inputPathMap

    this.threadNum = threadNum
  }


  /**
    * Validates the merge configuration: input paths must be unique and
    * non-empty, sizes and thread count must be in range, and the batch
    * working directory must not exist yet.
    *
    * All checks run (none short-circuits) so every problem is logged at once.
    *
    * @return true when every check passes
    */
  private def validate(): Boolean = {

    var result = true

    // Duplicate values would make two ids merge the same directory.
    val valueSetSize = inputPathMap.values.toSet.size

    if (valueSetSize != inputPathMap.size || valueSetSize < 1) {
      logger.error(s"valueSetSize:${valueSetSize}, inputPathMap.size:${inputPathMap.size}, required: valueSetSize == inputPathMap.size && valueSetSize >= 1")
      result = false
    }

    if (boxVolume <= filterFileSize) {
      logger.error(s"boxVolume:${boxVolume}, filterFileSize:${filterFileSize}, required: boxVolume > filterFileSize")
      result = false
    }
    if (boxVolume < 0 || filterFileSize < 0) {
      logger.error(s"boxVolume:${boxVolume}, filterFileSize:${filterFileSize}, required: boxVolume>=0 && filterFileSize>=0")
      result = false
    }

    // threadNum == 0 would make Executors.newFixedThreadPool throw later,
    // so it must be rejected here (the original check let 0 through).
    if (threadNum <= 0 || threadNum > 100) {
      logger.error(s"threadNum:${threadNum},  required: 0<threadNum<=100")
      result = false
    }

    if (mergeRootPath.length <= 1 || mergeBatchId.length < 1) {
      logger.error(s"mergeRootPath:${mergeRootPath},mergeBatchId:${mergeBatchId}, required: mergeRootPath.length>1, mergeBatchId.length>=1")
      result = false
    }

    // A pre-existing batch directory means this batch id was already used.
    if (fileSystem.exists(new Path(mergeRootPath + "/" + mergeBatchId))) {
      logger.error(s"mergePath:${mergeRootPath}/${mergeBatchId} already exists")
      result = false
    }

    result
  }


  /**
    * Walks every input directory and records its box list (groups of small
    * files up to boxVolume bytes) into boxListsMap. Paths that yield no boxes
    * are skipped.
    *
    * @param inputPathMap inputPathId -> input directory
    */
  private def setFileBoxLists(inputPathMap: mutable.Map[String, String]) = {

    val fileBoxService = new FileBoxService(filterFileSize, boxVolume)
    inputPathMap.foreach { case (inputPathId, inputPath) =>
      val fileBoxList = fileBoxService.getBoxList(inputPath, fileSystem)
      if (fileBoxList != null && fileBoxList.size() > 0) {
        boxListsMap.put(inputPathId, fileBoxList)
      }
    }
  }


  /**
    * Moves every file of a box list into the staging directory, one
    * subdirectory per box: ${tempBoxParentPath}/${boxId}/.
    *
    * @param inputPath         source directory of the files
    * @param tempBoxParentPath staging parent directory
    * @param boxList           boxes whose files are moved
    */
  private def moveSingleBoxList2Temp(inputPath: String, tempBoxParentPath: String,
                                     boxList: util.ArrayList[FileBox]) = {

    val boxListIterator = boxList.listIterator()
    while (boxListIterator.hasNext) {
      val box = boxListIterator.next()
      val fileListIterator = box.getLinkedList.listIterator()
      val boxId = box.getId
      while (fileListIterator.hasNext) {
        val hfile = fileListIterator.next().getFileName
        renameFile(fileSystem, new Path(inputPath + "/" + hfile), new Path(tempBoxParentPath + "/" + boxId + "/" + hfile))
      }
    }
  }


  /**
    * Loads every box directory under tempBoxParentPath as ORC and unions them
    * into a single DataFrame. Each box is coalesced to 1 partition so each box
    * produces exactly one merged output file.
    *
    * @param tempBoxParentPath staging parent directory containing one subdirectory per box
    * @return the unioned DataFrame, or null when no box could be loaded
    */
  private def getSingleBoxListDataFrame(tempBoxParentPath: String) = {
    var resultDF: DataFrame = null
    fileSystem.globStatus(new Path(tempBoxParentPath + "/*")).foreach(x => {
      try {
        val df = spark.read.format("orc").load(x.getPath.toString).coalesce(1)
        if (resultDF == null) {
          resultDF = df
        } else {
          resultDF = resultDF.union(df)
        }
      } catch {
        case e: Exception =>
          // Best effort: an unreadable box is logged and skipped, the rest still merge.
          logger.error(s"inputPath ${tempBoxParentPath} cannot convert to dataFrame, " + e.getMessage)
      }
    })
    resultDF
  }


  /**
    * Moves the merged files of one partition directory back to the original
    * data directory, renaming them merge_${mergeBatchId}_${index}${extension}.
    *
    * @param mergeTempDataPath partition directory produced by the Spark write
    * @param dataPath          original input directory the files return to
    */
  private def moveMergedFilesOfSingleBoxList2Data(mergeTempDataPath: String, dataPath: String) = {
    var index: Int = 0
    // Zero-length files (e.g. _SUCCESS markers) are not moved.
    fileSystem.globStatus(new Path(mergeTempDataPath + "/*")).filter(_.getLen > 0).
      foreach(x => {
      val srcPath = x.getPath
      val newName = "merge_" + mergeBatchId + "_" + index + srcPath.getName.substring(srcPath.getName.lastIndexOf("."))
      index = index + 1
      renameFile(fileSystem, x.getPath, new Path(dataPath + "/" + newName))
    })
  }


  /**
    * Entry point: validates the configuration and runs the merge.
    *
    * @return the timing log of the merge, or "validate error" when validation fails
    */
  def handleMerge(): String =
    if (validate()) {
      mergeFiles()
    } else {
      logger.error("validate error")
      "validate error"
    }

  /**
    * Main driver of the small-file merge. Stages files, builds and writes the
    * merged DataFrame, moves results back, and cleans up the batch directory.
    *
    * @return a timing log line describing each phase's duration in millis
    */
  def mergeFiles(): String = {

    var begin = System.currentTimeMillis()
    var timeLog = "mergeBatchId:" + mergeBatchId

    try {

      // Build the box lists for every input path.
      setFileBoxLists(inputPathMap)

      timeLog = timeLog + ", groupBox:" + (System.currentTimeMillis() - begin)
      begin = System.currentTimeMillis()

      // Filled from multiple worker threads; mutable.HashSet is not
      // thread-safe, so every add below is synchronized on the set.
      val mergeDfSet = new mutable.HashSet[DataFrame]()

      // Stage each input path's files and load them as DataFrames in parallel.
      val executor = Executors.newFixedThreadPool(threadNum)

      boxListsMap.foreach { case (inputPathId, boxList) =>

        executor.submit(new Runnable {
          override def run(): Unit = {
            try {
              val inputPath = inputPathMap(inputPathId)
              val tempBoxParentPath = mergeTempPath + "/" + inputPathId

              moveSingleBoxList2Temp(inputPath, tempBoxParentPath, boxList)

              val df = getSingleBoxListDataFrame(tempBoxParentPath)
              if (df != null) {
                mergeDfSet.synchronized {
                  mergeDfSet.add(df.withColumn(INPUT_PATH_FIELD_NAME, lit(inputPathId)))
                }
              }
            } catch {
              // Without this, exceptions thrown inside the pool would be
              // swallowed by the unread Future returned by submit().
              case e: Exception =>
                logger.error(s"merge worker failed for inputPathId ${inputPathId}: " + e.getMessage, e)
            }
          }
        })

      }

      executor.shutdown()
      executor.awaitTermination(Long.MaxValue, TimeUnit.MINUTES)

      timeLog = timeLog + ", dataFrameSet:" + (System.currentTimeMillis() - begin)
      begin = System.currentTimeMillis()


      // Union all per-path DataFrames into one.
      var mergeDF: DataFrame = null
      mergeDfSet.iterator.foreach(df => {
        if (mergeDF == null) {
          mergeDF = df
        } else {
          mergeDF = mergeDF.union(df)
        }
      })

      timeLog = timeLog + ", unionDataFrameSet:" + (System.currentTimeMillis() - begin)
      begin = System.currentTimeMillis()

      // Write the merged data to mergeDataPath, partitioned by inputPathId.
      // Guard against the empty-input case, which previously caused an NPE.
      if (mergeDF != null) {
        mergeDF.write.format("orc").partitionBy(INPUT_PATH_FIELD_NAME).mode(SaveMode.Overwrite).save(mergeDataPath)
      } else {
        logger.warn(s"mergeBatchId:${mergeBatchId} produced no mergeable data, nothing to write")
      }

      timeLog = timeLog + ", saveMergedFiles:" + (System.currentTimeMillis() - begin)
      begin = System.currentTimeMillis()


      // Move the merged files back to their original data directories.
      // globStatus may return null when the target was never written.
      val partitionDirs = fileSystem.globStatus(new Path(mergeDataPath + "/*"))
      if (partitionDirs != null) {
        partitionDirs.foreach(p => {
          val pName = p.getPath.getName
          if (pName != "_SUCCESS") {
            // Partition directories are named ${INPUT_PATH_FIELD_NAME}=${inputPathId}.
            val inputPathId = pName.substring(pName.lastIndexOf("=") + 1)
            val inputPath = inputPathMap(inputPathId)
            val mergeTempDataPath = mergeDataPath + "/" + INPUT_PATH_FIELD_NAME + "=" + inputPathId
            moveMergedFilesOfSingleBoxList2Data(mergeTempDataPath, inputPath)
          }
        })
      }

      timeLog = timeLog + ", move2Data:" + (System.currentTimeMillis() - begin)
      begin = System.currentTimeMillis()

      // Remove the batch working directory.
      deleteMergeRootPath()
      timeLog = timeLog + ", deleteTempPath:" + (System.currentTimeMillis() - begin)

    } catch {
      case e: Exception =>
        // Log with the throwable so the stack trace lands in the log instead of stderr.
        logger.error("merge error: " + e.getMessage, e)
        timeLog = timeLog + ", exception:" + (System.currentTimeMillis() - begin)
    }
    logger.info(timeLog)
    timeLog + "\r\n"
  }

  /**
    * Recursively deletes the batch working directory ${mergeRootPath}/${mergeBatchId}.
    *
    * @return true when the delete succeeded
    */
  private def deleteMergeRootPath() = {
    fileSystem.delete(new Path(mergeRootPath + "/" + mergeBatchId), true)
  }


  /**
    * Renames (moves) a file, creating the destination's parent directory first
    * when it does not exist.
    *
    * @param fileSystem HDFS file system handle
    * @param srcPath    source file
    * @param dstPath    destination file
    * @return true when the rename succeeded
    */
  def renameFile(fileSystem: FileSystem,
                 srcPath: Path, dstPath: Path): Boolean = {

    if (!fileSystem.exists(dstPath.getParent)) {
      fileSystem.mkdirs(dstPath.getParent)
    }

    fileSystem.rename(srcPath, dstPath)
  }
}


/**
  * Ad-hoc driver for [[MergeService]]: merges the small files of two sample
  * accesslog partitions on a local Spark session.
  */
object MergeService1 {

  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder().enableHiveSupport().master("local[*]").appName("test").getOrCreate()
    val sc = spark.sparkContext
    val fileSystem = FileSystem.get(sc.hadoopConfiguration)

    val mergeRootPath = "/hadoop/accesslog_etl/output/merge"
    val mergeBatchId = "1808012350-3"
    // val, not var: the map itself is mutable and is never reassigned.
    val inputPathMap = mutable.Map[String, String]()
    inputPathMap.put("317", "/hadoop/accesslog_etl/output/data/hid=317/d=180801/h=23/m5=50")
    inputPathMap.put("327", "/hadoop/accesslog_etl/output/data/hid=327/d=180801/h=23/m5=50")
    // inputPathMap.put("323", "/hadoop/accesslog_etl/output/data/hid=327/d=180801/h=23/m5=50")

    // 128 MiB per box; Long arithmetic made explicit so the intent is clear
    // even though the product also fits in an Int.
    val boxVolume: Long = 128L * 1024 * 1024
    val filterFileSize: Long = 20006401

    val mergeService = new MergeService(fileSystem, spark, mergeRootPath, mergeBatchId, inputPathMap, filterFileSize, boxVolume, 10)

    mergeService.handleMerge()

  }
}
