package com.zyuc.spark.merge

import java.io.ByteArrayInputStream

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.IOUtils
import org.apache.spark.sql.SparkSession

import scala.collection.mutable
import scala.util.control.NonFatal

/**
  * Created by zhoucw on 2018-09-05 at 1:56 PM.
  */
object MergeMain {

  /**
    * Appends a merge log message to a file on HDFS, retrying on failure.
    * Up to 3 attempts are made in total, sleeping 10 seconds between
    * attempts; the exception from the final failed attempt propagates
    * to the caller (same contract as the original nested try/catch).
    *
    * @param mergeMsg message text to append to the log file
    * @param conf     Hadoop configuration used to obtain the FileSystem
    * @param logFile  HDFS path of the log file
    */
  def logMerge(mergeMsg: String, conf: Configuration, logFile: String): Unit = {

    // conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER")
    // conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true")
    // Appending to an existing HDFS file requires append support.
    conf.setBoolean("dfs.support.append", true)

    val fileSystem = FileSystem.get(conf)

    val maxAttempts = 3
    var attempt = 1
    var done = false
    while (!done) {
      try {
        writeData2Hdfs(fileSystem, mergeMsg, logFile)
        done = true
      } catch {
        // Retry only on non-fatal errors (NonFatal excludes e.g.
        // InterruptedException); the last attempt's failure propagates.
        case NonFatal(_) if attempt < maxAttempts =>
          attempt += 1
          Thread.sleep(10000)
      }
    }
  }

  /**
    * Appends a string to an HDFS file, creating the file first if it
    * does not already exist.
    *
    * @param fileSystem HDFS file system handle
    * @param data       text to append (encoded with the platform default charset)
    * @param hdfsFile   HDFS path of the target file
    */
  def writeData2Hdfs(fileSystem: FileSystem, data: String, hdfsFile: String): Unit = {
    val path = new Path(hdfsFile)
    if (!fileSystem.exists(path)) {
      fileSystem.createNewFile(path)
    }
    // fileSystem.setReplication(path, 1)
    // copyBytes with close=true closes both streams, even when the copy fails.
    val in = new ByteArrayInputStream(data.getBytes())
    val out = fileSystem.append(path)
    IOUtils.copyBytes(in, out, 4096, true)
  }

  /**
    * Entry point: reads job parameters from the Spark configuration
    * (spark.merge.*), builds one input path per hid, runs the merge via
    * MergeService, and optionally appends the result message to an HDFS log.
    */
  def main(args: Array[String]): Unit = {
    // NOTE(review): master("local[*]") is hard-coded for local testing;
    // the commented builder below is the cluster-submission form.
    val spark = SparkSession.builder().enableHiveSupport().master("local[*]").appName("test").getOrCreate()
    //val spark = SparkSession.builder().enableHiveSupport().getOrCreate()

    val sc = spark.sparkContext
    val conf = sc.hadoopConfiguration
    val fileSystem = FileSystem.get(conf)

    // Job parameters, overridable via --conf spark.merge.*
    val mergeRootPath = sc.getConf.get("spark.merge.mergeRootPath", "/hadoop/accesslog_etl/output/merge")
    val dataTime = sc.getConf.get("spark.merge.dataTime", "201808012350") // expected format: yyyyMMddHHmm
    val mergeTimes = sc.getConf.get("spark.merge.mergeTimes", "1") // which merge round this is
    val hids = sc.getConf.get("spark.merge.hids", "317,327") // comma-separated hid list
    val inputBasePath = sc.getConf.get("spark.merge.inputBasePath", "/hadoop/accesslog_etl/output/data")
    val filterFileSizeStr = sc.getConf.get("spark.merge.filterFileSize", "12000")
    val boxVolumeStr = sc.getConf.get("spark.merge.boxVolume", "120000")
    val threadNumStr = sc.getConf.get("spark.merge.threadNum", "10")
    val ifLog = sc.getConf.get("spark.merge.ifLog", "1")
    val logFile = sc.getConf.get("spark.merge.logFile", "/temp/merge.log")

    val mergeBatchId = dataTime + "_" + mergeTimes
    val filterFileSize = filterFileSizeStr.toLong
    val boxVolume = boxVolumeStr.toLong
    val threadNum = threadNumStr.toInt

    // Slice yyMMdd / HH / mm out of dataTime (assumes exactly 12 characters,
    // yyyyMMddHHmm — a shorter value throws StringIndexOutOfBoundsException).
    val d = dataTime.substring(2, 8)
    val h = dataTime.substring(8, 10)
    val m5 = dataTime.substring(10, 12)
    // val, not var: the map is mutated in place but the reference never changes.
    val inputPathMap = mutable.Map[String, String]()
    hids.split(",").foreach { hid =>
      inputPathMap.put(hid, s"$inputBasePath/hid=$hid/d=$d/h=$h/m5=$m5")
    }

    val mergeService = new MergeService(fileSystem, spark, mergeRootPath, mergeBatchId, inputPathMap, filterFileSize, boxVolume, threadNum)

    val mergeMsg = mergeService.handleMerge()

    if (ifLog == "1") {
      logMerge(mergeMsg, conf, logFile)
    }
  }
}
