package cn.ipanel.bigdata.boot.source.genre

import cn.ipanel.bigdata.boot.date.Hour
import cn.ipanel.bigdata.boot.logger.Logger
import cn.ipanel.bigdata.boot.source.{DataSource, File, Mapper}
import org.apache.commons.lang3.exception.ExceptionUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FileUtil, Path}
import org.apache.spark.rdd.RDD

/**
 * Author: lzz
 * Date: 2023/05/16 16:15
 */
abstract class Hdfs(genre: String) extends Mapper with File {

  import scala.util.control.NonFatal

  private[this] final lazy val SC = spark.sparkContext
  // Placeholder inside the configured pathname that gets substituted with a concrete yyyyMMdd day.
  private final val DATE = "$DATE"
  // Flume files embed a 13-digit epoch-millis timestamp, e.g. FlumeData.1692932416814[.tmp]
  private final val TimestampPattern = """\d{13}""".r

  override def getSourceGenre: DataSource.Genre = DataSource.GENRE_HDFS
  override def getSourceModel: DataSource.Model = DataSource.MODEL_READ
  override def getGenre: String = genre

  /**
   * Reads one day's worth of text data from HDFS.
   *
   * @param yyyyMMdd day to substitute for the `$DATE` placeholder in the configured path
   * @return the day's lines, or an empty RDD when the path is missing/unreadable
   *         (the failure is logged, never rethrown)
   */
  def toRdd(yyyyMMdd: String): RDD[String] = {
    val pathname = getPathname.replace(DATE, yyyyMMdd)
    Logger.I(s"Read Hdfs[$pathname]")
    try {
      val rdd = SC.textFile(pathname)
      // textFile is lazy: force an action here so a missing or empty path
      // fails inside this try and is turned into an empty RDD below.
      Logger.I("first: " + rdd.first())
      rdd
    } catch {
      // NonFatal: let OOM/InterruptedException and friends propagate.
      case NonFatal(e) =>
        Logger.E(
          s"""Read Hdfs[$pathname] Failed.
             | Because: ${ExceptionUtils.getStackTrace(e)}
             |""".stripMargin)
        SC.emptyRDD[String]
    }
  }

  /**
   * Reads Flume log files whose file-name timestamp falls within one hour
   * before and one hour after the given hour. Files are partitioned into
   * per-day directories, so the first/last hour of a day also consults the
   * neighbouring day's directory.
   *
   * @param hour centre of the +-1 hour window
   * @return matching log lines, or an empty RDD on failure (logged, never rethrown)
   */
  def toRddHours(hour: Hour): RDD[String] = {
    try {
      // Around midnight the +-1h window spills into the adjacent day directory.
      val paths: List[String] =
        if (hour.toHour == 0) getFile(hour.prev()) ::: getFile(hour)
        else if (hour.toHour == 23) getFile(hour.next()) ::: getFile(hour)
        else getFile(hour)
      Logger.I("hdfs read dictionary: " + paths.mkString(","))

      val rdd = spark.read.textFile(paths: _*).rdd
      // Force an action so an unreadable path fails here and is handled below.
      Logger.I(rdd.first())
      rdd
    } catch {
      case NonFatal(e) =>
        Logger.E(
          s"""Read Hdfs for hour[$hour] Failed.
             | Because: ${ExceptionUtils.getStackTrace(e)}
             |""".stripMargin)
        SC.emptyRDD[String]
    }
  }

  /**
   * Lists the Flume data files in the day directory containing `hour` whose
   * embedded epoch-millis timestamp lies strictly between `hour.prev().st`
   * and `hour.next().et`.
   *
   * Files without a recognisable 13-digit timestamp are skipped (rather than
   * aborting the whole listing with a NumberFormatException).
   *
   * @param hour hour whose day directory is listed and whose window filters files
   * @return fully-qualified paths of the matching files
   */
  def getFile(hour: Hour): List[String] = {
    import java.net.URI
    val pathname = getPathname
    // Keep everything before the $DATE placeholder and append the concrete day directory.
    val dayPath = pathname.substring(0, pathname.indexOf(DATE)) + hour.asDay.toDate.toString
    Logger.I("base path: " + dayPath)

    // One FileSystem handle for the whole listing, instead of one per file as before.
    val fs = FileSystem.get(URI.create(dayPath), new Configuration())
    val statuses = fs.listStatus(new Path(dayPath))

    // Hoist the loop-invariant window bounds out of the per-file filter.
    val lowerBound = hour.prev().st
    val upperBound = hour.next().et

    statuses.iterator
      // listStatus already returns FileStatus — no extra getFileStatus RPC needed.
      .filter(_.isFile)
      .flatMap { status =>
        // Handles both FlumeData.1692932416814 and in-flight FlumeData.1692932416814.tmp;
        // files without a 13-digit run are silently dropped.
        val name = status.getPath.getName
        TimestampPattern.findFirstIn(name).map(ts => (ts.toLong, status.getPath))
      }
      .filter { case (ts, _) => ts > lowerBound && ts < upperBound }
      .map { case (_, p) => p.getParent + "/" + p.getName }
      .toList
  }
}