package com.sunzm.spark.core

import org.apache.commons.io.IOUtils
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.util.ShutdownHookManager
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkFiles
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.lionsoul.ip2region.{DbConfig, DbSearcher}

/**
 * Demonstrates two ways to distribute a lookup file (ip2region.db) to Spark
 * executors: `SparkContext.addFile` and a broadcast variable holding the raw
 * file bytes. Each partition builds a local DbSearcher and resolves IP
 * addresses to regions.
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-06-18 11:45
 */
object DistributeCacheFileDemo {

  /**
   * Entry point: loads the ip2region database into a broadcast variable,
   * reads a text file of IP addresses, resolves each IP to its region on
   * the executors, and prints the results.
   *
   * Demonstrates two file-distribution strategies (addFile vs. broadcast);
   * the addFile variant is left commented out for reference.
   */
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.WARN)

    val spark: SparkSession = SparkSession
      .builder()
      .appName(s"${this.getClass.getSimpleName.stripSuffix("$")}")
      .master("local[*]")
      .config("spark.default.parallelism", 6)
      .config("spark.sql.shuffle.partitions", 6)
      .getOrCreate()

    val sc = spark.sparkContext

    // Add a file to be downloaded with this Spark job on every node.
    // path can be either a local file, a file in HDFS (or other Hadoop-supported
    // filesystems), or an HTTP, HTTPS or FTP URI.
    // Option 1: use addFile
    //sc.addFile("data/ip2region.db")

    // Option 2: use a broadcast variable.
    // Load the IP geolocation dictionary file into a byte array.
    val fs: FileSystem = FileSystem.get(new Configuration())
    val path = new Path("data/ip2region.db")
    // Get the length of the db file.
    // getFileStatus is the direct API for a single path; listStatus(path)(0)
    // would throw ArrayIndexOutOfBoundsException on an empty listing.
    val dbFileLen = fs.getFileStatus(path).getLen
    // Allocate a byte array sized to hold the whole file.
    val buff = new Array[Byte](dbFileLen.toInt)
    // Open an input stream on the db file and read it fully into the buffer.
    // Close the stream even if the read fails (the original leaked it).
    val in = fs.open(path)
    try {
      IOUtils.readFully(in, buff)
    } finally {
      in.close()
    }
    // Broadcast the buffer to all executors.
    val ip2regionBC = spark.sparkContext.broadcast(buff)

    val dataRDD: RDD[String] = sc.textFile("data/spark/rdd/ipdata.txt")

    // Build one DbSearcher per partition (it is not serializable / cheap to
    // create per record), then map each line to (ip, region).
    val resultRDD: RDD[(String, String)] = dataRDD.mapPartitions((p: Iterator[String]) => {
      val dbConfig = new DbConfig()

      // Variant using the file shipped via addFile:
      //val cacheFileName = SparkFiles.get("ip2region.db")
      //val searcher = new DbSearcher(dbConfig, cacheFileName)

      // Variant using the broadcast byte array (in-memory search, no disk I/O).
      val ipDbBytes = ip2regionBC.value
      val searcher = new DbSearcher(dbConfig, ipDbBytes)

      p.map(line => {
        val ipStr = StringUtils.trim(line)
        //val block = searcher.binarySearch(ipStr)
        val block = searcher.memorySearch(ipStr)

        (ipStr, block.getRegion)
      })

    })

    resultRDD.foreachPartition(p => {
      p.foreach {
        case (ipStr, region) => println(s"${ipStr} -> ${region}")
      }
    })

    val shutdownHookManager = ShutdownHookManager.get()

    // Spark's own hook runs at priority 40
    // (FileSystem.SHUTDOWN_HOOK_PRIORITY + 30 = 10 + 30 = 40);
    // priority 100 runs this hook before Spark's shutdown.
    shutdownHookManager.addShutdownHook(new Runnable {
      override def run(): Unit = {
        println("Post-processing work before program exit...")
      }
    }, 100)

    //sc.stop()
    spark.close()
  }
}
