package com.hxk.topn

import java.net.URL

import com.hxk.local.BoundedPriorityQueue
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer

object PriorityQueueTopN {

  // Logger for this job's own progress messages (e.g. per-partition tracing).
  private val logger = LoggerFactory.getLogger("PriorityQueueTopN")

  def main(args: Array[String]): Unit = {

    val topN = 10
    val path = "data/topn/1000000"
    val savePath = "result/topnx/1000000"
    val urlPath = "data/url/1000000"
    val urlSavePath = "result/urlx/1000000"
    val ur2SavePath = "result/urlxxx/1000000"

    // Word-count TopN: split each line on spaces, every word counts as 1.
    runningTopN1(path, savePath, topN, line => line.split(" ").map((_, 1L)))

    // Domain TopN: each line is a URL; its host counts as 1.
    runningTopN1(urlPath, urlSavePath, topN, line => Iterator.single((getDomain(line), 1L)))

    // Same domain TopN computed via Spark's built-in RDD.top for comparison.
    runningTopN2(urlPath, ur2SavePath, topN, line => Iterator.single((getDomain(line), 1L)))

  }

  /**
    * Returns the domain of the given URL; the URL's host is used as the domain value.
    *
    * @param url an absolute URL; a malformed one makes `new URL(...)` throw
    *            `java.net.MalformedURLException`
    * @return the host part of the URL
    */
  def getDomain(url: String): String = {
    new URL(url).getHost
  }

  /**
    * Builds (or reuses) the local SparkContext shared by both TopN variants.
    * Extracted so the two methods cannot drift apart in configuration.
    */
  private def createSparkContext(): SparkContext = {
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("topn")
    SparkContext.getOrCreate(conf)
  }

  /**
    * TopN via one mapPartitions + a bounded priority queue: every partition
    * keeps at most `topN` candidates, the per-partition queues are merged with
    * `reduce`, and the merged survivors are sorted by count descending.
    *
    * @param inputPath  text input path
    * @param outputPath result path (deleted first if it already exists)
    * @param topN       number of entries to keep
    * @param parseFunc  turns one input line into (key, count) pairs
    */
  def runningTopN1(inputPath: String, outputPath: String, topN: Int,
                  parseFunc: String => TraversableOnce[(String, Long)]): Unit = {

    val sc = createSparkContext()

    val rdd = sc.textFile(inputPath)
    // Count every key: parse each non-empty line into (key, 1L) pairs, then sum per key.
    val wordCountRDD: RDD[(String, Long)] = rdd
      .filter(_.nonEmpty) // drop empty lines
      .flatMap(line => parseFunc(line)) // apply the caller-supplied parser, yielding key/value pairs
      .reduceByKey(_ + _) // total occurrences per key

    // Orders pairs by count DESCENDING: a larger count compares as "smaller",
    // so takeOrdered/sorted under this ordering put the biggest counts first.
    val ord = new Ordering[(String, Long)] {
      override def compare(x: (String, Long), y: (String, Long)): Int = y._2.compare(x._2)
    }

    // One bounded queue per partition holding that partition's topN survivors,
    // emitted as a single-element iterator so reduce can merge the queues later.
    // (Returning queue.iterator() here would lose the per-partition grouping.)
    val priorityQueueResultRDD: RDD[BoundedPriorityQueue[(String, Long)]] =
      wordCountRDD.mapPartitions { items =>
        val queue = new BoundedPriorityQueue[(String, Long)](topN, ord.reversed())
        queue.addAll(Utils.takeOrdered(items, topN)(ord)) // keep this partition's topN elements
        logger.info("一个分区调用一次")
        Iterator.single(queue)
      }

    // Guard against an RDD with zero partitions: reduce throws on one.
    // Explicit element type so the empty branch is not inferred as Array[Nothing].
    val result: Array[(String, Long)] =
      if (priorityQueueResultRDD.partitions.length == 0) {
        Array.empty[(String, Long)]
      } else {
        // NOTE(review): JavaConversions is deprecated; kept because the exact
        // collection API of com.hxk.local.BoundedPriorityQueue is not visible
        // here — migrate to JavaConverters (.asScala/.asJava) once confirmed.
        import scala.collection.JavaConversions._
        // Merge all per-partition queues into one (the queues cross the network,
        // so BoundedPriorityQueue must be serializable), then sort descending.
        priorityQueueResultRDD.reduce { (queue1, queue2) =>
          queue1.addAll(queue2)
          queue1
        }.toArray.sorted(ord)
      }

    // Overwrite any previous run's output, then save as a single text file.
    FileSystem.get(sc.hadoopConfiguration).delete(new Path(outputPath), true)
    sc.parallelize(result, 1).saveAsTextFile(outputPath)

    // Release the SparkContext.
    sc.stop()

  }

  /**
    * TopN via Spark's built-in RDD.top, whose internal implementation is the
    * same idea: one mapPartitions with a bounded priority queue per partition.
    *
    * @param inputPath  text input path
    * @param outputPath result path (deleted first if it already exists)
    * @param topN       number of entries to keep
    * @param parseFunc  turns one input line into (key, count) pairs
    */
  def runningTopN2(inputPath: String, outputPath: String, topN: Int,
                   parseFunc: String => TraversableOnce[(String, Long)]): Unit = {

    val sc = createSparkContext()

    val rdd = sc.textFile(inputPath)
    // Count every key: parse each non-empty line into (key, 1L) pairs, then sum per key.
    val wordCountRDD: RDD[(String, Long)] = rdd
      .filter(_.nonEmpty) // drop empty lines
      .flatMap(line => parseFunc(line)) // apply the caller-supplied parser, yielding key/value pairs
      .reduceByKey(_ + _) // total occurrences per key

    // ASCENDING by count: RDD.top returns the LARGEST elements under the given
    // ordering, so the overall result is still the topN keys by count.
    // (The original comment claimed this was descending, which was misleading.)
    val ord = new Ordering[(String, Long)] {
      override def compare(x: (String, Long), y: (String, Long)): Int = x._2.compare(y._2)
    }

    // Spark API: top(topN) = mapPartitions + per-partition priority queue internally.
    val topNResult = wordCountRDD.top(topN)(ord)

    // Overwrite any previous run's output, then save as a single text file.
    FileSystem.get(sc.hadoopConfiguration).delete(new Path(outputPath), true)
    sc.parallelize(topNResult, 1).saveAsTextFile(outputPath)

    // Release the SparkContext.
    sc.stop()

  }


}
