package com.hxk.topn

import java.net.URL

import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer

object UrlTopN {

  // Logger for this job's own log output.
  private val logger = LoggerFactory.getLogger("TopN")

  // Original hard-coded settings, kept as defaults so running with no
  // arguments behaves exactly as before.
  private val DefaultTopN = 10
  private val DefaultInputPath = "data/url/1000000"
  private val DefaultOutputPath = "result/url/1000000"

  /**
    * Entry point. Optional args: [topN] [inputPath] [outputPath].
    *
    * Computes the topN most frequent URL domains in the input and writes
    * them, sorted by count descending, to the output path.
    */
  def main(args: Array[String]): Unit = {

    // CLI overrides fall back to the original defaults (backward compatible).
    val topN = if (args.length > 0) args(0).toInt else DefaultTopN
    val urlPath = if (args.length > 1) args(1) else DefaultInputPath
    val urlSavePath = if (args.length > 2) args(2) else DefaultOutputPath

    val sc = {
      val conf = new SparkConf()
        .setMaster("local[*]")
        .setAppName("topn")
      SparkContext.getOrCreate(conf)
    }

    try {
      // Map every line to (domain, 1) and sum the counts per domain.
      // Blank and malformed lines are skipped instead of failing the job.
      val domainCountRDD: RDD[(String, Long)] = sc.textFile(urlPath)
        .filter(_.trim.nonEmpty)
        .flatMap(line => getDomainOption(line).map(domain => (domain, 1L)))
        .reduceByKey(_ + _)

      // Local top-N inside each partition (shared helper replaces the
      // duplicated fold the two stages previously carried).
      val partitionTopNRDD: RDD[(String, Long)] = domainCountRDD.mapPartitions { iter =>
        logger.info(s"computing local top-$topN for one partition")
        topNOfPartition(iter, topN).iterator
      }

      // Collapse to a single partition and take the global top-N,
      // sorted by count descending for the final output.
      val globalTopNRDD: RDD[(String, Long)] = partitionTopNRDD
        .repartition(1)
        .mapPartitions { iter =>
          logger.info(s"computing global top-$topN after repartition")
          topNOfPartition(iter, topN).sortBy(-_._2).iterator
        }

      // Remove any previous result so saveAsTextFile does not fail on an
      // existing output directory, then write the final ranking.
      FileSystem.get(sc.hadoopConfiguration).delete(new Path(urlSavePath), true)
      globalTopNRDD.saveAsTextFile(urlSavePath)
    } finally {
      // Always release the SparkContext, even if a stage failed.
      sc.stop()
    }
  }

  /**
    * Keep the n pairs with the largest counts from the iterator.
    *
    * Evicts the current minimum whenever the buffer grows past n, so at
    * most n + 1 elements are held at a time and no full sort is performed
    * per element (the previous implementation re-sorted the whole buffer
    * for every element beyond n).
    *
    * @param iter (word, count) pairs of one partition
    * @param n    how many of the highest-count pairs to keep
    * @return at most n pairs, in no particular order
    */
  private def topNOfPartition(iter: Iterator[(String, Long)], n: Int): List[(String, Long)] = {
    val buffer = ListBuffer[(String, Long)]()
    iter.foreach { pair =>
      buffer += pair
      if (buffer.size > n) {
        // Drop the pair with the smallest count to stay within n entries.
        buffer.remove(buffer.indexOf(buffer.minBy(_._2)))
      }
    }
    buffer.toList
  }

  /**
    * Get the domain of the given url (the host part is used as the domain).
    * Kept with its original throwing contract for external callers.
    *
    * @param url a well-formed absolute URL
    * @return the host of the URL
    * @throws java.net.MalformedURLException if the url is not parseable
    */
  def getDomain(url: String): String = {
    new URL(url).getHost
  }

  /**
    * Safe variant of [[getDomain]] for the RDD pipeline: returns None for
    * malformed URLs or URLs without a host, instead of throwing and
    * killing the whole job on one bad input line.
    */
  private def getDomainOption(url: String): Option[String] =
    try Option(new URL(url).getHost).filter(_.nonEmpty)
    catch { case _: java.net.MalformedURLException => None }

}
