package com.bigdata.spark.projectone

import java.net.URL

import org.apache.spark.rdd.RDD
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

/**
 * @author Gerry chan
 * @version 1.0
 * Demonstrates RDD caching and a custom partitioner (to avoid data skew /
 * hash collisions): each subject's records are routed to their own partition.
 */
object ObjectCount02 {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("Objectcount2").setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)
    // Load the raw access-log lines.
    val file: RDD[String] = sc.textFile("access.txt")

    // Extract the URL (second tab-separated field) and pair it with a count of 1.
    val urlAndOne: RDD[(String, Int)] = file.map { line =>
      val fields = line.split("\t")
      (fields(1), 1)
    }
    // Aggregate the hit count per URL.
    val sumedUrl: RDD[(String, Int)] = urlAndOne.reduceByKey(_ + _)

    // Re-key by subject (the URL's host) and cache: this RDD is reused three
    // times below (two partitionBy calls plus the distinct-keys collect).
    val cacheProject: RDD[(String, (String, Int))] = sumedUrl.map { case (url, count) =>
      (new URL(url).getHost, (url, count))
    }.cache()

    // Spark's built-in HashPartitioner can map several subjects to the same
    // partition (hash collision), which motivates the custom partitioner below.
    val res: RDD[(String, (String, Int))] = cacheProject.partitionBy(new HashPartitioner(3))
    // BUG FIX: the original wrote both results to "./out"; the second
    // saveAsTextFile then fails with FileAlreadyExistsException because the
    // Hadoop output committer refuses to overwrite an existing directory.
    // Each result now gets its own output path.
    res.saveAsTextFile("./out_hash")

    // Collect the distinct subjects (hosts) seen in the data.
    val projects: Array[String] = cacheProject.keys.distinct().collect()
    // Build the custom partitioner: one partition per subject.
    val partitioner: ProjectPartitioner = new ProjectPartitioner(projects)
    // Repartition so every subject's records land in their own partition.
    val partitioned: RDD[(String, (String, Int))] = cacheProject.partitionBy(partitioner)

    // Within each partition, keep the top 3 URLs by hit count
    // (sort by negated count instead of sort-then-reverse).
    val res2: RDD[(String, (String, Int))] = partitioned.mapPartitions { it =>
      it.toList.sortBy(-_._2._2).take(3).iterator
    }

    res2.saveAsTextFile("./out_custom")
    sc.stop()
  }
}

