package org.wj.algorithm

import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable

/**
 * Computes the top-N scores from a whitespace-separated "name score" text file.
 *
 * Strategy: each partition keeps only its local top N in a small TreeSet
 * (bounded memory per task), then the driver merges the per-partition
 * candidates with RDD.top(N).
 *
 * Fixes over the previous version:
 *  - `treeSet - treeSet.last` was a no-op (`-` returns a new set); use `-=`.
 *  - Trimming must drop the SMALLEST element (`head`) to keep the top N,
 *    not the largest (`last`).
 *  - The accumulator set is now local to each partition instead of a shared
 *    driver-side field captured in the closure.
 *  - `takeOrdered(2)` returned the smallest 2 values; `top(N)` returns the
 *    largest, matching the object's name.
 *  - Blank/malformed lines are skipped instead of injecting a -1.0 sentinel.
 *  - Explicit `main` instead of `extends App` (App + Spark closures is an
 *    initialization-order/serialization pitfall), and `sc.stop()` is called.
 */
object TopN {

  /** Number of top scores to report. */
  private val N = 2

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName(this.getClass.getName)
    val sc = new SparkContext(conf)
    try {
      val sourceRdd: RDD[String] =
        sc.textFile("P:\\Project\\Idea\\bigdata\\spark\\src\\main\\resources\\data\\scores.txt", 4)

      // Parse "name score" lines; silently skip blank or malformed records.
      val scores: RDD[Double] = sourceRdd.flatMap { line =>
        if (StringUtils.isNotBlank(line)) {
          val fields = line.split(" ")
          if (fields.length > 1) scala.util.Try(fields(1).toDouble).toOption
          else None
        } else None
      }

      scores
        .mapPartitions { iter =>
          // Per-partition bounded top-N accumulator (ascending order).
          val localTop = new mutable.TreeSet[Double]()
          iter.foreach { score =>
            localTop += score
            // Keep at most N elements: evict the current minimum.
            if (localTop.size > N) localTop -= localTop.head
          }
          localTop.iterator
        }
        .top(N)            // global largest N, descending
        .foreach(println)
    } finally {
      sc.stop()            // release local Spark resources even on failure
    }
  }
}
