package org.zjt.spark.book

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

/**
  * Counts k-mers (length-k substrings) across every line of the input file,
  * then prints the 3 most frequent k-mers using two top-N strategies:
  *
  *   1. `top(3)(Ordering.by[(String, Int), Int](_._2))` — top with a custom
  *      Ordering on the count.
  *   2. `sortBy(_._2, ascending = false).take(3)` — full sort on the count,
  *      descending, then take the first 3.
  */
object KMer extends App {

  // NOTE(review): appName "WeatherSort" looks copy-pasted from another job;
  // kept as-is to preserve runtime behavior.
  val sparkConf = new SparkConf().setMaster("local[2]").setAppName("WeatherSort")
  val sc = new SparkContext(sparkConf)

  // k (window width) shipped to executors as a broadcast variable.
  val kBroadcast = sc.broadcast(3)

  val rdd = sc.textFile("D:\\Idea workspace\\scala-demo\\src\\main\\resource\\k-mer.data").flatMap {
    line =>
      val k = kBroadcast.value
      // Emit every length-k sliding window of the line paired with a count of 1.
      // BUG FIX: the window width was hard-coded as `i - 3`; it must use the
      // broadcast value `k`, otherwise any k other than 3 produces wrong windows.
      for (i <- k to line.length) yield (line.substring(i - k, i), 1)
  }.reduceByKey(_ + _).persist()

  // Strategy 1: top-N via top() with an Ordering on the count (_._2).
  println(rdd.top(3)(Ordering.by[(String, Int), Int](_._2)).mkString(","))

  // Strategy 2: top-N via a descending sort on the count, then take(3).
  println(rdd.sortBy(_._2, ascending = false).take(3).mkString(","))
  sc.stop()
}
