package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo driver for Spark RDD *actions*: `lookup`, `distinct` + `count`,
 * `top`, and `reduce`, run against a small in-memory key/value data set.
 *
 * Runs in local mode; results are printed to stdout.
 */
object Code23Action5 {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("CoGroup"))

    try {
      // Sample (key, value) pairs, split across 2 partitions.
      val value1RDD: RDD[(String, Int)] = sc.parallelize(List(("k1", 11), ("k1", 1), ("k2", 2), ("k3", 33), ("k5",45), ("k4", 4), ("k3", 3), ("k3", 33)), 2)

      // lookup: fetch all values associated with one key from the RDD.
      // "key1" is absent from the data, so this prints an empty list —
      // NOTE(review): presumably an intentional contrast with the "k1"
      // lookup right below; confirm it is not a typo.
      val list1: List[Int] = value1RDD.lookup("key1").toList
      println(list1)
      val list2: List[Int] = value1RDD.lookup("k1").toList
      println(list2)

      // Count the number of distinct keys.
      val count: Long = value1RDD
        .map(_._1).distinct().count()
      println(count)

      // top: sort descending and take the first N elements.
      val topList: List[Int] = value1RDD.map(_._2).top(3).toList
      println(topList)

      // def reduce(f: (T, T) => T): T — T is each element of the RDD.
      // Pairing rule: same key -> sum the values; different keys -> keep
      // the max value (the second element's key wins either way).
      //
      // WARNING: this function is neither associative nor commutative,
      // so the result depends on the order elements are paired; with
      // more than one partition the output is nondeterministic. Fine as
      // a demo of reduce's mechanics, but use reduceByKey/aggregate for
      // a well-defined aggregation.
      val reduceRes: (String, Int) = value1RDD.reduce(
        (kv1: (String, Int), kv2: (String, Int)) => {
          if (kv1._1 == kv2._1) {
            (kv2._1, kv1._2 + kv2._2)
          } else {
            (kv2._1, math.max(kv1._2, kv2._2))
          }
        }
      )
      println(reduceRes)
    } finally {
      // Bug fix: the SparkContext was never stopped, leaking the
      // underlying resources (local scheduler, UI, block manager).
      sc.stop()
    }
  }
}
