package cn.doitedu.day05

import cn.doitedu.day01.utils.SparkUtil
import org.apache.spark.rdd.RDD

/**
 * @Date 22.4.3
 * @Created by HANGGE
 * @Description Demo of the `mapValues` transformation on pair RDDs, with a
 *              `reduceByKey` word-count example shown for contrast.
 */
object C20_转换算子_MapValues {

  /**
   * Demonstrates two pair-RDD transformations:
   *  1. `reduceByKey` — word count over a text file (action left commented out);
   *  2. `mapValues`   — bump every score by 10 while leaving the keys untouched.
   */
  def main(args: Array[String]): Unit = {
    val sc = SparkUtil.getSc

    // Classic word count: tokenize on whitespace, pair each word with 1.
    val wordPairs = sc.textFile("data/a.txt")
      .flatMap(line => line.split("\\s+"))
      .map(word => (word, 1))
    // aggregateByKey(0)(_ + _, _ + _) would also work here, but reduceByKey is
    // the idiomatic choice when the partition-local and global merge logic are
    // the same function.
    val counts: RDD[(String, Int)] = wordPairs.reduceByKey(_ + _)
    // counts.foreach(println)

    // Demo data: (name, score) pairs with duplicate keys.
    val scores = sc.parallelize(List(
      ("zss", 88),
      ("zss", 98),
      ("lss", 88),
      ("lss", 68),
      ("lss", 77),
      ("ww", 88)
    ))

    // mapValues rewrites only the value side of each pair; keys are untouched,
    // so any existing partitioning is preserved.
    val boosted: RDD[(String, Int)] = scores.mapValues(v => v + 10)
    boosted.foreach(println)
  }

}
