package com.shujia.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
 * Demo of the `mapValues` operator: applies a function to the value of every
 * (key, value) pair of a pair RDD while leaving the key untouched.
 *
 * Data pipeline: read per-subject scores from `spark/data/score.txt`
 * (lines of `studentId,subjectId,score`), sum each student's total score,
 * then add a flat bonus of 1000 to every total via `mapValues`.
 */
object Demo13MapValues {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    // Fixed: app name previously said "Join" — this demo is about mapValues.
    conf.setAppName("MapValues算子演示")
    val context = new SparkContext(conf)
    try {
      //====================================================
      // Read the score file into (studentId, subjectId, score) triples.
      val scoreRDD: RDD[(String, String, String)] = context.textFile("spark/data/score.txt") // read the data file
        .map((s: String) => s.split(",")) // split each CSV line
        .filter((arr: Array[String]) => arr.length == 3) // drop malformed rows (guarantees the match below is total)
        .map {
          // pattern-match the 3-element array into a tuple
          case Array(sid: String, subject_id: String, score: String) => (sid, subject_id, score)
        }

      // Total score per student: (studentId, totalScore).
      // NOTE(review): assumes the score column is always a valid integer —
      // score.toInt will throw NumberFormatException otherwise.
      val sumScoreWithSidRDD: RDD[(String, Int)] = scoreRDD.map {
        case (sid: String, _: String, score: String) => (sid, score.toInt)
      }.reduceByKey((x: Int, y: Int) => x + y)

      /**
       * mapValues operator: works on key-value RDDs.
       * It keeps each key unchanged and transforms only the value.
       */
      val resRDD: RDD[(String, Int)] = sumScoreWithSidRDD.mapValues(_ + 1000)
      resRDD.foreach(println)

      // Equivalent hand-written form using plain `map`.
      // (No action is called on it, so this transformation is never executed —
      // kept only to illustrate the equivalence.)
      val res2RDD: RDD[(String, Int)] = sumScoreWithSidRDD.map((kv: (String, Int)) => (kv._1, kv._2 + 1000))
    } finally {
      // Fixed: the original never stopped the SparkContext, leaking the
      // driver's resources; always release it, even if the job fails.
      context.stop()
    }
  }
}
