package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Code09GroupBy {

  /**
   * Demonstrates the RDD `groupBy` operator.
   *
   * `groupBy(keyFn, numPartitions)` groups records by the key returned from
   * `keyFn`; the optional second argument sets the number of partitions of the
   * resulting RDD, which controls the downstream parallelism.
   *
   * Reads "spark_code/data/score.txt" (CSV lines: id,courseID,score), groups
   * scores by student id, and prints each student's total score.
   */
  def main(args: Array[String]): Unit = {
    // App name fixed: was "Mysql2Text", a copy-paste leftover from another example.
    val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("Code09GroupBy"))

    try {
      val groupByRDD: RDD[(String, Iterable[GroupByScore])] = sc
        .textFile("spark_code/data/score.txt", 4) // read with 4 input partitions
        .map { oneLine =>
          // Each line is "id,courseID,score"; score must parse as an Int.
          val fields: Array[String] = oneLine.split(",")
          GroupByScore(fields(0), fields(1), fields(2).toInt)
        }
        // Group by student id; second arg forces 2 partitions in the result.
        .groupBy(_.id, 2)
      println(groupByRDD.getNumPartitions)

      groupByRDD
        .map { case (id, scores) =>
          // Sum all course scores for this student.
          (id, scores.map(_.score).sum)
        }
        .foreach(println)
    } finally {
      // Release Spark resources even if the job fails (was missing before).
      sc.stop()
    }
  }
}

/** One score record: student id, course id, and the score for that course.
  * Marked `final` — case classes should not be extended (standard Scala practice).
  */
final case class GroupByScore(id: String, courseID: String, score: Int)
