package com.shujia.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo6GroupByKey {

  /**
    * Demo of `groupByKey`: reads a comma-separated student file, keys each
    * record by its class column, groups the records per class, and prints
    * the number of students in each class.
    */
  def main(args: Array[String]): Unit = {

    // Spark configuration object.
    // App name fixed from the copy-pasted "map" to match this demo.
    val conf = new SparkConf().setAppName("groupByKey").setMaster("local")

    // SparkContext: entry point for building RDDs.
    val sc = new SparkContext(conf)

    try {
      // Read the file; each line is one comma-separated student record.
      val studentRDD: RDD[String] = sc.textFile("spark/data/students.txt")

      // Key each record by the class column (index 4 of the split line).
      val kvRDD: RDD[(String, String)] = studentRDD.map { line =>
        val split = line.split(",")
        (split(4), line)
      }

      /**
        * groupByKey: groups by key; only available on key-value RDDs.
        * groupBy: groups by an arbitrary function of the element.
        */
      kvRDD
        .groupByKey() // group all student records under their class key
        .map { case (clazz, stus) =>
          // Avoid `stus.toList`: copying the whole group into a List can
          // cause an OutOfMemoryError for very large groups. `size` counts
          // the already-materialized group without making another copy
          // (replaces the original manual `var` counting loop).
          (clazz, stus.size.toLong)
        }
        .foreach(println)
    } finally {
      // Always release the SparkContext so the local backend shuts down cleanly.
      sc.stop()
    }
  }
}
