package cn.huq.day02

import org.apache.spark.rdd.{RDD, ShuffledRDD}
import org.apache.spark.{Aggregator, HashPartitioner, SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

/**
 * Demonstrates `RDD.groupBy`: parses "province,city,salary" records and
 * groups them by province, printing the collected result to stdout.
 */
object GroupByDemo {

  def main(args: Array[String]): Unit = {
    // Local single-threaded Spark context for the demo.
    val conf = new SparkConf().setMaster("local").setAppName("GroupByDemo")
    val sc: SparkContext = new SparkContext(conf)

    val data = List("陕西省,西安市,5000", "陕西省,汉中市,4500", "福建省,厦门市,7000")

    val lines: RDD[String] = sc.parallelize(data)

    // Parse each CSV line into (province, city, salary).
    val records: RDD[(String, String, Double)] = lines.map { line =>
      val parts: Array[String] = line.split(",")
      (parts(0), parts(1), parts(2).toDouble)
    }

    // Group the full tuples by their province field (tuple position 1).
    val byProvince: RDD[(String, Iterable[(String, String, Double)])] =
      records.groupBy(_._1)

    /**
     * For reference, Spark implements groupBy as a keyed shuffle:
     *
     * def groupBy[K](f: T => K, p: Partitioner)(implicit kt: ClassTag[K], ord: Ordering[K] = null)
     *     : RDD[(K, Iterable[T])] = withScope {
     *   val cleanF = sc.clean(f)
     *   this.map(t => (cleanF(t), t)).groupByKey(p)
     * }
     */

    val collected: Array[(String, Iterable[(String, String, Double)])] = byProvince.collect()
    println(collected.toBuffer)

    sc.stop()
  }

}
