package com.doit.spark.day04

import com.doit.spark.day01.utils.SparkUtil
import org.apache.spark.rdd.RDD

/**
 * Demonstrates how a driver-side value is captured in a task closure
 * and shipped to executors.
 *
 * @author MDK
 * @since 2022-01-06 11:18
 * @version 2021.2.2
 */
object C02_分布式闭包 {
  /**
   * Entry point. Builds a small order RDD, then enriches each order with a
   * stock quantity looked up from a driver-side `Map`. The map is captured by
   * the `map()` lambda, so Spark serializes it as part of the task closure and
   * sends a copy to every executor — the "distributed closure" this example
   * demonstrates.
   */
  def main(args: Array[String]): Unit = {
    val sc = SparkUtil.getSc

    // Order data: (order id, product name, price)
    val ls = List[(Int, String, Int)]((1, "袜子", 28), (2, "裤子", 99), (3, "帽子", 66), (4, "褂子", 199))
    val rdd: RDD[(Int, String, Int)] = sc.makeRDD(ls, 2)

    // makeRDD behaves the same as parallelize; internally it delegates to parallelize.
    // val value: RDD[(Int, String, Int)] = sc.parallelize(ls, 2)

    // Driver-side lookup table: product name -> stock quantity.
    val mp = Map[String, Int](("袜子", 100), ("裤子", 200), ("帽子", 300), ("褂子", 100))

    // Enrich each order tuple with its quantity. `mp` is referenced inside the
    // lambda, so it becomes part of the serialized task closure.
    val rdd2: RDD[(Int, String, Int, Int)] = rdd.map(tp => {
      val name = tp._2
      // FIX: the original `mp.get(name).get` calls Option.get, which throws
      // NoSuchElementException for any product absent from the lookup table.
      // Default to 0 (no stock recorded) instead of crashing the task.
      val number = mp.getOrElse(name, 0)
      (tp._1, tp._2, tp._3, number)
    })

    // Join result of order data with quantities (printed on the executors).
    rdd2.foreach(println)
    // The original driver-side map (printed on the driver).
    mp.foreach(println)
  }
}
