package com.doitedu.core

import com.doitedu.utils.{MyPartitioner, SparkUtil}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

/**
 * @Date: 22.7.2 
 * @Author: HANGGE
 * @qq: 598196583
 * @Tips: 学大数据 ,到多易教育
 * @Description: Demonstrates RDD join shuffle behavior: pre-partitioning both
 *               sides with the same partitioner lets the subsequent join run
 *               without an extra shuffle stage.
 */
object C19_Join_shuffle {

  /**
   * Joins user records ("id,name") with city records ("id,city") and prints
   * (id, name, city) triples.
   *
   * Both pair-RDDs are partitioned up front with the SAME partitioner
   * ([[MyPartitioner]] with 2 partitions) so that the join can be computed
   * co-partitioned, without an additional shuffle stage.
   */
  def main(args: Array[String]): Unit = {
    val sc: SparkContext = SparkUtil.getSparkContext("join")

    // Input files: one "key,value" CSV pair per line.
    val userData = sc.textFile("data/join/user.txt")
    val cityData = sc.textFile("data/join/city.txt")

    // (id, name) pairs from the user file.
    val idAndname: RDD[(String, String)] = userData.map(line => {
      val arr = line.split(",")
      (arr(0), arr(1))
    })

    // Pre-partition with the shared partitioner (side 1 of the join).
    val joinRDD1 = idAndname.partitionBy(new MyPartitioner(2))

    // (id, city) pairs from the city file.
    val idAndcity: RDD[(String, String)] = cityData.map(line => {
      val arr = line.split(",")
      (arr(0), arr(1))
    })

    // Pre-partition with the shared partitioner (side 2 of the join).
    val joinRDD2 = idAndcity.partitionBy(new MyPartitioner(2))

    // FIX: the original joined the UN-partitioned RDDs (idAndname.join(idAndcity)),
    // which triggered a shuffle and left joinRDD1/joinRDD2 as dead code.
    // Joining the co-partitioned RDDs produces the same result without a shuffle,
    // because both sides already share the same partitioner.
    val joinedRDD: RDD[(String, (String, String))] = joinRDD1.join(joinRDD2)

    // Flatten (id, (name, city)) into (id, name, city).
    val resRDD = joinedRDD.map { case (id, (name, city)) => (id, name, city) }

    resRDD.foreach(println)

    // Side-effecting 0-arity method: call with parentheses.
    sc.stop()
  }
}
