# NOTE(review): `from doctest import master` appears unused — the `master='local'`
# argument below is a plain keyword argument, not this name. Consider removing.
from doctest import master
import time

from pyspark.context import SparkContext

sc = SparkContext(master='local', appName='demo2')

# 1、读取数据
students_rdd = sc.textFile("../../data/students.txt")

scores_rdd = sc.textFile("../../data/score.txt")

stu_kv_rdd = students_rdd.map(lambda line: (line.split(",")[0], line))
sco_kv_rdd = scores_rdd.map(lambda line: (line.split(",")[0], line))


# 1、reduce join： 会产生shuffle
# reduce_join_rdd = stu_kv_rdd.join(sco_kv_rdd)
# reduce_join_rdd.foreach(print)

# 2、map join
# 只适合大表关联小表
# 当Driver和Executor内存不够时，需要增加内存
# --driver-memory
# --executor-memory

# 1、将小表的数据拉取到Driver端
# collectAsMap: 将kv格式的RDD转换成一个字典，将RDD的数据拉取到Driver端，放在Driver的内存中
students_dict = stu_kv_rdd.collectAsMap()

# 2、将小表广播到Executor
students_dict_bro = sc.broadcast(students_dict)

# 3、使用map算子关联学生表的数据
def map_join_fun(kv):
    id = kv[0]
    score_info = kv[1]

    # 获取广播变量
    students = students_dict_bro.value
    # 通过学号获取学生信息
    # 字段通过key获取value在数据量很大的时候效率也会降低（小表控制在千万级别内）
    student_info = students.get(id)

    return student_info,score_info


map_join_rdd = sco_kv_rdd.map(map_join_fun)

map_join_rdd.foreach(print)

while True:
    pass