#coding:utf8
from pyspark import SparkContext,SparkConf

if __name__ == '__main__':
    conf = SparkConf().setAppName('test_persit')
    sc = SparkContext(conf=conf)

    # Local Python data; lives in the Driver process.
    # Records are (student_id, name, age).
    stu_info_list = [(1, '张大仙', 11),
                     (2, '王晓晓', 13),
                     (3, '张甜甜', 11),
                     (4, '王大力', 11)]

    # 1. Build an id -> name lookup table once on the Driver, then mark it
    #    as a broadcast variable so each executor receives a single copy
    #    instead of one copy per task. Broadcasting a dict (rather than
    #    the raw list) lets map_func do an O(1) lookup per record instead
    #    of scanning the whole student list for every score record.
    id_to_name = {sid: name for sid, name, _age in stu_info_list}
    broadcast = sc.broadcast(id_to_name)

    # RDD data, partitioned across the cluster's executors.
    # NOTE: sc.parallelize distributes in-memory driver data; it does not
    # store the data in HDFS. Records are (student_id, subject, score).
    score_info_rdd = sc.parallelize([
        (1, '语文', 99),
        (2, '数学', 99),
        (3, '英语', 99),
        (4, '编程', 99),
        (1, '语文', 99),
        (2, '编程', 99),
        (3, '语文', 99),
        (4, '英语', 99),
        (1, '语文', 99),
        (3, '英语', 99),
        (2, '编程', 99)
    ])

    # Replace the student id in each score record with the student's name.
    def map_func(data):
        """Map a (student_id, subject, score) record to (name, subject, score).

        Unknown ids map to an empty-string name, matching the original
        linear-scan behavior when no student matched.
        """
        name = broadcast.value.get(data[0], '')
        return name, data[1], data[2]

    print(score_info_rdd.map(map_func).collect())

