# glom() nests the elements of an RDD into lists, one list per partition
#coding:utf8
from pyspark import SparkContext,SparkConf

if __name__ == '__main__':
    # Run Spark locally, using all available CPU cores as worker threads.
    conf = SparkConf().setAppName("test").setMaster("local[*]")
    sc = SparkContext(conf=conf)

    try:
        # Build an RDD of nine integers explicitly split across 3 partitions.
        rdd = sc.parallelize([1, 2, 3, 4, 5, 6, 7, 8, 9], 3)
        print("分区数量:", rdd.getNumPartitions())

        # glom() turns each partition into a single list, so collect()
        # reveals the physical data layout: [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
        print(rdd.glom().collect())
    finally:
        # Always stop the context so driver/executor resources are released
        # even if one of the actions above raises.
        sc.stop()