#coding:utf8
from pyspark import SparkContext,SparkConf

if __name__ == '__main__':
    conf = SparkConf().setAppName("test").setMaster("local[*]")
    sc = SparkContext(conf=conf)

    # 12 integers spread across 3 partitions.
    rdd = sc.parallelize([1, 3, 2, 5, 3, 6, 8, 4, 2, 4, 5, 1], 3)

    def process(iters):
        """Multiply every element of one partition's iterator by 100.

        Yields results lazily instead of building an intermediate list,
        so a large partition is never fully materialized in memory.
        """
        for value in iters:
            yield value * 100

    # Same result as map(), but with far fewer calls: map() passes
    # elements one at a time, while mapPartitions() hands the entire
    # partition iterator to process() in a single call.
    print(rdd.mapPartitions(process).collect())

    # Release cluster/driver resources held by the SparkContext.
    sc.stop()