# coding:utf8
import findspark

# Locate the local Spark installation and put pyspark on sys.path.
findspark.init()
from pyspark import SparkConf, SparkContext
# Reference: "『pyspark』0: installing, configuring and using Spark", olizxq's blog on CSDN:
# https://blog.csdn.net/olizxq/article/details/118249447
if __name__ == '__main__':
    # Run locally, using all available CPU cores.
    conf = SparkConf().setAppName("test").setMaster("local[*]")
    sc = SparkContext(conf=conf)

    # Distribute the input list across 3 partitions.
    rdd = sc.parallelize([1, 3, 2, 4, 7, 9, 6], 3)
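
    # A quick sanity check (not in the original script): confirm the RDD
    # was indeed split into 3 partitions.
    print(rdd.getNumPartitions())  # 3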


    def process(partition):
        # mapPartitions hands this function an iterator over one partition's
        # elements; yielding keeps the transformation lazy instead of
        # materializing a list per partition. (Renamed the parameter so it
        # no longer shadows the built-in iter.)
        for item in partition:
            yield item * 10


    # process is invoked once per partition (3 times here), not once per element.
    print(rdd.mapPartitions(process).collect())
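
    # For comparison, a minimal sketch of the same transformation with map,
    # which invokes its function once per element (7 times here) rather than
    # once per partition; both calls print [10, 30, 20, 40, 70, 90, 60].
    print(rdd.map(lambda x: x * 10).collect())

    # glom() collects each partition into a list, making the 3-way split visible.
    print(rdd.glom().collect())

    sc.stop()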
