from pyspark import SparkConf, SparkContext
import os
# Point PySpark workers at a local Python interpreter.
# NOTE(review): machine-specific Windows path — prefer setting PYSPARK_PYTHON
# in the environment outside the script; confirm before deploying elsewhere.
os.environ['PYSPARK_PYTHON'] = r"C:\Users\admin\AppData\Local\Programs\Python\Python38\python.exe"

# Local-mode Spark context using all available cores.
conf = SparkConf().setMaster("local[*]").setAppName("test_spark")
sc = SparkContext(conf=conf)
try:
    # Build an RDD from an in-driver Python list.
    rdd = sc.parallelize([1, 2, 3, 4, 5])

    # collect: materialize the entire RDD on the driver as a Python list.
    rdd_list: list = rdd.collect()
    print(rdd_list)
    print(type(rdd_list))

    # reduce: pairwise aggregation over the RDD (here: sum of all elements).
    num = rdd.reduce(lambda a, b: a + b)
    print(num)

    # take: first N elements as a list (cheaper than collect for large RDDs).
    take_list = rdd.take(3)
    print(take_list)

    # count: number of elements in the RDD.
    num_count = rdd.count()
    print(num_count)
finally:
    # Always release Spark resources, even if an action above raises;
    # the original script leaked the context by never calling stop().
    sc.stop()