from pyspark import SparkConf, SparkContext
import os

# Point PySpark at the Python interpreter to use for worker processes.
# NOTE(review): hard-coded Windows path — machine-specific; verify it matches
# the local install before running elsewhere.
os.environ['PYSPARK_PYTHON'] = 'D:/PYTHON/python3.10/python.exe'

# Local-mode Spark context using all available cores.
conf = SparkConf().setMaster('local[*]').setAppName('my_test_spark')
sc = SparkContext(conf=conf)

try:
    # Input data: distribute a local list as an RDD.
    rdd = sc.parallelize([1, 2, 3, 6, 7, 8])

    print(f'collect将rdd内容转换为list: {rdd.collect()}')
    # Pairwise aggregation: sums all elements.
    print(f'reduce对rdd内容进行自定义聚合：{rdd.reduce(lambda a, b: a + b)}')
    print(f'take取出rdd中前N个元素组成列表：{rdd.take(3)}')
    print(f'count统计rdd元素个数：{rdd.count()}')
finally:
    # Always release the SparkContext, even if an action above fails;
    # otherwise the JVM-backed context leaks.
    sc.stop()
