from pyspark import SparkConf,SparkContext
import os
# Tell Spark workers which Python interpreter to use.
# NOTE(review): hard-coded Windows path — assumes this exact local install; adjust per machine.
os.environ["PYSPARK_PYTHON"] = "D:/Python3.10.7/python.exe"

# Local-mode Spark context using all available cores.
conf = SparkConf().setMaster("local[*]").setAppName("My App")
sc = SparkContext(conf=conf)

try:
    # Build the [1..10] RDD once and reuse it — the original script
    # re-created the identical collection three separate times.
    nums = sc.parallelize(list(range(1, 11)))
    print(nums)            # e.g. ParallelCollectionRDD[0] at readRDDFromFile at PythonRDD.scala:289
    print(nums.collect())  # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    # reduce: fold all elements with an associative binary operation.
    print(nums.reduce(lambda x, y: x + y))  # 55

    # take: return the first n elements of the RDD.
    p2 = sc.parallelize([1, 2, 3, 4, 5, 6, 7, 8])
    print(p2.take(3))  # [1, 2, 3]

    # count: number of elements in the RDD.
    print(nums.count())  # 10
finally:
    # Always release the SparkContext, even if an action above raises;
    # the original left the context dangling on any exception.
    sc.stop()