from pyspark import SparkConf, SparkContext
import os

# Point PySpark at the worker Python interpreter and the Hadoop install
# (winutils) it needs for filesystem output on Windows.
os.environ['PYSPARK_PYTHON'] = "C:/Python310/python.exe"
os.environ['HADOOP_HOME'] = "D:/dev-tools/hadoop-3.0.0"

# Run locally on all cores; parallelism of 1 forces the RDD into a single
# partition so saveAsTextFile emits one part-00000 file instead of many.
conf = SparkConf().setMaster("local[*]").setAppName("test_spark")
conf.set("spark.default.parallelism", "1")
sc = SparkContext(conf=conf)

try:
    rdd = sc.parallelize([1, 2, 3, 4, 5, 6, 7, 8, 9])

    # NOTE(review): saveAsTextFile raises if D:/spark_output already exists —
    # delete the directory (or choose a fresh path) before re-running.
    rdd.saveAsTextFile("D:/spark_output")
finally:
    # Always release driver/executor resources, even if the save fails;
    # the original script leaked the context by never calling stop().
    sc.stop()
