from pyspark import SparkContext, SparkConf

# Build the Spark configuration: application name "DataInput",
# running locally using all available cores ("local[*]").
conf = SparkConf().setAppName("DataInput").setMaster("local[*]")

# Create the SparkContext — the driver-side entry point for RDD operations.
# (PEP 8: no spaces around "=" in keyword arguments.)
sc = SparkContext(conf=conf)

# Create in-memory RDDs with parallelize() from several Python collection
# types. parallelize() consumes any iterable, so note two surprises:
#   - iterating a dict yields only its keys (the values are dropped)
#   - iterating a string yields individual characters
listData = [1, 2, 3, 4, 5]
rddList = sc.parallelize(listData)

tupleData = (6, 7, 8, 9, 10)
rddTuple = sc.parallelize(tupleData)

dictData = {"name": "Alice", "age": 25, "city": "Beijing"}
rddDict = sc.parallelize(dictData)  # elements are the dict keys only

setData = {11, 12, 13, 14, 15}
rddSet = sc.parallelize(setData)

stringData = "hello, world!"
rddString = sc.parallelize(stringData)  # elements are single characters

# Collect each RDD back to the driver and print its contents.
# (collect() is fine here because every dataset is tiny.)
for label, rdd in (
    ("list", rddList),
    ("tuple", rddTuple),
    ("dict", rddDict),
    ("set", rddSet),
    ("string", rddString),
):
    print(f"RDD of {label}: ", rdd.collect())

# Read a text file into an RDD (one element per line).
# NOTE(review): hard-coded, user-specific absolute path — this will fail on
# any other machine; consider a relative path or a command-line argument.
rddTxt = sc.textFile("/Users/lzy/Documents/workspace/python/study/practice/pyspark学习/aa.txt")
print("RDD of txt: ", rddTxt.collect())


# TODO: read a text file
# TODO: read a text file and create an RDD (basic case done above)
# TODO: read a JSON file and create an RDD
# TODO: read a CSV file and create an RDD

# Shut down the SparkContext and release the local Spark resources.
sc.stop()