# 导包
# from pyspark import SparkConf, SparkContext
# import os
#
# # 设置环境变量
# os.environ['PYSPARK_PYTHON'] = "D:/Python/Python3.10.11/python.exe"
# # 创建SparkConf对象
# conf = SparkConf().setAppName("TestApp").setMaster("local[*]")
#
# # 基于SparkConf创建SparkContext对象
# sc = SparkContext(conf=conf)

# # 通过parallelize从不同数据容器(list/tuple/str/set/dict)创建RDD并输出
# rddlist=sc.parallelize([1,2,3,4,5])
# rddtuple=sc.parallelize((1,2,3,4,5))
# rddstr=sc.parallelize("hello world")
# rddset=sc.parallelize({1,2,3,4,5})
# rdddict=sc.parallelize({"a":1,"b":2,"c":3,"d":4,"e":5})
# print(rddlist.collect(),rddtuple.collect(),rddstr.collect(),rddset.collect(),rdddict.collect())
# # 关闭SparkContext对象
# sc.stop()

# 读取文件
# lines=sc.textFile("/PythonLearn/B2/pout.txt")
# print(lines.collect())
# sc.stop()


# map操作
# ...
# rdd = sc.parallelize([1, 2, 3, 4, 5])
# def func(x):
#     return x * x
# print(rdd.map(func).collect())
# # 输出：[1, 4, 9, 16, 25]
#
# print(rdd.map(lambda x: x*x).collect())
# # 输出：[1, 4, 9, 16, 25]


# flatMap操作
# rdd=sc.parallelize(["hello world","hello spark","hello hadoop"])
# rdd2=rdd.flatMap(lambda x:x)
# # NOTE(review): flatMap over strings with lambda x:x flattens each string into
# # individual characters; to split into words use lambda x:x.split(" ")
# print(rdd2.collect())


# reduceByKey操作（按key聚合求和）
# rdd=sc.parallelize([("a",1),("b",2),("a",3),("c",4),("a",5)])
# rdd2=rdd.reduceByKey(lambda x,y:x+y)
# print(rdd2.collect())
# # 输出结果：[('b', 2), ('a', 9), ('c', 4)]


# 综合案例：统计hello.txt文件中每个单词出现的次数
# lines=sc.textFile("hello.txt")
# words=lines.flatMap(lambda x:x.split(" ")) # 切分单词
# # 统计每个单词出现的次数
# wordcount=words.map(lambda x:(x,1)).reduceByKey(lambda x,y:x+y)
# # 排序输出
# print(wordcount.sortBy(lambda x:x[1],ascending=False).collect())
# # 统计输出I出现了几次
# print(wordcount.filter(lambda x:x[0]=="I").collect())


# 过滤偶数
# rdd=sc.parallelize([1,2,3,4,5,6,7,8,9,10])
# rdd2=rdd.filter(lambda x:x%2==0)
# print(rdd2.collect())
# 输出结果：[2, 4, 6, 8, 10]
