from pyspark import SparkConf,SparkContext
import os
# Point PySpark worker processes at the local Python interpreter
# (hard-coded Windows path — adjust per machine).
os.environ['PYSPARK_PYTHON'] = 'D://Python3.10.7//python.exe'

# Create the SparkConf: run locally using all available cores.
conf = SparkConf().setMaster("local[*]").setAppName("My App")
# Create the SparkContext from that configuration.
sc = SparkContext(conf=conf)
# Print the Spark version.
print(sc.version)

# Build RDDs from in-memory collections (a list and a tuple).
rdd1 = sc.parallelize([1,2,3,4,5,6,7,8,9,10])
rdd2 = sc.parallelize((1,2,3,3,2,1))
# Read a text file into an RDD of lines (hard-coded path — adjust per machine).
rdd3 = sc.textFile("E://test//CurrentShift.txt")

def map_func(x):
    """Return the input doubled; used as a Spark map function below."""
    return 2 * x
# 1. map with a named function
rdd1 = rdd1.map(map_func)
# 2. map with an equivalent lambda
rdd2 = rdd2.map(lambda value: value * 2)
print(rdd1.collect())
print(rdd2.collect())
print(rdd3.collect())

# flatMap: flatten one level of nesting after mapping
rdd4 = sc.parallelize([[1,2],[3,4],[5,6]])
rdd5 = rdd4.flatMap(lambda nested: nested)
print(rdd5.collect()) #[1, 2, 3, 4, 5, 6]
# Splitting each string yields a list; flatMap merges them into one RDD
rdd6 = sc.parallelize(["1 2 1","3 2 4","8 7 9"])
rdd6 = rdd6.flatMap(lambda line: line.split(" "))
print(rdd6.collect()) #['1', '2', '1', '3', '2', '4', '8', '7', '9']

# reduceByKey: combine the values of each key pairwise
rdd7 = sc.parallelize([("a",1),("b",2),("a",3),("b",4),("a",5),("b",6)])
rdd8 = rdd7.reduceByKey(lambda left, right: left + right)
print("reduceByKey:",rdd8.collect()) #[('a', 9), ('b', 12)]
# NOTE(review): subtraction is neither associative nor commutative, so the
# result depends on how Spark partitions and combines the data; the value
# shown below is what one local run produced, not a guaranteed outcome.
rdd7 = sc.parallelize([("a",1),("b",2),("a",3),("b",4),("a",5),("b",6)])
rdd8 = rdd7.reduceByKey(lambda left, right: left - right)
print("reduceByKey:",rdd8.collect()) #[('b', -8), ('a', -7)]

# Shut down the SparkContext and release its resources.
sc.stop()