# from pyspark import SparkContext

# sc = SparkContext('local')
# old = sc.parallelize([1,2,3,4,5], 2)
# newMap = old.map(lambda x:(x, x**2))
# newReduce = old.reduce(lambda a,b: a + b)
# print(newMap.glom().collect())
# print(newReduce)

from pyspark import SparkContext

# Demo script: basic RDD transformations (map / flatMap / filter / distinct)
# on a small local dataset, ending with the sum of the squares.
sc = SparkContext('local')
try:
    # The second argument (2) is the number of partitions; the default is 1.
    old = sc.parallelize([1, 2, 3, 4, 5], 2)

    # Pair each element with its square: [(1, 1), (2, 4), (3, 9), ...]
    newMap = old.map(lambda x: (x, x ** 2))

    # For each x emit x, x + 1 and x**2, then keep values < 6 and de-duplicate.
    # NOTE: RDD transformations are lazy and no action is run on these below,
    # so they exist for demonstration only and trigger no computation.
    newFlatMap = old.flatMap(lambda x: (x, x + 1, x ** 2))
    newFlatMapFilter = newFlatMap.filter(lambda x: x < 6)
    newDistintFlatMapFilter = newFlatMapFilter.distinct()

    # Sum of the squares: 1 + 4 + 9 + 16 + 25 = 55
    print(newMap.values().sum())
finally:
    # Always release the SparkContext, even if an action above fails,
    # so the local Spark JVM and its resources are shut down cleanly.
    sc.stop()