from com.zsw.config.RemoteSparkConfig import RemoteSparkConfig

# Sample collections. Renamed so they no longer shadow the built-ins
# list/tuple/set/dict — shadowing them breaks any later call like list(...).
str_list = ['1', '2', '3']
demo_tuple = ('1', '2', '3')
demo_set = {'1', '2', '3'}
demo_dict = {'1': 'abc', '2': 'def', '3': 'ghi'}
# RemoteSparkConfig usage: build the config, then obtain a SparkContext.
config = RemoteSparkConfig("Test01")
context = config.get_spark_context()
rdd = context.parallelize(str_list)
# Approach 1: inline lambda.
# NOTE(review): map() is lazy and this result is never assigned or
# collected, so the original line performed no work; kept only as a
# syntax example.
rdd.map(lambda x: int(x) * 10)
# Approach 2: use a named function
def func(x):
    """Parse *x* as an integer and scale it by ten."""
    scaled = int(x) * 10
    return scaled
# Approach 2: pass a named function to map().
# (The original assigned rdd2 twice with the identical expression; the
# redundant duplicate assignment has been removed.)
rdd2 = rdd.map(func)
print(rdd2.collect())
# Chained transformations: multiply each element by 10, then add 5.
rdd_map = rdd.map(lambda x: int(x) * 10).map(lambda x: int(x) + 5)
print(rdd_map.collect())

# flatMap flattens the nested lists into a single flat RDD.
# (Variable renamed from `list` to stop shadowing the built-in.)
nested = [[1, 2, 3], [4, 5, 6], [7, 8]]
rdd4 = context.parallelize(nested)
flat_map = rdd4.flatMap(lambda x: x)
print(flat_map.collect())
# Output:
# [1, 2, 3, 4, 5, 6, 7, 8]

# reduceByKey sums the values that share the same key.
# (Variable renamed from `list` to stop shadowing the built-in.)
pairs = [('男', 99), ('男', 88), ('女', 77), ('女', 66), ('男', 55)]
rdd5 = context.parallelize(pairs)
rdd_key = rdd5.reduceByKey(lambda a, b: a + b)
print(rdd_key.collect())
# Output:
# [('女', 143), ('男', 242)]

# filter keeps only the elements for which the predicate is True
# (here: the even numbers).
list1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
rdd6 = context.parallelize(list1)
evens = rdd6.filter(lambda n: n % 2 == 0)
print(evens.collect())
# Output:
# [2, 4, 6, 8, 10]

# take: return the first n elements of the RDD as a Python list.
sample = [1, 2, 3, 4, 5]
rdd7 = context.parallelize(sample, numSlices=1)  # single partition
take = rdd7.take(3)
print(take)
# saveAsTextFile writes an RDD's partitions out as text files.
# BUG FIX: `take` is a plain Python list (the result of rdd7.take(3)) and
# has no saveAsTextFile method — the save must be invoked on an RDD.
# rdd7 was created with numSlices=1 above, so this produces one part file.
# NOTE(review): saveAsTextFile fails if the target directory already
# exists — confirm the path is fresh before running.
rdd7.saveAsTextFile("C:/Users/HLY/Desktop/test")
context.stop()