from pyspark import SparkConf, SparkContext

# /Users/study/python
# Create the SparkConf object that configures this application.
# Each setter mutates the conf and returns it, so the three calls
# below could equivalently be written as one chained expression:
#   conf = SparkConf().setMaster("local[*]").setAppName("test_spark_app")
conf = SparkConf()
conf.setMaster("local[*]")      # run locally, using all available cores
conf.setAppName("test_spark_app")


# Create the SparkContext (the driver's entry point) from the config.
sc = SparkContext(conf=conf)


# 1. Loading data
# From in-memory Python collections
# my_list: list = [1, 2, 3, 4, 5]
# my_tuple: tuple = (1, 2, 3, 4, 5)
# my_set: set = {1, 2, 3, 4, 5}
# my_dict: dict = {"1": 1, "2": 2}
# my_str: str = "hello world"
#
# rdd1 = sc.parallelize(my_list)
# rdd2 = sc.parallelize(my_tuple)
# rdd3 = sc.parallelize(my_set)
# rdd4 = sc.parallelize(my_dict)
# rdd5 = sc.parallelize(my_str)
#
# print(rdd1.collect())
# print(rdd2.collect())
# print(rdd3.collect())
# print(rdd4.collect())
# print(rdd5.collect())
#
# print(sc.version)
# From a text file
# rdd = sc.textFile("/Users/study/python/hello.txt")

# 2. Transforming data
rdd = sc.parallelize([1, 2, 3, 4, 5])

# map (T -> U): apply a function to every element; the return type
# may differ from the input type (T -> T would require the same type).
rdd2 = rdd.map(lambda x: x * 10)
print(rdd2.collect())

# flatMap: like map, but additionally flattens one level of nesting, e.g.:
#   words = sc.parallelize(["one 1", "two 2", "three 3"])
#   words.flatMap(lambda s: s.split(" "))   # -> ["one", "1", "two", "2", ...]

# reduceByKey: group (key, value) pairs by key and fold the values per key.
# Here the keys are '男'/'女' and the values are summed per key.
rdd4 = sc.parallelize([('男', 99), ('女', 88), ('男', 77), ('女', 66)])
rdd5 = rdd4.reduceByKey(lambda x, y: x + y)
print(rdd5.collect())

# filter: keep only the elements for which the predicate is True
# (here: even numbers).
rdd6 = sc.parallelize([1, 2, 3, 4, 5, 5])
rdd7 = rdd6.filter(lambda x: x % 2 == 0)
print(rdd7.collect())

# distinct: remove duplicate elements.
rdd8 = rdd6.distinct().collect()
print(rdd8)

# sortBy(keyfunc, ascending=True/False, numPartitions): sort by the value
# extracted by keyfunc — not demonstrated here.
sc.stop()

