from pyspark.context import SparkContext

# Spin up a local Spark context for this small RDD-actions demo.
sc = SparkContext(master="local", appName="word_count")

# Load the student records (presumably one CSV-like line per student —
# the age is read from the third comma-separated field below).
students_rdd = sc.textFile("../../data/students.txt")

# 1. foreach: apply the given function to every line of the RDD
#    (two equivalent spellings: an explicit lambda, then `print` directly).
students_rdd.foreach(lambda row: print(row))
students_rdd.foreach(print)

# 2. count: number of lines in the RDD.
count = students_rdd.count()
print(f"count:{count}")

# 3. sum: total of the age column (index 2 after splitting on commas).
sum_age = students_rdd.map(lambda record: int(record.split(",")[2])).sum()
print(f"sum_age:{sum_age}")
print(f"avg_age:{sum_age / count}")


# 4. reduce: global aggregation of the ages with a binary function.
reduce = students_rdd.map(lambda record: int(record.split(",")[2])).reduce(lambda left, right: left + right)
print(f"reduce:{reduce}")

# 5. collect: pull the whole RDD back to the driver as a Python list.
students_list = students_rdd.collect()
print(students_list)


# 6. take: fetch the first n elements.
take = students_rdd.take(2)
print(take)

# 7. saveAsTextFile: persist the RDD contents to a directory on disk.
# students_rdd.saveAsTextFile("../../data/student_save")

def fun(iter):
    """Print every element of one partition's iterator, one per line.

    Intended as the callback for ``RDD.foreachPartition``; returns None.
    """
    # NOTE(review): the parameter name shadows the builtin `iter`; kept
    # unchanged because it is part of the original signature.
    for element in iter:
        print(element)


# 8. foreachPartition: call `fun` once per partition, passing it an
#    iterator over that partition's elements (fewer calls than foreach).
students_rdd.foreachPartition(fun)

