# 1. Set up the Spark environment
from pyspark.context import SparkContext

# 1. Create the Spark execution context (local mode).
sc = SparkContext(master='local', appName='demo2_map')

# 2. Read the raw student data: one comma-separated line per student.
students_rdd = sc.textFile("../../data/students.txt")

# students_rdd = students_rdd.filter(lambda line: line.split(",")[-1] == "文科一班")

# Goal: compute the average age of each class.


def _class_and_age(line):
    """Parse one CSV student line into (class_name, age), splitting only once.

    The original lambda called line.split(",") twice per record;
    splitting once halves the parsing work.
    """
    fields = line.split(",")
    return fields[-1], int(fields[2])


# Extract (class, age) key-value pairs.
kv_rdd = students_rdd.map(_class_and_age)


# reduceByKey: pairwise-aggregates the values that share the same key.
def reduce_fun(x, y):
    """Combine two partial age sums for the same class key.

    Called pairwise by reduceByKey; ``x`` and ``y`` are either raw ages
    or partial sums. The debug prints were removed — they executed once
    per merge on the executors and flooded stdout.
    """
    return x + y


# Total age per class: pairwise-sum all ages that share a class key.
reduce_by_key_rdd = kv_rdd.reduceByKey(reduce_fun)


# reduce_by_key_rdd.foreach(print)


# Compute the maximum age per class.
def max_age_fun(x, y):
    """Return the larger of two ages; on a tie the second value wins."""
    return x if x > y else y


# Maximum age per class: pairwise max over each class's ages.
max_age_rdd = kv_rdd.reduceByKey(max_age_fun)

# max_age_rdd.foreach(print)

# Average age: pair each student as (class, (age, 1)) so that the total
# age and the head count can be reduced together in one pass.
def _class_age_count(line):
    """Parse one CSV student line into (class_name, (age, 1)), splitting only once.

    The original lambda called line.split(",") twice per record.
    """
    fields = line.split(",")
    return fields[-1], (int(fields[2]), 1)


kvs_rdd = students_rdd.map(_class_age_count)

# Accumulate the total age and the total head count at the same time
def avg_age_fun(x, y):
    """Merge two (age_sum, count) partial aggregates for the same class key."""
    age_x, count_x = x
    age_y, count_y = y
    return age_x + age_y, count_x + count_y

# reduceByKey pre-aggregates values for each key on the map side, so it
# is more efficient than groupByKey (less data shuffled).
avg_avg_reduce_rdd = kvs_rdd.reduceByKey(avg_age_fun)

# Divide total age by head count, rounded to 2 decimal places.
# Example element before the map: ('理科二班', (1782, 79))
avg_age_rdd = avg_avg_reduce_rdd.map(lambda kv: (kv[0], round(kv[1][0] / kv[1][1],2)))
avg_age_rdd.foreach(print)


# Keep the driver process alive — presumably so the Spark web UI stays
# reachable (TODO confirm). Sleeping instead of `pass` avoids the busy
# spin that pinned one CPU core at 100%.
import time

while True:
    time.sleep(1)