from pyspark import SparkConf
from pyspark import SparkContext

if __name__ == '__main__':
    conf = SparkConf().setMaster("local[*]").setAppName("spark01")
    sc = SparkContext(conf=conf)

    # Goal: for each key, take the maximum value within each partition,
    # then add those per-partition maxima together across partitions.

    # Sample data, spread over 2 partitions.
    lst = [("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8)]
    rdd1 = sc.parallelize(lst, 2)

    # zeroValue=0 is the per-partition accumulator seed. NOTE(review): this is
    # only correct because every value here is positive (max(0, v) == v); a
    # negative value would be masked by the zero seed.
    # seqFunc=max folds values for a key within one partition;
    # combFunc sums the partition-level maxima for that key.
    rdd2 = rdd1.aggregateByKey(zeroValue=0, seqFunc=max, combFunc=lambda a, b: a + b)

    print(rdd2.collect())

    # Shut down the SparkContext to release cluster resources.
    sc.stop()