from pyspark.sql import SparkSession

if __name__ == '__main__':
    # Entry point: build a local SparkSession with a small shuffle
    # partition count (2), suitable for single-machine testing.
    spark = SparkSession.builder. \
        appName("test"). \
        master("local[*]"). \
        config("spark.sql.shuffle.partitions", 2). \
        getOrCreate()

    sc = spark.sparkContext

    # Lift a 3-partition RDD of ints into a single-column DataFrame.
    rdd = sc.parallelize([1, 2, 3, 4, 5], 3)
    df = rdd.map(lambda x: [x]).toDF(['num'])
    df.show()

    # Workaround: emulate a UDAF-style aggregation with the RDD
    # mapPartitions operator. The data MUST sit in a single partition,
    # otherwise mapPartitions would emit one partial sum per partition.
    # coalesce(1) narrows to one partition without the full shuffle that
    # repartition(1) triggers; the sum is order-independent, so the
    # aggregation result is identical.
    # NOTE(review): variable name has a typo (singe -> single); kept as-is
    # because later statements reference it.
    singe_partition_rdd = df.rdd.coalesce(1)
    print(singe_partition_rdd.collect())


    def process(rows):
        """Sum the 'num' field of every row in one partition.

        Intended for ``mapPartitions`` on a single-partition RDD, so the
        one-element list it returns holds the global sum.

        :param rows: iterable of Row-like objects indexed by 'num'
        :return: single-element list ``[total]`` (mapPartitions expects
                 an iterable of output elements)
        """
        # Locals renamed: the original shadowed the builtins `iter`
        # (parameter) and `sum` (accumulator). mapPartitions calls this
        # function positionally, so the parameter rename is safe.
        total = 0
        for row in rows:
            total += row['num']
        return [total]


    # Run the per-partition aggregation and show the single summed value.
    aggregated = singe_partition_rdd.mapPartitions(process).collect()
    print(aggregated)
