# -*- coding:utf-8 -*-
# @Author: shenyuyu
# @Time: 2023/6/29 20:12
# @File: qu_4_自定义udaf函数.py
import string
import time

from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StringType, IntegerType, ArrayType
from pyspark.sql import functions as F

if __name__ == '__main__':
    # 0. Build the SparkSession entry point (local mode, 2 shuffle partitions).
    spark = SparkSession.builder. \
        appName("test"). \
        master("local[*]"). \
        config("spark.sql.shuffle.partitions", 2). \
        getOrCreate()
    sc = spark.sparkContext

    # Source data: five integers spread across 3 partitions.
    rdd = sc.parallelize([1, 2, 3, 4, 5], 3)

    # Wrap each int in a one-element list so toDF can infer one column 'num'.
    df = rdd.map(lambda x: [x]).toDF(['num'])

    # Get the DataFrame's underlying RDD of Row objects collapsed to a single
    # partition. coalesce(1) avoids the full shuffle that repartition(1)
    # triggers when we are only *reducing* the partition count.
    rdd1 = df.rdd.coalesce(1)


    def sum_partition(rows):
        """Sum the 'num' field of every Row in one partition.

        mapPartitions expects an iterable back, so the single per-partition
        total is yielded rather than returned in a list. Avoids shadowing
        the builtin ``sum`` (the original used it as a variable name).
        """
        yield sum(row["num"] for row in rows)


    rdd2 = rdd1.mapPartitions(sum_partition)
    print(rdd2.collect())  # one total for the single partition: [15]

    # Release the Spark resources held by this session.
    spark.stop()