import pandas as pd
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
import os

# Pin the remote execution environment so the job always uses this exact
# Spark install and Python interpreter, avoiding conflicts when multiple
# versions/environments exist on the cluster nodes.
os.environ['SPARK_HOME'] = '/export/server/spark'
os.environ["PYSPARK_PYTHON"] = "/root/anaconda3/bin/python"
os.environ["PYSPARK_DRIVER_PYTHON"] = "/root/anaconda3/bin/python"

# IDE shortcut: type "main" then Enter to expand the entry-point guard below
if __name__ == '__main__':
    print("通过自定义UDAF函数的方式, 来解决纵向迭代问题")

    # 1- Build the SparkSession: local mode, using every available core.
    spark = (
        SparkSession.builder
        .master('local[*]')
        .appName("_01_udaf")
        .getOrCreate()
    )

    # Enable Arrow-based columnar data transfer, which pandas UDFs rely on
    # for efficient JVM <-> Python exchange.
    spark.conf.set('spark.sql.execution.arrow.pyspark.enabled', True)

    # 2- Seed the demo data as a temporary view named t2.
    spark.sql("""create or replace temporary  view t2 (c1,c2,c3,c4) as values(1,1,6,1),
        (1,2,23,NULL),
        (1,3,8,NULL),
        (1,4,4,NULL),
        (1,5,10,NULL),
        (2,1,23,1),
        (2,2,14,NULL),
        (2,3,17,NULL),
        (2,4,20,NULL)""")

    @F.pandas_udf('float')  # grouped-aggregate pandas UDF; directly usable only in the DSL
    def udaf_fun(c3: pd.Series, c4: pd.Series) -> float:
        """Iteratively fold a window frame into a single value.

        Seeds the accumulator with the frame's first c4 value, then for each
        subsequent row averages the running value with that row's c3:
        tt = (tt + c3[i]) / 2.  Intended to be applied OVER a growing window
        (partition by c1 order by c2) so each output row sees rows 1..i.

        :param c3: c3 values of the current window frame
        :param c4: c4 values of the current window frame (only the first is read)
        :return: the folded value for the frame (0.0 for an empty frame)
        """
        # Use .iloc for positional access: c4[i] label-indexing only works by
        # accident when the Series carries a default RangeIndex.
        tt = 0.0
        for i in range(len(c3)):
            if i == 0:
                tt = c4.iloc[0]  # seed with the partition's first c4 value
            else:
                tt = (tt + c3.iloc[i]) / 2
        return tt

    # Register the UDAF so it can also be referenced by name in SQL text.
    spark.udf.register('udaf_fun', udaf_fun)

    # Apply the UDAF as a window aggregate: within each c1 partition, ordered
    # by c2, the default frame (UNBOUNDED PRECEDING .. CURRENT ROW) feeds a
    # growing window into the pandas UDF, producing the iterative c4 column.
    # NOTE: removed the trailing ';' — spark.sql() parses a single statement
    # and a semicolon raises a ParseException on many Spark versions.
    spark.sql("""
        select
           c1,
           c2,
           c3,
           udaf_fun(c3,c4) over (partition by c1 order by c2) as  c4
        from t2
    """).show()

    # Release local Spark resources before the script exits.
    spark.stop()
