import pandas as pd
from pyspark.sql import SparkSession, functions as F
from pyspark.sql.types import *
import decimal

# Build the SparkSession connected to the Hive warehouse:
#   spark.sql.warehouse.dir                  -> warehouse location on HDFS
#   hive.metastore.uris                      -> Hive metastore endpoint
#   spark.sql.execution.arrow.pyspark.enabled -> Arrow-backed pandas conversion
spark = (
    SparkSession.builder
    .config('spark.sql.warehouse.dir', 'hdfs://node1:8020/user/hive/warehouse')
    .config('hive.metastore.uris', 'thrift://node1:9083')
    .config('spark.sql.execution.arrow.pyspark.enabled', 'true')
    .enableHiveSupport()
    .getOrCreate()
)


# Pandas UDF used as a windowed aggregate: each call receives the window's
# rows as whole pandas Series and must return a single Decimal.
# Business rule: when policy_year == 1 (the first row of the partition),
# lx is taken as-is; for every later row,
#   lx = (previous row's dx + previous row's lx) / 2
# NOTE(review): inputs are read through int(), so any fractional part of
# dx (and of the seed lx) is truncated — confirm those columns are
# integral in itcast.tb2_lx_1.
@F.pandas_udf(returnType=DecimalType(10,4))
def func(dx: pd.Series, lx: pd.Series) -> decimal.Decimal:
    four_places = decimal.Decimal('0.0000')
    two = decimal.Decimal(2)
    prev_dx = decimal.Decimal(0)   # dx carried over from the previous row
    result = decimal.Decimal(0)    # running lx value (returned at the end)
    for pos in range(len(lx)):
        if pos == 0:
            # First row of the window: seed state from the raw columns.
            prev_dx = decimal.Decimal(int(dx[pos]))
            result = decimal.Decimal(int(lx[pos]))
        else:
            # lx of this row = average of the previous row's dx and lx.
            result = ((prev_dx + result) / two).quantize(four_places)
            # dx is not derived; just pick up the current row's value.
            prev_dx = decimal.Decimal(int(dx[pos])).quantize(four_places)
    return result


# Make the pandas UDF callable from SQL under the name `func_udaf`.
spark.udf.register('func_udaf', func)

# CTAS: persist the windowed lx computation into itcast.tb3.
# NOTE(review): a CREATE TABLE ... AS statement returns an empty
# DataFrame, so show() prints no rows — kept for parity with the
# original script.
result_df = spark.sql(
    'create table itcast.tb3 as  select sex,policy_year,dx,func_udaf(dx,lx_1) over(partition by sex order by policy_year) as lx from itcast.tb2_lx_1 ')
result_df.show()
