from pyspark.sql import SparkSession,functions as F
from pyspark.sql.types import *
import pandas as pd
import decimal

# 代码中需要连接hive读取hive中的表数据
# spark.sql.warehouse.dir 指定数仓位置
# hive.metastore.uris 指定连接hive的metastore服务
# Build (or reuse) a Hive-enabled SparkSession.
#   spark.sql.warehouse.dir               -> warehouse location on HDFS
#   hive.metastore.uris                   -> Hive metastore thrift endpoint
#   spark.sql.execution.arrow...enabled   -> Arrow transfer for pandas UDFs
builder = (
    SparkSession.builder
    .config('spark.sql.warehouse.dir', 'hdfs://node1:8020/user/hive/warehouse')
    .config('hive.metastore.uris', 'thrift://node1:9083')
    .config('spark.sql.execution.arrow.pyspark.enabled', 'true')
    .enableHiveSupport()
)
spark = builder.getOrCreate()

# 使用pandas的Series 可以接受整列字段数据
# Grouped-aggregate pandas UDF: each pd.Series argument carries one whole
# window of column values, reduced here to a single 3-element decimal array.
@F.pandas_udf(returnType=ArrayType(DecimalType(17, 12)))
def func(qx_d: pd.Series, qx_ci: pd.Series, lx_d: pd.Series) -> list:
    """Roll the survivor count forward through one window and return the
    final state as ``[lx, dx_d, dx_ci]``.

    The first row of the window seeds the recursion with its raw values;
    every later row applies, in this order:

        lx[t]    = lx[t-1] - dx_d[t-1] - dx_ci[t-1]
        dx_d[t]  = lx[t] * qx_d[t]
        dx_ci[t] = lx[t] * qx_ci[t]

    Each step is quantized to 12 fractional digits. An empty window yields
    three zero Decimals.

    NOTE(review): the else-branch multiplies Decimals by raw series values,
    so the columns are expected to arrive as ``decimal.Decimal`` (Hive
    DECIMAL via Arrow) — confirm against the table schema.
    """
    twelve_places = decimal.Decimal('0.000000000000')
    lx = decimal.Decimal(0)
    dx_d = decimal.Decimal(0)
    dx_ci = decimal.Decimal(0)
    first = True
    # Iterate positionally via zip: robust even if the Series index is not
    # a default RangeIndex (label lookup lx_d[i] would break there).
    for lx_v, qd_v, qc_v in zip(lx_d, qx_d, qx_ci):
        if first:
            # Seed the recursion with the first row's raw values.
            lx = decimal.Decimal(lx_v)
            dx_d = decimal.Decimal(qd_v)
            dx_ci = decimal.Decimal(qc_v)
            first = False
        else:
            # Order matters: lx must absorb the *previous* deaths before
            # the current deaths are derived from the updated lx.
            lx = (lx - dx_d - dx_ci).quantize(twelve_places)
            dx_d = (lx * qd_v).quantize(twelve_places)
            dx_ci = (lx * qc_v).quantize(twelve_places)
    return [lx, dx_d, dx_ci]


# Expose the pandas UDF to Spark SQL under the name 'func'.
spark.udf.register('func', func)

# Materialize the result table: apply the windowed UDF per
# (sex, ppp, age_buy) group, ordered by policy_year.
# NOTE(review): a CTAS statement usually yields an empty DataFrame, so
# show() below may print no rows — verify this is intended.
result = spark.sql('create table insurance_dw.perm_scr5 as select age_buy,nursing_Age,sex,t_age,ppp,bpp,interest_rate,sa,policy_year,age,ppp_,bpp_,qx,kx,qx_ci,qx_d,lx,func(qx_d,qx_ci,lx_d) over(partition by sex,ppp,age_buy order by policy_year) as data_list from insurance_dw.perm_scr5_1')
result.show()

