from pyspark.sql import SparkSession, functions as F
from pyspark.sql.types import *
import pandas as pd
import decimal

# A pandas Series lets the UDF receive an entire column of values at once.

# This job connects to Hive to read Hive table data.
# spark.sql.warehouse.dir points at the warehouse location on HDFS.
# hive.metastore.uris points at the Hive metastore service.
# Build a Hive-enabled SparkSession. Arrow transfer is enabled because the
# script below uses a pandas UDF, which Arrow accelerates.
spark = (
    SparkSession.builder
    .config('spark.sql.warehouse.dir', 'hdfs://node1:8020/user/hive/warehouse')
    .config('hive.metastore.uris', 'thrift://node1:9083')
    .config('spark.sql.execution.arrow.pyspark.enabled', 'true')
    .enableHiveSupport()
    .getOrCreate()
)


@F.pandas_udf(returnType=DecimalType(17, 12))
def func(qx: pd.Series, lx: pd.Series) -> decimal.Decimal:
    """Grouped-aggregate pandas UDF: running lx discounted by prior-row qx.

    Receives the full qx and lx columns of one window partition. The first
    row seeds the accumulators; every later row multiplies the running lx
    by (1 - previous row's qx), quantized to 12 decimal places.

    Bug fix vs. original: the return annotation was ``-> decimal`` (the
    *module*), which is not a valid scalar type hint for Spark's pandas-UDF
    type inference; it must be ``decimal.Decimal``.

    Returns Decimal(0) for an empty partition.
    """
    qx_prev = decimal.Decimal(0)
    lx_acc = decimal.Decimal(0)
    # Hoisted loop invariant: quantization target of 12 decimal places,
    # matching DecimalType(17, 12) declared on the UDF.
    twelve_places = decimal.Decimal('0.000000000000')
    for i in range(len(lx)):
        if i == 0:
            # First row of the partition: seed both accumulators.
            # .iloc is positional access — robust even if the Series carries
            # a non-default index (plain [i] is label-based lookup).
            qx_prev = decimal.Decimal(qx.iloc[i])
            lx_acc = decimal.Decimal(lx.iloc[i])
        else:
            # Later rows: apply the survival factor from the previous row's
            # qx, then remember this row's qx for the next iteration.
            lx_acc = (lx_acc * (1 - qx_prev)).quantize(twelve_places)
            qx_prev = decimal.Decimal(qx.iloc[i])
    return lx_acc


# Register the pandas UDF under the name 'func' so it can be referenced
# from Spark SQL statements (used in the window expression below).
spark.udf.register('func', func)

# Run the CTAS in Spark SQL: the registered UDF is applied as a window
# aggregate partitioned by sex/ppp/age_buy and ordered by policy_year.
# A CTAS statement yields an empty DataFrame, so show() prints no rows.
ctas_sql = (
    'CREATE TABLE dw.prem_src4 AS SELECT age_buy,nursing_age,sex,t_age,ppp,bpp,'
    'interest_rate,sa,policy_year,age,ppp_,bpp_,qx,kx,qx_ci,qx_d,'
    'func(qx,lx) OVER(PARTITION BY sex,ppp,age_buy ORDER BY policy_year) AS lx '
    'FROM dw.prem_src4_1'
)
df = spark.sql(ctas_sql)
df.show()
