import decimal

import pandas as pd
import pyspark.sql.functions as F
from pyspark import SparkContext, SparkConf
from pyspark.shell import spark
from pyspark.sql import SparkSession
import os

# Pin the remote execution environment so the driver and executors all use
# the same Spark install and the same Python interpreter (avoids problems
# caused by multiple coexisting versions on the cluster).
for _env_key, _env_val in {
    'SPARK_HOME': '/export/server/spark',
    'PYSPARK_PYTHON': '/root/anaconda3/bin/python',
    'PYSPARK_DRIVER_PYTHON': '/root/anaconda3/bin/python',
}.items():
    os.environ[_env_key] = _env_val

# 快捷键:  main 回车
# if __name__ == '__main__':
#
#     @F.pandas_udf('decimal(17,12)')
#     def udaf_lx(lx: pd.Series, qx: pd.Series) -> decimal.Decimal:
#         tmp_lx = decimal.Decimal(0)
#         tmp_qx = decimal.Decimal(0)
#
#         for i in range(0,len(lx)):
#             if i == 0:
#                 tmp_lx = decimal.Decimal(lx[i])
#                 tmp_qx = decimal.Decimal(qx[i])
#             else:
#                 tmp_lx = (tmp_lx * (1- tmp_qx)).quantize(decimal.Decimal('0.000000000000'))
#                 tmp_qx = decimal.Decimal(qx[i])
#
#         return  tmp_lx
#
#
# spark.udf.register('udaf_lx', udaf_lx)
#
# spark.sql("""
#     create table if not exists insurance_dw.prem_src4_2 as
# select
#     age_buy,
#     Nursing_Age,
#     sex,
#     t_age,
#     ppp,
#     bpp,
#     interest_rate,
#     sa,
#     policy_year,
#     age,
#     ppp_,
#     bpp_,
#     qx,
#     kx,
#     qx_ci,
#     qx_d,
#     udaf_lx(lx,qx) over(partition by ppp,sex,age_buy order by policy_year) as lx
# from insurance_dw.prem_src4_1;
# """).show()