import decimal

import pandas as pd
import pyspark.sql.functions as F
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import os

# Pin the remote runtime environment so the job is not affected by
# multiple co-existing Python/Spark installations on the cluster nodes.
os.environ['SPARK_HOME'] = '/export/server/spark'
# os.environ["PYSPARK_PYTHON"] = "/root/anaconda3/bin/python"
# os.environ["PYSPARK_DRIVER_PYTHON"] = "/root/anaconda3/bin/python"
os.environ["PYSPARK_PYTHON"] = "/export/server/anaconda3/bin/python3"
os.environ["PYSPARK_DRIVER_PYTHON"] = "/export/server/anaconda3/bin/python3"

# Entry point of the insurance premium Spark job.
# (IDE shortcut: typing "main" + Enter expands this guard.)
if __name__ == '__main__':
    print("保险项目的spark程序的入口:")

    # 1- Create the SparkSession with Hive integration enabled:
    #    - warehouse dir / metastore URIs point at the remote Hive instance
    #    - shuffle partitions lowered to 4 for this small local[*] run
    #    - non-default UI port (4042) to avoid clashing with other drivers
    spark = SparkSession \
        .builder \
        .master("local[*]") \
        .appName("insurance_main") \
        .config("spark.sql.shuffle.partitions", 4) \
        .config("spark.sql.warehouse.dir", "hdfs://node1:8020/user/hive/warehouse") \
        .config("hive.metastore.uris", "thrift://node1:9083") \
        .config("spark.sql.legacy.createHiveTableByDefault", "false") \
        .config("spark.ui.port", "4042") \
        .enableHiveSupport() \
        .getOrCreate()


    # UDAF: compute lx (surviving policyholders) recursively over a window.
    @F.pandas_udf('decimal(17,12)')
    def udaf_lx(lx: pd.Series, qx: pd.Series) -> decimal.Decimal:
        """Accumulate lx over an ordered window partition.

        Recurrence: lx is seeded from the first row; for every later row,
        lx[i] = lx[i-1] * (1 - qx[i-1]), quantized to 12 decimal places.
        The value for the window's last row is returned.

        :param lx: series of lx values (only the first element seeds the loop)
        :param qx: series of mortality rates qx
        :return: accumulated lx as a Decimal with 12 fractional digits
                 (Decimal(0) when the window is empty)
        """
        # Fix: the return annotation was the `decimal` *module*; the function
        # actually returns a `decimal.Decimal` instance.
        tmp_lx = decimal.Decimal(0)
        tmp_qx = decimal.Decimal(0)

        for i, qx_val in enumerate(qx):
            if i == 0:
                # Seed from the first row of the window.
                tmp_lx = decimal.Decimal(lx[0])
            else:
                # Survivors this year = survivors last year * (1 - last year's qx).
                tmp_lx = (tmp_lx * (1 - tmp_qx)).quantize(decimal.Decimal('0.000000000000'))
            tmp_qx = decimal.Decimal(qx_val)

        return tmp_lx


    # UDAF: roll lx_d / dx_d / dx_ci forward together and emit them as one
    # comma-separated string (a UDAF can only return a single column).
    @F.pandas_udf('string')
    def udaf_3col(lx_d: pd.Series, qx_d: pd.Series, qx_ci: pd.Series) -> str:
        """Accumulate the (lx_d, dx_d, dx_ci) recurrence over an ordered window.

        Row 0 seeds the three accumulators straight from the input columns;
        each later row first deducts both decrements from lx_d, then derives
        the new dx_d / dx_ci from the updated lx_d.
        NOTE(review): qx_d / qx_ci values are multiplied without an explicit
        Decimal() conversion — assumes those series already hold Decimals;
        confirm against the upstream table's column types.

        :return: "lx_d,dx_d,dx_ci" for the last row of the window
        """
        twelve_places = decimal.Decimal('0.000000000000')
        cur_lx = decimal.Decimal(0)
        cur_dx_d = decimal.Decimal(0)
        cur_dx_ci = decimal.Decimal(0)

        for i, (d_rate, ci_rate) in enumerate(zip(qx_d, qx_ci)):
            if i == 0:
                # First row of the partition: take the values as given.
                cur_lx = decimal.Decimal(lx_d[0])
                cur_dx_d = decimal.Decimal(d_rate)
                cur_dx_ci = decimal.Decimal(ci_rate)
            else:
                # Remove last year's decrements, then recompute this year's.
                cur_lx = (cur_lx - cur_dx_d - cur_dx_ci).quantize(twelve_places)
                cur_dx_d = (cur_lx * d_rate).quantize(twelve_places)
                cur_dx_ci = (cur_lx * ci_rate).quantize(twelve_places)

        return ','.join((str(cur_lx), str(cur_dx_d), str(cur_dx_ci)))


    # Register both pandas UDAFs so Spark SQL can call them by name below.
    spark.udf.register('udaf_lx', udaf_lx)
    spark.udf.register('udaf_3col', udaf_3col)

    # Apply udaf_lx as a window aggregate per (ppp, sex, age_buy) partition,
    # ordered by policy_year, and display the result.
    # spark.sql("drop table if exists insurance_dw.prem_src4_2")
    # create table if not exists insurance_dw.prem_src4_2 as
    spark.sql("""

select
    age_buy,
    Nursing_Age,
    sex,
    t_age,
    ppp,
    bpp,
    interest_rate,
    sa,
    policy_year,
    age,
    ppp_,
    bpp_,
    qx,
    kx,
    qx_ci,
    qx_d,
    udaf_lx(lx,qx) over(partition by ppp,sex,age_buy order by policy_year) as lx
from insurance_dw.prem_src4_1""").show()

# spark.sql("drop table if exists insurance_dw.prem_src5_2")
# spark.sql("""create table if not exists insurance_dw.prem_src5_2 as
# select
#     age_buy,
#     Nursing_Age,
#     sex,
#     t_age,
#     ppp,
#     bpp,
#     interest_rate,
#     sa,
#     policy_year,
#     age,
#     ppp_,
#     bpp_,
#     qx,
#     kx,
#     qx_ci,
#     qx_d,
#     lx,
#     udaf_3col(lx_d,qx_d,qx_ci) over(partition by  ppp,sex,age_buy order by policy_year) as lx_d_dx_d_dx_ci
# from insurance_dw.prem_src5_1""").show()


