# 3) Export the insurance actuarial result table to MySQL:
import decimal

import pandas as pd
from pyspark import StorageLevel

from pyspark.sql import functions as F
from pyspark.sql import SparkSession
import os
# Point Spark at the cluster installation and make both the executors
# (PYSPARK_PYTHON) and the driver (PYSPARK_DRIVER_PYTHON) use the Anaconda
# interpreter. NOTE(review): assumes the same interpreter path exists on
# every node — verify against the cluster layout.
os.environ['SPARK_HOME'] = '/export/server/spark'
os.environ["PYSPARK_PYTHON"] = "/export/server/anaconda3/bin/python"
os.environ["PYSPARK_DRIVER_PYTHON"] = "/export/server/anaconda3/bin/python"

# Build a Hive-enabled local-mode session. The warehouse directory and
# metastore URI both point at the cluster (node1); shuffle partitions are
# lowered to 4 to suit the small local workload.
session_builder = (
    SparkSession.builder
    .master("local[*]")
    .appName("insurance_main")
    .config("spark.sql.shuffle.partitions", 4)
    .config("spark.sql.warehouse.dir", "hdfs://node1:8020/user/hive/warehouse")
    .config("hive.metastore.uris", "thrift://node1:9083")
    .enableHiveSupport()
)
spark = session_builder.getOrCreate()

# Join the cash-value table (cv_src) with the reserve table (rsv_src) on the
# full policy key: age at purchase, sex, premium-payment period (ppp) and
# policy year. The combined row carries both cash-value and reserve columns.
# FIX: dropped the trailing ';' inside the query — spark.sql() parses a
# single statement and a trailing semicolon can raise a ParseException.
df = spark.sql("""
        select
            t1.age_buy,
            t1.sex,
            t1.ppp,
            t1.bpp,
            t1.policy_year,
            t1.sa,
            t1.cv_1a,
            t1.cv_1b,
            t1.sur_ben,
            t1.np,
            t2.rsv2_re,
            t2.rsv1_re,
            t2.np_
        from insurance_dw.cv_src t1 join  insurance_dw.rsv_src t2
            on t1.age_buy = t2.age_buy and t1.ppp = t2.ppp and t1.sex = t2.sex and t1.policy_year = t2.policy_year
    """)
# Cache the join result in memory, spilling to disk if it does not fit.
# count() is an action that forces immediate materialization so the two
# downstream writes (Hive and MySQL) both reuse the cached data instead of
# re-running the join.
df.persist(storageLevel=StorageLevel.MEMORY_AND_DISK).count()

# 3.1 Load the joined result into the Hive APP-layer table.
# Register the DataFrame as temp view 't1' so it can be read from plain SQL.
df.createTempView('t1')
hive_insert_sql = """
        insert overwrite table insurance_app.policy_actuary
        select  * from  t1
    """
spark.sql(hive_insert_sql)
# 3.2 将这个结果灌入到 mysql的APP层库中
# 3.2 Load the same result into the MySQL APP-layer database, replacing any
# existing table ('overwrite' mode).
# SECURITY NOTE(review): credentials are hard-coded; move user/password into
# a config file or environment variables before deploying.
df.write.jdbc(
    url="jdbc:mysql://node1:3306/insurance_app?createDatabaseIfNotExist=true&serverTimezone=UTC&characterEncoding=utf8&useUnicode=true",
    table='policy_actuary',
    mode='overwrite',
    properties={'user': 'root', 'password': '123456'},
)

# Both sinks are written: release the cached DataFrame and stop the session
# so the script exits cleanly instead of leaking the cache and connections.
df.unpersist()
spark.stop()