import os

from pyspark import StorageLevel
from pyspark.sql import SparkSession

# Point Spark at the local installation and pin the Python interpreter
# used by the driver and the executors to the same Anaconda binary.
os.environ['SPARK_HOME'] = '/export/server/spark'
os.environ["PYSPARK_PYTHON"] = "/export/server/anaconda3/bin/python"
os.environ["PYSPARK_DRIVER_PYTHON"] = "/export/server/anaconda3/bin/python"

# Build a SparkSession with Hive support enabled so Spark SQL can read
# from and write to the Hive metastore. shuffle.partitions is lowered to 4
# because this runs on a 2-core local master.
spark = (
    SparkSession.builder
    .master("local[2]")
    .appName("SparkSQLAppName")
    .config("spark.sql.shuffle.partitions", 4)
    .enableHiveSupport()
    .getOrCreate()
)

# 1. Create the target databases when they do not already exist.
for database in ("insurance_dw", "insurance_app"):
    spark.sql(f"CREATE DATABASE IF NOT EXISTS {database}")

# 2. Create the source tables when they are missing.
# The data files are assumed to exist already; these are minimal example schemas.
cv_src_ddl = """
    CREATE TABLE IF NOT EXISTS insurance_dw.cv_src (
        age_buy INT,
        sex STRING,
        ppp INT,
        bpp INT,
        policy_year INT,
        sa DOUBLE,
        cv_1a DOUBLE,
        cv_1b DOUBLE,
        sur_ben DOUBLE,
        np DOUBLE
    ) STORED AS PARQUET
"""
spark.sql(cv_src_ddl)

rsv_src_ddl = """
    CREATE TABLE IF NOT EXISTS insurance_dw.rsv_src (
        age_buy INT,
        sex STRING,
        ppp INT,
        bpp INT,
        policy_year INT,
        rsv2_re DOUBLE,
        rsv1_re DOUBLE,
        np_ DOUBLE
    ) STORED AS PARQUET
"""
spark.sql(rsv_src_ddl)

# 3. Run the core query: join the cash-value rows (cv_src) with the
#    reserve rows (rsv_src) on the shared policy dimensions.
df = spark.sql("""
    SELECT
        t1.age_buy,
        t1.sex,
        t1.ppp,
        t1.bpp,
        t1.policy_year,
        t1.sa,
        t1.cv_1a,
        t1.cv_1b,
        t1.sur_ben,
        t1.np,
        t2.rsv2_re,
        t2.rsv1_re,
        t2.np_
    FROM insurance_dw.cv_src t1 
    JOIN insurance_dw.rsv_src t2
        ON t1.age_buy = t2.age_buy 
        AND t1.ppp = t2.ppp 
        AND t1.sex = t2.sex 
        AND t1.policy_year = t2.policy_year
""")

# Cache the join result in memory, spilling to disk if it does not fit;
# count() is an action that forces the cache to materialize now, so the
# two downstream uses (CREATE TABLE LIKE + INSERT) reuse it.
df.persist(storageLevel=StorageLevel.MEMORY_AND_DISK).count()

# Register a temporary view over the cached result.
# createOrReplaceTempView (instead of createTempView) keeps this step
# idempotent: createTempView raises AnalysisException when 't1' already
# exists, e.g. on a re-run inside a reused getOrCreate() session.
df.createOrReplaceTempView('t1')

# 4. Write the joined result into the Hive APP-layer database.
# NOTE(review): CREATE TABLE ... LIKE against a temporary view is
# version-dependent in Spark SQL — confirm 't1' resolves on the target
# Spark version; otherwise derive the schema via CTAS instead.
spark.sql("""
    CREATE TABLE IF NOT EXISTS insurance_app.policy_actuary 
    LIKE t1
    STORED AS PARQUET
""")

spark.sql("""
    INSERT OVERWRITE TABLE insurance_app.policy_actuary
    SELECT * FROM t1
""")

# 5. Read the persisted table back and display it.
result = spark.sql("""
    SELECT * FROM insurance_app.policy_actuary
""")
result.show()

# Release the cached join result now that it has been written and shown,
# rather than holding MEMORY_AND_DISK storage until shutdown.
df.unpersist()

# Stop the SparkSession and free its resources.
spark.stop()
