from pyspark.sql import DataFrame, SparkSession, functions as F
from pyspark.sql.types import StringType
from pyspark.storagelevel import StorageLevel

"""
需求1：各省份销售额的统计
需求2：TOP3销售省份中，有多少店铺达到过日销售额1000+
需求3：TOP3省份中，各省平均单单价
需求4：TOP3省份中，各个省份的支付类型比例

receivable: 订单金额
storeProvince: 店铺省份
dateTS: 订单销售日期
payType: 支付类型
storeID: 店铺ID
"""

if __name__ == '__main__':
    # Build a Spark session with Hive support so results can be written out
    # as Hive tables as well as to MySQL.
    spark = (
        SparkSession.builder
        .appName("SparkSQL Example")
        .master("local[*]")
        .config("spark.sql.shuffle.partitions", 3)
        .config("spark.sql.warehouse.dir", "hdfs://laoban-test/user/hive/warehouse")
        .config("hive.metastore.uris", "thrift://11.50.138.181:9083,thrift://11.50.138.179:9083")
        .enableHiveSupport()
        .getOrCreate()
    )

    # 1. Load the raw orders.
    #    - storeProvince may be missing OR hold the literal string 'null'; drop both
    #    - single orders above 10000 are known test records and are excluded
    df = (
        spark.read.format("json").load("../../data/input/mini.json")
        .dropna(thresh=1, subset=["storeProvince"])
        .filter("storeProvince != 'null'")
        .filter("receivable < 10000")
        .select("storeProvince", "storeID", "receivable", "dateTS", "payType")
    )
    df.show()

    # TODO Requirement 1: total sales per province, rounded to 2 decimals,
    # highest-selling province first.
    province_sale_df: DataFrame = (
        df.groupBy("storeProvince")
        .agg(F.round(F.sum("receivable"), 2).alias("money"))
        .orderBy(F.desc("money"))
    )
    province_sale_df.show(truncate=False)

    # Persist requirement-1 results to MySQL.
    (
        province_sale_df.write.mode("overwrite")
        .format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/test?useSSL=false&useUnicode=true")
        .option("dbtable", "province_sale")
        .option("user", "root")
        .option("password", "Password")
        .option("encoding", "utf-8")
        .save()
    )

    # saveAsTable writes a managed Hive table; requires Spark-on-Hive to be
    # configured (see the metastore settings above).
    province_sale_df.write.mode("overwrite").saveAsTable("default.province_sale", "parquet")

    # TODO Requirement 2: within the TOP3 provinces by sales, how many stores
    # reached a single-day revenue of 1000+ at least once.
    # 2.1 Pick the TOP3 provinces (province_sale_df is already sorted by sales desc).
    top3_province_df = province_sale_df \
        .limit(3) \
        .select("storeProvince") \
        .withColumnRenamed("storeProvince", "top3_province")

    # 2.2 Inner join back to the raw orders so only TOP3-province rows remain.
    top3_province_df_joined = df.join(top3_province_df, on=[df["storeProvince"] == top3_province_df["top3_province"]])
    # Reused by requirements 3 and 4 below, so cache it.
    top3_province_df_joined.persist(StorageLevel.MEMORY_AND_DISK)

    # e.g. Guangdong / store 1 / 2021-01-01 / 9999
    #      Guangdong / store 2 / 2021-01-02 / 9999
    # Sum revenue per (province, store, day). dateTS appears to be a millisecond
    # epoch, so the first 10 characters are the epoch seconds for from_unixtime
    # — TODO confirm against the data. A store may hit the threshold on several
    # days, so keep a single row per store before counting.
    # FIX: the filter was "money > 1", which counted nearly every store; the
    # requirement is a daily revenue of 1000+.
    province_hot_store_count_df: DataFrame = top3_province_df_joined \
        .groupBy("storeProvince", "storeID", F.from_unixtime(df["dateTS"].substr(0, 10), "yyyy-MM-dd").alias("day")) \
        .sum("receivable").withColumnRenamed("sum(receivable)", "money") \
        .filter("money > 1000") \
        .dropDuplicates(subset=["storeID"]) \
        .groupBy("storeProvince").count()
    province_hot_store_count_df.show()

    # Persist requirement-2 results to MySQL.
    province_hot_store_count_df.write.mode("overwrite") \
        .format("jdbc") \
        .option("url", "jdbc:mysql://localhost:3306/test?useSSL=false&useUnicode=true") \
        .option("dbtable", "province_hot_store_count") \
        .option("user", "root") \
        .option("password", "Password") \
        .option("encoding", "utf-8") \
        .save()
    # Persist requirement-2 results to Hive.
    province_hot_store_count_df.write.mode("overwrite").saveAsTable("default.province_hot_store_count", "parquet")

    # TODO Requirement 3: average order value (per-order average) for each of
    # the TOP3 provinces, rounded to 2 decimals, highest first.
    top3_province_order_avg_df: DataFrame = (
        top3_province_df_joined
        .groupBy("storeProvince")
        .agg(F.round(F.avg("receivable"), 2).alias("money"))
        .orderBy(F.desc("money"))
    )
    top3_province_order_avg_df.show(truncate=False)

    # Persist requirement-3 results to MySQL and Hive.
    (
        top3_province_order_avg_df.write.mode("overwrite")
        .format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/test?useSSL=false&useUnicode=true")
        .option("dbtable", "province_hot_store_avg")
        .option("user", "root")
        .option("password", "Password")
        .option("encoding", "utf-8")
        .save()
    )
    top3_province_order_avg_df.write.mode("overwrite").saveAsTable("default.province_hot_store_avg", "parquet")


    def udf_func(percent):
        """Format a ratio (e.g. 0.25) as a percentage string (e.g. '25.0%')."""
        return f"{round(percent * 100, 2)}%"


    # Wrap the formatter as a Spark UDF returning a string column.
    my_udf = F.udf(udf_func, StringType())

    # TODO Requirement 4: payment-type share per province within the TOP3 provinces.
    top3_province_df_joined.createOrReplaceTempView("province_pay")
    # The window count per province is the denominator; grouping by
    # (province, payType) gives each type's order count, so
    # count(payType) / total is that payment type's share.
    pay_type_df = spark.sql("""
        select storeProvince, payType, (count(payType) / total) as percent from
        (select storeProvince, payType, count(1) over(partition by storeProvince) as total from province_pay) as sub
        group by storeProvince, payType, total
    """)
    # Render the raw ratio as a human-readable percentage string.
    pay_type_df = pay_type_df.withColumn("percent", my_udf("percent"))
    pay_type_df.show()

    # Persist requirement-4 results to MySQL and Hive.
    (
        pay_type_df.write.mode("overwrite")
        .format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/test?useSSL=false&useUnicode=true")
        .option("dbtable", "pay_type")
        .option("user", "root")
        .option("password", "Password")
        .option("encoding", "utf-8")
        .save()
    )
    pay_type_df.write.mode("overwrite").saveAsTable("default.pay_type", "parquet")

    # Done with the cached join result.
    top3_province_df_joined.unpersist()
