# -*- coding:utf-8 -*-
# @Author: shenyuyu
# @Time: 2023/6/30 15:32
# @File: qu_1.py
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StringType, IntegerType, DoubleType, TimestampType
from pyspark.sql import functions as F

if __name__ == '__main__':
    # Build a local Spark session with Hive support so that results can be
    # persisted both to MySQL (via JDBC) and to the Hive warehouse on HDFS.
    spark = SparkSession.builder \
        .appName("s") \
        .master("local[*]") \
        .config("spark.sql.shuffle.partitions", "2") \
        .config("spark.sql.warehouse.dir", "hdfs://hadoop1:9820/user/hive/warehouse") \
        .config("hive.metastore.uris", "thrift://hadoop1:9083") \
        .enableHiveSupport() \
        .getOrCreate()

    # Load the raw sales records and clean them up:
    #  - keep only rows that have a storeProvince value,
    #  - drop rows whose province is the literal string 'null',
    #  - drop rows whose receivable amount is 10000 or more (outliers),
    # then project just the columns the four analyses below need.
    raw = spark.read.format("json") \
        .load("file:///tmp/pycharm_project_161/data/mini.json")
    df = raw.dropna(thresh=1, subset=['storeProvince']) \
        .filter("storeProvince != 'null'") \
        .filter("receivable < 10000") \
        .select("storeProvince", "storeID", "receivable", "dateTS", "payType")
    print("原数据")
    df.show()

    # todo 1. Sales metric per province: total revenue of each province
    print("各省 销售 指标 每个省份的销售额统计")
    # Sum receivable per province, round to 2 decimals, highest revenue first.
    province_sales = df.groupby("storeProvince") \
        .agg(F.round(F.sum("receivable"), 2).alias("receivable_sum")) \
        .orderBy(F.desc("receivable_sum"))
    province_sales.show()
    # Persist to MySQL via JDBC.
    province_sales.write.mode("overwrite").format("jdbc") \
        .option("url", "jdbc:mysql://hadoop1:3306/bigdata?useSSL=false&useUnicode=true&characterEncoding=utf8") \
        .option("dbtable", "province_sale") \
        .option("user", "root") \
        .option("password", "123456") \
        .option("encoding", "utf-8") \
        .save()
    # Persist to the Hive warehouse as a Parquet table.
    province_sales.write.mode("overwrite").saveAsTable("myhive.province_sales", format="parquet")

    # todo 2. Among the TOP-3 provinces by revenue, how many stores have
    #         daily sales of 1000+?
    print("TOP3 销售省份中, 有多少家店铺 日均销售额 1000+")
    # Top-3 provinces by total revenue. province_sales is already sorted
    # descending, so the first three rows are the top sellers; the column is
    # renamed to avoid ambiguity in the join below.
    top3_province = province_sales.select("storeProvince") \
        .limit(3).withColumnRenamed("storeProvince", "top3_province")
    # top3_province.show()
    # Restrict the cleaned data to stores in the top-3 provinces and derive a
    # calendar date from the timestamp. dateTS appears to be a millisecond
    # epoch value, so the first 10 characters are the seconds part — TODO
    # confirm against the source data. Column.substr is 1-based in Spark: the
    # original passed 0, which Spark silently coerces to 1; use 1 explicitly.
    top3_province_store = df.join(top3_province, on=df["storeProvince"] == top3_province["top3_province"]) \
        .withColumn("date", F.from_unixtime(df["dateTS"].substr(1, 10), "yyyy-MM-dd"))
    # Total revenue per store per day.
    top3_province_store_receivable_daily_sum = top3_province_store.groupby("storeProvince", "storeID", "date") \
        .sum("receivable") \
        .withColumnRenamed("sum(receivable)", "receivable_sum").orderBy("receivable_sum", ascending=False)
    # Keep only store-days above 1000, collapse to one row per store, then
    # count the qualifying stores of each province.
    # NOTE(review): dropDuplicates keeps an arbitrary row per storeID; the
    # per-province count is only stable if a storeID never spans two
    # provinces — verify against the data.
    top3_province_store_receivable_daily_sum_dayu1000 = top3_province_store_receivable_daily_sum \
        .where(top3_province_store_receivable_daily_sum["receivable_sum"] > 1000) \
        .dropDuplicates(subset=["storeID"]) \
        .groupby("storeProvince").count()
    top3_province_store_receivable_daily_sum_dayu1000.show()
    # Persist to MySQL via JDBC.
    top3_province_store_receivable_daily_sum_dayu1000.write.mode("overwrite").format("jdbc") \
        .option("url", "jdbc:mysql://hadoop1:3306/bigdata?useSSL=false&useUnicode=true&characterEncoding=utf8") \
        .option("dbtable", "top3_province_store_receivable_daily_sum_dayu1000") \
        .option("user", "root") \
        .option("password", "123456") \
        .option("encoding", "utf-8") \
        .save()
    # Persist to the Hive warehouse as a Parquet table.
    top3_province_store_receivable_daily_sum_dayu1000.write.mode("overwrite") \
        .saveAsTable("myhive.top3_province_store_receivable_daily_sum_dayu1000", "parquet")

    # todo 3. Average order value of each province among the TOP-3 provinces
    print("TOP3 省份中 各个省份的平均单单价")
    # Mean receivable per province, rounded to 2 decimals, highest first.
    top3_province_store_receivable_avg = top3_province_store.groupby("storeProvince") \
        .agg(F.round(F.avg("receivable"), 2).alias("receivable_avg")) \
        .orderBy(F.desc("receivable_avg"))
    top3_province_store_receivable_avg.show()
    # Persist to MySQL via JDBC.
    top3_province_store_receivable_avg.write.mode("overwrite").format("jdbc") \
        .option("url", "jdbc:mysql://hadoop1:3306/bigdata?useSSL=false&useUnicode=true&characterEncoding=utf8") \
        .option("dbtable", "top3_province_store_receivable_avg") \
        .option("user", "root") \
        .option("password", "123456") \
        .option("encoding", "utf-8") \
        .save()
    # Persist to the Hive warehouse as a Parquet table.
    top3_province_store_receivable_avg.write.mode("overwrite") \
        .saveAsTable("myhive.top3_province_store_receivable_avg", "parquet")

    # todo 4. Payment-type share of each province among the TOP-3 provinces
    print("TOP3 省份中, 各个省份的支付类型比例")
    top3_province_store.show()
    # Total number of orders per province (denominator of the ratio).
    pay_sum = top3_province_store.groupby("storeProvince").count()
    # pay_sum.show()
    # Number of orders per (province, payment type) pair (numerator).
    top3_province_payType_sum = top3_province_store.groupby("storeProvince", "payType") \
        .count() \
        .withColumnRenamed("count", "pay_type_count")
    # top3_province_payType_sum.show()
    # Rename so the join below does not produce an ambiguous column.
    top3_province_payType_sum = top3_province_payType_sum.withColumnRenamed("storeProvince", "storeProvince1")
    # Join numerator and denominator so every row carries both counts.
    top3_province_payType_and_payTypesum = top3_province_payType_sum \
        .join(pay_sum, on=top3_province_payType_sum["storeProvince1"] == pay_sum["storeProvince"])
    top3_province_payType_and_payTypesum.createTempView("top3_province_payType_and_payTypesum")
    # spark.sql("select * from top3_province_payType_and_payTypesum").show()
    # ratio = per-payment-type count / province total count.
    pay_type_ratio = spark.sql(
        "select storeProvince, payType, pay_type_count / count as pay_type_ratio from top3_province_payType_and_payTypesum")
    pay_type_ratio.show()
    # Persist to MySQL via JDBC.
    pay_type_ratio.write.mode("overwrite").format("jdbc") \
        .option("url", "jdbc:mysql://hadoop1:3306/bigdata?useSSL=false&useUnicode=true&characterEncoding=utf8") \
        .option("dbtable", "pay_type_ratio") \
        .option("user", "root") \
        .option("password", "123456") \
        .option("encoding", "utf-8") \
        .save()
    # Persist to the Hive warehouse as a Parquet table.
    pay_type_ratio.write.mode("overwrite").saveAsTable("myhive.pay_type_ratio", "parquet")

    # Release the Spark session now that all jobs are finished; the original
    # script exited without stopping it, leaking the session/JVM.
    spark.stop()
