from pyspark.sql import SparkSession
from pyspark.sql.functions import col, sum, month, format_number

# Build the SparkSession, attaching the MySQL JDBC connector jar so
# spark.read.jdbc can talk to the database.
spark = (
    SparkSession.builder
    .appName("MySQL to Spark")
    .config("spark.jars", "/usr/local/spark-3.2.1-bin-hadoop3.2/jars/mysql-connector-java-8.0.26.jar")
    .getOrCreate()
)

# JDBC connection settings for the source MySQL database.
url = "jdbc:mysql://192.168.10.103:3306/gxin?useUnicode=true&characterEncoding=utf8"
properties = dict(
    user="root",
    password="root",
    driver="com.mysql.cj.jdbc.Driver",
)

# Load the source table and preview its contents.
df = spark.read.jdbc(url, "zfbider_23_info", properties=properties)
df.show()

# 1. Keep only rows from 2023 that belong to region 1.
df_2023_region_1 = df.filter(
    (col("time").between("2023-01-01", "2023-12-31")) & (col("region") == 1)
)

# 2. Extract the month number and store it as a string column.
df_2023_region_1 = df_2023_region_1.withColumn("month", month(col("time")).cast("string"))

# 3. Total award amount per month. format_number already yields a string
#    column, so the former `.cast("string")` was redundant and is removed.
monthly_award_amount = df_2023_region_1.groupBy("month").agg(
    format_number(sum("award_amount"), 2).alias("award_amount")
)

# 4. Sort chronologically: cast the string month to int for ordering,
#    then drop the helper column so the output schema is unchanged.
monthly_award_amount = monthly_award_amount.withColumn("month_int", col("month").cast("int")) \
    .orderBy("month_int") \
    .drop("month_int")

# Show the sorted monthly totals.
monthly_award_amount.show()

# Name of the destination table in MySQL.
monthly_award_amount_table = "region_1_monthly_award_amount_2023"

# Write a DataFrame back to MySQL, reporting success or failure instead of
# letting an exception abort the rest of the script.
def write_to_mysql(df, table_name, jdbc_url=None, props=None, mode="overwrite"):
    """Write *df* to the MySQL table *table_name* over JDBC.

    Args:
        df: The Spark DataFrame to persist.
        table_name: Target table name in the database.
        jdbc_url: JDBC URL; defaults to the module-level ``url``.
        props: Connection properties dict; defaults to the module-level
            ``properties``.
        mode: Spark save mode (default ``"overwrite"``, matching the
            original behavior).
    """
    try:
        df.write.jdbc(
            jdbc_url if jdbc_url is not None else url,
            table_name,
            mode=mode,
            properties=props if props is not None else properties,
        )
        print(f"成功存入 {table_name} 表中")
    except Exception as e:
        # Boundary handler: log the error and full traceback, then continue.
        print(f"未能存入 {table_name} 表中，错误信息：{str(e)}")
        import traceback
        traceback.print_exc()

# Persist the per-month award totals for region 1 into MySQL.
write_to_mysql(monthly_award_amount, monthly_award_amount_table)

# Release cluster resources now that the job is finished.
spark.stop()