"""Read a bid-award table from MySQL, aggregate it with Spark, write results back.

Loads the ``zfbider_23_info`` table over JDBC, computes three summaries
(total amount per project, project count per region, project count and
total amount per awarding unit), and writes each summary back to MySQL
as its own table (overwriting any existing one).
"""

from pyspark.sql import SparkSession

# Alias the aggregate helpers so they do not shadow the builtins `sum`/`count`.
from pyspark.sql.functions import col, count as sf_count, sum as sf_sum

# Create the SparkSession; spark.jars ships the MySQL JDBC driver to the cluster.
spark = SparkSession.builder \
    .appName("MySQL to Spark") \
    .config("spark.jars", "/usr/local/spark-3.2.1-bin-hadoop3.2/jars/mysql-connector-java-8.0.26.jar") \
    .getOrCreate()

# JDBC connection parameters.
# NOTE(review): credentials are hard-coded; move them to environment
# variables or a config file before using this outside a local sandbox.
url = "jdbc:mysql://192.168.10.103:3306/gxin"
properties = {
    "user": "root",
    "password": "root",
    "driver": "com.mysql.cj.jdbc.Driver",
}

try:
    # Load the source table from MySQL and preview it.
    df = spark.read.jdbc(url, "zfbider_23_info", properties=properties)
    df.show()

    # 1. Total award amount per project.
    project_total_amount = df.groupBy("project_name").agg(
        sf_sum("award_amount").alias("total_amount")
    )
    project_total_amount.show()

    # 2. Number of projects per region.
    region_project_count = df.groupBy("region").agg(
        sf_count("id").alias("project_count")
    )
    region_project_count.show()

    # 3. Project count and total award amount per awarding unit.
    unit_project_stats = df.groupBy("award_unit").agg(
        sf_count("id").alias("project_count"),
        sf_sum("award_amount").alias("total_amount"),
    )
    unit_project_stats.show()

    # Target table names for the result sets.
    project_total_amount_table = "project_total_amount"
    region_project_count_table = "region_project_count"
    unit_project_stats_table = "unit_project_stats"

    # Write each summary back to MySQL, replacing any existing table.
    project_total_amount.write.jdbc(url, project_total_amount_table, mode="overwrite", properties=properties)
    region_project_count.write.jdbc(url, region_project_count_table, mode="overwrite", properties=properties)
    unit_project_stats.write.jdbc(url, unit_project_stats_table, mode="overwrite", properties=properties)
finally:
    # Always release cluster resources, even if a read/aggregate/write fails.
    # (The original called spark.stop() unconditionally at the end, which was
    # skipped whenever an earlier statement raised.)
    spark.stop()