from pyspark.sql import SparkSession
from pyspark.sql.functions import col, sum, month, format_number
import pandas as pd

# Build the SparkSession with the MySQL JDBC driver on the classpath.
builder = SparkSession.builder
builder = builder.appName("MySQL to Spark")
builder = builder.config(
    "spark.jars",
    "/usr/local/spark-3.2.1-bin-hadoop3.2/jars/mysql-connector-java-8.0.26.jar",
)
spark = builder.getOrCreate()

# JDBC connection settings for the source/target MySQL database (`gxin`).
url = "jdbc:mysql://192.168.10.103:3306/gxin?useUnicode=true&characterEncoding=utf8"
properties = {
    "driver": "com.mysql.cj.jdbc.Driver",
    "user": "root",
    "password": "root",
}

# Load the bid-information table from MySQL into a Spark DataFrame.
df = spark.read.jdbc(url, "zfbider_23_info", properties=properties)

# Preview the loaded rows.
df.show()

# 1. Keep only records dated within 2023 (both endpoints inclusive).
#    NOTE(review): if `time` is a timestamp (not a date), rows after midnight
#    on 2023-12-31 would be excluded by the string bound — confirm column type.
df_2023 = df.filter(col("time").between("2023-01-01", "2023-12-31"))

# 2. Derive the month number and total the award amount per region and month.
#    The total is formatted to 2 decimal places and kept as a string.
df_2023 = df_2023.withColumn("month", month(col("time")).cast("string"))
grouped_df = df_2023.groupBy("region", "month").agg(
    format_number(sum("award_amount"), 2).cast("string").alias("award_amount")
)

# 3. Bring the (small) aggregate down to pandas and pivot regions into columns.
pandas_df = grouped_df.toPandas()
pivot_df = pandas_df.pivot(index='month', columns='region', values='award_amount').reset_index()

# 4. Rename columns.
#    NOTE(review): this assumes the pivot produced exactly the three regions
#    1, 2 and 3 (pandas orders pivot columns by sorted region value) — verify
#    against the actual data before relying on the positional rename.
pivot_df.columns = ['month', 'region_1_award_amount', 'region_2_award_amount', 'region_3_award_amount']

# 5. Sort by month NUMERICALLY. `month` holds strings, so a plain sort would
#    order "10", "11", "12" before "2" — use a numeric sort key instead.
pivot_df = pivot_df.sort_values(by='month', key=lambda s: s.astype(int)).reset_index(drop=True)

# 6. Show the result.
print(pivot_df)

# 7. Write results back to the MySQL database.
def write_to_mysql(df, table_name):
    """Persist a pandas DataFrame into the MySQL table `table_name`.

    Converts `df` to a Spark DataFrame and overwrites the target table via
    JDBC, using the module-level `spark`, `url` and `properties`. Failures
    are reported (message + traceback) rather than raised.
    """
    try:
        # BUG FIX: convert the `df` argument, not the global `pivot_df`,
        # so the function writes whatever DataFrame the caller passes in.
        df_spark = spark.createDataFrame(df)
        df_spark.write.jdbc(url, table_name, mode="overwrite", properties=properties)
        print(f"成功存入 {table_name} 表中")
    except Exception as e:
        print(f"未能存入 {table_name} 表中，错误信息：{str(e)}")
        import traceback
        traceback.print_exc()

# Write the 2023 per-region monthly award-amount table back to MySQL.
write_to_mysql(pivot_df, "region_monthly_award_amount_2023")

# Shut down the SparkSession and release its resources.
spark.stop()