import hashlib
from typing import Iterable, List
from pyspark.sql import SparkSession
from pyspark.sql import DataFrame
from pyspark.sql.types import Row
import argparse

def init_spark_session(app_name: str, master: str):
    """Create (or reuse) a SparkSession configured for this job.

    Applies fixed executor/driver memory and attaches the MySQL JDBC
    connector jar so `read_mysql` can work.

    Args:
        app_name: Spark application name shown in the UI.
        master: Spark master URL (e.g. ``local[*]`` or ``spark://host:7077``).

    Returns:
        The live SparkSession.

    Raises:
        Exception: re-raised after logging if session creation fails.
    """
    settings = (
        ("spark.executor.memory", "4g"),
        ("spark.driver.memory", "4g"),
        ("spark.jars", "/root/mysql-connector-java-8.0.23.jar"),
    )
    try:
        builder = SparkSession.builder.appName(app_name).master(master)
        for key, value in settings:
            builder = builder.config(key, value)
        session = builder.getOrCreate()
        print(f"Create Spark session succeed, version: {session.version}")
        return session
    except Exception as e:
        print(f"Create Spark session error: {e}")
        raise

def read_mysql(spark: SparkSession,
               url: str = "jdbc:mysql://192.168.1.10:3306/employees",
               table: str = "employees",
               where: str = "emp_no < 20000"):
    """Load a MySQL table over JDBC, optionally filtered.

    Defaults reproduce the original hard-coded behavior (employees table,
    rows with ``emp_no < 20000``), but the connection URL, table name, and
    filter can now be overridden by callers.

    Args:
        spark: active SparkSession (must have the MySQL connector jar loaded).
        url: JDBC connection URL.
        table: table (or subquery alias) to read.
        where: SQL predicate applied after the read; pass "" to skip filtering.

    Returns:
        A DataFrame with the (filtered) table contents.
    """
    # SECURITY: credentials are hard-coded; move them to environment
    # variables or a secrets store before using outside a private test rig.
    properties = {
        "user": "root",
        "password": "Reins5403!",
        "driver": "com.mysql.cj.jdbc.Driver",
    }

    df = spark.read.jdbc(url=url, table=table, properties=properties)
    if where:
        df = df.where(where)
    return df

def partition_by_row_count(df: DataFrame, primary_key: str, rows_per_partition: int = 200):
    """Repartition *df* so each partition covers ``rows_per_partition``
    consecutive primary-key values.

    Args:
        df: input DataFrame; assumed non-empty with a numeric ``primary_key``
            column (min/max aggregation must yield integers).
        primary_key: column whose min/max define the key range.
        rows_per_partition: width of the key range assigned to each partition.

    Returns:
        A DataFrame with the same schema, range-partitioned by key.
    """
    min_pk = df.agg({primary_key: "min"}).collect()[0][0]
    max_pk = df.agg({primary_key: "max"}).collect()[0][0]

    # ceil((max - min) / rows_per_partition), but never fewer than 1 partition.
    # The original len(range(min, max, step)) was 0 when min == max, which
    # would have made partitionBy fail.
    num_partitions = max(1, -((min_pk - max_pk) // rows_per_partition))

    def index_for_key(key):
        # O(1) arithmetic instead of scanning the range starts. This also
        # fixes the original off-by-one: `start < key <= start + step`
        # matched nothing for key == min_pk, silently dumping the minimum
        # key into the last partition.
        return min((key - min_pk) // rows_per_partition, num_partitions - 1)

    rdd_with_key = df.rdd.map(lambda row: (row[primary_key], row))
    partitioned_rdd = rdd_with_key.partitionBy(num_partitions, index_for_key)
    partitionedDF = partitioned_rdd.map(lambda kv: kv[1]).toDF(df.schema)

    print("After partitions: " + str(partitionedDF.rdd.getNumPartitions()))

    return partitionedDF

def stop_spark_session(spark: SparkSession):
    spark.stop()
    print("Spark session Stopped")

def map_partitions(rows: Iterable["Row"]) -> List[str]:
    """Digest one RDD partition into a single SHA-256 hex string.

    Each row is rendered with ``str`` and fed to the hash in iteration
    order, so the digest depends on both row content and row order.

    Args:
        rows: the rows of one partition (any iterable; rendered via ``str``).

    Returns:
        A one-element list with the partition's hex digest — a list so the
        function can be passed directly to ``RDD.mapPartitions``.
    """
    # Feed the hash incrementally instead of the original quadratic `+=`
    # string concatenation; also avoid shadowing the `iter` builtin.
    digest = hashlib.sha256()
    for row in rows:
        digest.update(str(row).encode())
    return [digest.hexdigest()]

def main():
    """Parse CLI arguments, run the partition-digest job, and clean up.

    Reads the employees table from MySQL, range-partitions it by ``emp_no``,
    and prints a SHA-256 digest of each partition's rows.
    """
    parser = argparse.ArgumentParser(description='PySpark Application')
    parser.add_argument(
        '--master', default="local[*]", help="Spark Master's address (default: local[*])")
    parser.add_argument('--app-name', default="PySparkApp", help='Application Name (default: PySparkApp)')
    args = parser.parse_args()

    spark = init_spark_session(args.app_name, args.master)
    try:
        df = read_mysql(spark)

        partitioned_df = partition_by_row_count(df, "emp_no", rows_per_partition=5)
        partition_digests = partitioned_df.rdd.mapPartitions(map_partitions).collect()
        for i, digest in enumerate(partition_digests):
            print(f"Partition {i}: Digest: {digest}")
    finally:
        # The original defined stop_spark_session but never called it,
        # leaking the session; always release it, even on failure.
        stop_spark_session(spark)

if __name__ == "__main__":
    main()
