#!/usr/bin/env python
# -*- coding: utf-8 -*-

from pyspark.sql import SparkSession
from pyspark.sql.functions import expr, count, max, min, round
from pyspark.sql.types import DoubleType


if __name__ == '__main__':
    # Initialize the SparkSession (local mode, all available cores).
    spark = SparkSession.builder.appName("Binning Example").master("local[*]").getOrCreate()

    # Sample data: (name, score) pairs.
    data = [("Alice", 10), ("Bob", 20), ("Charlie", 30), ("David", 40),
            ("Eve", 50), ("Frank", 60), ("Grace", 70), ("Hannah", 80),
            ("Ian", 90), ("Jack", 100), ("AAA", 91)]

    # Create the DataFrame.
    columns = ["Name", "Score"]
    df = spark.createDataFrame(data, schema=columns)
    df.printSchema()

    # Desired number of bins.
    binned_count = 5

    # Compute the bin width — (max - min) / binned_count rounded to the
    # nearest multiple of 10 — together with the minimum score.
    # NOTE: round/max/min here are the pyspark.sql.functions versions
    # imported at the top of the file, not the Python builtins.
    first_df = df.select(round((max("Score") - min("Score"))/(binned_count * 10), 0).alias("diff"),
                         min("Score").alias("min")).first()

    diff = first_df["diff"] * 10
    minV = first_df["min"]
    # BUG FIX: when the score range is narrower than binned_count * 5, the
    # rounding above yields 0, so the width would be 0 and the binning
    # expression below would divide by zero. Fall back to a width of 10
    # (the same granularity the rounding targets).
    if diff is None or diff <= 0:
        diff = 10
    print("diff,min:", diff, minV)

    # Equal-width binning via expr + floor: each score is mapped to the
    # lower edge of its bin, offset from the minimum score.
    df_binned = df.withColumn("Bin", expr(f"floor((Score - {minV}) / {diff}) * {diff}").cast(DoubleType()))

    df_binned.show()

    # Count the rows that fall into each bin, ordered by bin edge.
    df_binned_counts = df_binned.groupBy("Bin").agg(count("*").alias("Count")).orderBy("Bin")

    df_binned_counts.printSchema()
    # Show the per-bin counts.
    df_binned_counts.show()

    # Collect to pandas for charting. Use a distinct name so the Spark
    # DataFrame `df` is not shadowed by a pandas DataFrame.
    pdf = df_binned_counts.toPandas()
    # Reshape into a {column name: list of values} dict for the chart.
    chartData = {column: pdf[column].tolist() for column in pdf.columns}
    print(chartData)

    # Stop the SparkSession.
    spark.stop()
