import org.apache.spark.sql.SparkSession

object StringPartition {
  /**
   * Demonstrates splitting an RDD of strings into two partitions by length:
   * partition 0 receives strings of length <= 5, partition 1 the rest.
   * Runs locally and prints the contents of each partition.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("String Partition")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext

    // Create an RDD of sample strings.
    val rdd = sc.parallelize(Seq("apple", "banana", "orange", "pear", "watermelon", "grape", "pineapple"))

    // Key each string with an Int bucket (0 = length <= 5, 1 = longer) and
    // hash-partition on that key.
    //
    // NOTE: keying with a Boolean (the original approach) does NOT work:
    // java.lang.Boolean.hashCode is 1231 (true) and 1237 (false) — both odd —
    // so under HashPartitioner(2) both keys map to partition 1 and every
    // record ends up in the same partition. Int keys 0 and 1 hash to
    // themselves and map cleanly onto partitions 0 and 1.
    val partitionedRDD = rdd.map(x => (if (x.length <= 5) 0 else 1, x))
                            .partitionBy(new org.apache.spark.HashPartitioner(2))
                            .values // drop the bucket key, keep only the strings

    // Gather each partition as an array (glom) and print its contents.
    partitionedRDD.glom().collect().zipWithIndex.foreach { case (partition, index) =>
      println(s"Partition $index: ${partition.mkString(", ")}")
    }

    spark.stop()
  }
}
