import org.apache.spark.Partitioner
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

/**
 * Demonstrates custom partitioning of an RDD of strings by string length:
 * partition 0 holds strings of length <= 5, partition 1 holds the rest.
 *
 * Note: `partitionBy` is only defined on pair RDDs (via `PairRDDFunctions`)
 * and requires a `Partitioner` instance — it cannot be called on `RDD[String]`
 * with a plain function. We therefore key each string by itself, partition,
 * then drop the dummy values.
 */
object PartitionByStringLengthExample {

  /** Routes string keys into two partitions: length <= 5 -> 0, otherwise 1. */
  private class StringLengthPartitioner extends Partitioner {
    override def numPartitions: Int = 2

    override def getPartition(key: Any): Int = key match {
      case s: String => if (s.length <= 5) 0 else 1
      // Non-string keys should not occur here; route them to partition 0.
      case _ => 0
    }
  }

  def main(args: Array[String]): Unit = {
    // Create the Spark configuration and context (local mode, all cores).
    val conf = new SparkConf().setAppName("PartitionByStringLength").setMaster("local[*]")
    val sc = new SparkContext(conf)

    // Create an RDD containing the sample strings.
    val stringsRdd = sc.parallelize(Seq(
      "apple", "banana", "orange", "pear", "watermelon", "grape", "pineapple"
    ))

    // Key each string by itself so partitionBy is available, apply the
    // length-based partitioner, then keep only the original strings.
    val partitionedRdd = stringsRdd
      .map(s => (s, null))
      .partitionBy(new StringLengthPartitioner)
      .keys

    // RDD has no `foreachPartitionWithIndex`; use `mapPartitionsWithIndex`
    // and collect to the driver so the println output is visible and ordered
    // (executor-side println may not reach the driver's stdout).
    partitionedRdd
      .mapPartitionsWithIndex { (partitionIndex, strings) =>
        Iterator((partitionIndex, strings.toList))
      }
      .collect()
      .foreach { case (partitionIndex, strings) =>
        println(s"Partition $partitionIndex:")
        strings.foreach(println)
      }

    // Stop the SparkContext to release resources.
    sc.stop()
  }
}