from pyspark import SparkConf, SparkContext

if __name__ == '__main__':
    # Build the SparkConf object: app name "test", run locally on all available cores
    conf = SparkConf().setAppName("test").setMaster("local[*]")
    # Build the SparkContext — the entry point to the Spark execution environment
    sc = SparkContext(conf=conf)

    # Key-value RDD: partitionBy (used below) only works on (key, value) pairs
    rdd = sc.parallelize([("a", 1), ("a", 1), ("a", 1), ("b", 1), ("c", 1)])


    def part(key):
        """Partitioner: route keys 'a' and 'c' to partition 0, all other keys to partition 1."""
        return 0 if key in ('a', 'c') else 1


    # partitionBy: custom partitioning of an RDD; only valid for key-value RDDs.
    #   arg 1 - number of partitions
    #   arg 2 - partitioning rule: a function mapping key -> partition index
    # (was a misplaced bare triple-quoted string, which is a no-op statement,
    #  not a comment — converted to a real comment before the call it describes)
    rdd2 = rdd.partitionBy(2, part)

    # glom() gathers each partition's elements into a list so the layout is visible.
    # Expected output: [[('a', 1), ('a', 1), ('a', 1), ('c', 1)], [('b', 1)]]
    print(rdd2.glom().collect())

    # Release cluster resources — every Spark application should stop its context.
    sc.stop()
