from pyspark import *
# Spark setup: local mode with 2 worker threads, application name 'dayet'.
conf = SparkConf().setAppName('dayet').setMaster('local[2]')
sc = SparkContext(conf=conf)  # shared SparkContext used by core() and makerdd()
def core(path='E:\\shuju\\first.txt'):
    """Count word occurrences in a text file and print the first pair.

    Splits each line on single spaces, maps every word to ``(word, 1)``,
    sums the counts per word with ``reduceByKey``, and prints the first
    ``(word, count)`` pair of the result.

    Args:
        path: Input text file to read. Defaults to the original
            hard-coded path for backward compatibility.
    """
    lines = sc.textFile(path).cache()  # cached: cheap to re-use if extended later
    wordcount = (lines.flatMap(lambda line: line.split(' '))
                 .map(lambda word: (word, 1))
                 .reduceByKey(lambda a, b: a + b)
                 .first())
    print(wordcount)
def f(x): return (a+"_" for a in x)
def makerdd():
    """Demonstrate ``groupBy`` and ``flatMapValues`` on small sample RDDs.

    Groups the integers 1..8 by parity and prints each group's elements
    and the grouped result, then prints the collected result of
    ``flatMapValues(f)`` followed by a ``groupBy`` over ``rdd1``.
    """
    rdd1 = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
    nums = sc.parallelize(range(1, 9))
    # groupBy returns [(key, ResultIterable)]; key is n % 2 (0 = even, 1 = odd).
    grouped = nums.groupBy(lambda n: n % 2).collect()
    for even_val in grouped[0][1]:
        print(even_val)
    for odd_val in grouped[1][1]:
        print(odd_val)
    print(grouped)
    # BUG FIX: the original printed the bound method `.collect` itself
    # (a "<bound method ...>" repr) instead of calling it; it must be invoked.
    print(rdd1.flatMapValues(f).groupBy(lambda pair: pair[1] != 'a').collect())
if __name__ == '__main__':
    # core()  # word-count demo, currently disabled
    makerdd()