# coding:utf-8
from pyspark import SparkConf, SparkContext
import os

# Tell PySpark which JVM to launch; must be set before the SparkContext is created.
# NOTE(review): path '/server/jdk' is machine-specific — confirm it exists on the target host.
os.environ['JAVA_HOME'] = '/server/jdk'

if __name__ == '__main__':
    # BUG FIX: the original chained .setAppName() twice, so the app name was
    # overwritten with 'local[*]' and no master URL was ever configured.
    # setMaster('local[*]') runs Spark locally using all available cores.
    conf = SparkConf().setAppName('test').setMaster('local[*]')
    sc = SparkContext(conf=conf)

    try:
        rdd = sc.parallelize(['hadoop spark hadoop','spark hadoop hadoop','hadoop flink spark'])

        # map: one output element per input line, so the result is a nested
        # list of token lists, e.g. [['hadoop', 'spark', 'hadoop'], ...].
        rdd2 = rdd.map(lambda line: line.split(' '))
        print(rdd2.collect())

        # flatMap: flattens one level of nesting — every token becomes its own
        # element of the resulting RDD, e.g. ['hadoop', 'spark', 'hadoop', ...].
        rdd3 = rdd.flatMap(lambda line: line.split(' '))
        print(rdd3.collect())
    finally:
        # Stop the SparkContext so the backing JVM shuts down cleanly even if
        # one of the actions above raises.
        sc.stop()