# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# @Author: appleyuchi
# @Date:   2018-08-17 19:02:12
# @Last Modified by:   appleyuchi
# @Last Modified time: 2018-08-20 15:51:42
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark import SparkConf, SparkContext 
# Initialize the SparkSession (entry point for DataFrame/SQL APIs).
spark = (SparkSession.builder
         .appName("RDD_and_DataFrame")
         .config("spark.some.config.option", "some-value")
         .getOrCreate())


def g(x):
    """Print one element; used as the side-effect callback for RDD.foreach().

    Uses the parenthesized form so the same line works under both
    Python 2 (parens around a single expression) and Python 3.
    Returns None.
    """
    print(x)
# Demo data: note "Spark" appears twice so the (word, 1) pairs show duplicates.
# Renamed from `list` to avoid shadowing the builtin of the same name.
words = ["Hadoop", "Spark", "Hive", "Spark"]
rdd = spark.sparkContext.parallelize(words)

# Single-argument print(...) calls behave identically on Python 2 and 3.
print("----------------------------------下面是RDD-----------------------------------------------------")
print(type(rdd))              # <class 'pyspark.rdd.RDD'>
print(rdd.collect())          # materialize the RDD on the driver
print("-----------------------------------下面是pipelinedRDD----------------------------------------------------")
# map() produces a PipelinedRDD of (word, 1) pairs.
pipelinedRDD = rdd.map(lambda word: (word, 1))
print(type(pipelinedRDD))
# foreach() runs the callback on the executors for its side effect (printing).
pipelinedRDD.foreach(g)


# Use show() to display a DataFrame.
# Use collect() to display a <class 'pyspark.rdd.RDD'>.
# Use foreach(g) to display a <class 'pyspark.rdd.PipelinedRDD'>, e.g.:
# def g(x):
    # print x
