# -*- coding: utf-8 -*-
import sys
# NOTE(review): Python 2-only hack. reload(sys) re-exposes setdefaultencoding(),
# which site.py deliberately deletes at startup. Forcing the process-wide default
# encoding to UTF-8 lets the Chinese text in this script be encoded implicitly
# without explicit .encode('utf-8') calls. This is fragile (it affects ALL
# implicit str<->unicode conversions in the process) and does not exist in
# Python 3 — keep only while the script targets Python 2.
reload(sys)
sys.setdefaultencoding('utf-8')
# @Author: appleyuchi
# @Date:   2018-08-17 19:02:12
# @Last Modified by:   appleyuchi
# @Last Modified time: 2018-08-18 12:45:40



# This script demonstrates the conversions between an RDD, a Spark DataFrame,
# and a pandas DataFrame, printing the concrete type at every step.
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql import Row

# 初始化SparkSession
spark = SparkSession.builder.appName("RDD_and_DataFrame") .config("spark.some.config.option", "some-value").getOrCreate()
print "-----------------------------下面是<class 'pyspark.sql.dataframe.DataFrame'>-----------------------------------------------"
print"type of spark:",type(spark)
print "-----------------------------下面是 <class 'pyspark.context.SparkContext'>-----------------------------------------------"
sc = spark.sparkContext
print"type of sc",type(sc)
print "-----------------------------下面是 <class 'pyspark.rdd.RDD'>-----------------------------------------------"
lines = sc.textFile("employee.txt")
print "type of lines:",type(lines)
print "------------------------------下面是<class 'pyspark.rdd.PipelinedRDD'>-----------------------------------------------"
parts = lines.map(lambda data: data.split(","))
print "type of parts:",type(parts)
employee = parts.map(lambda p: Row(name=p[0], salary=int(p[1])))
print "type of employee:",type(employee)
print "--------------------------------下面是<class 'pyspark.sql.dataframe.DataFrame'>----------------------------------------------"
#RDD转换成DataFrame
employee_temp = spark.createDataFrame(employee)
print "type of employee_temp:",type(employee_temp)#这个dataframe是被当做数据库的表格来使唤了.
print "--------------------------------下面是<class 'pandas.core.frame.DataFrame'>---------------------------------------------"
employee_pandas=employee_temp.toPandas()
print"type of employee_pandas:",type(employee_pandas)

#因为pandas有一种dataframe,spark也有一种datafram,所以这里是他们之间的转换方法.

print "--------------------------------下面是<class 'pyspark.sql.dataframe.DataFrame'>----------------------------------------------"
 #显示DataFrame数据
employee_temp.show()
 #创建视图
employee_temp.createOrReplaceTempView("employee")
#过滤数据
employee_result = spark.sql("SELECT name,salary FROM employee WHERE salary >= 14000 AND salary <= 20000")
print"type of employee_result:",type(employee_result)
print "------------------------------下面是<class 'pyspark.rdd.RDD'>-----------------------------------------------"
# DataFrame转换成RDD
result1=employee_result.rdd
print "type of result1:",type(result1)

print "-------------------------------下面是<class 'pyspark.rdd.PipelinedRDD'>------------------------------------------"
result2= employee_result.rdd.map(lambda p: "name: " + p.name + "  salary: " + str(p.salary))
print"type of result2:",type(result2)
print "-------------------------------下面是<type 'list'>------------------------------------------------"
result3 = employee_result.rdd.map(lambda p: "name: " + p.name + "  salary: " + str(p.salary)).collect()
print"type of result3:",type(result3)
print "result3=",result3
     #打印RDD数据
for n in result3:
    print n

#参考资料
# https://blog.csdn.net/dufufd/article/details/79091834
