# Build (or reuse) the SparkSession that serves as the entry point
# for all DataFrame operations in this script.
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession

spark = (
    SparkSession.builder
    .config(conf=SparkConf())
    .getOrCreate()
)


# Example: load a JSON file into a DataFrame and display its rows.
# df = spark.read.json("file:///usr/local/spark/examples/src/main/resources/people.json")
# df.show()

# Example: save a DataFrame to disk as JSON.
# df.write.json("file:///home/hadoop/MyTmp/dataframe.json")

# Example: create a DataFrame named peopleDF from the sample file
# people.json, save peopleDF to another JSON file, then select one
# column (the "name" column) and save that column's data to a
# text file.
# peopleDF = spark.read.format("json").load("file:///usr/local/spark/examples/src/main/resources/people.json")
# peopleDF.select("name", "age").write.format("json").save("file:///home/hadoop/MyTmp/newpeople.json")
# peopleDF.select("name").write.format("text").save("file:///home/hadoop/MyTmp/newpeople.txt")


# Common DataFrame operations

# Example: inspect a DataFrame's schema.
# NOTE(fix): the original example called df.printSchema.show(), which would
# raise AttributeError if uncommented — printSchema() writes the schema to
# stdout and returns None, so there is nothing to call .show() on.
# df = spark.read.format("json").load("file:///usr/local/spark/examples/src/main/resources/people.json")
# df.printSchema()


# Example: project columns with select(); column expressions such as
# age + 1 are allowed alongside plain column references.
# df = spark.read.format("json").load("file:///usr/local/spark/examples/src/main/resources/people.json")
# df.select(df["name"],df["age"]+1).show()

# Example: keep only the rows matching a boolean column expression.
# df = spark.read.format("json").load("file:///usr/local/spark/examples/src/main/resources/people.json")
# df.filter(df["age"]>20).show()

# Example: group rows by age and count the members of each group.
# df = spark.read.format("json").load("file:///usr/local/spark/examples/src/main/resources/people.json")
# df.groupBy("age").count().show()


# Load the sample people data and demonstrate sorting a DataFrame.
df = spark.read.format("json").load(
    "file:///usr/local/spark/examples/src/main/resources/people.json"
)

# Single sort key: age, descending.
df.sort("age", ascending=False).show()

print("*" * 20)

# Compound sort: age descending, then name ascending as the tie-breaker.
df.sort(["age", "name"], ascending=[False, True]).show()