from pyspark import SparkContext
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql.types import StructType, StructField, StringType

# Obtain (or lazily create) the shared SparkSession entry point.
ss = SparkSession.builder.getOrCreate()


# Schema for the jobs CSV: four non-nullable string columns.
# NOTE: renamed `distinct` -> `address`; the old name was misleading
# (it holds the "address" column's StructField, and reads like SQL DISTINCT).
job = StructField("job", StringType(), False)
company = StructField("company", StringType(), False)
address = StructField("address", StringType(), False)
salary = StructField("salary", StringType(), False)

# Read the data file.
# df = ss.read.csv("file:///Users/sonto/Workspace/P1905/spark_example/jobs.csv",
#                   sep=",",
#                  schema=StructType([job, company, address, salary]))

df = ss.read.load("file:///Users/sonto/Workspace/P1905/spark_example/jobs.csv",
                 format="csv",
                 schema=StructType([job, company, address, salary]))


# Sanity check that `load` returned a DataFrame.
# NOTE(review): `assert` is stripped under `python -O`, so this is a
# development-time check only, not runtime validation.
assert isinstance(df, DataFrame)

# Show the data
# df.show()

# Select specific columns
# df1 = df.select("job", "salary") # select job, salary from ...
# df1.show()

# Conditional query (row filtering)
# for row in df.filter(df.job == "Python开发工程师").collect():
#     print(row.salary)
    # print(row)


# select job, salary from ... where address = '成都-高新区'
# df1 = df.filter(df.address == "成都-高新区").select("job", "salary")

# Create a temporary view so SQL statements can be run against the DataFrame
# df1.createTempView("tbl_job")
# ss.sql("select salary from tbl_job").show()

# df1.createGlobalTempView('tbl_job1')
# df.createOrReplaceTempView("tbl_job")
# ss.sql("select address, salary from tbl_job").show()


# Save the result to a file
# df1.write.csv("file:///Users/sonto/Workspace/P1905/spark_example/高新区.csv")


# Shut down the SparkSession and release its resources.
ss.stop()