from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType

# Create (or reuse) the Spark session for this script.
ss = SparkSession.builder.getOrCreate()

# Schema: four non-nullable string columns for the jobs dataset.
column_names = ("job", "company", "address", "salary")
job_schema = StructType(
    [StructField(name, StringType(), False) for name in column_names]
)

# Load the CSV file from the local filesystem with the explicit schema.
df = ss.read.csv("file:///Users/liuqi/PycharmProjects/P1905/liuqi/spark/jobs.csv",
                 schema=job_schema)

# Preview the data; df.show(n=999) would display up to 999 rows.
# df.show()

# Project specific columns.
# df.select("job", "salary").show()

# Conditional query: where() and filter() are equivalent.
# df.where(df.address == "成都").show()
# df.filter(df.address == "成都").show()

# Filtering combined with projection.
# df.filter(df.address == "成都-武侯区").select("job", 'company', 'salary').show()

# Keep only the Wuhou-district rows, projecting the columns of interest.
wanted_cols = ["job", "company", "salary"]
df1 = df.filter(df.address == "成都-武侯区").select(*wanted_cols)

# Register a temporary view for SQL-style queries.
# df1.createTempView("tbl_job")

# Querying via ss.sql
# select *
# for x in ss.sql("select * from tbl_job").collect():
#     print(x)

# select some columns
# ss.sql("select job,salary from tbl_job").show()

# Query and save the result to disk.
# df1.write.csv("file:///Users/liuqi/PycharmProjects/P1905/liuqi/spark/df1.csv")
# df.filter(df.address == "成都-武侯区").select("job", 'company', 'salary').write.csv("file:///Users/liuqi/PycharmProjects/P1905/liuqi/spark/df.csv")

# Release the Spark session's resources.
ss.stop()
