# -*- coding: utf-8 -*-
import logging
import time

# Import the required pyspark entry points. StructType/StringType are
# imported explicitly: relying on them leaking through the functions
# wildcard import is fragile across pyspark versions.
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import FloatType, IntegerType, StringType, StructType

if __name__ == '__main__':
    filename = "rent.csv"

    # Configure logging first: logging.info() on an unconfigured root
    # logger (default level WARNING) would be silently dropped.
    logging.basicConfig(level=logging.INFO)
    logging.info("开始spark分析")
    start_time = time.time()

    # Create the SparkSession. Local mode with a small shuffle-partition
    # count, since the input is a single CSV file.
    spark = SparkSession.builder.\
        master("local").\
        appName("rent_als"). \
        config("spark.sql.shuffle.partitions", 2). \
        getOrCreate()

    # Explicit schema for the rental-listing CSV (StructType describes
    # the table structure column by column).
    schema = StructType().\
        add("area", StringType(), nullable=True).\
        add("located", StringType(), nullable=True).\
        add("address", StringType(), nullable=True).\
        add("type", StringType(), nullable=True).\
        add("floor_area", FloatType(), nullable=True).\
        add("room_orientation", StringType(), nullable=True).\
        add("house_type", StringType(), nullable=True).\
        add("height", StringType(), nullable=False).\
        add("price", IntegerType(), nullable=True).\
        add("labels", StringType(), nullable=False)

    # Read the CSV with the explicit schema (header row skipped).
    rent_df = spark.read.csv(filename, header=True, schema=schema)
    # Keep only plausible prices (50..40000): drops absurdly expensive
    # office-building listings and junk rows.
    rent_df = rent_df.filter(rent_df.price >= 50).filter(rent_df.price <= 40000)
    # Drop records whose house_type starts with '未知室' ("unknown rooms");
    # ~ negates the startswith() condition.
    rent_df = rent_df.filter(~rent_df["house_type"].startswith("未知室"))

    # Register a temp view so the aggregation below can use SQL.
    rent_df.createOrReplaceTempView("rent")

    # Listing count per house_type, most common first. Build the result
    # DataFrame once and reuse it — the original ran the identical query
    # twice (once for show(), once for count()), launching two Spark jobs.
    by_house_type = spark.sql(
        "select house_type, count(*) as cnt from rent group by house_type order by cnt desc"
    )
    by_house_type.show(n=100)
    print(by_house_type.count())

    # Report elapsed time (start_time was previously captured but unused)
    # and shut the session down cleanly.
    logging.info("spark分析结束, 耗时 %.2f s", time.time() - start_time)
    spark.stop()
