# -*- coding: utf-8 -*-
import logging
import time
import os
import json

# 导入必要的库
from pyspark.sql import SparkSession
from pyspark.sql.functions import *

# Build the SparkSession shared by every job in this script.
spark = (
    SparkSession.builder
    .master("local")
    .appName("rent_als")
    .config("spark.sql.shuffle.partitions", 2)
    .getOrCreate()
)

# Load the rental listings CSV (first row is the header).
# NOTE(review): no inferSchema, so every column is a string; the numeric
# comparisons below rely on Spark's implicit casting — confirm intended.
filename = "rent.csv"
rent_df = spark.read.csv(filename, header=True)

# Keep only sanely priced listings (drops absurdly expensive office space)
# and drop rows whose house_type starts with the "unknown rooms" marker.
rent_df = rent_df.filter(
    (rent_df.price >= 50)
    & (rent_df.price <= 40000)
    & ~rent_df["house_type"].startswith("未知室")
)

# Register the cleaned data as a temp view for ad-hoc SQL.
rent_df.createOrReplaceTempView("rent")


# Count occurrences of each distinct value in a column of the rent DataFrame.
def countByCsv(field):
    """Count how many rows share each value of *field*.

    Args:
        field: Column name in the module-level ``rent_df`` DataFrame.

    Returns:
        An RDD of ``(value, count)`` pairs.
    """
    # One (value, 1) pair per row — a plain map, not a flatMap of
    # single-element lists.
    pairs = rent_df.rdd.map(lambda row: (row[field], 1))

    # reduceByKey already combines partial counts across partitions, so the
    # original repartition(1) only serialized the job onto one partition
    # without changing the result; it has been removed.
    return pairs.reduceByKey(lambda a, b: a + b)


# House-type distribution of the rental listings.
def countByHouseType():
    """Return the 20 most common house types.

    Returns:
        A list of ``{"house_type": ..., "count": ...}`` dicts, sorted by
        count descending, at most 20 entries.
    """
    res = countByCsv("house_type")

    # Sort by count descending on the RDD and keep the top 20 — the same
    # sortBy/take pattern the other count* helpers use, instead of
    # collecting everything to the driver first.  (The old comment claimed
    # a limit of 10 while the code sliced 20; 20 is kept.)
    top = res.sortBy(lambda kv: -kv[1]).take(20)
    return [{"house_type": ht, "count": n} for ht, n in top]


# Keyword-cloud data: the most frequent rental addresses.
def countByKeywords():
    """Return the top 100 addresses as ``{"x": ..., "value": ...}`` dicts."""
    top = countByCsv("address").sortBy(lambda kv: -kv[1]).take(100)
    return [{"x": addr, "value": cnt} for addr, cnt in top]


# Communities with the most rental listings.
def countByAddresses():
    """Return the top 10 addresses as ``{"address": ..., "count": ...}`` dicts."""
    top = countByCsv("address").sortBy(lambda kv: -kv[1]).take(10)
    return [{"address": addr, "count": cnt} for addr, cnt in top]


# Distribution of rental listing types.
def countByType():
    """Return the top 10 non-empty types as ``{"type": ..., "count": ...}`` dicts."""
    counted = countByCsv("type")
    non_empty = counted.filter(lambda kv: kv[0] != '')
    top = non_empty.sortBy(lambda kv: -kv[1]).take(10)
    return [{"type": t, "count": n} for t, n in top]


# Relationship between floor area and rent.
def countByAreaPrice():
    """Collect (floor_area, price, address) for listings of at most 1000 sqm.

    Returns:
        A list of pyspark ``Row`` objects (a tuple subclass, so json.dumps
        serializes each row as a JSON array).
    """
    return (
        rent_df
        .filter(rent_df["floor_area"] <= 1000)
        .select("floor_area", "price", "address")
        .collect()
    )


def save(path, data):
    """Write the string *data* to *path* as UTF-8 text, replacing any
    existing file."""
    with open(path, mode='w', encoding='utf-8') as out:
        out.write(data)


if __name__ == "__main__":
    # Job registry: maps a job name to the stat function that computes it
    # and the JSON file its result is written to.
    m = {
        "countByHouseType": {
            "method": countByHouseType,
            "path": "house_type.json"
        },
        "countByAreaPrice": {
            "method": countByAreaPrice,
            "path": "area_price.json"
        },
        "countByKeywords": {
            "method": countByKeywords,
            "path": "keywords.json"
        },
        "countByAddresses": {
            "method": countByAddresses,
            "path": "address_count.json"
        },
        "countByType": {
            "method": countByType,
            "path": "type.json"
        }
    }

    base = "static/"
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists + os.mkdir pair.
    os.makedirs(base, exist_ok=True)

    # Run every job and persist its result as UTF-8 JSON.
    for name, job in m.items():
        result = job["method"]()
        save(base + job["path"], json.dumps(result, ensure_ascii=False))
        print(f"done -> {name} , save to -> {base}{job['path']}")
