# -*- coding: utf-8 -*-
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.types import IntegerType


def house_analyse(filename):
    """Compute rental-price statistics from a CSV file using Spark.

    Args:
        filename: path to a CSV file with a header row and a ``price`` column.

    Returns:
        dict with keys 'mean', 'min', 'max' and 'mid' (approximate median).
    """
    spark = SparkSession.builder.master("local").appName("house_analyse").getOrCreate()
    df = spark.read.csv(filename, header=True)
    # Cast price to int so comparisons are numeric rather than lexicographic.
    # Rows priced "面议" (negotiable) cannot be cast and are dropped first;
    # the 2..40000 range filter then excludes bogus prices such as 1-yuan
    # bait listings or ultra-expensive office rentals.
    df = df.filter(df.price != '面议').withColumn("price", df.price.cast(IntegerType()))
    df = df.filter(df.price >= 2).filter(df.price <= 40000)

    # Single aggregation pass instead of three separate Spark jobs.
    stats = df.agg(
        F.mean("price").alias("mean"),
        F.min("price").alias("min"),
        F.max("price").alias("max"),
    ).first()
    # approxQuantile returns one value per requested quantile -> take [0].
    median = df.approxQuantile("price", [0.5], 0.01)[0]
    analyse_info = {
        'mean': stats['mean'],
        'min': stats['min'],
        'max': stats['max'],
        'mid': median,
    }
    print("analyse_info:", analyse_info)
    return analyse_info

def pandas_analyse(filename):
    """Compute rental-price statistics from a CSV file using pandas.

    Applies the same cleaning as the Spark variant: rows whose price is the
    literal "面议" (negotiable) are dropped before the int cast — otherwise
    ``astype`` would raise ValueError on real data — and prices outside
    [2, 40000] are excluded as bogus (bait listings, office rentals, ...).

    Args:
        filename: path to a CSV file with a header row and a ``price`` column.

    Returns:
        dict with integer values under keys 'mean', 'min', 'max', 'mid'.
    """
    import pandas as pd
    df = pd.read_csv(filename)
    # Drop non-numeric "面议" rows first so the cast below cannot fail.
    df = df[df['price'] != '面议']
    prices = df['price'].astype('int32')
    prices = prices[(prices >= 2) & (prices <= 40000)]
    analyse_info = {
        'mean': int(prices.mean()),
        'min': int(prices.min()),
        'max': int(prices.max()),
        'mid': int(prices.median()),
    }
    print(analyse_info)
    return analyse_info

if __name__ == '__main__':
    # Spark variant, disabled in favour of the pandas one below:
    # house_analyse('./思明.csv')
    pandas_analyse("./思明.csv")