# Remove outlier businesses by Euclidean distance from the average location of businesses within each state
from pyspark import SparkConf
from pyspark.sql import SparkSession
import pyspark.sql.functions as func
def data_process(raw_data_path,
                 output_path="file:///home/qnstar/PycharmProjects/yelp_analysis/business_etl",
                 max_dist=10.0):
    """Clean the Yelp business dataset and write the result as Parquet.

    Steps: load the raw JSON dump, split the comma-separated ``categories``
    string into an array, drop rows with an empty city or any nulls, then
    remove geographic outliers — businesses whose Euclidean distance (in
    degrees lat/long) from their state's mean location is >= ``max_dist``.

    Parameters
    ----------
    raw_data_path : str
        Path/URI of the raw ``yelp_academic_dataset_business.json`` file.
    output_path : str, optional
        Destination directory for the cleaned Parquet output
        (overwritten if it exists). Defaults to the original hard-coded path.
    max_dist : float, optional
        Outlier threshold: keep only businesses strictly closer than this
        distance to their state's average position. Defaults to 10.
    """
    # Reuse (or create) the shared SparkSession.
    spark = SparkSession.builder.config(conf=SparkConf()).getOrCreate()

    # Read the raw JSON into a DataFrame.
    business = spark.read.json(raw_data_path)

    # "categories" arrives as a single comma-separated string; split it
    # into an array column.
    # NOTE(review): splitting on ',' leaves a leading space on every element
    # after the first (Yelp uses ", " as the separator) — TODO confirm and
    # consider trimming downstream.
    split_col = func.split(business['categories'], ',')
    # dropna() removes rows containing any null value.
    business = (business.withColumn("categories", split_col)
                        .filter(business["city"] != "")
                        .dropna())

    # Register a temp view and project just the columns we keep.
    business.createOrReplaceTempView("business")
    b_etl = spark.sql(
        "SELECT business_id,name,city,state,latitude,longitude,stars,"
        "review_count,is_open,categories,attributes FROM business"
    ).cache()  # cached: referenced twice below (outlier计算 removed — see joins)
    b_etl.createOrReplaceTempView("b_etl")

    # Per-business Euclidean distance to the state's mean (lat, long).
    # INNER JOIN against per-state averages computed in the subquery.
    outlier = spark.sql(
        "SELECT b1.business_id,"
        "SQRT(POWER(b1.latitude - b2.avg_lat, 2)+POWER(b1.longitude-b2.avg_long,2)) as dist "
        "FROM b_etl b1 INNER JOIN "
        "(SELECT state,AVG(latitude) as avg_lat,AVG(longitude) as avg_long "
        "FROM b_etl GROUP BY state) b2 on b1.state=b2.state ORDER BY dist DESC"
    )
    outlier.createOrReplaceTempView("outlier")

    # Keep only businesses closer than max_dist to their state centroid.
    # float() guards against injecting arbitrary SQL via the parameter.
    joined = spark.sql(
        "SELECT b.* FROM b_etl b INNER JOIN outlier o "
        "ON b.business_id = o.business_id "
        f"WHERE o.dist < {float(max_dist)}"
    )

    # Persist the cleaned dataset as Parquet, replacing any previous run.
    joined.write.parquet(output_path, mode="overwrite")

if __name__ == "__main__":
    # Script entry point: clean the raw Yelp business JSON dump.
    source_path = 'file:///home/qnstar/Documents/datasets/yelp/yelp_academic_dataset_business.json'
    print("Start cleaning raw data!")
    data_process(source_path)
    print("Successfully done")