from pyspark.sql import SparkSession, functions, Window
from pyspark.sql.functions import *
from pyspark.sql.types import *

# Build (or reuse) a local SparkSession for the Yelp analysis job.
spark = (
    SparkSession.builder
    .master('local')
    .appName('Yelp')
    .getOrCreate()
)

# Load the Yelp review dump. Each file is newline-delimited JSON (one object
# per line), hence multiLine=False. The 'header' option only applies to CSV
# sources and was a silent no-op for JSON reads, so it has been dropped.
# Forward slashes in the path work on both Windows and POSIX systems.
df_review = spark.read.option('multiLine', False).json('dataset/yelp_academic_dataset_review.json')
# df_user = spark.read.option('multiLine', False).json('dataset/yelp_academic_dataset_user.json')
# df_tip = spark.read.option('multiLine', False).json('dataset/yelp_academic_dataset_tip.json')
# df_business = spark.read.option('multiLine', False).json('dataset/yelp_academic_dataset_business.json')
# df_checkin = spark.read.option('multiLine', False).json('dataset/yelp_academic_dataset_checkin.json')
# df_user.printSchema()
# df_user.show()
# df_business.createOrReplaceTempView('business')
df_review.createOrReplaceTempView('review')  # register for spark.sql queries below
# df_user.createOrReplaceTempView('user')
# df_user.createOrReplaceTempView('user')

###### Business analysis ######
# 01 Find the most common businesses in the US (top 20)

window_1 = Window.orderBy(col('Num').desc())
# spark.sql("""
#     SELECT name, COUNT(name) as Num
#     FROM business
#     GROUP BY name
#     ORDER BY COUNT(name) DESC
#     LIMIT 20
# """).show(truncate=False)
# df_business \
#     .select('name')\
#     .groupby('name')\
#     .agg(count('name').alias('Num'))\
#     .orderBy(col('Num').desc())\
#     .select('name', rank().over(window_1).alias('rank'))\
#     .where('rank <= 20')\
#     .show()


# # 02 找出美国商户最多的前10个城市
# spark.sql("""
#     SELECT city, count(business_id) as Num
#     FROM business
#     GROUP BY city
#     ORDER BY COUNT(business_id) DESC
#     LIMIT 10
# """).show(truncate=False)
# df_business \
#     .select('city', 'business_id')\
#     .groupby('city')\
#     .agg(count(col('business_id')).alias('Num'))\
#     .orderBy(col('Num').desc())\
#     .select('city', rank().over(window_1).alias('rank'))\
#     .where('rank <= 10')\
#     .show()

# # 03 找出美国商户最多的前5个州
# spark.sql("""
#     SELECT state, count(business_id) as Num
#     FROM business
#     GROUP BY state
#     ORDER BY COUNT(business_id) DESC
#     LIMIT 5
# """).show(truncate=False)
# df_business \
#     .select('state', 'business_id')\
#     .groupby('state')\
#     .agg(count('business_id').alias('Num'))\
#     .orderBy(col('Num').desc())\
#     .select('state', 'Num', rank().over(window_1).alias('rank'))\
#     .where('rank <= 10')\
#     .show()

# # 04 找出美国最常见商户，并显示平均评分（前20）
# spark.sql("""
#     SELECT name, avg(stars) as avg_stars
#     FROM business
#     GROUP BY name
#     ORDER BY COUNT(name) DESC
#     LIMIT 20
# """).show(truncate=False)

# df_business \
#     .select('name')\
#     .groupby('name')\
#     .agg(count('name').alias('Num'))\
#     .orderBy(col('Num').desc())\
#     .select('name', rank().over(window_1).alias('rank'))\
#     .where('rank <= 20')\
#     .show()

# df_business\
#     .select('name', 'stars')\
#     .groupby('name')\
#     .agg(count('name').alias('Num'))\
#     .agg(avg('stars').alias('avg_stars'))\
#     .orderBy(col('Num').desc())\
#     .select('name', 'avg_stars', rank().over(window_1).alias('rank'))\
#     .where('rank <= 20')\
#     .show()

# 05 统计评分最高的城市（前10）
# spark.sql("""
#     SELECT city, avg(stars) as avg_stars
#     FROM business
#     GROUP BY city
#     ORDER BY avg_stars DESC
#     LIMIT 10
# """).show(truncate=False)

# window_2 = Window.orderBy(col('avg_stars').desc())
# df_business\
#     .select('city', 'stars')\
#     .groupby('city')\
#     .agg(avg('stars').alias('avg_stars'))\
#     .orderBy(col('avg_stars').desc())\
#     .select('city', 'avg_stars', rank().over(window_2).alias('rank'))\
#     .where('rank <= 10')\
#     .show(truncate=False)

# 06 统计category的数量
# spark.sql("""
#     SELECT COUNT(DISTINCT categories)
#     FROM business
# """).show(truncate=False)

# 07 统计最多的category及数量（前10）
# spark.sql("""
#     SELECT categories, count(categories) as cnt
#     FROM business
#     GROUP BY categories
#     ORDER BY cnt DESC
#     LIMIT 10
# """).show(truncate=False)
#
# df_business \
#     .select('categories')\
#     .groupby('categories')\
#     .agg(count('categories').alias('Num'))\
#     .orderBy(col('Num').desc())\
#     .select('categories', 'Num', rank().over(window_1).alias('rank'))\
#     .where('rank <= 10')\
#     .show()

# 08 收获五星评论最多的商户（前20） undone
# spark.sql("""
#     SELECT name, COUNT(review_id) as cnt
#     FROM business LEFT JOIN review ON business.business_id = review.business_id
#     WHERE review.stars = 5
#     GROUP BY name
#     ORDER BY cnt DESC
#     LIMIT 20
# """).show(truncate=False)
#
# df_business\
#     .join(df_review, df_business['business_id'] == df_review['business_id'])\
#     .select('name', 'stars')\
#     .groupby('name')\
#     .agg(count('name').alias('Num'))\
#     .where('stars == 5.0')\
#     .orderBy(col('Num').desc())\
#     .select('name', 'Num', rank().over(window_1).alias('rank'))\
#     .where('rank <= 20')\
#     .show()

# 09 统计不同类型（中国菜、美式、墨西哥）的餐厅类型及数量
# df_business\
#     .select(explode(split(col('categories'),', ')).alias('category')).show()

# df_business\
#     .filter(col('categories').like('%Restaurants%'))\
#     .withColumn('category', explode(split(col('categories'), ',')))\
#     .select('category')\
#     .groupby('category')\
#     .agg(count('category').alias('cnt'))\
#     .where("category like '%Chinese%' or category like '%American%' or category like '%Mexican%' ")\
#     .orderBy(col('cnt').desc())\
#     .select('category', 'cnt')\
#     .show()

# 10 统计不同类型（中国菜、美式、墨西哥）的餐厅的评论数量
# df_business\
#     .filter(col('categories').like('%Restaurants%'))\
#     .withColumn('category', explode(split(col('categories'), ',')))\
#     .select('category', 'review_count')\
#     .groupby('category')\
#     .agg(sum('review_count').alias('cnt')) \
#     .where("category like '%Chinese%' or category like '%American%' or category like '%Mexican%' ") \
#     .orderBy(col('cnt').desc())\
#     .select('category', 'cnt')\
#     .show()

# 11 统计不同类型（中国菜、美式、墨西哥）的餐厅的评分分布


###### User analysis ######
# 01 Count the number of users who joined each year
# df_user \
#     .withColumn('year', year('yelping_since')) \
#     .select('year') \
#     .groupby('year') \
#     .agg(count('year').alias('Num')) \
#     .orderBy(col('year').asc()) \
#     .show()

# 02 统计评论达人（review_count）
# df_user\
#     .select('user_id', 'name', 'review_count')\
#     .orderBy(col('review_count').desc())\
#     .show()

# 03 统计人气最高的用户（fans）
# df_user \
#     .select('user_id', 'name', 'fans') \
#     .orderBy(col('fans').desc()) \
#     .show()

# 04 统计每年优质用户、普通用户比例
# df_user\
#     .select(explode(split(col('elite').alias('elite'), ','))).show()

# 每年用户人数
# df_user\
#     .select(count('user_id'))\
#     .show()
#
# df_user\
#     .withColumn('elite', explode(split(col('elite').alias('elite'), ',')))\
#     .select('elite')\
#     .groupby('elite')\
#     .agg(count('elite').alias('cnt'))\
#     .orderBy(col('elite').asc())\
#     .select('elite', 'cnt')\
#     .show()

# 05 显示每年总用户数、沉默用户数（未写评论）的比例



###### Review analysis ######
# 01 Count the number of reviews per year
# Derive the review year from the 'date' column, count reviews per year
# (count('year') skips null years, matching the original aggregation),
# and display the result in chronological order.
yearly_reviews = (
    df_review
    .select(year('date').alias('year'))
    .groupby('year')
    .agg(count('year').alias('review_count'))
    .orderBy(col('year').asc())
)
yearly_reviews.show()

# 02 Count reviews by day of the week (Monday through Sunday)


# 03 Analyze the distribution of review star ratings
# 04 Count reviews voted helpful, funny, and cool
# 05 Yearly ranking of users by number of reviews written

###### Check-in analysis ######
# 01 Count check-ins per year
# 02 Count check-ins for each hour of the day (24 hours)
# 03 Find the cities with the most check-ins
# 04 Ranking of all businesses by check-in count