import os

from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql.window import *
from pyspark import find_spark_home

# Point Spark at a local Hadoop installation (winutils on Windows);
# setdefault means an already-exported HADOOP_HOME wins over this path.
os.environ.setdefault('HADOOP_HOME', 'E:\\hadoop-2.9.2')

# Single-threaded local session for this analysis script.
spark = (
    SparkSession.builder
    .master('local')
    .appName('HelloSpark')
    .getOrCreate()
)

# Load the Yelp dumps. They are JSON Lines (one object per line), which is
# Spark's default JSON mode, so no reader options are needed.
# NOTE(review): the original passed .option('multline', True) — a misspelling
# of 'multiLine' that Spark silently ignored as an unknown option; dropping it
# changes nothing at runtime and removes the misleading typo.
# df_user = spark.read.json('dataset/yelp_academic_dataset_user.json')
# df_user.show(20)

df_checkin = spark.read.json('dataset/yelp_academic_dataset_checkin.json')
df_business = spark.read.json('dataset/yelp_academic_dataset_business.json')
# df_checkin.show(20)

# IV. Check-in analysis
print("四、打卡分析")

# Each checkin row's 'date' field is one string holding many timestamps
# separated by ", ".  Split on the regex ',\s*' so exploded values carry no
# leading space — the original split on ',' alone left ' 2016-...' strings,
# which to_timestamp('yyyy-MM-dd HH:mm:ss') parses to null for every
# timestamp after the first in each row.
df_checkin = df_checkin.withColumn('date1', explode(split(col('date'), ',\\s*')))

# 1. Check-ins per year
print("1.统计每年的打卡次数")
# withColumn already names the column, so no .alias() is needed on year().
df_checkin.withColumn('date2', to_timestamp(col('date1'), 'yyyy-MM-dd HH:mm:ss')) \
    .withColumn('Year', year('date2')) \
    .groupby('Year').count().show(truncate=False)

# 2. Check-ins per hour of day (0-23)
print("2.统计24小时每小时打卡次数")
# trim() guards against the leading space that splitting 'date' on ','
# leaves on 'date1' — without it to_timestamp returns null for most rows
# and they all collapse into a null Hour bucket.
df_checkin.withColumn('date2', to_timestamp(trim(col('date1')), 'yyyy-MM-dd HH:mm:ss')) \
    .withColumn('Hour', hour('date2')) \
    .groupby('Hour').count().show(truncate=False)

# Join checkin and business tables on their shared key.  Passing the column
# NAME (a USING-style equi-join) keeps a single 'business_id' column in the
# result; the original expression join kept two identically-named columns,
# which raises an ambiguity error if 'business_id' is ever referenced later.
df_checkin_business = df_checkin.join(df_business, 'business_id')

# 3. Cities with the most check-ins
print("3.统计最喜欢打卡的城市")
df_checkin_business.groupby('city').count() \
    .sort(desc('count')) \
    .show(truncate=False)

# 4. Check-in ranking across all businesses
print("4.全部商家的打卡排行榜")
# Count exploded check-in events per business, then attach business details.
df_checkin_count = df_checkin.groupby('business_id').count()
# Join on the column name so the result carries one unambiguous
# 'business_id' column instead of the duplicate pair an expression
# equi-join would produce.
df_checkin_count_business = df_checkin_count.join(df_business, 'business_id')
df_checkin_count_business.select('name', 'count') \
    .sort(desc('count')) \
    .show(truncate=False)








