%pyspark

# Record the wall-clock start time so the total runtime of this
# Zeppelin paragraph can be reported at the end.
import time
start_time = time.time()

from itertools import combinations

from pyspark.sql import HiveContext
from pyspark.sql.functions import (
    col, count, desc, explode, hour, lower, regexp_replace,
    split, sum, to_date, to_timestamp, trim, year,
)
from pyspark.sql.types import ArrayType, StringType

# Build a HiveContext from the Zeppelin-provided SparkContext `sc`.
hc = HiveContext(sc)

# Load the tables that already exist in Hive. NOTE: these table names
# must match the ones actually created in the Hive metastore.
b_df = hc.table('business')
c_df = hc.table('checkin')
u_df = hc.table('users')  
r_df = hc.table('review')

##################################
# 5. Check-in analysis
##################################

# Number of check-ins per year.
# `checkin_dates` is a comma-separated list of timestamp strings; explode it
# into one row per check-in, parse the year out of each value, and count.
checkin_events = c_df.select(
    explode(split(col('checkin_dates'), ',')).alias('datetime')
)
year_ck_count = (
    checkin_events
    .withColumn('year_c', year(to_date(trim(col('datetime')), "yyyy-MM-dd HH:mm:ss")))
    .groupBy('year_c')
    .agg(count("*").alias("checkin_year_count"))
    .orderBy(desc("checkin_year_count"))
)
z.show(year_ck_count)

# Check-in counts for each hour of the day (0-23).
# BUG FIX: the original parsed with to_date(), which truncates the value to a
# date (midnight), so hour() returned 0 for every row and the whole
# distribution collapsed into a single bucket. Parse with to_timestamp()
# instead to preserve the time-of-day component.
hour_ck = c_df.select(explode(split(col('checkin_dates'), ',')).alias('datetime')) \
    .withColumn('hour_c', hour(to_timestamp(trim(col('datetime')), "yyyy-MM-dd HH:mm:ss"))) \
    .groupBy('hour_c').agg(count("*").alias("checkin_per_hour")) \
    .orderBy(desc("checkin_per_hour"))
z.show(hour_ck)

# Cities with the most check-ins (top 20).
# The checkin table only carries business_id, so each exploded check-in
# event is joined against the business table to obtain its city.
events_with_business = c_df.select(
    'business_id',
    explode(split(col('checkin_dates'), ',')).alias('datetime'),
)
city_ck = (
    events_with_business
    .join(b_df, 'business_id')
    .groupBy('city')
    .agg(count('*').alias('cnt'))
    .orderBy(desc('cnt'))
    .limit(20)
)
z.show(city_ck)

# Check-in leaderboard across all businesses (top 5 by total check-ins).
top_checkin_business = (
    c_df
    .select('business_id', explode(split(col('checkin_dates'), ',')).alias('time'))
    .groupBy('business_id')
    .agg(count('*').alias('count'))
    .orderBy(desc("count"))
    .limit(5)
)
z.show(top_checkin_business)

# Report how long this paragraph took end-to-end (paired with the
# `start_time` captured at the top of the paragraph).
end_time = time.time()
execution_time = end_time - start_time
print("Execution Time: {:.2f} seconds".format(execution_time))