%pyspark

import time
# Wall-clock start of this paragraph; paired with the elapsed-time report
# printed at the bottom of the script.
start_time = time.time()

from pyspark.sql import HiveContext
from pyspark.sql.functions import col, count, desc, sum, explode, split, to_date, year, lower, regexp_replace, dayofweek
from pyspark.sql.functions import udf, trim
from pyspark.sql.types import StringType, ArrayType
from itertools import combinations

# Hive-backed SQL context built on the SparkContext (`sc` is provided by the
# Zeppelin %pyspark interpreter).
hc = HiveContext(sc)

# Look up the tables that already exist in Hive; the names here must match
# the table names actually created there.
_hive_tables = {name: hc.table(name) for name in ('business', 'checkin', 'users', 'review')}
b_df = _hive_tables['business']
c_df = _hive_tables['checkin']
u_df = _hive_tables['users']
r_df = _hive_tables['review']

##################################
# Section 4: Rating analysis
##################################

# Distribution of review scores across the 1-5 star range.
star_dist = (
    r_df.groupBy("rev_stars")
        .agg(count("*").alias("count_per_star"))
        .orderBy("rev_stars")
)
z.show(star_dist)

# Review counts per day of the week. NOTE: Spark's dayofweek() numbers days
# 1=Sunday through 7=Saturday, so the output is Sunday-first, not Monday-first.
daily_reviews = (
    r_df.withColumn("dow", dayofweek(col("rev_date")))
        .groupBy("dow")
        .agg(count("*").alias("review_count"))
        .orderBy("dow")
)
z.show(daily_reviews)

# Businesses with the most 5-star reviews (top 20 kept).
top_5_star_business = (
    r_df.filter(col("rev_stars") == 5)
        .groupBy("rev_business_id")
        .agg(count("*").alias("five_star_count"))
        .orderBy(col("five_star_count").desc())
        .limit(20)
)
z.show(top_5_star_business)

# Report the total wall-clock runtime of this paragraph (uses start_time
# captured at the top of the script).
end_time = time.time()
execution_time = end_time - start_time
print("Execution Time: {:.2f} seconds".format(execution_time))