%pyspark

# Record wall-clock start time so the total notebook runtime can be measured.
import time
start_time = time.time()

from pyspark.sql import HiveContext
from pyspark.sql.functions import col, count, desc, sum, explode, split, to_date, year, dayofweek, lower, regexp_replace
from pyspark.sql.functions import udf, trim
from pyspark.sql.types import StringType, ArrayType
from itertools import combinations

# `sc` is the SparkContext injected by the Zeppelin runtime.
hc = HiveContext(sc)

# Load the tables that already exist in Hive; the names here must match
# the tables actually created there.
b_df, c_df, u_df, r_df = (
    hc.table(tbl) for tbl in ('business', 'checkin', 'users', 'review')
)

##################################
# 1. Business analysis
##################################

# 1-1 Top 20 most common business names in the US
df1 = (
    b_df.groupBy("name")
        .agg(count("name").alias("常见商户数量"))
        .orderBy(desc("常见商户数量"))
        .limit(20)
)
z.show(df1)

# 1-2 Top 10 US cities with the most businesses.
# FIX: the count column was aliased "城市" ("city"), mislabeling the
# business count in the displayed result; alias it "商户数量" ("business
# count") instead, matching the state-level query's naming.
df2 = b_df.groupBy("city") \
          .agg(count("city").alias("商户数量")) \
          .orderBy(desc("商户数量")) \
          .limit(10)
z.show(df2)

# 1-3 Top 5 US states with the most businesses
df3 = (
    b_df.groupBy("state")
        .agg(count("state").alias("商户数量"))
        .orderBy(desc("商户数量"))
        .limit(5)
)
z.show(df3)

# 1-4 Top 20 most common business names, with their average rating.
# FIX: the header promised an average rating but the query only exposed the
# raw sum of stars; derive avg_star = sum(stars) / count so the displayed
# result actually contains the average. The original 'count' and 'sum_star'
# columns are kept for compatibility.
df4 = b_df.groupBy(col('name')) \
          .agg(count(col('name')).alias('count'),
               sum('stars').alias('sum_star'),
               (sum('stars') / count('name')).alias('avg_star')) \
          .orderBy(col('count').desc()) \
          .limit(20)
z.show(df4)

# 1-5 Top 10 highest-rated cities.
# FIX: ranking by the SUM of stars mostly rewards cities that simply have
# many businesses; "highest rated" means the average, so rank by
# sum(stars) / count(stars) instead.
df5 = b_df.select(col("city"), col("stars")) \
          .groupBy("city") \
          .agg((sum("stars") / count("stars")).alias("avg_stars")) \
          .orderBy(col("avg_stars").desc()) \
          .limit(10)
z.show(df5)

# 1-6 Count businesses per category (top 10).
# FIX: `categories` is a ", "-separated string, so splitting on ',' alone
# leaves a leading space on every token after the first (" Bars" vs "Bars"),
# fragmenting the counts across two keys. Trim each token after exploding
# (`trim` was already imported at the top of the file but never used).
# Note: explode() cannot be nested inside trim(), hence the second select.
df6 = b_df.select(explode(split(col('categories'), ',')).alias('raw_category')) \
          .select(trim(col('raw_category')).alias('category')) \
          .groupBy("category") \
          .agg(count("category").alias("数量")) \
          .orderBy(desc("数量")) \
          .limit(10)
z.show(df6)

# 1-7 Top 10 categories and their counts (intentionally repeats 1-6).
# FIX: splitting on ',' leaves a leading space on every token after the
# first, so one category would be counted under two distinct keys; trim
# the exploded tokens before grouping.
df7 = b_df.select(explode(split(col('categories'), ',')).alias('category')) \
          .select(trim(col('category')).alias('category')) \
          .groupBy("category") \
          .agg(count("category").alias("数量")) \
          .orderBy(desc("数量")) \
          .limit(10)
z.show(df7)

# 1-8 Top 20 five-star businesses with the most reviews
df8 = (
    b_df.where(col("stars") == 5.0)
        .select(col("name"), col("review_count"))
        .orderBy(desc("review_count"))
        .limit(20)
)
z.show(df8)

# Count restaurants by cuisine (Chinese, American, Mexican).
# Assumes `categories` contains "Chinese", "American..." or "Mexican".
cuisine_filter = (
    (col('category') == 'Chinese')
    | col('category').like('American%')
    | (col('category') == 'Mexican')
)
df9 = (
    b_df.select(explode(split(col('categories'), ', ')).alias('category'))
        .where(cuisine_filter)
        .groupBy(col('category'))
        .agg(count(col('category')).alias('count'))
)
z.show(df9)

# Count the number of reviews for each cuisine (Chinese, American, Mexican).
# Reviews are joined to businesses on business_id, then filtered by the
# business's exploded category.
cuisine_business = b_df.select('business_id', explode(split(col('categories'), ', ')).alias('category'))
r_df = r_df.withColumnRenamed("rev_business_id", "business_id")
review_with_category = r_df.join(cuisine_business, 'business_id')

# FIX: the original aggregated sum("rev_useful"), which totals "useful"
# votes rather than counting reviews, contradicting both the heading and
# the *_review_count aliases; count the joined rows instead.
df10_1 = review_with_category.where(col("category") == 'Chinese').agg(count("*").alias("Chinese_review_count"))
df10_2 = review_with_category.where(col("category") == 'Mexican').agg(count("*").alias("Mexican_review_count"))
df10_3 = review_with_category.where(col("category").like('American%')).agg(count("*").alias("American_review_count"))
z.show(df10_1)
z.show(df10_2)
z.show(df10_3)

# Rating distribution per cuisine (Chinese, Mexican, American restaurants).
# LIKE without wildcards is plain equality, so Column comparisons are
# equivalent to the original SQL-string filters.
df11_1 = (
    review_with_category
    .where(col('category') == 'Chinese')
    .groupBy('rev_stars')
    .agg(count('rev_stars').alias('count_Chinese'))
    .orderBy(col('count_Chinese').desc())
)
z.show(df11_1)

df11_2 = (
    review_with_category
    .where(col('category') == 'Mexican')
    .groupBy('rev_stars')
    .agg(count('rev_stars').alias('count_Mexican'))
    .orderBy(col('count_Mexican').desc())
)
z.show(df11_2)

df11_3 = (
    review_with_category
    .where(col('category').like('American%'))
    .groupBy('rev_stars')
    .agg(count('rev_stars').alias('count_American'))
    .orderBy(col('count_American').desc())
)
z.show(df11_3)
