%pyspark

import time
start_time = time.time()  # wall-clock start; reported by the timing print at the end of this paragraph

from pyspark.sql import HiveContext
from pyspark.sql.functions import col, count, desc, sum, explode, split, to_date, year, dayofweek, lower, regexp_replace
from pyspark.sql.functions import udf, trim
from pyspark.sql.types import StringType, ArrayType
from itertools import combinations

# `sc` is the SparkContext injected by the Zeppelin runtime.
hc = HiveContext(sc)

# Load the Hive tables analysed below; the table names must match the ones
# actually created in Hive.
b_df = hc.table('business')
c_df = hc.table('checkin')
u_df = hc.table('users')  
r_df = hc.table('review')

##################################
# 一、商户分析
##################################

# 1-1 Top 20 most common business names in the US.
df1 = (b_df
       .groupBy("name")
       .agg(count("name").alias("常见商户数量"))
       .orderBy(desc("常见商户数量"))
       .limit(20))
z.show(df1)

# 1-2 Top 10 US cities by number of businesses.
# FIX: the count column was aliased "城市" ("city"), mislabelling a business
# count as a city name; use "商户数量" ("business count") consistent with 1-3.
df2 = b_df.groupBy("city") \
          .agg(count("city").alias("商户数量")) \
          .orderBy(desc("商户数量")) \
          .limit(10)
z.show(df2)

# 1-3 Top 5 US states by number of businesses.
df3 = (b_df
       .groupBy("state")
       .agg(count("state").alias("商户数量"))
       .orderBy(desc("商户数量"))
       .limit(5))
z.show(df3)

# 1-4 Top 20 most common business names with total and average star rating.
# FIX: the heading promises the *average* rating, but the original only
# produced the star total; derive avg_star from the existing aggregates
# (sum_star / count) so the original columns are preserved.
df4 = b_df.groupBy(col('name')) \
          .agg(count(col('name')).alias('count'), sum('stars').alias('sum_star')) \
          .withColumn('avg_star', col('sum_star') / col('count')) \
          .orderBy(col('count').desc()) \
          .limit(20)
z.show(df4)

# 1-5 Top 10 highest-rated cities.
# FIX: the original summed stars per city, which ranks cities by how many
# businesses they have, not by rating; rank by the mean star rating instead
# (computed with the already-imported sum/count to avoid new imports).
df5 = b_df.groupBy("city") \
          .agg((sum("stars") / count("stars")).alias("avg_stars")) \
          .orderBy(col("avg_stars").desc()) \
          .limit(10)
z.show(df5)

# 1-6 Top 10 categories by number of businesses.
# FIX: `categories` entries are separated by ", " (the ', ' separator is used
# in other splits in this file); splitting on ',' alone leaves a leading space
# on every entry after the first, so " Restaurants" and "Restaurants" were
# counted as different categories. Trim each entry after exploding (explode
# cannot be nested inside trim, so trim in a second select).
df6 = b_df.select(explode(split(col('categories'), ',')).alias('raw_category')) \
          .select(trim(col('raw_category')).alias('category')) \
          .groupBy("category") \
          .agg(count("category").alias("数量")) \
          .orderBy(desc("数量")) \
          .limit(10)
z.show(df6)

# 1-7 Top 10 categories and counts (duplicate of 1-6, kept for the notebook).
# FIX: same defect as 1-6 — splitting on ',' leaves a leading space on each
# entry, splitting one category's count across two spellings; trim after explode.
df7 = b_df.select(explode(split(col('categories'), ',')).alias('raw_category')) \
          .select(trim(col('raw_category')).alias('category')) \
          .groupBy("category") \
          .agg(count("category").alias("数量")) \
          .orderBy(desc("数量")) \
          .limit(10)
z.show(df7)

# 1-8 Businesses rated exactly 5.0 stars, ranked by review_count (top 20).
df8 = (b_df
       .where(col("stars") == 5.0)
       .select(col('name'), col('review_count'))
       .orderBy(desc('review_count'))
       .limit(20))
z.show(df8)

# Count restaurants by cuisine type (Chinese, American, Mexican).
# Assumes `categories` contains "Chinese", "American..." or "Mexican".
is_target_cuisine = (col('category') == 'Chinese') \
    | col('category').like('American%') \
    | (col('category') == 'Mexican')

df9 = (b_df
       .select(explode(split(col('categories'), ', ')).alias('category'))
       .where(is_target_cuisine)
       .groupBy(col('category'))
       .agg(count(col('category')).alias('count')))
z.show(df9)

# Number of reviews per cuisine type (Chinese, American, Mexican).
# Review rows join to businesses through business_id; the cuisine comes from
# the business table's comma-separated `categories` column.
cuisine_business = b_df.select('business_id', explode(split(col('categories'), ', ')).alias('category'))
# The review table names its key rev_business_id; align it with business's key.
r_df = r_df.withColumnRenamed("rev_business_id", "business_id")
review_with_category = r_df.join(cuisine_business, 'business_id')

# FIX: the goal is the *number of reviews* per cuisine, but the original
# summed the rev_useful vote column (total useful votes, not review count);
# count the review rows instead.
df10_1 = review_with_category.where("category like 'Chinese'").agg(count("*").alias("Chinese_review_count"))
df10_2 = review_with_category.where("category like 'Mexican'").agg(count("*").alias("Mexican_review_count"))
df10_3 = review_with_category.where("category like 'American%'").agg(count("*").alias("American_review_count"))
z.show(df10_1)
z.show(df10_2)
z.show(df10_3)

# Star-rating distribution of reviews for each cuisine type (Chinese,
# American, Mexican).
def _rating_distribution(category_pattern, out_col):
    # Helper: review count per rev_stars value for one cuisine pattern.
    return review_with_category.where("category like '%s'" % category_pattern) \
        .groupBy('rev_stars') \
        .agg(count('rev_stars').alias(out_col)) \
        .orderBy(desc(out_col))

df11_1 = _rating_distribution('Chinese', 'count_Chinese')
z.show(df11_1)

df11_2 = _rating_distribution('Mexican', 'count_Mexican')
z.show(df11_2)

df11_3 = _rating_distribution('American%', 'count_American')
z.show(df11_3)


##################################
# 二、用户分析
##################################
%pyspark

import time
start_time = time.time()  # restart the wall-clock timer for this paragraph

from pyspark.sql import HiveContext
from pyspark.sql.functions import col, count, desc, sum, explode, split, to_date, year, dayofweek, lower, regexp_replace, trim
from pyspark.sql.functions import countDistinct
from pyspark.sql.types import StringType, ArrayType
from itertools import combinations

# `sc` is the SparkContext injected by the Zeppelin runtime.
hc = HiveContext(sc)

# Reload the Hive tables for this paragraph.
# NOTE: the users table's join-date column is user_yelping_since
# (not yelping_since).
b_df = hc.table('business')
c_df = hc.table('checkin')
u_df = hc.table('users')   
r_df = hc.table('review')

##################################
# 二、用户分析
##################################

# Number of users who joined Yelp each year, derived from the
# user_yelping_since date column.
joined_on = to_date(col("user_yelping_since"), "yyyy-MM-dd")
df12 = u_df.withColumn("year_join", year(joined_on)) \
    .groupBy("year_join") \
    .agg(count("user_id").alias("new_users"))
z.show(df12)

# Most prolific reviewers (highest user_review_count, top 20).
df13 = u_df.select('user_id', 'user_review_count') \
    .orderBy(col('user_review_count').desc()) \
    .limit(20)
z.show(df13)

# Most popular users (highest fan count, top 20).
df14 = u_df.select('user_id', 'user_fans') \
    .orderBy(col('user_fans').desc()) \
    .limit(20)
z.show(df14)

# user_elite is a comma-separated string such as "2015,2016,2017";
# explode it to one row per elite year and cast to int.
elite_years = u_df.select(
    'user_id',
    explode(split(col('user_elite'), ',')).alias('elite_year')
).withColumn('elite_year_int', col('elite_year').cast('int'))

# Elite-user count per year, most popular years first.
df15 = elite_years.groupBy('elite_year_int') \
    .agg(count('user_id').alias('elite_count')) \
    .orderBy(desc('elite_count'))

z.show(df15)

# Overall user totals and "silent" users (user_review_count == 0).
# NOTE(review): the original heading promised a *per-year* breakdown and a
# ratio, but the code computes only the two overall counts — the ratio would
# have to be derived from total_users and silent_users by the reader.
df16_1 = u_df.agg(countDistinct('user_id').alias('total_users'))
df16_2 = u_df.where(col('user_review_count')==0).agg(count('user_id').alias('silent_users'))
z.show(df16_1)
z.show(df16_2)

# Per-year new users, reviews, elite users and check-ins (tip logic removed).
year_user = u_df.withColumn("year_user", year(to_date(col("user_yelping_since"), "yyyy-MM-dd"))) \
    .groupBy("year_user").agg(countDistinct("user_id").alias("new_users"))

year_review = r_df.withColumn("year_rev", year(to_date(col("rev_date"), "yyyy-MM-dd"))) \
    .groupBy("year_rev").agg(count("*").alias("review_count"))

# FIX: user_elite is a comma-separated string (it is split before exploding
# where it is used earlier in this file); explode() on a plain string column
# fails analysis, so split it first.
year_elite = u_df.select("user_id", explode(split(col("user_elite"), ",")).alias("elite_year")) \
    .withColumn("elite_year_int", col("elite_year").cast("int")) \
    .groupBy("elite_year_int") \
    .agg(countDistinct("user_id").alias("elite_users"))

# checkin.date holds a comma-separated list of timestamps — one per check-in.
year_ck = c_df.select(explode(split(col('date'), ',')).alias('datetime')) \
    .withColumn('year_ck', year(to_date(trim(col('datetime')), "yyyy-MM-dd HH:mm:ss"))) \
    .groupBy('year_ck').agg(count("*").alias("checkin_count"))

from pyspark.sql.functions import coalesce
# Stitch the four per-year aggregates together with full outer joins.
# NOTE(review): each later join ORs the new table's year column against every
# year column joined so far, so a year missing from one aggregate can still
# line up with another; the logic is order-sensitive, hence left unchanged.
result_all = year_user.join(year_review, year_user.year_user == year_review.year_rev, "full") \
    .join(year_elite, (col("year_user")==col("elite_year_int"))|(col("year_rev")==col("elite_year_int")), "full") \
    .join(year_ck, (col("year_user")==col("year_ck"))|(col("year_rev")==col("year_ck"))|(col("elite_year_int")==col("year_ck")),"full")

# coalesce picks the first non-null year column as the row's canonical year.
final_result = result_all.select(
    coalesce(col("year_user"), col("year_rev"), col("elite_year_int"), col("year_ck")).alias("year"),
    "new_users",
    "review_count",
    "elite_users",
    "checkin_count"
).orderBy("year")
z.show(final_result)

# Report this paragraph's wall-clock run time.
elapsed_seconds = time.time() - start_time
print("Execution Time: {:.2f} seconds".format(elapsed_seconds))



##################################
# 三、评论分析
##################################

# Review count per year.
# FIX: the review table's date column is rev_date (it is referenced as
# rev_date elsewhere on r_df); the bare "date" column does not exist.
df_rev_year = r_df.withColumn("year_r", year(to_date(col("rev_date"), "yyyy-MM-dd"))) \
    .groupBy("year_r") \
    .agg(count("*").alias("yearly_review_count")) \
    .orderBy("year_r")
z.show(df_rev_year)

# Total useful / funny / cool votes across all reviews.
# FIX: review columns carry the rev_ prefix (rev_useful is summed earlier in
# this file); rev_funny / rev_cool are assumed by the same naming convention
# — TODO: confirm against the actual Hive schema.
useful_funny_cool = r_df.agg(
    sum("rev_useful").alias("total_useful"),
    sum("rev_funny").alias("total_funny"),
    sum("rev_cool").alias("total_cool"))
z.show(useful_funny_cool)

# Top reviewers per year: review counts per (year, user), top 50 rows overall.
# FIX: "date" -> rev_date (the name used elsewhere on r_df).
# NOTE(review): user_id / review_id are also assumed to be rev_-prefixed
# (rev_user_id / rev_review_id), matching rev_business_id, rev_stars, etc.
# — TODO confirm against the actual Hive schema.
user_rank_year = r_df.withColumn("year_r", year(to_date(col("rev_date"), "yyyy-MM-dd"))) \
    .groupBy("year_r", "rev_user_id") \
    .agg(count("rev_review_id").alias("user_review_count")) \
    .orderBy(desc("year_r"), desc("user_review_count")) \
    .limit(50)
z.show(user_rank_year)

# Top 20 most frequent words in review text (tokenized, stopwords removed).
stopwords = ["a", "an", "the", "with", "and", "or", "for", "on", "of", "in", "at", "to", "from", "this", "that", "it", 
             "is", "are", "be", "was", "were", "has", "had", "have", "as", "by", "about", "then", "so", "if", "not"]

# FIX: the review table's columns carry the rev_ prefix (rev_stars, rev_date,
# rev_useful, rev_business_id); "review_id" / "text" do not exist — use
# rev_review_id / rev_text. TODO: confirm against the actual Hive schema.
clean_reviews = r_df.select("rev_review_id", lower(regexp_replace(col("rev_text"), "[^a-zA-Z0-9\\s]", "")).alias("clean_text"))
words = clean_reviews.select(explode(split(col("clean_text"), "\\s+")).alias("word")).filter(col("word")!="").filter(~col("word").isin(stopwords))
top20_words = words.groupBy("word").agg(count("*").alias("cnt")).orderBy(desc("cnt")).limit(20)
z.show(top20_words)

# Full word-frequency table (input for a word cloud).
word_freq = (words
             .groupBy("word")
             .agg(count("*").alias("count"))
             .orderBy(desc("count")))
# z.show(word_freq)  # uncomment to inspect


# Word relationship graph (co-occurrence analysis).

# PERF FIX: the original tested membership against the stopword *list* once
# per word (O(len(stopwords)) each time, inside a UDF applied to every
# review); precompute a set for O(1) lookups.
stopword_set = set(stopwords)

def tokenize_filter(text):
    """Lowercase *text*, strip non-alphanumerics, and return its non-stopword tokens."""
    import re
    cleaned = re.sub("[^a-z0-9\\s]", "", text.lower())
    return [w for w in cleaned.split() if w != "" and w not in stopword_set]

tokenize_filter_udf = udf(tokenize_filter, ArrayType(StringType()))
# FIX: review columns are rev_-prefixed (rev_stars, rev_date, ...); use
# rev_review_id / rev_text instead of the non-existent "review_id" / "text".
# TODO: confirm against the actual Hive schema.
filtered_reviews = r_df.select("rev_review_id", tokenize_filter_udf(col("rev_text")).alias("words"))

def generate_pairs(words):
    """Return every 2-combination of the distinct words, in sorted order."""
    unique_sorted = sorted(set(words))
    return [pair for pair in combinations(unique_sorted, 2)]

# Build co-occurring word pairs per review and rank the 50 most frequent.
pairs_udf = udf(generate_pairs, ArrayType(ArrayType(StringType())))
pairs_df = (filtered_reviews
            .withColumn("word_pairs", pairs_udf(col("words")))
            .select(explode(col("word_pairs")).alias("pair")))
word_cooccurrence = (pairs_df
                     .groupBy("pair")
                     .agg(count("*").alias("cooccur_count"))
                     .orderBy(desc("cooccur_count"))
                     .limit(50))
z.show(word_cooccurrence)


##################################
# 四、评分分析
##################################

# Distribution of review star ratings (1-5).
# FIX: the review stars column is rev_stars (used as such in section 一);
# "stars" does not exist on r_df.
star_dist = r_df.groupBy("rev_stars").agg(count("*").alias("count_per_star")).orderBy("rev_stars")
z.show(star_dist)

# Review counts by day of week (Spark's dayofweek: 1 = Sunday ... 7 = Saturday).
# FIX: the review date column is rev_date (used as such elsewhere on r_df).
daily_reviews = r_df.withColumn("dow", dayofweek(col("rev_date"))).groupBy("dow").agg(count("*").alias("review_count")).orderBy("dow")
z.show(daily_reviews)

# Businesses with the most 5-star reviews (top 20).
# FIX: r_df was reloaded fresh in this paragraph, so its columns are
# rev_-prefixed: rev_stars and rev_business_id (the earlier rename of
# rev_business_id -> business_id applied only to section 一's r_df).
top_5_star_business = r_df.where(col("rev_stars")==5).groupBy("rev_business_id").agg(count("*").alias("five_star_count")).orderBy(desc("five_star_count")).limit(20)
z.show(top_5_star_business)


##################################
# 五、打卡分析
##################################

# Check-in count per year; checkin.date is a comma-separated list of
# timestamps, so each exploded element is one check-in event.
# Ordered by busiest year first.
year_ck_count = (c_df
                 .select(explode(split(col('date'), ',')).alias('datetime'))
                 .withColumn('year_c', year(to_date(trim(col('datetime')), "yyyy-MM-dd HH:mm:ss")))
                 .groupBy('year_c')
                 .agg(count("*").alias("checkin_year_count"))
                 .orderBy(desc("checkin_year_count")))
z.show(year_ck_count)

# Check-ins per hour of day (0-23).
# BUG FIX: to_date() truncates the value to a DateType, so hour() of it is
# always 0 and every check-in landed in the same bucket; parse the full
# timestamp with to_timestamp() to keep the time-of-day component.
from pyspark.sql.functions import hour, to_timestamp
hour_ck = c_df.select(explode(split(col('date'), ',')).alias('datetime')) \
    .withColumn('hour_c', hour(to_timestamp(trim(col('datetime')), "yyyy-MM-dd HH:mm:ss"))) \
    .groupBy('hour_c').agg(count("*").alias("checkin_per_hour")) \
    .orderBy(desc("checkin_per_hour"))
z.show(hour_ck)

# Cities with the most check-ins: explode per-check-in timestamps, then join
# the business table to attach each check-in's city.
city_ck = (c_df
           .select('business_id', explode(split(col('date'), ',')).alias('datetime'))
           .join(b_df, 'business_id')
           .groupBy('city')
           .agg(count('*').alias('cnt'))
           .orderBy(desc('cnt'))
           .limit(20))
z.show(city_ck)

# Top 5 businesses by total number of check-ins.
checkins_exploded = c_df.select('business_id', explode(split(col('date'), ',')).alias('time'))
top_checkin_business = (checkins_exploded
                        .groupBy('business_id')
                        .agg(count('*').alias('count'))
                        .orderBy(desc("count"))
                        .limit(5))
z.show(top_checkin_business)


##################################
# 六、综合分析 （思路展示）
##################################

# Ideas for section 六 (not implemented here):
# - Merchant advice: analyse business attributes (WiFi, Parking, CreditCards).
# - Friend recommendation: based on the users table's friends field (outlined
#   previously).
# - User profiling: combine review preferences, fan counts, elite years,
#   active cities and cuisine preferences from the analyses above.

total_elapsed = time.time() - start_time
print("Global Execution Time: {:.2f} seconds".format(total_elapsed))