%pyspark

import time
start_time = time.time()

from pyspark.sql import HiveContext
from pyspark.sql.functions import col, count, desc, sum, explode, split, to_date, year, lower, regexp_replace
from pyspark.sql.functions import udf, trim
from pyspark.sql.types import StringType, ArrayType
from itertools import combinations

hc = HiveContext(sc)

# Load the tables that already exist in Hive; the names here must match
# the ones actually created in the metastore.
b_df, c_df, u_df, r_df = [
    hc.table(name) for name in ("business", "checkin", "users", "review")
]

##################################
# Part III: Review analysis
##################################

# Review volume per calendar year.
review_year_col = year(to_date(col("rev_date"), "yyyy-MM-dd"))
df_rev_year = (
    r_df.withColumn("year_r", review_year_col)
        .groupBy("year_r")
        .agg(count("*").alias("yearly_review_count"))
        .orderBy("year_r")
)
z.show(df_rev_year)

# Grand totals of "useful" / "funny" / "cool" votes across all reviews.
vote_totals = [sum("rev_" + kind).alias("total_" + kind)
               for kind in ("useful", "funny", "cool")]
useful_funny_cool = r_df.agg(*vote_totals)
z.show(useful_funny_cool)

# Per-year reviewer leaderboard: count reviews per (year, user) and keep
# the 50 busiest (year, user) rows, most recent years first.
reviews_with_year = r_df.withColumn(
    "year_r", year(to_date(col("rev_date"), "yyyy-MM-dd"))
)
user_rank_year = (
    reviews_with_year
        .groupBy("year_r", "rev_user_id")
        .agg(count("review_id").alias("user_review_count"))
        .orderBy(desc("year_r"), desc("user_review_count"))
        .limit(50)
)
z.show(user_rank_year)

# Extract the Top-20 most frequent words (tokenize, then drop stopwords).
# All-lowercase English stopwords, matched after review text is lowercased.
stopwords = ("a an the with and or for on of in at to from this that it "
             "is are be was were has had have as by about then so if not").split()

# Strip punctuation, lowercase, split on whitespace, drop empties and
# stopwords, then rank the 20 most frequent words.
clean_reviews = r_df.select(
    "review_id",
    lower(regexp_replace(col("rev_text"), "[^a-zA-Z0-9\\s]", "")).alias("clean_text"),
)
words = (
    clean_reviews
        .select(explode(split(col("clean_text"), "\\s+")).alias("word"))
        .filter((col("word") != "") & (~col("word").isin(stopwords)))
)
top20_words = (
    words.groupBy("word")
         .agg(count("*").alias("cnt"))
         .orderBy(desc("cnt"))
         .limit(20)
)
z.show(top20_words)

# Complete word-frequency table (input for a word cloud).
word_freq = (
    words.groupBy("word")
         .agg(count("*").alias("count"))
         .orderBy(col("count").desc())
)
# z.show(word_freq)  # uncomment to inspect

# Word relationship graph (co-occurrence analysis).
def tokenize_filter(text):
    """Lowercase *text*, strip non-alphanumeric characters, split on
    whitespace, and drop stopwords.

    Returns a list of tokens; returns [] for None/empty input (Spark
    passes None into the UDF when rev_text is NULL — the previous
    version crashed with AttributeError in that case).
    """
    import re
    if not text:
        return []
    # set lookup: O(1) membership instead of scanning the list per word
    stop = set(stopwords)
    cleaned = re.sub("[^a-z0-9\\s]", "", text.lower())
    # str.split() never yields empty strings, so no "" filter is needed
    return [w for w in cleaned.split() if w not in stop]

# Wrap the tokenizer as a Spark UDF and apply it to every review text.
tokenize_filter_udf = udf(tokenize_filter, ArrayType(StringType()))
filtered_reviews = (
    r_df.withColumn("words", tokenize_filter_udf(col("rev_text")))
        .select("review_id", "words")
)

def generate_pairs(words):
    """Return all unordered pairs of the distinct words in *words*.

    Pairs are 2-tuples in lexicographic order (the input is de-duplicated
    and sorted first). Returns [] for None or empty input — the previous
    version raised TypeError on None.
    """
    if not words:
        return []
    return list(combinations(sorted(set(words)), 2))

# Explode each review's word list into word pairs and rank the 50 pairs
# that co-occur most often within the same review.
pairs_udf = udf(generate_pairs, ArrayType(ArrayType(StringType())))
pairs_df = filtered_reviews.select(
    explode(pairs_udf(col("words"))).alias("pair")
)
word_cooccurrence = (
    pairs_df.groupBy("pair")
            .agg(count("*").alias("cooccur_count"))
            .orderBy(desc("cooccur_count"))
            .limit(50)
)
z.show(word_cooccurrence)

# Report the wall-clock time this paragraph took, measured from the
# start_time captured at the top of the note.
end_time = time.time()
execution_time = end_time - start_time
print("Execution Time: {:.2f} seconds".format(execution_time))