%pyspark

import time
import random
import json
start_time = time.time()

from pyspark.sql import HiveContext
from pyspark.sql.functions import col, count, desc, sum, explode, split, to_date, year, lower, regexp_replace, avg
from pyspark.sql.functions import udf, trim
from pyspark.sql.types import StringType, ArrayType
from itertools import combinations
from collections import Counter

# Zeppelin provides `sc`; HiveContext gives access to the pre-built Hive tables.
hc = HiveContext(sc)

# Use the tables that already exist in Hive — the names here must match the
# tables actually created earlier in the pipeline.
b_df = hc.table('business')
c_df = hc.table('checkin')
u_df = hc.table('users')  
r_df = hc.table('review')

##################################
# VI. Combined analysis
##################################

# Advice for business owners: treat businesses rated >= 4.0 stars as
# "successful" and find the attributes they most often share.
successful_business = b_df.filter(col("stars") >= 4.0)
# Pull the raw attribute JSON strings down to the driver.
successful_attributes = successful_business.select("attributes").rdd.flatMap(lambda x: x).collect()

# Flatten each attributes JSON string into (key, value) string pairs.
# str() is applied to every value so nested attribute dicts are handled
# consistently (matches the optimized paragraph below).
flat_attrs = []
for attr_json in successful_attributes:
    if attr_json is not None:
        attr_dict = json.loads(attr_json)  # JSON string -> dict
        for k, v in attr_dict.items():
            flat_attrs.append((str(k), str(v)))

# Frequency of each "key=value" attribute; show the 20 most common.
attr_counter = Counter("{}={}".format(k, v) for k, v in flat_attrs)
most_common_attrs = attr_counter.most_common(20)
print("成功商家最常见的属性：")
# Renamed the loop variable: `count` shadowed pyspark.sql.functions.count
# imported at the top of the file.
for attr, freq in most_common_attrs:
    print("{}: {}".format(attr, freq))

# Pick one random user id to drive the friend-recommendation and
# user-profile examples below.
random_user_id = u_df.select("user_id").rdd.flatMap(lambda x: x).collect()
example_user_id = random.choice(random_user_id)

# Recommend potential friends for a user, based on friends-of-friends
# (mutual-connection frequency).
def recommend_friends(user_id):
    """Return up to 10 (candidate_id, mutual_count) pairs for user_id.

    The `user_friends` column stores a comma-separated string of user ids,
    so it must be split into individual ids before any comparison.
    Returns an empty list when the user is unknown or has no friends.
    """
    rows = u_df.filter(col("user_id") == user_id).select("user_friends").collect()
    # Bug fix: the original treated the whole comma-separated friends string
    # as a list of ids, so isin()/membership tests never matched real ids.
    if not rows or not rows[0][0] or not rows[0][0].strip():
        return []
    user_friends = [f.strip() for f in rows[0][0].split(",") if f.strip()]
    if not user_friends:
        return []

    # Collect the friends strings of each direct friend and split them too.
    fof_strings = u_df.filter(col("user_id").isin(user_friends)).select("user_friends").rdd.flatMap(lambda x: x).collect()
    friend_recommendations = []
    for fof in fof_strings:
        if fof:
            friend_recommendations.extend(f.strip() for f in fof.split(",") if f.strip())

    # Exclude existing friends and the user themself; rank by frequency.
    friend_recommendations = [f for f in friend_recommendations if f not in user_friends and f != user_id]
    return Counter(friend_recommendations).most_common(10)

# Demo: recommend friends for the randomly chosen example user.
recommended_friends = recommend_friends(example_user_id)
print("推荐好友：", recommended_friends)

# User profile: basic account stats plus all of the user's review texts.
def user_profile(user_id):
    """Return a profile dict for user_id, or None if the user is unknown.

    Fixes vs. the original version:
    - no longer raises IndexError when the user id does not exist;
    - no longer raises AttributeError when `user_friends` is NULL/empty.
    """
    matches = u_df.filter(col("user_id") == user_id).collect()
    if not matches:
        return None
    user_info = matches[0]
    user_reviews = r_df.filter(col("rev_user_id") == user_id).select("rev_text").rdd.flatMap(lambda x: x).collect()

    # `user_friends` is a comma-separated id string; count non-blank entries.
    friends_count = 0
    if user_info.user_friends and user_info.user_friends.strip():
        friends_count = len([f.strip() for f in user_info.user_friends.split(",") if f.strip()])

    profile = {
        "user_id": user_info.user_id,
        "name": user_info.user_name,
        "review_count": user_info.user_review_count,
        "yelping_since": user_info.user_yelping_since,
        "friends_count": friends_count,
        "fans": user_info.user_fans,
        "average_stars": user_info.user_average_stars,
        "elite_years": user_info.user_elite,
        "reviews": user_reviews
    }
    return profile

# Demo: print the profile of the example user.
user_profile_info = user_profile(example_user_id)
print("用户画像：", user_profile_info)

# Report total wall-clock time for this paragraph.
end_time = time.time()
execution_time = end_time - start_time
print("Execution Time: {:.2f} seconds".format(execution_time))



# Optimized version below (the user-profile part of the version above has issues and needs fixing)
%pyspark

import time
import random
import json
from pyspark.sql import HiveContext
from pyspark.sql.functions import col, count, desc, sum, explode, split, to_date, year, lower, regexp_replace, avg, trim, countDistinct
from pyspark.sql.types import StringType, ArrayType
from itertools import combinations
from collections import Counter

# Zeppelin provides `sc`; HiveContext gives access to the pre-built Hive tables.
hc = HiveContext(sc)

# Use the tables that already exist in Hive.
b_df = hc.table('business')
c_df = hc.table('checkin')
u_df = hc.table('users')  
r_df = hc.table('review')

# Start timing this paragraph.
start_time = time.time()

##################################
# VI. Combined analysis — run each paragraph separately!
##################################

# Advice for business owners: treat businesses rated 4.0 stars or higher
# as "successful" and look at which attributes they share most often.
successful_business = b_df.filter(col("stars") >= 4.0)

# Bring the raw attribute JSON strings down to the driver.
successful_attributes = successful_business.select("attributes").rdd.flatMap(lambda row: row).collect()

# Flatten every attributes dict into (key, value) string pairs.
# str() is applied uniformly, which also covers nested dict values.
flat_attrs = []
for raw_attrs in successful_attributes:
    if raw_attrs is None:
        continue
    parsed = json.loads(raw_attrs)  # JSON string -> dict
    flat_attrs.extend((str(key), str(value)) for key, value in parsed.items())

# Frequency of each "key=value" attribute; keep the 20 most common.
attr_counter = Counter("{}={}".format(key, value) for key, value in flat_attrs)
most_common_attrs = attr_counter.most_common(20)

# Render the ranking as a DataFrame so Zeppelin can display it as a table.
attr_df = spark.createDataFrame(most_common_attrs, ["attribute", "count"])
print("成功商家最常见的属性（前20）：")
z.show(attr_df.orderBy(col("count").desc()))





%pyspark

import time
import random
import json
from pyspark.sql import HiveContext
from pyspark.sql.functions import col, count, desc, sum, explode, split, to_date, year, lower, regexp_replace, avg, trim, countDistinct
from pyspark.sql.types import StringType, ArrayType
from itertools import combinations
from collections import Counter

# Zeppelin provides `sc`; HiveContext gives access to the pre-built Hive tables.
hc = HiveContext(sc)

# Use the tables that already exist in Hive.
b_df = hc.table('business')
c_df = hc.table('checkin')
u_df = hc.table('users')
r_df = hc.table('review')

# Start timing this paragraph.
start_time = time.time()

##################################
# Recommend potential friends for a given user — run this paragraph separately!
##################################

# Pick one user id at random to drive the recommendation demo below.
random_user_id = u_df.select("user_id").rdd.flatMap(lambda row: row).collect()
example_user_id = random.choice(random_user_id)

def recommend_friends(user_id):
    """Suggest up to 10 potential friends for user_id.

    Candidates are friends-of-friends, ranked by how many of the user's
    direct friends list them (i.e. mutual-connection frequency).
    Returns [] when the user has no friends recorded.
    """
    raw = u_df.filter(col("user_id") == user_id).select("user_friends").collect()[0][0]
    if not raw or not raw.strip():
        return []

    # The friends column is a comma-separated id string; split and clean it.
    direct = [name.strip() for name in raw.split(",") if name.strip()]
    if not direct:
        return []

    # Gather each direct friend's own friends string and flatten into ids.
    fof_rows = u_df.filter(col("user_id").isin(direct)).select("user_friends").rdd.flatMap(lambda row: row).collect()
    candidates = []
    for entry in fof_rows:
        if entry:
            candidates.extend(name.strip() for name in entry.split(",") if name.strip())

    # Drop existing friends and the user themself; rank by occurrence count.
    tally = Counter(c for c in candidates if c not in direct and c != user_id)
    return tally.most_common(10)

# Show the recommendations as a Zeppelin table when any were found.
recommended_friends = recommend_friends(example_user_id)
if recommended_friends:
    rec_friends_df = spark.createDataFrame(recommended_friends, ["recommended_friend_id", "occurrence"])
    print("为用户 {} 推荐潜在好友（前10）：".format(example_user_id))
    z.show(rec_friends_df.orderBy(col("occurrence").desc()))
else:
    print("为用户 {} 未找到潜在好友推荐。".format(example_user_id))






%pyspark

import time
import random
import json
from pyspark.sql import HiveContext
from pyspark.sql.functions import col, count, desc, sum, explode, split, to_date, year, lower, regexp_replace, avg, trim, countDistinct
from pyspark.sql.types import StringType, ArrayType
from itertools import combinations
from collections import Counter


# Zeppelin provides `sc`; HiveContext gives access to the pre-built Hive tables.
hc = HiveContext(sc)

# Use the tables that already exist in Hive.
b_df = hc.table('business')
c_df = hc.table('checkin')
u_df = hc.table('users')  
r_df = hc.table('review')

# Start timing this paragraph.
start_time = time.time()

##################################
# User profile — run this paragraph separately!
##################################

def user_profile(user_id):
    """Assemble a profile dict for user_id; return None when the user is unknown."""
    matched = u_df.filter(col("user_id") == user_id).collect()
    if not matched:
        return None

    info = matched[0]
    # All review texts written by this user.
    reviews = r_df.filter(col("rev_user_id") == user_id).select("rev_text").rdd.flatMap(lambda row: row).collect()

    # The friends column is a comma-separated id string; count non-blank entries.
    friends_total = 0
    raw_friends = info.user_friends
    if raw_friends and raw_friends.strip():
        friends_total = len([part.strip() for part in raw_friends.split(",") if part.strip()])

    return {
        "user_id": info.user_id,
        "name": info.user_name,
        "review_count": info.user_review_count,
        "yelping_since": info.user_yelping_since,
        "fans": info.user_fans,
        "average_stars": info.user_average_stars,
        "elite_years": info.user_elite,
        "friends_count": friends_total,
        "reviews": reviews
    }

# Build the profile for the example user chosen in the earlier paragraph.
user_profile_info = user_profile(example_user_id)

if user_profile_info:
    print("用户画像：")
    print("用户ID:", user_profile_info['user_id'])
    print("用户名:", user_profile_info['name'])
    print("评论数:", user_profile_info['review_count'])
    print("加入时间:", user_profile_info['yelping_since'])
    print("粉丝数:", user_profile_info['fans'])
    print("平均评分:", user_profile_info['average_stars'])
    print("精英年份:", user_profile_info['elite_years'])
    print("好友数量:", user_profile_info['friends_count'])

    # Show at most the first 3 reviews as a preview.
    preview_reviews = user_profile_info['reviews'][:3]
    print("部分评论预览 (最多展示3条):")
    for i, rev in enumerate(preview_reviews, 1):
        print("评论{}: {}".format(i, rev))
else:
    print("未找到该用户信息！")

# Report total wall-clock time for this paragraph.
end_time = time.time()
execution_time = end_time - start_time
print("Execution Time: {:.2f} seconds".format(execution_time))
