import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.window import Window

spark = SparkSession.builder.appName("UserBehaviorAnalysis").getOrCreate()


def _load_dat(path, column_names):
    """Read a '::'-separated MovieLens .dat file and attach column names."""
    return spark.read.csv(path, sep='::', inferSchema=True).toDF(*column_names)


# Load the three MovieLens tables with proper column names.
users = _load_dat("file:///home/annie/movie/data/users.dat",
                  ["userId", "Gender", "Age", "Occupation", "Zip-code"])
ratings = _load_dat("file:///home/annie/movie/data/ratings.dat",
                    ["UserID", "MovieID", "Rating", "Timestamp"])
movies = _load_dat("file:///home/annie/movie/data/movies.dat",
                   ["MovieID", "Title", "Genres"])

# ######
# # Task 1: decade preferences
# #  Find the most popular movie genre per decade (e.g. 1990s, 2000s).
# #  Find the 10 highest-rated movies of each decade.
# ######
# Extract the release year from the title with a regex, then derive the decade.
movies_with_year = movies.withColumn("year", regexp_extract(col("title"), r"\((\d{4})\)", 1).cast("integer"))
movies_with_decades = movies_with_year.withColumn("decade", (floor(col("year") / 10) * 10))

# Join ratings with movie metadata ONCE; both aggregations below reuse it
# (the original computed this identical join twice).
rated_movies = ratings.join(movies_with_decades, "movieId")

# Per-movie average rating within each decade.
movie_decades = rated_movies.groupBy("decade", "movieId", "title").agg(avg("rating").alias("avg_rating"))

# Same aggregation but keeping the raw genre string for the explode below.
movie_genres = rated_movies.groupBy("decade", "movieId", "title", "Genres").agg(avg("rating").alias("avg_rating"))

# Split the pipe-separated genre list into one row per (movie, genre).
data_exploded_two = movie_genres.withColumn("genre", explode(split(col("Genres"), "[|]")))

# Average of per-movie averages per (decade, genre). NOTE: rating_count here
# counts movies in the genre, not individual ratings.
genre_ratings = data_exploded_two.groupBy("decade", "genre").agg(avg("avg_rating").alias("avg_rating"), count("avg_rating").alias("rating_count"))

# Rank genres within each decade by average rating; keep the single best one.
windowSpec = Window.partitionBy("decade").orderBy(col("avg_rating").desc())
top_genres = genre_ratings.withColumn("rank", row_number().over(windowSpec)).filter(col("rank") <= 1)
# Rank movies within each decade; keep the top 10.
top_movies = movie_decades.withColumn("rank", row_number().over(windowSpec)).filter(col("rank") <= 10)

print('# 任务1：年代偏好')
top_movies.show()
top_genres.show()

#####
# Task 2: seasonal preferences
# 	Convert the rating timestamp to a month.
# 	Find the most popular genre in each season (spring/summer/autumn/winter).
# 	Find the highest-rated movies on specific holidays (e.g. Christmas).
#####
# Derive the rating month from the unix timestamp.
ratings_with_month = ratings.withColumn("month", month(from_unixtime(col("timestamp"))))

# Map calendar months onto meteorological seasons.
season_expr = (when(col("month").isin([3, 4, 5]), "Spring")
               .when(col("month").isin([6, 7, 8]), "Summer")
               .when(col("month").isin([9, 10, 11]), "Autumn")
               .otherwise("Winter"))
ratings_with_seasons = ratings_with_month.withColumn("season", season_expr)

# Attach movie metadata to each seasonal rating.
seasonal_data = ratings_with_seasons.join(movies, "movieId")

# One row per (rating, genre) via explode of the pipe-separated genre list.
seasonal_exploded = seasonal_data.withColumn("genre", explode(split(col("genres"), "[|]")))

# Average rating and rating volume per (season, genre).
genre_ratings = seasonal_exploded.groupBy("season", "genre").agg(
    avg("rating").alias("avg_rating"),
    count("rating").alias("rating_count"))

# Drop genres with 10000 or fewer ratings in a season.
filtered_ratings = genre_ratings.filter(col("rating_count") > 10000)

# Keep the single best-rated genre per season.
windowSpec = Window.partitionBy("season").orderBy(col("avg_rating").desc())
top_genres = filtered_ratings.withColumn("rank", row_number().over(windowSpec)).filter(col("rank") <= 1)
print('# 任务2：每个季节最受欢迎的电影类型')
top_genres.show()
# Restrict to ratings made in December (holiday season).
december_ratings = ratings_with_month.filter(col("month") == 12)

# Per-movie average rating and rating volume for December.
movie_ratings = (december_ratings
                 .join(movies, "movieId")
                 .groupBy("Title")
                 .agg(avg("Rating").alias("avg_rating"),
                      count("Rating").alias("rating_count")))

# Ten highest-rated movies by December average.
top_movies = movie_ratings.orderBy(col("avg_rating").desc()).limit(10)

print('# 任务2：12月份评分最高的电影')
top_movies.show()
# Convert the Spark result to pandas for plotting.
top_genres_pd = top_genres.toPandas()

# Bar chart: one bar per season, height = rating volume of its top genre.
fig_two, ax_two = plt.subplots(figsize=(10, 6))
for season in top_genres_pd['season'].unique():
    season_data = top_genres_pd[top_genres_pd['season'] == season]
    ax_two.bar(season_data['season'], season_data['rating_count'], label=season, alpha=0.6)

# Annotate each bar with its genre and average rating.
for index_two, row_two in top_genres_pd.iterrows():
    ax_two.text(row_two['season'], row_two['rating_count'], f"{row_two['genre']} ({row_two['avg_rating']:.2f})", ha='center', va='bottom')

# Chart title and axis labels.
ax_two.set_title('Top Genres by Season')
ax_two.set_xlabel('Season')
ax_two.set_ylabel('Rating Count')
ax_two.legend()

# Save BEFORE show(): plt.show() blocks, and the figure is torn down when
# the window is closed, so calling savefig afterwards writes an empty image.
plt.savefig("./2.jpg")
plt.show()
# #####
# # Task 3: loyal-user analysis
# #  Find users whose rating count is in the upper quartile.
# #  Compare their preferences against the overall user base.
# #####

# Overall per-movie average rating across all users.
overall_movie_preferences = ratings.groupBy("movieId").agg(avg("rating").alias("overall_avg_rating"))

# Number of ratings submitted by each user.
user_rating_counts = ratings.groupBy("userId").agg(count("rating").alias("rating_count"))

# Exact 75th percentile of per-user rating counts (relativeError=0.0).
quantile_75 = user_rating_counts.stat.approxQuantile("rating_count", [0.75], 0.0)[0]

# Users at or above the upper quartile = "loyal" users.
top_users_ids = user_rating_counts.filter(col("rating_count") >= quantile_75).select("userId")

# Per-movie average rating among loyal users only.
top_users_ratings = ratings.join(top_users_ids, "userId")
top_movie_preferences = top_users_ratings.groupBy("movieId").agg(avg("rating").alias("top_users_avg_rating"))

# Per-movie difference between loyal-user and overall averages.
preferences_diff = (overall_movie_preferences
                    .join(top_movie_preferences, "movieId")
                    .withColumn("diff", col("top_users_avg_rating") - col("overall_avg_rating")))
print('# 任务3：忠诚用户分析')
preferences_diff.orderBy(col("diff").desc()).show()

# Tag each movie as old (decade before 1980) or new.
movies_with_categories = movies_with_decades.withColumn(
    "category", when(col("decade") < 1980, "old_movie").otherwise("new_movie"))

# Attach ratings to the categorised movies.
movie_category = ratings.join(movies_with_categories, "movieId")

# Average rating and rating volume for old vs new movies.
average_ratings = movie_category.groupBy("category").agg(
    avg("rating").alias("avg_rating"),
    count("rating").alias("count_ratings"),
)
print('# 任务4：新旧电影评分对比')
average_ratings.show()

# Distribution of individual rating values within each category.
rating_distribution = movie_category.groupBy("category", "rating").agg(
    count("rating").alias("count")
).orderBy("category", "rating")
rating_distribution.show()
# Collect the aggregate in ONE pass: the original issued two independent
# collect()s (two separate Spark jobs), which are not guaranteed to return
# rows in the same order, so labels and values could be mismatched.
summary_rows = average_ratings.select("category", "avg_rating").collect()
categories = [r["category"] for r in summary_rows]
avg_ratings = [r["avg_rating"] for r in summary_rows]

# Bar chart of average rating per category.
x = np.arange(len(categories))  # the label locations
width = 0.35  # the width of the bars

fig, ax = plt.subplots()
rects1 = ax.bar(x, avg_ratings, width, label='Average of Ratings')

ax.set_ylabel('Average of Ratings')
ax.set_title('Average of Ratings between Old Movies and New Movies')
ax.set_xticks(x)
ax.set_xticklabels(categories)
ax.legend()

ax.bar_label(rects1, padding=3)
plt.savefig("./4.1.jpg")

# Collect the rating distribution locally for the heatmap.
data_four = rating_distribution.toPandas()

# Build a (rating x category) count matrix.
categories_four = data_four['category'].unique()
rating_four = data_four['rating'].unique()
matrix_four = np.zeros((len(rating_four), len(categories_four)))
for index, row in data_four.iterrows():
    row_index_four = np.where(rating_four == row['rating'])[0][0]
    col_index_four = np.where(categories_four == row['category'])[0][0]
    matrix_four[row_index_four, col_index_four] = row['count']

# Heatmap of the distribution.
fig_four, ax_four = plt.subplots()
cax = ax_four.imshow(matrix_four, cmap='viridis', aspect='auto', extent=(1, 6.5, 0.5, 5.5))

# Tick positions are hand-tuned to the extent above and assume exactly two
# categories and ratings 1-5 — TODO confirm if the data changes.
ax_four.set_yticks(np.arange(1, 6))
ax_four.set_yticklabels(rating_four)
ax_four.set_xticks(np.arange(1, len(categories_four) * 2, 2))
ax_four.set_xticklabels(categories_four, ha='center')

# Add the colour scale.
fig_four.colorbar(cax, ax=ax_four)

ax_four.set_title('Rating Distribution by Movie Category')
plt.savefig("./4.2.jpg")
# Shut down the SparkSession.
spark.stop()