import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, cross_val_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

# Configure matplotlib for Chinese text.
# (The duplicate `import matplotlib` that used to sit here was removed —
# matplotlib is already imported at the top of the file.)
matplotlib.rcParams['font.sans-serif'] = ['SimSun']  # CJK-capable font so labels/titles render
matplotlib.rcParams['axes.unicode_minus'] = False  # render '-' as ASCII; CJK fonts show it as a box

# Load the raw user–item interaction data.
# NOTE(review): hard-coded absolute Windows path — consider making it configurable.
df = pd.read_csv(r"C:\Users\卷\Desktop\程序\python数据分析\实验1\王天鸿 唐梓晴 苏彩凤 梁宇彤 数据集.csv")

# Drop the stray index column left behind by an earlier to_csv().
# Unlike `del df['Unnamed: 0']`, errors='ignore' will not raise KeyError
# if the file was saved without that column.
df = df.drop(columns=['Unnamed: 0'], errors='ignore')

# --- Per-user viewing features -------------------------------------------
# Group once and reuse the GroupBy object: the original re-ran
# df.groupby('uid') for every single column (nine times in total).
# The resulting frame is indexed by uid, exactly as before.
user_groups = df.groupby('uid')
user_df = pd.DataFrame({
    '浏览量': user_groups['like'].count(),              # interactions recorded per user
    '点赞量': user_groups['like'].sum(),                # likes given (the prediction target)
    '观看作者数': user_groups['author_id'].nunique(),    # distinct authors watched
    '观看作品数': user_groups['item_id'].nunique(),      # distinct items watched
    '观看作品平均时长': user_groups['duration_time'].mean(),
    '观看配乐数': user_groups['music_id'].nunique(),     # distinct soundtracks encountered
    '完整观看数': user_groups['finish'].sum(),           # views watched to completion
    '去过的城市数': user_groups['user_city'].nunique(),  # distinct cities the user viewed from
    '观看作品城市数': user_groups['item_city'].nunique(),  # distinct cities items came from
})

# --- Per-author features --------------------------------------------------
# Same single-groupby refactor as the user features above; indexed by author_id.
author_groups = df.groupby('author_id')
author_df = pd.DataFrame({
    '总浏览量': author_groups['like'].count(),       # total views received
    '总点赞量': author_groups['like'].sum(),         # total likes received
    '总观完量': author_groups['finish'].sum(),       # total completed views
    '总作品数': author_groups['item_id'].nunique(),  # distinct items published
})

# Average duration per item first, then average across each author's items,
# so frequently-viewed items are not over-weighted in the author mean.
item_time = df.groupby(['author_id', 'item_id'])['duration_time'].mean().reset_index()
author_df['作品平均时长'] = item_time.groupby('author_id')['duration_time'].mean()
author_df['使用配乐数量'] = author_groups['music_id'].nunique()   # distinct soundtracks used
author_df['发布作品日数'] = author_groups['real_time'].nunique()  # distinct publishing days

# Join the user features with the author features on the shared id index.
# NOTE(review): this inner-joins user_df (indexed by uid) with author_df
# (indexed by author_id), keeping only ids that occur as BOTH a user and an
# author — presumably the dataset shares one id space; confirm with the data.
merged_df = user_df.merge(author_df, left_index=True, right_index=True)

# Target: likes given by the user. Every remaining column is a feature;
# dropping the target keeps the original column order intact.
y = merged_df['点赞量']
X = merged_df.drop(columns=['点赞量'])

# Hold out 30% of rows for testing; fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Baseline model: a 100-tree random forest with a fixed seed so that
# repeated runs (and the tuned models below) are directly comparable.
model = RandomForestRegressor(n_estimators=100, random_state=42)
model.fit(X_train, y_train)

# Score the held-out set.
y_pred = model.predict(X_test)

# Report the three standard regression metrics (identical output to the
# original three separate print statements).
for metric_label, metric_fn in (
    ('Mean Squared Error (MSE)', mean_squared_error),
    ('Mean Absolute Error (MAE)', mean_absolute_error),
    ('R² Score', r2_score),
):
    print(f'{metric_label}: {metric_fn(y_test, y_pred)}')

# Visual fit check: actual vs. predicted like counts.
plt.figure(figsize=(10, 6))
plt.scatter(y_test, y_pred)
plt.xlabel('真实点赞量')
plt.ylabel('预测点赞量')
plt.title('真实点赞量 vs 预测点赞量')
plt.show()

# --- Hyper-parameter tuning: exhaustive grid search -----------------------
param_grid = {
    'n_estimators': [50, 100, 200],
    'max_depth': [None, 10, 20, 30],
    'min_samples_split': [2, 5, 10],
}

# 36 candidates × 5 CV folds = 180 forest fits; n_jobs=-1 spreads them over
# all CPU cores (the original ran them serially). Scoring is negated MAE
# because scikit-learn maximizes scores.
grid_search = GridSearchCV(
    estimator=model,
    param_grid=param_grid,
    cv=5,
    scoring='neg_mean_absolute_error',
    n_jobs=-1,
)
grid_search.fit(X_train, y_train)

# Best hyper-parameter combination found on the training folds.
print("GridSearch 最优参数:", grid_search.best_params_)

# best_estimator_ is already refit on the whole training set (refit=True default).
best_model = grid_search.best_estimator_

# Evaluate the tuned model on the untouched test set.
y_pred_best = best_model.predict(X_test)
mse_best = mean_squared_error(y_test, y_pred_best)
mae_best = mean_absolute_error(y_test, y_pred_best)
r2_best = r2_score(y_test, y_pred_best)

print(f'最优模型 - Mean Squared Error (MSE): {mse_best}')
print(f'最优模型 - Mean Absolute Error (MAE): {mae_best}')
print(f'最优模型 - R² Score: {r2_best}')

# Actual vs. predicted scatter for the grid-searched model.
plt.figure(figsize=(10, 6))
plt.scatter(y_test, y_pred_best)
plt.xlabel('真实点赞量')
plt.ylabel('预测点赞量')
plt.title('最优模型 真实点赞量 vs 预测点赞量')
plt.show()

# --- Hyper-parameter tuning: randomized search ----------------------------
param_dist = {
    'n_estimators': [50, 100, 200],
    'max_depth': [None, 10, 20, 30],
    'min_samples_split': [2, 5, 10],
}

# Samples 10 of the 36 combinations. random_state=42 makes the sampled
# candidates reproducible — the original omitted it, so each run searched a
# different subset even though every other seed in this script is pinned.
# n_jobs=-1 parallelizes the 50 CV fits across all cores.
random_search = RandomizedSearchCV(
    estimator=model,
    param_distributions=param_dist,
    n_iter=10,
    cv=5,
    scoring='neg_mean_absolute_error',
    n_jobs=-1,
    random_state=42,
)
random_search.fit(X_train, y_train)

# Best sampled hyper-parameter combination.
print("RandomizedSearch 最优参数:", random_search.best_params_)

# Already refit on the full training set (refit=True default).
best_random_model = random_search.best_estimator_

# Evaluate the tuned model on the untouched test set.
y_pred_random = best_random_model.predict(X_test)
mse_random = mean_squared_error(y_test, y_pred_random)
mae_random = mean_absolute_error(y_test, y_pred_random)
r2_random = r2_score(y_test, y_pred_random)

print(f'最优模型 - Mean Squared Error (MSE): {mse_random}')
print(f'最优模型 - Mean Absolute Error (MAE): {mae_random}')
print(f'最优模型 - R² Score: {r2_random}')

# Actual vs. predicted scatter for the randomized-search model.
plt.figure(figsize=(10, 6))
plt.scatter(y_test, y_pred_random)
plt.xlabel('真实点赞量')
plt.ylabel('预测点赞量')
plt.title('最优随机搜索模型 真实点赞量 vs 预测点赞量')
plt.show()
