##### Data preprocessing and correlation analysis
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Load the cleaned Kendrick Lamar track data.
file_path = r'C:\Users\许泽轩\Desktop\cleaned_all_kendrick_tracks.csv'
df = pd.read_csv(file_path)

# Columns to include in the correlation analysis.
columns = ['release_date', 'explicit', 'popularity', 'duration_ms', 'danceability', 'energy', 'valence', 'tempo', 'loudness', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'release_year', 'duration_min']

# Make 'release_date' and 'explicit' numeric so they can enter the correlation.
# toordinal maps each date to its proleptic-Gregorian day number.
df['release_date'] = pd.to_datetime(df['release_date']).map(pd.Timestamp.toordinal)
# astype(int) converts bool -> 0/1 and is idempotent: the original
# map({True: 1, False: 0}) silently turned the whole column into NaN if it
# was already 0/1 (e.g. when re-running this cell in a notebook).
df['explicit'] = df['explicit'].astype(int)

# Pearson correlation matrix over the selected columns.
correlation_matrix = df[columns].corr()

# Render the correlation matrix as an annotated heatmap.
plt.figure(figsize=(12, 10))
sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', fmt=".2f")
plt.title('Correlation Heatmap')
plt.show()


###### Feature importance analysis
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import seaborn as sns

# df is the DataFrame loaded by the preprocessing section above.

# Encode any remaining string-typed categorical columns as integers.
# (Only 'explicit' is a genuine category; label-encoding 'release_date'
# as the original did would assign arbitrary codes to dates.)
label_encoders = {}
for column in ['explicit']:
    if df[column].dtype == 'object':
        label_encoders[column] = LabelEncoder()
        df[column] = label_encoders[column].fit_transform(df[column])

# Convert 'release_date' to an ordinal day number, but ONLY if it is not
# numeric yet: the preprocessing section above already converted it, and the
# original unconditional pd.to_datetime(...) re-interpreted those ordinal
# ints as nanoseconds since the epoch, silently corrupting the column.
if not pd.api.types.is_numeric_dtype(df['release_date']):
    df['release_date'] = pd.to_datetime(df['release_date']).map(pd.Timestamp.toordinal)

# Feature matrix. NOTE: the original included 'popularity' in X while also
# using it as the target y — textbook target leakage that made the model
# trivially perfect and the importance plot meaningless. It is excluded here.
X = df[['explicit', 'duration_ms', 'danceability', 'energy', 'valence', 'tempo', 'loudness', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'release_year', 'duration_min']]
y = df['popularity']  # target variable

# Fit a random forest and read off impurity-based feature importances.
model = RandomForestRegressor(n_estimators=100, random_state=42)
model.fit(X, y)

# Importances sorted descending so the plot reads top-down.
feature_importances = pd.Series(model.feature_importances_, index=X.columns).sort_values(ascending=False)

# Visualize feature importances, most important first.
plt.figure(figsize=(10, 8))
sns.barplot(x=feature_importances.values, y=feature_importances.index)
plt.title('Feature Importances')
plt.show()
    

##### Trend analysis
import seaborn as sns
import matplotlib.pyplot as plt

# Reload the data so this section is self-contained.
file_path = r'C:\Users\许泽轩\Desktop\cleaned_all_kendrick_tracks.csv'
df = pd.read_csv(file_path)

# Parse release dates and extract the year for the per-year trend lines.
df['release_date'] = pd.to_datetime(df['release_date'])
df['release_year'] = df['release_date'].dt.year

# Features whose yearly trend we want to plot.
features = ['explicit', 'popularity', 'duration_ms', 'danceability', 'energy', 'valence', 'tempo', 'loudness', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'duration_min']

# 4x4 grid gives 16 axes for 13 features; the unused ones are hidden below
# (the original left three empty frames visible in the figure).
fig, axes = plt.subplots(nrows=4, ncols=4, figsize=(20, 16))
fig.subplots_adjust(hspace=0.5, wspace=0.2)

# One line plot per feature; zip over axes.flat avoids manual row/col math.
for ax, feature in zip(axes.flat, features):
    sns.lineplot(x='release_year', y=feature, data=df, ax=ax)
    ax.set_title(f'Trend of {feature} Over Years')
    ax.set_xlabel('Year')
    ax.set_ylabel(feature)

# Hide the trailing axes that got no feature assigned.
for ax in axes.flat[len(features):]:
    ax.set_visible(False)

plt.show()

######  XGBoost model training and tuning
import pandas as pd
import xgboost as xgb
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import mean_squared_error
import joblib

# Load the data.
file_path = r'C:\Users\许泽轩\Desktop\cleaned_all_kendrick_tracks.csv'
df = pd.read_csv(file_path)

# Feature matrix and target variable.
features = ['release_year', 'energy', 'loudness', 'danceability']
X = df[features]
y = df['popularity']

# Hold out 20% for testing; fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Base XGBoost regressor; hyperparameters are supplied by the grid search.
model = xgb.XGBRegressor(objective='reg:squarederror', random_state=42)

# Hyperparameter grid: 3^5 = 243 combinations.
param_grid = {
    'n_estimators': [50, 100, 200],
    'learning_rate': [0.01, 0.1, 0.2],
    'max_depth': [3, 5, 7],
    'subsample': [0.8, 0.9, 1.0],
    'colsample_bytree': [0.8, 0.9, 1.0]
}

# 5-fold CV grid search; n_jobs=-1 parallelizes the 243 * 5 fits across cores
# without changing the result.
grid_search = GridSearchCV(model, param_grid, cv=5, scoring='neg_mean_squared_error', n_jobs=-1)
grid_search.fit(X_train, y_train)

# Report the winning configuration and its CV score (negated MSE).
print(f'Best parameters: {grid_search.best_params_}')
print(f'Best score: {grid_search.best_score_}')

# best_estimator_ is already refit on the full training split because
# refit=True is the GridSearchCV default — the original's extra
# best_model.fit(X_train, y_train) call was redundant and has been removed.
best_model = grid_search.best_estimator_

# Evaluate on the held-out test set.
y_pred = best_model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print(f'New Mean Squared Error: {mse}')

# Persist the tuned model to disk.
joblib.dump(best_model, 'best_model.joblib')

# Reload it (demonstrates the round trip; also used as the deployable artifact).
loaded_model = joblib.load('best_model.joblib')

### Model evaluation
from sklearn.metrics import r2_score

# Predictions of the tuned XGBoost model on both splits.
y_train_pred_xgb = best_model.predict(X_train)
y_test_pred_xgb = best_model.predict(X_test)

# MSE on each split.
train_mse_xgb = mean_squared_error(y_train, y_train_pred_xgb)
test_mse_xgb = mean_squared_error(y_test, y_test_pred_xgb)

# RMSE is just the square root of MSE (same units as the target).
train_rmse_xgb = np.sqrt(train_mse_xgb)
test_rmse_xgb = np.sqrt(test_mse_xgb)

# Coefficient of determination on each split.
train_r2_xgb = r2_score(y_train, y_train_pred_xgb)
test_r2_xgb = r2_score(y_test, y_test_pred_xgb)

# Print Training/Test pairs metric by metric (MSE, then RMSE, then R²),
# which reproduces the original output order exactly.
for metric_name, train_value, test_value in (
    ("MSE", train_mse_xgb, test_mse_xgb),
    ("RMSE", train_rmse_xgb, test_rmse_xgb),
    ("R²", train_r2_xgb, test_r2_xgb),
):
    print(f"XGBoost Training {metric_name}: {train_value:.4f}")
    print(f"XGBoost Test {metric_name}: {test_value:.4f}")

