import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.arima.model import ARIMA
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm

# 1. Data preprocessing and cleaning
# Load the raw coffee-chain sales records.
df = pd.read_csv('./data/数据分析/Coffee_Chain_Sales.csv')

# Parse the Date column and derive calendar features from it.
df['Date'] = pd.to_datetime(df['Date'], format='%m/%d/%Y')
df['Year'] = df['Date'].dt.year
df['Month'] = df['Date'].dt.month

# Predictors and target for the regression models below.
feature_columns = ['Year', 'Month', 'AreaCode', 'Cogs', 'Margin', 'Profit', 'InventoryMargin']
features = df[feature_columns]
target = df['Sales']

# Random 80/20 split, then standardize with statistics fit on the training set only.
X_train, X_test, y_train, y_test = train_test_split(
    features, target, test_size=0.2, random_state=42
)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Ordinary least-squares baseline on the standardized features.
lr_model = LinearRegression().fit(X_train_scaled, y_train)
lr_predictions = lr_model.predict(X_test_scaled)

# RBF-kernel support vector regressor on the same inputs.
svr_model = SVR(kernel='rbf').fit(X_train_scaled, y_train)
svr_predictions = svr_model.predict(X_test_scaled)

# Score LR/SVR against the SAME held-out targets their predictions were made
# for. (Previously y_test was overwritten by the time-series split below
# BEFORE these metrics were computed, so predictions and targets came from
# two different splits and the reported MSE/RMSE values were meaningless.)
lr_mse = mean_squared_error(y_test, lr_predictions)
svr_mse = mean_squared_error(y_test, svr_predictions)
rmse = np.sqrt(lr_mse)
svr_rmse = np.sqrt(svr_mse)

# Multiple-regression analysis with statsmodels. sm.OLS does NOT add an
# intercept by default, so an explicit constant column is added to both the
# training and the test design matrices.
X_train_reg, X_test_reg, y_train_reg, y_test_reg = train_test_split(features, target, test_size=0.2, random_state=42)
model_reg = sm.OLS(y_train_reg, sm.add_constant(X_train_reg)).fit()
predictions_reg = model_reg.predict(sm.add_constant(X_test_reg))
print(f"model_reg: {model_reg.summary()}")

# Time-series split: shuffle must stay False to keep the chronological order
# the ARIMA model depends on.
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.2, shuffle=False)
model = ARIMA(y_train, order=(1, 1, 1))
model_fit = model.fit()
yhat = model_fit.forecast(steps=len(y_test))  # forecast the whole held-out horizon
arima_mse = mean_squared_error(y_test, yhat)
arima_rmse = np.sqrt(arima_mse)

print(f"Cost profit Linear Regression MSE: {lr_mse}")
print(f"Cost profit Linear Regression RMSE: {rmse}")
print(f"Cost profit Support Vector Regression MSE: {svr_mse}")
print(f"Cost profit Support Vector Regression RMSE: {svr_rmse}")
print(f"Cost profit arima Regression MSE: {arima_mse}")
print(f"Cost profit arima Regression RMSE: {arima_rmse}")

# Scatter plot: linear-regression predictions against actual sales.
fig, ax = plt.subplots(figsize=(10, 5))
ax.scatter(y_test, lr_predictions, label='Linear Regression Predictions')
ax.set_xlabel('Actual Sales')
ax.set_ylabel('Predicted Sales')
ax.set_title('Cost profit Linear Regression Predictions vs Actual Sales')
ax.legend()
plt.show()

# Scatter plot: SVR predictions against actual sales.
fig, ax = plt.subplots(figsize=(10, 5))
ax.scatter(y_test, svr_predictions, label='SVR Predictions')
ax.set_xlabel('Actual Sales')
ax.set_ylabel('Predicted Sales')
ax.set_title('Cost profit SVR Predictions vs Actual Sales')
ax.legend()
plt.show()

# Bar chart comparing the mean squared error of the two sklearn regressors.
models = ['Linear Regression', 'SVR']
mses = [lr_mse, svr_mse]
fig, ax = plt.subplots(figsize=(10, 5))
ax.bar(models, mses, color='blue')
ax.set_xlabel('Model')
ax.set_ylabel('Mean Squared Error')
ax.set_title('Cost profit MSE Comparison between Models')
plt.show()

# Actual vs. predicted sales, linear model only (scatter, default figure size).
fig, ax = plt.subplots()
ax.scatter(y_test, lr_predictions)
ax.set_xlabel('Actual Sales')
ax.set_ylabel('Predicted Sales')
ax.set_title('Cost profit Actual vs Predicted Sales (Linear Regression)')
plt.show()


# 5. Visualization
# Time-series forecast vs. actual values as a 2-D line plot.
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(y_test.index, y_test.values, label='Actual Sales')
ax.plot(y_test.index, yhat, label='Predicted Sales')
ax.legend()
ax.set_title('Cost profit Actual vs Predicted Sales (ARIMA Model)')
ax.set_xlabel('Time Period')
ax.set_ylabel('Sales')
plt.show()

# Show the multiple-regression coefficients as a one-row annotated heatmap.
coefficients = model_reg.params
coefficients_df = pd.DataFrame(coefficients, columns=['Coefficient'])
coefficients_df['Variable'] = coefficients_df.index
plt.figure(figsize=(10, 8))
sns.set(font_scale=1.2)
heat_data = coefficients_df[['Coefficient']].T  # one row, one column per variable
sns.heatmap(heat_data, annot=True, fmt='.2f')
plt.title('Cost profit Regression Coefficients Heatmap')
plt.xlabel('Variables')
plt.ylabel('Coefficient')
plt.show()

# Narrowed-scope prediction: use only the calendar features.
# NOTE: the original also included 'Sales' itself as a predictor while 'Sales'
# is the target — textbook target leakage that makes the regression metrics
# meaningless — so that column is dropped here.
features = df[['Year', 'Month']]
target = df['Sales']

# Random split + scaling for the two sklearn regressors.
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.2, random_state=42)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
lr_model = LinearRegression()
lr_model.fit(X_train_scaled, y_train)
lr_predictions = lr_model.predict(X_test_scaled)
svr_model = SVR(kernel='rbf')
svr_model.fit(X_train_scaled, y_train)
svr_predictions = svr_model.predict(X_test_scaled)

# Score LR/SVR BEFORE y_test is overwritten by the time-series split below;
# previously these metrics compared the predictions against targets taken
# from a different (unshuffled) split, so they were misaligned.
lr_mse = mean_squared_error(y_test, lr_predictions)
svr_mse = mean_squared_error(y_test, svr_predictions)
rmse = np.sqrt(lr_mse)
svr_rmse = np.sqrt(svr_mse)

# Chronological split (shuffle=False) so ARIMA sees an unbroken series.
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.2, shuffle=False)
model = ARIMA(y_train, order=(1, 1, 1))
model_fit = model.fit()
yhat = model_fit.forecast(steps=len(y_test))
arima_mse = mean_squared_error(y_test, yhat)
arima_rmse = np.sqrt(arima_mse)

print(f"Linear Regression MSE: {lr_mse}")
print(f"Linear Regression RMSE: {rmse}")
print(f"Support Vector Regression MSE: {svr_mse}")
print(f"Support Vector Regression RMSE: {svr_rmse}")
print(f"arima Regression MSE: {arima_mse}")
print(f"arima Regression RMSE: {arima_rmse}")

# Multiple-regression analysis with statsmodels. sm.OLS fits WITHOUT an
# intercept unless a constant column is added explicitly, so add one to both
# the training and the test design matrices.
X_train_reg, X_test_reg, y_train_reg, y_test_reg = train_test_split(features, target, test_size=0.2, random_state=42)
model_reg = sm.OLS(y_train_reg, sm.add_constant(X_train_reg)).fit()
predictions_reg = model_reg.predict(sm.add_constant(X_test_reg))
print(f"model_reg: {model_reg.summary()}")

# Scatter plot: linear-regression predictions against actual sales.
fig, ax = plt.subplots(figsize=(10, 5))
ax.scatter(y_test, lr_predictions, label='Linear Regression Predictions')
ax.set_xlabel('Actual Sales')
ax.set_ylabel('Predicted Sales')
ax.set_title('Linear Regression Predictions vs Actual Sales')
ax.legend()
plt.show()
plt.close()  # release the figure's resources

# Scatter plot: SVR predictions against actual sales.
fig, ax = plt.subplots(figsize=(10, 5))
ax.scatter(y_test, svr_predictions, label='SVR Predictions')
ax.set_xlabel('Actual Sales')
ax.set_ylabel('Predicted Sales')
ax.set_title('SVR Predictions vs Actual Sales')
ax.legend()
plt.show()

# Bar chart comparing the mean squared error of the two sklearn regressors.
models = ['Linear Regression', 'SVR']
mses = [lr_mse, svr_mse]
fig, ax = plt.subplots(figsize=(10, 5))
ax.bar(models, mses, color='blue')
ax.set_xlabel('Model')
ax.set_ylabel('Mean Squared Error')
ax.set_title('MSE Comparison between Models')
plt.show()

# Actual vs. predicted sales, linear model only (scatter, default figure size).
fig, ax = plt.subplots()
ax.scatter(y_test, lr_predictions)
ax.set_xlabel('Actual Sales')
ax.set_ylabel('Predicted Sales')
ax.set_title('Actual vs Predicted Sales (Linear Regression)')
plt.show()

# 5. Visualization
# Time-series forecast vs. actual values as a 2-D line plot.
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(y_test.index, y_test.values, label='Actual Sales')
ax.plot(y_test.index, yhat, label='Predicted Sales')
ax.legend()
ax.set_title('Actual vs Predicted Sales (ARIMA Model)')
ax.set_xlabel('Time Period')
ax.set_ylabel('Sales')
plt.show()

# Show the multiple-regression coefficients as a one-row annotated heatmap.
coefficients = model_reg.params
coefficients_df = pd.DataFrame(coefficients, columns=['Coefficient'])
coefficients_df['Variable'] = coefficients_df.index
plt.figure(figsize=(10, 8))
sns.set(font_scale=1.2)
heat_data = coefficients_df[['Coefficient']].T  # one row, one column per variable
sns.heatmap(heat_data, annot=True, fmt='.2f')
plt.title('Regression Coefficients Heatmap')
plt.xlabel('Variables')
plt.ylabel('Coefficient')
plt.show()