import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pyspark.sql import SparkSession
from pyspark.ml.feature import VectorAssembler, StringIndexer
from pyspark.ml.regression import LinearRegression, RandomForestRegressor
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml import Pipeline

# Initialize the SparkSession for this job.
spark = (
    SparkSession.builder
    .appName("NBAPlayerPrediction")
    .getOrCreate()
)

# 1. Load the merged/preprocessed player stats CSV.
#    (Replace with your own file path as needed.)
file_path = "/home/developer/Desktop/NBA_Bigdata/数据/proceed/merge_process.csv"
data = (
    spark.read
    .option("header", "true")       # first row holds column names
    .option("inferSchema", "true")  # let Spark detect column types
    .csv(file_path)
)

# 2. Data preprocessing.
# Drop identifier columns (player name, rank) that carry no predictive signal.
data = data.drop("Player", "Rk")

# Encode the categorical columns as numeric indices.
team_indexer = StringIndexer(inputCol="Tm", outputCol="team_index")
pos_indexer = StringIndexer(inputCol="Pos", outputCol="pos_index")

# Assemble features into a single vector column.
# BUG FIX: the indexer outputs ("team_index", "pos_index") were previously
# missing from feature_columns — it was built from data.columns, which does
# not yet contain them — so the StringIndexer stages ran but their outputs
# were silently ignored by the assembler. Append them explicitly so the
# encoded team/position information actually reaches the models.
feature_columns = [
    col for col in data.columns if col not in ("pre_pts", "Tm", "Pos")
] + ["team_index", "pos_index"]
assembler = VectorAssembler(inputCols=feature_columns, outputCol="features")

# Hold out 20% of the rows for testing; fixed seed keeps the split reproducible.
train_data, test_data = data.randomSplit([0.8, 0.2], seed=42)

# Model 1: plain linear regression baseline.
lr = LinearRegression(featuresCol="features", labelCol="pre_pts")

# Model 2: random forest regressor.
# NOTE(review): maxDepth=30 is Spark's hard maximum for tree depth and very
# deep for a forest — likely to overfit; confirm this was tuned deliberately.
rf = RandomForestRegressor(
    featuresCol="features",
    labelCol="pre_pts",
    numTrees=80,                    # ensemble size
    maxDepth=30,                    # Spark's maximum allowed depth
    maxBins=64,                     # bins for continuous-feature discretization
    subsamplingRate=0.8,            # row subsample per tree
    featureSubsetStrategy="sqrt",   # features considered per split
    seed=42,                        # reproducible training
)

# Both models share the same preprocessing stages; only the final estimator differs.
shared_stages = [team_indexer, pos_indexer, assembler]
pipeline_lr = Pipeline(stages=shared_stages + [lr])
pipeline_rf = Pipeline(stages=shared_stages + [rf])

# Fit each pipeline on the training split.
model_lr = pipeline_lr.fit(train_data)
model_rf = pipeline_rf.fit(train_data)

# Score the held-out test split with each fitted model.
predictions_lr = model_lr.transform(test_data)
predictions_rf = model_rf.transform(test_data)

# Convert predictions to pandas DataFrames for plotting.
predictions_lr_pd = predictions_lr.select("pre_pts", "prediction").withColumnRenamed("prediction", "lr_prediction").toPandas()
predictions_rf_pd = predictions_rf.select("pre_pts", "prediction").withColumnRenamed("prediction", "rf_prediction").toPandas()

# BUG FIX: the previous code merged the two frames with merge(on="pre_pts").
# "pre_pts" is the label, not a key — many test rows share the same point
# total, so that many-to-many join duplicated and mispaired rows, corrupting
# the comparison. Both frames are produced from the same test_data by
# deterministic transforms, so align them positionally instead.
# NOTE(review): this assumes toPandas() yields the same row order for both
# transforms of test_data — true for this deterministic local flow, but
# confirm if the pipeline is ever run with nondeterministic partitioning.
predictions_combined = predictions_lr_pd.copy()
predictions_combined["rf_prediction"] = predictions_rf_pd["rf_prediction"].to_numpy()

# Plot predicted vs. actual points for both models, with the y = x reference line.
plt.figure(figsize=(14, 7))
sns.scatterplot(x=predictions_combined["pre_pts"], y=predictions_combined["lr_prediction"], label="Linear Regression Prediction", alpha=0.6)
sns.scatterplot(x=predictions_combined["pre_pts"], y=predictions_combined["rf_prediction"], label="Random Forest Prediction", alpha=0.6)
sns.lineplot(x=predictions_combined["pre_pts"], y=predictions_combined["pre_pts"], color="red", linestyle="--", label="Actual Points")
plt.xlabel("Actual Points")
plt.ylabel("Predicted Points")
plt.title("Linear Regression vs Random Forest Predictions")
plt.legend()
plt.savefig("/home/developer/Desktop/PYY_RESULT/comparison_predictions.png")
plt.close()  # release the figure so it does not linger in memory

# Evaluate both models on the held-out predictions.
# A single evaluator is reused; the metric is overridden per evaluate() call.
evaluator = RegressionEvaluator(labelCol="pre_pts", predictionCol="prediction", metricName="mae")


def _regression_metrics(preds):
    """Return (MAE, MSE, R^2) for a predictions DataFrame."""
    return tuple(
        evaluator.evaluate(preds, {evaluator.metricName: metric})
        for metric in ("mae", "mse", "r2")
    )


mae_lr, mse_lr, r2_lr = _regression_metrics(predictions_lr)
mae_rf, mse_rf, r2_rf = _regression_metrics(predictions_rf)

# Report the metrics for each model.
print("Linear Regression Metrics:")
print(f"  Mean Absolute Error (MAE): {mae_lr}")
print(f"  Mean Squared Error (MSE): {mse_lr}")
print(f"  R-squared: {r2_lr}")

print("\nRandom Forest Metrics:")
print(f"  Mean Absolute Error (MAE): {mae_rf}")
print(f"  Mean Squared Error (MSE): {mse_rf}")
print(f"  R-squared: {r2_rf}")

# Release cluster resources.
spark.stop()