import sys
from pyspark.sql.functions import explode
from pyspark.ml import Pipeline
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit, CrossValidator
from pyspark.ml.recommendation import ALS
from pyspark.sql import SparkSession

# Entry point: build (or reuse) the Spark session for this job.
spark = SparkSession.builder.appName("als").getOrCreate()

# Pull implicit-feedback data from MySQL: the pushed-down subquery aggregates
# the `log` table into (userid, carid, counter) rows, where counter is the
# number of interactions a user had with a car.
# NOTE(review): credentials are hardcoded — move user/password to config or
# environment before deploying.
data = (
    spark.read.format("jdbc")
    .option("url", "jdbc:mysql://localhost:3306/recommend?serverTimezone=UTC")
    .option("driver", "com.mysql.cj.jdbc.Driver")
    .option("dbtable", "(select userid,carid,count(*) counter from `log` group by userid,carid ) tmp")
    .option("user", "root")
    .option("password", "123")
    .load()
)

# Hold out 20% of the interactions for evaluation.
train, test = data.randomSplit([0.8, 0.2])

# Implicit-feedback ALS: `counter` (interaction count) acts as a confidence
# signal rather than an explicit rating.
# coldStartStrategy="drop" is essential here: users/items that appear only in
# the validation split would otherwise yield NaN predictions, making the RMSE
# NaN and TrainValidationSplit's model selection meaningless.
als = ALS(
    userCol="userid",
    itemCol="carid",
    ratingCol="counter",
    implicitPrefs=True,
    coldStartStrategy="drop",
)

# Hyperparameter search space: 2 x 2 x 2 = 8 candidate models.
param_grid = (
    ParamGridBuilder()
    .addGrid(als.rank, [10, 15])
    .addGrid(als.maxIter, [10, 20])
    .addGrid(als.regParam, [0.01, 0.05])
    .build()
)

# Score each candidate by RMSE between predicted and observed counts.
evaluator = RegressionEvaluator(metricName="rmse", labelCol="counter", predictionCol="prediction")

# TrainValidationSplit fits each param combination once (cheaper than
# CrossValidator) and keeps the best model by evaluator score.
train_valid_split = TrainValidationSplit(estimator=als, estimatorParamMaps=param_grid, evaluator=evaluator)
model = train_valid_split.fit(train)

best_model = model.bestModel

# Persist the winning ALSModel; overwrite() keeps re-runs from failing with
# "path already exists".
path = "C:\\alsmodel"
best_model.write().overwrite().save(path)
