from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, DoubleType, IntegerType
from pyspark.sql.functions import to_json, col

# Cluster configuration: HDFS namenode endpoint plus the paths this job
# reads from and writes to.
HADOOP_SERVER = r'namenode.fredyvia.asia:9000'
INPUT_PATH = r'/ratings.csv'
OUTPUT_PATH_USER = r'/user'
OUTPUT_PATH_MOVIE = r'/movie'

# Obtain (or reuse) the application's SparkSession.
spark = SparkSession.builder.getOrCreate()

# Fully qualified HDFS URIs: the ratings input file and the two
# recommendation output directories.
source_file = f'hdfs://{HADOOP_SERVER}{INPUT_PATH}'
res_file_user = f'hdfs://{HADOOP_SERVER}{OUTPUT_PATH_USER}'
res_file_movie = f'hdfs://{HADOOP_SERVER}{OUTPUT_PATH_MOVIE}'

# Read the ratings CSV with an explicit schema so columns arrive correctly
# typed without the extra pass schema inference would cost.
# NOTE(review): MovieLens-style ratings.csv files usually also carry a
# `timestamp` column; only three columns are declared here — confirm the
# input really matches this schema.
ratings_schema = StructType([
    StructField('userId', IntegerType(), True),
    StructField('movieId', IntegerType(), True),
    StructField('rating', DoubleType(), True),
])
ratings = spark.read.csv(source_file, header=True, schema=ratings_schema)

# 80/20 train/test split. A fixed seed makes the split — and therefore the
# RMSE reported below — reproducible across runs (the original was seeded
# randomly each run).
(training, test) = ratings.randomSplit([0.8, 0.2], seed=42)

# Build the recommendation model using ALS on the training data.
# coldStartStrategy='drop' discards test rows whose user or movie was never
# seen in training, so the prediction column contains no NaNs and the RMSE
# below is always well defined.
als = ALS(maxIter=5, regParam=0.01, userCol="userId", itemCol="movieId",
          ratingCol="rating", coldStartStrategy="drop")
model = als.fit(training)

# Evaluate the model by computing the RMSE on the held-out test data.
predictions = model.transform(test)
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",
                                predictionCol="prediction")
rmse = evaluator.evaluate(predictions)
print(f"Root-mean-square error = {rmse}")

# Generate top-10 recommendation lists in both directions.
userRecs = model.recommendForAllUsers(10)    # per user: top 10 movies
movieRecs = model.recommendForAllItems(10)   # per movie: top 10 users

# Debug output: nested schemas before flattening.
userRecs.printSchema()
print(type(userRecs))
movieRecs.printSchema()
print(type(movieRecs))

# CSV cannot represent the nested array-of-struct `recommendations`
# column, so serialize it to a JSON string before writing.
userRecs = userRecs.withColumn(
    "recommendations", to_json(col("recommendations"))).orderBy("userId")
movieRecs = movieRecs.withColumn(
    "recommendations", to_json(col("recommendations"))).orderBy("movieId")

# Debug output: flattened schemas after conversion.
userRecs.printSchema()
print(type(userRecs))
movieRecs.printSchema()
print(type(movieRecs))

# coalesce(1) forces a single part-file per output directory — convenient
# for small result sets, but a single-executor bottleneck for large ones.
userRecs.coalesce(1).write.mode('overwrite').option('header', True).csv(res_file_user)
movieRecs.coalesce(1).write.mode('overwrite').option('header', True).csv(res_file_movie)

# Release cluster resources explicitly; the original script leaked the
# session until JVM shutdown.
spark.stop()
