from pyspark.ml.recommendation import ALS
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.types import Row

# Build (or reuse) the active SparkSession; all DataFrame work hangs off it.
session = SparkSession.builder.getOrCreate()
# The session already owns its SparkContext -- use it directly. The original
# called the classmethod SparkContext.getOrCreate() through the instance
# attribute, which ignores the instance and is misleading.
sc = session.sparkContext
# Load the ratings CSV from HDFS (one "uid,mid,rating" record per line) and
# turn each record into a Row so the RDD converts into a DataFrame with
# named, typed columns.
df = sc.textFile("hdfs://localhost:9000/score.csv") \
    .map(lambda line: line.split(",")) \
    .map(lambda f: Row(uid=int(f[0]), mid=int(f[1]), rating=float(f[2]))) \
    .toDF()
print(df.count())

# Randomly split the data: 80% training set, 20% test set.
train, test = df.randomSplit([0.8, 0.2])
# ALS (alternating least squares) collaborative-filtering model.
# coldStartStrategy="drop" discards NaN predictions for users/items that
# were not seen during training.
als = ALS(maxIter=5, regParam=0.01, userCol="uid", itemCol="mid", ratingCol="rating",
          coldStartStrategy="drop")
# Fit the model on the training split.
model = als.fit(train)

userInput = input("Please enter the command('y' to continue,'n' to quit): ")
while userInput != 'n':

    if userInput == 'y':
        # Recommend movies for a single user chosen interactively.
        uid = input("Please enter the user id:")
        number = input("Please enter the recommend number of movie:")
        # Filter with a column expression instead of concatenating raw user
        # input into a SQL string -- avoids injection and SQL parse errors;
        # a non-numeric id now fails fast with a clear ValueError.
        user = df.select("uid").where(df["uid"] == int(uid)).distinct()
        # Top-N recommendations for the selected user subset.
        result = model.recommendForUserSubset(user, int(number))
        # Flatten the recommendations array: one output row per recommended
        # movie, kept in a new "movies" column.
        result = result.withColumn("movies", explode("recommendations"))
        result.show()
        for m in result.collect():
            print(m)
    # After each pass, ask again whether to keep querying.
    userInput = input("Please enter the command('y' to continue,'n' to quit): ")
