from pyspark import SparkContext
import os

from pyspark.mllib.evaluation import RegressionMetrics
from pyspark.mllib.recommendation import ALS

# Force the Spark workers to use the python3 interpreter (avoids driver/worker
# Python version mismatch errors on systems where `python` is Python 2).
os.environ['PYSPARK_PYTHON'] = "python3"

# Reuse an existing SparkContext if one is already running, otherwise create one.
sc = SparkContext.getOrCreate()
# Load the MovieLens 100k ratings file; each line is
# "user_id<TAB>item_id<TAB>rating<TAB>timestamp".
rdd = sc.textFile("file:///Users/sonto/Workspace/P1905/spark_example/ml_lesson/ml-100k/u.data")

def map_line(line):
    """Parse one tab-separated ml-100k rating line into (user_id, item_id, rating).

    The trailing timestamp field (and any extra fields) is ignored.
    """
    user, item, score = line.split("\t")[:3]
    return int(user), int(item), float(score)

# RDD of (user_id, item_id, rating) triples used as the ALS training set.
rating = rdd.map(map_line)


# Train an ALS collaborative-filtering model:
# rank=50 latent factors, 10 iterations, regularization lambda=0.01.
model = ALS.train(rating, 50, 10, 0.01)
# print(model.userFeatures().collect())
# print(model.productFeatures().collect())

# Look up the model's predicted rating of one product by one user
# rating_ = model.predict(789, 123)
# print(rating_)

# Show the top five recommended products for a user
# print(model.recommendProducts(789, 5))

# title_rdd = sc.textFile("file:///Users/sonto/Workspace/P1905/spark_example/ml_lesson/ml-100k/u.item").map(lambda x: x.split("|")[0:2])
# movies_for_user = rating.keyBy(lambda u: u[0]).lookup("789")
# print(sc.parallelize(movies_for_user).sortBy(lambda x:x[2], ascending=False).take(5))


# Evaluate the model on its own training set: pair each predicted rating with
# the actual rating, keyed by (user_id, item_id), then compute error metrics.
user_item_pairs = rating.map(lambda u: (u[0], u[1]))
predictions = model.predictAll(user_item_pairs).map(lambda u: ((u[0], u[1]), u[2]))
actuals = rating.map(lambda u: ((u[0], u[1]), u[2]))
# Join yields ((user, item), (predicted, actual)); keep only the value pair.
data = predictions.join(actuals).map(lambda u: u[1])

rm = RegressionMetrics(data)
print(rm.meanAbsoluteError)
print(rm.meanSquaredError)

# data= [(u[0], u[1], u[2], model.predict(u[0], u[1])) for u in rating.collect()]
#
# for d in data:
#     print(d)
