# -*- coding: utf-8 -*-

# 导入pyspark
from pyspark import SparkContext
from pyspark.mllib.clustering import KMeans
from pyspark.mllib.recommendation import ALS, Rating
from pyspark.mllib.linalg import Vectors

# k-均值算法
def kmeansCluster(trainMovies, testMovies, numClusters):
	# train(trainData, numClusters, 最大迭代次数, 最大轮回次数)
	movieClusterModel = KMeans.train(trainMovies.map(lambda x: Vectors.dense(x[1])), numClusters, maxIterations=10, 
										runs=3, initializationMode="random")
	# wcss return
	wcss = movieClusterModel.computeCost(testMovies.map(lambda x: Vectors.dense(x[1])))
	return wcss

# ---- main script: Spark context and data preparation ----
sc = SparkContext("yarn-client", "K-Means Spark App")

# Load the ratings file from HDFS; each line is tab-separated:
# user id, movie id, rating, timestamp.
ratingPath = "hdfs://192.168.119.141:9100/data/movie/u.data"
ratings_data = (sc.textFile(ratingPath)
	.map(lambda line: line.split('\t'))
	.map(lambda rec: Rating(int(rec[0]), int(rec[1]), float(rec[2]))))
# Cache the parsed ratings: ALS iterates over this RDD many times.
ratings_data.cache()

# ALS.train(ratings, rank, iterations, lambda_) factorizes the rating matrix;
# productFeatures() yields an RDD of (movieId, latent-factor-array) pairs.
alsModel = ALS.train(ratings_data, 50, 10, 0.1)
movieAls = alsModel.productFeatures()
# 80/20 train/test split with a fixed seed (17) for reproducibility.
trainMovies, testMovies = movieAls.randomSplit([0.8, 0.2], 17)

# Evaluate K-Means for a range of cluster counts and record WCSS per k.
# Compute the metrics first, then write the report inside a context manager:
# the original opened the file before training and never closed it if
# kmeansCluster raised, leaking the handle.
params = [5, 10, 20, 30, 40, 50, 60]
metrics = [kmeansCluster(trainMovies, testMovies, k) for k in params]

with open('kmeansQuality.txt', 'w') as f:
	f.write("\n<br>===wcss===<br>\n")
	# zip pairs each k with its WCSS — same output bytes as the index loop.
	for k, wcss in zip(params, metrics):
		f.write("\n<br>%d: %f<br>\n" % (k, wcss))

# Shut down the Spark context.
sc.stop()
