from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.types import StructType, StructField, ArrayType, DoubleType, IntegerType
from pyspark.sql.functions import to_json, col

# Tunables: minimum rating counted as a "like", and top-N limits.
AVERAGE = 2.5
TOP_USER = 3
TOP_MOVIE = 10

# HDFS namenode and the input/output paths on it.
HADOOP_SERVER = 'namenode.fredyvia.asia:9000'
INPUT_PATH = '/ratings.csv'
OUTPUT_PATH = '/ucf'

spark = SparkSession.builder.getOrCreate()

# Fully-qualified HDFS URIs for the ratings source and the result sink.
source_file = f'hdfs://{HADOOP_SERVER}{INPUT_PATH}'
res_file = f'hdfs://{HADOOP_SERVER}{OUTPUT_PATH}'

# Explicit schema for the ratings CSV: userId, movieId, rating.
ratings_schema = StructType([
    StructField('userId', IntegerType(), True),
    StructField('movieId', IntegerType(), True),
    StructField('rating', DoubleType(), True),
])
df = spark.read.csv(source_file, header=True, schema=ratings_schema)

df.printSchema()

# A user "likes" a movie when its rating is strictly above AVERAGE.
u_like = df.filter(df.rating > AVERAGE)
u_like.show()

# Collapse to one row per user holding the set of liked movie ids.
u_like_array = (
    u_like
    .select('userId', 'movieId')
    .groupBy('userId')
    .agg(F.collect_set('movieId').alias('movieIds'))
)

# Schema for one directed similarity edge: user1 is similar to user2 with a
# Jaccard score, and `recom` holds the movies user2 liked that user1 has not.
u_i_union_similarity_column_struct = StructType([
    StructField('userId1', IntegerType(), False),
    StructField('userId2', IntegerType(), False),
    StructField('similarity', DoubleType(), False),
    StructField('recom', ArrayType(IntegerType()), False)
])

# FIX: collect the per-user like-sets ONCE. The original called
# u_like_array.rdd.collect() inside the inner loop, launching a full Spark
# job for every outer-loop iteration.
liked_rows = u_like_array.rdd.collect()

# FIX: accumulate plain Python rows and build the DataFrame in one shot at
# the end. Repeatedly union()-ing tiny 2-row DataFrames inside the loop
# grows the logical plan linearly and is very slow to analyse/execute.
pair_rows = []
count = 0
for row1 in liked_rows:
  for row2 in liked_rows:
    if row1['userId'] >= row2['userId']:
      continue  # visit each unordered user pair exactly once
    set1 = set(row1['movieIds'])
    set2 = set(row2['movieIds'])
    inter = set1 & set2
    if not inter:
      continue  # no overlap -> similarity 0, nothing to recommend
    union = set1 | set2
    similarity = len(inter) / len(union)  # Jaccard index
    # Emit both directions so each user receives the other's exclusive likes.
    pair_rows.append({'userId1': row1['userId'], 'userId2': row2['userId'],
                      'similarity': similarity, 'recom': list(set2 - set1)})
    pair_rows.append({'userId1': row2['userId'], 'userId2': row1['userId'],
                      'similarity': similarity, 'recom': list(set1 - set2)})
    count += 1
    if count >= 20:  # same demo cap as the original: stop after 20 pairs
      break
  if count >= 20:
    break

if pair_rows:
  u_i_union_similarity = spark.createDataFrame(
      data=pair_rows, schema=u_i_union_similarity_column_struct)
else:
  # Preserve the original's empty-but-typed DataFrame when no pair overlaps.
  u_i_union_similarity = spark.createDataFrame(
      spark.sparkContext.emptyRDD(), u_i_union_similarity_column_struct)
u_i_union_similarity.show()

# Schema for one weighted recommendation: movieId suggested to userId with a
# score = (user2's rating) * (similarity between user1 and user2).
u_m_column_struct = StructType([
    StructField('userId', IntegerType(), False),
    StructField('movieId', IntegerType(), False),
    StructField('recom', DoubleType(), False),
])

sim_rows = u_i_union_similarity.rdd.collect()

# FIX: fetch every rating we might need in ONE Spark job, keyed by
# (userId, movieId). The original ran df.filter(...).first() per recommended
# movie — one full Spark job each — and crashed with a TypeError
# (None['rating']) whenever the rating row was missing.
wanted_users = {row['userId2'] for row in sim_rows}
if wanted_users:
  rating_lookup = {
      (r['userId'], r['movieId']): r['rating']
      for r in df.filter(df.userId.isin(list(wanted_users))).collect()
  }
else:
  rating_lookup = {}

# FIX: build plain rows and create the DataFrame once instead of an
# incremental union per recommendation (lineage blow-up, see block above).
recommend_rows = []
for row in sim_rows:
  for movie_id in row['recom']:
    rating = rating_lookup.get((row['userId2'], movie_id))
    if rating is None:
      continue  # missing rating: skip instead of crashing as before
    recommend_rows.append({'userId': row['userId1'], 'movieId': movie_id,
                           'recom': rating * row['similarity']})

if recommend_rows:
  u_m_recommend = spark.createDataFrame(
      data=recommend_rows, schema=u_m_column_struct)
else:
  u_m_recommend = spark.createDataFrame(
      spark.sparkContext.emptyRDD(), u_m_column_struct)
u_m_recommend.show()

# Fold each user's recommendations into a list of {movieId: score} maps,
# serialise that list as a JSON string, and write a single ordered CSV file.
score_map = F.create_map(F.col('movieId'), F.col('recom'))
res = u_m_recommend.groupBy('userId').agg(
    F.collect_list(score_map).alias('recom'))
res.show()

res = res.orderBy('userId').withColumn('recom', to_json(col('recom')))
res.coalesce(1).write.mode('overwrite').option('header', 'true').csv(res_file)