package com.xbai.spark.recall.engine.training

import breeze.numerics.{pow, sqrt}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * User-based collaborative filtering over (user, item, rating) triples,
  * using cosine similarity between user rating vectors:
  *
  *   sim(X, Y) = (x1*y1 + x2*y2 + ...) / (|X| * |Y|)
  *   where |X| = sqrt(x1^2 + x2^2 + ...) and |Y| = sqrt(y1^2 + y2^2 + ...)
  *
  * The numerator is the sum of rating products over items both users rated;
  * the denominator is the product of the two users' rating-vector norms.
  *
  * @author xbai
  * @since 2021/1/13
  */
class UserCF {

  /**
    * Computes the L2 norm of each user's rating vector, i.e.
    * |X| = sqrt(x1^2 + x2^2 + ... + xn^2).
    *
    * @param userItemRating DataFrame whose columns are, positionally,
    *                       (userId, itemId, rating)
    * @param spark          Spark session (provides the implicits for toDF)
    * @return DataFrame with columns (userId, sqrtRatingSum): the norm of each
    *         user's rating vector
    */
  def getUserSqrtRatingSum(userItemRating: DataFrame, spark: SparkSession): DataFrame = {
    import spark.implicits._
    // Columns are addressed positionally: x(0) = userId, x(2) = rating.
    // Group every rating by user, then fold each group into sqrt(sum of squares).
    val rddMap: RDD[(String, Double)] = userItemRating.rdd.map(x => (x(0).toString, x(2).toString))
      .groupByKey()
      .mapValues(x => sqrt(x.toArray.map(rating => pow(rating.toDouble, 2)).sum))
    val userRating: DataFrame = rddMap.toDF("userId", "sqrtRatingSum")
    println("========== userSqrtRatingSum ==========")
    userRating.show(3)
    userRating
  }

  /**
    * Computes, for every pair of distinct users, the dot product of their
    * rating vectors over co-rated items, i.e. x1*y1 + x2*y2 + ...
    *
    * Both orderings (u, v) and (v, u) are produced, which is what
    * [[userSimilarity]] expects when it joins on each side of the pair.
    *
    * @param userItemRating DataFrame with columns (userId, itemId, rating)
    * @param spark          Spark session (unused; kept for interface compatibility)
    * @return DataFrame with columns (userId, userId1, ratingSumPro)
    */
  def getUserSameItemProductSum(userItemRating: DataFrame, spark: SparkSession): DataFrame = {
    val dataCopy: DataFrame = userItemRating.selectExpr("userId as userId1", "itemId as itemId1", "rating as rating1")

    // Self-join on itemId pairs every two rows that share an item.
    // Filtering on userId != userId1 (instead of a synthetic monotonically
    // increasing row id, as before) also excludes self-pairs when a user has
    // duplicate rows for the same item, which would otherwise inflate the sum.
    val selectData: DataFrame = dataCopy.join(userItemRating, dataCopy("itemId1") === userItemRating("itemId"))
      .filter("userId != userId1")
      .selectExpr("userId", "userId1", "rating", "rating1")
    val userData: DataFrame = selectData.selectExpr("userId", "userId1", "rating*rating1 as ratingProduct")
    // Sum the per-item products for each ordered user pair.
    val userRatingSum: DataFrame = userData.groupBy("userId", "userId1")
      .sum("ratingProduct")
      .withColumnRenamed("sum(ratingProduct)", "ratingSumPro")

    println("========== userSameItemProductSum ==========")
    userRatingSum.show(3)
    userRatingSum
  }

  /**
    * Combines the pairwise dot products with the per-user norms to obtain
    * the cosine similarity: usersim = ratingSumPro / (sqrtRatingSum * sqrtRatingSum1).
    *
    * NOTE(review): a user whose every rating is 0 has sqrtRatingSum = 0, so the
    * division yields null/NaN for that pair — confirm upstream data excludes this.
    *
    * @param userRating         (userId, sqrtRatingSum) from [[getUserSqrtRatingSum]]
    * @param userSameItemRating (userId, userId1, ratingSumPro) from [[getUserSameItemProductSum]]
    * @param spark              Spark session (unused; kept for interface compatibility)
    * @return DataFrame with columns (userId, userId1, usersim)
    */
  def userSimilarity(userRating: DataFrame, userSameItemRating: DataFrame, spark: SparkSession): DataFrame = {
    // Attach each side's norm: join once on userId, once on the aliased userId1.
    val dataCopy: DataFrame = userRating.selectExpr("userId as userId1", "sqrtRatingSum as sqrtRatingSum1")
    val middleData: DataFrame = userSameItemRating.join(userRating, "userId").join(dataCopy, "userId1")
    val userSimilar: DataFrame = middleData.selectExpr("userId", "userId1", "ratingSumPro/(sqrtRatingSum*sqrtRatingSum1) as usersim")

    println("========== userSimilar ==========")
    userSimilar.show(3)
    userSimilar
  }

}
