package com.xbai.spark.recall.test

import breeze.numerics.{pow, sqrt}
import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.{desc, row_number}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
  * @author xbai
  * @since 2021-01-21
  */
object RecallItemCF {

  /**
    * Item-based collaborative filtering (ItemCF) recall job.
    *
    * Pipeline:
    *   1. Derive user-item ratings from listen logs:
    *      rating = time spent on the item / user's total listening time.
    *   2. Compute item-item cosine similarity over the rating matrix.
    *   3. For each user, take their top-liked items and recall the items most
    *      similar to them, weighted by rating * similarity.
    *
    * Reads Hive table `user_listen` and writes tables `rating` and
    * `itemSimilar` in database `db_recall`.
    */
  def main(args: Array[String]): Unit = {
    // Create the Spark environment with Hive support.
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("RecallItemCF")
    val spark: SparkSession = SparkSession.builder().config(conf)
      .enableHiveSupport().getOrCreate()
    spark.sql("use db_recall")
    import spark.implicits._

    /* 1. Load data and build the rating matrix */
    val userListen: DataFrame = spark.sql("select userId, musicId, cast(remainTime as double), cast(durationHour as double) from user_listen")
    userListen.show(3)
    // Total time each user spent listening to each song.
    val itemTotalTime: DataFrame = userListen.selectExpr("userId", "musicId", "remainTime")
      .groupBy("userId", "musicId")
      .sum("remainTime")
      .withColumnRenamed("sum(remainTime)", "itemTotalTime")
    // Total time each user spent listening overall.
    val totalTime: DataFrame = userListen.selectExpr("userId", "remainTime")
      .groupBy("userId")
      .sum("remainTime")
      .withColumnRenamed("sum(remainTime)", "totalTime")
    // uid-iid-rating: the share of the user's listening time spent on this item.
    val data: DataFrame = itemTotalTime.join(totalTime, "userId")
      .selectExpr("userId", "musicId as itemId", "itemTotalTime/totalTime as rating")
    data.write.mode("overwrite").saveAsTable("rating")

    /* 2. Item-item cosine similarity */
    // Numerator: for every pair of distinct items rated by the same user,
    // accumulate rating * rating. Identical-item pairs are skipped — their
    // similarity is trivially 1 and need not be computed.
    val dataCopy: DataFrame = data.selectExpr("userId as userId1", "itemId as itemId1", "rating as rating1")
    val productRating: DataFrame = dataCopy.join(data, dataCopy("userId1") === data("userId"))
      .filter("cast(itemId as long)!=cast(itemId1 as long)")
      .selectExpr("itemId", "itemId1", "rating*rating1 as productRating")
    val productRatingSum: DataFrame = productRating.groupBy("itemId", "itemId1")
      .sum("productRating")
      .withColumnRenamed("sum(productRating)", "productRatingSum")
    productRatingSum.show(3)

    // Denominator: L2 norm of each item's rating vector.
    // reduceByKey aggregates squared ratings incrementally, avoiding the
    // groupByKey anti-pattern of materialising all of an item's ratings at once.
    val itemSqrtRatingSum: DataFrame = data.rdd
      .map(x => (x(1).toString, math.pow(x(2).toString.toDouble, 2)))
      .reduceByKey(_ + _)
      .mapValues(math.sqrt)
      .toDF("itemId", "itemSqrtRatingSum")

    // Cosine similarity = numerator / (norm_i * norm_j).
    val itemSqrtRatingSumCopy: DataFrame = itemSqrtRatingSum.selectExpr("itemId as itemId1", "itemSqrtRatingSum as itemSqrtRatingSum1")
    val itemSimilar: DataFrame = productRatingSum.join(itemSqrtRatingSum, "itemId")
      .join(itemSqrtRatingSumCopy, "itemId1")
      .selectExpr("itemId", "itemId1", "productRatingSum/(itemSqrtRatingSum*itemSqrtRatingSum1) as itemSimilar")

    val itemSimilarSort: Dataset[Row] = itemSimilar.sort(desc("itemId"), desc("itemSimilar"))
      .filter("itemSimilar > 0")
    itemSimilarSort.write.mode("overwrite").saveAsTable("itemSimilar")

    /* 3. Generate per-user recommendations */
    val dataCache: DataFrame = spark.sql("select * from rating")
    val itemSimilarCache: DataFrame = spark.sql("select * from itemSimilar")
    // Top-5 favourite items per user. FIX: the window must partition by userId
    // (rank items within a user); the previous partitionBy("itemId") ranked
    // users within an item, contradicting the intended per-user top-n.
    // If time allows, ratings could additionally be time-decay weighted here.
    val userLikeItem: DataFrame = dataCache.withColumn("rank",
      row_number().over(Window.partitionBy("userId").orderBy($"rating".desc)))
      .filter("rank <= 5").drop("rank")
    userLikeItem.show(10)

    // Recall items similar to the user's favourites, weight = rating * similarity,
    // and keep the top 20 per user.
    val recommendList: DataFrame = userLikeItem.join(itemSimilarCache, "itemId")
      .filter("itemId<>itemId1")
      .selectExpr("userId", "itemId1 as recallItem", "rating*itemSimilar as recallWeight")
      .withColumn("rank", row_number().over(Window.partitionBy("userId").orderBy($"recallWeight".desc)))
      .filter("rank<=20").drop("rank")
    recommendList.show(10)

    // Release cluster resources when the job finishes.
    spark.stop()
  }
}
