package com.mininglamp.itemcf

import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

/**
  * Project: ECommerceRecommendSystem
  * Package: com.mininglamp.itemcf
  * Description: item-based collaborative filtering (ItemCF) recommendation
  * based on co-occurrence similarity.
  *
  * Created by ZhouPeng on 2022/01/11 17:56
  **/
object ItemCFRecommender {

  // Mongo collection holding the raw user-product ratings.
  val MONGODB_RATING_COLLECTION = "Rating"

  // Mongo collection the ItemCF similarity lists are written to.
  val ITEM_CF_PRODUCTS_RECS = "ItemCFProductRecs"
  // Maximum number of similar products kept per product.
  val MAX_RECOMMENDATION_LENGTH = 10

  /**
    * Entry point: loads ratings from Mongo, computes the co-occurrence
    * similarity between products and writes each product's top-N
    * similarity list back to Mongo.
    */
  def main(args: Array[String]): Unit = {
    // Basic runtime configuration.
    val config = Map(
      "spark.cores" -> "local[*]",
      "mongo.uri" -> "mongodb://master:27017/recommender",
      "mongo.db" -> "recommender"
    )

    val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("ItemCFRecommender")
    val sparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    import sparkSession.implicits._
    // Explicit type annotation: implicits should always declare their type.
    implicit val mongoConfig: MongoConfig = MongoConfig(config("mongo.uri"), config("mongo.db"))

    val readConfig = ReadConfig(Map(
      "uri" -> mongoConfig.mongoUri,
      "collection" -> MONGODB_RATING_COLLECTION
    ))

    // Load the rating data; only (userId, productId) pairs are needed here.
    val ratingDF = MongoSpark.load(sparkSession.sparkContext, readConfig).map(
      document => {
        (document.getInteger("userId"), document.getInteger("productId"))
      }
    ).toDF("userId", "productId")
      .cache()

    // Core algorithm: co-occurrence similarity -> per-product similarity lists.
    // 1. Count how many times each product was rated (group by productId).
    val ratingCountDF = ratingDF.groupBy("productId").count()

    // 2. Attach each product's rating count to every rating row.
    val ratingDFWithCount = ratingDF.join(ratingCountDF, "productId")

    // 3. Self-join on userId to enumerate product pairs rated by the same
    //    user. Column order after the join is
    //    (userId, productId, count, productId, count); renamed positionally.
    val joinDF = ratingDFWithCount.join(ratingDFWithCount, "userId")
      .toDF("userId", "product1", "count1", "product2", "count2")
    // Register a temp view so the aggregation can be expressed in SQL.
    joinDF.createOrReplaceTempView("joined")

    // Group by (product1, product2) to count co-raters. count1/count2 are
    // constant within a group, so first() merely carries them through.
    val cooccurrenceDF = sparkSession.sql(
      """
        |select product1,product2,
        |count(userId) as cocount,
        |first(count1) as count1,
        |first(count2) as count2
        |from joined
        |group by product1,product2
      """.stripMargin).cache()

    val itemCFDF = cooccurrenceDF.map(
      row => {
        val coocSim = cooccurrenceSim(row.getAs[Long]("cocount"), row.getAs[Long]("count1"), row.getAs[Long]("count2"))
        (row.getAs[Int]("product1"), (row.getAs[Int]("product2"), coocSim))
      }
    ).rdd
      .groupByKey()
      .map {
        case (productId, recs) =>
          // Drop the self-pair, then sort by similarity BEFORE truncating so
          // the list really contains the N most similar products. (The
          // previous code truncated first, keeping 10 arbitrary items.)
          ProductRecs(productId, recs.filter(_._1 != productId).toList
            .sortWith(_._2 > _._2)
            .take(MAX_RECOMMENDATION_LENGTH)
            .map(x => Recommendation(x._1, x._2)))
      }.toDF()

    // Persist the per-product similarity lists back to Mongo.
    itemCFDF.write
      .option("uri", mongoConfig.mongoUri)
      .option("collection", ITEM_CF_PRODUCTS_RECS)
      .format("com.mongodb.spark.sql")
      .mode("overwrite")
      .save()

    // Release the cached rating data before shutting down.
    ratingDF.unpersist()
    sparkSession.stop()
  }

  /**
    * Co-occurrence similarity of two products:
    * cocount / sqrt(count1 * count2).
    *
    * @param cocount number of users who rated both products
    * @param count1  number of users who rated product 1
    * @param count2  number of users who rated product 2
    * @return similarity score; 0.0 when either count is zero
    */
  def cooccurrenceSim(cocount: Long, count1: Long, count2: Long): Double = {
    // Convert to Double before multiplying to avoid Long overflow on very
    // popular products, and guard the zero denominator (would otherwise
    // produce Infinity/NaN).
    val denominator = math.sqrt(count1.toDouble * count2.toDouble)
    if (denominator == 0.0) 0.0 else cocount / denominator
  }
}

/**
  * A single recommendation entry.
  *
  * @param productId id of the recommended product
  * @param score     similarity/recommendation score (higher is better)
  */
final case class Recommendation(productId: Int, score: Double)


/**
  * Per-product similarity list: the products most similar to `productId`.
  *
  * @param productId id of the source product
  * @param recs      similar products, ordered by descending score
  */
final case class ProductRecs(productId: Int, recs: Seq[Recommendation])


/**
  * MongoDB connection settings.
  *
  * @param mongoUri MongoDB connection URI
  * @param mongodb  name of the MongoDB database to use
  */
final case class MongoConfig(mongoUri: String, mongodb: String)

/**
  * A single product rating event, e.g. `4867,457976,5.0,1395676800`.
  *
  * @param userId    id of the rating user (e.g. 4867)
  * @param productId id of the rated product (e.g. 457976)
  * @param score     rating value (e.g. 5.0)
  * @param timestamp epoch seconds when the rating was made (e.g. 1395676800)
  */
final case class Rating(userId: Int, productId: Int, score: Double, timestamp: Long)

