package com.cw.recommend.itemCF

import com.cw.recommend.common.Runner.{RunnerConfig, runSpark}
import com.cw.recommend.common.constant.{ITEM_CF_RECOMMEND_LIST, RATING_COLLECTION, RECOMMEND_NUM}
import com.cw.recommend.common.model.{ProductSimItem, ProductSimList}
import com.cw.recommend.common.util.MongoDBUtil.readMongoDB

object ItemCF {

  /**
   * Item-based collaborative filtering using co-occurrence similarity.
   *
   * Reads (userId, productId) rating pairs from MongoDB, computes for every
   * ordered product pair
   *   sim(p1, p2) = |users who rated both| / sqrt(|raters of p1| * |raters of p2|)
   * and writes the top-[[RECOMMEND_NUM]] most similar products per product
   * back to the [[ITEM_CF_RECOMMEND_LIST]] collection.
   */
  def main(args: Array[String]): Unit = {

    // Explicit type annotation: implicit definitions with inferred types are
    // warned against in 2.13 and an error in Scala 3.
    implicit val conf: RunnerConfig = RunnerConfig("itemCF")

    runSpark { spark =>
      import spark.implicits._

      // Only the (userId, productId) pair matters for co-occurrence counting.
      // distinct() guards against duplicate rating rows (e.g. a user re-rating
      // the same product), which would otherwise inflate both the per-product
      // rater counts and the co-occurrence counts below.
      val ratingDF = readMongoDB(spark, RATING_COLLECTION)
        .select("userId", "productId")
        .distinct()

      // Number of raters per product: |productId|count|
      val productRatingCountDF = ratingDF.groupBy("productId").count()
      val ratingWithCountDF = ratingDF.join(productRatingCountDF, "productId")

      // Self-join on userId: Cartesian product of each user's rated items.
      // After a USING join the column order is (userId, productId, count,
      // productId, count); toDF renames them positionally. Drop the diagonal
      // (p1 == p2); both (p1, p2) and (p2, p1) are kept intentionally so each
      // product ends up with its own recommendation list.
      val joinDF = ratingWithCountDF.join(ratingWithCountDF, "userId")
        .toDF("userId", "productId1", "count1", "productId2", "count2")
        .filter("productId1 != productId2")
      // createOrReplaceTempView instead of createTempView so a re-run inside
      // the same Spark session does not fail with "view already exists".
      joinDF.createOrReplaceTempView("joined")

      // Count how many users rated both products of each pair.
      // first(countX) is safe because countX is constant within a
      // (productId1, productId2) group; count(c1, c2) counts rows where both
      // columns are non-null, i.e. every row of the group here.
      val coOccurDF = spark.sql(
        """
select
  productId1,
  first(count1) count1,
  productId2,
  first(count2) count2,
  count(productId1, productId2) as co_occur
from joined
group by productId1, productId2
        """).as[(Int, Long, Int, Long, Long)]

      // Co-occurrence similarity, then keep the top RECOMMEND_NUM per product.
      val recommendList = coOccurDF.rdd
        .map { case (product1, count1, product2, count2, coOccur) =>
          // Widen to Double before multiplying so the product of two large
          // rater counts cannot overflow Long.
          (product1, ProductSimItem(product2, coOccur / math.sqrt(count1.toDouble * count2)))
        }.groupByKey()
        .map { case (productId, items) =>
          ProductSimList(productId, items.toSeq.sortWith(_.sim > _.sim).take(RECOMMEND_NUM))
        }.toDF

      import com.cw.recommend.common.util.MongoDBUtil._
      recommendList.sinkMongoDB(ITEM_CF_RECOMMEND_LIST)
    }
  }
}
