package cb_rec

import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
import org.apache.spark.ml.linalg.{SparseVector, Vectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.codehaus.jackson.map.ObjectMapper

/**
  * Created by hunter.coder (Tao Ge)
  * 2019/5/3 13:55
  * Contact QQ: 657270652
  * Version: 1.0
  * More learning material: https://blog.csdn.net/coderblack/
  * Description: content-based (CB) recommender using item-to-item content similarity
  **/
object ItemCB {

  /**
    * Content-based item-to-item recommender.
    *
    * Pipeline:
    *   1. Item profile: TF-IDF vectors over each item's category/keyword text.
    *   2. Pairwise item similarity (Euclidean-based and cosine).
    *   3. User profile: implicit score per (user, item) aggregated from behaviour logs.
    *   4. Candidate score per user = sum(user_score * item_similarity); keep top-N per user.
    *
    * @param args optional overrides — args(0): item-profile CSV path,
    *             args(1): user-behaviour JSON-lines path. Defaults preserve the
    *             original hard-coded local paths for backward compatibility.
    */
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.WARN)

    // Input paths are overridable from the command line (backward-compatible defaults).
    val itemProfilePath =
      if (args.length > 0) args(0)
      else "G:\\data_shark\\doit_recommender\\src\\main\\scala\\cb_rec\\item.profile.dat"
    val userLogPath =
      if (args.length > 1) args(1)
      else "G:\\data_shark\\doit_recommender\\src\\main\\scala\\cb_rec\\u.profile.dat"

    val spark = SparkSession.builder().appName("item-cb").master("local").getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    /**
      * Build the item profile: one TF-IDF vector per item.
      */
    val df = spark.read.option("header", "true").csv(itemProfilePath)
    // Merge the three category levels and the keyword list into one space-separated text field.
    val raw = df.select('pid, concat_ws(" ", 'cat1, 'cat2, 'cat3, 'kwds).as("kwds"))

    val tokenizer = new Tokenizer().setInputCol("kwds").setOutputCol("wds")
    val wdf = tokenizer.transform(raw)
    wdf.show(10, false)

    val tF = new HashingTF().setInputCol("wds").setOutputCol("wd_tf").setNumFeatures(10000)
    val tfdf = tF.transform(wdf)
    val idf = new IDF().setInputCol("wd_tf").setOutputCol("tf_idf")
    val idfModel = idf.fit(tfdf)
    val tfIdf: DataFrame = idfModel.transform(tfdf)
    val items = tfIdf.select("pid", "tf_idf")

    // Pairwise item similarities.
    // BUGFIX: the original filtered "pid < o_pid" (upper triangle only), but the later
    // join on 'pid === 'pid1 requires the user's rated item to appear as pid1 — so every
    // candidate pair whose rated item had the lexicographically larger id was silently
    // lost. Keeping both directions (pid != o_pid) makes the matrix symmetric.
    val ip = items.crossJoin(items.select('pid.as("o_pid"), 'tf_idf.as("o_tfidf")))
      .filter("pid != o_pid")
      .rdd
      .map(row => {
        val pid1 = row.getAs[String]("pid")
        val pid2 = row.getAs[String]("o_pid")
        val v1 = row.getAs[SparseVector]("tf_idf")
        val v2 = row.getAs[SparseVector]("o_tfidf")
        // Euclidean-distance-based similarity: 1 / (1 + d), mapped into (0, 1].
        val dist = 1 / (1 + Math.sqrt(Vectors.sqdist(v1, v2)))
        // Cosine similarity.
        val dist2 = DistUtil.cosineDist(v1, v2)
        (pid1, pid2, dist, dist2)
      })
      .toDF("pid1", "pid2", "simi1", "simi2")
    println("item profile .........")
    ip.show(20, false)

    /**
      * Build the user-profile matrix: implicit score per (user, item).
      */
    val ud = spark.read.textFile(userLogPath)

    // Parse one JSON log line into (user, item, implicit score).
    // mapPartitions builds ONE ObjectMapper per partition instead of one per record.
    val ui_rate = ud.mapPartitions(iter => {
      val om = new ObjectMapper()
      iter.map(js => {
        val node = om.readTree(js)
        val gid = node.get("gid").getTextValue
        val tp = node.get("logtype").getTextValue
        val pid = node.get("event").get("pid").getTextValue

        // Implicit-feedback weights: page view < add-to-cart < explicit rating
        // (ratings are centred on 2.5 so poor ratings contribute negatively).
        val score: Double = tp match {
          case "pv" => 1
          case "add_cart" => 2
          case "rate" => node.get("event").get("score").getDoubleValue - 2.5
          // BUGFIX: the original match was non-exhaustive — any other log type
          // threw scala.MatchError and failed the job. Unknown types contribute 0.
          case _ => 0
        }
        (gid, pid, score)
      })
    }).toDF("gid", "pid", "score")

    val up = ui_rate.groupBy("gid", "pid").agg(sum('score).as("score"))
    println("user profile ...........")
    up.show(10, false)

    // Join each rated item with its similar items; weight similarity by the user's score.
    val rec_tmp = up.join(ip, 'pid === 'pid1)
      .selectExpr("gid", "pid", "pid2", "score", "score * simi2 as simi")
      .orderBy("gid", "pid", "simi")

    rec_tmp.show(10, false)

    // Aggregate candidate scores per (user, candidate item).
    // TODO(review): candidates the user has already interacted with are not filtered out.
    rec_tmp.select("gid", "pid2", "simi")
      .groupBy("gid", "pid2")
      .agg(sum('simi) as "simi")
      .createOrReplaceTempView("rec_tmp") // OrReplace: safe to re-run in the same session

    // Top-2 recommendations per user via a ranking window.
    spark.sql(
      """
        |select
        |gid,pid2,simi,
        |row_number() over(partition by gid order by simi desc) as rn
        |
        |from rec_tmp
        |
      """.stripMargin).where("rn<=2").show(10, false)

    spark.close()

  }

}
