package cn.edu.recommender

import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
import org.apache.spark.ml.linalg.SparseVector
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.jblas.DoubleMatrix

import java.lang.Thread.sleep

/**
 * A product document loaded from the MongoDB `Product` collection.
 *
 * @param productId  unique product identifier
 * @param name       display name of the product
 * @param imageUrl   URL of the product image
 * @param categories category string (raw form from MongoDB)
 * @param tags       user tags; '|'-separated, as evidenced by the tokenizer
 *                   preprocessing in SameTagRecommender.main
 */
case class Product(productId: Int, name: String, imageUrl: String, categories: String, tags: String)

/**
 * Content-based recommender: computes product-to-product similarity from
 * user tags via TF-IDF features and cosine similarity, then writes the
 * per-product recommendation lists back to MongoDB.
 */
object SameTagRecommender {

  // Input collection in MongoDB.
  val MONGODB_PRODUCT_COLLECTION = "Product"
  // Output collection for the tag-similarity recommendations.
  val Same_Tag_PRODUCT_RECS = "SameTagProductRecs"

  // Only keep product pairs whose cosine similarity exceeds this threshold.
  val SIM_SCORE_THRESHOLD = 0.6

  def main(args: Array[String]): Unit = {

    // Runtime configuration; "spark.cores" doubles as the Spark master URL.
    val config = Map(
      "spark.cores" -> "local[*]",
      "mongo.uri" -> "mongodb://localhost:27017/recommender",
      "mongo.db" -> "recommender"
    )

    val mongoConfig = MongoConfig(config("mongo.uri"), config("mongo.db"))

    val spark = SparkSession.builder().appName("SameTagRecommender").master(config("spark.cores")).getOrCreate()

    // Required for Dataset/DataFrame conversions (.as[Product], .toDF, .toDS).
    import spark.implicits._

    // Load products and turn the '|'-separated tag string into a
    // space-separated one, since Tokenizer splits on whitespace by default.
    val productTagsDF = spark
      .read
      .option("uri", mongoConfig.uri)
      .option("collection", MONGODB_PRODUCT_COLLECTION)
      .format("com.mongodb.spark.sql")
      .load()
      .as[Product]
      .map(x => (x.productId, x.name, x.tags.replace('|', ' ')))
      .toDF("productId", "name", "tags")
      .cache()

    // TF-IDF pipeline: tokenize -> hashed term frequencies -> inverse document frequency.
    val tokenizer = new Tokenizer().setInputCol("tags").setOutputCol("words")
    val wordsDF = tokenizer.transform(productTagsDF)

    val hashingTF = new HashingTF().setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(200)
    val tfDF = hashingTF.transform(wordsDF)

    val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
    // Fit the IDF model over the whole corpus, then rescale term frequencies.
    val idfModel = idf.fit(tfDF)
    val tfidfDF = idfModel.transform(tfDF)

    // Extract (productId, dense feature array) pairs; jblas DoubleMatrix
    // operates on plain arrays, so drop down to an RDD here.
    val productFeaturesRDD = tfidfDF
      .map(row => (row.getAs[Int]("productId"), row.getAs[SparseVector]("features").toArray))
      .rdd
      .map { case (productId, features) => (productId, new DoubleMatrix(features)) }

    // Pairwise cosine similarity via a self-cartesian join; drop self-pairs,
    // keep only sufficiently similar products, and sort each product's
    // recommendation list by descending similarity.
    val productRecsDS = productFeaturesRDD.cartesian(productFeaturesRDD)
      .filter {
        case (a, b) => a._1 != b._1
      }
      .map {
        case (a, b) =>
          val simScore = this.consinSim(a._2, b._2)
          (a._1, (b._1, simScore))
      }
      .filter(_._2._2 > SIM_SCORE_THRESHOLD)
      .groupByKey()
      .map {
        case (productId, items) =>
          ProductRecs(productId, items.toList.sortWith(_._2 > _._2).map(x => Recommendation(x._1, x._2)))
      }
      .toDS()

    productRecsDS
      .write
      .option("uri", mongoConfig.uri)
      .option("collection", Same_Tag_PRODUCT_RECS)
      .mode("overwrite")
      .format("com.mongodb.spark.sql")
      .save()

    // NOTE(review): the original code slept 5 minutes here (presumably to keep
    // the Spark UI reachable after the job); removed — use the Spark history
    // server for post-run inspection instead.
    spark.stop()
  }

  /**
   * Cosine similarity between two product feature vectors.
   *
   * Returns 0.0 when either vector has zero norm: the unguarded formula would
   * yield NaN (0/0), which silently failed the downstream similarity filter.
   *
   * @param product1 TF-IDF feature vector of the first product
   * @param product2 TF-IDF feature vector of the second product
   * @return cosine similarity in [-1, 1], or 0.0 for a zero-norm input
   */
  def consinSim(product1: DoubleMatrix, product2: DoubleMatrix): Double = {
    val denominator = product1.norm2() * product2.norm2()
    if (denominator == 0.0) 0.0 else product1.dot(product2) / denominator
  }
}
