package com.cw.recommend.contentBased

import com.cw.recommend.common.Runner.{RunnerConfig, runSpark}
import com.cw.recommend.common.constant.{CONTENT_BASED_PRODUCT_SIM_LIST, PRODUCT_COLLECTION, PRODUCT_SIM_LIST}
import com.cw.recommend.common.feature.computeFeatureSimilarity
import com.cw.recommend.common.util.MongoDBUtil.readMongoDB
import com.cw.recommend.common.model._
import com.cw.recommend.common.util.SyntaxUtil.Syntax
import org.apache.spark.ml.feature._
import org.apache.spark.ml.linalg.SparseVector

object ContentBased {

  /**
   * Content-based product similarity batch job.
   *
   * Pipeline: read products from MongoDB -> tokenize the '|'-separated tag
   * string -> hash tokens into fixed-size term-frequency vectors (HashingTF)
   * -> reweight with IDF (TF-IDF) -> compute pairwise similarity over the
   * cartesian product of per-product feature vectors -> sink the similarity
   * list back to MongoDB.
   */
  def main(args: Array[String]): Unit = {

    // Implicit vals should carry an explicit type so implicit resolution
    // does not depend on inference (mandatory in Scala 3).
    implicit val config: RunnerConfig = RunnerConfig("ContentBased")

    runSpark { spark =>
      import spark.implicits._

      // Tags are stored as a '|'-separated string; replace the separator
      // with spaces so the whitespace-based Tokenizer can split them.
      val productTagsDF = readMongoDB(spark, PRODUCT_COLLECTION)
        .as[Product]
        .map(p => (p.productId, p.name, p.tags.replaceAll("\\|", " ")))
        .toDF("productId", "name", "raw_tags")

      // Tokenize (Spark's Tokenizer lowercases and splits on whitespace).
      val tokenizer = new Tokenizer().setInputCol("raw_tags").setOutputCol("tags")
      val tagsDF = tokenizer.transform(productTagsDF)

      // Hash token lists into sparse term-frequency vectors.
      // NOTE(review): 500 buckets is small — hash collisions will merge
      // distinct tags; raise setNumFeatures if the tag vocabulary grows.
      val hashingTF = new HashingTF().setInputCol("tags").setOutputCol("rawFeatures").setNumFeatures(500)
      val sparseVecDF = hashingTF.transform(tagsDF)

      // Reweight term frequencies by inverse document frequency (TF-IDF).
      val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
      val idfModel = idf.fit(sparseVecDF)
      val featuredDF = idfModel.transform(sparseVecDF)

      // Pair every product's (dense) feature array with every other product's.
      // cartesian is O(n^2) — inherent to all-pairs similarity — and keeps
      // both (a, b) and (b, a); only self-pairs are dropped here.
      val productFeatures = featuredDF
        .map(row => row.getAs[Int]("productId") -> row.getAs[SparseVector]("features").toArray)
        .rdd
      val productSimList = productFeatures
        .cartesian(productFeatures)
        .filter { case (a, b) => a._1 != b._1 } |>
        (computeFeatureSimilarity(spark, _))

      productSimList.show()
      import com.cw.recommend.common.util.MongoDBUtil._
      productSimList.sinkMongoDB(CONTENT_BASED_PRODUCT_SIM_LIST)
    }
  }
}
