package com.xxh.user.rec

import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import org.apache.spark.SparkConf
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel}
import org.apache.spark.ml.linalg.SparseVector
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.jblas.DoubleMatrix


// One recommendation entry: the id of a similar video and its cosine-similarity score.
case class VideoRecScore(vid: String, score: Double)

// Final document shape intended for persistence: a source video id together with
// its list of similar-video recommendations (best match first).
case class VideoSimilarlyRec(vid: String, rec: Seq[VideoRecScore])


/**
  * Content-based video recommendation job.
  *
  * Reads videos from MongoDB, hashes their tags into term-frequency vectors,
  * re-weights them with IDF to penalize very common ("hot") tags, and then
  * computes pairwise cosine similarity to find, for each video, its most
  * similar peers.
  */
object VideoContentRec {

  val MONGODB_URI = "mongodb://localhost:27017/movierec.movie"

  def main(args: Array[String]): Unit = {

    // Local-mode Spark configuration; the Mongo input URI points at the source collection.
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("ContentRec")
      .set("spark.mongodb.input.uri", MONGODB_URI)

    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    try {
      val videoDf = readDataFrameFromMongo("video", spark)

      // Hash the "tags" column into a fixed-size term-frequency vector.
      // NOTE(review): 50 hash buckets is very small — distinct tags will collide;
      // consider a larger setNumFeatures for real data.
      val hashingTF: HashingTF = new HashingTF()
        .setInputCol("tags")
        .setOutputCol("rawFeatures")
        .setNumFeatures(50)

      // NOTE(review): limit(100) caps the corpus (presumably for local testing);
      // remove it for a full run — the cartesian product below is O(n^2).
      val featureDf: DataFrame = hashingTF.transform(videoDf.limit(100))

      featureDf.show(50)

      // TF-IDF: down-weight tags that appear on many videos.
      val idfModel: IDFModel = new IDF()
        .setInputCol("rawFeatures")
        .setOutputCol("features")
        .fit(featureDf)

      val weightedDf: DataFrame = idfModel.transform(featureDf)

      // Keep only (video id, feature vector); "bv" is the video's id column.
      val videoFeatures: RDD[(String, DoubleMatrix)] = weightedDf.rdd
        .map { row =>
          val features = row.getAs[SparseVector]("features").toArray
          (row.getAs[String]("bv"), new DoubleMatrix(features))
        }

      // Pairwise cosine similarity over the cartesian product, keeping only
      // sufficiently similar pairs, then the top matches per source video.
      val videoSimilarityRdd: RDD[VideoSimilarlyRec] = videoFeatures.cartesian(videoFeatures)
        .filter { case (a, b) => a._1 != b._1 } // drop self-pairs
        .map { case (a, b) => (a._1, (b._1, consinSim(a._2, b._2))) }
        .filter(_._2._2 > 0.6) // similarity threshold
        .groupByKey()
        .map { case (vid, scored) =>
          // Keep at most 5000 matches per video, highest score first.
          val top = scored.toList
            .sortWith(_._2 > _._2)
            .take(5000)
            .map { case (otherVid, score) => VideoRecScore(otherVid, score) }
          VideoSimilarlyRec(vid = vid, rec = top)
        }

      import spark.implicits._
      val recDf = videoSimilarityRdd.toDF()
      recDf.show(20)

      // NOTE(review): results are only displayed — SaveToDB is never invoked, so
      // nothing is persisted. Call SaveToDB(recDf, "<collection>") once the target
      // collection name is confirmed.
    } finally {
      // Release the Spark context even if the job fails (was missing previously).
      spark.stop()
    }
  }

  /**
    * Persists a DataFrame to the local MongoDB `movierec` database,
    * overwriting the target collection.
    *
    * @param df             data to write
    * @param collectionName target Mongo collection
    */
  def SaveToDB(df: DataFrame, collectionName: String): Unit = {
    df.write
      .option("uri", "mongodb://localhost:27017/movierec")
      .option("collection", collectionName)
      .format("com.mongodb.spark.sql")
      .mode("overwrite")
      .save()
  }

  /**
    * Loads a MongoDB collection as a DataFrame, inheriting connection settings
    * (URI, database) from the SparkSession configuration.
    *
    * @param collectionName Mongo collection to read
    * @param sc             active SparkSession carrying the Mongo input URI
    * @return the collection's contents as a DataFrame
    */
  def readDataFrameFromMongo(collectionName: String, sc: SparkSession): DataFrame = {
    val readConfig = ReadConfig(Map("collection" -> collectionName), Some(ReadConfig(sc)))
    MongoSpark.load(sc, readConfig)
  }

  /**
    * Cosine similarity of two dense vectors.
    *
    * Returns 0.0 when either vector has zero norm — the previous version
    * divided by zero in that case and produced NaN, which silently fell
    * through the `> 0.6` similarity filter.
    */
  def consinSim(matrix1: DoubleMatrix, matrix2: DoubleMatrix): Double = {
    val denominator = matrix1.norm2() * matrix2.norm2()
    if (denominator == 0.0) 0.0 else matrix1.dot(matrix2) / denominator
  }

}
