package com.xxh.user.rec

import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import org.apache.spark.SparkConf
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel, Tokenizer}
import org.apache.spark.ml.linalg.SparseVector
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.jblas.DoubleMatrix


// One recommended movie: the target movie id and its cosine-similarity score.
case class Recommendation(mid: Int, score: Double)

// Movie metadata as stored in the "movie" collection ("types" is a "|"-separated genre string).
case class Movie(mid: Int, mName: String, issue: String, types: String)
// Content-based similar-movie recommendations for one movie.
// NOTE(review): "Simiar" is a typo for "Similar", but the name is kept — it is referenced below.
case class MovieSimiarRec(mid: Int, rec: Seq[Recommendation])


/**
 * Content-based movie recommender.
 *
 * Builds a TF-IDF feature vector from each movie's genre string, computes
 * pairwise cosine similarity between all movies, and writes the top-N most
 * similar movies per movie into the `movie_content_rec` Mongo collection.
 */
object ContentRec {
  // Input URI; the database/collection part ("movierec.movie") is the default,
  // but readDataFrameFromMongo overrides the collection explicitly per call.
  val MONGODB_URI = "mongodb://localhost:27017/movierec.movie"

  // Pipeline tuning knobs (previously inline magic numbers).
  private val NumFeatures = 50        // hash buckets for the TF sparse vector
  private val SimThreshold = 0.6      // minimum cosine similarity to keep a pair
  private val MaxRecommendations = 20 // top-N similar movies kept per movie

  def main(args: Array[String]): Unit = {
    // Local Spark session reading from MongoDB.
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("ContentRec")
      .set("spark.mongodb.input.uri", MONGODB_URI)

    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    val movieDf = readDataFrameFromMongo("movie", spark)

    // "Action|Comedy" -> "Action Comedy" so the whitespace Tokenizer can split genres.
    val newMovieDf: DataFrame = movieDf.withColumn("types", regexp_replace(col("types"), "\\|", " "))
    newMovieDf.show(10)

    // Split the genre string into individual genre words.
    val tokenizer = new Tokenizer().setInputCol("types").setOutputCol("type")
    val tokenized: DataFrame = tokenizer.transform(newMovieDf)
    tokenized.show(10)

    // Term frequency: hash each genre word into a fixed-size sparse vector.
    // The output is a SparseVector: (vector length, occupied indices, values).
    val hashingTF: HashingTF = new HashingTF()
      .setInputCol("type")
      .setOutputCol("rawFeatures")
      .setNumFeatures(NumFeatures)
    val featurized: DataFrame = hashingTF.transform(tokenized)
    featurized.show(50)

    // IDF penalizes genres that occur in many movies (popular-tag damping).
    val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
    val idfModel: IDFModel = idf.fit(featurized)
    val resultFrame: DataFrame = idfModel.transform(featurized)
    resultFrame.show(50)

    // Convert each sparse feature vector into (mid, DoubleMatrix) for similarity math.
    val movieFeatures: RDD[(Int, DoubleMatrix)] = resultFrame.rdd
      .map(row => (row.getAs[Int]("mid"), row.getAs[SparseVector]("features").toArray))
      .map { case (mid, features) => (mid, new DoubleMatrix(features)) }

    // All-pairs cosine similarity (O(n^2) cartesian), keep strong matches,
    // then the top-N most similar movies per movie.
    val movieSimRec: RDD[MovieSimiarRec] = movieFeatures.cartesian(movieFeatures)
      .filter { case (a, b) => a._1 != b._1 } // drop self-pairs
      .map { case (a, b) => (a._1, (b._1, consinSim(a._2, b._2))) }
      .filter { case (_, (_, score)) => score > SimThreshold }
      .groupByKey()
      .map { case (mid, recs) =>
        MovieSimiarRec(
          mid,
          recs.toList
            .sortWith(_._2 > _._2)
            .take(MaxRecommendations)
            .map { case (otherMid, score) => Recommendation(otherMid, score) }
        )
      }

    SaveToDB(movieSimRec.toDF(), "movie_content_rec")

    // Release Spark resources before exiting (previously never stopped).
    spark.stop()
  }

  /**
   * Cosine similarity between two feature vectors.
   *
   * Guards against zero-norm vectors: the original expression divided by
   * `norm2 * norm2` unconditionally, yielding NaN for an all-zero vector.
   * A zero vector shares no genres with anything, so 0.0 is returned instead;
   * downstream filtering (`> SimThreshold`) behaves the same either way.
   */
  def consinSim(matrix1: DoubleMatrix, matrix2: DoubleMatrix): Double = {
    val denom = matrix1.norm2() * matrix2.norm2()
    if (denom == 0.0) 0.0 else matrix1.dot(matrix2) / denom
  }

  /** Overwrites `collectionName` in the `movierec` database with `df`. */
  def SaveToDB(df: DataFrame, collectionName: String): Unit = {
    df.write
      .option("uri", "mongodb://localhost:27017/movierec")
      .option("collection", collectionName)
      .format("com.mongodb.spark.sql")
      .mode("overwrite")
      .save()
  }

  /**
   * Loads `collectionName` as a DataFrame, reusing the session's Mongo input
   * URI for host/database and overriding only the collection.
   */
  def readDataFrameFromMongo(collectionName: String, sc: SparkSession): DataFrame = {
    val readConfig = ReadConfig(Map("collection" -> collectionName), Some(ReadConfig(sc)))
    MongoSpark.load(sc, readConfig)
  }
}