package com.xxh.user.rec

import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config._
import org.apache.spark.SparkConf
import org.apache.spark.mllib.recommendation.{ALS, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.jblas.DoubleMatrix

// Similar-movie recommendations: the top-N movies most similar to movie `mid`,
// as (movie id, cosine-similarity score) pairs.
// NOTE(review): name has a typo ("Simiar" -> "Similar"); left as-is because the
// class name is referenced elsewhere and maps to the persisted Mongo document shape.
case class MovieSimiarRec(mid: Int, rec: Seq[Recommendation])

case class Recommendation(mid: Int, score: Double)

// Personalized recommendations for user `uid`, derived from predicted ratings:
// the top-N movies with the highest predicted score for that user.
case class UserRecommendation(uid: Int, rec: Seq[Recommendation])



//基于模型计算推荐。
/**
 * Offline recommendation job.
 *
 * Trains an ALS model on user/movie ratings read from MongoDB, then produces
 * (1) per-user top-20 movie recommendations (persisted to `user_movie_rec`) and
 * (2) per-movie top-20 similar movies via cosine similarity of the ALS item
 * feature vectors (persistence currently disabled).
 */
object offlineRec {

  // Base input URI. The connector requires a collection in the URI; reads below
  // override the collection name per call via ReadConfig.
  val MONGODB_URI = "mongodb://localhost:27017/movierec.movie"

  def main(args: Array[String]): Unit = {

    // Local-mode Spark configuration; the Mongo input URI is picked up by ReadConfig(spark).
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("OfflineRecommend")
      .set("spark.mongodb.input.uri", MONGODB_URI)

    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    import spark.implicits._

    // Cached: the ratings feed both the id extraction below and ALS training.
    val ratingDf = readDataFrameFromMongo("new_rating", spark).cache()

    // Only positive scores count as real ratings. Filter once and cache the result
    // instead of re-running the same filter separately for users and for movies
    // (the original duplicated this pipeline).
    val positiveRatings = ratingDf.rdd
      .filter(_.getAs[Int]("score") > 0)
      .cache()

    val allUserRDD: RDD[Int] = positiveRatings.map(_.getAs[Int]("uid")).distinct()
    val allMovieRDD: RDD[Int] = positiveRatings.map(_.getAs[Int]("mid")).distinct()

    // Every (user, movie) pair we want a predicted score for — the "empty" rating matrix.
    val userMovies = allUserRDD.cartesian(allMovieRDD)

    // Training set, capped at 10k rows to keep the local run cheap.
    val data: RDD[Rating] = ratingDf.limit(10000).map(row => {
      Rating(row.getAs[Int]("uid"), row.getAs[Int]("mid"), row.getAs[Int]("score"))
    }).rdd

    /**
     * rank:    number of latent features
     * iterNum: number of ALS iterations
     * lambda:  regularization coefficient (library default is 0.01)
     */
    val (rank, iterNum, lambda) = (10, 5, 0.01)
    val model = ALS.train(data, rank, iterNum, lambda)

    // Predicted interest of every user in every movie.
    val pred: RDD[Rating] = model.predict(userMovies)
    pred.toDF().show(10)

    // Keep positive predictions only, then take each user's top-20 movies by score.
    val userRec: RDD[UserRecommendation] = pred
      .filter(_.rating > 0)
      .map(r => (r.user, (r.product, r.rating)))
      .groupByKey()
      .map { case (uid, scored) =>
        UserRecommendation(
          uid,
          scored.toList.sortWith(_._2 > _._2).take(20).map {
            case (mid, score) => Recommendation(mid, score)
          })
      }
    userRec.toDF().show(10)

    SaveToDB(userRec.toDF(), "user_movie_rec")

    // Item feature vectors learned by ALS, wrapped for linear-algebra operations.
    val movieFeatures: RDD[(Int, DoubleMatrix)] = model.productFeatures.map {
      case (mid, features) => (mid, new DoubleMatrix(features))
    }

    // Pairwise cosine similarity between movies; keep pairs above the 0.6
    // threshold and each movie's top-20 most similar neighbours.
    val movieSimarRec: RDD[MovieSimiarRec] = movieFeatures.cartesian(movieFeatures)
      .filter { case (a, b) => a._1 != b._1 } // drop self-pairs
      .map { case (a, b) => (a._1, (b._1, consinSim(a._2, b._2))) }
      .filter(_._2._2 > 0.6) // similarity threshold
      .groupByKey()
      .map { case (mid, neighbours) =>
        MovieSimiarRec(
          mid,
          neighbours.toList.sortWith(_._2 > _._2).take(20).map {
            case (other, score) => Recommendation(other, score)
          })
      }

//    SaveToDB(movieSimarRec.toDF(), "movie_simar_rec")

  }

  /**
   * Cosine similarity of two feature vectors.
   * Returns 0.0 when either vector has zero norm — the original divided by zero
   * and produced NaN in that case (NaN would also fail every `> 0.6` check, so
   * downstream filtering behavior is unchanged, just made explicit).
   */
  def consinSim(matrix1: DoubleMatrix, matrix2: DoubleMatrix): Double = {
    val denom = matrix1.norm2() * matrix2.norm2()
    if (denom == 0.0) 0.0 else matrix1.dot(matrix2) / denom
  }

  /** Overwrite the given Mongo collection with the DataFrame's rows. */
  def SaveToDB(df: DataFrame, collectionName: String): Unit = {
    df.write
      .option("uri", "mongodb://localhost:27017/movierec")
      .option("collection", collectionName)
      .format("com.mongodb.spark.sql")
      .mode("overwrite")
      .save()
  }

  /** Load one collection as a DataFrame, inheriting connection settings from the session. */
  def readDataFrameFromMongo(collectionName: String, sc: SparkSession): DataFrame = {
    // Override only the collection; everything else comes from the session's input config.
    val readConfig = ReadConfig(Map("collection" -> collectionName), Some(ReadConfig(sc)))
    MongoSpark.load(sc, readConfig)
  }

}
