import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.jblas.DoubleMatrix
/**
 * Offline movie recommender.
 *
 * Trains an ALS matrix-factorization model on the ratings collection, then
 * produces:
 *   1. per-user top-10 movie recommendations (user-based CF, currently not persisted),
 *   2. per-movie top-10 similar movies via cosine similarity of the learned
 *      product feature vectors (item-based CF), persisted to MongoDB.
 *
 * NOTE(review): rewritten from `object ... extends App` to an explicit
 * main() — App-trait fields are initialized through delayedInit and may be
 * observed uninitialized when Spark serializes closures that capture them.
 */
object OfflineRecommender {

  def main(args: Array[String]): Unit = {
    // 1. Initialize the Spark environment (session configured in Constant).
    val spark: SparkSession = Constant.initEnv()
    // 2. Implicit conversions required for .as[T] / .toDF() below.
    import spark.implicits._

    try {
      // 3. Read the rating collection: (user id [uid], movie id [mid], score).
      val ratingRDD: RDD[(Int, Int, Double)] = spark.read
        .option("uri", Constant.MONGO_URL)
        .option("collection", Constant.RATING_COLLECTION)
        .format("com.mongodb.spark.sql")
        .load()
        .as[MovieRating] // uid, mid, score, timestamp
        .rdd
        .map(rating => (rating.uid, rating.mid, rating.score))
        .cache() // reused twice: for the distinct user ids and as training data

      // Distinct user ids present in the ratings.
      val userRDD: RDD[Int] = ratingRDD.map(_._1).distinct()

      // All movie ids from the movie collection.
      val movieRDD: RDD[Int] = spark.read
        .option("uri", Constant.MONGO_URL)
        .option("collection", Constant.MOVIE_COLLECTION)
        .format("com.mongodb.spark.sql")
        .load()
        .as[Movie]
        .rdd
        .map(_.mid)
        .cache() // reused by the user x movie cartesian product below

      // ALS hyper-parameters:
      //   rank       = number of latent feature dimensions (50)
      //   iterations = ALS training sweeps (5)
      //   lambda     = regularization strength to curb overfitting (0.1)
      val (rank, iterations, lambda) = (50, 5, 0.1)

      // Training data: (uid, mid, score) -> MLlib Rating.
      val trainData: RDD[Rating] = ratingRDD.map(x => Rating(x._1, x._2, x._3))

      // Train the matrix-factorization model (user-based collaborative filtering).
      val model: MatrixFactorizationModel = ALS.train(trainData, rank, iterations, lambda)

      // Score every (user, movie) pair: cartesian product of all users and movies.
      val userMovies: RDD[(Int, Int)] = userRDD.cartesian(movieRDD)
      // Predicted ratings: Rating(uid, mid, score).
      val preRating: RDD[Rating] = model.predict(userMovies)

      // Per-user top-10 recommendations, keeping only positive predictions.
      val userRecs: DataFrame = preRating
        .filter(_.rating > 0)
        // (uid, (mid, score)) i.e. (user, (product, rating))
        .map(rating => (rating.user, (rating.product, rating.rating)))
        .groupByKey()
        .map {
          case (uid, recs) =>
            UserRecs(uid, recs
              .toList
              .sortWith(_._2 > _._2) // descending by predicted score
              .take(10)
              .map(x => Recommendation(x._1, x._2)))
        }.toDF()
      // User recommendations — persistence intentionally disabled in the original.
      // Constant.DataIntoMongoDB(userRecs,Constant.USER_RECS)

      /* Item-based collaborative filtering: cosine similarity between the
       * learned product feature vectors. */
      // (mid, feature vector) wrapped into jblas matrices for dot/norm ops.
      val movieFeatures = model.productFeatures.map {
        case (mid, features) => (mid, new DoubleMatrix(features))
      }

      // All movie pairs via cartesian product, minus self-pairs.
      val movieRecs: DataFrame = movieFeatures.cartesian(movieFeatures)
        .filter {
          // Drop (m, m): a movie is trivially most similar to itself.
          case (a, b) => a._1 != b._1
        }
        .map {
          case (a, b) =>
            val simScore = consinSim(a._2, b._2) // cosine similarity
            // (aMid, (bMid, simScore))
            (a._1, (b._1, simScore))
        }
        .filter(_._2._2 > 0.8) // keep only strongly similar pairs (also drops NaN)
        .groupByKey()
        .map {
          case (mid, recs) =>
            MovieRecs(mid, recs
              .toList
              .sortWith(_._2 > _._2) // descending by similarity
              .take(10)
              .map(x => Recommendation(x._1, x._2)))
        }.toDF()

      // Persist the item-similarity recommendations.
      Constant.DataIntoMongoDB(movieRecs, Constant.MOVIE_RECS)
    } finally {
      // Release cluster resources even if any stage above fails.
      spark.stop()
    }
  }

  /**
   * Cosine similarity between two feature vectors.
   *
   * @param movie1 feature vector of the first movie
   * @param movie2 feature vector of the second movie
   * @return dot(a, b) / (|a| * |b|); NaN if either vector has zero norm
   *         (such values are filtered out by the `> 0.8` threshold above)
   */
  def consinSim(movie1: DoubleMatrix, movie2: DoubleMatrix): Double =
    movie1.dot(movie2) / (movie1.norm2() * movie2.norm2())
}
