package com.etc

import org.apache.spark.SparkConf
import org.apache.spark.mllib.recommendation.{ALS, Rating}
import org.apache.spark.sql.SparkSession

/**
  * One movie record from the `^`-delimited Movies data set.
  *
  * Raw line layout (fields separated by `^`):
  *   1^ Toy Story (1995)^ In the highlands...^ 85^ August 26, 1997^ 1995^ English^
  *   Action|Drama|Romance|War^ Liam Neeson|Jessica Lange...^ Michael Caton-Jones^ tag1|tag2|...
  *
  * @param mid        movie ID
  * @param name       movie title
  * @param descri     plot description
  * @param timelong   running time
  * @param issue      release date
  * @param shoot      production year
  * @param language   language
  * @param genres     `|`-separated genre list
  * @param actors     `|`-separated cast list
  * @param directives director (field name kept as-is for schema compatibility)
  */
case class Movie(mid: Int, name: String, descri: String, timelong: String, issue: String,
                 shoot: String, language: String, genres: String, actors: String, directives: String)

/**
  * One row of the user-movie rating data set.
  *
  * Raw line layout: `1, 31, 2.5, 1260759144`
  *
  * @param uid       user ID
  * @param mid       movie ID
  * @param score     rating the user gave the movie
  * @param timestamp epoch seconds at which the rating was made
  */
case class MovieRating(uid: Int, mid: Int, score: Double, timestamp: Int)


/**
  * Connection settings for MongoDB.
  *
  * @param url MongoDB connection URI (e.g. `mongodb://host:27017/dbname`)
  * @param db  database name
  */
case class MongodbConfig(url: String, db: String)


/**
  * A single recommended item.
  *
  * @param rid movie ID of the recommended movie
  * @param r   predicted rating for that movie
  */
case class Recommendation(rid: Int, r: Double)

/**
  * The list of recommendations computed for one user.
  *
  * @param uid  user ID
  * @param recs recommended movies, ordered by descending predicted rating
  */
case class UserRecs(uid: Int, recs: Seq[Recommendation])



/**
  * Offline recommendation job: trains an ALS model on the ratings stored in
  * MongoDB and writes a top-10 movie recommendation list per user back to
  * MongoDB (collection [[Test.USER_RECS]]).
  */
object Test {
  val MONGODB_MOVIE_COLLECTION = "Movie"
  val MONGODB_RATING_COLLECTION = "Rating"

  // Output collection holding one UserRecs document per user.
  val USER_RECS = "user_recs"

  // Number of movies kept per user in the recommendation list.
  val USER_MAX_RECOMMENDATION = 10

  def main(args: Array[String]): Unit = {

    val config = Map(
      "spark.cores" -> "local[2]",
      "spark.name" -> "Test",
      "mongodb.url" -> "mongodb://localhost:27017/movies",
      "mongodb.db" -> "movies"
    )

    // Spark configuration (master/app name come from the config map above).
    val conf = new SparkConf().setMaster(config("spark.cores")).setAppName(config("spark.name"))

    val spark = SparkSession.builder().config(conf).getOrCreate()

    // MongoDB connection settings shared by all reads/writes below.
    val mongodbconf = MongodbConfig(config("mongodb.url"), config("mongodb.db"))

    import spark.implicits._

    // Ratings as (uid, mid, score); cached because it is reused for both
    // user extraction and ALS training.
    val ratingRDD = spark
      .read
      .option("uri", mongodbconf.url)
      .option("collection", MONGODB_RATING_COLLECTION)
      .format("com.mongodb.spark.sql")
      .load()
      .as[MovieRating]
      .rdd
      .map(rating => (rating.uid, rating.mid, rating.score))
      .cache()

    // Distinct user IDs.
    val userRDD = ratingRDD.map(_._1).distinct()

    // Distinct movie IDs (RDD[Int]).
    val movieRDD = spark
      .read
      .option("uri", mongodbconf.url)
      .option("collection", MONGODB_MOVIE_COLLECTION)
      .format("com.mongodb.spark.sql")
      .load()
      .as[Movie]
      .rdd
      .map(_.mid)

    // Training set in MLlib's Rating(user, product, rating) shape.
    val trainData = ratingRDD.map(x => Rating(x._1, x._2, x._3))

    // ALS hyper-parameters: latent factors, iterations, regularization.
    val (rank, iterations, lambda) = (50, 5, 0.1)

    // Train the ALS model.
    val model = ALS.train(trainData, rank, iterations, lambda)

    // Build the (user, movie) candidate pairs to score: every user crossed
    // with every movie. NOTE(review): a full cartesian product is O(users * movies);
    // acceptable for small data sets, revisit for large catalogs.
    val usermovies = userRDD.cartesian(movieRDD)

    val perRating = model.predict(usermovies)

    // Keep positive predictions, group per user, and retain the top-N movies
    // by predicted rating.
    val userRecs = perRating
      .filter(_.rating > 0)
      .map(rating => (rating.user, (rating.product, rating.rating)))
      .groupByKey()
      .map {
        case (uid, recs) =>
          UserRecs(uid,
            recs.toList
              .sortWith(_._2 > _._2)
              .take(USER_MAX_RECOMMENDATION)
              .map(x => Recommendation(x._1, x._2)))
      }
      .toDF()

    // Write the recommendations to the USER_RECS collection.
    // BUG FIX: this previously wrote to MONGODB_RATING_COLLECTION, which in
    // "overwrite" mode would have destroyed the source ratings data.
    userRecs
      .write
      .option("uri", mongodbconf.url)
      .option("collection", USER_RECS)
      .mode("overwrite")
      .format("com.mongodb.spark.sql")
      .save()

    spark.close()
  }
}
