package com.wtw.offline

import org.apache.spark.SparkConf
import org.apache.spark.mllib.recommendation.{ALS, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.jblas.DoubleMatrix

// One user rating event as read from the MySQL Rating table (uid rated mid with score at timestamp)
case class MovieRating(uid: Int, mid: Int, score: Double, timestamp: Int)

// Per-user recommendation list based on predicted ratings.
// `recs` is a serialized list of "[mid,score]" entries joined by commas.
case class UserRecs(uid: Int, recs: String)

// Movie similarity list derived from the LFM (latent factor model) features.
// `recs` is a serialized list of "[mid,simScore]" entries joined by commas.
case class MovieRecs(mid: Int, recs: String)

// JDBC connection settings for MySQL, passed implicitly to read/write helpers
case class MysqlConfig(url: String, user: String, password: String, driver: String)

object OfflineRecommender {

  /** Cosine similarity between two movie feature vectors.
    *
    * NOTE(review): yields NaN when either vector has zero norm; the downstream
    * `> 0.6` filter discards NaN, so this is currently harmless — confirm if reused.
    */
  def consinSim(movie1: DoubleMatrix, movie2: DoubleMatrix): Double =
    movie1.dot(movie2) / (movie1.norm2() * movie2.norm2())

  // Source tables in MySQL
  val MYSQL_MOVIE_COLLECTION = "Movie"
  val MYSQL_RATING_COLLECTION = "Rating"

  // Result tables (names kept as-is for compatibility; "RESCS" is a historical typo for "RECS")
  val USER_RESCS = "UserRecs"
  val MOVIE_RESCS = "MovieRecs"
  // Maximum number of movies kept in each user's recommendation list
  val USER_MAX_RECOMMENDATION = 20

  /** Overwrite table `collections_name` in MySQL with the contents of `df`. */
  def storeDFInMysql(df: DataFrame, collections_name: String)(implicit mysqlConfig: MysqlConfig) = {
    df.write.mode("overwrite")
      .format("jdbc")
      .option("driver", mysqlConfig.driver)
      .option("url", mysqlConfig.url)
      .option("dbtable", collections_name) // table name
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .save()
  }

  /** Load MySQL table `table` as a DataFrame — read-side mirror of [[storeDFInMysql]]. */
  private def loadDFFromMysql(spark: SparkSession, table: String)(implicit mysqlConfig: MysqlConfig): DataFrame =
    spark.read.format("jdbc")
      .option("driver", mysqlConfig.driver)
      .option("url", mysqlConfig.url)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .option("dbtable", table)
      .load()

  def main(args: Array[String]): Unit = {
    val config = Map(
      "spark.cores" -> "local[*]",
      "mysql.url" -> "jdbc:mysql://localhost:3306/recommend",
      "mysql.user" -> "root",
      "mysql.password" -> "root",
      "mysql.driver" -> "com.mysql.jdbc.Driver"
    )

    val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("OfflineRecommender")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    import spark.implicits._
    implicit val mysqlConfig: MysqlConfig =
      MysqlConfig(config("mysql.url"), config("mysql.user"), config("mysql.password"), config("mysql.driver"))

    // (uid, mid, score) triples; cached because the RDD is reused for users,
    // movies and ALS training below
    val ratingRDD = loadDFFromMysql(spark, MYSQL_RATING_COLLECTION)
      .as[MovieRating]
      .rdd
      .map(r => (r.uid, r.mid, r.score))
      .cache()

    // Distinct users that rated something, and distinct movies that were rated
    val userRDD = ratingRDD.map(_._1).distinct()
    val movieRDD = ratingRDD.map(_._2).distinct()

    // Train the latent-factor model (ALS)
    val trainData = ratingRDD.map { case (uid, mid, score) => Rating(uid, mid, score) }
    val (rank, iterations, lambda) = (50, 5, 0.01)
    val model = ALS.train(trainData, rank, iterations, lambda)

    // Predict a score for every (user, movie) pair from the latent features
    // and keep the top-N per user
    val userMovies: RDD[(Int, Int)] = userRDD.cartesian(movieRDD)

    val preRatings: RDD[Rating] = model.predict(userMovies)
    val userRecs = preRatings
      .filter(_.rating > 0)
      .map(r => (r.user, (r.product, r.rating)))
      .groupByKey()
      .map { case (uid, recs) =>
        val top = recs.toList.sortWith(_._2 > _._2).take(USER_MAX_RECOMMENDATION)
        UserRecs(uid, top.map { case (mid, score) => s"[$mid,$score]" }.mkString(","))
      }
      .toDF()

    storeDFInMysql(userRecs, USER_RESCS)

    // Movie-to-movie similarity matrix from the movie latent features
    val movieFeatures = model.productFeatures.map { case (mid, features) =>
      (mid, new DoubleMatrix(features))
    }

    val movieRecs = movieFeatures.cartesian(movieFeatures)
      .filter { case (a, b) => a._1 != b._1 } // skip self-pairs
      .map { case (a, b) => (a._1, (b._1, consinSim(a._2, b._2))) } // cosine similarity
      .filter(_._2._2 > 0.6) // keep only sufficiently similar pairs
      .groupByKey()
      .map { case (mid, items) =>
        // NOTE(review): entries are deliberately left unsorted to match the
        // previous output format; sort by similarity here if consumers need it
        MovieRecs(mid, items.map { case (other, sim) => s"[$other,$sim]" }.mkString(","))
      }
      .toDF()

    storeDFInMysql(movieRecs, MOVIE_RESCS)

    spark.stop()
  }
}
