package com.ocean.offlinelfmrecommend

import java.sql.Connection

import org.apache.spark.SparkConf
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}


/**
  * A single user-movie rating row as read from MySQL.
  *
  * Structurally identical to the Rating case class used by the other modules;
  * it carries a different name only to avoid clashing with
  * org.apache.spark.mllib.recommendation.Rating, which is also in scope here.
  *
  * @param uid       user id
  * @param mid       movie id
  * @param score     rating the user gave the movie
  * @param timestamp time the rating was recorded (dropped before training;
  *                  presumably epoch time — confirm against the source table)
  */
case class MovieRating(uid: Int, mid: Int, score: Double, timestamp: Long)

/**
  * MySQL connection settings, passed implicitly to the save methods.
  *
  * @param uri      full JDBC connection URL
  * @param user     database user name
  * @param password database password
  */
case class MysqlConfig(uri: String, user: String, password: String)


/**
  * Offline recommendations based on a latent factor model (LFM) trained with ALS.
  *
  * Reads user-movie ratings from MySQL, trains a matrix-factorization model,
  * then writes two result tables back to MySQL:
  *   1. a per-user top-N movie recommendation list (predicted ratings), and
  *   2. a movie-to-movie similarity list computed from the learnt movie
  *      feature vectors (cosine similarity).
  */
object OfflineLFMRecommend {

  // MySQL table the raw ratings are read from.
  val MySql_RATING_Table = "rating"

  // Per-user recommendation list based on predicted ratings: uid --> List(mid, score)
  val USER_RECOMMENDATION = "user_recommendation"
  // Movie similarity list derived from the LFM movie feature vectors: mid --> List(mid, score)
  val MOVIE_RECOMMENDATION = "movie_recommendation"
  // Number of movies recommended to each user.
  val USER_MAX_RECOMMENDATION = 20
  // Minimum cosine similarity for a movie pair to be kept.
  val MOVIE_SIMILARITY_THRESHOLD = 0.6


  def main(args: Array[String]): Unit = {
    val config = Map(
      "spark.cores" -> "local[*]",
      "mysql.uri" -> "jdbc:mysql://192.168.10.105:3306/recommend?useUnicode=true&characterEncoding=utf8&rewriteBatchedStatements=true&useSSL=false",
      "mysql.user" -> "root",
      "mysql.password" -> "cde32wsxzaq1"
    )

    // Build the SparkConf / SparkSession.
    val sparkConf: SparkConf = new SparkConf()
      .setMaster(config("spark.cores"))
      .setAppName("OfflineLFMRecommend")
      .set("spark.driver.maxResultSize", "5g")

    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    import spark.implicits._

    implicit val mysqlConfig: MysqlConfig =
      MysqlConfig(config("mysql.uri"), config("mysql.user"), config("mysql.password"))

    // Load the ratings and drop the timestamp. Cached because the RDD is
    // reused three times below (user ids, movie ids, training data).
    val ratingRDD: RDD[(Int, Int, Double)] = spark.read.format("jdbc")
      .option("url", mysqlConfig.uri)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .option("dbtable", MySql_RATING_Table)
      .load()
      .as[MovieRating]
      .rdd
      .map(rating => (rating.uid, rating.mid, rating.score))
      .cache()

    // All distinct users and all distinct movies seen in the ratings.
    val userRDD: RDD[Int] = ratingRDD.map(_._1).distinct()
    val movieRDD: RDD[Int] = ratingRDD.map(_._2).distinct()

    // mllib's Rating structure is (user, product, rating).
    val trainingData: RDD[Rating] = ratingRDD.map(x => Rating(x._1, x._2, x._3))

    /**
      * Empirical training parameters:
      * rank is the dimension of the latent feature vectors, iterations the
      * number of ALS iterations, lambda the regularization coefficient.
      *
      * @see com.ocean.offlinelfmrecommend.ALSTrainer
      */
    val (rank, iterations, lambda) = (100, 10, 0.1)
    // Train the matrix-factorization model.
    val model: MatrixFactorizationModel = ALS.train(trainingData, rank, iterations, lambda)

    // Cartesian product of users x movies: every (user, movie) cell whose
    // rating we want the model to predict.
    val userMoviesRDD: RDD[(Int, Int)] = userRDD.cartesian(movieRDD)

    // Predict a rating for every cell with the trained model.
    val predictRatings: RDD[Rating] = model.predict(userMoviesRDD)

    val userRecommendationDF: DataFrame = predictRatings
      // Keep only strictly positive predicted ratings.
      .filter(_.rating > 0)
      // Re-key as (user, (product, rating)) so we can group per user.
      .map(ratingItem => (ratingItem.user, (ratingItem.product, ratingItem.rating)))
      .groupByKey()
      .map { case (uid, userRecommendationIter) =>
        // Top-N movies per user, highest predicted rating first.
        val recommendationForUser: List[(Int, Double)] =
          userRecommendationIter.toList.sortBy(_._2)(Ordering.Double.reverse).take(USER_MAX_RECOMMENDATION)
        (uid, recommendationForUser)
      }
      .flatMap { case (uid, recommendationList) =>
        // Flatten to one (user id, recommended movie id, predicted score) row
        // per recommendation.
        recommendationList.map { case (mid, score) => (uid, mid, score) }
      }
      .toDF("uid", "recommend_mid", "recommend_mid_score")

    saveToMySqlUserRecommendation(USER_RECOMMENDATION, userRecommendationDF)

    // Movie latent feature vectors learnt by ALS.
    val movieFeatures: RDD[(Int, Array[Double])] = model.productFeatures

    // Pairwise cosine similarity between all movie feature vectors.
    val movieRecommendationDF: DataFrame = movieFeatures.cartesian(movieFeatures)
      .filter {
        // A movie's similarity with itself is always 1 — skip it.
        case (movieFeature1, movieFeature2) => movieFeature1._1 != movieFeature2._1
      }
      .map { case ((mid1, features1), (mid2, features2)) =>
        // Package as (movie 1, (movie 2, similarity score)).
        (mid1, (mid2, cosineSimilarity(features1, features2)))
      }
      .filter(_._2._2 > MOVIE_SIMILARITY_THRESHOLD)
      .groupByKey()
      .map { case (mid, similarMovieIter) =>
        // Most similar movies first.
        (mid, similarMovieIter.toList.sortBy(_._2)(Ordering.Double.reverse))
      }
      .flatMap { case (mid, similarMovieList) =>
        similarMovieList.map { case (simMid, simScore) => (mid, simMid, simScore) }
      }
      .toDF("mid", "sim_mid", "sim_score")

    saveToMySqlMovieRecommendation(MOVIE_RECOMMENDATION, movieRecommendationDF)

    ratingRDD.unpersist()
    spark.stop()
  }

  /**
    * Writes the per-user recommendation list to MySQL and creates lookup
    * indexes on uid and recommend_mid (best effort — see helper).
    *
    * @param USER_RECOMMENDATION  target table name
    * @param userRecommendationDF rows of (uid, recommend_mid, recommend_mid_score)
    * @param mysqlConfig          connection settings
    */
  def saveToMySqlUserRecommendation(USER_RECOMMENDATION: String, userRecommendationDF: DataFrame)(implicit mysqlConfig: MysqlConfig): Unit =
    writeTableWithIndexes(USER_RECOMMENDATION, userRecommendationDF,
      Seq("idx_uid" -> "uid", "idx_recommend_mid" -> "recommend_mid"))

  /**
    * Writes the movie similarity list to MySQL and creates lookup indexes
    * on mid and sim_mid (best effort — see helper).
    *
    * @param MOVIE_RECOMMENDATION  target table name
    * @param movieRecommendationDF rows of (mid, sim_mid, sim_score)
    * @param mysqlConfig           connection settings
    */
  def saveToMySqlMovieRecommendation(MOVIE_RECOMMENDATION: String, movieRecommendationDF: DataFrame)(implicit mysqlConfig: MysqlConfig): Unit =
    writeTableWithIndexes(MOVIE_RECOMMENDATION, movieRecommendationDF,
      Seq("idx_mid" -> "mid", "idx_sim_mid" -> "sim_mid"))

  /**
    * Overwrites `table` with `df` via JDBC, then creates each requested
    * (index name, column) index on it.
    *
    * Index creation is best effort: with truncate=true the table survives
    * reruns, so the indexes usually already exist and the DDL fails — that
    * is harmless and is only logged. (Note: the original code built the
    * second index's statement but never executed it; this helper executes
    * every statement.)
    */
  private def writeTableWithIndexes(table: String, df: DataFrame, indexes: Seq[(String, String)])(implicit mysqlConfig: MysqlConfig): Unit = {
    df.write
      .mode("overwrite")
      .format("jdbc")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("url", mysqlConfig.uri)
      .option("dbtable", table)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .option("isolationLevel", "SERIALIZABLE")
      .option("truncate", "true")
      .option("batchsize", "2000")
      .save()

    // Open the connection only after the (potentially long) Spark write has
    // finished, and guarantee it is closed even if the DDL throws.
    val connection: Connection = JDBCUtil.getConnection
    try {
      for ((indexName, column) <- indexes) {
        try {
          connection.prepareStatement(
            s"""
               |create index $indexName on  $table($column)
          """.stripMargin).execute()
        } catch {
          // If the index already exists the DDL fails; log and keep going.
          case e: Exception => e.printStackTrace()
        }
      }
    } finally {
      connection.close()
    }
  }


  /**
    * Cosine similarity of two equal-length vectors.
    *
    * @param x vector 1
    * @param y vector 2
    * @return dot(x, y) / (|x| * |y|); NaN when either vector is all zeros
    * @throws IllegalArgumentException if the vectors differ in length
    */
  def cosineSimilarity(x: Array[Double], y: Array[Double]): Double = {
    require(x.length == y.length, "vectors must have the same dimension")
    dotProduct(x, y) / (magnitude(x) * magnitude(y))
  }

  /** Dot product of two vectors (truncates to the shorter length). */
  def dotProduct(x: Array[Double], y: Array[Double]): Double =
    x.zip(y).map { case (a, b) => a * b }.sum

  /** Euclidean (L2) norm of a vector. */
  def magnitude(x: Array[Double]): Double =
    math.sqrt(x.map(i => i * i).sum)
}
