package com.ocean.statisticrecommend

import java.text.SimpleDateFormat

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
  * Movie sample class.
  *
  * @param mid    movie ID
  * @param name   movie title
  * @param genres genres the movie belongs to, separated by "|"
  */
case class Movie(mid: Int, name: String, genres: String)

/**
  * Rating sample class.
  *
  * @param uid       user ID
  * @param mid       movie ID
  * @param score     rating score given by the user
  * @param timestamp rating time as a Unix epoch in seconds
  */
case class Rating(uid: Int, mid: Int, score: Double, timestamp: Long)

/**
  * MySQL connection configuration.
  *
  * @param uri      JDBC connection URI
  * @param user     database user name
  * @param password database password
  */
case class MysqlConfig(uri: String, user: String, password: String)

/**
  * Statistics-based recommendation service: computes popularity and quality
  * statistics over the movie/rating tables and writes them back to MySQL.
  */
object StatisticRecommend {

  // Names of the source tables in MySQL.
  val MySql_MOVIE_Table = "movie"
  val MySql_RATING_Table = "rating"

  // Minimum number of ratings a movie needs to take part in the IMDB
  // weighted-rating formula (the "m" in the formula).
  val LEAST_RATING_NUM = 20

  // Names of the result tables written back to MySQL:
  // movies ordered by total rating count
  val RATE_MORE_MOVIES = "rate_more_movies"
  // movies ordered by recent (per-month) rating count
  val RATE_MORE_RECENTLY_MOVIES = "rate_more_recently_movies"
  // movies ordered by weighted average score
  val AVERAGE_MOVIES = "average_movies"
  // top-10 movies per genre
  val GENRES_TOP_MOVIES = "genres_top_movies"

  /**
    * Entry point. Reads the movie and rating tables from MySQL, computes four
    * statistics (historical popularity, recent popularity, IMDB-style weighted
    * average score, top-10 per genre) and writes each result back to its own
    * MySQL table.
    */
  def main(args: Array[String]): Unit = {
    // NOTE(review): connection settings and credentials are hard-coded; they
    // should come from external configuration / environment before deployment.
    val config = Map(
      "spark.cores" -> "local[*]",
      "mysql.uri" -> "jdbc:mysql://192.168.10.105:3306/recommend?useUnicode=true&characterEncoding=utf8&rewriteBatchedStatements=true&useSSL=false",
      "mysql.user" -> "root",
      "mysql.password" -> "cde32wsxzaq1"
    )

    // Create the SparkConf and SparkSession.
    val sparkConf: SparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("StatisticRecommend")
    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    import spark.implicits._

    // Passed implicitly to every saveDFToMySql call below.
    implicit val mysqlConfig = MysqlConfig(config("mysql.uri"), config("mysql.user"), config("mysql.password"))

    // Read the source data (movie and rating tables) from the business database.
    val movieDF: DataFrame = spark.read.format("jdbc")
      .option("url", mysqlConfig.uri)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .option("dbtable", MySql_MOVIE_Table)
      .load()
      .as[Movie]
      .toDF()

    val ratingDF: DataFrame = spark.read.format("jdbc")
      .option("url", mysqlConfig.uri)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .option("dbtable", MySql_RATING_Table)
      .load()
      .as[Rating]
      .toDF()

    // Register a temp view so the statistics can be expressed in SQL.
    ratingDF.createOrReplaceTempView("rating")

    // 1. Historical popularity: movies ordered by total number of ratings.
    val rateMoreMoviesDF: DataFrame = spark.sql("select mid, count(mid) as count from rating group by mid order by count desc")

    saveDFToMySql(rateMoreMoviesDF, RATE_MORE_MOVIES)

    // 2. Recent popularity: bucket ratings by month ("yyyyMM") and count per movie.
    // BUGFIX: SimpleDateFormat is NOT thread-safe. The original code shared a
    // single driver-side instance across all UDF invocations, which can silently
    // corrupt the formatted month when several tasks run concurrently in the
    // same executor JVM. Create a fresh formatter per invocation instead.
    // Timestamps are epoch seconds, hence the * 1000 to get milliseconds.
    spark.udf.register("yyyyMM", (timestamp: Long) =>
      new SimpleDateFormat("yyyyMM").format(new java.util.Date(timestamp * 1000)).toInt)
    val ratingOfYearMonth: DataFrame = spark.sql("select mid, score, yyyyMM(timestamp) as yearmonth from rating")
    ratingOfYearMonth.createOrReplaceTempView("ratingOfYearMonth")

    // Group by month first, then by movie; newest month and highest count first.
    val rateMoreRecentlyMoviesDF: DataFrame = spark.sql("select mid, count(mid) as count, yearmonth from ratingOfYearMonth group by yearmonth, mid order by yearmonth desc, count desc")

    saveDFToMySql(rateMoreRecentlyMoviesDF, RATE_MORE_RECENTLY_MOVIES)

    // 3. Quality statistics: IMDB-style weighted average score per movie.
    // Average score of each movie (the "R" in the IMDB formula).
    val averageMovieDF: DataFrame = spark.sql("select mid, avg(score) as avg_score from rating group by mid")
    // Global average over all movies (the "C" in the IMDB formula).
    val total_avg: DataFrame = spark.sql("select avg(avg_score) as total_avg from (select mid, avg(score) as avg_score from rating group by mid) temp")
    val total_movie_avg: Double = total_avg.rdd.first().getAs[Double]("total_avg")
    // Only movies with at least LEAST_RATING_NUM ratings qualify.
    val qualifiedRatingsRDD: RDD[Row] = rateMoreMoviesDF.filter(row => row.getAs[Long]("count") >= LEAST_RATING_NUM).rdd
    val midAvgScoreMap: collection.Map[Int, Double] = averageMovieDF.rdd.map(row => (row.getAs[Int]("mid"), row.getAs[Double]("avg_score"))).collectAsMap()
    // Final weighted averages, sorted descending.
    val weightedAverageMovieDF: DataFrame = computeWeightedRating(spark, qualifiedRatingsRDD, midAvgScoreMap, total_movie_avg)
    saveDFToMySql(weightedAverageMovieDF, AVERAGE_MOVIES)

    // 4. Top-10 movies per genre.
    // All known genres are listed up front for simplicity.
    val genres = List(
      "Action",
      "Adventure",
      "Animation",
      "Comedy",
      "Children",
      "Crime",
      "Documentary",
      "Drama",
      "Fantasy",
      "Film-Noir",
      "Horror",
      "Musical",
      "Mystery",
      "Romance",
      "Sci-Fi",
      "Thriller",
      "War",
      "Western"
    )

    // Attach the weighted score to each movie. Movies without a weighted score
    // are not needed, hence the default inner join.
    val movieWithWeightedAvgScoreDF: DataFrame = movieDF.join(weightedAverageMovieDF, "mid")

    val genresRDD: RDD[String] = spark.sparkContext.makeRDD(genres)
    // Cartesian product of genres x movies, then keep only the pairs whose
    // movie actually carries that genre.
    val genresTopMoviesDF: DataFrame = genresRDD.cartesian(movieWithWeightedAvgScoreDF.rdd)
      .filter {
        // NOTE(review): case-insensitive substring match. Safe for the genre
        // list above (no name is contained in another), but splitting the
        // "genres" column on "|" would be stricter.
        case (genre, movieRow) => movieRow.getAs[String]("genres").toLowerCase.contains(genre.toLowerCase)
      }
      .map {
        // Reshape into (genre, (mid, weighted score)).
        case (genre, movieRow) => (genre, (movieRow.getAs[Int]("mid"), movieRow.getAs[Double]("weighted_avg_score")))
      }
      // Collect all movies of each genre together.
      .groupByKey()
      .map {
        case (genre, movieIter) =>
          // Keep the 10 best-scored movies of this genre.
          // Ordering[Double].reverse instead of Ordering.Double.reverse: the
          // latter no longer compiles on Scala 2.13, this form works everywhere.
          val top10Movies: List[(Int, Double)] = movieIter.toList.sortBy(_._2)(Ordering[Double].reverse).take(10)
          (genre, top10Movies)
      }
      .flatMap { case (genre, topMovies) =>
        // Flatten back to one row per (genre, movie).
        topMovies.map { case (mid, score) => (genre, mid, score) }
      }
      .toDF("genre", "mid", "weighted_avg_score")

    saveDFToMySql(genresTopMoviesDF, GENRES_TOP_MOVIES)

    spark.stop()
  }

  /**
    * Computes the IMDB-style weighted rating for every qualified movie.
    *
    * Formula: WR = v / (v + m) * R + m / (v + m) * C, where v is the movie's
    * rating count, m is LEAST_RATING_NUM, R is the movie's own average score
    * and C is the average score over all movies.
    *
    * @param spark               active SparkSession (for the toDF implicits)
    * @param qualifiedRatingsRDD rows (mid, count) of movies with enough ratings
    * @param midAvgScoreMap      movie id -> average score lookup
    * @param total_movie_avg     average score over all movies
    * @return DataFrame(mid, weighted_avg_score) sorted by score descending
    */
  def computeWeightedRating(spark: SparkSession, qualifiedRatingsRDD: RDD[Row], midAvgScoreMap: scala.collection.Map[Int, Double], total_movie_avg: Double): DataFrame = {
    import spark.implicits._

    val weighted = qualifiedRatingsRDD.map { ratingRow =>
      val movieId = ratingRow.getAs[Int]("mid")
      val v = ratingRow.getAs[Long]("count").toDouble
      // Fall back to the global average if a movie has no own average score.
      val ownAvg = midAvgScoreMap.getOrElse(movieId, total_movie_avg)
      val score = (v / (v + LEAST_RATING_NUM)) * ownAvg +
        (LEAST_RATING_NUM / (v + LEAST_RATING_NUM)) * total_movie_avg
      movieId -> score
    }

    weighted
      .sortBy(pair => pair._2, ascending = false)
      .toDF("mid", "weighted_avg_score")
  }


  /**
    * Writes a DataFrame to the given MySQL table, replacing its contents.
    *
    * @param df          data to persist
    * @param dbTable     target table name
    * @param mysqlConfig implicit MySQL connection configuration
    */
  def saveDFToMySql(df: DataFrame, dbTable: String)(implicit mysqlConfig: MysqlConfig) = {
    // Gather all JDBC writer settings in one place.
    val jdbcOptions = Map(
      "driver" -> "com.mysql.jdbc.Driver",
      "url" -> mysqlConfig.uri,
      "dbtable" -> dbTable,
      "user" -> mysqlConfig.user,
      "password" -> mysqlConfig.password,
      "isolationLevel" -> "SERIALIZABLE",
      // With overwrite mode, TRUNCATE the existing table instead of dropping
      // and recreating it, preserving the table's schema/indexes.
      "truncate" -> "true",
      "batchsize" -> "2000"
    )

    df.write
      .mode("overwrite")
      .format("jdbc")
      .options(jdbcOptions)
      .save()
  }

}
