package com.wtw.statistics

import java.text.SimpleDateFormat
import java.util.Date

import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}

// Row shape of the MySQL "Movie" table.
// genres/actors/directors look like delimiter-joined lists packed into one string
// (genres is substring-matched per genre keyword downstream) — TODO confirm delimiter.
case class Movie(mid: Int, name: String, descri: String, timelong: String, issue: String,
                 shoot: String, language: String, genres: String, actors: String, directors: String)

case class Rating(uid: Int, mid: Int, score: Double, timestamp: Int)

case class MongoConfig(uri: String, db: String)

case class MysqlConfig(url: String, user: String, password: String, driver: String)

// Base recommendation item: a movie id paired with its score.
// NOTE(review): not referenced in this file since GenresRecommendation.recs
// was changed to a plain String — kept presumably for other modules; verify.
case class Recommendation(mid: Int, score: Double)

// Top-10 recommendation row per movie genre.
// recs was originally Seq[Recommendation] but is now a pre-rendered string of the
// form "[mid,avg],[mid,avg],..." (see the map over sorted items in main) so it can
// be stored in a single MySQL column.
//case class GenresRecommendation(genres: String, recs: Seq[Recommendation])
case class GenresRecommendation(genres: String, recs: String)

/**
 * Batch job that computes rating statistics over the Movie/Rating tables in MySQL
 * and writes four result tables back: historical rating counts, per-month rating
 * counts, average scores, and top-10 movies per genre.
 */
object StatisticsRecommender {

  // Source table names in MySQL.
  val MYSQL_MOVIE_COLLECTION = "Movie"
  val MYSQL_RATING_COLLECTION = "Rating"

  // Result table names for the computed statistics.
  val RATE_MORE_MOVIES = "RateMoreMovies"
  val RATE_MORE_RECENTLY_MOVIES = "RateMoreRecentlyMovies"
  val AVERAGE_MOVIES = "AverageMovies"
  val GENRES_TOP_MOVIES = "GenresTopMovies"

  /**
   * Persists a DataFrame to MySQL, overwriting any existing table of that name.
   *
   * @param df               data to persist
   * @param collections_name target table name
   * @param mysqlConfig      JDBC connection settings
   */
  def storeDFInMysql(df: DataFrame, collections_name: String)(implicit mysqlConfig: MysqlConfig): Unit = {
    df.write.mode("overwrite")
      .format("jdbc")
      .option("driver", mysqlConfig.driver)
      .option("url", mysqlConfig.url)
      .option("dbtable", collections_name) // table name
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .save()
  }

  /** Reads one MySQL table into a DataFrame via JDBC (shared by the Movie/Rating loads). */
  private def readTable(spark: SparkSession, table: String)(implicit mysqlConfig: MysqlConfig): DataFrame =
    spark.read.format("jdbc")
      .option("url", mysqlConfig.url)
      .option("driver", mysqlConfig.driver)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .option("dbtable", table)
      .load()

  def main(args: Array[String]): Unit = {
    val config = Map(
      "spark.cores" -> "local[*]",
      "mysql.url" -> "jdbc:mysql://localhost:3306/recommend",
      "mysql.user" -> "root",
      "mysql.password" -> "root",
      "mysql.driver" -> "com.mysql.jdbc.Driver"
    )

    val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("StatisticsRecommender")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    import spark.implicits._ // needed for .toDF() on the genres RDD below

    implicit val mysqlConfig: MysqlConfig =
      MysqlConfig(config("mysql.url"), config("mysql.user"), config("mysql.password"), config("mysql.driver"))

    // Load source data.
    val movieDF = readTable(spark, MYSQL_MOVIE_COLLECTION)
    val ratingDF = readTable(spark, MYSQL_RATING_COLLECTION)

    ratingDF.createOrReplaceTempView("ratings")

    // 1. Most-rated movies over the whole history.
    val rateMoreMoviesDF = spark.sql("select mid, count(mid) as count from ratings group by mid")
    storeDFInMysql(rateMoreMoviesDF, RATE_MORE_MOVIES)

    // 2. Rating counts per movie per month: bucket epoch-second timestamps as yyyyMM.
    val simpleDateFormat = new SimpleDateFormat("yyyyMM")

    // UDF converting an epoch-second timestamp to an Int year-month, e.g. 1260759144 => 200912.
    // NOTE(review): SimpleDateFormat is not thread-safe; this relies on Spark giving each
    // task its own deserialized copy of the closure — confirm if executors share instances.
    spark.udf.register("changeDate", (x: Int) => simpleDateFormat.format(new Date(x * 1000L)).toInt)

    // Re-project the ratings with the year-month column and register it as a view.
    val ratingOfYearMonth = spark.sql("select mid, score, changeDate(timestamp) as yearmonth from ratings")
    ratingOfYearMonth.createOrReplaceTempView("ratingOfMonth")

    val rateMoreRecentlyMovies =
      spark.sql("select mid, count(mid) as count ,yearmonth from ratingOfMonth group by yearmonth,mid")
    storeDFInMysql(rateMoreRecentlyMovies, RATE_MORE_RECENTLY_MOVIES)

    // 3. Average score per movie.
    val averageMoviesDF = spark.sql("select mid, avg(score) as avg from ratings group by mid")
    storeDFInMysql(averageMoviesDF, AVERAGE_MOVIES)

    // 4. Top-10 movies per genre.
    val movieWithScore = movieDF.join(averageMoviesDF, Seq("mid"))

    // All genre keywords to match against the Movie.genres string.
    val genres = List("Action","Adventure","Animation","Comedy","Crime","Documentary","Drama","Family","Fantasy","Foreign","History","Horror","Music","Mystery"
      ,"Romance","Science","Tv","Thriller","War","Western")

    // Distribute the genre list so it can be crossed with the movie rows.
    val genresRDD = spark.sparkContext.makeRDD(genres)

    // Cross every genre with every scored movie, keep matching pairs, then take the
    // 10 highest-averaging movies per genre rendered as "[mid,avg],[mid,avg],...".
    val genrenTopMovies: DataFrame = genresRDD.cartesian(movieWithScore.rdd)
      .filter {
        // Case-insensitive substring match of the genre keyword in the movie's genres field.
        case (genre, row) => row.getAs[String]("genres").toLowerCase.contains(genre.toLowerCase)
      }
      .map {
        // Shrink each row to (genre, (mid, avg)) before the shuffle.
        case (genre, row) => (genre, (row.getAs[Int]("mid"), row.getAs[Double]("avg")))
      }
      .groupByKey()
      .map {
        case (genre, items) =>
          GenresRecommendation(genre,
            items.toList.sortWith(_._2 > _._2).take(10)
              .map(item => "[" + item._1 + "," + item._2 + "]")
              .mkString(","))
      }.toDF()

    storeDFInMysql(genrenTopMovies, GENRES_TOP_MOVIES)
    spark.stop()
  }
}
