package com.xxh.user.rec

import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config._
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable.Set // mutable collections may be imported anywhere in the file


/** A single recommended movie: its id and the score backing the recommendation. */
final case class Recommendation(mid: Int, score: Double)

/** Top recommendations for one movie genre (`typeName`), best score first. */
final case class TypeRecommendation(typeName: String, rec: Seq[Recommendation])

/** Offline statistics job: computes most-rated movies, per-movie average
  * scores, and the top-10 movies per genre, persisting results to MongoDB.
  */
object statisticsRec {

  // Base input URI; the Mongo Spark connector requires a concrete
  // database.collection in spark.mongodb.input.uri.
  val MONGODB_URI = "mongodb://localhost:27017/movierec.movie"

  def main(args: Array[String]): Unit = {

    // Local-mode Spark configuration with the default Mongo input collection.
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("OfflineRecommend")
      .set("spark.mongodb.input.uri", MONGODB_URI)

    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    import spark.implicits._

    val movieDf = readDataFrameFromMongo("movie", spark)
    val ratingDf = readDataFrameFromMongo("rating", spark)
    movieDf.createOrReplaceTempView("temp_movie")
    ratingDf.createOrReplaceTempView("ratings")

    // 1. "Hot" movies: ordered by number of ratings received.
    val rateMostMovieDF = spark.sql("select mid ,count(mid) as count from ratings group by mid order by count desc")
//    SaveToDB(rateMostMovieDF, "rate_most_movies")

    // 2. Average score per movie, best first.
    val rateAvgMovieDF = spark.sql("select mid ,avg(score) as avgscore from ratings group by mid order by avgscore desc")
//    SaveToDB(rateAvgMovieDF, "avg_score_movies")

    // 3. Top-N per genre: first gather every distinct genre name.
    val allTypes = calTypes(movieDf)

    // Movies joined with their average score on mid.
    val movieWithAvgScore = movieDf.join(rateAvgMovieDF, "mid")
    val typesRdd: RDD[String] = spark.sparkContext.makeRDD(allTypes)

    SaveToDB(typesRdd.toDF(), "movie_type")

    // Pair every genre with every scored movie, keep only genuine matches,
    // then keep the 10 best-scored movies per genre.
    val typeTopMovieDf: DataFrame = typesRdd
      .cartesian(movieWithAvgScore.rdd)
      .filter { case (genre, row) =>
        // FIX: match against the split genre list rather than a raw substring
        // test — `"...".contains(genre)` would also accept partial matches
        // (e.g. genre "War" matching a movie typed "Warfare").
        row.getAs[String]("types").split("\\|").contains(genre)
      }
      .map { case (genre, row) =>
        (genre, (row.getAs[Int]("mid"), row.getAs[Double]("avgscore")))
      }
      .groupByKey()
      .map { case (genre, items) =>
        // Keep the 10 highest-scoring movies for this genre.
        TypeRecommendation(
          genre,
          items.toList
            .sortWith(_._2 > _._2)
            .take(10)
            .map { case (mid, score) => Recommendation(mid, score) })
      }
      .toDF()
//    SaveToDB(typeTopMovieDf, "type_top_movies")
  }

  /** Collect the distinct genre names appearing in the movie DataFrame's
    * pipe-separated "types" column. Null columns are tolerated and empty
    * entries (from leading/double pipes) are dropped.
    */
  def calTypes(movieDf: DataFrame): List[String] =
    movieDf
      .select("types")
      .collect()
      .flatMap(row => Option(row.getAs[String]("types")).getOrElse("").split("\\|"))
      .filter(_.nonEmpty)
      .toSet
      .toList

  /** Overwrite `collectionName` in the local movierec database with `df`. */
  def SaveToDB(df: DataFrame, collectionName: String): Unit =
    df.write
      .option("uri", "mongodb://localhost:27017/movierec")
      .option("collection", collectionName)
      .format("com.mongodb.spark.sql")
      .mode("overwrite")
      .save()

  /** Load `collectionName` from MongoDB as a DataFrame, inheriting the
    * session's spark.mongodb.input.* settings for everything but the
    * collection name.
    */
  def readDataFrameFromMongo(collectionName: String, sc: SparkSession): DataFrame = {
    val readConfig = ReadConfig(Map("collection" -> collectionName), Some(ReadConfig(sc)))
    MongoSpark.load(sc, readConfig)
  }

}
