import java.text.SimpleDateFormat
import java.util.Date

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Offline statistics recommender.
 *
 * Reads the Rating and Movie collections from MongoDB, computes statistics
 * with Spark SQL (rating counts, per-movie average score, recently popular
 * movies, and a per-genre top-10 by average score), and writes the results
 * back to MongoDB via `Constant.DataIntoMongoDB`.
 *
 * FIX: converted from `extends App` to an explicit `main`. With `App`, the
 * object's vals are initialized through `delayedInit`, which is a known
 * source of NullPointerExceptions when Spark serializes closures that
 * capture not-yet-initialized fields onto executors.
 */
object StatisticRecommender {

  def main(args: Array[String]): Unit = {
    // 1. Initialize the environment => SparkSession (project helper).
    val spark: SparkSession = Constant.initEnv()

    // 2. Implicit conversions: Dataset encoders, .toDF on RDDs, etc.
    import spark.implicits._
    // Implicit MongoDB connection config (URI + database name), picked up
    // by the project's persistence helper.
    implicit val mongoConfig: MongoConfig = MongoConfig(Constant.MONGO_URL, Constant.MONGO_DB)

    // 3. Read the Rating collection from MongoDB; decode each row with the
    //    MovieRating case class, then expose the result as a DataFrame.
    val ratingDF: DataFrame = spark.read
      // The URI can come either from Constant.MONGO_URL directly, or — as
      // here — from the implicit MongoConfig instance's `uri` field.
      .option("uri", mongoConfig.uri)
      .option("collection", Constant.RATING_COLLECTION)
      .format("com.mongodb.spark.sql")
      .load()
      .as[MovieRating] // validates the Mongo schema against the case class
      .toDF()
    // 3.1 Register a temp view so the statistics below can use Spark SQL.
    ratingDF.createTempView("ratings")

    // Read the Movie collection from MongoDB.
    // FIX: the original chained `.as[Movie].rdd.toDF()`, a needless round
    // trip through an RDD that discards Catalyst optimizations and the
    // existing schema; `.toDF()` on the typed Dataset is equivalent.
    val movieDF: DataFrame = spark.read
      .option("uri", mongoConfig.uri)
      .option("collection", Constant.MOVIE_COLLECTION)
      .format("com.mongodb.spark.sql")
      .load()
      .as[Movie]
      .toDF()

    // Exploratory snippet kept for reference: extracts the distinct genre
    // tokens from the pipe-separated "genres" column (e.g. "Crime|Drama").
    /*movieDF.map {
      case row => row.getAs[String]("genres")
    }.flatMap(_.split("\\|")).distinct().toDF().show() // Crime|Drama|Thriller*/

    // 4. Business requirements.

    /* 4.1 Most-rated movies.
     *   "rating"  => the ratings view
     *   "most"    => count()
     */
    /*val rateMoreMovies: DataFrame = spark.sql("select mid,count(score) as count from ratings group by mid order by count")
    // Persist the result back into MongoDB's RateMoreMovies collection.
    Constant.DataIntoMongoDB(rateMoreMovies, Constant.RATE_MORE_MOVIES)*/

    // 4.2 Average score per movie.
    val averageMovies: DataFrame = spark.sql("select mid, avg(score) as avg from ratings group by mid")
    // Persist the result back into MongoDB's AverageMovies collection.
    //Constant.DataIntoMongoDB(averageMovies,Constant.AVERAGE_MOVIES)

    /* 4.3 Recently popular movies.
     *   "recent"  : month granularity (rating timestamp => yyyyMM)
     *   "popular" : rating count per (month, movie), newest months first
     */
    /*val simpleDateFormat = new SimpleDateFormat("yyyyMM")
    // NOTE(review): SimpleDateFormat is not thread-safe; sharing one instance
    // inside a UDF is risky — consider DateTimeFormatter if re-enabled.
    // Register a UDF converting epoch seconds to a yyyyMM string.
    spark.udf.register("changeDate", (x: Long) => simpleDateFormat.format(new Date(x * 1000L)))
    val ratingOfYearMonth: DataFrame = spark.sql("select mid,score,changeDate(timestamp) as yearMonth from ratings")
    ratingOfYearMonth.createTempView("ratingOfYearMonth")

    // Popularity: count ratings per (yearMonth, mid), newest month and
    // highest count first.
    val rateMoreRecentlyMovies: DataFrame = spark.sql("select mid,count(mid) as count,yearMonth from ratingOfYearMonth group by yearMonth,mid order by yearMonth desc,count desc")
    Constant.DataIntoMongoDB(rateMoreRecentlyMovies, Constant.RATE_MORE_RECENTLY_MOVIES)*/

    /* 4.4 Top-N movies per genre.
     *   movies : from the ratings-derived average scores
     *   genres : from the Movie collection (movieDF)
     *   top-N  : ranked by average score via sortWith + take
     */
    // The full set of genre labels used by the dataset.
    val genres = List("Crime", "Romance", "Thriller", "Adventure", "Drama", "War", "Documentary",
      "Fantasy", "Mystery", "Musical", "Animation", "Film-Noir", "IMAX", "Horror", "Western", "Comedy", "Children", "Action", "Sci-Fi")
    // List => RDD, so it can be cartesian-joined with the movie rows.
    val genresRDD: RDD[String] = spark.sparkContext.makeRDD(genres)

    // 1. Join movieDF with the per-movie average scores on `mid`.
    val movieWithScore: DataFrame = movieDF.join(averageMovies, "mid")

    // 2. Cartesian product of every genre with every scored movie
    //    (movieWithScore must be an RDD for `cartesian`).
    val genresTopMovies: DataFrame = genresRDD.cartesian(movieWithScore.rdd)
      // 3. Keep only pairs whose movie actually carries the genre; the
      //    case-insensitive `contains` matches within the pipe-separated
      //    "genres" field.
      .filter {
        case (genre, movieRow) =>
          movieRow.getAs[String]("genres").toLowerCase.contains(genre.toLowerCase())
      }
      // Re-shape to (genre, (mid, avg)) so we can group by genre.
      .map {
        case (genre, movieRow) =>
          (genre, (movieRow.getAs[Int]("mid"), movieRow.getAs[Double]("avg")))
      }
      // 4. Group by genre, then take the 10 highest-scored movies each.
      .groupByKey()
      .map {
        case (genre, items) =>
          GenresRecommendation(genre, items.toList
            .sortWith(_._2 > _._2) // descending by average score
            .take(10)
            .map(x => Recommendation(x._1, x._2)))
      }.toDF()
    genresTopMovies.show()
    // 5. Persist the per-genre top-10 into MongoDB.
    Constant.DataIntoMongoDB(genresTopMovies, Constant.GENRES_TOP_MOVIES)

    // FIX: release the SparkSession (the original never stopped it).
    spark.stop()
  }

  // Reminder: extra SQL helpers require their own import, e.g.
  /*import org.apache.spark.sql.functions._
  concat_ws(collect_list())*/
}
