package com.joyxj.movielens

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession

import scala.collection.mutable.ArrayBuffer

/**
  * 电影相关操作描述
  *
  * @author xiaoj
  * @version 2018-03-13
  */
object MoviesOps {

  /**
    * Entry point: computes and prints the top genres by movie count
    * from the MovieLens `movies.csv` file.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("MoviesOps")
    val sparkSession = SparkSession.builder().config(conf).appName("MoviesOps").getOrCreate()
    val sc = sparkSession.sparkContext
    try {
      // 1. Genres with the most movies (top N)
      hotGenres(sc)
    } finally {
      // Fix: the session was never stopped, leaking the SparkContext on exit.
      sparkSession.stop()
    }
  }

  /**
    * Hot genres: a genre's "heat" is the number of movies tagged with it.
    * Prints the top `topN` (genre, movieCount) pairs in descending order.
    *
    * @param sc    active SparkContext
    * @param path  location of the MovieLens movies.csv file
    * @param topN  how many genres to print
    */
  private def hotGenres(
      sc: SparkContext,
      path: String = "E:\\BaiduNetdiskDownload\\ml-20m\\movies.csv",
      topN: Int = 5): Unit = {
    val lines = sc.textFile(path, 10)
    lines
      // Fix: drop the CSV header row ("movieId,title,genres"); the original
      // counted the literal word "genres" as a genre.
      .filter(line => !line.startsWith("movieId"))
      .map { line =>
        // Handle data variability: titles containing commas are quoted,
        // e.g. 11,"American President, The (1995)",Comedy|Drama|Romance
        if (line.contains(",\"") || line.contains("\",")) {
          // Line contains ',"' or '",' — title is quoted; genres follow the closing quote.
          (line.split(",\"")(0), line.split("\",")(1))
        } else {
          // Plain line: movieId,title,genres
          (line.split(",")(0), line.split(",")(2))
        }
      }
      // A movie may belong to several genres separated by '|'; emit one
      // (genre, 1) pair per membership. (The movieId is not needed for counting.)
      .flatMap { case (_, genres) => genres.split("\\|").map(genre => (genre, 1)) }
      .reduceByKey(_ + _)
      // sortBy on the count replaces the original swap/sortByKey/swap dance.
      .sortBy(_._2, ascending = false)
      .take(topN)
      .foreach(println)
  }

}
