package com.zzl.spark.recommend

import java.io.File

import org.apache.log4j.{Level, Logger}
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.io.StdIn
import scala.util.Try

object Recommend {

  /** Entry point: trains an ALS collaborative-filtering model on the MovieLens
    * data set and starts the interactive recommendation console.
    */
  def main(args: Array[String]): Unit = {
    SetLogger
    val (ratingsRDD, movieTitle) = PrepareData()
    // ALS hyper-parameters: rank = 10 latent factors, 25 iterations, lambda = 0.1 regularization.
    val model = ALS.train(ratingsRDD, 10, 25, 0.1)
    recommend(model, movieTitle)
  }

  /** Interactive console loop: "1" recommends movies for a user id, "2" recommends
    * users for a movie id, "3" (or EOF on stdin) exits.
    *
    * @param model      trained ALS factorization model
    * @param movieTitle movie id -> title lookup built by [[PrepareData]]
    */
  def recommend(model: MatrixFactorizationModel, movieTitle: Map[Int, String]): Unit = {
    var choose = ""
    while (choose != "3") {
      println("请选择要推荐的类型 1:针对用户推荐电影 2.针对电影推荐给感兴趣的用户 3.离开?")
      // Console.readLine()/Predef.readInt() are deprecated (removed in Scala 2.13);
      // use scala.io.StdIn. StdIn.readLine() returns null on EOF — map that to "3"
      // so the loop terminates instead of spinning/NPE-ing.
      choose = Option(StdIn.readLine()).getOrElse("3")
      if (choose == "1") {
        print("请输入用户id?")
        readId().foreach(id => RecommendMovies(model, movieTitle, id)) // 针对此用户推荐电影
      } else if (choose == "2") {
        println("请输入电影的id?")
        readId().foreach(id => RecommendUsers(model, movieTitle, id)) // 针对此电影推荐用户
      }
    }
  }

  /** Reads one line from stdin and parses it as an Int.
    * Returns None on malformed input or EOF instead of crashing the console loop
    * (the old bare readInt() threw NumberFormatException).
    */
  private def readId(): Option[Int] =
    Try(StdIn.readLine().trim.toInt).toOption

  /** Silences log4j/Spark console noise. Must run before the SparkContext is
    * created so spark.ui.showConsoleProgress takes effect.
    */
  def SetLogger: Unit = {
    System.setProperty("hadoop.home.dir", "E:\\hadoop")
    Logger.getLogger("org").setLevel(Level.OFF)
    Logger.getLogger("com").setLevel(Level.OFF)
    System.setProperty("spark.ui.showConsoleProgress", "false")
    Logger.getRootLogger.setLevel(Level.OFF)
  }

  /** Creates the local SparkContext, loads the MovieLens files and returns
    * (ratings RDD, movie id -> title map).
    *
    * u.data rows are tab-separated "user \t movie \t rating \t timestamp";
    * u.item rows are pipe-separated with id and title in the first two fields.
    */
  def PrepareData(): (RDD[Rating], Map[Int, String]) = {
    // 1.创建用户评分数据
    val sc = new SparkContext(new SparkConf().setAppName("Recommend").setMaster("local[*]"))
    println("开始读取用户评分数据中...")
    val dataDir = "file:/D:/ideawork/SparkLearning/"
    val rawUserData = sc.textFile(dataDir + "u.data")
    val rawRatings = rawUserData.map(_.split("\t").take(3))
    val ratingsRDD = rawRatings.map {
      case Array(user, movie, rating) => Rating(user.toInt, movie.toInt, rating.toDouble)
    }.cache() // counted and re-scanned several times below — cache to avoid re-reading the file
    val numRatings = ratingsRDD.count()
    println("共计:" + numRatings.toString + " 条ratings")
    // 2.创建电影ID与名称对照表
    println("开始读取电影数据中..")
    // Build the path by concatenation like u.data above: new File(dataDir, "u.item").toString
    // rewrites the "file:/" URI with platform separators on Windows and breaks the load.
    val itemRDD = sc.textFile(dataDir + "u.item")
    val movieTitle = itemRDD
      .map(line => line.split("\\|").take(2))
      .map(array => (array(0).toInt, array(1)))
      .collect()
      .toMap
    // 3.显示数据记录数 (ratings count reused from above instead of a second pass)
    val numUsers = ratingsRDD.map(_.user).distinct().count()
    val numMovies = ratingsRDD.map(_.product).distinct().count()
    println("共计: ratings: " + numRatings + " User: " + numUsers + " Movie: " + numMovies)
    (ratingsRDD, movieTitle)
  }

  /** Prints the top-10 movie recommendations for the given user id, ranked by
    * predicted rating.
    */
  def RecommendMovies(model: MatrixFactorizationModel, movieTitle: Map[Int, String], inputUserId: Int): Unit = {
    val recommendMovie = model.recommendProducts(inputUserId, 10)
    println("针对用户Id " + inputUserId + " 推荐下列电影:")
    // zipWithIndex replaces the old mutable `var i` counter.
    recommendMovie.zipWithIndex.foreach { case (r, idx) =>
      println((idx + 1).toString + "." + movieTitle(r.product) + " 评分:" + r.rating.toString)
    }
  }

  /** Prints the top-10 users predicted to be most interested in the given movie id. */
  def RecommendUsers(model: MatrixFactorizationModel, movieTitle: Map[Int, String], inputMovieID: Int): Unit = {
    val recommendUsers = model.recommendUsers(inputMovieID, 10)
    println("针对电影id " + inputMovieID + " 推荐下列用户:")
    recommendUsers.zipWithIndex.foreach { case (r, idx) =>
      println((idx + 1).toString + "用户id:" + r.user + " 评分:" + r.rating.toString)
    }
  }
}
