package learn.recommend


import org.apache.spark.mllib.recommendation.{ALS, Rating}
import org.apache.spark.{SparkConf, SparkContext}
import org.jblas.DoubleMatrix
/**
  * 协同过滤demo2，基于物品的推荐：根据物品的相似度给某个用户推荐物品
  */
object ItemCFDemo {

  /**
    * Parses one tab-separated line of the ratings file ("user \t item \t rating")
    * into an MLlib Rating.
    *
    * @param str one record from u.dat
    * @return Rating(userId, itemId, rating)
    */
  def parseRating(str: String): Rating = {
    val fields = str.split("\t")
    Rating(fields(0).toInt, fields(1).toInt, fields(2).toDouble)
  }

  /**
    * Cosine similarity of two vectors: dot product divided by the product of
    * their L2 norms. Range is [-1, 1]: 1 = same direction, 0 = orthogonal,
    * -1 = opposite.
    *
    * @param vec1 first vector
    * @param vec2 second vector
    * @return cosine similarity, or 0.0 if either vector has zero norm
    *         (avoids NaN from a 0/0 division)
    */
  def cosineSimilarity(vec1: DoubleMatrix, vec2: DoubleMatrix): Double = {
    val denom = vec1.norm2() * vec2.norm2()
    if (denom == 0.0) 0.0 else vec1.dot(vec2) / denom
  }


  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("ItemCFDemo")
    val sc = new SparkContext(conf)
    try {
      // Build the ALS model from the ratings file.
      val ratingData = sc.textFile("E:\\test\\ml-100k\\u.dat")
      val ratingsRDD = ratingData.map(parseRating)
      // ALS.train(ratings, rank, iterations, lambda):
      // 50 latent factors, 10 iterations, regularization 0.01.
      val model = ALS.train(ratingsRDD, 50, 10, 0.01)

      // Map from movie id to title (u.item is pipe-separated).
      val movies = sc.textFile("E:\\test\\ml-100k\\u.item")
      val titles = movies
        .map(line => line.split("\\|"))
        .map(array => (array(0).toInt, array(1)))
        .collectAsMap()

      // Look up the latent-factor vector of the query item.
      val itemId = 465
      val itemFactor: Array[Double] = model.productFeatures.lookup(itemId).head
      val itemVector = new DoubleMatrix(itemFactor)

      // Cosine similarity of every item's factor vector against the query item.
      val sims = model.productFeatures.map {
        case (id, factor) =>
          val factorVector = new DoubleMatrix(factor)
          val sim = cosineSimilarity(factorVector, itemVector)
          (id, sim)
      }
      // Take the 10 most similar items. The query item itself always has
      // similarity 1.0 with itself, so exclude it first — otherwise the
      // "recommendations" would contain the item being queried.
      val sortedSims: Array[(Int, Double)] = sims
        .filter { case (id, _) => id != itemId }
        .top(10)(Ordering.by[(Int, Double), Double] { case (id, similarity) => similarity })

      println("与" + titles(itemId) + "最为相似的10部电影：")
      sortedSims
        .map { case (id, sim) => (titles(id), sim) }
        .foreach(tuple => println("电影：" + tuple._1 + "，相似度：" + tuple._2))
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}