package com.xxh.user.rec

import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import com.xxh.user.rec.forTest.{calIntSeqCartesian, readDataFrameFromMongo}
import org.apache.spark.SparkConf
import org.apache.spark.mllib.recommendation.{ALS, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.{col, count, explode, lit}
import org.apache.spark.sql.{DataFrame, Row, SparkSession, functions}
import org.jblas.DoubleMatrix

import scala.collection.mutable.ArrayBuffer;


// Cosine-similarity score between a pair of movies (mid1, mid).
case class MovieSimilarScore(mid1: Int, mid: Int, score: Double)

object MovieRecTest {
  val MONGODB_URI = "mongodb://localhost:27017/movierec.movie"


  /**
   * Entry point: loads ratings from MongoDB, (optionally) regenerates the
   * recommendation results, then evaluates them — recall / coverage /
   * diversity. The generation and some evaluation steps are commented out so
   * individual stages can be run in isolation.
   */
  def main(args: Array[String]): Unit = {

    // Create the SparkConf
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("OfflineRecommend")
      .set("spark.mongodb.input.uri", MONGODB_URI);
    // Create the SparkSession
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    import spark.implicits._
    val ratingDf = readDataFrameFromMongo("rating", spark).cache()
    // NOTE(review): "score" is read with getAs[Int] — assumes the Mongo field
    // is stored as a 32-bit integer; getAs[Int] fails at runtime on doubles.
    // Confirm against the collection schema.
    val data: RDD[Rating] = ratingDf.map(row => {
      Rating(row.getAs[Int]("uid"), row.getAs[Int]("mid"), row.getAs[Int]("score"))
    }).rdd


    // Split into test (20%) and training (80%) sets; fixed seed for reproducibility
    val Array(testRDD, trainRDD) = data.randomSplit(Array(0.2, 0.8), seed = 233)

    // Empty user x movie rating matrix, used as the prediction input
    val input = getUserMovieNullRating(data);

    // Generate the recommendations and the movie feature matrix (only needs to run once)
    //genUserMovieRec(trainRDD, input, spark);


    // Load the recommendation results and the movie feature matrix from the database
    //readDataFrameFromMongo("test_movie_similar_score",spark);
    val recResultDf = readDataFrameFromMongo("test_user_rec", spark);

    // Convert the DataFrame rows into UserRecommendation objects
    val recResultRdd = recResultDf.rdd.map(row => {
      val recs = row.getAs[Seq[Row]](1).map(x => {
        Recommendation(x.getInt(0), x.getDouble(1))
      })
      UserRecommendation(row.getAs[Int]("uid"), recs);
    })

    // recResultRdd.toDF().show(10)

    // Compute recall
    //    calRecall(testRDD, recResultRdd, spark)

    // Compute coverage
    //calCoverage(recResultRdd, spark)

    // Compute diversity
    calDiversity(recResultRdd, spark);

  }


  /**
   * Persist a DataFrame into the local MongoDB `movierec` database,
   * replacing the target collection entirely.
   *
   * @param df             data to write
   * @param collectionName destination collection
   */
  def SaveToDB(df: DataFrame, collectionName: String): Unit = {
    val mongoWriter = df.write
      .format("com.mongodb.spark.sql")
      .mode("overwrite")
      .option("uri", "mongodb://localhost:27017/movierec")
      .option("collection", collectionName)
    mongoWriter.save()
  }

  /**
   * Load a MongoDB collection as a DataFrame.
   *
   * @param collectionName collection inside the database configured on the session
   * @param sc             active SparkSession (carries the base Mongo read config)
   * @return the collection's contents as a DataFrame
   */
  def readDataFrameFromMongo(collectionName: String, sc: SparkSession): DataFrame = {
    // Override only the collection name; reuse the session's base read config.
    val readConfig = ReadConfig(Map("collection" -> collectionName), Some(ReadConfig(sc)))
    // Last expression is the result — explicit `return` is non-idiomatic Scala.
    MongoSpark.load(sc, readConfig)
  }


  /**
   * Build the "empty" user x movie rating matrix used as the prediction
   * input: every known user paired with every known movie, no scores filled
   * in. Only users/movies that appear with a positive rating are included.
   *
   * @param ratingDf RDD of observed ratings
   * @return RDD of all (uid, mid) pairs
   */
  def getUserMovieNullRating(ratingDf: RDD[Rating]): RDD[(Int, Int)] = {
    // Filter once and reuse for both projections — the original duplicated
    // the same `rating > 0` scan for users and for movies.
    val positiveRatings = ratingDf.filter(_.rating > 0)

    val allUserRDD: RDD[Int] = positiveRatings.map(_.user).distinct()
    val allMovieRDD: RDD[Int] = positiveRatings.map(_.product).distinct()

    // User x movie rating matrix with no scores written yet.
    allUserRDD.cartesian(allMovieRDD)
  }



  /**
   * Compute and print recall: the fraction of (user, movie) pairs in the
   * test set that also appear in the recommendation lists.
   *
   * @param testRdd      held-out test ratings
   * @param recResultRdd per-user recommendation lists
   */
  def calRecall(testRdd: RDD[Rating], recResultRdd: RDD[UserRecommendation], spark: SparkSession): Unit = {

    import spark.implicits._

    // Total number of test-set ratings (denominator).
    val all: Double = testRdd.count()

    // Test-set ratings as (uid, mid) pairs.
    val realRdd = testRdd.map(r => (r.user, r.product))

    // Recommendations flattened to (uid, mid) pairs. A plain flatMap replaces
    // the original RDD -> DataFrame -> explode -> RDD round-trip, which
    // produced exactly the same pairs at a much higher cost.
    val recallRdd = recResultRdd.flatMap(row => {
      row.rec.map(item => (row.uid, item.mid))
    })

    // Hits: recommended pairs that actually occur in the test set.
    val hit: Double = realRdd.intersection(recallRdd).count()
    print("命中：" + hit)
    print("总共：" + all)
    println("召回率为:" + (hit / all))


    realRdd.toDF().show(50)

    recallRdd.toDF().show(50)
  }


  /**
   * Compute and print coverage: how many distinct movies appear in the
   * recommendation lists, relative to the full movie catalogue.
   *
   * @param recResultRdd per-user recommendation lists
   */
  def calCoverage(recResultRdd: RDD[UserRecommendation], spark: SparkSession): Unit = {

    // Total number of movies in the catalogue.
    val movieframe = readDataFrameFromMongo("movie", spark)
    val allNum = movieframe.count()

    // Number of distinct movies recommended to at least one user.
    val recNum = recResultRdd.flatMap(_.rec.map(_.mid)).distinct().count()

    // Print the counts AND the coverage ratio — the original only printed
    // the two raw counts and never actually computed the coverage.
    print("电影总数为:" + allNum);
    print("推荐的总数为：" + recNum);
    println("覆盖率为:" + (if (allNum > 0) recNum.toDouble / allNum else 0.0))
  }

  /**
   * Compute and print the average intra-list diversity of the recommendations:
   * 1 - (mean pairwise similarity of the movies recommended to each user),
   * averaged over all users.
   *
   * @param recResultRdd per-user recommendation lists
   */
  def calDiversity(recResultRdd: RDD[UserRecommendation], spark: SparkSession): Unit = {

    import spark.implicits._

    // Pairwise movie-similarity scores, keyed by the (mid1, mid) tuple so the
    // join below can match the exploded pairs directly.
    val movieSimilarScoreDF = readDataFrameFromMongo("test_movie_similar_score", spark)
    val df = movieSimilarScoreDF.rdd.map(row => {
      ((row.getAs[Int]("mid1"), row.getAs[Int]("mid")), row.getAs[Double]("score"))
    }).toDF().withColumnRenamed("_1", "mid").withColumnRenamed("_2", "score")

    // (uid, [(mid, mid), (mid, mid), ...]) — all ordered pairs of the movies
    // recommended to each user, self-pairs excluded.
    val userMoviesRdd = recResultRdd.map(row => {
      val seqMids = row.rec.map(i => i.mid)
      val tuples = calIntSeqCartesian(seqMids)
      (row.uid, tuples)
    })

    // To DataFrame and explode the pair column: one row per (uid, pair).
    val userMoviesDF = userMoviesRdd.toDF().withColumnRenamed("_1", "uid").withColumnRenamed("_2", "midCartesian")
    val umDf = userMoviesDF.select($"*", explode($"midCartesian")).withColumnRenamed("col", "mid")

    // Attach each pair's similarity (left join: unknown pairs keep a null score).
    val user_movieSimilarScoreDf = umDf.join(df, Seq("mid"), "left")

    // Aggregate per user: total pairwise similarity and number of pair rows.
    val resultDf = user_movieSimilarScoreDf.groupBy("uid").agg(functions.sum("score"), count("uid"))

    // Diversity formula, summed over users.
    val sumDiversity = resultDf.rdd.map(row => {
      val sumSimilar = row.getAs[Double]("sum(score)")
      // ru counts the exploded rows for this user, i.e. the number of ORDERED
      // pairs n * (n - 1) — not the number of recommended items.
      val ru = row.getAs[Long]("count(uid)")
      // Mean pairwise similarity: sumSimilar sums every unordered pair twice
      // and ru already equals n * (n - 1), so the plain ratio is correct.
      // (The original divided by ru * (ru - 1), mistaking ru for the item
      // count, which deflated the similarity term to almost zero.)
      1 - (sumSimilar / ru)
    }).reduce((x, y) => x + y)
    // Number of users.
    val userNum = resultDf.count()

    // Print overall and per-user average diversity.
    println("这次推荐的总体多样性为：" + sumDiversity)
    println("总体用户数：" + userNum)
    println("总体多样性平均：" + sumDiversity / userNum)
  }


  /** Cosine similarity of two feature vectors: dot product over the product of L2 norms. */
  def consinSim(matrix1: DoubleMatrix, matrix2: DoubleMatrix): Double = {
    val dotProduct = matrix1.dot(matrix2)
    val normProduct = matrix1.norm2() * matrix2.norm2()
    dotProduct / normProduct
  }


  /**
   * Look up the similarity score of an ordered movie pair in the similarity RDD.
   *
   * @param mid1              first movie id
   * @param mid2              second movie id
   * @param movieSimilarScore (mid1, mid2, score) triples
   * @return the stored score for (mid1, mid2), or 0.0 when the pair is absent.
   *         The original called .first() on the filtered RDD, which throws on
   *         a missing pair; take(1) returns an empty array instead.
   */
  def calMovieSimialer(mid1: Int, mid2: Int, movieSimilarScore: RDD[(Int, Int, Double)]): Double = {
    movieSimilarScore
      .filter { case (a, b, _) => a == mid1 && b == mid2 }
      .map(_._3)
      .take(1)
      .headOption
      .getOrElse(0.0)
  }

  /**
   * All ordered pairs (a, b) drawn from `seq` with a != b — the cartesian
   * product of the sequence with itself, self-pairs removed.
   *
   * @param seq movie ids to pair up
   * @return buffer of ordered pairs, in nested-iteration order
   */
  def calIntSeqCartesian(seq: Seq[Int]): ArrayBuffer[(Int, Int)] = {
    // For-comprehension replaces the nested foreach + mutable appends, and
    // the stray debug println of the whole buffer has been removed.
    val pairs = for {
      r1 <- seq
      r2 <- seq
      if r1 != r2 // primitive comparison; no need for boxed .equals on Int
    } yield (r1, r2)
    ArrayBuffer(pairs: _*)
  }


  /**
   * Train an ALS model on the training ratings, predict a score for every
   * (user, movie) cell, and persist both the per-user top-N recommendations
   * and the pairwise movie-similarity matrix to MongoDB.
   *
   * @param trainRdd   training split of the ratings
   * @param userMovies empty user x movie rating matrix (cells to predict)
   * @param rank       number of ALS latent factors (default keeps the original 30)
   * @param iterations ALS iteration count (default keeps the original 35)
   * @param lambda     ALS regularization parameter (default keeps the original 0.1)
   * @param topN       recommendations kept per user (default keeps the original 60)
   */
  def genUserMovieRec(trainRdd: RDD[Rating], userMovies: RDD[(Int, Int)], spark: SparkSession,
                      rank: Int = 30, iterations: Int = 35, lambda: Double = 0.1, topN: Int = 60) = {

    import spark.implicits._

    // Train the ALS model; the hyper-parameters were hard-coded magic numbers,
    // now overridable via defaulted parameters (same values by default).
    val model = ALS.train(trainRdd, rank, iterations, lambda)

    // Predicted ratings for every empty cell.
    val pred: RDD[Rating] = model.predict(userMovies)

    // Latent movie-feature vectors produced by the model, as jblas matrices.
    val movieFeatures: RDD[(Int, DoubleMatrix)] = model.productFeatures.map {
      case (mid, features) => (mid, new DoubleMatrix(features))
    }

    // Per-user top-N recommendations, ordered by predicted score descending.
    val userRec: RDD[UserRecommendation] = pred
      .filter(_.rating > 0)
      .map(row => (row.user, (row.product, row.rating)))
      .groupByKey()
      .map {
        case (uid, rec) =>
          UserRecommendation(uid, rec.toList.sortWith(_._2 > _._2).take(topN).map(x => Recommendation(x._1, x._2)))
      }

    // Pairwise movie similarity (cosine over feature vectors), self-pairs excluded.
    val movieSimilarScoreRdd = movieFeatures.cartesian(movieFeatures)
      .filter { case (a, b) => a._1 != b._1 }
      .map {
        case (a, b) => MovieSimilarScore(a._1, b._1, this.consinSim(a._2, b._2))
      }

    // Persist both results to the database.
    SaveToDB(movieSimilarScoreRdd.toDF(), "test_movie_similar_score")
    SaveToDB(userRec.toDF(), "test_user_rec")

    print("推荐结果计算完成")
  }


}
