package com.xxh.user.rec

import breeze.numerics.constants.c
import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import org.apache.spark.SparkConf
import org.apache.spark.mllib.recommendation.Rating
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession, functions}
import org.apache.spark.sql.functions.{array, array_union, col, concat, concat_ws, count, explode}
import org.apache.spark.sql.types.{IntegerType, StringType}
import org.codehaus.jackson.map.`type`.ArrayType
import org.jblas.DoubleMatrix

import scala.collection.mutable.ArrayBuffer
import scala.reflect.io.File.separator

case class userMovieSimilarScore(uid: Int, score: Double);

object forTest {

  val MONGODB_URI = "mongodb://localhost:27017/movierec.movie"

  /**
   * Entry point: exercises the offline-recommendation evaluation metrics
   * (recall / coverage / diversity) against small hand-built data sets.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // Spark configuration: local mode, MongoDB input URI for the movie data.
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("OfflineRecommend")
      .set("spark.mongodb.input.uri", MONGODB_URI)
    // SparkSession shared by all metric computations below.
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    // Hand-built test set: ground-truth (user, movie, rating) triples.
    val testRdd = spark.sparkContext.parallelize(Seq(
      Rating(1, 1, 3),
      Rating(1, 2, 4),
      Rating(1, 3, 4),
      Rating(1, 4, 4),
      Rating(2, 1, 4),
      Rating(2, 2, 4)
    ))
    // Hand-built per-user recommendation lists.
    val recRdd = spark.sparkContext.parallelize(Seq(
      UserRecommendation(1, Seq(Recommendation(2, 5), Recommendation(3, 5), Recommendation(9, 5))),
      UserRecommendation(2, Seq(Recommendation(2, 5), Recommendation(3, 5), Recommendation(9, 5)))
    ))

    // Recall metric.
    calRecall(testRdd, recRdd, spark)

    // Coverage metric.
    //calCoverage(recRdd, spark)

    // Hand-built (mid1, mid2, similarity) triples for the diversity metric;
    // kept for the commented-out calDiversity experiment below.
    val movieSimilarScoreRDD = spark.sparkContext.parallelize(Seq(
      (1, 2, 3.1),
      (1, 3, 3.1),
      (1, 4, 3.1),
      (1, 5, 3.1),
      (2, 5, 4.1),
      (3, 5, 2.1),
      (9, 5, 3.1)
    ))
    // Diversity metric.
    //    calDiversity(recRdd, spark)

    // FIX: release Spark resources before exiting — the original never
    // stopped the session.
    spark.stop()
  }


  /**
   * Computes and prints the recall of a recommendation run: the fraction of
   * (user, movie) pairs in the test set that also appear in the per-user
   * recommendation lists.
   *
   * @param testRdd      test set of ground-truth ratings
   * @param recResultRdd per-user recommendation lists
   * @param spark        active SparkSession (needed for the debug DataFrames)
   */
  def calRecall(testRdd: RDD[Rating], recResultRdd: RDD[UserRecommendation], spark: SparkSession): Unit = {

    import spark.implicits._

    // Size of the test set: total number of (user, movie) rating pairs.
    val all: Long = testRdd.count()

    // Ground truth reduced to (uid, mid) pairs.
    val realRdd = testRdd.map(row => (row.user, row.product))

    // Recommendations flattened to (uid, mid) pairs. A direct flatMap replaces
    // the original RDD -> DataFrame -> explode -> RDD round-trip: same pairs,
    // no SQL detour.
    val recallRdd = recResultRdd.flatMap(row => row.rec.map(item => (row.uid, item.mid)))

    // A hit is a recommended pair that is also present in the test set.
    val hit: Long = realRdd.intersection(recallRdd).count()
    print("命中：" + hit)
    print("总共：" + all)
    // BUG FIX: the original computed (hit / all * 1.0000), which performs Long
    // integer division FIRST and therefore almost always printed 0. Convert to
    // Double before dividing, and guard against an empty test set.
    val recall = if (all == 0L) 0.0 else hit.toDouble / all
    println("召回率为:" + recall)

    // Debug output of both pair sets.
    realRdd.toDF().show(50)

    recallRdd.toDF().show(50)
  }


  /**
   * Computes recommendation coverage: the number of distinct movies that
   * appear in the recommendation lists versus the total number of movies
   * stored in the MongoDB "movie" collection.
   *
   * @param recResultRdd per-user recommendation lists
   * @param spark        active SparkSession
   */
  def calCoverage(recResultRdd: RDD[UserRecommendation], spark: SparkSession): Unit = {

    // Total number of movies in the catalogue (was a needless var).
    val allNum = readDataFrameFromMongo("movie", spark).count()

    // Number of distinct movies that were actually recommended to someone.
    val recNum = recResultRdd
      .flatMap(_.rec.map(_.mid))
      .distinct()
      .count()

    // Report both counts.
    print("电影总数为:" + allNum);
    print("推荐的总数为：" + recNum);
  }


  /**
   * Computes the average diversity of the recommendation lists.
   *
   * Per-user formula: Diversity(u) = 1 - (sum of pairwise similarities of
   * u's recommended movies) / (Ru * (Ru - 1)). The usual 1/2 factor is
   * cancelled because every unordered pair is counted twice (both directions
   * are produced by calIntSeqCartesian).
   *
   * @param recResultRdd per-user recommendation lists
   * @param spark        active SparkSession
   */
  def calDiversity(recResultRdd: RDD[UserRecommendation], spark: SparkSession): Unit = {

    import spark.implicits._

    // Load the precomputed movie-similarity table from MongoDB.
    val movieSimilarScoreDF = readDataFrameFromMongo("test_movie_similar_score", spark)
    // Merge the two movie-id columns into a single tuple column "mid",
    // keeping the similarity as "score".
    val df = movieSimilarScoreDF.rdd.map(row => {
      ((row.getAs[Int]("mid1"), row.getAs[Int]("mid")), row.getAs[Double]("score"))
    }).toDF().withColumnRenamed("_1", "mid").withColumnRenamed("_2", "score");


    // Reshape to (uid, [(mid, mid), (mid, mid), ...]): all ordered pairs of
    // movies recommended to each user.
    val userMoviesRdd = recResultRdd.map(row => {
      // Movie ids recommended to this user.
      val seqMids = row.rec.map(i => i.mid)
      // Pairwise Cartesian product (both directions, self-pairs excluded).
      val tuples = calIntSeqCartesian(seqMids)
      (row.uid, tuples)
    })
    // To DataFrame, then explode the pair column into one row per movie pair.
    val userMoviesDF = userMoviesRdd.toDF().withColumnRenamed("_1", "uid").withColumnRenamed("_2", "midCartesian")
    val umDf = userMoviesDF.select($"*", explode($"midCartesian")).withColumnRenamed("col", "mid")



    // Join the per-user pairs with the similarity table (disabled; a
    // hand-built stand-in is used below instead).
    //   val user_movieSimilarScoreDf = umDf.join(df, Seq("mid"), "left")
    //   user_movieSimilarScoreDf.show(10)


    // Hand-built stand-in for the join result above.
    val user_movieSimilarScoreDf = spark.createDataFrame(Seq(
      userMovieSimilarScore(1, 0.8017698448385062),
      userMovieSimilarScore(2, 0.8017698448385062),
      userMovieSimilarScore(1, 0.7591403653186142),
      userMovieSimilarScore(2, 0.7591403653186142),
      userMovieSimilarScore(1, 0.5591403653186142),
      userMovieSimilarScore(1, 0.4591403653186142),
    ))

    // Group by user id, summing pair similarities and counting rows per user.
    val resultDf = user_movieSimilarScoreDf.groupBy("uid").agg(functions.sum("score"), count("uid"))
    resultDf.show(10)

    // Per-user diversity, summed over all users.
    val SumDiversity = resultDf.rdd.map(row => {
      var sumSimilar = row.getAs[Double]("sum(score)")
      // NOTE(review): Ru here is the number of similarity ROWS for this user,
      // not the number of recommended items |R(u)| — these only coincide when
      // every ordered pair has exactly one similarity row. Confirm before
      // trusting the result.
      val Ru = row.getAs[Long]("count(uid)")
      // Single-user diversity; the 1/2 factor is cancelled because each
      // unordered pair was counted twice (see scaladoc above).
      var Diversity = 1 - (sumSimilar / (Ru * (Ru - 1)))
      (Diversity)
    }).reduce((x, y) => x + y)
    // Number of users that received recommendations.
    val userNum = resultDf.count()

    // Report total and per-user average diversity.
    println("这次推荐的总体多样性为：" + SumDiversity)
    println("总体用户数：" + userNum)
    println("总体多样性平均：" + SumDiversity / userNum)
  }

  /**
   * Cosine similarity between two feature vectors.
   *
   * @param matrix1 first feature vector
   * @param matrix2 second feature vector
   * @return dot(a, b) / (|a| * |b|); 0.0 when either vector has zero norm —
   *         the original divided unconditionally and produced NaN in that case.
   */
  def consinSim(matrix1: DoubleMatrix, matrix2: DoubleMatrix): Double = {
    val denom = matrix1.norm2() * matrix2.norm2()
    if (denom == 0.0) 0.0 else matrix1.dot(matrix2) / denom
  }


  /**
   * Scratch method for experimenting with the diversity computation.
   *
   * NOTE(review): incomplete — the final map over userMoviesRdd has an empty
   * body and its result is discarded, so this method currently produces no
   * output and computes nothing beyond building the input RDDs.
   */
  def test1(spark: SparkSession) {

    var sumDiversity = 0.0;

    // Hand-built per-user recommendation lists.
    val recRdd = spark.sparkContext.parallelize(Seq(
      UserRecommendation(1, Seq(Recommendation(2, 5), Recommendation(3, 5), Recommendation(9, 5))),
      UserRecommendation(2, Seq(Recommendation(2, 5), Recommendation(3, 5), Recommendation(9, 5)))
    ));

    import spark.implicits._
    // Hand-built (mid1, mid2, similarity) triples.
    val movieSimilarScoreRdd = spark.sparkContext.parallelize(Seq(
      (1, 2, 0.1),
      (1, 3, 0.4),
      (1, 4, 0.5),
      (2, 4, 0.5),
      (2, 5, 0.5),
      (7, 6, 0.5),
      (8, 6, 0.5),
    ));

    val movieSimilarScoreDf = movieSimilarScoreRdd.toDF();

    // Earlier attempt kept for reference. NOTE(review): foreach runs on the
    // executors, so updating the driver-side var sumDiversity would not work.
    //    recRdd.foreach(row => {
    //      // movies recommended to this user
    //      val seq = row.rec.map(i => i.mid)
    //      val tempDf=movieSimilarScoreDf.where($"_1".isin(seq.toList: _*))
    //      tempDf.createOrReplaceTempView("tmpDF")
    //      // sum of pairwise similarities within this user's recommendation list
    //      val sumSimilar = spark.sql("select sum(_3)  as score from tmpDF").first().getAs[Double]("score")
    //      // plug into the single-user diversity formula:
    //      //Diversity=1-(∑_(i,j∈R(u),i≠j)▒〖s(i,j)〗)/(1/2|R(u)|(|R(u)|-1))
    //      // number of movies recommended to this user
    //      val Ru = row.rec.size
    //      val Diversity = 1 - (sumSimilar / 0.5 * Ru * (Ru - 1))
    //      sumDiversity = sumDiversity + Diversity
    //    })


    // Reshape to (uid, [mid1, mid2, ...]).
    val userMoviesRdd = recRdd.map(row => {
      // Movies recommended to this user.
      val seqMids = row.rec.map(i => i.mid)
      (row.uid, seqMids)
    })
    // Intended to become (uid, avgSimilar) — body left empty and the lazy
    // result is never used, so nothing is evaluated here.
    userMoviesRdd.map(r1 => {
      r1._2.map(r2 => {

      })
    })

    // TODO: aggregate the movie-similarity table.


    //    val seq= Seq(1,2,3)
    //    // a collection cannot be passed to isin directly — splat it, or it throws
    //    val tempDf =movieSimilarScoreDF.where($"_1".isin(seq.toList: _*))
    //    tempDf.show()
    //    tempDf.createOrReplaceTempView("tmpDF")
    //    // sum of the matching similarity scores
    //    val sumSimilar=spark.sql("select sum(_3)  as score from tmpDF").first().getAs[Double]("score")
    //    println(sumSimilar)


  }


  /**
   * Builds all ordered pairs (a, b) from `seq` whose values differ.
   *
   * Every unordered pair appears twice (once in each direction) — the
   * diversity formula relies on this, dividing by Ru * (Ru - 1) rather than
   * half of it. NOTE: the original comment claimed deduplication, but equal
   * values are merely excluded from pairing; nothing is deduplicated.
   *
   * @param seq movie ids recommended to one user
   * @return ordered pairs of distinct values, in input order
   */
  def calIntSeqCartesian(seq: Seq[Int]): ArrayBuffer[(Int, Int)] = {
    val result = ArrayBuffer[(Int, Int)]()
    // For-comprehension replaces the nested foreach; the debug println and
    // explicit `return` of the original are dropped.
    for {
      a <- seq
      b <- seq
      if a != b
    } result += ((a, b))
    result
  }


  /**
   * Looks up the precomputed similarity score for the ordered pair
   * (mid1, mid2).
   *
   * @param mid1              first movie id (matched against _1)
   * @param mid2              second movie id (matched against _2)
   * @param movieSimilarScore (mid1, mid2, score) similarity triples
   * @return the stored score, or 0.0 when the pair is absent — the original
   *         called first() on the filtered RDD, which throws
   *         UnsupportedOperationException when there is no match.
   */
  def calMovieSimialer(mid1: Int, mid2: Int, movieSimilarScore: RDD[(Int, Int, Double)]): Double = {
    movieSimilarScore
      .filter(row => row._1 == mid1 && row._2 == mid2)
      .map(_._3)
      .take(1)        // take(1) never throws on an empty RDD, unlike first()
      .headOption
      .getOrElse(0.0)
  }


  /**
   * Loads the given MongoDB collection as a DataFrame, reusing the connection
   * settings already configured on the SparkSession.
   *
   * @param collectionName name of the MongoDB collection to read
   * @param sc             SparkSession carrying the mongodb input URI
   * @return the collection's contents as a DataFrame
   */
  def readDataFrameFromMongo(collectionName: String, sc: SparkSession): DataFrame = {
    // Override only the collection name; everything else comes from the session.
    val collectionConfig = ReadConfig(Map("collection" -> collectionName), Some(ReadConfig(sc)))
    MongoSpark.load(sc, collectionConfig)
  }
}


