package com.spark.mongodb

import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import com.mongodb.spark.MongoSpark
import org.apache.spark.mllib.evaluation.{RankingMetrics, RegressionMetrics}
import org.apache.spark.rdd.RDD
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.{SparkConf, SparkContext}
import org.bson.BsonDocument

import scala.util.Random
import org.jblas.DoubleMatrix
import redis.clients.jedis.{Jedis, JedisPool, JedisPoolConfig}

import scala.collection.mutable.ArrayBuffer

object DataAnalysis {
  /**
   * Cosine similarity of two vectors: 1 means identical direction,
   * 0 means unrelated (orthogonal), -1 means exactly opposite.
   */
  def cosineSimilarity(vec1: DoubleMatrix, vec2: DoubleMatrix): Double = {
    val dotProduct = vec1.dot(vec2)
    val normProduct = vec1.norm2() * vec2.norm2()
    dotProduct / normProduct
  }

  /**
   * Thin wrapper around a Jedis connection pool targeting localhost:6379.
   *
   * NOTE(review): `JedisPool` is not serializable, so shipping instances of
   * this class inside Spark closures will likely fail at runtime despite the
   * `Serializable` marker — verify before distributed use. Host/port are
   * hard-coded; consider moving them to configuration.
   */
  class JedisClient() extends Serializable {
    val config: JedisPoolConfig = new JedisPoolConfig
    // Cap the number of idle connections kept alive in the pool.
    config.setMaxIdle(10)
    var pool = new JedisPool(config, "localhost", 6379)

    // Borrow a connection from the pool; the caller is responsible for
    // closing/returning it (this class never does).
    def getJedis(): Jedis = {
      pool.getResource()
    }
  }
  /**
   * Computes the Mean Average Precision (MAP) of the model's full ranking of
   * all products for every user, against the products each user actually rated.
   *
   * Fix: the previous version recommended `rowIndex + 1` as the product ID,
   * which is only correct when product IDs happen to be the consecutive
   * integers 1..n AND `collect()` happens to return them in that order —
   * neither is guaranteed by Spark. The real product IDs are now collected
   * alongside the factor matrix and used to translate row indices back.
   *
   * @param model   trained matrix-factorization model
   * @param sc      Spark context used to broadcast the item-factor matrix
   * @param ratings ground-truth ratings (the "relevant" products per user)
   * @return mean average precision over all users (higher is better)
   */
  def getMAP(model: MatrixFactorizationModel, sc: SparkContext, ratings: RDD[Rating]): Double = {
    // Collect (productId, factor) pairs together so that the matrix row order
    // stays aligned with the product IDs.
    val idsAndFactors = model.productFeatures.collect()
    val itemIds = idsAndFactors.map(_._1)
    val itemMatrix = new DoubleMatrix(idsAndFactors.map(_._2))
    // Broadcast both the item matrix and the rowIndex -> productId mapping so
    // every task can score all items locally without reshipping them per record.
    val imBroadcast = sc.broadcast(itemMatrix)
    val idsBroadcast = sc.broadcast(itemIds)
    val allRecs = model.userFeatures.map { case (userId, array) =>
      val userVector = new DoubleMatrix(array)
      // (numItems x rank) * (rank x 1) -> one score per item.
      val scores = imBroadcast.value.mmul(userVector)
      val sortedWithId = scores.data.zipWithIndex.sortBy(-_._1)
      // Translate matrix row indices back to real product IDs.
      val recommendedIds = sortedWithId.map { case (_, idx) => idsBroadcast.value(idx) }.toSeq
      (userId, recommendedIds)
    }
    // Ground truth: the set of products each user actually interacted with.
    val userMovies = ratings.map { case Rating(user, product, rating) =>
      (user, product)
    }.groupBy(_._1)
    val predictedAndTrueForRanking = allRecs.join(userMovies).map {
      case (userId, (predicted, actualWithIds)) =>
        val actual = actualWithIds.map(_._2)
        (predicted.toArray, actual.toArray)
    }
    // MAP: higher is better.
    val rankingMetrics = new RankingMetrics(predictedAndTrueForRanking)
    rankingMetrics.meanAveragePrecision
  }
  /**
   * Computes AUC per user and returns the mean over all users ("mean AUC").
   *
   * Held-out `positiveData` items are the positives; an equal-sized random
   * sample of items the user did NOT rate are the negatives. AUC is then the
   * fraction of positive/negative pairs where the positive scores higher.
   *
   * @param positiveData    held-out ratings treated as positive examples
   * @param bAllItemIDs     broadcast of every distinct item ID in the data set
   * @param predictFunction produces scored Ratings for given (user, item) pairs
   * @return mean per-user AUC (1.0 = perfect ranking, 0.5 ≈ random)
   */
  def areaUnderCurve(
                      positiveData: RDD[Rating],
                      bAllItemIDs: Broadcast[Array[Int]],
                      predictFunction: (RDD[(Int,Int)] => RDD[Rating])) = {
    // What this actually computes is AUC, per user. The result is actually something
    // that might be called "mean AUC".

    // Take held-out data as the "positive", and map to tuples
    val positiveUserProducts = positiveData.map(r => (r.user, r.product))
    // Make predictions for each of them, including a numeric score, and gather by user
    val positivePredictions = predictFunction(positiveUserProducts).groupBy(_.user)

    // BinaryClassificationMetrics.areaUnderROC is not used here since there are really lots of
    // small AUC problems, and it would be inefficient, when a direct computation is available.

    // Create a set of "negative" products for each user. These are randomly chosen
    // from among all of the other items, excluding those that are "positive" for the user.
    val negativeUserProducts = positiveUserProducts.groupByKey().mapPartitions {
      // mapPartitions operates on many (user,positive-items) pairs at once
      userIDAndPosItemIDs => {
        // Init an RNG and the item IDs set once for partition
        // (unseeded Random: the sampled negatives differ run to run)
        val random = new Random()
        val allItemIDs = bAllItemIDs.value
        userIDAndPosItemIDs.map { case (userID, posItemIDs) =>
          val posItemIDSet = posItemIDs.toSet
          val negative = new ArrayBuffer[Int]()
          var i = 0
          // Keep about as many negative examples per user as positive.
          // Duplicates are OK
          while (i < allItemIDs.size && negative.size < posItemIDSet.size) {
            val itemID = allItemIDs(random.nextInt(allItemIDs.size))
            if (!posItemIDSet.contains(itemID)) {
              negative += itemID
            }
            i += 1
          }
          // Result is a collection of (user,negative-item) tuples
          negative.map(itemID => (userID, itemID))
        }
      }
    }.flatMap(t => t)
    // flatMap breaks the collections above down into one big set of tuples

    // Make predictions on the rest:
    val negativePredictions = predictFunction(negativeUserProducts).groupBy(_.user)

    // Join positive and negative by user
    positivePredictions.join(negativePredictions).values.map {
      case (positiveRatings, negativeRatings) =>
        // AUC may be viewed as the probability that a random positive item scores
        // higher than a random negative one. Here the proportion of all positive-negative
        // pairs that are correctly ranked is computed. The result is equal to the AUC metric.
        var correct = 0L
        var total = 0L
        // For each pairing,
        for (positive <- positiveRatings;
             negative <- negativeRatings) {
          // Count the correctly-ranked pairs
          if (positive.rating > negative.rating) {
            correct += 1
          }
          total += 1
        }
        // Return AUC: fraction of pairs ranked correctly
        correct.toDouble / total
    }.mean() // Return mean AUC over users
  }
  /**
   * Baseline predictor: scores every (user, product) pair by the product's
   * total rating mass in the training set, ignoring the user entirely.
   * Curried so it can be passed to [[areaUnderCurve]] as a predict function.
   */
  def predictMostListened(sc: SparkContext, train: RDD[Rating])(allData: RDD[(Int,Int)]) = {
    // Sum ratings per product, pull the small map to the driver, broadcast it.
    val listenCounts = train
      .map(r => (r.product, r.rating))
      .reduceByKey(_ + _)
      .collectAsMap()
    val bListenCount = sc.broadcast(listenCounts)
    allData.map { case (user, product) =>
      // Unseen products get a score of 0.0.
      val score = bListenCount.value.getOrElse(product, 0.0)
      Rating(user, product, score)
    }
  }
  /**
   * Splits the data 90/10 into train/validation and returns the mean AUC of
   * the popularity baseline ([[predictMostListened]]) on the validation split.
   */
  def getAUC(allData: RDD[Rating], sc: SparkContext): Double = {
    val splits = allData.randomSplit(Array(0.9, 0.1))
    val trainData = splits(0)
    val cvData = splits(1)
    // Both splits are reused by the AUC computation; cache them.
    trainData.cache()
    cvData.cache()
    // Broadcast the universe of item IDs for negative sampling.
    val bAllItemIDs = sc.broadcast(allData.map(_.product).distinct().collect())
    areaUnderCurve(cvData, bAllItemIDs, predictMostListened(sc, trainData))
  }
  case class userSimilarity(me : String,other : String, similarity: Double )
  /**
   * Entry point. Pipeline:
   *   1. Load ad-click documents from MongoDB, keeping only complete records.
   *   2. Use Redis counters to assign dense integer IDs to device IDs and ad
   *      IDs, emitting one implicit Rating(user, ad, 5.0) per distinct click.
   *   3. Train an ALS model and print MSE/RMSE, MAP and AUC.
   *   4. Compute ad-to-ad cosine similarities and a sample recommendation.
   *
   * NOTE(review): requires live MongoDB (127.0.0.1:27017) and Redis
   * (localhost:6379) instances; the Redis password is hard-coded in two
   * places — move endpoints/credentials to configuration. The borrowed Jedis
   * connection is never returned to the pool and `sc` is never stopped.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("mongo")
      .set("spark.mongodb.input.uri", "mongodb://127.0.0.1:27017/testdb.daily_user_info")
      .set("spark.mongodb.output.uri", "mongodb://127.0.0.1:27017/testdb.hello_user")
    val sc = new SparkContext(conf)
    sc.setLogLevel("warn")
    val originRDD = MongoSpark.load(sc)
    // Aggregation pipeline: keep only documents where adID, platform and
    // deviceBrand are all non-null.
    val matchQuery = new BsonDocument("$match", BsonDocument.parse("{\"adID\":{\"$ne\":null}}"))
    val matchQuery2 = new BsonDocument("$match", BsonDocument.parse("{\"platform\":{\"$ne\":null}}"))
    val matchQuery3 = new BsonDocument("$match", BsonDocument.parse("{\"deviceBrand\":{\"$ne\":null}}"))

    val clickAD = originRDD.withPipeline(Seq(matchQuery, matchQuery2, matchQuery3))
    val sumDevice = clickAD.count()
    //clickAD.foreach(println)
    val adDF = clickAD.toDF()

    //val count = adDF.groupBy("adID").count().sort("count").show(100)
    //adDF.groupBy("adID", "platform").count().show()
    //val db = adDF.groupBy("adID", "deviceBrand").count().sort("adID")
    // NOTE(review): `ads` is never read — it is shadowed by the local `ads`
    // inside the foreach below and could be removed.
    var ads = adDF.select("adID").distinct().collectAsList()
    val user2ad = adDF.select("deviceID", "clicked_adIDs")
    val jedisClient = new JedisClient()
    val jedis = jedisClient.getJedis()
    jedis.auth("SjhkHD3J5k6H8SjSbK3SC")
    // Numeric-id -> original-string-id lookup tables, filled while scanning rows.
    val userInfo = scala.collection.mutable.HashMap.empty[Int, String]
    val adInfo = scala.collection.mutable.HashMap.empty[Int, String]
    val arr = ArrayBuffer[Rating]()
    // Assign dense integer IDs via Redis counters ("userNum"/"adNum") and
    // register members in the "user"/"ad" sets; build the Rating buffer.
    user2ad.rdd.collect().foreach(r => {
      // NOTE(review): already authenticated above — re-authenticating on
      // every row is redundant.
      jedis.auth("SjhkHD3J5k6H8SjSbK3SC")
      //user info
      if (!jedis.exists(r.getAs(0))) {
        jedis.incr("userNum")
        jedis.set(r.getAs(0), jedis.get("userNum"))
        jedis.sadd("user", r.getAs(0))
      }
      userInfo.put(jedis.get(r.getAs(0)).toInt, r.getAs(0))
      //click ad info
      val ads = r.getAs[Seq[String]](1).distinct
      ads.foreach(id => {
        if (!jedis.exists(id)) {
          jedis.incr("adNum")
          jedis.set(id, jedis.get("adNum"))
          jedis.sadd("ad", id)
        }
        adInfo.put(jedis.get(id).toInt, id)
        // Implicit feedback: every distinct click is scored as a flat 5.0.
        val rating = Rating(jedis.get(r.getAs(0)).toInt, jedis.get(id).toInt, 5D)
        arr += rating
      })
    }
    )
    val ratings = sc.parallelize(arr.toArray.toList)

    //println(ratings.foreach(println))
    // arr.toArray[Rating].foreach(println)
    //create model
    val rank = 102
    // 10~200: number of latent factors in the low-rank approximation; a larger
    // rank can fit better but costs more Spark memory.
    val numIterations = 12
    // each ALS iteration moves the model closer to convergence
    val lamda = 0.01
    // regularization constant; controls overfitting
    val model = ALS.train(ratings, rank, numIterations, lamda)

    //predict result
    val usersProducts = ratings.map {
      case Rating(user, product, rate) =>
        (user, product)
    }
    val predictions = model.predict(usersProducts).map {
      case Rating(user, product, rate) =>
        ((user, product), rate)
    }
    val ratesAndPreds = ratings.map {
      case Rating(user, product, rate) =>
        ((user, product), rate)
    } join (predictions)

    // NOTE(review): MSE is computed but its println is commented out below;
    // RegressionMetrics reports the same value — consider removing this.
    val MSE = ratesAndPreds.map {
      case ((user, product), (r1, r2)) =>
        val err = (r1 - r2)
        err * err
    }.mean()
    println("Data Set count : "+originRDD.count())
    println("But,click device user count : " + sumDevice)
    println("Used for trained data set size : " + ratings.collect().length)
    println("user count : "+model.userFeatures.count())
    println("ad count : "+model.productFeatures.count())

    //println("Mean Squared Error = " + MSE)
    //Root Mean Squared Error
    val predictedAndTrue = ratesAndPreds.map { case ((user, product), (actual, predicted)) => (actual, predicted) }
    val regressionMetrics = new RegressionMetrics(predictedAndTrue)
    println("Mean Squared Error = " + regressionMetrics.meanSquaredError)
    println("Root Mean Squared Error = " + regressionMetrics.rootMeanSquaredError)

    // Global Mean Average Precision (MAP)
    println("Mean Average Precision = " + getMAP(model,sc,ratings))
    println("AUC = "+ getAUC(ratings,sc))

    val adID = jedis.smembers("ad")
    val userID = jedis.smembers("user")

    import scala.collection.JavaConversions._
    //ad similarity
    // For every ad, score all ads by cosine similarity of their ALS factor
    // vectors and take the top 5 (the top-1 is always the ad itself).
    for (k <- adID) {
      val numId = jedis.get(k).toInt
      val itemFactor = model.productFeatures.lookup(numId).head
      val itemVector = new DoubleMatrix(itemFactor)

      val sims = model.productFeatures.map { case (id, factor) =>
        val factorVector = new DoubleMatrix(factor)
        // wrap each ad's factor array as a vector
        val sim = cosineSimilarity(factorVector, itemVector) // similarity between this ad and ad k
        (id, sim)
      }
      val sortedSims = sims.top(5)(Ordering.by[(Int, Double), Double] { case (id, similarity) => similarity }) // top N; top-1 is always the ad itself
      //sortedSims.slice(1, 2).map { case (id, sim) => (id, sim) }.mkString("\n") // take indices 1~2 and join into one string
      //println(numId)
      // map numeric ids back to the original string ids
      //println(sortedSims.slice(1, 5).map { case (id, sim) => (id, adInfo.get(id).get, sim) }.mkString("\n"))
    }
    //user similarity
    // var userSimilarities = scala.collection.mutable.ArrayBuffer[String]
    //    for (k <- userID) {
    //      val userId = jedis.get(k).toInt
    //      val userFactor = model.userFeatures.lookup(userId).head
    //      val userVector = new DoubleMatrix(userFactor)
    //      val sims = model.userFeatures.map { case (id, factor) =>
    //        val factorVector = new DoubleMatrix(factor)
    //        val sim = cosineSimilarity(factorVector, userVector)
    //        (id,sim)
    //      }
    //      val sortedSims = sims.top(10)(Ordering.by[(Int,Double),Double]{case (id,similarity) => similarity})
    //      println(sortedSims.slice(1,10).map{case (id,sim) => userSimilarity(k,userInfo.get(id).get,sim)}.mkString("\n"))
    //      //userSimilarities += sortedSims.slice(1,5).map{case (id,sim) => (userId,id,sim)}.mkString("\n")
    //    }

    // NOTE(review): hard-coded sample user id 5848 — lookup will fail if that
    // id is absent from the trained model; the output string is also missing
    // an '=' after "rating".
    model.recommendProducts(5848,3).foreach(x => {
      println("user="+x.user+",ad="+x.product+",rating"+x.rating)
    })
    //save model and data set
    //model.save(sc,"/Users/qifei/work/recommendModel")
    //save user and ad feature
    //val outPutPath = "/Users/qifei/work"
    //model.userFeatures.map{case (id,vec) => id +"\t"+vec.mkString(",")}.saveAsTextFile(outPutPath+"/userFeatures")
    //model.productFeatures.map{case (id,vec) => id + "\t"+ vec.mkString(",")}.saveAsTextFile(outPutPath+"/productFeatures")
  }
}
