package com.yomob.ml.recommand

import java.io.FileInputStream
import java.util.Properties

import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import com.mongodb.spark.MongoSpark
import org.apache.spark.mllib.evaluation.{RankingMetrics, RegressionMetrics}
import org.apache.spark.rdd.RDD
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.{SparkConf, SparkContext}
import org.bson.BsonDocument

import scala.util.Random
import org.jblas.DoubleMatrix
import redis.clients.jedis.{Jedis, JedisPool, JedisPoolConfig}

import scala.collection.mutable.ArrayBuffer

object RecommendModel {
  /** Cosine similarity of two vectors: 1 = same direction, 0 = unrelated,
    * -1 = opposite. NOTE(review): yields NaN when either vector has zero norm.
    */
  def cosineSimilarity(vec1: DoubleMatrix, vec2: DoubleMatrix): Double = {
    val dotProduct = vec1.dot(vec2)
    val normProduct = vec1.norm2() * vec2.norm2()
    dotProduct / normProduct
  }

  /**
    * Serializable holder of a JedisPool so it can be captured by Spark
    * closures. Fields were mutable `var`s for no reason; they are now `val`s
    * (immutable by default).
    *
    * NOTE(review): only the FIRST "host:port" entry of the comma-separated
    * `url` list is ever used — confirm that is intended given the property is
    * named "redis.url.list".
    *
    * @param url comma-separated list of Redis endpoints, each "host:port"
    */
  class JedisClient(url: String) extends Serializable {
    val config: JedisPoolConfig = new JedisPoolConfig
    config.setMaxIdle(10)
    // Parse the first endpoint only; expected format "host:port".
    val urls: Array[String] = url.split(",")
    val ip: Array[String] = urls(0).split(":")
    val port: Int = ip(1).toInt
    val pool: JedisPool = new JedisPool(config, ip(0), port)

    /** Borrows a connection from the pool; the caller must close it. */
    def getJedis(): Jedis = pool.getResource()
  }

  /**
    * Computes Mean Average Precision: for every user, ranks ALL products by
    * predicted score and compares that ranking against the products the user
    * actually rated.
    *
    * @param model   trained matrix-factorization model
    * @param sc      SparkContext used to broadcast the item-factor matrix
    * @param ratings ground-truth ratings defining each user's relevant items
    * @return global MAP over all users
    */
  def getMAP(model: MatrixFactorizationModel, sc: SparkContext, ratings: RDD[Rating]): Double = {
    val itemFactors = model.productFeatures.map { case (id, factor)
    => factor
    }.collect()
    val itemMatrix = new DoubleMatrix(itemFactors)
    // Broadcast the item-factor matrix: the map closures below need a
    // serializable, read-only copy on every executor (a plain local
    // collection would not be shareable across tasks).
    val imBroadcast = sc.broadcast(itemMatrix)
    val allRecs = model.userFeatures.map { case (userId, array) =>
      val userVector = new DoubleMatrix(array)
      val scores = imBroadcast.value.mmul(userVector)
      val sortedWithId = scores.data.zipWithIndex.sortBy(-_._1)
      // +1 because matrix rows are 0-based while product IDs start at 1.
      // NOTE(review): this assumes product IDs are dense and consecutive and
      // that row order matches collect() order of productFeatures — confirm.
      val recommendedIds = sortedWithId.map(_._2 + 1).toSeq //+1: matrix rows start at 0
      (userId, recommendedIds)
    }
    // Ground truth: the set of products each user actually interacted with.
    val userMovies = ratings.map { case Rating(user, product, rating) =>
      (user, product)
    }.groupBy(_._1)
    val predictedAndTrueForRanking = allRecs.join(userMovies).map { case
      (userId, (predicted, actualWithIds)) =>
      val actual = actualWithIds.map(_._2)
      (predicted.toArray, actual.toArray)
    }
    // Compute MAP over the (predicted ranking, actual items) pairs.
    val rankingMetrics = new RankingMetrics(predictedAndTrueForRanking)
    rankingMetrics.meanAveragePrecision
  }

  /**
    * Estimates AUC (area under the ROC curve) per user and returns the mean.
    * For each user it scores the user's actual ("positive") items and an
    * equally sized random sample of items the user did NOT interact with
    * ("negative"), then counts how often a positive item outranks a negative
    * one.
    *
    * @param positiveData    held-out ratings treated as positive examples
    * @param bAllItemIDs     broadcast of every known item ID (negative pool)
    * @param predictFunction scores arbitrary (user, item) pairs
    * @return mean AUC over all users present in positiveData
    */
  def areaUnderCurve(
                      positiveData: RDD[Rating],
                      bAllItemIDs: Broadcast[Array[Int]],
                      predictFunction: (RDD[(Int, Int)] => RDD[Rating])) = {
    val positiveUserProducts = positiveData.map(r => (r.user, r.product))
    val positivePredictions = predictFunction(positiveUserProducts).groupBy(_.user)
    val negativeUserProducts = positiveUserProducts.groupByKey().mapPartitions {
      // mapPartitions operates on many (user,positive-items) pairs at once,
      // so the Random instance is created once per partition, not per user.
      userIDAndPosItemIDs => {
        val random = new Random()
        val allItemIDs = bAllItemIDs.value
        userIDAndPosItemIDs.map { case (userID, posItemIDs) =>
          val posItemIDSet = posItemIDs.toSet
          val negative = new ArrayBuffer[Int]()
          var i = 0
          // Sample with rejection until we have as many negatives as
          // positives; the i < allItemIDs.size bound guarantees termination
          // even for users who interacted with (nearly) every item, at the
          // cost of possibly returning fewer negatives than positives.
          while (i < allItemIDs.size && negative.size < posItemIDSet.size) {
            val itemID = allItemIDs(random.nextInt(allItemIDs.size))
            if (!posItemIDSet.contains(itemID)) {
              negative += itemID
            }
            i += 1
          }
          negative.map(itemID => (userID, itemID))
        }
      }
    }.flatMap(t => t)
    val negativePredictions = predictFunction(negativeUserProducts).groupBy(_.user)
    positivePredictions.join(negativePredictions).values.map {
      case (positiveRatings, negativeRatings) =>
        var correct = 0L
        var total = 0L
        // For each (positive, negative) pairing,
        for (positive <- positiveRatings;
             negative <- negativeRatings) {
          // Count the correctly-ranked pairs (positive scored higher).
          // NOTE(review): ties count as incorrect.
          if (positive.rating > negative.rating) {
            correct += 1
          }
          total += 1
        }
        correct.toDouble / total
    }.mean() // Return mean AUC over users
  }

  /** Popularity baseline: scores every (user, product) pair by the product's
    * total rating mass in the training set, ignoring the user entirely.
    * Products never seen in training score 0.0. Curried so it can be passed
    * as the predictFunction of areaUnderCurve.
    */
  def predictMostListened(sc: SparkContext, train: RDD[Rating])(allData: RDD[(Int, Int)]) = {
    val perProductTotals = train.map(r => (r.product, r.rating)).reduceByKey(_ + _).collectAsMap()
    // Broadcast the (small) product->total map to all executors.
    val bListenCount = sc.broadcast(perProductTotals)
    allData.map { case (user, product) =>
      Rating(user, product, bListenCount.value.getOrElse(product, 0.0))
    }
  }

  /** Splits the data 90/10, trains nothing (uses the popularity baseline),
    * and returns the baseline's mean AUC on the held-out 10%.
    */
  def getAUC(allData: RDD[Rating], sc: SparkContext): Double = {
    val splits = allData.randomSplit(Array(0.9, 0.1))
    val trainData = splits(0)
    val cvData = splits(1)
    // Both splits are reused (train by the predictor, cv twice) — cache them.
    trainData.cache()
    cvData.cache()
    val bAllItemIDs = sc.broadcast(allData.map(_.product).distinct().collect())
    areaUnderCurve(cvData, bAllItemIDs, predictMostListened(sc, trainData))
  }

  /** NOTE(review): dead code — the entire body is commented out, so calling
    * this is a no-op. The commented block sketches user-to-user cosine
    * similarity over model.userFeatures (mirroring the ad-similarity loop in
    * main). Either finish it or delete it.
    */
  def getUserSimilarity(): Unit = {
    //val userID = jedis.smembers("user")
    //        for (k <- userID) {
    //          val userId = jedis.get(k).toInt
    //          val userFactor = model.userFeatures.lookup(userId).head
    //          val userVector = new DoubleMatrix(userFactor)
    //          val sims = model.userFeatures.map { case (id, factor) =>
    //            val factorVector = new DoubleMatrix(factor)
    //            val sim = cosineSimilarity(factorVector, userVector)
    //            (id,sim)
    //          }
    //          val sortedSims = sims.top(10)(Ordering.by[(Int,Double),Double]{case (id,similarity) => similarity})
    //          println(sortedSims.slice(1,10).map{case (id,sim) => userSimilarity(k,userInfo.get(id).get,sim)}.mkString("\n"))
    //          //userSimilarities += sortedSims.slice(1,5).map{case (id,sim) => (userId,id,sim)}.mkString("\n")
    //        }
  }

  case class userSimilarity(me: String, other: String, similarity: Double)

  /**
    * Builds a SparkContext. Any runModel other than "cluster" (including
    * null) yields a local context wired to hard-coded localhost MongoDB URIs;
    * "cluster" uses the supplied master URL and Mongo input URI.
    * NOTE(review): the cluster branch sets no output URI — confirm intended.
    */
  def getSparkContext(runModel: String, mongoUri: String, db: String, sparkUrl: String): SparkContext = {
    // Null-safe: "cluster" only when explicitly configured.
    val isCluster = runModel != null && runModel.equals("cluster")
    val conf =
      if (isCluster) {
        new SparkConf()
          .setMaster(sparkUrl)
          .setAppName("als")
          .set("spark.mongodb.input.uri", mongoUri + "/" + db + ".daily_user_info")
      } else {
        new SparkConf()
          .setMaster("local")
          .setAppName("mongo")
          .set("spark.mongodb.input.uri", "mongodb://127.0.0.1:27017/testdb.daily_user_info")
          .set("spark.mongodb.output.uri", "mongodb://127.0.0.1:27017/testdb.hello_user")
      }
    new SparkContext(conf)
  }

  /**
    * Entry point. Pipeline:
    *  1. load config from args(0), connect to Redis and Spark/MongoDB;
    *  2. load click documents, assign dense integer IDs to devices and ads
    *     via Redis counters, and build Rating(user, ad, 5.0) triples;
    *  3. train an ALS model and report MSE/RMSE on the training set;
    *  4. compute per-ad cosine-similarity lists and per-user top-8
    *     recommendations, saving the latter to modelSavePath.
    *
    * @param args args(0) = path to the "conf.properties" file
    */
  def main(args: Array[String]): Unit = {
    if (args.length < 1) {
      println("please special the config file 'conf.properties',for example: ./conf.properties")
      System.exit(1)
    }
    val properties = new Properties()
    properties.load(new FileInputStream(args(0)))

    val mongoURI = properties.getProperty("mongodb.uri")
    val mongoDB = properties.getProperty("mongodb.database")
    val redisUrl = properties.getProperty("redis.url.list")
    val redisPwd = properties.getProperty("redis.password")
    val modelSavePath = properties.getProperty("model.save.path")
    val sparkUrl = properties.getProperty("spark.master.url")
    val runModel = properties.getProperty("run.model")
    //init jedis client and sc
    val jedisClient = new JedisClient(redisUrl)
    val jedis = jedisClient.getJedis()
    jedis.auth(redisPwd)
    val sc = getSparkContext(runModel, mongoURI, mongoDB,sparkUrl)
    sc.setLogLevel("warn")
    val originRDD = MongoSpark.load(sc)
    // Query pipeline: keep only documents where adID, deviceID and
    // clicked_adIDs are all present (non-null).
    val matchQuery = new BsonDocument("$match", BsonDocument.parse("{\"adID\":{\"$ne\":null}}"))
    val matchQuery2 = new BsonDocument("$match", BsonDocument.parse("{\"deviceID\":{\"$ne\":null}}"))
    val matchQuery3 = new BsonDocument("$match", BsonDocument.parse("{\"clicked_adIDs\":{\"$ne\":null}}"))

    val clickAD = originRDD.withPipeline(Seq(matchQuery, matchQuery2, matchQuery3))
    val sumDevice = clickAD.count()
    val adDF = clickAD.toDF()
    val user2ad = adDF.select("deviceID", "clicked_adIDs")

    // In-memory reverse maps: dense numeric ID -> original string ID.
    val userInfo = scala.collection.mutable.HashMap.empty[Int, String]
    val adInfo = scala.collection.mutable.HashMap.empty[Int, String]
    val arr = ArrayBuffer[Rating]()
    // For each user and each ad they clicked: assign dense numeric IDs via
    // Redis counters ("userNum"/"adNum") and store the mappings in Redis.
    user2ad.rdd.collect().foreach(r => {
      // NOTE(review): auth was already performed on this connection above;
      // re-authenticating on every row is redundant — confirm before removing.
      jedis.auth(redisPwd)
      //user info
      if (!jedis.exists(r.getAs(0))) {
        jedis.incr("userNum")
        jedis.set(r.getAs(0), jedis.get("userNum"))
        jedis.sadd("user", r.getAs(0))
      }
      userInfo.put(jedis.get(r.getAs(0)).toInt, r.getAs(0))
      //click ad info
      val ads = r.getAs[Seq[String]](1).distinct
      ads.foreach(id => {
        if (!jedis.exists(id)) {
          jedis.incr("adNum")
          jedis.set(id, jedis.get("adNum"))
          jedis.sadd("ad", id)
        }
        adInfo.put(jedis.get(id).toInt, id)
        // Every click becomes an implicit "likes it" rating of 5.0.
        val rating = Rating(jedis.get(r.getAs(0)).toInt, jedis.get(id).toInt, 5D)
        arr += rating
      })
    }
    )
    // Training data set: one Rating per (user, distinct clicked ad).
    val ratings = sc.parallelize(arr.toArray.toList)
    // rank: 10~200, number of latent factors in the low-rank approximation.
    // Larger generally fits better but costs more Spark memory.
    val rank = 10
    // ALS iterations; each sweep moves the model closer to convergence.
    val numIterations = 10
    // Regularization strength, controls over-fitting.
    val lamda = 0.01
    //create model
    val model = ALS.train(ratings, rank, numIterations, lamda)
    //predict result
    val usersProducts = ratings.map {
      case Rating(user, product, rate) =>
        (user, product)
    }
    val predictions = model.predict(usersProducts).map {
      case Rating(user, product, rate) =>
        ((user, product), rate)
    }
    // Human-readable ratings: (userName, userId, adName, adId, rating).
    val sourceRatings = ratings.map{
      case Rating(user,product,rate) =>
        (userInfo.get(user).get,user,adInfo.get(product).get,product,rate)
    }
    val ratesAndPreds = ratings.map {
      case Rating(user, product, rate) =>
        ((user, product), rate)
    } join (predictions)

    // Training-set MSE (same data the model was fit on, so optimistic).
    val MSE = ratesAndPreds.map {
      case ((user, product), (r1, r2)) =>
        val err = (r1 - r2)
        err * err
    }.mean()

    println("Data Set count : " + originRDD.count())
    println("But,click device user count : " + sumDevice)
    println("Used for trained data set size : " + ratings.collect().length)
    println("user count : " + model.userFeatures.count())
    println("ad count : " + model.productFeatures.count())
    val predictedAndTrue = ratesAndPreds.map { case ((user, product), (actual, predicted)) => (actual, predicted) }
    val regressionMetrics = new RegressionMetrics(predictedAndTrue)
    println("Mean Squared Error = " + regressionMetrics.meanSquaredError)
    println("Root Mean Squared Error = " + regressionMetrics.rootMeanSquaredError)
    // Global Mean Average Precision / AUC evaluation (disabled).
    //println("Mean Average Precision = " + getMAP(model,sc,ratings))
    //println("AUC = " + getAUC(ratings, sc))

    // For each ad, compute cosine similarity to every other ad and keep the
    // best matches. NOTE(review): the original comment said "top 20" but the
    // code takes top(10) and then slice(1,10) — i.e. at most 9 neighbours.
    val adID = jedis.smembers("ad")
    val adSimilarities = scala.collection.mutable.ArrayBuffer.empty[String]
    // NOTE(review): JavaConversions is deprecated; JavaConverters/.asScala is
    // the supported replacement. Left unchanged here.
    import scala.collection.JavaConversions._
    for (k <- adID) {//k real ad name
      val numId = jedis.get(k).toInt
      val itemFactor = model.productFeatures.lookup(numId).head
      val itemVector = new DoubleMatrix(itemFactor)
      val sims = model.productFeatures.map { case (id, factor) =>
        val factorVector = new DoubleMatrix(factor)
        // Convert each ad's factor array into a vector,
        val sim = cosineSimilarity(factorVector, itemVector) // then score it against this ad.
        (id, sim)
      }
      // Top N by similarity; slot 0 is always the ad itself (similarity 1).
      val sortedSims = sims.top(10)(Ordering.by[(Int, Double), Double] { case (id, similarity) => similarity }) // top 1 is the ad itself
      val s = sortedSims.slice(1,10).map{ case (id, sim) => (k, adInfo.get(id).get, sim) }.mkString("\n")
      adSimilarities += s
    }
    // get user recommend
    val users = ratings.map(_.user).distinct()
    println("Start to recommend ad for all Users.")
    val start = System.currentTimeMillis()
    // Driver-side loop: recommend 8 ads per user.
    val recommendPro = users.collect.flatMap{user =>
      model.recommendProducts(user,8)
    }
    val outPutPath = modelSavePath
    sc.parallelize(recommendPro.toList).saveAsTextFile(outPutPath+"/recommends1")
    val endTime = System.currentTimeMillis()
    println("Finished recommend ad for all Users,cost "+(endTime-start)/1000+"s.")

    //save model and data set
    val adRDD = sc.parallelize(adInfo.toArray.toList)
    val userRDD = sc.parallelize(userInfo.toArray.toList)
//    model.save(sc,modelSavePath)
//    ratings.saveAsTextFile(outPutPath+"/ratings")
//    adRDD.saveAsTextFile(outPutPath+"/ad")
//    userRDD.saveAsTextFile(outPutPath+"/user")
//    sc.parallelize(adSimilarities.toArray.toList).saveAsTextFile(outPutPath+"/adSims")
//    sourceRatings.saveAsTextFile(outPutPath+"/sourceRatings")
    sc.stop()
    jedis.close()
  }
}

