package com.fanli.bigdata.rec

import org.apache.log4j.{Level, Logger}
import org.apache.solr.client.solrj.impl.HttpSolrServer
import org.apache.solr.common.SolrInputDocument
import org.apache.spark.ml.recommendation.ALS
import org.apache.spark.ml.recommendation.ALS.Rating
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.json4s._
import org.json4s.jackson.Serialization
import scopt.OptionParser

import scala.collection.JavaConversions
import scala.collection.JavaConverters._

object SuperMainPageRec {

  /**
   * Command-line configuration for this job (populated by scopt in `main`).
   *
   * @param ratings       input path of the user action (rating) dataset; required
   * @param predicted     input path of the candidate item ids to score; required
   * @param solrUrl       base URL of the Solr index to write recommendations to; required
   * @param commitStep    number of user documents per Solr add/commit batch
   * @param recTopNum     number of top-scored items kept per user
   * @param userStep      number of users scored per prediction batch
   * @param maxIter       maximum ALS iterations
   * @param regParam      ALS regularization parameter
   * @param rank          ALS latent factor rank
   * @param numBlocks     ALS user/item block count
   * @param implicitPrefs whether to use implicit-preference ALS
   *                      (NOTE(review): flag is parsed but never passed to the ALS
   *                      builder in `run` — confirm whether that is intentional)
   */
  case class Params(
           ratings: String = null,
           predicted: String = null,
           solrUrl: String = null,
           commitStep: Int = 10000,
           recTopNum: Int = 1000,
           userStep: Int = 100000,
           maxIter: Int = 10,
           regParam: Double = 0.1,
           rank: Int = 10,
           numBlocks: Int = 10,
           implicitPrefs: Boolean = false) extends AbstractParams[Params]

  /**
   * Entry point: parses command-line options into a [[Params]] and runs the job.
   * Exits with status 1 when the arguments cannot be parsed.
   */
  def main(args: Array[String]): Unit = {

    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)

    val defaultParams = Params()

    val parser = new OptionParser[Params]("SuperMainPageRec") {
      head("SuperMainPageRec: a recommender over super user action data.")
      opt[String]("ratings")
        .required()
        .text("path to a super user action dataset of ratings")
        .action((x, c) => c.copy(ratings = x))
      opt[String]("predicted")
        .required()
        .text("path to a predicted data set(pid)")
        .action((x, c) => c.copy(predicted = x))
      opt[String]("solr")
        .required()
        .text("path to solr index")
        .action((x, c) => c.copy(solrUrl = x))
      // Fixed stray '}}' typos in the default-value help strings below.
      opt[Int]("commitStep")
        .text(s"commitStep, default: ${defaultParams.commitStep}")
        .action((x, c) => c.copy(commitStep = x))
      opt[Int]("userStep")
        .text(s"userStep, default: ${defaultParams.userStep}")
        .action((x, c) => c.copy(userStep = x))
      opt[Int]("recTopNum")
        .text(s"recTopNum, default: ${defaultParams.recTopNum}")
        .action((x, c) => c.copy(recTopNum = x))
      opt[Int]("rank")
        .text(s"rank, default: ${defaultParams.rank}")
        .action((x, c) => c.copy(rank = x))
      opt[Int]("maxIter")
        .text(s"max number of iterations, default: ${defaultParams.maxIter}")
        .action((x, c) => c.copy(maxIter = x))
      opt[Double]("regParam")
        .text(s"regularization parameter, default: ${defaultParams.regParam}")
        .action((x, c) => c.copy(regParam = x))
      opt[Int]("numBlocks")
        .text(s"number of blocks, default: ${defaultParams.numBlocks}")
        .action((x, c) => c.copy(numBlocks = x))
      opt[Unit]("implicitPrefs")
        .text("use implicit preference")
        .action((_, c) => c.copy(implicitPrefs = true))
      note(
        """
          |Example command line to run this app:
          |
          | bin/spark-submit --class com.fanli.bigdata.rec.SuperMainPageRec \
          |  target/scala-*/your-assembly-*.jar \
          |  --rank 10 --maxIter 15 --regParam 0.1 \
          |  --ratings hdfs:///path/to/ratings \
          |  --predicted hdfs:///path/to/candidates \
          |  --solr http://solr-host:8983/solr/core
        """.stripMargin)
    }

    // Exhaustive match instead of the deprecated Option.map{..}.getOrElse idiom.
    parser.parse(args, defaultParams) match {
      case Some(params) => run(params)
      case None         => System.exit(1)
    }
  }

  /**
   * Trains an ALS model on the ratings dataset, scores every (user, candidate
   * item) pair in batches, and indexes each user's top-N items into Solr.
   *
   * Users and items are keyed by the hashCode of their source string ids;
   * `userIDMap`/`itemIDMap` translate those keys back to the original strings.
   * NOTE(review): hashCode collisions would silently merge distinct ids.
   *
   * @param params parsed job configuration
   */
  def run(params: Params): Unit = {
    val conf = new SparkConf()
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits._

    // Raw ratings: (source uid string, Rating keyed by uid/item hashCodes).
    val srcRatings = sc.textFile(params.ratings).map(parseRating).cache()
    val training = srcRatings.map(_._2).cache()   // training data set

    // Dataset statistics.
    val numRatings = training.count()
    val numUsers = srcRatings.map(_._2.user).distinct().count()  // distinct hashcode uid
    val numItems = srcRatings.map(_._2.item).distinct().count()  // distinct hashcode item
    println(s"Got $numRatings ratings from $numUsers users on $numItems items.")

    // Candidate items to score: (item hashCode, source item string).
    val srcItems = sc.textFile(params.predicted).map(data => (data.hashCode, data)).cache()

    // Reverse lookups from hashCode keys back to source string ids.
    val userIDMap = srcRatings.map(data => (data._2.user, data._1)).collect().toMap
    val itemIDMap = srcItems.collect().toMap

    srcRatings.unpersist(blocking = false)

    // Build and train the ALS model.
    val als = new ALS()
      .setRank(params.rank)
      .setMaxIter(params.maxIter)
      .setRegParam(params.regParam)
      .setNumBlocks(params.numBlocks)

    val model = als.fit(training.toDF())

    // Score users in batches of userStep to bound memory use.
    userIDMap.keys.toList.grouped(params.userStep).foreach { uids =>
      val uidRdd = sc.parallelize(uids)
      val predictedData = uidRdd.cartesian(srcItems.map(_._1))
                                .map(r => Rating[Int](r._1, r._2, 1.0f)).cache()
      val predictions = model.transform(predictedData.toDF()).cache()

      predictions.select("user", "item", "prediction").rdd
        .map { case Row(user: Int, item: Int, prediction: Float) =>
          (user, item, "%.6f".format(prediction).toFloat)
        }
        // ALS produces NaN for users/items unseen at training time; drop them.
        // (Original used r._3.equals(Float.NaN), which relies on boxed-Float
        // NaN equality; isNaN says the same thing directly.)
        .filter(!_._3.isNaN)
        .groupBy(_._1)
        .map { case (user, rows) => (user, rows.toSeq.map(r => (r._2, r._3))) }
        .collect()
        .grouped(params.commitStep)
        .foreach(solrIndex(_, userIDMap, itemIDMap, params.solrUrl, params.recTopNum))

      // Release per-batch caches (the original leaked predictedData each pass).
      predictedData.unpersist(blocking = false)
      predictions.unpersist(blocking = false)
    }

    srcItems.unpersist(blocking = false)
    training.unpersist(blocking = false)

    sc.stop()
  }

  /**
   * Parses one CSV line "uid,item,score" into (source uid string, Rating),
   * where the Rating's user/item keys are the hashCodes of the source strings.
   * Asserts the line has exactly three comma-separated fields.
   */
  def parseRating(str: String) = {
    val parts = str.split(",")
    assert(parts.length == 3)
    val srcUid = parts(0)
    (srcUid, Rating[Int](srcUid.hashCode, parts(1).hashCode, parts(2).toFloat))
  }

  /**
   * Builds an [[HttpSolrServer]] client configured with the timeouts and
   * connection limits used by this job.
   *
   * @param url base URL of the Solr core to index into
   * @return a configured client; the caller owns its lifecycle
   */
  def getSolrServer(url: String): HttpSolrServer = {
    val client = new HttpSolrServer(url)
    // One-minute socket/connect timeouts: index batches can be large.
    client.setConnectionTimeout(60000)
    client.setSoTimeout(60000)
    // Connection-pool sizing and transport behaviour.
    client.setMaxTotalConnections(100)
    client.setDefaultMaxConnectionsPerHost(100)
    client.setAllowCompression(true)
    client.setFollowRedirects(false)
    client.setMaxRetries(1)
    client
  }

  /**
   * Indexes one batch of per-user recommendations into Solr.
   *
   * Each element is (user hashCode, scored items); items are mapped back to
   * their source ids, sorted by score descending, truncated to `topNum`, and
   * written as a JSON string into the "rec_items" field.
   *
   * @param elements  batch of (user hashCode, Seq of (item hashCode, score))
   * @param userIDMap user hashCode -> source uid string
   * @param itemIDMap item hashCode -> source item string
   * @param solrUrl   base URL of the Solr core
   * @param topNum    number of top-scored items to keep per user
   */
  def solrIndex(elements : Array[(Int, Seq[(Int, Float)])],
                userIDMap : Map[Int, String],
                itemIDMap : Map[Int, String],
                solrUrl : String,
                topNum : Int): Unit = {
    implicit val formats = DefaultFormats
    val docs = elements.map { case (userHash, scoredItems) =>
      val document = new SolrInputDocument()
      // Translate item hashes to source ids, keep the topNum highest scores.
      val rec_items = scoredItems.map { case (itemHash, score) => (itemIDMap(itemHash), score) }
                                 .sortWith(_._2 > _._2)
                                 .take(topNum)
      document.addField("user_id", userIDMap(userHash))
      document.addField("rec_items", Serialization.write(rec_items))
      document
    }

    val server = getSolrServer(solrUrl)
    try {
      // JavaConverters' explicit .asJava replaces deprecated JavaConversions.
      server.add(docs.toSeq.asJava)
      server.commit()
      //server.optimize()
    } finally {
      // Release the client's HttpClient resources (the original leaked one
      // connection pool per batch).
      server.shutdown()
    }

    println("######commit to index count: " + docs.size)
  }

}
