package com.fanli.bigdata.rec

import org.apache.log4j.{Level, Logger}
import org.apache.spark.mllib.recommendation.{MatrixFactorizationModel, ALS}
import org.apache.spark.{SparkContext, SparkConf}
import scopt.OptionParser

object SuperMainPageTrainingModel {

  /**
   * Command-line configuration for the ALS training job.
   * Defaults mirror the scopt option defaults printed in the help text.
   */
  case class Params(
           ratings: String = null,                // input path to the user-action ratings dataset (required)
           modelPath: String = "/tmp",            // output path where the trained model is saved
           checkpointDir : String = "/tmp",       // HDFS/local dir for Spark RDD checkpointing (required)
           numIterations: Int = 20,               // number of ALS iterations
           lambda: Double = 0.1,                  // ALS regularization parameter
           alpha: Double = 1.0,                   // confidence parameter (used only with implicit prefs)
           rank: Int = 10,                        // number of latent factors
           numBlocks: Int = 10,                   // user/product block count for ALS parallelism
           checkpointInterval : Int = 10,         // checkpoint every N iterations to truncate lineage
           savePartitions : Int = 10,             // partition count for the saved feature RDDs
           implicitPrefs: Boolean = false) extends AbstractParams[Params]

  /**
   * Entry point: parses command-line arguments into [[Params]] and runs
   * the ALS training pipeline. Exits with status 1 on a parse failure
   * (scopt has already printed the usage message by then).
   */
  def main(args: Array[String]): Unit = {

    // Quiet Spark's verbose INFO logging; keep warnings and errors.
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)

    val defaultParams = Params()

    val parser = new OptionParser[Params]("SuperMainPageTrainingModel") {
      head("SuperMainPageRec: super user action data.")
      opt[String]("ratings")
        .required()
        .text("path to a super user action dataset of ratings")
        .action((x, c) => c.copy(ratings = x))
      opt[String]("modelPath")
        .text(s"modelPath, default: ${defaultParams.modelPath} ")
        .action((x, c) => c.copy(modelPath = x))
      opt[String]("checkpointDir")
        .required()
        .text("path to a checkpoint directory")
        .action((x, c) => c.copy(checkpointDir = x))
      opt[Int]("rank")
        .text(s"rank, default: ${defaultParams.rank}")
        .action((x, c) => c.copy(rank = x))
      opt[Int]("numIterations")
        .text(s"numIterations, default: ${defaultParams.numIterations}")
        .action((x, c) => c.copy(numIterations = x))
      opt[Double]("lambda")
        .text(s"lambda, default: ${defaultParams.lambda}")
        .action((x, c) => c.copy(lambda = x))
      opt[Double]("alpha")
        .text(s"alpha, default: ${defaultParams.alpha}")
        .action((x, c) => c.copy(alpha = x))
      opt[Int]("numBlocks")
        .text(s"number of blocks, default: ${defaultParams.numBlocks}")
        .action((x, c) => c.copy(numBlocks = x))
      opt[Int]("savePartitions")
        .text(s"save partitions, default: ${defaultParams.savePartitions}")
        .action((x, c) => c.copy(savePartitions = x))
      opt[Int]("checkpointInterval")
        .text(s"checkpointInterval, default: ${defaultParams.checkpointInterval}")
        .action((x, c) => c.copy(checkpointInterval = x))
      opt[Unit]("implicitPrefs")
        .text("use implicit preference")
        .action((_, c) => c.copy(implicitPrefs = true))
      note(
        """
          |Example command line to run this app:
          |
          | bin/spark-submit --class com.fanli.bigdata.rec.SuperMainPageTrainingModel \
          |  app.jar \
          |  --rank 10 --numIterations 15 --lambda 0.01 --alpha 1.0 \
          |  --ratings /path/to/ratings \
          |  --checkpointDir /tmp/checkpoints
        """.stripMargin)
    }

    // Pattern-match the parse result instead of abusing Option combinators:
    // run on success, exit non-zero on failure.
    parser.parse(args, defaultParams) match {
      case Some(params) => run(params)
      case None         => sys.exit(1)
    }
  }

  /**
   * Runs the full training pipeline:
   *   1. load and parse raw user-action lines,
   *   2. drop records with an empty user key and aggregate ratings per (user, item),
   *   3. train an ALS matrix-factorization model,
   *   4. coalesce the feature RDDs and save the model to `params.modelPath`.
   */
  def run(params: Params): Unit = {

    // Echo the effective configuration so it appears in the driver log.
    println(s"ratings   path: ${params.ratings}")
    println(s"model     path: ${params.modelPath}")
    println(s"checkpoint dir: ${params.checkpointDir}")
    println(s"iterations    : ${params.numIterations}")
    println(s"lambda        : ${params.lambda}")
    println(s"alpha         : ${params.alpha}")
    println(s"rank          : ${params.rank}")
    println(s"numBlocks     : ${params.numBlocks}")
    println(s"savePartitions: ${params.savePartitions}")
    println(s"implicitPrefs : ${params.implicitPrefs}")

    val conf = new SparkConf()
    val sc = new SparkContext(conf)
    // Checkpointing truncates ALS's long RDD lineage (see checkpointInterval).
    sc.setCheckpointDir(params.checkpointDir)

    // Parse raw lines, discard records whose user key is empty, then sum the
    // action scores per (user, item) key and convert to MLlib Ratings.
    // NOTE(review): assumes ParseUserAction.parseLine yields ((userKey, ...), score)
    // with the user key at r._1._1 — confirm against ParseUserAction.
    val srcRatings  = sc.textFile(params.ratings)
                        .map(ParseUserAction.parseLine)
                        .filter(r => r._1._1.nonEmpty)  // idiomatic form of if-empty-then-false
                        .groupBy(_._1)
                        .map { r =>
                           (r._1, r._2.map(_._2).sum)
                        }
                        .map(ParseUserAction.parseRating)
                        .cache()
    val training = srcRatings.map(_._2).cache()   // training data set

    // Dataset statistics; count() also materializes both cached RDDs.
    val numRatings = training.count()
    val numUsers   = srcRatings.map(_._2.user).distinct().count()     // distinct hashcode uid
    val numItems   = srcRatings.map(_._2.product).distinct().count()  // distinct hashcode item
    println(s"Got $numRatings ratings from $numUsers users on $numItems items.")

    // training is cached and materialized, so the source RDD can be released.
    srcRatings.unpersist()

    val model = new ALS()
      .setRank(params.rank)
      .setIterations(params.numIterations)
      .setLambda(params.lambda)
      .setAlpha(params.alpha)
      .setImplicitPrefs(params.implicitPrefs)
      .setUserBlocks(params.numBlocks)
      .setProductBlocks(params.numBlocks)
      .setCheckpointInterval(params.checkpointInterval)
      .run(training)

    // Rebuild the model with coalesced feature RDDs so the saved output has a
    // controlled, small number of part files.
    val userFeatures = model.userFeatures.coalesce(params.savePartitions)
    val productFeatures = model.productFeatures.coalesce(params.savePartitions)
    val saveModel = new MatrixFactorizationModel(model.rank, userFeatures, productFeatures)

    println(s"save model user features partitions: ${saveModel.userFeatures.partitions.length}")
    println(s"save model product features partitions: ${saveModel.productFeatures.partitions.length}")

    saveModel.save(sc, params.modelPath)

    sc.stop()
  }
}
