package com.fanli.bigdata.rec

import org.apache.log4j.{Level, Logger}
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel}
import org.apache.spark.{SparkConf, SparkContext}
import scopt.OptionParser

object tSuperMainPageTrainingModel {

  /**
   * Configuration holder for an ALS (alternating least squares) training run.
   *
   * NOTE(review): currently unused by [[run]], which hard-codes its inputs;
   * presumably intended to be populated via scopt — confirm before wiring up.
   *
   * @param ratings            input ratings path. NOTE(review): `null` default kept for
   *                           interface compatibility — callers should always supply a path.
   * @param modelPath          output directory for the trained model
   * @param checkpointDir      HDFS/local directory for RDD checkpoints
   * @param numIterations      number of ALS iterations
   * @param lambda             ALS regularization parameter
   * @param alpha              confidence parameter (implicit-feedback ALS only)
   * @param rank               number of latent factors
   * @param numBlocks          number of parallel computation blocks
   * @param checkpointInterval iterations between checkpoints
   * @param savePartitions     partition count used when saving output
   * @param implicitPrefs      true to use implicit-preference ALS
   */
  case class Params(
           ratings: String = null,
           modelPath: String = "/tmp",
           checkpointDir : String = "/tmp",
           numIterations: Int = 20,
           lambda: Double = 0.1,
           alpha: Double = 1.0,
           rank: Int = 10,
           numBlocks: Int = 10,
           checkpointInterval : Int = 10,
           savePartitions : Int = 10,
           implicitPrefs: Boolean = false) extends AbstractParams[Params]

  /** Entry point: delegates straight to [[run]] (args are currently ignored). */
  def main (args: Array[String]): Unit = {
      run()
  }

  /**
   * Loads the raw ratings file, aggregates scores per (user, item) key, and
   * prints basic dataset statistics (rating / user / item counts).
   *
   * Side effects only: reads a hard-coded local file, logs to stdout.
   */
  def run(): Unit = {
    val conf = new SparkConf().setAppName("MySpakDemo1").setMaster("local[*]")
    val sc = new SparkContext(conf)

    // Ensure the SparkContext is released even if the job body throws.
    try {
      // Parse each line, drop records whose user key is empty, then sum the
      // scores of duplicate (user, item) keys into a single rating.
      val srcRatings = sc.textFile("file:///D:/Project/P_spark/RecApplication/test_data/ratings02.dat")
                         .map(ParseUserAction.parseLine)
                         .filter(_._1._1.nonEmpty)            // was: if (...) false else true
                         .groupBy(_._1)
                         .map { case (key, group) =>
                            (key, group.map(_._2).sum)        // aggregate duplicate actions
                         }
                         .map(ParseUserAction.parseRating)
                         .cache()

      val training = srcRatings.map(_._2).cache()   // training data set

      // Debug dump of the parsed ratings. Fixed: the original wrapped the
      // foreach in println(...), which also printed the Unit result "()".
      srcRatings.foreach(println)

      // Dataset statistics.
      val numRatings = training.count()
      val numUsers   = srcRatings.map(_._2.user).distinct().count()     // distinct hashcode uid
      val numItems   = srcRatings.map(_._2.product).distinct().count()  // distinct hashcode item
      println(s"Got $numRatings ratings from $numUsers users on $numItems items.")
    } finally {
      sc.stop()
    }
  }
}
