package ALS

import java.io.File
import java.util

import org.apache.commons.io.FileUtils
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object ALSDemo {

  /**
   * Entry point: loads a previously trained and saved matrix-factorization
   * model (directory "ALSmodel") and prints the 50 highest-predicted topics
   * for a fixed user as Zhihu topic URLs.
   *
   * The original training / evaluation code paths are kept below as
   * commented-out reference.
   */
  def main(args: Array[String]): Unit = {
    // Work around a missing HADOOP_HOME when running Spark locally (Windows).
    System.setProperty("hadoop.home.dir", new File("").getAbsolutePath)

    val conf: SparkConf = new SparkConf().setMaster("local").setAppName("My App")
    val sc: SparkContext = new SparkContext(conf)

    val inputFile1: String = "topics/topics.csv"     // user,topic,count triples (training set; see commented code)
    val inputFile2: String = "topics/topicsName.csv" // topicId,name,count

//    val train = sc.textFile(inputFile1).flatMap(_.split("\n"))
//      .map(_.split(",") match {
//        case Array(user, item, rate) => Rating(user.toInt, item.toInt, math.log(rate.toDouble + math.E))
//      })//.filter(r=>r.user==20223)

    // Build ratings for a single synthetic user (25276) from the topic-name
    // file. collect(PartialFunction) silently skips malformed rows instead of
    // crashing the job with a MatchError, as the previous non-exhaustive
    // `map { ... match ... }` would.
    val train = sc.textFile(inputFile2).flatMap(_.split("\n"))
      .map(_.split(","))
      .collect {
        case Array(topicId, _, count) => Rating(25276, topicId.toInt, count.toDouble)
      }

    // Driver-side lookup table: topicId -> human-readable topic name.
    // Explicit charset avoids the deprecated platform-default overload.
    val topicNames = new util.HashMap[Int, String]()
    FileUtils.readLines(new File(inputFile2), "UTF-8").forEach(
      line => {
        val cols = line.split(",")
        if (cols.length >= 2) topicNames.put(cols(0).toInt, cols(1))
      }
    )
//    train.foreach(println)

    // Load the model previously trained and persisted via model.save(sc, "ALSmodel").
    val model = MatrixFactorizationModel.load(sc, "ALSmodel")

//    val model = new ALS()
//      .setIterations(20)
//      .setRank(60)
//      .setLambda(0.005)
//      .run(train)
//    model.save(sc,"ALSmodel")

//    evaluateMode(train, model)
    predict(train, model, topicNames)

    // Release local Spark resources.
    sc.stop()
  }

  /**
   * Scores every (user, product) pair present in `ratings`, joins the
   * predictions with the observed ratings and prints the top 50 by predicted
   * score, formatted as Zhihu topic URLs.
   *
   * @param ratings observed ratings (user, product, rate)
   * @param model   trained matrix-factorization model
   * @param hashMap topicId -> topic name lookup used only for display
   */
  private def predict(ratings: RDD[Rating], model: MatrixFactorizationModel, hashMap: util.HashMap[Int, String]): Unit = {

    // Pairs to score.
    val usersProducets = ratings.map {
      case Rating(user, product, _) => (user, product)
    }

    // Predicted score keyed by (user, product).
    val predictions = model.predict(usersProducets).map {
      case Rating(user, product, rate) => ((user, product), rate)
    }

    // Join observed rate with predicted rate: ((user, product), (actual, predicted)).
    val ratesAndPreds = ratings.map {
      case Rating(user, product, rate) =>
        ((user, product), rate)
    }.join(predictions)

    // Top 50 by predicted score, descending. The pow(e, x) - e term undoes the
    // log(rate + e) transform applied when training on inputFile1 — TODO(review):
    // confirm it is still meaningful for the raw-count ratings built in main.
    ratesAndPreds.sortBy(-_._2._2).take(50).foreach(e => {
      println("https://www.zhihu.com/topic/" + e._1._2 + " " + hashMap.get(e._1._2) + " " + e._2._1.toInt + " " + (math.pow(math.E, e._2._2) - math.E))
    })
  }

  /**
   * Model evaluation: computes the mean squared error between observed and
   * predicted ratings and dumps the full (user, product, actual, predicted)
   * table to a local CSV file named "s".
   *
   * @param ratings observed ratings used as the evaluation set
   * @param model   trained matrix-factorization model
   */
  private def evaluateMode(ratings: RDD[Rating], model: MatrixFactorizationModel): Unit = {

    // Pairs to score.
    val usersProducets = ratings.map {
      case Rating(user, product, _) => (user, product)
    }

    // Predicted score keyed by (user, product).
    val predictions = model.predict(usersProducets).map {
      case Rating(user, product, rate) => ((user, product), rate)
    }

    // Join observed with predicted: ((user, product), (actual, predicted)).
    val ratesAndPreds = ratings.map {
      case Rating(user, product, rate) =>
        ((user, product), rate)
    }.join(predictions)

    // Mean squared error in the (possibly log-transformed) rating space.
    val MSE = ratesAndPreds.map {
      case ((_, _), (r1, r2)) =>
        val err = r1 - r2 //(math.pow(math.E, r1) - math.pow(math.E, r2))
        err * err
    }.mean()

    // Dump every row to CSV with a header; exp(r) - 1 maps log-space scores
    // back towards the original counts. collect() replaces the previous
    // take(Int.MaxValue) anti-idiom; explicit charset replaces the deprecated
    // default-charset FileUtils.write overload.
    val sb = new StringBuilder("0,1,2,3\n")
    ratesAndPreds.map {
      case ((user, product), (r1, r2)) =>
        user + "," + product + "," + (math.pow(math.E, r1) - 1) + "," + (math.pow(math.E, r2) - 1) + "\n"
    }.collect().foreach(sb ++= _)
    FileUtils.write(new File("s"), sb, "UTF-8", false)

    println(s"MSE = ${MSE}")
  }

}
