package com.learn.lb.spark.sql.work

import org.apache.spark.{SparkConf, SparkContext}

/**
 * For every user, builds the historical watch list: records with a low score
 * are filtered out, the rest are sorted by score, and the top 5 are kept.
 *
 * @author laibo
 * @since 2019/8/12 15:18
 */
object UserWatchSpark {

  /**
   * Entry point: reads tab-separated lines `user_id \t item_id \t score`,
   * drops malformed rows and rows with score <= 2, groups by user, sorts each
   * user's items by score descending, keeps at most the top 5, and writes one
   * line per user in the form `user_id \t item_1:score \t item_2:score ...`.
   *
   * Fixes over the previous version:
   *  - `for (i <- 0 to limit)` with `limit = scoreList.length` crashed with
   *    ArrayIndexOutOfBoundsException for users having 4 or fewer items;
   *    replaced by `take(5)`, which is safe for any list size.
   *  - removed the dead statement `v._2.toArray.sorted` whose result was discarded.
   *  - the SparkContext is now stopped when the job finishes.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[2]").setAppName("UserWatchSparkTask")
    val spark = new SparkContext(conf)
    val dataRdd = spark.textFile("E:\\idea\\workspace\\bd-learn\\spark-learn\\src\\main\\resources\\train_new.data")
    val outputPath = "E:\\idea\\workspace\\bd-learn\\spark-learn\\src\\main\\resources\\train_new_res"
    dataRdd.filter { line =>
      // Drop malformed rows and rows whose score is not above 2.
      val fields = line.split("\t")
      fields.length > 2 && fields(2).toDouble > 2
    }.map { line =>
      // Shape each row as (user_id, (item_id, score)) so records group per user.
      val fields = line.split("\t")
      (fields(0), (fields(1), fields(2).toDouble))
    }.groupByKey().sortBy(_._1.toInt, ascending = true)
      .map { case (userId, items) =>
        // Highest score first; take(5) is safe even when the user has < 5 items
        // (groupByKey never yields an empty group, so the line is never bare).
        val topItems = items.toArray.sortWith(_._2 > _._2).take(5)
        // Render: user_id \t item:score \t item:score ...
        topItems
          .map { case (itemId, score) => s"$itemId:$score" }
          .mkString(userId + "\t", "\t", "")
      }.saveAsTextFile(outputPath)
    // Release cluster resources once the output has been written.
    spark.stop()
  }
}
