package recommendPackage

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer
import scala.util.Random

object SparkMlib {

  // Cap on the number of rated items kept per user when building rating lists.
  private val maxPrefsPerUser = 20
  // Number of most-similar items recommended per item (top-N).
  private val topN = 6

  /**
   * Offline item-based collaborative-filtering job.
   *
   * Pipeline:
   *   1. read tab-separated student records from a local resource file,
   *   2. group (item, rating) pairs by user, capped at `maxPrefsPerUser` per user,
   *   3. mean-normalize ratings per item,
   *   4. expand each user's item list into item pairs and sum co-rating scores,
   *   5. keep the `topN` most similar items per item and save the result as text to HDFS.
   */
  def main(args: Array[String]): Unit = {
    // Spark configuration: run locally on all available cores.
    val conf = new SparkConf().setMaster("local[*]").setAppName("helloMlib")
    val sc = new SparkContext(conf)
    try {
      runOfflineRecommendation(sc)
    } finally {
      sc.stop() // release the SparkContext even if the job fails
    }
  }

  /** Runs the full recommendation pipeline on the given context. */
  private def runOfflineRecommendation(sc: SparkContext): Unit = {
    val hdfsPath = "hdfs://niit01:9000/home/recommendPackage/off"
    val inputPath = "src/main/resources/student.data"
    // NOTE(review): the original declared an unused local output path
    // ("src/main/resources/offlineResult"); results are written to hdfsPath below.

    val random = new Random()

    // (user, Iterable[(item, perturbedRating)]) — field 4 is the user id and
    // field 6 the item/raw rating; a positive random offset perturbs the rating.
    val userRatings: RDD[(String, Iterable[(String, Double)])] =
      sc.textFile(inputPath)
        .map { line =>
          val fields = line.split("\t")
          (fields(4), (fields(6), fields(6).toDouble + Math.abs(random.nextInt(2653) + 1)))
        }
        .groupByKey()

    // Re-key as (item, (user, rating)), keeping at most maxPrefsPerUser entries per user.
    val itemUserRatings: RDD[(String, Iterable[(String, Double)])] =
      userRatings.flatMap { case (user, ratings) =>
        ratings.take(maxPrefsPerUser).map { case (item, rating) => (item, (user, rating)) }
      }.groupByKey()

    // Mean-normalize each item's ratings, then re-key back to (user, (item, normalized)).
    val normalized: RDD[(String, Iterable[(String, Double)])] =
      itemUserRatings.flatMap { case (item, userList) =>
        val users = userList.toArray
        val mean = users.map(_._2).sum / users.length
        users.map { case (user, rating) => (user, (item, rating / mean)) }
      }.groupByKey()
    normalized.cache() // consumed twice: debug print below and the pair expansion
    normalized.foreach(println)

    // Expand each user's item list into both orderings of every item pair,
    // scored by the product of the two normalized ratings.
    val pairScores: RDD[((String, String), Double)] =
      normalized.flatMap { case (_, itemList) =>
        val items = itemList.toArray
        for {
          i <- 0 until items.length - 1
          j <- i + 1 until items.length
          score = items(i)._2 * items(j)._2
          pair <- Seq(
            ((items(i)._1, items(j)._1), score),
            ((items(j)._1, items(i)._1), score)
          )
        } yield pair
      }
    pairScores.cache() // consumed twice: debug print and groupByKey below
    pairScores.foreach(println)

    // Sum co-rating contributions per pair -> (item1, Iterable[(item2, similarity)]).
    val similarities: RDD[(String, Iterable[(String, Double)])] =
      pairScores
        .groupByKey()
        .map { case ((item1, item2), scores) => (item1, (item2, scores.sum)) }
        .groupByKey()
    similarities.cache() // consumed twice: debug print and the final save
    similarities.foreach(println)

    // For each item keep the topN most similar items (descending by score),
    // formatted as "item,other:score,other:score,..." (trailing comma kept
    // to preserve the original output format).
    similarities.map { case (item, sims) =>
      val top = sims.toArray.sortWith(_._2 > _._2).take(topN)
      val formatted = top.map { case (other, score) => other + ":" + ("%1.4f" format score) + "," }
      item + "," + formatted.mkString
    }.saveAsTextFile(hdfsPath)
  }

}
