import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer
import scala.math.{pow, sqrt}

/**
  * @Author cheng
  * @Date 2022 10 25 17 26
  **/
object SparkMlib {

  /**
    * Item-based collaborative filtering (item-item CF).
    *
    * Computes cosine similarity between items from user rating data and,
    * for every item, emits its top-N most similar items.
    *
    * Input  (args(0)): TSV lines of the form "user \t item \t score".
    * Output (args(1)): lines of the form "item \t sim1:score1,sim2:score2,...".
    *
    * Pipeline:
    *   1. L2-normalize each item's score vector (so dot products are cosines).
    *   2. Per user, emit every ordered item pair with the product of their
    *      normalized scores (a partial cosine-similarity term).
    *   3. Sum the partial terms per pair and keep the top-N per item.
    */
  def main(args: Array[String]): Unit = {
    require(args.length >= 2, "usage: SparkMlib <input_path> <output_path>")

    // Windows-local Hadoop setup; NOTE(review): these should ideally come from
    // configuration rather than being hard-coded in the job.
    System.setProperty("hadoop.home.dir", "D:\\codes\\jar-file\\hadoop-2.7.3")
    System.setProperty("HADOOP_USER_NAME", "root")

    val conf = new SparkConf().setMaster("local[*]").setAppName("helloMlib")
    val sc = new SparkContext(conf)

    try {
      val inputPath = args(0)
      val outputPath = args(1)

      val lines = sc.textFile(inputPath)
      val maxPrefsPerUser = 20 // cap per-user history to bound the pair explosion in Step 2
      val topn = 3             // neighbours kept per item in the final output

      // Step 1. Normalization.
      // (user, (item, score)) -> cap per user -> (item, (user, score))
      // -> divide each item's scores by the item's L2 norm.
      val ui_rdd = lines.map { line =>
        val fields = line.split("\t")
        (fields(0), (fields(1), fields(2).toDouble))
      }.groupByKey().flatMap { case (user, itemScores) =>
        // Keep at most maxPrefsPerUser preferences per user.
        itemScores.take(maxPrefsPerUser).map { case (item, score) =>
          (item, (user, score))
        }
      }.groupByKey().flatMap { case (item, userScores) =>
        val us = userScores.toArray
        val norm = sqrt(us.iterator.map(p => pow(p._2, 2)).sum)
        // Guard: an all-zero score vector would otherwise produce NaN.
        if (norm == 0.0) Iterator.empty
        else us.iterator.map { case (user, score) => (user, (item, score / norm)) }
      }.groupByKey()

      // Step 2. Unpack: for each user, generate both orderings of every item
      // pair with the product of the normalized scores.
      val unpack_rdd = ui_rdd.flatMap { case (_, itemScores) =>
        val is = itemScores.toArray
        for {
          i <- 0 until is.length - 1
          j <- i + 1 until is.length
          contrib = is(i)._2 * is(j)._2
          pair <- Seq(((is(i)._1, is(j)._1), contrib), ((is(j)._1, is(i)._1), contrib))
        } yield pair
      }

      // Step 3. Pack: sum partial terms per pair (= cosine similarity),
      // then keep the top-N neighbours for each item.
      unpack_rdd
        .reduceByKey(_ + _) // shuffles partial sums, not full value lists, unlike groupByKey
        .map { case ((itemA, itemB), score) => (itemA, (itemB, score)) }
        .groupByKey()
        .map { case (itemA, neighbours) =>
          val top = neighbours.toArray.sortWith(_._2 > _._2).take(topn)
          // mkString avoids the trailing "," the manual append loop produced.
          val rendered = top.map { case (item, score) =>
            item + ":" + ("%1.4f" format score)
          }.mkString(",")
          itemA + "\t" + rendered
        }
        .saveAsTextFile(outputPath)
    } finally {
      sc.stop() // release Spark resources even if the job fails
    }
  }
}
