package org.zjt.spark.book

import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

/**
  * DESC    Ranking of co-occurring products found in shopping carts
  *         ("customers who bought this also bought ...").
  *
  * Pipeline (each step persists its result for the next one):
  *   fun1 - user/product preference matrix
  *   fun2 - product co-occurrence matrix, truncated to the hottest pairs
  *   fun3 - join of the two matrices -> top-N recommendations per user
  **/
object RecommendProduct {

  // Hard-coded local demo paths; adjust before running elsewhere.
  private val LogPath =
    "/Users/zhangjuntao/IdeaProjects/myproject/hw-bigdata/scala-demo/src/main/resource/(sample)sam_tianchi_2014002_rec_tmall_log.csv"
  private val LikePath =
    "/Users/zhangjuntao/IdeaProjects/myproject/hw-bigdata/scala-demo/src/main/resource/product-like"
  private val CoOccurrencePath =
    "/Users/zhangjuntao/IdeaProjects/myproject/hw-bigdata/scala-demo/src/main/resource/Co-occurrence"
  private val RecommendPath =
    "/Users/zhangjuntao/IdeaProjects/myproject/hw-bigdata/scala-demo/src/main/resource/product-recommend"

  // val, not var: the conf is never reassigned.
  val sparkConf = new SparkConf().setMaster("local[2]").setAppName("RecommendProduct")
  val sc = new SparkContext(sparkConf)

  /**
    * Explicit entry point instead of `extends App` (the App trait has
    * initialization-order pitfalls with Spark closures).
    */
  def main(args: Array[String]): Unit = {
    fun3()
    sc.stop()
  }

  /**
    * Step 1: build the user preference matrix.
    *
    * Input lines look like "i161,u2625,click,2014/9/18 15:03".
    * A raw score is assigned per action (click=1, cart=2, anything else=3),
    * then [[HandlerTool.reduce]] collapses the per-user entries into
    * "productId:count" strings (note: it counts entries — the score suffix
    * is ignored by the reducer).
    *
    * Output: (userId, Iterable("productId:count", ...)) saved to LikePath.
    */
  def fun1(): Unit = {
    val rdd = sc.textFile(LogPath)
      .distinct()
      .coalesce(2)
      .map { line =>
        val fields = line.split(",")
        // click=1, cart=2, everything else (collect/alipay/...) = 3
        val score = fields(2) match {
          case "click" => 1
          case "cart"  => 2
          case _       => 3
        }
        (fields(1), s"${fields(0)}:$score")
      }
      .groupByKey()
      .mapValues(HandlerTool.reduce) // fold each user's entries into per-product counts

    println(rdd.collect().mkString("\n"))
    rdd.saveAsTextFile(LikePath)
  }

  /**
    * Step 2: build the product co-occurrence matrix and keep only the
    * `pairLimit` most frequent pairs (hot sellers / heavily advertised).
    *
    * Output lines: (productA:productB, weight) with productA < productB.
    * Each co-purchase contributes 2 to the weight (both orderings of the
    * pair emit the same key), matching the original implementation.
    *
    * @param pairLimit number of top pairs to keep (default 2000)
    */
  def fun2(pairLimit: Int = 2000): Unit = {
    val rdd = sc.textFile(LogPath)
      .coalesce(3)
      .map { line =>
        val fields = line.split(",")
        (fields(1), fields(0)) // (userId, productId)
      }
      .groupByKey()
      .values
      .flatMap { products =>
        // Every ordered pair of distinct products seen for the same user,
        // normalized so the lexicographically smaller id comes first.
        val distinctProducts = products.toSeq.distinct
        for {
          a <- distinctProducts
          b <- distinctProducts
          if a != b
        } yield {
          val key = if (a < b) s"$a:$b" else s"$b:$a"
          (key, 1)
        }
      }
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
      // Deterministic top-N. The original read a LongAccumulator inside
      // filter(), which is unreliable: accumulator updates in transformations
      // may be re-applied on task retries and partitions race on the count.
      .zipWithIndex()
      .filter { case (_, idx) => idx < pairLimit }
      .map(_._1)

    println(rdd.collect().mkString("\n"))
    rdd.saveAsTextFile(CoOccurrencePath)
  }

  /**
    * Step 3: combine the co-occurrence matrix (fun2 output) with the user
    * preference matrix (fun1 output) and keep the top-N scored products
    * per user.
    *
    * @param topN recommendations to keep per user (default 10)
    */
  def fun3(topN: Int = 10): Unit = {
    val top = sc.broadcast(topN)

    // Co-occurrence lines look like "(i455:i83,18)" -> (i455, "i83,18")
    val rddP = sc.textFile(CoOccurrencePath).coalesce(3).map { raw =>
      val line = raw.substring(1, raw.length - 1)
      val Array(product1, rest) = line.split(":", 2)
      (product1, rest)
    }

    // Preference lines look like "(u2786,ArrayBuffer(i295:1, i206:1, ...))"
    // -> one (productId, "userId,like") pair per buffer entry.
    val rddU = sc.textFile(LikePath).coalesce(3).flatMap { raw =>
      val line = raw.substring(1, raw.length - 1)
      val Array(uid, buffer) = line.split(",ArrayBuffer", 2)
      val entries = buffer.substring(1, buffer.length - 1)
      entries.split(",").map { entry =>
        // trim fixes a join bug: every entry after the first carries a
        // leading space ("p1:1, p2:1"), so the raw pid never matched rddP.
        val Array(pid, like) = entry.trim.split(":", 2)
        (pid, s"$uid,$like")
      }
    }

    val rdd = rddP.leftOuterJoin(rddU).groupByKey().flatMap { case (_, values) =>
      val relationByProduct = mutable.HashMap[String, Int]() // co-product -> relation weight
      val likeByUser = mutable.HashMap[String, Int]()        // user -> like count
      for ((pValue, uValueOpt) <- values) {
        val Array(product, relation) = pValue.split(",")
        relationByProduct += (product -> relation.toInt)
        // Option handled with foreach instead of matching to null.
        uValueOpt.foreach { uValue =>
          val Array(user, like) = uValue.split(",")
          likeByUser += (user -> like.toInt)
        }
      }
      // Score every (user, co-occurring product) pair: like * relation.
      for {
        (product, relation) <- relationByProduct.toSeq
        (user, like) <- likeByUser
      } yield (user, s"$product:${like * relation}")
    }.groupByKey().flatMap { case (userId, scored) =>
      // Sum scores per product. NOTE(review): the original also adds 1.0 per
      // occurrence on top of each score; preserved as-is — confirm intended.
      val totals = mutable.HashMap[String, Double]()
      for (entry <- scored) {
        val Array(product, score) = entry.split(":")
        totals += (product -> (totals.getOrElse(product, 0.0) + 1.0 + score.toDouble))
      }
      totals.map { case (product, score) => (userId, s"$product:$score") }
    }.groupByKey().map { case (userId, entries) =>
      // Keep entries whose score reaches the topN-th largest for this user.
      val sortedScores = entries.map(_.split(":")(1).toDouble).toArray.sorted
      val kept = entries.filter { entry =>
        sortedScores.length < top.value ||
          entry.split(":")(1).toDouble >= sortedScores(sortedScores.length - top.value)
      }
      (userId, kept)
    }

    println(rdd.collect().mkString("\n"))
    // Bug fixes: the original slept ~14 hours *before* saving (so the save
    // effectively never ran) and then stopped the context a second time.
    rdd.saveAsTextFile(RecommendPath)
  }

  /**
    * Demo: leftOuterJoin behaves like SQL LEFT JOIN — every key of the left
    * RDD is kept, combined pairwise with matching right values (or None).
    */
  def fun4(): Unit = {
    val rdd1 = sc.parallelize(1 to 5).map(a => (a, "zhang" + a))
    val rdd2 = sc.parallelize(Array(1, 2, 3, 2, 3, 2, 4, 3, 4, 4)).map(a => (a, "lisi" + a))
    val rdd = rdd1.leftOuterJoin(rdd2)
    println(rdd.collect().mkString("\n"))
    sc.stop()
  }

  /**
    * Demo: join behaves like SQL INNER JOIN — only keys present on both
    * sides survive, combined pairwise.
    */
  def fun5(): Unit = {
    val rdd1 = sc.parallelize(1 to 5).map(a => (a, "zhang" + a))
    val rdd2 = sc.parallelize(Array(1, 2, 3, 2, 3, 2, 4, 3, 4, 4)).map(a => (a, "lisi" + a))
    val rdd = rdd1.join(rdd2)
    println(rdd.collect().mkString("\n"))
    Thread.sleep(50000) // keep the local UI alive briefly for inspection
    sc.stop()
  }

}


object HandlerTool {
  /**
    * Collapse "productId:score" entries into one "productId:count" string
    * per distinct product, where count is the number of occurrences of that
    * product id (the score after the colon is ignored).
    *
    * Pure and immutable: the original built a mutable.HashMap via
    * `data.map { ... }` used only for its side effects, which is both
    * non-idiomatic and wasteful (it materialized a discarded collection).
    *
    * @param data entries of the form "productId:score"
    * @return one "productId:count" per distinct product id (unordered)
    */
  def reduce(data: Iterable[String]): Iterable[String] =
    data
      .groupBy(_.split(":")(0))
      .map { case (pid, entries) => s"$pid:${entries.size}" }
}