package org.zjt.spark.book

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.mutable.ArrayBuffer

/**
  * Computes the common friends shared by every pair of users.
  *
  * Input file format per line: {{{user friend1 friend2 ...}}}
  *
  * Algorithm:
  *   1. For each (user, friend) emit key "a:b" with the names sorted, so that
  *      (A,B) and (B,A) collapse onto the same key, valued with user's other friends.
  *   2. reduceByKey concatenates both users' friend lists.
  *   3. Names appearing more than once in the merged list are the common friends.
  */
object CommonFriends {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("CommonFriends")
    val sc = new SparkContext(sparkConf)

    // BUG FIX: `coalesce` returns a NEW RDD; the original discarded the result,
    // making the repartitioning a no-op. Chain it so it actually takes effect.
    val lines: RDD[String] = sc
      .textFile("D:\\Idea workspace\\scala-demo\\src\\main\\resource\\friends.txt")
      .coalesce(3)

    val commonFriends = lines.flatMap { record =>
      val tokens = record.split(" ")
      val head = tokens(0)
      val friends = tokens.drop(1)
      friends.map { friend =>
        // Sort the pair lexicographically so both directions share one key.
        val key = if (head > friend) friend + ":" + head else head + ":" + friend
        // BUG FIX: the original used `!a.contains(friend)` (substring match),
        // which wrongly dropped any friend whose name merely contains `friend`
        // (e.g. friend "Tom" would also remove "Tommy"). Exact comparison instead.
        val others = friends.filter(_ != friend)
        (key, others)
      }
    }.reduceByKey(_ ++ _)
      .map { case (pair, merged) => (pair, Handler.intersection(merged)) }
      // BUG FIX: `intersection` never returns null, so the old `!= null` filter
      // was dead code. Drop pairs that have no common friends instead.
      .filter(_._2.nonEmpty)

    commonFriends.collect().foreach(a => println(a._1 + " * " + a._2.mkString(",")))
    sc.stop()
  }
}

object Handler {
  /**
    * Returns the distinct elements of `value` that occur more than once,
    * in order of first occurrence.
    *
    * In the common-friends job the merged list contains each pair member's
    * friends concatenated, so a duplicated name is a friend common to both.
    *
    * @param value merged friend names (may contain duplicates; may be empty)
    * @return distinct names appearing at least twice; empty array if none
    */
  def intersection(value: Array[String]): Array[String] = {
    // O(n) occurrence count (the original re-scanned the buffer for every
    // element, i.e. O(n^2), and copied into a mutable ArrayBuffer needlessly).
    val counts = value.groupBy(identity).map { case (name, occ) => name -> occ.length }
    // `distinct` preserves first-occurrence order, matching the old behavior.
    value.distinct.filter(name => counts(name) > 1)
  }
}