package com.haozhen.rdd

/**
  * @author haozhen
  * @email haozh@ync1.com
  * @date 2021/1/30  14:03
  */
object FrieldsDemo {

  /**
    * Spark driver for the classic "common friends" problem: for every pair of
    * friends, find all users whose friend list contains both of them.
    *
    * Input (data/frields.dat), one line per user:
    *   {{{<userId>,<friend1> <friend2> <friend3> ...}}}
    * i.e. a comma separates the user id from a whitespace-separated friend list.
    *
    * Output: prints ((friendA, friendB), Iterable(userIds...)) to stdout.
    */
  def main(args: Array[String]): Unit = {
    import org.apache.spark.rdd.RDD
    import org.apache.spark.{SparkConf, SparkContext}

    // getCanonicalName on a Scala object ends with '$'; .init strips it.
    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getCanonicalName().init)

    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")

    val lines: RDD[String] = sc.textFile("data/frields.dat")

    // Explode each user's friend list into unordered friend pairs, keyed by a
    // canonical (sorted) pair so the same pair coming from different users'
    // lists lands under the same key.
    val pairToUser: RDD[((String, String), String)] = lines.flatMap { line =>
      val fields = line.split(",")                 // fields(0) = user id, fields(1) = friend list
      val friends = fields(1).trim.split("\\s+")
      friends.combinations(2).map { pair =>
        // BUGFIX: the original filtered on pair(0) < pair(1). combinations()
        // emits each pair exactly once, in input order, so that filter silently
        // dropped every pair whose input order was descending. Normalizing by
        // sorting keeps all pairs and still makes the key canonical.
        val sorted = pair.sorted
        ((sorted(0), sorted(1)), fields(0))
      }
    }

    // key = friend pair, value = every user that lists both members as friends
    val commonFriends: RDD[((String, String), Iterable[String])] = pairToUser.groupByKey()
    commonFriends.foreach(println)

    sc.stop()
  }
}
