package yang

import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ListBuffer

/**
 * Set-similarity self-join over small example records using an inverted index.
 *
 * Each record is (id, comma-separated element list). Two records r, s are
 * reported when their Jaccard similarity |r ∩ s| / |r ∪ s| exceeds the
 * threshold. The overlap count c is accumulated per candidate pair via the
 * inverted index (one pair emission per shared element), and the Jaccard test
 *   c / (|r| + |s| - c) > t
 * is rewritten as
 *   c > t / (1 + t) * (|r| + |s|)
 * so it can be evaluated from the pre-computed record sizes alone.
 */
object SimilarityJoin {

  /** Jaccard similarity threshold: pairs with similarity > this value are kept. */
  private val Threshold = 0.6

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("homework")
    val sc = new SparkContext(conf)

    // Sample input: (record id, comma-separated element set), 2 partitions.
    val data = sc.parallelize(List(
      ("R1", "C,D,F"), ("R2", "A,B,E,F,G"),
      ("R3", "A,B,C,D,E"), ("R4", "B,C,D,E,F"), ("R5", "A,E,G")), 2)

    // Tag each record with its set size once, so the similarity filter later
    // needs no second pass over the raw strings: ((id, size), elements).
    val sized = data.map { case (id, csv) =>
      val elems = csv.split(",")
      ((id, elems.length), elems)
    }

    // Inverted index: element -> every (id, size) record containing it.
    val inverted = sized
      .flatMap { case (rec, elems) => elems.map(e => (e, rec)) }
      .groupByKey()

    // For each element's posting list, emit all unordered candidate pairs.
    // The list is sorted by record id first so a given pair always appears in
    // the same canonical orientation — otherwise (A,B) and (B,A) produced by
    // different elements would be counted under two distinct keys and the
    // overlap would be under-counted.
    val candidatePairs = inverted.values.flatMap { recs =>
      val sorted = recs.toList.sortBy(_._1)
      for {
        i <- sorted.indices
        j <- (i + 1) until sorted.length
      } yield (sorted(i), sorted(j))
    }

    // Overlap count per pair. This stays fully distributed — no collect() /
    // re-parallelize roundtrip through the driver, which would not scale.
    val overlap = candidatePairs.map((_, 1)).reduceByKey(_ + _)

    // Keep pairs whose Jaccard similarity strictly exceeds the threshold,
    // then drop the size tags, leaving just the two record ids.
    val finalresult = overlap
      .filter { case (((_, n1), (_, n2)), c) =>
        c > Threshold / (1 + Threshold) * (n1 + n2)
      }
      .map { case (((id1, _), (id2, _)), _) => (id1, id2) }

    println("符合条件的相似结果：" + finalresult.collect().toBuffer)
  }
}
