import scala.io.Source
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable

import org.apache.spark.SparkConf 
import org.apache.spark.SparkContext 
import org.apache.spark.rdd.RDD
import org.apache.spark.HashPartitioner
/** Distributed functional-dependency (FD) discovery on Spark: a TANE-style
  * level-wise search run once per pivot attribute, after redistributing the
  * data keyed on that attribute. Prints all discovered FDs, then filters
  * them down to the minimal ones. */
object DBFD {
  
  def main(args: Array[String]): Unit = {
    val filePath = args(0)  // dataset path; besides local paths, remote paths such as HDFS are also supported
    val N = args(1).toInt   // number of nodes (also used as the partition count)
   
    val conf = new SparkConf() 
    val sc = new SparkContext(conf) 
    val r: RDD[Array[Int]] = sc.textFile(filePath, N)// partitioned with Spark's default partitioning
    // example local input:               /Users/monologue/Downloads/evaluation/bots_10m_10.csv
    //val r: RDD[Array[Int]] = sc.textFile("/Users/monologue/Downloads/evaluation/bots_10m_10.csv")
    
    // Each CSV cell is encoded as its String#hashCode so rows become Array[Int].
    .map(line => line.trim.split(",").map(_.hashCode()))
    val row = r.count()
    val col = r.first().length
    var rA: RDD[(Int, Array[Int])] = null// the RDD re-keyed and hash-partitioned on the current pivot attribute

    val R: Set[Int] = Array.range(0, col).toSet// the full attribute index set
    var O: Array[Int] = null// attribute visiting order (set in PrepareForSearching)
    var computed: mutable.Set[Int] = mutable.Set()// attributes whose search pass has completed
    val P: mutable.Map[String, Int] = mutable.Map()// partition sizes |π_X| keyed by comma-joined attribute sets; TODO: try HashMap (reportedly faster) and Int keys instead of String
    val FDs: mutable.Set[String] = mutable.Set()// discovered FDs as "lhs -> rhs" strings
    
    var L: ArrayBuffer[String] = null// the current lattice level: comma-joined attribute sets
    var RHSPlus: Map[String, mutable.Set[Int]] = null// RHS+ candidate sets per X; an array parallel to L would also work, but a Map is more convenient
    
    /** Count the distinct values of single attribute column A, i.e. the size
      * of the single-attribute partition π_A.
      *
      * Fix: the original collected the already-distinct RDD to the driver and
      * then ran a second, redundant `.distinct` there; counting distributedly
      * avoids shipping the values to the driver at all.
      */
    def ParallelComputePartitionA(A: Int): Int = {
      r.map(t => t(A)).distinct().count().toInt
    }
    /** Initialise P (distinct-value count of every single attribute) and O
      * (the attribute visiting order: attribute indices sorted by distinct
      * count, highest first), preparing for the search.
      * Inputs: col, R, r.
      */
    def PrepareForSearching() = {
      val cardinalities = Array.range(0, col)
      for (attr <- R) {
        val cardinality = ParallelComputePartitionA(attr)
        println("第" + attr + "个属性的种类为：" + cardinality)
        P += (attr.toString() -> cardinality)
        cardinalities(attr) = cardinality// index -> distinct count
      }
      // Pair each count with its attribute index, sort ascending by count,
      // keep just the indices, and flip to descending order.
      O = cardinalities.zipWithIndex.sortBy(_._1).map(_._2).reverse
    }
    
    /** Re-key every row by the value of attribute A and redistribute the rows
      * into N partitions with a HashPartitioner — the data-redistribution
      * step described in the paper. */
    def ExchangeDataWith(A: Int): RDD[(Int, Array[Int])] = {
      val keyedRows = r.map(row => (row(A), row))
      keyedRows.partitionBy(new HashPartitioner(N))
    }
    
    /** Size of the partition π_X for attribute set X (comma-joined attribute
      * indices): sum, over Spark partitions, of the locally distinct value
      * combinations. NOTE(review): summing local counts only equals the
      * global distinct count because rA was hash-redistributed on the pivot
      * attribute — rows agreeing on it land in the same partition; confirm
      * against the paper's redistribution argument.
      *
      * Fix: X was re-split and re-parsed once per row; the parsed attribute
      * indices are now computed once on the driver and captured by the
      * closure. Also replaces the awkward `List[Int]().::(x).iterator` with
      * Iterator.single.
      */
    def ParallelComputePartitionX(X: String): Int = {
      val attrs = X.split(",").map(_.toInt)// loop-invariant: parse once, not per row
      rA.mapPartitions { iter =>
        val seen = mutable.Set[String]()
        while (iter.hasNext) {
          val elem = iter.next()
          seen += attrs.map(elem._2(_)).mkString(",")
        }
        Iterator.single(seen.size)
      }
      .reduce(_ + _)
    }
    
    /** Compute |π_X| for every attribute set X of the current level L in a
      * single pass over rA, returning (X, count) pairs. Per-partition
      * distinct counts are summed with reduceByKey — the same counting
      * scheme as ParallelComputePartitionX.
      *
      * Fix: every L(i) was split and parsed once per row; the parsed index
      * arrays (and a stable name array) are now built once on the driver and
      * captured by the closure, which also avoids serialising the mutable
      * ArrayBuffer L itself.
      */
    def ParallelComputePartitionL(): Array[(String, Int)] = {
      val levelNames: Array[String] = L.toArray
      val levelAttrs: Array[Array[Int]] = levelNames.map(_.split(",").map(_.toInt))
      rA.mapPartitions { iter =>
        val seen = Array.fill(levelNames.length)(mutable.Set[String]())
        while (iter.hasNext) {
          val elem = iter.next()
          var i = 0
          while (i < levelAttrs.length) {
            seen(i) += levelAttrs(i).map(elem._2(_)).mkString(",")
            i += 1
          }
        }
        // Safe to emit lazily: `iter` is fully consumed before this runs.
        Iterator.range(0, levelNames.length).map(i => (levelNames(i), seen(i).size))
      }
      .reduceByKey(_ + _)
      .collect()
    }
    
    /** Test the candidate FDs carried by attribute set X: for each RHS
      * candidate B in RHS+(X) ∩ X, the FD (X \ B) -> B holds iff
      * |π_X| == |π_{X\B}| (sizes read from P). Discovered FDs are recorded
      * in FDs, and X's RHS+ set is pruned per the TANE rules.
      * Returns true if at least one FD was found. */
    def JudgeFD(X: String): Boolean = {
      var foundFD = false
      val Attrs = X.split(",").map(_.toInt)
      
      // A single attribute admits no nontrivial FD of this form.
      if(Attrs.length==1){ return false }
      
      // Candidate RHSs: RHS+ restricted to attributes inside X. `&` builds a
      // fresh set, so mutating RHSPlus(X) inside the loop is safe.
      val RHSs = RHSPlus(X) & Attrs.toSet
      for(RHS <- RHSs){
        val LHS = Attrs.filter(_ != RHS).mkString(",")
        if(P(X) == P(LHS)){
          val FD = (LHS+" -> "+RHS)
          println("发现函数依赖："+FD)
          FDs += FD
          // RHS+ pruning: drop the found RHS, and drop every attribute
          // outside X from X's candidate set.
          RHSPlus(X).remove(RHS)
          RHSPlus(X) --= (R.toSet -- Attrs)
          foundFD = true
        }
      }
      return foundFD
    }
    
    /** Prune X from the current level once its RHS+ candidate set is empty:
      * no further FDs can come from X. Returns true when X was pruned. */
    def RHSPlusPruning(X: String): Boolean = {
      if (RHSPlus(X).nonEmpty) {
        false
      } else {
        L -= X
        true
      }
    }
    /** Key pruning: when |π_X| equals the row count, X is a (super)key, so
      * X -> B holds for every remaining RHS candidate B outside X. Emit
      * those FDs and drop X from the level. Returns true when X was pruned. */
    def KeyPruning(X: String): Boolean = {
      if (P(X) != row) return false
      val lhsAttrs = X.split(",").map(_.toInt)
      for (rhs <- RHSPlus(X).toSet -- lhsAttrs) {
        val fd = X + " -> " + rhs
        println("发现函数依赖：" + fd)
        FDs += fd
      }
      L -= X
      true
    }
    /** For RHS candidates of X that belong to already-processed attributes
      * (`computed`), test X -> RHS directly by comparing |π_{X,RHS}| with
      * |π_X|, computing and caching the missing partition size on demand.
      *
      * Fix: replaces the unidiomatic `P.get(k) == None` with `!P.contains(k)`
      * and builds the "X,RHS" key once instead of four times per iteration.
      */
    def ComputeRemainingRHS(X: String) = {
      // `&` yields a fresh set, so mutating RHSPlus(X) in the loop is safe.
      val RHSs = computed & RHSPlus(X)
      for (RHS <- RHSs) {
        val key = X + "," + RHS
        if (!P.contains(key)) {
          P += (key -> ParallelComputePartitionX(key))
        }
        if (P(key) == P(X)) {
          val FD = (X + " -> " + RHS)
          println("发现函数依赖：" + FD)
          FDs += FD
          RHSPlus(X) -= RHS
        }
      }
    }
    
    /** Evaluate every attribute set X of the current level: compute the
      * level's partition sizes in one distributed pass, then judge FDs and
      * apply the two pruning rules. */
    def ComputeCurrentLevel() = {
      // Only multi-attribute levels need their partition sizes computed here;
      // level 1 was already filled into P by PrepareForSearching.
      if (L(0).contains(",")) {
        P ++= ParallelComputePartitionL()
      }
      for (X <- L.toArray) { // iterate a snapshot: the pruning calls mutate L
        if (JudgeFD(X)) {
          RHSPlusPruning(X)
        } else if (!KeyPruning(X)) {
          ComputeRemainingRHS(X)
        }
      }
    }
    /** Candidate generation: derive the next lattice level (and its RHS+
      * sets) from the current one, for the search seeded at attribute A.
      * Overwrites L and RHSPlus; returns early when L is already empty. */
    def NextLevel(A: Int): Unit = {
      if(L.isEmpty){ return }
      val currentL = L.map(_.split(","))
      val nextL = ArrayBuffer[String]()
      val currentRHSPlus = RHSPlus
      val nextRHSPlus = mutable.Map[String, mutable.Set[Int]]()
      if(currentL(0).length==1){
        // Level 1 -> level 2: pair the seed A with every not-yet-computed
        // ("left"-over) attribute.
        val leftAttrs = O.filter(!(computed+A).contains(_))
        for(leftAttr <- leftAttrs){
          val X = A+","+leftAttr
          nextL += X
          // NOTE: the commented-out line below once caused a nasty bug — it
          // shared ONE mutable Set across all entries, so pruning one X's
          // RHS+ mutated every other X's. The live line stores a copy.
          //nextRHSPlus += (X -> currentRHSPlus(A.toString()))
           nextRHSPlus += (X -> (mutable.Set() ++ currentRHSPlus(A.toString())))
        }
      }else{
        // Group the indices of currentL whose first n-1 attributes agree
        // (prefix blocks): e.g. currentL [ABC,ABD,ABE,ACD,ACE] yields
        // Ks [[0,1,2],[3,4]].
        var Ks = ArrayBuffer[ArrayBuffer[Int]]()
        val tempL = currentL.map(_.dropRight(1).toSet)
        Ks += ArrayBuffer(0)// seed Ks with a single group holding index 0
        for(i <- 1 until currentL.length){
          if(tempL(i)==tempL(i-1)){
            Ks.last += i
          }else{
            Ks += ArrayBuffer(i)
          }
        }
        Ks = Ks.filterNot(_.length==1)// a singleton prefix block cannot pair up to form a next-level candidate, so drop it
        for(K <- Ks){
          for(i <- K){
            for(j <- K.drop(K.indexOf(i)+1)){
              // Join two same-prefix sets: keep currentL(i), append the last
              // attribute of currentL(j).
              val newX = currentL(i) :+ currentL(j).last
              var isOK = true
              // Subset check: the inspected (n-1)-subsets must still be in L.
              // NOTE(review): the range skips index 0 and the last two
              // positions (cf. `1 until newX.length` below) — presumably
              // because those subsets are currentL(i)/currentL(j) or involve
              // the seed, but this is worth confirming against the paper.
              for(index <- 1 until newX.length-2){
                val XsubA = newX.filterNot(_==newX(index)).mkString(",")
                if(!L.contains(XsubA)){
                  isOK = false
                }
              }
              if(isOK){
                nextL += newX.mkString(",")// newX joins the next level
                // Build newX's RHS+ set: intersect the RHS+ sets of its
                // (n-1)-subsets, starting from the full attribute set R.
                var RHSs = mutable.Set[Int]()
                RHSs ++= R// start from the universe
                for(index <- 1 until newX.length){
                  val XsubA = newX.filterNot(_==newX(index)).mkString(",")
                  RHSs &= currentRHSPlus(XsubA)
                }
                nextRHSPlus += (newX.mkString(",") -> RHSs)
              }
            }
          }
        }
      }
      L = nextL
      RHSPlus = nextRHSPlus.toMap
    }
    
    /** Run the TANE-style level-wise search seeded at attribute A: start the
      * lattice at level {A} with every other attribute as an RHS candidate,
      * then alternate evaluation and candidate generation until the level
      * empties out. */
    def DBTANE(A: Int) = {
      L = ArrayBuffer(A.toString)
      val seedRHS = mutable.Set() ++ O.toSet - A
      RHSPlus = Map(A.toString() -> seedRHS)
      // Each pass evaluates the current level and then derives the next one;
      // pruning eventually leaves L empty and stops the loop.
      while (L.nonEmpty) {
        ComputeCurrentLevel()
        NextLevel(A)
      }
    }
    /** Minimality post-processing: an FD "a1,...,ak -> b" is non-minimal
      * when dropping its first LHS attribute still leaves a discovered FD.
      * Such FDs are removed from FDs; the survivors are printed. */
    def PrintMinFD() = {
      println("—————————————————————————————————————————————————————————————————————————")
      println("————————————————————————接下来判断以上输出的是否为最小的函数依赖——————————————————————————")
      println("—————————————————————————————————————————————————————————————————————————")
      // Iterate a snapshot: FDs is mutated inside the loop.
      for (fd <- FDs.toArray) {
        if (!fd.contains(",")) {
          // Single-attribute LHS: nothing to drop, trivially minimal.
          println(fd + "是最小的")
        } else {
          // Drop the first LHS attribute, e.g. "1,3,4 -> 2" becomes "3,4 -> 2".
          val reducedFD = fd.split(",").drop(1).mkString(",")
          if (FDs.contains(reducedFD)) {
            println(fd + "不是最小的")
            FDs.remove(fd)
          } else {
            println(fd + "是最小的")
          }
        }
      }
      println("最小函数依赖的总数目为：" + FDs.size)
      println(FDs.mkString("\n"))
    }
   
    PrepareForSearching()

    // Visit attributes in order O: redistribute the data keyed on A, run the
    // level-wise search seeded at A, then mark A as computed so later seeds
    // can test FDs against it directly (ComputeRemainingRHS).
    for(A <- O){
      rA = ExchangeDataWith(A)
      DBTANE(A)
      computed += A
    }
    PrintMinFD()
    
  }
}