import scala.io.Source
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable

import org.apache.spark.SparkConf 
import org.apache.spark.SparkContext 
import org.apache.spark.rdd.RDD
import org.apache.spark.HashPartitioner

// Number of partitions for the HashPartitioner — fill in the total core
// count of your cluster.
val N = 4// number of nodes
// Path to the dataset (the header row with attribute names must be removed
// from the file beforehand).
val filePath = "/Users/monologue/Desktop/tane/data/pglw00_2xattributes.csv"
// alternative dataset: /Users/monologue/Downloads/evaluation/bots_50w_10.csv


// One Array[Int] per row: each ';'-separated cell is replaced by its String
// hashCode. NOTE(review): hashCode collisions could merge distinct values —
// confirm this is acceptable for the data sets used.
val r: RDD[Array[Int]] = sc.textFile(filePath).map(line => line.trim.split(";").map(_.hashCode()))
// Total number of rows in the data set.
val row = r.count()
// Number of attributes (columns).
val col = r.first().length
var rA: RDD[(Int, Array[Int])] = null// data keyed by the current pivot attribute and hash-partitioned; reset per pivot

// R: the full attribute set {0, ..., col-1}.
val R: Set[Int] = Array.range(0, col).toSet
// O: attribute indices ordered by descending distinct count (filled by PrepareForSearching).
var O: Array[Int] = null
var computed: mutable.Set[Int] = mutable.Set()// attributes already fully processed as pivots
// P: cache of partition cardinalities, keyed by comma-separated attribute-set
// string. TODO(original author): try HashMap, and Int keys instead of String.
val P: mutable.Map[String, Int] = mutable.Map()
// Discovered functional dependencies, stored as "LHS -> RHS" strings.
val FDs: mutable.Set[String] = mutable.Set()

// L: the current lattice level (comma-separated attribute-set strings).
var L: ArrayBuffer[String] = null
// RHS+ candidate sets for each member of L. An array parallel to L would
// also work, but a Map is more convenient to operate on.
var RHSPlus: Map[String, mutable.Set[Int]] = null
/** Initialise P (distinct-value counts for every single attribute) and O
  * (attribute indices sorted by DESCENDING distinct count) before the search.
  * Reads: col, R, r. Writes: P, O.
  */
def PrepareForSearching() = {

  // Distinct-value count of column A, computed entirely on the cluster.
  // (The original collected all distinct values to the driver and ran a
  // second, redundant `distinct` there; counting on the RDD yields the
  // same value without shipping the values back.)
  def ParallelComputePartition(A: Int): Int = {
    r.map(t => t(A)).distinct().count().toInt
  }

  val order = Array.range(0, col)
  for (A <- R) {
    val kinds = ParallelComputePartition(A)
    println("第" + A + "个属性的种类为：" + kinds)
    P += (A.toString() -> kinds)
    order(A) = kinds // order(A) now holds A's distinct count
  }
  // Pair every count with its attribute index: Array((count, index)).
  val countMap = order.zipWithIndex
  // Sort ascending by count...
  val sortMap = countMap.sortBy(_._1)
  // ...then reverse so O lists attribute indices by descending cardinality.
  O = sortMap.map(_._2).reverse
}

/** Repartition the data set so that all tuples sharing a value of attribute
  * A land in the same partition (hash partitioning on t(A), N partitions). */
def ExchangeDataWith(A: Int): RDD[(Int, Array[Int])] = {
  val keyedByA = r.map(tuple => (tuple(A), tuple))
  keyedByA.partitionBy(new HashPartitioner(N))
}

/** Distinct-value count ("partition size" in TANE terms) of the attribute
  * combination X (comma-separated column indices), computed over rA.
  *
  * Each Spark partition counts its local distinct combined values and the
  * per-partition counts are summed. This is only exact because rA is
  * hash-partitioned on the pivot attribute and every X contains the pivot,
  * so tuples equal on X never span partitions.
  *
  * NOTE(review): a combination is folded to one Int by SUMMING its column
  * values, so different tuples can collide on the same sum and undercount.
  * Kept as-is for consistency with ParallelComputePartitionL, whose results
  * are compared against these — confirm collisions are acceptable.
  */
def ParallelComputePartition(X: String): Int = {
  // Parse X once; it is loop-invariant (the original re-split the string
  // for every row).
  val attrs = X.split(",").map(_.toInt)
  rA.mapPartitions(iter => {
    val setX = mutable.Set[Int]()
    while (iter.hasNext) {
      val elem = iter.next()
      setX += attrs.map(elem._2(_)).reduce(_ + _)
    }
    Iterator.single(setX.size)
  })
    .reduce(_ + _)
}

/** Distinct-value counts for EVERY attribute set in the current level L, in
  * a single pass over rA. Returns (X, count) pairs; per-partition counts are
  * summed via reduceByKey (exact for the same reason as in
  * ParallelComputePartition: rA is co-partitioned on the pivot attribute).
  */
def ParallelComputePartitionL(): Array[(String, Int)] = {
  // Snapshot L and parse every attribute-set string ONCE on the driver;
  // the original re-split L(i) for every row in every partition.
  val names = L.toArray
  val attrSets = names.map(_.split(",").map(_.toInt))
  rA.mapPartitions(iter => {
    // One distinct-value accumulator per candidate set.
    val setXs = Array.fill(attrSets.length)(mutable.Set[Int]())
    while (iter.hasNext) {
      val elem = iter.next()
      for (i <- attrSets.indices) {
        setXs(i) += attrSets(i).map(elem._2(_)).reduce(_ + _)
      }
    }
    Iterator.tabulate(names.length)(i => (names(i), setXs(i).size))
  })
    .reduceByKey(_ + _)
    .collect()
}

/** Test X (comma-separated column indices) for functional dependencies of
  * the form (X \ {RHS}) -> RHS using the TANE criterion P(X) == P(X \ {RHS}).
  * Discovered FDs are appended to FDs and RHSPlus(X) is pruned accordingly.
  * Returns true when at least one FD was found.
  */
def JudgeFD(X: String): Boolean = {
  val Attrs = X.split(",").map(_.toInt)

  // A single attribute has no proper subset to act as the LHS.
  if (Attrs.length == 1) return false

  var foundFD = false
  // Candidate RHSs: attributes of X that are still in X's RHS+ set.
  val RHSs = RHSPlus(X) & Attrs.toSet
  for (RHS <- RHSs) {
    val LHS = Attrs.filter(_ != RHS).mkString(",")
    // LHS -> RHS holds iff adding RHS does not refine LHS's partition.
    if (P(X) == P(LHS)) {
      val FD = (LHS + " -> " + RHS)
      println("发现函数依赖：" + FD)
      FDs += FD
      // TANE pruning: remove RHS and every attribute outside X from RHS+(X).
      // (R is already a Set — the original's R.toSet was redundant.)
      RHSPlus(X) -= RHS
      RHSPlus(X) --= (R -- Attrs)
      foundFD = true
    }
  }
  foundFD
}

/** TANE pruning rule: once RHS+(X) is empty, X can never contribute another
  * FD, so drop X from the current level. Returns true when X was removed. */
def RHSPlusPruning(X: String): Boolean = {
  val prunable = RHSPlus(X).isEmpty
  if (prunable) {
    L -= X
  }
  prunable
}
/** TANE key pruning: if X's partition count equals the row count, X is a
  * (super)key, so X -> B holds for every remaining candidate B outside X.
  * Record those FDs, remove X from the current level, and return true. */
def KeyPruning(X: String): Boolean = {
  if (P(X) != row) return false
  val LHS = X
  val ownAttrs = X.split(",").map(_.toInt)
  // Every RHS+ candidate that is not already part of X is implied.
  for (RHS <- RHSPlus(X).toSet -- ownAttrs) {
    val FD = (LHS + " -> " + RHS)
    println("发现函数依赖：" + FD)
    FDs += FD
  }
  L -= X
  true
}
/** For every already-computed attribute RHS still in RHS+(X), test the
  * dependency X -> RHS directly by comparing partition counts, computing
  * (and caching in P) the partition of X+RHS on demand.
  */
def ComputeRemainingRHS(X: String) = {
  val RHSs = computed & RHSPlus(X)
  for (RHS <- RHSs) {
    // Build the combined attribute-set key once instead of three times.
    val key = X + "," + RHS
    if (!P.contains(key)) {
      P += (key -> ParallelComputePartition(key))
    }
    // X -> RHS holds iff adding RHS does not refine X's partition.
    if (P(key) == P(X)) {
      val FD = (X + " -> " + RHS)
      println("发现函数依赖：" + FD)
      FDs += FD
      RHSPlus(X) -= RHS
    }
  }
}

/** Process every attribute set in the current lattice level L: batch-compute
  * the missing partition counts, then test and prune each candidate X.
  */
def ComputeCurrentLevel() = {
  // Level-1 counts were already filled in by PrepareForSearching(); for
  // deeper levels compute all counts in a single cluster pass.
  if (L(0).split(",").length > 1) {
    P ++= ParallelComputePartitionL()
  }
  // Iterate over a snapshot: the pruning helpers mutate L while we loop.
  for (X <- L.toArray) {
    if (JudgeFD(X)) {
      RHSPlusPruning(X) // result unused on purpose: X either stays or is dropped
    } else if (!KeyPruning(X)) {
      ComputeRemainingRHS(X)
    }
  }
}
/** Generate the next lattice level from the current one, writing the new
  * candidate attribute sets into L and their RHS+ candidate sets into
  * RHSPlus. A is the pivot attribute that every candidate set starts with.
  */
def NextLevel(A: Int): Unit = {
if(L.isEmpty){ return }
val currentL = L.map(_.split(","))
val nextL = ArrayBuffer[String]()
val currentRHSPlus = RHSPlus
val nextRHSPlus = mutable.Map[String, mutable.Set[Int]]()
if(currentL(0).length==1){
// Level 1 -> 2: pair the pivot A with every attribute not yet processed
// (the "left-over" attributes).
val leftAttrs = O.filter(!(computed+A).contains(_))
for(leftAttr <- leftAttrs){
val X = A+","+leftAttr
nextL += X
nextRHSPlus += (X -> (mutable.Set() ++ currentRHSPlus(A.toString())))
}
}else{
// Each element of Ks holds the indices of current-level sets that share
// the same first n-1 attributes; e.g. currentL [ABC,ABD,ABE,ACD,ACE]
// becomes Ks [[0,1,2],[3,4]]. Only sets within the same group can be
// combined into a next-level candidate.
var Ks = ArrayBuffer[ArrayBuffer[Int]]()
val tempL = currentL.map(_.dropRight(1).toSet)
Ks += ArrayBuffer(0)// seed Ks with a single group containing just index 0
for(i <- 1 until currentL.length){
if(tempL(i)==tempL(i-1)){
Ks.last += i
}else{
Ks += ArrayBuffer(i)
}
}
Ks = Ks.filterNot(_.length==1)// a group with only one member (no other set shares its n-1 prefix) cannot produce a next-level candidate, so drop it
for(K <- Ks){
for(i <- K){
for(j <- K.drop(K.indexOf(i)+1)){
val newX = currentL(i) :+ currentL(j).last
var isOK = true
// TANE candidate check: the (n-1)-subsets of newX must all be in the
// current level. Index 0 (the pivot A) is deliberately skipped — subsets
// without A never appear in this pivot-rooted lattice — and removing the
// last two positions reproduces currentL(j)/currentL(i), which are in L
// by construction. Hence the unusual bound `1 until newX.length-2`.
for(index <- 1 until newX.length-2){
val XsubA = newX.filterNot(_==newX(index)).mkString(",")
if(!L.contains(XsubA)){
isOK = false
}
}
if(isOK){
nextL += newX.mkString(",")// newX is a valid member of the next level
// Now derive newX's RHS+ set: the intersection of RHS+ over all of its
// (n-1)-subsets that retain the pivot (indices 1..n-1).
var RHSs = mutable.Set[Int]()
RHSs ++= R// start from the full attribute set
for(index <- 1 until newX.length){
val XsubA = newX.filterNot(_==newX(index)).mkString(",")
RHSs &= currentRHSPlus(XsubA)
}
nextRHSPlus += (newX.mkString(",") -> RHSs)
}
}
}
}
}
L = nextL
RHSPlus = nextRHSPlus.toMap
}

/** Run the TANE lattice search rooted at pivot attribute A: start from the
  * single-attribute level {A}, whose RHS+ candidates are all attributes
  * except A itself, and alternate level processing with next-level
  * generation until the lattice is exhausted. */
def DBTANE(A: Int) = {
  val pivot = A.toString
  L = ArrayBuffer(pivot)
  // Initial RHS+ for {A}: every attribute minus the pivot.
  val initialRHS = mutable.Set[Int]()
  initialRHS ++= O
  initialRHS -= A
  RHSPlus = Map(pivot -> initialRHS)

  // NextLevel leaves L empty once no further candidates can be generated.
  while (L.nonEmpty) {
    ComputeCurrentLevel()
    NextLevel(A)
  }
}
/** Filter FDs down to minimal functional dependencies and print them.
  * A dependency "a1,a2,... -> b" is considered non-minimal when dropping
  * the first LHS attribute yields a dependency that is also in FDs
  * (e.g. "1,3,4 -> 2" is dropped when "3,4 -> 2" was found). */
def PrintMinFD() = {
  val bar = "—————————————————————————————————————————————————————————————————————————"
  println(bar)
  println("————————————————————————接下来判断以上输出的是否为最小的函数依赖——————————————————————————")
  println(bar)
  // Iterate over a snapshot: we remove from FDs while scanning it.
  for (FD <- FDs.toArray) {
    if (FD.contains(",")) {
      // Drop the first LHS attribute: "1,3,4 -> 2" becomes "3,4 -> 2".
      val smallerFD = FD.split(",").drop(1).mkString(",")
      if (FDs.contains(smallerFD)) {
        println(s"${FD}不是最小的")
        FDs.remove(FD)
      }
      // otherwise FD is minimal (as far as this check goes)
    }
    // single-attribute LHS is always minimal
  }
  println(bar)
  println("————————————————————————接下来输出所有最小函数依赖——————————————————————————")
  println(bar)
  println(s"最小函数依赖的总数目为：${FDs.size}")
  println(FDs.mkString("\n"))
}
// ---- Driver: run the whole discovery and report timing ----
val startTime = System.nanoTime()
PrepareForSearching()

// Process pivots in cardinality order O; for each pivot A, co-partition the
// data on A, run the TANE search rooted at A, then mark A as computed so
// later pivots skip combinations already covered.
for(A <- O){
rA = ExchangeDataWith(A)
DBTANE(A)
computed += A
}
PrintMinFD()
// Elapsed wall-clock time in seconds.
println("计算总时间消耗为："+(System.nanoTime()-startTime)/1000/1000/1000.0+"秒")
