package yhb.knn

import yhb.component.HandleHbase
import org.apache.hadoop.hbase.{TableName, HTableDescriptor}
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, DataFrame}

import scala.reflect.ClassTag

/**
  * Builds, persists, and queries a kd-tree structure using Spark + HBase.
  * Created by root on 15-12-16.
  */
class KDTree(private var src:DataFrame,private var leafTableName:String,private var nodeTableName:String) extends Serializable{

  // NOTE(review): the class is Serializable so it can be captured in Spark closures, but
  // HTable is NOT serializable — TODO confirm instances are never shipped to executors
  // after searchPath has opened a table handle.
  // Lazily-opened HBase handles (see searchPath); stay null until first use.
  private var leafTable:HTable = null
  private var nodeTable:HTable = null
  // One kd-tree dimension per DataFrame column; every column is read via Row.getDouble.
  private var columns:Array[String] = if(src != null) src.columns else null

  // No-arg constructor: source and table names must be supplied later via setters.
  def this(){
    this(null,null,null)
  }

  // DataFrame-only constructor: table names must be set before saveKDTree/searchPath.
  def this(df:DataFrame){
    this(null,null,null)
  }

  // Table-names-only constructor: source DataFrame must be set before building the tree.
  def this(leafTableName:String,nodeTableName:String){
    this(null,leafTableName,nodeTableName)
  }

  /** Sets both HBase table names; returns this for call chaining. */
  def setLeafANDNodeTableName(leafTableName:String,nodeTableName:String) = {
    this.leafTableName = leafTableName
    this.nodeTableName = nodeTableName
    this
  }

  /** Sets the source DataFrame and refreshes the dimension columns; returns this for chaining. */
  def setDF(df:DataFrame) ={
    this.src = df
    columns = df.columns
    this
  }

  // Accessors for the configured HBase table names.
  def getLeafTableName = leafTableName
  def getNodeTableName = nodeTableName

  /**
    * Builds the kd-tree non-recursively, exhausting one axis before moving to the next:
    * each dimension is traversed (sorted) only once and split several times during that
    * traversal. Faster to build than the classic level-by-level ordering, but slower to
    * search. Deprecated in favour of createKDTreeRDD.
    */
  @deprecated
  def createKDTreeAxisFirst()={
    val dataSize = src.count()

    /**
      * treeRDD: RDD[(String, Long, Long, Row)] — the kd-tree under construction.
      * _1 String: the record's bit-path in the tree ("0" = left, "1" = right per level).
      * _2 Long: number of leaf records under the record's current node.
      * _3 Long: index (in the global sorted order) of that node's first leaf record.
      * _4 Row: the original record.
      */
    var treeRDD = src.map(x=>("",dataSize,0l,x))

    /**
      * splitPositionRDD: RDD[(Int, String, Int, Double)] — the internal (split) nodes.
      * _1 Int: depth of the node in the tree (0 = root).
      * _2 String: "root", "0" (left child), "1" (right child), or "-1" leaf marker.
      * _3 Int: the axis (column index) this node splits on.
      * _4 Double: the split value (intended median).
      */
    var splitPositionRDD:RDD[(Int,String,Int,Double)] = null


    // Total tree depth; distribute the levels as evenly as possible over the axes.
    val depth = math.ceil(math.log(dataSize)/math.log(2)).toInt
    val depthPerAxis = depth/columns.length
    var depthRest = depth%columns.length
    var counter = 0
    // Build the kd-tree with a single pass over the axis indices.
    for(i <- columns.indices){
      // The first depthRest axes absorb one extra level each.
      val depthPerAxisFix = if(depthRest > 0){
        depthRest = depthRest - 1
        depthPerAxis + 1
      } else depthPerAxis
      // Sort by (numeric bit-path, value on axis i) so records of the same node are
      // contiguous and ordered along the split axis.
      // NOTE(review): BigInt(path, 2) collapses leading zeros ("01" == "1"), so this
      // relies on all paths having equal length within one pass — TODO confirm.
      var sortedRDD = treeRDD.sortBy{
        x => if(x._1 == "") (BigInt(0),x._4.getDouble(i)) else (BigInt(x._1,2),x._4.getDouble(i))
      }(
        new Ordering[(BigInt,Double)]{
          override def compare(x: (BigInt, Double), y: (BigInt, Double)): Int = {
            if(x._1 > y._1) 1
            else if (x._1 == y._1 && x._2 > y._2) 1
            else if (x._1 == y._1 && x._2 == y._2) 0
            else -1
          }
          // NOTE(review): (BigInt,Double) below is a *tuple of the companion objects*, so
          // this is effectively a ClassTag for Tuple2 — it compiles, but is an odd way to
          // spell ClassTag[(BigInt, Double)].
        },ClassTag((BigInt,Double).getClass)
       ).zipWithIndex().cache()
      // Split this axis depthPerAxisFix times. Left subtree: values below the split
      // point; right subtree: values greater than or equal to it.
      for(d <- 1 to depthPerAxisFix){
        val currentDepth = counter
        // Keep each node's split record (or the lone record of a finished leaf).
        // NOTE(review): leafNodeNum / 2 is Long division, so math.ceil is a no-op here —
        // for odd counts this picks the lower middle, not the upper — TODO confirm intent.
        val medianRDD =  sortedRDD.filter{
          case ((path,leafNodeNum,startIndex,row),index) =>
            leafNodeNum == 1 || index - startIndex == math.ceil(leafNodeNum / 2).toLong
        }.map{
          case ((path,leafNodeNum,startIndex,row),index)=>
            if(leafNodeNum == dataSize)
              (currentDepth,"root",i,row.getDouble(i))
            else if(leafNodeNum == 1)
              // Single-record node: emit a "-1" marker (filtered out of the final result).
              (currentDepth,"-1",i,row.getDouble(i))
            // The bit at position currentDepth-1 of the path says whether this node is a
            // left ("0") or right ("1") child of its parent.
            else if(path.indexOf("0",currentDepth-1) == currentDepth-1)
              (currentDepth,"0",i,row.getDouble(i))
            else
              (currentDepth,"1",i,row.getDouble(i))
        }

        splitPositionRDD = if(splitPositionRDD == null) medianRDD else splitPositionRDD union medianRDD

        // Push every record one level down: extend its path with 0/1 and update the leaf
        // count and start index of the node it now belongs to.
        sortedRDD = sortedRDD.map {
          case ((path,leafNodeNum,startIndex,row),index) =>
            if(leafNodeNum != 1){
              // NOTE(review): same ceil-on-Long-division no-op as in the filter above.
              val leftNodeNum = math.ceil(leafNodeNum / 2).toLong
              if (index - startIndex < leftNodeNum)
                ((path + 0,leftNodeNum,startIndex,row),index)
              else
                ((path + 1,leafNodeNum-leftNodeNum,startIndex+leftNodeNum,row),index)
            }else{
              // Already a single-record leaf: leave it untouched.
              ((path,leafNodeNum,startIndex,row),index)
            }
        }

        counter += 1
      }
      // Drop the zipWithIndex indices before re-sorting on the next axis.
      treeRDD = sortedRDD.map(_._1)
    }

    /**
      * Returns (leaf records keyed by path, split nodes keyed heap-style: root = 0, node
      * i has children at 2i+1 and 2i+2).
      * NOTE(review): zipWithIndex assigns indices *before* the "-1" markers are filtered
      * out, so the surviving indices can have gaps and match heap numbering only if no
      * marker precedes a real node in the union order — TODO confirm against searchPath.
      */
    (treeRDD.map(x=>(x._1,x._4)),splitPositionRDD.zipWithIndex().filter(_._1._2 != "-1").map{
      case ((dep,position,axis,median),index) => (index,position,axis,median)
    })
  }

  /**
    * Builds the kd-tree non-recursively, the classic way: one full sort per tree level,
    * cycling through the axes (axis = depth mod dimension count). Slower to build than
    * createKDTreeAxisFirst but faster to search.
    *
    * @return (leafRDD, nodeRDD): leaves as (bit-path, original Row); split nodes as
    *         (heap index, split axis, split value).
    */
  def createKDTreeRDD():(RDD[(String, Row)], RDD[(Long, Int, Double)]) = {
    val dataSize = src.count()

    /**
      * treeRDD: RDD[(String, Long, Long, Row)] — the kd-tree under construction.
      * _1 String: the record's bit-path in the tree ("0" = left, "1" = right per level).
      * _2 Long: number of leaf records under the record's current node.
      * _3 Long: index (in the global sorted order) of that node's first leaf record.
      * _4 Row: the original record.
      */
    var treeRDD = src.map(x=>("",dataSize,0l,x))

    /**
      * splitPositionRDD: RDD[(Int, String, Int, Double)] — the internal (split) nodes.
      * _1 Int: depth of the node in the tree (0 = root).
      * _2 String: "root", "0" (left child), "1" (right child), or "-1" leaf marker.
      * _3 Int: the axis (column index) this node splits on.
      * _4 Double: the split value (intended median).
      */
    var splitPositionRDD:RDD[(Int,String,Int,Double)] = null


    val depth = math.ceil(math.log(dataSize)/math.log(2)).toInt
    // One iteration per tree level.
    for(dep <- 0 until depth){
      // Axes rotate with depth, as in a textbook kd-tree.
      val axis = dep % columns.length
      // Sort by (numeric bit-path, value on this axis) so records of the same node are
      // contiguous and ordered along the split axis.
      // NOTE(review): BigInt(path, 2) collapses leading zeros; correctness relies on all
      // paths having equal length at a given level — TODO confirm.
      var sortedRDD: RDD[((String, Long, Long, Row), Long)] =
        treeRDD.sortBy{
          x => if(x._1 == "") (BigInt(0),x._4.getDouble(axis)) else (BigInt(x._1,2),x._4.getDouble(axis))
        }(
          new Ordering[(BigInt,Double)]{
            override def compare(x: (BigInt, Double), y: (BigInt, Double)): Int = {
              if(x._1 > y._1) 1
              else if (x._1 == y._1 && x._2 > y._2) 1
              else if (x._1 == y._1 && x._2 == y._2) 0
              else -1
            }
            // NOTE(review): (BigInt,Double) below is a tuple of the companion objects, so
            // this is effectively a ClassTag for Tuple2 — odd but compiles.
          },ClassTag((BigInt,Double).getClass)
        ).zipWithIndex()

      // Keep each node's split record (or the lone record of a finished leaf).
      // NOTE(review): leafNodeNum / 2 is Long division, so math.ceil is a no-op here —
      // for odd counts this picks the lower middle, not the upper — TODO confirm intent.
      val medianRDD =  sortedRDD.filter{
        case ((path,leafNodeNum,startIndex,row),index) =>
          leafNodeNum == 1 || index - startIndex == math.ceil(leafNodeNum / 2).toLong
      }.map{
        case ((path,leafNodeNum,startIndex,row),index)=>
          if(leafNodeNum == dataSize)
            (dep,"root",axis,row.getDouble(axis))
          else if(leafNodeNum == 1)
            // Single-record node: emit a "-1" marker (filtered out of the final result).
            (dep,"-1",axis,row.getDouble(axis))
          // The bit at position dep-1 of the path says whether this node is a left ("0")
          // or right ("1") child of its parent.
          else if(path.indexOf("0",dep-1) == dep-1)
            (dep,"0",axis,row.getDouble(axis))
          else
            (dep,"1",axis,row.getDouble(axis))
      }

      splitPositionRDD = if(splitPositionRDD == null) medianRDD else splitPositionRDD union medianRDD

      // Push every record one level down: extend its path with 0/1 and update the leaf
      // count and start index of the node it now belongs to.
      sortedRDD = sortedRDD.map {
        case ((path,leafNodeNum,startIndex,row),index) =>
          if(leafNodeNum != 1){
            // NOTE(review): same ceil-on-Long-division no-op as in the filter above.
            val leftNodeNum = math.ceil(leafNodeNum / 2).toLong
            if (index - startIndex < leftNodeNum)
              ((path + 0,leftNodeNum,startIndex,row),index)
            else
              ((path + 1,leafNodeNum-leftNodeNum,startIndex+leftNodeNum,row),index)
          }else{
            // Already a single-record leaf: leave it untouched.
            ((path,leafNodeNum,startIndex,row),index)
          }
      }
      treeRDD = sortedRDD.map(_._1)
    }

    /**
      * Returns (leaf records keyed by path, split nodes keyed heap-style: root = 0, node
      * i has children at 2i+1 and 2i+2 — the scheme searchPath navigates by).
      * NOTE(review): zipWithIndex assigns indices *before* the "-1" markers are filtered
      * out, so the surviving indices can have gaps relative to heap numbering — TODO
      * confirm against searchPath.
      */
    (treeRDD.map(x=>(x._1,x._4)),splitPositionRDD.zipWithIndex().filter(_._1._2 != "-1").map{
      case ((dep,position,axis,median),index) => (index,axis,median)
    })
  }

  /**
    * Builds the kd-tree (via createKDTreeRDD) and persists it to HBase: leaves keyed by
    * bit-path in leafTableName (family "leadInfo", columns col1..colN as Double bytes),
    * split nodes keyed by heap index (Long bytes) in nodeTableName (family "nodeInfo",
    * qualifiers "axis" and "median"). Both table names must be configured first.
    */
  def saveKDTree() {
    val (leafNodeRDD,nodeRDD) = createKDTreeRDD()
    if(leafTableName == null) throw new Exception("loss leafTableName")
    if(nodeTableName == null) throw new Exception("loss nodeTableName")
    // NOTE(review): "leadInfo" looks like a typo for "leafInfo", but it is used
    // consistently here and in the writer below, so renaming would change the schema.
    HandleHbase.createTable(leafTableName,"leadInfo")
    HandleHbase.createTable(nodeTableName,"nodeInfo")
    val jobConf = new JobConf(HandleHbase.conf,this.getClass)
    // NOTE(review): new Job(conf) is deprecated in newer Hadoop; Job.getInstance(conf)
    // is the replacement.
    val job = new Job(jobConf)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    job.setOutputValueClass(classOf[Result])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
    // Write the leaf records.
    job.getConfiguration.set(TableOutputFormat.OUTPUT_TABLE,leafTableName)
    leafNodeRDD.map{
      case (path,row) =>
        // Row key = bit-path string; one Double-encoded column per dimension.
        val p = new Put(Bytes.toBytes(path))
        for(i <- columns.indices)
          p.add(Bytes.toBytes("leadInfo"),Bytes.toBytes("col"+(i+1)),Bytes.toBytes(row.getDouble(i)))
        (new ImmutableBytesWritable, p)
    }.saveAsNewAPIHadoopDataset(job.getConfiguration)

    // Write the internal (split) nodes.
    job.getConfiguration.set(TableOutputFormat.OUTPUT_TABLE,nodeTableName)
    nodeRDD.map{
      case (index,axis,median) =>
        // Row key = heap index as Long bytes, matching the Gets issued by searchPath.
        val p = new Put(Bytes.toBytes(index))
        p.add(Bytes.toBytes("nodeInfo"),Bytes.toBytes("axis"),Bytes.toBytes(axis))
        p.add(Bytes.toBytes("nodeInfo"),Bytes.toBytes("median"),Bytes.toBytes(median))
        (new ImmutableBytesWritable, p)
    }.saveAsNewAPIHadoopDataset(job.getConfiguration)
  }

  /**
    * Descends the persisted tree from the root and returns the bit-path ("0"/"1" per
    * level) of the leaf region containing point. Nodes are fetched from HBase by heap
    * index (children of i at 2i+1 / 2i+2); descent stops at the first missing row.
    * NOTE(review): if nodeTableName was never set, nodeTable stays null and the first
    * get throws a NullPointerException — TODO confirm callers always configure it.
    */
  def searchPath(point:Array[Double]):String ={
    var path = ""
    // Open (and cache) the node-table handle on first use.
    if(nodeTableName != null && nodeTable == null){
      nodeTable = new HTable(HandleHbase.conf,nodeTableName)
    }
    var nodeIndex = 0l
    var node = nodeTable.get(new Get(Bytes.toBytes(nodeIndex)))
    while(!node.isEmpty){
      val median = Bytes.toDouble(node.getValue(Bytes.toBytes("nodeInfo"),Bytes.toBytes("median")))
      val axis = Bytes.toInt(node.getValue(Bytes.toBytes("nodeInfo"),Bytes.toBytes("axis")))
      // The left subtree holds values strictly below the split point (see construction).
      val isLeft = point(axis)<median
      if(isLeft) path += "0" else path += "1"
      nodeIndex = if(isLeft) 2 * nodeIndex + 1 else 2 * nodeIndex + 2
      node = nodeTable.get(new Get(Bytes.toBytes(nodeIndex)))
    }
    path
  }

  /**
    * Finds the k nearest neighbours of point in the persisted tree.
    * NOTE(review): unimplemented stub — currently does nothing and returns Unit.
    */
  def searchKNN(point:Array[Double],k: Int) = {

  }

}