package cn.doitedu.dwetl.other

import java.util
import java.util.Map

import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, DataTypes, StructField, StructType}

/**
 * Spark UDAF that computes the Shannon entropy (base 2, in bits) of the
 * distribution of values in a Double column.
 *
 * Aggregation buffer: (value -> occurrence count, total row count).
 * Result: -sum(p_i * log2(p_i)) over the observed value frequencies,
 * or 0.0 for an empty group.
 */
object EntropyUDAF extends UserDefinedAggregateFunction{

  /** One input column: the Double value whose distribution we measure. */
  override def inputSchema: StructType = {
    StructType(StructField("peifu",DataTypes.DoubleType) :: Nil)
  }

  /** Buffer slot 0: value -> count map; slot 1: total number of rows seen. */
  override def bufferSchema: StructType = {
    new StructType()
      .add("mp",DataTypes.createMapType(DataTypes.DoubleType,DataTypes.IntegerType))
      .add("total",DataTypes.IntegerType)
  }

  override def dataType: DataType = DataTypes.DoubleType

  // Same input always yields the same entropy, so Spark may cache/re-plan freely.
  override def deterministic: Boolean = true

  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    // Seed with a Scala immutable Map, not java.util.HashMap: Catalyst's
    // converter for a MapType buffer column expects a scala.collection.Map.
    // Fully qualified because the file-level `import java.util.Map` shadows
    // Scala's Map in this scope.
    buffer.update(0, scala.collection.immutable.Map.empty[Double, Int])
    buffer.update(1, 0)
  }

  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    // Skip null inputs: getDouble on a null cell would throw at runtime,
    // and null contributes nothing to the distribution.
    if (!input.isNullAt(0)) {
      val ele    = input.getDouble(0)
      val counts = buffer.getMap[Double, Int](0)
      buffer.update(0, counts + (ele -> (counts.getOrElse(ele, 0) + 1)))
      buffer.update(1, buffer.getInt(1) + 1)
    }
  }

  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    val map1 = buffer1.getMap[Double, Int](0)
    val map2 = buffer2.getMap[Double, Int](0)
    // Sum counts per key; foldLeft over an immutable accumulator instead of
    // mutating a var in a loop.
    val merged = map2.foldLeft(map1.toMap) { case (acc, (k, v)) =>
      acc + (k -> (acc.getOrElse(k, 0) + v))
    }
    buffer1.update(0, merged)
    buffer1.update(1, buffer1.getInt(1) + buffer2.getInt(1))
  }

  override def evaluate(buffer: Row): Any = {
    val total = buffer.getInt(1).toDouble
    // Guard the empty group: without it, pi = 0/0 = NaN and the result is NaN.
    // Entropy of an empty distribution is conventionally 0.
    if (total == 0.0) {
      0.0
    } else {
      val counts = buffer.getMap[Double, Int](0)
      val entropy = counts.values.foldLeft(0.0) { (acc, cnt) =>
        val pi = cnt / total
        // log2(x) = ln(x) / ln(2)
        acc + pi * Math.log(pi) / Math.log(2)
      }
      -entropy
    }
  }
}
