package cn.doitedu.dwetl.other

import java.util

import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, DataTypes, StructField, StructType}

/**
 * Spark SQL UDAF that aggregates (category, weight) pairs and returns the
 * Shannon entropy of the resulting weight distribution:
 *
 *   H = -sum_i( p_i * ln(p_i) ),   p_i = weight_i / totalWeight
 *
 * Buffer layout: a single Map[String, Double] of category -> accumulated weight.
 * Result: Double; 0.0 when no positive weight was accumulated.
 *
 * NOTE(review): the previous evaluate() used `log(total) * p / log(p)` and
 * returned `-1 / sum` — a mangled entropy formula that divides by zero when a
 * single category holds all the weight. Replaced with the standard definition.
 */
object EntropyUDAF2 extends UserDefinedAggregateFunction {

  /** Input per row: a category label and its (double) weight. */
  override def inputSchema: StructType =
    StructType(
      StructField("cat", DataTypes.StringType) ::
      StructField("peifu", DataTypes.DoubleType) :: Nil)

  /** Aggregation buffer: running map of category -> accumulated weight. */
  override def bufferSchema: StructType =
    new StructType()
      .add("mp", DataTypes.createMapType(DataTypes.StringType, DataTypes.DoubleType))

  override def dataType: DataType = DataTypes.DoubleType

  override def deterministic: Boolean = true

  /** Start from an empty Scala map, matching what update/merge write back. */
  override def initialize(buffer: MutableAggregationBuffer): Unit =
    buffer.update(0, Map.empty[String, Double])

  /**
   * Folds one input row into the buffer by adding its weight to the row's
   * category. Rows with a null category or null weight are skipped — calling
   * getString/getDouble on a null field would NPE / yield a bogus value.
   */
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    if (!input.isNullAt(0) && !input.isNullAt(1)) {
      val cat = input.getString(0)
      val w   = input.getDouble(1)
      val acc = buffer.getMap[String, Double](0)
      buffer.update(0, acc + (cat -> (acc.getOrElse(cat, 0.0) + w)))
    }
  }

  /** Merges two partial buffers by summing the weights per category. */
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    val left  = buffer1.getMap[String, Double](0)
    val right = buffer2.getMap[String, Double](0)
    val merged = right.foldLeft(left) {
      case (acc, (cat, w)) => acc + (cat -> (acc.getOrElse(cat, 0.0) + w))
    }
    buffer1.update(0, merged)
  }

  /**
   * Computes Shannon entropy H = -sum(p * ln p) over the accumulated weights.
   *
   * Only strictly positive weights participate (ln of a zero/negative weight
   * is undefined — the old `!= 0` filter let negatives through and produced
   * NaN). An empty / all-non-positive buffer yields 0.0, and the degenerate
   * single-category case correctly yields 0.0 as well (p = 1, ln(1) = 0).
   */
  override def evaluate(buffer: Row): Any = {
    val weights = buffer.getMap[String, Double](0).filter(_._2 > 0)
    if (weights.isEmpty) 0.0
    else {
      val total = weights.values.sum
      weights.values.foldLeft(0.0) { (h, w) =>
        val p = w / total
        h - p * Math.log(p)
      }
    }
  }
}
