package cn.doitedu

import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, DataTypes, StructField, StructType}

import scala.util.Random

/**
 * Demo UDAF that keeps the smallest input seen, where each incoming value is
 * first scaled by a random factor before being compared against the buffer.
 *
 * NOTE(review): the original declared `deterministic = true` while calling
 * `Random` inside `update`. Spark uses that flag to justify caching and
 * re-evaluating the expression, assuming identical inputs give identical
 * output — false here, so `deterministic` now honestly returns `false`.
 */
object TestUdaf extends UserDefinedAggregateFunction {

  // Single shared RNG — the original allocated a new Random for every row.
  private val rnd = new Random()

  // One Int input column named "id".
  override def inputSchema: StructType = new StructType(Array(StructField("id", DataTypes.IntegerType)))

  // Aggregation buffer: a single running Int (current smallest candidate).
  override def bufferSchema: StructType = new StructType(Array(StructField("avg", DataTypes.IntegerType)))

  // Final result type of the aggregate.
  override def dataType: DataType = DataTypes.IntegerType

  // Must be false: `update` multiplies the input by a random factor, so the
  // same input rows can produce different results across executions.
  override def deterministic: Boolean = false

  // Identity element for a "keep the smaller value" aggregate. The original
  // seeded the buffer with the arbitrary constant 2, silently flooring the
  // result so most inputs could never win the comparison.
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer.update(0, Int.MaxValue)
  }

  // Keep the incoming value when its randomly scaled form beats the buffer.
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    val current = buffer.getAs[Int](0)
    val in = input.getAs[Int](0)
    if (in * rnd.nextInt(20) < current) {
      buffer.update(0, in)
    }
  }

  // Combine two partial buffers deterministically by taking the smaller one.
  // The original routed merge through `update`, re-applying the random factor
  // to already-aggregated partial results — merges must be pure combinations
  // of buffers, or the final result varies with partitioning.
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    val left = buffer1.getAs[Int](0)
    val right = buffer2.getAs[Int](0)
    if (right < left) {
      buffer1.update(0, right)
    }
  }

  // Emit the buffered value as the aggregate result.
  override def evaluate(buffer: Row): Any = buffer.getAs[Int](0)
}
