package cn.lsh.spark.sql;

import org.apache.spark.sql.Row;
import org.apache.spark.sql.expressions.MutableAggregationBuffer;
import org.apache.spark.sql.expressions.UserDefinedAggregateFunction;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

import java.util.Arrays;

/**
 * Spark SQL UDAF that computes the average length (in UTF-16 code units, per
 * {@link String#length()}) of a string column.
 *
 * <p>Null inputs are ignored, matching SQL {@code AVG} semantics: they contribute
 * neither to the total length nor to the row count. If every input in a group is
 * null (or the group is empty), the result is {@code null} rather than {@code NaN}.
 *
 * <p>The total length is accumulated as a {@code long} to avoid integer overflow
 * when summing lengths across large datasets.
 *
 * <p>NOTE(review): {@code UserDefinedAggregateFunction} is deprecated since Spark
 * 3.0; consider migrating to {@code org.apache.spark.sql.expressions.Aggregator}.
 */
public class AvgLen extends UserDefinedAggregateFunction {

	/** One nullable string column as input. */
	@Override
	public StructType inputSchema() {
		return DataTypes.createStructType(Arrays.asList(DataTypes.createStructField("field", DataTypes.StringType, true)));
	}

	/** Aggregation buffer: total length of non-null inputs, and their count. */
	@Override
	public StructType bufferSchema() {
		return DataTypes.createStructType(Arrays.asList(
				// Running total of input string lengths; long to avoid overflow.
				DataTypes.createStructField("length", DataTypes.LongType, true),
				// Number of non-null inputs seen so far.
				DataTypes.createStructField("count", DataTypes.LongType, true)));
	}

	/** The result is a single float: average length. */
	@Override
	public DataType dataType() {
		return DataTypes.FloatType;
	}

	/** Same input always yields the same output. */
	@Override
	public boolean deterministic() {
		return true;
	}

	/** Start each partition's buffer at zero length and zero count. */
	@Override
	public void initialize(MutableAggregationBuffer buffer) {
		buffer.update(0, 0L);
		buffer.update(1, 0L);
	}

	/**
	 * Folds one input row into the partition-local buffer.
	 * Null inputs are skipped entirely so they do not skew the average.
	 */
	@Override
	public void update(MutableAggregationBuffer buffer, Row input) {
		if (input.isNullAt(0)) {
			return;
		}
		String field = input.getString(0);
		buffer.update(0, buffer.getLong(0) + field.length());
		buffer.update(1, buffer.getLong(1) + 1L);
	}

	/** Merges two partition buffers by summing their totals and counts. */
	@Override
	public void merge(MutableAggregationBuffer buffer1, Row buffer2) {
		buffer1.update(0, buffer1.getLong(0) + buffer2.getLong(0));
		buffer1.update(1, buffer1.getLong(1) + buffer2.getLong(1));
	}

	/**
	 * Produces the final average. Returns {@code null} (SQL convention) when no
	 * non-null inputs were aggregated, instead of {@code NaN} from 0/0.
	 */
	@Override
	public Object evaluate(Row row) {
		long count = row.getLong(1);
		if (count == 0L) {
			return null;
		}
		return (float) row.getLong(0) / count;
	}
}
