package david.java.flink_sql.udf;

import org.apache.commons.lang3.StringUtils;
import org.apache.flink.table.functions.AggregateFunction;

import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;

/**
 * @Description: After GROUP BY, performs a collect_set-style merge of a string
 *               column; the final result is formatted as String.join(",", set).
 * @Author: ZhaoDawei
 * @Date: Create in 3:42 PM 2022/6/8
 */
public class MergeStringUDAF extends AggregateFunction<String, MergeStringUDAF.MergeAccum> {

    /**
     * Accumulator for the merge.
     *
     * Fields must be basic types only, otherwise Flink's DataType extraction
     * will not recognize them for serialization; the set-merge logic therefore
     * lives in {@link #accumulate} and only the joined string is stored here.
     */
    public static class MergeAccum {
        /** Comma-joined, de-duplicated values seen so far; null until the first accumulate. */
        public String result;
    }

    @Override
    public MergeAccum createAccumulator() {
        return new MergeAccum();
    }

    /**
     * Returns the merged comma-joined string for the group, or {@code null}
     * if no non-blank value was ever accumulated.
     *
     * @param accumulator the group's accumulator
     * @return the de-duplicated, comma-joined result
     */
    @Override
    public String getValue(MergeAccum accumulator) {
        return accumulator.result;
    }

    /**
     * Merges {@code str} (itself possibly a comma-separated list) into the
     * accumulator, de-duplicating values. Blank input is ignored.
     *
     * @param acc the group's accumulator
     * @param str the incoming value; may contain multiple comma-separated items
     */
    public void accumulate(MergeAccum acc, String str) {
        if (StringUtils.isBlank(str)) {
            return;
        }
        // LinkedHashSet keeps first-seen order so the joined result is
        // deterministic, instead of depending on HashSet's hashing.
        Set<String> set = new LinkedHashSet<>();
        if (StringUtils.isNotBlank(acc.result)) {
            set.addAll(Arrays.asList(acc.result.split(",")));
        }
        set.addAll(Arrays.asList(str.split(",")));
        acc.result = String.join(",", set);
    }

    /**
     * Retraction is intentionally a no-op: the accumulator keeps no per-value
     * counts, so a value cannot be removed correctly.
     *
     * NOTE(review): on retracting streams this yields over-inclusive results —
     * confirm this UDAF is only used in append-only pipelines.
     *
     * @param acc the group's accumulator (unchanged)
     * @param str the value to retract (ignored)
     */
    public void retract(MergeAccum acc, String str) {
        // no-op: cannot retract without per-element counts
    }

    /** Ad-hoc smoke test for local runs. */
    public static void main(String[] args) {
        MergeStringUDAF udaf = new MergeStringUDAF();
        MergeAccum acc = udaf.createAccumulator();
        udaf.accumulate(acc, "a,b");
        udaf.accumulate(acc, "b,c");
        System.out.println(udaf.getValue(acc)); // expected: a,b,c
    }
}
