package org.zjt.flink.wordcount;

import lombok.Data;
import org.apache.flink.api.common.functions.*;
import org.apache.flink.api.common.operators.Order;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.FilterOperator;
import org.apache.flink.api.java.operators.GroupReduceOperator;
import org.apache.flink.api.java.operators.SortPartitionOperator;
import org.apache.flink.util.Collector;
import org.zjt.flink.wordcount.util.WordCountData;
import scala.Tuple2;

import java.util.*;

/**
 * Description:
 *
 *  A top-K word-count example. The local ("per partition") word-count and
 *  top-K steps exist to handle data skew before the global aggregation:
 *
 *      DataStream of words
 *        -> (conceptually a 1h time window)
 *        -> converted to a DataSet of words
 *
 *        -> rebalance (random redistribution)
 *        -> local "word count" via mapPartition
 *        -> global "word count" via reduceGroup
 *
 *        -> rebalance (re-balance the data)
 *        -> local per-partition top-K via mapPartition
 *        -> global top-K via reduceGroup
 *
 * @author juntao.zhang
 * Date: 2018-09-29 16:05
 * @see
 */
public class WordQueryTopN {


    private static final Integer topN = 10;

    public static void main(String[] args) throws Exception {



        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

//
//        GroupReduceOperator<ElementWord, ElementWord> first = env.fromElements(WordCountData.WORDS).flatMap(new TextWordSplit()).filter((FilterFunction<ElementWord>) elementWord ->
//                !(Objects.isNull(elementWord.getWord()) || elementWord.getWord().length() < 4))
//                .groupBy("word")
//                .reduce((ReduceFunction<ElementWord>) (t1, t2) -> {
//                    t1.setCnt(t1.getCnt() + t2.getCnt());
//                    return t1;
//                }).sortPartition("cnt", Order.DESCENDING).setParallelism(1).first(topN); // 一个Partition
//
//
//        first.print();


        SortPartitionOperator<ElementWord> elementWordSortPartitionOperator = env.fromElements(WordCountData.WORDS).flatMap(new TextWordSplit()).filter((FilterFunction<ElementWord>) elementWord ->
                !(Objects.isNull(elementWord.getWord()) || elementWord.getWord().length() < 4))
                .groupBy("word")
                .reduce((ReduceFunction<ElementWord>) (t1, t2) -> {
                    t1.setCnt(t1.getCnt() + t2.getCnt());
                    return t1;
                }).sortPartition("cnt", Order.DESCENDING);


        GroupReduceOperator<ElementWord, ElementWord> elementWordElementWordGroupReduceOperator = elementWordSortPartitionOperator.rebalance().mapPartition(new MapPartitionFunction<ElementWord, ElementWord>(){
            /**
             * MapPartitionFunction 处理不同Partition,Partition内做聚合
             * @param iterable
             * @param collector
             * @throws Exception
             */
            @Override
            public void mapPartition(Iterable<ElementWord> iterable, Collector<ElementWord> collector) throws Exception {
                TreeMap<Integer, ElementWord> treemap = new TreeMap<>((y, x) -> (x < y) ? -1 : 1);
                Iterator<ElementWord> iterator = iterable.iterator();
                for (; iterator.hasNext(); ) {
                    ElementWord next = iterator.next();
                    treemap.put(next.cnt, next);
                    if (treemap.size() > topN) {
                        // 移除最后一个实例
                        treemap.pollLastEntry();
                    }
                }
                treemap.values().forEach(collector::collect);
                System.out.println("mapPartition treemap:" +treemap.values());
            }
        }).reduceGroup(new GroupReduceFunction<ElementWord, ElementWord>() {

            /**
             * 将所有的Partition 做聚合
             * @param iterable
             * @param collector
             * @throws Exception
             */
            @Override
            public void reduce(Iterable<ElementWord> iterable, Collector<ElementWord> collector) throws Exception {
                TreeMap<Integer, ElementWord> treemap = new TreeMap<>((y, x) -> (x < y) ? -1 : 1);
                Iterator<ElementWord> iterator = iterable.iterator();
                for (; iterator.hasNext(); ) {

                    ElementWord next = iterator.next();
                    treemap.put(next.cnt, next);
                    if (treemap.size() > topN) {
                        treemap.pollLastEntry();
                    }
                }
                treemap.values().forEach(collector::collect);

                System.out.println("reduceGroup treemap:" +treemap.values());
            }
        });


        elementWordElementWordGroupReduceOperator.setParallelism(4);
        List<ElementWord> collect = elementWordElementWordGroupReduceOperator.collect();
        System.out.println(collect);
    }


    public static class TextWordSplit implements FlatMapFunction<String, ElementWord> {
        @Override
        public void flatMap(String s, Collector<ElementWord> collector) throws Exception {

            String[] tokens = s.toLowerCase().split("\\W+");

            for (String token: tokens) {
                if (token.length() > 0) {
                    ElementWord elementWord = new ElementWord();
                    elementWord.setCnt(1);
                    elementWord.setWord(token);
                    collector.collect(elementWord);
                }
            }
        }
    }


    @Data
    public static class ElementWord{
        private String word;
        private Integer cnt;


    }

}
