/*
 * Licensed to the Apache Software Foundation (ASF) under one
 *   or more contributor license agreements.  See the NOTICE file
 *   distributed with this work for additional information
 *   regarding copyright ownership.  The ASF licenses this file
 *   to you under the Apache License, Version 2.0 (the
 *   "License"); you may not use this file except in compliance
 *   with the License.  You may obtain a copy of the License at
 *  
 *   http://www.apache.org/licenses/LICENSE-2.0
 *  
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS,
 *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *   See the License for the specific language governing permissions and
 *   limitations under the License.
 */

package com.roy.kafka.trident;

import org.apache.storm.LocalDRPC;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.kafka.trident.TransactionalTridentKafkaSpout;
import org.apache.storm.trident.Stream;
import org.apache.storm.trident.TridentState;
import org.apache.storm.trident.TridentTopology;
import org.apache.storm.trident.operation.BaseFilter;
import org.apache.storm.trident.operation.builtin.Count;
import org.apache.storm.trident.operation.builtin.Debug;
import org.apache.storm.trident.operation.builtin.FilterNull;
import org.apache.storm.trident.operation.builtin.MapGet;
import org.apache.storm.trident.spout.ITridentDataSource;
import org.apache.storm.trident.testing.MemoryMapState;
import org.apache.storm.trident.testing.Split;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Fields;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TridentKafkaConsumerTopology {
    protected static final Logger LOG = LoggerFactory.getLogger(TridentKafkaConsumerTopology.class);

    /**
     * Builds the word-count topology without a DRPC handle (distributed mode).
     *
     * <p>See {@link TridentKafkaConsumerTopology#newTopology(LocalDRPC, ITridentDataSource)}
     *
     * @param tridentSpout the Trident data source supplying sentences to count
     * @return the assembled word-count topology
     */
    public static StormTopology newTopology(ITridentDataSource tridentSpout) {
        return newTopology(null, tridentSpout);
    }

    /**
     * @param drpc the DRPC stream to be used in querying the word counts. Can be null in distributed mode
     * @param tridentSpout the Trident data source supplying sentences to count
     * @return a trident topology that consumes sentences from the kafka topic specified using a
     * {@link TransactionalTridentKafkaSpout} computes the word count and stores it in a {@link MemoryMapState}.
     */
    public static StormTopology newTopology(LocalDRPC drpc, ITridentDataSource tridentSpout) {
        final TridentTopology tridentTopology = new TridentTopology();
        addDRPCStream(tridentTopology, addTridentState(tridentTopology, tridentSpout), drpc);
        return tridentTopology.build();
    }

    /**
     * Attaches a DRPC query stream to the topology. The spout side first materializes a
     * (word, count) {@link TridentState} (an intermediate result set); this stream then
     * reads the current occurrence count of each word in a DRPC request from that state.
     */
    private static Stream addDRPCStream(TridentTopology tridentTopology, final TridentState state, LocalDRPC drpc) {
        // Open a DRPC stream named "words".
        return tridentTopology.newDRPCStream("words", drpc)
                // Split the request args (e.g. "a b c") into one tuple per token. Each new
                // tuple keeps the original fields plus the new field, named "word".
                .each(new Fields("args"), new Split(), new Fields("word"))
                // Group by the "word" field.
                .groupBy(new Fields("word"))
                // Run MapGet() against the (word, count) state: look up the value for each
                // word and append it to the tuple as the "count" field.
                .stateQuery(state, new Fields("word"), new MapGet(), new Fields("count"))
                // Drop tuples whose count is null. An each() filter that does not emit
                // (collector.emit) for a tuple effectively filters that tuple out.
                .each(new Fields("count"), new FilterNull())
                // Keep only the "word" and "count" fields — this strips the remaining
                // fields carried over from the original request tuple.
                .project(new Fields("word", "count"))
                // filter() keeps a tuple when isKeep returns true, drops it on false.
                .filter(new BaseFilter() {
                    @Override
                    public boolean isKeep(TridentTuple tuple) {
                        // Log via SLF4J (parameterized) rather than System.err so the DRPC
                        // results land in the worker log at a controllable level.
                        LOG.debug("DRPC RESULT: {}", tuple);
                        return true;
                    }
                });
    }

    /**
     * Builds the persistent (word, count) state from the spout stream: each consumed
     * value is split into words, grouped by word, counted, and stored in a map state.
     */
    private static TridentState addTridentState(TridentTopology tridentTopology, ITridentDataSource tridentSpout) {
        // Open a stream over the ITridentDataSource (e.g. a Kafka Trident spout).
        final Stream spoutStream = tridentTopology.newStream("spout1", tridentSpout).parallelismHint(2);
        // The state itself is built by persistentAggregate below. With a Kafka spout the
        // emitted fields depend on the configured translator; this example assumes a
        // translator emitting (topic, value), where "value" holds the sentence text.
        return spoutStream.each(spoutStream.getOutputFields(), new BaseFilter() {
            @Override
            public boolean isKeep(TridentTuple tuple) {
                // Pass-through filter used purely to log the raw spout tuples.
                LOG.debug("addTridentState RESULT: {}", tuple);
                return true;
            }
        })
                // Split the "value" field on whitespace; each token becomes a "word" field
                // appended to the original tuple's fields.
                .each(new Fields("value"), new Split(), new Fields("word"))
                // Group by word.
                .groupBy(new Fields("word"))
                // Count each group; the result field is declared as "count", yielding
                // (word, count). Note: persistentAggregate rebuilds the tuple, so the
                // original fields (topic, etc.) are dropped here.
                .persistentAggregate(new DebugMemoryMapState.Factory(), new Count(), new Fields("count"));
    }
}
