//package org.nbict.iot.protocol.task.mongo;
//
//import org.apache.storm.Config;
//import org.apache.storm.LocalCluster;
//import org.apache.storm.LocalDRPC;
//import org.apache.storm.shade.org.apache.commons.collections.Factory;
//import org.apache.storm.trident.Stream;
//import org.apache.storm.trident.TridentState;
//import org.apache.storm.trident.TridentTopology;
//import org.apache.storm.trident.operation.builtin.Count;
//import org.apache.storm.trident.operation.builtin.Debug;
//import org.apache.storm.trident.state.StateType;
//import org.apache.storm.trident.testing.FixedBatchSpout;
//import org.apache.storm.trident.testing.Split;
//import org.apache.storm.tuple.Fields;
//import org.apache.storm.tuple.Values;
//
///**
// * Created by songseven on 18/6/25.
// */
//public class ReviewPastTridentDome {
//    @SuppressWarnings("unchecked")
//
//    public static void main(String[] args) throws InterruptedException
//
//    {
//
//        @SuppressWarnings("unchecked")
//
//        FixedBatchSpout spout01 = new FixedBatchSpout(new Fields("word"), 3,
//
//                new Values("abc8"),
//
//                new Values("eee9"),
//
//                new Values("iii8"),
//
//                new Values("kkk8"));
//
//        FixedBatchSpout spoutMerge01 = new FixedBatchSpout(new Fields("half_sentence"), 3,
//
//                new Values("this is a half of sentence 001"),
//
//                new Values("this is a half of sentence 002"),
//
//                new Values("this is a half of sentence 003"),
//
//                new Values("this is a half of sentence 004"));
//
//// spoutMerge01.setCycle(true);
//
//        FixedBatchSpout spoutMerge02 = new FixedBatchSpout(new Fields("sentence_half"), 2,
//
//                new Values("this is other half of sentence 001"),
//
//                new Values("this is other half of sentence 002"),
//
//                new Values("this is other half of sentence 003"),
//
//                new Values("this is other half of sentence 004"));
//
//// spoutMerge02.setCycle(true);
//
//        FixedBatchSpout spoutJoinOther = new FixedBatchSpout(new Fields("join_other","num"), 2,
//
//                new Values("other","001"),
//
//                new Values("other0","002"),
//
//                new Values("other1","003"),
//
//                new Values("other2","004"));
//
//// spoutJoinOther.setCycle(true);
//
//        TridentTopology topology = new TridentTopology();
//
//        Stream merge01 = topology.newStream("merge01", spoutMerge01);//merge需要field数量相同
//
//        Stream merge02 = topology.newStream("merge02", spoutMerge02);
//
//        Stream join = topology.newStream("join", spoutJoinOther);
//
////--------将2个stream合并以后与另一个流join，join过后做统计，并存储。join会同步流。
//
//        Stream split = topology.merge(new Fields("merge_sentence"), merge01, merge02)
//
//                .each(new Fields("merge_sentence"), new Split(), new Fields("word"));
//
//        topology.join(join, new Fields("join_other"),split, new Fields("word"),new Fields("other_00","num_00")).each(new Fields("other_00","num_00"), new Debug())
//
//                .groupBy(new Fields("num_00")).persistentAggregate(new Factory(StateType.Trans), new Fields("other_00","num_00"), new Count(), new Fields("count"))
//
//                .newValuesStream().each( new Fields("count"), new Debug());//只有other满足条件
//
////------合并2个流做存储，模拟原始数据的存储
//
//        Stream merge03=topology.newStream("merge03", spoutMerge01);
//
//        Stream merge04=topology.newStream("merge04", spoutMerge02);
//
//        TridentState state=topology.merge(new Fields("merge_sentence"), merge03,merge04)
//
//                .partitionPersist(new OrginalFactory(),new Fields("merge_sentence"), new OrginalUpdater(),new Fields("merge_sentence_after"));
//
//        state.newValuesStream().each(new Fields("merge_sentence_after"), new Debug());
//
////-------通过drpc调用存储的数据
//
//        LocalDRPC drpc = new LocalDRPC();
//
//        topology.newDRPCStream("queryTxid", drpc).each(new Fields("args"), new Debug())
//
//                .stateQuery(state, new Fields("args"), new OrinalQuery(), new Fields("txid","sen")).each(new Fields("args","txid","sen"), new Debug());
//
////如果在join之前有partitionPersist的操作（可能暗含着一切能保持state的操作），那么将会阻塞（partitionPersist不会被执行），这里可能是storm需要改进的地方。
//
//        Config config=new Config();
//
//        config.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1);//storm对persistentAggregate做持久化的保证强order，所以即便是TOPOLOGY_MAX_SPOUT_PENDING》1,在做持久化的时候，一样是按照顺序一个一个来处理，storm需要保证进入的数据按照严格的batchid递增的形式做数据处理
//
//        LocalCluster cluster=new LocalCluster();
//
//        cluster.submitTopology("merge-test", config, topology.build());
//
//        String result=drpc.execute("queryTxid", "1");
//
//        System.out.println("localDRPC result: "+result);
//
//        Thread.sleep(500000);
//
//        cluster.shutdown();
//    }
//}
