package com.lefu.risk.storm.rule;

import java.util.UUID;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.StringScheme;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;

import com.lefu.risk.storm.generdata.ShuffGenerDataSpout;

/**
 * Storm topology wiring:
 * kafka spout -> 01bolt -> {status_count_bolt, trans_count_bolt} -> order-amount bolts.
 *
 * With a command-line argument the topology is submitted to a remote cluster under
 * that name; without arguments it runs inside an in-process {@link LocalCluster}.
 */
public class ParallelCalculateTopology {

	// Component ids — referenced by the grouping declarations in main().
	public static final String spoutName = "data_source_spout";

	public static final String jsbolt = "01bolt";

	public static final String statusCountBolt = "status_count_bolt";

	public static final String transCountBolt = "trans_count_bolt";

	public static final String allCountBolt = "all_count_bolt";

	public static final String topoName = "hahatopo";
	
	public static final String typeOrderAmount = "type_order_amount";
	public static final String statusOrderAmount = "status_order_amount";

	/**
	 * Builds and submits the topology.
	 *
	 * @param args if non-empty, {@code args[0]} is the topology name used for remote
	 *             submission via {@link StormSubmitter}; otherwise the topology runs
	 *             locally under {@link #topoName}.
	 */
	public static void main(String[] args) {
		TopologyBuilder builder = new TopologyBuilder();

		DataRule_01Bolt _01bolt = new DataRule_01Bolt();

		StatusCountBolt scbolt = new StatusCountBolt();

		TransTypeCountBolt tcbolt = new TransTypeCountBolt();

		OrderAmountBolt typeOrderAmountBolt = new OrderAmountBolt();
		OrderAmountBolt statusOrderAmountBolt = new OrderAmountBolt();

		BrokerHosts zks = new ZkHosts(
				"colony1:2181,colony2:2181,colony3:2181," + "colony4:2181,colony5:2181,colony6:2181,colony7:2181");

		// Random consumer id so each submission tracks offsets under a fresh ZK path.
		KafkaSpout orderDataKafkaSpout = buildKafkaSpout(zks, "orderData", "/orderData", UUID.randomUUID().toString());

		// BUG FIX: the generator spout and the kafka spout were both registered under
		// spoutName; TopologyBuilder rejects duplicate component ids, so main() would
		// throw before submitting. Only the kafka spout is registered now. The test
		// generator remains available for local runs:
		// builder.setSpout(spoutName, new ShuffGenerDataSpout());
		builder.setSpout(spoutName, orderDataKafkaSpout, 1);

		builder.setBolt(jsbolt, _01bolt, 1).localOrShuffleGrouping(spoutName);

//		builder.setBolt(statusCountBolt, scbolt, 20).fieldsGrouping(jsbolt, new Fields("STATUS"));
//		builder.setBolt(transCountBolt, tcbolt, 20).fieldsGrouping(jsbolt, new Fields("TRANSATION_TYPE"));

		builder.setBolt(statusCountBolt, scbolt, 1).localOrShuffleGrouping(jsbolt);
		builder.setBolt(transCountBolt, tcbolt, 1).localOrShuffleGrouping(jsbolt);

		builder.setBolt(typeOrderAmount, typeOrderAmountBolt, 1).fieldsGrouping(transCountBolt, new Fields("word"));
		// BUG FIX: previously subscribed to transCountBolt (copy-paste of the line
		// above); the status-order-amount bolt aggregates the status-count stream.
		builder.setBolt(statusOrderAmount, statusOrderAmountBolt, 1).fieldsGrouping(statusCountBolt, new Fields("word"));

//		builder.setBolt(allCountBolt, new AllCountBolt()).globalGrouping(statusCountBolt);
//		builder.setBolt(allCountBolt + "_trans", new AllCountBolt()).globalGrouping(transCountBolt);

		Config conf = new Config();

		// Cap un-acked tuples per spout task so a slow bolt cannot cause
		// unbounded in-flight load.
		conf.setMaxSpoutPending(40);
		conf.setNumWorkers(2);

		StormTopology topology = builder.createTopology();

		if (args.length > 0) {
			try {
				StormSubmitter.submitTopology(args[0], conf, topology);
			} catch (Exception e) {
				// NOTE(review): submission failure is only logged; consider
				// exiting non-zero so deploy scripts notice.
				e.printStackTrace();
			}
		} else {
			LocalCluster localCluster = new LocalCluster();
			localCluster.submitTopology(topoName, conf, topology);
		}

	}

	/**
	 * Builds a {@link KafkaSpout} that reads {@code topic}, decodes each message as a
	 * plain string, and stores consumer offsets in ZooKeeper under
	 * {@code zkRoot}/{@code id}.
	 *
	 * @param hosts  ZooKeeper hosts used for broker discovery
	 * @param topic  kafka topic to consume
	 * @param zkRoot ZK root path for offset storage
	 * @param id     consumer id (unique per submission here — see main())
	 */
	private static KafkaSpout buildKafkaSpout(BrokerHosts hosts, String topic, String zkRoot, String id) {
		SpoutConfig spoutConfig = new SpoutConfig(hosts, topic, zkRoot, id);
		spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
		// -1L == kafka.api.OffsetRequest.LatestTime(): start from the newest offset.
		spoutConfig.startOffsetTime = -1L;
		return new KafkaSpout(spoutConfig);
	}

}
