package com.yun.storm.Sales;

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;



/**
 * Real-time sales statistics topology.
 *
 * <p>Wires a {@link KafkaSpout} reading the "maxwell" topic (Maxwell CDC output)
 * into {@code SalesBolt} for per-event processing, then into
 * {@code SalesDBWriteBolt} for aggregation and persistence.
 *
 * <p>Run with at least one program argument to submit to a remote Storm cluster;
 * run with no arguments to start an in-process {@link LocalCluster} for development.
 *
 * @author WangXin
 * @version V1.0
 */
public class SalesTopology {

    // Component IDs derived from class simple names so the Storm UI stays readable.
    public final static String SPOUT_ID = KafkaSpout.class.getSimpleName();
    public final static String SALESBOLT_ID = SalesBolt.class.getSimpleName();
    public final static String SALESTOPOLOGY_ID = SalesTopology.class.getSimpleName();
    public final static String SALESDBWRITEBOLT_ID = SalesDBWriteBolt.class.getSimpleName();

    /**
     * Builds the topology and submits it either to a remote cluster (any argument
     * present) or to an in-process {@link LocalCluster} (no arguments).
     *
     * @param args any argument selects remote submission; empty/null selects local mode
     */
    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();

        // ZooKeeper ensemble used by Kafka.
        String brokerZkStr = "10.14.1.30:2181,10.14.1.40:2181,10.14.1.50:2181";
        ZkHosts zkHosts = new ZkHosts(brokerZkStr);
        // Kafka topic that carries the sales change events.
        String topic = "maxwell";
        // ZooKeeper root path under which the KafkaSpout stores consumer offsets.
        String zkRoot = "/kafkaspout";
        // Random consumer id: every run writes offsets to a fresh path.
        // NOTE(review): a fixed id would let restarts resume from the last
        // committed offset instead of re-reading — confirm this is intended.
        String id = UUID.randomUUID().toString();
        SpoutConfig spoutconf = new SpoutConfig(zkHosts, topic, zkRoot, id);

        builder.setSpout(SPOUT_ID, new KafkaSpout(spoutconf), 1); // single executor
        builder.setBolt(SALESBOLT_ID, new SalesBolt(), 1).shuffleGrouping(SPOUT_ID);
        // Fields grouping on "productID" so all updates for one product are
        // routed to the same task, keeping per-product aggregation correct.
        builder.setBolt(SALESDBWRITEBOLT_ID, new SalesDBWriteBolt(), 1)
                .fieldsGrouping(SALESBOLT_ID, new Fields("productID"));

        Map<String, Object> conf = new HashMap<>();
        conf.put(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE, 32);
        conf.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, 16384);
        conf.put(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE, 16384);

        try {
            if (args != null && args.length > 0) {
                // Server/production environment: deploy to the remote cluster.
                StormSubmitter.submitTopology(SALESTOPOLOGY_ID, conf, builder.createTopology());
            } else {
                // Local environment: run the topology in-process.
                LocalCluster cluster = new LocalCluster();
                cluster.submitTopology(SALESTOPOLOGY_ID, conf, builder.createTopology());
            }
        } catch (Exception e) {
            // Submission failed: report it and exit non-zero so scripted
            // deployments can detect the failure (previously exited 0).
            e.printStackTrace();
            System.exit(1);
        }
    }
}