package pers.amos.eshop.storm;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;
import org.apache.storm.utils.Utils;
import pers.amos.eshop.storm.bolt.LogParseBolt;
import pers.amos.eshop.storm.bolt.ProductCountBolt;
import pers.amos.eshop.storm.spout.AccessLogKafkaSpout;


/**
 * 热数据统计拓扑
 */
/**
 * Hot-product statistics topology.
 *
 * <p>Wiring: {@code AccessLogKafkaSpout} -&gt; {@code LogParseBolt} (shuffle-grouped)
 * -&gt; {@code ProductCountBolt} (fields-grouped on {@code "productId"} so all tuples
 * for the same product are routed to the same counting task).
 *
 * <p>Invoked with a topology name argument it submits to a real Storm cluster;
 * invoked with no arguments it runs in an in-process {@link LocalCluster}
 * (e.g. directly from the IDE).
 */
public class HotProductTopology {

    // Component ids, shared between the declarations and the groupings so a
    // typo cannot silently break the wiring.
    private static final String KAFKA_SPOUT_ID = "AccessLogKafkaSpout";
    private static final String LOG_PARSE_BOLT_ID = "LogParseBolt";
    private static final String PRODUCT_COUNT_BOLT_ID = "ProductCountBolt";

    /**
     * How long (ms) the local cluster stays alive before shutdown. Must be
     * large enough for data to flow end-to-end: the original author notes
     * that 60s was far too short — the process ended before any counts
     * could be accumulated.
     */
    private static final long LOCAL_RUN_MILLIS = 6_000_000L;

    /**
     * Builds and launches the topology.
     *
     * @param args optional; {@code args[0]} is the topology name for cluster
     *             submission. With no args the topology runs locally.
     * @throws Exception if cluster submission fails — propagated (instead of
     *                   being swallowed by printStackTrace) so a failed submit
     *                   exits non-zero rather than appearing to succeed.
     */
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();

        builder.setSpout(KAFKA_SPOUT_ID, new AccessLogKafkaSpout(), 1);
        builder.setBolt(LOG_PARSE_BOLT_ID, new LogParseBolt(), 1)
                .setNumTasks(2)
                .shuffleGrouping(KAFKA_SPOUT_ID);
        // fieldsGrouping on "productId" guarantees every tuple for a given
        // product lands on the same ProductCountBolt task, keeping per-product
        // counts correct across the 2 parallel executors.
        builder.setBolt(PRODUCT_COUNT_BOLT_ID, new ProductCountBolt(), 2)
                .setNumTasks(2)
                .fieldsGrouping(LOG_PARSE_BOLT_ID, new Fields("productId"));

        Config config = new Config();

        if (args != null && args.length > 0) {
            // Cluster mode: args[0] is the topology name.
            config.setNumWorkers(3);
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {
            // Local mode (run straight from the IDE).
            config.setMaxTaskParallelism(20);

            LocalCluster cluster = new LocalCluster();
            try {
                cluster.submitTopology("HotProductTopology", config, builder.createTopology());
                // Keep the JVM alive long enough for tuples to be emitted and
                // counted; see LOCAL_RUN_MILLIS for why this is so large.
                Utils.sleep(LOCAL_RUN_MILLIS);
            } finally {
                // Shut the local cluster down even if submit/sleep throws.
                cluster.shutdown();
            }
        }
    }

}
