package com.lefu.risk.storm.topology;


import com.lefu.risk.storm.bolt.OrderReportBolt;
import com.lefu.risk.storm.bolt.OrderDataBolt;
import com.lefu.risk.storm.bolt.ResultDataBolt;
import com.lefu.risk.storm.bolt.ResultReportBolt;
import com.lefu.risk.storm.serializer.OrderDataSerializable;
import com.lefu.risk.storm.spout.LocalOrderDataSpout;
import com.lefu.risk.storm.spout.LocalResultDataSpout;
import com.lefu.risk.storm.to.OrderDataTo;
import com.lefu.risk.storm.utils.JedisPoolUtil;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.kafka.*;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.utils.Utils;

import java.math.BigDecimal;
import java.util.UUID;


/**
 * Entry point that assembles and launches the risk-rule Storm topology.
 *
 * <p>Two parallel pipelines are wired: order data
 * (spout -&gt; OrderDataBolt -&gt; OrderReportBolt) and result data
 * (spout -&gt; ResultDataBolt -&gt; ResultReportBolt).
 *
 * <p>Run mode is chosen by the command line: with at least one argument
 * ({@code args[0]} = topology name) the topology is submitted to a real
 * cluster and reads from Kafka; with no arguments it runs in an in-process
 * {@link LocalCluster} fed by the local test spouts.
 */
public class RuleTopologyMain {

    public static void main(String[] args) {
        try {
            // Cluster mode iff a topology name was passed on the command line.
            boolean clusterMode = args != null && args.length > 0;

            TopologyBuilder builder = new TopologyBuilder();

            if (clusterMode) {
                // ZooKeeper ensemble the Kafka brokers register with.
                BrokerHosts zks = new ZkHosts(
                                "colony1:2181,colony2:2281,colony3:2381," +
                                "colony4:2181,colony5:2281,colony6:2381,colony7:2381");

                // Cluster deployment: consume real events from Kafka.
                // NOTE(review): a random UUID as the consumer id means offsets are
                // never resumed after a restart — confirm this is intentional.
                builder.setSpout("orderDataSpout",
                        buildKafkaSpout(zks, "orderData", "/orderData", UUID.randomUUID().toString()));
                builder.setSpout("resultDataSpout",
                        buildKafkaSpout(zks, "resultData", "/resultData", UUID.randomUUID().toString()));
            } else {
                // Local testing: in-process spouts that generate sample data.
                builder.setSpout("orderDataSpout", new LocalOrderDataSpout());
                builder.setSpout("resultDataSpout", new LocalResultDataSpout());
            }

            // Order-data pipeline.
            builder.setBolt("orderDataBolt", new OrderDataBolt(), 30).setNumTasks(30)
                    .localOrShuffleGrouping("orderDataSpout");
            builder.setBolt("orderReportBolt", new OrderReportBolt(), 30).setNumTasks(30)
                    .localOrShuffleGrouping("orderDataBolt");

            // Result-data pipeline.
            builder.setBolt("resultDataBolt", new ResultDataBolt(), 30).setNumTasks(30)
                    .localOrShuffleGrouping("resultDataSpout");
            builder.setBolt("resultReportBolt", new ResultReportBolt(), 30).setNumTasks(30)
                    .localOrShuffleGrouping("resultDataBolt");

            Config topologyConf = new Config();
            // Custom Kryo serializer for the order transfer object.
            topologyConf.registerSerialization(OrderDataTo.class, OrderDataSerializable.class);
            topologyConf.setDebug(true);

            if (clusterMode) {
                topologyConf.setNumWorkers(10);
                topologyConf.setMaxTaskParallelism(1000);
                StormSubmitter.submitTopologyWithProgressBar(args[0], topologyConf, builder.createTopology());
            } else {
                topologyConf.setMaxTaskParallelism(40);
                LocalCluster cluster = new LocalCluster();
                cluster.submitTopology("riskTopology", topologyConf, builder.createTopology());
                // Keep the in-process cluster alive long enough for manual testing.
                Utils.sleep(800000000);
                cluster.shutdown();
            }
        } catch (Exception e) {
            // Top-level boundary of a main(): report and exit; no logger is wired here.
            e.printStackTrace();
        }
    }

    /**
     * Builds a Kafka spout reading {@code topic} via the given ZooKeeper hosts,
     * storing consumer offsets under {@code zkRoot} for consumer {@code id}.
     * Messages are decoded as plain strings.
     *
     * @param hosts  ZooKeeper broker-host descriptor
     * @param topic  Kafka topic to consume
     * @param zkRoot ZooKeeper path under which offsets are tracked
     * @param id     unique consumer id for offset bookkeeping
     * @return a configured {@link KafkaSpout}
     */
    private static KafkaSpout buildKafkaSpout(BrokerHosts hosts, String topic, String zkRoot, String id) {
        SpoutConfig spoutConfig = new SpoutConfig(hosts, topic, zkRoot, id);
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        return new KafkaSpout(spoutConfig);
    }
}
