package com.leadbank.stormstudy.kafka;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.jdbc.bolt.JdbcInsertBolt;
import org.apache.storm.jdbc.common.Column;
import org.apache.storm.jdbc.common.ConnectionProvider;
import org.apache.storm.jdbc.common.HikariCPConnectionProvider;
import org.apache.storm.jdbc.mapper.JdbcMapper;
import org.apache.storm.jdbc.mapper.SimpleJdbcMapper;
import org.apache.storm.kafka.spout.KafkaSpout;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;
import org.apache.storm.topology.TopologyBuilder;

import java.sql.Types;
import java.util.List;
import java.util.Map;
import java.util.UUID;

/**
 * Storm topology that consumes payment messages from the Kafka topic
 * {@code paymentInfo}, filters them, and persists the results to Redis.
 * A JDBC (MySQL) insert path is built but currently disabled.
 *
 * Created by hp on 2018/7/16.
 */
public class KafkaTopology {

    /** Kafka broker bootstrap servers the spout connects to. */
    private static final String BOOTSTRAP_SERVERS =
            "10.1.21.5:9092,10.1.21.6:9092,10.1.21.7:9092";

    /** Kafka topic the topology consumes from. */
    private static final String TOPIC_NAME = "paymentInfo";

    /**
     * Builds and submits the topology: kafka_spout -> filter -> toredis.
     * With a command-line argument, submits to a remote cluster under that
     * topology name; otherwise runs in a local cluster for ~10 seconds.
     *
     * @param args optional; args[0] is the remote topology name
     * @throws Exception if topology submission or the local-mode sleep fails
     */
    public static void main(String[] args) throws Exception
    {
        System.out.println("main");
        // Create the topology builder.
        TopologyBuilder builder = new TopologyBuilder();

        // New-style (storm-kafka-client) spout; the old ZkHosts/SpoutConfig
        // wiring was replaced by KafkaSpoutConfig. Key/value are Strings.
        KafkaSpout<String, String> kafkaSpout = new KafkaSpout<>(
                KafkaSpoutConfig.builder(BOOTSTRAP_SERVERS, TOPIC_NAME).build());

        // Kafka spout, parallelism 5 — the topology's single data source.
        builder.setSpout("kafka_spout", kafkaSpout, 5);

        // HikariCP datasource settings for the (currently disabled) JDBC
        // insert bolt. HikariCPConnectionProvider expects Map<String, Object>.
        Map<String, Object> hikariProps = Maps.newHashMap();
        hikariProps.put("dataSourceClassName", "com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
        hikariProps.put("dataSource.url", "jdbc:mysql://10.1.1.222/bigdata");
        hikariProps.put("dataSource.user", "opruser");
        hikariProps.put("dataSource.password", "opruser");

        ConnectionProvider connectionProvider = new HikariCPConnectionProvider(hikariProps);
        // Must specify the column schema when providing a custom insert query.
        List<Column> schemaColumns = Lists.newArrayList(
                new Column("orderId", Types.VARCHAR),
                new Column("createOrderTime", Types.DATE),
                new Column("num", Types.INTEGER));
        JdbcMapper mapper = new SimpleJdbcMapper(schemaColumns);
        // NOTE(review): this bolt is constructed but never added to the
        // topology — the "insert" setBolt below is commented out. Kept for
        // easy re-enabling; delete if MySQL persistence is abandoned.
        JdbcInsertBolt userPersistanceBolt = new JdbcInsertBolt(connectionProvider, mapper)
                .withInsertQuery("insert into paymentinfo (orderId, createOrderTime, num) values (?,?,?)");

        // Filter bolt, parallelism 8, fed by the Kafka spout.
        builder.setBolt("filter", new FilterMessageBlot(), 8).shuffleGrouping("kafka_spout");
        //builder.setBolt("insert", userPersistanceBolt, 3).shuffleGrouping("filter");
        // Redis persistence bolt, parallelism 3, fed by the filter.
        builder.setBolt("toredis", new Save2RedisBlot(), 3).shuffleGrouping("filter");

        Config conf = new Config();
        conf.setDebug(true);
        if (args != null && args.length > 0) {
            // Cluster mode: submit under the name supplied on the command line.
            conf.setNumWorkers(3);

            StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
        }
        else {
            // Local mode: cap parallelism, run briefly, then shut down.
            conf.setMaxTaskParallelism(3);

            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("word-count", conf, builder.createTopology());

            Thread.sleep(10000);

            cluster.shutdown();
        }
    }
}
