package com.wis.bolt.to_kafka;

import com.wis.TopologyDrive;
import com.wis.pool.kafka.KafkaPool;
import com.wis.pool.kafka.KafkaProducerApp;
import org.apache.commons.lang.StringUtils;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Map;
import java.util.Properties;

/**
 * Storm bolt that forwards risk alert ("risk link") messages to a Kafka topic
 * through a pooled producer. Terminal sink: acks every tuple and emits nothing.
 *
 * @author fan
 * @since 2018/10/30
 */
public class ToRiskLinkKafkaBolt extends BaseRichBolt {
    private static final Logger LOGGER = LoggerFactory.getLogger(ToRiskLinkKafkaBolt.class);
    private OutputCollector collector;
    // Shared by all executors of this bolt in the same worker JVM; created once in initPool().
    private static volatile KafkaPool<String, String> pool = null;
    // Destination Kafka topic, read from the topology configuration in prepare().
    private static String topic = null;

    @Override
    public void prepare(Map map, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
        String kafkaProducerServer = map.get(TopologyDrive.KAFKA_PRODUCER_SERVER).toString();
        initPool(kafkaProducerServer);
        topic = map.get(TopologyDrive.KAFKA_PRODUCER_RISK_LINK_TOPIC).toString();
    }

    /**
     * Sends each non-blank tuple value to Kafka. Payloads longer than 4 chars are
     * forwarded with a "35"-prefixed key; short values containing "over" are turned
     * into a synthetic "-risk" marker record derived from the key. The tuple is
     * always acked, even on failure, so the topology does not replay sink errors.
     */
    @Override
    public void execute(Tuple tuple) {
        String key = tuple.getStringByField("key");
        String value = tuple.getStringByField("value");
        try {
            if (StringUtils.isNotBlank(value)) {
                KafkaProducerApp<String, String> producer = pool.borrowProducer();
                try {
                    if (value.length() > 4) {
                        producer.send(topic, "35" + key, value);
                    } else if (value.contains("over")) {
                        // End-of-batch marker: build a synthetic "-risk" key/value from key slices.
                        // NOTE(review): assumes key is at least 21 chars here — confirm with upstream.
                        String marker = key.substring(1, 2) + key.substring(9, 21) + "-" + "risk";
                        producer.send(topic, marker, marker);
                    }
                } finally {
                    // Return the producer even when send() throws, so the pool does not leak.
                    pool.returnProducer(producer);
                }
            }
        } catch (Exception e) {
            // Log with the full stack trace instead of printStackTrace()/e.toString().
            LOGGER.error("Failed to forward risk-link message, key={}", key, e);
        } finally {
            this.collector.ack(tuple);
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // Terminal sink: no downstream streams are declared.
    }

    /**
     * Creates the shared producer pool on first call. Idempotent and synchronized:
     * subsequent calls (e.g. other executors preparing in the same worker JVM)
     * are no-ops instead of racing to replace the static pool.
     *
     * @param server Kafka bootstrap server list; ignored when blank
     */
    private static synchronized void initPool(String server) {
        if (pool == null && StringUtils.isNotBlank(server)) {
            Properties props = new Properties();
            // Producer connection/batching settings.
            props.put("bootstrap.servers", server);
            props.put("acks", "all");
            props.put("retries", 0);
            props.put("batch.size", 16384);
            props.put("linger.ms", 1);
            props.put("buffer.memory", 33554432);
            props.put("key.serializer",
                    "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer",
                    "org.apache.kafka.common.serialization.StringSerializer");

            pool = new KafkaPool<String, String>(props);
        }
    }
}
