package com.wis.bolt.to_kafka;

import com.wis.TopologyDrive;
import com.wis.pool.kafka.KafkaPool;
import com.wis.pool.kafka.KafkaProducerApp;
import com.wis.traffic.entity.dao.DataTimes;
import com.wis.traffic.post.service.DataTimesService;
import com.wis.traffic.post.service.impl.DataTimesServiceImpl;
import org.apache.commons.lang.StringUtils;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;

import java.util.Map;
import java.util.Properties;

/**
 * Storm bolt that forwards tuples (string fields "key" / "value") to a Kafka topic
 * through a pooled producer. When a value line contains the marker "over", it also
 * parses positional type fields out of the line/key and upserts a {@link DataTimes}
 * row to record that the batch finished. (traffic_fan — road-segment line data)
 *
 * @author fan
 * @since 2018/10/29
 */
public class ToMPGKafkaBolt extends BaseRichBolt {
    private static final long serialVersionUID = 1L;

    private OutputCollector collector;
    // Per-task state, deliberately NOT static: with several tasks in one worker
    // JVM, static fields would be shared, and one task's cleanup() would close
    // the pool other tasks are still using. All three are (re)created in
    // prepare(), so they are transient for bolt serialization.
    private transient KafkaPool<String, String> pool;
    private transient DataTimesService dataTimesService;
    private String topic; // destination Kafka topic

    @Override
    public void prepare(Map map, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
        this.dataTimesService = new DataTimesServiceImpl();
        // Kafka producer bootstrap servers; fail fast with a clear message if
        // the topology config is missing instead of an anonymous NPE.
        String kafkaProducerServer = requiredConfig(map, TopologyDrive.KAFKA_PRODUCER_SERVER);
        initPool(kafkaProducerServer);
        this.topic = requiredConfig(map, TopologyDrive.KAFKA_PRODUCER_MPG_Point_TOPIC);
    }

    @Override
    public void execute(Tuple tuple) {
        String keyLine = tuple.getStringByField("key");
        String line = tuple.getStringByField("value");
        try {
            if (StringUtils.isNotBlank(line)) {
                // Borrow/return in try-finally: if send() throws, the producer
                // must still go back to the pool, otherwise it leaks.
                KafkaProducerApp<String, String> producer = pool.borrowProducer();
                try {
                    producer.send(topic, keyLine, line);
                } finally {
                    pool.returnProducer(producer);
                }
                // An "over" marker means the batch is complete: persist its
                // completion metadata keyed by the positional type fields.
                if (line.contains("over")) {
                    DataTimes dataTimes = new DataTimes();
                    // assumes single-digit type codes at fixed offsets 0, 2, 4
                    // of the value line — TODO confirm against the producer format
                    dataTimes.setData_type(Integer.parseInt(line.substring(0, 1)));
                    dataTimes.setGeo_type(Integer.parseInt(line.substring(2, 3)));
                    dataTimes.setProduct_type(Integer.parseInt(line.substring(4, 5)));
                    // assumes the update timestamp occupies key offsets [9, 21)
                    // — TODO confirm against the key format
                    dataTimes.setUpdated_time_zf(keyLine.substring(9, 21));
                    dataTimesService.insertOrUpdate(dataTimes);
                }
            }
        } catch (Exception e) {
            // Bad records are logged and dropped, not replayed (see finally).
            System.err.println("错误的数据：" + line);
            e.printStackTrace();
        } finally {
            // Always ack so a malformed tuple is not redelivered forever.
            this.collector.ack(tuple);
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // Terminal bolt: emits nothing downstream.
    }

    @Override
    public void cleanup() {
        // Release this task's producer pool.
        if (pool != null) {
            pool.close();
            pool = null;
        }
        super.cleanup();
    }

    /**
     * Creates the producer pool for the given bootstrap server list.
     * No-op when the server string is blank.
     */
    private void initPool(String server) {
        if (StringUtils.isNotBlank(server)) {
            Properties props = new Properties();
            props.put("bootstrap.servers", server);
            props.put("acks", "all");
            props.put("retries", 0);
            props.put("batch.size", 16384);
            props.put("linger.ms", 1);
            props.put("buffer.memory", 33554432);
            props.put("key.serializer",
                    "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer",
                    "org.apache.kafka.common.serialization.StringSerializer");
            pool = new KafkaPool<String, String>(props);
        }
    }

    /**
     * Reads a required entry from the topology config.
     *
     * @throws IllegalStateException when the key is absent, naming the key
     *         (instead of the bare NPE the old {@code map.get(...).toString()} threw)
     */
    private static String requiredConfig(Map map, Object key) {
        Object value = map.get(key);
        if (value == null) {
            throw new IllegalStateException("Missing required topology config: " + key);
        }
        return value.toString();
    }
}
