//package com.utils.spring.config;
//
//import java.util.HashMap;
//import java.util.Map;
//
//import org.apache.kafka.clients.consumer.ConsumerConfig;
//import org.apache.kafka.clients.producer.ProducerConfig;
//import org.apache.kafka.common.serialization.StringDeserializer;
//import org.apache.kafka.common.serialization.StringSerializer;
//import org.springframework.beans.factory.annotation.Value;
//import org.springframework.context.annotation.Bean;
//import org.springframework.context.annotation.ComponentScan;
//import org.springframework.context.annotation.Configuration;
//import org.springframework.context.annotation.Import;
//import org.springframework.kafka.core.DefaultKafkaProducerFactory;
//import org.springframework.kafka.core.KafkaTemplate;
//
///**
// * Custom Kafka configuration: adds settings that KafkaAutoConfiguration does not
// * expose, and creates the related producer/consumer beans.
// * 2019-05-20: Kafka functionality removed — the entire class was commented out.
// * NOTE(review): this file is fully dead, commented-out code; consider deleting it
// * outright (the history remains available in version control).
// * @author zy
// */
//@Import(PropertyConfig.class)
//@ComponentScan("com.kyhd.bidder.service")
//@Configuration
////@EnableKafka
//public class KafkaConfig {
//
//	@Value("${spring.kafka.bootstrap-servers}")
//	private String bootstrapServers;
//	
//	@Value("${spring.kafka.producer.acks}")
//	private String acks;
//	
//	@Value("${spring.kafka.producer.linger.ms}")
//	private String lingerMs;
//	
//	@Value("${spring.kafka.producer.batch-size}")
//	private String batchSize;
//	
//	@Value("${spring.kafka.producer.buffer-memory}")
//	private String bufferMemory;
//	
//	@Value("${spring.kafka.consumer.group-id}")
//	private String groupId;
//	
//	@Value("${spring.kafka.consumer.enable-auto-commit}")
//	private boolean enableAutoCommit;
//	
//	@Value("${spring.kafka.consumer.auto-commit-interval}")
//	private String autoCommitInterval;
//
//	//-------------------producer config------------------------
//	private Map<String, Object> producerProperties() {
//		Map<String, Object> props = new HashMap<>();
//		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
//		props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
//		props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
//		props.put(ProducerConfig.ACKS_CONFIG, acks);
//		props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
//		props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
//		props.put(ProducerConfig.LINGER_MS_CONFIG, lingerMs);
////		props.put(ProducerConfig.RETRIES_CONFIG, 1);
//		return props;
//	}
//
//	@Bean(name = "produceFactory")
//	public DefaultKafkaProducerFactory<String, String> produceFactory() {
//		return new DefaultKafkaProducerFactory<String, String>(producerProperties());
//	}
//
//	@Bean
//	public KafkaTemplate<String, String> kafkaTemplate(DefaultKafkaProducerFactory<String, String> produceFactory) {
//		return new KafkaTemplate<String, String>(produceFactory);
//	}
//
//	//-------------------consumer config------------------------
//	@SuppressWarnings("unused")
//	private Map<String, Object> consumerProperties() {
//		Map<String, Object> props = new HashMap<>();
//		props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
//		props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
//		props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
//		// Interval at which the consumer auto-commits offsets, in milliseconds
//		// (key is auto.commit.interval.ms — not seconds, and not ZooKeeper-bound as the old note claimed)
//		props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
//		// Maximum session timeout (legacy note said "ZooKeeper max timeout")
//		props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
//		// Maximum time to wait for a request response; must be greater than session.timeout.ms
//		props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, "40000");
//		// Maximum number of records returned by a single poll() from Kafka
//		props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100);
//		// Maximum time the broker waits for a consumer fetch when fetch.min.bytes is not yet satisfied
//		props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "1000");
//		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
//		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
//		return props;
//	}
//
///*	@Bean(name = "listenerContainerFactory")
//	public ConcurrentKafkaListenerContainerFactory<String, String> listenerContainerFactory() {
//		ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<String, String>();
//		factory.setConsumerFactory(new DefaultKafkaConsumerFactory<String, String>(consumerProperties()));
//
//		//factory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);
//		//Optionally enable concurrent and/or batch message consumption, as required:
////		factory.setConcurrency(100);
////		factory.setBatchListener(true);
//		return factory;
//	}*/
//
//	/*
//	 * // The message-consuming class that handles messages and runs the business logic.
//	 * // It must implement the MessageListener interface and its onMessage method.
//	 * 
//	 * @Bean public MyMessageListener myMessageListener(){ return new
//	 * MyMessageListener(); }
//	 * 
//	 * // Consumer container configuration
//	 * 
//	 * @Bean public ContainerProperties containerProperties(){ Pattern topicPattern
//	 * = Pattern.compile(".*[tT]opic.*"); // subscribe to every topic matching the regex
//	 * ContainerProperties containerProperties = new ContainerProperties(topicPattern);
//	 * containerProperties.setMessageListener(myMessageListener()); // messages from the
//	 * subscribed topics are handled by myMessageListener
//	 * return containerProperties; }
//	 * 
//	 * @Bean public KafkaMessageListenerContainer<String, String>
//	 * kafkaMessageListenerContainer(){ return new
//	 * KafkaMessageListenerContainer<>(consumerFactory(),containerProperties()); }
//	 */
//
//}
