/* NOTE(review): this ENTIRE file is commented out (dead code). Confirm whether it should be deleted from the repository or restored to the build — keeping a whole class inside a block comment invites silent drift.
package com.pie4cloud.pie.bus.thread;

import com.pie4cloud.pie.bus.api.consumer.factory.KfkClientFactory;
import com.pie4cloud.pie.bus.api.dto.ConsumerDto;
import com.pie4cloud.pie.bus.api.feign.RemoteKfkConsumerService;
import com.pie4cloud.pie.common.core.constant.SecurityConstants;
import com.pie4cloud.pie.common.core.util.R;
import feign.Request;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

@Slf4j
public class ConsumerThread implements Runnable {
	private KafkaConsumer<String, String> consumer;
	private RemoteKfkConsumerService remoteKfkConsumerService;
	private ConsumerDto consumerDto;
	private Properties props;

	public ConsumerThread(KafkaConsumer<String, String> consumer, RemoteKfkConsumerService remoteKfkConsumerService, ConsumerDto consumerDto, Properties props) {
		this.consumer = consumer;
		this.remoteKfkConsumerService = remoteKfkConsumerService;
		this.consumerDto = consumerDto;
		this.props = props;
	}

	public void run() {
		String key = consumerDto.getKafGroup() + "_" + consumerDto.getTopic();
		Long value = Long.parseLong(consumerDto.getTime());
		KfkClientFactory.isStop.put(key, value);
		while (!Thread.currentThread().isInterrupted()) {//消费者是一个长期运行的程序，通过持续轮询向Kafka请求数据。在其他线程中调用consumer.wakeup()可以退出循环
			//在1000ms内等待Kafka的broker返回数据.超市参数指定poll在多久之后可以返回，不管有没有可用的数据都要返回
			boolean flag = true;
			if (value < KfkClientFactory.isStop.get(key)) {
				consumer.close();
				consumer = null;
				Thread.currentThread().interrupt();
				break;
			}
			if (consumer == null) {
				//1.创建消费者
				consumer = new KafkaConsumer<String, String>(props);
				consumer.subscribe(Collections.singletonList(consumerDto.getTopic()));
			}
			ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
			Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<TopicPartition, OffsetAndMetadata>();
			try {
				if (records.isEmpty()) {
					Thread.sleep(10000);
				}
				for (ConsumerRecord<String, String> record : records) {
					try {
						consumerDto.setContent(record.value());
						Request.Options options = new Request.Options(3, TimeUnit.HOURS, 3, TimeUnit.HOURS, true);
						R r = remoteKfkConsumerService.consume(options, consumerDto, SecurityConstants.FROM_IN);
						if (0 != r.getCode()) {
							flag = false;
							break;
						} else {
							currentOffsets.put(new TopicPartition(record.topic(), record.partition()),
									new OffsetAndMetadata(record.offset() + 1, "no metadata"));
						}
					} catch (Exception e) {
						flag = false;
						break;
					}
				}
				if(currentOffsets.size()>0){
					consumer.commitSync(currentOffsets);
					currentOffsets.clear();
				}
			} catch (Exception e) {
				log.error("消费出错",e.getMessage().toString());
				//Thread.currentThread().interrupt();
			}finally {
				if (!flag) {
					consumer.close();
					consumer = null;
				}
			}
		}

	}
}
*/
