package sampler;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

import org.apache.jmeter.config.Arguments;
import org.apache.jmeter.protocol.java.sampler.JavaSamplerClient;
import org.apache.jmeter.protocol.java.sampler.JavaSamplerContext;
import org.apache.jmeter.samplers.SampleResult;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * JMeter Java sampler that measures Kafka consumer throughput: each sample
 * polls the subscribed topic once and reports the number of records received
 * as the sample count. A producer is also created in {@link #setupTest} for
 * the (currently commented-out) send path in {@link #runTest}.
 */
@SuppressWarnings("unused")
public class MySampler implements JavaSamplerClient {

	private static final Logger log = LoggerFactory.getLogger(MySampler.class);

	// JMeter parameter names and their defaults; all overridable from the
	// test plan via getDefaultParameters().
	private static final String PARAM_BOOTSTRAP = "bootstrap.servers";
	private static final String PARAM_TOPIC = "topic";
	private static final String PARAM_GROUP = "group.id";
	private static final String DEFAULT_BOOTSTRAP = "127.0.0.1:9092";
	private static final String DEFAULT_TOPIC = "topic_test";
	private static final String DEFAULT_GROUP = "group1";

	// Poll timeout per sample; kept short so each JMeter sample returns quickly.
	private static final Duration POLL_TIMEOUT = Duration.ofMillis(100);

	Producer<String, String> producer = null;
	KafkaConsumer<String, String> consumer = null;
	// Sequence number for the (currently commented-out) producer send below.
	int i = 0;
	private Properties propsProduct = new Properties();
	private Properties propsConsumer = new Properties();

	public static void main(String[] args) {
		// Intentionally empty: this class is driven by JMeter, not run directly.
	}

	/**
	 * Exposes the connection settings as JMeter parameters so a test plan can
	 * override them; defaults match the previously hard-coded values.
	 */
	@Override
	public Arguments getDefaultParameters() {
		Arguments params = new Arguments();
		params.addArgument(PARAM_BOOTSTRAP, DEFAULT_BOOTSTRAP);
		params.addArgument(PARAM_TOPIC, DEFAULT_TOPIC);
		params.addArgument(PARAM_GROUP, DEFAULT_GROUP);
		return params;
	}

	/**
	 * Creates the Kafka producer and consumer once per thread before the test
	 * starts, and subscribes the consumer to the configured topic.
	 */
	@Override
	public void setupTest(JavaSamplerContext context) {

		String bootstrap = context.getParameter(PARAM_BOOTSTRAP, DEFAULT_BOOTSTRAP);
		String topic = context.getParameter(PARAM_TOPIC, DEFAULT_TOPIC);
		String groupId = context.getParameter(PARAM_GROUP, DEFAULT_GROUP);

		// Cluster address; multiple brokers are comma-separated.
		propsProduct.put("bootstrap.servers", bootstrap);
		// Serialize keys and values as plain strings using Kafka's built-ins.
		propsProduct.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		propsProduct.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		// "acks" is the new-client property; the legacy "request.required.acks"
		// key is ignored by KafkaProducer.
		propsProduct.put("acks", "1");
		producer = new KafkaProducer<>(propsProduct);

		// Cluster address; multiple brokers are comma-separated.
		propsConsumer.put("bootstrap.servers", bootstrap);
		// Consumer group id; members of the same group share partitions.
		propsConsumer.put("group.id", groupId);
		// Auto-commit offsets so a replacement consumer resumes where this one
		// left off if it dies.
		propsConsumer.put("enable.auto.commit", "true");
		// How often the consumer commits offsets.
		propsConsumer.put("auto.commit.interval.ms", "1000");
		propsConsumer.put("session.timeout.ms", "30000");
		// Deserialize keys and values as plain strings.
		propsConsumer.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		propsConsumer.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		consumer = new KafkaConsumer<String, String>(propsConsumer);
		// Subscribe to the topic(s); multiple topics may be listed.
		consumer.subscribe(Arrays.asList(topic));
	}

	/**
	 * One JMeter sample: poll the topic once and report how many records were
	 * fetched. The sample is marked failed if the poll throws.
	 */
	@Override
	public SampleResult runTest(JavaSamplerContext context) {

		SampleResult sampleResult = new SampleResult();
		sampleResult.sampleStart();

		int count = 0;
		boolean success = true;

//		ProducerRecord<String, String> producerRecord = new ProducerRecord<>("test-partition-1", "key-" + i++, "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX");
//		producer.send(producerRecord);

		try {
			// poll(Duration) replaces the deprecated poll(long) overload.
			ConsumerRecords<String, String> consumerRecords = consumer.poll(POLL_TIMEOUT);
			count = consumerRecords.count();
		} catch (Exception e) {
			success = false;
			log.error("Kafka poll failed", e);
		} finally {
			sampleResult.sampleEnd();
		}

		sampleResult.setSampleCount(count);
		sampleResult.setSuccessful(success);

		return sampleResult;
	}

	/**
	 * Closes the Kafka clients after the test; without this every run leaks
	 * sockets and client buffer memory.
	 */
	@Override
	public void teardownTest(JavaSamplerContext context) {
		if (producer != null) {
			producer.close();
			producer = null;
		}
		if (consumer != null) {
			consumer.close();
			consumer = null;
		}
	}
}
