package dacp.etl.kafka.hdfs.connect;

import java.util.Collection;
import java.util.Map;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.codahale.metrics.Counter;
import com.codahale.metrics.Timer;
import com.codahale.metrics.Timer.Context;

import dacp.etl.kafka.hdfs.connect.monitor.MRegistry;
import dacp.etl.kafka.hdfs.connect.utils.CommonSinkConnectorConfig;
import dacp.etl.kafka.hdfs.connect.writer.DataWriter;
import dacp.etl.kafka.hdfs.connect.writer.DataWriter2;
import io.confluent.common.config.ConfigException;
import io.confluent.connect.hdfs.Version;

/**
 * Kafka Connect sink task that hands record batches to a {@link DataWriter2},
 * which persists them to HDFS. Offset commits are asynchronous (see
 * {@link #flush(Map)}), so this task's {@code put} is best-effort: write
 * failures are logged and retried on subsequent batches rather than failing
 * the task.
 */
public class HdfsSinkTask extends SinkTask {

	private static final Logger log = LoggerFactory.getLogger(HdfsSinkTask.class);

	/** Writer that persists batches to HDFS; created in start(), released in stop(). */
	private DataWriter2 hdfsWriter;

	/** Latency of each put() invocation. */
	private static final Timer timer = MRegistry.get().timer("Put");
	/** Running total of records handed to put(). */
	private final Counter putSizeCounter = MRegistry.get().counter("PutSize");

	public HdfsSinkTask() {
	}

	@Override
	public String version() {
		return Version.getVersion();
	}

	/**
	 * Invoked by the framework when it commits consumer offsets.
	 * No-op: offsets are committed asynchronously by the writer.
	 */
	@Override
	public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) {
		// nothing to do, offsets are committed asynchronously
	}

	/**
	 * Writes a batch of records to HDFS via the writer, recording batch size
	 * and latency metrics. Failures are deliberately swallowed (best-effort);
	 * a one-second pause avoids a tight retry loop while the writer is
	 * persistently failing.
	 *
	 * <p>NOTE(review): because failures are swallowed here, records in a failed
	 * batch can be lost if the async offset commit advances past them — confirm
	 * that DataWriter2 only commits offsets for data it has durably written.
	 */
	@Override
	public void put(Collection<SinkRecord> records) {
		// Timer.Context is Closeable: try-with-resources guarantees the timer
		// sample is recorded even when write() throws.
		try (Context ignored = timer.time()) {
			putSizeCounter.inc(records.size());
			hdfsWriter.write(records);
		} catch (Exception e) {
			log.error("Failed to write batch of {} records to HDFS; will retry with later batches.",
					records.size(), e);
			try {
				// Back off briefly so a persistently failing writer does not spin.
				Thread.sleep(1000L);
			} catch (InterruptedException ie) {
				// Preserve the interrupt so the framework can shut this task down.
				Thread.currentThread().interrupt();
			}
		}
	}

	/**
	 * One-time initialization: parses the connector configuration and creates
	 * the HDFS writer.
	 *
	 * @throws ConnectException if the supplied properties are invalid
	 */
	@Override
	public void start(Map<String, String> props) {
		try {
			CommonSinkConnectorConfig connectorConfig = new CommonSinkConnectorConfig(props);
			hdfsWriter = new DataWriter2(connectorConfig, context);
		} catch (ConfigException e) {
			if (hdfsWriter != null) {
				hdfsWriter.stop();
			}
			throw new ConnectException("Couldn't start HdfsSinkTask: invalid configuration.", e);
		}
	}

	/**
	 * Releases the HDFS writer. Called on task shutdown, whether normal or
	 * after an unrecoverable error.
	 */
	@Override
	public void stop() {
		if (hdfsWriter != null) {
			hdfsWriter.stop();
		}
		log.info("HdfsSinkTask stopped.");
	}

	/**
	 * Called on rebalance when partitions are assigned to this task; forwards
	 * the assignment to the writer so it can set up per-partition state.
	 */
	@Override
	public void open(Collection<TopicPartition> partitions) {
		if (hdfsWriter != null) {
			hdfsWriter.open(partitions);
		}
		log.info("Opened partitions: {}", partitions);
	}

	/**
	 * Called on rebalance (and on error) when partitions are revoked.
	 * No-op: the writer keeps its state until stop().
	 */
	@Override
	public void close(Collection<TopicPartition> partitions) {
		// nothing to do
	}

}
