package dacp.etl.kafka.hdfs.tools.readt;

import java.util.Collection;
import java.util.Map;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.codahale.metrics.Counter;
import com.codahale.metrics.Timer;
import com.codahale.metrics.Timer.Context;

import dacp.etl.kafka.hdfs.connect.monitor.MRegistry;
import dacp.etl.kafka.hdfs.connect.writer.DataWriter;
import io.confluent.common.config.ConfigException;
import io.confluent.connect.hdfs.Version;

/**
 * Test/benchmark variant of the HDFS sink task: instead of writing records to
 * HDFS it counts them and optionally sleeps per {@link #put(Collection)} call
 * to simulate write latency. The real {@code DataWriter} wiring is left
 * commented out on purpose.
 */
public class HdfsSinkTask extends SinkTask {

	private static final Logger log = LoggerFactory.getLogger(HdfsSinkTask.class);

	/** Real writer; never created in this test variant (see start()). */
	private DataWriter hdfsWriter;

	/** Shared timer measuring the duration of each put() across all task instances. */
	private static final Timer timer = MRegistry.get().timer("HdfsSinkTask.put");

	/**
	 * Per-instance record counter. NOTE(review): the thread id baked into the
	 * metric name is the thread that constructed this task, not necessarily the
	 * thread that later calls put() — confirm that is the intended labeling.
	 */
	private final Counter recordCounter =
			MRegistry.get().counter("counter [" + Thread.currentThread().getId() + "]");

	/** Simulated per-put write latency in milliseconds; values <= 0 disable the sleep. */
	private long wms = -1L;

	public HdfsSinkTask() {
	}

	@Override
	public String version() {
		return Version.getVersion();
	}

	/** No-op: this test task keeps no buffered data, so there is nothing to flush. */
	@Override
	public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) {
	}

	/**
	 * Counts the delivered records and simulates write latency.
	 * <p>
	 * The timer context is closed via try-with-resources so the sample is
	 * recorded even if the sleep is interrupted (the original code skipped
	 * {@code time.stop()} on that path). A {@code ConnectException} raised here
	 * propagates to the framework unchanged; the previous catch-and-rewrap of
	 * ConnectException into ConnectException added nothing but nesting.
	 *
	 * @param records records delivered by the Connect framework for this poll cycle
	 */
	@Override
	public void put(Collection<SinkRecord> records) {
		recordCounter.inc(records.size());
		try (Context time = timer.time()) {
			if (wms > 0) {
				Thread.sleep(wms);
			}
		} catch (InterruptedException e) {
			// Restore the interrupt flag so the Connect worker thread can observe it.
			Thread.currentThread().interrupt();
			log.warn("Interrupted while simulating write latency", e);
		}
	}

	/**
	 * Reads the simulated-latency setting from the connector configuration.
	 * A configuration error aborts startup; other ConnectExceptions are logged,
	 * any partially-created writer is cleaned up, and the failure is swallowed.
	 *
	 * @param props connector configuration supplied by the framework
	 * @throws ConnectException if the configuration itself is invalid
	 */
	@Override
	public void start(Map<String, String> props) {
		try {
			HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props);
			wms = connectorConfig.getLong("dacp.test.write.sleep.ms");
			//hdfsWriter = new DataWriter(connectorConfig, context);
		} catch (ConfigException e) {
			throw new ConnectException("Couldn't start HdfsSinkConnector due to configuration error.", e);
		} catch (ConnectException e) {
			// NOTE(review): non-config start failures are swallowed after cleanup,
			// leaving the task running without a writer — confirm this is intended.
			log.info("Couldn't start HdfsSinkConnector:", e);
			log.info("Shutting down HdfsSinkConnector.");
			if (hdfsWriter != null) {
				hdfsWriter.close(context.assignment());
				hdfsWriter.stop();
			}
		}
	}

	/** No-op in this test variant; the real task would stop the writer here. */
	@Override
	public void stop() {
		//if (hdfsWriter != null) hdfsWriter.stop();
	}

	/** No-op in this test variant; the real task would open writer partitions here. */
	@Override
	public void open(Collection<TopicPartition> partitions) {
		//hdfsWriter.open(partitions);
	}

	/** No-op in this test variant; the real task would close writer partitions here. */
	@Override
	public void close(Collection<TopicPartition> partitions) {
		//hdfsWriter.close(partitions);
	}

}
