package dacp.etl.kafka.hdfs.connect.writer;

import java.util.concurrent.ExecutorService;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTaskContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import dacp.etl.kafka.hdfs.connect.hive.HiveMetaStore;
import dacp.etl.kafka.hdfs.connect.names.FileName;
import dacp.etl.kafka.hdfs.connect.utils.CommonSinkConnectorConfig;
import io.confluent.connect.hdfs.partitioner.Partitioner;
import io.confluent.connect.hdfs.storage.Storage;

/**
 * Topic writer that partitions output solely by the record's Kafka partition number.
 *
 * <p>All writer mechanics (storage, file naming, Hive integration, threading) are inherited
 * from {@link AbstractTopicWriter}; this subclass only supplies the partition-encoding scheme.
 */
public class TopicTaskWriter extends AbstractTopicWriter {

	private static final Logger log = LoggerFactory.getLogger(TopicTaskWriter.class);

	/**
	 * Creates a writer for a single topic partition; all arguments are forwarded
	 * unchanged to {@link AbstractTopicWriter}.
	 *
	 * @param tp              the Kafka topic partition this writer is responsible for
	 * @param storage         HDFS storage abstraction used for file operations
	 * @param writerProvider  factory for per-file record writers
	 * @param partitioner     partitioner supplied by the connector configuration
	 * @param namer           strategy producing output file names
	 * @param connectorConfig sink connector configuration
	 * @param context         Kafka Connect sink task context (offset management etc.)
	 * @param hiveIntegration whether Hive metastore integration is enabled
	 * @param hiveDatabase    target Hive database (used when {@code hiveIntegration} is true)
	 * @param hiveTable       target Hive table (used when {@code hiveIntegration} is true)
	 * @param hiveMetaStore   Hive metastore client
	 * @param executorService executor for asynchronous work (e.g. Hive metadata updates)
	 */
	public TopicTaskWriter(TopicPartition tp, Storage storage, RecordWriterProvider writerProvider, Partitioner partitioner,
			FileName namer, CommonSinkConnectorConfig connectorConfig, SinkTaskContext context, boolean hiveIntegration,
			String hiveDatabase, String hiveTable, HiveMetaStore hiveMetaStore, ExecutorService executorService) {
		super(tp, storage, writerProvider, partitioner, namer, connectorConfig, context, hiveIntegration, hiveDatabase,
				hiveTable, hiveMetaStore, executorService);
	}

	/**
	 * Encodes the output partition directory for a record as
	 * {@code "partition=<kafkaPartition>"} (Hive-style key=value path segment).
	 *
	 * @param sinkRecord the record whose Kafka partition number is encoded
	 * @return the encoded partition path segment
	 */
	// NOTE(review): presumably overrides a method declared in AbstractTopicWriter —
	// confirm and add @Override if so (superclass is not visible in this chunk).
	public String encodePartition(SinkRecord sinkRecord) {
		// String concatenation performs the null-safe conversion itself;
		// an explicit String.valueOf(...) is redundant here.
		return "partition=" + sinkRecord.kafkaPartition();
	}

}
