package dacp.etl.kafka.hdfs.tools.writet;

import java.io.IOException;
import java.net.InetAddress;
import java.util.Collection;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTaskContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.codahale.metrics.Counter;
import com.codahale.metrics.Timer;
import com.codahale.metrics.Timer.Context;

import dacp.etl.kafka.hdfs.connect.monitor.MRegistry;
import io.confluent.common.config.ConfigException;
import io.confluent.connect.hdfs.FileUtils;
import io.confluent.connect.hdfs.HdfsSinkConnecorConstants;
import io.confluent.connect.hdfs.HdfsSinkConnectorConfig;
import io.confluent.connect.hdfs.RecordWriter;
import io.confluent.connect.hdfs.partitioner.Partitioner;
import io.confluent.connect.hdfs.storage.Storage;
import io.confluent.connect.hdfs.storage.StorageFactory;

public class DataWriter {
	private static final Logger log = LoggerFactory.getLogger(DataWriter.class);

	private String url;
	private Storage storage;
	private Configuration conf;
	private String topicsDir;
	// NOTE(review): assignment, partitioner, ticketRenewThread and isRunning are
	// never read or written anywhere in this class — confirm they are not needed
	// (e.g. accessed reflectively) before removing them.
	private Set<TopicPartition> assignment;
	private Partitioner partitioner;
	private HdfsSinkConnectorConfig connectorConfig;
	private SinkTaskContext context;
	private Thread ticketRenewThread;
	private volatile boolean isRunning;
	private RecordWriterProvider wprovider;

	private RecordWriter<SinkRecord> writer;

	// JVM-wide metrics shared by every DataWriter instance.
	private static Timer timer = MRegistry.get().timer("DataWriter.put");
	private static Counter c1 = MRegistry.get().counter("DataWriter counter [" + Thread.currentThread().hashCode() + "]");

	/**
	 * Creates the writer: optionally performs a Kerberos keytab login,
	 * instantiates the configured {@link Storage} implementation, ensures the
	 * topics/temp/logs directories exist, and opens a single shared temp-file
	 * record writer used for all partitions.
	 *
	 * @param connectorConfig connector configuration (HDFS url, directories, security)
	 * @param context         the sink task context supplied by Kafka Connect
	 * @throws ConnectException if the storage class cannot be loaded or any
	 *                          HDFS / Kerberos I/O fails
	 */
	@SuppressWarnings("unchecked")
	public DataWriter(HdfsSinkConnectorConfig connectorConfig, SinkTaskContext context) {
		try {
			String hadoopHome = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_HOME_CONFIG);
			System.setProperty("hadoop.home.dir", hadoopHome);

			this.connectorConfig = connectorConfig;
			this.context = context;

			String hadoopConfDir = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_CONF_DIR_CONFIG);
			log.info("Hadoop configuration directory {}", hadoopConfDir);
			// NOTE(review): hadoopConfDir is only logged; its *-site.xml files are
			// never added to conf via conf.addResource(...) — confirm this is intentional.
			conf = new Configuration();

			url = connectorConfig.getString(HdfsSinkConnectorConfig.HDFS_URL_CONFIG);
			topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
			String logsDir = connectorConfig.getString(HdfsSinkConnectorConfig.LOGS_DIR_CONFIG);

			boolean secureHadoop = connectorConfig
					.getBoolean(HdfsSinkConnectorConfig.HDFS_AUTHENTICATION_KERBEROS_CONFIG);
			if (secureHadoop) {
				configureKerberos();
			}

			Class<? extends Storage> storageClass = (Class<? extends Storage>) Class
					.forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
			storage = StorageFactory.createStorage(storageClass, conf, url);

			createDir(topicsDir);
			createDir(topicsDir + HdfsSinkConnecorConstants.TEMPFILE_DIRECTORY);
			createDir(logsDir);

			wprovider = new RecordWriterProvider();

			String tempFile = FileUtils.tempFileName(url, topicsDir, "dacp", "av");

			writer = wprovider.getRecordWriter(conf, tempFile, null);

		} catch (ClassNotFoundException e) {
			throw new ConnectException("Reflection exception: ", e);
		} catch (IOException e) {
			throw new ConnectException(e);
		}
	}

	/**
	 * Performs the Kerberos keytab login configured for this connector and
	 * wires the security settings into {@link #conf}.
	 *
	 * @throws IOException     if host-name resolution or the keytab login fails
	 * @throws ConfigException if principal or keytab is missing from the config
	 */
	private void configureKerberos() throws IOException {
		SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
		String principalConfig = connectorConfig
				.getString(HdfsSinkConnectorConfig.CONNECT_HDFS_PRINCIPAL_CONFIG);
		String keytab = connectorConfig.getString(HdfsSinkConnectorConfig.CONNECT_HDFS_KEYTAB_CONFIG);

		if (principalConfig == null || keytab == null) {
			// Fixed typo in the original message: "Kerboros" -> "Kerberos".
			throw new ConfigException(
					"Hadoop is using Kerberos for authentication, you need to provide both a connect principal and "
							+ "the path to the keytab of the principal.");
		}

		conf.set("hadoop.security.authentication", "kerberos");
		conf.set("hadoop.security.authorization", "true");
		// Replace the _HOST placeholder in the principal with the actual host.
		String hostname = InetAddress.getLocalHost().getCanonicalHostName();
		String principal = SecurityUtil.getServerPrincipal(principalConfig, hostname);
		String namenodePrincipalConfig = connectorConfig
				.getString(HdfsSinkConnectorConfig.HDFS_NAMENODE_PRINCIPAL_CONFIG);

		// Namenode principal is needed for a multi-node hadoop cluster.
		if (conf.get("dfs.namenode.kerberos.principal") == null) {
			conf.set("dfs.namenode.kerberos.principal", namenodePrincipalConfig);
		}
		log.info("Hadoop namenode principal: {}", conf.get("dfs.namenode.kerberos.principal"));

		UserGroupInformation.setConfiguration(conf);
		UserGroupInformation.loginUserFromKeytab(principal, keytab);
		final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
		log.info("Login as: {}", ugi.getUserName());
	}

	/**
	 * Closes the underlying record writer, flushing any buffered records.
	 *
	 * @param partitions ignored — one shared writer serves all partitions
	 * @throws ConnectException if closing the writer fails (the original
	 *                          swallowed the IOException with printStackTrace,
	 *                          silently losing flush failures)
	 */
	public void close(Collection<TopicPartition> partitions) {
		try {
			if (writer != null) {
				writer.close();
			}
		} catch (IOException e) {
			throw new ConnectException(e);
		}
	}

	/**
	 * Stops the writer. The record writer is closed FIRST so its buffered data
	 * is flushed while the storage layer is still open; the original closed
	 * storage before the writer, risking a flush into closed storage.
	 *
	 * @throws ConnectException if closing the writer or the storage fails
	 */
	public void stop() {
		close(null);
		try {
			storage.close();
		} catch (IOException e) {
			throw new ConnectException(e);
		}
	}

	/**
	 * Writes a batch of records through the shared record writer, counting the
	 * batch size and timing the whole batch.
	 *
	 * @param records the batch delivered by the sink task
	 * @throws ConnectException if writing a record fails (the original swallowed
	 *                          the IOException, silently dropping the record)
	 */
	public void write(Collection<SinkRecord> records) {
		c1.inc(records.size());
		// try-with-resources guarantees the timer context is closed even when a
		// write throws (the original leaked the context on exception).
		try (Context time = timer.time()) {
			for (SinkRecord record : records) {
				try {
					writer.write(record);
				} catch (IOException e) {
					throw new ConnectException("Failed to write record", e);
				}
			}
		}
	}

	/**
	 * Ensures {@code url + "/" + dir} exists in storage, creating it if absent.
	 *
	 * @throws IOException if the existence check or the mkdirs call fails
	 */
	private void createDir(String dir) throws IOException {
		String path = url + "/" + dir;
		if (!storage.exists(path)) {
			storage.mkdirs(path);
		}
	}

	/** No-op: partition assignment is not tracked; one shared writer handles all partitions. */
	public void open(Collection<TopicPartition> partitions) {
	}

}
