package dacp.etl.kafka.hdfs.connect.writer;

import java.io.IOException;
import java.net.InetAddress;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTaskContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import dacp.etl.kafka.hdfs.connect.hive.HiveMetaStore;
import dacp.etl.kafka.hdfs.connect.names.FileName;
import dacp.etl.kafka.hdfs.connect.utils.CommonSinkConnectorConfig;
import io.confluent.common.config.ConfigException;
import io.confluent.connect.hdfs.HdfsSinkConnecorConstants;
import io.confluent.connect.hdfs.partitioner.Partitioner;
import io.confluent.connect.hdfs.storage.Storage;
import io.confluent.connect.hdfs.storage.StorageFactory;

/**
 * Top-level writer that fans Kafka {@link SinkRecord}s out to one
 * {@link TopicPartitionWriter} per assigned topic partition and stores them in HDFS.
 *
 * <p>Responsibilities: loading the Hadoop configuration, optional Kerberos login (with a
 * background ticket-renewal thread), creation of the topics/logs directories, optional
 * Hive metastore integration, and the sink-task lifecycle (open / write / close / stop).
 */
public class DataWriter {
	private static final Logger log = LoggerFactory.getLogger(DataWriter.class);

	// One writer per assigned topic partition; populated in open(), drained in close().
	private Map<TopicPartition, TopicPartitionWriter> topicPartitionWriters = new HashMap<>();
	private String url;
	private Storage storage;
	private Configuration conf;
	private String topicsDir;
	private Set<TopicPartition> assignment;
	private Partitioner partitioner;

	// Strategy object that builds the HDFS file names of committed files.
	private FileName namer;

	private RecordWriterProvider writerProvider;
	private CommonSinkConnectorConfig connectorConfig;
	private SinkTaskContext context;
	private Thread ticketRenewThread;
	// Shared with the ticket-renewal thread; volatile so stop() is visible to it.
	private volatile boolean isRunning;

	private boolean hiveIntegration;
	private String hiveDatabase;
	private String hiveTable;
	private HiveMetaStore hiveMetaStore;
	private ExecutorService executorService;

	/**
	 * Builds the writer from the connector configuration: loads the Hadoop configuration,
	 * performs the (optional) Kerberos login, creates the working directories on HDFS and
	 * instantiates the storage, partitioner and file-namer classes via reflection.
	 *
	 * @param connectorConfig the sink connector configuration
	 * @param context the Kafka Connect sink task context
	 * @throws ConnectException on reflection or I/O failures during setup
	 */
	@SuppressWarnings("unchecked")
	public DataWriter(CommonSinkConnectorConfig connectorConfig, SinkTaskContext context) {
		try {
			// NOTE(review): the HADOOP_HOME_CONFIG value is used as the HDFS user name here —
			// confirm this is intentional and not a mix-up with a dedicated user property.
			String hadoopHome = connectorConfig.getString(CommonSinkConnectorConfig.HADOOP_HOME_CONFIG);
			System.setProperty("HADOOP_USER_NAME", hadoopHome);

			this.connectorConfig = connectorConfig;
			this.context = context;

			String hadoopConfDir = connectorConfig.getString(CommonSinkConnectorConfig.HADOOP_CONF_DIR_CONFIG);
			log.info("Hadoop configuration directory {}", hadoopConfDir);
			conf = new Configuration();
			if (hadoopConfDir != null && !hadoopConfDir.isEmpty()) {
				conf.addResource(new Path(hadoopConfDir + "/core-site.xml"));
				conf.addResource(new Path(hadoopConfDir + "/hdfs-site.xml"));
			}

			boolean secureHadoop = connectorConfig
					.getBoolean(CommonSinkConnectorConfig.HDFS_AUTHENTICATION_KERBEROS_CONFIG);
			if (secureHadoop) {
				initKerberos(connectorConfig);
			}

			url = connectorConfig.getString(CommonSinkConnectorConfig.HDFS_URL_CONFIG);
			topicsDir = connectorConfig.getString(CommonSinkConnectorConfig.TOPICS_DIR_CONFIG);
			String logsDir = connectorConfig.getString(CommonSinkConnectorConfig.LOGS_DIR_CONFIG);

			log.info("hdfs.url: {} ", url);
			Class<? extends Storage> storageClass = (Class<? extends Storage>) Class
					.forName(connectorConfig.getString(CommonSinkConnectorConfig.STORAGE_CLASS_CONFIG));
			storage = StorageFactory.createStorage(storageClass, conf, url);

			createDir(topicsDir);
			createDir(logsDir);
			createDir(logsDir + HdfsSinkConnecorConstants.TEMPFILE_DIRECTORY);

			writerProvider = new RecordWriterProvider();

			partitioner = createPartitioner(connectorConfig);
			namer = createNamer(connectorConfig);

			hiveIntegration = connectorConfig.getBoolean(CommonSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
			if (hiveIntegration) {
				// The database config may carry both names encoded as "database|table".
				hiveDatabase = connectorConfig.getString(CommonSinkConnectorConfig.HIVE_DATABASE_CONFIG);
				if (hiveDatabase.contains("|")) {
					int idx = hiveDatabase.indexOf('|');
					hiveTable = hiveDatabase.substring(idx + 1);
					hiveDatabase = hiveDatabase.substring(0, idx);
				}
				hiveMetaStore = new HiveMetaStore(conf, connectorConfig);
				executorService = Executors.newSingleThreadExecutor();
			}

		} catch (ReflectiveOperationException e) {
			// Covers ClassNotFoundException / InstantiationException / IllegalAccessException
			// (originally caught) plus NoSuchMethodException / InvocationTargetException
			// raised by getDeclaredConstructor().newInstance().
			throw new ConnectException("Reflection exception: ", e);
		} catch (IOException e) {
			throw new ConnectException(e);
		}
	}

	/**
	 * Configures Kerberos authentication: logs in from the configured keytab and starts a
	 * thread that periodically re-logs in to renew the ticket until {@link #stop()} flips
	 * {@link #isRunning} and notifies it.
	 *
	 * @throws ConfigException if principal or keytab is missing
	 * @throws IOException if the login or host name resolution fails
	 */
	private void initKerberos(CommonSinkConnectorConfig connectorConfig) throws IOException {
		SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
		String principalConfig = connectorConfig
				.getString(CommonSinkConnectorConfig.CONNECT_HDFS_PRINCIPAL_CONFIG);
		String keytab = connectorConfig.getString(CommonSinkConnectorConfig.CONNECT_HDFS_KEYTAB_CONFIG);

		if (principalConfig == null || keytab == null) {
			// Fix: "Kerboros" typo in the original message.
			throw new ConfigException(
					"Hadoop is using Kerberos for authentication, you need to provide both a connect principal and "
							+ "the path to the keytab of the principal.");
		}

		conf.set("hadoop.security.authentication", "kerberos");
		conf.set("hadoop.security.authorization", "true");
		String hostname = InetAddress.getLocalHost().getCanonicalHostName();
		// Replace a literal _HOST in the principal config with the actual local host name.
		String principal = SecurityUtil.getServerPrincipal(principalConfig, hostname);
		String namenodePrincipalConfig = connectorConfig
				.getString(CommonSinkConnectorConfig.HDFS_NAMENODE_PRINCIPAL_CONFIG);

		// The namenode principal is needed for a multi-node Hadoop cluster. The raw config
		// value (not a _HOST-resolved form) is set, matching the original behavior.
		if (conf.get("dfs.namenode.kerberos.principal") == null) {
			conf.set("dfs.namenode.kerberos.principal", namenodePrincipalConfig);
		}
		log.info("Hadoop namenode principal: {}", conf.get("dfs.namenode.kerberos.principal"));

		UserGroupInformation.setConfiguration(conf);
		UserGroupInformation.loginUserFromKeytab(principal, keytab);
		final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
		log.info("Login as: {}", ugi.getUserName());

		final long renewPeriod = connectorConfig
				.getLong(CommonSinkConnectorConfig.KERBEROS_TICKET_RENEW_PERIOD_MS_CONFIG);

		isRunning = true;
		ticketRenewThread = new Thread(new Runnable() {
			@Override
			public void run() {
				synchronized (DataWriter.this) {
					while (isRunning) {
						try {
							log.info("now ticket renew stat waiting : {}", isRunning);
							// Wait either for the renew period to elapse or for stop() to notify.
							DataWriter.this.wait(renewPeriod);
							log.info("now ticket renew stat : {}", isRunning);
							if (isRunning) {
								ugi.reloginFromKeytab();
								log.info("ticket relogin end !!");
							}
						} catch (IOException e) {
							log.error("Error renewing the ticket", e);
						} catch (InterruptedException e) {
							// Fix: restore the interrupt status and exit instead of looping
							// straight back into wait(), which would throw again immediately.
							Thread.currentThread().interrupt();
							log.error("Kerberos ticket renew thread interrupted", e);
							return;
						}
					}
				}
			}
		});
		log.info("Starting the Kerberos ticket renew thread with period {}ms.", renewPeriod);
		ticketRenewThread.start();
	}

	/**
	 * Closes the writers for the given partitions — or, when {@code partitions} is null,
	 * for every currently assigned partition — and removes them from the writer map.
	 *
	 * @param partitions partitions to close, or null for the full assignment
	 */
	public void close(Collection<TopicPartition> partitions) {
		// Fix: honor the partitions argument instead of always iterating the full
		// assignment, and guard against a null assignment (open() never called).
		Collection<TopicPartition> toClose = (partitions != null) ? partitions : assignment;
		if (toClose == null) {
			return;
		}
		for (TopicPartition tp : toClose) {
			try {
				TopicPartitionWriter topicPartitionWriter = topicPartitionWriters.get(tp);
				if (topicPartitionWriter != null) {
					topicPartitionWriter.close();
				}
			} catch (ConnectException e) {
				// Fix: "{]" was a malformed SLF4J placeholder; also log the full stack trace.
				log.error("Error closing writer for {}", tp, e);
			} finally {
				topicPartitionWriters.remove(tp);
			}
		}
	}

	/**
	 * Stops the writer: closes all partition writers, then the underlying storage, shuts
	 * down the Hive executor (if any) and signals the Kerberos ticket-renewal thread to exit.
	 */
	public void stop() {
		// Fix: close the per-partition writers BEFORE the storage they write through,
		// so in-flight files can still be finalized.
		close(null);
		try {
			storage.close();
		} catch (IOException e) {
			throw new ConnectException(e);
		}
		// Fix: release the Hive executor thread created in the constructor.
		if (executorService != null) {
			executorService.shutdown();
		}
		if (ticketRenewThread != null) {
			synchronized (this) {
				isRunning = false;
				log.info("ticketRenewThread closed!");
				this.notifyAll();
			}
		}
	}

	/**
	 * Buffers each record into its partition's writer, then flushes every assigned
	 * partition writer.
	 *
	 * @throws ConnectException if a record arrives for a partition with no writer
	 */
	public void write(Collection<SinkRecord> records) {
		for (SinkRecord record : records) {
			TopicPartition tp = new TopicPartition(record.topic(), record.kafkaPartition());
			TopicPartitionWriter writer = topicPartitionWriters.get(tp);
			if (writer == null) {
				// Fix: fail with context instead of a bare NullPointerException.
				throw new ConnectException("No writer found for topic partition " + tp);
			}
			writer.buffer(record);
		}

		for (TopicPartition tp : assignment) {
			topicPartitionWriters.get(tp).write();
		}
	}

	/** Creates {@code dir} under the storage root URL if it does not exist yet. */
	private void createDir(String dir) throws IOException {
		String path = storage.url() + "/" + dir;
		log.info("create dir ({}): [{}]", storage.exists(path), path);
		if (!storage.exists(path)) {
			storage.mkdirs(path);
		}
	}

	/**
	 * Instantiates and configures the {@link Partitioner} named in the configuration.
	 */
	@SuppressWarnings("unchecked")
	private Partitioner createPartitioner(CommonSinkConnectorConfig config)
			throws ReflectiveOperationException {

		Class<? extends Partitioner> partitionerClass = (Class<? extends Partitioner>) Class
				.forName(config.getString(CommonSinkConnectorConfig.PARTITIONER_CLASS_CONFIG));

		Map<String, Object> map = copyConfig(config);
		// Fix: newInstance() is deprecated and swallows constructor exceptions.
		Partitioner partitioner = partitionerClass.getDeclaredConstructor().newInstance();
		partitioner.configure(map);
		return partitioner;
	}

	/**
	 * Instantiates and configures the {@link FileName} strategy named in the configuration.
	 */
	@SuppressWarnings("unchecked")
	private FileName createNamer(CommonSinkConnectorConfig config)
			throws ReflectiveOperationException {

		Class<? extends FileName> fileNameClass = (Class<? extends FileName>) Class
				.forName(config.getString(CommonSinkConnectorConfig.NAMES_CLASS_CONFIG));

		Map<String, Object> map = copyConfig(config);
		// Fix: newInstance() is deprecated and swallows constructor exceptions.
		FileName fileNamer = fileNameClass.getDeclaredConstructor().newInstance();
		fileNamer.configure(map);
		return fileNamer;
	}

	/**
	 * Copies the subset of connector settings consumed by the partitioner and file-namer
	 * into a plain map for their {@code configure(Map)} calls.
	 */
	private Map<String, Object> copyConfig(CommonSinkConnectorConfig config) {
		Map<String, Object> map = new HashMap<>();
		map.put(CommonSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG,
				config.getString(CommonSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG));
		map.put(CommonSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG,
				config.getLong(CommonSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG));
		map.put(CommonSinkConnectorConfig.PATH_FORMAT_CONFIG,
				config.getString(CommonSinkConnectorConfig.PATH_FORMAT_CONFIG));
		map.put(CommonSinkConnectorConfig.LOCALE_CONFIG, config.getString(CommonSinkConnectorConfig.LOCALE_CONFIG));
		map.put(CommonSinkConnectorConfig.TIMEZONE_CONFIG, config.getString(CommonSinkConnectorConfig.TIMEZONE_CONFIG));

		map.put(CommonSinkConnectorConfig.FILENAME_OFFSET_ZERO_PAD_WIDTH_CONFIG,
				config.getInt(CommonSinkConnectorConfig.FILENAME_OFFSET_ZERO_PAD_WIDTH_CONFIG));
		map.put(CommonSinkConnectorConfig.HDFS_FILENAME_FORMAT,
				config.getString(CommonSinkConnectorConfig.HDFS_FILENAME_FORMAT));

		return map;
	}

	/**
	 * Records the new partition assignment and creates one writer per partition, starting
	 * recovery immediately for each.
	 */
	public void open(Collection<TopicPartition> partitions) {
		assignment = new HashSet<>(partitions);
		for (TopicPartition tp : assignment) {
			TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter(tp, storage, writerProvider,
					partitioner, namer, connectorConfig, context, hiveIntegration, hiveDatabase, hiveTable, hiveMetaStore,
					executorService);
			topicPartitionWriters.put(tp, topicPartitionWriter);
			// We need to immediately start recovery to ensure we pause consumption of
			// messages for the assigned topics while we try to recover offsets and rewind.
			topicPartitionWriter.recover();
		}
	}

}
