package dacp.etl.kafka.hdfs.connect.writer;

import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTaskContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.codahale.metrics.Timer.Context;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.Maps;

import dacp.etl.kafka.hdfs.connect.hive.HiveMetaStore;
import dacp.etl.kafka.hdfs.connect.monitor.MRegistry;
import dacp.etl.kafka.hdfs.connect.names.FileName;
import dacp.etl.kafka.hdfs.connect.utils.CommCache;
import dacp.etl.kafka.hdfs.connect.utils.CommonSinkConnectorConfig;
import dacp.etl.kafka.hdfs.connect.utils.FileUtils;
import dacp.etl.kafka.hdfs.connect.utils.IPUtil;
import dacp.etl.kafka.hdfs.connect.utils.NameReplace;
import io.confluent.common.config.ConfigException;
import io.confluent.connect.hdfs.HdfsSinkConnecorConstants;
import io.confluent.connect.hdfs.RecordWriter;
import io.confluent.connect.hdfs.partitioner.Partitioner;
import io.confluent.connect.hdfs.storage.Storage;
import io.confluent.connect.hdfs.storage.StorageFactory;

/**
 * Writes Kafka Connect {@link SinkRecord}s to HDFS via a pluggable {@link Storage} and
 * {@link Partitioner}. Records are buffered, appended to a per-task temp file under
 * {@code logsDir}, and periodically "rotated": the temp file is closed and committed
 * (renamed) into a partition directory under {@code topicsDir}. Rotation is triggered
 * either by record count ({@code flushSize}) or by a wall-clock period
 * ({@code partition.duration.ms}) driven by a background clock thread.
 *
 * <p>Optionally logs in via Kerberos (with a background ticket-renew thread) and
 * registers new partition directories in the Hive metastore.
 *
 * <p>Thread-safety: {@link #write()} and {@link #doRotate()} are synchronized on this
 * instance; the clock and ticket-renew threads also synchronize on it and are woken by
 * {@link #stop()}.
 */
public class DataWriter2 {
	private static final Logger log = LoggerFactory.getLogger(DataWriter2.class);

	private String url;                 // HDFS base URL (hdfs.url)
	private Storage storage;            // storage abstraction over HDFS
	private Configuration conf;         // Hadoop configuration
	private String topicsDir;           // root directory for committed data files
	private Partitioner partitioner;    // maps records/time to a partition path

	private RecordWriterProvider writerProvider;
	private CommonSinkConnectorConfig connectorConfig;
	private SinkTaskContext context;
	private Thread ticketRenewThread;   // periodic Kerberos re-login, if secured
	private volatile boolean isRunning; // cleared by stop(); read by background threads

	private boolean hiveIntegration;
	private String hiveDatabase;
	private String hiveTable;
	private HiveMetaStore hiveMetaStore;
	private ExecutorService executorService; // single thread for async Hive partition adds

	private Collection<TopicPartition> assigns;      // currently assigned topic-partitions
	private long timeoutMs;                          // retry backoff reported to the framework
	private Queue<SinkRecord> buffer = new LinkedList<>();
	private AtomicLong recordCounter = new AtomicLong(0L); // records in the current temp file
	private String logsDir;                          // root directory for temp files

	private RecordWriter recordWriter;  // writer for the current temp file (null => reopen)
	private String extension;           // file extension supplied by the writer provider

	private int taskId;
	private String instId;

	private String nameFmt;             // optional target-file-name template

	// Partition directory the next commit will target; set by the clock thread.
	private AtomicReference<String> currentTargetDir = new AtomicReference<>();

	private int flushSize;              // record-count rotation threshold

	private static Timer timer = MRegistry.get().timer("Write");
	private static Meter meter = MRegistry.get().meter("Rotate");

	/**
	 * Builds the writer: loads Hadoop configuration, optionally performs a Kerberos
	 * login (starting a ticket-renew thread), creates the topics/logs directories,
	 * and — when {@code partition.duration.ms > 0} — starts a clock thread that
	 * rotates the temp file at each period boundary.
	 *
	 * @param connectorConfig connector configuration
	 * @param context sink task context used for pause/resume/backoff
	 * @throws ConnectException on reflection or I/O failures during setup
	 */
	@SuppressWarnings("unchecked")
	public DataWriter2(CommonSinkConnectorConfig connectorConfig, SinkTaskContext context) {
		try {
			isRunning = true;
			String hadoopHome = connectorConfig.getString(CommonSinkConnectorConfig.HADOOP_HOME_CONFIG);
			// NOTE(review): the HADOOP_HOME config value is used as the HDFS user name here —
			// looks intentional in this codebase, but worth confirming.
			System.setProperty("HADOOP_USER_NAME", hadoopHome);

			this.connectorConfig = connectorConfig;
			this.context = context;

			String hadoopConfDir = connectorConfig.getString(CommonSinkConnectorConfig.HADOOP_CONF_DIR_CONFIG);
			log.info("Hadoop configuration directory {}", hadoopConfDir);
			conf = new Configuration();
			if (!hadoopConfDir.equals("")) {
				conf.addResource(new Path(hadoopConfDir + "/core-site.xml"));
				conf.addResource(new Path(hadoopConfDir + "/hdfs-site.xml"));
			}

			boolean secureHadoop = connectorConfig
					.getBoolean(CommonSinkConnectorConfig.HDFS_AUTHENTICATION_KERBEROS_CONFIG);
			if (secureHadoop) {
				SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
				String principalConfig = connectorConfig
						.getString(CommonSinkConnectorConfig.CONNECT_HDFS_PRINCIPAL_CONFIG);
				String keytab = connectorConfig.getString(CommonSinkConnectorConfig.CONNECT_HDFS_KEYTAB_CONFIG);

				if (principalConfig == null || keytab == null) {
					throw new ConfigException(
							"Hadoop is using Kerberos for authentication, you need to provide both a connect principal and "
									+ "the path to the keytab of the principal.");
				}

				conf.set("hadoop.security.authentication", "kerberos");
				conf.set("hadoop.security.authorization", "true");
				String hostname = InetAddress.getLocalHost().getCanonicalHostName();
				// Replace the _HOST placeholder in the principal config with the actual host.
				String principal = SecurityUtil.getServerPrincipal(principalConfig, hostname);
				String namenodePrincipalConfig = connectorConfig
						.getString(CommonSinkConnectorConfig.HDFS_NAMENODE_PRINCIPAL_CONFIG);

				String namenodePrincipal = SecurityUtil.getServerPrincipal(namenodePrincipalConfig, hostname);
				// Namenode principal is needed for a multi-node Hadoop cluster.
				// NOTE(review): the raw config value (not the _HOST-resolved namenodePrincipal)
				// is deliberately used here, per the commented-out line in the original.
				if (conf.get("dfs.namenode.kerberos.principal") == null) {
					conf.set("dfs.namenode.kerberos.principal", namenodePrincipalConfig);
				}
				log.info("Hadoop namenode principal: " + conf.get("dfs.namenode.kerberos.principal"));

				UserGroupInformation.setConfiguration(conf);
				UserGroupInformation.loginUserFromKeytab(principal, keytab);
				final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
				log.info("Login as: " + ugi.getUserName());

				final long renewPeriod = connectorConfig
						.getLong(CommonSinkConnectorConfig.KERBEROS_TICKET_RENEW_PERIOD_MS_CONFIG);

				// Periodically re-login from the keytab until stop() flips isRunning
				// and notifies this monitor.
				ticketRenewThread = new Thread(new Runnable() {
					@Override
					public void run() {
						synchronized (DataWriter2.this) {
							while (isRunning) {
								try {
									DataWriter2.this.wait(renewPeriod);
									if (isRunning) {
										ugi.reloginFromKeytab();
									}
								} catch (IOException e) {
									log.error("Error renewing the ticket", e);
								} catch (InterruptedException e) {
									// Interruption means shutdown: restore the flag and exit.
									Thread.currentThread().interrupt();
									return;
								}
							}
						}
					}
				});
				log.info("Starting the Kerberos ticket renew thread with period {}ms.", renewPeriod);
				ticketRenewThread.start();
			}

			url = connectorConfig.getString(CommonSinkConnectorConfig.HDFS_URL_CONFIG);
			topicsDir = connectorConfig.getString(CommonSinkConnectorConfig.TOPICS_DIR_CONFIG);
			logsDir = connectorConfig.getString(CommonSinkConnectorConfig.LOGS_DIR_CONFIG);

			log.info("hdfs.url: {} ", url);
			Class<? extends Storage> storageClass = (Class<? extends Storage>) Class
					.forName(connectorConfig.getString(CommonSinkConnectorConfig.STORAGE_CLASS_CONFIG));
			storage = StorageFactory.createStorage(storageClass, conf, url);

			createDir(topicsDir);
			logsDir = logsDir + HdfsSinkConnecorConstants.TEMPFILE_DIRECTORY;
			createDir(logsDir);

			writerProvider = new RecordWriterProvider();
			extension = writerProvider.getExtension();

			partitioner = createPartitioner(connectorConfig);
			nameFmt = connectorConfig.getString(CommonSinkConnectorConfig.HDFS_FILENAME_FORMAT);

			hiveIntegration = connectorConfig.getBoolean(CommonSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
			if (hiveIntegration) {
				// The hive database config may carry the table name as "database|table".
				hiveDatabase = connectorConfig.getString(CommonSinkConnectorConfig.HIVE_DATABASE_CONFIG);
				if (hiveDatabase.contains("|")) {
					int inx = hiveDatabase.indexOf("|");
					hiveTable = hiveDatabase.substring(inx + 1);
					hiveDatabase = hiveDatabase.substring(0, inx);
				}
				hiveMetaStore = new HiveMetaStore(conf, connectorConfig);
				executorService = Executors.newSingleThreadExecutor();
			}

			timeoutMs = connectorConfig.getLong(CommonSinkConnectorConfig.RETRY_BACKOFF_CONFIG);
			flushSize = connectorConfig.getInt(CommonSinkConnectorConfig.FLUSH_SIZE_CONFIG);

			instId = connectorConfig.getString(CommonSinkConnectorConfig.INST_ID);
			taskId = connectorConfig.getInt(CommonSinkConnectorConfig.TASK_ID);
			if (Strings.isNullOrEmpty(instId)) {
				// Fall back to the last IP octet so the temp-file dir stays unique per host.
				instId = IPUtil.getLocalIpLastPiece() + "";
			}
			log.info("instId: {}", instId);

			final long period = connectorConfig.getLong(CommonSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG);

			if (period > 0) {
				// Clock thread: sleep until just past the next period boundary, then
				// rotate the current temp file and pre-create the next target directory.
				Thread clock = new Thread(new Runnable() {
					@Override
					public void run() {
						synchronized (DataWriter2.this) {
							while (isRunning) {
								try {
									long now = System.currentTimeMillis();
									// Time until the next multiple of `period`, plus a
									// 100ms grace so we land safely inside the new window.
									long waitT = (now / period + 1) * period - now;
									log.info("[{}] waiting [{}] ms. ", getTmpFileSubDirectory(), waitT + 100L);
									DataWriter2.this.wait(waitT + 100L);
									log.info("[{}] waiting end . ", getTmpFileSubDirectory());
									doRotate();
									createTargetDir();
								} catch (InterruptedException ie) {
									// Interruption means shutdown: restore the flag and exit.
									Thread.currentThread().interrupt();
									return;
								} catch (Exception e) {
									log.error("in clock, hdfs problem ? confim dir create .", e);
								}
							}
						}
					}
				});
				clock.setPriority(Thread.MAX_PRIORITY);
				clock.start();
			}

		} catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
			throw new ConnectException("Reflection exception: ", e);
		} catch (IOException e) {
			throw new ConnectException(e);
		}
	}

	/**
	 * Rotates the current temp file: closes it, commits (renames) it into the target
	 * partition directory, resets the record counter and marks the rotate meter.
	 *
	 * @throws IOException if closing or committing the file fails
	 */
	protected synchronized void doRotate() throws IOException {
		closeTempFile();
		commitFile();
		recordCounter.set(0L);
		meter.mark();
	}

	/**
	 * Stops the writer: clears the running flag and wakes the background threads
	 * first (so they exit even if closing storage fails), then closes storage.
	 */
	public void stop() {
		// Flip the flag and wake waiters BEFORE closing storage; otherwise a failed
		// close would leave the renew/clock threads looping forever.
		isRunning = false;
		synchronized (this) {
			this.notifyAll();
		}
		try {
			storage.close();
		} catch (Exception e) {
			log.error("what happen ? ", e);
		}
	}

	/**
	 * Buffers the given records and drains the buffer to HDFS.
	 *
	 * @param records records delivered by the Connect framework
	 */
	public void write(Collection<SinkRecord> records) {
		for (SinkRecord record : records) {
			buffer(record);
		}
		write();
	}

	/** Creates {@code dir} under the storage URL if it does not already exist. */
	private void createDir(String dir) throws IOException {
		String path = storage.url() + "/" + dir;
		if (!storage.exists(path)) {
			storage.mkdirs(path);
		}
	}

	/**
	 * Instantiates and configures the {@link Partitioner} named by
	 * {@code partitioner.class}.
	 */
	@SuppressWarnings("unchecked")
	private Partitioner createPartitioner(CommonSinkConnectorConfig config)
			throws ClassNotFoundException, IllegalAccessException, InstantiationException {

		Class<? extends Partitioner> partitionerClasss = (Class<? extends Partitioner>) Class
				.forName(config.getString(CommonSinkConnectorConfig.PARTITIONER_CLASS_CONFIG));

		Map<String, Object> map = copyConfig(config);
		Partitioner partitioner = partitionerClasss.newInstance();
		partitioner.configure(map);
		return partitioner;
	}

	/** Copies the partitioner-relevant settings into a plain map for configure(). */
	private Map<String, Object> copyConfig(CommonSinkConnectorConfig config) {
		Map<String, Object> map = new HashMap<>();
		map.put(CommonSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG,
				config.getString(CommonSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG));
		map.put(CommonSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG,
				config.getLong(CommonSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG));
		map.put(CommonSinkConnectorConfig.PATH_FORMAT_CONFIG,
				config.getString(CommonSinkConnectorConfig.PATH_FORMAT_CONFIG));
		map.put(CommonSinkConnectorConfig.LOCALE_CONFIG, config.getString(CommonSinkConnectorConfig.LOCALE_CONFIG));
		map.put(CommonSinkConnectorConfig.TIMEZONE_CONFIG, config.getString(CommonSinkConnectorConfig.TIMEZONE_CONFIG));

		map.put(CommonSinkConnectorConfig.HDFS_FILENAME_FORMAT,
				config.getString(CommonSinkConnectorConfig.HDFS_FILENAME_FORMAT));

		return map;
	}

	/**
	 * Called by the framework when partitions are (re)assigned; remembers the
	 * assignment and runs recovery.
	 */
	public void open(Collection<TopicPartition> partitions) {
		assigns = partitions;
		recover();
	}

	/**
	 * Pause/resume cycle used as a lightweight recovery step.
	 *
	 * @return {@code true} on success, {@code false} if a ConnectException forced
	 *         a retry backoff
	 */
	public boolean recover() {
		try {
			pause();
			// clear();
			resume();
		} catch (ConnectException e) {
			setRetryTimeout(timeoutMs);
			return false;
		}
		return true;
	}

	/** Pauses consumption on every assigned partition. */
	private void pause() {
		if (assigns != null) {
			for (TopicPartition tp : assigns) {
				context.pause(tp);
			}
		}
	}

	/** Resumes consumption on every assigned partition. */
	private void resume() {
		if (assigns != null) {
			for (TopicPartition tp : assigns) {
				context.resume(tp);
			}
		}
	}

	/** Asks the framework to retry after {@code timeoutMs}. */
	private void setRetryTimeout(long timeoutMs) {
		context.timeout(timeoutMs);
	}

	/**
	 * Drains the buffer to the current temp file, rotating when {@code flushSize}
	 * is reached. Failures are retried with a linearly-growing sleep; after 10
	 * consecutive failures the writer provider is recreated and retrying continues.
	 */
	public synchronized void write() {
		Context time = timer.time();
		pause();
		int failCnt = 0;
		while (!buffer.isEmpty()) {
			try {
				if (shouldRotate()) {
					doRotate();
				}
				SinkRecord record = buffer.peek();
				writeRecord(record);
				// Only remove the record once it has been written successfully.
				buffer.poll();
				recordCounter.getAndIncrement();
				failCnt = 0;
			} catch (Exception e) {
				failCnt++;
				log.error("retry [{}], question of hdfs ? Confirm it . ", failCnt, e);
				setRetryTimeout(timeoutMs);
				// Drop the writer so the next attempt reopens the temp file.
				recordWriter = null;
				try {
					Thread.sleep(1000L * failCnt);
				} catch (InterruptedException e1) {
					// Restore the interrupt flag so shutdown is not silently swallowed.
					Thread.currentThread().interrupt();
				}
				if (failCnt >= 10) {
					log.error("retry [{}], give up . ", failCnt, e);
					writerProvider = new RecordWriterProvider();
				}
			}
		}
		Preconditions.checkArgument(buffer.isEmpty(), "buffer is not empty .");
		resume();
		time.stop();
	}

	/** Appends a single record to the in-memory buffer. */
	public void buffer(SinkRecord sinkRecord) {
		buffer.add(sinkRecord);
	}

	/** Writes one record through the (lazily opened) temp-file writer. */
	private void writeRecord(SinkRecord record) throws IOException {
		// Temp files live in a per-instance/per-task subdirectory.
		String tmpSubDir = getTmpFileSubDirectory();
		RecordWriter<SinkRecord> writer = getWriter(record, tmpSubDir);
		writer.write(record);
	}

	/** Per-instance/per-task temp subdirectory name, e.g. "12_3". */
	private String getTmpFileSubDirectory() {
		return instId + "_" + taskId;
	}

	/**
	 * Returns the current record writer, opening a new temp file if needed.
	 * Reuses the provider's current file name when it has one.
	 */
	@SuppressWarnings("unchecked")
	private RecordWriter<SinkRecord> getWriter(SinkRecord record, String tmpSubDir) throws ConnectException {
		try {
			if (recordWriter != null) {
				return recordWriter;
			}
			String tempFile = writerProvider.getFileName();
			if (Strings.isNullOrEmpty(tempFile)) {
				tempFile = getTempFile(tmpSubDir);
			}

			recordWriter = writerProvider.getRecordWriter(storage.conf(), tempFile, record, connectorConfig);
			return recordWriter;
		} catch (IOException e) {
			throw new ConnectException(e);
		}
	}

	/** Builds a fresh temp-file path under {@code logsDir}/{@code directory}. */
	private String getTempFile(String directory) {
		return FileUtils.tempFileName(storage.url(), logsDir, directory, extension);
	}

	/** Whether the current temp file has reached the flush-size threshold. */
	private boolean shouldRotate() {
		return recordCounter.get() >= flushSize;
	}

	/** Closes the current temp file, always clearing the writer reference. */
	private void closeTempFile() throws IOException {
		if (recordWriter != null) {
			try {
				recordWriter.close();
			} finally {
				recordWriter = null;
			}
		}
	}

	/**
	 * Computes the current target partition directory, creates it in HDFS if new,
	 * and registers it as a Hive partition when integration is enabled. Class-level
	 * synchronization plus {@code CommCache.LAST_TARGET_FILE} de-duplicates the
	 * work across writer instances in the same JVM.
	 *
	 * @return the encoded partition directory (relative to {@code topicsDir})
	 */
	private String createTargetDir() {

		synchronized (DataWriter2.class) {
			// NOTE(review): encodePartition(null) assumes the partitioner is purely
			// time-based and ignores the record — confirm against the configured class.
			String directory = partitioner.encodePartition(null);
			currentTargetDir.set(directory);

			if (directory.equals(CommCache.LAST_TARGET_FILE.get())) {
				return directory; // another writer already created this directory
			}
			String directoryName = FileUtils.directoryName(storage.url(), topicsDir, directory);
			log.info("[instid: {} taskid: {}] create DIR [{}] ", instId, taskId, directory);
			try {
				if (!storage.exists(directoryName)) {
					storage.mkdirs(directoryName);
				}
				if (hiveIntegration) {
					addHivePartition(directoryName);
				}
			} catch (IOException e) {
				log.error("failed to create target dir [{}]", directoryName, e);
			}

			CommCache.LAST_TARGET_FILE.set(directory);
			return directory;
		}

	}

	/** Asynchronously registers {@code location} as a partition in the Hive metastore. */
	private void addHivePartition(final String location) {

		executorService.submit(new Callable<Void>() {
			@Override
			public Void call() throws Exception {
				log.info("Add hive Patition {}=>{}=>{} ", hiveDatabase, hiveTable, location);
				hiveMetaStore.addPartition(hiveDatabase, hiveTable, location);
				return null;
			}
		});
	}

	/**
	 * Renames the provider's current temp file into the target partition directory.
	 * No-op when there is no temp file or it does not exist in storage.
	 *
	 * @throws IOException if the commit (rename) fails
	 */
	private void commitFile() throws IOException {
		String sourceFile = writerProvider.getFileName();
		if (Strings.isNullOrEmpty(sourceFile)) {
			return;
		}
		if (!storage.exists(sourceFile)) {
			return;
		}
		String directory = currentTargetDir.get();
		if (directory == null) { // first commit: no clock tick has set it yet
			directory = createTargetDir();
			Preconditions.checkNotNull(directory);
		}
		String committedFile = FileUtils.fileName(storage.url(), topicsDir, directory, genTargetFileName());
		storage.commit(sourceFile, committedFile);
		log.info("Committed from {} to {}", sourceFile, committedFile);
	}

	/**
	 * Builds the committed file name: parses {@code key=value} segments out of the
	 * target directory path, adds an {@code id} of instId+taskId, and substitutes
	 * them into {@code nameFmt}; falls back to a random UUID when no format is set.
	 */
	private String genTargetFileName() {
		Map<String, String> res = Maps.newHashMap();
		String dirctory = currentTargetDir.get();
		Splitter on = Splitter.on(File.separator.charAt(0));
		Iterable<String> split = on.split(dirctory);
		for (String s : split) {
			String[] split2 = s.split("=");
			if (split2.length == 2) {
				res.put(split2[0], split2[1]);
			}
		}
		res.put("id", instId + "" + taskId);
		if (!Strings.isNullOrEmpty(nameFmt)) {
			return NameReplace.replace(nameFmt, res);
		}
		return UUID.randomUUID().toString();
	}

}
