package dacp.etl.kafka.hdfs.connect.writer;

import java.io.EOFException;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;

import org.apache.hadoop.conf.Configuration;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.errors.IllegalWorkerStateException;
import org.apache.kafka.connect.errors.SchemaProjectorException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTaskContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.codahale.metrics.Timer.Context;
import com.google.common.base.Strings;

import dacp.etl.kafka.hdfs.connect.hive.HiveMetaStore;
import dacp.etl.kafka.hdfs.connect.monitor.MRegistry;
import dacp.etl.kafka.hdfs.connect.names.FileName;
import dacp.etl.kafka.hdfs.connect.utils.CommonSinkConnectorConfig;
import dacp.etl.kafka.hdfs.connect.utils.FileUtils;
import io.confluent.connect.hdfs.HdfsSinkConnecorConstants;
import io.confluent.connect.hdfs.RecordWriter;
import io.confluent.connect.hdfs.partitioner.Partitioner;
import io.confluent.connect.hdfs.storage.Storage;

/**
 * Writes the records of a single Kafka {@link TopicPartition} to HDFS.
 *
 * <p>Records are queued via {@link #buffer(SinkRecord)} and drained by {@link #write()},
 * which appends each record to a per-encoded-partition temp file and, once
 * {@link #shouldRotate(long)} fires (flush size reached or rotate interval elapsed),
 * closes the temp files and commits (renames) them to their final, offset-named
 * locations. Progress through pause → write → rotate → close → commit is driven by an
 * internal {@link State} machine whose switch cases deliberately fall through so a
 * single call can advance several steps in one pass.
 *
 * <p>Thread-safety: no internal synchronization; this class assumes it is invoked from
 * a single sink-task thread. The only work handed to another thread is Hive partition
 * registration via {@link #addHivePartition(String)} — NOTE(review): that callable
 * mutates {@code hiveTable} from the executor thread without synchronization; confirm
 * this is benign.
 */
public class TopicPartitionWriter2 {
	private static final Logger log = LoggerFactory.getLogger(TopicPartitionWriter2.class);

	// encodedPartition -> path of the temp file currently being written for that partition
	private Map<String, String> tempFiles;
	// encodedPartition -> open writer on the matching temp file (raw type mirrors the
	// Confluent RecordWriter API; entries are removed when the temp file is closed)
	@SuppressWarnings("rawtypes")
	private Map<String, RecordWriter> writers;
	// The Kafka topic-partition this writer is responsible for.
	private TopicPartition tp;
	private Partitioner partitioner;

	// Produces committed file names (see FileUtils.committedFileName).
	private FileName namer;

	// HDFS base URL and directory layout, taken from Storage / connector config.
	private String url;
	private String topicsDir;
	private String logsDir;
	// Current position in the recover/write state machine.
	private State state;
	// Records accepted by buffer() but not yet written to a temp file.
	private Queue<SinkRecord> buffer;
	private Storage storage;
	private SinkTaskContext context;
	// Records written since the last commit; reset in commitFile(String).
	private int recordCounter;
	// Rotation thresholds (record count / wall-clock interval).
	private int flushSize;
	private long rotateIntervalMs;
	private long lastRotate;
	private RecordWriterProvider writerProvider;
	private Configuration conf;
	// NOTE(review): populated nowhere in this class besides clear() in commitFile();
	// appears to be a leftover from the upstream TopicPartitionWriter — confirm.
	private Set<String> appended;
	// Offset of the last record handed to writeRecord(); -1 until the first write.
	private long offset;
	// Per-encoded-partition first and last Kafka offsets of the current (uncommitted)
	// temp file; used to build the committed file name.
	private Map<String, Long> startOffsets;
	private Map<String, Long> offsets;
	// Retry backoff applied via SinkTaskContext.timeout() after failures.
	private long timeoutMs;
	// Wall-clock time of the last IO/Connect failure, or -1 if none yet.
	// NOTE(review): never reset to -1 after a successful write; only the
	// "now - failureTime < timeoutMs" window matters, so this looks harmless — confirm.
	private long failureTime;
	// File extension supplied by the RecordWriterProvider (cached in the constructor).
	private String extension;

	// Optional Hive integration: register each committed directory as a partition.
	private boolean hiveIntegration;
	private String hiveDatabase;
	private String hiveTable;
	private HiveMetaStore hiveMetaStore;
	private ExecutorService executorService;

	private CommonSinkConnectorConfig connectorConfig;

	// Shared metrics: overall write latency, temp-file close latency, commit latency,
	// and rotation rate. Static, so aggregated across all partition writers.
	private static Timer timer = MRegistry.get().timer("Write");
	private static Timer timer2 = MRegistry.get().timer("Write.closeTemp");
	private static Timer timer3 = MRegistry.get().timer("Write.commit");
	private static Meter meter = MRegistry.get().meter("Rotate");

	/**
	 * Builds the partition-encoding string for a record, e.g. {@code partition=3}.
	 * Used by writeRecord() instead of {@code partitioner.encodePartition(record)}
	 * (the latter call is commented out there).
	 */
	private String encodePartition(SinkRecord sinkRecord) {
		return "partition=" + String.valueOf(sinkRecord.kafkaPartition());
	}

	/**
	 * Creates a writer for one topic partition.
	 *
	 * <p>Reads flush size, rotate interval, retry backoff and directory layout from
	 * {@code connectorConfig}, initializes all bookkeeping maps, and starts the state
	 * machine in {@link State#RECOVERY_STARTED} so {@link #recover()} runs before any
	 * writes.
	 *
	 * @param tp              the topic partition this writer owns
	 * @param storage         HDFS abstraction used for mkdirs/commit/delete
	 * @param writerProvider  factory for per-file record writers
	 * @param partitioner     maps encoded partitions to directory paths
	 * @param namer           builds committed file names
	 * @param connectorConfig source of all tuning parameters
	 * @param context         used to pause/resume the partition and set retry timeouts
	 * @param hiveIntegration whether to register committed directories in Hive
	 * @param hiveDatabase    Hive database name (used only when hiveIntegration)
	 * @param hiveTable       Hive table name; defaults to the topic name if empty
	 * @param hiveMetaStore   metastore client for partition registration
	 * @param executorService runs Hive partition registration asynchronously
	 */
	public TopicPartitionWriter2(TopicPartition tp, Storage storage, RecordWriterProvider writerProvider,
			Partitioner partitioner, FileName namer, CommonSinkConnectorConfig connectorConfig, SinkTaskContext context,
			boolean hiveIntegration, String hiveDatabase, String hiveTable, HiveMetaStore hiveMetaStore,
			ExecutorService executorService) {
		this.tp = tp;
		this.context = context;
		this.storage = storage;
		this.writerProvider = writerProvider;
		this.partitioner = partitioner;
		this.namer = namer;
		this.url = storage.url();
		this.conf = storage.conf();

		this.failureTime = -1L;
		
		this.hiveIntegration = hiveIntegration;
		this.hiveDatabase = hiveDatabase;
		this.hiveTable = hiveTable;
		this.hiveMetaStore = hiveMetaStore;
		this.executorService = executorService;

		this.connectorConfig = connectorConfig;

		topicsDir = connectorConfig.getString(CommonSinkConnectorConfig.TOPICS_DIR_CONFIG);
		flushSize = connectorConfig.getInt(CommonSinkConnectorConfig.FLUSH_SIZE_CONFIG);
		rotateIntervalMs = connectorConfig.getLong(CommonSinkConnectorConfig.ROTATE_INTERVAL_MS_CONFIG);
		timeoutMs = connectorConfig.getLong(CommonSinkConnectorConfig.RETRY_BACKOFF_CONFIG);

		logsDir = connectorConfig.getString(CommonSinkConnectorConfig.LOGS_DIR_CONFIG);

		buffer = new LinkedList<>();
		writers = new HashMap<>();
		tempFiles = new HashMap<>();
		appended = new HashSet<>();
		startOffsets = new HashMap<>();
		offsets = new HashMap<>();
		state = State.RECOVERY_STARTED;
		offset = -1L;
		extension = writerProvider.getExtension();
	}

	/**
	 * Phases of the recover/write lifecycle. {@link #next()} advances to the next
	 * declared constant (wrapping), so the declaration order below IS the state-machine
	 * order — do not reorder these constants.
	 */
	private enum State {
		RECOVERY_STARTED, RECOVERY_PARTITION_PAUSED, OFFSET_RESET, WRITE_STARTED, WRITE_PARTITION_PAUSED, SHOULD_ROTATE, TEMP_FILE_CLOSED, FILE_COMMITTED;

		// Cached once; values() allocates a fresh array on every call.
		private static State[] vals = values();

		public State next() {
			return vals[(this.ordinal() + 1) % vals.length];
		}
	}

	/**
	 * Runs the recovery phase: pauses the partition, (recovery cleanup is currently a
	 * TODO), then resumes consumption. The switch cases fall through intentionally so
	 * one call walks RECOVERY_STARTED → RECOVERY_PARTITION_PAUSED → OFFSET_RESET.
	 *
	 * @return {@code true} on success (or when called in a non-recovery state);
	 *         {@code false} if a {@link ConnectException} occurred, in which case a
	 *         retry timeout has been scheduled on the task context
	 */
	public boolean recover() {
		try {
			switch (state) {
			case RECOVERY_STARTED:
				log.info("Started recovery for topic partition {}", tp);
				pause();
				nextState();
			case RECOVERY_PARTITION_PAUSED:
				// TODO
				// clear();
				nextState();
			case OFFSET_RESET:
				resume();
				nextState();
				log.info("Finished recovery for topic partition {}", tp);
				break;
			default:
				log.error("{} is not a valid state to perform recovery for topic partition {}.", state, tp);
			}
		} catch (ConnectException e) {
			log.error("Recovery failed at state {}", state, e);
			setRetryTimeout(timeoutMs);
			return false;
		}
		return true;
	}

	/** Asks the sink task context to retry after the given backoff. */
	private void setRetryTimeout(long timeoutMs) {
		context.timeout(timeoutMs);
	}

	/**
	 * Drains the record buffer through the write state machine.
	 *
	 * <p>Per iteration: writes the head record to its temp file, and when rotation is
	 * due falls through SHOULD_ROTATE → TEMP_FILE_CLOSED → FILE_COMMITTED to close and
	 * commit the temp files. On IO/Connect failure the loop breaks, a retry backoff is
	 * scheduled, and the failed record stays at the head of the buffer (it is only
	 * polled after a successful writeRecord). While the failure backoff window is
	 * active the method returns immediately.
	 *
	 * <p>NOTE(review): if a SchemaProjectorException/IllegalWorkerStateException is
	 * rethrown below, {@code time.stop()} is skipped, so that Timer context is never
	 * closed for the call — confirm whether the metric skew matters.
	 */
	public void write() {
		Context time = timer.time();
		long now = System.currentTimeMillis();

	    if (failureTime > 0 && now - failureTime < timeoutMs) {
	      return;
	    }
	    
		while (!buffer.isEmpty()) {
			try {
				switch (state) {
				case WRITE_STARTED:
					pause();
					nextState();
				case WRITE_PARTITION_PAUSED:
					// peek first, poll only after a successful write, so a failed
					// record is retried on the next call.
					SinkRecord record = buffer.peek();
					writeRecord(record);
					buffer.poll();
					if (shouldRotate(now)) {
						log.info("Starting commit and rotation for topic partition {} with start offsets {}"
								+ " and end offsets {}", tp, startOffsets, offsets);
						nextState();
						// Fall through and try to rotate immediately
					} else {
						break;
					}
				case SHOULD_ROTATE:
					Context time3 = timer2.time();
					lastRotate = System.currentTimeMillis();
					closeTempFile();
					time3.close();
					nextState();
				case TEMP_FILE_CLOSED:
					Context time2 = timer3.time();
					commitFile();
					time2.close();

					meter.mark();

					nextState();
				case FILE_COMMITTED:
					setState(State.WRITE_PARTITION_PAUSED);
					break;
				default:
					log.error("{} is not a valid state to write record for topic partition {}.", state, tp);
				}
			} catch (SchemaProjectorException | IllegalWorkerStateException e) {
				// Non-retriable: propagate to fail the task.
				throw new RuntimeException(e);
			} catch (IOException | ConnectException e) {
				// Retriable: back off and leave the current record buffered.
				log.error("Exception on {}.", tp, e);
				setRetryTimeout(timeoutMs);
				failureTime = System.currentTimeMillis();
				break;  
			}
		}
		if (buffer.isEmpty()) {
			state = State.WRITE_STARTED;
			resume();
		}
		time.stop();
	}

	/**
	 * Appends one record to the temp file of its encoded partition and updates the
	 * start/end offset bookkeeping used to name the committed file.
	 *
	 * @throws IOException if the underlying record writer fails
	 */
	private void writeRecord(SinkRecord record) throws IOException {
		long kafkaOffset = record.kafkaOffset();

		offset = kafkaOffset;

		// Create a directory per partition (local encoding used instead of the
		// configured partitioner — the original call is kept below for reference).
		// String encodedPartition = partitioner.encodePartition(record);
		String encodedPartition = encodePartition(record);

		RecordWriter<SinkRecord> writer = getWriter(record, encodedPartition);
		
		writer.write(record);

		// Track the first and latest offset for this partition's current temp file;
		// both feed into the committed file name.
		if (!startOffsets.containsKey(encodedPartition)) {
			startOffsets.put(encodedPartition, kafkaOffset);
			offsets.put(encodedPartition, kafkaOffset);
		} else {
			offsets.put(encodedPartition, kafkaOffset);
		}
		recordCounter++;
	}

	/**
	 * Returns the open writer for the encoded partition, creating one (and its temp
	 * file path) on first use.
	 *
	 * @throws ConnectException wrapping any IOException from writer creation
	 */
	@SuppressWarnings("unchecked")
	private RecordWriter<SinkRecord> getWriter(SinkRecord record, String encodedPartition) throws ConnectException {
		try {
			// After a commit the writer is reopened; reuse the cached writer for this
			// partition while its temp file is still open.
			if (writers.containsKey(encodedPartition)) {
				return writers.get(encodedPartition);
			}
			
			// Temp file is keyed by encodedPartition.
			String tempFile = getTempFile(encodedPartition);
			RecordWriter<SinkRecord> writer = writerProvider.getRecordWriter(conf, tempFile, record, connectorConfig);
			writers.put(encodedPartition, writer);
			return writer;
		} catch (IOException e) {
			throw new ConnectException(e);
		}
	}

	/** Closes every open temp-file writer (one per encoded partition). */
	private void closeTempFile() throws IOException {
		for (String encodedPartition : tempFiles.keySet()) {
			closeTempFile(encodedPartition);
		}
	}

	/**
	 * Closes and forgets the writer for one encoded partition, if open. The temp-file
	 * path itself is kept in {@code tempFiles} so the same name is reused (see the
	 * commented-out remove below and getTempFile()).
	 */
	private void closeTempFile(String encodedPartition) throws IOException {
		if (writers.containsKey(encodedPartition)) {
			RecordWriter writer = writers.get(encodedPartition);
			writer.close();
			writers.remove(encodedPartition);
		}
		// tempFiles.remove(encodedPartition);
	}

	/** Commits every closed temp file to its final location. */
	private void commitFile() throws IOException {
		appended.clear();
		for (String encodedPartition : tempFiles.keySet()) {
			commitFile(encodedPartition);
		}
	}

	/**
	 * Renames one encoded partition's temp file to its committed, offset-named file,
	 * creating the target directory (and optionally registering it as a Hive
	 * partition) first, then clears that partition's start offset and resets the
	 * shared record counter.
	 *
	 * <p>NOTE(review): the target directory is derived from
	 * {@code partitioner.encodePartition(writerProvider.getRecord())}, not from the
	 * {@code encodedPartiton} argument — if this method ever runs for more than one
	 * encoded partition per rotation, all files would land in the directory of the
	 * provider's last record. Confirm against the provider's contract.
	 *
	 * @throws IOException if any storage operation fails
	 */
	private void commitFile(String encodedPartiton) throws IOException {

		long startOffset = startOffsets.get(encodedPartiton);
		long endOffset = offsets.get(encodedPartiton);
		String tempFile = tempFiles.get(encodedPartiton);
		
		// Target directory.
		String directory = getDirectory(partitioner.encodePartition(writerProvider.getRecord()));

		String committedFile = FileUtils.committedFileName(namer, url, topicsDir, directory, tp, startOffset, endOffset,
				extension);
		String directoryName = FileUtils.directoryName(url, topicsDir, directory);

		if (hiveIntegration) {
			addHivePartition(directoryName);
		}

		if (!storage.exists(directoryName)) {
			storage.mkdirs(directoryName);
		}
		storage.commit(tempFile, committedFile);
		startOffsets.remove(encodedPartiton);
		recordCounter = 0;
		log.info("Committed {} for {}", committedFile, tp);
	}

	/**
	 * Closes this writer, discarding (close + delete) any in-progress temp files.
	 * Deletion failures are logged, not rethrown, so all partitions get a chance to
	 * clean up. Clears writer and offset bookkeeping afterwards.
	 */
	public void close() throws ConnectException {
		log.info("Closing TopicPartitionWriter {}", tp);
		for (String encodedPartition : tempFiles.keySet()) {
			try {
				if (writers.containsKey(encodedPartition)) {
					log.info("Discarding in progress tempfile {} for {} {}", tempFiles.get(encodedPartition), tp,
							encodedPartition);
					closeTempFile(encodedPartition);
					deleteTempFile(encodedPartition);
				}
			} catch (IOException e) {
				log.error("Error discarding temp file {} for {} {} when closing TopicPartitionWriter:",
						tempFiles.get(encodedPartition), tp, encodedPartition, e);
			}
		}

		writers.clear();
		startOffsets.clear();
		offsets.clear();
	}

	/** Deletes the temp file recorded for the encoded partition from storage. */
	private void deleteTempFile(String encodedPartiton) throws IOException {
		storage.delete(tempFiles.get(encodedPartiton));
	}

	/** Queues a record for the next {@link #write()} pass. */
	public void buffer(SinkRecord sinkRecord) {
		buffer.add(sinkRecord);
	}

	/** @return the Kafka offset of the last record written, or -1 if none yet */
	public long offset() {
		return offset;
	}

	/** @return the live map of open writers (exposed for task-level inspection) */
	public Map<String, RecordWriter> getWriters() {
		return writers;
	}

	/** @return the live map of encodedPartition -> temp file path */
	public Map<String, String> getTempFiles() {
		return tempFiles;
	}

	/** @return the file extension the writer provider produces */
	public String getExtension() {
		return writerProvider.getExtension();
	}

	/** Maps an encoded partition to its partitioned path under the topic. */
	private String getDirectory(String encodedPartition) {
		return partitioner.generatePartitionedPath(tp.topic(), encodedPartition);
	}

	/** Advances the state machine to the next declared {@link State}. */
	private void nextState() {
		state = state.next();
	}

	/** Jumps the state machine directly to the given state. */
	private void setState(State state) {
		this.state = state;
	}

	/**
	 * @return {@code true} when the flush size has been reached, or when a positive
	 *         rotate interval has elapsed since the last rotation
	 */
	private boolean shouldRotate(long now) {
		/*
		if (recordCounter >= flushSize) {
			return true;
		} else if (rotateIntervalMs <= 0) {
			return false;
		} else {
			return now - lastRotate >= rotateIntervalMs;
		}
		*/
		if (recordCounter >= flushSize || (rotateIntervalMs > 0 && (now - lastRotate >= rotateIntervalMs))) {
			return true;
		}
		return false;
	}

	/** Pauses consumption of this partition on the sink task context. */
	private void pause() {
		context.pause(tp);
	}

	/** Resumes consumption of this partition on the sink task context. */
	private void resume() {
		context.resume(tp);
	}

	/**
	 * Returns the temp-file path for the encoded partition, creating and caching it
	 * under the logs dir on first use. Cached paths survive commits (see
	 * closeTempFile), so the same temp name is reused for the next file.
	 */
	private String getTempFile(String encodedPartition) {
		String tempFile;
		if (tempFiles.containsKey(encodedPartition)) {
			tempFile = tempFiles.get(encodedPartition);
		} else {
			String directory = HdfsSinkConnecorConstants.TEMPFILE_DIRECTORY + getDirectory(encodedPartition);
			tempFile = FileUtils.tempFileName(url, logsDir, directory, extension);
			tempFiles.put(encodedPartition, tempFile);
		}
		return tempFile;
	}

	/**
	 * Asynchronously registers the committed directory as a Hive partition, defaulting
	 * the table name to the topic when not configured. Fire-and-forget: the returned
	 * Future is dropped, so registration failures surface only inside the executor.
	 * NOTE(review): {@code hiveTable} is assigned on the executor thread without
	 * synchronization — confirm visibility is acceptable here.
	 */
	private void addHivePartition(final String location) {

		executorService.submit(new Callable<Void>() {
			@Override
			public Void call() throws Exception {
				if (Strings.isNullOrEmpty(hiveTable))
					hiveTable = tp.topic();
				log.info("Add hive Patition {}=>{}=>{} ", hiveDatabase, hiveTable, location);
				hiveMetaStore.addPartition(hiveDatabase, hiveTable, location);
				return null;
			}
		});
	}

}
