/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.flume.sink;

import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.conf.Configurable;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.serialization.EventSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

/**
 * A Flume sink that writes events to per-branch rolling files on local disk.
 *
 * <p>Each event carries a header (named by {@code sink.pathKeyName}) whose value
 * selects a "branch"; one {@link BranchIOManager} per branch owns the output file.
 * Files are rotated on a timer ({@code sink.rollInterval} seconds) and when the
 * day changes. Not thread-safe beyond Flume's single-runner model: all state is
 * touched only by the sink runner thread, except {@link #shouldRotate}, which the
 * roll timer thread sets (hence {@code volatile}).
 */
public class BranchRollingFileSink extends AbstractSink implements Configurable {

	private static final Logger logger = LoggerFactory.getLogger(BranchRollingFileSink.class);
	private static final long defaultRollInterval = 30;
	private static final int defaultBatchSize = 100;

	/** Maximum number of events taken from the channel per {@link #process()} call. */
	private int batchSize = defaultBatchSize;

	/** Seconds between scheduled rotations; a value <= 0 disables time-based rolling. */
	private long rollInterval;
	private ScheduledExecutorService rollService;

	/** Base directory handed to each new BranchIOManager; may be null if unset. */
	private String defaultDirectory;
	/** Event-header key whose value selects the output branch. Required. */
	private String pathKeyName;

	private String serializerType;
	private Context serializerContext;

	private SinkCounter sinkCounter;

	/** One IO manager per branch key; accessed only from the sink runner thread. */
	private Map<String, BranchIOManager> branchIOMap;

	/** Set by the roll timer thread, cleared by process(); volatile for visibility only. */
	private volatile boolean shouldRotate;

	public BranchRollingFileSink() {
		branchIOMap = new HashMap<String, BranchIOManager>();
		shouldRotate = false;
	}

	@Override
	public void configure(Context context) {

		// Prefer the correctly spelled key but keep honoring the historical
		// misspelled key ("defautDirectory") so existing configurations still work.
		defaultDirectory = context.getString("sink.defaultDirectory",
				context.getString("sink.defautDirectory"));

		pathKeyName = context.getString("sink.pathKeyName");

		// Local renamed so it no longer shadows the rollInterval field.
		String rollIntervalStr = context.getString("sink.rollInterval");

		serializerType = context.getString("sink.serializer", "TEXT");
		serializerContext = new Context(context.getSubProperties("sink." + EventSerializer.CTX_PREFIX));

		Preconditions.checkNotNull(pathKeyName, "PathKey Name is undefined");
		Preconditions.checkNotNull(serializerType, "Serializer type is undefined");

		if (rollIntervalStr == null) {
			this.rollInterval = defaultRollInterval;
		} else {
			// NumberFormatException on a malformed value is intentional: a bad
			// config should fail loudly at configure time.
			this.rollInterval = Long.parseLong(rollIntervalStr);
		}

		batchSize = context.getInteger("sink.batchSize", defaultBatchSize);

		if (sinkCounter == null) {
			sinkCounter = new SinkCounter(getName());
		}
	}

	@Override
	public void start() {
		logger.info("Starting {}...", this);
		sinkCounter.start();
		super.start();

		if (rollInterval > 0) {

			rollService = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
					.setNameFormat("branchRollingFileSink-roller-" + Thread.currentThread().getId() + "-%d").build());

			/*
			 * Every N seconds, mark that it's time to rotate. We purposefully
			 * do NOT touch anything other than the indicator flag to avoid
			 * error handling issues (e.g. IO exceptions occuring in two
			 * different threads. Resist the urge to actually perform rotation
			 * in a separate thread!
			 */
			rollService.scheduleAtFixedRate(new Runnable() {

				@Override
				public void run() {
					logger.debug("Marking time to rotate all files.");
					shouldRotate = true;
				}

			}, rollInterval, rollInterval, TimeUnit.SECONDS);
		} else {
			logger.info("BranchRollInterval is not valid, file rolling will not happen.");
		}
		logger.info("BranchRollingFileSink {} started.", getName());
	}

	/**
	 * Drains up to {@code batchSize} events from the channel inside one
	 * transaction, routing each event to the BranchIOManager selected by its
	 * {@code pathKeyName} header. Rotates/reopens files as needed.
	 *
	 * @return READY if a full batch was taken, BACKOFF when the channel ran dry
	 * @throws EventDeliveryException on any IO or channel failure (after rollback)
	 */
	@Override
	public Status process() throws EventDeliveryException {

		if (shouldRotate) {

			logger.debug("Time to rotate all files.");

			// Clear the flag once, up front. The original code cleared it inside
			// the loop, which (a) left it set forever when the map was empty and
			// (b) on a mid-loop IOException left some files unrotated with the
			// flag already cleared.
			shouldRotate = false;

			try {
				for (BranchIOManager branchPathManager : branchIOMap.values()) {

					branchPathManager.close();
					sinkCounter.incrementConnectionClosedCount();

					branchPathManager.rotate();
				}
			} catch (IOException ede) {
				sinkCounter.incrementConnectionFailedCount();
				throw new EventDeliveryException("Unable to close file  while delivering event", ede);
			}
		}

		Channel channel = getChannel();
		Transaction transaction = channel.getTransaction();
		Event event = null;
		Status result = Status.READY;

		try {
			transaction.begin();
			int eventAttemptCounter = 0;
			for (int i = 0; i < batchSize; i++) {
				event = channel.take();
				if (event != null) {

					///// Path configuration changed externally: reconcile the IO map.
					if (BranchPathConfig.configUpdated) {
						logger.info("The path configuration has changed, update IO map.");
						updateBranchIOMap();
					}

					sinkCounter.incrementEventDrainAttemptCount();
					eventAttemptCounter++;

					String directoryKey = event.getHeaders().get(pathKeyName);

					// NOTE(review): an event missing this header aborts (and rolls
					// back) the whole batch; if such events can recur they will be
					// redelivered forever. Confirm upstream always sets the header.
					Preconditions.checkNotNull(directoryKey,
							"Undefined: Event headers has no key named " + pathKeyName);

					BranchIOManager branchIOManager = branchIOMap.get(directoryKey);

					if (branchIOManager == null) {
						branchIOManager = new BranchIOManager(defaultDirectory, directoryKey);
						branchIOMap.put(directoryKey, branchIOManager);
					}

					///// Day change ? close :
					if (branchIOManager.dayChanged()) {
						try {
							branchIOManager.close();
							sinkCounter.incrementConnectionClosedCount();
							branchIOManager.rotate();
						} catch (IOException ede) {
							sinkCounter.incrementConnectionFailedCount();
							throw new EventDeliveryException("Unable to close file while day changed.", ede);
						}
					}

					///// Closed ? open :
					if (branchIOManager.haveClosed()) {
						try {
							branchIOManager.open(serializerType, serializerContext);
							sinkCounter.incrementConnectionCreatedCount();
						} catch (IOException ede) {
							sinkCounter.incrementConnectionFailedCount();
							throw new EventDeliveryException("Failed to open file while delivering event", ede);
						}
					}

					branchIOManager.write(event);

					/*
					 * FIXME: Feature: Rotate on size and time by checking bytes
					 * written and setting shouldRotate = true if we're past a
					 * threshold.
					 */

					/*
					 * FIXME: Feature: Control flush interval based on time or
					 * number of events. For now, we're super-conservative and
					 * flush on each write.
					 */
				} else {
					// No events found, request back-off semantics from runner
					result = Status.BACKOFF;
					break;
				}
			}

			// Flush every branch before committing so committed events are on disk.
			for (BranchIOManager branchPathManager : branchIOMap.values()) {
				branchPathManager.flush();
			}
			transaction.commit();
			sinkCounter.addToEventDrainSuccessCount(eventAttemptCounter);

		} catch (Exception ex) {
			transaction.rollback();
			throw new EventDeliveryException("Failed to process transaction", ex);
		} finally {
			transaction.close();
		}

		return result;
	}

	@Override
	public void stop() {
		logger.info("BranchRollingFile sink {} stopping...", getName());
		sinkCounter.stop();
		super.stop();

		shouldRotate = false;
		try {
			for (BranchIOManager branchPathManager : branchIOMap.values()) {
				branchPathManager.close();
				sinkCounter.incrementConnectionClosedCount();
			}
		} catch (IOException ede) {
			sinkCounter.incrementConnectionFailedCount();
			logger.error("Unable to close output stream. Exception follows.", ede);
		}

		if (rollInterval > 0) {
			rollService.shutdown();

			while (!rollService.isTerminated()) {
				try {
					rollService.awaitTermination(1, TimeUnit.SECONDS);
				} catch (InterruptedException e) {
					logger.debug("Interrupted while waiting for roll service to stop. " + "Please report this.", e);
					// Restore the interrupt status and stop waiting; retrying
					// awaitTermination with the flag swallowed would busy-loop.
					Thread.currentThread().interrupt();
					break;
				}
			}
		}
		logger.info("RollingFile sink {} stopped. Event metrics: {}", getName(), sinkCounter);
	}

	/**
	 * Reconciles {@link #branchIOMap} with the current BranchPathConfig:
	 * closes and drops managers whose branch was removed or whose path changed.
	 *
	 * @throws EventDeliveryException if closing an affected output stream fails
	 */
	public void updateBranchIOMap() throws EventDeliveryException {
		try {

			// Iterate the entry set and remove via the iterator. The previous
			// implementation called branchIOMap.remove(key) while iterating
			// keySet(), which throws ConcurrentModificationException on the
			// next it.next() whenever an entry follows a removal.
			for (Iterator<Map.Entry<String, BranchIOManager>> it = branchIOMap.entrySet().iterator(); it.hasNext();) {

				Map.Entry<String, BranchIOManager> entry = it.next();
				String key = entry.getKey();
				if (!BranchPathConfig.containsKey(key)) {

					//// Branch removed from config: close old IO and drop the manager.
					entry.getValue().close();
					it.remove();
					sinkCounter.incrementConnectionClosedCount();

					logger.info("Update branch io map: DELETE OLD key-{}", key);

				} else if (BranchPathConfig.hasAPathUpdated(key)) {

					//// Branch path changed: close old IO and drop the manager;
					//// process() recreates it lazily with the new path.
					entry.getValue().close();
					it.remove();

					BranchPathConfig.resetAPathStatu(key);
					sinkCounter.incrementConnectionClosedCount();

					logger.info("Update branch io map: UPDATE OLD key-{}", key);

				}
			}

			///// reset path configure update status.
			BranchPathConfig.resetUpdateStatu();

		} catch (IOException ede) {
			sinkCounter.incrementConnectionFailedCount();
			throw new EventDeliveryException("Unable to close file and remove IO while update config.", ede);
		}
	}

	/**
	 * @return the current file of every open branch, one per line,
	 *         each terminated by {@code ";\r\n"}
	 */
	public String getAllDirectories() {

		// StringBuilder: single-threaded use, no need for StringBuffer locking.
		StringBuilder allDirectories = new StringBuilder();
		for (BranchIOManager branchPathManager : branchIOMap.values()) {
			allDirectories.append(branchPathManager.getCurrentFile()).append(";\r\n");
		}
		return allDirectories.toString();
	}

	public long getRollInterval() {
		return rollInterval;
	}

	public void setRollInterval(long rollInterval) {
		this.rollInterval = rollInterval;
	}

}
