/*
Copyright 2010 Johan Maasing

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
 */
package nu.zoom.catonine.tail;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CoderResult;
import java.nio.charset.CodingErrorAction;
import java.util.LinkedList;
import java.util.List;
import java.util.regex.Matcher;

import nu.zoom.catonine.prefs.Preferences;
import nu.zoom.swing.desktop.common.BackendException;
import nu.zoom.swing.desktop.preferences.InvalidDataTypeException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * A tailer implementation that scans a file from start to end and breaks it
 * into log entries. When the initial tokenization of the file is done this
 * class watches for changes in the size of the file to detect appended content.
 * 
 * @author "Johan Maasing" &lt;johan@zoom.nu&gt;
 * 
 */
public class FullFileTailer extends AbstractRegularExpressionLogBlockTailer
		implements Tailer {
	/**
	 * Number of decode passes that may fail to end in underflow before the
	 * tailer gives up on the file as undecodable (probably binary).
	 */
	private static final int MAX_UNDERFLOW_ERRORS = 10 ;
	private final Preferences preferences;
	private final Log log = LogFactory.getLog(getClass());
	private FileChannel fileChannel = null;
	private File file = null;
	// File position (in bytes) where the next buffer fill should start,
	// i.e. the start of the first log entry not yet delivered to listeners.
	private long lastEntryStartPosition = 0;
	// File size observed on the previous read() pass; used to detect both
	// growth (appended content) and truncation.
	private long lastReadFileSize = 0;
	private CharsetDecoder charsetDecoder = null;
	private ByteBuffer fileByteBuffer;
	private CharBuffer charBuffer;
	private int underflowerrorCounter = MAX_UNDERFLOW_ERRORS ;

	/**
	 * Creates a tailer that decodes with the platform default charset and a
	 * 64 KiB read buffer.
	 * 
	 * @param preferences
	 *            source of the scrollback-size setting consulted when the
	 *            tailer counters are reset.
	 */
	public FullFileTailer(final Preferences preferences) {
		super();
		this.preferences = preferences;
		this.charsetDecoder = Charset.defaultCharset().newDecoder();
		// Replace malformed input rather than report it, consistent with the
		// decoder configured in setCharSet(). With the default REPORT action
		// every malformed byte would make decode() return an error result,
		// which is miscounted as an underflow error and would abort the
		// tailer after MAX_UNDERFLOW_ERRORS occurrences.
		this.charsetDecoder.onMalformedInput(CodingErrorAction.REPLACE);
		setReadBufferSize(64 * 1024);
	}

	/**
	 * Allocates new read buffers of the given size, discarding any partially
	 * processed buffer content.
	 * 
	 * @param size
	 *            buffer size in bytes, must be at least 3.
	 * @throws IllegalArgumentException
	 *             if size is less than 3.
	 */
	@Override
	public synchronized void setReadBufferSize(int size) {
		// Lets make sure that at least an EOL fits in the buffer, not that it
		// matters but really small buffer sizes makes no sense.
		if (size < 3) {
			throw new IllegalArgumentException(
					"Buffer size may not be less than 3");
		}
		log.debug("Changing Readbuffer size to: " + size);
		fileByteBuffer = ByteBuffer.allocate(size);
		charBuffer = CharBuffer.allocate(size);
		log.trace("Readbuffer allocated");
	}

	/**
	 * Points the tailer at a new file and restarts the counters. Any
	 * previously opened channel is closed first so repeated calls do not leak
	 * file descriptors.
	 * 
	 * @param file
	 *            the file to tail.
	 * @throws FileNotFoundException
	 *             if the file does not exist or cannot be opened for reading.
	 * @throws IOException
	 *             if querying the new file's size fails (see restart()).
	 */
	@Override
	public synchronized void setFile(final File file)
			throws IllegalArgumentException, FileNotFoundException, IOException {

		// Close the channel of any previously tailed file. Without this the
		// old file descriptor leaks every time the tailer is re-targeted.
		if ((this.fileChannel != null) && this.fileChannel.isOpen()) {
			try {
				this.fileChannel.close();
			} catch (IOException ex) {
				// Best effort: a failure to close the old channel should not
				// prevent tailing the new file.
				log.error(ex);
			}
		}
		this.file = file;
		final FileInputStream fis = new FileInputStream(file);
		// Closing the channel later also closes the underlying stream.
		this.fileChannel = fis.getChannel();
		restart();
	}

	/**
	 * @return the file currently being tailed, or null if none has been set.
	 */
	@Override
	public synchronized File getFile() {
		return this.file;
	}

	/**
	 * Switches the decoder to the named charset, falling back to the platform
	 * default if the name is null or unsupported. Malformed input is replaced
	 * rather than reported.
	 * 
	 * @param charsetName
	 *            the charset name, may be null.
	 * @return the display name of the charset actually in use.
	 */
	@Override
	public synchronized String setCharSet(String charsetName) {
		final Charset charset;
		if ((charsetName == null) || (!Charset.isSupported(charsetName))) {
			log.warn("Using default charset. Charset named: " + charsetName
					+ " is not supported");
			charset = Charset.defaultCharset();
		} else {
			charset = Charset.forName(charsetName);
		}
		charsetDecoder = charset.newDecoder();
		charsetDecoder.onMalformedInput(CodingErrorAction.REPLACE);
		log.trace("Charset changed to: " + charset);
		return charset.displayName();
	}

	/**
	 * Resets the read positions for a (re)started tail of a file of the given
	 * size, honoring the scrollback preference: when a scrollback size (in
	 * MiB) is configured, reading starts that far from the end of the file
	 * instead of at the beginning.
	 * 
	 * @param size
	 *            current size of the file in bytes.
	 */
	private synchronized void resetCounters(final long size) {
		underflowerrorCounter = MAX_UNDERFLOW_ERRORS ;
		long offset = 0;
		try {
			Integer scrollbackSize = preferences.getScrollbackSize();
			if (scrollbackSize != null) {
				log.debug("Scrollback enabled");
				// Scrollback preference is in MiB; never start before the
				// beginning of the file.
				offset = Math.max(0, size - scrollbackSize.longValue() * 1024
						* 1024);
			} else {
				offset = 0;
			}
		} catch (InvalidDataTypeException e) {
			offset = 0;
			log.error(e);
		} catch (BackendException e) {
			offset = 0;
			log.error(e);
		}
		log.debug("Setting lastEntryStartPosition & lastReadFileSize to:"
				+ offset);
		this.lastEntryStartPosition = offset;
		this.lastReadFileSize = offset;
	}

	/**
	 * Polls the file once: detects truncation (fires a reset) or growth
	 * (reads, decodes and tokenizes the appended region into log entries and
	 * fires them to listeners). Does nothing until a file, buffers and a
	 * decoder are all in place.
	 * 
	 * @throws IOException
	 *             if reading the file fails or the content cannot be decoded.
	 */
	@Override
	protected synchronized void read() throws IOException {
		// We don't read the file if we are not properly initialized, which
		// might happen after the tailer has started
		if ((fileChannel != null)
				&& (fileChannel.isOpen() && (this.fileByteBuffer != null) && (this.charsetDecoder != null))) {
			final long size = fileChannel.size();
			if (size < lastReadFileSize) {
				log.debug("File has been truncated, resetting tailer counters");
				// File has been truncated, best to reset the tailer and read
				// incrementally next time we are called.
				resetCounters(size);
				fireReset();
			} else if (size > lastReadFileSize) {
				log.debug("File has grown from " + lastReadFileSize + " to "
						+ size);
				long remainder = 0;
				final LinkedList<TailerListener.LogEntry> entries = new LinkedList<TailerListener.LogEntry>();
				do {
					fileChannel.position(lastEntryStartPosition);
					fileByteBuffer.clear();
					final int bytesRead = fileChannel.read(fileByteBuffer);
					if (bytesRead <= 0) {
						// End-of-file reached unexpectedly, e.g. the file was
						// truncated between the size() call above and this
						// read. Without this guard the negative return value
						// would corrupt the position arithmetic below and
						// could spin this loop forever. The truncation is
						// picked up on the next read() pass.
						break;
					}

					charsetDecoder.reset();
					charBuffer.clear();
					fileByteBuffer.flip();
					decode();
					charBuffer.flip();

					// If buffer does not end on a log entry boundary we back up
					// to the last known entry boundary
					// This will mean that a partial log entry at the end of
					// the file will not be displayed.
					long filePositionForNextBufferFill = convertToLines(
							entries, lastEntryStartPosition);
					remainder = lastEntryStartPosition + bytesRead
							- filePositionForNextBufferFill;
					lastEntryStartPosition = filePositionForNextBufferFill;
					// Check if we have looked at the entire file
				} while ((lastEntryStartPosition + remainder) < size);
				lastReadFileSize = size;
				if (entries.size() > 0) {
					fireLinesRead(entries);
				}
			}
		}
	}

	/**
	 * Splits the decoded charBuffer into log entries using the block pattern
	 * from the superclass and appends them to the given list.
	 * 
	 * NOTE(review): the returned file position and the LogEntry offsets are
	 * computed by adding CHAR indices from charBuffer to BYTE offsets in the
	 * file, and the trailing-remainder check compares a char index against
	 * fileByteBuffer's byte limit. This is only exact for single-byte
	 * charsets; for multi-byte encodings the positions drift — presumably an
	 * accepted limitation, verify against the supported charsets.
	 * 
	 * @param lines
	 *            receives the log entries found in the buffer.
	 * @param fileoffset
	 *            file position (in bytes) corresponding to the start of the
	 *            buffer.
	 * @return the file position from which the next buffer fill should start.
	 */
	private long convertToLines(List<TailerListener.LogEntry> lines,
			final long fileoffset) {

		Matcher blockMatcher = createBlockMatcher();
		blockMatcher.reset(charBuffer);

		if (log.isTraceEnabled()) {
			log.trace("Breaking buffer into blocks using pattern: "
					+ blockMatcher.pattern().toString());
		}
		int startOfBlock = 0;
		int endOfBlock = 0;
		CharSequence blockSeq;
		// A "starting" matcher marks the beginning of an entry, so an entry
		// ends where the next match starts; otherwise it ends where the
		// match (e.g. an EOL) ends.
		final boolean blockMatcherStarting = isBlockMatcherStarting();

		while (blockMatcher.find()) {
			endOfBlock = blockMatcherStarting ? blockMatcher.start()
					: blockMatcher.end();
			if (endOfBlock > startOfBlock) {
				blockSeq = charBuffer.subSequence(startOfBlock, endOfBlock);
				lines.add(new TailerListener.LogEntry(
						fileoffset + startOfBlock, blockSeq.toString()));
			}
			startOfBlock = endOfBlock;
		}
		final long filePositionForNextBufferFill;
		// Deal with the remainder in the buffer
		final int limit = this.fileByteBuffer.limit();
		final int capacity = this.fileByteBuffer.capacity();
		if (endOfBlock < limit) {
			// charBuffer position is 0 after the flip, so remaining() equals
			// its limit here and this takes everything after the last match.
			blockSeq = charBuffer.subSequence(endOfBlock,
					charBuffer.remaining());
			lines.add(new TailerListener.LogEntry(fileoffset + endOfBlock,
					blockSeq.toString()));
			if (endOfBlock == 0) {
				// there was no match in the entire buffer
				if (limit < capacity) {
					// The buffer was not filled to capacity, so if more bytes
					// becomes available we might find a divider.
					filePositionForNextBufferFill = fileoffset;
				} else {
					// Buffer was filled so it does not help to rescan this area
					// of the file, we will never find a divider
					filePositionForNextBufferFill = fileoffset + limit;
				}
			} else {
				filePositionForNextBufferFill = fileoffset + endOfBlock;
			}
		} else {
			filePositionForNextBufferFill = fileoffset + endOfBlock;
		}

		return filePositionForNextBufferFill;
	}

	/**
	 * Decodes fileByteBuffer into charBuffer. A decode pass is expected to end
	 * in underflow (all input consumed); anything else is counted against
	 * MAX_UNDERFLOW_ERRORS since it usually indicates a binary file.
	 * 
	 * @throws IOException
	 *             when too many decode passes have failed to end in underflow.
	 */
	private synchronized void decode() throws IOException {
		CoderResult coderResult = charsetDecoder.decode(fileByteBuffer,
				charBuffer, true);
		logErrors(coderResult);
		if (coderResult.isUnderflow()) {
			// Drain any internal decoder state into the char buffer.
			coderResult = charsetDecoder.flush(charBuffer);
			logErrors(coderResult);
		} else {
			log.error("Unable to decode file properly, decoder did not return underflow, is the file binary?");
			underflowerrorCounter-- ;
			if (underflowerrorCounter < 0) {
				throw new IOException("Too many underflows, unable to decode file. Is this a binary file?") ;
			}
		}
	}

	/**
	 * Logs overflow and decoding-error results; does not throw.
	 * 
	 * @param coderResult
	 *            the result of a decode or flush call.
	 */
	private void logErrors(CoderResult coderResult) {
		if (coderResult.isOverflow()) {
			log.error("Buffer overflow, the character buffer is not large enough to decode the characters, some information may be lost");
		} else if (coderResult.isError()) {
			log.warn("There was some error in decoding the file given the current charset");
		}
	}

	/**
	 * Resets the tailer counters for the current file. On I/O failure the
	 * channel is closed and discarded, leaving the tailer uninitialized until
	 * setFile is called again.
	 * 
	 * @throws NullPointerException
	 *             if no file channel has been set.
	 */
	@Override
	public void restart() {
		if (this.fileChannel == null) {
			log.warn("Restart called on tailer but no file channel has been set");
			throw new NullPointerException(
					"File channel is null when restart was called");
		}
		try {
			resetCounters(this.fileChannel.size());
		} catch (IOException ex) {
			log.error(ex);
			if (fileChannel != null) {
				try {
					fileChannel.close();
				} catch (IOException ex1) {
					log.error(ex1);
				}
			}
			fileChannel = null;
		}
	}
}
