package de.pyranja.storage.core;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.ConcurrentSkipListSet;

import javax.annotation.concurrent.ThreadSafe;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.io.Files;

import de.pyranja.storage.exception.BinaryStorageException;
import de.pyranja.storage.exception.BlobNotFoundException;
import de.pyranja.storage.support.BlobToFile;

/**
 * The Storage is responsible for persisting binary data (blobs) on the local
 * file system and provides read access to it. Blob ids are translated to file
 * paths by the configured {@link BlobToFile} resolver; new data is imported
 * from a {@link Staging} area.
 * <p>
 * Thread-safety: all mutable state is confined to the concurrent
 * {@link #marked} set; the resolver and staging collaborators are assumed to
 * be thread-safe themselves.
 * @author Chris Borckholder
 */
@ThreadSafe
public class Storage {
	/* slf4j-logger */
	private static final Logger log = LoggerFactory.getLogger(Storage.class);
	// translates id to file path
	private final BlobToFile resolver;
	// source of imported data
	private final Staging eden;
	// holds blobs that are enqueued for deletion; concurrent set so that
	// remove/close/clean may touch it from different threads without locking
	private final Set<String> marked;
	
	/**
	 * Creates a file based blob tree.
	 * @param resolver that translates blob ids to file paths
	 * @param eden source of imported data
	 */
	public Storage(final BlobToFile resolver, final Staging eden) {
		super();
		this.resolver = resolver;
		this.eden = eden;
		marked = new ConcurrentSkipListSet<String>();
	}
	
	/**
	 * Lifecycle method. Tries to clean up marked blobs a last time. Blobs
	 * still contained in {@link #marked} afterwards could not be deleted.
	 */
	public void shutdown() {
		this.clean();
		log.info("{} shut down - remaining marked blobs {}", this, marked);
	}
	
	/**
	 * Instructs the Storage to pull the data for {@code id} from the staging
	 * area and persist it. If the blob is already stored, the staged copy is
	 * released instead. A pending deletion mark for {@code id} is cleared.
	 * @param id of data
	 * @return true if the blob did not exist and was pulled in now
	 * @throws BlobNotFoundException if the blob is not staged
	 * @throws BinaryStorageException if moving the staged data into the
	 *         storage fails
	 */
	public boolean pull(final String id) {
		log.trace("pulling {}", id);
		boolean imported = false;
		marked.remove(id);	// may have been marked for deletion earlier
		final File target = resolver.resolve(id);
		// NOTE(review): exists()/move is check-then-act; concurrent pulls of
		// the same id could race here - confirm callers serialize per id
		if (!target.exists()) {	// must fetch from eden
			final File source = eden.take(id);
			try {
				Files.move(source, target);
				imported = true;
			} catch (IOException e) {
				throw new BinaryStorageException("Failed to pull from "+ eden, id, e);
			}
		} else {	// blob already stored
			eden.free(id);
		}
		log.trace("pulled {} - was imported? {}", id, imported);
		return imported;
	}

	/**
	 * Invokes removal of the data associated with {@code id}. If the data is
	 * currently in use, removal may be deferred until the blob is marked and
	 * later cleaned. Removing a non-existent blob is treated as success
	 * (idempotent delete).
	 * @param id of data
	 * @return true if the data was removed, false if removal was deferred
	 */
	public boolean remove(final String id) {
		log.trace("removing {}", id);
		boolean removed = true;
		final File target = resolver.resolve(id);
		try {
			// result intentionally ignored: an absent file means the blob is
			// gone, which is the desired end state
			java.nio.file.Files.deleteIfExists(target.toPath());
		} catch (IOException e) {
			// If deletion fails we add the blob to the set of marked ones. 
			// These should eventually be removed.
			log.warn("failed to remove a blob - marking {} for deletion", id, e);
			marked.add(id);
			removed = false;
		}
		return removed;
	}

	/**
	 * Attempts to find the data associated with {@code id} and opens a stream 
	 * to it. Closing the returned stream notifies this storage via
	 * {@link #close(String)} so deferred deletions can be retried.
	 * @param id of data
	 * @return a stream containing the data
	 * @throws BlobNotFoundException if id was not found
	 */
	public InputStream open(final String id) {
		log.trace("opening {}", id);
		final File target = resolver.resolve(id);
		try {
			final FileInputStream fin = new FileInputStream(target);
			return new BlobInputStream(fin, this, id);
		} catch (FileNotFoundException e) {
			throw new BlobNotFoundException(id, e);
		}
	}

	/**
	 * Notifies the {@link Storage} that a stream to the data associated with
	 * {@code id} was closed. If the blob was marked for deletion in the
	 * meantime, its removal is retried immediately; otherwise the
	 * notification has no effect.
	 * @param id of data
	 */
	public void close(final String id) {
		log.trace("closing {}", id);
		// eager check if this blob should be removed
		if (marked.remove(id)) {
			this.remove(id);
		}
	}
	
	/**
	 * Attempts to remove all marked blobs. Blobs whose removal fails again
	 * are re-marked by {@link #remove(String)} and retried on a later run.
	 */
	public void clean() {
		// drain all marked blobs to a temporary snapshot to avoid looping
		// endlessly when remove() re-adds a failed id to the live set
		Set<String> process = new HashSet<String>();
		for (Iterator<String> it = marked.iterator(); it.hasNext();) {
			process.add(it.next());
			it.remove();
		}
		// attempt removal
		int removed = 0;
		for (String id : process) {
			if (this.remove(id)) {
				removed++;
			}
		}
		log.debug("out of {} marked blobs, {} were successfully removed", process.size(), removed);
	}

	/* (non-Javadoc)
	 * @see java.lang.Object#toString()
	 */
	@Override
	public String toString() {
		return "Storage [resolver=" + resolver + "]";
	}
	
	/**
	 * A stream decorator that invokes {@link Storage#close(String)} on the
	 * storage it originated from when it is closed, even if closing the
	 * underlying stream fails.
	 * @author Chris Borckholder
	 */
	static class BlobInputStream extends FilterInputStream {

		// storage to notify on close
		private final Storage parent;
		// id of the blob backing this stream
		private final String id;
		
		/**
		 * Create a BlobInputStream wrapping given InputStream and calling back
		 * to given {@link Storage}.
		 * @param in wrapped stream
		 * @param parent origin storage
		 * @param id of blob that is the source of this stream
		 */
		BlobInputStream(final InputStream in, final Storage parent, final String id) {
			super(in);
			this.parent = parent;
			this.id = id;
		}
		
		/* (non-Javadoc)
		 * @see java.io.FilterInputStream#close()
		 */
		@Override
		public void close() throws IOException {
			try {
				super.close();
			} finally {
				// always notify, even when super.close() throws
				parent.close(id);
			}
		}
	}
}
