/**
 * 
 */
package edu.umd.clip.lm.storage;

import java.io.*;
import java.lang.management.*;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.*;
import java.util.Map.Entry;
import java.util.zip.GZIPInputStream;

import edu.umd.clip.lm.factors.FactorTuple;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.storage.compact.*;
import edu.umd.clip.lm.storage.compact.DbDescriptor.WordFileDesc;
import edu.umd.clip.lm.util.*;

/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class CompactProbTreeStorage extends AbstractProbTreeStorage {
	// LRU cache of word files decoded from disk, shared across all models (reading mode).
	private final Cache wordFileCache;
	// Per-model storage, indexed by the model's numeric id; slots stay null until opened.
	ModelStorage models[];
	// Maximum size of a single on-disk data file before a new one is started (512 MB).
	int maxFileSize = 512 * 1024 * 1024;
	// Requested cache size in bytes. NOTE(review): stored but not read anywhere in this
	// file — the word-file cache below is sized by entry count instead; confirm intent.
	long cacheSize;
	// Whether word files are gzip-compressed when compactified to disk.
	boolean compress = false;
	private static final String DESCRIPTOR_FNAME = "descriptor.data.gz";
	private static final String TMP_WORDS_DIR = "_tmp";
	// Root directory of the database, taken from the experiment configuration.
	private File dbPath;
	
	/**
	 * Creates the storage rooted at the experiment's database directory, creating
	 * the directory if it does not yet exist.
	 *
	 * @param cacheSize requested cache size in bytes (see note on the field)
	 */
	public CompactProbTreeStorage(long cacheSize) {
		this.cacheSize = cacheSize;
		Experiment exp = Experiment.getInstance();
		dbPath = new File(exp.getFiles().getDb());
		
		if (!dbPath.isDirectory()) {
			dbPath.mkdirs();
		}

		models = new ModelStorage[exp.getNumLMs()];
		// Hard capacity: 500 entries per LM. Soft capacity is deliberately 0
		// (the "0*" keeps the per-LM scaling visible for future tuning).
		wordFileCache = new Cache(500*exp.getNumLMs(), 0*exp.getNumLMs());
	}
	
	/**
	 * Per-model state: the database descriptor, its directory on disk, and either
	 * the lazily opened data files (reading mode) or the in-progress word files
	 * (writing mode).
	 */
	private static class ModelStorage {
		/**
		 * Bookkeeping for writing mode: maps each overt word to its {@link WordFile}
		 * and bounds the number of simultaneously open temp-file streams via an LRU.
		 */
		public static class WritingData implements Observer {
			// Placeholder value: the LRU map is used as an LRU *set* of open streams.
			final static private Object dummy = new Object();
			
			// LRU over open output streams; the eldest stream is detached (its file
			// handle released) once MAX_OPEN_FILES is exceeded.
			private LRU<DetachableOutputStream, Object> lru;
			/**
			 * Word -> word-file map for the model being written.
			 */
			private Map<FactorTuple, WordFile> words;
			// Cap on simultaneously open temp files, to stay under OS fd limits.
			private static final int MAX_OPEN_FILES = 800;
			/**
			 * Creates the LRU whose eviction hook detaches the least-recently-used
			 * stream once the open-file cap is reached.
			 */
			@SuppressWarnings("serial")
			public WritingData() {
				this.lru = new LRU<DetachableOutputStream, Object>(MAX_OPEN_FILES) {
					@Override
					protected boolean removeEldestEntry(Entry<DetachableOutputStream, Object> eldest) {
						boolean result = super.removeEldestEntry(eldest);
						if (result) {
							try {
								// release the evicted stream's file handle
								eldest.getKey().detach();
							} catch (IOException e) {
								// best-effort: eviction proceeds even if detach fails
								e.printStackTrace();
							}
						}
						return result;
					}
				};
			}

			/**
			 * @return the words
			 */
			public Map<FactorTuple, WordFile> getWords() {
				return words;
			}

			/**
			 * @param words the words to set
			 */
			public void setWords(Map<FactorTuple, WordFile> words) {
				this.words = words;
			}

			/* (non-Javadoc)
			 * @see java.util.Observer#update(java.util.Observable, java.lang.Object)
			 */
			// Marks the notifying stream as most-recently-used so eviction picks a
			// colder stream. NOTE(review): presumably WordFile notifies when a stream
			// is (re)opened — confirm against WordFile.
			@Override
			public void update(Observable o, Object arg) {
				DetachableOutputStream output = (DetachableOutputStream) arg;
				synchronized(lru) {
					lru.put(output, dummy);
				}
			}
		}
		
		// Layout of the on-disk database (word locations, filenames, cluster counts).
		DbDescriptor descriptor;
		// Open data files in reading mode, indexed by file id; opened lazily by Cache.
		RandomAccessFile files[];
		// Directory holding (or to hold) this model's database.
		File dir;
		// true when this model was opened for writing.
		boolean writing;
		// Non-null only in writing mode.
		final WritingData writingData;

		/**
		 * Opens the model for reading (no write-buffer budget needed).
		 *
		 * @param descriptor database layout descriptor
		 * @param dir        directory holding the database
		 * @throws IOException propagated from the writing-mode path (unused here)
		 */
		public ModelStorage(DbDescriptor descriptor, File dir) throws IOException {
			this(descriptor, dir, 0, false);
		}
		/**
		 * @param descriptor database layout descriptor
		 * @param dir        directory holding (or to hold) the database
		 * @param maxBuffers total write-buffer budget in bytes, split evenly across words
		 * @param writing    true to open for writing, false for reading
		 * @throws IOException 
		 */
		public ModelStorage(DbDescriptor descriptor, File dir, long maxBuffers, boolean writing) throws IOException {
			this.descriptor = descriptor;
			this.dir = dir;
			this.writing = writing;
			if (writing) {
				writingData = new WritingData();
				Set<FactorTuple> allOvertFactors = Experiment.getInstance().getTupleDescription().getAllOvertFactors().keySet();
				HashMap<FactorTuple, WordFile> wordFiles = new HashMap<FactorTuple, WordFile>(allOvertFactors.size()); 
				writingData.setWords(wordFiles);
				
				// Split the buffer budget evenly across all overt words.
				int buffersPerWord = (int) (maxBuffers / allOvertFactors.size());
				// Scratch directory for per-word temp files; removed on JVM exit.
				File tmpDir = new File(dir, TMP_WORDS_DIR);
				if (!tmpDir.isDirectory()) {
					tmpDir.mkdirs();
				}
				tmpDir.deleteOnExit();
				for(FactorTuple word : allOvertFactors) {
					WordFile wordFile = new WordFile(descriptor, tmpDir, buffersPerWord, writingData);
					wordFiles.put(word, wordFile);
				}
			} else {
				writingData = null;
				// One slot per data file; entries are opened lazily in Cache.loadItem().
				files = new RandomAccessFile[descriptor.getFilenames().length];
			}
		}
		
	}
	
	/**
	 * Attempts to open the given model's database for reading by loading its
	 * descriptor from disk.
	 *
	 * <p>Failure (missing or unreadable descriptor) is treated as "database not
	 * present": the method returns {@code false} rather than throwing, so callers
	 * can fall back to building the model from scratch.
	 *
	 * @param modelid numeric id of the language model
	 * @return {@code true} if the descriptor was loaded and the model is ready
	 *         for reading, {@code false} otherwise
	 */
	public boolean openForReading(int modelid) {
		File dir = new File(dbPath, Experiment.getInstance().getLmByNumId(modelid).getId());
		ObjectInputStream ois = null;
		try {
			ois = new ObjectInputStream(IO.getInputStream(new File(dir, DESCRIPTOR_FNAME)));
			DbDescriptor descriptor = new DbDescriptor(ois);
			
			ModelStorage model = new ModelStorage(descriptor, dir);
			models[modelid] = model;
			return true;
		} catch (IOException e) {
			// best-effort: an unreadable descriptor simply means "not available"
		} catch (ClassNotFoundException e) {
			// descriptor written by an incompatible version; treat as unavailable
		} finally {
			// Close the stream even when DbDescriptor construction throws
			// (it was leaked on that path before).
			if (ois != null) {
				try {
					ois.close();
				} catch (IOException ignored) {
					// nothing useful to do if close itself fails
				}
			}
		}
		return false;
	}
	
	/**
	 * Prepares the given model's database directory for writing and installs a
	 * writing-mode {@code ModelStorage} in its slot.
	 *
	 * @param modelid      numeric id of the language model
	 * @param maxClusterId highest cluster id the descriptor must accommodate
	 * @throws IOException if the writing-mode storage cannot be set up
	 */
	public void openForWriting(int modelid, int maxClusterId) throws IOException {
		File modelDir = new File(dbPath, Experiment.getInstance().getLmByNumId(modelid).getId());
		if (!modelDir.isDirectory()) {
			modelDir.mkdirs();
		}

		// Budget a quarter of the maximum heap for the write buffers.
		MemoryUsage heapUsage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
		long bufferBudget = heapUsage.getMax() / 4;

		models[modelid] = new ModelStorage(new DbDescriptor(maxClusterId), modelDir, bufferBudget, true);
	}
	
	/**
	 * Immutable composite cache key: the model a word file belongs to plus the
	 * packed factor bits identifying the word.
	 */
	private static class CacheKey {
		final int modelid;
		final long factorBits;

		/**
		 * @param modelid    numeric id of the model
		 * @param factorBits packed factor bits identifying the word
		 */
		public CacheKey(int modelid, long factorBits) {
			this.modelid = modelid;
			this.factorBits = factorBits;
		}

		@Override
		public int hashCode() {
			// Same value as the IDE-generated version: fold the long into an int,
			// then combine with the model id using the usual 31 multiplier.
			int folded = (int) (factorBits ^ (factorBits >>> 32));
			return 31 * (31 * 1 + folded) + modelid;
		}

		@Override
		public boolean equals(Object obj) {
			if (this == obj) {
				return true;
			}
			if (obj == null || getClass() != obj.getClass()) {
				return false;
			}
			CacheKey that = (CacheKey) obj;
			return factorBits == that.factorBits && modelid == that.modelid;
		}
	}
	
	/**
	 * LRU cache of {@link WordFile}s decoded from the on-disk database. On a miss,
	 * {@link #loadItem(CacheKey)} locates the word's byte range via the model's
	 * descriptor, reads it with a positional channel read, optionally gunzips it,
	 * and parses it into a {@code WordFile}.
	 */
	private class Cache extends LRUCache<CacheKey, WordFile> {

		/**
		 * @param hardCapacity maximum number of cached word files
		 * @param softCapacity soft retention threshold (semantics defined by LRUCache)
		 */
		public Cache(int hardCapacity, int softCapacity) {
			super(hardCapacity, softCapacity);
		}

		/* (non-Javadoc)
		 * @see edu.umd.clip.lm.util.LRUCache#loadItem(java.lang.Object)
		 */
		@Override
		protected WordFile loadItem(CacheKey key) {
			WordFile wordFile = null;
			try {
				ModelStorage modelStorage = models[key.modelid];
				WordFileDesc wordDesc = modelStorage.descriptor.getWordFileDesc(key.factorBits);
				
				byte fileid = wordDesc.fileId;
				// Lazily open the backing data file. The previous double-checked
				// locking was unsafe: array elements are not volatile, so a thread
				// could observe a partially published RandomAccessFile. Always read
				// and initialize the slot under the lock instead.
				RandomAccessFile file;
				synchronized (modelStorage.files) {
					file = modelStorage.files[fileid];
					if (file == null) {
						file = new RandomAccessFile(new File(modelStorage.dir, modelStorage.descriptor.getFilename(fileid)), "r");
						modelStorage.files[fileid] = file;
					}
				}
				byte data[] = new byte[wordDesc.size];
				ByteBuffer buffer = ByteBuffer.wrap(data);
				// Positional read: does not disturb the channel's own position, so
				// concurrent readers of the same file do not interfere.
				FileChannel channel = file.getChannel();
				int bytesRead = channel.read(buffer, wordDesc.offset);
				assert(bytesRead == wordDesc.size);
				InputStream input = new ByteArrayInputStream(data, 0, wordDesc.size);
				if (wordDesc.compressed) {
					input = new GZIPInputStream(input, 4096);
				}
				try {
					wordFile = new WordFile(modelStorage.descriptor, new DataInputStream(input));
				} finally {
					// close even when WordFile parsing throws (was leaked before)
					input.close();
				}
			} catch(IOException e) {
				// cache contract: a failed load yields null rather than an exception
				e.printStackTrace();
			}
			return wordFile;
		}
		
	}
	/* (non-Javadoc)
	 * @see edu.umd.clip.lm.storage.AsyncProducer#request(java.lang.Object, edu.umd.clip.lm.storage.AsyncConsumer)
	 */
	// Synchronous implementation of the async-producer interface: the prob tree is
	// loaded immediately and delivered to the callback on the calling thread.
	@Override
	public void request(Key key, AsyncConsumer<Key, OnDiskCompactProbTree> callback) {
		callback.receivedData(key, getProbTree(key));
	}

	/* (non-Javadoc)
	 * @see edu.umd.clip.lm.storage.AbstractProbTreeStorage#getProbTree(edu.umd.clip.lm.storage.AbstractProbTreeStorage.Key)
	 */
	// Looks the word file up in the cache (loading it from disk on a miss) and
	// extracts the requested cluster's prob tree; null if the word file cannot be
	// loaded.
	@Override
	public OnDiskCompactProbTree getProbTree(Key key) {
		CacheKey cacheKey = new CacheKey(key.modelid, key.factorBits);
		WordFile wordFile = wordFileCache.getItem(cacheKey);
		if (wordFile != null) {
			return wordFile.getProbTree(key.clusterid);
		}
		return null;
	}

	/* (non-Javadoc)
	 * @see edu.umd.clip.lm.storage.AbstractProbTreeStorage#setProbTree(edu.umd.clip.lm.storage.AbstractProbTreeStorage.Key, edu.umd.clip.lm.model.OnDiskCompactProbTree)
	 */
	// Writing-mode only: routes the prob tree to the word file that owns the
	// key's factor bits.
	@Override
	public void setProbTree(Key key, OnDiskCompactProbTree probTree) {
		FactorTuple word = new FactorTuple(key.factorBits);
		Map<FactorTuple, WordFile> wordMap = models[key.modelid].writingData.getWords();
		wordMap.get(word).setProbTree(key.clusterid, probTree);
	}

	/* (non-Javadoc)
	 * @see edu.umd.clip.lm.storage.AbstractProbTreeStorage#getStats()
	 */
	/**
	 * @return a one-line human-readable summary of the word-file cache performance
	 */
	@Override
	public String getStats() {
		// Fixed format string: the original "%%%f" rendered the percent sign before
		// the number (e.g. "%85.0 hits"); it now prints "85.00% hits".
		return "Compact: word-cache stats: " + String.format("%.2f%% hits out of %d", wordFileCache.getHitRatio()*100, wordFileCache.getRequests());
	}

	/* (non-Javadoc)
	 * @see edu.umd.clip.lm.storage.AbstractProbTreeStorage#requestProbTree(edu.umd.clip.lm.storage.AbstractProbTreeStorage.Key)
	 */
	// Intentionally a no-op: this storage loads prob trees synchronously in
	// getProbTree(), so there is nothing to prefetch.
	// NOTE(review): presumably other storage implementations use this hook for
	// asynchronous prefetch — confirm against AbstractProbTreeStorage.
	@Override
	protected void requestProbTree(Key key) {
	}

	/**
	 * Records the total count for a cluster in the given model's descriptor
	 * (writing mode).
	 *
	 * @param modelid    numeric id of the model
	 * @param clusterid  cluster whose count is being recorded
	 * @param totalCount total count to store
	 */
	@Override
	public void setTotalClusterCount(int modelid, int clusterid, long totalCount) {
		DbDescriptor modelDescriptor = models[modelid].descriptor;
		modelDescriptor.setClusterCount(clusterid, totalCount);
	}

	
	/**
	 * Finalizes a model's database: flushes every word file, compactifies them
	 * into a small number of large data files, and persists the descriptor so
	 * {@link #openForReading(int)} can find everything. Models opened for reading
	 * are left untouched.
	 *
	 * @param modelid numeric id of the model to close
	 * @throws IOException if flushing or writing the database files fails
	 */
	public void close(int modelid) throws IOException {
		ModelStorage model = models[modelid];
		if (model.writing) {
			/*
			for(WordFile wordFile : model.writingData.getWords().values()) {
				wordFile.finishWriting();
			}
			*/
			if (!model.dir.isDirectory()) {
				model.dir.mkdirs();
			}
			
			// Data files are named "file-<id>.data"; a new one is started whenever
			// the current one grows past maxFileSize.
			ArrayList<String> filenames = new ArrayList<String>();
			byte fileId = (byte) filenames.size();
			String fname = "file-" + fileId + ".data";
			filenames.add(fname);
			
			// NOTE(review): currentFile is not closed if an exception escapes the
			// loop below — consider a try/finally.
			FileOutputStream currentFile = new FileOutputStream(new File(model.dir, fname));
			// Parallel arrays: packed factor bits of each word, and the on-disk
			// location descriptor of that word's compacted data.
			long words[] = new long[model.writingData.getWords().size()];
			WordFileDesc descs[] = new WordFileDesc[words.length];
			
			int wordNum = 0;
			for(Map.Entry<FactorTuple, WordFile> entry : model.writingData.getWords().entrySet()) {
				FactorTuple word = entry.getKey();
				WordFile wordFile = entry.getValue();
				
				// flush the buffers
				wordFile.finishWriting();
				// compactify
				long startPosition = currentFile.getChannel().position();
				if (startPosition >= maxFileSize) {
					// current data file is full — roll over to a fresh one
					currentFile.close();
					
					fileId = (byte) filenames.size();
					fname = "file-" + fileId + ".data";
					filenames.add(fname);
					currentFile = new FileOutputStream(new File(model.dir, fname));
					startPosition = 0;
				}
				
				wordFile.compactify(currentFile, compress);
				long endPosition = currentFile.getChannel().position();
				
				// Record file id, offset, and length of this word's compacted data.
				// NOTE(review): offsets are narrowed to int, so a single data file
				// must stay under 2 GB — maxFileSize (512 MB) plus one word's data
				// presumably guarantees this; confirm the bound on word-file size.
				WordFileDesc wordDesc = new WordFileDesc(fileId, (int)startPosition, (int) (endPosition - startPosition), compress);
				words[wordNum] = word.getBits();
				descs[wordNum] = wordDesc;
				++wordNum;
			}
			currentFile.close();
			// Build the read-only word -> location map and install it in the descriptor.
			CompactReadOnlyLong2ObjectHashMap<WordFileDesc> wordDescriptors = new CompactReadOnlyLong2ObjectHashMap<WordFileDesc>(words, descs);
			model.descriptor.setWordDescriptors(wordDescriptors);
			
			model.descriptor.setFilenames(filenames.toArray(new String[0]));
			
			// Persist the descriptor so the database can be reopened for reading.
			ObjectOutputStream ous = new ObjectOutputStream(IO.getOutputStream(new File(model.dir, DESCRIPTOR_FNAME)));
			model.descriptor.writeExternal(ous);
			ous.close();
			
			// Report the deduplication statistics WordFile gathered during writing.
			long totalProbTrees = WordFile.getTotalprobtrees();
			long duplicateProbTrees = WordFile.getDuplicateprobtrees();
			long savedSpace = WordFile.getTotalspacesaved();
			
			System.err.printf("Eliminated duplicated %d out of %d prob trees, saved %d bytes\n", 
					duplicateProbTrees, totalProbTrees, savedSpace);
		}
	}

	/* (non-Javadoc)
	 * @see edu.umd.clip.lm.storage.AbstractProbTreeStorage#closeAll()
	 */
	// Flushes and closes every model currently open for writing, then releases
	// all model slots. Close failures are reported but do not stop the sweep.
	@Override
	public void closeAll() {
		for (int modelid = 0; modelid < models.length; ++modelid) {
			ModelStorage model = models[modelid];
			if (model != null && model.writing) {
				try {
					close(modelid);
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
			models[modelid] = null;
		}
	}
}
