/*
 * Copyright 2011-2012 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sabayframework.mem.impl.disk;

import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * Disk storage keeps data records on disk and references in memory. 
 * Memory consumption is proportional to number of entries and does not depend on size of the 
 * entries.
 * Call compact() periodically. It reclaims unused disk space.
 * Limits: number of records <= 2^31, size of the file <= 2^40
 * Notes: It does not cache or compress data, use external systems to 
 * reduce IO operations.
 * The storage supports READ COMMITTED isolation level.
 * Use Transaction.tryLock in order to enforce higher isolation levels.
 * @author Alex Antonau
 * */
public abstract class AbstractDiskStorage {

	private static final String ENTRIES_COPY_FILE_NAME = "index.copy";
	private static final String ENTRIES_FILE_NAME = "index.log";
	/** Marker for a middle record of a multi-record commit. */
	private static final byte COMMIT_IN_PROGRESS = 0;
	/** Marker for the first record of a multi-record commit. */
	private static final byte COMMIT_BEGIN = 1;
	/** Marker for the last (or only) record of a commit; recovery applies a batch only when it sees this. */
	private static final byte COMMIT_END = 2;
	/** Compaction threshold: a file is rewritten when less than 75% of it holds live data. */
	private static final double MIN_USAGE_RATIO=0.75;

	protected final DataLog dataLog;
	protected final String workPath;
	protected final LogFile indexLog;
	/** High-water mark used to hand out fresh addresses; guarded by synchronized(this). */
	private volatile long maxUsedAddress=1L;
	
	/** In-memory index: address -> data entry. Becomes null once the storage is closed. */
	protected Long2ObjectOpenHashMap<DataEntry> entries;
	
	/** Guards both the in-memory index and the on-disk index log. */
	protected final ReentrantReadWriteLock lock;
		
	/**
	 * Opens (or creates) the storage rooted at {@code workPath} and runs recovery.
	 * @param workPath directory that holds the index and data log files
	 * @throws IOException if the files cannot be opened or recovered
	 */
	public AbstractDiskStorage(String workPath) throws IOException {
		this.indexLog = new LogFile(new File(workPath, ENTRIES_FILE_NAME));
		this.lock = indexLog.getLock();
		this.workPath = workPath; 
		dataLog = new DataLog(workPath);
		recover();
	}

	/** Returns the number of live records currently in the storage. */
	public long getNumberOfRecords() {
		lock.readLock().lock();
		try{
			return entries.size();
		}finally{
			lock.readLock().unlock();
		}
	}

	/** Returns true until {@link #close()} or {@link #delete()} has been called. */
	public boolean isOpen() {
		return entries!=null;
	}
	
	/** @throws ClosedStorageException if the storage has been closed or deleted */
	protected void checkOpen() throws IOException{
		if(entries==null) throw new ClosedStorageException("Storage was closed");
	}

	/**
	 * Returns an address that is not currently in use.
	 * Addresses wrap around at Integer.MAX_VALUE and restart from 1,
	 * skipping addresses that are still occupied.
	 * @throws IOException if the storage is closed or the index is full
	 */
	public long getNextAvailableAddress() throws IOException{
		lock.readLock().lock();
		try{
			checkOpen();
			if(entries.size()==Integer.MAX_VALUE)
				throw new IOException("Data map is out of free space");
			synchronized (this)	{
				if(maxUsedAddress == Integer.MAX_VALUE) maxUsedAddress = 1L;
				long address = maxUsedAddress++;
				while(contains(address)) {
					// BUGFIX: previously wrapped to Integer.MIN_VALUE here, producing
					// negative addresses; restart from 1 as the pre-loop reset does and
					// as the documented limit (records <= 2^31) implies.
					if(maxUsedAddress == Integer.MAX_VALUE) maxUsedAddress = 1L;
					address = maxUsedAddress++;
				}
				return address;
			}
		} finally{
			lock.readLock().unlock();
		}
	}
	/** Size of one on-disk index record: the serialized entry plus one commit-marker byte. */
	private static int getEntrySize(){
		return DataEntry.sizeOf() + 1;
	}
	/** Opens {@code file} for buffered appending. Caller is responsible for closing. */
	protected static DataOutputStream createOutputStream(File file) throws IOException {
		FileOutputStream fos = new FileOutputStream(file, true);
		BufferedOutputStream bos = new BufferedOutputStream(fos);
		DataOutputStream dos = new DataOutputStream(bos);
		return dos;
	}
	
	/**
	 * Applies the given entries to the in-memory index and appends them atomically
	 * to the index log. The last record is tagged COMMIT_END so that recovery
	 * applies the whole batch or none of it.
	 */
	protected void commitEntries(final DataEntry[] entries) throws IOException{
		indexLog.write(new LogFile.AtomicWrite(){
			public void write(DataOutputStream dos) throws IOException {
				checkOpen();
				for(int i=0;i<entries.length;++i){
					updateEntry(entries[i]);
				}
				if(entries.length==1) {
					saveEntry(dos, entries[0], AbstractDiskStorage.COMMIT_END);
				}
				else if(entries.length > 1){
					saveEntry(dos, entries[0], AbstractDiskStorage.COMMIT_BEGIN);
					for(int i=1;i<entries.length-1;++i){
						saveEntry(dos, entries[i], AbstractDiskStorage.COMMIT_IN_PROGRESS);
					}
					saveEntry(dos, entries[entries.length -1], AbstractDiskStorage.COMMIT_END);
				}
			}
		});
	}
	/**
	 * Rebuilds the in-memory index from the index log and drops any partially
	 * written trailing record by truncating the file to a whole number of records.
	 */
	private void recover() throws IOException{
		loadEntries();
		int entrySize = getEntrySize();
		try{
			//truncate file if needed
			long length = indexLog.getLength();
			if(length % entrySize !=0){
				RandomAccessFile raf= new RandomAccessFile(indexLog.getFile(), "rw");
				try{
					length = (length / entrySize) * entrySize;
					raf.setLength(length);
				} finally{
					// BUGFIX: previously leaked the file handle when setLength threw
					raf.close();
				}
			}
		} catch (Exception e) {
			//can't truncate: rewrite a clean index file from the in-memory state
			compactMapFile();
		}
	}
	/** Serializes one index record: the entry followed by its commit-marker byte. */
	private static void saveEntry(DataOutputStream raf, DataEntry e, byte commitMarker) throws IOException{
		e.writeTo(raf);
		raf.writeByte(commitMarker);
	}
	
	/*
	 * Method copies all of the non-stale data from mostly-stale log files
	 * into the currently active log, then deletes the now "empty" log
	 * file from the filesystem. Also rewrites the index file when it has
	 * accumulated too many stale records.
	 * */
	public void compact() throws IOException{
		//start only when no one is waiting for the lock
		if(lock.getQueueLength()==0 && lock.writeLock().tryLock()){
			try{
				compactLogFiles();
				if(lock.getQueueLength()==0){
					int entrySize = getEntrySize();
					long numberOfRecords = indexLog.getLength() / entrySize;
					double usageRatio = 1.0 * entries.size() / numberOfRecords; 
					if(usageRatio < MIN_USAGE_RATIO) compactMapFile();
				}
			} finally{
				lock.writeLock().unlock();
			}
		}
	}
		
	/**
	 * Rewrites the index file from the in-memory index, dropping stale and
	 * zero-length records, then atomically substitutes it for the old file.
	 */
	private void compactMapFile() throws IOException{
		File newFile = new File(workPath, ENTRIES_COPY_FILE_NAME);
		newFile.delete();
		DataOutputStream dos = createOutputStream(newFile);
		try{
			for(DataEntry e : entries.values()){
				//also remove entries with zero length
				if(e.getLength()>0)
					saveEntry(dos, e, COMMIT_END);
			}
			dos.close();
			dos = null;
			indexLog.substituteFile(newFile);
		} finally{
			if(dos!=null)dos.close();
		}
	}
	/** Compacts every under-utilized, unlocked data log; the active log last. */
	private void compactLogFiles() throws IOException{
		DataLogStats[] stats = getDataLogStats();
		//compact non-active logs
		for(int i=0;i<stats.length;++i){
			if(dataLog.getCurrentLogNumber() == stats[i].logNumber){
				continue;
			}
			// NOTE(review): non-active logs use getUtilizationRatio() while the
			// current log below uses getUsageRatio() — confirm both are intended.
			if(!stats[i].locked && stats[i].getUtilizationRatio() < MIN_USAGE_RATIO){
				if(lock.getQueueLength()>0) return; //break the loop if someone is waiting
				copyLog(stats[i].logNumber);
			}
		}
		//compact current log: roll over to a fresh one first, then migrate its data
		DataLogFile currentFile = dataLog.getLog(dataLog.getCurrentLogNumber());
		DataLogStats stat = getDataLogStat(currentFile);
		if(!stat.locked && stat.getUsageRatio() < MIN_USAGE_RATIO){
			dataLog.createNew();
			copyLog(stat.logNumber);
		}
	}
	/**
	 * Copies all live records from the given log into the active log (in batches
	 * bounded by half of the free heap), then deletes the emptied log file.
	 * Returns early — without deleting the source log — when another thread is
	 * waiting for the lock; a later compact() run will finish the job.
	 */
	private void copyLog(int logNumber) throws IOException{
		// collect the live record(s) that still reside in the given log
		ArrayList<DataEntry> logEntries=new ArrayList<DataEntry>();
		for(DataEntry e : entries.values()){
			if(logNumber == e.getLogFile()){
				logEntries.add(e);
			}
		}
		// copy in on-disk order so reads are sequential
		Collections.sort(logEntries, new Comparator<DataEntry>(){
			public int compare(DataEntry o1, DataEntry o2) {
				long diff = o1.getLogOffset() - o2.getLogOffset();
				return diff < 0 ? -1 : diff == 0? 0: 1;
			}
		});
		if(lock.getQueueLength()>0) return; //abort if someone is waiting
		long bufferSize = 0;
		ArrayList<DataEntry> entryBuffer = new ArrayList<DataEntry>();
		for(DataEntry e : logEntries){
			entryBuffer.add(e);
			bufferSize += e.getLength();
			long availableMemory = Runtime.getRuntime().freeMemory() / 2;
			// BUGFIX: the old code inverted this test ("continue" when the buffer
			// exceeded the threshold), so oversized batches were never flushed and
			// their records were lost when the log was deleted below.
			if(bufferSize < availableMemory) continue;
			if(lock.getQueueLength()>0) return; //abort if someone is waiting
			flushEntryBuffer(entryBuffer);
			entryBuffer.clear();
			bufferSize = 0;
		}
		// BUGFIX: flush the final partial batch before deleting the source log
		if(!entryBuffer.isEmpty()){
			if(lock.getQueueLength()>0) return; //abort if someone is waiting
			flushEntryBuffer(entryBuffer);
		}
		//all live data has been moved; the log file is now "empty", delete it
		dataLog.deleteLog(logNumber);
	}
	/** Re-writes the buffered entries into the active log inside one transaction. */
	private void flushEntryBuffer(ArrayList<DataEntry> entryBuffer) throws IOException{
		byte[][] buffers = new byte[entryBuffer.size()][];
		for(int i=0;i<entryBuffer.size();++i){
			buffers[i]=dataLog.readData(entryBuffer.get(i));
		}
		DiskTransactionImpl t = beginTransaction();
		try{
			for(int i=0;i<entryBuffer.size();++i){
				t.update(entryBuffer.get(i).getAddress(), buffers[i]);
			}
			t.commit();
		} finally{
			if(t.isActive()) t.rollback();
		}
	}
	/** Computes usage statistics for a single data log file. */
	public DataLogStats getDataLogStat(DataLogFile file) throws IOException{
		DataLogStats stat = new DataLogStats();
		stat.logNumber = file.getNumber();
		stat.totalBytes = file.getLength();
		stat.maxBytes = dataLog.getMaxFileSize();
		lock.readLock().lock();
		try{
			checkOpen();
			for(DataEntry e : entries.values()){
				if(e.getLogFile() == file.getNumber()){
					stat.entries++;
					stat.usedBytes+=e.getLength();
				}
			}
			stat.locked  = hasLogLocked(stat.logNumber);
		} finally{
			lock.readLock().unlock();
		}
		return stat;
	}
	/** Computes usage statistics for every data log file, sorted by natural order. */
	public DataLogStats[] getDataLogStats() throws IOException{
		HashMap<Integer, DataLogStats> stats=new HashMap<Integer, DataLogStats>(dataLog.getFilesNumber()*2);
		lock.readLock().lock();
		try{
			checkOpen();
			DataLogFile[] files = dataLog.getLogFiles();
			for(DataLogFile f:files){
				DataLogStats stat = new DataLogStats();
				stat.logNumber = f.getNumber();
				stat.totalBytes = f.getLength();
				stat.maxBytes = dataLog.getMaxFileSize();
				stats.put(f.getNumber(), stat);
			}
			for(DataEntry e : entries.values()){
				Integer key = e.getLogFile();
				DataLogStats stat = stats.get(key);
				// BUGFIX: guard against an entry referencing a log file that was not
				// listed above — mirrors the null check in the locked-files loop below
				if(stat==null) continue;
				stat.entries++;
				stat.usedBytes+=e.getLength();
			}
			for(DataLogFile log : files){
				if(hasLogLocked(log.getNumber())){
					DataLogStats stat = stats.get(log.getNumber());
					if(stat!=null) stat.locked = true;
				}
			}
		} finally{
			lock.readLock().unlock();
		}
		DataLogStats[] res=stats.values().toArray(new DataLogStats[stats.size()]);
		Arrays.sort(res);
		return res;
	}
	
	/**
	 * Applies one entry to the in-memory index: a positive length inserts or
	 * replaces, a zero length deletes. Also advances the address high-water mark.
	 */
	private DataEntry updateEntry(DataEntry e){
		Long key = e.getAddress();
		if(e.getLength() > 0){
			if(key.longValue() > maxUsedAddress) maxUsedAddress = key.longValue();
			entries.put(key, e);
		}else entries.remove(key);
		return e;
	}
		
	/**
	 * Loads the index log into memory. Records are buffered until a COMMIT_END
	 * marker is seen, so an interrupted multi-record commit at the tail of the
	 * file is discarded rather than half-applied.
	 */
	private void loadEntries() throws IOException{
		int entrySize = getEntrySize();
		int numberOfRecords = (int)(indexLog.getLength() / entrySize);
		if(numberOfRecords == 0){
			entries = new Long2ObjectOpenHashMap<DataEntry>(100);
			return;
		}
		// presize ~30% above the record count to keep the hash map sparse
		entries = new Long2ObjectOpenHashMap<DataEntry>((int)(numberOfRecords * 1.3));
		FileInputStream fis=new FileInputStream(indexLog.getFile());
		int bufferSize = Math.min(numberOfRecords, 1000) * entrySize;
		BufferedInputStream bis =new BufferedInputStream(fis, bufferSize);
		DataInputStream is = new DataInputStream(bis);
		try{
		    ArrayList<DataEntry> buffer=new ArrayList<DataEntry>();
		    for(int i = 0; i<numberOfRecords;++i){
		        DataEntry e = new DataEntry();
		        e.readFromTo(is);
		        byte commitMarker = is.readByte();
		    	buffer.add(e);
		    	if(commitMarker == COMMIT_END){
		    		//commit completed: apply the whole buffered batch
		    		for(DataEntry be: buffer){
		    			Long key = be.getAddress();
			    		if(key.longValue() > maxUsedAddress) maxUsedAddress = key.longValue();
			    		if(be.getLength() == 0) entries.remove(key);
			    		else entries.put(key, be);
		    		}
		    		buffer.clear();
		    	}
		    }
		} finally {
			is.close();
		}
	}

	protected abstract DiskTransactionImpl beginTransaction() throws IOException;
	protected abstract void completeDiskTransaction(DiskTransactionImpl t) throws IOException;
	protected abstract boolean hasLogLocked(int log);
	protected abstract DiskTransactionImpl tryLock(long[] addresses, DiskTransactionImpl current);
	protected abstract void flushCurrentLog() throws IOException;
	protected abstract void write(DataEntry entry, byte[] data, int offset) throws IOException;
	
	/**
	 * Reads the record stored at {@code address}.
	 * @return the record data, or an empty array if no record exists there
	 */
	public byte[] read(long address) throws IOException {
		lock.readLock().lock();
		try{
			checkOpen();
			Long key = address;
			DataEntry e = entries.get(key);
			if(e==null || e.getLength()==0) return new byte[0];
			return dataLog.readData(e);
		}finally{
			lock.readLock().unlock();
		}
	}
	
	/** Reads the data referenced by the given entry, or an empty array if it is empty. */
	protected byte[] read(DataEntry e) throws IOException{
		lock.readLock().lock();
		try{
			checkOpen();
			if(e==null || e.getLength()==0) return new byte[0];
			return dataLog.readData(e);
		}finally{
			lock.readLock().unlock();
		}
		
	}

	/** Returns true if a non-empty record exists at {@code address}. */
	public boolean contains(long address) throws IOException {
		lock.readLock().lock();
		try{
			checkOpen();
			Long key = address;
			DataEntry e = entries.get(key);
			if(e==null || e.getLength()==0) return false;
			return true;
		}finally{
			lock.readLock().unlock();
		}
	}
	/** Closes the storage; subsequent operations throw ClosedStorageException. */
	public void close() throws IOException {
		lock.writeLock().lock();
		try{
			indexLog.close();
			dataLog.close();
			entries = null;
		} finally{
			lock.writeLock().unlock();
		}
	}

	/** Deletes all storage files from disk and closes the storage. */
	public void delete() throws IOException {
		lock.writeLock().lock();
		try{
			indexLog.delete();
			dataLog.delete();
			entries = null;
		} finally{
			lock.writeLock().unlock();
		}
	}

	/** Returns the number of bytes occupied by live index records and live data. */
	public long getUsedSpace() throws IOException {
		DataLogStats stats[] = getDataLogStats();
		long used = getNumberOfRecords() * getEntrySize();
		for(DataLogStats s: stats){
			used+=s.usedBytes;
		}
		return used;
	}
	
	/** Returns the remaining data-log capacity in bytes, never negative. */
	public long getFreeSpace() throws IOException {
		long free = dataLog.getFreeSpace();  
		return free < 0? 0 : free;
	}
	
}
