package voldemort.store.cachestore.impl;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import voldemort.store.cachestore.*;

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.util.List;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;

import static voldemort.store.cachestore.BlockUtil.*;
import static voldemort.store.cachestore.impl.CacheValue.createValue;
import static voldemort.store.cachestore.impl.ChannelStore.open;

/**
 * A cache store that keeps block metadata (and cached data) in memory while
 * persisting records through a pair of alternating {@code ChannelStore} data
 * files. When accumulated overflow exceeds a threshold, a background pack
 * thread compacts the active file into the other one.
 */

/**
 * Key/value store backed by two alternating {@link ChannelStore} files
 * (index 0 and 1): one is the active file, the other acts as its packed
 * backup. Block metadata (and cached data bytes) live in an in-memory
 * {@link ConcurrentMap}.
 * <p>
 * Thread-safety: the shared channel counters (data offset, key offset,
 * total record) are guarded by {@link #lock}; per-record mutation is
 * serialized by synchronizing on the individual {@link CacheBlock};
 * packing is serialized by {@link #packLock} plus a single-thread executor.
 */
public class CacheStore {
    private static final Log logger = LogFactory.getLog(CacheStore.class);
    // in-memory index: key -> block metadata (and cached data bytes)
    private ConcurrentMap<Key, CacheBlock> map;
    // use to protect channel object data offset, total record, key offset and log offset
    private final ReentrantLock lock = new ReentrantLock();
    // pluggable policy that decides the on-disk block size for a given data length
    private BlockSize blockSize;
    // list of channelStore which handle the persistence (index 0 / 1)
    private List<ChannelStore> list;
    // NOTE: the historical typo ("cachesotre") is intentionally preserved -
    // it is part of the on-disk file names, so correcting it would orphan
    // every existing store file.
    private static final String FILENAME = "cachesotre";
    // index of the channel currently being written to (0 or 1)
    private volatile int curIndex;
    // directory that holds the data files
    private String path;
    // accumulated size of overflowed/removed data blocks (pack trigger metric)
    private AtomicLong overflow;
    // max file size to trigger pack, default is 4 GB.
    // BUG FIX: was "(1 << 32)" - an int shift whose count wraps modulo 32,
    // so the field was 1 (and the overflow threshold below was 0).
    private volatile long maxSize = 1L << 32;
    // pack once overflow exceeds 25 percent of the max file size
    private volatile long overflowThreshold = maxSize / 4;
    // single-thread pool so at most one pack runs at a time
    private ThreadPoolExecutor packService = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
                                new LinkedBlockingQueue<Runnable>());
    // use to prevent more than two requests for packing data
    private final ReentrantLock packLock = new ReentrantLock();



    public CacheStore(String path){
        this( path, null);
    }

    /**
     * Opens (or creates) the store under {@code path}.
     *
     * @param path      directory for the store files
     * @param blockSize block-size policy, or null to use the default sizing
     * @param curIndex  active channel index, must be 0 or 1
     * @param filename  base file name; the index and extension are appended
     * @throws StoreException if {@code curIndex > 1}
     */
    public CacheStore(String path, BlockSize blockSize, int curIndex, String filename) {
        this.path = path;
        this.blockSize = blockSize;
        this.curIndex = curIndex;
        this.overflow = new AtomicLong(0);
        // typed constructor (was a raw ConcurrentHashMap)
        map = new ConcurrentHashMap<Key, CacheBlock>(1190);
        list = new CopyOnWriteArrayList<ChannelStore>();
        //check to see if more file 0 / 1
        if ( curIndex  == 0 ) {
            list.add(0, open( getPath(path) + filename+curIndex, map));
            // if file 1 already exists it holds the newer data - make it active
            if ( ChannelStore.isChannelExist(getPath(path) + filename + 1 +".ndx")) {
                list.add(1, open( getPath(path) + filename + 1, map));
                this.curIndex = 1;
            }
            else
                list.add(1, open( getPath(path) + filename + 1, map, true));
        }
        else {
            if ( curIndex > 1 ) throw new StoreException("not support for index "+ curIndex +" > 1 ");
            for ( int i = 0 ; i <= curIndex ; i ++ )
                list.add(open( getPath(path) + filename+i, map));
        }
    }



    public CacheStore(String path, BlockSize blockSize, int curIndex) {
        this(path, blockSize, curIndex, FILENAME );
    }


    public CacheStore(String path, BlockSize blockSize) {
        this( path, blockSize, 0);
    }

    /** Ensures the directory path ends with a trailing '/'. */
    private String getPath(String path) {
        if ( path.charAt( path.length()-1) == '/')
            return path ;
        else
            return path +"/" ;
    }

    /**
     * Returns the channel that owns the given block.
     *
     * @throws StoreException if the block's index is beyond the channel list
     */
    public ChannelStore getChannel(CacheBlock block) {
        int i = block.getIndex() ;
        if ( i >= list.size() ) {
            throw new StoreException("Channel list out of bound size "+list.size()+" expect " +i);
        }
        return list.get(i);
    }

    public List<ChannelStore> getList() {
        return list;
    }

    public int getCurIndex() {
        return curIndex;
    }

    public void setCurIndex(int curIndex) {
        this.curIndex = curIndex;
    }

    public ConcurrentMap<Key, CacheBlock> getMap() {
        return map;
    }

    public ReentrantLock getLock() {
        return lock;
    }

    /** Opens a read/write channel on the data file of the given index. */
    public FileChannel getExistDataChannel(int index) throws IOException {
        return new RandomAccessFile( getPath(path) + FILENAME + index +".data", "rw").getChannel();
    }

    /**
     * Looks up a key; lazily loads the data bytes from disk when the block
     * holds only metadata.
     *
     * @return the value, or null when the key is absent
     * @throws StoreException on a read failure
     */
    public Value get(Key key) {
        CacheBlock<byte[]> block = map.get( key);
        if ( block == null ) return null;
        else {
            if ( block.getData() == null ) {
                try {
                    ChannelStore channel = getChannel(block);
                    byte[] data = channel.readChannel( block.getDataOffset2Len(), channel.getDataChannel() );
                    block.setData( data);
                } catch (IOException ex) {
                    logger.error( ex.getMessage(), ex);
                    // preserve the cause (was: new StoreException(ex.getMessage()))
                    throw new StoreException(ex.getMessage(), ex);
                }
            }
            return createValue(block.getData(), block.getVersion() , block.getNode()) ;
        }
    }

    /**
     * Computes the on-disk block size for a payload, delegating to the
     * configured {@link BlockSize} policy when present and capping the
     * result at {@code MAX_BLOCK_SIZE}.
     */
    private int findBlockSize(int len) {
        if ( this.blockSize != null ) {
            int size = blockSize.defineSize(len);
            if ( size > MAX_BLOCK_SIZE ) {
                logger.warn("defineSize " + size +" exceeding "+MAX_BLOCK_SIZE);
                return MAX_BLOCK_SIZE;
            }
            else return size ;
        }
        else {
            return defineSize( len);
        }
    }

    /**
     * Creates a new block descriptor positioned at the current channel's
     * data offset. Caller must hold {@link #lock} so the offset is stable.
     */
    public CacheBlock makeBlock(int record, int blockSize, int dataLen, long version, short node) {
        long data = convertOffset4Len(list.get(curIndex).getDataOffset(), dataLen) ;
        checkFileSize(list.get(curIndex).getDataOffset(), dataLen);
        long b2v = convertVersion4Size(version, blockSize) ;
        return new CacheBlock( record, data, b2v, (byte) curIndex,  node );
    }

    /**
     * Validates the incoming version against the block and advances the
     * block's version (current + 1 when the value carries version 0,
     * otherwise the value's version).
     *
     * @throws StoreException when the incoming version is older than the block's
     */
    private void checkVersion(CacheBlock block, Value value) {
        if ( value.getVersion() > 0 ) {
            if ( value.getVersion() < block.getVersion() )
                throw new StoreException("Outdated version "+value.getVersion()+" current "+ block.getVersion() );
            if ( value.getVersion() > block.getVersion()+ 1 )
                logger.warn("Version "+value.getVersion()+" > "+ block.getVersion());
        }
        // version is 0,  just using current version + 1, otherwise overwrite with version from value
        long version = value.getVersion() == 0 ?  block.getVersion()+1 : value.getVersion();
        block.setVersion( version);
    }

    /** Copies everything except the version from src to dst. */
    private void copyBlock(CacheBlock src, CacheBlock dst) {
        dst.setStatus(src.getStatus() );
        dst.setBlockSize( src.getBlockSize() );
        dst.setDataOffset( src.getDataOffset());
        dst.setDataLen( src.getDataLen() );
        dst.setNode( src.getNode());
    }

    /**
     * Inserts or updates a key. New keys reserve space in the active
     * channel under {@link #lock}; existing keys are updated while
     * synchronized on their block, re-using the block's slot when the new
     * data fits and allocating a fresh slot (recording overflow) when it
     * does not.
     *
     * @throws StoreException on a write failure or an outdated version
     */
    public void put(Key key, Value<byte[]> value) {
        CacheBlock block = map.get( key);
        long keyOffset2Len = 0;
        if ( block == null ) {
            // write a new block
            ChannelStore channel = list.get(curIndex);
            int size = findBlockSize( value.getData().length );
            byte[] keyBytes = toKeyBytes(key);
            lock.lock();
            boolean add = true;
            try {
                block = makeBlock(channel.getTotalRecord(), size, value.getData().length, 1L , value.getNode());
                CacheBlock tmp = map.putIfAbsent(key , block);
                // concurrency: another thread may have inserted between the
                // map.get above and here; in that case work on its block
                if ( tmp != null ){
                    checkVersion(tmp, value);
                    // check if tmp's block size can hold data
                    if ( tmp.getBlockSize() <  value.getData().length )
                        copyBlock(tmp, block) ;
                    else  // otherwise the existing slot is reused in place
                        add = false;
                    //change pointer
                    block = tmp ;
                }
                else { //add mode
                    keyOffset2Len = convertOffset4Len( channel.getKeyOffset(), keyBytes.length);
                    // update offset position
                    channel.setTotalRecord(channel.getTotalRecord()+1);
                    channel.setDataOffset( channel.getDataOffset() + size);
                    channel.setKeyOffset( channel.getKeyOffset() + keyBytes.length );
                }
                // update node
                block.setNode( value.getNode());
                // update cache data
                block.setData( value.getData());
            } finally {
                lock.unlock();
            }
            try {
                //base on add mode
                if ( ! add  )
                    channel.writeExistBlock(block);
                else
                    channel.writeNewBlock(block, keyOffset2Len, keyBytes);
            } catch (IOException ex) {
                logger.error( ex.getMessage(), ex);
                // preserve the cause (was: new StoreException(ex.getMessage()))
                throw new StoreException(ex.getMessage(), ex);
            }
        }
        else {  // block is not null
            //lock on block
            synchronized( block) {
                // double check , in case deleted
                if ( map.get(key) == null ) map.put(key, block) ;
                ChannelStore channel = getChannel(block);
                // only validate when the value carries a version, i.e. > 0
                checkVersion( block, value);
                block.setNode( value.getNode());
                block.setData( value.getData());
                block.setDataLen( value.getData().length );
                checkFileSize( block.getDataOffset() , value.getData().length );
                if ( value.getData().length > block.getBlockSize() ) {
                    // data no longer fits: allocate a new slot at the end of
                    // the data file and record the abandoned space as overflow
                    int size = findBlockSize( value.getData().length );
                    overflow.addAndGet( size);
                    byte[] keyBytes = toKeyBytes(key);
                    lock.lock();
                    try {
                        block.setDataOffset( channel.getDataOffset());
                        block.setBlockSize( size);
                        block.setDataLen( value.getData().length);
                        channel.setDataOffset( channel.getDataOffset()+ size) ;
                    } finally {
                        lock.unlock();
                    }
                    // NOTE(review): keyOffset is read outside the lock and is
                    // never advanced here, unlike the add path above - confirm
                    // writeNewBlock is expected to reuse the existing key slot
                    keyOffset2Len = convertOffset4Len( channel.getKeyOffset(), keyBytes.length);
                    try {
                        channel.writeNewBlock(block, keyOffset2Len, keyBytes);
                    } catch (IOException ex) {
                        logger.error( ex.getMessage(), ex);
                        // preserve the cause (was: new StoreException(ex.getMessage()))
                        throw new StoreException(ex.getMessage(), ex);
                    }
                }
                else {
                    try {
                        channel.writeExistBlock(block);
                    } catch (IOException ex) {
                        logger.error( ex.getMessage(), ex);
                        // preserve the cause (was: new StoreException(ex.getMessage()))
                        throw new StoreException(ex.getMessage(), ex);
                    }
                }
            }
        }

    }

    // NOTE(review): name is a typo for needPackData, and nothing in this
    // class currently calls it - kept for source compatibility
    private void needPackDate() {
        if ( overflow.get() > overflowThreshold ) {
            logger.info("Need to pack data curIndex " +curIndex );
            pack();
        }
    }

    /**
     * Removes a key from the store and the persistent channel, counting the
     * freed space toward the overflow (pack) metric.
     *
     * @param key key to remove
     * @return   true for successful , false for fail (including absent key)
     */
    public boolean remove(Key key) {
        CacheBlock block = map.get( key);
        if ( block == null ) return false;
        else {
            try {
                getChannel( block ).removeBlock(block);
                // record over flow size
                overflow.addAndGet( block.getDataLen() );
                map.remove( key);
                return true;
            } catch (Exception ex) {
                logger.error(ex.getMessage() , ex);
                return false;
            }
        }
    }

    /**
     * close all channel file for this cachestore
     */
    public void close() {
        for ( int i = 0 ; i < list.size() ; i++)
            list.get(i).close();
    }

    /**
     * pack will spawn a background thread to pack list.get(curIndex), moving all
     * live data to the other file ( 0 -> 1 and 1 -> 0 ) so each acts as the
     * backup of the other. A no-op when a pack is already queued or running.
     */
    public void pack() {
        packLock.lock();
        try {
            // check concurrency first
            if (packService.getQueue().size() > 0  || packService.getActiveCount() > 0 ) {
                logger.info("Pack data service is running, will skip size = " + packService.getQueue().size() );
            }
            else {
                // reset overflow
                overflow.getAndSet( 0L) ;
                packService.execute( new PackThread(this, curIndex) );
                // need to make sure thread is still alive, otherwise channel.isOpen() will return false for packService thread
                try {
                    Thread.sleep( 2000L);
                } catch (InterruptedException ex) {
                    // restore the interrupt status instead of swallowing it
                    Thread.currentThread().interrupt();
                }
            }
        } finally {
            packLock.unlock();
        }


    }

    /**
     * to determine which index to use for packing data, use mod 2 logic
     * (0 -> 1, 1 -> 0); the target channel is (re)opened empty.
     *
     * @param index - current index
     * @return the index of the freshly opened target channel
     * @throws StoreException when the channel cannot be opened
     */
    public int createChannel(int index) {
        try {
            //take modulo 2, to alternate the file used for packed data ( 0 -> 1 )
            int i = 0;
            if ( index % 2 == 0 )
                i = index + 1 ;
            else
                i = index - 1;
            logger.info("Add channel "+ i );
            if ( list.size() > i  ) {
                list.get(i).close();
                list.set(i, open( getPath(path) + FILENAME + i, map, true));
            }
            else
                list.add(i, open( getPath(path) + FILENAME + i, map, true));

            return i ;
        } catch (Exception ex) {
            throw new StoreException(ex.getMessage(), ex);
        }
    }

    /**
     * truncate all channels for same prefix: reopening with the reset flag
     * empties the files, and the channel is closed again afterwards.
     *
     * @param index channel index to truncate
     */
    public void truncate(int index) {
        logger.info("truncate "+index);
        list.get(index).close();
        list.set(index, open( getPath(path) + FILENAME + index, map, true));
        list.get(index).close();
    }


}
