package org.jgroups.protocols;

import org.jgroups.Address;
import org.jgroups.Event;
import org.jgroups.Message;
import org.jgroups.View;
import org.jgroups.annotations.MBean;
import org.jgroups.annotations.ManagedAttribute;
import org.jgroups.annotations.ManagedOperation;
import org.jgroups.annotations.Property;
import org.jgroups.stack.FragRetransmitter;
import org.jgroups.stack.Protocol;
import org.jgroups.util.Util;

import java.io.*;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;


/**
 * User: thsieh
 * Date: Mar 13, 2009
 * Time: 10:40:00 AM
 * Extends Frag2.
 * Main goal: change the original design of JGroups
 * to meet large-scale 1->n multicast requirements (more than 200 nodes):
 * 1. Reduce the single point of contention caused by the seqno in NAKACK
 * 2. Eliminate global garbage collection in STABLE
 */
@MBean(description="Fragments messages larger than fragmentation size into smaller packets")
public class FRAG3 extends Protocol implements FragRetransmitter.RetransmitCommand {
    // protocol name, also used as the key when attaching FragHeader3 headers to messages
    public static final String name="FRAG3";
    // header key under which a StreamHeader is attached to stream-type messages
    public static final String streamhd="STREAM";

    /* -----------------------------------------    Properties     -------------------------------------------------- */

    @Property(description="The max number of bytes in a message. Larger messages will be fragmented. Default is 1500 bytes")
    @ManagedAttribute(description="Fragmentation size", writable=true)
    int frag_size=1500;

    @Property(description="Estimate for message overhead. Default is 200 bytes ")
    @ManagedAttribute(description="Estimate number of bytes for headers plus src and dest ", writable=true)
    int overhead=200;


    /* --------------------------------------------- Fields ------------------------------------------------------ */


    /* the fragmentation list contains a fragmentation table per sender;
     * this way it becomes easier to clean up if a sender (member) leaves or crashes
     */
    private final ConcurrentMap<Address, ConcurrentMap<Long,FragEntry>> fragment_list=new ConcurrentHashMap<Address,ConcurrentMap<Long,FragEntry>>(11);

    // guards curr_id (see getNextId())
    private Lock idLock = new ReentrantLock();
    /** Used to assign fragmentation-specific sequence IDs (monotonically increasing) */
    private long curr_id= 0;

    // current group membership, maintained by handleViewChange()
    private final Vector<Address> members=new Vector<Address>(11);

    @ManagedAttribute(description="Number of sent messages")
    AtomicLong num_sent_msgs=new AtomicLong(0);
    @ManagedAttribute(description="Number of received messages")
    AtomicLong num_received_msgs=new AtomicLong(0);
    @ManagedAttribute(description="Number of reqResent fragments")
    AtomicLong num_resent_frags =new AtomicLong(0);
    @ManagedAttribute(description="Number of duplicated fragments")
    AtomicLong num_duplicated_frags =new AtomicLong(0);

    @ManagedAttribute(description="Number of sent fragments")
    AtomicLong num_sent_frags=new AtomicLong(0);
    @ManagedAttribute(description="Number of received fragments")
    AtomicLong num_received_frags=new AtomicLong(0);
    // initial capacity for datasMap; enforced to be at least 119 in init()
    private short indexSize = 119;
    // timestamp recorded in init(), reported via toString()
    private long timeStamp ;
    // all in-flight sent messages, keyed by message id; created in init(), drained in processAck()
    private ConcurrentMap<Long,MsgSent> datasMap ;
    // id of the most recently completed (fully acked) message on the sender side;
    // advanced in processAck() when header.id > lastRecvId
    private long lastRecvId = -1 ;
    // per-sender receive bitmaps, used for duplicate detection and reassembly bookkeeping
    private ConcurrentMap<Address, SenderBitMap> msgReceivedMap = new ConcurrentHashMap<Address ,SenderBitMap> (11);
    // number of message ids tracked per bitmap word (one bit per id in a long)
    private final int BITS = 64;
    // our own address, set via Event.SET_LOCAL_ADDRESS
    private Address localAddr;
    // size of the current membership -- presumably updated on view changes; not assigned in this chunk, verify
    private int memberSize ;
    // ceiling for switching resends from multicast to unicast
    private int ceiling;
    // factor of total members used to determine the unicast ceiling
    private float factor = 0.10f;
    // number of fragments above which streaming is used (threshold = size * frag_size)
    @Property(description="message > size of fragment")
    @ManagedAttribute(description="default # of packet size for stream ", writable=true)
    private int size = 40;
    // default output path for the stream receiver
    @Property(description="path for streming")
    @ManagedAttribute(description="current path for streaming", writable=true)
    private String path ="";

    /** Returns this protocol's name ("FRAG3"). */
    public final String getName() {
        return name;
    }

    public int getFragSize() {return frag_size;}
    public void setFragSize(int s) {frag_size=s;}
    public int getOverhead() {return overhead;}
    public void setOverhead(int o) {overhead=o;}
    public long getNumberOfSentMessages() {return num_sent_msgs.get();}
    // fix: previously returned num_resent_frags (copy/paste error)
    public long getNumberOfSentFragments() {return num_sent_frags.get();}
    public long getNumberOfReceivedMessages() {return num_received_msgs.get();}
    // fix: previously returned num_duplicated_frags (copy/paste error)
    public long getNumberOfReceivedFragments() {return num_received_frags.get();}
    public short getIndexSize() { return indexSize ; }
    public void setIndexSize(short size ) { indexSize = size ; }
    public String getPath() { return path;}
    public void setPath(String path) { this.path = path; }

    /**
     * Returns the next fragmentation id (monotonically increasing).
     * Fix: lock() is acquired before the try block — the previous form called
     * lock() inside try, so a failed lock() would reach the finally clause and
     * unlock() a lock we never held (IllegalMonitorStateException).
     */
    private long getNextId() {
        idLock.lock();
        try {
            return curr_id++;
        } finally {
            idLock.unlock();
        }
    }


    // schedules fragment retransmissions; created in init() on the transport's timer
    private FragRetransmitter retransmitter;
    // delay in ms before the daemon jobs start executing
    private long delay = 1000 * 3 * 2;
    // execution period of the daemon jobs, in ms
    @Property(description="The interval of receiver and sender daemon. Default is 3000 in mini seconds")
    @ManagedAttribute(description="The interval of receiver and sender daemon size", writable=true)
    private long period = 1500 * 2;

    // whether multicast packages loop back to the sender (see up());
    // default is false to reduce the memory footprint on the sender
    private boolean loopback = false;

    public boolean isLoopback() {    return loopback; }
    public void setLoopback(boolean loopback) { this.loopback = loopback;   }

    public long getDelay() {  return delay; }
    public void setDelay(long delay) { this.delay = delay;   }

    @Property(description="Delay in mini seconds of sender and receiver daemon")
    public long getPeriod() { return period;   }
    public void setPeriod(long period) {  this.period = period; }

    @Property(description="Message timeout")
    public int getTimeOut() { return timeOut;  }
    public void setTimeOut(int timeOut) {  this.timeOut = timeOut;   }

    @Property(description="Retry frequency for receiver")
    public int getRetry() { return retry;   }
    public void setRetry(int retry) {  this.retry = retry;  }

    @Property(description="Factor of total members to use unitcast instead of multicast")
    public float getFactor() { return factor;}
    public void setFactor(float factor) {  this.factor = factor;  }

    @Property(description="number package to triiger using streaming , default is 40")
    public int getSize() { return size;}
    public void setSize(int size) { this.size = size; }

    // assumed lower-bound bandwidth, default 4MB
    @Property(description="The expected bandwidth in lower end. Default is 4*1024*1024 Bytes")
    @ManagedAttribute(description="expected bandwidtht", writable=true)
    private int bandWidth = 4 * 1024 * 1024 ;
    // fragments per timeout unit; the 58000 here is a placeholder — recomputed in
    // init() as bandWidth / frag_size
    private int fragBW = bandWidth / 58000  ;
    // timeout unit, in ms
    @Property(description="The timeout value for bandwidth. Default is 1000 mini seconds")
    @ManagedAttribute(description="timeout value", writable=true)
    private int timeOut = 1000;
    // minimum interval in ms between resend-request sweeps (recomputed in init())
    private int reqTime = timeOut / 2;
    // maximum number of retries
    @Property(description="The max number of retry. Default is 3")
    @ManagedAttribute(description="retry count", writable=true)
    private int retry = 3;
    // minimum interval in ms between resends of the same fragment (see hasBeenSent())
    private final int sentTime = timeOut/2 ;

    /**
     * Initializes the protocol: computes the effective fragment payload size,
     * creates the retransmitter on the transport's timer, broadcasts the
     * frag_size CONFIG event up and down the stack, and schedules the sender and
     * receiver daemons.
     *
     * @throws Exception if frag_size minus overhead is not positive
     */
    public void init() throws Exception {
        super.init();

        // reserve 'overhead' bytes for headers plus src/dest addresses
        int old_frag_size=frag_size;
        frag_size-=overhead;
        if(frag_size <=0) {
            throw new Exception("frag_size=" + old_frag_size + ", overhead=" + overhead +
                      ", new frag_size=" + frag_size + ": new frag_size is invalid");
        }
        fragBW = bandWidth / frag_size ;
        this.reqTime = timeOut /2 ;
        // enforce indexSize is not smaller than the default value
        if ( indexSize < 119 ) indexSize = 119;
        // map of in-flight sent messages, keyed by message id
        datasMap = new ConcurrentHashMap<Long, MsgSent>( indexSize);
        timeStamp = System.currentTimeMillis() ;
        // initialize the retransmitter using the transport's shared timer
        retransmitter = new FragRetransmitter( this, getTransport().getTimer() );
        Map<String,Object> info=new HashMap<String,Object>(1);
        info.put("frag_size", frag_size);
        up_prot.up(new Event(Event.CONFIG, info));
        down_prot.down(new Event(Event.CONFIG, info));
        // set up the sender and receiver daemons; fix: run them on daemon timer
        // threads — the previous non-daemon timers were never cancelled and could
        // keep the JVM alive after the channel was closed
        Timer sender = new Timer("SenderDaemon", true);
        sender.schedule( new SenderDaemon(), delay, period);
        Timer receiver = new Timer("ReceiverDaemon", true);
        receiver.schedule( new ReceiverDaemon(), delay, period);
    }


    /**
     * FragRetransmitter.RetransmitCommand callback: pushes the given fragment
     * back down the stack and removes its key from the retransmitter.
     *
     * @param messageKey combined (message id, fragment id) key, see convertHeaderKey()
     * @param msg        the fragment message to resend
     */
    public void retransmit(long messageKey, Message msg) {
        if ( log.isInfoEnabled() )
            log.info("Start to restranmitting for key " + keyToString(messageKey) + " to " + msg.getDest().toString() ) ;
        num_resent_frags.getAndIncrement();
        Event event = new Event(Event.MSG, msg);
        down_prot.down( event );
        boolean removed = retransmitter.remove( messageKey ) != -1;
        if ( !removed && log.isInfoEnabled() )
            log.info("Not able to remove from retransmitter key "+ keyToString(messageKey) );
    }

    /**
     * Resolves the physical address of the local member (kept for backward
     * compatibility with callers that need the transport-level address).
     */
    public Address getPhysicalAddress() {
        Event evt = new Event(Event.GET_PHYSICAL_ADDRESS, localAddr);
        return (Address) down(evt) ;
    }


    /**
     * Resets all message and fragment counters.
     * Fix: num_sent_frags and num_received_frags were previously left out, so
     * those two managed attributes survived a stats reset.
     */
    public void resetStats() {
        super.resetStats();
        num_sent_msgs.set(0);
        num_received_msgs.set(0);
        num_sent_frags.set(0);
        num_received_frags.set(0);
        num_resent_frags.set(0);
        num_duplicated_frags.set(0);
    }

    /** One-line summary of the protocol's counters and ack progress. */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("Stats ids ");
        sb.append(curr_id)
          .append(" msg send ").append(num_sent_msgs)
          .append(" msg recvd ").append(num_received_msgs)
          .append(" num_sent_frags ").append(num_sent_frags)
          .append("  num_received_frags ").append(num_received_frags)
          .append(" frags reqResent ").append(num_resent_frags)
          .append(" frags duplicated ").append(num_duplicated_frags)
          .append(" lowest recvId ").append(lastRecvId)
          .append(" since ").append(timeStamp);
        return sb.toString();
    }



    /**
     * Handles events travelling down the stack. Every application message gets a
     * FragHeader3 regardless of size (see fragment()); STREAMING events take the
     * file-streaming path instead. VIEW_CHANGE, SET_LOCAL_ADDRESS and CONFIG
     * events update protocol state before being passed on.
     */
    public Object down(Event evt) {
        switch(evt.getType()) {

            // Event.STREAMING is a user-defined event type for large payloads
            case Event.STREAMING :
                num_sent_msgs.incrementAndGet();
                streamMsg( (Message)evt.getArg());
                return null;
            case Event.MSG:
                Message msg=(Message)evt.getArg();
                num_sent_msgs.incrementAndGet();
                // add a frag header regardless of frag size so every message is tracked
                fragment(msg);
                return null;
            case Event.VIEW_CHANGE:
                handleViewChange((View)evt.getArg());
                break;

            case Event.SET_LOCAL_ADDRESS:
                localAddr=(Address)evt.getArg();
                break;

            case Event.CONFIG:
                Object ret=down_prot.down(evt);
                if(log.isDebugEnabled()) log.debug("received CONFIG event: " + evt.getArg());
                handleConfigEvent((Map<String,Object>)evt.getArg());
                return ret;
        }

        return down_prot.down(evt);  // Pass on to the layer below us
    }


    /**
     * Handles events travelling up the stack. Messages carrying a FragHeader3 are
     * deduplicated against the per-sender bitmap, reassembled when they consist
     * of multiple fragments, and acknowledged to the sender; control types
     * (MSG_ACK, XMIT_REQ, XMIT_INQ) are dispatched to processOtherType().
     */
    public Object up(Event evt) {
        switch(evt.getType()) {

            case Event.MSG:
                Message msg=(Message)evt.getArg();
                FragHeader3 hdr=(FragHeader3)msg.getHeader(name);
                if(hdr != null ) {
                    // for multicast packages, honor the loopback setting:
                    // drop the package when src == localAddress
                    if ( ! loopback) {
                        if( msg.getSrc() != null && msg.getSrc().equals( localAddr)) return null;
                    }
                    // log all control types (type >= 4) and the last data fragment
                    if ( log.isInfoEnabled()) {
                        if ( hdr.type >= 4 || (hdr.frag_id + 1 ==  hdr.num_frags && hdr.type== 1)  )
                           log.info("header " + hdr.toString() +" src " + msg.getSrc().toString());
                    }
                    // data messages (plain or stream) are handled first
                    if ( hdr.type == FragHeader3.MSG  || hdr.type == FragHeader3.MSG_STREAM) {
                        SenderBitMap senderMap = getSenderMap( hdr, msg.getSrc() );
                        // drop duplicates before doing any reassembly work
                        if ( isDuplicateMsg( hdr ,senderMap )) {
                            num_duplicated_frags.getAndIncrement();
                            return null;
                        }
                        // multi-fragment message: try to reassemble
                        if ( hdr.num_frags > 1) {
                            Message newMsg = unfragment(msg, hdr, senderMap);
                            if ( newMsg == null ) return null;  // still incomplete
                            else  { // all fragments have been received
                                // the bitmap update happens inside unfragment()
                                sendAck( msg);
                                return up_prot.up(new Event(Event.MSG, newMsg));
                            }
                        }
                        else {  // single-fragment message: update the bitmap here
                            updateBitMap( hdr, senderMap );
                            if (hdr.type == FragHeader3.MSG_STREAM ) {
                                sendAck(msg);
                                // NOTE(review): writeStream() returns null on I/O failure;
                                // newMsg is passed up unchecked — confirm intended
                                Message newMsg = writeStream( msg, hdr);
                                return up_prot.up(new Event(Event.MSG, newMsg));
                            }
                            else {
                                sendAck(msg);
                                return up_prot.up(evt); // Pass up to the layer above us by default
                            }
                        }
                    }
                    else { // control types: ACK, XMIT_REQ, XMIT_INQ
                        processOtherType( hdr, msg );
                        return null  ;
                    }
                }
                else   // hdr == null: not ours; log and fall through to the default pass-up
                    log.warn("Message without FragHeader3 "+ msg.toString() );


                break;

            case Event.VIEW_CHANGE:
                handleViewChange((View)evt.getArg());
                break;

            case Event.SET_LOCAL_ADDRESS:
                localAddr=(Address)evt.getArg();
                break;

            case Event.CONFIG:
                Object ret=up_prot.up(evt);
                if(log.isDebugEnabled()) log.debug("received CONFIG event: " + evt.getArg());
                handleConfigEvent((Map<String,Object>)evt.getArg());
                return ret;
        }

        return up_prot.up(evt); // Pass up to the layer above us by default
    }


    /**
     * Writes the payload of a single-fragment stream message to a local file and
     * returns a copy of the message whose buffer holds
     * "&lt;internal file name&gt; &lt;original file name&gt;".
     *
     * @param msg    the received stream message
     * @param header its FragHeader3, used to derive the internal file name
     * @return the rewritten message, or null if the file could not be written
     */
    private Message writeStream(Message msg, FragHeader3 header) {
        String filename = getFileName(header, msg.getSrc().toString());
        OutputStream os = null ;
        try {
            os = new FileOutputStream( filename);
            // fix: honor the message's offset/length — getRawBuffer() returns the whole
            // backing array, which may contain bytes that are not part of this message
            os.write( msg.getRawBuffer(), msg.getOffset(), msg.getLength() );
            StreamHeader stdHeader = (StreamHeader) msg.getHeader(streamhd);
            Message streamMsg = msg.copy( false) ;
            // concatenate internal file name + 1 space + original file name
            String oName  = (stdHeader != null ? stdHeader.getFilename() : "null");
            streamMsg.setBuffer( (filename+" "+ oName) .getBytes() );
            return streamMsg ;
        } catch (IOException ex) {
            // fix: log the exception itself, not only its message, to keep the stack trace
            log.error("Failed to write stream file " + filename, ex);
            return null;
        } finally {
            if ( os != null ) {
                try {
                    os.close();
                } catch (IOException ignored){
                    // intentionally ignored: nothing more we can do on close failure
                }
            }
        }
    }

//    private void checkLastRecv(FragHeader3 header, SenderBitMap senderMap, Message msg) {
//        long recvId =  senderMap.highRecvdId ;
//        try {
//            senderMap.recvdLock.lock();
//            if ( header.id == senderMap.lowRecvdId + 1 ) senderMap.lowRecvdId = header.id ;
//            if ( header.id > senderMap.highestId )  senderMap.highRecvdId = header.id ;
//        } finally {
//            senderMap.recvdLock.unlock();
//        }
//        // check lastRecvdId had been completed or not
//        if ( header.id > recvId + 1 ) {
//            if ( log.isInfoEnabled() )  log.info("gap id " + header.id + " highRecv " + recvId +" lowRecv "+
//                    senderMap.lowRecvdId +" highest " + senderMap.highestId);
//        }
//    }


    /**
     * Dispatches the non-data fragment types: MSG_ACK, XMIT_REQ and XMIT_INQ.
     * Unknown types are logged and dropped.
     */
    private void processOtherType(FragHeader3 header, Message msg) {
        if ( header.type == FragHeader3.MSG_ACK )
            processAck( header, msg );
        else if ( header.type == FragHeader3.XMIT_REQ )
            resendFrag( header, msg );
        else if ( header.type == FragHeader3.XMIT_INQ )
            processInq( header, msg );
        else
            log.warn("Not able to process header " + header.toString() );
    }

    // returns the 64-bit bitmap word that covers header.id's range
    private long readBits(FragHeader3 header, SenderBitMap senderMap) {
        final long wordKey = (header.id / BITS) * BITS;
        return getBits( wordKey, senderMap ) ;
    }

    /**
     * Handles an XMIT_INQ from a sender asking whether we (the receiver) are
     * missing anything for the given message id. Replies with an ACK if the
     * message was fully received, requests a full resend if no partial state
     * exists, or requests each missing fragment individually (rate-limited to
     * one sweep per reqTime ms per entry).
     *
     * @param header the inquiry header carrying the message id
     * @param msg    the inquiry message (its src is the original sender)
     */
    private void processInq(FragHeader3 header, Message msg) {
        // ignore inquiries without a sender address
        if ( msg.getSrc() == null ) return ;
        // check whether the message has already been received in full
        SenderBitMap senderMap = msgReceivedMap.get( msg.getSrc() );
        if ( senderMap == null ) {
            log.warn("Can not find SenderBitMap for src " + msg.getSrc().toString()+" hdr " + header.toString() );
            return ;
        }
        else if ( isReceived(header.id, senderMap )) {
            log.info(header.toString() + " had been received and will resend ack again " + msg.getSrc().toString() );
            // re-ack in case the previous ack was lost on the way to the sender
            sendAck( msg);
            return;
        }

        ConcurrentMap<Long,FragEntry> frag_table = fragment_list.get( msg.getSrc());
        if ( frag_table == null) {
            log.warn(header.toString()+ " is not in fragment_list " + msg.getSrc().toString());
            return;
        }
        FragEntry entry = frag_table.get(  header.id);
        if ( entry == null ) {
            // nothing received for this id yet: request the whole message
            reqResentSingle(msg.getSrc(), header.id,  0);
            if ( log.isInfoEnabled()) log.info("Request for reqResent single fragment "+ header.toString()+" src " + msg.getSrc().toString());
        }
        else {
            entry.reqLock.lock();
            try {
                // rate-limit resend sweeps: at most one per reqTime ms per entry
                if ( System.currentTimeMillis() - entry.lastReq > reqTime ) {
                    entry.lastReq = System.currentTimeMillis();
                    // request every fragment slot that is still empty
                    for ( int i=0 , j= 0; i < entry.fragments.length ; i++) {
                        if ( entry.fragments[i] == null ) {
                            reqResent(i, i+1 , msg);
                            j++;
                            if ( log.isInfoEnabled() ) log.info("Process inq i "+i+" reqResent " + j + "  src " + msg.getSrc().toString() +" " + header.toString());
                        }
                    }
                }
            } finally {
                entry.reqLock.unlock();
            }
        }
    }

    /**
     * Re-sends a fragment upon receiving an XMIT_REQ from a receiver. Uses
     * unicast back to the requester once the sent-message entry is in unicast
     * mode, multicast to the original destination otherwise; recent resends are
     * suppressed via hasBeenSent() unless in unicast mode.
     *
     * @param header the request header (carries message id and frag_id; its
     *               num_frags/type reflect the request, not the original message)
     * @param msg    the request message (its src is the requesting receiver)
     */
    private void resendFrag(FragHeader3 header, Message msg) {
        MsgSent msgSent = datasMap.get( header.id);
        if ( msgSent == null) log.warn("Not able to find datasMap "+ header.toString() );
        else {
            // skip the resend if this fragment went out within the time limit,
            // unless the entry is in unicast resend mode
            if ( ! msgSent.isUniCast() && hasBeenSent( header, msgSent)) return ;
            byte[] bytes = msgSent.getBytes( header.frag_id);
            if ( bytes == null ) log.warn("frag_id is out of range "+ (msgSent.header.num_frags -1) +"  header " + header.toString() );
            else {
                if ( log.isInfoEnabled() ) log.info("Resent fragment " + header.toString() );
                num_resent_frags.getAndIncrement();
                Message newMsg = null;
                if (msgSent.isUniCast() ) {
                    log.info("Use unicast recvd "+ msgSent.totalReceived+" size "+ memberSize +" src "+msg.getSrc().toString() +" " + header.toString() );
                    newMsg = new Message( msg.getSrc() , localAddr,  bytes );
                }
                else
                    newMsg = new Message( msgSent.dest , localAddr,  bytes );
                // take num_frags and type from msgSent's stored header, not from the
                // incoming request header (which does not describe the original message)
                FragHeader3 hdr = new FragHeader3( header.id,  header.frag_id, msgSent.header.num_frags , msgSent.header.type );
                newMsg.putHeader( name, hdr );
                // for the last fragment of a stream, re-attach the StreamHeader carrying
                // the original file name (use hdr, not header, because header.num_frags == 1)
                if ( hdr.frag_id + 1 == hdr.num_frags && hdr.type == FragHeader3.MSG_STREAM ) {
                    StreamHeader streamHdr = new StreamHeader(new String( msgSent.buffer ) );
                    newMsg.putHeader(streamhd, streamHdr);
                }
                Event event = new Event( Event.MSG , newMsg);
                down_prot.down( event);
            }
        }
    }


    /**
     * Records a resend attempt for the given fragment and reports whether the
     * same fragment was already resent within the last {@code sentTime} ms.
     *
     * @return true if the resend should be skipped, false if it may proceed
     */
    private boolean hasBeenSent(FragHeader3 header, MsgSent msgSent) {
        final long now = System.currentTimeMillis();
        Long lastSent = msgSent.resentMap.get(header.frag_id);
        if ( lastSent != null && now - lastSent < sentTime ) {
            if ( log.isInfoEnabled() )
                log.info("Skip resend time " + sentTime + " header " + header.toString() );
            return true;
        }
        // first resend, or the previous one is old enough: record and allow it
        msgSent.resentMap.put(header.frag_id, now );
        return false;
    }


    /**
     * Slices the fragment with index {@code id} out of a sent message's buffer.
     * All fragments are frag_size bytes except the last, whose size is the
     * remainder of the buffer.
     *
     * NOTE(review): the last-fragment size uses (num_frags - 2); for a plain
     * layout of full frag_size slices one would expect (num_frags - 1) here —
     * confirm against fragment()'s num_frags convention (not visible in this
     * file chunk) before relying on or changing this.
     *
     * @param msgSent the sent-message entry holding the full buffer
     * @param id      zero-based fragment index
     * @return byte[] of the particular fragment
     */
    private byte[] getFragment(MsgSent msgSent, int id) {
        if ( msgSent.buffer.length  == 0 )
            return new byte[0];
        int size ;
        // check for last packet = header.num_frags - 1
        if ( id == msgSent.header.num_frags - 1) size = msgSent.buffer.length - ( frag_size * ( msgSent.header.num_frags - 2) );
        else size = frag_size;
        byte[] data = new byte[size ];
        System.arraycopy(msgSent.buffer, id * frag_size, data, 0, size );
        return data;
    }

    /**
     * Handles an ACK from a receiver: records the receiver against the sent
     * message and, once every expected receiver has acked, closes the entry and
     * drops it from datasMap, advancing lastRecvId when possible.
     */
    private void processAck(FragHeader3 header, Message msg) {
        MsgSent msgSent = datasMap.get( header.id );
        if ( msgSent == null )
            return; // entry already completed and removed: nothing to do
        msgSent.addList( msg.getSrc() );
        if ( ! msgSent.isComplete() )
            return; // still waiting for more receivers
        msgSent.close();
        if ( datasMap.remove( header.id ) == null )
            return; // a concurrent ack already removed the entry
        long duration = System.currentTimeMillis() - msgSent.timeStamp ;
        long sent = System.currentTimeMillis() - msgSent.startTime ;
        if ( header.id > lastRecvId) {
            lastRecvId = header.id ;
            if ( log.isInfoEnabled() ) log.info("Remove ack id " + header.id + " datasMaps "+ sent +" total mini "
                    +duration +" size "+ msgSent.getSize()+" recvd "+ msgSent.totalReceived);
        }
        else if ( log.isInfoEnabled() ) log.info("Remove ack id " + header.id +" datasMaps "+ sent +" total mini "
                +duration+" size "+ msgSent.getSize() +"  recvId "+ lastRecvId +" recvd "+ msgSent.totalReceived);
    }

    /**
     * Sends a MSG_ACK for the given message back to its sender, echoing the
     * message id and fragment count from the original header. The ack carries an
     * empty payload, our local address as src and the original sender as dest.
     */
    private void sendAck(Message msg) {
        FragHeader3 hdr = (FragHeader3) msg.getHeader( name);
        FragHeader3 ackHdr = new FragHeader3( hdr.id,  hdr.num_frags-1, hdr.num_frags, FragHeader3.MSG_ACK );
        Message ack = new Message( msg.getSrc(), localAddr, new byte[0]);
        ack.putHeader( name, ackHdr );
        if( log.isInfoEnabled() )
            log.info("Send ack " + ackHdr.toString() + " to " + msg.getSrc().toString() );
        down_prot.down( new Event( Event.MSG,  ack ));
    }

    /**
     * Checks (under the sender's read lock) whether the message with the given
     * id has already been received. Ids below lowestId + BITS are known-complete
     * once lowestId has been initialized (>= 0); otherwise the 64-id bitmap word
     * is consulted, and a missing word means nothing was received in that range.
     */
    private boolean isReceived(long id, SenderBitMap senderMap) {
        final long key = id - (id % BITS);
        final short offset = (short) (id % BITS );
        senderMap.indexLock.readLock().lock();
        try {
            if ( senderMap.lowestId >= 0 && id < senderMap.lowestId + BITS  )
                return true;    // id falls in the fully-completed range
            if ( ! senderMap.bitMap.containsKey( key ) )
                return false;   // no word yet for this range
            return isBitOn( getBits( key, senderMap ), offset ) ;
        } finally {
             senderMap.indexLock.readLock().unlock();
        }
    }

    // a message is a duplicate when the bit for its id is already set for this sender
    private boolean isDuplicateMsg(FragHeader3 header, SenderBitMap senderMap) {
        final long msgId = header.id;
        return isBitOn( msgId, senderMap);
    }

    /**
     * Checks (under the sender's read lock) whether the bit for {@code id} is
     * set: any id below lowestId + BITS counts as received once lowestId has
     * been initialized (>= 0); otherwise the 64-id bitmap word is consulted
     * (created empty on demand).
     */
    private boolean isBitOn(long id, SenderBitMap senderMap ) {
        final long key = id - (id % BITS);
        final short offset = (short) (id % BITS );
        senderMap.indexLock.readLock().lock();
        try {
            boolean inCompletedRange = senderMap.lowestId >= 0 && id < senderMap.lowestId + BITS;
            if ( inCompletedRange )
                return true;
            long word = getBits( key, senderMap ) ;
            return isBitOn( word, offset ) ;
        } finally {
             senderMap.indexLock.readLock().unlock();
        }
    }

    /**
     * Returns the bitmap word for {@code key}, creating an empty (0) word if it
     * does not exist yet.
     * Fix: the previous containsKey()/get() pair was not atomic on the
     * ConcurrentMap — a concurrent remove between the two calls would make get()
     * return null and NPE on unboxing. A single get() followed by putIfAbsent()
     * closes that window.
     *
     * @param key       the word key (id rounded down to a multiple of BITS)
     * @param senderMap per-sender tracking state
     * @return bitMap word of key
     */
    private long getBits(long key, SenderBitMap senderMap) {
        Long bits = senderMap.bitMap.get( key );
        if ( bits != null )
            return bits;
        Long previous = senderMap.bitMap.putIfAbsent( key, 0L );
        return previous != null ? previous : 0L;
    }

    /**
     * Copies the tracking state of {@code source} into {@code dest} while
     * holding dest's write lock.
     *
     * NOTE(review): this is a shallow copy — bitMap and idQueue are shared by
     * reference afterwards, and source's own lock is not taken. Presumably
     * acceptable because one of the two maps involved is discarded right after
     * the call (see getSenderMap) — verify before reusing elsewhere.
     */
    private void copyBitMap(SenderBitMap source, SenderBitMap dest) {
        dest.indexLock.writeLock().lock();
        try {
            dest.firstId = source.firstId;
            dest.lowestId = source.lowestId ;
            dest.lowRecvdId = source.lowRecvdId ;
            dest.highestId = source.highestId ;
            dest.bitMap  = source.bitMap;
            dest.idQueue = source.idQueue ;
        } finally {
            dest.indexLock.writeLock().unlock();
        }
    }

    /**
     * Returns the SenderBitMap for the given sender, creating and registering a
     * new one if none exists yet. When two threads race to register, the map
     * already stored in msgReceivedMap wins; if the current header carries an
     * earlier id than the stored map's firstId, the fresh map's earlier baseline
     * is copied into the stored map so earlier ids are not misclassified.
     *
     * @param header  header of the message being processed (supplies firstId)
     * @param address the sender's address
     * @return SenderBitMap registered for the sender
     */
    private SenderBitMap getSenderMap(FragHeader3 header, Address address) {
        SenderBitMap senderMap = msgReceivedMap.get(address) ;
        if ( senderMap == null ) {
            senderMap = new SenderBitMap(indexSize, header.id);
            SenderBitMap oldMap = msgReceivedMap.putIfAbsent( address, senderMap);
            // putIfAbsent lost the race: another thread registered a map first
            if ( oldMap != null ) {
                if (oldMap.firstId > senderMap.firstId  )  {
                    log.info( "Replace with cur because pre "+ oldMap.firstId+" > cur " + senderMap.firstId );
                    // since senderMap has the smaller id, copy senderMap's content into
                    // oldMap (the surviving map). Fix: the arguments were previously
                    // swapped, copying oldMap into the local senderMap that is discarded
                    // on the next line — a no-op that left oldMap's later baseline intact.
                    copyBitMap(senderMap, oldMap);
                }
                senderMap = oldMap ;
            }
        }
        return senderMap ;
    }

    /**
     * Marks a fully-received message: bumps the received-message counter and
     * switches the bit for the message's id on in the sender's bitmap.
     */
    private void updateBitMap(FragHeader3 header, SenderBitMap senderMap) {
        num_received_msgs.incrementAndGet();
        updateBits(header.id, senderMap) ;
    }

    /**
     * Sets the bit for {@code id} in the sender's bitmap under the write lock.
     * When a 64-id word becomes completely full, it is either collapsed into
     * lowestId (if it is the word directly following lowestId) or parked in
     * idQueue so checkIdQueue() can collapse it later; highestId is advanced as
     * a side effect.
     *
     * @param id        message id whose bit is switched on
     * @param senderMap per-sender tracking state
     */
    private void updateBits(long id, SenderBitMap senderMap) {
        senderMap.indexLock.writeLock().lock();
        try {
            long key = id - (id % BITS);
            long bitMap = getBits( key, senderMap );
            bitMap = updateData( bitMap, id );
            if ( id > senderMap.highestId )
                senderMap.highestId = id;
            // all 64 ids covered by this word have been received
            if ( bitMap ==  0xFFFFFFFFFFFFFFFFL ) {
                if ( log.isInfoEnabled() ) log.info("updateBitMap received 64 id " + id +" key "+ key+ " senderMap " +
                    senderMap.toString() + " queue size " + senderMap.idQueue.size() );
                // word directly follows lowestId: collapse it and drain the queue
                if ( key - senderMap.lowestId > 0 && key - senderMap.lowestId <= BITS) {
                    senderMap.lowestId = key;
                    senderMap.bitMap.remove(key);
                    senderMap.checkIdQueue();
                }
                else {
                   // out of order: park the full word and let checkIdQueue() remove it
                    senderMap.bitMap.put(key , bitMap );
                    senderMap.idQueue.add(key );
                }
            }
            else { // word not full yet: store the updated bits
                senderMap.bitMap.put( key , bitMap );
                if( log.isInfoEnabled() ) log.info("updateBitMap id "+ id+ " value "+bitMap );

            }
        } finally {
            senderMap.indexLock.writeLock().unlock();
        }
    }


//    private void updateLowRecvId(long id, SenderBitMap senderMap ) {
//        if (senderMap.lowRecvdId >= id) return ;
//        senderMap.recvdLock.lock();
//        try {
//            senderMap.lowRecvdId = id;
//        } finally {
//            senderMap.recvdLock.unlock();
//        }
//    }

    /**
     * Tests whether bit {@code index} (0-based, LSB first) is set in {@code bits}.
     * @param bits  the 64-bit bitmap word
     * @param index bit position, expected in [0, 63]
     * @return true if the bit is set
     */
    private boolean isBitOn(long bits, short index) {
        long mask = 1L << index;
        // return the comparison directly instead of the if/else-true/false anti-idiom
        return (bits & mask) == mask;
    }

    /**
     * Returns {@code data} with the bit corresponding to {@code id} turned on.
     * The bit position within the word is {@code id % BITS}.
     * @param data current bitmap word
     * @param id   message id whose bit should be set
     * @return the updated bitmap word
     */
    private long updateData(long data, long id ) {
        int position = (int) (id % BITS);
        return data | (1L << position);
    }

    /**
     * Packs a fragment header into a single retransmitter-map key: message id
     * in the upper 32 bits, fragment id in the lower 32 bits. Assumes header.id
     * never exceeds 32 significant bits (~4G message ids per retransmitter map).
     * @param header fragment header
     * @return packed long key for the retransmitter map
     */
    private long convertHeaderKey(FragHeader3 header) {
        return (header.id << 32) + header.frag_id;
    }

    /** Renders a packed retransmitter key as "[msgId,fragId]" for logging. */
    String keyToString(long key) {
        long msgId = (key >>> 32) & 0xFFFFFFFFL;
        long fragId = key & 0xFFFFFFFFL;
        return "[" + msgId + "," + fragId + "]";
    }
    
    /**
     * Extracts the upper 32 bits of {@code id} as an unsigned value.
     * @param id packed key
     * @return the high 32 bits of id, sign bit ignored
     */
    private long getKey4Long(long id) {
        // ">>>" is an unsigned shift, so the sign bit cannot leak in;
        // the old trailing "+ 0" was a no-op and has been removed.
        return id >>> 32;
    }


    /**
     * Reacts to a new group view: refreshes the membership snapshot, drops all
     * per-member state for nodes that left, and recomputes the ack ceiling.
     */
    private void handleViewChange(View view) {
        Vector<Address> new_mbrs=view.getMembers();
        Vector<Address> left_mbrs=Util.determineLeftMembers(members, new_mbrs);
        members.clear();
        members.addAll(new_mbrs);

        // departed members can never complete a message: clear their tables
        for(Address gone : left_mbrs) {
            fragment_list.remove(gone);
            msgReceivedMap.remove(gone);
            if(log.isInfoEnabled())
                log.info("[VIEW_CHANGE] removed " + gone + " from fragmentation table");
        }
        syncMsgSent(left_mbrs);
        memberSize = members.size();
        ceiling = (memberSize == 0) ? 1 : (int) Math.ceil(memberSize * factor);
    }

    /**
     * Re-syncs the sender's bookkeeping when receivers leave the group:
     * removes each departed member from the receivedSet of every in-flight
     * MsgSent, so completion checks do not wait forever on a dead node.
     * @param leftMembers members that left in the last view change
     */
    private void syncMsgSent(Vector<Address> leftMembers) {
        for (MsgSent each : datasMap.values()) {
            if (each.totalReceived <= 0)
                continue;
            for (Address addr : leftMembers) {
                // BUGFIX: check-and-remove under the entry lock. The old code
                // tested contains() outside the lock (check-then-act race with
                // addList()), which could double-decrement totalReceived.
                each.lock.lock();
                try {
                    if (each.receivedSet.remove(addr)) {
                        if (log.isInfoEnabled())
                            log.info(addr.toString() + " remove from receivedSet for " + each.totalReceived);
                        each.totalReceived--;
                    }
                } finally {
                    each.lock.unlock();
                }
            }
        }
    }

    /** Drops all partially-received fragments originating from {@code mbr}. */
    @ManagedOperation(description="removes all fragments sent by mbr")
    public void clearFragmentsFor(Address mbr) {
        if(mbr == null)
            return;
        fragment_list.remove(mbr);
        if(log.isTraceEnabled())
            log.trace("removed " + mbr + " from fragmentation table");
    }

    @ManagedOperation(description="Removes all entries from the fragmentation table. " +
            "Dangerous: this might remove fragments that are still needed to assemble an entire message")
     public void clearAllFragments() {
        // drops ALL reassembly state, including partially-received messages
        fragment_list.clear();
    }

    /**
     * Fragments a message into multiple smaller messages that all carry the
     * same ID. Example: for generated ID 2344 and 3 fragments, the message
     * {dst,src,buf} goes out as
     * [2344,3,0]{dst,src,buf1}, [2344,3,1]{dst,src,buf2} and [2344,3,2]{dst,src,buf3}.
     * Failures are logged, never propagated to the caller.
     */
    private void fragment(Message msg) {
        try {
            newFragment(msg);
        }
        catch(Exception e) {
            if(log.isErrorEnabled())
                log.error("fragmentation failure "+ e.getMessage(), e);
        }
    }

    /** Fragments and sends {@code msg} as a regular (non-streaming) message. */
    private void newFragment(Message msg) throws Exception {
        newFragment(msg, FragHeader3.MSG );
    }

    /**
     * Sends the contents of a File (carried as the message's object) as a
     * sequence of MSG_STREAM fragments read directly from disk. The last
     * fragment additionally carries a StreamHeader with the original file
     * name. The MsgSent timestamp is set only after all fragments are out.
     * @param msg message whose object is the File to stream
     */
    private void streamMsg(Message msg) {
        File file = (File) msg.getObject();
        long length = file.length();
        // number of fragments, rounded up
        int num_frags = (int) divideCeil( length, (long) frag_size ) ;
        // remain is <= 0 here; normalize to the last fragment's size in (0, frag_size]
        int remain = (int) ( length - (long) frag_size * num_frags ) ;
        if ( remain == 0 ) remain = frag_size ;
        else remain = frag_size + remain;
        long id=getNextId(); // used as a seqno
        MsgSent sent ;
        FragHeader3 hdr=new FragHeader3(id, 0, num_frags, FragHeader3.MSG_STREAM, remain);
        BufferedInputStream ip = null ;
        try {
            ip = new BufferedInputStream(new FileInputStream( file));
            msg.putHeader( name, hdr);
            // flag=false registers this as a streaming MsgSent (opens the data file)
            sent = createMsgSent( msg, false , file);
            // BUGFIX: count the fragments once. The old code called
            // addAndGet(num_frags) on EVERY loop iteration, inflating the
            // statistic by a factor of num_frags.
            num_sent_frags.addAndGet(num_frags);
            for (int i = 0 ; i < num_frags ; i++ ) {
                Message frag_msg=msg.copy(false); // don't copy the buffer, only src, dest and headers
                if ( i < num_frags - 1 ) {
                    byte[] datas= new byte[frag_size] ;
                    int s = ip.read( datas);
                    if ( s != frag_size ) log.warn("expect "+frag_size+" get " + s);
                    frag_msg.setBuffer(datas);
                }
                else {  // last fragment: attach the original file name
                    StreamHeader streamHdr = new StreamHeader( file.getName() );
                    frag_msg.putHeader(streamhd, streamHdr);
                    byte[] datas= new byte[remain] ;
                    int s = ip.read( datas);
                    if ( s != remain ) log.warn("expect "+remain+" get " + s);
                    frag_msg.setBuffer(datas);
                }
                // each fragment needs its own header instance
                FragHeader3 header = new FragHeader3(id, i, num_frags, FragHeader3.MSG_STREAM, remain);
                frag_msg.putHeader(name, header);
                down_prot.down(new Event(Event.MSG, frag_msg));
            }
            // stamp the entry only once all fragments have been pushed down
            sent.lock.lock();
            try {
                sent.timeStamp = System.currentTimeMillis();
            } finally {
                sent.lock.unlock();
            }
        } catch (Exception e) {
            throw new RuntimeException( e.getMessage(), e);
        } finally {
            if ( ip != null ) {
                try {
                    ip.close();
                } catch ( IOException ex ) {
                    // best-effort close; just log
                    log.error( ex.getMessage(), ex);
                }
            }
        }
    }

    /**
     * Fragments and sends {@code msg}. Every message gets a FragHeader3, even
     * when it fits in a single fragment (num_frags == 1) -- this keeps the
     * receive path uniform and is more efficient. The MsgSent timestamp is set
     * only after all fragments have been pushed down the stack.
     * @param msg  the message to send
     * @param type fragment type (e.g. FragHeader3.MSG)
     * @throws Exception on fragmentation failure
     */
    private void newFragment(Message msg, byte type) throws Exception {
        long id=getNextId(); // used as a seqno
        MsgSent sent ;
        if ( msg.getRawBuffer().length <= frag_size) {
            // small message: single fragment, sent as-is
            FragHeader3 hdr=new FragHeader3(id, 0, 1, type);
            msg.putHeader(name, hdr);
            sent = createMsgSent( msg);
            down_prot.down(new Event(Event.MSG, msg));
        }
        else {
            byte[] buffer=msg.getRawBuffer();
            int num_frags = divideCeil( buffer.length , frag_size ) ;
            // remain is <= 0 here; normalize to the last fragment's size in (0, frag_size]
            int remain = buffer.length - frag_size * num_frags ;
            if ( remain == 0 ) remain = frag_size ;
            else remain = frag_size + remain ;

            FragHeader3 hdr=new FragHeader3(id, 0, num_frags, type, remain);
            msg.putHeader( name, hdr);
            sent =createMsgSent( msg);
            // BUGFIX: count the fragments once. The old code called
            // addAndGet(num_frags) on EVERY loop iteration, inflating the
            // statistic by a factor of num_frags.
            num_sent_frags.addAndGet(num_frags);
            for (int i = 0, begin = 0; i < num_frags ; i++, begin += frag_size ) {
                Message frag_msg=msg.copy(false); // don't copy the buffer, only src, dest and headers
                if ( i < num_frags -1 )
                    frag_msg.setBuffer(buffer, begin, frag_size);
                else   // last fragment
                    frag_msg.setBuffer(buffer, begin, remain);
                FragHeader3 header =new FragHeader3(id, i, num_frags, type, remain);
                frag_msg.putHeader(name, header);
                down_prot.down(new Event(Event.MSG, frag_msg));
            }
        }
        // stamp the entry only once all fragments have been pushed down
        sent.lock.lock();
        try {
            sent.timeStamp = System.currentTimeMillis();
        } finally {
            sent.lock.unlock();
        }
    }

    /**
     * Creates (or reuses) the MsgSent bookkeeping entry for an outgoing message.
     * @param msg  the message being sent (must already carry its FragHeader3)
     * @param flag true for a regular in-memory message, false for streaming
     * @param file backing file for streaming mode (ignored when flag is true)
     * @return the MsgSent registered under the header's id
     */
    private MsgSent createMsgSent( Message msg, boolean flag, File file) {
        FragHeader3 header = (FragHeader3) msg.getHeader(name);
        MsgSent msgSent;
        if ( flag )
            // a null destination means multicast
            msgSent = new MsgSent( msg.getRawBuffer(), header, msg.getDest() == null, msg.getDest() );
        else {
            // streaming: keep only the file name, the payload stays on disk
            msgSent = new MsgSent( file.getName().getBytes(), header, msg.getDest() == null, msg.getDest() );
        }
        MsgSent existing = datasMap.putIfAbsent( header.id, msgSent);
        if ( existing != null ) {
            // another thread won the race; do not open the data file again
            msgSent = existing;
        }
        else if ( !flag ) {
            // we registered a new streaming entry: open its backing data file
            msgSent.creatFile( file );
        }
        return msgSent;
    }

    /** Creates the MsgSent entry for a regular (non-streaming) message. */
    private MsgSent createMsgSent( Message msg) {
        return createMsgSent( msg, true, null);
    }

    /**
     * Integer division rounded up (ceiling), for non-negative operands.
     * @param dividend value to divide
     * @param divisor  divisor, must be non-zero
     * @return ceil(dividend / divisor)
     */
    public static long divideCeil( long dividend, long divisor  ) {
        // BUGFIX: the old code cast dividend to int in the evenly-divisible
        // branch ("(int) dividend / divisor"), truncating results for
        // dividends beyond 2^31 (e.g. file lengths over 2GB).
        long quotient = dividend / divisor;
        return (dividend % divisor == 0) ? quotient : quotient + 1;
    }

    /**
     * @param msg a message expected to carry a FragHeader3 under this protocol's name
     * @return true if the message is part of a streamed (file) transfer
     */
    public static boolean isStreamMessage(Message msg) {
        FragHeader3 header = (FragHeader3) msg.getHeader( name);
        // NOTE(review): assumes the header is present; a message without a
        // FragHeader3 would NPE here -- confirm callers guarantee it
        return header.type == FragHeader3.MSG_STREAM;
    }

    /**
     * Integer division rounded up (ceiling).
     * @param dividend value to divide
     * @param divisor  divisor, must be non-zero
     * @return ceil(dividend / divisor)
     */
    public static int divideCeil( int dividend, int divisor  ) {
        int quotient = dividend / divisor;
        return (dividend % divisor == 0) ? quotient : quotient + 1;
    }


    /**
     * Collects one received fragment and, when the message is complete,
     * reassembles it:
     * 1. store the fragment in the per-sender FragEntry
     * 2. once all fragments arrived, assemble them into one buffer
     *    (for streaming: close the spool file and return its name instead)
     * 3. the assembled message is returned to the caller, which passes it up
     * @param msg       the fragment just received
     * @param hdr       its FragHeader3 (id, frag_id, num_frags, type)
     * @param senderMap per-sender received-message bitmap, updated on completion
     * @return the reassembled message, or null if the message is not yet complete
     */
    private Message unfragment(Message msg, FragHeader3 hdr, SenderBitMap senderMap) {
        Address            sender=msg.getSrc();
        Message            assembled_msg=null;

        // per-sender fragment table, created lazily and race-free via putIfAbsent
        ConcurrentMap<Long,FragEntry> frag_table=fragment_list.get(sender);
        if(frag_table == null) {
            frag_table=new ConcurrentHashMap<Long,FragEntry>();
            ConcurrentMap<Long,FragEntry> tmp=fragment_list.putIfAbsent(sender, frag_table);
            if(tmp != null) // value was already present
                frag_table=tmp;
        }
        FragEntry entry=frag_table.get(hdr.id);
        if(entry == null) {
            //entry=new FragEntry(hdr.num_frags, hdr.id);
            entry = new FragEntry(hdr);
            FragEntry tmp = frag_table.putIfAbsent(hdr.id, entry);
            // createFile only the first time, i.e. when tmp is null
            if(tmp != null) {
                // lost the race: wait until the winning thread finished creating the spool file
                if ( hdr.type == FragHeader3.MSG_STREAM) tmp.checkLock();
                entry = tmp;
            }
            else
                if (hdr.type == FragHeader3.MSG_STREAM) entry.createFile( hdr, msg.getSrc().toString());
        }
        num_received_frags.incrementAndGet();
        entry.lock();
        try {
            if (entry.set(hdr, msg) ) {  // msg had not been received before
                if(entry.isComplete()) {
                    updateBitMap( hdr, senderMap);
                    // remove from map first, then assemble
                    frag_table.remove(hdr.id);
                    // streaming: return the file names in the message body instead of the payload
                    if ( hdr.type == FragHeader3.MSG_STREAM) {
                        entry.close();
                        Message streamMsg = msg.copy( false) ;
                        // concatenate internal file name + 1 space + original file name
                        streamMsg.setBuffer( (getFileName( hdr, msg.getSrc().toString() )+" "+ entry.filename).getBytes() );
                        return streamMsg ;
                    }
                    else assembled_msg = entry.assembleMessage();
                }
            }
            // else: duplicate fragment, nothing to do
        }
        finally {
            entry.unlock();
        }
        // assembled_msg=frag_table.add(hdr.id, hdr.frag_id, hdr.num_frags, msg);
        if(assembled_msg != null) {
            try {
                if(log.isTraceEnabled()) log.trace("assembled_msg is " + assembled_msg);
                assembled_msg.setSrc(sender); // needed ? YES, because fragments have a null src !!
                // the caller's main message routine passes it up the stack
                //up_prot.up(new Event(Event.MSG, assembled_msg));
            }
            catch(Exception e) {
                if(log.isErrorEnabled()) log.error("unfragmentation failed", e);
            }
        }
        return assembled_msg;
    }




    /** Applies a CONFIG event map; currently only "frag_size" is recognized. */
    void handleConfigEvent(Map<String,Object> map) {
        if(map == null)
            return;
        if(!map.containsKey("frag_size"))
            return;
        frag_size=((Integer)map.get("frag_size")).intValue();
        if(log.isDebugEnabled()) log.debug("setting frag_size=" + frag_size);
    }

    /**
     * Returns the transport protocol (TP) at the bottom of the stack.
     * NOTE(review): the method name is misspelled ("Locak" for "Local") but is
     * kept as-is because external callers may depend on it.
     */
    public TP getLocakTP() {
        return super.getTransport();
    }

    /**
     * Queues retransmit requests for the missing fragment range [begin, end)
     * of the message {@code frag} belongs to. Each request is a unicast
     * XMIT_REQ message addressed back to the fragment's sender.
     * @param begin first missing fragment id (inclusive)
     * @param end   one past the last missing fragment id (exclusive)
     * @param frag  a received fragment of the affected message (supplies id and sender)
     */
    public void reqResent(int begin, int end, Message frag) {
        // loop-invariant: the original header only supplies the message id, hoist it
        FragHeader3 header = (FragHeader3) frag.getHeader( name);
        for ( int i = begin ; i < end ; i++) {
            Message newMsg = frag.copy( false);
            // fresh header per request so we never overwrite the original one
            FragHeader3 hdr = new FragHeader3( header.id,  i, 1, FragHeader3.XMIT_REQ);
            newMsg.putHeader( name, hdr);
            // destination must be set: the request is a unicast back to the sender
            newMsg.setDest( frag.getSrc() );
            newMsg.setSrc( localAddr);
            long key = convertHeaderKey( hdr);
            retransmitter.add( key, newMsg);
        }
    }

    /**
     * Builds the unique local file name used to spool a streamed message:
     * "file.&lt;sender&gt;.&lt;msgId&gt;.&lt;numFrags&gt;", optionally under the configured path.
     * @param hdr    fragment header supplying the message id and fragment count
     * @param prefix source address rendered as a string
     * @return unique file name for this message
     */
    public String getFileName(FragHeader3 hdr, String prefix) {
        String base = "file." + prefix.toLowerCase() + "." + hdr.id + "." + hdr.num_frags;
        return path.length() > 0 ? path + "/" + base : base;
    }

    /**
     * Class represents+ an entry for a message. Each entry holds an array of byte arrays sorted
     * once all the byte buffer entries have been filled the fragmentation is considered complete.<br/>
     * All methods are unsynchronized, use getLock() to obtain a lock for concurrent access.
     */
    public class FragEntry {
        // each fragment is a byte buffer
        final Message fragments[];
        //the number of fragments we have received
        int  number_of_frags_recvd=0;
        // id for fragment
        long id;
        private final Lock lock=new ReentrantLock();
        // mic add timeStamp for each FragEntry is created
        private long timeStamp ;
        // hightest number must -1
        private int highest = -1;
        // lowest must -1 for initial state
        private int lowest = -1;
        // list of missing
        //private List<Integer> limboList = new ArrayList<Integer>();
        //private int duplicate = 0;
        // reqResent count
        private int count = 0;
        // check resent lock
        private final Lock reqLock = new ReentrantLock();
        // lastReq to prevent mulitple reqSent by sender inq and ReceiverDeamon
        private long lastReq = 0L;
        // outputStream
        private volatile RandomAccessFile output = null;
        private volatile boolean stream ;
        // file for streaming
        private final Lock fileLock=new ReentrantLock();
        // source streaming file name, arrive in last fragment
        private String filename = null ;
        /**
         *
         * @param header
         */
        public FragEntry(FragHeader3 header) {
            this.timeStamp = System.currentTimeMillis();
            this.id = header.id ;
            stream =  false ;
            fragments=new Message[header.num_frags];
            for(int i=0; i < header.num_frags; i++)
                fragments[i]=null;
        }


        public void checkLock() {
            int i = 0;
            while ( true ) {
                if ( stream ) break;
                else {
                    fileLock.lock();
                    try {
                        if ( stream ) break;
                    } finally {
                        fileLock.unlock();
                        try {
                            i++ ;
                            Thread.sleep(100);
                        } catch (InterruptedException e) {
                        }
                    }
                    if ( i > 20 ) throw new RuntimeException("check lock file, exceed 2 seconds, output is still null");
                }
            }
            log.info("Exit checkLock i "+i);
        }

        public void createFile(FragHeader3 header, String prefix) {
            fileLock.lock();
            try {
                // careate a output Stream
                if ( header.type == FragHeader3.MSG_STREAM) {
                    long len = (long) ( (header.num_frags - 1 ) * frag_size) +
                            (long) (header.remain == frag_size ? frag_size : header.remain) ;
                    try {
                        output = new RandomAccessFile( getFileName(header, prefix), "rw" );
                        output.setLength( len);
                        log.info("Create file "+ getFileName(header, prefix)+" length "+ len);
                        stream = true;
                    } catch ( IOException ex) {
                        // swallow exception
                        log.error( ex.getMessage(), ex);
                    }
                }
            } finally {
                fileLock.unlock();
            }

        }

        public void close() {
            if ( stream && output != null ) {
                try {
                    output.close();
                    log.info("Close file");
                } catch (IOException ex) {
                    log.error( ex.getMessage() );
                } finally {
                    output = null ;
                }
            }
        }
        /** Use to synchronize on FragEntry */
        public void lock() {
            lock.lock();
        }

        public void unlock() {
            lock.unlock();
        }

        /**
         * determine if it is need to request sender to resent
         * @param frag_id
         * @return
         */
        public boolean isNeedResent(int frag_id) {
            if ( frag_id > highest + 1)
                return true;
            else
                return false ;
        }

        /**
         * adds on fragmentation buffer to the message
         * @param hdr the number of the fragment being added 0..(tot_num_of_frags - 1)
         * @param frag the byte buffer containing the data for this fragmentation, should not be null
         */
        public boolean set(FragHeader3 hdr, Message frag) {
            // don't count an already received fragment (should not happen though because the
            // reliable transmission protocol(s) below should weed out duplicates
            if(fragments[hdr.frag_id] == null) {
                // assign a empty message for streaming
                if ( hdr.type == FragHeader3.MSG_STREAM)  fragments[hdr.frag_id]= new Message( false);
                else fragments[hdr.frag_id]=frag;
                number_of_frags_recvd++;
            }
            else {
                // duplicate fragments
                num_duplicated_frags.getAndIncrement();
                //duplicate++;
                return false ;
            }

            if ( isNeedResent( hdr.frag_id )) {
                if (log.isInfoEnabled() ) log.info("Request to reqResent id " + hdr.id +" from "+ (highest + 1) + " to " + (hdr.frag_id -1)
                        + " src " + frag.getSrc().toString());
                reqResent( highest +1 , hdr.frag_id, frag );
            } else {
                if( hdr.frag_id == highest + 1 )  ;  // do nothing
                else if ( hdr.frag_id > lowest ) { // receive packet that might be in retranmitter queue
                    // try to retrive from queue
                    long msgKey = convertHeaderKey( hdr);
                    int n = retransmitter.remove( msgKey);
                    if (log.isInfoEnabled() ) log.info("Cancel request from queue key " + keyToString(msgKey) + " header " + hdr.toString()+" n " +n );
                }
            } // else not require to reqResent
            //  update the highest or lowest
            if ( hdr.frag_id > highest  ) highest = hdr.frag_id ;
            if ( hdr.frag_id == lowest + 1 ) lowest = hdr.frag_id ;
            else ; // do nothing for < lowest or > lowest
            // check streaming
            if ( hdr.type == FragHeader3.MSG_STREAM ) writeToOutput(hdr, frag);
            return true;
        }

        private void setName(Message frag) {
            StreamHeader hdr = (StreamHeader) frag.getHeader(streamhd);
            if ( hdr == null ) log.warn("no stream header in last msg");
            else filename = hdr.getFilename();
        }

        private void writeToOutput(FragHeader3 hdr, Message frag) {
            //if (log.isInfoEnabled() ) log.info("file "+ hdr.toString() );
            //check if anything to write
            fileLock.lock();
            try {
                // need double check
                if( output == null ) {
                    if (log.isInfoEnabled() ) log.info("output is close, concurrent req file "+ hdr.toString() );
                    return ;
                }
                //assign file for first package
                if (  hdr.frag_id + 1 == hdr.num_frags ) setName( frag);
                long pos = frag_size * hdr.frag_id ;
                output.seek( pos);
                output.write( frag.getRawBuffer() );
            } catch (IOException e) {
                log.error( e.getMessage());
                try {
                    output.close();
                } catch (IOException ex) {

                } finally {
                    output = null ;
                }
            } finally {
                fileLock.unlock();
            }

        }
        /** returns true if this fragmentation is complete
         *  ie, all fragmentations have been received for this buffer
         *
         */
        public boolean isComplete() {
            /*first make a simple check*/
            if(number_of_frags_recvd < fragments.length) {
                return false;
            }
            /*then double check just in case*/
            for(int i=0; i < fragments.length; i++) {
                if(fragments[i] == null)
                    return false;
            }
            /*all fragmentations have been received*/
            return true;
        }

        /**
         * Assembles all the fragments into one buffer. Takes all Messages, and combines their buffers into one
         * buffer.
         * This method does not check if the fragmentation is complete (use {@link #isComplete()} to verify
         * before calling this method)
         * @return the complete message in one buffer
         *
         */
        private Message assembleMessage() {
            Message retval;
            byte[]  combined_buffer, tmp;
            int     combined_length=0, length, offset;
            int     index=0;

            for(Message fragment: fragments) {
                combined_length+=fragment.getLength();
            }

            combined_buffer=new byte[combined_length];
            retval=fragments[0].copy(false);

            for(int i=0; i < fragments.length; i++) {
                Message fragment=fragments[i];
                fragments[i]=null; // help garbage collection a bit
                tmp=fragment.getRawBuffer();
                length=fragment.getLength();
                offset=fragment.getOffset();
                System.arraycopy(tmp, offset, combined_buffer, index, length);
                index+=length;
            }

            retval.setBuffer(combined_buffer);
            return retval;
        }

        public String toString() {
            StringBuilder ret=new StringBuilder();
            ret.append("[tot_frags=").append(fragments.length).append(", number_of_frags_recvd=").append(number_of_frags_recvd).append(']');
            return ret.toString();
        }
    }

    /**
     * for sender to keep each message had been sent
     * it will be removed when all receiver ack, isCompleter() return true
     */
    class MsgSent {
        final boolean mulitcast;
        // byte array for message
        final byte[] buffer;
        // header for origal
        final FragHeader3 header ;
        // time stamp
        // default is 0, update timestamp after it is completed
        long timeStamp ;
        // no of ack by receiver
        int totalReceived ;
        // address for reqResent
        final Address dest;
        // counter fro retry
        private int count = 0 ;
        // address of all current receiver ack
        HashSet<Address> receivedSet = new HashSet(memberSize);
        Map<Integer, Long> resentMap = new ConcurrentHashMap<Integer, Long>();
        //InetAddress localIp ;
        final Lock lock = new ReentrantLock();
        // for starting time stamp
        final Lock dataLock = new ReentrantLock();
        final long startTime ;
        // for streaming case
        private RandomAccessFile dataFile = null ;
        // filename with full path
//        private String path = null;

        public MsgSent( byte[] buffer, FragHeader3 header, boolean multicast, Address dest ) {
            this.buffer = buffer ;
            this.header = header ;
            this.mulitcast = multicast;
            this.dest = dest;
            this.timeStamp = 0 ;
            this.startTime = System.currentTimeMillis() ;
            if ( multicast && ! loopback ) {
                this.receivedSet.add( localAddr);
                this.totalReceived = 1 ;
            }
            else this.totalReceived = 0;
        }

        public void addList(Address address) {
            lock.lock();
            try {
                // increament totalReceived only if it is not in hashset, receiver may send more than once
                if (receivedSet.add( address) )
                    totalReceived++;
            } finally {
                lock.unlock();
            }
        }

        public void creatFile(File file) {
            try {
                dataFile = new RandomAccessFile(file, "r");
            } catch (IOException e) {
                throw new RuntimeException(e.getMessage(), e);
            }
        }

        public void close() {
            if ( dataFile != null ) {
                dataLock.lock();
                try {
                    dataFile.close();
                } catch (IOException ex) {
                    log.error( ex.getMessage());
                } finally {
                    dataLock.unlock();
                }
            }
        }

        public long getSize() {
            if ( header.type == FragHeader3.MSG_STREAM) {
                // make ( ?  : )"
                return (long) (frag_size * (header.num_frags -1)) +
                        (long) ( header.remain == frag_size ? frag_size : header.remain);
            }
            else return (long) buffer.length ;
        }

        private byte[] getDatas(int id) {
            long pos = (long) id * frag_size;
            dataLock.lock();
            try {
                dataFile.seek( pos);
                int size = (id + 1 == header.num_frags ? header.remain : frag_size ) ;
                byte[] datas = new byte [ size];
                //log.info("resentFrag id "+id +" pos "+pos +" size "+datas.length );
                int len = dataFile.read( datas);
                if ( len != datas.length ) {
                    log.error("getDatas len "+len +" expect "+datas.length);
                    return null;
                }
                else
                    return datas;
            } catch (IOException e) {
                log.error( e.getMessage(), e);
                return null ;
            } finally {
                dataLock.unlock();
            }

        }

        /**
         * get byte[] from source for resent
         * @param id
         * @return  byte array of id fragment
         */
        private byte[] getBytes(int id ) {
            if ( header.type == FragHeader3.MSG_STREAM) return getDatas( id);
            // check last block
            else {
                if ( header.num_frags < id + 1  ) {
                    return null;
                }
                else if ( id + 1 ==  header.num_frags ) {
                    byte[] bytes = new byte[ header.remain ];
                    System.arraycopy( buffer, frag_size * id , bytes , 0, header.remain );
                    return bytes;
                }
                else {
                    byte[] bytes = new byte[ frag_size ];
                    System.arraycopy( buffer, frag_size * id , bytes , 0, frag_size );
                    return bytes;
                }
            }
        }

        /**
         * Checks whether every receiver has acknowledged this message.
         * Unicast sends are considered complete immediately; multicast sends
         * compare the acknowledge count against the member count (fast path,
         * no per-member set lookups).
         */
        public boolean isComplete() {
            if ( ! mulitcast )
                return true;
            return totalReceived >= memberSize;
        }

        /**
         * True when this multicast send is nearly done: at least two acks are in
         * and fewer than {@code ceiling} receivers are still outstanding, so the
         * remainder should be serviced by unicast.
         */
        public boolean isUniCast() {
            return mulitcast && totalReceived > 1 && ( memberSize - totalReceived ) < ceiling;
        }

        /** Diagnostic string: header plus ack/retry/membership counters. */
        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder( header.toString() );
            sb.append( " recv " ).append( totalReceived )
              .append( " count " ).append( count )
              .append( " member size " ).append( memberSize );
            return sb.toString();
        }

    }

    /**
     * Per-sender receive window. Tracks which message ids from one sender have
     * been fully received. Ids are grouped into BITS-wide blocks: {@code bitMap}
     * maps a block's base id (the id rounded down to a multiple of BITS) to a
     * 64-bit mask in which bit i means "id base+i received".
     */
    class SenderBitMap {
        // lowestId of complete received message
        long lowestId = -1 ;
        // the firstId when SenderBitMap was created
        long firstId  ;
        // highestId of received message
        long highestId = -1 ;
        // lowest id between retransmit range
        long lowRecvdId ;
        // highest id between retransmit range
        long highRecvdId ;

        // ids greater than lowestId, parked here until the gap below them closes
        List<Long> idQueue ;
        // block base id -> 64-bit received mask
        ConcurrentMap<Long, Long> bitMap;
        // guards structural updates of this SenderBitMap
        final ReadWriteLock indexLock =new ReentrantReadWriteLock();
        // for lastRecvdId and lastRecvd only, to reduce lock contention
        final Lock recvdLock = new ReentrantLock();

        /**
         * @param size initial capacity hint for the block map
         * @param id   first message id observed from this sender
         */
        public SenderBitMap(int size, long id) {
            // fix: was a raw 'new ConcurrentHashMap(size)'; parameterize to match
            // the declared field type and avoid an unchecked conversion
            bitMap = new ConcurrentHashMap<Long, Long>( size);
            firstId = id ;
            lowRecvdId = id;
            highRecvdId = id;
            lowestId = findLowestId( id);
            // set up idQueue (copy-on-write: safe to remove while iterating)
            idQueue = new CopyOnWriteArrayList<Long>();
        }

        /** Builds a mask with the low {@code r} bits set (r in [0, BITS)). */
        private long makeBits(int r) {
            long bits = 0;
            for (int i= 0 ; i < r ;  i++ ) {
                bits = bits | ( 1L <<  i );
            }
            return bits;
        }

        /**
         * Seeds the bitmap for the first observed id and derives the initial
         * lowestId. The lowest id must be one level (one BITS block) below id,
         * e.g. id = 65 gives low = 0; this differs from finding the current key.
         * All bits below id within its block are pre-marked as received.
         * @param id id used to construct this SenderBitMap
         * @return lowestId for subsequent completeness checks
         */
        private long findLowestId(long id) {
            int r = (int) (id % BITS) ;
            long bits = 0;
            // all bits before r are treated as already received
            if ( r > 0 ) bits = makeBits(r)  ;
            //default value for low and key
            long low = -1 ;
            long key = 0;
            if ( id < BITS ) bitMap.put( key, bits);
            else {
                // must be one level below , means subtract an extra BITS
                low = id - r - BITS;
                key = id - r ;
                bitMap.put( key, bits );
            }
            if( log.isInfoEnabled() ) log.info("Assign lowestId " + low +" for id " + id +" bits " + bits+" key "+key);
            return low ;
        }

        /**
         * Drains the parked id queue: repeatedly promotes the next eligible id
         * to lowestId and drops its (now complete) block from the bitmap.
         * NOTE(review): mutates lowestId without taking indexLock itself; the
         * visible caller (setNextLowId) holds the write lock — confirm any
         * other call sites do the same.
         */
        void checkIdQueue() {
            if ( idQueue.size() == 0 ) return;
            if ( log.isInfoEnabled()) log.info("IdQueue size " + idQueue.size());
            while ( true ) {
                if ( idQueue.size() == 0 ) return;
                Long next = findNext();
                if ( next == null ) return;
                if ( log.isInfoEnabled()) log.info("IdQueue remove from bitMap "+ next);
                bitMap.remove( next);
                lowestId = next ;
                idQueue.remove( next ) ;
            }
        } // checkIdQueue

        /**
         * Finds the next parked id within one BITS step above lowestId.
         * Stale ids (at or below lowestId) are purged as a side effect; removal
         * during iteration is safe because idQueue is a CopyOnWriteArrayList.
         * @return the next id to promote, or null if none is eligible
         */
        private Long findNext() {
            for( Long each : idQueue ) {
                if ( each - lowestId > 0 && each - lowestId <= BITS ) {
                    return each ;
                }
                else if ( each <= lowestId ) {
                    idQueue.remove( each ) ;
                    if ( log.isInfoEnabled()) log.info("IdQueue removed "+ each );
                }
            }
            return null;
        }

        @Override
        public String toString() {
            return " lowestId " +lowestId+" highestId " +highestId  + " firstId " + firstId ;
        }
    }


    /** @return the configured bandWidth value */
    public int getBandWidth() {
        return bandWidth;
    }

    /** @param bandWidth new bandWidth value */
    public void setBandWidth(int bandWidth) {
        this.bandWidth = bandWidth;
    }

//    public int getFrag_size() { return frag_size; }
//    public void setFrag_size(int frag_size) {  this.frag_size = frag_size;  }

    /**
     * daemon thread to check sender list and release memory for timeout,
     * house cleaning work.
     * Scheduled as a TimerTask; each run walks datasMap (messages this node
     * sent and still tracks) and either removes completed/expired entries or
     * sends XMIT_INQ inquiries to receivers that have not acknowledged yet.
     */
    class SenderDaemon extends TimerTask {

        public void run() {
            Collection<MsgSent> msgSents = datasMap.values();
            if (datasMap.size() > 0)
                if(log.isInfoEnabled())  log.info("Check for sender daemon datasMap size "+ datasMap.size());
            // go through each msgSent, examine the size and time stamp
            for ( MsgSent each : msgSents) {
                // per-entry lock: serializes against ack/xmit handlers for this message
                each.lock.lock();
                try {
                    // send has not completed yet (timeStamp is set on completion);
                    // 'continue' still releases the lock via the finally block
                    if ( each.timeStamp == 0 ) continue ;
                    long elapse = System.currentTimeMillis() - each.timeStamp ;
                    // 'no' scales the timeout with the number of fragments per bandwidth unit
                    long no = ( each.header.num_frags / fragBW ) + 1 ;
                    if ( memberSize == 1 ) {
                        // we are the only member: nobody will ack, drop immediately
                        if(log.isInfoEnabled()) log.info("Remove from datasMap header "+ each.header.id +" memberSize = 1");
                        datasMap.remove( each.header.id );
                    }
                    else {
                        // sender had one additional retry; past (no+2)*timeOut and
                        // retry+2 inquiries, give up and release the memory
                        if ( elapse >  (no + 2 )  * timeOut && each.count > retry + 2  ) {
                            log.warn("Remove from datasMap header " + each.header.toString() +" for thresh hold " +elapse);
                            datasMap.remove( each.header.id );
                        }
                        else {
                            if ( elapse >= no * timeOut ) {
                                // overdue: send inquiry message asking receivers to ack
                                if ( log.isInfoEnabled()) log.info("Send inq " + each.header.toString() +" no "+ no +" elaspe "+ elapse );
                                FragHeader3 hdr = new FragHeader3( each.header.id, each.header.frag_id, each.header.num_frags,
                                    FragHeader3.XMIT_INQ );
                                each.count++;
                                // for multicast, if it is below ceiling we will send unicast for remaining receivers
                                if ( each.mulitcast && each.totalReceived > 0 && memberSize - each.totalReceived < ceiling )
                                    sendUnicastInq( each, hdr);
                                else
                                    sendInq( hdr, each.dest );
                            }
                            else if ( log.isInfoEnabled()) log.info("MsgSent "+ each.toString() +" elasps " +elapse +" trigger " + (no * timeOut) );
                        }
                    }
                } catch (Exception ex) {
                    // log and continue with the next entry
                    log.error( ex.getMessage(), ex);
                } finally {
                    each.lock.unlock();
                }
            }
        }
    }

    /**
     * Sends an XMIT_INQ inquiry for {@code hdr} to {@code dest} (null dest =
     * multicast), asking receivers to acknowledge the message.
     */
    private void sendInq(FragHeader3 hdr, Address dest)  {
        Message inq = new Message( dest, localAddr , new byte[]{} );
        inq.putHeader( name, hdr);
        if ( log.isInfoEnabled())
            log.info("Send inquery "+ hdr.toString() +" src " + localAddr.toString() );
        down_prot.down( new Event( Event.MSG , inq));
    }

    /**
     * Sends the inquiry {@code hdr} as individual unicasts to every member that
     * has not acknowledged {@code msgSent} yet.
     */
    private void sendUnicastInq(MsgSent msgSent, FragHeader3 hdr) {
        List<Address> pending = findInqList( msgSent.receivedSet);
        if ( log.isInfoEnabled())
            log.info("Send unicast inquery "+ hdr.toString()+" size "+ pending.toString() );
        for ( Address target : pending) {
            Message inq = new Message( target, localAddr , new byte[]{} );
            inq.putHeader( name, hdr);
            down_prot.down( new Event( Event.MSG , inq));
        }
    }

    /**
     * Returns the members that are NOT in {@code set}, i.e. the receivers that
     * still need to be queried.
     * @param set addresses that already acknowledged
     */
    private List<Address> findInqList(HashSet<Address> set) {
        List<Address> pending = new ArrayList<Address>();
        for ( Address member : members) {
            if ( set.contains( member))
                continue;
            pending.add( member);
        }
        return pending;
    }

    /**
     * Checks whether the bitmap block covering {@code id} exists in
     * {@code senderMap}. Read-only: never creates a slot.
     */
    private boolean isInBitMap(long id, SenderBitMap senderMap) {
        if ( senderMap == null )
            return false;
        // round id down to the base of its BITS-wide block
        long key = id - ( id % BITS);
        return senderMap.bitMap.containsKey( key);
    }

    /**
     * Sends a unicast XMIT_REQ asking {@code dest} to resend one fragment.
     * @param dest    sender of the original message
     * @param id      message id
     * @param frag_id index of the missing fragment
     */
    private void reqResentSingle(Address dest, long id, int frag_id) {
        FragHeader3 header = new FragHeader3(id, frag_id, 1, FragHeader3.XMIT_REQ );
        Message req = new Message( dest, localAddr, 1);
        req.putHeader( name, header);
        if ( log.isInfoEnabled())
            log.info("reqResentSingle "+ header.toString() );
        down_prot.down(new Event(Event.MSG, req));
    }

    /**
     * Promotes the smallest parked id to be the sender map's lowestId, then
     * lets checkIdQueue() drain any further eligible ids. No-op when the queue
     * is empty or the candidate does not advance lowestId.
     */
    private void setNextLowId(SenderBitMap senderMap) {
        long curLowId = findSmallest( senderMap.idQueue );
        // findSmallest returns 0 for an empty queue: nothing to promote
        if ( curLowId == 0 ) return;
        senderMap.indexLock.writeLock().lock();
        try {
            if ( curLowId <= senderMap.lowestId )
                return;
            if ( log.isInfoEnabled() ) log.info("setNextLowId " + curLowId + " senderMap "+ senderMap.toString());
            senderMap.lowestId = curLowId ;
            senderMap.idQueue.remove( curLowId);
            senderMap.checkIdQueue();
        } finally {
            senderMap.indexLock.writeLock().unlock();
        }
    }

    /**
     * Returns the smallest value in {@code list}, or 0 when the list is empty
     * (or contains only Long.MAX_VALUE).
     */
    private long findSmallest(List<Long> list) {
        if ( list.size() == 0 ) return 0;
        long smallest = Long.MAX_VALUE ;
        for ( Long value : list) {
            smallest = Math.min( smallest, value);
        }
        return ( smallest == Long.MAX_VALUE ) ? 0 : smallest;
    }

    /**
     * Receiver-side daemon (scheduled as a TimerTask), house cleaning work.
     * Each run walks msgReceivedMap: for every sender it scans the per-sender
     * bitmap for ids that are not yet complete, requests retransmission of the
     * missing fragments, and evicts entries whose retry budget is exhausted.
     */
    class ReceiverDaemon extends TimerTask {
        // minimum pause between two scan iterations (see end of run())
        private long delayTime = period ;

        /** True when {@code first} falls inside the BITS-wide block starting at {@code key}. */
        private boolean isFirstBlock(long key, long first) {
            return first >= key && first - key < BITS;
        }

        /** True when {@code last} falls inside the BITS-wide block starting at {@code key}. */
        private boolean isLastBlock(long key, long last) {
            return last >= key && last - key < BITS;
        }

        /**
         * Collects the ids in the block starting at {@code key} whose receive
         * bit is still off, restricted to [firstId, highestId) where those
         * bounds fall inside this block.
         * @param bitMap 64-bit received mask for the block
         * @return ids still missing in this block (possibly empty)
         */
        private Long[] findOpenList(long bitMap, long key, SenderBitMap senderMap) {
            List<Long> list = new ArrayList<Long>();
            boolean hasFirst = isFirstBlock( key, senderMap.firstId );
            boolean hasLast  = isLastBlock( key, senderMap.highestId );
            if ( hasFirst && hasLast ) {
                // block holds both ends: scan [firstId, highestId)
                for ( short i = (short) (senderMap.firstId - key)  ; i < senderMap.highestId - key ; i++ ) {
                    if ( ! isBitOn( bitMap, i)) list.add( key + i);
                }
            }
            else if ( hasFirst ) {
                // block holds the start: scan [firstId, end of block)
                for ( short i = (short) (senderMap.firstId - key)  ; i < BITS ; i++ ) {
                    if ( ! isBitOn( bitMap, i)) list.add( key + i);
                }
            }
            else if ( hasLast ) {
                // block holds the end: scan [start of block, highestId)
                for ( short i = 0 ; i < senderMap.highestId - key ; i++ ) {
                    if ( ! isBitOn( bitMap, i)) list.add( key + i);
                }
            }
            else {
                // interior block: an all-ones mask means nothing is missing
                if ( bitMap != 0xFFFFFFFFFFFFFFFFL ) {
                    for ( short i = 0 ; i < BITS ; i++ ) {
                        if ( ! isBitOn( bitMap, i)) list.add( key + i);
                    }
                }
            }
            return convertList(list);
        }

        /** Converts the accumulated id list to an array. */
        private Long[] convertList(List<Long> list) {
            return list.toArray( new Long[ list.size()]);
        }

        // per-sender retry counters for single-fragment messages; the daemon is
        // single threaded, so plain get/put sequences are safe here
        // (fix: was the raw type ConcurrentMap<Address, ConcurrentMap>)
        ConcurrentMap<Address, ConcurrentMap<Long, Short>> addressMap =
                new ConcurrentHashMap<Address, ConcurrentMap<Long, Short>>();

        /** backup thread
         *  go through msgReceivedMap: request resends, evict expired entries,
         *  then clean up and pace the next iteration
         */
        public void run() {
            long begin = System.currentTimeMillis();
            Set<Address> addressKeys = msgReceivedMap.keySet();

            for ( Address each : addressKeys ) {
                // map which contains all reqResent single fragment retry counts
                ConcurrentMap<Long, Short> map = addressMap.get( each);
                if ( map == null )  {
                    map = new ConcurrentHashMap<Long, Short>();
                    addressMap.put( each, map);
                }

                // go through each sender address
                try {
                    SenderBitMap senderMap = msgReceivedMap.get(each);
                    ConcurrentMap<Long, FragEntry> fragMap = fragment_list.get(each );
                    Set<Long> keys = senderMap.bitMap.keySet();
                    // a long parked-id queue means lowestId is stuck; try to advance it
                    if(senderMap.idQueue.size() > 20 ) {
                        log.info("key size "+ keys.size()+" queue size " + senderMap.idQueue.size() +" for "+ each.toString());
                        setNextLowId( senderMap);
                    }
                    for ( Long key :  keys ) {
                        //check highestId first, so we will not collide with current message
                        if ( key > senderMap.highestId ) continue ;
                        Long[] openList ;
                        long bitMaps ;
                        senderMap.indexLock.readLock().lock();
                        try {
                            // slot may have been removed concurrently
                            if ( ! senderMap.bitMap.containsKey( key) ) continue;
                            bitMaps = senderMap.bitMap.get(key);
                            openList = findOpenList( bitMaps, key, senderMap);
                        } finally {
                            senderMap.indexLock.readLock().unlock();
                        }
                        if(openList.length > 0 && log.isInfoEnabled()) log.info("OpenList " + openList.length + " senderMap "+
                                senderMap.toString()+" key " + keyToString(key) );
                        for ( Long id : openList ) {
                            // skip ids whose bit was turned on since the scan (concurrency);
                            // fix: replaces the original empty-statement 'if (cond) ; else continue;'
                            if ( ! ( isInBitMap( id, senderMap) && ! isBitOn( id, senderMap) ) )
                                continue;
                            // entry == null means a single-fragment message (fragMap may itself be null)
                            FragEntry entry = ( fragMap == null ) ? null : fragMap.get( id);
                            if ( entry == null )  {
                                // single fragment message, create the retry counter on first sight
                                if ( ! map.containsKey(id) )  map.put(id , (short) 0);
                                short times = map.get( id);
                                if ( times <= retry ) {
                                    times ++ ;
                                    if ( log.isInfoEnabled()) log.info("Resent id "+ id+" senderMap "+ senderMap.toString()+" times "+times );
                                    reqResentSingle(each, id, 0);
                                    map.put( id, times) ;
                                }
                                else {  // over retry count: give up, mark as received and drop the counter
                                    if (isInBitMap(id, senderMap) && ! isBitOn( id, senderMap)) updateBits(id, senderMap);
                                    if ( log.isInfoEnabled()) log.info("Remove id "+ id +" "+ senderMap.toString()+" times "+times );
                                    map.remove(id);
                                }
                            }
                            else { // multiple fragments
                                long elapse = System.currentTimeMillis() - entry.timeStamp ;
                                // 'no' scales the timeout with the fragment count per bandwidth unit
                                long no = ( entry.fragments.length / fragBW ) + 1 ;
                                // give up when both the time budget and the retry budget are spent
                                if ( elapse >= ( no * timeOut * 2) && entry.count > retry ) {
                                    log.warn("Remove id " + id +" src "  + each.toString() +" for threash hold " +
                                    " total recvd " + entry.number_of_frags_recvd + " expected " + entry.fragments.length );
                                    // remove data from fragMap
                                    fragMap.remove( id);
                                    // turn bit on as if it was received so it is not rescanned
                                    if (isInBitMap(id, senderMap) && ! isBitOn(id, senderMap)) updateBits(id, senderMap);
                                }
                                else if ( elapse > no * timeOut && entry.number_of_frags_recvd <  entry.fragments.length ) {
                                    entry.reqLock.lock();
                                    try {
                                        // throttle: at most one request burst per reqTime window
                                        if ( System.currentTimeMillis() - entry.lastReq > reqTime ) {
                                            if( log.isInfoEnabled() ) log.info("reqResent no of recvd " + entry.number_of_frags_recvd +
                                            " size " + entry.fragments.length + " id " + id +" src "  + each.toString());
                                            // update last req
                                            entry.lastReq = System.currentTimeMillis();
                                            for ( int i = 0 ; i < entry.fragments.length ; i ++) {
                                                if ( entry.fragments[i] == null)
                                                    reqResentSingle(each, id, i);
                                            }
                                            entry.count ++;
                                        }
                                    } finally {
                                        entry.reqLock.unlock();
                                    }
                                }
                            } // else entry != null
                        } // for openList
                    } // for keys
                } catch (Exception ex) {
                    // log and continue with the next sender
                    log.error( ex.getMessage(), ex);
                }
            } // each
            cleanUp();
            // make sure we had enough delay time between each iteration
            // (caps the pause at delayTime even when the scan took longer)
            long total = System.currentTimeMillis() - begin ;
            long toDelay = ( total >= delayTime ) ? delayTime : delayTime - total;
            try {
                Thread.sleep( toDelay);
            }  catch (InterruptedException ex)  {
                // fix: restore the interrupt status instead of swallowing it,
                // so the owning timer thread can observe cancellation
                Thread.currentThread().interrupt();
                log.error( ex.getMessage(), ex);
            }
        }   // end of run


        /** Drops single-fragment retry counters whose id has meanwhile been received. */
        private void cleanUp() {
            Set<Address> addressKeys = addressMap.keySet();
            for ( Address each : addressKeys ) {
                try {
                    Map<Long, Short> map = addressMap.get( each);
                    Set<Long> ids = map.keySet();
                    for ( Long id : ids) {
                        SenderBitMap senderMap = msgReceivedMap.get( each);
                        // note !!! must check isInBitMap first, otherwise it might create a slot
                        if (isInBitMap(id, senderMap) && isBitOn( id, senderMap))  {
                            if ( log.isInfoEnabled()) log.info("Clean up id " + id + " had been recvd " + each.toString() );
                            map.remove( id);
                        }
                    }
                } catch (Exception ex) {
                    // log and continue with the next sender
                    log.error( ex.getMessage(), ex);
                }
            }

        }
    }
}
