/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.datanode;

import java.io.BufferedInputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.Timer;
import java.util.HashSet;
import java.util.TimerTask;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Inet4Address;
import java.net.SocketException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Arrays;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.ChunkQueueSink;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.SocketOutputStream;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.StringUtils;

/**
 * Original class, which actually conducted the reading. Under the modified prototype, this
 * class is now responsible for registering that a block is to be read, receiving the data
 * from the actual BlockReader, and sending the data back to the end recipient.
 */
class BlockSender implements java.io.Closeable, FSConstants {

	
	/**
	 * 
	 */
	//private static Timer CYCLICAL_BLOCK_LIST_TIMER = new Timer("CYCLICAL_BLOCK_LIST_TIMER");
	private static Map<Long,ReaderType> BLOCKS_TO_USE_MODIFIED_BLOCKSENDER = new HashMap<Long,ReaderType>();
	private OriginalBlockSender nonStreamingBlockSender = null;
	private StreamingBlockSender streamingBlockSender = null;
	
	public static void AMEND_BLOCKS_TO_USE_CYCLICAL_SCANNING(Map<Long,ReaderType> newBlockSet) {
		synchronized (BLOCKS_TO_USE_MODIFIED_BLOCKSENDER) {
			if (!newBlockSet.equals(BLOCKS_TO_USE_MODIFIED_BLOCKSENDER)) {
				
				BLOCKS_TO_USE_MODIFIED_BLOCKSENDER = newBlockSet;
				if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(BlockSender.class) + " BlockList amended"); }
				
				System.out.println("BlockList Amended:");
				for (Long blockID : BLOCKS_TO_USE_MODIFIED_BLOCKSENDER.keySet()) {
					System.out.println("Block ID: " + blockID + " - " + BLOCKS_TO_USE_MODIFIED_BLOCKSENDER.get(blockID));
				}
			}
		}
		
		
	}
	
	static {
		
		TimerTask reloadBlockListTimerTask = new ReloadBlockList();
		reloadBlockListTimerTask.run(); // Be sure to run immediately (cannot guarantee this would complete before class instantiated otherwise
		//CYCLICAL_BLOCK_LIST_TIMER.schedule(reloadBlockListTimerTask, PrototypeUtilities.CYCLICAL_BLOCK_LIST_REFRESH_PERIOD, PrototypeUtilities.CYCLICAL_BLOCK_LIST_REFRESH_PERIOD);
		if (PrototypeUtilities.DETAILED_DEBUG_MODE) { System.out.println("Header size: " + BlockMetadataHeader.getHeaderSize()); }
	}
	
	
	private static boolean debugMode = PrototypeUtilities.DETAILED_DEBUG_MODE;
	
	private Block block;
	private boolean chunkOffsetOK;
	private boolean corruptChecksumOK;
	private boolean verifyChecksum;
	private boolean closed = false;
	
	
	private long blockLength; // Length of the block
	private long offset; // Starting position to read from
	private long endOffset; // End position to read to
	private long totalRead; // Total bytes read
	private int maxChunksPerPacket; 
	private int packetSize; 
	
	private ChunkHandler chunkHandler;
	private BlockReadingProgress blockReadingProgress;
	
	private boolean transferToAllowed = true;
	//private OutputStream outputStreamToSendChunksTo;
	private boolean finished = false;
	//private ChunkQueueSink queueSink;
	
	/**
	 * Below fields are used to keep track of the BlockReaders that are used to actually read the blocks.
	 * The supporting functionality of each BlockSender having an ID is also used to help as an ID based
	 * solely on the block number will not be unique since many BlockSenders work with the same block.
	 */
	private int blockSenderID;
	private static int CURRENT_BLOCK_SENDER_ID = 0;
	private static Map<Long,BlockReader> BLOCK_READERS = new HashMap<Long,BlockReader>();
	private BlockReader blockReaderForRequiredBlock = null;
	

	/**
   	* Minimum buffer used while sending data to clients. Used only if
   	* transferTo() is enabled. 64KB is not that large. It could be larger, but
   	* not sure if there will be much more improvement.
   	*/
	
	public int getSenderID() {
		return blockSenderID;
	}
	
	private static final int MIN_BUFFER_WITH_TRANSFERTO = 64*1024;
	
  
	BlockSender(Block block, long startOffset, long length, boolean corruptChecksumOk, boolean chunkOffsetOK, boolean verifyChecksum, DataNode datanode) throws IOException {
	  
		this(block, startOffset, length, corruptChecksumOk, chunkOffsetOK, verifyChecksum, datanode, null);
	
		if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Finished Constructor 1"); }
	  
	}

	BlockSender(Block block, long startOffset, long length, boolean corruptChecksumOk, boolean chunkOffsetOK, boolean verifyChecksum, DataNode datanode, String clientTraceFmt) throws IOException {
	  
		if (PrototypeUtilities.ANNOUNCE_METHOD_NAME) { 
			System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " BlockSender constructor"); 
			System.out.println("Block: " + block.getBlockId() + " StartOffset: " + startOffset + " Length: "+ length + " corruptCheckOK: " + corruptChecksumOk + " chunkoffsetOK: " + chunkOffsetOK + " VerifyChksum: "+ verifyChecksum);
		}
		
		synchronized (BlockSender.class) {
			this.blockSenderID = CURRENT_BLOCK_SENDER_ID;
			CURRENT_BLOCK_SENDER_ID++;
		}
		
		if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Started Constructor 2: (TransferTo allowed: " + datanode.transferToAllowed); }
		
		if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Determining whether to use modified BlockSender or original one"); }
		
		synchronized (BLOCKS_TO_USE_MODIFIED_BLOCKSENDER) {
			if (!BLOCKS_TO_USE_MODIFIED_BLOCKSENDER.containsKey(Long.valueOf(block.getBlockId()))) {
				System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Using original streaming BlockSender (block " + block.getBlockId() + " not on list)"); 
				streamingBlockSender = new StreamingBlockSender(block, startOffset, length, corruptChecksumOk, chunkOffsetOK, verifyChecksum, datanode);
				return;
			}
			else {
				if (BLOCKS_TO_USE_MODIFIED_BLOCKSENDER.get(Long.valueOf(block.getBlockId())).equals(ReaderType.ORIGINAL_NON_STREAMED)) {
					System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Using original NON-streaming BlockSender (block " + block.getBlockId() + "  on list)");
					nonStreamingBlockSender = new OriginalBlockSender(block, startOffset, length, corruptChecksumOk, chunkOffsetOK, verifyChecksum, datanode);
					return;
				}
				else if (BLOCKS_TO_USE_MODIFIED_BLOCKSENDER.get(Long.valueOf(block.getBlockId())).equals(ReaderType.ORIGINAL_STREAMED)) {
					streamingBlockSender = new StreamingBlockSender(block, startOffset, length, corruptChecksumOk, chunkOffsetOK, verifyChecksum, datanode);
					System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Using original streaming BlockSender (block " + block.getBlockId() + "  on list)");
					return;
				}
				else {
					System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Using Prototype2 BlockSender (block " + block.getBlockId() + " IS on list)");
				}
			}
		}
		
		if (PrototypeUtilities.ANNOUNCE_METHOD_NAME) {
			if (streamingBlockSender != null) { 
				System.out.println("Using ORIGINAL blockSender methods - STREAMING");
			}
			else if (nonStreamingBlockSender != null) {
				System.out.println("Using ORIGINAL NON-STREAMING methods");
			}
			else {
				System.out.println("Using PROTOTYPE blockSender methods");
			}
		}
	  
		
		this.block = block;
		this.chunkOffsetOK = chunkOffsetOK;
		this.corruptChecksumOK = corruptChecksumOk;
		this.blockLength = datanode.data.getLength(block);
		this.transferToAllowed = datanode.transferToAllowed;
		this.verifyChecksum = verifyChecksum;
		
		if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Verify Checksum: " + this.verifyChecksum + " CorruptChksum OK : " + this.corruptChecksumOK + " ChunkOffsetOK" + this.chunkOffsetOK); }
	  
		/**
		 * Code section below is to ensure that there is a valid BlockReader able to
		 * do the actual reading from the block (and pass data back to the BlockSender to 
		 * actually send.
		 */
		
		synchronized (BLOCK_READERS) {
			
			if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + "Got BLOCK_READERS lock A"); }

			this.blockReaderForRequiredBlock = BLOCK_READERS.get(block.getBlockId());
			
			if (blockReaderForRequiredBlock == null) {
				if (PrototypeUtilities.SUMMARY_DEBUG_MODE) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + "Creating new BlockReader for Block " + block.getBlockId()); }
				blockReaderForRequiredBlock = new BlockReader(block, datanode, this.corruptChecksumOK);
				blockReaderForRequiredBlock.start();
				BLOCK_READERS.put(block.getBlockId(), blockReaderForRequiredBlock);
			}
			else {
				if (PrototypeUtilities.SUMMARY_DEBUG_MODE) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + "Using existing BlockReader for Block " + block.getBlockId()); }
			}
			
			if (PrototypeUtilities.SUMMARY_DEBUG_MODE) {
				for (Long blockId : BLOCK_READERS.keySet()) {
					System.out.println("BlockID in BLOCKREADERS: " + blockId);
				}
			}
			// Registering does not mean that data will start coming, it just stops the BlockSender from being destroyed
			this.blockReaderForRequiredBlock.register(this);
			
			if (PrototypeUtilities.SUMMARY_DEBUG_MODE) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + "Registered with BlockReader (my ID is " + this.getSenderID() + " Block ID: " + block.getBlockId()); }
			
			if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + "Releasing BLOCK_READERS lock A"); }
		}
	  
		
		/**
		 * The following code section is to adjust the offsets of the block that have been requested to
		 * ensure that they are valid (i.e. no request to read an offset beyond the length of the block.
		 */
		
		// If a negative length has been requested, then assume that entire block is to be read
	    if (length < 0) {
	    	length = blockLength;
	    }
	    
	    endOffset = blockLength; // Assume all of block is to be read until determined otherwise
	    // Is this right? Surely its a bug (but its in the original code, so who am I to question it?
	    
  	  	// Check if the bounds of what has been asked to be read is within the available range
  	  	if (startOffset < 0 || startOffset > endOffset || (length + startOffset) > endOffset) {
  	  		String msg = " Offset " + startOffset + " and length " + length + " don't match block " + block + " ( blockLen " + endOffset + " )";
  	  		if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + msg); }
  	  		throw new IOException(msg);
  	  	}
  	  	
  	  	// Ensure start offset points to the start of a check-summed chunk
  	  	offset = (startOffset - (startOffset % blockReaderForRequiredBlock.getBytesPerCheckSum()));
  	  	
  	  	// Now ensure that the end offset points to the end of a check-summed chunk
  	  	if (length >= 0) {
  	  		long tmpLen = startOffset + length;
  	  		if (tmpLen % blockReaderForRequiredBlock.getBytesPerCheckSum() != 0) {
  	  			tmpLen += (blockReaderForRequiredBlock.getBytesPerCheckSum() - tmpLen % blockReaderForRequiredBlock.getBytesPerCheckSum());
  	  		}
  	  		if (tmpLen < endOffset) {
  	  			endOffset = tmpLen;
  	  		}
  	  	}
  	  	
  	  	// Setup BlockReadingProgress to keep track of how much work has been done
  	  	if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Done adjusting offsets: Start Offset: " + offset); }
  	  	if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " EndOffset: " + endOffset + " BlockLength: " + blockLength); }
  	  	
  	  	
 	  	try {
 	  		if (debugMode) { 
 	  			System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Creating blockReadingProgress"); 
 	  		}
  	  		blockReadingProgress = new BlockReadingProgress(offset, endOffset - 1, blockLength, blockReaderForRequiredBlock.getBytesPerCheckSum());
 	  		if (debugMode) { 
 	  			System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " BlockReadingProgress for BlockSender: " + this.blockSenderID); 
 	  			System.out.println(blockReadingProgress);
 	  		}
  	  	}

 	  	
 	  	
  	  	catch (Exception ex) {
  	  		ex.printStackTrace();
  	  		throw new IOException("Unable to create BlockReadingProgress object: " + ex.getMessage());
  	  	}
  	  	
  
		if (debugMode) {
			System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Finished Constructor 2");
		}
		
		// Can blockLength be taken away and accessed via blockReaderForRequiredBlock?
		

  }

	/**
	* close opened files.
   	*/
	public void close() throws IOException {
	  
		if (PrototypeUtilities.ANNOUNCE_METHOD_NAME) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Close requested: " + this.blockSenderID);
		
		if (streamingBlockSender != null) {
			if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Requesting close() from STREAMING originalBlockSender"); }
			streamingBlockSender.close();
			return;
	  	}
		else if (nonStreamingBlockSender != null) {
			if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Requesting close() from NON STREAMING originalBlockSender"); }
			nonStreamingBlockSender.close();
			return;
		}
	  	else {
	  		if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Using modified blockSender for close() method"); }
		}

		
		
		
		
	  	// Ensure that the block reader no longer reads in data on this sender's behalf.
		synchronized (BLOCK_READERS) {
			
			if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + "Got BLOCK_READERS lock B"); }
			
			if (!closed) {
		  		if (this.blockReaderForRequiredBlock != null) {
		  			if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Calling de-registration method...");
			  		boolean canDeferenceBlockReader = blockReaderForRequiredBlock.deregister(this);
		  			if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Returned from de-registration method...");
			  		if (canDeferenceBlockReader) {
			  			if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Deferencing BlockReader");
			  			BLOCK_READERS.remove(this.block.getBlockId());
			  		}
			  	}
		  		this.closed = true;
			}
			
			if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + "Releasing BLOCK_READERS lock B"); }
	  	}
		
		//this.queueSink.stopRunning();
		
		if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Finished close request method");
	}


  
  

  /**
   * sendBlock() is used to read block and its metadata and stream the data to
   * either a client or to another datanode. 
   * 
   * @param out  stream to which the block is written to
   * @param baseStream optional. if non-null, <code>out</code> is assumed to 
   *        be a wrapper over this stream. This enables optimizations for
   *        sending the data, e.g. 
   *        {@link SocketOutputStream#transferToFully(FileChannel, 
   *        long, int)}.
   * @param throttler for sending data.
   * @return total bytes reads, including crc.
   */
  long sendBlock(DataOutputStream out, OutputStream baseStream, BlockTransferThrottler throttler) throws IOException {
	  
	  if (PrototypeUtilities.ANNOUNCE_METHOD_NAME) {
		  System.out.println("Starting sendBlock method");
	  }
	  
	  try {
	  
		  if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " SendBlock requested " + this.getSenderID());
		  
	
		  
		  if (streamingBlockSender != null) {
			  if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Requesting sendBlock from STREAMING originalBlockSender");
			  return streamingBlockSender.sendBlock(out, baseStream, throttler);
		  }
		  else if (nonStreamingBlockSender != null) {
			  if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Requesting sendBlock from NON-STREAMING originalBlockSender");
			  return nonStreamingBlockSender.sendBlock(out, baseStream, throttler);
		  }
		  else {
			  if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Using modified blockSender for sendBlock method");
		  }
	
	
		  if (out == null) {
			  IOException ioException = new IOException("out stream is null");
			  ioException.printStackTrace();
			  throw ioException;
		  }
		  
		  
		  
		  this.totalRead = 0;
		  //this.outputStreamToSendChunksTo = out;
		  
		  // Write the checksum header to the output stream, along with the initial offset of the block
		  try {
			  blockReaderForRequiredBlock.getChecksum().writeHeader(out);
			  if (chunkOffsetOK) {
				  out.writeLong(this.blockReadingProgress.getRequestedStartOffset());
			  }
			  out.flush();
		  }
		  catch (IOException ex) {
			  ex.printStackTrace();
			  throw PrototypeUtilities.ioeToSocketException(ex);
		  }
		  
		  // Now determine the packet size and the maximum number of chunks per packet
		  
		  packetSize = DataNode.PKT_HEADER_LEN + SIZE_OF_INTEGER;
	
		  //if (transferToAllowed && !verifyChecksum) {
		//	  maxChunksPerPacket = (Math.max(BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO) + blockReaderForRequiredBlock.getBytesPerCheckSum() - 1) / blockReaderForRequiredBlock.getBytesPerCheckSum();
		//	  packetSize += blockReaderForRequiredBlock.getChecksumSize() * maxChunksPerPacket;
		//  }
		//  else {
			  maxChunksPerPacket = Math.max(1, (BUFFER_SIZE + blockReaderForRequiredBlock.getBytesPerCheckSum() -1) / blockReaderForRequiredBlock.getBytesPerCheckSum());
			  packetSize = packetSize + (blockReaderForRequiredBlock.getBytesPerCheckSum() + blockReaderForRequiredBlock.getChecksumSize()) * maxChunksPerPacket;
		//  }
	
		  
		  if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " PKT Size: " + packetSize + " Max chunks per Pkt: " + maxChunksPerPacket);
		  
		  // Now this BlockSender is ready to receive data from the block reader, it is time to activate it. 
		  this.chunkHandler = new ChunkHandler(offset, endOffset - 1, out, packetSize, maxChunksPerPacket, blockReaderForRequiredBlock.getChecksumSize(), blockReaderForRequiredBlock.getBytesPerCheckSum(), throttler, this.corruptChecksumOK, this.blockSenderID);
		  chunkHandler.start();
		  LatchedArrayBlockingQueue<Chunk> chunkQueue = blockReaderForRequiredBlock.activate(this);
		  
		  while (!isAllRequiredDataReadFully()) {
			  try {
				  if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Attempting to take chunk from queue.");
				  // TODO Does there need to be a check that the chunk start-end offsets are within the required range?????
				  handleChunk(chunkQueue.take());
				  if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Taken chunk from queue");
			  } 
			  catch (InterruptedException e) {
				  e.printStackTrace();
			  }
		  }
		  
		  if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Taken all chunks from queue. Switching latch."); }
		  chunkQueue.setStopTakingData(true);
		  
		  
		  
		  if (debugMode) {
			  System.out.println("Getting all data written lock");
		  }
		  totalRead = chunkHandler.waitUntilAllDataIsWritten();
		  
		  if (debugMode) {
			  System.out.println("Got all data written lock");
		  }
		  
		  
		  try {
			  if (debugMode) {
				  System.out.println("Writing end of block");
			  }
			  out.writeInt(0); // Marks end of block
			  out.flush();
		  }
		  catch (IOException ex) {
			  ex.printStackTrace();
			  throw PrototypeUtilities.ioeToSocketException(ex);
		  }
		
		  // Check if block reader is still needed, and if not remove it from collection
		  if (isAllRequiredDataReadFully()) {
			  this.close();
		  }
		  
		  
		  if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Exiting sendData method");

		  
		  if (debugMode) {
			  System.out.println("Finished sendData method properly: " + totalRead);
		  }
		  return totalRead;
	  }
	  catch (IOException ex) {
		  ex.printStackTrace();
		  throw ex;
	  }
	  finally {
		  if (debugMode) {
			  System.out.println("Finished sendData method with Finally");
		  }
	  }
		  
    
  }
  
  private void handleChunk(Chunk chunk) {
	  
	  if (debugMode) {
		  System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Handling chunk: offset: " + chunk.getStartOffset() + " :: " + chunk.getEndOffset());
		  System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " BlockReadingProgress before chunk handled: ");
		  System.out.println(blockReadingProgress);
	  }
	  
	  if (!finished) {
		  
		  if ((chunk.getStartOffset() >= this.blockReadingProgress.getRequestedStartOffset()) && (chunk.getEndOffset() <= this.blockReadingProgress.getRequestedEndOffset())) {
			  try {
				  //this.outputStreamToSendChunksTo.write(data, 0, data.length);
				  if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Dealing with chunk (inside range)");
				  if (!blockReadingProgress.isChunkComplete(chunk.getStartOffset(), chunk.getEndOffset())) {
					  if (debugMode) { 
						  System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Chunk not already processed - will process");
						  System.out.println(chunk);
					  }
					  
					  this.chunkHandler.handleChunk(chunk);
					  finished = this.blockReadingProgress.amendProgress(chunk.getStartOffset(), chunk.getEndOffset());
					  
				  }
				  else {
					  
					  if (debugMode) { 
						  System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Chunk already processed - will NOT process");
					  }
					  
				  }
				  
				  
			  } catch (Exception e) {
	
				e.printStackTrace();
			  }
		  }
		  else {
			  if (debugMode) System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Chunk outside of required range - ignoring");
		  }
		  // As the thread that is executing the sendBlock method may be waiting (since not all the data was read in when the 
		  // method execution started), give it an opportunity to re-test the condition as it may be the case that all the
		  // required data is now read in.
		  
		  if (debugMode) {
			  System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " BlockReadingProgress after chunk handled: ");
			  System.out.println(blockReadingProgress);
		  }
	  }
	  else {
		  
		  if (debugMode) {
			  System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Already finished - not processing new data");
			  System.out.println(blockReadingProgress);
			  
		  }
		  
	  }
	  
  }
  
  

  
  boolean isAllRequiredDataReadFully() {
	 return blockReadingProgress.allRequiredBlocksCompleted();
  }
  
  boolean isBlockReadFully() {
	  
	  
	  if (PrototypeUtilities.ANNOUNCE_METHOD_NAME) {
		  System.out.println("Requested method isBlockReadFully");
	  }
	  
		if (nonStreamingBlockSender != null) {
			if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Requesting isBlockFullyRead from NONSTREAMING originalBlockSender"); }
			return nonStreamingBlockSender.isBlockReadFully();
	  	}
		else if (streamingBlockSender != null) {
			if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Requesting isBlockFullyRead from STREAMING originalBlockSender"); }
			return streamingBlockSender.isBlockReadFully();
		}
	  	else {
	  		if (debugMode) { System.out.println(PrototypeUtilities.getPreambleString(this.getClass()) + " Using modified isBlockFullyRead for close() method"); }
		}
	  
	  return blockReadingProgress.blockCompletelyRead();
  }
  
  
  public boolean equals(Object object) {
	  if (object instanceof BlockSender) {
		  BlockSender blockSender = (BlockSender) object;
		  if (blockSender.blockSenderID == this.blockSenderID) {
			  return true;
		  }
	  }
	  return false;
  }
  
  public int hashcode() {
	  return this.blockSenderID % PrototypeUtilities.HASH_DIVISION_CONSTANT;
  }
  
  public enum ReaderType { ORIGINAL_STREAMED, ORIGINAL_NON_STREAMED, PROTOTYPE }
  
}
