/**
 * 
 */
package mapreduce4j;

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

/**
 * Reads {@code (Text, IntWritable)} records from an in-memory {@link ByteBuffer}.
 * <p>
 * {@link #initialize(ByteBuffer)} wraps a read-only view of the buffer in a
 * {@link DataInputStream}; each {@link #nextKeyValue()} call deserializes one
 * key/value pair via the Writable {@code readFields} protocol. Not thread-safe.
 *
 * @author tim
 */
public class TextIntRecordReader extends RecordReader<Text, IntWritable> {
	protected DataInputStream is;   // stream view over {@link #buffer}
	protected ByteBuffer buffer;    // read-only copy of the caller's buffer
	protected Text k;               // key from the latest successful nextKeyValue()
	protected IntWritable v;        // value from the latest successful nextKeyValue()

	/** @return the key read by the most recent successful {@link #nextKeyValue()} */
	@Override
	public Text getCurrentKey() {
		return k;
	}

	/** @return the value read by the most recent successful {@link #nextKeyValue()} */
	@Override
	public IntWritable getCurrentValue() {
		return v;
	}

	/**
	 * Prepares this reader to consume records from {@code input}.
	 *
	 * @param input buffer positioned at the first record; a read-only view is
	 *              taken so this reader never mutates the caller's buffer
	 */
	@Override
	public void initialize(ByteBuffer input) {
		// make a read-only copy just in case
		buffer = input.asReadOnlyBuffer();
		is = new DataInputStream(newInputStream(buffer));
	}

	/**
	 * Attempts to deserialize the next key/value pair from the stream.
	 *
	 * @return {@code true} if a pair was read into {@link #k}/{@link #v};
	 *         {@code false} when the buffer is exhausted or the data is truncated
	 */
	@Override
	public boolean nextKeyValue() {
		k = new Text("");
		v = new IntWritable(-1);
		try {
			k.readFields(is);
			v.readFields(is);
			return true;
		} catch (IOException e) {
			// EOFException (or truncated data) simply means no more records.
			// Deliberately narrow: programming errors such as calling before
			// initialize() should surface, not be masked as end-of-input.
			return false;
		}
	}

	/**
	 * Returns an {@link InputStream} that reads from {@code buf} using the
	 * relative {@link ByteBuffer#get()} methods (advancing its position).
	 * <p>
	 * Honors the {@link InputStream} contract: single-byte reads return an
	 * unsigned value in 0-255, and both read methods return -1 at end of buffer.
	 *
	 * @param buf the buffer to expose as a stream
	 * @return a stream view over the remaining bytes of {@code buf}
	 */
	public static InputStream newInputStream(final ByteBuffer buf) {
		return new InputStream() {
			@Override
			public synchronized int read() throws IOException {
				if (!buf.hasRemaining()) {
					return -1;
				}
				// Mask to the unsigned 0-255 range; returning the raw (signed)
				// byte would make 0xFF look like EOF and corrupt high-bit bytes.
				return buf.get() & 0xFF;
			}

			@Override
			public synchronized int read(byte[] bytes, int off, int len) throws IOException {
				if (len == 0) {
					return 0;
				}
				// Contract requires -1 (not 0) once the buffer is exhausted.
				if (!buf.hasRemaining()) {
					return -1;
				}
				// Read only what's left
				len = Math.min(len, buf.remaining());
				buf.get(bytes, off, len);
				return len;
			}
		};
	}
}
