package ngram.writer;

import gnu.trove.list.array.TIntArrayList;
import gnu.trove.list.array.TLongArrayList;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import ngram.util.IOUtil;

/**
 *
 * @author g1wshimu
 */
public class V2FileWriter implements NGramFileWriter {

    /**
     * Maximum number of bytes needed to encode one (year, matchCount,
     * volumeCount) triple: a short year (2 bytes) plus two variable-length
     * longs (at most 9 bytes each as written by Hadoop's writeVLong).
     */
    private static final int MAX_ENTRY_BYTES = 2 + 9 + 9;

    /**
     * Number of entries the reusable buffer can hold without reallocation.
     * 1001 covers one entry per year for the usual n-gram year range.
     */
    private static final int DEFAULT_CAPACITY = 1000 + 1;

    private final DataOutputStream writer;

    /**
     * Reusable scratch buffer for the per-n-gram statistics payload. Sized
     * for DEFAULT_CAPACITY entries; {@link #writeNGram} falls back to a
     * temporary larger buffer when a record has more entries than that,
     * instead of overflowing.
     */
    private final byte[] buffer = new byte[DEFAULT_CAPACITY * MAX_ENTRY_BYTES];

    /**
     * Creates a writer that emits V2-format n-gram records to the given
     * stream.
     *
     * @param writer destination stream; not closed by this class
     */
    public V2FileWriter(DataOutputStream writer) {
        this.writer = writer;
    }

    /**
     * Writes one n-gram record: the UTF-encoded n-gram, then the payload
     * length in bytes, then for each year a short year followed by the
     * variable-length match count and volume count.
     *
     * @param ngram the n-gram text
     * @param years years, one per entry
     * @param matchCount match counts, parallel to {@code years}
     * @param volumeCount volume counts, parallel to {@code years}
     * @throws IllegalArgumentException if the three lists differ in size
     * @throws IOException if writing to the underlying stream fails
     */
    @Override
    public void writeNGram(String ngram, TIntArrayList years,
            TLongArrayList matchCount, TLongArrayList volumeCount) throws IOException {
        // Validate before emitting anything, so a bad call cannot leave a
        // half-written (and therefore unreadable) record in the stream.
        int entries = years.size();
        if (matchCount.size() != entries || volumeCount.size() != entries) {
            throw new IllegalArgumentException(
                    "Mismatched list sizes: years=" + entries
                    + ", matchCount=" + matchCount.size()
                    + ", volumeCount=" + volumeCount.size());
        }

        IOUtil.writeUTF(writer, ngram);

        /*
         * The payload length must precede the payload, but the length is
         * only known after the variable-length encoding is done. So encode
         * into a scratch buffer first, then write the length followed by
         * the buffer contents. Records with more entries than the reusable
         * buffer can hold get a one-off correctly sized buffer rather than
         * triggering a BufferOverflowException.
         */
        byte[] scratch = entries <= DEFAULT_CAPACITY
                ? buffer
                : new byte[entries * MAX_ENTRY_BYTES];
        ByteBuffer byteBuffer = ByteBuffer.wrap(scratch);
        DataOutput dataOutput = IOUtil.wrapByteBufferAsDataOutput(byteBuffer);
        for (int i = 0; i < entries; i++) {
            // NOTE(review): the V2 format stores the year as a short;
            // writeShort silently truncates values outside short range.
            dataOutput.writeShort(years.get(i));
            org.apache.hadoop.io.file.tfile.Utils.writeVLong(dataOutput, matchCount.get(i));
            org.apache.hadoop.io.file.tfile.Utils.writeVLong(dataOutput, volumeCount.get(i));
        }
        int bytesWritten = byteBuffer.position();

        writer.writeInt(bytesWritten);
        writer.write(scratch, 0, bytesWritten);
        // No reset needed: each call wraps a fresh ByteBuffer at position 0.
    }
}
