/*
 *  Copyright 2013 National Institute of Advanced Industrial Science and Technology
 *  
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *  
 *      http://www.apache.org/licenses/LICENSE-2.0
 *  
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

package org.sss.common;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.sss.common.io.DataWriter;
import org.sss.mapreduce.Broadcaster;
import org.sss.mapreduce.Encoder;
import org.sss.mapreduce.GroupID;
import org.sss.mapreduce.Partitioner;
import org.sss.mapreduce.SssException;
import org.sss.util.CallbackInterface;
import org.sss.util.Factory;
import org.sss.util.FactoryThreadLocal;

/**
 * Buffers partitioned {@code Bucket} lists, hands filled chunks to an I/O
 * thread pool for encoding, and writes the encoded result through the
 * supplied {@link DataWriter}.
 *
 * <p>Data flows in three stages visible in this class:
 * <ol>
 *   <li>{@link LocalBuffer} accumulates individual buckets per partition
 *       (thread-confined, obtained via {@link #getLocalBuffer(int)});</li>
 *   <li>{@link OutputQueue} accumulates bucket lists per partition
 *       (synchronized, shared across producers) and flushes once the rough
 *       byte size reaches {@code limitNBytes};</li>
 *   <li>{@link ChunkHandler} runs on the {@code SentinelThreadPool}, encodes
 *       keys/values with per-thread encoders, and writes the encoded chunk.</li>
 * </ol>
 *
 * <p>When the partitioner is a {@link Broadcaster}, a single queue (index 0)
 * is used and the encoded chunk is written once per output partition.
 */
public class PostEncodingOutputQueue  {
  // Fix: logger was created against EncodingOutputQueue.class (copy-paste
  // from a sibling class), which misattributes any future log output.
  @SuppressWarnings("unused")
  private static final Logger logger = LoggerFactory.getLogger(PostEncodingOutputQueue.class);
  private final List<OutputQueue> queues;
  private final Partitioner partitioner;
  private final int limitNBytes;
  private final DataReceiver<Chunk> dataWriter;
  private final WriterCounter counter;
  // Encoders are not assumed thread-safe: one instance per encoding thread.
  private final ThreadLocal<Encoder> keyEncoder;
  private final ThreadLocal<Encoder> valueEncoder;
  private final GroupID gid;
  private final CallbackInterface<Throwable> onCompletion;
  private final boolean broadcast;
  private final int numberOfQueues;

  /**
   * @param gid                 group this output belongs to
   * @param keyEncoderFactory   creates one key encoder per encoding thread
   * @param valueEncoderFactory creates one value encoder per encoding thread
   * @param numberOfQueues      number of output partitions
   * @param partitioner         routes buckets to partitions; a
   *                            {@link Broadcaster} enables broadcast mode
   * @param iotp                thread pool that runs the encode-and-write task
   * @param dataWriter          sink for the encoded chunks
   * @param limitNBytes         rough per-queue byte threshold that triggers a flush
   * @param onCompletion        invoked (with the error, or null) when the
   *                            submitter has no more task slices
   */
  public PostEncodingOutputQueue(GroupID gid,
                                 Factory<Encoder> keyEncoderFactory,
                                 Factory<Encoder> valueEncoderFactory,
                                 int numberOfQueues,
                                 Partitioner partitioner,
                                 SentinelThreadPool iotp,
                                 DataWriter dataWriter,
                                 int limitNBytes,
                                 CallbackInterface<Throwable> onCompletion) {
    this.gid = gid;
    this.limitNBytes = limitNBytes;
    this.numberOfQueues = numberOfQueues;
    this.queues = new ArrayList<OutputQueue>(numberOfQueues);
    this.partitioner = partitioner;
    this.broadcast = (partitioner.getClass() == Broadcaster.class);
    assert broadcast == gid.isBroadcast();
    if (broadcast) {
      // Broadcast: a single staging queue; fan-out happens at write time
      // in ChunkHandler, so each bucket is encoded only once.
      queues.add(new OutputQueue(0));
    }
    else {
      for (int i = 0;i < numberOfQueues;++i) {
        queues.add(new OutputQueue(i));
      }
    }
    this.counter = new WriterCounter(numberOfQueues);
    this.dataWriter = new TaskSliceSubmitter<Chunk>(iotp, new ChunkHandler(dataWriter));
    this.keyEncoder = new FactoryThreadLocal<Encoder>(keyEncoderFactory);
    this.valueEncoder = new FactoryThreadLocal<Encoder>(valueEncoderFactory);
    this.onCompletion = onCompletion;
  }

  /**
   * Appends a list of buckets to the queue for {@code partition}.
   * In broadcast mode callers must use partition 0 (the only queue).
   */
  public void put(int partition, List<Bucket> buckets) throws SssException {
    queues.get(partition).put(buckets);
  }

  /**
   * Flushes all queues (including any that are empty) and closes the
   * underlying task submitter.
   */
  public void close() throws SssException {
    for (OutputQueue oq: queues) {
      oq.flush();
    }
    dataWriter.close();
  }

  /** Propagates an error to the task submitter. */
  public void setError(Throwable e) {
    dataWriter.setError(e);
  }

  /** @return the error recorded on the task submitter, or null if none */
  public Throwable getError() {
    return dataWriter.getError();
  }

  /** @return per-partition record counts (a copy maintained by WriterCounter) */
  public long[] getOutputCountCopy() {
    return counter.getCount();
  }

  /** @return per-partition byte counts (a copy maintained by WriterCounter) */
  public long[] getOutputNBytesCopy() {
    return counter.getNBytes();
  }

  /**
   * Per-partition staging area shared by multiple producer threads.
   * Flushes its accumulated bucket lists to the encoding pool once the
   * rough byte size reaches the enclosing queue's {@code limitNBytes}.
   */
  public class OutputQueue {
    private final int partition;
    private int nBytes;
    private List<List<Bucket>> buckets;

    public OutputQueue(int partition) {
      this.partition = partition;
      this.buckets = new ArrayList<List<Bucket>>();
      this.nBytes = 0;
    }

    /**
     * Adds a bucket list and flushes if the accumulated rough size has
     * reached the byte limit.
     *
     * @param buckets buckets destined for this partition
     */
    public synchronized void put(List<Bucket> buckets) throws SssException {
      this.buckets.add(buckets);
      for (Bucket b: buckets) {
        this.nBytes += b.getRoughSize();
      }
      if (nBytes >= limitNBytes) {
        flush();
      }
    }

    /**
     * Hands the accumulated buckets to the encoding pool as one Chunk and
     * resets the accumulator. Note: an empty chunk is still submitted
     * (close() relies on flushing every queue unconditionally).
     */
    public synchronized void flush() throws SssException {
      dataWriter.put(new Chunk(partition, buckets));
      this.buckets = new ArrayList<List<Bucket>>();
      this.nBytes  = 0;
    }
  }

  /**
   * Thread-local front buffer: partitions individual buckets and pushes
   * them per-partition into the shared OutputQueues once its own rough
   * byte limit is reached. Not synchronized — intended for use by a
   * single producer thread.
   */
  public class LocalBuffer implements DataReceiver<Bucket> {
    private final List<List<Bucket>> buffers;
    private final int limitNBytes;
    private int nBytes = 0;

    public LocalBuffer(int limitNBytes) {
      this.limitNBytes = limitNBytes;
      buffers = new ArrayList<List<Bucket>>();
      for (int i = 0;i < queues.size();++i) {
        buffers.add(new ArrayList<Bucket>());
      }
    }

    /**
     * Routes one bucket to its partition buffer (partition 0 in broadcast
     * mode) and flushes all buffers when the local byte limit is reached.
     */
    @Override
    public void put(Bucket bucket) throws SssException {
      nBytes += bucket.getRoughSize();
      // Modulo guards against a partitioner returning an out-of-range index.
      int partition = (broadcast)?0:(partitioner.getPartition(bucket.key, bucket.value, buffers.size()) % buffers.size());
      buffers.get(partition).add(bucket);
      if (nBytes >= limitNBytes) {
        flush();
      }
    }

    @Override
    public void setError(Throwable e) {
      PostEncodingOutputQueue.this.setError(e);
    }

    @Override
    public Throwable getError() {
      return PostEncodingOutputQueue.this.getError();
    }

    // Pushes every partition buffer (empty ones included) downstream and
    // resets the local accumulator.
    private void flush() throws SssException {
      for (int i = 0;i < buffers.size();++i) {
        PostEncodingOutputQueue.this.put(i, buffers.get(i));
        buffers.set(i, new ArrayList<Bucket>());
      }
      nBytes = 0;
    }

    @Override
    public void close() throws SssException {
      flush();
    }
  }


  /** @return a new single-threaded front buffer flushing at {@code limit} bytes */
  public DataReceiver<Bucket> getLocalBuffer(int limit) {
    return new LocalBuffer(limit);
  }

  /**
   * Runs on the I/O thread pool: encodes a Chunk's buckets with the
   * per-thread encoders and writes the encoded result. In broadcast mode
   * the single encoded array is written once per output partition.
   */
  private class ChunkHandler implements InputDrivenTask<Chunk> {
    private final DataWriter dataWriter;

    public ChunkHandler(DataWriter dataWriter) {
      this.dataWriter = dataWriter;
    }
    @Override
    public void handle(Chunk chunk) throws SssException {
      try {
        int partition = chunk.partition;
        int n = 0;
        for (List<Bucket> b: chunk.buckets) {
          n += b.size();
        }
        int nSlot = gid.getNSlot();

        EncodedBucket[] enc = new EncodedBucket[n];
        Encoder keyEnc = keyEncoder.get();
        Encoder valEnc = valueEncoder.get();
        int i = 0;
        for (List<Bucket> bs: chunk.buckets) {
          for (Bucket b: bs) {
            // NOTE(review): "& (nSlot-1)" only equals "% nSlot" when nSlot
            // is a power of two — presumably GroupID guarantees that; verify.
            enc[i++] = new EncodedBucket(b.key.slot() & (nSlot-1),
                keyEnc.encode(b.key), valEnc.encode(b.value));
          }
        }
        if (broadcast) {
          // Encode once, write the same chunk to every partition.
          for (int j = 0;j < numberOfQueues;++j) {
            long nb = dataWriter.write(gid, new DataChunk(j, enc));
            counter.countup(j, n, nb);
          }
        }
        else {
          long nb = dataWriter.write(gid, new DataChunk(partition, enc));
          counter.countup(partition, n, nb);
        }
      }
      catch (IOException e) {
        // Preserve the cause so the I/O failure surfaces to the caller.
        throw new SssException(e);
      }
    }
    @Override
    public void noMoreTaskSlice(Throwable e) {
      onCompletion.callback(e);
    }
  }

  /**
   * Immutable pairing of a partition index with its raw bucket lists.
   * Made static: it reads no enclosing-instance state, so a hidden outer
   * reference would only pin the queue in memory.
   */
  private static class Chunk {
    public final int partition;
    public final List<List<Bucket>> buckets;

    public Chunk(int partition, List<List<Bucket>> buckets) {
      this.partition = partition;
      this.buckets = buckets;
    }
  }
}
