/*
 *  Copyright 2013 National Institute of Advanced Industrial Science and Technology
 *  
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *  
 *      http://www.apache.org/licenses/LICENSE-2.0
 *  
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

package org.sss.client;

import static org.sss.common.Utils.newInstance;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Properties;

import org.slf4j.Logger;
import org.sss.common.Bucket;
import org.sss.common.DataReceiver;
import org.sss.common.PostEncodingOutputQueue;
import org.sss.mapreduce.Broadcaster;
import org.sss.mapreduce.Configuration;
import org.sss.mapreduce.GroupID;
import org.sss.mapreduce.HashPartitioner;
import org.sss.mapreduce.Partitioner;
import org.sss.mapreduce.SssException;
import org.sss.mapreduce.StorageNode;
import org.sss.mapreduce.datatype.Packable;
import org.sss.util.CallbackInterface;
import org.sss.util.SortedProperties;
import org.sss.util.StopWatch;

/**
 * Putter of data into storage servers.
 */
/**
 * Putter of data into storage servers.
 *
 * <p>Key/value pairs appended via {@link #append} are encoded and written
 * asynchronously through a {@link PostEncodingOutputQueue}; {@link #close}
 * flushes the queue, waits for the asynchronous writes to finish, and then
 * records execution statistics via the client.
 *
 * <p>Not thread-safe for concurrent {@code append} calls unless the
 * underlying local buffer is; {@code close} is synchronized.
 */
public final class DataPutter<K extends Packable, V extends Packable> implements Closeable {
  private static final Logger logger = SssClient.getLogger(DataPutter.class);
  private final SssClient client;
  private final GroupID output;
  private PostEncodingOutputQueue queue;
  private DataReceiver<Bucket> localBuffer;
  private StopWatch stopWatch;
  // Set by the asynchronous write-completion callback; guarded by "this".
  private boolean done = false;
  // First failure reported by the asynchronous write callback, or null.
  // Surfaced to the caller from close(); guarded by "this".
  private Throwable ioError = null;

  /**
   * Create data putter.
   *
   * @param client           Client used for cluster, encoding and I/O access.
   * @param keyClass         Class of key type.
   * @param valueClass       Class of value type.
   * @param partitionerClass Class of partitioner.
   * @param output           Output <code>GroupID</code>
   * @return a new <code>DataPutter</code>
   * @throws SssException if the putter cannot be initialized.
   */
  public static <K extends Packable, V extends Packable> DataPutter<K, V> create(SssClient client,
      Class<K> keyClass,
      Class<V> valueClass,
      Class<? extends Partitioner> partitionerClass,
      GroupID output) throws SssException {
    return new DataPutter<K, V>(client, keyClass, valueClass, partitionerClass, output);
  }

  /**
   * Create data putter.
   *
   * This function create DataPutter which generate output group ID.
   *
   * @param client           Client used for cluster, encoding and I/O access.
   * @param keyClass         Class of key type.
   * @param valueClass       Class of value type.
   * @param partitionerClass Class of partitioner.
   * @return a new <code>DataPutter</code> with a freshly generated output group ID.
   * @throws SssException if the putter cannot be initialized.
   */
  public static <K extends Packable, V extends Packable> DataPutter<K, V> create(SssClient client,
      Class<K> keyClass,
      Class<V> valueClass,
      Class<? extends Partitioner> partitionerClass) throws SssException {
    return new DataPutter<K, V>(client, keyClass, valueClass, partitionerClass, null);
  }

  /**
   * Create data putter.
   *
   * This function create DataPutter which use HashPartitioner.
   *
   * @param client           Client used for cluster, encoding and I/O access.
   * @param keyClass         Class of key type.
   * @param valueClass       Class of value type.
   * @param output           Output <code>GroupID</code>
   * @return a new <code>DataPutter</code> using <code>HashPartitioner</code>.
   * @throws SssException if the putter cannot be initialized.
   */
  public static <K extends Packable, V extends Packable> DataPutter<K, V> create(SssClient client,
      Class<K> keyClass,
      Class<V> valueClass,
      GroupID output) throws SssException {
    return new DataPutter<K, V>(client, keyClass, valueClass, null, output);
  }

  /**
   * Create data putter.
   *
   * This function create DataPutter which use HashPartitioner and
   * generate output Group ID.
   *
   * @param client           Client used for cluster, encoding and I/O access.
   * @param keyClass         Class of key type.
   * @param valueClass       Class of value type.
   * @return a new <code>DataPutter</code> using <code>HashPartitioner</code>
   *         and a freshly generated output group ID.
   * @throws SssException if the putter cannot be initialized.
   */
  public static <K extends Packable, V extends Packable> DataPutter<K, V> create(SssClient client,
      Class<K> keyClass,
      Class<V> valueClass) throws SssException {
    return create(client, keyClass, valueClass, null, null);
  }

  private DataPutter(SssClient client,
                    Class<K> keyClass,
                    Class<V> valueClass,
                    Class<? extends Partitioner> partitionerClass,
                    GroupID outputGID) throws SssException {
    this.client = client;
    List<StorageNode> storageNodes = client.getClusterManager().getStorageNodes();
    Configuration conf = client.getConfiguration();

    // Generate an output group ID when the caller did not supply one.
    if (outputGID == null) {
      this.output = GroupID.createRandom(
          conf.getKeyFormat("put.output.keyformat", "job.new_group.keyformat"), false, false,
          conf.getInt("job.new_group.numslot", 16));
    } else {
      this.output = outputGID;
    }
    int queueLimitNBytes = conf.getInt("put.output_queue.limit_nbytes", 2 * 1024 * 1024);
    int localCacheLimit = conf.getInt("put.output_queue.local.limit_nbytes", 128 * 1024);
    // A broadcast group forces the Broadcaster partitioner; otherwise the
    // caller's choice is honored, defaulting to HashPartitioner.
    if (this.output.isBroadcast()) {
      if (partitionerClass != null && partitionerClass != Broadcaster.class) {
        logger.warn("GroupID has broadcast flag. thus \"{}\" is not used.",
            partitionerClass.getName());
      }
      partitionerClass = Broadcaster.class;
    } else if (partitionerClass == Broadcaster.class) {
      throw new SssException("GroupID is not broadcast, but partitioner is Broadcaster.");
    } else if (partitionerClass == null) {
      partitionerClass = HashPartitioner.class;
    }
    this.stopWatch = StopWatch.start();
    this.queue = new PostEncodingOutputQueue(
        output,
        client.getEncodings().getEncoderFactory(keyClass),
        client.getEncodings().getEncoderFactory(valueClass),
        storageNodes.size(),
        newInstance(partitionerClass), client.getIOTPWrite(),
        client.getIOManager().createDataWriter(),
        queueLimitNBytes,
        new CallbackInterface<Throwable>() {
          @Override
          public void callback(Throwable e) {
            ioWriteComplete(e);
          }
        });
    this.localBuffer = queue.getLocalBuffer(localCacheLimit);
  }

  /**
   * Get output <code>GroupID</code>.
   *
   * @return the group ID this putter writes into.
   */
  public GroupID getOutputGroupID() {
    return output;
  }

  /**
   * Append key and value pair to Tuple Group.
   *
   * @param key   Key
   * @param value Value
   * @throws SssException if the pair cannot be buffered for output.
   */
  public void append(K key, V value) throws SssException {
    localBuffer.put(new Bucket(key, value));
  }

  /**
   * Close this.
   *
   * Flushes buffered data, waits for asynchronous writes to complete,
   * and records execution statistics. Idempotent: subsequent calls are no-ops.
   *
   * @throws IOException if flushing fails or an asynchronous write reported
   *         an error.
   */
  @Override
  public synchronized void close() throws IOException {
    try {
      if (localBuffer != null) {
        localBuffer.close();
        localBuffer = null;
        queue.close();
        waitIoWriteComplete();
        stopWatch.stop();
        writeInfo();
        queue = null;
        // Surface an asynchronous write failure to the caller instead of
        // silently dropping it (the callback only records the first error).
        if (ioError != null) {
          throw new IOException("Asynchronous data write failed", ioError);
        }
      }
    }
    catch (SssException e) {
      throw new IOException(e);
    }
  }

  @Override
  protected void finalize() {
    // Safety net: release resources if the caller forgot to close().
    try {
      close();
    } catch (IOException e) {
      logger.error("Error on finalize", e);
    }
  }

  // Writes per-node and total output counts/sizes plus timing information
  // to the client's info sink.
  private void writeInfo() throws SssException {
    Properties prop = new SortedProperties();
    List<StorageNode> storageNodes = client.getClusterManager().getStorageNodes();

    long[] count = queue.getOutputCountCopy();
    long countSum = 0;
    int i = 0;
    for (long c: count) {
      countSum += c;
      prop.setProperty(
          String.format("data_putter.output.%s.count", storageNodes.get(i)),
          "" + c);
      i++;
    }
    prop.setProperty("data_putter.output.total.count", "" + countSum);

    long[] nbytes = queue.getOutputNBytesCopy();
    long nbytesSum = 0;
    i = 0;
    for (long n: nbytes) {
      nbytesSum += n;
      prop.setProperty(
          String.format("data_putter.output.%s.size", storageNodes.get(i)),
          "" + n);
      i++;
    }
    prop.setProperty("data_putter.output.total.size", "" + nbytesSum);
    prop.setProperty("data_putter.time", stopWatch.toString());
    prop.setProperty("data_putter.output", output.toString());
    client.writeInfo(prop, "Data Putter execution information");
  }

  // Callback invoked by the output queue when asynchronous writes finish.
  // Records the first failure (if any) and wakes up waitIoWriteComplete().
  private synchronized void ioWriteComplete(Throwable e) {
    if (e != null && ioError == null) {
      ioError = e;
      logger.error("Asynchronous data write failed", e);
    }
    done = true;
    notifyAll();
  }

  // Blocks until ioWriteComplete() has been called. Keeps waiting across
  // interrupts but restores the thread's interrupt status before returning.
  private synchronized void waitIoWriteComplete() {
    boolean interrupted = false;
    while (!done) {
      try {
        wait();
      }
      catch (InterruptedException e) {
        // Remember the interrupt; we must still wait for write completion.
        interrupted = true;
      }
    }
    if (interrupted) {
      Thread.currentThread().interrupt();
    }
  }
}
