/*
 * $AIST_Release: 0.9.0 $
 * Copyright 2011 Information Technology Research Institute, National
 * Institute of Advanced Industrial Science and Technology
 * 
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * 
 *    http://www.apache.org/licenses/LICENSE-2.0
 * 
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package hadoop

import java.io.DataOutputStream
import java.io.IOException

import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.BytesWritable
import org.apache.hadoop.mapreduce.RecordWriter
import org.apache.hadoop.mapreduce.TaskAttemptContext
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat

import com.google.protobuf.CodedOutputStream

import scala.collection.mutable.{Map => MMap}
import scala.util.control.NonFatal

import pbdata.Tables.Table

/**
 * Record writer for multi-table execution.
 *
 * Each distinct key (a UTF-8 encoded table name) gets its own directory
 * under the job's output path; values are appended to a per-table "part"
 * file as protobuf `bytes` fields, using the field number of Table's
 * "data" field.
 */
class PBMultiTableRecordWriter(context: TaskAttemptContext) extends RecordWriter[BytesWritable, BytesWritable] {

  private val outputPath   = FileOutputFormat.getOutputPath(context)
  private val conf         = context.getConfiguration()
  private val fs           = FileSystem.get(conf)
  // Per-table open streams: the raw HDFS stream plus the protobuf encoder
  // layered on top of it. Guarded by `synchronized` in write(); close()
  // is expected to run after all writes have finished.
  private val fileMap      = MMap[String, (DataOutputStream, CodedOutputStream)]()
  private val dataFieldNum = Table.getDescriptor.findFieldByName("data").getNumber

  require(outputPath != null)

  /**
   * Returns the path of this task's part file for table `id`,
   * creating the table directory if necessary.
   *
   * @throws IOException if the directory cannot be created.
   */
  private def getWorkFile(id: String): Path = {

    val dir = new Path(outputPath, id)

    if (!fs.exists(dir) || !fs.getFileStatus(dir).isDir()) {
      fs.mkdirs(dir)
      if (!fs.getFileStatus(dir).isDir()) {
        throw new IOException("Can't create directory \"%s\".".format(dir))
      }
    }

    new Path(dir, FileOutputFormat.getUniqueFile(context, "part", ""))
  }

  /**
   * Appends `value` to the part file of the table named by `key`,
   * opening the per-table file lazily on first use.
   */
  override def write(key: BytesWritable, value: BytesWritable): Unit = {

    try {
      synchronized {
        val fn = Utils.decodeUTF8(key)
        val (_, cos) = fileMap.getOrElseUpdate(fn, {
          val file = getWorkFile(fn)
          val os = fs.create(file, false)

          (os, CodedOutputStream.newInstance(os))
        })

        cos.writeBytes(dataFieldNum, Utils.copyToByteString(value))
      }

    } catch {
      // Log and rethrow recoverable failures only; fatal VM errors
      // (OutOfMemoryError, InterruptedException, ...) propagate untouched.
      case NonFatal(e) =>
        e.printStackTrace()
        throw e
    }
  }

  /**
   * Flushes and closes every open stream. Unlike a plain loop, a failure
   * on one stream does not leak the remaining ones; the first failure is
   * rethrown after all streams have been attempted.
   */
  override def close(job: TaskAttemptContext) = {
    var firstError: Option[Throwable] = None
    for ((os, cos) <- fileMap.values) {
      try {
        cos.flush()
        os.close()
      } catch {
        case NonFatal(e) => if (firstError.isEmpty) firstError = Some(e)
      }
    }
    fileMap.clear()
    firstError.foreach(throw _)
  }
}

/**
 * Output format that routes records into per-table directories via
 * [[PBMultiTableRecordWriter]].
 */
class PBMultiTableOutputFormat extends FileOutputFormat[BytesWritable, BytesWritable] {

  // TODO: Supports compression.(requires?)
  def getRecordWriter(job: TaskAttemptContext): RecordWriter[BytesWritable, BytesWritable] =
    new PBMultiTableRecordWriter(job)
}

