/*
 * $AIST_Release: 0.9.0 $
 * Copyright 2011 Information Technology Research Institute, National
 * Institute of Advanced Industrial Science and Technology
 * 
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * 
 *    http://www.apache.org/licenses/LICENSE-2.0
 * 
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package aggr

import com.google.protobuf.Message
import com.google.protobuf.Descriptors._
import com.google.protobuf.DescriptorProtos._
import java.util.Properties
import java.io.FileOutputStream
import java.io.File
import java.lang.IllegalArgumentException
import java.lang.System.err
import scala.collection.JavaConversions._
import util.JavaInvoker
import SerializedTypes.SerializedType
import pbdata.Tables._

/**
 * Main module of aggregators.
 */
object Aggregators {

  /** Classpath resource name of the aggregator configuration file. */
  val propertiesFile = "aggr.properties"

  /**
   * Interface of combiners (local pre-aggregation of emitted pairs).
   */
  trait Combiner {
    /**
     * Combines the given key/value pairs, emitting each combined
     * (key, value) pair through `writer`.
     *
     * @param data   the key/value pairs to combine
     * @param writer callback invoked once per combined pair
     */
    def combine(data: TraversableOnce[(Any, Any)])(writer: (Any, Any) => Unit): Unit
  }
  /**
   * Interface of aggregators.
   */
  trait Aggregator {
    /**
     * Aggregates the given key/value pairs, emitting each result
     * (key, value) pair through `writer`.
     *
     * @param data   the key/value pairs to aggregate
     * @param writer callback invoked once per aggregated pair
     */
    def aggregate(data: TraversableOnce[(Any, Any)])(writer:(Any, Any) => Unit): Unit


    /**
     * Gets the serialized key type this aggregator produces.
     * `None` (the default) means the caller's default type is used.
     */
    def getKeyType(): Option[SerializedType]   = None

    /**
     * Gets the serialized value type this aggregator produces.
     * `None` (the default) means the caller's default type is used.
     */
    def getValueType(): Option[SerializedType] = None
  }

  /**
   * Interface of aggregator factory.
   */
  trait AggregatorFactory {
    /**
     * Creates a new aggregator instance.
     *
     * @param keyType the serialized type of keys
     * @param valType the serialized type of values
     * @param args    numeric arguments for the aggregator
     */
    def createNew(keyType: SerializedType,
                  valType: SerializedType,
                  args: List[Long]): Aggregator

    /** Whether this factory's aggregators support a combiner (defaults to false). */
    def hasCombiner(): Boolean = false

    /**
     * Checks aggregator types before creation.
     *
     * @return `None` if the types are acceptable, otherwise `Some` list
     *         of error messages — inferred from the Option[List[String]]
     *         shape; TODO confirm against implementations.
     */
    def check(keyType: SerializedType,
              valType: SerializedType,
              args: List[SerializedType], 
              options: Map[String, SerializedType]): Option[List[String]]
  }

  /** Mapping from aggregator kind name to its factory. */
  type AggrTable = Map[String, AggregatorFactory]
  
  /**
   * Aggregators table.
   * The contents are read from properties file.
   */
  /**
   * Aggregators table, loaded lazily from the classpath properties file
   * (key = aggregator kind, value = factory class name).
   *
   * Entries whose factory class cannot be instantiated are reported to
   * stderr and skipped, so one broken entry does not hide the others.
   *
   * @throws IllegalArgumentException if the properties file is missing
   *         from the classpath (instead of the NPE the old code threw).
   */
  lazy val aggrMap: AggrTable = {
    val p = new Properties
    val is = this.getClass.getResourceAsStream("/" + propertiesFile)
    if (is == null) {
      // Fail fast with a clear message; getResourceAsStream returns null
      // when the resource is absent, which previously caused an NPE.
      throw new IllegalArgumentException(
        "%s: properties file not found on classpath.".format(propertiesFile))
    }
    try {
      p.load(is)
      p.toMap.flatMap { case (k: String, v: String) =>
        try {
          Map(k -> Class.forName(v).newInstance().asInstanceOf[AggregatorFactory])
        } catch {
          // NonFatal: let OutOfMemoryError, InterruptedException, etc. propagate.
          case scala.util.control.NonFatal(e) =>
            err.println(e)
            Map.empty[String, AggregatorFactory]
        }
      }
    } finally {
      is.close()
    }
  }

  /**
   * Makes FileDescriptorSet from serialized types.
   */
  /**
   * Builds a FileDescriptorSet covering the given serialized types.
   *
   * The set always starts from the descriptor of pbdata.Tables and its
   * dependencies; descriptors of message/enum types in `types` are added
   * on top. Returns `None` only when the accumulated set is empty.
   */
  def mkDescriptorSet(types: List[SerializedType]): Option[FileDescriptorSet]= {
    // A file descriptor together with all of its direct dependencies.
    def withDeps(fd: FileDescriptor): List[FileDescriptor] =
      fd :: fd.getDependencies().toList

    val base: Set[FileDescriptor] = Set.empty ++ withDeps(pbdata.Tables.getDescriptor())
    // NOTE(review): relies on FileDescriptor equality for deduplication —
    // does that comparison behave correctly? (translated from original note)
    val all = types.foldRight(base) { (t, acc) =>
      t match {
        case SerializedTypes.MessageType(m) => acc ++ withDeps(m.getFile())
        case SerializedTypes.EnumType(e)    => acc ++ withDeps(e.getFile())
        case _                              => acc
      }
    }

    if (all.isEmpty) None
    else Some(util.PBUtils.FileDescriptorList2FileDescriptorSet(all.toList))
  }

  /**
   * Process manager.
   *
   * Wraps a running OS process together with the temporary files that
   * were created for it; the files are deleted when the process is
   * waited upon.
   *
   * @param p        the underlying OS process
   * @param tmpFiles paths of temporary files to delete on completion
   */
  class Process(p: java.lang.Process, tmpFiles: List[String]) {

    /**
     * Waits for the process to terminate and returns its exit code.
     *
     * The temporary files are deleted in a `finally` block, so they are
     * cleaned up even if waitFor() throws (e.g. InterruptedException) —
     * the original code leaked them in that case.
     */
    def waitFor(): Int = {
      try {
        p.waitFor()
      } finally {
        for (t <- tmpFiles) {
          new File(t).delete()
        }
      }
    }
  }
  
  /**
   * Invokes an aggregator process.
   * 
   * @param outputFile   The file to write the aggregator result to.
   * @param kind         The kind of aggregator.
   * @param keyType      The key type.
   * @param valType      The value type.
   * @param args         The aggregator arguments.
   * @param dataFile     The file where the emitted data has been written.
   * @param outputText   Aggregator result format: if true, writes text, otherwise binary.
   *
   * @return The process manager object.
   * @throws IllegalArgumentException if `kind` is not a registered aggregator.
   *
   * When you invoke the process by this method, you must wait for completion
   * of the process with the returned object's waitFor() method.
   */
  def kick(outputFile: String,
           kind: String, 
           keyType: SerializedType,
           valType: SerializedType,
           args: List[Long], 
           dataFile: String,
           outputText: Boolean ) = {
    // Serializes `msg` into a fresh temporary file and returns its path.
    def writeTempFile(prefix: String, msg: Message) = {
      val t = File.createTempFile(prefix, null)
      val os = new FileOutputStream(t)
      try {
        msg.writeTo(os)
      } finally {
        os.close()
      }
      t.toString()
    }
    // Writes the DataFormat description (key/value types, descriptor set,
    // table arguments) to a temporary file that the child process reads.
    def mkFormatFile() = {
      val builder = DataFormat.newBuilder()
      mkDescriptorSet(List(keyType, valType)) match {
        case Some(x) => builder.setProtoFiles(x.toByteString)
        case None    => // no descriptors to attach
      }

      builder.setKeyType(keyType.toString)
      builder.setValueType(valType.toString)
      // valueOf instead of the deprecated java.lang.Long constructor.
      builder.addAllTableArgs(args.map { java.lang.Long.valueOf(_) })

      writeTempFile("scfmt", builder.build())
    }

    aggrMap.get(kind) match {
      // Fixed typo in the user-visible message ("unkown" -> "unknown").
      case None => throw new IllegalArgumentException("%s: unknown table kind.".format(kind))
      case Some(aggr) => 
        val fmt = mkFormatFile()

        val props = Map("sc.aggr"       -> aggr.getClass.getName,
                        "sc.outputText" -> outputText.toString)
        new Process(JavaInvoker.invoke("aggr.AggrMain", List(outputFile, fmt, dataFile), props), List(fmt))
    }
  }
}
