#!/usr/bin/R --vanilla --slave -f
#
# tnet.R
#
# Implementation of the TNET serialization format in R.
#
# R actually doesn't have a good serialization format for say 100 data frames.
# A single data frame can be exported with CSV, but there isn't a
# straightforward way for multiple data frames to be serialized to the same
# file or byte stream.
#
# R exports binary data formats (e.g. .RData), but it's fairly hard to find a
# parser for them in any other language (say Python).  They also have multiple
# versions and no spec outside the R interpreter source code.
#
# TNET is well-specified and easy to implement in any language.
#
# NOTE: Pure R is very slow for string manipulation (>10x slower than Python).
#
# R-specific Usage Notes
# ----------------------
#
# Serialization (dump):
#   list(1,2,3) and c(1,2,3) both serialize to TNET [1, 2, 3]
#   list(1) serializes to TNET [1], but c(1) serializes to TNET 1, because R
#     has no non-vector types.  So you may have to explicitly use list() to get a
#     singleton list.
#   named R lists and named R vectors serialize to TNET dictionaries.
#   unnamed R lists and unnamed R vectors serialize to TNET arrays.
#
# Deserialization (load):
#   Homogeneous TNET arrays of atoms (int, float, bool, string) are turned into
#   R vectors rather than R lists.
#
# (See README for general notes on TNET)
#
# Limitations
# -----------
#
# Raw vectors are not supported.  Mainly this is because we actually dump an R
# "character" type, which can actually contain unicode characters.  The raw type
# doesn't have paste().  (This is probably faster than doing c(x, c(y, z)) on raw
# vectors)
#
# Encoding: we assume strings are utf-8 encoded.  TNET expects that.  (For R
# details, see help(Encoding) )
#
# An empty R list() is dumped as the empty array, since it has no names.  There
# is no way to output the empty TNET dictionary in R now.  Do we need it?


# %Id%

# Fast path for vectors of primitives
# Fast path for unnamed vectors of primitives (no NA values).
#
# Builds the TNET array payload with a single vectorized paste() instead of
# dispatching through tnet.dump() per element, which is much faster in R.
# The caller (tnet.dump) must check for NA values first: an NA here would
# produce an invalid chunk like "2:NA#".
.fast.seq <- function(data) {
  tag <- NA
  if (is.integer(data)) {
    tag <- '#'
  } else if (is.numeric(data)) {
    tag <- '^'
  } else if (is.character(data)) {
    tag <- '$'
  } else if (is.logical(data)) {
    # Logicals first need conversion to the 't'/'f' wire form; each encoded
    # element is then "1:t?" or "1:f?", identical to the slow-path output.
    data <- ifelse(data, 't', 'f')
    tag <- '?'
  }

  # Vectorized version: one length-prefixed chunk per element, concatenated.
  if (!is.na(tag)) {
    parts <- paste(nchar(data, type='bytes'), ':', data, tag, sep='')
    chunk <- paste(parts, collapse='')
    return(.dump.chunk(chunk, ']'))
  }

  # Not a primitive vector type we can vectorize; fall back to the slow path.
  return(.dump.seq(data))
}

# Serialize an R value to a TNET string.
#
# Dispatch order is significant:
#   * is.list() must be tested before is.vector(), because
#     is.vector(list(a="b")) == TRUE while is.list(c(1,2,3)) == FALSE.
#   * The NULL/NA test must come before is.integer(), because is.na(x) and
#     is.integer(x) can both be TRUE and we'd emit the invalid chunk "2:NA#".
#
# Named lists/vectors serialize to TNET dictionaries {}; unnamed ones to
# arrays []; length-1 unnamed vectors to TNET atoms.
# NOTE(review): a length-1 NAMED vector falls through to the scalar branch
# (length(data) != 1 fails) and silently loses its name -- confirm intended.
tnet.dump <- function(data) {
  # in R:
  # is.list( c(1,2,3) ) == FALSE
  # is.vector( list(a="b") ) == TRUE
  # so the is.list() check has to come first

  # List with or without names.
  if (is.list(data)) {
    n <- names(data)
    if (is.null(n)) {
      return(.dump.seq(data))  # no names, treat as []
    } else {
      return(.dump.list(data))  # treat as {}
    }
  } 

  # Fallback for other types (atomic vectors, including length 0).
  if (is.vector(data) && length(data) != 1) {
    # is.vector(list()) is True, so this has to come after is.list() above
    n <- names(data)
    if (is.null(n)) {
      # If there are any NA values, we can't use the fast path, because we'll
      # get stuff like 2:NA^, which isn't valid.
      if (any(is.na(data))) {
        return(.dump.seq(data))
      } else {
        return(.fast.seq(data))  # potentially a fast path
      }
    } else {
      return(.dump.list(data))
    }
  }

  # From here on, data is a scalar (length 1) or NULL.
  if (is.null(data) || is.na(data)) {
    # It's possible for is.na(x) AND is.integer(x) to be true, so we have to do
    # this first to avoid generating invalid 2:NA#.  See unit test.
    '0:~'
  } else if (is.integer(data)) {  # is.integer(1L)=T, is.integer(1)=F
    out <- as.character(data)
    .dump.chunk(out, '#')
  } else if (is.numeric(data)) {
    out <- as.character(data)
    .dump.chunk(out, '^')
  } else if (is.character(data)) {
    .dump.chunk(data, '$')
  } else if (is.logical(data)) {
    # Booleans are encoded as 't' / 'f' with the '?' tag.
    out <- ifelse(data, "t", "f")
    .dump.chunk(out, '?')
  } else {
    stop(paste("Can't serialize type", class(data)))
  }
}

.dump.chunk <- function(s, tag) {
  # Wrap a payload as a single TNET chunk: "<nbytes>:<payload><tag>".
  # The length prefix counts BYTES, not characters: e.g. mu = "\u00b5" is
  # one character (nchar(mu) == 1) but two bytes in UTF-8.
  nbytes <- nchar(s, type='bytes')
  sprintf('%d:%s%s', nbytes, s, tag)
}

# Serialize a named list/vector to a TNET dictionary chunk ("...}").
# Keys and values are dumped as alternating TNET chunks.
.dump.list <- function(data) {
  n <- names(data)
  # Pre-allocate a character vector for the interleaved key/value chunks.
  # (Previously rep(0, ...) allocated a numeric vector that was silently
  # coerced to character on the first assignment.)
  chunks <- character(length(n) * 2)
  for (i in seq_along(n)) {
    # Index values by POSITION, not by name: data[[name]] always fetches the
    # first match, which silently breaks lists with duplicate names.
    chunks[2*i - 1] <- tnet.dump(n[i])
    chunks[2*i] <- tnet.dump(data[[i]])
  }
  chunk <- paste(chunks, collapse='')  # concatenate
  .dump.chunk(chunk, '}')
}

# Serialize an unnamed list/vector to a TNET array chunk ("...]"), one
# element at a time.  This is the slow path; .fast.seq() handles homogeneous
# primitive vectors without NAs.
.dump.seq <- function(data) {
  # vapply (rather than sapply) guarantees a character() result even for
  # empty input, where sapply's return type would silently change.
  chunks <- vapply(data, tnet.dump, character(1))
  chunk <- paste(chunks, collapse='')  # concatenate
  .dump.chunk(chunk, ']')
}

# Load data from a string, returning the remainder.
# Parse one TNET value from the front of a string.
#
# Returns list(value=<parsed R value>, extra=<unparsed remainder>), so that
# callers can consume a stream of concatenated messages.
#
# KNOWN BUG (see inline comments): substring() indexes CHARACTERS while the
# TNET length prefix counts BYTES.  The diff-based correction below assumes
# all multi-byte characters lie inside this payload; arrays of unicode
# strings can still parse incorrectly (see array-of-unicode test case).
tnet.load.prefix <- function(data) {
  if (data == '') {
    stop("Got empty data to parse")
  }
  # NOTE: instead of regexpr, I tried to use textConnection and scan().  This
  # doesn't work because you can't do readChar on textConnection.   Even using
  # encoding="bytes" and readChar(useBytes=T) doesn't work.
  #t = textConnection(data)
  #length = scan(file=t, what="integer", sep=":", n=1)

  # Find the (1-based) byte index of the first ':' in data.
  left <- regexpr(':', data, fixed=T, useBytes=T)[1]
  # R is 1-indexed and substring() is INCLUSIVE on the right.
  # (This local variable shadows the name of base::length, but calls like
  # length(x) still resolve to the base function.)
  length <- as.integer(substring(data, 1, left-1))
  right <- left + length + 1  # byte position of the type tag (} ] # etc.)

  # HACKY BUG FIX: substring() works on CHARACTERS, not bytes.
  # TODO: Fix this for real.  See array-of-unicode test case -- this doesn't
  # work because you don't know the end of the string.

  # Number of bytes is always >= number of characters.
  diff <- nchar(data, type='bytes') - nchar(data)
  # Convert the 'right' index from bytes to CHARACTERS.
  right.chars <- right - diff

  payload <- substring(data, left+1, right.chars-1)
  # The single type-tag character immediately after the payload.
  payload_type <- substring(data, right.chars, right.chars)

  if (payload_type == '') {
    # This catches the case that len(payload) < length.
    stop(paste(
        "Data was too short: got", nchar(payload, type='bytes'),
        "bytes, expected", length+1))
  }

  value <- .load.value(payload, payload_type)
  return(list(value=value, extra=substring(data, right.chars+1)))
}

# Load data from a string.  It is an error if there are extra bytes on the end
# of the string that are not part of the message.
# Deserialize a complete TNET string.  It is an error if any bytes remain
# after the first message.
tnet.load <- function(data) {
  parsed <- tnet.load.prefix(data)
  leftover <- parsed$extra
  if (nchar(leftover) > 0) {
    stop(paste("Extra bytes at end of message:", leftover))
  }
  parsed$value
}

# Decode one TNET payload according to its single-character type tag.
.load.value <- function(payload, payload_type) {
  switch(payload_type,
    '#' = as.integer(payload),
    '}' = .load.dict(payload),
    ']' = .load.list(payload),
    # NOTE: no checking for invalid boolean payloads here
    '?' = payload == 't',
    '^' = as.numeric(payload),
    '~' = {
      if (payload != '') {
        stop("Can't have non-empty payload for null value")
      }
      # NOTE: We have to use NA to represent tnet 'null' instead of R NULL,
      # because appending NULL to list() results in list(), while appending
      # NA results in list(NA).
      NA
    },
    # We're not really supporting byte strings in R.  TNET byte strings and
    # unicode strings both go to the R string type (empty alternatives fall
    # through to the next one).
    '$' = ,
    ',' = ,
    '\n' = payload,
    # tnetstrings use '!' plus 'true' or 'false' -- we support that.
    # NOTE: no checking for invalid payloads here
    '!' = payload == 'true',
    stop(paste("Invalid payload type:", payload_type))
  )
}

# NOTE: This function scales nonlinearly with the size of the list,
# leading to performance bugs.  This seems to be an R list() limitation with
# repeated growth.  append() is faster, but gives the wrong result because it
# flattens the list.  Trying to resize the list periodically doesn't seem to
# work either for some reason.
# Deserialize the payload of a TNET array into an R list, converting
# homogeneous arrays of scalar atoms into typed R vectors.
#
# NOTE: This scales nonlinearly with the size of the list, due to repeated
# list growth (an R limitation).  append() is faster but flattens nested
# lists; periodic resizing and environments-as-hashtables didn't help either.
.load.list <- function(data) {
  result <- list()
  classes <- list()  # per-element class, to detect homogeneous arrays
  lengths <- list()  # per-element length, to restrict conversion to scalars
  len <- 0  # length of result
  extra <- data

  while (TRUE) {
    if (extra == '') {
      break
    }
    r <- tnet.load.prefix(extra)
    len <- len + 1

    result[[len]] <- r$value
    # A scalar NA must not be considered homogeneous with any real type, or
    # we'd coerce e.g. list(1, NA) into an invalid typed vector.  Guard on
    # length/type first: r$value may be an empty list (a nested "0:]"),
    # where r$value[[1]] would be a subscript-out-of-bounds error.
    if (length(r$value) == 1 && !is.list(r$value) && is.na(r$value[[1]])) {
      classes[[len]] <- "NA-class"  # not homogeneous with any other type!
    } else {
      classes[[len]] <- class(r$value)
    }
    lengths[[len]] <- length(r$value)
    extra <- r$extra
  }

  # Special checks for homogeneous types: only convert when every element is
  # a scalar (length 1) and all classes agree.
  if (all(lengths == 1)) {
    if (all(classes == "numeric")) {
      result <- as.numeric(result)
    } else if (all(classes == "logical")) {
      result <- as.logical(result)
    } else if (all(classes == "character")) {
      result <- as.character(result)
    } else if (all(classes == "integer")) {
      result <- as.integer(result)
    }
  }

  return(result)
}

# Deserialize the payload of a TNET dictionary into a named R list.
# Keys must be strings; an odd number of items is an error.
.load.dict <- function(data) {
  result <- list()
  remaining <- data
  while (remaining != '') {
    # Parse the key.
    parsed <- tnet.load.prefix(remaining)
    key <- parsed$value
    remaining <- parsed$extra
    if (!is.character(key)) {
      stop("Keys can only be strings.")
    }
    if (remaining == '') {
      stop("Got an odd number of dictionary items")
    }
    # Parse the value and store it under the key.
    parsed <- tnet.load.prefix(remaining)
    result[[key]] <- parsed$value
    remaining <- parsed$extra
  }
  return(result)
}

# Read the ASCII length prefix of a TNET chunk from connection f, consuming
# up to and including the ':' separator.  Returns the digits as a string
# (without the ':').  The connection must be in binary mode for readChar().
read.length.str <- function(f) {
  buf <- c()
  while (TRUE) {
    ch <- readChar(f, 1, useBytes=T)  # read a single char
    if (length(ch) == 0) {
      # readChar returns a zero-length vector at end of input; without this
      # guard the is.na() test below fails with "argument is of length zero".
      stop("Unexpected EOF while reading length prefix")
    }
    # A non-digit parses to NA; that's fine because we test for it below.
    d <- suppressWarnings(as.numeric(ch))
    if (is.na(d)) {
      if (ch != ':') {
        stop(paste("Expected ':', got '", ch, "'", sep=''))
      }
      break
    }
    buf <- append(buf, ch)
  }
  paste(buf, collapse='')
}

# Load one TNET value from a (binary-mode) connection.
#
# It leaves the file pointer at the end of the record.  NOTE: This would be a
# fragile disk serialization format, because if one byte is corrupted, it's
# hard to "resynchronize" in the middle of the stream.  But it should be fine
# for reading out of pipes.
tnet.loadf <- function(f) {
  length.str <- read.length.str(f)
  len <- as.numeric(length.str)
  if (len > 0) {
    payload <- readChar(f, len, useBytes=T)  # read the payload bytes
  } else {
    # Guard the empty payload (e.g. "0:~"): readChar(f, 0) can return a
    # zero-length vector rather than "", which would break the comparisons
    # inside .load.value().
    payload <- ''
  }
  payload_type <- readChar(f, 1, useBytes=T)
  return(.load.value(payload, payload_type))
}

tnet.readf <- function(f) {
  # Read one raw TNET chunk from a connection WITHOUT deserializing it.
  # The result is the chunk verbatim: "<length>:<payload><type-tag>".
  prefix <- read.length.str(f)
  n <- as.numeric(prefix)
  rest <- readChar(f, n + 1, useBytes=T)  # payload plus the trailing type tag
  sprintf('%s:%s', prefix, rest)
}


# R is stupid!  doesn't allow readChar from stdin
# fifo() requires BINARY mode for readChar()
#
# Demo/driver stub: when the script's first argument is 'TODO', read a single
# TNET value from a fifo named 'req' and print it.  (The literal 'TODO'
# argument suggests this driver was never finished -- see NOTE above about
# stdin limitations.)
args <- commandArgs(trailingOnly=T)
if (length(args) > 0 && args[1] == 'TODO') {
  # fifo() must be opened in BINARY mode ('rb') for readChar() to work.
  req.fifo <- fifo('req', open='rb', blocking=T)
  value <- tnet.loadf(req.fifo)
  print(value)
}

