# NOTE(review): dropped `rm(list = ls())` -- wiping the caller's global
# environment from inside a script is an anti-pattern; run this file in
# a fresh session (or via Rscript) instead.
library(KernSmooth)

# closed single-class load-dependent MVA

# we treat all queuing centers as load-dependent for two reasons:
# 1. Most of the systems we deal with (hyperthreaded SMP processors, 
#    storage area networks, etc.) will be hierarchical, and
# 2. Even for load-independent service centers, the queue length
#    probabilities are "interesting".

# Usually there is only one delay center, and its demand is the 
# "think time". If there isn't a delay center (batch workload class 
#    with zero think time), we can just set the demand of that center to
# zero and everything works.

# least squares derivative for equally-spaced points
# Least-squares first derivative of y at equally spaced points (unit
# spacing).  Interior points use the centered 5-point Savitzky-Golay
# first-derivative stencil (-2, -1, 0, 1, 2)/10; the two points at each
# end use the matching one-sided 4-point least-squares stencils.  All
# stencils are exact for quadratics.
#
# y must have at least 5 points: the original code silently produced
# wrong results for shorter inputs (3:(N-2) becomes a descending range
# and 1:(N-4) hits index 0), so we guard explicitly.
lsderiv <- function(y) {
  N <- length(y)
  stopifnot(N >= 5)  # interior stencil needs y[i-2 .. i+2]
  dy <- y  # preallocate a result of the same length; all values overwritten

  # interior points: centered 5-point stencil
  dy[3:(N - 2)] <- (-2 * y[1:(N - 4)] - y[2:(N - 3)] +
                      y[4:(N - 1)] + 2 * y[5:N]) / 10

  # end points: one-sided 4-point stencils (mirrored pairs)
  dy[1] <- (-21 * y[1] + 13 * y[2] + 17 * y[3] - 9 * y[4]) / 20
  dy[2] <- (-11 * y[1] + 3 * y[2] + 7 * y[3] + y[4]) / 20
  dy[N - 1] <- (11 * y[N] - 3 * y[N - 1] - 7 * y[N - 2] - y[N - 3]) / 20
  dy[N] <- (21 * y[N] - 13 * y[N - 1] - 17 * y[N - 2] + 9 * y[N - 3]) / 20

  dy
}

# Kernel-smoothed first derivative of y(x): local polynomial regression
# (KernSmooth::locpoly) with derivative order 1 and a fixed bandwidth of 1.
kderiv <- function(x, y) {
  locpoly(x, y, drv = 1, bandwidth = 1)
}

# Asymptotic bounds analysis (ABA) for a closed single-class network.
#
# Arguments:
#   Z              -- think time (delay demand)
#   queuing.demand -- vector of per-center queuing demands
#   NCustomers     -- largest population to bound (bounds for 1..NCustomers)
#
# Returns a list with the saturation population Nstar = (D + Z)/Dmax and
# the optimistic throughput / response-time bounds at each population.
aba <- function(
  Z,
  queuing.demand,
  NCustomers
) {
  D <- sum(queuing.demand)     # total queuing demand
  Dmax <- max(queuing.demand)  # bottleneck demand
  Nstar <- (D + Z) / Dmax      # saturation population

  # Below Nstar the light-load bounds apply (X = N/(D+Z), R = D); at and
  # above Nstar the bottleneck bounds apply (X = 1/Dmax, R = N*Dmax - Z).
  # The paired expressions cross exactly at Nstar, so pmin/pmax select
  # the correct branch and the whole computation vectorizes.
  N <- seq_len(NCustomers)
  return(list(
    Nstar = Nstar,
    throughput.history = pmin(N / (D + Z), 1.0 / Dmax),
    response.history = pmax(D, N * Dmax - Z)
  ))
}

# Exact MVA for a closed, single-class network whose queuing centers are
# all treated as load-dependent.
#
# Arguments:
#   delay.demand   -- vector of delay-center demands (think times)
#   queuing.demand -- vector of queuing-center service demands
#   NCustomers     -- population to solve for (solves 1..NCustomers)
#   capacity       -- matrix [NQueuing x >= NCustomers]; capacity[i, j] is
#                     the service-rate multiplier of center i when j
#                     customers are present there
#
# Returns a list of per-population histories: system throughput, queuing
# response time (delay excluded), queuing-center utilizations (P(busy)),
# and mean queue lengths for delay then queuing centers.
closed.1c.ld <- function(
  delay.demand,
  queuing.demand,
  NCustomers,
  capacity
) {
  # delay centers: residence time always equals demand
  NDelay <- length(delay.demand)
  delay.residence <- delay.demand

  # queuing centers: preallocate working vectors
  NQueuing <- length(queuing.demand)
  queuing.residence <- numeric(NQueuing)
  queuing.utilization <- numeric(NQueuing)
  queuing.mean.queue.length <- numeric(NQueuing)

  # probability[i, j+1] = P(j customers at center i); start from the
  # empty network, where P(0) = 1 at every center
  probability <- matrix(0.0, nrow = NQueuing, ncol = NCustomers + 1)
  probability[, 1] <- 1.0
  new.prob <- probability

  # hoist the reciprocal out of the main loop
  inv.capacity <- 1.0 / capacity

  # per-population history, preallocated
  throughput.history <- numeric(NCustomers)
  response.history <- numeric(NCustomers)
  utilization.history <- matrix(nrow = NCustomers, ncol = NQueuing)
  queue.history <- matrix(nrow = NCustomers, ncol = NDelay + NQueuing)

  for (n in seq_len(NCustomers)) { # loop over populations
    j <- seq_len(n) # occupancies reachable with n customers

    # load-dependent residence times: R_i(n) = D_i * sum_j j*P(j-1|n-1)/c_i(j);
    # accumulate total cycle time starting from the delay (think) contribution
    total.time <- sum(delay.residence)
    for (i in seq_len(NQueuing)) {
      queuing.residence[i] <- queuing.demand[i] *
        sum(j * probability[i, j] * inv.capacity[i, j])
      total.time <- total.time + queuing.residence[i]
    }
    system.throughput <- n / total.time
    throughput.history[n] <- system.throughput
    response.history[n] <- sum(queuing.residence) # queuing time only

    # mean queue lengths (Little's law for the delay centers)
    delay.mean.queue.length <- system.throughput * delay.residence

    # advance the queue-length distributions to population n:
    # P(j | n) = D_i * X(n) * P(j-1 | n-1) / c_i(j)
    for (i in seq_len(NQueuing)) {
      new.prob[i, j + 1] <- queuing.demand[i] * system.throughput *
        inv.capacity[i, j] * probability[i, j]
      queuing.utilization[i] <- sum(new.prob[i, j + 1])        # P(busy)
      queuing.mean.queue.length[i] <- sum(j * new.prob[i, j + 1])
      new.prob[i, 1] <- 1.0 - queuing.utilization[i]           # P(idle)
    }
    probability <- new.prob

    queue.history[n, ] <- c(delay.mean.queue.length,
      queuing.mean.queue.length)
    utilization.history[n, ] <- queuing.utilization
  }

  list(
    throughput.history = throughput.history,
    response.history = response.history,
    utilization.history = utilization.history,
    queue.history = queue.history
  )
}


# test data
NCustomers = 500
capacity <- matrix(nrow=2, ncol=NCustomers)
capacity[,1] <- 1.0
#capacity[1,2:NCustomers] <- 1.0 # CPU
capacity[1, 1:NCustomers] <- 8.0
capacity[1, 1:7] <- c(1:7)
capacity[2,2:NCustomers] <- 1.0 # I/O
delay.demand <- 15.0 # Think time
queuing.demand <- c(0.8, 0.2) # Balanced ... R should be 1.0
mva.result <- closed.1c.ld(
  delay.demand, # delay demand
  queuing.demand,
  NCustomers,
  capacity # capacity matrix
)
print(mva.result)

aba.result <- aba(
  delay.demand,
  queuing.demand,
  NCustomers
)
print(aba.result)
