library('yaml')
# Euclidean (L2) norm of a numeric vector.  Note this masks base::norm,
# which operates on matrices; here only the vector 2-norm is needed.
norm <- function(x) {
  sqrt(sum(x * x))
}

# Trust-region nonlinear optimizer (this is trust() from Charles Geyer's
# CRAN package "trust").  At each iterate a local quadratic model built
# from objfun's gradient and Hessian is minimized over a ball of radius r
# (the trust region); r adapts to the agreement between the predicted and
# the actual change in the objective.
#
#   objfun   function(theta, ...) returning list(value, gradient, hessian)
#   parinit  numeric vector, starting parameter value
#   rinit    initial trust region radius
#   rmax     maximum trust region radius
#   parscale optional positive vector; optimization runs in the scaled
#            coordinates theta * parscale
#   iterlim  iteration limit
#   fterm    termination tolerance on the change in objective value
#   mterm    termination tolerance on the model-predicted change
#   minimize TRUE to minimize, FALSE to maximize
#   blather  if TRUE, attach a detailed per-iteration trace to the result
#   ...      extra arguments passed through to objfun
#
# Returns objfun's output at the final point augmented with components
# argument, converged, iterations and, when blather = TRUE, the trace.
trust <- function(objfun, parinit, rinit, rmax, parscale,
    iterlim = 100, fterm = sqrt(.Machine$double.eps),
    mterm = sqrt(.Machine$double.eps),
    minimize = TRUE, blather = FALSE, ...)
{
    # ---- argument validation ----
    if (! is.numeric(parinit))
       stop("parinit not numeric")
    if (! all(is.finite(parinit)))
       stop("parinit not all finite")
    d <- length(parinit)
    if (missing(parscale)) {
        rescale <- FALSE
    } else {
        rescale <- TRUE
        if (length(parscale) != d)
           stop("parscale and parinit not same length")
        if (! all(parscale > 0))
           stop("parscale not all positive")
        if (! all(is.finite(parscale) & is.finite(1 / parscale)))
           stop("parscale or 1 / parscale not all finite")
    }
    if (! is.logical(minimize))
       stop("minimize not logical")

    r <- rinit
    theta <- parinit
    # first objective evaluation; failure here aborts immediately
    out <- try(objfun(theta, ...))
    if (inherits(out, "try-error")) {
        warning("error in first call to objfun")
        return(list(error = out, argument = theta, converged = FALSE,
            iterations = 0))
    }
    check.objfun.output(out, minimize, d)
    if (! is.finite(out$value))
        stop("parinit not feasible")
    accept <- TRUE

    # trace accumulators, grown per iteration when blather = TRUE
    if (blather) {
        theta.blather <- NULL
        theta.try.blather <- NULL
        type.blather <- NULL
        accept.blather <- NULL
        r.blather <- NULL
        stepnorm.blather <- NULL
        rho.blather <- NULL
        val.blather <- NULL
        val.try.blather <- NULL
        preddiff.blather <- NULL
    }

    for (iiter in 1:iterlim) {

        if (blather) {
            theta.blather <- rbind(theta.blather, theta)
            r.blather <- c(r.blather, r)
            if (accept)
                val.blather <- c(val.blather, out$value)
            else
                val.blather <- c(val.blather, out.value.save)
        }

        # refresh the quadratic model only after an accepted step; a
        # rejected step reuses B, g, f and their eigendecomposition
        if (accept) {
            B <- out$hessian
            g <- out$gradient
            f <- out$value
            out.value.save <- f
            if (rescale) { 
                B <- B / outer(parscale, parscale)
                g <- g / parscale
            }
            # maximization is handled by negating the model
            if (! minimize) {
                B <- (- B)
                g <- (- g)
                f <- (- f)
            }
            # eigendecomposition of the model Hessian; gq is the
            # gradient expressed in the eigenbasis
            eout <- eigen(B, symmetric = TRUE)
            gq <- as.numeric(t(eout$vectors) %*% g)
        }

        ########## solve trust region subproblem ##########

        ##### try for Newton #####
        # if B is positive definite and the unconstrained Newton step
        # lies inside the trust region, take it
        is.newton <- FALSE
        if (all(eout$values > 0)) {
            ptry <- as.numeric(- eout$vectors %*% (gq / eout$values))
            if (norm(ptry) <= r)
                is.newton <- TRUE
        }

        ##### non-Newton #####
        if (! is.newton) {
            lambda.min <- min(eout$values)
            beta <- eout$values - lambda.min
            imin <- beta == 0
            C1 <- sum((gq / beta)[! imin]^2)
            C2 <- sum(gq[imin]^2)
            C3 <- sum(gq^2)
            if (C2 > 0 || C1 > r^2) {
                is.easy <- TRUE
                is.hard <- (C2 == 0)
                ##### easy cases #####
                # find the multiplier shift 'beep' with ||p(beep)|| = r
                # by one-dimensional root finding on fred (written in
                # the numerically stable 1/||p|| - 1/r form)
                beta.dn <- sqrt(C2) / r
                beta.up <- sqrt(C3) / r
                fred <- function(beep) {
                    if (beep == 0) {
                        if (C2 > 0)
                            return(- 1 / r)
                        else
                            return(sqrt(1 / C1) - 1 / r)
                    }
                    return(sqrt(1 / sum((gq / (beta + beep))^2)) - 1 / r)
                }
                if (fred(beta.up) <= 0) {
                    uout <- list(root = beta.up)
                } else if (fred(beta.dn) >= 0) {
                    uout <- list(root = beta.dn)
                } else {
                    uout <- uniroot(fred, c(beta.dn, beta.up))
                }
                wtry <- gq / (beta + uout$root)
                ptry <- as.numeric(- eout$vectors %*% wtry)
            } else {
                is.hard <- TRUE
                is.easy <- FALSE
                ##### hard-hard case #####
                # model minimizer within the non-minimal eigenspace,
                # then move out to the boundary along a minimal
                # eigenvector
                wtry <- gq / beta
                wtry[imin] <- 0
                ptry <- as.numeric(- eout$vectors %*% wtry)
                utry <- sqrt(r^2 - sum(ptry^2))
                if (utry > 0) {
                    vtry <- eout$vectors[ , imin, drop = FALSE]
                    vtry <- vtry[ , 1]
                    ptry <- ptry + utry * vtry
                }
            }
        }

        ########## predicted versus actual change ##########
        # model-predicted change in f for the step ptry
        preddiff <- sum(ptry * (g + as.numeric(B %*% ptry) / 2))
        if (rescale) {
            theta.try <- theta + ptry / parscale
        } else {
            theta.try <- theta + ptry
        }
        out <- try(objfun(theta.try, ...))
        if (inherits(out, "try-error"))
            break
        check.objfun.output(out, minimize, d)
        ftry <- out$value
        if (! minimize)
            ftry <- (- ftry)
        # rho: actual change / predicted change, the classical
        # trust-region acceptance ratio
        rho <- (ftry - f) / preddiff

        ########## termination test ##########
        if (ftry < Inf) {
            is.terminate <- abs(ftry - f) < fterm || abs(preddiff) < mterm
        } else {
            is.terminate <- FALSE
            rho <- (- Inf)
        }

        ##### adjustments #####
        # shrink r on poor agreement (rho < 1/4); expand on very good
        # agreement (rho > 3/4) unless the step was an interior Newton step
        if (is.terminate) {
            if (ftry < f) {
                accept <- TRUE
                theta <- theta.try
            }
        } else {
            if (rho < 1 / 4) {
                accept <- FALSE
                r <- r / 4
            } else {
                accept <- TRUE
                theta <- theta.try
                if (rho > 3 / 4 && (! is.newton))
                    r <- min(2 * r, rmax)
            }
        }

        if (blather) {
            theta.try.blather <- rbind(theta.try.blather, theta.try)
            val.try.blather <- c(val.try.blather, out$value)
            accept.blather <- c(accept.blather, accept)
            preddiff.blather <- c(preddiff.blather, preddiff)
            stepnorm.blather <- c(stepnorm.blather, norm(ptry))
            if (is.newton) {
                mytype <- "Newton"
            } else {
                if (is.hard) {
                    if (is.easy) {
                        mytype <- "hard-easy"
                    } else {
                        mytype <- "hard-hard"
                    }
                } else {
                    mytype <- "easy-easy"
                }
            }
            type.blather <- c(type.blather, mytype)
            rho.blather <- c(rho.blather, rho)
        }

        if (is.terminate)
            break
    }

    # ---- assemble the result ----
    if (inherits(out, "try-error")) {
        out <- list(error = out, argument = theta.try, converged = FALSE)
    } else {
        # re-evaluate at the accepted point so the returned value,
        # gradient and hessian correspond to 'argument'
        out <- try(objfun(theta, ...))
        if (inherits(out, "try-error")) {
            out <- list(error = out)
            warning("error in last call to objfun")
        } else {
            check.objfun.output(out, minimize, d)
        }
        out$argument <- theta
        out$converged <- is.terminate
    }
    out$iterations <- iiter
    if (blather) {
        dimnames(theta.blather) <- NULL
        out$argpath <- theta.blather
        dimnames(theta.try.blather) <- NULL
        out$argtry <- theta.try.blather
        out$steptype <- type.blather
        out$accept <- accept.blather
        out$r <- r.blather
        out$rho <- rho.blather
        out$valpath <- val.blather
        out$valtry <- val.try.blather
        if (! minimize)
            preddiff.blather <- (- preddiff.blather)
        out$preddiff <- preddiff.blather
        out$stepnorm <- stepnorm.blather
    }
    return(out)
}

# Validate the list returned by an objfun passed to trust().
#
#   obj      candidate objfun output
#   minimize logical; direction of optimization (affects which infinity
#            is forbidden for the value)
#   dimen    expected length of the gradient / order of the hessian
#
# Stops with a descriptive error on the first violated requirement;
# gradient and hessian are only required when the value is finite.
# Returns TRUE when everything checks out.
check.objfun.output <- function(obj, minimize, dimen)
{
    if (!is.list(obj))
        stop("objfun returned object that is not a list")
    val <- obj$value
    if (is.null(val))
        stop("objfun returned list that does not have a component 'value'")
    if (!is.numeric(val))
        stop("objfun returned value that is not numeric")
    if (length(val) != 1)
        stop("objfun returned value that is not scalar")
    if (is.na(val) || is.nan(val))
        stop("objfun returned value that is NA or NaN")
    # the "wrong" infinity would make the problem unbounded
    if (minimize && val == (-Inf))
        stop("objfun returned -Inf value in minimization")
    if ((!minimize) && val == Inf)
        stop("objfun returned +Inf value in maximization")
    if (is.finite(val)) {
        grd <- obj$gradient
        if (is.null(grd))
            stop("objfun returned list without component 'gradient' when value is finite")
        if (!is.numeric(grd))
            stop("objfun returned gradient that is not numeric")
        if (length(grd) != dimen)
            stop(paste("objfun returned gradient that is not vector of length", dimen))
        if (!all(is.finite(grd)))
            stop("objfun returned gradient not having all elements finite")
        hes <- obj$hessian
        if (is.null(hes))
            stop("objfun returned list without component 'hessian' when value is finite")
        if (!is.numeric(hes))
            stop("objfun returned hessian that is not numeric")
        if (!is.matrix(hes))
            stop("objfun returned hessian that is not matrix")
        if (!all(dim(hes) == dimen))
            stop(paste("objfun returned hessian that is not", dimen, "by", dimen, "matrix"))
        if (!all(is.finite(hes)))
            stop("objfun returned hessian not having all elements finite")
    }
    TRUE
}


histw <- function(x, w, xaxis, xmin, xmax, ymax, bar = TRUE, add = FALSE,
                  col = "black", dens = TRUE) {
  # Weighted histogram plot.
  #
  #   x:    data vector
  #   w:    weights, one per observation; bin heights are sums of w
  #   xaxis: vector of cut points (bin breaks)
  #   xmin, xmax: range of the x coordinate
  #   ymax: maximum of the y coordinate
  #   bar:  bar plot (if TRUE) or line plot otherwise
  #   add:  if TRUE, the plot is added to an existing plot
  #   col:  color of lines
  #   dens: if TRUE, the histogram has a total area of one
  #
  # Called for its plotting side effect; returns invisible NULL.

  nbin <- length(xaxis)
  # FIX: use TRUE, not T (T is an ordinary variable and can be reassigned)
  xbin <- cut(x, breaks = xaxis, include.lowest = TRUE, labels = 1:(nbin - 1))

  # sum of weights per bin; empty bins come back NA from tapply -> set to 0
  y <- tapply(w, xbin, sum)
  y[is.na(y)] <- 0
  y <- y / sum(w)
  if (dens) y <- y / (xaxis[-1] - xaxis[-nbin])

  if (!add) {
    plot.new()
    plot.window(xlim = c(xmin, xmax), ylim = c(0, ymax))
    axis(1, pos = 0)
    axis(2, pos = xmin)
  }

  if (bar == 1) {
    rect(xaxis[-nbin], 0, xaxis[-1], y)
  } else {
    # outline plot: duplicate each bin edge and height, then close to 0
    # at both ends of the plotting range
    xval <- as.vector(rbind(xaxis[-nbin], xaxis[-1]))
    yval <- as.vector(rbind(y, y))
    lines(c(min(xmin, xaxis[1]), xval, max(xmax, xaxis[length(xaxis)])),
          c(0, yval, 0), lty = "11", lwd = 2, col = col)
  }
  invisible()
}


insert <- function(x, d, x0 = 0) {
  # Return x with the value x0 inserted at position d (1-based).
  # d == length(x) + 1 appends x0 at the end.
  if (d == 1) {
    c(x0, x)
  } else {
    head_part <- x[1:(d - 1)]
    tail_part <- x[-(1:(d - 1))]
    c(head_part, x0, tail_part)
  }
}

obj.fcn <- function(ze, logQ, size, base) {
  # UWHAM objective for trust(): value, gradient, and hessian with
  # respect to the free log-normalizing constants (free energies).
  #
  #   ze:   log-normalizing constants for all states EXCEPT the baseline
  #   logQ: m x N matrix of log unnormalized density ratios over the
  #         baseline (rows = states, columns = observations)
  #   size: individual sample sizes for the m states
  #   base: baseline index; its free energy is fixed at 0

  N <- dim(logQ)[2]
  rho <- size / N

  # put the fixed 0 back in for the baseline state
  ze <- insert(ze, base)

  # exp(logQ - ze): ze (length m) recycles down the columns of the
  # m x N matrix, i.e. one offset per state/row
  Qnorm <- exp(logQ - ze) * rho
  Qsum <- colSums(Qnorm)

  val <- sum(log(Qsum)) / N + sum(ze * rho)

  # per-observation weights for the non-baseline states (N x (m-1))
  W <- t(Qnorm[-base, , drop = FALSE]) / Qsum
  grad <- rho[-base] - colSums(W) / N

  O <- t(W) %*% W / N
  hess <- diag(colSums(W), nrow = length(grad)) / N - O

  list(value = val, gradient = grad, hessian = hess)
}

# Unbinned weighted histogram analysis method (UWHAM).  Estimates the
# log-normalizing constants (free energies) ze of M distributions from
# samples drawn from the subset of them that has size > 0, by trust-region
# optimization of obj.fcn.
uwham <- function(label=NULL, logQ, size=NULL, base=NULL, init=NULL, fisher=TRUE) {

   #label: a vector of labels, indicating which observation is obtained from which distribution
   #logQ: N x M matrix of log unnormalized densities (or negative potential energies), where 
   #      N is the total sample size, i.e., sum(size),
   #      M is the number of distributions for which free energies are to be computed
   #size: a vector of length M, giving the individual sample sizes for the distributions
   #base: the baseline index, between 1 to M, for the distribution whose free energy is set to 0
   #init: a vector of length M, giving the initial values of free energies
   #fisher: logical; if NULL, no variance estimation; 
   #        if TRUE, variance estimation is based on Fisher information
   #        (if FALSE, an alternative estimator based on label is used)

   N <- dim(logQ)[1]
   M <- dim(logQ)[2]

   # compute size if needed
   if (is.null(size)) {
      if (is.null(label)) {
         stop("either label or size must be provided")

      } else {
         size <- c( tapply(1:N, factor(label, levels=1:M), length) )
         size[is.na(size)] <- 0
      }
   }

   #logical, indicating the distributions with observations
   sampled <- size>0

   # check size and logQ
   if (N!=sum(size))
      stop("inconsistent sum(size) and dim(logQ)[1]")

   if (M!=length(size))
      stop("inconsistent length(size) and dim(logQ)[2]")

   #check size and label
   if (!is.null(label)) {
      if ( any(tapply(1:N, label, length)!=size[sampled]) )
         stop("inconsistent label and size[sampled]")
   } else {
      # fisher=FALSE needs labels; fabricate them assuming observations
      # are grouped by state (warned about below)
      if (!is.null(fisher))
       if (fisher==FALSE) {
          label <- rep((1:M)[sampled], times=size[sampled])
          print("assume that observations are ordered by thermodynamic state if fisher=FALSE and label=NULL")
       }
   }

   #m is the number of distributions from which observations are simulated
   m <- sum(sampled)   #m<=M
   rho <- size[sampled]/N

   #set base or init if not provided
   if (is.null(base)) 
      base <- (1:M)[sampled][1]
   else if (!sampled[base])
      stop("observations from the baseline are required")

   if (is.null(init))
      init <- rep(0,M)

   #the baseline index, between 1 to m, corresponding to the sampled distributions
   base0 <- (1:m)[ as.logical(insert(rep(0, M-1), base, 1)[sampled]) ]
   # NOTE(review): debug print left in place; consider removing
   print(base0)
   #log of unnormalized density ratios over the baseline
   # (transposed to M x N so that rows index states, as obj.fcn expects)
   logQ <- t(logQ - logQ[,base])

   #use trust
   # optimize over the free energies of the sampled states, excluding the
   # baseline (fixed at 0); obj.fcn supplies value/gradient/hessian
   out <- trust(obj.fcn, init[sampled][-base0], rinit=1, rmax=100, iterlim=1000,
                         logQ=logQ[sampled,], size=size[sampled], base=base0)

   ze0 <- insert(out$argument, base0)

   ze <- rep(0,M)
   ze[sampled] <- ze0

   # normalized weights; ze (length M) recycles down the rows of logQ
   Qnorm <- exp(logQ-ze)
   Qsum <- apply(Qnorm[sampled,] *rho, 2, sum)

   W <- t(Qnorm) /Qsum
   z <- apply(W, 2, mean)
  
   #all elements should be equal to 1
   check <- z[sampled]

   # free energies of the unsampled states follow from the fitted weights
   if (m<M) {
      ze[!sampled] <- log(z[!sampled])
      W[,!sampled] <- t(t(W[,!sampled])/z[!sampled])
   }

   #variance estimation 
  if (is.null(fisher)){

   list(ze=ze,
        W=W, check=check,
        out=out, 
        size=size, base=base)

  } else {
   O <- t(W)%*%W /N

   D <- matrix(0, M,M)
   D[,sampled] <- t( t(O[,sampled])*rho )

   H <- D - diag(1, nrow=M)
   H <- H[-base,-base]

   if (fisher) {
    # Fisher-information based variance estimator
    G <- O - D[,sampled]%*%O[sampled,]
    G <- G[-base, -base]

    iHG <- -O + rep(1,M)%*%O[base, ,drop=F]
    iHG <- iHG[-base,-base]

    Ve <- iHG%*%solve(t(H)) /N

   } else {
    # estimator built from within-state means of the weights (needs label)
    C <- matrix(0, m,M)
    for (j in 1:M)
       C[,j] <- tapply(W[,j], as.factor(label), mean) 
    
    G <- O - t(C)%*%diag(rho)%*%C
    G <- G[-base, -base]
    Ve <- solve(H, G)%*%solve(t(H)) /N
 
    #Equivalent
    #R <- matrix(0, N,M)
    #for (j in 1:M)
    #   R[,j] <- tapply(W[,j], as.factor(label), mean)[label] 
    # 
    #in.fcn <- solve(H, t(W[,-base] - R[,-base]))
    #Ve <- in.fcn%*%t(in.fcn) /N^2
   }

   #variance vector 
   ve <- insert(diag(Ve), base)

   #variance-covariance matrix
   # reinsert the baseline row and column (zero variance by construction)
   Ve <- apply( apply(Ve, 2, insert, base), 1, insert, base )

   list(ze=ze, ve=ve, Ve=Ve,
        W=W, check=check,
        out=out, 
        label=label, size=size, base=base)
  }
}

# Estimate averages of an observable phi under selected states, reusing
# the weight matrix W and bookkeeping from a previous uwham() result.
#
#   phi:       vector of observable values, one per observation
#   state:     indices of the states at which to estimate the average
#   out.uwham: list returned by uwham() (must contain W, size, base and,
#              when fisher=FALSE, label)
#   fisher:    NULL for point estimates only; TRUE for Fisher-information
#              variances; FALSE for the label-based estimator
uwham.phi <- function(phi, state, out.uwham, fisher=TRUE) {

   N <- dim(out.uwham$W)[1]
   M <- dim(out.uwham$W)[2]

   label <- out.uwham$label
   size <- out.uwham$size
   base <- out.uwham$base

   sampled <- size>0

   m <- sum(sampled)
   rho <- size[sampled]/N

   #
   L <- length(state)
   # augmented indicator: the L appended phi-columns count as unsampled
   sampled2 <- c(sampled, rep(FALSE, L))

   # weighted observable columns; their column means are the estimates
   W.phi <- out.uwham$W[,state, drop=FALSE] *phi
   phi.bar <- apply(W.phi, 2, mean)

   W <- cbind(out.uwham$W, W.phi)
   rm(W.phi)

   #variance estimation
  if (is.null(fisher)){

   list(phi=phi.bar)

  } else {
   O <- t(W)%*%W /N

   D <- matrix(0, M+L,M+L)
   D[,sampled2] <- t( t(O[,sampled2])*rho )

   H <- D - diag(1, nrow=M+L)
   H <- H[-base,-base]

   if (fisher) {
    # Fisher-information based variance estimator (augmented system)
    G <- O - D[,sampled2]%*%O[sampled2,]
    G <- G[-base, -base]

    iHG <- -O + rep(1,M+L)%*%O[base, ,drop=F]
    iHG <- iHG[-base,-base]

    Ve <- iHG%*%solve(t(H)) /N

   } else {
    # estimator built from within-state means of the weights (needs label)
    C <- matrix(0, m,M+L)
    for (j in 1:(M+L))
       C[,j] <- tapply(W[,j], as.factor(label), mean) 
    
    G <- O - t(C)%*%diag(rho)%*%C
    G <- G[-base, -base]
    Ve <- solve(H, G)%*%solve(t(H)) /N
 
    #Equivalently
    #R <- matrix(0, N,M+L)
    #for (j in 1:(M+L))
    #   R[,j] <- tapply(W[,j], as.factor(label), mean)[label] 
    # 
    #in.fcn <- solve(H, t(W[,-base] - R[,-base]))
    #Ve <- in.fcn%*%t(in.fcn) /N^2
   }

   # reinsert the baseline row/column, then keep only the states of
   # interest and their phi-columns
   Ve <- apply( apply(Ve, 2, insert, base), 1, insert, base )
   Ve <- Ve[c(state,M+1:L), c(state,M+1:L)] 

   # combine the covariance blocks into the variance of phi.bar
   # (delta-method style linearization -- verify against the reference)
   mat <- cbind(-diag(phi.bar, nrow=L), diag(1, nrow=L))
   phi.V <- mat%*%Ve%*%t(mat)
   phi.v <- diag(phi.V)

   list(phi=phi.bar, phi.V=phi.V, phi.v=phi.v)
  }
}

# Block-bootstrap uncertainty estimates for uwham() free energies and,
# optionally, for averages of an observable phi.  Calls set.seed(seed)
# (a global RNG side effect).
uwham.boot <- function(proc.type, block.size, boot.size, seed=0, label=NULL, logQ, size=NULL, base=NULL, init=NULL, phi=NULL, state=NULL) {

   #proc.type: type of simulation, 
   #           "indep" for simulation of independent chains, 
   #           "parallel" for parallel tempering, or 
   #           "serial" for serial tempering 
   #block.size: recycled to be a vector of block sizes if proc.type="indep" or 
   #            the first element is treated as a single block size if proc.type="parallel" or "serial"
   #boot.size: the number of bootstrap replications
   #seed: seed for random number generation
   #label: a vector of labels, indicating which observation is obtained from which distribution
   # (logQ, size, base, init, phi, state are as in uwham()/uwham.phi())

   N <- dim(logQ)[1]
   M <- dim(logQ)[2]

   # compute size if needed
   if (is.null(size)) {
      if (is.null(label)) {
         stop("either label or size must be provided")

      } else {
         size <- c( tapply(1:N, factor(label, levels=1:M), length) )
         size[is.na(size)] <- 0
      }
   }

   #logical, indicating the distributions with observations
   sampled <- size>0

   # check size and logQ
   if (N!=sum(size))
      stop("inconsistent sum(size) and dim(logQ)[1]")

   if (M!=length(size))
      stop("inconsistent length(size) and dim(logQ)[2]")

   #check size and label
   if (!is.null(label)) {
      if ( any(tapply(1:N, label, length)!=size[sampled]) )
         stop("inconsistent label and size[sampled]")
   } else {
      if (proc.type=="serial")
         stop("label is required if proc.type='serial'")
   }

   #m is the number of distributions from which observations are simulated
   m <- sum(sampled)

   #set base or init if not provided
   if (is.null(base)) {
      base <- (1:M)[sampled][1]
   } else if (!sampled[base]) {
      stop("observations from the baseline are required")
   }

   if (is.null(init))
      init <- rep(0,M)

   # set block.size
   # "indep": one block size per chain; "parallel"/"serial": one common
   # block size and a precomputed index matrix (columns = blocks)
   if (proc.type=="indep") {
      block.size <- rep(block.size, length=m)

   } else if (proc.type=="parallel") {
      n <- N/m

      if (any(size[sampled]!=n))
         stop("equal sample sizes are required if proc.type='parallel'")

      block.size <- block.size[1]
      ind <- matrix(1:n, nrow=block.size)
      off <- rep((1:m)*n-n, each=n)

   } else if (proc.type=="serial") {
      block.size <- block.size[1]
      ind <- matrix(1:N, nrow=block.size)

   } else {
      stop("proc.type must be 'indep', 'parallel', or 'serial'")
   }

   #
   set.seed(seed)

   # one row of free energy estimates per bootstrap replication
   ans <- matrix(0, boot.size, M)

   if (!is.null(phi))
      ans2 <- matrix(0, boot.size, length(state))  

   for (i in 1:boot.size) {

     # build the resampled row indices 'sam' by drawing whole blocks
     if (proc.type=="indep") {
       start <- 0
       sam <- NULL

       for (j in 1:m) {
          n <- size[sampled][j]
          ind <- matrix(1:n, nrow=block.size[j])

          sam2 <- sample(1:(n/block.size[j]), n/block.size[j], replace=T)
          sam2 <- c( ind[, sam2] ) 

          sam <- c(sam, start+sam2)
          start <- n+start
       } 
     } else if (proc.type=="parallel") {
       # same block resample applied to every chain via the offsets
       sam <- sample(1:(n/block.size), n/block.size, replace=T)
       sam <- c( ind[, sam] ) 
       sam <- off + rep(sam, times=m) 

     } else if (proc.type=="serial") {
       sam <- sample(1:(N/block.size), N/block.size, replace=T)
       sam <- c( ind[, sam] ) 

       # per-state counts change under serial resampling; recompute
       size2 <- tapply(1:N, label[sam], length)
       if (length(size2)<m)
          stop("No observation is resampled from one or more thermodynamic states")
       else
          size[sampled] <- size2
     }

     # refit on the resampled data; variance estimation is skipped
     # (fisher=NULL) since the bootstrap itself provides the variances
     out <- uwham(logQ=logQ[sam,], size=size, base=base, init=init, fisher=NULL)
     ans[i,] <- out$ze

     if (!is.null(phi))
        ans2[i,] <- uwham.phi(phi=phi[sam], state=state, out.uwham=out, fisher=NULL)$phi
   }

   # bootstrap means and variances across replications
   boot.ze <- apply(ans, 2, mean)
   boot.ve <- apply(ans, 2, var)

   if (!is.null(phi)) {
      boot.phi <- apply(ans2, 2, mean)
      boot.phi.v <- apply(ans2, 2, var)

      list(ze=boot.ze, ve=boot.ve, 
           phi=boot.phi, phi.v=boot.phi.v)
   } else
      list(ze=boot.ze, ve=boot.ve)
}

#save(list=ls(), file="UWHAM.R")


bias.fcn <- function(epert, lam1, lam2, alpha, u0, w0) {
  # Bias "ilogistic" potential:
  # (lambda2 - lambda1) ln[1 + exp(-alpha (u - u0))]/alpha + lambda2 u + w0
  # When alpha == 0 the logistic term vanishes and only the linear part
  # lam2 * epert + w0 remains.  Vectorized over epert.
  soft_term <- 0 * epert
  if (alpha > 0) {
    logistic <- 1 + exp(-alpha * (epert - u0))
    soft_term <- (lam2 - lam1) * log(logistic) / alpha
  }
  soft_term + lam2 * epert + w0
}

npot.fcn <- function(e0, epert, bet, lam1, lam2, alpha, u0, w0) {
  # Negative reduced energy: -beta * (U0 + bias), where the bias comes
  # from bias.fcn applied to the perturbation energy epert.
  total_energy <- e0 + bias.fcn(epert, lam1, lam2, alpha, u0, w0)
  -bet * total_energy
}

uwham.r <- function(label, logQ, ufactormax, ufactormin = 1) {
  # Run uwham() on progressively finer subsamples of the data, warm
  # starting each fit from the previous free energy estimates.  The
  # thinning factor uf starts at ufactormax and halves until it drops
  # below ufactormin (or below 1).
  #
  #   label: per-observation state labels (passed to uwham)
  #   logQ:  N x M matrix of log unnormalized densities
  #   ufactormax, ufactormin: initial and minimal thinning factors
  #
  # Returns the last uwham() result with the final row mask attached.
  n <- dim(logQ)[1]
  m <- dim(logQ)[2]
  iniz <- array(0, dim = m)
  uf <- ufactormax
  # && (scalar short-circuit) instead of & for the scalar loop condition
  while (uf >= ufactormin && uf >= 1) {
    mask <- seq(1, n, trunc(uf))
    # BUG FIX: the original indexed the global variable 'neg.pot' here
    # instead of the 'logQ' argument, so the function silently depended
    # on a global that merely happened to hold the same matrix in the
    # driver script.  drop = FALSE keeps the matrix shape for short masks.
    out <- uwham(label = label[mask], logQ = logQ[mask, , drop = FALSE], init = iniz)
    show(uf)
    iniz <- out$ze
    uf <- uf / 2
  }
  out$mask <- mask
  out
}

histw <-
function (x, w, xaxis, xmin, xmax, ymax, bar = TRUE, add = FALSE, 
            col = "black", dens = TRUE) 
{
  # Weighted histogram plot (redefinition of the earlier histw that
  # additionally returns the computed bin heights and breaks).
  #
  #   x:    data vector
  #   w:    weights, one per observation
  #   xaxis: vector of cut points (bin breaks)
  #   xmin, xmax: plotting range; ymax: maximum y coordinate
  #   bar:  bar plot (if TRUE) or line plot; add: add to existing plot
  #   col:  line color; dens: normalize to total area one
  #
  # Returns (invisibly) list(y = bin heights, breaks = xaxis).
  nbin <- length(xaxis)
  # FIX: TRUE instead of T (T can be shadowed by an ordinary variable)
  xbin <- cut(x, breaks = xaxis, include.lowest = TRUE, labels = 1:(nbin -  1))
  y <- tapply(w, xbin, sum)
  y[is.na(y)] <- 0
  y <- y/sum(w)
  if (dens) 
    y <- y/(xaxis[-1] - xaxis[-nbin])
  if (!add) {
    plot.new()
    plot.window(xlim = c(xmin, xmax), ylim = c(0, ymax))
    axis(1, pos = 0)
    axis(2, pos = xmin)
  }
  if (bar == 1) {
    rect(xaxis[-nbin], 0, xaxis[-1], y)
  }
  else {
    xval <- as.vector(rbind(xaxis[-nbin], xaxis[-1]))
    yval <- as.vector(rbind(y, y))
    lines(c(min(xmin, xaxis[1]), xval, max(xmax, xaxis[length(xaxis)])), 
          c(0, yval, 0), lty = "11", lwd = 2, col = col)
  }
  # FIX: the original called invisible() as a no-op statement and then
  # returned the list visibly; fold the two into one invisible return
  # (same value to callers, no spurious autoprint at top level)
  invisible(list(y = y, breaks = xaxis))
}

# ---- command-line driver: load configuration and replica output ----------
# Usage: Rscript <this file> <config.yaml> <jobname> <mintimeid> <maxtimeid>
args <- commandArgs(TRUE)
config <- yaml.load_file(args[1])

jobname <- args[2]
mintimeid <- as.integer(args[3])
maxtimeid <- as.integer(args[4])
# bare expressions: echo the requested time window to stdout under Rscript
mintimeid
maxtimeid

#define states
tempt   <- as.numeric(config$TEMPERATURES)
# inverse temperatures 1/(k T); 0.001986209 is the gas constant in
# kcal/(mol K) -- so energies are assumed to be in kcal/mol
bet     <- 1.0/(0.001986209*tempt)
directn <-config$DIRECTION
intermd <-config$INTERMEDIATE
lambda1 <-config$LAMBDA1
lambda2 <-config$LAMBDA2
alpha   <-config$ALPHA
u0      <-config$U0
w0      <-config$W0

nstates <- length(lambda1)
# the first and second states flagged as intermediate delimit the two legs
leg1istate <- which(intermd==1)[1]
leg2istate <- which(intermd==1)[2]

# each replica directory r<i> holds one whitespace-separated table with
# these columns, one row per saved frame
colnames <- c("stateid", "temperature", "direction", "lambda1", "lambda2", "alpha", "u0", "w0", "potE", "pertE") 
datafiles <- sprintf("r%d/%s.out",seq(0,length(lambda1)-1),jobname)
nfiles <- length(datafiles)
data <- read.table(datafiles[1])
# bare expression: echoes the first table to stdout
data
colnames(data) <- colnames
data$timeid <- 1:length(data$stateid)
# append the remaining replica files, tagging each row with its frame index
for ( i in 2:nfiles) {
    t <- read.table(datafiles[i])
    colnames(t) <- colnames
    t$timeid <- 1:length(t$stateid)
    data <- rbind(data,t)
}
data$bet <- 1.0/(0.001986209*data$temperature)
nsamples <- length(data$stateid)
samplesperreplica <- as.integer(nsamples/nstates)

#LEG1
# restrict to leg-1 states (stateid <= leg1istate - 1) within the requested
# time window; maxtimeid == -1 means "no upper limit"
if (maxtimeid == -1){
    data1 <- subset(data, stateid <= leg1istate - 1 & timeid >= mintimeid)
}else{
data1 <- subset(data, stateid <= leg1istate - 1 & timeid >= mintimeid & timeid <= maxtimeid )
}
mtempt <- length(bet)
leg1stateids <- 1:leg1istate
leg1stateids
mlam <- length(leg1stateids)
mlam
m <- mlam*mtempt
N <- length(data1$stateid)

#extract U0 values as U-bias
#this is relevant only if the states are at different temperatures
e0 <- data1$potE
for (i in 1:N) {
    e0[i] <- e0[i] - bias.fcn(data1$pertE[i],data1$lambda1[i],data1$lambda2[i],data1$alpha[i],data1$u0[i],data1$w0[i])
}

# negative reduced energies, one column per (lambda, temperature) state
neg.pot <- matrix(0, N,m)
sid <- 1
# note the order of (be,te)
for (be in leg1stateids  ) {
     for (te in 1:mtempt) {
             neg.pot[,sid] <- npot.fcn(e0=e0,data1$pertE,bet[te],lambda1[be],lambda2[be],alpha[be],u0[be],w0[be])
             sid <- sid + 1
    }
}

#the alchemical state indexes start with 0, UWHAM's state labels start with 1
statelabels <- data1$stateid + 1

#runs UWHAM
out <- uwham.r(label=statelabels, logQ=neg.pot,ufactormax=1,ufactormin=1)
ze <- matrix(out$ze, nrow=mtempt, ncol=mlam)
# bare expressions: echo free energies (-ze/beta) and their uncertainties
-ze/bet
ve <- matrix(out$ve, nrow=mtempt, ncol=mlam)
sqrt(ve)/bet

# leg-1 free energy difference between the last and first lambda states
dgbind1 <- (-ze[,mlam]/bet[]) - (-ze[,1]/bet[])
ddgbind1 <- sqrt(ve[,mlam]+ve[,1])/bet

dgbind1
ddgbind1

#get plain be histograms at first temperature
umin <- min(data1$pertE)
umax <- max(data1$pertE)
hs <- hist(data1$pertE[ data1$stateid == mlam-1 ],plot=FALSE,breaks=10);
pmax = 1.2*max(hs$density)
# plot(hs$mids,hs$density,type="l",xlim=c(umin,umax),ylim=c(0,pmax));
# write one <mids, density> table per leg-1 lambda state
for ( i in 1:mlam ){ 
    hs <- hist(data1$pertE[ data1$stateid == i-1 ],plot=FALSE,breaks=10);
    # lines(hs$mids,hs$density);
    outp <- cbind(hs$mids,hs$density);
    write(t(outp),file=sprintf("lambda1-%d.dat",i-1),ncol=2)
}




#LEG2
# restrict to leg-2 states (stateid >= leg2istate - 1) within the time window
if (maxtimeid == -1){
    data1 <- subset(data, stateid >= leg2istate - 1 & timeid >= mintimeid )
}else {
    data1 <- subset(data, stateid >= leg2istate - 1 & timeid >= mintimeid & timeid <= maxtimeid )
}
mtempt <- length(bet)
# leg-2 lambda states run backward, from the last state down to leg2istate
leg2stateids <- seq(from=nstates, to=leg2istate, by=-1)
leg2stateids
mlam <- length(leg2stateids )
mlam
m <- mlam*mtempt
N <- length(data1$stateid)

#extract U0 values as U-bias
#this is relevant only if the states are at different temperatures
e0 <- data1$potE
for (i in 1:N) {
    e0[i] <- e0[i] - bias.fcn(data1$pertE[i],data1$lambda1[i],data1$lambda2[i],data1$alpha[i],data1$u0[i],data1$w0[i])
}

# negative reduced energies, one column per (lambda, temperature) state
neg.pot <- matrix(0, N,m)
sid <- 1
# note the order of (be,te)
for (be in leg2stateids ) {
     for (te in 1:mtempt) {
             neg.pot[,sid] <- npot.fcn(e0=e0,data1$pertE,bet[te],lambda1[be],lambda2[be],alpha[be],u0[be],w0[be])
             sid <- sid + 1
    }
}

#the alchemical state indexes in leg2 run backward
statelabels <- nstates - data1$stateid

#runs UWHAM
out <- uwham.r(label=statelabels, logQ=neg.pot,ufactormax=1,ufactormin=1)
ze <- matrix(out$ze, nrow=mtempt, ncol=mlam)
# bare expressions: echo free energies and uncertainties
-ze/bet
ve <- matrix(out$ve, nrow=mtempt, ncol=mlam)
sqrt(ve)/bet


dgbind2 <- (-ze[,mlam]/bet[]) - (-ze[,1]/bet[])
ddgbind2 <- sqrt(ve[,mlam]+ve[,1])/bet

dgbind2
ddgbind2

# combine the two legs into the final estimate; uncertainties add in
# quadrature
dgb <- dgbind1 - dgbind2
ddgb <- sqrt(ddgbind2*ddgbind2 + ddgbind1*ddgbind1)
if (maxtimeid == -1){
    maxsamples <- samplesperreplica
}else{
    maxsamples <- min(maxtimeid, samplesperreplica)
}
result <- sprintf("DDGb = %f +- %f range %d %d", dgb, ddgb, mintimeid, maxsamples)
# write the final "<dgb> +- <ddgb>" line to <jobname>.result
write(sprintf("%f +- %f",dgb, ddgb), sprintf("%s.result", jobname))
#noquote(result)

#get plain be histograms at first temperature
umin <- min(data1$pertE)
umax <- max(data1$pertE)
hs <- hist(data1$pertE[ data1$stateid == leg2istate - 1  ],plot=FALSE,breaks=10);
pmax = 1.2*max(hs$density)
# plot(hs$mids,hs$density,type="l",xlim=c(umin,umax),ylim=c(0,pmax));
# write one <mids, density> table per leg-2 lambda state
for ( i in nstates:leg2istate ){ 
    hs <- hist(data1$pertE[ data1$stateid == i-1 ],plot=FALSE,breaks=10);
    # lines(hs$mids,hs$density);
    outp <- cbind(hs$mids,hs$density);
    write(t(outp),file=sprintf("lambda2-%d.dat",i-1),ncol=2)
}
