#
# Copyright (c) 2010 Josef Hardi. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

library(plm)

SimulateLambdaEstimation <- function(y.avg.time,
                                     x.experience,
                                     x.group,
                                     developer,
                                     snapshot,
                                     experiment = c("fixed", "ols", "with.group"),
                                     increment = 0.05,
                                     search.iter = 2, 
                                     simul.iter = 100,
                                     sample.size = 28) {
  # Bootstrap the lambda estimation: repeatedly draw a random subset of
  # developers and re-estimate the learning depreciation index on each
  # draw, yielding an empirical distribution of lambda.
  #
  # Args:
  #   y.avg.time:   a data frame for the dependent variable y_it
  #                 (average time to resolve issues), one row per alias.
  #   x.experience: a data frame of developer experience, one row per alias.
  #   x.group:      a data frame for the predictor R_it, one row per alias.
  #   developer:    a vector specifying the subjects of observation.
  #   snapshot:     a vector specifying the series of snapshot ids.
  #   experiment:   type of experiments done in this study.
  #   increment:    initial searching increment for EstimateLambda.
  #   search.iter:  number of searching iterations per estimation.
  #   simul.iter:   number of simulation (resampling) rounds.
  #   sample.size:  number of developers drawn per round (default 28,
  #                 the value previously hard-coded).
  #
  # Returns:
  #   A numeric vector with one estimated lambda per simulation round.
  #
  lambdas <- numeric(simul.iter)  # Preallocate instead of growing with c()

  for (i in seq_len(simul.iter)) {
    developer.sample <- sample(developer, size = sample.size)

    # Vectorized row selection replacing a per-alias rbind() loop.
    # NOTE(review): assumes each alias occurs exactly once per frame;
    # for unique aliases this matches the original loop exactly.
    y.rows <- match(developer.sample, y.avg.time$alias)
    e.rows <- match(developer.sample, x.experience$alias)
    g.rows <- match(developer.sample, x.group$alias)

    lambdas[i] <- EstimateLambda(y.avg.time[y.rows, ],
                                 x.experience[e.rows, ],
                                 x.group[g.rows, ],
                                 developer.sample, snapshot, experiment,
                                 increment, search.iter)
  }
  
  return(lambdas)
}

EstimateLambda <- function(y.avg.time,
                           x.experience,
                           x.group,
                           developer,
                           snapshot,
                           experiment = c("fixed", "ols", "with.group"),
                           increment = 0.05,
                           iter = 2) {
  # Estimate the learning depreciation index using an iteratively
  # refined grid search: scan [x1, x2] at the current increment, keep
  # the lambda with the smallest residual sum of squares, then narrow
  # the interval around it and shrink the increment.
  #
  # Args:
  #   y.avg.time:   a data frame for the dependent variable y_it
  #                 specifying the average time to resolve issues.
  #   x.experience: a data frame specifying developer's experience in
  #                 resolving issues. In this case, I measured it by
  #                 counting the number of resolved issues. Later,
  #                 this measurement is used to build the predictor
  #                 K_it where knowledge is defined as the possible
  #                 previous learning depreciation added to the current
  #                 experience measure.
  #   x.group:      a data frame for the predictor R_it specifying the
  #                 certain role estimated using core/periphery model
  #                 [Borgatti and Everett, 2000].
  #   developer:    a vector specifying the subjects of observation.
  #   snapshot:     a vector specifying the series of snapshot ids.
  #   experiment:   type of experiments done in this study.
  #   increment:    define the initial searching increment.
  #   iter:         define the number of searching iteration.
  #
  # Returns:
  #   The lambda in [0, 1] minimizing the regression RSS.
  #
  # Resolve the length-3 default (or a partial match) to one choice so
  # the switch() in ComputeLearningCurveModel receives a scalar.
  experiment <- match.arg(experiment)

  lambda.extreme <- 0  # Initial values
  rss.extreme <- Inf
  x1 <- 0  # The depreciation index must be between [0, 1]
  x2 <- 1
  
  while (iter != 0) {
    # BUG FIX: the original rebuilt seq(0, 1, increment) every pass, so
    # the narrowed bounds x1/x2 computed below were never used and the
    # search never refined. Scan only the current interval.
    sample.lambdas <- seq(x1, x2, by = increment)
    
    for (lambda in sample.lambdas) {
      dataset <- ConstructLearningDataSet(
                   y.avg.time, x.experience, x.group,
                   developer, snapshot, lambda)
      
      lcurve <- ComputeLearningCurveModel(dataset, experiment)
      
      rss <- sum(residuals(lcurve)^2)
      
      if (rss < rss.extreme) {
        rss.extreme <- rss
        lambda.extreme <- lambda
      }
    }
    
    # Narrow the interval to one increment either side of the current
    # best lambda, clamped to the valid [0, 1] range.
    x1 <- max(lambda.extreme - increment, 0)
    x2 <- min(lambda.extreme + increment, 1)
    
    increment <- increment / 5
    iter <- iter - 1
  }
  
  return(lambda.extreme)
}

TransformExperienceIntoKnowledge <- function(data, lambda) {
  # Transform the experience data set into a representation of acquired
  # knowledge defined using the formula below:
  #
  #     K_it = lambda x K_it-1 + k_it
  #
  # where K_it is the gained knowledge in the current time t, K_it-1 is
  # the previous gained knowledge, and k_it is the experience (measured
  # by the number of issues resolved by developer i).
  #
  # Args:
  #   data:   a data frame containing the developer's experience; column
  #           1 holds the alias, columns 2..n hold one period each.
  #   lambda: the learning depreciation index [Argote, 1990].
  #
  # Returns:
  #   The knowledge data frame (same shape and names as `data`).
  #
  knowledge <- data  # Column 1 (alias) and column 2 (first period) are
                     # kept as-is: K_i1 = k_i1.
  
  # Guard: with fewer than 3 columns there is no later period to update.
  # (The original 3:length(data) would count down to c(3, 2) and corrupt
  # the frame.)
  if (length(data) >= 3) {
    for (t in 3:length(data)) {
      # Recurrence over columns: depreciate the accumulated knowledge of
      # the previous period and add the current period's experience.
      knowledge[[t]] <- knowledge[[t - 1]] * lambda + data[[t]]
    }
  }
  
  return(knowledge)
}

ConstructLearningDataSet <- function(y.avg.time,
                                     x.experience,
                                     x.group,
                                     developer,
                                     snapshot,
                                     lambda = 1) {
  # Integrate all the observed data into a panel data frame suitable
  # for the plm learning-curve regression.
  #
  # Args:
  #   y.avg.time:  a data frame for the dependent variable y_it
  #                specifying the average time to resolve issues.
  #   x.experience:a data frame specifying developer's experience in
  #                resolving issues. In this case, I measured it by
  #                counting the number of resolved issues. Later,
  #                this measurement is used to build the predictor
  #                K_it where knowledge is defined as the possible
  #                previous learning depreciation added to the current
  #                experience measure.
  #   x.group:     a data frame for the predictor R_it specifying the
  #                certain role estimated using core/periphery model
  #                [Borgatti and Everett, 2000].
  #   developer:   a vector specifying the subjects of observation.
  #   snapshot:    a vector specifying the series of snapshot ids.
  #   lambda:      a fraction for learning depreciation index [0, 1].
  #
  # Returns:
  #   A panel data frame for the learning curve regression model, with
  #   columns AVRT, KNOW, GROP, DEVA (developer index) and SNAP
  #   (snapshot index).
  #
  n.sub <- length(developer)  # The total number of the subjects.
  n.obs <- length(snapshot)   # The length of the observation period.
  
  x.knowledge <- TransformExperienceIntoKnowledge(x.experience, lambda)
  
  # Column 1 of each frame is the alias; columns 2..(n.obs+1) hold one
  # value per snapshot. unlist() stacks them column by column, so the
  # rep() calls below label rows as developer-major within snapshot.
  dataset <- data.frame(AVRT = array(unlist(y.avg.time[, 2:(n.obs + 1)])),
                        KNOW = array(unlist(x.knowledge[, 2:(n.obs + 1)])),
                        GROP = array(unlist(x.group[, 2:(n.obs + 1)])),
                        DEVA = rep(developer, each = 1, times = n.obs),
                        SNAP = rep(snapshot, each = n.sub, times = 1))
  
  # Drop rows carrying the -1 sentinel in AVRT — presumably marking
  # developer/snapshot cells with no resolved issue; confirm against
  # the data-extraction step.
  dataset <- dataset[dataset$AVRT != -1.0000, ]
  
  return(dataset)
}

ComputeLearningCurveModel <-
    function(dataset, experiment = c("fixed", "ols", "with.group")) {
  # Execute the regression model depends on different experiments.
  #
  # Args:
  #   dataset:    the learning panel data (columns AVRT, KNOW, GROP,
  #               DEVA, SNAP as built by ConstructLearningDataSet).
  #   experiment: type of experiments done in this study; one of
  #               "fixed", "ols" or "with.group".
  #
  # Returns:
  #   The linear regression model using panel data.
  #
  # BUG FIX: switch() errors on the unresolved length-3 default and
  # silently returned NULL for unrecognized strings; match.arg() picks
  # "fixed" from the default and rejects invalid choices with a clear
  # error.
  experiment <- match.arg(experiment)

  lcurve <- switch(experiment,
                   fixed      = plm(log(AVRT)~KNOW,
                                    data  = dataset,
                                    index = c("DEVA", "SNAP"),
                                    model = "within"),
                   ols        = plm(log(AVRT)~KNOW,
                                    data  = dataset,
                                    index = c("DEVA", "SNAP"),
                                    model = "pooling"),
                   with.group = plm(log(AVRT)~KNOW+GROP,
                                    data  = dataset,
                                    index = c("DEVA", "SNAP"),
                                    model = "within"))
  
  return(lcurve)
}