#' R6 class to create and represent an event
#'
#' @examples
#' # This handler increments a counter in the state of the
#' # Simulation object, and schedules another event every 0.1 time unit.
#' handler = function(time, sim, agent) {
#' x = getState(sim)
#' x$counter = x$counter + 1
#' setState(sim, x)
#' schedule(agent, newEvent(time + 0.1, handler))
#' }
#' # create a new simulation with no agents. But the simulation itself is
#' # an agent, so we can use all the methods of an agent
#' sim = Simulation$new()
#' # set the state of the simulation, initialize the counter
#' sim$state = list(counter = 0)
#' # schedule a new event at time 0
#' sim$schedule(Event$new(0, handler))
#' # add a logger for the counter. Note that, because sim is an R6 object,
#' # to use it in the newStateLogger function we need to access the
#' # external pointer via its $get method
#' sim$addLogger(newStateLogger("counter", sim$get, "counter"))
#' # run the simulation for 10 time units.
#' print(sim$run(0:10))
#' # interestingly, the counts are not exactly 10 events per time unit.
#' # First, reporting always happens before event handling, so the event
#' # at time 0 is not counted in the time interval 0 to 1. Second, the
#' # event time is stored as a numeric value incremented by 0.1, which is
#' # subject to rounding errors, so some of the integer-time events may
#' # fall before the reporting and some after.
#'
#' @export
Event <- R6::R6Class(
"R6Event",
public = list(
#' Event
#'
#' @param time the time that this event will occur. A length-1
#' numeric vector.
#'
#' @param handler an R function that handles the event when it occurs.
#'
#' @details The R handler function should take exactly 3 arguments
#' 1. time: the current time in the simulation
#' 2. sim: the simulation object, an external pointer
#' 3. agent: the agent to which this event is attached.
#'
#' The return value of the handler function is ignored.
initialize = function(time, handler) {
private$event = newEvent(time, handler)
}
),
private = list(
event = NULL
),
active = list(
#' @field time
#'
#' returns the event time
#'
time = function() {
getTime(private$event)
},
#' @field get
#'
#' returns the external pointer, which can then be passed to
#' functions such as schedule and unschedule.
get = function() {
private$event
}
)
)
#' Creates a new event in R
#'
#' @name newEvent
#'
#' @param time the time that this event will occur. A length-1
#' numeric vector.
#'
#' @param handler an R function that handles the event when it occurs.
#'
#' @return an external pointer, which can then be passed to
#' functions such as schedule and unschedule.
#'
#' @details The R handler function should take exactly 3 arguments
#' 1. time: the current time in the simulation
#' 2. sim: the simulation object, an external pointer
#' 3. agent: the agent to which this event is attached.
#'
#' The return value of the handler function is ignored.
#'
#' This function avoids the overhead of an R6 class, and is thus faster.
#' This is the recommended method to create an event in an event handler.
#'
#' @export
NULL
#' Returns the event time
#'
#' @name getTime
#'
#' @param event an external pointer returned by the newEvent function.
#'
#' @return a numeric value
#'
#' @details
#' This function avoids the overhead of an R6 class, and is thus faster.
#' This is the recommended method to get the event time in an event handler.
#'
#' @export
NULL
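# A minimal usage sketch of the pointer-based API documented above
# (illustrative only: the handler name is made up, and the block is wrapped
# in `if (FALSE)` so that it is not executed when the package is loaded):
if (FALSE) {
  # a handler that reschedules itself 0.1 time units later
  recur <- function(time, sim, agent) {
    schedule(agent, newEvent(time + 0.1, recur))
  }
  ev <- newEvent(0, recur)
  getTime(ev)  # 0
}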
# ---- /scratch/gouwar.j/cran-all/cranData/ABM/R/Event.R ----
#' R6 class that represents a population
#'
#' A population is a collection of agents. There are two important tasks
#' for a population:
#' 1. to manage the agents in it
#' 2. to define the contact patterns of the agents
#'
#' The contact patterns are defined by objects of the Contact class that
#' are associated with the population. A population may have multiple
#' Contact objects, for example, one for random mixing, one for close
#' contacts represented by a contact network, and another for a social
#' network.
#'
#' @export
Population <- R6::R6Class(
"R6Population",
inherit = Agent,
public = list(
#' Population
#'
#' @param population can be either an external pointer pointing to
#' a population object returned from newPopulation, or an integer
#' specifying the population size, or a list.
#'
#' @param initializer a function or NULL
#'
#' @details If population is a number (the population size), then initializer
#' can be a function that takes the index of an agent and returns its initial
#' state. If population is a list, its length is the population size, and each
#' element corresponds to the initial state of the agent with the same index.
initialize = function(population=0, initializer=NULL) {
if (typeof(population) == "externalptr") {
super$initialize(population)
return()
}
if (is.list(population)) {
private$agent = newPopulation(population)
} else if (is.numeric(population)) {
private$agent = newPopulation(population, initializer)
} else stop("invalid population argument")
},
#' Add an agent
#'
#' @param agent either an object of the R6 class Agent, or an external
#' pointer returned from newAgent.
#'
#' @return the population object itself (invisible) for chaining actions
#'
#' @details The agent is scheduled in the population. If the population
#' is already added to a simulation, the agent will report its state
#' to the simulation.
addAgent = function(agent) {
if (inherits(agent, "R6Agent"))
agent = agent$get
if (!inherits(agent, "Agent"))
stop("invalid agent argument")
addAgent(private$agent, agent)
invisible(self)
},
#' Remove an agent
#'
#' @param agent either an object of the R6 class Agent, or an external
#' pointer returned from newAgent.
#'
#' @return the population object itself (invisible) for chaining actions
#'
#' @details The agent is removed from the population.
removeAgent = function(agent) {
if (inherits(agent, "R6Agent"))
agent = agent$get
if (!inherits(agent, "Agent"))
stop("invalid agent argument")
removeAgent(private$agent, agent)
invisible(self)
},
#' Add a contact pattern
#'
#' @param contact an external pointer pointing to a Contact object,
#' e.g., created from newRandomMixing.
#'
#' @details If the contact has already been added, this call does nothing.
addContact = function(contact) {
if (inherits(contact, "R6Contact"))
contact = contact$get
if (!inherits(contact, "Contact"))
stop("invalid contact argument")
addContact(private$agent, contact)
invisible(self)
},
#' Return a specific agent by index
#'
#' @param i the index of the agent (starting from 1)
#'
#' @return an external pointer pointing to the agent
agent = function(i) {
getAgent(private$agent, i)
},
#' Set the state of a specific agent by index
#'
#' @param i the index of the agent (starting from 1)
#'
#' @param state a list holding the state to set
#'
#' @return the population object itself (invisible) for chaining actions
setState = function(i, state) {
a = getAgent(private$agent, i)
setState(a, state)
invisible(self)
},
#' Set the states for the agents
#'
#' @param states either a list holding the states (one for each agent), or a
#' function
#'
#' @return the population object itself for chaining actions
#'
#' @details If ```states``` is a function then it takes a single argument
#' ```i```, specifying the index of the agent (starting from 1), and returns
#' a state.
setStates = function(states) {
setStates(self$get, states)
}
),
active = list(
#' @field size
#'
#' The population size, an integer
size = function() { getSize(private$agent) }
)
)
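# A minimal sketch of the R6 interface above (illustrative states; wrapped
# in `if (FALSE)` so that it is not executed when the package is loaded):
if (FALSE) {
  # 10 agents, each initialized by the function to a susceptible state
  pop <- Population$new(10, function(i) list(health = "S"))
  pop$addContact(newRandomMixing())
  pop$setState(1, list(health = "I"))  # give the first agent a different state
  pop$size  # 10
}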
#' Create a new population
#'
#' @name newPopulation
#'
#' @param n an integer specifying the population size.
#'
#' @details The population will be created with "n" individuals in it.
#' These individuals have an empty state upon creation. Note that
#' individuals can be added later with the addAgent function; the initial
#' population size is for convenience, not a requirement.
#'
#' @export
NULL
#' Get the size of a population
#'
#' @name getSize
#'
#' @param population an external pointer to a population, for example,
#' one returned by [newPopulation()]
#'
#' @return the population size, an integer
#'
#' @export
NULL
#' Get the agent at an index in the population
#'
#' @name getAgent
#'
#' @param population an external pointer to a population, for example,
#' one returned by [newPopulation()]
#'
#' @param i the index of the agent, starting from 1.
#'
#' @return the agent at index i in the population.
#'
#' @export
NULL
#' Set the state for each agent in a population
#'
#' @name setStates
#'
#' @param population an external pointer to a population, for example,
#' one returned by [newPopulation()]
#'
#' @param states either a list holding the states (one for each agent), or a
#' function
#'
#' @details If ```states``` is a function then it takes a single argument
#' ```i```, specifying the index of the agent (starting from 1), and returns
#' a state.
#'
#' @export
NULL
#' Add an agent to a population
#'
#' @name addAgent
#'
#' @param population an external pointer to a population, for example,
#' one returned by [newPopulation()]
#'
#' @param agent an external pointer to an agent, returned by [newAgent()] or
#' [getAgent()]
#'
#' @details If the agent is an R6 object, we should use ```agent$get``` to get
#' the external pointer. Similarly, if population is an R6 object, then we
#' should use either ```population$addAgent()``` or ```population$get```.
#'
#' @export
NULL
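# A minimal sketch of the pointer-based population API documented above
# (illustrative states; wrapped in `if (FALSE)` so that it is not executed
# when the package is loaded):
if (FALSE) {
  pop <- newPopulation(3)
  addAgent(pop, newAgent(list("S")))     # add a fourth agent
  getSize(pop)                           # 4
  setStates(pop, function(i) list("S"))  # initialize every agent's state
  getState(getAgent(pop, 1))             # list("S")
}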
# ---- /scratch/gouwar.j/cran-all/cranData/ABM/R/Population.R ----
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
newAgent <- function(state, death_time = NA_real_) {
.Call(`_ABM_newAgent`, state, death_time)
}
getID <- function(agent) {
.Call(`_ABM_getID`, agent)
}
getState <- function(agent) {
.Call(`_ABM_getState`, agent)
}
schedule <- function(agent, event) {
invisible(.Call(`_ABM_schedule`, agent, event))
}
unschedule <- function(agent, event) {
invisible(.Call(`_ABM_unschedule`, agent, event))
}
clearEvents <- function(agent) {
invisible(.Call(`_ABM_clearEvents`, agent))
}
setState <- function(agent, value) {
invisible(.Call(`_ABM_setState`, agent, value))
}
leave <- function(agent) {
.Call(`_ABM_leave`, agent)
}
setDeathTime <- function(agent, time) {
invisible(.Call(`_ABM_setDeathTime`, agent, time))
}
newRandomMixing <- function() {
.Call(`_ABM_newRandomMixing`)
}
newContact <- function(r6) {
.Call(`_ABM_newContact`, r6)
}
newCounter <- function(name, from, to = NULL, initial = 0L) {
.Call(`_ABM_newCounter`, name, from, to, initial)
}
newStateLogger <- function(name, agent, state) {
.Call(`_ABM_newStateLogger`, name, agent, state)
}
newEvent <- function(time, handler) {
.Call(`_ABM_newEvent`, time, handler)
}
getTime <- function(event) {
.Call(`_ABM_getTime`, event)
}
newConfigurationModel <- function(rng) {
.Call(`_ABM_newConfigurationModel`, rng)
}
newPopulation <- function(n, initializer = NULL) {
.Call(`_ABM_newPopulation`, n, initializer)
}
addAgent <- function(population, agent) {
invisible(.Call(`_ABM_addAgent`, population, agent))
}
getSize <- function(population) {
.Call(`_ABM_getSize`, population)
}
getAgent <- function(population, i) {
.Call(`_ABM_getAgent`, population, i)
}
addContact <- function(population, contact) {
invisible(.Call(`_ABM_addContact`, population, contact))
}
setStates <- function(population, states) {
invisible(.Call(`_ABM_setStates`, population, states))
}
newSimulation <- function(n, initializer = NULL) {
.Call(`_ABM_newSimulation`, n, initializer)
}
runSimulation <- function(sim, time) {
.Call(`_ABM_runSimulation`, sim, time)
}
resumeSimulation <- function(sim, time) {
.Call(`_ABM_resumeSimulation`, sim, time)
}
addLogger <- function(sim, logger) {
invisible(.Call(`_ABM_addLogger`, sim, logger))
}
addTransition <- function(sim, from, contact_from, to, contact_to, contact, waiting_time, to_change_callback = NULL, changed_callback = NULL) {
invisible(.Call(`_ABM_addTransition`, sim, from, contact_from, to, contact_to, contact, waiting_time, to_change_callback, changed_callback))
}
stateMatch <- function(state, rule) {
.Call(`_ABM_stateMatch`, state, rule)
}
newExpWaitingTime <- function(rate) {
.Call(`_ABM_newExpWaitingTime`, rate)
}
newGammaWaitingTime <- function(shape, scale) {
.Call(`_ABM_newGammaWaitingTime`, shape, scale)
}
newRWaitingTime <- function(rng) {
.Call(`_ABM_newRWaitingTime`, rng)
}
getWaitingTime <- function(generator, time) {
.Call(`_ABM_getWaitingTime`, generator, time)
}
# ---- /scratch/gouwar.j/cran-all/cranData/ABM/R/RcppExports.R ----
#' R6 class to create and represent a Simulation object
#'
#' The [Simulation] class inherits the [Population] class, so a simulation
#' manages agents and their contacts. The [Population] class in turn
#' inherits the [Agent] class, so a simulation can have its own state, and
#' events attached (scheduled) to it. In addition, it also manages all the
#' transitions, using its ```addTransition``` method. Lastly, it maintains
#' loggers, which record (or count) the state changes, and report their
#' values at specified times.
#'
#' @export
Simulation <- R6::R6Class(
"R6Simulation",
inherit = Population,
public = list(
#' Constructor
#'
#' @param simulation can be either an external pointer pointing to
#' a population object returned from newSimulation, or an integer
#' specifying the population size, or a list
#'
#' @param initializer a function or NULL
#'
#' @details If simulation is a number (the population size), then initializer
#' can be a function that takes the index of an agent and returns its initial
#' state. If simulation is a list, its length is the population size, and each
#' element corresponds to the initial state of an agent (with the same index).
initialize = function(simulation = 0, initializer = NULL) {
if (typeof(simulation) == "externalptr") {
super$initialize(simulation)
return()
}
if (is.list(simulation)) {
private$agent = newSimulation(simulation)
} else if (is.numeric(simulation)) {
private$agent = newSimulation(simulation, initializer)
} else stop("invalid simulation argument")
},
#' Run the simulation
#'
#' @param time the time points to return the logger values.
#'
#' @return a list of numeric vectors, with time and values reported
#' by all loggers.
#'
#' @details the returned list can be coerced into a data.frame object
#' whose first column is time, and whose other columns are logger results;
#' each row corresponds to a time point.
#'
#' The Simulation object first collects and logs the states of all
#' agents in the simulation, then sets the current time to the time of
#' the first event, then calls the resume method to actually run it.
#'
run = function(time) {
as.data.frame(runSimulation(self$get, time))
},
#' Continue running the simulation
#'
#' @param time the time points to return the logger values.
#'
#' @return a list of numeric vectors, with time and values reported
#' by all loggers.
#'
#' @details the returned list can be coerced into a data.frame object
#' whose first column is time, and whose other columns are logger results;
#' each row corresponds to a time point.
#'
#' The Simulation object repeatedly handles the events until the
#' last time point in "time" is reached. At each time point, the
#' logger states are collected and put in a list to be returned.
resume = function(time) {
as.data.frame(resumeSimulation(self$get, time))
},
#' Add a logger to the simulation
#'
#' @param logger an external pointer returned by functions like
#' newCounter or newStateLogger.
#'
#' @return the simulation object itself (invisible)
#'
#' @details Without adding a logger, no useful simulation
#' results will be returned.
addLogger = function(logger) {
addLogger(self$get, logger)
invisible(self)
},
#' Add a transition to the simulation
#'
#' @param rule a formula that gives the transition rule
#'
#' @param waiting.time either an external pointer to a WaitingTime object
#' such as one returned by newExpWaitingTime or newGammaWaitingTime, or
#' a function (see the details section)
#'
#' @param to_change_callback the R callback function to determine if
#' the change should occur. See the details section.
#'
#' @param changed_callback the R callback function after the change
#' happened. See the details section.
#'
#' @return the simulation object itself (invisible)
#'
#' @details If waiting.time is a function then it should take exactly one
#' argument time, which is a numeric value holding the current time, and
#' return a single numeric value for the waiting time (i.e., a duration,
#' not an absolute time point).
#'
#' A formula can be used to specify either a spontaneous
#' transition, or a transition caused by a contact.
#'
#' A spontaneous transition has the form from -> to, where from and
#' to are state specifications. It is either a variable name holding
#' a state (R list) or the list itself. The list can also be specified
#' by state(...) instead of list(...)
#'
#' For a spontaneous transition, the callback functions take the
#' following two arguments
#' 1. time: the current time in the simulation
#' 2. agent: the agent undergoing the transition, an external pointer
#'
#' For a transition caused by a contact, the formula needs to specify the
#' states of both the agent who initiates the contact and the contact
#' agent. The two states are connected by a + sign; the one before the
#' + sign is the initiator, and the one after the sign is the contact.
#' The transition must be associated with a Contact object, using
#' a ~ operator. The Contact object must be specified by a variable name
#' that holds the external pointer to the object (created by, e.g.,
#' the newRandomMixing function). For example, suppose S=list("S"),
#' I=list("I"), and m=newRandomMixing(sim), then a possible rule
#' specifying an infectious agent contacting a susceptible agent causing
#' it to become exposed can be
#' specified by
#'
#' I + S -> I + list("E") ~ m
#'
#' For a transition caused by a contact, the callback functions take a
#' third argument:
#' 3. contact: the contact agent, an external pointer
addTransition = function(rule, waiting.time,
to_change_callback = NULL,
changed_callback = NULL)
{
l = private$parse(substitute(rule), parent.frame())
addTransition(self$get,
l$from$first, l$from$second, l$to$first, l$to$second, l$contact,
waiting.time, to_change_callback, changed_callback)
invisible(self)
}
),
private = list(
# parse a state.
# A state is either a variable name or a list (could be state(...))
# Variable values should be looked up in the
# environment envir
parse.state = function(state, envir) {
if (is.call(state)) {
op = as.character(state[[1]])
if (op != "list" && tolower(op) != "state")
stop("invalid state ", as.character(state))
return(as.list(state)[-1])
}
if (is.name(state)) {
if (!exists(as.character(state), envir = envir))
stop("state ", as.character(state), " is not defined")
v = get(as.character(state), envir=envir)
if (!is.list(v))
stop("invalid state ", state, " :", v)
v
} else if (!is.null(state)) {
list(state)
} else stop("state cannot be NULL")
},
# parse a side of the transition, either a starting state
# or a target state (or a pair). If it is a pair connected by "+",
# it must be a transition caused by contact, and thus must be
# associated with a contact object following a ~.
parse.side = function(side, envir) {
if (is.call(side) && as.character(side[[1]]) == "~") {
if (!exists(as.character(side[[3]]), envir=envir))
stop(paste("the contact", as.character(side[[3]]), "does not exist"))
contact = get(side[[3]], envir=envir)
if (inherits(contact, "R6Contact"))
contact = contact$get
side = side[[2]]
} else contact = NULL
if (is.call(side)) {
op = as.character(side[[1]])
if (op == "+") {
first = private$parse.state(side[[2]], envir=envir)
second = private$parse.state(side[[3]], envir=envir)
} else {
first = private$parse.state(side, envir=envir)
second = NULL
}
} else {
first = private$parse.state(side, envir=envir)
second = NULL
}
list(first = first, second = second, contact = contact)
},
# parse a formula that gives the transition rule
parse = function(formula, envir) {
if (as.character(formula[[1]]) != "<-")
return(NULL)
to = private$parse.side(formula[[2]], envir=envir)
from = private$parse.side(formula[[3]], envir=envir)
contact = from$contact
if (!is.null(contact) && !is.null(to$contact))
stop("conact can only be specied once")
if (is.null(contact)) contact = to$contact
if ((!is.null(from$second) && is.null(to$second)) ||
(is.null(from$second) && !is.null(to$second)))
stop("from and to arguments for a contact must both have two states")
if (is.null(from$second) && is.null(to$second) && !is.null(contact))
stop("contact is specified for a non-contact transition")
list(from=from, to=to, contact=contact)
}
)
)
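# A minimal SIR-style sketch of the transition-rule interface described in
# the addTransition documentation above. The states, rates and population
# size are illustrative; the block is wrapped in `if (FALSE)` so that it is
# not executed when the package is loaded.
if (FALSE) {
  # 100 agents: the first infectious, the rest susceptible
  sim <- Simulation$new(100, function(i) list(if (i == 1) "I" else "S"))
  sim$addLogger(newCounter("S", "S"))
  sim$addLogger(newCounter("I", "I"))
  sim$addLogger(newCounter("R", "R"))
  m <- newRandomMixing()
  sim$addContact(m)
  # contact transition: an infectious agent infects a susceptible contact
  sim$addTransition("I" + "S" -> "I" + "I" ~ m, newExpWaitingTime(0.4))
  # spontaneous transition: recovery
  sim$addTransition("I" -> "R", newExpWaitingTime(0.2))
  sim$run(0:10)
}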
# ---- /scratch/gouwar.j/cran-all/cranData/ABM/R/Simulation.R ----
#' Creates an exponentially distributed waiting time
#'
#' @name newExpWaitingTime
#'
#' @param rate the rate of the exponential distribution
#'
#' @return an external pointer
#'
#' @details This function creates a C++ object of type ExpWaitingTime.
#' It can be passed to addTransition or Simulation$addTransition to
#' specify the waiting time for a transition. As a C++ object, it is faster
#' than using an R function to generate waiting times because there is
#' no need to call an R function from C++.
#'
#' @export
NULL
#' Creates a gamma distributed waiting time
#'
#' @name newGammaWaitingTime
#'
#' @param shape the shape parameter of the gamma distribution
#'
#' @param scale the scale parameter of the gamma distribution, i.e., 1/rate
#'
#' @return an external pointer
#'
#' @details This function creates a C++ object of type GammaWaitingTime.
#' It can be passed to addTransition or Simulation$addTransition to
#' specify the waiting time for a transition. As a C++ object, it is faster
#' than using an R function to generate waiting times because there is
#' no need to call an R function from C++.
#'
#' @export
NULL
#' Generate a waiting time from a WaitingTime object
#'
#' @name getWaitingTime
#'
#' @param generator an external pointer to a WaitingTime object, e.g.,
#' one returned by newExpWaitingTime or newGammaWaitingTime
#'
#' @param time the current simulation time, a numeric value
#'
#' @return a numeric value
#'
#' @export
NULL
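# A minimal sketch (wrapped in `if (FALSE)` so that it is not executed when
# the package is loaded): draw waiting times from the generators above.
if (FALSE) {
  ew <- newExpWaitingTime(0.5)       # exponential with rate 0.5
  gw <- newGammaWaitingTime(2, 1.5)  # gamma with shape 2 and scale 1.5
  getWaitingTime(ew, time = 0)       # one exponential draw
  getWaitingTime(gw, time = 0)       # one gamma draw
}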
# ---- /scratch/gouwar.j/cran-all/cranData/ABM/R/Transition.R ----
#' A function for calculating the Abnormal Blood Profile Score
#'
#' The \code{ABPS} function computes the Abnormal Blood Profile Score
#' from seven haematological markers. Higher values of this composite
#' score are associated with a higher likelihood of blood doping.
#'
#' @param haemdata a vector or data frame containing (at least) the 7
#' haematological variables, either with the same names as the
#' parameters below, or (not recommended) without names but in the
#' same order as the parameters.
#' @param HCT haematocrit level [\%]
#' @param HGB haemoglobin level [g/dL]
#' @param MCH mean corpuscular haemoglobin [pg]
#' @param MCHC mean corpuscular haemoglobin concentration [g/dL]
#' @param MCV mean corpuscular volume [fL]
#' @param RBC red blood cell count [10^6/uL]
#' @param RETP reticulocytes percent [\%]
#'
#' @return a vector containing the ABPS score(s). Scores between 0 and
#' 1 indicate a possible suspicion of doping; a score above 1 should
#' only be found in 1 in 1000 male athletes.
#'
#' @details
#'
#' The ABPS uses the seven haematological variables (HCT, HGB, MCH,
#' MCHC, MCV, RBC, RETP) in order to obtain a combined score. This
#' score is more sensitive to doping than the individual markers, and
#' allows the detection of several types of blood doping using a
#' single score.
#'
#' The combined score is based on two classification techniques, a
#' naive Bayesian classifier and an SVM (Support Vector Machine). The
#' two models were trained using a database of 591 blood profiles
#' (including 402 control samples from clean athletes and 189 samples
#' of athletes who abused an illegal substance); the two scores
#' were then combined using ensemble averaging to obtain the final
#' ABPS score.
#'
#' The ABPS is part of the Athlete Biological Passport program managed
#' by the World Anti-Doping Agency. While it is not a primary marker
#' of doping, it has been used as corroborative evidence (see e.g.
#' \url{https://jurisprudence.tas-cas.org/Shared\%20Documents/2773.pdf})
#'
#' @section Note:
#'
#' The values for the markers can be specified using either a data
#' frame containing (at least) the 7 haematological variables, or
#' using seven named parameters, but not both at the same time.
#'
#' The calculation of the ABPS depends on two sets of parameters, for
#' the two machine learning techniques (naive Bayesian classifier and
#' Support Vector Machine), which are provided in the package.
#'
#' Each parameter must be in a prespecified range; parameters outside
#' this range are constrained to the min (respectively max) values and
#' a warning is printed. The limits are available in the
#' variable \code{ABPS:::bayespar_7$mima}.
#'
#' Note that several versions of the ABPS were developed (including
#' several different combinations of parameters). The version provided
#' in this package provides the same results as the WADA version
#' included in their ADAMS database. However, some values calculated
#' with other versions of the software have also been distributed (see
#' the help page for the \code{blooddoping} dataset for an example).
#'
#' @section References:
#' Sottas, P.E., N. Robinson, S. Giraud, et al., Statistical classification of abnormal blood profiles in athletes. Int J Biostat, 2006. 2(1): p. 1557-4679.
#'
#' \url{https://jurisprudence.tas-cas.org/Shared\%20Documents/2773.pdf}
#'
#' @examples
#' ABPS(HCT=43.2, HGB=14.6, MCH=31.1, MCHC=33.8, MCV=92.1, RBC=4.69, RETP=0.48)
#' ABPS(data.frame(HCT=43.2, HGB=14.6, MCH=31.1, MCHC=33.8, MCV=92.1, RBC=4.69, RETP=0.48))
#' ABPS(c(43.2, 14.6, 31.1, 33.8, 92.1, 4.69, 0.48))
#' data(blooddoping); ABPS(blooddoping)
#' data(bloodcontrol); ABPS(bloodcontrol)
#'
#' @export
ABPS <- function(haemdata=NULL, HCT=NULL, HGB=NULL, MCH=NULL, MCHC=NULL, MCV=NULL,
RBC=NULL, RETP=NULL) {
# This is the order of names used in the objects containing
# the model parameter
haemnames <- c("RETP","HGB","HCT","RBC","MCV","MCH","MCHC")
if (is.null(haemdata)) {
haemdata <- cbind(HCT, HGB, MCH, MCHC, MCV, RBC, RETP)
if (is.null(haemdata))
stop("ABPS requires either a data frame or 7 variables.")
} else {
# Make sure that the data was not specified in two different ways
if (!is.null(cbind(HCT, HGB, MCH, MCHC, MCV, RBC, RETP)))
stop("ABPS requires either a data frame or separate variables, but not both.")
}
# Make sure the data is in a data frame, even if we have only one
# data point and it is in a vector.
if (is.null(dim(haemdata))) {
haemdata <- t(haemdata)
}
# We assume that the variables have been specified in the same order as
# the function call (but this way of passing variables is not recommended)
if (is.null(colnames(haemdata)) && ncol(haemdata)==7)
colnames(haemdata) <- c("HCT", "HGB", "MCH", "MCHC", "MCV", "RBC", "RETP")
if (any(is.na(match(haemnames, colnames(haemdata)))))
stop("ABPS requires 7 haematological variables.")
# Select only the 7 variables we are interested in, in case there were
# more in the data, and sort them in the right order.
haemdata <- haemdata[, haemnames, drop=FALSE]
# Values that are outside the bounds used by the scoring algorithm
# (which depend on the values of the original dataset used to fit the
# algorithm) get assigned the minimum (or maximum) value instead.
haemdata.orig <- haemdata
haemdata <- t( apply( haemdata, MARGIN=1, FUN=pmax, bayespar_7$mima[,1]) )
haemdata <- t( apply( haemdata, MARGIN=1, FUN=pmin, bayespar_7$mima[,2]) )
# If any parameter had to be adjusted, print a warning
differences <- (haemdata.orig != haemdata)
differences[is.na(differences)] <- FALSE
if (any(differences)) {
param_differences <- apply( differences, MARGIN=2, FUN=any )
warning("some values were outside the bounds used by ABPS and were corrected: ",
paste(haemnames[param_differences], collapse="," ) )
}
# Computation of Bayes score
# bayespar_7 contains the parameters for the naive Bayesian classifier
# The first "apply" executes the code over all observations
# bayespar_7$xabs contains a split of the range of each parameter into
# 500 steps. For each parameter, we find the bin that contains the actual
# value
index <- apply(haemdata, MARGIN=1,
FUN=function(x) {
apply(x>bayespar_7$xabs, MARGIN=1,
function(x) { ifelse( any(is.na(x)), NA, max(which(x)) ) } )
}
)
# Set the index to NA when a value was missing
index[! is.finite(index)] <- NA
# Using the index, we can find the positive and negative probabilities for
# each parameter
probneg <- apply( index, MARGIN=2, FUN=function(x) { diag(bayespar_7$yabsnegt[,x]) } )
probpos <- apply( index, MARGIN=2, FUN=function(x) { diag(bayespar_7$yabspos[,x]) } )
# Classification: Naive Bayes score to compare posterior probability of
# being positive and posterior probability of being negative
# For a given observation, the probabilities for the different parameters
# are multiplied.
bayesscore <- log( apply(probpos, MARGIN=2, FUN=prod) / apply(probneg, MARGIN=2, FUN=prod) )
# This test is present in the original matlab code for ABPS, and was kept in
# the R code for consistency.
# However, based on the values obtained for all the possible combinations of
# min/max values for all the parameters, the warning should actually never
# be printed.
if (any(stats::na.omit(bayesscore>100)))
warning("ABPS: Bayes score very large, results may be inaccurate.")
# SVM score, using the parameters from the trained model provided
rbf <- kernlab::rbfdot(sigma=0.5/svmpar_7$kerneloption^2)
Kmat <- apply(haemdata, MARGIN=1,
FUN=function(x) {
kernlab::kernelMatrix(rbf,
x=(x-svmpar_7$me1)/svmpar_7$st1,
y=svmpar_7$xsup)
})
svmscore <- t(Kmat) %*% svmpar_7$w + as.numeric(svmpar_7$bsvm)
svmscore <- t(svmscore)
# Ensemble averaging of the two scores. The values were determined at the
# time the models were fitted.
score <- ( 6*bayesscore/as.numeric(bayespar_7$stdb) + svmscore/as.numeric(svmpar_7$stds) )/4.75
score <- as.vector(score)
return(score)
}
# ---- /scratch/gouwar.j/cran-all/cranData/ABPS/R/ABPS.R ----
#' Compute the OFF-hr score
#'
#' The \code{OFFscore} function computes the value of the OFF-hr
#' score (or OFF score), a combination of the haemoglobin level and
#' the percentage of reticulocytes, used for detecting blood doping.
#'
#' @param haemdata a vector or data frame containing (at least) the 2
#' haematological variables, either with the same names as the
#' parameters below, or (not recommended) without names but in the
#' same order as the parameters.
#' @param HGB level of haemoglobin HGB [g/dL]
#' @param RETP percentage of reticulocytes [\%]
#'
#' @return value of the OFF score.
#'
#' @details
#'
#' The OFF-hr score is defined as 10*HGB [g/dL] - 60*sqrt(RETP [\%]).
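#'
#' For example, with HGB = 14.6 g/dL and RETP = 0.48 \% (the values used in
#' the examples below), the score is 10*14.6 - 60*sqrt(0.48), i.e. about
#' 146 - 41.6 = 104.4.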
#'
#' It is one of the parameters of the Athlete Biological Passport
#' (ABP) program managed by the World Anti-Doping Agency (WADA), and
#' is routinely used to identify athletes who use a substance
#' prohibited by anti-doping rules (see
#' e.g. \url{https://jurisprudence.tas-cas.org/Shared\%20Documents/4006.pdf}).
#'
#' The rationale for using this score for detecting blood doping is
#' the following: if a manipulation (use of transfusion or of
#' erythropoietic stimulating agents (ESA) such as recombinant human
#' erythropoietin (rhEPO)) increases the number of circulating red
#' cells (and thus increases the level of haemoglobin), the organism
#' will react by stopping its own production of red blood cells. This
#' negative feedback will be observed in the reduced percentage of
#' reticulocytes (immature red blood cells). Such a combination of
#' elevated HGB and reduced RETP, which will produce a high OFF
#' score, is found neither naturally, nor as the consequence of a
#' medical condition, and is thus indicative of doping.
#'
#' The OFF score will pick up both withdrawal of blood (which induces
#' a reduction in haemoglobin, a rise in reticulocytes, and thus a
#' rise of RETP and a reduction of OFF score), and its re-infusion
#' (HGB concentration increases, number of reticulocytes and RETP
#' decrease, and OFF score increases).
#'
#' @section Note:
#'
#' The values for HGB and RETP can be specified using either a data
#' frame containing (at least) the 2 haematological variables, or
#' using two named parameters, but not both at the same time.
#'
#' The original OFF-hr score, as described by Gore et al., expects
#' the haemoglobin level HGB to be specified in g/L. In order to be
#' coherent with other functions in the package, this function assumes
#' that HGB is specified in g/dL, and will multiply the value by 10.
#' A warning will be emitted if units used seem wrong.
#'
#' @section References:
#' Gore, C.J., R. Parisotto, M.J. Ashenden, et al., Second-generation
#' blood tests to detect erythropoietin abuse by athletes.
#' Haematologica, 2003. 88(3): p. 333-44.
#'
#' @examples
#' OFFscore(HGB=14.6, RETP=0.48)
#'
#' data(blooddoping)
#' OFFscore(HGB=blooddoping$HGB, RETP=blooddoping$RETP)
#'
#' @export
OFFscore <- function(haemdata=NULL, HGB=NULL, RETP=NULL){
haemnames <- c("HGB", "RETP")
if (is.null(haemdata)) {
haemdata <- cbind(HGB, RETP)
if (is.null(haemdata))
stop("OFFscore requires either a data frame or 2 variables.")
} else {
if (!is.null(cbind(HGB, RETP)))
stop("OFFscore requires either a data frame or separate variables, but not both.")
}
# Make sure the data is in a data frame, even if we have only one
# data point and it is in a vector.
if (is.null(dim(haemdata))) {
haemdata <- t(haemdata)
}
if (is.null(colnames(haemdata)) && ncol(haemdata)==2)
colnames(haemdata) <- haemnames
if (any(is.na(match(haemnames, colnames(haemdata)))))
stop("ABPS requires 2 haematological variables.")
# Select only the 2 variables we are interested in, in case there were
# more in the data, and sort them in the right order.
haemdata <- haemdata[, haemnames, drop=FALSE]
# Very crude range checking for the most common mistake in units
if (any(haemdata[,"HGB"]>50))
warning("OFF-score: very high values for HGB; are the units correct ?")
return(as.vector(10*haemdata[,"HGB"] - (60*sqrt(haemdata[,"RETP"]))))
}
# ---- /scratch/gouwar.j/cran-all/cranData/ABPS/R/OFFscore.R ----
#' Blood samples from an athlete convicted of doping.
#'
#' A dataset containing the result of the analysis of 13 blood samples,
#' taken over a period of 5 years, of a female athlete who was convicted
#' of doping on the basis of the Athlete Biological Passport.
#'
#' @format A data frame with 13 rows and 11 variables (including 10
#' haematological variables):
#' \describe{
#' \item{date}{the date of test}
#' \item{HCT}{haematocrit [\%]}
#' \item{HGB}{haemoglobin [g/dL]}
#' \item{MCH}{mean corpuscular haemoglobin [pg]}
#' \item{MCHC}{mean corpuscular haemoglobin concentration [g/dL]}
#' \item{MCV}{mean corpuscular volume [fL]}
#' \item{RBC}{red blood cell count [10^6/uL]}
#' \item{RETC}{reticulocyte count [10^6/uL]}
#' \item{RETP}{reticulocyte percentage [\%]}
#' \item{OFFscore}{OFF-score}
#' \item{ABPS}{Abnormal Blood Profile Score}
#' }
#'
#' In November 2012, the athlete was convicted of doping by the Court
#' of arbitration for Sport, the evidence showing at least two occurrences
#' of doping, in summer 2009 (shortly before the test of 2 July 2009) and
#' in June 2011, likely using an agent such as recombinant erythropoietin
#' (rhEPO).
#'
#' Doping is indicated in particular by high haemoglobin values associated
#' with very low reticulocyte \% and high OFF scores (a combination of these
#' two variables).
#'
#' @section Note: The data tables published in the original source
#' contain several typos, as confirmed by the World Anti-Doping
#' Agency (WADA). In particular, values for RETP in rows 8 and 10
#' were swapped and the MCHC value for row 7 was incorrect. This
#' package's source code contains both the original data and the
#' details of the corrections that were applied.
#'
#' The ABPS values provided are very close (within <2\%) to those
#' obtained using the \code{ABPS} function, but some of them were likely
#' calculated using different (older) versions of the ABPS code, which
#' may explain some of these differences.
#'
#' @source \url{https://jurisprudence.tas-cas.org/Shared\%20Documents/2773.pdf}
"blooddoping"
#' Blood samples from different individuals.
#'
#' A dataset containing the result of the analysis of 13 blood samples
#' from different individuals.
#'
#' @format A data frame with 13 rows and 12 haematological variables:
#' \describe{
#' \item{HCT}{haematocrit [\%]}
#' \item{HGB}{haemoglobin [g/dL]}
#' \item{IRF}{immature reticulocyte fraction [\%]}
#' \item{MCH}{mean corpuscular haemoglobin [pg]}
#' \item{MCHC}{mean corpuscular haemoglobin concentration [g/dL]}
#' \item{MCV}{mean corpuscular volume [fL]}
#' \item{RBC}{red blood cell count [10^6/uL]}
#' \item{RDW.SD}{red blood cell distribution width [fL]}
#' \item{RETC}{reticulocyte count [10^6/uL]}
#' \item{RETP}{reticulocyte percentage [\%]}
#' \item{OFFscore}{OFF-score}
#' \item{ABPS}{Abnormal Blood Profile Score}
#' }
#'
#' These samples are assumed to represent a normal population.
#'
#' @section Note: One of the rows actually belongs to one of the
#' authors of this package, who promises that he was not doped.
#'
#' @source Swiss Laboratory for Doping Analyses (LAD), with some calculations
#' performed by the World Anti-Doping Agency (WADA).
"bloodcontrol"
# ---- /scratch/gouwar.j/cran-all/cranData/ABPS/R/data.R ----
#'@title Data Preparation
#' @description Data preparation for ABCoxPH
#' @param data Raw data sets
#' @param t_int No of days to be considered as single time interval (Default value: 90)
#' @param max_lac Maximum no of lactation to be considered for data preparation (Default value: Max Lactation)
#' @import stats readxl
#' @return
#' \itemize{
#' \item wide_data - Processed data for ABCoxPH
#' }
#' @export
#'
#' @examples
#' library("ABSurvTDC")
#' library("readxl")
#' data_test<-read_excel(path = system.file("extdata/data_test.xlsx", package = "ABSurvTDC"))
#' PropData<-DataPrep(data =as.data.frame(data_test))
#' @references
#' \itemize{
#'\item J.D. Kalbfleisch and R.L. Prentice (1980). The statistical analysis of failure time data. John Wiley & Sons, Inc., New York, 1980. <doi:10.1002/9781118032985>
#' \item J.P. Klein and M L. Moeschberger (2003). Survival Analysis: Techniques for Censored and Truncated Data. Springer New York. <doi:10.1007/b97377>
#' }
DataPrep<-function(data, t_int, max_lac) {
if (!is.data.frame(data)) {
stop("Input must be a dataframe with structure as given in example")
}
if (missing(t_int)) { # Let user specify the time interval for making covariates
t_int <- 90
message("No time interval specified, defaulting to 90 days\n")
} else {
message("Time interval set to", t_int, "days\n")
}
animal <- unique(data[, 1]) # Vector of animal IDs
n <- length(animal) # Number of animals observed
n_lacs <- n_qt <- LAC_final <- name_LAC <- NULL # For output
Herd <- YearFC <- Season <- HYS <- AFC <- Cen <- NULL # For output
min_YFC <- min(as.numeric(format(data[,3], format = "%Y"))) # Minimum YearFC
## Creating fixed/time-independent variables ##
for (j in 1:n) { # Check every animal
animal_j <- subset(data, data[, 1] == animal[j]) # Isolate each animal
lac_j <- nrow(animal_j) # Count no. of lactations of jth animal
n_lacs <- append(n_lacs, lac_j) # Vector of no. of lactations corresponding to animal ID
qt_j <- as.numeric((animal_j[lac_j, 4]) - (animal_j[1, 3])) %/% t_int + 1 # Calculating no. of quarters of jth animal
n_qt <- append(n_qt,qt_j) # Vector of no. of quarters corresponding to animal ID
cen_j <- unique(animal_j$Cen) # Check censored status of jth animal
Cen <- append(Cen, cen_j) # Vector of censored status corresponding to animal ID
herd_j <- unique(animal_j$Herd) # Check herd no. of jth animal
Herd <- append(Herd, herd_j) # Vector of herd no. corresponding to animal ID
yearFC_j <- as.numeric(format(animal_j$Date_Calved[1], format = "%Y")) - min_YFC + 1 # Check year at first calving of jth animal
YearFC <- append(YearFC, yearFC_j) # Vector of year at first calving corresponding to animal ID
# Categorisation into seasons by checking DOB #
DOB_j <- unique(animal_j$DOB) # Read DOB of jth animal
m_dob_j <- match(months(DOB_j), month.name) # Extract month number of DOB_j
d_dob_j <- as.numeric(format(DOB_j, '%d')) # Extract day number of DOB_j
if (((m_dob_j == 2) & (d_dob_j >= 16)) | ((m_dob_j > 2) & (m_dob_j < 6)) | ((m_dob_j == 6) & (d_dob_j < 16))) {
season_j <- 2 # S=2 if 16/02 <= DOB_j < 16/06
}
else if (((m_dob_j == 6) & (d_dob_j >= 16)) | ((m_dob_j > 6) & (m_dob_j < 10)) | ((m_dob_j == 10) & (d_dob_j < 16))) {
season_j <- 3 # S=3 if 16/06 <= DOB_j < 16/10
}
else {
season_j <- 1 # S=1 if DOB_j >= 16/10 or DOB_j < 16/02
}
Season<-append(Season,season_j)
hys_j <- herd_j * 1000 + (yearFC_j*10) + season_j # Calculate HYS of jth animal
HYS <- append(HYS, hys_j) # Vector of HYS corresponding to animal ID
afc_j <- animal_j$Date_Calved[1] - animal_j$DOB[1] # Calculate AFC (age at first calving) of jth animal
AFC <- append(AFC, afc_j) # Vector of AFC corresponding to animal ID
rm(animal_j, lac_j, qt_j, cen_j, herd_j, yearFC_j, DOB_j, m_dob_j, d_dob_j, season_j, hys_j, afc_j) # Remove intermediary variables
}
if (missing(max_lac)) { # Let user choose the number of lactations to be included
max_lac <- max(n_lacs) # Highest number of lactation among all animals
message("Max no. of lactations to be considered not specified. Defaulting to the max no. present among all animals in data\n")
} else {
message("All animals will be considered for", max_lac, "lactations\n")
}
max_qt <- max(n_qt) # Highest number of quarters among all animals
## Creating time-dependent variables ##
for (j in 1:n) { # Check every animal
animal_j <- subset(data, data[, 1] == animal[j]) # Isolate each animal
lac_j <- nrow(animal_j) # Count no. of lactations of jth animal
# Calculating lactation and dry lengths of jth animal
lac_length_j <- as.vector(as.ts(animal_j[, 4] - animal_j[, 3])) # Lactation lengths
dry_length_j <- NULL # For dry lengths
if (!lac_j == 1) {
for (d in 1:(lac_j-1)) {
dry <- as.vector(as.ts(animal_j[(d+1),3] - animal_j[d,4]))
dry_length_j <- append(dry_length_j, dry) # Dry lengths
rm(dry) # Remove intermediary variables
}
}
# Calculating cumulative lactation and dry lengths #
lac_cum_j <- (lac_length_j[1] %/% t_int) + 1 # 1st lactation length
dry_cum_j <- NULL
if (!lac_j == 1) { # Subsequent lactation lengths (if exist)
for (l in 2:(lac_j+1)) {
dry_l_cum <- (sum(lac_length_j[1:(l-1)]) + sum(dry_length_j[1:(l-1)])) %/% t_int + 1 #Cumulative next dry
lac_l_cum <- (sum(lac_length_j[1:l]) + sum(dry_length_j[1:(l-1)])) %/% t_int + 1 # Cumulative next lactation
lac_cum_j <- append(lac_cum_j, lac_l_cum)
dry_cum_j <- append(dry_cum_j, dry_l_cum)
rm(dry_l_cum, lac_l_cum) # Remove intermediary variables
if (l == lac_j) {
break
}
}
}
# Creating time dependent covariates #
# Make covariates for first lactation
lac_count <- 1
lac_yes <- rep("Yes", lac_cum_j[1]) # "Yes" when animal was lactating
lac_no <- rep("No", (lac_cum_j[lac_j] - lac_cum_j[1])) # "No" when animal was observed but dry
lac_NA <- rep(NA, (max_qt - lac_cum_j[lac_j])) # NA when animal was not observed
LAC <- c(lac_yes, lac_no, lac_NA)
rm(lac_yes, lac_no, lac_NA) # Remove intermediary variables
# Make covariates for subsequent lactations (if exist)
if(!lac_j == 1) {
for (a in 2:lac_j) {
lac_pre_no <- rep("No", dry_cum_j[a-1]) # "No" when animal was in previous lactation
lac_yes <- rep("Yes", (lac_cum_j[a] - dry_cum_j[a-1])) # "Yes" when animal was lactating
lac_no <- rep("No", (lac_cum_j[lac_j] - lac_cum_j[a])) # "No" when animal was observed but dry
lac_NA <- rep(NA, (max_qt - lac_cum_j[lac_j])) # NA when animal was not observed
lac_a <- c(lac_pre_no, lac_yes, lac_no, lac_NA)
LAC <- c(LAC, lac_a)
lac_count <- a
rm(lac_pre_no, lac_yes, lac_no, lac_NA, lac_a) # Remove intermediary variables
if (a == max_lac) { # Stop if max_lac is reached
break
}
}
}
# Fill lactations not reached with NA, if max_lac not reached
if (!lac_count == max_lac) {
for (b in (lac_j+1):max_lac) {
lac_no <- rep("No", lac_cum_j[lac_j]) # "No" when animal was observed but dry
lac_NA <- rep(NA, (max_qt - lac_cum_j[lac_j])) # NA when animal was not observed
lac_b <- c(lac_no, lac_NA)
LAC <- c(LAC, lac_b)
lac_count <- b
rm(lac_no, lac_NA, lac_b) # Remove intermediary variables
}
}
LAC_final<-rbind(LAC_final, LAC)
rm(animal_j, lac_j, lac_length_j, dry_length_j, lac_cum_j, dry_cum_j, LAC) # Remove intermediary variables
}
# Naming the lactations
name_seq <- paste0("L", seq(1, max_lac))
for (r in 1:max_lac) {# Naming the lactations with time
name_L <- paste(name_seq[r], seq(1,max_qt,1), sep="_")
name_LAC <- c(name_LAC, name_L)
rm(name_L) # Remove intermediary variables
}
# Naming the rows and columns of LAC
colnames(LAC_final) <- name_LAC
rownames(LAC_final) <- animal
# Creating output df
fixed_var<-cbind(n_lacs, n_qt, Cen, HYS, AFC)
wide_data<-as.data.frame(list(fixed_var, LAC_final))
# Conversion of "yes/no" into factors
for (i in c(6:ncol(wide_data))) {
wide_data[, i] <- factor(wide_data[, i], levels = c("Yes", "No"))
}
# Remove unnecessary variables
rm(n_lacs, n_qt, Cen, HYS, AFC, fixed_var, LAC_final, animal, b, d, a, Herd, i, j, l,
lac_count, max_lac, max_qt, n, name_LAC, name_seq, r, Season, YearFC)
# Output the created wide_data as a dataframe
return(wide_data)
}
#'@title Cox-PH Model for Animal Breeding
#' @description Fits a Cox proportional hazards model with time-dependent covariates for animal breeding data
#' @param wide_data Dataset from DataPrep function
#' @param lact Number of lactation to be used for model building
#' @import stats survival readxl
#' @return
#' \itemize{
#' \item Cox_Model - ABCoxPH model
#' \item LongData- Long data
#' }
#' @export
#'
#' @examples
#' library("ABSurvTDC")
#' library("readxl")
#' data_test<-read_excel(path = system.file("extdata/data_test.xlsx", package = "ABSurvTDC"))
#' PropData<-DataPrep(data =as.data.frame(data_test))
#' ABCoxPH(PropData)
#' @references
#' \itemize{
#'\item J.D. Kalbfleisch and R.L. Prentice (1980). The statistical analysis of failure time data. John Wiley & Sons, Inc., New York, 1980. <doi:10.1002/9781118032985>
#' \item J.P. Klein and M L. Moeschberger (2003). Survival Analysis: Techniques for Censored and Truncated Data. Springer New York. <doi:10.1007/b97377>
#' }
ABCoxPH <- function(wide_data, lact){
unfold <- function(data, time, event, cov, cov.names = paste("covariate", ".", 1:ncovs, sep = ""), suffix = ".time",
cov.times = 0:ncov, common.times = TRUE, lag = 0, ...) {
vlag <- function(x, lag) c(rep(NA, lag), x[1:(length(x) -
lag)])
xlag <- function(x, lag) apply(as.matrix(x), 2, vlag, lag = lag)
all.cov <- unlist(cov)
if (!is.numeric(all.cov))
all.cov <- which(is.element(names(data), all.cov))
if (!is.list(cov))
cov <- list(cov)
ncovs <- length(cov)
nrow <- nrow(data)
ncol <- ncol(data)
ncov <- length(cov[[1]])
nobs <- nrow * ncov
if (length(unique(c(sapply(cov, length), length(cov.times) -
1))) > 1)
stop(paste("all elements of cov must be of the same length and \n",
"cov.times must have one more entry than each element of cov."))
var.names <- names(data)
subjects <- rownames(data)
omit.cols <- if (!common.times)
c(all.cov, cov.times)
else all.cov
keep.cols <- (1:ncol)[-omit.cols]
factors <- names(data)[keep.cols][sapply(data[keep.cols],
is.factor)]
levels <- lapply(data[factors], levels)
first.covs <- sapply(cov, function(x) x[1])
factors.covs <- which(sapply(data[first.covs], is.factor))
levels.covs <- lapply(data[names(factors.covs)], levels)
nkeep <- length(keep.cols)
if (is.numeric(event))
event <- var.names[event]
events <- sort(unique(data[[event]]))
if (length(events) > 2 || (!is.numeric(events) && !is.logical(events)))
stop("event indicator must have values {0, 1}, {1, 2} or {FALSE, TRUE}")
if (!(all(events == 0:1) || all(events == c(FALSE, TRUE)))) {
if (all(events == 1:2))
data[[event]] <- data[[event]] - 1
else stop("event indicator must have values {0, 1}, {1, 2} or {FALSE, TRUE}")
}
times <- if (common.times)
matrix(cov.times, nrow, ncov + 1, byrow = TRUE)
else as.matrix(data[, cov.times])
new.data <- matrix(Inf, nobs, 3 + ncovs + nkeep)
rownames <- rep("", nobs)
colnames(new.data) <- c("start", "stop", paste(event, suffix,
sep = ""), var.names[-omit.cols], cov.names)
end.row <- 0
data <- as.matrix(as.data.frame(lapply(data, as.numeric)))
for (i in 1:nrow) {
start.row <- end.row + 1
end.row <- end.row + ncov
start <- times[i, 1:ncov]
stop <- times[i, 2:(ncov + 1)]
event.time <- ifelse(stop == data[i, time] & data[i,
event] == 1, 1, 0)
keep <- matrix(data[i, -omit.cols], ncov, nkeep, byrow = TRUE)
select <- apply(matrix(!is.na(data[i, all.cov]), ncol = ncovs),
1, all)
rows <- start.row:end.row
cov.mat <- xlag(matrix(data[i, all.cov], nrow = length(rows)),
lag)
new.data[rows[select], ] <- cbind(start, stop, event.time,
keep, cov.mat)[select, ]
rownames[rows] <- paste(subjects[i], ".", seq(along = rows),
sep = "")
}
row.names(new.data) <- rownames
new.data <- as.data.frame(new.data[new.data[, 1] != Inf &
apply(as.matrix(!is.na(new.data[, cov.names])), 1, all), ])
for (fac in factors) {
new.data[[fac]] <- factor(levels[[fac]][new.data[[fac]]])
}
fcv <- 0
for (cv in factors.covs) {
fcv <- fcv + 1
new.data[[cov.names[cv]]] <- factor(levels.covs[[fcv]][new.data[[cov.names[cv]]]])
}
new.data
}
max_qt <- max(wide_data$n_qt)
max_lac <- (ncol(wide_data) - 5)/max_qt
# Let user choose how many lactations to consider in the model
if (missing(lact)) {
lact <- max_lac
message("CoxPH model considering all lactations available in wide data\n")
} else if (lact <= max_lac) {
message("CoxPH model considering", lact, "lactations\n")
} else {
stop("CoxPH model cannot have more number of lactations than is present in wide data")
}
# Define column numbers corresponding to covariate names
covariates <- list()
cnames <- NULL
for (i in 1:lact) {
covariates[[i]] <- ((max_qt*(i-1)):((max_qt*i)-1)) + 6
cnames[i] <- paste0("Lact_", i)
}
last_cov <- covariates[[length(covariates)]][length(covariates[[length(covariates)]])]
# Conversion from wide to long format
long_data <- unfold(wide_data[, 1:last_cov], time = "n_qt", event = "Cen",
cov = covariates, cov.names = cnames)
# Correcting to factors in long_data
for (i in c(9:ncol(long_data))) {
long_data[, i] <- factor(long_data[, i], levels = c("Yes", "No"))
}
long_data <<- long_data # keep a copy of the long-format data in the global environment
# Formula for model
eq <- "AFC+HYS"
for (e in 1:length(cnames)) {
eq <- paste(eq, cnames[e], sep="+")
}
eq <- as.formula(noquote(paste("Surv(start, stop, Cen.time)", eq, sep="~")))
# Fitting the Cox-PH Model
cox_mdl <- coxph(eq, method = "efron", data = long_data)
# Remove unnecessary variables
rm(max_qt, max_lac, covariates, cnames, last_cov, long_data, eq)
# Output<-list(Cox_Model<-cox_mdl, LongData=LongData)
return(cox_mdl)
}
#'@title ABCoxPH Prediction
#' @description Prediction for ABCoxPH model
#' @param Model ABCoxPH model
#' @param NewData New data
#' @param AFC Age (in days) at first calving
#' @param HYS Combined effect of herd, year and season
#' @import stats readxl
#' @return
#' \itemize{
#' \item SurvProb - Survival probabilities
#' }
#' @export
#'
#' @examples
#' library("ABSurvTDC")
#' library("readxl")
#' data_test<-read_excel(path = system.file("extdata/data_test.xlsx", package = "ABSurvTDC"))
#' PropData<-DataPrep(data =as.data.frame(data_test))
#' model<-ABCoxPH(PropData)
#' Lact_1<-c("Yes","Yes","Yes","No","No","No","No","No","No","No","No")
#' Lact_2<-c("No","No","No","No","Yes","Yes","No","No","No","No","No")
#' Lact_3<-c("No","No","No","No","No","No","No","No","Yes","Yes","Yes")
#' Lact_4<-c("No","No","No","No","No","No","No","No","No","No","No")
#' Lact_5<-c("No","No","No","No","No","No","No","No","No","No","No")
#' Lact_6<-c("No","No","No","No","No","No","No","No","No","No","No")
#' Lact_7<-c("No","No","No","No","No","No","No","No","No","No","No")
#' Lact_8<-c("No","No","No","No","No","No","No","No","No","No","No")
#' Lact_9<-c("No","No","No","No","No","No","No","No","No","No","No")
#' ndata<- data.frame(Lact_1,Lact_2,Lact_3,Lact_4,Lact_5,Lact_6,Lact_7,
#' Lact_8,Lact_9)
#' HYS<-2033
#' AFC <- 1400
#' CoxPred(Model=model, NewData=ndata, AFC, HYS)
#'
#' @references
#' \itemize{
#'\item J.D. Kalbfleisch and R.L. Prentice (1980). The statistical analysis of failure time data. John Wiley & Sons, Inc., New York, 1980. <doi:10.1002/9781118032985>
#' \item J.P. Klein and M L. Moeschberger (2003). Survival Analysis: Techniques for Censored and Truncated Data. Springer New York. <doi:10.1007/b97377>
#' }
CoxPred<-function(Model, NewData, AFC, HYS){
subject<- NULL
datap<-data.frame(AFC, HYS,NewData, start = 0:(nrow(NewData)-1), stop = 1:nrow(NewData), Cen.time = 0,
subject = 1)
Pred<-survfit(Model, newdata = datap, id = subject)
# Required outputs
plot(Pred,conf.int = FALSE ,xlab="time",ylab = "proportion under lactation")
SurvProb<-Pred$surv
return(SurvProb)
}
# ---- /scratch/gouwar.j/cran-all/cranData/ABSurvTDC/R/ABSurvTDC.R ----
#' This is the workhorse function of the ACA. It detects significant change-points in serial data.
#' @param namefi - a character string specifying the data file to be loaded
#' @param xleg - character. The x-label of the plot
#' @param yleg - character. The y-label of the plot
#' @param titl - character. The title of the plot
#' @param onecol - character. Option for the data format. If \code{onecol} is "y", it is assumed that the input file is a single column file (varying parameter) else the input file is a 2 column file (independent variable, varying parameter)
#' @param daty - character. Option for the data processing. If \code{daty} is "y", the scan of the series is launched with the gradients (rates of change) of the data else it is launched with the data itself
#' @param gray - character. Option for the plot. If \code{gray} is "y", the background of the plot is gray else it is white
#' @details
#' if one of the arguments above is NULL, then the user will be
#' prompted to enter the missing value. \code{SDScan()} produces two files: the \emph{SDS.res} file
#' includes the statistics for each detected breakpoint; the \emph{SDS.png} file is the plot of the series
#' where the detected breakpoints are shown. In the \emph{SDS.res} file, there
#' is a line for each breakpoint: it includes the x and y values for the breakpoint, its index
#' in the series, the noise variance due to the discontinuity, the noise
#' variance due to the trend, the noise variance due to the discontinuity
#' (posterior value), the noise variance due to the trend (posterior value),
#' the change-point Signal-to-Noise Ratio (posterior value), the biweight
#' mean of the left segment, the biweight mean of the right segment. Values
#' are separated by the ''&'' symbol. A change-point plot is returned by \code{SDScan()}. This
#' plot shows the series and the detected change-points. Horizontal lines
#' are drawn to represent the biweight means of the two segments defined
#' by each change-point. The legend of the plot shows 4 numerical values
#' for each change-point: from left to right, the rank of the change-point
#' (as defined by the detection sequence), its location along the X-axis,
#' its signal-to-noise ratio, and the probability value for the two-tail
#' robust rank-order test, that was obtained right after the change-point
#' detection
#' @examples
#' \donttest{
#' data <- system.file("extdata","soccer.data.txt", package = "ACA")
#' SDScan(namefi=data, xleg="Time", yleg="Goals per game", titl="Goals in
#' England: 1888-2014", onecol="n", daty="n", gray="y")
#' }
#'
#' data <- system.file("extdata","amorese.data.txt", package = "ACA")
#' \donttest{
#' SDScan(namefi=data, xleg="Index", yleg="Value", titl="Change in
#' a Gaussian Sequence (with trend)", onecol="n", daty="n", gray="y")
#' }
#' @references
#' D. Amorese, "Applying a change-point detection method on frequency-magnitude distributions", \emph{Bull. seism. Soc. Am.} (2007) 97, doi:10.1785/0120060181
#' Lanzante, J. R., "Resistant, robust and non-parametric techniques for the analysis of climate data: Theory and examples, including applications to historical radiosonde station data", \emph{International Journal of Climatology} (1996) 16(11), 1197-1226
#' Amorese, D., Grasso, J. R., Garambois, S., and Font, M., "Change-point analysis of geophysical time-series: application to landslide displacement rate (Sechilienne rock avalanche, France)", \emph{Geophysical Journal International} (2018) 213(2), 1231-1243
#' @author
#' Daniel Amorese <amorese.at.ipgp.fr>
#' @importFrom grDevices dev.off pdf png
#' @importFrom graphics axis box legend lines locator par plot points rect segments text title
#' @importFrom stats median pnorm wilcox.test
#' @importFrom utils read.table write.table
#' @export
SDScan <-
function (namefi = NULL, xleg = NULL, yleg = NULL, titl = NULL,
onecol = NULL, daty = NULL, gray = NULL)
{
cat("\n*************************************************************************************\n")
cat("\nSerial Data Scanner \n\n")
cat("R function for change-point detection through the Lanzante's method (Lanzante,1996)\n\n")
cat("J. R. Lanzante, (1996). Resistant, robust and non-parametric techniques for the\n")
cat("analysis of climate data : theory and examples, including applications to\n")
cat("historical radiosonde station data, International Journal of Climatology,\n")
cat("vol. 16, 1197-1226.\n")
cat("\nOther reference : D. Amorese, (2007). Applying a change-point detection method\n")
cat("on frequency-magnitude distributions, Bulletin of the Seismological Society of\n")
cat("America, 97(5):1742-1749\n\n")
cat("*************************************************************************************\n")
NMAXITER <- 5
SNRMIN <- 0.05
SEUILPROB <- 0.05
ECART <- 2
ECARTBORD <- 2
ECARTNV <- 0
NLIM = 50
FILEOUT = "SDS.res"
no <- function(answer) answer == "n"
yes <- function(answer) answer == "y"
"medpairwise" <- function(n, x, y) {
a = numeric()
NTOTAL = 320000
if ((n * (n - 1)/2) > NTOTAL)
stop("\nMedpairwise error! : too many pairs of points!\n")
np = 0
if (n >= 1)
for (i in 1:(n - 1)) {
for (j in (i + 1):n) {
if (x[j] != x[i]) {
np = np + 1
if (np == NTOTAL)
stop("\nToo many pairs of points...STOP!!\n")
a[np] = (y[j] - y[i])/(x[j] - x[i])
}
}
}
cat("\nMEDPAIRWISE :", n, "points -> ", np, "2-points slopes\n")
med = median(a, na.rm = T)
b = med
res = y - med * x
med = median(res, na.rm = T)
A = med
cat("\n\tslope=", b, "intercept=", A, "\n")
return(list(b = b, A = A))
}
biwmean <- function(x, c = 5, eps = 1e-04) {
m <- median(x)
s <- median(abs(x - m))
u <- (x - m)/(c * s + eps)
w <- rep(0, length(x))
i <- abs(u) <= 1
w[i] <- ((1 - u^2)^2)[i]
bm <- sum(w * x)/sum(w)
return(bm)
}
biwvar <- function(x, c = 5, eps = 1e-04) {
m <- median(x)
s <- median(abs(x - m))
u <- (x - m)/(c * s + eps)
w <- rep(0, length(x))
i <- abs(u) <= 1
w[i] <- (1 - u^2)[i]
w5 <- 1 - 5 * u^2
bwm <- sqrt(length(x)) * sqrt(sum(w^4 * (x - m)^2))/abs(sum(w *
w5))
return(bwm)
}
"quickregli" <- function(n, x, y) {
sumx = sum(x)
sumx2 = sum(x * x)
sumy = sum(y)
sumy2 = sum(y * y)
sumxy = sum(x * y)
delta = n * sumx2 - sumx * sumx
b = (n * sumxy - sumx * sumy)/delta
A = (sumy * sumx2 - sumx * sumxy)/delta
return(list(b = b, A = A))
}
"ranktest" <- function(niter, N, yvar) {
SA = numeric()
        corsa = (N + 1) * seq_len(N)
SA = abs(2 * cumsum(rank(yvar, ties.method = "average")) -
corsa)
n1 <- max(which(SA == unique(SA)[order(unique(SA))[length(unique(SA)) -
niter + 1]]))
W = sum(rank(yvar, ties.method = "average")[1:n1])
cat("\nn1=", n1, "W=", W, "\n")
n2 = N - n1
Wcrit = n1 * (N + 1)/2
cat("\n niter=", niter)
cat(" n=", N, "n2=", n2, "Critical W=", Wcrit, "\n")
sw = sqrt(n1 * n2 * (N + 1)/12)
srang2 = sum((rank(yvar, ties.method = "average"))^2)
srang_star = sum(rank(yvar, ties.method = "first")[n1:N])
sw2 = sqrt((n1 * n2 * srang2)/(N * (N - 1)) - (n1 * n2 *
(N + 1) * (N + 1))/(4 * (N - 1)))
if (n1 <= n2)
wx = W
if (n2 < n1)
wx = srang_star
xn1 <- yvar[1:n1]
xn2 <- yvar[-(1:n1)]
wt = wilcox.test(xn1, xn2, exact = F, correct = T)
p = wt$p.value
cat("sw=", sw, "sw_t=", sw2, "W=", W, "Wx=", wx, "\n")
return(list(p = p, tau = n1))
}
"snr" <- function(ndat, ndis, t, x, w, tabn, choix, NLIM) {
ntabn = numeric()
w1 = numeric()
w2 = numeric()
w3 = numeric()
x1 = numeric()
x2 = numeric()
x3 = numeric()
rdn = 1
ntabn = tabn
ntabn = c(ntabn, t)
ii = max(rank(ntabn, ties.method = "first")[ntabn ==
t])
nl = ntabn[rank(ntabn, ties.method = "first") == (ii -
1)]
if (nl == t && nl != ntabn[1])
nl = ntabn[rank(ntabn, ties.method = "average") ==
(ii - 2)]
nr = ntabn[rank(ntabn, ties.method = "average") == (ii +
1)]
if (nr == t && nr != ntabn[2])
nr = ntabn[rank(ntabn, ties.method = "average") ==
(ii + 2)]
if (nl == 1)
N1 = t - nl + 1
if (nl != 1)
N1 = t - nl
N2 = nr - t
if (N1 > NLIM) {
N1 = NLIM
nl = t - N1
}
if (N2 > NLIM) {
N2 = NLIM
nr = t + N2
}
n = N1 + N2
if (nl == 1) {
x1[1:N1] = x[1:N1]
w1[1:N1] = w[1:N1]
}
if (nl != 1) {
x1[1:N1] = x[(nl + 1):(nl + N1)]
w1[1:N1] = w[(nl + 1):(nl + N1)]
}
x2[1:N2] = x[(t + 1):(t + N2)]
w2[1:N2] = w[(t + 1):(t + N2)]
x3 = c(x1, x2)
w3 = c(w1, w2)
if (n <= 800) {
mpw = medpairwise(n, w3, x3)
b = mpw$b
A = mpw$A
}
if (n > 800) {
qrl = quickregli(n, w3, x3)
b = qrl$b
A = qrl$A
}
if (choix == 1)
cat("\nb=", b, "A=", A, "\n")
xl = biwmean(x1, 7.5, 1e-04)
xr = biwmean(x2, 7.5, 1e-04)
xleft = xl
xright = xr
omean = (N1 * xl + N2 * xr)/n
var = (N1 * (xl - omean) * (xl - omean) + N2 * (xr -
omean) * (xr - omean))/(n - 1)
if (choix != 1)
x1[1:N1] = x1[1:N1] - xl
if (choix == 1)
x1[1:N1] = x1[1:N1] - b * (w1 - w[t])
if (choix != 1)
x2[1:N2] = x2[1:N2] - xr
if (choix == 1)
x2[1:N2] = x2[1:N2] - b * (w2 - w[t])
x3 = c(x1, x2)
noisesd = biwvar(x3, 7.5, 1e-04)
noisev = noisesd * noisesd
if (noisev == 0)
noisev = 1e-06
if (choix == 0 || choix == 1)
rdn = noisev
if (choix == 2)
rdn = var/noisev
cat("\nsnr -> xl=", xl, "n=", N1, "nl=", nl, "xr=", xr,
"n=", N2, "nr=", nr, "x=", omean, "sd=", var, "sn=",
noisev, "snr=", rdn, "\n")
ratio = rdn
return(list(xleft = xleft, xright = xright, ratio = ratio))
}
"dtrend" <- function(ndat, ndis, t, x, w, tabn, NLIM) {
ntabn = numeric()
X = numeric()
x3 = numeric()
w2 = numeric()
ntabn[1:ndis] = tabn[1:ndis]
ntabn = c(ntabn, t)
ii = max(rank(ntabn, ties.method = "average")[ntabn ==
t])
nl = ntabn[rank(ntabn, ties.method = "average") == (ii -
1)]
if (nl == t && nl != ntabn[1])
nl = ntabn[rank(ntabn, ties.method = "average") ==
(ii - 2)]
nr = ntabn[rank(ntabn, ties.method = "average") == (ii +
1)]
if (nr == t && nr != ntabn[2])
nr = ntabn[rank(ntabn, ties.method = "average") ==
(ii + 2)]
if (nl == 1)
N1 = t - nl + 1
if (nl != 1)
N1 = t - nl
N2 = nr - t
if (N1 > NLIM) {
N1 = NLIM
nl = t - N1
}
if (N2 > NLIM) {
N2 = NLIM
nr = t + N2
}
n = N1 + N2
if (nl == 1) {
x3[1:n] = x[(nl):(nl + n - 1)]
w2[1:n] = w[(nl):(nl + n - 1)]
}
if (nl != 1) {
x3[1:n] = x[(nl + 1):(nl + n)]
w2[1:n] = w[(nl + 1):(nl + n)]
}
if (n <= 800) {
mpw = medpairwise(n, w2, x3)
b = mpw$b
A = mpw$A
}
if (n > 800) {
qrl = quickregli(n, w2, x3)
b = qrl$b
A = qrl$A
}
cat("\nReduction :", b, A, "\n")
X[1:nl] = x[1:nl]
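        #note: 'i' below is resolved from the enclosing SDScan() scope by lexical scoping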
X[(nl + 1):nr] = x[(nl + 1):nr] + b * (t - w[i])
X[(nr + 1):ndat] = x[(nr + 1):ndat]
x3[1:ndat] = X[1:ndat]
return(list(dt = x3))
}
"adjust" <- function(dat, m, tabn) {
ntabn = numeric()
idebut = numeric()
ifin = numeric()
mediane = numeric()
jdeb = numeric()
jfin = numeric()
ntabn[1:m] = tabn[1:m]
tabsort = sort(ntabn)
for (i in 1:(m - 1)) {
dat2 = numeric()
if (i == 1)
dat2 = dat[tabsort[i]:tabsort[i + 1]]
if (i != 1)
dat2 = dat[(tabsort[i] + 1):tabsort[i + 1]]
med = median(dat2, na.rm = T)
if (i == 1) {
jdeb[i] = tabsort[i]
jfin[i] = tabsort[i + 1]
for (j in jdeb[i]:jfin[i]) {
dat[j] = dat[j] - med
}
}
if (i != 1) {
jdeb[i] = 1 + tabsort[i]
jfin[i] = tabsort[i + 1]
for (j in jdeb[i]:jfin[i]) {
dat[j] = dat[j] - med
}
}
idebut[i] = jdeb[i]
ifin[i] = jfin[i]
mediane[i] = med
}
return(list(ndat = dat, debut = idebut, fin = ifin, mediane = mediane))
}
readline2 <- function(myString0, dflt) {
print(myString0)
a <- scan(what = character(0), nmax = 1)
if (identical(a, character(0)))
a = dflt
return(a)
}
mygformat <- function(num) {
fnum=numeric()
for( i in 1:NROW(num) ) {
fnum[i]=sprintf(" %.3f",num[i])
if( num[i]>=100 | num[i]<=0.01 ) fnum[i]=sprintf("%.0e",num[i])
}
return(fnum)
}
if (is.null(namefi)) {
namefi = readline2("\nData file name? : ", "")
}
b = read.table(namefi)
if (is.null(xleg)) {
xleg = readline2("\nX-axis title? : ", "X")
}
if (is.null(yleg)) {
yleg = readline2("\nY-axis title? : ", "Y")
}
if (is.null(titl)) {
titl = readline2("\nMain title? : ", "SERIES")
}
if (is.null(onecol)) {
onecol = readline2("\nIs this a single column file? (y/n): ",
"n")
}
if (no(onecol))
xvar <- b$V1
if (yes(onecol))
xvar <- 1:length(b$V1)
if (is.null(daty)) {
daty = readline2("\nShould gradient values be processed? (y/n): ",
"n")
}
if (yes(onecol))
b$V2 <- b$V1
if (no(daty))
yvar <- b$V2
if (yes(daty)) {
yvar <- diff(b$V2)/diff(b$V1)
xvar <- xvar[-length(xvar)]
xvar <- xvar[yvar != Inf]
yvar <- yvar[yvar != Inf]
}
if (is.null(gray)) {
gray = readline2("\nGray background color? (y/n): ",
"y")
}
if (no(gray))
pbg <- 0
if (yes(gray))
pbg <- 1
pva = numeric()
tau = numeric()
N <- length(yvar)
chpt = numeric()
testchpt = numeric()
tnvd = numeric()
tnvt = numeric()
tprobrob = numeric()
maxtmp = numeric()
cat("\nChange-point detection is being performed\n")
yvar0 <- yvar
chpt = rep(0, length(yvar))
testchpt = rep(0, length(yvar))
k = 1
chpt[k] = 1
k = 2
chpt[k] = N
ca <- 0
cort <- 0
niter <- 0
while (niter < NMAXITER) {
niter <- niter + 1
cat("\nITERATION #", niter, "\n\n")
rkt = ranktest(niter, N, yvar)
p = rkt$p
tau = rkt$tau
cat("p-value =", p, "ntau_i =", tau, "\n")
if (p < SEUILPROB) {
cat("\tTest Passed\n")
nxrob = tau
nyrob = N - tau
xn1 <- yvar[1:nxrob]
xn2 <- yvar[-(1:nxrob)]
z = c(xn1, xn2)
ix = rep(0, length(xn1))
for (i in 1:length(xn1)) {
for (j in (length(xn1) + 1):length(z)) {
if (rank(z, ties.method = "average")[j] < rank(z,
ties.method = "average")[i])
ix[i] = ix[i] + 1
}
}
iy = rep(0, length(xn2))
for (i in 1:length(xn2)) {
for (j in 1:length(xn1)) {
if (rank(z, ties.method = "average")[j] < rank(z,
ties.method = "average")[i + length(xn1)])
iy[i] = iy[i] + 1
}
}
sumnx = sum(ix)
sumnx2 = sum(ix * ix)
sumny = sum(iy)
sumny2 = sum(iy * iy)
nxbar = sumnx/length(xn1)
nybar = sumny/length(xn2)
sdnx = sumnx2 - length(xn1) * nxbar * nxbar
sdny = sumny2 - length(xn2) * nybar * nybar
zstat = 0.5 * (length(xn1) * nxbar - length(xn2) *
nybar)/sqrt(nxbar * nybar + sdnx + sdny)
zval = zstat
if (zstat > 0)
zval = -zval
p = pnorm(zval)
p = p * 2
cat("\nnxbar=", nxbar, "nybar=", nybar, "sdnx=",
sdnx, "sdny=", sdny, "zstat=", zstat, "p=", p,
"\n")
if (length(xn1) < 12 && length(xn2) < 12)
p = -p
probrob = p
pasymp = 1
if (probrob < 0) {
cat("\n\tWARNING : exact p-value against asymptotic!\n")
probrob = -probrob
pasymp = 0
}
if (probrob < SEUILPROB)
cat("\tRobust Rank Order Test Passed\n")
snr1 = snr(N, k, tau, yvar0, xvar, chpt, 0, NLIM)
NVD = snr1$ratio
snr2 = snr(N, k, tau, yvar0, xvar, chpt, 1, NLIM)
xl = snr2$xleft
xr = snr2$xright
NVT = snr2$ratio
cat("\nDisc. Noise Var.=", NVD, "Trend Noise Var=",
NVT, "\n")
prox = 1
cat("\n", tau, "<->")
for (i in 1:k) {
cat(" ", chpt[i])
if (abs(tau - chpt[i]) <= ECART)
prox = 0
}
if (prox == 1)
cat("\nGap between change-points")
if ((tau - 1) <= ECARTBORD)
prox = 0
if ((N - tau) <= ECARTBORD)
prox = 0
if ((NVD - NVT) > ECARTNV && prox == 1) {
cat("\tTrend reduction\n")
cort = cort + 1
if (cort >= NMAXITER)
break
suptend = dtrend(N, k, tau, yvar0, xvar, chpt,
NLIM)
yvar = suptend$dt
niter = 0
}
if ((NVD - NVT) <= ECARTNV && prox == 1) {
k = k + 1
if (k > 3) {
for (i in 1:(k - 2)) yvar[debut[i]:fin[i]] = yvar[debut[i]:fin[i]] +
medi[i]
}
chpt[k] = tau
testchpt[tau] = testchpt[tau] + 1
tnvd[k] = NVD
tnvt[k] = NVT
if (pasymp == 1)
tprobrob[k] = probrob
if (pasymp == 0)
tprobrob[k] = -probrob
cat("\nCHANGE-POINT DETECTED\n")
ajt = adjust(yvar, k, chpt)
yvar = ajt$ndat
debut = ajt$debut
fin = ajt$fin
medi = ajt$mediane
niter = 0
ca = ca + 1
cat("\nADJUSTMENT #", ca)
}
}
}
cat("\nNUMBER OF ITERATIONS :", niter)
if (niter != NMAXITER)
if ((NVD - NVT) <= ECARTNV && prox == 1)
for (i in 1:(k - 1)) for (j in debut[i]:fin[i]) yvar[j] = yvar[j] +
medi[i]
xseg = numeric()
yseg = numeric()
yseg2 = numeric()
pseg = numeric()
s2nr = numeric()
ligne = character()
for (i in 1:k) {
snrstar = ""
nvstar0 = ""
pstar = ""
nvstar = ""
tau = chpt[i]
if (tau != N && tau != 1) {
snr3 = snr(N, k, tau, yvar0, xvar, chpt, 0, N)
XL = snr3$xleft
XR = snr3$xright
NVD = snr3$ratio
snr4 = snr(N, k, tau, yvar0, xvar, chpt, 1, N)
NVT = snr4$ratio
snr5 = snr(N, k, tau, yvar0, xvar, chpt, 2, N)
SNRD = snr5$ratio
if (SNRD < SNRMIN)
snrstar = paste(format(SNRD, digits = 6), "*",
sep = "")
if (SNRD >= SNRMIN)
snrstar = paste(snrstar, format(SNRD, digits = 6),
sep = "")
if ((tnvd[i] - tnvt[i]) > ECARTNV)
nvstar0 = paste(format(tnvt[i], digits = 6),
"*", sep = "")
if ((tnvd[i] - tnvt[i]) <= ECARTNV)
nvstar0 = paste(nvstar0, format(tnvt[i], digits = 6),
sep = "")
if (tprobrob[i] < 0 || tprobrob[i] >= SEUILPROB)
pstar = paste(format(chpt[i], digits = 6), "*",
sep = "")
if ((NVD - NVT) > ECARTNV)
nvstar = paste(format(NVT, digits = 6), "*",
sep = "")
if ((NVD - NVT) <= ECARTNV)
nvstar = paste(nvstar, format(NVT, digits = 6),
sep = "")
ligne[i] = paste(format(xvar[tau], digits = 7), " & ",
format(yvar0[tau], digits = 6), " & ", pstar,
" & ", format(tnvd[i], digits = 6), " & ", nvstar0,
" & ", format(NVD, digits = 6), " & ", nvstar,
" & ", snrstar, " & ", format(XL, digits = 6),
" & ", format(XR, digits = 6), sep = "")
xseg = c(xseg, xvar[tau])
yseg = c(yseg, XL)
yseg2 = c(yseg2, XR)
pseg = c(pseg, tprobrob[i])
s2nr = c(s2nr, SNRD)
}
}
nn = order(xseg)
nn = c(nn, 0)
xseg1 = c(min(xvar), sort(xseg))
xseg2 = c(sort(xseg), max(xvar))
yseg = yseg[order(xseg)]
yseg2 = yseg2[order(xseg)]
yseg = c(yseg, yseg2[length(yseg2)])
pseg = pseg[order(xseg)]
pseg = c(pseg, 0)
s2nr = s2nr[order(xseg)]
s2nr = c(s2nr, 0)
dfr = data.frame(cbind(nn, xseg1, yseg, xseg2, pseg, s2nr))
ligne[2] = "X_value & Y_value & Chpt & NoiVarDi & NoiVarTr & NVDpost & NVTpost & DisSNR & LeftBWM & RightBWM"
ligne = ligne[!is.na(ligne)]
write.table(ligne, FILEOUT, quote = F, col.names = F, row.names = F)
cat("\nNumerical results in SDS.res\n\n")
"niceplot" <- function(df, lab1, lab2, mai, df2, locleg,
pbg) {
if (substr(mai, 1, 5) == "paste")
mai = parse(text = mai)
if (substr(lab1, 1, 5) == "paste")
lab1 = parse(text = lab1)
if (substr(lab2, 1, 5) == "paste")
lab2 = parse(text = lab2)
par(mfrow = c(1, 1))
if (pbg)
par(bg = "lightgray")
if (!pbg)
par(bg = "white")
par(mar = c(5, 5, 4, 2) + 0.1, xpd = TRUE)
plot(df, type = "n", axes = FALSE, ann = FALSE)
usr = par("usr")
rect(usr[1], usr[3], usr[2], usr[4], col = "cornsilk",
border = "black")
lines(df, col = "blue")
points(df, pch = 21, bg = "lightcyan", cex = 1.25)
if (length(df2[, 2]) > 1) {
segments(df2[, 2], df2[, 3], df2[, 4], lwd = 4)
ntext = length(df2[, 2])
text(df2[-ntext, 4], df2[-ntext, 3], as.character(df2[-ntext,
1]), col = "red", cex = 1.7, pos = 4, offset = 0.35)
}
axis(2, col.axis = "blue", las = 1)
axis(1, col.axis = "blue")
box()
if (length(df2[, 2]) > 1) {
nline = rep("", ntext)
if (locleg[1] == 1) {
cat("\nPLEASE, locate with the mouse the topright corner of the legend in the plot window\n\n")
loc = locator(1)
if (!is.null(loc))
outloc = c(loc$x, loc$y)
if (is.null(loc)) {
loc = "topleft"
outloc = loc
}
}
if (locleg[1] != 1)
loc = locleg
temp <- legend(loc[1], loc[2], inset = c(0, 0), legend = nline,
xjust = 1, yjust = 1, title = " Statistics for change-points ",
cex = 0.8)
textline = sprintf("%2d %7.2f %s %s ", df2[-ntext, 1], df2[-ntext, 4],
mygformat(df2[-ntext, 6]), mygformat(df2[-ntext, 5]))
hdr = sprintf("%1s %5s %3s %7s ", "N", "XChpt", "SNR", "P-value")
textline = c(hdr, textline)
print(textline)
text(temp$rect$left + temp$rect$w, temp$text$y, textline,
pos = 2, cex = 0.8)
}
title(main = mai, font.main = 4, col.main = "red", cex.main = 1.7)
title(xlab = lab1, col.lab = "red", cex.lab = 1.4)
title(ylab = lab2, col.lab = "red", cex.lab = 1.4)
if (length(df2[, 2]) <= 1)
outloc = c("No detection")
if (locleg[1] == 1)
return(list(inloc = outloc))
}
pts = data.frame(xvar, yvar0)
nplt = niceplot(pts, xleg, yleg, titl, dfr, 1, pbg)
inloc = nplt$inloc
cat(inloc)
resnamepdf = "SDS.pdf"
pdf(resnamepdf, version = "1.4")
niceplot(pts, xleg, yleg, titl, dfr, inloc, pbg)
dev.off()
resnamepng = "SDS.png"
png(resnamepng, width = 1200, height = 1200, res = 120)
niceplot(pts, xleg, yleg, titl, dfr, inloc, pbg)
dev.off()
cat("\nGraphics in SDS.png\n\n")
cat("\nGraphics in SDS.pdf\n\n")
} | /scratch/gouwar.j/cran-all/cranData/ACA/R/SDScan.R |
#' Dataset amorese.data.txt
#'
#' This data set is a small simulated time series to test the \code{ACA}
#' package.
#'
#' @name amorese.data.txt
#'
#' @format This data set contains 2 columns. The first column is an index,
#' from 1 to 410. The second column are the values of a synthetic
#' combination of normal distributions. This is a modified version of the
#' data example from James & Matteson's (2014) study: a sequence of 100
#' independent samples from normal distributions (N(0, 1), N(0, 3), N(2, 1)
#' and N(2, 4)). The notation N(mu, sigma) means normally distributed
#' with mean mu and standard deviation sigma. This synthetic data set is
#' slightly upgraded by adding an extra N(0, 3) very short (10 samples)
#' segment at the end of the initial sequence. This extra tip is added in
#' order to assess the detection capability for a breakpoint close to
#' the series' end, where an edge effect may be significant. Moreover, a 5 per
#' cent slope is added to this synthetic series to simulate a series with
#' an upward trend. This synthetic series is plotted in Figures 2b and 2d in
#' Amorese et al. (2018).
#'
#' @source James, N.A. & Matteson, D.S., ecp: an R package for
#' nonparametric multiple change point analysis of multivariate
#' data, \emph{J. Stat. Softw.}, 62(7), 1-25 (2014).
#'
#' Amorese, D., Grasso, J. R., Garambois, S., and Font, M.,
#' "Change-point analysis of geophysical time-series: application
#' to landslide displacement rate (Sechilienne rock avalanche,
#' France)", \emph{Geophysical Journal International}, 213(2),
#' 1231-1243 (2018).
#'
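#' @examples
#' # Inspect the raw data file shipped with the package:
#' fi <- system.file("extdata", "amorese.data.txt", package = "ACA")
#' head(read.table(fi))
#'
#' # Illustrative sketch (not the exact data set): a series built as
#' # described above, with a 5 per cent slope added
#' set.seed(1)
#' y <- c(rnorm(100, 0, 1), rnorm(100, 0, 3), rnorm(100, 2, 1),
#'        rnorm(100, 2, 4), rnorm(10, 0, 3))
#' y <- y + 0.05 * seq_along(y)
#'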
NULL | /scratch/gouwar.j/cran-all/cranData/ACA/R/extdata_amorese.R |
#' Dataset soccer.data.txt
#'
#' This data set is a small time series to test the \code{ACA} package.
#'
#' @name soccer.data.txt
#'
#' @format This data set contains 2 columns. The first column is the
#' football season year. The second column is the average goals-per-game
#' in each season. Data are derived from all English professional league
#' soccer results from 1888-2014 (engsoccerdata R package).
#'
#' @source James P. Curley, engsoccerdata: English Soccer Data 1871-
#' 2016. R package version 0.1.5 (2016), doi: 10.5281/zenodo.13158.
#'
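#' @examples
#' # Inspect the raw data file shipped with the package:
#' fi <- system.file("extdata", "soccer.data.txt", package = "ACA")
#' head(read.table(fi))
#'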
NULL | /scratch/gouwar.j/cran-all/cranData/ACA/R/extdata_soccer.R |
acdFit <- function(durations = NULL, model = "ACD", dist = "exponential",
order = NULL, startPara = NULL, dailyRestart = 0, optimFnc = "optim",
method = "Nelder-Mead", output = TRUE, bootstrapErrors = FALSE,
forceErrExpec = TRUE, fixedParamPos = NULL, bp = NULL, exogenousVariables = NULL, control = list()){
vecProvided <- FALSE
if(is.data.frame(durations)){
if("adjDur" %in% colnames(durations)){
dur <- durations$adjDur
} else if("durations" %in% colnames(durations)){
warning("no 'adjDur' column for diurnally adjusted durations found - used unadjusted durations instead")
dur <- durations$durations
} else stop("neither a 'durations' or a 'adjDur' column was found in the data.frame 'durations'")
if("time" %in% colnames(durations)){
if(!("POSIXlt" %in% class(durations$time))) durations$time <- as.POSIXlt(durations$time)
time = durations$time
} else time <- NULL
} else if(is.vector(durations)){
dur = durations
vecProvided <- TRUE
time <- NULL
} else stop("'durations' must be a data.frame or a vector")
z <- ExoVarNames <- NULL
if(length(exogenousVariables) != 0){
z <- as.matrix(durations[ , exogenousVariables])
if(is.numeric(exogenousVariables)){
ExoVarNames <- names(durations)[exogenousVariables]
} else{
ExoVarNames <- exogenousVariables
}
}
N <- length(dur)
mean <- mean(dur)
currentTime <- Sys.time()
#provides the possibility of entering truncated and/or case mismatched arguments:
model <- match.arg(toupper(model), c("ACD", "LACD1", "LACD2", "AMACD", "ABACD", "BACD", "SNIACD", "LSNIACD"))
dist <- match.arg(tolower(dist), c("exponential", "weibull", "burr", "gengamma", "genf", "qweibull", "mixqwe", "mixqww", "mixinvgauss"))
distCode <- .getDistCode(dist)
#checks startPara and order input:
if(length(startPara) != 0){
if(length(order) == 0) order <- .setOrder(model)
.checkOrderAndPara(order, startPara, distCode, model)
paraTemp <- .seperateStartPara(startPara, model, distCode, order)
distStartPara <- paraTemp$distStartPara
startPara <- paraTemp$startPara
}else{
if(length(order) != 0){
.checkOrder(order, model)
} else{
order <- .setOrder(model)
}
startPara <- .setStartPara(model, distCode, mean, order, Nexovar = ncol(z))
distStartPara <- startPara$distStartPara
startPara <- startPara$startPara
}
if(model %in% c("SNIACD", "LSNIACD") && length(bp) == 0) bp <- .setBP(order[3])
#checks the control list arguments:
con <- list(newDay = 0,
maxit = 4000,
trace = 0,
B = 999,
BootRoundTosec = FALSE)
nmsC <- names(con)
con[(namc <- names(control))] <- control
if(length(noNms <- namc[!namc %in% nmsC]))
warning("unknown names in control: ", paste(noNms, collapse = ", "))
  #checks that time was provided if dailyRestart is used, and computes the vector of indices of the first observation of each new day
if(dailyRestart != 0 && length(time) != 0){
    if(length(con$newDay) == 0 || con$newDay == 0) con$newDay <- .getNewDay(time) #computes the vector of indices of the first observation of each new day
} else if(dailyRestart != 0 && length(time) == 0 && con$newDay == 0){
warning("can only use daily restart of the conditional mean if the clocktimes of transactions or the vector newDay (as a control parameter) are provided! Estimation done assuming no daily restart.")
}
mean <- mean(dur)
if(con$trace != 0) {
assign("ACDmOptimTrace", NULL, envir = ACDmGlobalEnv)
traceMatrix <- NULL
}
#makes it possible to have fixed parameters when optimizing the log likelihood:
fixedParam <- NULL
if(length(fixedParamPos) != 0){
fixedParam <- startPara[fixedParamPos]
startPara <- startPara[!fixedParamPos]
}
if(optimFnc == "optim"){
failed <- FALSE
    tryCatch({ #uses tryCatch so the trace path is still available if the optimization function fails
fit <- stats::optim(startPara, .getLLcall,
method = method, hessian=TRUE,
dur = dur, exogenousVar = z, model = model, order=order, distCode = distCode, newDay = con$newDay,
mean=mean, returnMu = FALSE, breakPoints = bp, forceErrExpec = forceErrExpec,
fixedParam = fixedParam, fixedParamPos = fixedParamPos, control = list(maxit = con$maxit, trace = con$trace), trace = con$trace)
}, error = function(c) {
failed <<- TRUE
if(con$trace != 0) { #in case the trace option were used and the optimization failed, the trace will be plotted:
numcol <- length(startPara) + length(fixedParam) + 1
numrow <- ceiling(length(get("ACDmOptimTrace", envir = ACDmGlobalEnv)) / numcol)
traceMatrix <<- matrix(get("ACDmOptimTrace", envir = ACDmGlobalEnv),
ncol = numcol,
byrow = T,
dimnames = list(1:numrow, c(paste0("para", 1:(numcol - 1)), "LL")))
.plotTracePath(traceMatrix)
rm("ACDmOptimTrace", envir = ACDmGlobalEnv)
cat(c$message)
}
}
)
if(failed){ #output in case of optimization failure:
if(con$trace != 0){
cat("\n\nacdFit: Oops, seems like the the optimization function failed. The trace path up to the crash was returned\n\n")
return(traceMatrix)
} else{
cat("\n\nError: Oops, seems like the the optimization function failed. Changing the 'optimFnc' or/and its settings, or starting from a diffrent 'startPara' might work. You can also trace the MLE search path by adding the argument 'control = list(trace = 1)'. \n\n")
return()
}
}
#if some parameters were set to be fixed, the fixed and estimated parameters are recombined:
if(length(fixedParamPos) != 0) parTemp <- .returnfixedPara(fit$par, fixedParam, fixedParamPos)
else parTemp <- fit$par
mu <- .getLLcall(param = parTemp, dur = dur, exogenousVar = z, model = model, order = order, mean = mean, distCode = distCode, newDay = con$newDay, returnMu = TRUE, breakPoints = bp, forceErrExpec = forceErrExpec)
} else if(optimFnc == "nlminb"){
failed <- FALSE
    tryCatch({ #uses tryCatch so the trace path is still available if the optimization function fails
fit <- stats::nlminb(start = startPara, objective = .getLLcall, dur = dur, exogenousVar = z,
model = model, order=order, distCode = distCode, newDay = con$newDay,
mean=mean, returnMu = FALSE, breakPoints = bp, forceErrExpec = forceErrExpec,
fixedParam = fixedParam, fixedParamPos = fixedParamPos,
control = list(trace = con$trace, iter.max = con$maxit), lower = -Inf, upper = Inf, trace = con$trace)
}, error = function(c) {
failed <<- TRUE
if(con$trace != 0) { #in case the trace option were used and the optimization failed, the trace will be plotted:
numcol <- length(startPara) + length(fixedParam) + 1
numrow <- ceiling(length(get("ACDmOptimTrace", envir = ACDmGlobalEnv)) / numcol)
traceMatrix <<- matrix(get("ACDmOptimTrace", envir = ACDmGlobalEnv),
ncol = numcol,
byrow = T,
dimnames = list(1:numrow, c(paste0("para", 1:(numcol - 1)), "LL")))
.plotTracePath(traceMatrix)
rm("ACDmOptimTrace", envir = ACDmGlobalEnv)
cat(c$message)
}
}
)
if(failed){ #output in case of optimization failure:
if(con$trace != 0){
cat("\n\nacdFit: Oops, seems like the the optimization function failed. The trace path up to the crash was returned\n\n")
return(traceMatrix)
} else{
cat("\n\nError: Oops, seems like the the optimization function failed. Changing the 'optimFnc' or/and its settings, or starting from a diffrent 'startPara' might work. You can also trace the MLE search path by adding the argument 'control = list(trace = 1)'. \n\n")
return()
}
}
#uses the stats::optimHess function to numerically compute the hessian:
hessianTemp <- matrix(nrow = length(startPara), ncol = length(startPara))
tryCatch({
hessianTemp <- stats::optimHess(fit$par, .getLLcall,
dur = dur, exogenousVar = z, model = model, order=order, distCode = distCode, newDay = con$newDay,
mean=mean, returnMu = FALSE, breakPoints = bp, forceErrExpec = forceErrExpec,
fixedParam = fixedParam, fixedParamPos = fixedParamPos)
}, error = function(c) {
warning("computing the hessian failed: ", c$message)
})
rownames(hessianTemp) <- NULL; colnames(hessianTemp) <- NULL
fit <- list(par = fit$par, hessian = hessianTemp, value = fit$objective, convergence = fit$convergence, counts = fit$evaluations[2])
#if some parameters were set to be fixed, the fixed and estimated parameters are recombined:
if(length(fixedParamPos) != 0) parTemp <- .returnfixedPara(fit$par, fixedParam, fixedParamPos)
else parTemp <- fit$par
mu <- .getLLcall(param = parTemp, dur = dur, exogenousVar = z, model = model, order=order, mean = mean, distCode = distCode, newDay = con$newDay, returnMu = TRUE, breakPoints = bp, forceErrExpec = forceErrExpec)
} else if(optimFnc == "solnp"){
failed <- FALSE
    tryCatch({ #uses tryCatch so the trace path is still available if the optimization function fails
if(con$trace == 0){ #too many warning messages from the solnp function - will not show these unless trace is used
options(warn = -1)
utils::capture.output(
fit <- Rsolnp::solnp(pars=startPara, fun = .getLLcall,
#ineqfun = ineq, ineqUB = .999, ineqLB = 0, LB = LB, UB = UB,
dur = dur, exogenousVar = z, model = model, order = order, mean = mean, distCode = distCode, returnMu = FALSE, forceErrExpec = forceErrExpec,
fixedParam = fixedParam, fixedParamPos = fixedParamPos,
breakPoints = bp, newDay = con$newDay, control = list(outer.iter = con$maxit, trace = con$trace), trace = con$trace)
)
options(warn = 0)
} else {
fit <- Rsolnp::solnp(pars=startPara, fun = .getLLcall,
#ineqfun = ineq, ineqUB = .999, ineqLB = 0, LB = LB, UB = UB,
                             dur = dur, exogenousVar = z, model = model, order = order, mean = mean, distCode = distCode, returnMu = FALSE, forceErrExpec = forceErrExpec,
                             fixedParam = fixedParam, fixedParamPos = fixedParamPos,
                             breakPoints = bp, newDay = con$newDay, control = list(outer.iter = con$maxit, trace = con$trace), trace = con$trace)
}
}, error = function(c) {
failed <<- TRUE
if(con$trace != 0) { #in case the trace option were used and the optimization failed, the trace will be plotted:
numcol <- length(startPara) + length(fixedParam) + 1
numrow <- ceiling(length(get("ACDmOptimTrace", envir = .GlobalEnv)) / numcol)
traceMatrix <<- matrix(get("ACDmOptimTrace", envir = .GlobalEnv),
ncol = numcol,
byrow = T,
dimnames = list(1:numrow, c(paste0("para", 1:(numcol - 1)), "LL")))
.plotTracePath(traceMatrix)
rm("ACDmOptimTrace", envir = .GlobalEnv)
cat(c$message)
}
}
)
if(failed){ #output in case of optimization failure:
if(con$trace != 0){
cat("\n\nacdFit: Oops, seems like the the optimization function failed. The trace path up to the crash was returned\n\n")
return(traceMatrix)
} else{
cat("\n\nError: Oops, seems like the the optimization function failed. Changing the 'optimFnc' or/and its settings, or starting from a diffrent 'startPara' might work. You can also trace the MLE search path by adding the argument 'control = list(trace = 1)'. \n\n")
return()
}
}
fit <- list(par = fit$pars, hessian = fit$hessian,
value = fit$values[length(fit$values)], convergence = fit$convergence, counts = fit$nfuneval)
#if some parameters were set to be fixed, the fixed and estimated parameters are recombined:
if(length(fixedParamPos) != 0) parTemp <- .returnfixedPara(fit$par, fixedParam, fixedParamPos)
else parTemp <- fit$par
mu <- .getLLcall(param = parTemp, dur = dur, exogenousVar = z, model = model, order=order, mean = mean, distCode = distCode,
newDay = con$newDay, returnMu = TRUE, breakPoints = bp, forceErrExpec = forceErrExpec)
} else if(optimFnc == "optimx"){
failed <- FALSE
    tryCatch({ #uses tryCatch so the trace path is still available if the optimization function fails
fit <- optimx::optimx(startPara, .getLLcall,
method = method,
dur = dur, exogenousVar = z, model = model, order=order, distCode = distCode, newDay = con$newDay, mean=mean, forceErrExpec = forceErrExpec,
fixedParam = fixedParam, fixedParamPos = fixedParamPos,
returnMu = FALSE, breakPoints = bp, itnmax = con$maxit, control = list(trace = con$trace, kkt = FALSE), trace = con$trace)
}, error = function(c) {
failed <<- TRUE
if(con$trace != 0) { #in case the trace option were used and the optimization failed, the trace will be plotted:
numcol <- length(startPara) + length(fixedParam) + 1
numrow <- ceiling(length(get("ACDmOptimTrace", envir = ACDmGlobalEnv)) / numcol)
traceMatrix <<- matrix(get("ACDmOptimTrace", envir = ACDmGlobalEnv),
ncol = numcol,
byrow = T,
dimnames = list(1:numrow, c(paste0("para", 1:(numcol - 1)), "LL")))
.plotTracePath(traceMatrix)
rm("ACDmOptimTrace", envir = ACDmGlobalEnv)
cat(c$message)
}
}
)
if(failed){ #output in case of optimization failure:
if(con$trace != 0){
cat("\n\nacdFit: Oops, seems like the the optimization function failed. The trace path up to the crash was returned\n\n")
return(traceMatrix)
} else{
cat("\n\nError: Oops, seems like the the optimization function failed. Changing the 'optimFnc' or/and its settings, or starting from a diffrent 'startPara' might work. You can also trace the MLE search path by adding the argument 'control = list(trace = 1)'. \n\n")
return()
}
}
#uses the stats::optimHess function to numerically compute the hessian:
hessianTemp <- matrix(nrow = length(startPara), ncol = length(startPara))
tryCatch({
hessianTemp <- stats::optimHess(fit[1, 1:length(startPara)], .getLLcall,
dur = dur, exogenousVar = z, model = model, order=order, distCode = distCode, newDay = con$newDay,
mean = mean, returnMu = FALSE, breakPoints = bp, forceErrExpec = forceErrExpec,
fixedParam = fixedParam, fixedParamPos = fixedParamPos)
}, error = function(c) {
warning("computing the hessian failed: ", c$message)
})
rownames(hessianTemp) <- NULL; colnames(hessianTemp) <- NULL
fit <- list(par = as.numeric(fit[1, 1:length(startPara)]), hessian = hessianTemp,
value = fit$value, convergence = fit$convcode, counts = fit$fevals)
#if some parameters were set to be fixed, the fixed and estimated parameters are recombined:
if(length(fixedParamPos) != 0) parTemp <- .returnfixedPara(fit$par, fixedParam, fixedParamPos)
else parTemp <- fit$par
mu <- .getLLcall(param = parTemp, dur = dur, exogenousVar = z, model = model, order=order, mean = mean, distCode = distCode, newDay = con$newDay, returnMu = TRUE, breakPoints = bp, forceErrExpec = forceErrExpec)
}
if(bootstrapErrors){
if(!con$BootRoundTosec){ #the simulation in the bootstraps wont be rounded to seconds
bootPar <- matrix(nrow = con$B, ncol = length(fit$par))
i <- 1
percDone = 5
failed = 0
bootConverged <- rep(-99, con$B)
bootStartTime <- Sys.time()
while(i <= con$B){
bootDur <- sim_ACD(N, model = model, param = fit$par, order = order, startX = mean, startMu = mean, errors = mu$resi, dist = dist, roundToSec = FALSE)
bootTemp <- tryCatch(stats::optim(par = fit$par, fn = .getLLcall, dur = bootDur, model = model, order = order, mean = mean(bootDur), distCode = distCode, returnMu = FALSE, hessian = F, control = list(maxit = con$maxit, trace = con$trace, trace = con$trace)), error = function(e) {NULL})
bootParTemp <- bootTemp$par
if(length(bootParTemp) != 0 && all(abs(bootParTemp[-1]) < 1.5) && bootTemp$convergence == 0){
bootPar[i, ] <- bootParTemp
bootConverged[i] <- bootTemp$convergence
i <- i + 1
} else failed <- failed + 1
if ((i / con$B) >= .05 && percDone == 5) cat("Estimated time for bootstrap simulation: ", round(difftime(Sys.time(), bootStartTime, units = "secs")*20), " sec \n\nbootstrap % done: \n")
if ((i / con$B) >= (percDone / 100)) {cat(percDone,"% "); percDone = percDone + 5}
}
cat("\ntime for bootstrap simulation: ", round(difftime(Sys.time(), bootStartTime, units = "secs")), " sec")
cat("\n\n")
cat(failed, "of the ", con$B, " bootstrap estimations failed and were resimulated\n")
bootErr <- sqrt(diag(stats::cov(bootPar)))
bootCorr <- stats::cor(bootPar)
bootMean <- apply(bootPar, 2, mean)
} else{
mu <- .getLLcall(param = fit$par, dur = dur, model = model, order=order, mean = mean, distCode = distCode, newDay = con$newDay, returnMu = TRUE)
bootPar <- matrix(nrow = con$B, ncol = length(fit$par))
i <- 1
percDone = 5
cat("bootstrap % done: ")
bootDurTemp <- sim_ACD((con$B*(N+50))*1.3, param = fit$par, model = model, order = order, startX = mean, startMu = mean, errors = mu$resi, roundToSec = FALSE)
bootDur <- bootDurTemp[bootDurTemp!=0]
if(bootDur<N)
while(i <= con$B){
bootParTemp <- tryCatch(stats::optim(par = fit$par, fn = .getLLcall, model = model, x=bootDur[((i-(1+failed))*(N+50)+1):((i+failed)*(N+50))], order=order,mean=mean(bootDur), dist=distCode, returnMu=FALSE, newDay = con$newDay, hessian = TRUE, control = list(maxit = con$maxit, trace = con$trace))$par, error = function(e) {NULL})
if(length(bootParTemp) != 0){
bootPar[i, ] <- bootParTemp
i <- i + 1
} else failed <- failed + 1
if ((i / con$B) >= (percDone / 100)) {cat(percDone,"% "); percDone = percDone + 5}
}
cat("\n")
bootErr <- sqrt(diag(stats::cov(bootPar)))
bootCorr <- stats::cor(bootPar)
bootMean <- apply(bootPar, 2, mean)
}
}
  #if QML (ACD with exponential errors) and no preset (fixed) parameters, robust errors are computed:
if(model == "ACD" && dist == "exponential" && length(fixedParamPos) == 0 && length(exogenousVariables) == 0){
QLMscore <- .Call("getScoreACDExp",
as.double(dur),
as.double(mu$mu),
as.double(fit$par),
as.integer(order),
as.integer(0), PACKAGE = "ACDm")
sandwich <- solve(as.matrix(as.data.frame(QLMscore[3]))) %*% as.matrix(as.data.frame(QLMscore[4])) %*% solve(as.matrix(as.data.frame(QLMscore[3])))
robustSE <- sqrt(diag(sandwich))
robustCorr <- solve(diag(robustSE)) %*% sandwich %*% solve(diag(robustSE))
}
else{
robustSE <- NULL
robustCorr <- NULL
}
if(bootstrapErrors) namedParameters <- .getCoef(para = fit$par , model = model, dist = dist, hessian =
fit$hessian,
order = order, bootError = bootErr, bootCorr = bootCorr, bootMean = bootMean,
robustCorr = robustCorr, robustSE = robustSE, fixedParam = fixedParam,
fixedParamPos = fixedParamPos, ExoVarNames = ExoVarNames)
else namedParameters <- .getCoef(para = fit$par , model = model, dist = dist, hessian = fit$hessian, order = order,
robustCorr = robustCorr, robustSE = robustSE, fixedParam = fixedParam,
fixedParamPos = fixedParamPos, ExoVarNames = ExoVarNames)
N <- length(dur)
Npar <- length(fit$par)
LogLikelihood <- -fit$value
AIC <- 2 * (Npar - LogLikelihood)
BIC <- -2 * LogLikelihood + Npar * log(N)
MSE <- mean((dur-mu$mu)^2)
GoodnessOfFit <- data.frame("value" = c(LogLikelihood, AIC, BIC, MSE))
rownames(GoodnessOfFit) <- c("LogLikelihood", "AIC", "BIC", "MSE")
#computes the value of the unfree distribution parameter as a function of the others (if mean was forced to be 1):
if(forceErrExpec == 1) forcedDistPara <- .returnFixedMeanPara(distCode, namedParameters$DPar)
else{ #if forceErrExpec was false (0) this parameter was fixed at 1 in the estimation
forcedDistPara <- 1
names(forcedDistPara) <- names(.returnFixedMeanPara(distCode, namedParameters$DPar)) #only to get the name of the parameter
}
if(!vecProvided) returnValue <- list(call = match.call(),
durations = durations)
else returnValue <- list(call = match.call(),
durations = data.frame(durations = dur))
returnValue <- append(returnValue, list(
muHats = mu$mu,
residuals = mu$resi,
model = model,
order = order,
distribution = dist,
distCode = distCode,
startPara = startPara,
mPara = namedParameters$MPar,
dPara = namedParameters$DPar,
exogenousVariables = exogenousVariables,
breakPoints = bp,
Npar = Npar[1],
goodnessOfFit = GoodnessOfFit,
parameterInference = namedParameters$Inference,
forcedDistPara = forcedDistPara,
forceErrExpec = forceErrExpec,
comments = namedParameters$comment,
hessian = fit$hessian,
N = N,
evals = fit$counts[1],
convergence = fit$convergence,
estimationTime = difftime(Sys.time(), currentTime, units = "secs"),
description = paste("Estimated at", currentTime, "by user", Sys.info()[["user"]]),
newDayVector = con$newDay))
#if bootstrapp errors: adds the bootstrapp inference
if(bootstrapErrors) returnValue <- append(returnValue, list(bootstrapEstimates = bootPar, bootConverged = bootConverged, bootErr = bootErr, bootMean = bootMean, bootCorr = namedParameters$bootCorr, bootPar = bootPar))
  #if QML (ACD and exponential): adds the robust correlation
if(model == "ACD" && dist == "exponential" && length(fixedParamPos) == 0 && length(exogenousVariables) == 0) returnValue <- append(returnValue, list(robustCorr = namedParameters$robustCorr))
#if SNIACD: adds the break points:
if(model %in% c("SNIACD", "LSNIACD")) returnValue <- append(returnValue, list(SNIACDbp = bp))
#plots and append the trace path if the argument 'control = list(trace = 1)' was given
if(con$trace != 0) {
numcol <- length(startPara) + length(fixedParam) + 1
numrow <- ceiling(length(get("ACDmOptimTrace", envir = ACDmGlobalEnv)) / numcol)
traceMatrix <- matrix(get("ACDmOptimTrace", envir = ACDmGlobalEnv), ncol = numcol, byrow = T,
dimnames = list(1:numrow, c(names(returnValue$mPara), names(returnValue$dPara), "LL")))
.plotTracePath(traceMatrix)
rm("ACDmOptimTrace", envir = ACDmGlobalEnv)
returnValue <- append(returnValue, list(traceMatrix = traceMatrix))
}
class(returnValue) <- c("acdFit", class(returnValue))
if(output) print(returnValue)
acdFit <- returnValue
}
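
#Minimal usage sketch (kept as a comment so it is not run at package load;
#assumes this package's sim_ACD() simulator and its defaults):
#  durSim <- sim_ACD(3000)                       #simulate durations from a standard ACD model
#  fitSim <- acdFit(durSim, model = "ACD", dist = "exponential", order = c(1, 1))
#  coef(fitSim)                                  #model and distribution parameters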
| /scratch/gouwar.j/cran-all/cranData/ACDm/R/acdFit.R |
coef.acdFit <- function(object, returnCoef = "all", ...){
returnCoef <- match.arg(returnCoef, c("all", "distribution", "model"))
switch(returnCoef,
all = c(object$mPara, object$dPara),
distribution = object$dPara,
model = object$mPara)
}
residuals.acdFit <- function(object, ...){
object$residuals
}
predict.acdFit <- function(object, N = 10, ...){
k <- max(object$order)
endMu = utils::tail(object$muHats, k) #the end of the estimated expected durations of the fitted model
if(length(object$durations$adjDur) != 0)
endDurations <- utils::tail(object$durations$adjDur, k) #the end of the durations of the fitted model
else #no 'adjDur' column
endDurations <- utils::tail(object$durations$durations, k) #the end of the durations of the fitted model
errorExpectation <- 1
#if the fitted model didn't have a forced error expectation = 1, the mean of the residuals is instead used:
if(object$forceErrExpec == FALSE) errorExpectation <- mean(object$residuals)
#"simulates" with error terms equal to their expectation, starting from the endpoints of the original data set
sim_ACD(N = N, param = stats::coef(object), Nburn = length(endDurations), startX = endDurations,
startMu = endMu, errors = errorExpectation)
}
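
#Usage sketch (comment only; assumes 'durations' is a numeric vector of
#diurnally adjusted durations and that acdFit() returns an 'acdFit' object):
#  fit <- acdFit(durations)
#  predict(fit, N = 5)   #expected durations 5 steps ahead, with errors at their expectation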
print.acdFit <- function(x, ...){
if(x$distribution == "exponential") {
cat("\nACD model estimation by (Quasi) Maximum Likelihood \n")
} else {
cat("\nACD model estimation by Maximum Likelihood \n")
}
cat("\nCall:\n")
cat(" ", deparse(x$call), "\n")
cat("\nModel:\n")
cat(" ", x$model)
cat("(")
cat(x$order[1])
for(i in 2:length(x$order)) cat("", x$order[i], sep = ", ")
cat(")")
if(length(x$SNIACDbp) != 0) cat("\n Break points:", x$SNIACDbp)
cat("\n")
cat("\nDistribution:\n")
cat(" ", x$distribution)
cat("\n\nN:", x$N)
cat("\n\nParameter estimate:\n")
print(format(x$parameterInference, digits = 3, scientific = F))
if(length(x$comments) > 0){
cat("\nNote:", x$comments)
}
if(length(x$forcedDistPara) > 0){
cat("\n\nThe fixed/unfree mean distribution parameter: \n")
cat(" ", names(x$forcedDistPara), ": ", x$forcedDistPara, sep = "")
}
if(length(x$bootErr) != 0){
cat("\n\nBootstrap correlations:\n")
print(format(data.frame(x$bootCorr), digits = 3, scientific = F))
}
if(length(x$robustCorr) != 0){
cat("\n\nQML robust correlations:\n")
print(format(data.frame(x$robustCorr), digits = 3, scientific = F))
}
cat("\n\nGoodness of fit:\n")
print.data.frame(x$goodnessOfFit)
cat("\nConvergence:", x$convergence, "\n")
cat("\nNumber of log-likelihood function evaluations:", x$evals, "\n")
if(length(x$bootErr) == 0) cat("\nEstimation time:", round(x$estimationTime, digits = 4), attributes(x$estimationTime)$units, "\n")
else cat("\nTotal estimation time (including bootstrap simulations):", round(x$estimationTime, digits = 4), attributes(x$estimationTime)$units, "\n")
cat("\nDescription:", x$description)
cat("\n\n")
}
| /scratch/gouwar.j/cran-all/cranData/ACDm/R/acdFitClassMethods.R |
acf_acd <- function(fitModel = NULL, conf_level = 0.95, max = 50, min = 1){
lag <- acf <- NULL
if("acdFit" %in% class(fitModel)){
dur <- fitModel$durations$durations
adjDur <- fitModel$durations$adjDur
resi <- fitModel$residuals
} else if("data.frame" %in% class(fitModel)){
dur <- fitModel$durations
adjDur <- fitModel$adjDur
resi <- fitModel$residuals
} else if(is.vector(fitModel)){
dur <- fitModel
adjDur <- NULL
resi <- NULL
} else stop("fitModel is not of the correct object type")
df <- data.frame()
if(length(dur) != 0){
temp_acf <- stats::acf(dur, plot = FALSE, lag.max = max)
df <- rbind(df, data.frame(acf = temp_acf$acf[-(1:min)], lag = temp_acf$lag[-(1:min)], data = "durations"))
conf <- stats::qnorm((1 - conf_level)/2)/sqrt(length(dur))
}
if(length(adjDur) != 0){
temp_acf <- stats::acf(adjDur, plot = FALSE, lag.max = max)
df <- rbind(df, data.frame(acf = temp_acf$acf[-(1:min)], lag = temp_acf$lag[-(1:min)], data = "adj. durations"))
conf <- stats::qnorm((1 - conf_level)/2)/sqrt(length(adjDur))
}
if(length(resi) != 0){
temp_acf <- stats::acf(resi, plot = FALSE, lag.max = max)
df <- rbind(df, data.frame(acf = temp_acf$acf[-(1:min)], lag = temp_acf$lag[-(1:min)], data = "residuals"))
conf <- stats::qnorm((1 - conf_level)/2)/sqrt(length(resi))
}
g <- ggplot2::ggplot(df, ggplot2::aes(x = lag, y = acf))
g <- g + ggplot2::geom_bar(stat = "identity", position = "identity") + ggplot2::ylab("autocorrelation") + ggplot2::xlab("lag")
g <- g + ggplot2::geom_hline(yintercept = -conf, color = "blue",size = 0.2) + ggplot2::geom_hline(yintercept = conf, color = "blue",size = 0.2)
g <- g + ggplot2::geom_hline(yintercept = 0, color = "red", size = 0.3) + ggplot2::theme_bw(base_size=20) + ggplot2::facet_wrap(~data)
print(g)
acf_acd <- df
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/acf_acd.R |
computeDurations <- function(transactions, open = "10:00:00", close = "18:25:00", rm0dur = TRUE, type = "trade", priceDiff = .1, cumVol = 10000){
open <- as.POSIXlt(strptime(open, "%H:%M:%S"))
open <- open$h * 3600 + open$min * 60 + open$sec
close <- as.POSIXlt(strptime(close, "%H:%M:%S"))
close <- close$h * 3600 + close$min * 60 + close$sec
type <- switch(type, trade = 1, transactions = 1, price = 2, volume = 3)
if("data.frame" %in% class(transactions)){
if(length(transactions$time) == 0) stop("the data.frame 'transactions' must contain a column named 'time'
with timestamps of each transaction")
if(!("POSIXlt" %in% class(transactions$time))) transactions$time <- as.POSIXlt(transactions$time)
} else{
transactions <- data.frame(time = transactions)
if(!("POSIXlt" %in% class(transactions$time))) transactions$time <- as.POSIXlt(transactions$time)
}
if(length(transactions$volume) != 0 || length(transactions$price)){ #volume and/or price were provided
temp <- .C("computeDurationsSubSec",
as.integer(transactions$time$year), #1
as.integer(transactions$time$mon),
as.integer(transactions$time$mday),
as.integer(transactions$time$hour),
as.integer(transactions$time$min), #5
as.double(transactions$time$sec),
as.integer(rep(0,length(transactions$time))),
as.integer(rep(0,length(transactions$time))),
as.integer(rep(0,length(transactions$time))),
as.integer(rep(0,length(transactions$time))), #10
as.integer(rep(0,length(transactions$time))),
as.double(rep(0,length(transactions$time))),
as.integer(transactions$volume),
as.double(transactions$price),
as.integer(rep(0,length(transactions$time))), #15
as.double(rep(0,length(transactions$time))),
as.integer(rep(0,length(transactions$time))),
as.double(rep(0,length(transactions$time))),
as.integer(length(transactions$time)),
as.integer(0), #20
as.double(open),
as.double(close),
as.integer(type),
as.integer(rm0dur),
as.double(priceDiff),
as.integer(cumVol), PACKAGE = "ACDm") #26
n <- temp[[20]]
times <- paste(temp[[7]][1:n] + 1900, temp[[8]][1:n] + 1, temp[[9]][1:n], temp[[10]][1:n], temp[[11]][1:n], temp[[12]][1:n], sep = ":")
dftemp <- data.frame(time = strptime(times, "%Y:%m:%d:%H:%M:%OS"))
if(length(transactions$price) != 0) dftemp <- cbind(dftemp, price = temp[[16]][1:n])
if(length(transactions$volume) != 0) dftemp <- cbind(dftemp, volume = temp[[15]][1:n])
if(rm0dur) dftemp <- cbind(dftemp, Ntrans = temp[[17]][1:n])
dftemp <- cbind(dftemp, durations = temp[[18]][1:n])
} else{ #only transaction times were given
temp <- .C("computeDurationsShort",
as.integer(transactions$time$year), #1
as.integer(transactions$time$mon),
as.integer(transactions$time$mday),
as.integer(transactions$time$hour),
as.integer(transactions$time$min), #5
as.double(transactions$time$sec),
as.integer(rep(0,length(transactions$time))),
as.integer(rep(0,length(transactions$time))),
as.integer(rep(0,length(transactions$time))),
as.integer(rep(0,length(transactions$time))), #10
as.integer(rep(0,length(transactions$time))),
as.double(rep(0,length(transactions$time))),
as.double(rep(0,length(transactions$time))),
as.integer(0),
as.integer(rep(0,length(transactions$time))), #15
as.integer(length(transactions$time)),
as.integer(open),
as.integer(close),
as.integer(rm0dur), PACKAGE = "ACDm") #19
n <- temp[[14]]
times <- paste(temp[[7]][1:n]+1900, temp[[8]][1:n]+1, temp[[9]][1:n], temp[[10]][1:n], temp[[11]][1:n], temp[[12]][1:n], sep = ":")
dftemp <- data.frame(time = strptime(times, "%Y:%m:%d:%H:%M:%OS"))
if(rm0dur) dftemp <- cbind(dftemp, Ntrans = temp[[15]][1:n])
dftemp <- cbind(dftemp, durations = temp[[13]][1:n])
}
#checks if any of the durations are negative:
if(any(transactions$durations < 0)){
if(is.unsorted(transactions$time)){
warning("the provided 'time' column is not in chronological order")
} else{
warning("Negative durations computed.")
}
}
cat("The", length(transactions$time), "transactions resulted in", n, "durations")
return(dftemp)
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/computeDurations.R |
dburr <- function(x, theta = 1, kappa = 1.2, sig2 = .3, forceExpectation = F){
if(forceExpectation) theta <- ((gamma(1+1/kappa)*gamma(1/sig2 - 1/kappa))/(sig2^(1+1/kappa)*gamma(1/sig2+1)))^(kappa)
retrunValue <- theta * kappa * x^(kappa - 1)
retrunValue <- retrunValue / (1+ sig2 * theta * x^kappa)^(1/sig2 + 1)
return(retrunValue)
}
pburr <- function(x, theta = 1, kappa = 1.2, sig2 = .3, forceExpectation = F){
if(forceExpectation) theta <- ((gamma(1+1/kappa)*gamma(1/sig2 - 1/kappa))/(sig2^(1+1/kappa)*gamma(1/sig2+1)))^(kappa)
return(1 - (1 + sig2 * theta * x^kappa)^(-1/sig2))
}
qburr <- function(p, theta = 1, kappa = 1.2, sig2 = .3, forceExpectation = F){
if(forceExpectation) theta <- ((gamma(1+1/kappa)*gamma(1/sig2 - 1/kappa))/(sig2^(1+1/kappa)*gamma(1/sig2+1)))^(kappa)
retrunValue <- (1-p)^(-sig2) - 1
retrunValue <- retrunValue / (sig2 * theta)
retrunValue <- retrunValue^(1/kappa)
return(retrunValue)
}
burrExpectation <- function(theta = 1, kappa = 1.2, sig2 = .3){
retrunValue <- theta^(-1/kappa) * gamma(1+1/kappa)*gamma(1/sig2-1/kappa)
retrunValue <- retrunValue / (sig2^(1+1/kappa)*gamma(1/sig2+1))
return(retrunValue)
}
rburr <- function(n = 1,theta = 1, kappa = 1.2, sig2 = .3, forceExpectation = F){
if(forceExpectation) theta <- ((gamma(1+1/kappa)*gamma(1/sig2 - 1/kappa))/(sig2^(1+1/kappa)*gamma(1/sig2+1)))^(kappa)
qburr(stats::runif(n), theta = theta, kappa = kappa, sig2 = sig2)
}
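
#Quick consistency sketch for the Burr functions above (illustrative, not run):
#  p <- c(.1, .5, .9)
#  all.equal(pburr(qburr(p)), p)                             #pburr and qburr are inverses
#  all.equal(stats::integrate(dburr, 0, 5)$value, pburr(5))  #pdf integrates to the cdf
#  mean(rburr(1e5)); burrExpectation()                       #Monte Carlo mean vs closed form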
dgenf <- function(x, kappa = 5, eta = 1.5, gamma = .8, lambda = 1, forceExpectation = F){
if(min(kappa, eta, gamma, lambda) <= 0) stop("all parameters must be > 0")
if(forceExpectation){
if(eta - 1/gamma == 0) stop("The expectation is undefined for eta - 1/gamma == 0")
lambda <- lgamma(kappa) + lgamma(eta) - lgamma(kappa + 1/gamma) - lgamma(eta - 1/gamma)
lambda <- exp(lambda) * eta^(1/gamma)
}
returnValue <- (kappa * gamma -1) * log(x) +
(-eta - kappa) * log(eta + (x/lambda)^gamma) +
eta * log(eta) -
lbeta(kappa, eta) -
(kappa*gamma) * log(lambda)
returnValue <- exp(returnValue) * gamma
return(returnValue)
}
pgenf <- function(q, kappa = 5, eta = 1.5, gamma = .8, lambda = 1, forceExpectation = F){
if(min(kappa, eta, gamma, lambda) <= 0) stop("all parameters must be > 0")
if(forceExpectation){
if(eta - 1/gamma == 0) stop("The expectation is undefined for eta - 1/gamma == 0")
lambda <- lgamma(kappa) + lgamma(eta) - lgamma(kappa + 1/gamma) - lgamma(eta - 1/gamma)
lambda <- exp(lambda) * eta^(1/gamma)
}
f <- function(x) dgenf(x = x, kappa = kappa, eta = eta, gamma = gamma, lambda = lambda)
returnValue <- ifelse(q == 0, 0, stats::integrate(f, 0, q)$value)
return(returnValue)
}
genfHazard <- function(x, kappa = 5, eta = 1.5, gamma = .8, lambda = 1, forceExpectation = F){
if(min(kappa, eta, gamma, lambda) <= 0) stop("all parameters must be > 0")
if(forceExpectation){
if(eta - 1/gamma == 0) stop("The expectation is undefined for eta - 1/gamma == 0")
lambda <- (lgamma(kappa) + lgamma(eta)
-(1/gamma)*log(eta)
- lgamma(kappa + 1/gamma) - lgamma(eta - 1/gamma))
lambda <- exp(lambda)
}
pdf <- dgenf(x = x, kappa = kappa, eta = eta, gamma = gamma, lambda = lambda)
survivial <- 1 - pgenf(q = x, kappa = kappa, eta = eta, gamma = gamma, lambda)
retrunValue <- pdf / survivial
return(retrunValue)
}
dgengamma <- function(x, gamma = .3, kappa = 1.2, lambda = .3, forceExpectation = F){
if(forceExpectation) lambda <- exp(lgamma(kappa) - lgamma(kappa + 1 / gamma))
retrunValue <- ((kappa * gamma - 1) * log(x)
- (kappa * gamma) * log(lambda)
- lgamma(kappa)
+ log(gamma)
-(x / lambda)^gamma)
retrunValue <- exp(retrunValue)
return(retrunValue)
}
pgengamma <- function(x, gamma = .3, kappa = 3, lambda = .3, forceExpectation = F){
if(forceExpectation) lambda <- exp(lgamma(kappa) - lgamma(kappa + 1 / gamma))
retrunValue <- stats::pgamma((x / lambda)^gamma, kappa)
return(retrunValue)
}
qgengamma <- function(p, gamma = .3, kappa = 3, lambda = .3, forceExpectation = F){
if(forceExpectation) lambda <- exp(lgamma(kappa) - lgamma(kappa + 1 / gamma))
retrunValue <- lambda * stats::qgamma(p, kappa)^(1/gamma)
return(retrunValue)
}
rgengamma <- function(n = 1, gamma = .3, kappa = 3, lambda = .3, forceExpectation = F){
  qgengamma(stats::runif(n), gamma = gamma, kappa = kappa, lambda = lambda, forceExpectation = forceExpectation)
}
gengammaHazard <- function(x, gamma = .3, kappa = 3, lambda = .3, forceExpectation = F){
if(forceExpectation) lambda <- exp(lgamma(kappa) - lgamma(kappa + 1 / gamma))
pdf <- dgengamma(x, gamma = gamma, kappa = kappa, lambda = lambda)
survivial <- 1 - pgengamma(x, gamma = gamma, kappa = kappa, lambda = lambda)
retrunValue <- pdf / survivial
return(retrunValue)
}
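
#Consistency sketch for the generalized gamma functions (illustrative, not run):
#  p <- c(.25, .5, .75)
#  all.equal(pgengamma(qgengamma(p)), p)                 #pgengamma and qgengamma are inverses
#  mean(rgengamma(1e5, forceExpectation = TRUE))         #should be close to 1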
dqweibull <- function(x, a = .8, qdist = 1.2, b = 1, forceExpectation = F){
if(forceExpectation) b <- .returnFixedMeanPara(distCode = 6, distPara = c(a, qdist))
returnValue <- (2 - qdist) * a / b^a * x^(a-1) * (1-(1-qdist)*(x/b)^a)^(1/(1-qdist))
return(returnValue)
}
pqweibull <- function(q, a = .8, qdist = 1.2, b = 1, forceExpectation = F){
if(forceExpectation) b <- .returnFixedMeanPara(distCode = 6, distPara = c(a, qdist))
returnValue <- 1 - (1-(1-qdist)*(q/b)^a)^((2-qdist)/(1-qdist))
return(returnValue)
}
qqweibull <- function(p, a = .8, qdist = 1.2, b = 1, forceExpectation = F){
if(forceExpectation) b <- .returnFixedMeanPara(distCode = 6, distPara = c(a, q))
returnValue <- ((1 - (1 - p)^((1 - qdist) / (2 - qdist))) / (1 - qdist))^(1 / a) * b
return(returnValue)
}
rqweibull <- function(n = 1, a = .8, qdist = 1.2, b = 1, forceExpectation = F){
qqweibull(stats::runif(n), a = a, qdist = qdist, b = b, forceExpectation = forceExpectation)
}
qweibullExpectation <- function(a = .8, qdist = 1.2, b = 1){
if((1/(qdist-1)-1)*a <= 1) stop("expectation does not exist for the given parameters")
returnValue <- lgamma(1/a) + lgamma(1/(qdist-1) - 1/a - 1) - lgamma(1/(qdist-1))
returnValue <- exp(returnValue) * (2 - qdist) / ((qdist - 1)^((a+1)/a) * a)
returnValue <- returnValue * b
return(returnValue)
}
qweibullHazard <- function(x, a = .8, qdist = 1.2, b = 1, forceExpectation = F){
if(forceExpectation) b <- .returnFixedMeanPara(distCode = 6, distPara = c(a, qdist))
returnValue <- (2-qdist) * b^(-a) * x^(a-1) * a
returnValue <- returnValue / (1 - (1-qdist) * (x/b)^a)
return(returnValue)
}
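
#Sanity sketch for the q-Weibull functions (illustrative, not run):
#  all.equal(pqweibull(qqweibull(.5)), .5)               #pqweibull and qqweibull are inverses
#  mean(rqweibull(1e5)); qweibullExpectation()           #Monte Carlo mean vs closed form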
dmixqwe <- function(x, pdist = .5, a = .8, qdist = 1.5, lambda = .8, b = 1, forceExpectation = F){
if(forceExpectation) b <- (1 - (1 - pdist) * lambda) / pdist * .returnFixedMeanPara(distCode = 6, distPara = c(a, qdist))
returnValue <- pdist * (2 - qdist) * a / b^a * x^(a-1) * (1-(1-qdist)*(x/b)^a)^(1/(1-qdist))
returnValue <- returnValue + (1 - pdist) * 1/lambda * exp(-x/lambda)
return(returnValue)
}
pmixqwe <- function(q, pdist = .5, a = .8, qdist = 1.5, lambda = .8, b = 1, forceExpectation = F){
if(forceExpectation) b <- (1 - (1 - pdist) * lambda) / pdist * .returnFixedMeanPara(distCode = 6, distPara = c(a, qdist))
returnValue <- pdist * pqweibull(q, a = a, q = qdist, b = b) + (1 - pdist) * (1 - exp(-q/lambda))
return(returnValue)
}
mixqweHazard <- function(x, pdist = .5, a = .8, qdist = 1.5, lambda = .8, b = 1, forceExpectation = F){
if(forceExpectation) b <- (1 - (1 - pdist) * lambda) / pdist * .returnFixedMeanPara(distCode = 6, distPara = c(a, qdist))
pdf <- dmixqwe(x, pdist = pdist, a = a, qdist = qdist, lambda = lambda, b = b)
survivial <- 1 - pmixqwe(x, pdist = pdist, a = a, qdist = qdist, lambda = lambda, b = b)
returnValue <- pdf / survivial
return(returnValue)
}
dmixqww <- function(x, pdist = .5, a = 1.2, qdist = 1.5, theta = .8, gamma = 1, b = 1, forceExpectation = F){
if(forceExpectation) b <- .returnFixedMeanPara(distCode = 8, distPara = c(pdist, a, qdist, theta, gamma))
returnValue <- pdist * (2 - qdist) * a / b^a * x^(a-1) * (1-(1-qdist)*(x/b)^a)^(1/(1-qdist))
returnValue <- returnValue + (1 - pdist) * theta * gamma * x^(gamma-1) * exp(-theta * x^gamma)
return(returnValue)
}
pmixqww <- function(q, pdist = .5, a = 1.2, qdist = 1.5, theta = .8, gamma = 1, b = 1, forceExpectation = F){
if(forceExpectation) b <- .returnFixedMeanPara(distCode = 8, distPara = c(pdist, a, qdist, theta, gamma))
returnValue <- pdist * pqweibull(q, a = a, q = qdist, b = b) + (1 - pdist) * (1 - exp(-theta * q^gamma))
return(returnValue)
}
mixqwwHazard <- function(x, pdist = .5, a = 1.2, qdist = 1.5, theta = .8, gamma = 1, b = 1, forceExpectation = F){
if(forceExpectation) b <- .returnFixedMeanPara(distCode = 8, distPara = c(pdist, a, qdist, theta, gamma))
pdf <- dmixqww(x, pdist = pdist, a = a, qdist = qdist, theta = theta, gamma = gamma, b = b, forceExpectation = forceExpectation)
survivial <- 1 - pmixqww(x, pdist = pdist, a = a, qdist = qdist, theta = theta, gamma = gamma, b = b, forceExpectation = forceExpectation)
returnValue <- pdf / survivial
return(returnValue)
}
dmixinvgauss <- function(x, theta = .2, lambda = .1, gamma = .05, forceExpectation = F){
phi <- 1
if(forceExpectation) phi <- theta * (1 + theta^2 / lambda / (1 + gamma))
returnValue <- (gamma + phi * x) / (gamma + theta) * sqrt(lambda/(2 * pi * x^3 * phi)) * exp(-lambda * (phi * x - theta)^2/(2 * phi * x * theta^2))
return(returnValue)
}
pmixinvgauss <- function(q, theta = .2, lambda = .1, gamma = .05, forceExpectation = F){
phi <- 1
if(forceExpectation) phi <- theta * (1 + theta^2 / lambda / (1 + gamma))
t1 <- q / theta - 1
t2 <- -q / theta - 1
returnValue <- stats::pnorm(t1*sqrt(lambda / (q * phi))) + (gamma - theta) / (theta + gamma) * stats::pnorm(t2*sqrt(lambda / (q * phi))) * exp(2 * lambda / theta)
return(returnValue)
}
mixinvgaussHazard <- function(x, theta = .2, lambda = .1, gamma = .05, forceExpectation = F){
pdf <- dmixinvgauss(x, theta = theta, lambda = lambda, gamma = gamma, forceExpectation = forceExpectation)
  survival <- 1 - pmixinvgauss(x, theta = theta, lambda = lambda, gamma = gamma, forceExpectation = forceExpectation)
  returnValue <- pdf / survival
return(returnValue)
}
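# Illustrative usage sketch (not part of the package API; the parameter values
# are arbitrary assumptions): the hazard functions above can be evaluated on a
# grid to inspect their shape, e.g.
# x <- seq(0.01, 3, by = 0.01)
# plot(x, qweibullHazard(x, a = 0.8, qdist = 1.2, b = 1), type = "l",
#      xlab = "x", ylab = "hazard")
# lines(x, mixqweHazard(x, pdist = 0.5, a = 0.8, qdist = 1.5, lambda = 0.8),
#       lty = 2)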
#
# dbirnbaumsaunders <- function(x, kappa = 1, sigma = 1){
#
# returnValue <- 1/(2 * kappa * sigma * sqrt(2 * pi)) * ((sigma/x)^(1/2) + (sigma/x)^(3/2)) * exp(-1/(2 * kappa^2) * (x/sigma + sigma/x -2))
#
# return(returnValue)
# }
#
# pbirnbaumsaunders <- function(x, kappa = 1, sigma = 1){
#
# returnValue <- stats::pnorm(1/kappa * ((x/sigma)^(1/2) - (sigma/x)^(1/2)))
#
# return(returnValue)
# }
#
# birnbaumsaundersHazard <- function(x, kappa = 1, sigma = 1){
#
# pdf <- dbirnbaumsaunders(x = x, kappa = kappa, sigma = sigma)
#   survival <- 1 - pbirnbaumsaunders(x, kappa, sigma = sigma)
#   returnValue <- pdf / survival
#
# return(returnValue)
# } | /scratch/gouwar.j/cran-all/cranData/ACDm/R/distributions.R |
diurnalAdj <- function(dur, method = "cubicSpline", nodes = c(seq(600, 1105, 60), 1105), aggregation = "all", span = "cv", spar = 0, Q = 4, returnSplineFnc = FALSE){
durations <- spline.x <- spline.y <- day <- x <- y <- time <- NULL
if(!("POSIXlt" %in% class(dur$time))) dur$time <- as.POSIXlt(dur$time)
#provides the possibility of entering truncated arguments:
method <- match.arg(method, c("cubicSpline", "supsmu", "smoothSpline", "FFF"))
aggregation <- match.arg(aggregation, c("weekdays", "all", "none"))
if(nodes[length(nodes)] == nodes[length(nodes) - 1]) nodes <- nodes[-length(nodes)] #if the last two nodes are the same, the last is removed
if(returnSplineFnc == TRUE) rtSpline <- list()
if(method == "cubicSpline" | method == "smoothSpline"){
if(method == "cubicSpline") splnFnc <- function(x, y) splines::interpSpline(x, y)
else splnFnc <- function(x, y) stats::smooth.spline(x, y, all.knots = TRUE, spar = spar)
timeInMinutes <- dur$time$hour * 60 + dur$time$min
timeInterval <- numeric(nrow(dur))
    if (any(timeInMinutes < nodes[1] |
            timeInMinutes > nodes[length(nodes)])) 
      stop(
        "\nAt least one of the durations occurred outside of the nodes. \nThe smallest and largest nodes should be at the opening and closing time."
      )
    if (nodes[length(nodes) - 1] > max(timeInMinutes)) 
      warning(
        "no durations occurred at the latest interval. Check if the 'node' argument is correctly specified"
      )
    if (nodes[2] <= min(timeInMinutes)) 
      warning(
        "no durations occurred at the first interval. Check if the 'node' argument is correctly specified"
      )
for(i in 1:(length(nodes)-1)){
timeInterval <- timeInterval + ifelse((timeInMinutes>=nodes[i] & timeInMinutes<nodes[i+1]),(nodes[i] + nodes[i+1])/2,0) #all observations are given its mid interval value
}
timeInterval <- timeInterval + ifelse(timeInMinutes == nodes[length(nodes)], (nodes[length(nodes)-1] + nodes[length(nodes)])/2,0)
if(aggregation == "all"){
meandur <- plyr::ddply(data.frame(durations = dur$durations, timeInterval = timeInterval), plyr::.(timeInterval), plyr::summarize,
mean = round(mean(durations, na.rm=TRUE), 2))
if(nrow(meandur) < 4) stop("Needs data in at least 3 nodes")
spline <- splnFnc(meandur$timeInterval, meandur$mean)
if(returnSplineFnc == TRUE) rtSpline <- spline
adjDur <- dur$durations/stats::predict(spline, timeInMinutes)$y
df = data.frame(spline=stats::predict(spline, seq(nodes[1], nodes[length(nodes)], 1)))
g <- ggplot(df, aes(x=spline.x/60,y=spline.y))
g <- g + geom_hline(yintercept = 0, color="red")
graphics::plot(g + geom_line()+ylab("Durations (seconds)")+xlab("time of the day")+ggtitle("Diurnal pattern estimated by a cubic spline function"))
} else if(aggregation == "weekdays"){
dur$timeInterval <- timeInterval
adjDur <- numeric(nrow(dur))
df <- data.frame()
for(j in 1:5){
meandur <- plyr::ddply(dur[dur$time$wday == j, -1], plyr::.(timeInterval), plyr::summarize,
mean = round(mean(durations, na.rm=TRUE), 2))
meandur <- cbind(meandur,day=j)
if(nrow(meandur) < 4) stop("Needs data in at least 3 nodes (too few observations on day ", day, " of the week)")
spline <- splnFnc(meandur$timeInterval, meandur$mean)
if(returnSplineFnc == TRUE) rtSpline <- c(rtSpline , list(spline))
df = rbind(df, data.frame(spline=stats::predict(spline, seq(nodes[1], nodes[length(nodes)], 1)), day = j))
adjDur <- adjDur + ifelse(dur$time$wday == j, dur$durations/stats::predict(spline, timeInMinutes)$y, 0)
}
df$day <- factor(df$day, labels = c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday"))
g <- ggplot(df , aes(x=spline.x/60,y=spline.y))
g <- g + geom_line()
g <- g + ylab("Durations (seconds)")
g <- g + xlab("time of the day")
g <- g + ggtitle("Diurnal pattern estimated by a cubic spline function")
g <- g + facet_wrap(~day,ncol=5)
g <- g + geom_hline(yintercept = 0, color="red")
graphics::plot(g)
} else if(aggregation == "none"){
dur <- cbind(dur, timeInterval, date = strftime(dur$time, format = "%Y-%m-%d"))
df <- data.frame()
adjDur <- numeric(nrow(dur))
allDates <- unique(dur$date)
for(date in allDates){
meandur <- plyr::ddply(dur[dur$date == date, -1], plyr::.(timeInterval), plyr::summarize,
mean = mean(durations, na.rm=TRUE))
meandur <- cbind(meandur,date=date)
if(nrow(meandur) < 4) stop("Needs data in at least 3 nodes (too few observations on ", date, ")")
spline <- splnFnc(meandur$timeInterval, meandur$mean)
if(returnSplineFnc == TRUE) rtSpline <- c(rtSpline , list(spline))
df = rbind(df, data.frame(spline = stats::predict(spline, seq(nodes[1], nodes[length(nodes)], 1)), date = date))
adjDur <- ifelse(dur$date == date, dur$durations/stats::predict(spline, timeInMinutes)$y, adjDur)
}
#fix for ggplot2 in case the data are not starting on a monday (adds rows with empty dates in start):
space = ""
i <- 1
while(i < dur$time[1]$wday){
df <- rbind(data.frame(spline.x=rep(0, 1000), spline.y=rep(0, 1000), date = (space <- paste(space, " "))), df)
i <- i +1
}
g <- ggplot(df, aes(x = spline.x / 60, y = spline.y))
g <- g + geom_line()
g <- g + ylab("Durations (seconds)")
g <- g + xlab("time of the day")
g <- g + ggtitle("Diurnal pattern estimated by a cubic spline function")
g <- g + facet_wrap(~date,ncol=5)
g <- g + geom_hline(yintercept = 0, color="red")
graphics::plot(g)
} else stop("The aggregation argument is not supported")
} else if(method == "supsmu"){
if(aggregation == "all"){
smooth <- stats::supsmu((dur$time$hour * 3600 + dur$time$min * 60 + dur$time$sec), dur$durations, span = span)
df <- data.frame(smooth)
smooth <- data.frame(smooth, row.names = 1)
adjDur <- dur$durations/smooth[paste(dur$time$hour * 3600 + dur$time$min * 60 + dur$time$sec), ]
g <- ggplot(df, aes(x=x/3600,y=y))
g <- g + geom_line()
g <- g + ylab("Durations (seconds)")
g <- g + xlab("time of the day")
g <- g + ggtitle("Diurnal pattern estimated by \"super smoother\"")
g <- g + geom_hline(yintercept = 0, color="red")
graphics::plot(g)
} else if(aggregation == "weekdays"){
df <- data.frame()
adjDur <- numeric(nrow(dur))
for(j in 1:5){
tempTime <- dur$time[dur$time$wday == j]
smooth <- stats::supsmu((tempTime$hour * 3600 + tempTime$min * 60 + tempTime$sec), dur$durations[dur$time$wday == j], span = span)
df <- rbind(df, data.frame(smooth, day = j))
smooth <- data.frame(smooth, row.names = 1)
adjDur[dur$time$wday == j] <- dur$durations[dur$time$wday == j]/smooth[paste(tempTime$hour * 3600 + tempTime$min * 60 + tempTime$sec), ]
}
df$day <- factor(df$day, labels = c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday"))
g <- ggplot(df, aes(x=x/3600,y=y))
g <- g + geom_line()
g <- g + ylab("Durations (seconds)")
g <- g + xlab("time of the day")
g <- g + ggtitle("Diurnal pattern estimated by \"super smoother\"")
      g <- g + facet_wrap(~day,ncol=5)
g <- g + geom_hline(yintercept = 0, color="red")
graphics::plot(g)
} else if(aggregation == "none"){
df <- data.frame()
adjDur <- numeric(nrow(dur))
dateTemp <- strftime(dur$time, format = "%Y-%m-%d")
for(date in unique(dateTemp)){
tempTime <- dur$time[dateTemp == date]
smooth <- stats::supsmu((tempTime$hour * 3600 + tempTime$min * 60 + tempTime$sec), dur$durations[dateTemp == date], span = span)
df <- rbind(df, data.frame(smooth, date = date))
smooth <- data.frame(smooth, row.names = 1)
adjDur[dateTemp == date] <- dur$durations[dateTemp == date]/smooth[paste(tempTime$hour * 3600 + tempTime$min * 60 + tempTime$sec), ]
}
#fix for ggplot2 in case the data are not starting on a monday (adds rows with empty dates in start):
space = ""
i <- 1
while(i < dur$time[1]$wday){
df <- rbind(data.frame(x=rep(43200, 2), y=rep(0, 2), date = (space <- paste(space, ""))), df)
i <- i +1
}
g <- ggplot(df, aes(x = x / 3600, y = y))
g <- g + geom_line()
g <- g + ylab("Durations (seconds)")
g <- g + xlab("time of the day")
g <- g + ggtitle("Diurnal pattern estimated by \"super smoother\"")
g <- g + facet_wrap(~date,ncol=5)
g <- g + geom_hline(yintercept = 0, color="red")
graphics::plot(g)
} else stop("The aggregation argument is not supported")
} else if(method == "FFF"){
timeInSec <- 3600*dur$time$hour + 60*dur$time$min + dur$time$sec
range <- max(timeInSec) - min(timeInSec)
tBar <- (timeInSec - min(timeInSec)) / range
if(aggregation == "all"){
deltaC <- matrix(NA, nrow(dur), Q); deltaS <- matrix(NA, nrow(dur), Q)
for(j in 1:Q){
deltaC[, j] <- cos(tBar * 2*pi*j)
deltaS[, j] <- sin(tBar * 2*pi*j)
}
      OLSest <- stats::lm(dur$durations ~ tBar + deltaC + deltaS)
      
      diurnalFactor <- stats::predict(OLSest)
      adjDurDF <- data.frame(time = timeInSec, adjDur = diurnalFactor)
      adjDur <- dur$durations/diurnalFactor
# fff <- function(x, coff = OLSest$coefficients, Q = Q){
# out <- coff[1] + x * coff[2]
# for(j in 1:Q){
# out <- out + coff[j + 2] * cos(x*2*pi*j) + coff[Q + j + 2] * sin(x*2*pi*j)
# }
# fff <- out
# }
#plot(seq(0,1,.01),fff(seq(0,1,.01), coff = OLSest$coefficients, Q = Q), t="l")
g <- ggplot(adjDurDF, aes(x = time/3600 ,y = adjDur))
g <- g + geom_line()
g <- g + ylab("Durations (seconds)")
g <- g + xlab("time of the day")
g <- g + ggtitle("Diurnal pattern estimated by \"Flexible Fourier Form\"")
g <- g + geom_hline(yintercept = 0, color="red")
graphics::plot(g)
} else if(aggregation == "weekdays"){
df <- data.frame()
adjDur <- numeric(nrow(dur))
for(k in 1:5){
tempTBar <- tBar[dur$time$wday == k]
deltaC <- matrix(NA, length(tempTBar), Q); deltaS <- matrix(NA, length(tempTBar), Q)
for(j in 1:Q){
deltaC[, j] <- cos(tempTBar * 2*pi*j)
deltaS[, j] <- sin(tempTBar * 2*pi*j)
}
OLSest <- stats::lm(dur$durations[dur$time$wday == k] ~ tempTBar + deltaC + deltaS)
df <- rbind(df, data.frame(y = stats::predict(OLSest), time = timeInSec[dur$time$wday == k], day = k))
adjDur[dur$time$wday == k] <- dur$durations[dur$time$wday == k]/stats::predict(OLSest)
}
df$day <- factor(df$day, labels = c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday"))
g <- ggplot(df, aes(x=time/3600,y=y))
g <- g + geom_line()
g <- g + ylab("Durations (seconds)")
g <- g + xlab("time of the day")
g <- g + ggtitle("Diurnal pattern estimated by \"Flexible Fourier Form\"")
g <- g + facet_wrap(~day,ncol=5)
g <- g + geom_hline(yintercept = 0, color="red")
graphics::plot(g)
} else if(aggregation == "none"){
df <- data.frame()
adjDur <- numeric(nrow(dur))
dateTemp <- strftime(dur$time, format = "%Y-%m-%d")
for(date in unique(dateTemp)){
tempTBar <- tBar[dateTemp == date]
deltaC <- matrix(NA, length(tempTBar), Q); deltaS <- matrix(NA, length(tempTBar), Q)
for(j in 1:Q){
deltaC[, j] <- cos(tempTBar * 2 * pi * j)
deltaS[, j] <- sin(tempTBar * 2 * pi * j)
}
OLSest <- stats::lm(dur$durations[dateTemp == date] ~ tempTBar + deltaC + deltaS)
df <- rbind(df, data.frame(y = stats::predict(OLSest), time = timeInSec[dateTemp == date], date = date))
adjDur[dateTemp == date] <- dur$durations[dateTemp == date]/stats::predict(OLSest)
}
#fix for ggplot2 in case the data are not starting on a monday (adds rows with empty dates in start):
space = ""
i <- 1
while(i < dur$time[1]$wday){
df <- rbind(data.frame(x=rep(0, 1000), y=rep(0, 1000), date = (space <- paste(space, " "))), df)
i <- i +1
}
g <- ggplot(df, aes(x=time/3600,y=y))
g <- g + geom_line()
g <- g + ylab("Durations (seconds)")
g <- g + xlab("time of the day")
g <- g + ggtitle("Diurnal pattern estimated by \"Flexible Fourier Form\"")
g <- g + facet_wrap(~date,ncol=5)
g <- g + geom_hline(yintercept = 0, color="red")
graphics::plot(g)
} else stop("The aggregation argument is not supported")
} else stop("Method not supported")
if(min(adjDur) <= 0) stop("The method and method arguments returned non-positive adjusted durations. Try a diffrent method or nodes etc.")
if(method == "cubicSpline" && returnSplineFnc == TRUE){
diurnalAdj <- rtSpline
} else{
adjDur <- cbind(dur[, !(names(dur) %in% "adjDur")], adjDur) #overwrites any previous "adjDur" column
class(adjDur) <- c("durObj", "data.frame")
attributes(adjDur$adjDur)$type <- attributes(dur)$type
attributes(adjDur$adjDur)$method <- method
attributes(adjDur$adjDur)$aggregation <- aggregation
attributes(adjDur$adjDur)$methodArguments <- switch(method,
cubicSpline = nodes,
supsmu = span,
smoothSpline = c(nodes, spar),
FFF = Q)
diurnalAdj <- adjDur
}
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/diurnalAdj.R |
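
# Usage sketch (hedged): assuming a duration object with 'time' and 'durations'
# columns, such as the package's sample data 'durData', the call below would
# estimate the diurnal pattern over all days with a cubic spline and return the
# diurnally adjusted durations in the 'adjDur' column:
# adj <- diurnalAdj(durData, method = "cubicSpline", aggregation = "all")
# head(adj$adjDur)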
.getLLcall <- function(param, dur, exogenousVar = NULL, model, order, mean = mean(dur), distCode = 1,
newDay = c(0), returnMu = TRUE, breakPoints = NULL, forceErrExpec = 1,
fixedParam = NULL, fixedParamPos = NULL, trace = 0){
#combines the param and fixedParam into the full param vector if there are fixed parameters:
if(length(fixedParamPos) != 0)
param <- .returnfixedPara(freePara = param, fixedParam = fixedParam, fixedParamPos = fixedParamPos)
distPara <- .seperateStartPara(param, model, distCode, order)$distStartPara
cFunction <- switch(model,
ACD = "getLL_ACDcall",
LACD1 = "getLL_LACD1call",
LACD2 = "getLL_LACD2call",
AMACD = "getLL_AMACDcall",
ABACD = "getLL_ABACDcall",
BACD = "getLL_BACDcall",
SNIACD = "getLL_SNIACDcall",
LSNIACD = "getLL_logSNIACDcall",
stop("model not supported"))
if(length(exogenousVar) == 0){
if(returnMu){
if(model %in% c("SNIACD", "LSNIACD")){
temp <- .Call(cFunction,
as.double(dur),
as.double(param),
as.integer(order),
as.double(mean),
as.integer(distCode),
as.double(distPara),
as.integer(newDay),
as.double(breakPoints),
as.integer(forceErrExpec), PACKAGE = "ACDm")
} else {
temp <- .Call(cFunction,
as.double(dur),
as.double(param),
as.integer(order),
as.double(mean),
as.integer(distCode),
as.double(distPara),
as.integer(newDay),
as.integer(forceErrExpec), PACKAGE = "ACDm")
}
if(trace != 0) assign("ACDmOptimTrace", c(get("ACDmOptimTrace", envir = ACDmGlobalEnv), param, -temp[[3]]), envir = ACDmGlobalEnv)
.getLLcall <- list(LL = temp[[3]], mu = temp[[1]], resi = temp[[2]])
} else{
if(model %in% c("SNIACD", "LSNIACD")){
LL <- -.Call(cFunction,
as.double(dur),
as.double(param),
as.integer(order),
as.double(mean),
as.integer(distCode),
as.double(distPara),
as.integer(newDay),
as.double(breakPoints),
as.integer(forceErrExpec), PACKAGE = "ACDm")[[3]]
} else {
LL <- -.Call(cFunction,
as.double(dur),
as.double(param),
as.integer(order),
as.double(mean),
as.integer(distCode),
as.double(distPara),
as.integer(newDay),
as.integer(forceErrExpec), PACKAGE = "ACDm")[[3]]
}
if(trace != 0) assign("ACDmOptimTrace", c(get("ACDmOptimTrace", envir = ACDmGlobalEnv), param, -LL), envir = ACDmGlobalEnv)
.getLLcall <- LL
}
} else { #if there are exogenous variables:
cFunction <- paste0(cFunction, "Ex")
if(returnMu){
if(model %in% c("SNIACD", "LSNIACD")){
temp <- .Call(cFunction,
as.double(dur),
as.double(exogenousVar),
as.double(param),
as.integer(order),
as.double(mean),
as.integer(distCode),
as.double(distPara),
as.integer(newDay),
as.double(breakPoints),
as.integer(forceErrExpec), PACKAGE = "ACDm")
} else {
temp <- .Call(cFunction,
as.double(dur),
as.double(exogenousVar),
as.double(param),
as.integer(order),
as.double(mean),
as.integer(distCode),
as.double(distPara),
as.integer(newDay),
as.integer(forceErrExpec), PACKAGE = "ACDm")
}
if(trace != 0) assign("ACDmOptimTrace", c(get("ACDmOptimTrace", envir = ACDmGlobalEnv), param, temp[[3]]), envir = ACDmGlobalEnv)
      .getLLcall <- list(LL = temp[[3]], mu = temp[[1]], resi = temp[[2]])
} else {
if(model %in% c("SNIACD", "LSNIACD")){
LL <- -.Call(cFunction,
as.double(dur),
as.double(exogenousVar),
as.double(param),
as.integer(order),
as.double(mean),
as.integer(distCode),
as.double(distPara),
as.integer(newDay),
as.double(breakPoints),
as.integer(forceErrExpec), PACKAGE = "ACDm")[[3]]
} else {
LL <- -.Call(cFunction,
as.double(dur),
as.double(exogenousVar),
as.double(param),
as.integer(order),
as.double(mean),
as.integer(distCode),
as.double(distPara),
as.integer(newDay),
as.integer(forceErrExpec), PACKAGE = "ACDm")[[3]]
}
if(trace != 0) assign("ACDmOptimTrace", c(get("ACDmOptimTrace", envir = ACDmGlobalEnv), param, -LL), envir = ACDmGlobalEnv)
.getLLcall <- LL
}
}
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/getLL.R |
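
#Note on sign conventions in .getLLcall(): the C routines return the log
#likelihood itself. With returnMu = FALSE the negated value is returned, so
#that minimizers such as stats::optim() effectively maximize the likelihood,
#while the optimization trace and the returnMu = TRUE list element 'LL' hold
#the log likelihood itself.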
ACDmGlobalEnv <- new.env()
assign("ACDmOptimTrace", NULL, envir = ACDmGlobalEnv)
.getDistCode<- function(dist){
if(dist == "exponential"){
.getDistCode <- 1
} else if(dist == "weibull"){
.getDistCode <- 2
} else if(dist == "burr"){
.getDistCode <- 3
} else if(dist == "gengamma"){
.getDistCode <- 4
} else if(dist == "genf"){
.getDistCode <- 5
} else if(dist == "qweibull"){
.getDistCode <- 6
} else if(dist == "mixqwe"){
.getDistCode <- 7
} else if(dist == "mixqww"){
.getDistCode <- 8
} else if(dist == "mixinvgauss"){
.getDistCode <- 9
} else if(dist == "birnbaum-saunders"){
.getDistCode <- 10
} else stop("the provided distribution does not exist")
}
.setBP <- function(Nbp){
.setBP <- switch(as.character(Nbp),
"1" = c(1),
"2" = c(.5, 1.5),
"3" = c(.3, .8, 1.5),
"4" = c(.3, .8, 1.5, 2),
"5" = c(.3, .5, 1, 1.5, 2),
"6" = c(.2, .4, .6, 1, 1.5, 2))
}
.checkOrderAndPara <- function(order, para, distCode, model, Nexovar = 0){
#checks the order:
if(model %in% c("ACD", "LACD1", "LACD2","ABACD")){
if(length(order) != 2) stop("The order is not entered in the correct format, check the description")
} else if(model %in% c("AMACD", "SNIACD", "LSNIACD")){
if(length(order) != 3) stop("The order is not entered in the correct format, check the description")
} else stop("the model is wrongly entered or not supported!")
if(any(order != round(order))) stop("The order must be integers")
if(any(order < 0)) stop("The order can't have negative entries")
#checks the number of parameters
if(model %in% c("ACD", "LACD1", "LACD2")){
Nmodelpara <- 1 + order[1] + order[2]
} else if(model %in% c("ABACD")){
Nmodelpara <- 4 + 2*order[1] + order[2]
}else if(model %in% c("AMACD")){
Nmodelpara <- 1 + order[1] + order[2] + order[3]
} else if(model %in% c("BACD")){
Nmodelpara <- 1 + order[1] + order[2] + 2
} else if(model %in% c("SNIACD", "LSNIACD")){
Nmodelpara <- 1 + order[1] + order[2] + order[3]
}
if(distCode == 1){
Ndistpara <- 0
} else if(distCode == 2){
Ndistpara <- 1
} else if(distCode == 3){
Ndistpara <- 2
} else if(distCode == 4){
Ndistpara <- 2
} else if(distCode == 5){
Ndistpara <- 3
} else if(distCode == 6){
Ndistpara <- 2
} else if(distCode == 7){
Ndistpara <- 4
} else if(distCode == 8){
Ndistpara <- 5
} else if(distCode == 9){
Ndistpara <- 3
} else if(distCode == 10){
Ndistpara <- 1
} else{
stop("Wrong distCode!")
}
if(length(para) != (Nmodelpara + Ndistpara + Nexovar)){
errMsg <- paste("Wrong number of given parameters. The", model,
"model should have", Nmodelpara + Nexovar,
"parameters , of wich", Nexovar, "are for the exogenous variables",
"and the distribution", Ndistpara, "parameters.")
stop(errMsg)
}
}
.setStartPara <- function(model, distCode, mean, order, Nexovar = NULL){
if(distCode == 1){
distStartPara <- NULL
} else if(distCode == 2){
distStartPara <- .8
} else if(distCode == 3){
distStartPara <- c(1.1, 0.3)
} else if(distCode == 4){
distStartPara <- c(2, .5)
} else if(distCode == 5){
distStartPara <- c(0.4, 0.7, 2.5)
} else if(distCode == 6){
distStartPara <- c(.8, 1.2)
} else if(distCode == 7){
distStartPara <- c(.7, 1.3, 1.2, 1.2)
} else if(distCode == 8){
distStartPara <- c(.7, 1.3, 1.2, 0.7, 1.2)
} else if(distCode == 9){
distStartPara <- c(.7, 0.2, 0.4)
} else if(distCode == 10){
distStartPara <- c(1)
}
if(model == "ACD"){
startPara <- c(mean/10, rep(.15/order[1],order[1]), rep(.8/order[2],order[2]))
} else if(model == "LACD1"){
startPara <- c(0.03, rep(.03/order[1],order[1]), rep(.98/order[2],order[2]))
} else if(model == "LACD2"){
startPara <- c(0, rep(.03/order[1],order[1]), rep(.98/order[2],order[2]))
} else if(model == "ABACD"){
startPara <- c(mean/20, rep(.10/order[1],order[1]), rep(0, order[1]), rep(.8/order[2],order[2]), 0, 1, 1)
} else if(model == "AMACD"){
startPara <- c(mean/10, rep(.15/(order[1]+order[2]), (order[1]+order[2])), rep(.8/order[3],order[3]))
} else if(model == "BACD"){
startPara <- c(mean/20, rep(.10/order[1],order[1]), rep(.8/order[2],order[2]), 1, 1)
} else if(model %in% c("SNIACD")){
startPara <- c(mean/10, c(0.15, rep(.1,order[3])), rep(0,order[1] - 1), rep(.8/order[2],order[2]))
} else if(model %in% c("LSNIACD")){
startPara <- c(0, c(0.03, rep(0,order[3])), rep(0,order[1] - 1), rep(.8/order[2],order[2]))
}
if(length(Nexovar) != 0) startPara <- c(startPara, rep(0, Nexovar))
return(list(startPara = c(startPara, distStartPara), modelStartPara = startPara, distStartPara = distStartPara))
}
.seperateStartPara <- function(startPara, model, distCode, order){
if(model == "ACD"){
startMPara <- startPara[1:(1 + order[1] + order[2])]
} else if(model == "LACD1"){
startMPara <- startPara[1:(1 + order[1] + order[2])]
} else if(model == "LACD2"){
startMPara <- startPara[1:(1 + order[1] + order[2])]
} else if(model == "ABACD"){
startMPara <- startPara[1:(4 + 2 * order[1] + order[2])]
} else if(model == "AMACD"){
startMPara <- startPara[1:(1 + order[1] + order[2] + order[3])]
} else if(model == "BACD"){
startMPara <- startPara[1:(1 + order[1] + order[2] + 2)]
} else if(model %in% c("SNIACD", "LSNIACD")){
startMPara <- startPara[1:(1 + order[1] + order[2] + order[3])]
}
if(distCode == 1){
Ndistpara <- 0
} else if(distCode == 2){
Ndistpara <- 1
} else if(distCode == 3){
Ndistpara <- 2
} else if(distCode == 4){
Ndistpara <- 2
} else if(distCode == 5){
Ndistpara <- 3
} else if(distCode == 6){
Ndistpara <- 2
} else if(distCode == 7){
Ndistpara <- 4
} else if(distCode == 8){
Ndistpara <- 5
} else if(distCode == 9){
Ndistpara <- 3
} else if(distCode == 10){
Ndistpara <- 1
} else{
stop("Wrong distCode!")
}
  #extends the model parameters to include any exogenous variable parameters (placed between the model and distribution parameters):
  if(length(startPara) - Ndistpara > length(startMPara))
    startMPara <- startPara[1:(length(startPara) - Ndistpara)]
if(distCode == 1){
distStartPara <- NULL
} else{
distStartPara <- startPara[(length(startPara) - Ndistpara + 1):length(startPara)]
}
return(list(startPara = startPara, modelStartPara = startMPara, distStartPara = distStartPara))
}
.checkOrder <- function(order, model){
if(model %in% c("ACD", "LACD1", "LACD2","ABACD", "BACD")){
if(length(order) != 2) stop("The order is not entered in the correct format, check the description")
} else if(model %in% c("AMACD", "SNIACD", "LSNIACD")){
if(length(order) != 3) stop("The order is not entered in the correct format, check the description")
} else stop("the model is wrongly entered or not supported!")
if(any(order != round(order))) stop("The order must be integers")
if(any(order < 0)) stop("The order can't have negative entries")
}
.setOrder <- function(model){
if(model %in% c("ACD", "LACD1", "LACD2","ABACD", "BACD")){
return(c(1, 1))
} else if(model %in% c("AMACD")){
return(c(1, 1, 1))
} else if(model %in% c("SNIACD", "LSNIACD")){
return(c(1, 1, 2))
}
}
.getNewDay <- function(time){
daysDiff <- as.Date(time[-1])-as.Date(time[1:(length(time)-1)])
return(which(daysDiff != 0)+1)
}
#returns the full parameter vector from the shorter freePara and fixedParam
.returnfixedPara <- function(freePara, fixedParam, fixedParamPos){
if(length(fixedParamPos) != length(freePara) + length(fixedParam)) stop(".returnfixedPara() error")
returnPara <- numeric(length(fixedParamPos))
fixedParamIndex <- 1
fitParIndex <- 1
for(j in seq_along(fixedParamPos)){
if(fixedParamPos[j]){
returnPara[j] <-fixedParam[fixedParamIndex]
names(returnPara)[j] <- names(fixedParam)[fixedParamIndex]
fixedParamIndex <- fixedParamIndex + 1
} else{
returnPara[j] <- freePara[fitParIndex]
names(returnPara)[j] <- names(freePara)[fitParIndex]
fitParIndex <- fitParIndex + 1
}
}
return(returnPara)
}
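#Illustrative sketch: with fixedParamPos = c(FALSE, TRUE, FALSE),
#freePara = c(omega = 0.1, beta1 = 0.8) and fixedParam = c(alpha1 = 0.15),
#.returnfixedPara(freePara, fixedParam, fixedParamPos) gives the full vector
#c(omega = 0.1, alpha1 = 0.15, beta1 = 0.8).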
#returns the full vector of SE from the shorter freeSE (sets the SE of fixed parameters to NA)
.returnfixedSE <- function(freeSE, fixedParamPos){
if(sum(!fixedParamPos) != length(freeSE)) stop(".returnfixedSE() error")
returnSE <- numeric(length(fixedParamPos))
fixedParamIndex <- 1
fitParIndex <- 1
for(j in seq_along(fixedParamPos)){
if(fixedParamPos[j]){
returnSE[j] <- NA
} else{
returnSE[j] <- freeSE[fitParIndex]
fitParIndex <- fitParIndex + 1
}
}
return(returnSE)
}
.returnFixedMeanPara <- function(distCode, distPara){
if(distCode == 1){ #Exponential
lambda <- 1
names(lambda) <- "lambda"
return(lambda)
} else if(distCode == 2){ #Weibull
theta <- gamma(1+1/distPara)^distPara
names(theta) <- "theta"
return(theta)
} else if(distCode == 3){ #Burr
kappa <- distPara[1]
sig2 <- distPara[2]
theta <- ((gamma(1+1/kappa)*gamma(1/sig2 - 1/kappa))/(sig2^(1+1/kappa)*gamma(1/sig2+1)))^(kappa)
names(theta) <- "theta"
return(theta)
} else if(distCode == 4){ #generelized Gamma
kappa <- distPara[1]
gammaPara <- distPara[2]
lambda <- exp(lgamma(kappa) - lgamma(kappa + 1 / gammaPara))
names(lambda) <- "lambda"
return(lambda)
} else if(distCode == 5){ #generelized F
kappa <- distPara[1]
eta <- distPara[2]
gammaPara <- distPara[3]
lambda <- (lgamma(kappa) + lgamma(eta)
-(1/gammaPara)*log(eta)
- lgamma(kappa + 1/gammaPara) - lgamma(eta - 1/gammaPara))
lambda <- exp(lambda)
names(lambda) <- "lambda"
return(lambda)
} else if(distCode == 6){ #q-Weibull
a <- distPara[1]
q <- distPara[2]
b <- lgamma(1/(q-1)) - lgamma(1/a) - lgamma(1/(q-1) - 1/a - 1)
b <- exp(b) * a * (q-1)^( (1+a) / a ) / (2 - q)
names(b) <- "b"
return(b)
} else if(distCode == 7){ #mixed q-Weibull and exponential
p <- distPara[1]
a <- distPara[2]
q <- distPara[3]
lambda <- distPara[4]
b <- lgamma(1/(q-1)) - lgamma(1/a) - lgamma(1/(q-1) - 1/a - 1)
b <- exp(b) * a * (q-1)^( (1+a) / a ) / (2 - q)
b <- b * (1 - (1 - p) * lambda) / p
names(b) <- "b"
return(b)
} else if(distCode == 8){ #mixed q-Weibull and Weibull
p <- distPara[1]
a <- distPara[2]
q <- distPara[3]
theta <- distPara[4]
gamma <- distPara[5]
b <- lgamma(1/(q-1)) - lgamma(1/a) - lgamma(1/(q-1) - 1/a - 1)
b <- exp(b) * a * (q-1)^( (1+a) / a ) / (2 - q)
b <- b * (1 - (1 - p) * theta^(-1/gamma) * gamma(1/gamma + 1)) / p
names(b) <- "b"
return(b)
} else if(distCode == 9){ #"mixinvgauss" finite inverse Gaussian mixature
return(NULL)
} else if(distCode == 10){ #"birnbaum-saunders"
return(NULL)
} else stop("the provided distribution does not exist")
}
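#Quick sanity check (illustrative, run manually): for the Weibull case the
#returned theta forces a unit expectation under the parameterization
#f(x) = theta * gamma * x^(gamma - 1) * exp(-theta * x^gamma) used above:
# theta <- .returnFixedMeanPara(distCode = 2, distPara = 0.8)
# stats::integrate(function(x) x * theta * 0.8 * x^(0.8 - 1) *
#                    exp(-theta * x^0.8), 0, Inf)  #approximately 1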
.plotTracePath <- function(traceMatrix){
value <- iteration <- NULL
numcol <- dim(traceMatrix)[2]
numrow <- dim(traceMatrix)[1]
df <- data.frame(para = rep(dimnames(traceMatrix)[[2]], numrow),
iteration = rep(1:numrow, each = numcol),
value = as.vector(t(traceMatrix)))
df$para <- factor(df$para,
levels = dimnames(traceMatrix)[[2]])
if(min(df[df$para == "LL",]$value, na.rm = T) < -1e+12) df[df$para == "LL" & !is.nan(df$value) & df$value < -1e+12,]$value = NaN
g <- ggplot2::ggplot(df, aes(y = value, x = iteration)) + geom_line() + facet_grid(para ~ ., scales = "free_y") + ggtitle("Search path for the MLE optimization")
print(g)
}
.getCoef <- function(para, model = c("ACD","LACD1","LACD2","AMACD","SNIACD", "LSNIACD"), dist = c("exponential","weibull","burr"),
hessian, order, bootError = NULL, bootCorr = NULL, bootMean = NULL, robustCorr = NULL,
robustSE = NULL, fixedParam = NULL, fixedParamPos = NULL, ExoVarNames = NULL){
#the standard error of the parameters, estimated from the numerical hessian of the log likelihood function:
  se <- rep(NA_real_, nrow(hessian))
tryCatch(se <- sqrt(diag(solve(hessian))),
error = function(e) {
e
warning("The hessian could not be inverted, calculating the standard errors failed.")
})
#combines the para and fixedParam into the full para vector if there are fixed parameters:
if(length(fixedParamPos) != 0){
para <- .returnfixedPara(freePara = para, fixedParam = fixedParam, fixedParamPos = fixedParamPos)
se <- .returnfixedSE(freeSE = se, fixedParamPos = fixedParamPos)
}
comment <- NULL
if(model %in% c("ACD")){
conDurPara <- para[1]
paraNames <- "omega"
NconDurPara <- 1
if(order[1] != 0){
for(j in 1:order[1]){
conDurPara <- c(conDurPara, para[j+1])
paraNames <- c(paraNames, paste("alpha", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
}
if(order[2] != 0){
for(j in 1:order[2]){
conDurPara <- c(conDurPara, para[j+1+order[1]])
paraNames <- c(paraNames, paste("beta", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
}
names(conDurPara) <- paraNames
pval <- 2*(1-stats::pnorm(abs(para/se)))[1:NconDurPara]
} else if(model %in% c("LACD1", "LACD2")){
conDurPara <- para[1]
paraNames <- "omega"
NconDurPara <- 1
for(j in 1:order[1]){
conDurPara <- c(conDurPara, para[j+1])
paraNames <- c(paraNames, paste("alpha", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
for(j in 1:order[2]){
conDurPara <- c(conDurPara, para[j+1+order[1]])
paraNames <- c(paraNames, paste("beta", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
names(conDurPara) <- paraNames
pval = 2*(1-stats::pnorm(abs(para/se)))[1:NconDurPara]
} else if(model %in% c("AMACD")){
conDurPara <- para[1]
paraNames <- "omega"
NconDurPara <- 1
for(j in 1:order[1]){
conDurPara <- c(conDurPara, para[j+1])
paraNames <- c(paraNames, paste("alpha", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
for(j in 1:order[2]){
conDurPara <- c(conDurPara, para[j+1+order[1]])
paraNames <- c(paraNames, paste("nu", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
for(j in 1:order[3]){
conDurPara <- c(conDurPara, para[j+1+order[1]+order[2]])
paraNames <- c(paraNames, paste("beta", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
names(conDurPara) <- paraNames
pval = 2*(1-(stats::pnorm(abs(para/se))))[1:NconDurPara]
} else if(model %in% c("ABACD")){
conDurPara <- para[1]
paraNames <- "omega"
NconDurPara <- 1
for(j in 1:order[1]){
conDurPara <- c(conDurPara, para[j+1])
paraNames <- c(paraNames, paste("alpha", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
for(j in 1:order[1]){
conDurPara <- c(conDurPara, para[j+1+order[1]])
paraNames <- c(paraNames, paste("c", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
for(j in 1:order[2]){
conDurPara <- c(conDurPara, para[j+1+2*order[1]])
paraNames <- c(paraNames, paste("beta", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
conDurPara <- c(conDurPara, para[2+2*order[1]+order[2]], para[3+2*order[1]+order[2]], para[4+2*order[1]+order[2]])
paraNames <- c(paraNames, "nu", "delta1", "delta2")
NconDurPara <- NconDurPara + 3
names(conDurPara) <- paraNames
pval = 2*(1-(stats::pnorm(abs(para/se))))[1:NconDurPara]
} else if(model %in% c("BACD")){
conDurPara <- para[1]
paraNames <- "omega"
NconDurPara <- 1
for(j in 1:order[1]){
conDurPara <- c(conDurPara, para[j+1])
paraNames <- c(paraNames, paste("alpha", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
for(j in 1:order[2]){
conDurPara <- c(conDurPara, para[j+1+order[1]])
paraNames <- c(paraNames, paste("beta", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
conDurPara <- c(conDurPara, para[2+order[1]+order[2]], para[3+order[1]+order[2]])
paraNames <- c(paraNames,"delta1", "delta2")
NconDurPara <- NconDurPara + 2
names(conDurPara) <- paraNames
pval = 2*(1-(stats::pnorm(abs(para/se))))[1:NconDurPara]
} else if(model %in% c("SNIACD", "LSNIACD")){
conDurPara <- para[1]
paraNames <- "omega"
NconDurPara <- 1
for(j in 1:(order[3] + 1)){
conDurPara <- c(conDurPara, para[j+1])
paraNames <- c(paraNames, paste("c", j-1, sep = ""))
NconDurPara <- NconDurPara + 1
}
if(order[1] > 1)
for(j in 1:(order[1] - 1)){
conDurPara <- c(conDurPara, para[length(conDurPara) + 1])
paraNames <- c(paraNames, paste("alpha", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
for(j in 1:order[2]){
conDurPara <- c(conDurPara, para[length(conDurPara) + 1])
paraNames <- c(paraNames, paste("beta", j, sep = ""))
NconDurPara <- NconDurPara + 1
}
names(conDurPara) <- paraNames
pval = 2*(1-(stats::pnorm(abs(para/se))))[1:NconDurPara]
} else stop("model not supported")
if(dist == "exponential"){
distPara <- NULL
pval <- c(pval, NULL)
comment <- c(comment, NULL)
} else if(dist == "weibull") {
distPara <- para[NconDurPara+1]
paraNames <- c(paraNames, "gamma")
names(distPara) <- paraNames[NconDurPara+1]
    pval <- c(pval, 2*(1 - stats::pnorm(abs((para[length(pval) + 1] - 1) / se[length(pval) + 1]))))
comment <- c(comment, "The p-value for the distribution parameter gamma is from the 2-tailed test H0: gamma = 1.")
} else if(dist == "burr") {
distPara <- para[(NconDurPara+1):length(para)]
paraNames <- c(paraNames, "kappa", "sigma2")
names(distPara) <- paraNames[(NconDurPara + 1):length(para)]
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1] - 1) / se[length(pval) + 1]))))
pval <- c(pval, (1-stats::pnorm((para[length(pval) + 1])/se[length(pval) + 1])))
comment <- c(comment, "The p-value for the distribution parameter kappa is from the 2-tailed test H0: kappa = 1, and for sigma2 it is from the one sided test H0: sigma2 = 0 (or rather approching zero). If the two H0s are true, the Burr distribution reduces to the exponential distribution")
} else if(dist == "gengamma") {
distPara <- para[(NconDurPara+1):length(para)]
paraNames <- c(paraNames, "kappa", "gamma")
names(distPara) <- paraNames[(NconDurPara+1):length(para)]
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
comment <- c(comment, "For the distribution parameters the null hypothesis is such that the parameter = 1 (2-sided). If the null is true, the generelized gamma distribution reduces to the exponential distribution")
} else if(dist == "genf") {
distPara <- para[(NconDurPara+1):length(para)]
paraNames <- c(paraNames, "kappa", "eta", "gamma")
names(distPara) <- paraNames[(NconDurPara+1):length(para)]
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
comment <- c(comment, "The p-value for the distribution parameters are from the 2-tailed tests H0: distributionParameter = 1")
} else if(dist == "qweibull") {
distPara <- para[(NconDurPara+1):length(para)]
paraNames <- c(paraNames, "a", "q")
names(distPara) <- paraNames[(NconDurPara+1):length(para)]
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1])/se[length(pval) + 1]))))
comment <- c(comment, "The p-value for the distribution parameters are from the 2-tailed tests H0: distributionParameter = 1")
} else if(dist == "mixqwe") {
distPara <- para[(NconDurPara+1):length(para)]
paraNames <- c(paraNames, "p","a", "q", "lambda")
names(distPara) <- paraNames[(NconDurPara+1):length(para)]
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-.5)/se[length(pval) + 1]))))
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
comment <- c(comment, "The p-value for p is from the 2-tailed tests H0: p = .5, the rest of the distribution parameters are from H0: para = 1")
} else if(dist == "mixqww") {
distPara <- para[(NconDurPara+1):length(para)]
paraNames <- c(paraNames, "p","a", "q", "theta", "gamma")
names(distPara) <- paraNames[(NconDurPara+1):length(para)]
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-.5)/se[length(pval) + 1]))))
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
comment <- c(comment, "The p-value for p is from the 2-tailed tests H0: p = .5, the rest of the distribution parameters are from H0: para = 1")
} else if(dist == "mixinvgauss") {
distPara <- para[(NconDurPara+1):length(para)]
paraNames <- c(paraNames, "theta","lambda", "gamma")
names(distPara) <- paraNames[(NconDurPara+1):length(para)]
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-0)/se[length(pval) + 1]))))
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-0)/se[length(pval) + 1]))))
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-0)/se[length(pval) + 1]))))
comment <- c(comment, "The p-value(s) for the distribution parameter(s) are from the 2-tailed test(s) H0: para = 0")
} else if(dist == "birnbaum-saunders") {
distPara <- para[(NconDurPara+1):length(para)]
paraNames <- c(paraNames, "kappa")
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]-1)/se[length(pval) + 1]))))
comment <- c(comment, "The p-value for the distribution parameter is from the 2-tailed test H0: kappa = 1")
}
paraNames <- c(paraNames, ExoVarNames)
if(length(pval) < length(para))
comment <- c(comment, "The p-value(s) for the exogenous parameter(s) are from the 2-tailed test(s) H0: parameter = 0")
while(length(pval) < length(para)){
pval <- c(pval, 2*(1-stats::pnorm(abs((para[length(pval) + 1]) / se[length(pval) + 1]))))
}
if(length(ExoVarNames) != 0){
conDurParanames <- names(conDurPara)
conDurPara <- c(conDurPara, para[(length(para) - length(ExoVarNames) + 1):length(para)])
names(conDurPara) <- c(conDurParanames, ExoVarNames)
}
if(length(fixedParamPos) != 0){
paraNames <- ifelse(fixedParamPos, paste(paraNames, "(fixed)", sep = " "), paraNames)
}
parameterInference <- data.frame(Parameters = paraNames,
Coef = para,
SE = se,
PV = round(pval, digits = 3),
row.names = 1)
  #if bootstrap results are available: names the rows and columns of the correlation matrix and adds the mean and standard errors:
if(length(bootError) != 0){
parameterInference <- cbind(parameterInference, BootMean = bootMean, BootSE = bootError)
dimnames(bootCorr) <- list(paraNames, paraNames)
}
#names the rows and columns of the robust correlation matrix (if available):
if(length(robustCorr) != 0){
parameterInference <- cbind(parameterInference, robustSE = robustSE)
dimnames(robustCorr) <- list(paraNames, paraNames)
}
return(list(MPar = conDurPara, DPar = distPara, Inference = parameterInference, comment = comment, paraNames = paraNames, bootCorr = bootCorr, robustCorr = robustCorr))
}
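#computes the matrix of derivatives of the conditional mean mu w.r.t. the
#parameters for the ACD model (one row per observation, one column per
#parameter) by calling the C routine; presumably used for robust (sandwich)
#standard errors.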
.getdmudtheta_ACD <- function(param, x, order, mean = mean(x), newDay = c(0)){
if(length(newDay) == 1 & newDay[1] == 0){
NnewDays = 0;
}else{
NnewDays = length(newDay)
}
temp<-.C("getdmudtheta_ACD",
as.double(x),
as.integer(length(x)),
as.double(param[1:(1+order[1]+order[2])]),
as.integer(order),
as.double(mean),
as.double(numeric(length(x))),
as.double(numeric(length(x))),
as.integer(newDay),
as.integer(NnewDays),
as.double(numeric(length(x) * (1+order[1]+order[2]))), PACKAGE = "ACDm")
.getdmudtheta_ACD <- matrix(temp[[10]], nrow = length(x), ncol = (1+order[1]+order[2]))
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/internals.R |
plotDescTrans <- function(trans, windowunit = "hours", window = 1){
volume <- price <- n <- transactions <- time <- NULL
windowunit <- match.arg(windowunit, c("secs", "mins", "hours", "days"))
if(!("POSIXlt" %in% class(trans$time))) trans$time <- as.POSIXlt(trans$time)
timeInterval <- switch(windowunit,
secs = as.numeric(trunc(trans$time, "secs") - trans$time$sec %% window),
mins = as.numeric(trunc(trans$time, "mins") - (trans$time$min %% window) * 60),
hours = as.numeric(trunc(trans$time, "hours") - (trans$time$hour %% window) * 3600),
days = as.numeric(trunc(trans$time, "days") - ((trans$time$yday - trans[1,1]$yday) %% window) * 86400))
df <- cbind(dplyr::select(trans, volume, price), timeInterval = timeInterval)
by_timeInterval <- dplyr::group_by(df, timeInterval)
sumvol <- dplyr::summarise(by_timeInterval,
sumvol = sum(volume, na.rm = TRUE),
transactions = n())
sumvol <- as.data.frame(sumvol)
sumvol$time <- as.POSIXlt(sumvol$timeInterval, origin = "1970-01-01")
g1 <- ggplot(trans, aes(x=time,y=price, group = time$year+time$yday)) + geom_line()+ylab("price") + ggtitle("Price")
g2 <- ggplot(sumvol, aes(x=time,y=sumvol)) + geom_bar(stat = "identity")+ylab("volume") + ggtitle(paste("Volume traded per", window, windowunit))
g3 <- ggplot(sumvol, aes(x=time,y=transactions)) + geom_bar(stat = "identity")+ylab("transactions") + ggtitle(paste("Number of transactions per", window, windowunit))
print(g1)
graphics::par(ask = TRUE)
print(g2)
print(g3)
graphics::par(ask = FALSE)
# grid::grid.newpage()
# grid::pushViewport(grid::viewport(layout = grid::grid.layout(3, 1)))
#
# print(g1, vp = grid::viewport(layout.pos.row = 1, layout.pos.col = 1))
# print(g2, vp = grid::viewport(layout.pos.row = 2, layout.pos.col = 1))
# print(g3, vp = grid::viewport(layout.pos.row = 3, layout.pos.col = 1))
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/plotDescTrans.R |
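
# Usage sketch (hedged): assuming a transactions data.frame with 'time',
# 'price' and 'volume' columns (e.g. a 'transData' object, if available):
# plotDescTrans(transData, windowunit = "hours", window = 1)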
plotHazard <- function(fitModel, breaks = 20, implied = TRUE, xstop = NULL){
hazard <- residual <- errorTerm <- NULL
if("acdFit" %in% class(fitModel)){
e <- stats::quantile(fitModel$residuals, seq(0, 1 - 1/breaks, 1/breaks))
h <- (1/breaks)/(1-1:(breaks-1)/breaks+1/(2*breaks))*(1/(e[2:breaks]-e[1:(breaks-1)]))
e2 <- stats::quantile(fitModel$residuals, seq(0 + 1/(2*breaks), 1 - 3/(2*breaks), 1/breaks))
df <- data.frame(residual = e2, hazard = h, curve = "Nonparametric")
g <- ggplot(df, aes(y=hazard, x=residual))
g <- g + geom_line() + geom_point() + ylab("hazard") + xlab("residual")
if(length(xstop) == 0) xstop <- max(e2)+1
xstart <- 0.01
if(fitModel$distribution == "weibull"){
gamma <- fitModel$dPara
theta <- .returnFixedMeanPara(2, gamma)
df2 <- data.frame(errorTerm = seq(xstart, xstop,.01), hazard = theta * gamma * seq(xstart, xstop,.01)^(gamma-1), curve = "Implied")
g <- g + geom_line(data = df2, aes(y=hazard, x=errorTerm), linetype = 1, colour = 2) + ggtitle("Hazard function estimates: nonparametric (black) and Weibull implied (red).")
print(g)
} else if(fitModel$distribution == "burr"){
kappa <- fitModel$dPara[1]
sig2 <- fitModel$dPara[2]
theta <- ((gamma(1+1/kappa)*gamma(1/sig2 - 1/kappa))/(sig2^(1+1/kappa)*gamma(1/sig2+1)))^(kappa)
df2 <- data.frame(errorTerm = seq(xstart, xstop,.01), hazard = (theta*kappa*seq(xstart, xstop,.01)^(kappa-1))/(1+sig2*theta*seq(xstart, xstop,.01)^(kappa)), curve = "Implied")
g <- g + geom_line(data = df2, aes(y=hazard, x=errorTerm), linetype = 1, colour = 2) + ggtitle("Hazard function estimates: nonparametric (black) and Burr implied (red).")
print(g)
} else if(fitModel$distribution == "exponential"){
df2 <- data.frame(errorTerm = seq(xstart, xstop,.1), hazard = 1, curve = "Implied")
g <- g + geom_line(data = df2, aes(y=hazard, x=errorTerm), linetype = 1, colour = 2) + ggtitle("Hazard function estimates: nonparametric (black) and exponential implied (red).")
print(g)
} else if(fitModel$distribution == "gengamma"){
kappa <- fitModel$dPara[1]
gammaPara <- fitModel$dPara[2]
df2 <- data.frame(errorTerm = seq(xstart, xstop,.01),
hazard = gengammaHazard(seq(xstart, xstop,.01), gamma = gammaPara, kappa = kappa, forceExpectation = T), curve = "Implied")
g <- g + geom_line(data = df2, aes(y=hazard, x=errorTerm), linetype = 1, colour = 2) + ggtitle("Hazard function estimates: nonparametric (black) and generelized Gamma implied (red).")
print(g)
} else if(fitModel$distribution == "genf"){
kappa <- fitModel$dPara[1]
eta <- fitModel$dPara[2]
gamma <- fitModel$dPara[3]
lambda <- fitModel$forcedDistPara
df2 <- data.frame(errorTerm = seq(xstart, xstop,.01),
hazard = genfHazard(seq(xstart, xstop,.01), kappa = kappa, eta = eta, gamma = gamma, lambda = lambda), curve = "Implied")
g <- g + geom_line(data = df2, aes(y=hazard, x=errorTerm), linetype = 1, colour = 2) + ggtitle("Hazard function estimates: nonparametric (black) and generelized F implied (red).")
print(g)
} else if(fitModel$distribution == "qweibull"){
a <- fitModel$dPara[1]
      qdist <- fitModel$dPara[2]
b <- fitModel$forcedDistPara
df2 <- data.frame(errorTerm = seq(xstart, xstop, .01),
hazard = qweibullHazard(seq(xstart, xstop,.01), a = a, qdist = qdist, b = b), curve = "Implied")
g <- g + geom_line(data = df2, aes(y=hazard, x=errorTerm), linetype = 1, colour = 2) + ggtitle("Hazard function estimates: nonparametric (black) and q-Weibull implied (red).")
print(g)
} else if(fitModel$distribution == "mixqwe"){
p <- fitModel$dPara[1]
a <- fitModel$dPara[2]
qdist <- fitModel$dPara[3]
lambda <- fitModel$dPara[4]
b <- fitModel$forcedDistPara
df2 <- data.frame(errorTerm = seq(xstart, xstop, .01),
hazard = mixqweHazard(seq(xstart, xstop,.01), pdist = p, a = a, qdist = qdist, lambda = lambda, b = b), curve = "Implied")
g <- g + geom_line(data = df2, aes(y=hazard, x=errorTerm), linetype = 1, colour = 2) + ggtitle("Hazard function estimates: nonparametric (black) and q-Weibull mixed with exponential implied (red).")
print(g)
} else if(fitModel$distribution == "mixqww"){
p <- fitModel$dPara[1]
a <- fitModel$dPara[2]
qdist <- fitModel$dPara[3]
theta <- fitModel$dPara[4]
gamma <- fitModel$dPara[5]
b <- fitModel$forcedDistPara
df2 <- data.frame(errorTerm = seq(xstart, xstop, .01),
hazard = mixqwwHazard(seq(xstart, xstop,.01), pdist = p, a = a, qdist = qdist, theta = theta, gamma = gamma, b = b), curve = "Implied")
g <- g + geom_line(data = df2, aes(y=hazard, x=errorTerm), linetype = 1, colour = 2) + ggtitle("Hazard function estimates: nonparametric (black) and q-Weibull mixed with Weibull implied (red).")
print(g)
} else if(fitModel$distribution == "mixinvgauss"){
theta <- fitModel$dPara[1]
lambda <- fitModel$dPara[2]
gamma <- fitModel$dPara[3]
df2 <- data.frame(errorTerm = seq(xstart, xstop, .01),
hazard = mixinvgaussHazard(seq(xstart, xstop,.01), theta = theta, lambda = lambda, gamma = gamma, forceExpectation = T), curve = "Implied")
g <- g + geom_line(data = df2, aes(y=hazard, x=errorTerm), linetype = 1, colour = 2) + ggtitle("Hazard function estimates: nonparametric (black) and finite mixature of inverse Gaussian implied (red).")
print(g)
} else{
g <- g + ggtitle("Hazard function estimates: nonparametric")
print(g)
}
} else if (is.numeric(fitModel)){
e <- stats::quantile(fitModel, seq(0, 1 - 1/breaks, 1/breaks))
h <- (1/breaks)/(1-1:(breaks-1)/breaks+1/(2*breaks))*(1/(e[2:breaks]-e[1:(breaks-1)]))
e2 <- stats::quantile(fitModel, seq(0 + 1/(2*breaks), 1 - 3/(2*breaks), 1/breaks))
df <- data.frame(residual = e2, hazard = h, curve = "Nonparametric")
g <- ggplot(df, aes(y=hazard, x=residual))
g <- g + geom_line() + geom_point() + ylab("hazard") + xlab("residual")
g <- g + ggtitle("Hazard function estimate")
print(g)
} else warning("The 'fitModel' argument has to be either an estimated model of class 'acdFit' or a numeric vector")
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/plotHazard.R |
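
# Usage sketch (hedged): assuming a model fitted with acdFit(), e.g. on the
# package's sample data:
# fit <- acdFit(adjDurData)
# plotHazard(fit, breaks = 25)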
plotHistAcd <- function(durations, windowunit = "mins", window = 1){
time <- NULL
if(!("POSIXlt" %in% class(durations$time))) durations$time <- as.POSIXlt(durations$time)
windowunit <- match.arg(windowunit, c("secs", "mins", "hours", "days"))
timefactor <- switch(windowunit,
secs = trunc(durations$time, units = windowunit) - durations$time$sec %% window,
mins = trunc(durations$time, units = windowunit) - (durations$time$min %% window) * 60,
hours = trunc(durations$time, units = windowunit) - (durations$time$hour %% window) * 3600,
days = trunc(durations$time, units = windowunit) - (durations$time$yday %% window) * 86400)
meandur <- tapply(durations$durations, timefactor, mean)
df <- data.frame(time = as.POSIXlt(names(meandur)), meandur = meandur)
g <- ggplot(df, aes(x = time, y = meandur))
g + geom_bar(stat = "identity") + ggtitle(paste(window, windowunit, "means"))
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/plotHistAcd.R |
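
# Usage sketch (hedged): plots 30-minute means of the durations, assuming a
# duration object with 'time' and 'durations' columns such as 'durData':
# plotHistAcd(durData, windowunit = "mins", window = 30)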
plotLL <- function(fitModel, parameter1 = 1, parameter2 = NULL, param1sequence, param2sequence, startpoint = NULL,
returnOutput = FALSE){
logLikelihood <- NULL
parameters <- parameter1
if(length(parameter2) != 0) parameters <- c(parameters, parameter2)
#checks if the parameters were correctly entered, and transforms parameter string to int
for(i in 1:length(parameters)){
if(!is.numeric(parameters[i])){
if(!is.character(parameters[i])){
msg <- paste0("The argument 'parameter", i, "' must be an integer or a string of characters")
stop(msg)
} else if (parameters[i] %in% names(stats::coef(fitModel))){
parameters[i] <-which(names(stats::coef(fitModel)) == parameters[i])
} else {
msg <- paste0("Couldn't find parameter", i, ": \"", parameters[i], "\"")
stop("Couldn't find parameter1 \"", parameters[i], "\"")
}
}
if(!(parameters[i] %in% 1:length(stats::coef(fitModel)))){
msg <- paste0("There are only ", length(stats::coef(fitModel)), " parameters in the fitted model")
stop(msg)
}
}
if(!is.numeric(parameters)) parameters <- as.numeric(parameters)
#computes the mean of the durations:
if(length(fitModel$durations$adjDur) != 0){
mean <- mean(fitModel$durations$adjDur)
dur <- fitModel$durations$adjDur
} else {
mean <- mean(fitModel$durations)
dur <- fitModel$durations
}
#fills the matrix 'exVar' with the exogenous variables if they are present in the model:
exVar <- NULL
if(length(fitModel$exogenousVariables) != 0)
exVar <- as.matrix(fitModel$durations[ , fitModel$exogenousVariables])
if(length(parameter2) == 0){
if(missing(param1sequence)){
middle <- stats::coef(fitModel)[parameters[1]]
se <- fitModel$parameterInference$SE[parameters[1]]
if(is.na(se)){
msg <- paste0("Can't use default values for the range of '",
names(stats::coef(fitModel))[parameters[1]],
"' since it's standard error is 'NA', 'param1sequence' must be given instead")
stop(msg)
}
param1sequence <- seq(from = middle - se*4, to = middle + se*4, length.out = 21)
}
} else{
if(missing(param1sequence)){
middle <- stats::coef(fitModel)[parameters[1]]
se <- fitModel$parameterInference$SE[parameters[1]]
if(is.na(se)){
msg <- paste0("Can't use default values for the range of '",
names(stats::coef(fitModel))[parameters[1]],
"' since it's standard error is 'NA', 'param1sequence' must be given instead")
stop(msg)
}
param1sequence <- seq(from = middle - se*4, to = middle + se*4, length.out = 11)
}
if(missing(param2sequence)){
middle <- stats::coef(fitModel)[parameters[2]]
se <- fitModel$parameterInference$SE[parameters[2]]
if(is.na(se)){
msg <- paste0("Can't use default values for the range of '",
names(stats::coef(fitModel))[parameters[2]],
"' since it's standard error is 'NA', 'param2sequence' must be given instead")
stop(msg)
}
param2sequence <- seq(from = middle - se*4, to = middle + se*4, length.out = 11)
}
}
if(length(parameters) == 1){ #if only one parameter should be plotted:
    #function to compute the log likelihood for different values of one parameter, holding the others fixed:
f1 <- function(x){
#sets the fixed parameters to either the fitted values or the 'startpoint' values
if(length(startpoint) == 0){
internalCoef <- stats::coef(fitModel)
} else {
internalCoef <- startpoint
}
internalCoef[parameters[1]] <- x
-.getLLcall(param = internalCoef, dur = dur, exogenousVar = exVar , model = fitModel$model, order = fitModel$order,
mean = mean, distCode = fitModel$distCode, newDay = fitModel$newDayVector, returnMu = FALSE,
breakPoints = fitModel$breakPoints, forceErrExpec = fitModel$forceErrExpec)
}
#creates a data.frame of the log likelihood and the parameter values:
df <- data.frame(param1sequence = param1sequence, logLikelihood = sapply(param1sequence, FUN = f1))
g <- ggplot2::ggplot(df, aes(x = param1sequence, y = logLikelihood))
g <- g + ggplot2::geom_point() + ggplot2::geom_line()
g <- g + ggplot2::ylab("log likelihood") + ggplot2::xlab(names(stats::coef(fitModel))[parameters[1]])
if( min(param1sequence) <= stats::coef(fitModel)[parameters[1]] && max(param1sequence) >= stats::coef(fitModel)[parameters[1]]){
g <- g + ggplot2::geom_point(x = stats::coef(fitModel)[parameters[1]], y = max(df$logLikelihood, na.rm = TRUE), color = "red", size = 3)
maxll <- max(df$logLikelihood, na.rm = TRUE)
minll <- min(df$logLikelihood, na.rm = TRUE)
g <- g + ggplot2::annotate("text", x = stats::coef(fitModel)[parameters[1]], y = maxll - (maxll-minll)/20, label = "MLE", color = "red")
}
print(g)
if(returnOutput) return(df)
} else { #if two parameters should be plotted together:
    #function to compute the log likelihood for different values of the two parameters, holding the others fixed:
f2 <- function(x, y){
#sets the fixed parameters to either the fitted values or the 'startpoint' values
if(length(startpoint) == 0){
internalCoef <- stats::coef(fitModel)
} else {
internalCoef <- startpoint
}
internalCoef[parameter1] <- x
internalCoef[parameter2] <- y
-.getLLcall(param = internalCoef, dur = dur, exogenousVar = exVar , model = fitModel$model, order = fitModel$order,
mean = mean, distCode = fitModel$distCode, newDay = fitModel$newDayVector, returnMu = FALSE,
breakPoints = fitModel$breakPoints, forceErrExpec = fitModel$forceErrExpec)
}
    #creates a matrix of the log likelihood values for different values of the two parameters:
z <- matrix(nrow = length(param1sequence), ncol = length(param2sequence))
fillz <- function(){
for(i in 1:length(param1sequence)) {
for(j in 1:length(param2sequence)) {
z[i, j] <<- f2(param1sequence[i], param2sequence[j])
}
}
}
fillz()
nbcol = 100
color = grDevices::heat.colors(nbcol)
zcol = cut(z, nbcol)
rgl::persp3d(x = param1sequence, y = param2sequence, z = z, col=color[zcol],
ticktype="detailed", xlab = names(stats::coef(fitModel))[parameters[1]],
ylab = names(stats::coef(fitModel))[parameters[2]], zlab = "log likelihood",axes=TRUE,
phi = 30, theta = 40)
xtemp <- rep(param1sequence, length(param2sequence))
ytemp <- rep(param2sequence, each = length(param1sequence))
ztemp <- as.vector(z)
rgl::points3d(x = xtemp, y = ytemp, z = ztemp, color = "blue")
min1 <- min(param1sequence); max1 <- max(param1sequence)
min2 <- min(param2sequence); max2 <- max(param2sequence)
mlepoint1 <- stats::coef(fitModel)[parameters[1]]
mlepoint2 <- stats::coef(fitModel)[parameters[2]]
if(min1 <= mlepoint1 && max1 >= mlepoint1 &&
min2 <= mlepoint2 && max2 >= mlepoint2){
rgl::points3d(x = mlepoint1, y = mlepoint2, z = fitModel$goodnessOfFit$value[1], color = "red", size = 6)
}
if(returnOutput) return(list(para1 = param1sequence, para2 = param2sequence, z = z))
}
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/plotLL.R |
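
# Usage sketch (hedged): profiles the log likelihood of a fitted model around
# the MLE, assuming 'fit' is an acdFit() object:
# plotLL(fit, parameter1 = "omega")                        #one parameter
# plotLL(fit, parameter1 = "alpha1", parameter2 = "beta1") #3d surface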
plotRollMeanAcd <- function(durations, window = 500){
rollingMeans <- group <- NULL
dur <- durations$durations
time <- durations$time[(window):length(dur)]
if(!("POSIXlt" %in% class(time))) time <- as.POSIXlt(time)
df <- data.frame(time = time, rollingMeans = zoo::rollmean(dur,window), group = time$year + time$yday/365)
g <- ggplot(df, aes(x = time,y = rollingMeans, group = group))
g + geom_line()
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/plotRollMeanAcd.R |
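
# Usage sketch (hedged): rolling mean over a moving window of 500 durations
# plotted against time, assuming a duration object such as 'durData':
# plotRollMeanAcd(durData, window = 500)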
plotScatterAcd <- function(fitModel, x = "muHats", y = "residuals", xlag = 0, ylag = 0,
colour = NULL, xlim = NULL, ylim = NULL, alpha = 1/10,
smoothMethod = "auto"){
x <- match.arg(x, c("muHats", "residuals", "durations", "adjDur", "dayTime", "time", "index"))
y <- match.arg(y, c("muHats", "residuals", "durations", "adjDur", "dayTime", "time", "index"))
if(length(colour) != 0) colour <- match.arg(colour, c("muHats", "residuals", "durations", "adjDur", "dayTime", "time", "index"))
contTime = TRUE
xData <- switch(x,
muHats = fitModel$muHats,
residuals = fitModel$residuals,
durations = fitModel$durations$durations,
adjDur = fitModel$durations$adjDur,
dayTime = fitModel$durations$time$min / 60 + fitModel$durations$time$hour,
time = {if(contTime) fitModel$durations$time
else fitModel$durations$time$yday * (60*8 + 25) + fitModel$durations$time$min + fitModel$durations$time$hour * 60},
index = 1:fitModel$N)
yData <- switch(y,
muHats = fitModel$muHats,
residuals = fitModel$residuals,
durations = fitModel$durations$durations,
adjDur = fitModel$durations$adjDur,
dayTime = fitModel$durations$time$min / 60 + fitModel$durations$time$hour,
time = {if(contTime) fitModel$durations$time
else fitModel$durations$time$yday * (60*8 + 25) + fitModel$durations$time$min + fitModel$durations$time$hour * 60},
index = 1:fitModel$N)
if(length(colour) != 0){
colourData <- switch(colour,
muHats = fitModel$muHats,
residuals = fitModel$residuals,
durations = fitModel$durations$durations,
adjDur = fitModel$durations$adjDur,
dayTime = fitModel$durations$time$min / 60 + fitModel$durations$time$hour,
time = {if(contTime) fitModel$durations$time
else fitModel$durations$time$yday * (60*8 + 25) + fitModel$durations$time$min + fitModel$durations$time$hour * 60},
index = 1:fitModel$N,
NULL = NULL)
colourData <- colourData[(1+ylag):length(colourData)]
}
yData <- yData[(1+xlag):(length(yData)-ylag)]
xData <- xData[(1+ylag):(length(xData)-xlag)]
if(ylag != 0) y <- paste("lagged ", y, " (i-", ylag, ")", sep = "")
if(xlag != 0) x <- paste("lagged ", x, " (i-", xlag, ")", sep = "")
if(length(colour) == 0){
g <- ggplot(data.frame(x = xData, y = yData), aes(x = x, y = y))
} else{
g <- ggplot(data.frame(x = xData, y = yData, colour = colourData), aes(x = x, y = y, colour = colour)) + scale_colour_continuous(name = colour)
}
g <- g + geom_point(alpha = alpha) + geom_smooth(colour="red", size=1.5, fill = "blue", alpha = .2, method = smoothMethod)
if(x == "muHats" && y == "residuals") g <- g + scale_y_continuous(breaks = seq(1, max(yData), 1)) #+ geom_hline(yintercept = 1, colour = "red")
if(length(xlim) != 0) g <- g + xlim(xlim)
if(length(ylim) != 0 ) g <- g + ylim(ylim)
g + ylab(y) + xlab(x)
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/plotScatterAcd.R |
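# QQ plot of the model residuals against the quantiles of the fitted
# error distribution. Sketch (assumes the fit comes from acdFit(); only
# the distributions handled below are supported):
#
#   qqplotAcd(acdFit(adjDurData, dist = "weibull"))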
qqplotAcd <- function(fitModel, xlim = NULL, ylim = NULL){
residuals <- NULL
df <- data.frame(residuals = fitModel$residuals)
if(fitModel$distribution == "exponential"){
g <- ggplot(df, aes(sample=residuals)) + stat_qq(distribution = stats::qexp, geom="point")
if(length(xlim) != 0) g <- g + xlim(xlim)
if(length(ylim) != 0 ) g <- g + ylim(ylim)
g + geom_abline(intercept = 0, slope = 1, color="red") + xlab(paste(fitModel$distribution, "theoretical quantiles"))
} else if(fitModel$distribution == "weibull"){
g <- ggplot(df, aes(sample=residuals))
g <- g + stat_qq(distribution = stats::qweibull, dparams = list(shape = fitModel$dPara, scale = 1/(gamma(1+1/fitModel$dPara))))
g <- g + geom_abline(intercept = 0, slope = 1, color="red") + xlab(paste(fitModel$distribution, "theoretical quantiles"))
if(length(xlim) != 0) g <- g + xlim(xlim)
if(length(ylim) != 0 ) g <- g + ylim(ylim)
g
} else if(fitModel$distribution == "burr"){
burrQ <- function(p, kappa, sig2){
theta <- ((gamma(1+1/kappa)*gamma(1/sig2 - 1/kappa))/(sig2^(1+1/kappa)*gamma(1/sig2+1)))^(kappa)
return((((1-p)^(-sig2)-1)/(sig2*theta))^(1/kappa))
}
g <- ggplot(df, aes(sample=residuals)) + stat_qq(distribution = burrQ, dparams = list(kappa = fitModel$dPara[1], sig2 = fitModel$dPara[2])) + geom_abline(intercept = 0, slope = 1, color="red") + xlab(paste(fitModel$distribution, "theoretical quantiles"))
if(length(xlim) != 0) g <- g + xlim(xlim)
if(length(ylim) != 0 ) g <- g + ylim(ylim)
g
} else if(fitModel$distribution == "gengamma"){
kappa <- fitModel$dPara[1]
gammaPara <- fitModel$dPara[2]
g <- ggplot(df, aes(sample=residuals)) + stat_qq(distribution = qgengamma, dparams = list(gamma = gammaPara, kappa = kappa, forceExpectation = T)) + geom_abline(intercept = 0, slope = 1, color="red") + xlab(paste(fitModel$distribution, "theoretical quantiles"))
if(length(xlim) != 0) g <- g + xlim(xlim)
if(length(ylim) != 0 ) g <- g + ylim(ylim)
g
} else if(fitModel$distribution == "qweibull"){
a <- fitModel$dPara[1]
qdist <- fitModel$dPara[2]
b <- fitModel$forcedDistPara
g <- ggplot(df, aes(sample=residuals)) + stat_qq(distribution = qqweibull, dparams = list(a = a, qdist = qdist, b = b)) + geom_abline(intercept = 0, slope = 1, color="red") + xlab(paste(fitModel$distribution, "theoretical quantiles"))
if(length(xlim) != 0) g <- g + xlim(xlim)
if(length(ylim) != 0 ) g <- g + ylim(ylim)
g
} else stop("The QQ plot function is not yet implemented for this distribution")
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/qqplotAcd.R |
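# Histogram of the residuals with the density implied by the fitted
# error distribution overlaid. Sketch (assumes the fit comes from acdFit()):
#
#   resiDensityAcd(acdFit(adjDurData, dist = "exponential"), density = TRUE)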
resiDensityAcd <- function(fitModel, xlim = NULL, binwidth = .1, density = FALSE){
..density.. <- dexp <- dweibull <- residuals <- NULL
df <- data.frame(residuals = fitModel$residuals)
#sets the PDF function for the assumed distribution:
distFnc <- switch(fitModel$distribution,
"exponential" = dexp,
"weibull" = dweibull,
"burr" = dburr,
"gengamma" = dgengamma,
"genf" = dgenf,
"qweibull" = dqweibull,
"mixqwe" = dmixqwe,
"mixqww" = dmixqww,
"mixinvgauss" = dmixinvgauss)
if(fitModel$distribution == "exponential"){
paraList <- list(rate = 1)
} else if(fitModel$distribution == "weibull"){
paraList <- list(shape = fitModel$dPara, scale = 1/(gamma(1+1/fitModel$dPara)))
} else if(fitModel$distribution == "burr"){
kappa <- fitModel$dPara[1]
sig2 <- fitModel$dPara[2]
theta <- ((gamma(1+1/kappa)*gamma(1/sig2 - 1/kappa))/(sig2^(1+1/kappa)*gamma(1/sig2+1)))^(kappa)
paraList <- list(kappa = kappa, sig2 = sig2, theta = theta)
} else if(fitModel$distribution == "gengamma"){
kappa <- fitModel$dPara[1]
gamma <- fitModel$dPara[2]
lambda <- fitModel$forcedDistPara
paraList <- list(gamma = gamma, kappa = kappa, lambda = lambda)
} else if(fitModel$distribution == "genf"){
kappa <- fitModel$dPara[1]
eta <- fitModel$dPara[2]
gamma <- fitModel$dPara[3]
lambda <- fitModel$forcedDistPara
paraList <- list(kappa = kappa, eta = eta, gamma = gamma, lambda = lambda)
} else if(fitModel$distribution == "qweibull"){
a <- fitModel$dPara[1]
q <- fitModel$dPara[2]
b <- fitModel$forcedDistPara
paraList <- list(a = a, q = q, b = b)
} else if(fitModel$distribution == "mixqwe"){
p <- fitModel$dPara[1]
a <- fitModel$dPara[2]
qdist <- fitModel$dPara[3]
lambda <- fitModel$dPara[4]
b <- fitModel$forcedDistPara
paraList <- list(pdist = p, a = a, qdist = qdist, lambda = lambda, b = b)
} else if(fitModel$distribution == "mixqww"){
p <- fitModel$dPara[1]
a <- fitModel$dPara[2]
qdist <- fitModel$dPara[3]
theta <- fitModel$dPara[4]
gamma <- fitModel$dPara[5]
b <- fitModel$forcedDistPara
paraList <- list(pdist = p, a = a, qdist = qdist, theta = theta, gamma = gamma, b = b)
} else if(fitModel$distribution == "mixinvgauss"){
theta <- fitModel$dPara[1]
lambda <- fitModel$dPara[2]
gamma <- fitModel$dPara[3]
paraList <- list(theta = theta, lambda = lambda, gamma = gamma, forceExpectation = T)
} else stop("the provided distribution does not exist")
g <- ggplot(df, aes(x=residuals))
g <- g + geom_histogram(aes(y = ..density..), binwidth = binwidth, alpha = 0.4)
if(density) g <- g + stat_density(aes(colour = 'Empirical'), cex = 1, geom = "line", adjust = .2)
g <- g + stat_function(fun = distFnc, aes(colour = 'Implied'),
args = paraList, n = 5000, cex = 1)
if(length(xlim) != 0) g <- g + xlim(xlim)
g + scale_colour_manual(name = 'Density', values = c('red', 'blue'))
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/resiDensityAcd.R |
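# Simulates durations from an ACD-type model. A minimal sketch with
# hypothetical parameter values c(omega, alpha, beta) for an exponential
# ACD(1,1):
#
#   dur <- sim_ACD(N = 1000, model = "ACD", dist = "exponential",
#                  param = c(0.02, 0.05, 0.93))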
sim_ACD <- function(N = 1000,
model = "ACD",
dist = "exponential",
param = NULL,
order = NULL,
Nburn = 50,
startX = c(1),
startMu = c(1),
errors = NULL,
sampleErrors = TRUE,
roundToSec = FALSE,
rm0 = FALSE,
diurnalFactor = FALSE,
splineObj = NULL,
open = NULL,
close = NULL){
#provides the possibility of entering truncated and/or case mismatched arguments:
model <- match.arg(toupper(model), c("ACD", "LACD1", "LACD2", "AMACD", "ABACD"))
dist <- match.arg(tolower(dist), c("exponential", "weibull", "burr", "gengamma", "genf"))
distCode <- .getDistCode(dist)
#checks param and order input:
if(length(param) != 0){
if(length(order) == 0) order <- .setOrder(model)
.checkOrderAndPara(order, param, distCode, model)
paraTemp <- .seperateStartPara(param, model, distCode, order)
distPara <- paraTemp$distStartPara
startPara <- paraTemp$startPara
}else{
if(length(order) != 0){
.checkOrder(order, model)
} else{
order <- .setOrder(model)
}
paraTemp <- .setStartPara(model, distCode, 1, order)
distPara <- paraTemp$distStartPara
startPara <- paraTemp$startPara
param <- c(distPara, startPara)
}
#simulates the error terms:
if(length(errors) == 0){
if(dist == "exponential") e <- stats::rexp(N + Nburn)
else if(dist == "weibull"){
e <- stats::rweibull(N + Nburn, shape = distPara, scale = 1/(gamma(1+1/distPara)))
}
else if(dist == "burr"){
kappa <- distPara[1]
sig2 <- distPara[2]
# theta chosen so that the simulated Burr errors have unit expectation
muPara <- burrExpectation(theta = 1, kappa = kappa, sig2 = sig2)^kappa
e <- rburr(N + Nburn, theta = muPara, kappa = kappa, sig2 = sig2)
} else if(dist == "gengamma"){
kappa <- distPara[1]
gammaPara <- distPara[2]
e <- rgengamma(N + Nburn, gamma = gammaPara, kappa = kappa, forceExpectation = T)
} else if(dist == "genf"){
stop("Simulations are not available for the generelized F distribution")
}
} else{
if(sampleErrors) e <- sample(errors, size = N + Nburn, replace = TRUE)
else{
if(length(errors) != N + Nburn) stop("the 'errors' vector needs to be of length N + Nburn if sampleErrors = FALSE")
e <- errors
}
}
maxpq = max(order)
#if the start value vector is shorter than the order, the start values are recycled:
if(maxpq > min(length(startX), length(startMu))){
startX <- rep(startX, length.out = maxpq)
startMu <- rep(startMu, length.out = maxpq)
}
if(diurnalFactor){
if(length(splineObj) == 0){
splineObj <- ACDm::defaultSplineObj
open = "10:00:00"
close = "18:25:00"
}
knots <- c(splineObj[[1]]$knots, splineObj[[2]]$knots, splineObj[[3]]$knots, splineObj[[4]]$knots, splineObj[[5]]$knots)*60
konst <- c(splineObj[[1]][[2]][,1], splineObj[[2]][[2]][,1], splineObj[[3]][[2]][,1], splineObj[[4]][[2]][,1], splineObj[[5]][[2]][,1])
lin <- c(splineObj[[1]][[2]][,2], splineObj[[2]][[2]][,2], splineObj[[3]][[2]][,2], splineObj[[4]][[2]][,2], splineObj[[5]][[2]][,2])/60
sq <- c(splineObj[[1]][[2]][,3], splineObj[[2]][[2]][,3], splineObj[[3]][[2]][,3], splineObj[[4]][[2]][,3], splineObj[[5]][[2]][,3])/60^2
qub <- c(splineObj[[1]][[2]][,4], splineObj[[2]][[2]][,4], splineObj[[3]][[2]][,4], splineObj[[4]][[2]][,4], splineObj[[5]][[2]][,4])/60^3
splineNewDay <- cumsum(c(0, length(splineObj[[1]]$knots), length(splineObj[[2]]$knots), length(splineObj[[3]]$knots) , length(splineObj[[4]]$knots)))
opensek <- as.POSIXlt(strptime(open, "%H:%M:%S"))
opensek <- opensek$hour * 3600 + opensek$min * 60 + opensek$sec
closesek <- as.POSIXlt(strptime(close, "%H:%M:%S"))
closesek <- closesek$hour * 3600 + closesek$min * 60 + closesek$sec
temp<-.Call("sim_ACDSpline",
as.integer(N),
param[1:(1+order[1]+order[2])],
order,
startX,
startMu,
e,
as.integer(Nburn),
opensek,
closesek,
knots,
konst,
lin,
sq,
qub,
splineNewDay, PACKAGE = "ACDm")
if(roundToSec){
df <- data.frame(time = strptime("2014-01-06 00:00:00", "%Y-%m-%d %H:%M:%S") + ((temp[[1]] %/% 5) * 7 + (temp[[1]] %% 5)) * 60 * 60 * 24 + ceiling(temp[[2]]))
utils::capture.output(dur <- computeDurations(transactions = df, open = open, close = close, rm0dur = F, type = "transactions"))
} else { #doesn't yet work
df <- data.frame(time = strptime("2014-01-06 00:00:00", "%Y-%m-%d %H:%M:%S") + ((temp[[1]] %/% 5) * 7 + (temp[[1]] %% 5)) * 60 * 60 * 24 + temp[[2]])
utils::capture.output(dur <- computeDurations(transactions = df, open = open, close = close, rm0dur = F, type = "transactions"))
}
return(df)
} else if(!diurnalFactor){
cFunction <- switch(model,
ACD = "sim_ACDCALL",
LACD1 = "sim_LACD1",
LACD2 = "sim_LACD2",
AMACD = "sim_AMACD",
ABACD = "sim_ABACD")
if(!roundToSec){
return(.Call(cFunction,
as.integer(N),
startPara,
order,
startX,
startMu,
e,
as.integer(Nburn), PACKAGE = "ACDm"))
} else{
durTemp <- ceiling(cumsum(.Call(cFunction,
as.integer(N),
startPara,
order,
startX,
startMu,
e,
as.integer(Nburn), PACKAGE = "ACDm")))
if(!rm0) return(c(durTemp[1], durTemp[-1]-durTemp[-N]))
else{
durTemp <- c(durTemp[1], durTemp[-1]-durTemp[-N])
return(durTemp[durTemp != 0])
}
}
}
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/sim_ACD.R |
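# Transforms the residuals of a fitted model: the probability integral
# transform should give roughly uniform values under a correctly
# specified model, while the Cox-Snell transform gives roughly unit
# exponential ones. Sketch (assumes 'fit' comes from acdFit()):
#
#   u <- standardizeResi(fit, transformation = "probIntegral")
#   hist(u)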
standardizeResi <- function(fitModel, transformation = "probIntegral"){
if(!("acdFit" %in% class(fitModel))) stop("fitModel is not of class 'acdFit'")
transformation <- match.arg(transformation, c("probIntegral", "cox-snell"))
if(fitModel$distribution == "exponential"){
returnValue <- stats::pexp(fitModel$residuals)
} else if(fitModel$distribution == "weibull"){
returnValue <- stats::pweibull(fitModel$residuals, shape = fitModel$dPara, scale = fitModel$forcedDistPara)
} else if(fitModel$distribution == "burr"){
returnValue <- pburr(fitModel$residuals, theta = fitModel$forcedDistPara, kappa = fitModel$dPara[1], sig2 = fitModel$dPara[2])
} else if(fitModel$distribution == "gengamma"){
returnValue <- pgengamma(fitModel$residuals, gamma = fitModel$dPara[2], kappa = fitModel$dPara[1], lambda = fitModel$forcedDistPara)
} else if(fitModel$distribution == "qweibull"){
returnValue <- pqweibull(fitModel$residuals, a = fitModel$dPara[1], qdist = fitModel$dPara[2], b = fitModel$forcedDistPara)
} else if(fitModel$distribution == "mixqwe"){
returnValue <- pmixqwe(fitModel$residuals, pdist = fitModel$dPara[1], a = fitModel$dPara[2], qdist = fitModel$dPara[3], lambda = fitModel$dPara[4], b = fitModel$forcedDistPara)
} else if(fitModel$distribution == "mixqww"){
returnValue <- pmixqww(fitModel$residuals, pdist = fitModel$dPara[1], a = fitModel$dPara[2], qdist = fitModel$dPara[3], theta = fitModel$dPara[4], gamma = fitModel$dPara[5], b = fitModel$forcedDistPara)
} else if(fitModel$distribution == "mixinvgauss"){
returnValue <- pmixinvgauss(fitModel$residuals, theta = fitModel$dPara[1], lambda = fitModel$dPara[2], gamma = fitModel$dPara[3])
} else stop("not yet implemented for the ", fitModel$distribution, " distribution")
if(transformation == "cox-snell"){
if(fitModel$distribution == "exponential"){
warning("Cox-Snell transformation for the exponential distribution will leave the residuals unchanged!")
return(fitModel$residuals)
} else{
return(-log(1 - returnValue))
}
} else returnValue
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/standardizeResi.R |
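# LM test of no remaining ACD structure in the residuals (the M&T (2006)
# test printed below). Sketch (assumes 'fit' is a standard exponential
# ACD fit from acdFit()):
#
#   testRmACD(fit, pStar = 2, robust = TRUE)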
testRmACD <- function(fitModel, pStar = 2, robust = TRUE){
if(fitModel$model != "ACD" || fitModel$distribution != "exponential") stop("this test only works if the model estimated is a standard ACD model.")
#initiates variables:
p <- fitModel$order[1]
q <- fitModel$order[2]
if(length(fitModel$durations$adjDur)){
dur <- fitModel$durations$adjDur
} else{
dur <- fitModel$durations$durations
}
maxpq <- max(p, q)
N <- length(dur)
k <- length(fitModel$mPara)
mean <- mean(dur)
dmudtheta <- matrix(nrow = N, ncol = k)
dpsidtheta <- matrix(nrow = N, ncol = pStar)
mu <- fitModel$muHats
beta <- fitModel$mPara[(2+p):k]
######computes dmudtheta########
zeros<-rep(0, maxpq)
#for omega:
dmudtheta[,1]<-c(zeros,stats::filter(rep(1,N-maxpq),beta,"r"))
#for alpha:
for(j in 1:p){
dmudtheta[,j+1]<-c(zeros,stats::filter(dur[(maxpq+1-j):(N-j)],beta,"r"))
}
#for beta:
for(j in 1:q){
dmudtheta[,j+p+1]<-c(zeros,stats::filter(mu[(maxpq+1-j):(N-j)],beta,"r"))
}
######computes dpsidtheta########
zeros<-rep(0,pStar)
for(j in 1:pStar){
dpsidtheta[ , j] <- c(zeros,dur[(pStar + 1 - j):(N-j)]/mu[(pStar + 1 - j):(N-j)])
}
######computes a_i, b_i and c_i########
a <- dmudtheta/mu
b <- dpsidtheta/mu
c <- dur/mu-1
######computes the LM-statistic########
if(robust){ #"robust":
regression1 <- stats::lm(b~a-1)
cr <- c*regression1$residuals
regression2 <- stats::lm(rep(1,N)~cr-1)
SSR <- sum(regression2$residuals^2)
chi2 <- N - SSR
pv <- 1 - stats::pchisq(chi2, pStar)
} else{ #the non robust version of the test:
SSR0 <- sum(c^2)
regression1 <- stats::lm(c ~ a + b - 1)
SSR1 <- sum(regression1$residuals^2)
chi2 <- N * (SSR0 - SSR1) / SSR0
pv <- 1 - stats::pchisq(chi2, pStar)
}
df.out <- data.frame(c(chi2, pStar, pv))
rownames(df.out) <- c("LM-stat: ", "Degrees of freedom: ", "P-value: ")
colnames(df.out) <- " "
if(robust) cat("\nM&T (2006) test of no remaining ACD in residuals (robust version): \n")
if(!robust) cat("\nM&T (2006) test of no remaining ACD in residuals (nonrobust version): \n")
print(format(df.out, digits = 3, scientific = F))
testRmACD <- list(chi2 = chi2, pv = pv)
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/testRmACD.R |
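# LM test of the standard ACD model against a smooth transition (STACD)
# alternative. Sketch (assumes 'fit' is a standard exponential ACD fit):
#
#   testSTACD(fit, K = 2, robust = TRUE)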
testSTACD<-function(fitModel, K = 2, robust = TRUE){
if(fitModel$model != "ACD" || fitModel$distribution != "exponential") stop("this test only works if the model estimated is a standard ACD model.")
#initiates variables:
p <- fitModel$order[1]
q <- fitModel$order[2]
if(length(fitModel$durations$adjDur)){
dur <- fitModel$durations$adjDur
} else{
dur <- fitModel$durations$durations
}
k <- sum(fitModel$order)+1
maxpq <- max(p, q)
N <- length(dur)
mean <- mean(dur)
dmudtheta<-matrix(nrow = N, ncol = k)
dpsidtheta<-matrix(nrow = N, ncol = 2*p*K)
mu <- fitModel$muHats
beta <- fitModel$mPara[(2+p):k]
######computes dmudtheta########
zeros<-rep(0,maxpq)
#for omega:
dmudtheta[,1]<-c(zeros,stats::filter(rep(1,N-maxpq),beta,"r"))
#for alpha:
for(j in 1:p){
dmudtheta[,j+1]<-c(zeros,stats::filter(dur[(maxpq+1-j):(N-j)],beta,"r"))
}
#for beta:
for(j in 1:q){
dmudtheta[,j+p+1]<-c(zeros,stats::filter(mu[(maxpq+1-j):(N-j)],beta,"r"))
}
######computes dpsidtheta########
lnX <- log(dur)
for(l in 1:K){
for(j in 1:p){
dpsidtheta[,(j-1)*K+l]<-c(zeros,lnX[(maxpq+1-j):(N-j)]^l)
dpsidtheta[,(j-1)*K+l]<-c(zeros,stats::filter(dpsidtheta[(maxpq+1):N,(j-1)*K+l],beta,"r"))
dpsidtheta[,K*p+(j-1)*K+l]<-c(zeros,dur[(maxpq+1-j):(N-j)]*lnX[(maxpq+1-j):(N-j)]^l)
dpsidtheta[,K*p+(j-1)*K+l]<-c(zeros,stats::filter(dpsidtheta[(maxpq+1):N,K*p+(j-1)*K+l],beta,"r"))
}
}
######computes a_i, b_i and c_i########
a<-dmudtheta/mu
b<-dpsidtheta/mu
c<-dur/mu-1
######computes the LM-statistic########
if(robust){ #"robust":
regression1 <- stats::lm(b~a-1)
cr <- c*regression1$residuals
regression2 <- stats::lm(rep(1,N)~cr-1)
SSR <- sum(regression2$residuals^2)
chi2 <- N - SSR
pv <- 1 - stats::pchisq(chi2, 2*p*K)
} else{ #the non robust version of the test:
SSR0 <- sum(c^2)
regression1 <- stats::lm(c ~ a + b - 1)
SSR1 <- sum(regression1$residuals^2)
chi2 <- N * (SSR0 - SSR1) / SSR0
pv <- 1 - stats::pchisq(chi2, 2*p*K)
}
df.out <- data.frame(c(chi2, 2*p*K, pv))
rownames(df.out) <- c("LM-stat: ", "Degrees of freedom: ", "P-value: ")
colnames(df.out) <- " "
if(robust) cat("\nM&T (2006) test of ACD against STACD (robust version): \n")
if(!robust) cat("\nM&T (2006) test of ACD against STACD (nonrobust version): \n")
print(format(df.out, digits = 3, scientific = F))
testSTACD <- list(chi2 = chi2, pv = pv)
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/testSTACD.R |
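# LM test of the standard ACD model against a time-varying (TVACD)
# alternative, with time measured over the whole sample ("total") or
# within the trading day ("intraday"). Sketch (assumes 'fit' was
# estimated from durations that include the time stamps):
#
#   testTVACD(fit, K = 2, type = "intraday", robust = TRUE)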
testTVACD <- function(fitModel, K = 2, type = "total", robust = TRUE){
if(fitModel$model != "ACD" || fitModel$distribution != "exponential") stop("this test only works if the model estimated is a standard ACD model.")
if(length(fitModel$durations$time) == 0) stop("this test requires 'fitModel' to have been estimated from a duration object with the time of durations provided.")
type <- match.arg(type, c("total", "intraday"))
#initiates variables:
p <- fitModel$order[1]
q <- fitModel$order[2]
if(length(fitModel$durations$adjDur)){
dur <- fitModel$durations$adjDur
} else{
dur <- fitModel$durations$durations
}
maxpq <- max(p, q)
N <- length(dur)
k <- length(fitModel$mPara)
mean <- mean(dur)
dmudtheta<-matrix(nrow = length(dur), ncol = k)
dpsidtheta<-matrix(nrow = length(dur), ncol = (1+p+q)*K)
mu <- fitModel$muHats
beta <- fitModel$mPara[(2+p):k]
if(type == "total"){
time <- as.numeric(fitModel$durations$time - fitModel$durations$time[1])
time <- time/max(time)
}else if(type == "intraday"){
time <- fitModel$durations$time$hour * 3600 + fitModel$durations$time$min * 60 + fitModel$durations$time$sec
time <- as.numeric(time - time[1])
time <- time/max(time)
}
######computes dmudtheta########
zeros<-rep(0,maxpq)
#for omega:
dmudtheta[,1] <- c(zeros, stats::filter(rep(1,N-maxpq),beta,"r"))
#for alpha:
for(j in 1:p){
dmudtheta[,j+1] <- c(zeros, stats::filter(dur[(maxpq+1-j):(N-j)],beta,"r"))
}
#for beta:
for(j in 1:q){
dmudtheta[,j+p+1] <- c(zeros, stats::filter(mu[(maxpq+1-j):(N-j)],beta,"r"))
}
######computes dpsidtheta########
#tl is needed for the partial derivatives:
tl <- matrix(nrow = length(dur), ncol = K)
for(l in 1:K){
tl[ ,l] <- c(zeros, time[maxpq:(N-1)]^l)
}
#for the first K partial derivatives:
for(l in 1:K){
dpsidtheta[ ,l] <- c(zeros,stats::filter(tl[(maxpq+1):N, l],beta,"r"))
}
#for vec(dur1), the terms interacting with lagged durations:
for(l in 1:K){
for(j in 1:p){
dpsidtheta[, K*j+l] <- c(zeros, stats::filter(dur[(maxpq-j+1):(N-j)]*dpsidtheta[(maxpq+1):N, l],beta,"r"))
}
}
#for vec(dur2), the terms interacting with lagged conditional means:
for(l in 1:K){
for(j in 1:q){
dpsidtheta[, K*p+K*j+l] <- c(zeros, stats::filter(mu[(maxpq-j+1):(N-j)]*dpsidtheta[(maxpq+1):N, l],beta,"r"))
}
}
######computes a_i, b_i and c_i########
a <- dmudtheta/mu
b <- dpsidtheta/mu
c <- dur/mu-1
######
if(robust){ #"robust":
regression1 <- stats::lm(b~a-1)
cr <- c*regression1$residuals
regression2 <- stats::lm(rep(1,N)~cr-1)
SSR <- sum(regression2$residuals^2)
chi2 <- N - SSR
pv <- 1 - stats::pchisq(chi2, (1+p+q)*K)
} else{ #the non robust version of the test:
SSR0 <- sum(c^2)
regression1 <- stats::lm(c ~ a + b - 1)
SSR1 <- sum(regression1$residuals^2)
chi2 <- N * (SSR0 - SSR1) / SSR0
pv <- 1 - stats::pchisq(chi2, (1+p+q)*K)
}
df.out <- data.frame(c(chi2, (1+p+q)*K, pv))
rownames(df.out) <- c("LM-stat: ", "Degrees of freedom: ", "P-value: ")
colnames(df.out) <- " "
if(robust) cat("\nM&T (2006) test of ACD against TVACD (robust version): \n")
if(!robust) cat("\nM&T (2006) test of ACD against TVACD (non-robust version): \n")
cat("\nType:", type,"\n")
print(format(df.out, digits = 3, scientific = F))
testTVACD <- list(chi2 = chi2, pv = pv)
} | /scratch/gouwar.j/cran-all/cranData/ACDm/R/testTVACD.R |
#' @title Analysis of Correlated High-Dimensional Expression (ACE) Data
#' @description A function for estimating factor models, giving factor-adjusted statistics.
#'
#' @param Z The observed data matrix with the variables in rows and samples in columns. It is a \eqn{p}-by-\eqn{n_1} matrix.
#' @param X (Optional) The observed data matrix with the variables in rows and samples in columns. It is a \eqn{p}-by-\eqn{n_2} matrix.
#' If X is present, the two-sample test is performed; otherwise, the one-sample test is performed.
#' @param H0_indicator (Optional) A \eqn{p}-dimensional vector containing only 0 and 1.
#' A value of 1 means the variable/gene is non-null and a value of 0 means the gene is null.
#' @param gama FDR control level.
#'
#' @return An object with S3 class \code{ACE} containing the following items will be returned:
#' \describe{
#'   \item{\code{FDP}}{If H0_indicator is supplied, FDP is the true FDP; otherwise, it is the estimated FDP.}
#'   \item{\code{Power}}{If H0_indicator is supplied, the power is also returned, defined as the ratio of the number of correctly rejected hypotheses to the number of non-nulls.}
#' \item{\code{Rejection}}{The number of rejections.}
#' \item{\code{Adjusted_mean_difference}}{Factor-adjusted mean difference which is a \eqn{p}-dimensional vector.}
#' \item{\code{Adjusted_statistics}}{Factor-adjusted statistics (\eqn{p}-dimensional vector).}
#'   \item{\code{Threshold}}{A critical value. When the absolute factor-adjusted statistic is larger than the threshold, the hypothesis is rejected.}
#' \item{\code{Estimated_number_factor}}{The estimated number of factors.}
#' \item{\code{pai1_hat}}{The estimated proportion of non-nulls.}
#' }
#'
#' @importFrom stats cov pnorm quantile
#' @importFrom quantreg rq
#'
#' @references Cao, H., & Kosorok, M. R. (2011). Simultaneous critical values for t-tests in very high dimensions. Bernoulli, 17, 347.
#' @references Wang, P., Lyu, P., Peddada, S., Cao, H. (2023+). A powerful methodology for analyzing correlated high dimensional data using factor models. results not shown.
#'
#' @examples
#' library(mvtnorm); library(quantreg)
#' p <- 200; n <- 100; h <- 3 # the number of variables, samples and factors
#' berlii <- rbinom(p, 1, 0.2) # 1 means the variable is non-null and 0 means it is null.
#' index0 <- which(berlii == 0); index1 <- which(berlii == 1)
#'
#' mu <- matrix(rep(0, 1*p), nrow=p)
#' mu[index1] <- runif(length(index1), min=0.4, max=0.7) # expectation of data
#' B <- matrix(runif(h*p, min=-1, max=1), nrow=p) # factor loading matrix
#' t_error <- t(rmvt(n, sigma = diag(p), df = 10)) # error term followed t-distribution
#' f <- t(rmvt(n, diag(h), df = 4))/sqrt(4/(4-2)) # factor followed t-distribution
#' Y <- mu %*% matrix(rep(1, n*1), nrow=1) + B %*% f + t_error # data
#' res <- ACE(Z = Y, H0_indicator = berlii, gama = 0.05)
#' res$FDP # true FDP
#' res$Power # power
#'
#' @export
#'
ACE <- function(Z, X, H0_indicator, gama){
if (missing(X)) {
Y <- Z
n <- ncol(Y)
p <- nrow(Y)
} else {
p <- nrow(Z); n1 <- ncol(Z); n2 <- ncol(X); n <- n1
Y <- matrix(0, p, n)
for (jy in 1:n1){
Y[,jy] <- Z[,jy] - sqrt(n1/n2)*X[,jy] + apply(X[,1:n1], 1, sum)/sqrt(n1*n2) - apply(X,1,mean)
}
}
Yba <- apply(Y,1,mean); T <- (sqrt(n)* Yba); deltaa <- cov(t(Y))
h_max <- 20
pca <- svd(deltaa, nu=0, nv = h_max)
lam_sort <- pca$d
gamma_norm <- pca$v
# pca <- cppSvd(deltaa) # U %*% diag(S) %*% t(V)
# gamma_norm <- pca$U
# lam_sort <- pca$S
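# estimate the number of factors by maximizing the ratio of consecutive
# eigenvalues (eigenvalue-ratio criterion)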
bizhi <- lam_sort[1:(h_max-1)]/lam_sort[2:h_max]
h_hat <- which.max(bizhi)
if (h_hat == 1){
B_hat <- gamma_norm[,1:h_hat] * sqrt(lam_sort[1:h_hat])
} else {
B_hat <- gamma_norm[,1:h_hat] %*% diag(sqrt(lam_sort[1:h_hat]))
}
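# recover the factor realizations sample by sample via median (tau = 0.5)
# quantile regression on the estimated loadings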
W0_hat <- matrix(0, h_hat, n)
for (jjj in 1:n){
W0_hat[,jjj] <- rq(Y[,jjj] ~ B_hat - 1, tau = 0.5)$coef
}
W_piao <- rbind(rep(1,n), W0_hat)
Px <- t(W_piao) %*% solve(W_piao %*% t(W_piao)) %*% W_piao
muB_hat <- Y %*% t(W_piao) %*% solve(W_piao %*% t(W_piao))
mu_hat <- muB_hat[,1]; B_hat <- muB_hat[,-1]
T_k <- sqrt(n)*mu_hat
sigma_hat <- (Y %*% (diag(n) - Px) %*% t(Y))/(n - h_hat - 1)
bbb <- sqrt(diag(sigma_hat))
statistics <- T_k/bbb
abs_stat <- abs(statistics)
if (n <= 100) {
aaaaaaa <- seq(0.3, 5, by = 0.001)
pai <- function(c){
(sapply(c,
function(c, abs_stat){
mean(pmin(abs_stat, c))
}
, abs_stat) / c - 2*(1-exp(-c^2/2))/(c*sqrt(2*pi)) -
2 * pnorm(c, lower.tail = F))/(1 -2*(1-exp(-c^2/2))/(c*sqrt(2*pi)) -
2 * pnorm(c, lower.tail = F))
}
paii <- pai(aaaaaaa)
pai1 <- max(paii[paii >= 0 & paii <= 1])
s0 <- 0.1*mean(quantile(bbb, seq(0, 1-pai1, by = 0.001)))
sigma_hat <- diag((bbb + s0)^2)
bbb <- sqrt(diag(sigma_hat))
statistics <- T_k/bbb
abs_stat <- abs(statistics)
} else {pai1 <- 0}
aaaaaa <- seq(0.01, 10, by = 0.005)
f_t_hat <- function(x){
2*pnorm(x, lower.tail = F) -
gama * sapply(x, function(x, abs_stat){mean(( abs_stat >= x ))}, abs_stat)
}
f_t_hatt <- f_t_hat(aaaaaa)
index <- which(f_t_hatt <= 0)[1]
t_fdr_hat <- ifelse(length(index) <= 0, Inf, aaaaaa[index])
R <- sum(abs_stat >= t_fdr_hat)
if (missing(H0_indicator)) {
FDP <- 2*p*pnorm(t_fdr_hat, lower.tail = F) / R
return(list("FDP" = FDP, "Rejection" = R, "Adjusted_mean_difference" = mu_hat,
"Adjusted_statistics" = statistics, "Threshold" = t_fdr_hat,
"Estimated_number_factor" = h_hat, "pai1_hat" = pai1))
} else {
index_0 <- which(H0_indicator == 0)
false_reject <- which(abs(T_k[index_0]/bbb[index_0]) >= t_fdr_hat)
if (R == 0){
S <- 0; true_FDP <- 0
} else {
S <- R - length(false_reject)
true_FDP <- length(false_reject)/R
}
power <- S/(p - length(index_0))
return(list("FDP" = true_FDP, "Power" = power, "Rejection" = R,
"Adjusted_mean_difference" = mu_hat, "Adjusted_statistics" = statistics,
"Threshold" = t_fdr_hat, "Estimated_number_factor" = h_hat, "pai1_hat" = pai1))
}
}
| /scratch/gouwar.j/cran-all/cranData/ACE.CoCo/R/ACE.R |
#' @title Collection of news articles.
#' @description Contains collections of articles from several
#' news portals (a short sample).
#' A second collection holds articles from the Bahia Blanca
#' newspaper La Nueva.
#' It also includes statistical summaries of the full databases,
#' used in the examples of the package functions.
#' @format A list with 8 objects.
#'\describe{
#' \item{la_nueva}{a data frame with articles from La Nueva}
#' \item{rev_puerto}{a data frame with articles from Revista Puerto}
#' \item{rp_procesada}{a data frame with conflict
#' indicators based on the Revista Puerto data}
#' \item{lc_mdp}{a url to download the corpus
#' of articles from La Capital}
#' \item{rp_mdp}{a url to download the corpus of articles
#' from Revista Puerto}
#' \item{ed_neco}{a url to download the corpus of articles
#' from Ecos Diarios}
#' \item{ln_bb}{a url to download the corpus of
#' articles from La Nueva}
#' \item{ln_arg}{a url to download the corpus of
#' articles from La Nacion}
#'}
#' @docType data
#' @usage data(acep_bases)
#' @references Nieto, Agustin 2020 «Intersecciones entre historia digital e
#' historia social: un ejercicio de lectura distante sobre la
#' conflictividad maritima en la historia argentina reciente».
#' Drassana: revista del Museu Maritim (28):122-42.
#' (\href{https://observatoriodeconflictividad.org/nietohd.pdf}{Revista Drassana})
#' @source \href{https://revistapuerto.com.ar/}{Revista Puerto}
#' @source \href{https://www.lanueva.com/}{La Nueva}
#' @keywords data
#' @examples
#' acep_bases$rp_procesada[1:6, ]
"acep_bases"
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_bases.R |
#' @title Text cleaning.
#' @description Function that cleans and normalizes the texts/articles.
#' @param x text vector to which the text-cleaning
#' function is applied.
#' @param rm_hashtag removes hashtags.
#' @param rm_emoji removes emojis.
#' @param rm_punt removes punctuation.
#' @param rm_num removes numbers.
#' @param rm_whitespace removes whitespace.
#' @param rm_newline removes line breaks.
#' @param rm_cesp removes special characters.
#' @param rm_stopwords removes stopwords.
#' @param rm_dias removes the days of the week.
#' @param rm_meses removes the months of the year.
#' @param rm_shortwords removes short words.
#' @param rm_url removes urls.
#' @param rm_users removes mentions of social media users.
#' @param other_sw defaults to NULL; used to extend the stopword
#' list with a new vector of words.
#' @param u character-length threshold for the rm_shortwords option.
#' @param tolower converts the texts to lower case.
#' @importFrom utils read.delim
#' @keywords normalization
#' @examples
#' acep_clean("El SUTEBA fue al paro. Reclaman mejoras salariales.", rm_cesp = FALSE)
#' @export
acep_clean <- function(x,
tolower = TRUE,
rm_cesp = TRUE,
rm_emoji = TRUE,
rm_hashtag = TRUE,
rm_users = TRUE,
rm_punt = TRUE,
rm_num = TRUE,
rm_url = TRUE,
rm_meses = TRUE,
rm_dias = TRUE,
rm_stopwords = TRUE,
rm_shortwords = TRUE,
rm_newline = TRUE,
rm_whitespace = TRUE,
other_sw = NULL,
u = 1) {
if(is.vector(x) != TRUE){
mensaje <- "No ingresaste un vector en el parametro x. Vuelve a intentarlo ingresando un vector!"
return(message(mensaje))
} else {
if(is.vector(x) == TRUE) {
out <- tryCatch({
if (tolower == TRUE) {
x <- gsub(pattern = "([[:upper:]])", perl = TRUE,
replacement = "\\L\\1", x)
}
if (tolower == TRUE) {
tildes <- readRDS(url("https://observatoriodeconflictividad.org/basesdatos/tildes.rds"))
x <- chartr(tildes, tolower(tildes), x)
}
if (rm_cesp == TRUE) {
tildes <- readRDS(url("https://observatoriodeconflictividad.org/basesdatos/tildes.rds"))
sintildes <- readRDS(url("https://observatoriodeconflictividad.org/basesdatos/sintildes.rds"))
x <- chartr(tildes, sintildes, x)
}
if (rm_url == TRUE) {
x <- gsub(ACEP::acep_rs$url, "", x, perl = TRUE)
}
if (rm_emoji == TRUE) {
emojis <- utils::read.delim('https://raw.githubusercontent.com/HDyCSC/datos/main/emojis.txt', header = FALSE)$V1
x <- gsub(emojis, " ", x, perl = TRUE)
}
if (rm_hashtag == TRUE) {
x <- gsub(ACEP::acep_rs$hashtag, "", x, perl = TRUE)
}
if (rm_users == TRUE) {
x <- gsub(ACEP::acep_rs$users, "", x, perl = TRUE)
}
if (rm_punt == TRUE) {
punt1 <- utils::read.delim('https://raw.githubusercontent.com/HDyCSC/datos/main/punt1.txt', header = FALSE)$V1
x <- gsub(punt1, " ", x, perl = TRUE)
}
if (rm_num == TRUE) {
x <- gsub(ACEP::acep_rs$num, "", x, perl = TRUE)
}
if (rm_meses == TRUE) {
meses <- utils::read.delim('https://raw.githubusercontent.com/HDyCSC/datos/main/meses.txt', header = FALSE)$V1
x <- gsub(meses, "", x, perl = TRUE)
}
if (rm_dias == TRUE) {
dias <- utils::read.delim('https://raw.githubusercontent.com/HDyCSC/datos/main/dias.txt', header = FALSE)$V1
x <- gsub(dias, "", x, perl = TRUE)
}
if (rm_stopwords == TRUE) {
stopwords <- readRDS(url("https://github.com/HDyCSC/datos/raw/222dd7c060fabc2904c1ceffbea6958f9a275b57/stopwords.rds"))
if (is.null(other_sw)) {
x <- gsub(stopwords, " ", x, perl = FALSE)
} else {
othersw <- paste0("|\\b", other_sw, "\\b", collapse = "")
x <- gsub(paste0(stopwords, othersw), " ", x, perl = FALSE)
}
}
if (rm_shortwords == TRUE) {
x <- gsub(paste0("\\b[[:alpha:]]{1,", u, "}\\b"), " ", x, perl = FALSE)
}
if (rm_punt == TRUE) {
punt1 <- utils::read.delim('https://raw.githubusercontent.com/HDyCSC/datos/main/punt1.txt', header = FALSE)$V1
x <- gsub(paste0("\\b",punt1,"\\b"), "", x, perl = TRUE)
}
if (rm_newline == TRUE) {
x <- gsub(ACEP::acep_rs$saltos, " ", x, perl = TRUE)
}
if (rm_whitespace == TRUE) {
gsub(ACEP::acep_rs$espacios, "", x, perl = TRUE)
} else {
x
}
}
)
}
return(out)
}
}
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_clean.R |
#' @title Frequency, mentions and intensity.
#' @description Function that uses the acep_frec, acep_men and acep_int
#' functions and returns a table with three new columns: number of words,
#' number of dictionary mentions, and intensity index.
#' @param db data frame with the texts to process.
#' @param t data frame column containing the vector
#' of texts to process.
#' @param d dictionary as a vector.
#' @param n number of decimals of the intensity index.
#' @return If all inputs are correct, the output will be a
#' database in tabular format with three new variables.
#' @keywords indicators
#' @export acep_db
#' @examples
#' df <- data.frame(texto = c("El SUTEBA fue al paro. Reclaman mejoras salariales.",
#' "El SOIP lleva adelante un plan de lucha con paros y piquetes."))
#' diccionario <- c("paro", "lucha", "piquetes")
#' acep_db(df, df$texto, diccionario, 4)
#' @export
acep_db <- function(db, t, d, n) {
if(is.data.frame(db) != TRUE){
mensaje <- "No ingresaste un marco de datos en el parametro db. Vuelve a intentarlo ingresando un marco de datos!"
return(message(mensaje))
}
if(is.vector(t) != TRUE){
mensaje <- "No ingresaste un vector en el parametro t. Vuelve a intentarlo ingresando un vector!"
return(message(mensaje))
}
if(is.vector(d) != TRUE){
mensaje <- "No ingresaste un vector en el parametro d. Vuelve a intentarlo ingresando un vector!"
return(message(mensaje))
} else {
tryCatch({
if(is.data.frame(db) == TRUE){
db <- db
db$n_palabras <- acep_frec(t)
db$conflictos <- acep_men(t, d)
db$intensidad <- acep_int(db$conflictos, db$n_palabras, n)
}
}
)
return(db)
}
}
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_db.R |
#' @title Detection of word mentions.
#' @description Function that detects mentions of words
#' referring to conflicts in each of the texts/articles.
#' @param x text vector to which the detection of
#' dictionary word mentions is applied.
#' @param y vector of words of the dictionary used.
#' @param u threshold for assigning a positive value to the
#' detection of mentions.
#' @param tolower converts the texts to lower case.
#' @keywords indicators
#' @examples
#' df <- data.frame(texto = c("El SUTEBA fue al paro. Reclaman mejoras salariales.",
#' "El SOIP lleva adelante un plan de lucha con paros y piquetes."))
#' diccionario <- c("paro", "lucha", "piquetes")
#' df$detect <- acep_detect(df$texto, diccionario)
#' df
#' @export
acep_detect <- function(x, y, u = 1, tolower = TRUE) {
if(is.vector(x) == TRUE){
out <- tryCatch({
dicc <- paste0(y, collapse = "|")
if (tolower == TRUE) {
detect <- vapply(gregexpr(dicc, tolower(x)),
function(z) sum(z != -1), c(frec = 0))
} else {
detect <- vapply(gregexpr(dicc, x),
function(z) sum(z != -1), c(frec = 0))
}
ifelse(detect >= u, 1, 0)
}
)
return(out)
} else {
message("No ingresaste un vector. Vuelve a intentarlo ingresando un vector!")
}
}
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_detect.R |
#' @title Collection of dictionaries.
#' @description Collection of dictionaries gathering dictionaries
#' of different origins. The dicc_confl_acep dictionary was built
#' within the Observatorio de Conflictividad of the UNMdP.
#' The dicc_confl_gp and dicc_viol_gp dictionaries were taken from
#' Albrieu and Palazzo (2020).
#' @format An object of class 'list' with 3 components.
#'\describe{
#' \item{dicc_confl_gp}{a vector of words from a dictionary
#' of terms referring to conflicts}
#' \item{dicc_viol_gp}{a vector of words from a dictionary
#' of terms referring to violence}
#' \item{dicc_confl_sismos}{a vector of words from a dictionary
#' of terms referring to conflicts}
#'}
#' @docType data
#' @usage data(acep_diccionarios)
#' @references Albrieu, Ramiro y Gabriel Palazzo 2020 «Categorizacion de
#' conflictos sociales en el ambito de los recursos naturales: un estudio
#' de las actividades extractivas mediante la mineria de textos».
#' Revista CEPAL (131):29-59.
#' (\href{https://observatoriodeconflictividad.org/RVE131_AP.pdf}{Revista CEPAL})
#' @references Laitano, Guillermina y Agustin Nieto
#' «Analisis computacional de la conflictividad laboral en Mar del Plata
#' durante el gobierno de Cambiemos». Ponencia presentado en VI Workshop -
#' Los conflictos laborales en la Argentina del siglo XX y XXI:
#' un abordaje interdisciplinario de conceptos, problemas y escalas de analisis,
#' Tandil, 2021.
#' @source \href{https://revistapuerto.com.ar/}{Revista Puerto}
#' @source \href{https://www.lanueva.com/}{La Nueva}
#' @keywords dictionaries
#' @examples
#' diccionario <- acep_load_base(acep_diccionarios$dicc_viol_gp)
#' diccionario
"acep_diccionarios"
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_diccionarios.R |
#' @title Total word frequency.
#' @description Function that counts the total word frequency
#' in each of the texts/articles.
#' @param x text vector to which the word-frequency
#' counting function is applied.
#' @keywords indicators
#' @export acep_frec
#' @return If all inputs are correct, the output will be a vector
#' of word frequencies.
#' @examples
#' acep_frec("El SUTEBA fue al paro. Reclaman mejoras salariales.")
#' @export
acep_frec <- function(x) {
if(is.vector(x) == TRUE){
out <- tryCatch({
vapply(strsplit(x, " "), length, c(frec = 0))
}
)
return(out)
} else {
message("No ingresaste un vector. Vuelve a intentarlo ingresando un vector!")
}
}
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_frec.R |
#' @title Intensity index.
#' @description Function that builds an intensity index based
#' on the ratio between total words and dictionary words
#' present in the text.
#' @param pc numeric vector with the frequency of conflict words
#' present in each text.
#' @param pt vector of total words in each text.
#' @param decimales number of decimals; 4 by default
#' but it can be changed.
#' @export acep_int
#' @return If all inputs are correct,
#' the output will be a numeric vector.
#' @keywords indicators
#' @examples
#' conflictos <- c(1, 5, 0, 3, 7)
#' palabras <- c(4, 11, 12, 9, 34)
#' acep_int(conflictos, palabras, 3)
#' @export
acep_int <- function(pc, pt, decimales = 4) {
if(is.numeric(pc) != TRUE){
mensaje <- "No ingresaste un vector numerico en el parametro pc. Vuelve a intentarlo ingresando un vector numerico!"
return(message(mensaje))
}
if(is.numeric(pt) != TRUE){
mensaje <- "No ingresaste un vector numerico en el parametro pc. Vuelve a intentarlo ingresando un vector numerico!"
return(message(mensaje))
} else {
if(is.numeric(pc) == TRUE) {
tryCatch({
if(is.numeric(pt) == TRUE){
round(pc / pt, decimales)
}
}
)
}
}
}
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_int.R |
#' @title Loads databases created by the Observatorio.
#' @description Function to load databases available online.
#' The following databases are currently available: Revista Puerto 'rp_mdp';
#' La Nueva 'ln_bb', La Capital 'lc_mdp', Ecos Diarios 'ed_neco',
#' La Nacion 'ln_arg'
#' @param tag identifying tag of the data frame to load:
#' acep_bases$rp_mdp, acep_bases$ln_bb, acep_bases$lc_mdp,
#' acep_bases$ed_neco, acep_bases$ln_arg
#' @keywords data
#' @export acep_load_base
#' @importFrom utils download.file
#' @importFrom httr GET
#' @return If all inputs are correct,
#' the output will be a database in tabular format with a corpus of articles.
#' @examples
#' bd_sismos <- acep_bases$rev_puerto
#' acep_load_base(tag = bd_sismos) |> head()
#' @export
acep_load_base <- function(tag) {
if (httr::GET(tag)$status_code != 200){
message("La URL parece no existir. Intentalo con otra url!")
} else {
message("Descargando...")
tryCatch({
nombre <- basename(tag)
destfile <- file.path(tempdir(), nombre)
download.file(tag, destfile)
readRDS(destfile)
}
)
}
}
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_load_base.R |
#' @title Frequency of word mentions.
#' @description Function that counts the frequency of mentions of
#' words referring to conflicts in each of the texts/articles.
#' @param x text vector to which the counting of
#' dictionary word mentions is applied.
#' @param y vector of words of the dictionary used.
#' @param tolower converts the texts to lower case.
#' @export acep_men
#' @return If all inputs are correct,
#' the output will be a vector with the mention
#' frequency of the dictionary words.
#' @keywords indicators
#' @examples
#' df <- data.frame(texto = c("El SUTEBA fue al paro. Reclaman mejoras salariales.",
#' "El SOIP lleva adelante un plan de lucha con paros y piquetes."))
#' diccionario <- c("paro", "lucha", "piquetes")
#' df$detect <- acep_men(df$texto, diccionario)
#' df
#' @export
acep_men <- function(x, y, tolower = TRUE) {
if(is.vector(x) != TRUE){
mensaje <- "No ingresaste un vector en el parametro x. Vuelve a intentarlo ingresando un vector!"
return(message(mensaje))
}
if(is.vector(y) != TRUE){
mensaje <- "No ingresaste un vector en el parametro y. Vuelve a intentarlo ingresando un vector!"
return(message(mensaje))
} else {
if(is.vector(x) == TRUE) {
out <- tryCatch({
dicc <- paste0(y, collapse = "|")
if (tolower == TRUE) {
vapply(gregexpr(dicc, tolower(x), perl = TRUE),
function(z) sum(z != -1), c(frec = 0))
} else {
vapply(gregexpr(dicc, x, perl = TRUE),
function(z) sum(z != -1), c(frec = 0))
}
}
)
}
return(out)
}
}
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_men.R |
#' @title Visual summary of the time series of conflict indices.
#' @description Function that returns a visual panel of four bar
#' charts with proxy variables of the conflict indices grouped
#' by time segment.
#' @param datos data frame with processed data.
#' @param tagx orientation of the x-axis
#' labels ('horizontal' | 'vertical').
#' @export acep_plot_rst
#' @importFrom graphics par
#' @return If all inputs are correct,
#' the output will be a four-panel image.
#' @keywords visualization
#' @examples
#' datos <- acep_bases$rp_procesada
#' fecha <- datos$fecha
#' n_palabras <- datos$n_palabras
#' conflictos <- datos$conflictos
#' datos_procesados_anio <- acep_rst(datos,
#' fecha, n_palabras, conflictos, st = 'anio')
#' acep_plot_rst(datos_procesados_anio, tagx = 'vertical')
#' @export
acep_plot_rst <- function(datos, tagx = "horizontal") {
tryCatch({
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
datos <- datos
par(mfrow = c(2, 2))
acep_plot_st(datos$st, datos$int_notas_confl,
t = "Eventos de protesta",
etiquetax = tagx)
acep_plot_st(datos$st, datos$frecm,
t = "Acciones de protesta",
etiquetax = tagx)
acep_plot_st(datos$st, datos$intensidad,
t = "Intensidad de la protesta",
etiquetax = tagx)
acep_plot_st(datos$st, datos$intac,
t = "Intensidad acumulada de la protesta",
etiquetax = tagx)
par(mfrow = c(1, 1))
}
)
}
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_plot_rst.R |
#' @title Bar chart of the time series of conflict indices.
#' @description Function that returns a bar chart with the time
#' series of conflict indices by day, month or year.
#' @param x vector of x-axis values (e.g., dates).
#' @param y vector of numeric y-axis values (e.g., mentions).
#' @param t chart title.
#' @param ejex x-axis label.
#' @param ejey y-axis label.
#' @param etiquetax orientation of the
#' x-axis labels ('horizontal' | 'vertical').
#' @export acep_plot_st
#' @importFrom graphics title barplot
#' @keywords visualization
#' @return If all inputs are correct,
#' the output will be a one-panel image.
#' @examples
#' datos <- acep_bases$rp_procesada
#' fecha <- datos$fecha
#' n_palabras <- datos$n_palabras
#' conflictos <- datos$conflictos
#' dpa <- acep_rst(datos,
#' fecha, n_palabras, conflictos, st = 'anio')
#' acep_plot_st(
#' dpa$st, dpa$frecm,
#' t = 'Evolucion de la conflictividad en el sector pesquero argentino',
#' ejex = 'Anios analizados',
#' ejey = 'Menciones de terminos del diccionario de conflictos',
#' etiquetax = 'horizontal')
#' @export
acep_plot_st <- function(x, y, t = "", ejex = "",
ejey = "", etiquetax = "horizontal") {
etiquetax <- if (etiquetax == "horizontal") {
etiquetax <- 0
} else if (etiquetax == "vertical") {
etiquetax <- 2
}
if(is.vector(x) != TRUE){
mensaje <- "No ingresaste un vector en el parametro x. Vuelve a intentarlo ingresando un vector!"
return(message(mensaje))
}
if(is.numeric(y) != TRUE){
mensaje <- "No ingresaste un vector numerico en el parametro y. Vuelve a intentarlo ingresando un vector numerico!"
return(message(mensaje))
} else {
if(is.vector(x) == TRUE) {
tryCatch({
graphics::barplot(y ~ x,
main = t,
ylab = "",
xlab = "",
cex.names = 1.0,
border = "grey",
col = grDevices::hcl.colors(length(y), "Pastel 1"),
las = etiquetax)
graphics::title(xlab = ejex, line = -0.1, cex.lab = 1.0)
graphics::title(ylab = ejey, line = -1.0, cex.lab = 1.0)
}
)
}
}
}
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_plot_st.R |
#' @title Character strings to clean and normalize texts.
#' @description Character strings and regular expressions
#' to clean and normalize texts.
#' @format Character strings.
#'\describe{
#' \item{stopwords}{a string of stopwords.}
#' \item{dias}{a string of days.}
#' \item{meses}{a string of months.}
#' \item{emoji}{a string with regular expressions for emojis.}
#' \item{sintildes}{a string of unaccented letters.}
#' \item{tildes}{a string of accented letters.}
#' }
#' @docType data
#' @usage data(acep_rs)
#' @keywords regular expressions
#' @examples
#' print(acep_rs)
"acep_rs"
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_rs.R |
#' @title Time series of conflict indices.
#' @description Function that returns the conflict indices
#' grouped by time segment: day, month, year.
#' @param datos data frame with the texts to process.
#' @param fecha data frame column containing the
#' vector of dates in date format.
#' @param frecp data frame column containing the
#' vector of word frequencies per text.
#' @param frecm data frame column containing the
#' vector of dictionary mentions per text.
#' @param st parameter setting the time segment
#' to group by: anio, mes, dia.
#' @param u mention threshold for counting an article
#' as referring to a conflict.
#' @param d number of decimals; 4 by default but
#' it can be changed.
#' @export acep_rst
#' @importFrom stats aggregate
#' @return If all inputs are correct,
#' the output will be a database in tabular format
#' with new variables.
#' @keywords summary
#' @examples
#' datos <- acep_bases$rp_procesada
#' fecha <- datos$fecha
#' n_palabras <- datos$n_palabras
#' conflictos <- datos$conflictos
#' datos_procesados_anio <- acep_rst(datos,
#' fecha, n_palabras, conflictos, st = 'anio', u = 4)
#' datos_procesados_mes <- acep_rst(datos,
#' fecha, n_palabras, conflictos)
#' datos_procesados_dia <- acep_rst(datos,
#' fecha, n_palabras, conflictos, st = 'dia', d = 3)
#' datos_procesados_anio |> head()
#' datos_procesados_mes |> head()
#' datos_procesados_dia |> head()
#' @export
acep_rst <- function(datos, fecha, frecp, frecm,
st = "mes", u = 2, d = 4) {
if(is.data.frame(datos) != TRUE){
mensaje <- "No ingresaste un marco de datos en el parametro datos. Vuelve a intentarlo ingresando un marco de datos!"
return(message(mensaje))
}
if((paste(names(datos),collapse = '') != "fechan_palabrasconflictosintensidad")){
mensaje <- "No ingresaste un marco de datos adecuado en el parametro datos. Vuelve a intentarlo ingresando un marco de datos adecuado!"
return(message(mensaje))
}
if(is.data.frame(fecha) == TRUE){
mensaje <- "No ingresaste un vector Date en el parametro fecha. Vuelve a intentarlo ingresando un vector de fechas!"
return(message(mensaje))
}
if(all(is.na(as.Date(as.character(fecha), format = "%Y-%m-%d")))){
mensaje <- "No ingresaste un vector Date en el parametro fecha. Vuelve a intentarlo ingresando un vector de fechas!"
return(message(mensaje))
}
if(!is.numeric(frecp)){
mensaje <- "No ingresaste un vector numerico en el parametro frecp. Vuelve a intentarlo ingresando un vector numerico!"
return(message(mensaje))
}
if(!is.numeric(frecm)){
mensaje <- "No ingresaste un vector numerico en el parametro frecm. Vuelve a intentarlo ingresando un vector numerico!"
return(message(mensaje))
} else {
if((paste(names(datos),collapse = '') == "fechan_palabrasconflictosintensidad") && is.data.frame(datos) == TRUE) {
tryCatch({
datos <- datos
datos$anio <- format(fecha, "%Y")
datos$mes <- paste0(datos$anio, "-", format(fecha, "%m"))
datos$dia <- fecha
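# flag an article as conflict-related when its dictionary mentions exceed the threshold u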
datos$csn <- ifelse(datos$conflictos > u, 1, 0)
st <- if (st == "anio") {
st <- datos$anio
} else if (st == "mes") {
st <- datos$mes
} else if (st == "dia") {
st <- datos$dia
}
frec_notas <- stats::aggregate(st, by = list(st), FUN = length)
colnames(frec_notas) <- c("st", "frecn")
frec_notas_conf <- stats::aggregate(
csn ~ st, datos, function(x) c(frec_notas_conf = sum(x)))
frec_palabras <- stats::aggregate(
frecp ~ st, datos, function(x) c(frec_palabras = sum(x)))
frec_conflict <- stats::aggregate(
frecm ~ st, datos, function(x) c(frec_conflict = sum(x)))
frec_int_acum <- stats::aggregate(
intensidad ~ st, datos, function(x) c(frec_int_acum = sum(x)))
colnames(frec_int_acum) <- c("st", "intac")
frec_pal_con <-
merge(frec_notas,
merge(frec_notas_conf,
merge(frec_palabras,
merge(frec_conflict, frec_int_acum))))
frec_pal_con$intensidad <- acep_int(frec_pal_con$frecm,
frec_pal_con$frecp,
decimales = d)
frec_pal_con$int_notas_confl <- acep_int(frec_pal_con$csn,
frec_pal_con$frecn,
decimales = d)
return(frec_pal_con)
}
)
}
}
}
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_rst.R |
#' @title Tokenizer.
#' @description Function that tokenizes the texts/articles.
#' @param x text vector to which the tokenization function is applied.
#' @param tolower converts the texts to lower case.
#' @keywords tokenization
#' @examples
#' acep_token("Huelga de obreros del pescado en el puerto")
#' @export
acep_token <- function(x, tolower = TRUE) {
if(is.vector(x) != TRUE){
mensaje <- "No ingresaste un vector en el parametro x. Vuelve a intentarlo ingresando un vector!"
return(message(mensaje))
} else {
if(is.vector(x) == TRUE) {
tryCatch({
if (tolower == TRUE) {
id <- data.frame(id_doc = seq_len(length(x)), texto = x)
id_token <- seq_len(length(unlist(strsplit(tolower(x), " "))))
token <- unlist(strsplit(tolower(x), " "))
texto <- rep(x, vapply(strsplit(x, " "), length, c(frec = 0)))
} else {
id <- data.frame(id_doc = seq_len(length(x)), texto = x)
id_token <- seq_len(length(unlist(strsplit(x, " "))))
token <- unlist(strsplit(x, " "))
texto <- rep(x, vapply(strsplit(x, " "), length, c(frec = 0)))
}
df <- merge(data.frame(texto = texto,
id_token = id_token, token = token), id)
df <- df[order(df$id_token, decreasing = FALSE), ]
df$id_token_doc <- as.vector(unlist(aggregate(
df$token ~ df$id_doc,
FUN = function(x) seq_len(length(x)))[, 2]))
df <- data.frame(
id_doc = df$id_doc,
texto = df$texto,
id_token = df$id_token,
id_token_doc = df$id_token_doc,
token = df$token)
return(df)
}
)
}
}
}
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_token.R |
#' @title Bar chart of the most frequent words in a corpus.
#' @description Function that returns a bar chart with the most
#' frequent words in a text corpus.
#' @param x vector of tokenized words.
#' @param u cutoff for the top most frequent words.
#' @param frec parameter determining whether the values are shown
#' as absolute or relative frequencies.
#' @export acep_token_plot
#' @return If all inputs are correct,
#' the output will be a bar chart.
#' @keywords visualization
#' @examples
#' tokens <- c(rep("paro",15), rep("piquete",25), rep("corte",20), rep("manifestacion",10),
#' rep("bloqueo",5), rep("alerta",16), rep("ciudad",12), rep("sindicato",11), rep("paritaria",14),
#' rep("huelga",14), rep("escrache",15))
#' acep_token_plot(tokens)
#' @export
acep_token_plot <- function(x, u = 10, frec = TRUE) {
if(is.vector(x) != TRUE){
mensaje <- "No ingresaste un vector en el parametro x. Vuelve a intentarlo ingresando un vector!"
return(message(mensaje))
} else {
if(is.vector(x) == TRUE) {
tryCatch({
if (frec == TRUE) {
tabla_token <- base::table(x) |> as.data.frame()
tabla_token$x <- as.character(tabla_token$x)
tabla_token <- tabla_token[order(tabla_token$Freq, decreasing = TRUE), ]
tabla_token <- utils::head(tabla_token, n = u)
tabla_token$prop <- tabla_token$Freq / sum(tabla_token$Freq)
tabla_token <- data.frame(
token = tabla_token$x,
frec = tabla_token$Freq,
prop = tabla_token$prop)
tabla_token <- utils::head(tabla_token, n = u)
tabla_token <- tabla_token[order(tabla_token$frec,
decreasing = FALSE), ]
graphics::barplot(
height = tabla_token$frec,
names = as.factor(tabla_token$token),
col = grDevices::hcl.colors(u, "mint", rev = TRUE),
xlab = "frecuencia",
ylab = NULL,
main = paste("Top", u, "de palabras frecuentes"),
xlim = c(0, (max(tabla_token$frec) * 1.1)),
horiz = TRUE,
las = 1,
cex.names = 0.8,
cex.axis = 0.8
)
} else {
tabla_token <- base::table(x) |> as.data.frame()
tabla_token$x <- as.character(tabla_token$x)
tabla_token <- tabla_token[order(tabla_token$Freq, decreasing = TRUE), ]
tabla_token <- utils::head(tabla_token, n = u)
tabla_token$prop <- tabla_token$Freq / sum(tabla_token$Freq)
tabla_token <- data.frame(token = tabla_token$x,
frec = tabla_token$Freq,
prop = tabla_token$prop)
tabla_token <- utils::head(tabla_token, n = u)
tabla_token <- tabla_token[order(tabla_token$frec, decreasing = FALSE), ]
graphics::barplot(
height = tabla_token$prop,
names = as.factor(tabla_token$token),
col = grDevices::hcl.colors(u, "purp", rev = TRUE),
xlab = "porcentaje",
ylab = NULL,
main = paste("Top", u, "de palabras frecuentes"),
xlim = c(0, (max(tabla_token$prop) * 1.15)),
horiz = TRUE,
las = 1,
cex.names = 0.8,
cex.axis = 0.8
)
}
}
)
}
}
}
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_token_plot.R |
#' @title Frequency table of tokenized words.
#' @description Function that counts the frequency of tokenized words.
#' @param x vector of tokenized words.
#' @param u cutoff for the top most frequent words.
#' @export acep_token_table
#' @return If all inputs are correct,
#' the output will be a table with the relative and
#' absolute frequencies of the tokenized words.
#' @keywords tables
#' @examples
#' tokens <- c(rep("paro",15), rep("piquete",25), rep("corte",20), rep("manifestacion",10),
#' rep("bloqueo",5), rep("alerta",16), rep("ciudad",12), rep("sindicato",11), rep("paritaria",14),
#' rep("huelga",14), rep("escrache",15))
#' acep_token_table(tokens)
#' @export
acep_token_table <- function(x, u = 10) {
if(is.vector(x) != TRUE){
message("No ingresaste un vector en el parametro x. Vuelve a intentarlo ingresando un vector!")
} else {
if(is.vector(x) == TRUE) {
tryCatch({
tabla_token <- base::table(x) |> as.data.frame()
tabla_token <- tabla_token[order(tabla_token$Freq, decreasing = TRUE), ]
tabla_token <- utils::head(tabla_token, n = u)
tabla_token$prop <- tabla_token$Freq / sum(tabla_token$Freq)
tabla_token <- data.frame(
token = tabla_token$x,
frec = tabla_token$Freq,
prop = tabla_token$prop)
utils::head(tabla_token, n = u)
}
)
}
}
}
| /scratch/gouwar.j/cran-all/cranData/ACEP/R/acep_token_table.R |
#' Returns the status of the ACE
#'
#' @param connection connection object returned from connect(url) function
#' @return list with status information reported by the ACE instance
#'
#' @examples
#' \dontrun{ACEsearch.status(ACEsearch.connect('http://localhost:8080'))}
#'
#'
ACEsearch.status <- function(connection) {
response <- httr::POST(url = paste0(connection$url,'/status'))
json_response <- httr::content(response, type="application/json")
if (!is.null(json_response$errorMessage)) {
stop(json_response$errorMessage)
}
return(json_response)
}
#' Checks whether a given patient is present in the ACE dataset
#'
#' @param connection connection object returned from connect(url) function
#' @param patient_id numerical id of the patient
#' @return TRUE or FALSE
#'
#' @examples
#' \dontrun{ACEsearch.contains(ACEsearch.connect('http://localhost:8080'), 123)}
#'
#'
ACEsearch.contains <- function(connection, patient_id) {
request <- paste0('{"patientId":', patient_id, '}')
response <- httr::POST(url = paste0(connection$url,'/contains_patient'), body=request)
json_response <- httr::content(response, type="application/json")
return(json_response$response)
}
#' Dumps patient from ACE to a file on disk
#'
#' @param connection connection object returned from connect(url) function
#' @param patient_id numerical id of the patient
#' @param path path where to store the generated files
#' @param selection_query returns only the part of the patient's data that intersects with the result of the selection_query
#' @param contains_start if TRUE, the start of each dumped time interval must intersect the result of the selection_query
#' @param contains_end if TRUE, the end of each dumped time interval must intersect the result of the selection_query
#' @return no return value; the patient's data are written to a JSON file under \code{path}
#'
#' @examples
#' \dontrun{ACEsearch.dump(ACEsearch.connect('http://localhost:8080'), 123, '/path/to/dump/files/')}
#' \dontrun{ACEsearch.dump(ACEsearch.connect('http://localhost:8080'), 123, '/path/', 'ICD9=250.50', TRUE, TRUE)}
#'
#'
ACEsearch.dump <- function(connection, patient_id, path, selection_query=NULL, contains_start=FALSE, contains_end=FALSE) {
request <- paste0('{"patientId":', patient_id, ', "icd9":true, "icd10": true, "departments":true, "cpt":true, "rx":true, "snomed": true, "notes": true, "visitTypes": true, "noteTypes": true, "encounterDays": true, "ageRanges": true, "labs": true, "vitals": true, "atc": true', ',"selectionQuery":"',selection_query, '", "containsStart":', contains_start, ', "containsEnd": ', contains_end, '}')
if (ACEsearch.contains(connection, patient_id) == FALSE) {
stop(paste0("Patient with id ", patient_id, " does not exist"))
}
  response <- httr::POST(url = paste0(connection$url,'/dump'), body=request, httr::write_disk(file.path(path, paste0(patient_id, '.json')), overwrite=TRUE))
}
| /scratch/gouwar.j/cran-all/cranData/ACEsearch/R/api.R |
#' Connects to ACE instance
#'
#' Attempts to connect to ACE instance using URL:PORT
#' @param url url address of a running ACE instance, usually containing port information
#' @return data frame containing connection information used for all other accessory functions
#'
#' @examples
#' \dontrun{ACEsearch.connect("http://localhost:8080")}
#'
ACEsearch.connect <- function(url) {
  tryCatch(status <- httr::GET(paste0(url, "/status")), error=function(e) print("Cannot connect to the specified ACE instance"))
if (!exists("status")) {
stop("Could not connect to ACE instance")
}
print(status)
status <- httr::content(status, type="application/json")
if (status$status != "OK") {
stop("ACE instance instance error")
}
response <- data.frame(1)
response$url <- url
response$status <- status$status
response$dataset <- status$datasetVersion
response$code <- status$version
print(paste("Connected to ",url, response$code, response$dataset))
return (response)
}
| /scratch/gouwar.j/cran-all/cranData/ACEsearch/R/connect.R |
#' Queries ACE and returns a list of patient IDs
#'
#' @param connection connection object returned from connect(url) function
#' @param query ACE query
#' @param output_time equivalent to wrapping the query in an OUTPUT() command. Together with patient IDs, outputs each
#' time interval in the patient's timeline during which the query evaluated as true
#' @return data frame containing patient IDs and time intervals (optional)
#'
#' @examples
#' \dontrun{ACEsearch.query(ACEsearch.connect('http://localhost:8080'), 'ICD9=250.50')}
#' \dontrun{ACEsearch.query(ACEsearch.connect('http://localhost:8080'), 'ICD9=250.50', TRUE)}
#'
#'
ACEsearch.query <- function(connection, query, output_time=FALSE) {
stopifnot(is.logical(output_time))
request <- paste0('{"query":"', query, '"}')
if (regexpr("output(\\s+)?\\(", tolower(query)) >= 0) {
output_time = TRUE
}
request <- paste0('{"query":"', query, '", "returnTimeIntervals":',output_time, '}')
response <- httr::POST(url = paste0(connection$url,'/query'), body = request)
json_response <- httr::content(response, type="application/json")
if (!is.null(json_response$errorMessage)) {
stop(json_response$errorMessage)
}
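  # Parse the JSON response: with time intervals requested, each patient entry is
  # a (patientId, startTime, endTime) triple; otherwise each entry is a bare id.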
index <- 1
pids <- vector()
startTime <- vector()
endTime <- vector()
for (i in json_response$patientIds) {
val <- 1;
for (j in i) {
if (val == 1) {
pids[index] <- j[1]
} else if (val == 2) {
startTime[index] <- j[1]
} else if (val == 3) {
endTime[index] <- j[1]
index <- index + 1
val <- 0;
}
if (output_time) {
val <- val + 1
} else {
index <- index + 1
}
}
}
if (output_time) {
result <- data.frame(pids, startTime, endTime)
colnames(result) <- c("patientId", "startTime", "endTime")
} else {
result <- data.frame(pids)
colnames(result) <- c("patientId")
}
return(result)
}
#' Queries ACE with a CSV() command and imports the contents of the csv into a data frame
#'
#' @param connection connection object returned from connect(url) function
#' @param query ACE CSV query
#' @param file_name if specified, stores the csv into the file_name, otherwise the temporary
#' file used to download the data will be deleted after the data.frame is generated
#' @return data frame containing CSV file
#'
#' @examples
#' \dontrun{ACEsearch.csv(ACEsearch.connect('http://localhost:8080'), 'CSV(ICD9=250.50, CPT, LABS, ICD9)')}
#' \dontrun{ACEsearch.csv(ACEsearch.connect('http://localhost:8080'), 'CSV(ICD9=250.50, CPT, LABS, ICD9)',
#' '/output.csv')}
#'
ACEsearch.csv <- function(connection, query, file_name=NULL) {
request <- paste0('{"query":"', gsub(pattern = '"', replacement = '\\\\"', x = query), '", "returnTimeIntervals": false}')
response <- httr::POST(url = paste0(connection$url,'/query'), body = request)
json_response <- httr::content(response, type="application/json")
if (is.null(file_name)) {
temp <- tempfile()
} else {
temp <- file_name
}
print("Querying ACE instance...")
  csv_content = httr::GET(url=paste0(connection$url,'/',json_response$exportLocation), httr::write_disk(temp, overwrite=TRUE))
print(paste0("Writing output into: ", temp))
print("Importing data into R...")
  result = utils::read.csv(temp, header=TRUE, sep='\t')
if (is.null(file_name)) {
print("Deleting temporary files...")
file.remove(temp)
}
return(result)
}
| /scratch/gouwar.j/cran-all/cranData/ACEsearch/R/query.R |
#' rmvn
#' @keywords internal
#' @param n Sample Size
#' @param sigma Covariance matrix
#' @return A matrix of \code{n} draws from a multivariate normal distribution with covariance matrix \code{sigma}
#'
rmvn <- function(n,sigma) {
Sh <- with(svd(sigma),
v%*%diag(sqrt(d))%*%t(u))
matrix(stats::rnorm(ncol(sigma)*n),
ncol = ncol(sigma))%*%Sh
}
| /scratch/gouwar.j/cran-all/cranData/ACEsimFit/R/Internal.R |
#' Power_LS
#' @description The function is designed for calculating the power of heritability estimation from ACE models given the parameter settings. Or calculate one of the parameter settings (N,R,h2,c2) given the rest of known parameters.
#' This power calculator is made based on the Least Squares theory and follows the mathematical derivation proposed by Visscher(2004).
#' @import stats
#' @param N1 The number of kin pairs for group1 (amount of PAIRS)
#' @param N2 The number of kin pairs for group2
#' @param power The power of the heritability estimate. Specify this parameter if you want the function to return the required sample sizes.
#' @param p_N1 The proportion of kin group1 in the total sample. Must be specified if the user wants to calculate N1 and N2 simultaneously.
#' @param h2 The assumed standardized heritability of the target trait. 0 < h2 < 1
#' @param c2 The assumed standardized common environmental effect on the target trait. 0 < c2 < 1
#' @param R1 The genetic relatedness of kin pair group1
#' @param R2 The genetic relatedness of kin pair group2
#' @param alpha The type-one error rate for heritability estimation.
#' @return A numeric \code{vector} of power when `N1` and `N2` are both specified.
#' \cr
#' A numeric \code{vector} of `N1` (or `N2`) when `N2` (or `N1`) is specified. A numeric \code{vector} of `N1` and `N2` when `p_N1` is specified.
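#' @examples
#' # Illustrative sketches; the parameter values below are assumptions chosen for
#' # demonstration only.
#' # Power for 120 MZ and 120 DZ twin pairs with h2 = .5 and c2 = .2:
#' Power_LS(N1 = 120, N2 = 120, h2 = .5, c2 = .2, R1 = 1, R2 = .5)
#' # Required N2 given N1 = 120 and a target power of .8:
#' Power_LS(N1 = 120, power = .8, h2 = .5, c2 = .2, R1 = 1, R2 = .5)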
#' @export
Power_LS <- function(N1,N2,power,p_N1=NULL,h2,c2,R1=1,R2=.5,alpha = .05){
Za <- qnorm(1-alpha)
if(missing(power)){
Zb <- sqrt(h2^2*(abs(R1-R2)^2) / ((1-(R1*h2+c2)^2)^2/N1 + (1-(R2*h2+c2)^2)^2/N2)) - Za
power_result <- pnorm(Zb,0)
return(power_result)
}
if(!missing(N1) & missing(N2)){
Zb <- qnorm(power)
N2_result <- (1-(R2*h2+c2)^2)^2 / (h2^2*(abs(R1-R2)^2)/((Za+Zb)^2) - ((1-(R1*h2+c2)^2)^2/N1))
return(round(N2_result))
}
if(missing(N1) & !missing(N2)){
Zb <- qnorm(power)
N1_result <- (1-(R1*h2+c2)^2)^2 / (h2^2*(abs(R1-R2)^2)/((Za+Zb)^2) - ((1-(R2*h2+c2)^2)^2/N2))
return(round(N1_result))
}
if(missing(N1) & missing(N2) & !is.null(p_N1)){
Zb <- qnorm(power)
N_total <- (1-(R1*h2+c2)^2)^2/(p_N1*h2^2*abs(R1-R2)^2/((Za+Zb)^2)) + (1-(R2*h2+c2)^2)^2/((1-p_N1)*h2^2*(abs(R1-R2)^2)/((Za+Zb)^2))
N1_result <- N_total * p_N1
N2_result <- N_total * (1-p_N1)
return(c(round(N1_result),round(N2_result)))
}
}
| /scratch/gouwar.j/cran-all/cranData/ACEsimFit/R/Power_LS.R |
#' Sim_Fit
#' @description A function to simulate a set of kin pair data and fit them with ACE models. Can be helpful with checking model performance for a given parameter setting.
#' @param GroupNames A character vector specifying two names of the simulated kin pairs
#' @param GroupSizes A numeric vector specifying two group sizes indicating the number of kin pairs in the respective group.
#' @param nIter A numeric value specifying the number of iterations to run given the assigned parameters (i.e. the number of model fitting results you want to get)
#' @param SSeed An integer specifying the starting seed of the random number generator. This parameter ensures that the simulated results are reproducible across runs
#' @param GroupRel A numeric vector specifying two genetic relatedness values of the simulated kin pairs
#' @param GroupR_c A numeric vector specifying two common environment correlation coefficients of the simulated kin pairs
#' @param mu A numeric vector specifying two mean values for the generated variable of the kin pairs
#' @param ace1 A numeric vector specifying three variance components under an ACE (additive genetics, common environment, unique environment) structure for group1
#' @param ace2 A numeric vector specifying three variance components under an ACE (additive genetics, common environment, unique environment) structure for group2
#' @param ifComb A logical value specifying the approach to achieve the required genetic relatedness value. \code{TRUE} = using combination approach. \code{FALSE} = using direct approach. (See function description for a detailed explanation of two approaches.)
#' @param lbound A logical value indicating if a lower boundary of .0001 will be imposed on the estimated A, C and E components
#' @param saveRaw A logical value specifying if the raw simulated data should be saved in the output list
#' @return Returns a two-level \code{list}. Level one indexes the iterations. Level two holds the model fitting results and raw data (if \code{saveRaw = TRUE}) of the simulated data from the respective iteration. Level two includes:
#' \item{Results}{A \code{list} including 1) A \code{data.frame} displaying the nested comparison model between ACE, AE, CE, E models and 2) A \code{list} of all model fit information generated from OpenMx}
#' \item{Data}{A \code{data.frame} consisting of the simulated raw data}
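#' @examples
#' # A minimal illustrative run; nIter is kept tiny here only to bound the runtime,
#' # and the fitting relies on OpenMx.
#' \donttest{
#' res <- Sim_Fit(GroupSizes = c(30, 30), nIter = 2)
#' res[["Iteration1"]][["Results"]][["nest"]]
#' }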
#' @export
Sim_Fit <- function(
GroupNames = c("KinPair1","KinPair2"),
GroupSizes = c(100,100),
nIter = 100,
SSeed = 62,
GroupRel=c(1,.5),
GroupR_c=c(1,1),
mu = c(0,0),
ace1=c(1,1,1),
ace2=c(1,1,1),
ifComb=FALSE,
lbound=FALSE,
saveRaw=FALSE
){
l.results <- list()
for(i in 1: nIter){
set.seed(SSeed - 1 + i)
df_temp <- kinsim_double(
GroupNames = GroupNames,
GroupSizes = GroupSizes,
GroupRel=GroupRel,
GroupR_c=GroupR_c,
mu = mu,
ace1=ace1,
ace2=ace2,
ifComb=ifComb)
if(!saveRaw){
l.results[[i]] <- list(
Results = fit_uniACE(
data_1 = df_temp[which(df_temp$GroupName == GroupNames[1]), c("y1","y2")],
data_2 = df_temp[which(df_temp$GroupName == GroupNames[2]), c("y1","y2")],
GroupRel = GroupRel, GroupR_c = GroupR_c, lbound = lbound),
data = NA)
}else{
l.results[[i]] <- list(
Results = fit_uniACE(
data_1 = df_temp[which(df_temp$GroupName == GroupNames[1]), c("y1","y2")],
data_2 = df_temp[which(df_temp$GroupName == GroupNames[2]), c("y1","y2")],
GroupRel = GroupRel, GroupR_c = GroupR_c, lbound = lbound),
data = df_temp)
}
names(l.results)[[i]] <- paste("Iteration",i, sep = "")
}
return(l.results)
}
| /scratch/gouwar.j/cran-all/cranData/ACEsimFit/R/Sim_Fit.R |
#' fit_uniACE
#' @description Use OpenMx to quickly fit a univariate ACE model
#' @import OpenMx
#' @param data_1 An n by 2 \code{data.frame} consisting of the group1 kin pairs
#' @param data_2 An n by 2 \code{data.frame} consisting of the group2 kin pairs
#' @param GroupRel A numeric vector specifying two genetic relatedness values of two groups of kin pairs
#' @param GroupR_c A numeric vector specifying two common environment correlation coefficients of two groups of kin pairs
#' @param lbound A logical value indicating if a lower boundary of .0001 will be imposed on the estimated A, C and E components
#' @return Returns a \code{list} with the following:
#' \item{df_nested}{A \code{data.frame} displaying the nested comparison model between ACE, AE, CE, E models}
#' \item{fitACE}{A \code{list} of all model fit information generated from OpenMx}
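#' @examples
#' # An illustrative sketch: simulate MZ/DZ-like pairs with kinsim_double (default
#' # relatedness c(1, .5)), then fit the univariate ACE model via OpenMx.
#' \donttest{
#' df <- kinsim_double(GroupSizes = c(50, 50))
#' fit_uniACE(df[df$GroupName == "KinPair1", c("y1", "y2")],
#'            df[df$GroupName == "KinPair2", c("y1", "y2")])
#' }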
#' @export
fit_uniACE <- function(data_1, data_2, GroupRel = c(1,.5), GroupR_c = c(1,1), lbound = FALSE){
# Load Libraries & Options
#require(OpenMx)
#require(psych)
#require(polycor)
# source("miFunctions.R")
# # Create Output
# filename <- "oneACEc"
# sink(paste(filename,".Ro",sep=""), append=FALSE, split=TRUE)
# ----------------------------------------------------------------------------------------------------------------------
# PREPARE DATA
# Load Data
mzData <- data_1
dzData <- data_2
R1 <- mxMatrix(type = "Full", nrow = 1, ncol = 1,free = FALSE, values = GroupRel[1], name = "R1")
R2 <- mxMatrix(type = "Full", nrow = 1, ncol = 1,free = FALSE, values = GroupRel[2], name = "R2")
r_c1 <- mxMatrix(type = "Full", nrow = 1, ncol = 1,free = FALSE, values = GroupR_c[1], name = "r_c1")
r_c2 <- mxMatrix(type = "Full", nrow = 1, ncol = 1,free = FALSE, values = GroupR_c[2], name = "r_c2")
#coeAM <- coe_am
# covMZ <- cov(mzData, use = "pairwise")
# covDZ <- cov(dzData, use = "pairwise")
# #
# mean(rbind(mzData,dzData)[,1], na.rm = TRUE)
nv <- 1
ntv <- 2
selVars1 <- colnames(mzData)
selVars2 <- colnames(dzData)
#start values
svBe <- .01
svMu <- 0
svVa <- .2
svVe <- .5
V <- NULL
VA <- NULL
VC <- NULL
VE <- NULL
cDZ <- NULL
cMZ <- NULL
#variance matrix
if(lbound == TRUE){
covA <- mxMatrix(type = "Symm", nrow = nv, ncol = nv,free = TRUE, values = svVa, lbound = .0001, labels = "VA11", name = "VA")
covC <- mxMatrix(type = "Symm", nrow = nv, ncol = nv,free = TRUE, values = svVa, lbound = .0001, labels = "VC11", name = "VC")
covE <- mxMatrix(type = "Symm", nrow = nv, ncol = nv,free = TRUE, values = svVe, lbound = .0001, labels = "VE11", name = "VE")
}else{
covA <- mxMatrix(type = "Symm", nrow = nv, ncol = nv,free = TRUE, values = svVa, labels = "VA11", name = "VA")
covC <- mxMatrix(type = "Symm", nrow = nv, ncol = nv,free = TRUE, values = svVa, labels = "VC11", name = "VC")
covE <- mxMatrix(type = "Symm", nrow = nv, ncol = nv,free = TRUE, values = svVe, labels = "VE11", name = "VE")
}
#expected variance matrix
covP <- mxAlgebra(expression = VA+VC+VE, name = "V")
covMZ <- mxAlgebra(expression = R1*VA+r_c1*VC, name = "cMZ")
covDZ <- mxAlgebra(expression = R2%x%VA+r_c2*VC, name = "cDZ")
expCovMz <- mxAlgebra(expression = rbind(cbind(V,cMZ), cbind(t(cMZ),V)), name = "expCovMz")
expCovDz <- mxAlgebra(expression = rbind(cbind(V,cDZ), cbind(t(cDZ),V)), name = "expCovDz")
#create data
dataMZ <- mxData( observed=mzData, type="raw" )
dataDZ <- mxData( observed=dzData, type="raw" )
# Mean Matrix
intercept <- mxMatrix(type = "Full", nrow= 1 , ncol = ntv, free = TRUE, values = 0, labels = "interC", name = "intercept")
expMean <- mxAlgebra(expression = 1*intercept , name = "expMean")
# Create expectation objects
expMZ <- mxExpectationNormal(covariance = "expCovMz", means ="expMean", dimnames = selVars1)
expDZ <- mxExpectationNormal(covariance = "expCovDz", means ="expMean", dimnames = selVars2)
funML <- mxFitFunctionML()
#Create models
pars <- list(intercept, covA, covC, covE, covP)
modelMZ <- mxModel(pars, expMean,covMZ,expCovMz,dataMZ,expMZ,funML,R1,r_c1,name = "MZ")
#MZfit <- mxRun(modelMZ, intervals = TRUE)
#summary(MZfit)
modelDZ <- mxModel(pars, expMean,covDZ,expCovDz,dataDZ,expDZ,funML,R2,r_c2,name = "DZ")
#DZfit <- mxRun(modelDZ, intervals = TRUE)
#summary(DZfit)
multi <- mxFitFunctionMultigroup(c("MZ","DZ"))
#Algebra for Variance components
rowUS <- rep("US",nv)
colUS <- rep(c("VA","VC","VE","SA","SC","SE"),each = nv)
estUS <- mxAlgebra(expression = cbind(VA,VC,VE,VA/V,VC/V,VE/V), name = "US", dimnames = list(rowUS,colUS))
#CI
ciACE <- mxCI("US[1,1:6]")
modelACE <- mxModel("oneACEvc_1cov", pars, modelMZ, modelDZ, multi,estUS,ciACE)
fitACE <- mxRun(modelACE, intervals = TRUE, silent = TRUE)
sumACE <-summary(fitACE)
#sumACE
# ----------------------------------------------------------------------------------------------------------------------
# RUN SUBMODELS
# Run AE model
modelAE <- mxModel( modelACE, name="oneAEvc" )
modelAE <- omxSetParameters( modelAE, labels="VC11", free=FALSE, values=0 )
fitAE <- mxRun( modelAE, intervals=T, silent = TRUE )
#fitGofs(fitAE); fitEstCis(fitAE)
# Run CE model
modelCE <- mxModel( modelACE, name="oneCEvc" )
modelCE <- omxSetParameters( modelCE, labels="VA11", free=FALSE, values=0 )
modelCE <- omxSetParameters( modelCE, labels=c("VE11","VC11"), free=TRUE, values=.6 )
fitCE <- mxRun( modelCE, intervals=T, silent = TRUE )
#fitGofs(fitCE); fitEstCis(fitCE)
# Run E model
modelE <- mxModel( modelACE, name="oneEvc" )
modelE <- omxSetParameters( modelE, labels=c("VA11","VC11"), free=FALSE, values=0 )
fitE <- mxRun( modelE, intervals=T, silent = TRUE )
#fitGofs(fitE); fitEstCis(fitE)
# Print Comparative Fit Statistics
  df_nested <- mxCompare( fitACE, list(fitAE, fitCE, fitE) )
#(rbind(fitACE$US$result,fitAE$US$result,fitCE$US$result,fitE$US$result),4)
l.modeloutput <- list(nest = df_nested,summary = sumACE)
return(l.modeloutput)
}
| /scratch/gouwar.j/cran-all/cranData/ACEsimFit/R/fit_uniACE.R |
#' kinsim_double
#' @description The function to generate two groups of univariate kin pair (e.g., both MZ and DZ twins) data using a multivariate normal approach, given the ACE components.
#' \cr
#' \cr
#' Two approaches can be selected: a) simulate two groups of kin pairs using the genetic relatedness directly b) simulate two groups of kin pairs by combining MZ twins and DZ twins to achieve the required genetic relatedness (.5<R<1).
#' @param GroupNames A character vector specifying two names of the simulated kin pairs
#' @param GroupSizes A numeric vector specifying two group sizes indicating the amount of kin pairs in respective group.
#' @param GroupRel A numeric vector specifying two genetic relatedness values of the simulated kin pairs
#' @param GroupR_c A numeric vector specifying two common environment correlation coefficients of the simulated kin pairs
#' @param mu A numeric vector specifying two mean values for the generated variable of the kin pairs
#' @param ace1 A numeric vector specifying three variance components under an ACE (additive genetics, common environment, unique environment) structure for group1
#' @param ace2 A numeric vector specifying three variance components under an ACE (additive genetics, common environment, unique environment) structure for group2
#' @param ifComb A logical value specifying the approach to achieve the required genetic relatedness value. \code{TRUE} = using combination approach. \code{FALSE} = using direct approach. (See function description for a detailed explanation of two approaches.)
#' @return Returns \code{data.frame} with the following:
#' \item{GroupName}{group name of the kin pairs}
#' \item{R}{level of relatedness for the kin pair}
#' \item{r_c}{level of common environment correlation of the kin pairs}
#' \item{id}{id}
#' \item{A1}{Additive genetic component for kin1 of the kin pairs}
#' \item{A2}{Additive genetic component for kin2 of the kin pairs}
#' \item{C1}{shared-environmental component for kin1 of the kin pairs}
#' \item{C2}{shared-environmental component for kin2 of the kin pairs}
#' \item{E1}{non-shared-environmental component for kin1 of the kin pairs}
#' \item{E2}{non-shared-environmental component for kin2 of the kin pairs}
#' \item{y1}{generated trait value for kin1}
#' \item{y2}{generated trait value for kin2}
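#' @examples
#' # Illustrative call: 100 MZ and 100 DZ pairs with unit A, C and E variances.
#' df <- kinsim_double(GroupNames = c("MZ", "DZ"), GroupSizes = c(100, 100),
#'                     GroupRel = c(1, .5))
#' head(df)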
#' @export
kinsim_double <- function(
GroupNames = c("KinPair1","KinPair2"),
GroupSizes = c(100,100),
GroupRel=c(1,.5),
GroupR_c=c(1,1),
mu = c(0,0),
ace1=c(1,1,1),
ace2=c(1,1,1),
ifComb=FALSE){
if(!ifComb){
df_N1 <- kinsim_single(
name = GroupNames[1],
Rel = GroupRel[1],
r_c = GroupR_c[1],
n = GroupSizes[1],
mu = mu[1],
ace = ace1)
df_N2 <- kinsim_single(
name = GroupNames[2],
Rel = GroupRel[2],
r_c = GroupR_c[2],
n = GroupSizes[2],
mu = mu[2],
ace = ace2)
df_final <- rbind(df_N1, df_N2)
#return(df_final)
} else{
if((GroupRel[1] == 1 | GroupRel[1]==.5) & GroupRel[2] != 1 & GroupRel[2] != .5){
df_N1 <- kinsim_single(
name = GroupNames[1],
Rel = GroupRel[1],
r_c = GroupR_c[1],
n = GroupSizes[1],
mu = mu[1],
ace = ace1
)
df2MZ <- kinsim_single(
name = GroupNames[2],
Rel = 1,
r_c = GroupR_c[2],
n = round((GroupRel[2]-.5)*2*GroupSizes[2]),
mu = mu[2],
ace = ace2
)
df2DZ <- kinsim_single(
name = GroupNames[2],
Rel = .5,
r_c = GroupR_c[2],
n = GroupSizes[2] - round((GroupRel[2]-.5)*2*GroupSizes[2]),
mu = mu[2],
ace = ace2
)
df_N2 <- rbind(df2MZ,df2DZ)
df_N2 <- df_N2[sample(1:nrow(df_N2)),]
df_N2$id <- 1:nrow(df_N2)
df_N2$R = GroupRel[2]
df_final <- rbind(df_N1, df_N2)
}
if(GroupRel[1] != 1 & GroupRel[1] != .5 & (GroupRel[2] == 1 | GroupRel[2]==.5)){
df1MZ <- kinsim_single(
name = GroupNames[1],
Rel = 1,
r_c = GroupR_c[1],
n = round((GroupRel[1]-.5)*2*GroupSizes[1]),
mu = mu[1],
ace = ace1
)
df1DZ <- kinsim_single(
name = GroupNames[1],
Rel = .5,
r_c = GroupR_c[1],
n = GroupSizes[1] - round((GroupRel[1]-.5)*2*GroupSizes[1]),
mu = mu[1],
ace = ace1
)
df_N1 <- rbind(df1MZ,df1DZ)
df_N1 <- df_N1[sample(1:nrow(df_N1)),]
df_N1$id <- 1:nrow(df_N1)
df_N1$R = GroupRel[1]
df_N2 <- kinsim_single(
name = GroupNames[2],
Rel = GroupRel[2],
r_c = GroupR_c[2],
n = GroupSizes[2],
mu = mu[2],
ace = ace2
)
df_final <- rbind(df_N1, df_N2)
}
if((GroupRel[1] == 1 | GroupRel[1]==.5) & (GroupRel[2] == 1 | GroupRel[2]==.5)){
df_N1 <- kinsim_single(
name = GroupNames[1],
Rel = GroupRel[1],
r_c = GroupR_c[1],
n = GroupSizes[1],
mu = mu[1],
ace = ace1)
df_N2 <- kinsim_single(
name = GroupNames[2],
Rel = GroupRel[2],
r_c = GroupR_c[2],
n = GroupSizes[2],
mu = mu[2],
ace = ace2)
df_final <- rbind(df_N1, df_N2)
}
if(GroupRel[1] != 1 & GroupRel[1] != .5 & GroupRel[2] != 1 & GroupRel[2] != .5){
df1MZ <- kinsim_single(
name = GroupNames[1],
Rel = 1,
r_c = GroupR_c[1],
n = round((GroupRel[1]-.5)*2*GroupSizes[1]),
mu = mu[1],
ace = ace1
)
df1DZ <- kinsim_single(
name = GroupNames[1],
Rel = .5,
r_c = GroupR_c[1],
n = GroupSizes[1] - round((GroupRel[1]-.5)*2*GroupSizes[1]),
mu = mu[1],
ace = ace1
)
df_N1 <- rbind(df1MZ,df1DZ)
df_N1 <- df_N1[sample(1:nrow(df_N1)),]
df_N1$id <- 1:nrow(df_N1)
df_N1$R = GroupRel[1]
df2MZ <- kinsim_single(
name = GroupNames[2],
Rel = 1,
r_c = GroupR_c[2],
n = round((GroupRel[2]-.5)*2*GroupSizes[2]),
mu = mu[2],
ace = ace2
)
df2DZ <- kinsim_single(
name = GroupNames[2],
Rel = .5,
r_c = GroupR_c[2],
n = GroupSizes[2] - round((GroupRel[2]-.5)*2*GroupSizes[2]),
mu = mu[2],
ace = ace2
)
df_N2 <- rbind(df2MZ,df2DZ)
df_N2 <- df_N2[sample(1:nrow(df_N2)),]
df_N2$id <- 1:nrow(df_N2)
df_N2$R = GroupRel[2]
df_final <- rbind(df_N1, df_N2)
}
}
return(df_final)
}
| /scratch/gouwar.j/cran-all/cranData/ACEsimFit/R/kinsim_double.R |
#' kinsim_single
#' @description The function to generate one group of univariate kin pair (e.g., only DZ twins) data using a multivariate normal approach, given the ACE components.
#' @param name Assigned name for the simulated group of kin pairs
#' @param Rel Genetic relatedness of the simulated kin pairs
#' @param r_c Assumed common environment correlation
#' @param n The number of generated kin pairs. (n PAIRS of data; the total number of participants is 2n)
#' @param mu The mean for generated variable
#' @param ace Vector of variance components under an ACE (additive genetics, common environment, unique environment) structure
#' @return Returns \code{data.frame} with the following:
#' \item{GroupName}{group name of the kin pairs}
#' \item{R}{level of genetic relatedness for the kin pairs}
#' \item{r_c}{level of common environment correlation of the kin pairs}
#' \item{id}{id}
#' \item{A1}{Additive genetic component for kin1 of the kin pairs}
#' \item{A2}{Additive genetic component for kin2 of the kin pairs}
#' \item{C1}{shared-environmental component for kin1 of the kin pairs}
#' \item{C2}{shared-environmental component for kin2 of the kin pairs}
#' \item{E1}{non-shared-environmental component for kin1 of the kin pairs}
#' \item{E2}{non-shared-environmental component for kin2 of the kin pairs}
#' \item{y1}{generated trait value for kin1}
#' \item{y2}{generated trait value for kin2}
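#' @examples
#' # Illustrative call: 100 MZ-like pairs (relatedness 1) with unit variance components.
#' df <- kinsim_single(name = "MZ", Rel = 1, n = 100)
#' head(df)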
#' @export
kinsim_single <- function(
name = "KinPair1",
Rel=1,
r_c = 1,
n=100,
mu=0,
ace=c(1,1,1)){
sA <- ace[1]^0.5
sC <- ace[2]^0.5
sE <- ace[3]^0.5
S2 <- matrix(c(0,1,
1,0),2)
datalist <- list()
id <- 1:sum(n)
A.r <- sA*rmvn(n,
sigma = diag(2) + S2*Rel)
C.r <- sC*rmvn(n,
sigma = diag(2) + S2*r_c)
E.r <- cbind(stats::rnorm(n,
sd = sE),
stats::rnorm(n,
sd = sE))
y.r <- mu + A.r + C.r + E.r
r_ <- rep(Rel,n)
r_c <- rep(r_c,n)
groupName <- rep(name,n)
data.r <- data.frame(groupName, r_, r_c, id, A.r,C.r,E.r,y.r)
names(data.r) <- c("GroupName","R","r_c","id","A1","A2","C1","C2","E1","E2","y1","y2")
return(data.r)
}
| /scratch/gouwar.j/cran-all/cranData/ACEsimFit/R/kinsim_single.R |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(ACEsimFit)
## ----simulation---------------------------------------------------------------
kindata <- kinsim_double(
GroupNames = c("SStwins", "OStwins"),
GroupSizes = c(120, 60),
GroupRel = c(.75, 0.5),
GroupR_c = c(1, 1),
mu = c(0, 0),
ace1 = c(.6, .2, .2),
ace2 = c(.6, .2, .2),
ifComb = TRUE
)
head(kindata)
## ----Sim_Fit------------------------------------------------------------------
time1 <- Sys.time()
results_fit <- Sim_Fit(
GroupNames = c("SStwins", "OStwins"),
GroupSizes = c(120, 60),
nIter = 50,
SSeed = 62,
GroupRel = c(.75, 0.5),
GroupR_c = c(1, 1),
mu = c(0, 0),
ace1 = c(.6, .2, .2),
ace2 = c(.6, .2, .2),
ifComb = TRUE,
lbound = FALSE,
saveRaw = FALSE
)
time2 <- Sys.time()
## FYI, the time used for the results above is here. So design your simulation wisely!!!
time2 - time1
## ----resultsDemo--------------------------------------------------------------
results_fit[["Iteration1"]][["Results"]][["nest"]]
## ----powerCalculation---------------------------------------------------------
N <- 180 ##the total number of kin pairs you used in your previous simulation
## Calculate the average diffLL between ACE and CE model.
DiffLL <- numeric()
for(i in 1:50){
  DiffLL[i] <- results_fit[[i]][["Results"]][["nest"]]$diffLL[3]
}
meanDiffLL <- mean(DiffLL)
## Calculate the power based on an alpha level of .05
Power <- 1- pchisq(qchisq(1-.05, 1), 1, meanDiffLL)
Power
## ----powerCalculation2--------------------------------------------------------
Power_LS(N1=120, N2=60, h2=.6, c2=.2, R1 = .75, R2 = 0.5, alpha = 0.05)
| /scratch/gouwar.j/cran-all/cranData/ACEsimFit/inst/doc/ACEsimFit.R |
---
title: "ACEsimFit"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{ACEsimFit}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(ACEsimFit)
```
## Overview
This package is designed for
1) Simulating kin pairs data based on the assumption that every trait is affected by genetic effects (A), common environmental effects (C) and unique environmental effects (E).
2) Using kin pairs data to fit an ACE model and get model fit output.
3) Calculating the power of the heritability estimate under a specific condition.
In our vignette, we will use height as a trait to demonstrate how to simulate kin pairs of height data and how to use those simulated data to fit a univariate ACE model. We will also discuss possible ways to analyse these model fitting results.
(Note: The ideas and data in this vignette are for demonstration purposes only and can be inaccurate in terms of the true facts in science.)
## Simulate kin pair data
Suppose we have a situation like this:
I want to investigate what factors impact adult height in a non-European country. Researchers in developed countries like the US and UK have collected a wealth of twin data to explore this question and reached a conclusion: 60% of the variance in height comes from genes, 20% comes from the family environment and 20% comes from the personal environment and error. I want to see whether this conclusion holds true in an Asian country. One approach is to use a public dataset with height measurements and family structure records.
However, the dataset has two problems: 1) it only contains 180 pairs of twins and 2) it can only distinguish MZ twins from same-sex DZ twins. Given these problems, it is useful to do an a priori power analysis to check the statistical power of the study.
So first I need to simulate data that have the same variance and covariance structure as the data in the public dataset. We can use the `kinsim_double` function to do that. Here are the conditions I want to replicate:
* Sample sizes: 60 pairs of opposite-sex DZ twins (genetic relatedness is .5) and 120 pairs of same-sex MZ and DZ twins (genetic relatedness is .75)
* The relatedness of .75 is achieved by combining MZ and DZ twins instead of simulating kin pairs with relatedness of .75 directly.
* ACE variance structure: A = .6, C = .2, E = .2
* Both group means are 0, because the phenotypes are Z scores.
Therefore, we set the parameters of the `kinsim_double` function like below:
```{r simulation}
kindata <- kinsim_double(
GroupNames = c("SStwins", "OStwins"),
GroupSizes = c(120, 60),
GroupRel = c(.75, 0.5),
GroupR_c = c(1, 1),
mu = c(0, 0),
ace1 = c(.6, .2, .2),
ace2 = c(.6, .2, .2),
ifComb = TRUE
)
head(kindata)
```
Now you can see we have a data.frame with 180 pairs of simulated twins, together with their variance components and other information.
## Calculate the power of the heritability estimate given the variance structure
Generally, calculating power for the A estimate has two approaches: likelihood theory and least squares theory.
### Calculate power based on likelihood theory
To use the likelihood theory, we need to simulate a number of datasets with the same variance structure and average the suggested power from each set of data. See a more detailed explanation at [link](http://www.people.vcu.edu/~bverhulst/power/power.html). So we need to have a set of model fitting results with the -2ll values for the ACE model and the CE model.
Luckily, our package has a function `Sim_Fit` that simulates kin pair data, automatically fits it with an ACE model and returns the model summary results. The function fits the ACE model with the help of the `OpenMx` package.
Here, we again assigned the same parameters to the function. There are a few new parameters here:
* We want 50 simulated datasets so the power calculation is reasonably accurate, so we set `nIter = 50`. Use more iterations when you want an even more accurate power estimate.
* We want the simulated results to be reproducible, so we set `SSeed = 62`. Pick a lucky number here!
* We don't want to constrain the estimation, so we set `lbound = FALSE`.
* We don't need the raw data, which would eat up my laptop's scarce RAM, so we set `saveRaw = FALSE`.
```{r Sim_Fit}
time1 <- Sys.time()
results_fit <- Sim_Fit(
GroupNames = c("SStwins", "OStwins"),
GroupSizes = c(120, 60),
nIter = 50,
SSeed = 62,
GroupRel = c(.75, 0.5),
GroupR_c = c(1, 1),
mu = c(0, 0),
ace1 = c(.6, .2, .2),
ace2 = c(.6, .2, .2),
ifComb = TRUE,
lbound = FALSE,
saveRaw = FALSE
)
time2 <- Sys.time()
## FYI, the time used for the results above is here. So design your simulation wisely!!!
time2 - time1
```
Here's one example of the nested comparison table from the results
```{r resultsDemo}
results_fit[["Iteration1"]][["Results"]][["nest"]]
```
We can then calculate the noncentrality parameter (ncp) from the average difference in log-likelihood between the ACE and CE models. In turn, we calculate the power for the given variance structure and relatedness:
```{r powerCalculation}
N <- 180 ##the total number of kin pairs you used in your previous simulation
## Calculate the average diffLL between ACE and CE model.
DiffLL <- numeric()
for(i in 1:50){
  DiffLL[i] <- results_fit[[i]][["Results"]][["nest"]]$diffLL[3]
}
meanDiffLL <- mean(DiffLL)
## Calculate the power based on an alpha level of .05
Power <- 1- pchisq(qchisq(1-.05, 1), 1, meanDiffLL)
Power
```
So you can see the power of this design would be insufficient for a confident estimate of height heritability. :(
### Calculate power based on least squares theory
Unlike with likelihood theory, we don't need to run simulations to calculate power, because a closed-form formula is available in Xuanyu and Mason's paper (submitted; you will see a link here soon). The function `Power_LS` is designed for power calculation based on LS theory.
Here I plug the parameters of my research question into the function.
```{r powerCalculation2}
Power_LS(N1=120, N2=60, h2=.6, c2=.2, R1 = .75, R2 = 0.5, alpha = 0.05)
```
You can see that the power calculated with LS theory differs from the one based on likelihood theory. This is because 1) the LS formula does not account for the combination of MZ and DZ twins used to reach a relatedness of .75, and 2) the two approaches diverge especially when the sample size is small. In our case, if we had 1800 pairs instead of 180, the power from likelihood theory would be 0.9890003 and the power from least squares theory would be 0.9960664. (Much more similar this time!)
| /scratch/gouwar.j/cran-all/cranData/ACEsimFit/inst/doc/ACEsimFit.Rmd |
AtCtEt <-
function(data_m, data_d, mod = c('d','d','d'), knot_a=5, knot_c=5, knot_e=5, loc = c('e','e','e'), boot=FALSE, num_b = 100, init = rep(0,3), robust = 0)
{
pheno_m <- c(t(data_m[,1:2]))
pheno_d <- c(t(data_d[,1:2]))
T_m <- rep(data_m[,3], each=2)
T_d <- rep(data_d[,3], each=2)
mag <- var(pheno_m)
init_max <- log(mag)
init_min <- log(mag) - abs(log(mag))*1.3
if((is.vector(mod)==FALSE) | (length(mod)!=3) )
{stop('The \'mod\' argument must be a vector of length 3.')}
if(!(mod[1] %in% c('d','c','n')))
{stop('The \'mod\' argument for the A component must be \'d\'(dynamic), \'c\'(constant) or \'n\'(NA).')}
if(!(mod[2] %in% c('d','c','n')))
{stop('The \'mod\' argument for the C component must be \'d\'(dynamic), \'c\'(constant) or \'n\'(NA).')}
if(!(mod[3] %in% c('d','c')))
  {stop('The \'mod\' argument for the E component must be \'d\'(dynamic) or \'c\'(constant).')}
if((is.vector(loc)==FALSE) | (length(loc)!=3) )
{stop('The \'loc\' argument must be a vector of length 3.')}
order <- 3
if(mod[1]=='d')
{
order <- 3
if(knot_a < 3)
{stop('The number of interior knots must be no less than 3.')}
}else
{
order <- 1
}
#knot <- 8
min_T <- min(T_m, T_d)
max_T <- max(T_m, T_d)
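  # Construct the B-spline design matrices for each variance component. With
  # loc = 'e' the interior knots are equidistant over the observed age range (plus
  # two boundary knots on each side); otherwise knots sit at quantiles of the ages.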
if(mod[1]=='d')
{
if(loc[1]=='e')
{
knots_a <- seq(from=min_T, to=max_T, length.out=knot_a)
interval_a <- knots_a[2] - knots_a[1]
knots_a <- c(c(min_T-interval_a*2,min_T-interval_a), knots_a)
knots_a <- c(knots_a, c(max_T+interval_a,max_T+interval_a*2))
}else{
knots_a <- quantile(unique(T_m,T_d), probs = seq(from=0,to=1,length.out=knot_a))
knots_a <- c(knots_a[1], knots_a[1], knots_a)
knots_a <- c(knots_a, knots_a[knot_a+2], knots_a[knot_a+2])
}
B_des_a_m <- splineDesign(knots_a, x=T_m, ord=order)
B_des_a_d <- splineDesign(knots_a, x=T_d, ord=order)
}else{
knots_a <- c(min_T,max_T)
B_des_a_m <- splineDesign(knots_a, x=T_m, ord=order)
B_des_a_d <- splineDesign(knots_a, x=T_d, ord=order)
}
if(mod[2]=='d')
{
order <- 3
if(knot_c < 3)
{stop('The number of interior knots must be no less than 3.')}
}else
{
order <- 1
}
if(mod[2]=='d')
{
if(loc[2]=='e')
{
knots_c <- seq(from=min_T, to=max_T, length.out=knot_c)
interval_c <- knots_c[2] - knots_c[1]
knots_c <- c(c(min_T-interval_c*2,min_T-interval_c), knots_c)
knots_c <- c(knots_c, c(max_T+interval_c,max_T+interval_c*2))
}else{
knots_c <- quantile(unique(T_m,T_d), probs = seq(from=0,to=1,length.out=knot_c))
knots_c <- c(knots_c[1], knots_c[1], knots_c)
knots_c <- c(knots_c, knots_c[knot_c+2], knots_c[knot_c+2])
}
B_des_c_m <- splineDesign(knots_c, x=T_m, ord=order)
B_des_c_d <- splineDesign(knots_c, x=T_d, ord=order)
}else{
knots_c <- c(min(T_m, T_d),max(T_m, T_d))
B_des_c_m <- splineDesign(knots_c, x=T_m, ord=order)
B_des_c_d <- splineDesign(knots_c, x=T_d, ord=order)
}
if(mod[3]=='d')
{
order <- 3
if(knot_e < 3)
{stop('The number of interior knots must be no less than 3.')}
}else
{
order <- 1
}
if(mod[3]=='d')
{
if(loc[3]=='e')
{
knots_e <- seq(from=min_T, to=max_T, length.out=knot_e)
interval_e <- knots_e[2] - knots_e[1]
knots_e <- c(c(min_T-interval_e*2,min_T-interval_e), knots_e)
knots_e <- c(knots_e, c(max_T+interval_e,max_T+interval_e*2))
}else{
knots_e <- quantile(unique(T_m,T_d), probs = seq(from=0,to=1,length.out=knot_e))
knots_e <- c(knots_e[1], knots_e[1], knots_e)
knots_e <- c(knots_e, knots_e[knot_e+2], knots_e[knot_e+2])
}
B_des_e_m <- splineDesign(knots_e, x=T_m, ord=order)
B_des_e_d <- splineDesign(knots_e, x=T_d, ord=order)
}else{
knots_e <- c(min(T_m, T_d),max(T_m, T_d))
B_des_e_m <- splineDesign(knots_e, x=T_m, ord=order)
B_des_e_d <- splineDesign(knots_e, x=T_d, ord=order)
}
n_c <- ncol(B_des_c_m)
n_a <- ncol(B_des_a_m)
n_e <- ncol(B_des_e_m)
init_a <- rep(init[1],n_a)
init_c <- rep(init[2],n_c)
init_e <- rep(init[3],n_e)
up_a <- up_c <- up_e <- 10
lo_a <- lo_c <- -50
lo_e <- -15
if(mod[1]=='n')
{
up_a <- lo_a <- -50
init_a <- -50
}
if(mod[1]=='c')
{
up_a <- 20
lo_a <- -50
}
if(mod[2]=='n')
{
up_c <- lo_c <- -50
init_c <- -50
}
if(mod[2]=='c')
{
up_c <- 20
lo_c <- -50
}
if(mod[3]=='c')
{
up_e <- 20
lo_e <- -10
}
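  # Jointly maximize the log-likelihood over all spline coefficients (log-variance
  # scale) under box constraints with L-BFGS-B; components switched off via mod = 'n'
  # are pinned at -50, i.e. a variance of essentially zero.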
result <- optim(c(init_a,init_c,init_e), loglik_AtCtEt_esp, gr_AtCtEt_esp, pheno_m = matrix(pheno_m), pheno_d = matrix(pheno_d), B_des_a_m = B_des_a_m, B_des_a_d = B_des_a_d, B_des_c_m = B_des_c_m, B_des_c_d = B_des_c_d, B_des_e_m = B_des_e_m, B_des_e_d = B_des_e_d,lower = c(rep(lo_a, n_a),rep(lo_c, n_c),rep(lo_e, n_e)), upper = c(rep(up_a, n_a),rep(up_c, n_c),rep(up_e, n_e)), method = "L-BFGS-B", hessian = TRUE, control=list(maxit = 3000))
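  # Optionally restart the optimization from random initial values to reduce the
  # risk of converging to a local optimum, keeping the best solution found.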
if(robust>0)
{
for(i in 1:ceiling(robust))
{
init <- runif(n_a+n_c+n_e,min=init_min,max=init_max)
if(mod[1]!='n')
{init_a <- init[1:n_a]}
if(mod[2]!='n')
{init_c <- init[(n_a+1):(n_a+n_c)]}
init_e <- init[(n_a+n_c+1):(n_a+n_c+n_e)]
result_r <- optim(c(init_a,init_c,init_e), loglik_AtCtEt_esp, gr_AtCtEt_esp, pheno_m = matrix(pheno_m), pheno_d = matrix(pheno_d), B_des_a_m = B_des_a_m, B_des_a_d = B_des_a_d, B_des_c_m = B_des_c_m, B_des_c_d = B_des_c_d, B_des_e_m = B_des_e_m, B_des_e_d = B_des_e_d,lower = c(rep(lo_a, n_a),rep(lo_c, n_c),rep(lo_e, n_e)), upper = c(rep(up_a, n_a),rep(up_c, n_c),rep(up_e, n_e)), method = "L-BFGS-B", hessian = TRUE, control=list(maxit = 3000))
if(result_r$value < result$value)
{
result <- result_r
}
}
}
res_a <- result$par[1:n_a]
res_c <- result$par[(1+n_a):(n_c+n_a)]
res_e <- result$par[(1+n_a+n_c):(n_e+n_c+n_a)]
hes <- .Call('hessian_AtCtEt_esp_c', res_a, res_c, res_e, matrix(pheno_m), matrix(pheno_d), B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, B_des_e_m, B_des_e_d)
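  # The C routine returns the upper-triangular entries of the Hessian in row-major
  # order; reassemble them into the full symmetric matrix below.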
n_t <- n_a+n_c+n_e
hes_m <- matrix(0, n_t, n_t)
k <- 1
for(i in 1:n_t)
{
for(j in i:n_t)
{
hes_m[i,j] <- hes[k]
k <- k + 1
}
}
hes_m_t <- t(hes_m)
diag(hes_m_t) <- 0
hes_m <- hes_m_t + hes_m
if(mod[1]=='n')
{res_a <- -Inf}
if(mod[2]=='n')
{res_c <- -Inf}
AtCtEt_model <- list(n_beta_a=n_a, n_beta_c=n_c, n_beta_e=n_e, beta_a=res_a, beta_c=res_c, beta_e=res_e, hessian_ap=result$hessian, hessian=hes_m, con=result$convergence, lik=result$value, knots_a =knots_a, knots_c = knots_c, knots_e = knots_e, min_t = min_T, max_t = max_T, boot = NULL )
class(AtCtEt_model) <- 'AtCtEt_model'
if(boot==TRUE)
{
boot_res <- AtCtEt_boot(res = AtCtEt_model, mod, data_m, data_d, knot_a, knot_c, knot_e, loc, B=num_b,alpha=0.05,m=500)
AtCtEt_model$boot <- boot_res
}
return(invisible(AtCtEt_model))
}
| /scratch/gouwar.j/cran-all/cranData/ACEt/R/AtCtEt.R |
AtCtEt_boot <- function(res, model, data_m, data_d, knot_a, knot_c, knot_e, loc, B=100,alpha=0.05,m=500)
{
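  # Parametric bootstrap: simulate twin data from the fitted variance curves,
  # refit the model on each replicate, and form pointwise percentile confidence
  # intervals for the A, C and E curves and for heritability.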
spline.main <- sp.spline.estimator(data_m, data_d, model, knot_a, knot_c, knot_e, loc, m)
spline.boots_a <- matrix(NA,m,B)
spline.boots_c <- matrix(NA,m,B)
spline.boots_e <- matrix(NA,m,B)
spline.boots_h <- matrix(NA,m,B)
b_c <- res$beta_c
b_a <- res$beta_a
b_e <- res$beta_e
order <- 3
if(res$n_beta_a==1)
{order <- 1}
B_des_a_m <- splineDesign(res$knots_a, x=data_m[,3], ord=order)
B_des_a_d <- splineDesign(res$knots_a, x=data_d[,3], ord=order)
order <- 3
if(res$n_beta_c==1)
{order <- 1}
B_des_c_m <- splineDesign(res$knots_c, x=data_m[,3], ord=order)
B_des_c_d <- splineDesign(res$knots_c, x=data_d[,3], ord=order)
order <- 3
if(res$n_beta_e==1)
{order <- 1}
B_des_e_m <- splineDesign(res$knots_e, x=data_m[,3], ord=order)
B_des_e_d <- splineDesign(res$knots_e, x=data_d[,3], ord=order)
num_m <- nrow(data_m)
num_d <- nrow(data_d)
for(i in 1:B)
{
pheno_mr <- matrix(0, num_m,2)
for(j in 1:num_m)
{
var_ct <- sum(B_des_c_m[j,]*b_c)
var_at <- sum(B_des_a_m[j,]*b_a)
var_et <- sum(B_des_e_m[j,]*b_e)
#if(res$n_beta_a>1)
#{var_at <- exp(var_at)}
var_at <- exp(var_at)
#if(res$n_beta_c>1)
#{var_ct <- exp(var_ct)}
var_ct <- exp(var_ct)
#if(res$n_beta_e>1)
#{var_et <- exp(var_et)}
var_et <- exp(var_et)
# var_ct <- var_c
sigma <- matrix(c(var_at+var_ct+var_et,var_at+var_ct,var_at+var_ct,var_at+var_ct+var_et),2,2)
pheno_mr[j,1:2] <- mvrnorm(1, rep(0,2), sigma)
}
pheno_dr <- matrix(0, num_d,2)
for(j in 1:num_d)
{
var_ct <- sum(B_des_c_d[j,]*b_c)
var_at <- sum(B_des_a_d[j,]*b_a)
var_et <- sum(B_des_e_d[j,]*b_e)
#if(res$n_beta_a>1)
#{var_at <- exp(var_at)}
var_at <- exp(var_at)
#if(res$n_beta_c>1)
#{var_ct <- exp(var_ct)}
var_ct <- exp(var_ct)
#if(res$n_beta_e>1)
#{var_et <- exp(var_et)}
var_et <- exp(var_et)
sigma <- matrix(c(var_at+var_ct+var_et,0.5*var_at+var_ct,0.5*var_at+var_ct,var_at+var_ct+var_et),2,2)
pheno_dr[j,1:2] <- mvrnorm(1, rep(0,2), sigma)
}
spline.boots <- sp.spline.estimator(cbind(pheno_mr,data_m[,3]),cbind(pheno_dr, data_d[,3]),model=model, knot_a, knot_c, knot_e, loc, m=m)
spline.boots_a[,i] <- spline.boots$est_a
spline.boots_c[,i] <- spline.boots$est_c
spline.boots_e[,i] <- spline.boots$est_e
spline.boots_h[,i] <- spline.boots$est_a/(spline.boots$est_a+spline.boots$est_c+spline.boots$est_e)
}
# Result has m rows and B columns
#cis.lower_a <- 2*spline.main$est_a - apply(spline.boots_a,1,quantile,probs=1-alpha/2)
#cis.upper_a <- 2*spline.main$est_a - apply(spline.boots_a,1,quantile,probs=alpha/2)
#cis.lower_c <- 2*spline.main$est_c - apply(spline.boots_c,1,quantile,probs=1-alpha/2)
#cis.upper_c <- 2*spline.main$est_c - apply(spline.boots_c,1,quantile,probs=alpha/2)
#cis.lower_e <- 2*spline.main$est_e - apply(spline.boots_e,1,quantile,probs=1-alpha/2)
#cis.upper_e <- 2*spline.main$est_e - apply(spline.boots_e,1,quantile,probs=alpha/2)
# percentile method
cis.upper_a <- apply(spline.boots_a,1,quantile,probs=1-alpha/2)
cis.lower_a <- apply(spline.boots_a,1,quantile,probs=alpha/2)
cis.upper_c <- apply(spline.boots_c,1,quantile,probs=1-alpha/2)
cis.lower_c <- apply(spline.boots_c,1,quantile,probs=alpha/2)
cis.upper_e <- apply(spline.boots_e,1,quantile,probs=1-alpha/2)
cis.lower_e <- apply(spline.boots_e,1,quantile,probs=alpha/2)
cis.upper_h <- apply(spline.boots_h,1,quantile,probs=1-alpha/2)
cis.lower_h <- apply(spline.boots_h,1,quantile,probs=alpha/2)
# return(list(lower.ci_a=cis.lower_a,upper.ci_a=cis.upper_a,lower.ci_c=cis.lower_c,upper.ci_c=cis.upper_c, lower.ci_e=cis.lower_e,upper.ci_e=cis.upper_e, x=seq(from=res$min_t, to=res$max_t, length.out=m),boots_a=spline.boots_a, boots_c=spline.boots_c, boots_e=spline.boots_e))
return(list(lower.ci_a=cis.lower_a,upper.ci_a=cis.upper_a,lower.ci_c=cis.lower_c,upper.ci_c=cis.upper_c, lower.ci_e=cis.lower_e,upper.ci_e=cis.upper_e, lower.ci_h=cis.lower_h, upper.ci_h=cis.upper_h, x=seq(from=res$min_t, to=res$max_t, length.out=m)))
}
sp.spline.estimator <- function(data_m, data_d, model, knot_a, knot_c, knot_e, loc, m) {
  # Fit the AtCtEt spline model and evaluate the estimated variance curves on an m-point grid
fit <- AtCtEt(data_m, data_d, model, knot_a, knot_c, knot_e, loc)
T_m <- rep(data_m[,3], each=2)
T_d <- rep(data_d[,3], each=2)
eval.grid <- seq(from=min(T_m, T_d), to=max(T_m, T_d), length.out=m)
order <- 3
if(fit$n_beta_a==1)
{order <- 1}
bb_a <- splineDesign(fit$knots_a, x = eval.grid, ord=order, outer.ok = TRUE)
order <- 3
if(fit$n_beta_c==1)
{order <- 1}
bb_c <- splineDesign(fit$knots_c, x = eval.grid, ord=order, outer.ok = TRUE)
order <- 3
if(fit$n_beta_e==1)
{order <- 1}
bb_e <- splineDesign(fit$knots_e, x = eval.grid, ord=order, outer.ok = TRUE)
est_a <- bb_a%*%fit$beta_a
#if(fit$n_beta_a>1)
#{est_a <- exp(est_a)}
est_a <- exp(est_a)
est_c <- bb_c%*%fit$beta_c
#if(fit$n_beta_c>1)
#{est_c <- exp(est_c)}
est_c <- exp(est_c)
est_e <- bb_e%*%fit$beta_e
#if(fit$n_beta_e>1)
#{est_e <- exp(est_e)}
est_e <- exp(est_e)
return(list(e = fit$beta_e, c = fit$beta_c, a = fit$beta_a, est_c = est_c,est_a = est_a, est_e = est_e))
}
AtDtEt_boot <- function(res, model, data_m, data_d, knot_a, knot_d, knot_e, loc, B=100,alpha=0.05,m=500)
{
spline.main <- sp.spline.estimator_ade(data_m, data_d, model, knot_a, knot_d, knot_e, loc, m)
spline.boots_a <- matrix(NA,m,B)
spline.boots_d <- matrix(NA,m,B)
spline.boots_e <- matrix(NA,m,B)
spline.boots_h <- matrix(NA,m,B)
b_d <- res$beta_d
b_a <- res$beta_a
b_e <- res$beta_e
order <- 3
if(res$n_beta_a==1)
{order <- 1}
B_des_a_m <- splineDesign(res$knots_a, x=data_m[,3], ord=order)
B_des_a_d <- splineDesign(res$knots_a, x=data_d[,3], ord=order)
order <- 3
if(res$n_beta_d==1)
{order <- 1}
B_des_d_m <- splineDesign(res$knots_d, x=data_m[,3], ord=order)
B_des_d_d <- splineDesign(res$knots_d, x=data_d[,3], ord=order)
order <- 3
if(res$n_beta_e==1)
{order <- 1}
B_des_e_m <- splineDesign(res$knots_e, x=data_m[,3], ord=order)
B_des_e_d <- splineDesign(res$knots_e, x=data_d[,3], ord=order)
num_m <- nrow(data_m)
num_d <- nrow(data_d)
for(i in 1:B)
{
pheno_mr <- matrix(0, num_m,2)
for(j in 1:num_m)
{
var_dt <- sum(B_des_d_m[j,]*b_d)
var_at <- sum(B_des_a_m[j,]*b_a)
var_et <- sum(B_des_e_m[j,]*b_e)
#if(res$n_beta_a>1)
#{var_at <- exp(var_at)}
var_at <- exp(var_at)
#if(res$n_beta_c>1)
#{var_ct <- exp(var_ct)}
var_dt <- exp(var_dt)
#if(res$n_beta_e>1)
#{var_et <- exp(var_et)}
var_et <- exp(var_et)
# var_ct <- var_c
sigma <- matrix(c(var_at+var_dt+var_et,var_at+var_dt,var_at+var_dt,var_at+var_dt+var_et),2,2)
pheno_mr[j,1:2] <- mvrnorm(1, rep(0,2), sigma)
}
pheno_dr <- matrix(0, num_d,2)
for(j in 1:num_d)
{
var_dt <- sum(B_des_d_d[j,]*b_d)
var_at <- sum(B_des_a_d[j,]*b_a)
var_et <- sum(B_des_e_d[j,]*b_e)
#if(res$n_beta_a>1)
#{var_at <- exp(var_at)}
var_at <- exp(var_at)
#if(res$n_beta_c>1)
#{var_ct <- exp(var_ct)}
var_dt <- exp(var_dt)
#if(res$n_beta_e>1)
#{var_et <- exp(var_et)}
var_et <- exp(var_et)
sigma <- matrix(c(var_at+var_dt+var_et,0.5*var_at+0.25*var_dt,0.5*var_at+0.25*var_dt,var_at+var_dt+var_et),2,2)
pheno_dr[j,1:2] <- mvrnorm(1, rep(0,2), sigma)
}
spline.boots <- sp.spline.estimator_ade(cbind(pheno_mr,data_m[,3]),cbind(pheno_dr, data_d[,3]),model=model, knot_a, knot_d, knot_e, loc, m=m)
spline.boots_a[,i] <- spline.boots$est_a
spline.boots_d[,i] <- spline.boots$est_d
spline.boots_e[,i] <- spline.boots$est_e
spline.boots_h[,i] <- (spline.boots$est_a+spline.boots$est_d)/(spline.boots$est_a+spline.boots$est_d+spline.boots$est_e)
}
# percentile method
cis.upper_a <- apply(spline.boots_a,1,quantile,probs=1-alpha/2)
cis.lower_a <- apply(spline.boots_a,1,quantile,probs=alpha/2)
cis.upper_d <- apply(spline.boots_d,1,quantile,probs=1-alpha/2)
cis.lower_d <- apply(spline.boots_d,1,quantile,probs=alpha/2)
cis.upper_e <- apply(spline.boots_e,1,quantile,probs=1-alpha/2)
cis.lower_e <- apply(spline.boots_e,1,quantile,probs=alpha/2)
cis.upper_h <- apply(spline.boots_h,1,quantile,probs=1-alpha/2)
cis.lower_h <- apply(spline.boots_h,1,quantile,probs=alpha/2)
# return(list(lower.ci_a=cis.lower_a,upper.ci_a=cis.upper_a,lower.ci_c=cis.lower_c,upper.ci_c=cis.upper_c, lower.ci_e=cis.lower_e,upper.ci_e=cis.upper_e, x=seq(from=res$min_t, to=res$max_t, length.out=m),boots_a=spline.boots_a, boots_c=spline.boots_c, boots_e=spline.boots_e))
return(list(lower.ci_a=cis.lower_a,upper.ci_a=cis.upper_a,lower.ci_d=cis.lower_d,upper.ci_d=cis.upper_d, lower.ci_e=cis.lower_e,upper.ci_e=cis.upper_e, lower.ci_h=cis.lower_h, upper.ci_h=cis.upper_h, x=seq(from=res$min_t, to=res$max_t, length.out=m)))
}
sp.spline.estimator_ade <- function(data_m, data_d, model, knot_a, knot_d, knot_e, loc, m) {
  # Fit the AtDtEt spline model and evaluate the estimated variance curves on an m-point grid
fit <- AtDtEt(data_m, data_d, model, knot_a, knot_d, knot_e, loc)
T_m <- rep(data_m[,3], each=2)
T_d <- rep(data_d[,3], each=2)
eval.grid <- seq(from=min(T_m, T_d), to=max(T_m, T_d), length.out=m)
order <- 3
if(fit$n_beta_a==1)
{order <- 1}
bb_a <- splineDesign(fit$knots_a, x = eval.grid, ord=order, outer.ok = TRUE)
order <- 3
if(fit$n_beta_d==1)
{order <- 1}
bb_d <- splineDesign(fit$knots_d, x = eval.grid, ord=order, outer.ok = TRUE)
order <- 3
if(fit$n_beta_e==1)
{order <- 1}
bb_e <- splineDesign(fit$knots_e, x = eval.grid, ord=order, outer.ok = TRUE)
# back-transform the log-scale spline fits to variance curves
est_a <- exp(bb_a%*%fit$beta_a)
est_d <- exp(bb_d%*%fit$beta_d)
est_e <- exp(bb_e%*%fit$beta_e)
return(list(e = fit$beta_e, d = fit$beta_d, a = fit$beta_a, est_d = est_d,est_a = est_a, est_e = est_e))
}
# ==== end of file: R/AtCtEt_boot.R ====
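# Likelihood and gradient wrappers for the penalized-spline ('epsp') AtCtEt
# model. loglik_AtCtEt_epsp/gr_AtCtEt_epsp treat the smoothing variances
# (var_b_a, var_b_c, var_b_e) as the free parameters; the *_g variants treat
# the spline coefficients (beta_a, beta_c, beta_e) as free. All numerical
# work is delegated to compiled C routines via .Call.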
loglik_AtCtEt_epsp <-
function(param, pheno_m, pheno_d, B_des_a_m, B_des_a_d, beta_a, D_a, B_des_c_m, B_des_c_d, beta_c, D_c, B_des_e_m, B_des_e_d, beta_e, D_e)
{
var_b_a <- param[1]
var_b_c <- param[2]
var_b_e <- param[3]
nll <- .Call('loglik_AtCtEt_epsp_c', var_b_a, var_b_c, var_b_e, pheno_m, pheno_d, B_des_a_m, B_des_a_d, beta_a, D_a, B_des_c_m, B_des_c_d, beta_c, D_c, B_des_e_m, B_des_e_d, beta_e, D_e)
return(nll)
}
loglik_AtCtEt_epsp_g <-
function(param, pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, B_des_e_m, B_des_e_d, var_b_a, var_b_c, var_b_e, D_a, D_c, D_e)
{
beta_a <- param[1:ncol(D_a)]
beta_c <- param[(ncol(D_a)+1):(ncol(D_a)+ncol(D_c))]
beta_e <- param[(ncol(D_a)+ncol(D_c)+1):(ncol(D_a)+ncol(D_c)+ncol(D_e))]
nll <- .Call('loglik_AtCtEt_epsp_g_c', beta_a, beta_c, beta_e, pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, B_des_e_m, B_des_e_d, var_b_a, var_b_c, var_b_e, D_a, D_c, D_e)
return(nll)
}
gr_AtCtEt_epsp <-
function(param, pheno_m, pheno_d, B_des_a_m, B_des_a_d, beta_a, D_a, B_des_c_m, B_des_c_d, beta_c, D_c, B_des_e_m, B_des_e_d, beta_e, D_e)
{
var_b_a <- param[1]
var_b_c <- param[2]
var_b_e <- param[3]
d <- .Call('gr_AtCtEt_epsp_c', var_b_a, var_b_c, var_b_e, pheno_m, pheno_d, B_des_a_m, B_des_a_d, beta_a, D_a, B_des_c_m, B_des_c_d, beta_c, D_c, B_des_e_m, B_des_e_d, beta_e, D_e)
return(d)
}
gr_AtCtEt_epsp_g <-
function(param, pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, B_des_e_m, B_des_e_d, var_b_a, var_b_c, var_b_e, D_a, D_c, D_e)
{
beta_a <- param[1:ncol(D_a)]
beta_c <- param[(ncol(D_a)+1):(ncol(D_a)+ncol(D_c))]
beta_e <- param[(ncol(D_a)+ncol(D_c)+1):(ncol(D_a)+ncol(D_c)+ncol(D_e))]
d <- .Call('gr_AtCtEt_epsp_g_c', beta_a, beta_c, beta_e, pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, B_des_e_m, B_des_e_d, var_b_a, var_b_c, var_b_e, D_a, D_c, D_e)
return(d)
}
# ==== end of file: R/AtCtEt_epsp.R ====
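# Likelihood and gradient wrappers for the unpenalized spline ('esp')
# versions of the AtCtEt and AtDtEt models; the spline coefficients are the
# only free parameters and all numerical work is done in C via .Call.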
loglik_AtCtEt_esp <-
function(param, pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, B_des_e_m, B_des_e_d)
{
beta_a <- param[1:ncol(B_des_a_m)]
beta_c <- param[(1+ncol(B_des_a_m)):(ncol(B_des_c_m)+ncol(B_des_a_m))]
beta_e <- param[(1+ncol(B_des_a_m)+ncol(B_des_c_m)):(ncol(B_des_e_m)+ncol(B_des_c_m)+ncol(B_des_a_m))]
nll <- .Call('loglik_AtCtEt_esp_c', beta_a, beta_c, beta_e, pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, B_des_e_m, B_des_e_d)
return(nll)
}
gr_AtCtEt_esp <-
function(param, pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, B_des_e_m, B_des_e_d)
{
beta_a <- param[1:ncol(B_des_a_m)]
beta_c <- param[(1+ncol(B_des_a_m)):(ncol(B_des_c_m)+ncol(B_des_a_m))]
beta_e <- param[(1+ncol(B_des_a_m)+ncol(B_des_c_m)):(ncol(B_des_e_m)+ncol(B_des_c_m)+ncol(B_des_a_m))]
d <- .Call('gr_AtCtEt_esp_c', beta_a, beta_c, beta_e, pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, B_des_e_m, B_des_e_d)
return(d)
}
loglik_AtDtEt_esp <-
function(param, pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_d_m, B_des_d_d, B_des_e_m, B_des_e_d)
{
beta_a <- param[1:ncol(B_des_a_m)]
beta_d <- param[(1+ncol(B_des_a_m)):(ncol(B_des_d_m)+ncol(B_des_a_m))]
beta_e <- param[(1+ncol(B_des_a_m)+ncol(B_des_d_m)):(ncol(B_des_e_m)+ncol(B_des_d_m)+ncol(B_des_a_m))]
nll <- .Call('loglik_AtDtEt_esp_c', beta_a, beta_d, beta_e, pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_d_m, B_des_d_d, B_des_e_m, B_des_e_d)
return(nll)
}
gr_AtDtEt_esp <-
function(param, pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_d_m, B_des_d_d, B_des_e_m, B_des_e_d)
{
beta_a <- param[1:ncol(B_des_a_m)]
beta_d <- param[(1+ncol(B_des_a_m)):(ncol(B_des_d_m)+ncol(B_des_a_m))]
beta_e <- param[(1+ncol(B_des_a_m)+ncol(B_des_d_m)):(ncol(B_des_e_m)+ncol(B_des_d_m)+ncol(B_des_a_m))]
d <- .Call('gr_AtDtEt_esp_c', beta_a, beta_d, beta_e, pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_d_m, B_des_d_d, B_des_e_m, B_des_e_d)
return(d)
}
# ==== end of file: R/AtCtEt_esp.R ====
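# AtCtEtp: fits the penalized B-spline ACE(t) model. Each of the A, C and E
# variance components can be modelled as a dynamic spline ('d'), a linear
# trend ('l') or a constant ('c'). For dynamic components, the spline
# coefficients and their smoothing variances are updated by alternating
# L-BFGS-B optimizations until the change in the (negative) log-likelihood
# falls below 'eps'; 'robust' adds restarts from random initial values.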
AtCtEtp <-
function(data_m, data_d, knot_a=8, knot_c=8, knot_e=8, eps = 0.1, mod=c('d','d','d'), robust=0)
{
if((is.vector(mod)==FALSE) | (length(mod)!=3) )
{stop('The \'mod\' argument must be a vector of length 3.')}
if(!(mod[1] %in% c('d','c','l')))
{stop('The \'mod\' argument for the A component must be \'d\'(dynamic), \'c\'(constant) or \'l\'(linear).')}
if(!(mod[2] %in% c('d','c','l')))
{stop('The \'mod\' argument for the C component must be \'d\'(dynamic), \'c\'(constant) or \'l\'(linear).')}
if(!(mod[3] %in% c('d','c','l')))
{stop('The \'mod\' argument for the E component must be \'d\'(dynamic), \'c\'(constant) or \'l\'(linear).')}
if((knot_a<3)|(knot_c<3)|(knot_e<3)|(knot_a>40)|(knot_c>40)|(knot_e>40))
{stop('The number of knots must be an integer within [3, 40].')}
num_m <- nrow(data_m)*2
num_d <- nrow(data_d)*2
pheno_m <- matrix(NA, num_m, 1)
pheno_d <- matrix(NA, num_d, 1)
pheno_m[seq(from=1, to=num_m, by=2),1] <- data_m[,1]
pheno_m[seq(from=2, to=num_m, by=2),1] <- data_m[,2]
pheno_d[seq(from=1, to=num_d, by=2),1] <- data_d[,1]
pheno_d[seq(from=2, to=num_d, by=2),1] <- data_d[,2]
T_m <- rep(data_m[,3],each=2)
T_d <- rep(data_d[,3],each=2)
mag <- var(pheno_m)
init_max <- log(mag)
init_min <- log(mag) - abs(log(mag))*1.2
limit <- 12
limit_e <- 10
# low_var <- mag/100000
low_var <- 1e-06
upp_var <- 100
var_ran_up <- 3
var_ran_lo <- 1
eps <- eps*2
order <- 3
penal_a <- 2
penal_c <- 2
penal_e <- 2
if(mod[1]=='c')
{penal_a <- 1}
if(mod[2]=='c')
{penal_c <- 1}
if(mod[3]=='c')
{penal_e <- 1}
t_int <- max(c(T_m,T_d))-min(c(T_m,T_d))
l_m_1 <- (max(c(T_m,T_d))-T_m)/t_int
l_m_2 <- (T_m-min(c(T_m,T_d)))/t_int
l_d_1 <- (max(c(T_m,T_d))-T_d)/t_int
l_d_2 <- (T_d-min(c(T_m,T_d)))/t_int
if(mod[1]=='d')
{
delta_a <- matrix(0, knot_a+order-2-penal_a, knot_a+order-2)
for(i in 1:nrow(delta_a))
{
if(penal_a==2)
{delta_a[i, i:(i+2)] <- c(1,-2,1)}else{
delta_a[i, i:(i+1)] <- c(1,-1)
}
}
D_a <- t(delta_a)%*%delta_a
knots_a <- seq(from=min(T_m, T_d), to=max(T_m, T_d), length.out=knot_a)
interval_a <- knots_a[2] - knots_a[1]
knots_a <- c(c(min(T_m, T_d)-interval_a*2,min(T_m, T_d)-interval_a), knots_a)
knots_a <- c(knots_a, c(max(T_m, T_d)+interval_a,max(T_m, T_d)+interval_a*2))
B_des_a_m <- splineDesign(knots_a, x=T_m, ord=order)
B_des_a_d <- splineDesign(knots_a, x=T_d, ord=order)
ei_a <- eigen(D_a)
B_des_a_m <- B_des_a_m%*%ei_a$vectors
B_des_a_d <- B_des_a_d%*%ei_a$vectors
D_a <- diag(c(ei_a$values[1:(length(ei_a$values)-2)],0,0))
}else{
if(mod[1]=='l')
{
D_a <- matrix(0,2,2)
B_des_a_m <- matrix(NA, num_m, 2)
B_des_a_m[,1] <- l_m_1
B_des_a_m[,2] <- l_m_2
B_des_a_d <- matrix(NA, num_d, 2)
B_des_a_d[,1] <- l_d_1
B_des_a_d[,2] <- l_d_2
knots_a <- c(min(T_m, T_d),max(T_m, T_d))
}else{
D_a <- matrix(0,1,1)
B_des_a_m <- matrix(1, num_m, 1)
B_des_a_d <- matrix(1, num_d, 1)
knots_a <- c(min(T_m, T_d))
}
}
if(mod[2]=='d')
{
delta_c <- matrix(0, knot_c+order-2-penal_c, knot_c+order-2)
for(i in 1:nrow(delta_c))
{
if(penal_c==2)
{delta_c[i, i:(i+2)] <- c(1,-2,1)}else{
delta_c[i, i:(i+1)] <- c(1,-1)
}
}
D_c <- t(delta_c)%*%delta_c
knots_c <- seq(from=min(T_m, T_d), to=max(T_m, T_d), length.out=knot_c)
interval_c <- knots_c[2] - knots_c[1]
knots_c <- c(c(min(T_m, T_d)-interval_c*2,min(T_m, T_d)-interval_c), knots_c)
knots_c <- c(knots_c, c(max(T_m, T_d)+interval_c,max(T_m, T_d)+interval_c*2))
B_des_c_m <- splineDesign(knots_c, x=T_m, ord=order)
B_des_c_d <- splineDesign(knots_c, x=T_d, ord=order)
ei_c <- eigen(D_c)
B_des_c_m <- B_des_c_m%*%ei_c$vectors
B_des_c_d <- B_des_c_d%*%ei_c$vectors
D_c <- diag(c(ei_c$values[1:(length(ei_c$values)-2)],0,0))
}else{
if(mod[2]=='l')
{
B_des_c_m <- matrix(NA, num_m, 2)
B_des_c_m[,1] <- l_m_1
B_des_c_m[,2] <- l_m_2
B_des_c_d <- matrix(NA, num_d, 2)
B_des_c_d[,1] <- l_d_1
B_des_c_d[,2] <- l_d_2
D_c <- matrix(0,2,2)
knots_c <- c(min(T_m, T_d),max(T_m, T_d))
}else{
B_des_c_m <- matrix(1, num_m, 1)
B_des_c_d <- matrix(1, num_d, 1)
D_c <- matrix(0,1,1)
knots_c <- c(min(T_m, T_d))
}
}
if(mod[3]=='d')
{
delta_e <- matrix(0, knot_e+order-2-penal_e, knot_e+order-2)
for(i in 1:nrow(delta_e))
{
if(penal_e==2)
{delta_e[i, i:(i+2)] <- c(1,-2,1)}else{
delta_e[i, i:(i+1)] <- c(1,-1)
}
}
D_e <- t(delta_e)%*%delta_e
knots_e <- seq(from=min(T_m, T_d), to=max(T_m, T_d), length.out=knot_e)
interval_e <- knots_e[2] - knots_e[1]
knots_e <- c(c(min(T_m, T_d)-interval_e*2,min(T_m, T_d)-interval_e), knots_e)
knots_e <- c(knots_e, c(max(T_m, T_d)+interval_e,max(T_m, T_d)+interval_e*2))
B_des_e_m <- splineDesign(knots_e, x=T_m, ord=order)
B_des_e_d <- splineDesign(knots_e, x=T_d, ord=order)
ei_e <- eigen(D_e)
B_des_e_m <- B_des_e_m%*%ei_e$vectors
B_des_e_d <- B_des_e_d%*%ei_e$vectors
D_e <- diag(c(ei_e$values[1:(length(ei_e$values)-2)],0,0))
}else{
if(mod[3]=='l')
{
B_des_e_m <- matrix(NA, num_m, 2)
B_des_e_m[,1] <- l_m_1
B_des_e_m[,2] <- l_m_2
B_des_e_d <- matrix(NA, num_d, 2)
B_des_e_d[,1] <- l_d_1
B_des_e_d[,2] <- l_d_2
D_e <- matrix(0,2,2)
knots_e <- c(min(T_m, T_d),max(T_m, T_d))
}else{
B_des_e_m <- matrix(1, num_m, 1)
B_des_e_d <- matrix(1, num_d, 1)
D_e <- matrix(0,1,1)
knots_e <- c(min(T_m, T_d))
}
}
n_a <- ncol(B_des_a_m)
n_c <- ncol(B_des_c_m)
n_e <- ncol(B_des_e_m)
lower <- 0
var_b_a <- runif(1,min=var_ran_lo*abs(log(mag)),max=var_ran_up*abs(log(mag)))
if(mod[1] %in% c('l','c'))
{
var_b_a <- lower
n_a <- ifelse(mod[1]=='l',2,1)
}
var_b_c <- runif(1,min=var_ran_lo*abs(log(mag)),max=var_ran_up*abs(log(mag)))
if(mod[2] %in% c('l','c'))
{
var_b_c <- lower
n_c <- ifelse(mod[2]=='l',2,1)
}
var_b_e <- runif(1,min=var_ran_lo*abs(log(mag)),max=var_ran_up*abs(log(mag)))
if(mod[3] %in% c('l','c'))
{
var_b_e <- lower
n_e <- ifelse(mod[3]=='l',2,1)
}
beta_a <- runif(n_a,min=-0.2,max=0.2)
beta_c <- runif(n_c,min=-0.2,max=0.2)
beta_e <- runif(n_e,min=-0.2,max=0.2)
lik <- 100000
lik_pre <- 200000
liks <- c()
betas <- matrix(0,0,n_a+n_c+n_e)
vars <- matrix(0,0,3)
hessians <- matrix(0,0,9)
if((mod[1]!='d')&(mod[2]!='d')&(mod[3]!='d'))
{
low_a <- -15
upp_a <- 15
low_c <- -15
upp_c <- 15
low_e <- -8
upp_e <- 10
result <- optim(c(beta_a,beta_c,beta_e), loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=0, var_b_c=0, var_b_e=0, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep(low_a,n_a),rep(low_c,n_c),rep(low_e,n_e)), upper = c(rep(upp_a,n_a),rep(upp_c,n_c),rep(upp_e,n_e)), method = "L-BFGS-B", control=list(maxit = 3000))
beta_a <- result$par[1:n_a]
beta_c <- result$par[(1+n_a):(n_a+n_c)]
beta_e <- result$par[(1+n_a+n_c):(n_a+n_c+n_e)]
lik <- loglik_AtCtEt_epsp(c(0,0,0), pheno_m=pheno_m, pheno_d=pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, beta_a=beta_a, D_a=D_a, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, beta_c=beta_c, D_c=D_c, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, beta_e=beta_e, D_e=D_e)
AtCtEtp_model <- list(D_a = D_a, D_c = D_c, D_e = D_e, pheno_m = pheno_m, pheno_d = pheno_d, T_m = T_m, T_d = T_d, knot_a=knots_a, knot_c=knots_c, knot_e=knots_e, beta_a=beta_a, beta_c=beta_c, beta_e=beta_e, con=result$convergence, lik=lik/2, iter=lik/2, var_b_a=lower, var_b_c=lower, var_b_e=lower, mod=mod)
}else{
while(abs(lik-lik_pre)>eps)
{
lik_pre <- lik
if((mod[1]=='d')&(mod[2]=='d')&(mod[3]=='d'))
{
result <- optim(c(beta_a,beta_c,beta_e), loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=var_b_a, var_b_c=var_b_c, var_b_e=var_b_e, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep((-1)*limit,n_a+n_c),rep((-1)*limit_e,n_e)), upper = rep(limit,n_a+n_c+n_e), method = "L-BFGS-B", control=list(maxit = 3000))
betas <- rbind(betas, result$par)
beta_a <- result$par[1:n_a]
beta_c <- result$par[(1+n_a):(n_a+n_c)]
beta_e <- result$par[(1+n_a+n_c):(n_a+n_c+n_e)]
result <- optim(c(var_b_a,var_b_c,var_b_e), loglik_AtCtEt_epsp, gr_AtCtEt_epsp, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, beta_a=beta_a, beta_c=beta_c, beta_e = beta_e, D_a=D_a, D_c=D_c, D_e=D_e, lower = rep(low_var,3), upper = rep(upp_var,3), method = "L-BFGS-B", control=list(maxit = 3000), hessian=TRUE)
}else
{
v_a_t <- var_b_a
v_c_t <- var_b_c
v_e_t <- var_b_e
if(mod[1]!='d')
{v_a_t <- 0}
if(mod[2]!='d')
{v_c_t <- 0}
if(mod[3]!='d')
{v_e_t <- 0}
low_a <- (-1)*limit
upp_a <- limit
low_c <- (-1)*limit
upp_c <- limit
low_e <- (-1)*limit_e
upp_e <- limit_e
result <- optim(c(beta_a,beta_c,beta_e), loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=v_a_t, var_b_c=v_c_t, var_b_e=v_e_t, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep(low_a,n_a),rep(low_c,n_c),rep(low_e,n_e)), upper = c(rep(upp_a,n_a),rep(upp_c,n_c),rep(upp_e,n_e)), method = "L-BFGS-B", control=list(maxit = 3000))
beta_a <- result$par[1:n_a]
beta_c <- result$par[(1+n_a):(n_a+n_c)]
beta_e <- result$par[(1+n_a+n_c):(n_a+n_c+n_e)]
betas <- rbind(betas,c(beta_a,beta_c,beta_e))
low_a <- low_c <- low_e <- low_var
upp_a <- upp_c <- upp_e <- upp_var
if((mod[1]=='l')|(mod[1]=='c'))
{low_a <- upp_a <- lower}
if((mod[2]=='l')|(mod[2]=='c'))
{low_c <- upp_c <- lower}
if((mod[3]=='l')|(mod[3]=='c'))
{low_e <- upp_e <- lower}
result <- optim(c(var_b_a,var_b_c,var_b_e), loglik_AtCtEt_epsp, gr_AtCtEt_epsp, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, beta_a=beta_a, beta_c=beta_c, beta_e=beta_e, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(low_a, low_c, low_e), upper = c(upp_a, upp_c, upp_e), method = "L-BFGS-B", control=list(maxit = 3000), hessian=TRUE)
}
vars <- rbind(vars, result$par)
hessians <- rbind(hessians, c(result$hessian))
var_b_a <- result$par[1]
var_b_c <- result$par[2]
var_b_e <- result$par[3]
lik <- result$value
liks <- c(liks, result$value)
}
min_i <- match(min(liks), liks)
if(robust>0)
{
for(rob in 1:ceiling(robust))
{
lik <- 100000
lik_pre <- 200000
liks_r <- c()
betas_r <- matrix(0,0,n_a+n_c+n_e)
vars_r <- matrix(0,0,3)
hessians_r <- matrix(0,0,9)
beta_a <- runif(n_a,min=init_min,max=init_max)
beta_c <- runif(n_c,min=init_min,max=init_max)
beta_e <- runif(n_e,min=init_min,max=init_max)
if(var_b_a!=0)
{var_b_a <- runif(1,min=var_ran_lo*abs(log(mag)),max=var_ran_up*abs(log(mag)))}
if(var_b_c!=0)
{var_b_c <- runif(1,min=var_ran_lo*abs(log(mag)),max=var_ran_up*abs(log(mag)))}
if(var_b_e!=0)
{var_b_e <- runif(1,min=var_ran_lo*abs(log(mag)),max=var_ran_up*abs(log(mag)))}
while(abs(lik-lik_pre)>eps)
{
lik_pre <- lik
if((mod[1]=='d')&(mod[2]=='d')&(mod[3]=='d'))
{
result <- optim(c(beta_a,beta_c,beta_e), loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=var_b_a, var_b_c=var_b_c, var_b_e=var_b_e, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep((-1)*limit,n_a+n_c),rep((-1)*limit_e,n_e)), upper = rep(limit,n_a+n_c+n_e), method = "L-BFGS-B", control=list(maxit = 3000))
betas_r <- rbind(betas_r, result$par)
beta_a <- result$par[1:n_a]
beta_c <- result$par[(1+n_a):(n_a+n_c)]
beta_e <- result$par[(1+n_a+n_c):(n_a+n_c+n_e)]
result <- optim(c(var_b_a,var_b_c,var_b_e), loglik_AtCtEt_epsp, gr_AtCtEt_epsp, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, beta_a=beta_a, beta_c=beta_c, beta_e = beta_e, D_a=D_a, D_c=D_c, D_e=D_e, lower = rep(low_var,3), upper = rep(upp_var,3), method = "L-BFGS-B", control=list(maxit = 3000), hessian=TRUE)
}else
{
v_a_t <- var_b_a
v_c_t <- var_b_c
v_e_t <- var_b_e
if(mod[1]!='d')
{v_a_t <- 0}
if(mod[2]!='d')
{v_c_t <- 0}
if(mod[3]!='d')
{v_e_t <- 0}
low_a <- (-1)*limit
upp_a <- limit
low_c <- (-1)*limit
upp_c <- limit
low_e <- (-1)*limit_e
upp_e <- limit_e
result <- optim(c(beta_a,beta_c,beta_e), loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=v_a_t, var_b_c=v_c_t, var_b_e=v_e_t, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep(low_a,n_a),rep(low_c,n_c),rep(low_e,n_e)), upper = c(rep(upp_a,n_a),rep(upp_c,n_c),rep(upp_e,n_e)), method = "L-BFGS-B", control=list(maxit = 3000))
beta_a <- result$par[1:n_a]
beta_c <- result$par[(1+n_a):(n_a+n_c)]
beta_e <- result$par[(1+n_a+n_c):(n_a+n_c+n_e)]
betas_r <- rbind(betas_r,c(beta_a,beta_c,beta_e))
low_a <- low_c <- low_e <- low_var
upp_a <- upp_c <- upp_e <- upp_var
if((mod[1]=='l')|(mod[1]=='c'))
{low_a <- upp_a <- lower}
if((mod[2]=='l')|(mod[2]=='c'))
{low_c <- upp_c <- lower}
if((mod[3]=='l')|(mod[3]=='c'))
{low_e <- upp_e <- lower}
result <- optim(c(var_b_a,var_b_c,var_b_e), loglik_AtCtEt_epsp, gr_AtCtEt_epsp, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, beta_a=beta_a, beta_c=beta_c, beta_e=beta_e, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(low_a, low_c, low_e), upper = c(upp_a, upp_c, upp_e), method = "L-BFGS-B", control=list(maxit = 3000), hessian=TRUE)
}
vars_r <- rbind(vars_r, result$par)
hessians_r <- rbind(hessians_r, c(result$hessian))
var_b_a <- result$par[1]
var_b_c <- result$par[2]
var_b_e <- result$par[3]
lik <- result$value
liks_r <- c(liks_r, result$value)
}
if(min(liks_r)<min(liks))
{
liks <- liks_r
vars <- vars_r
betas <- betas_r
hessians <- hessians_r
}
}
min_i <- match(min(liks), liks)
}
if((mod[1]=='d')&(mod[2]=='d')&(mod[3]=='d'))
{
result <- optim(betas[min_i,], loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = matrix(pheno_m), pheno_d = matrix(pheno_d), B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=vars[min_i,1], var_b_c=vars[min_i,2], var_b_e=vars[min_i,3], D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep((-1)*limit,n_a+n_c),rep((-1)*limit_e,n_e)), upper = rep(limit,n_c+n_a+n_e), method = "L-BFGS-B", control=list(maxit = 3000))
}else{
v_a_t <- vars[min_i,1]
v_c_t <- vars[min_i,2]
v_e_t <- vars[min_i,3]
if(mod[1]!='d')
{v_a_t <- lower}
if(mod[2]!='d')
{v_c_t <- lower}
if(mod[3]!='d')
{v_e_t <- lower}
low_a <- (-1)*limit
upp_a <- limit
low_c <- (-1)*limit
upp_c <- limit
low_e <- (-1)*limit_e
upp_e <- limit_e
result <- optim(betas[min_i,], loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=v_a_t, var_b_c=v_c_t, var_b_e=v_e_t, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep(low_a,n_a),rep(low_c,n_c),rep(low_e,n_e)), upper = c(rep(upp_a,n_a),rep(upp_c,n_c),rep(upp_e,n_e)), method = "L-BFGS-B", control=list(maxit = 3000))
}
beta_a <- result$par[1:n_a]
beta_c <- result$par[(1+n_a):(n_a+n_c)]
beta_e <- result$par[(1+n_a+n_c):(n_a+n_c+n_e)]
if(mod[1]=='d')
{
D_a <- t(delta_a)%*%delta_a
beta_a <- ei_a$vectors%*%beta_a
}
if(mod[2]=='d')
{
D_c <- t(delta_c)%*%delta_c
beta_c <- ei_c$vectors%*%beta_c
}
if(mod[3]=='d')
{
D_e <- t(delta_e)%*%delta_e
beta_e <- ei_e$vectors%*%beta_e
}
AtCtEtp_model <- list(D_a = D_a, D_c = D_c, D_e=D_e, pheno_m = pheno_m, pheno_d = pheno_d, T_m = T_m, T_d = T_d, knot_a=knots_a, knot_c=knots_c, knot_e=knots_e, beta_a=beta_a, beta_c=beta_c, beta_e=beta_e, con=result$convergence, lik=min(liks)/2, iter=(liks)/2, var_b_a=vars[min_i,1], var_b_c=vars[min_i,2], var_b_e=vars[min_i,3], mod=mod, hessian = matrix(hessians[min_i,],3,3)/2)
}
class(AtCtEtp_model) <- 'AtCtEtp_model'
return(invisible(AtCtEtp_model))
}
# ==== end of file: R/AtCtEtp.R ====
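# A minimal standalone sketch of the difference penalty built inside
# AtCtEtp() above: for a second-order penalty each row of delta holds
# (1, -2, 1), and D = t(delta) %*% delta has a two-dimensional null space,
# which is why the two smallest eigenvalues are replaced by exact zeros
# after eigen(). The knot count below is illustrative only.
if (FALSE) {
knot <- 8; order <- 3; penal <- 2
delta <- matrix(0, knot + order - 2 - penal, knot + order - 2)
for (i in 1:nrow(delta)) delta[i, i:(i + 2)] <- c(1, -2, 1)
D <- t(delta) %*% delta
round(eigen(D)$values, 8)  # the last two eigenvalues are (numerically) zero
}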
AtCtEtp_2 <-
function(data_m, data_d, knot_a=8, knot_c=8, knot_e=8, eps = 0.1, mod=c('d','d','d'), robust=2)
{
if((is.vector(mod)==FALSE) | (length(mod)!=3) )
{stop('The model parameter must be a vector of length 3.')}
if(!(mod[1] %in% c('d','c','l')))
{stop('The \'mod\' parameter for the A component must be \'d\'(dynamic), \'c\'(constant) or \'l\'(linear).')}
if(!(mod[2] %in% c('d','c','l')))
{stop('The \'mod\' parameter for the C component must be \'d\'(dynamic), \'c\'(constant) or \'l\'(linear).')}
if(!(mod[3] %in% c('d','c','l')))
{stop('The \'mod\' parameter for the E component must be \'d\'(dynamic), \'c\'(constant) or \'l\'(linear).')}
if((knot_a<3)|(knot_c<3)|(knot_e<3))
{stop('The number of knots must be larger than 2.')}
num_m <- nrow(data_m)*2
num_d <- nrow(data_d)*2
pheno_m <- matrix(NA, num_m, 1)
pheno_d <- matrix(NA, num_d, 1)
pheno_m[seq(from=1, to=num_m, by=2),1] <- data_m[,1]
pheno_m[seq(from=2, to=num_m, by=2),1] <- data_m[,2]
pheno_d[seq(from=1, to=num_d, by=2),1] <- data_d[,1]
pheno_d[seq(from=2, to=num_d, by=2),1] <- data_d[,2]
T_m <- rep(data_m[,3],each=2)
T_d <- rep(data_d[,3],each=2)
mag <- var(pheno_m)
init_max <- log(mag)
init_min <- log(mag) - abs(log(mag))*1.2
limit <- 12
limit_e <- 10
low_var <- 1e-06
upp_var <- 100
eps <- eps*2
order <- 3
penal_a <- 2
penal_c <- 2
penal_e <- 2
if(mod[1]=='c')
{penal_a <- 1}
if(mod[2]=='c')
{penal_c <- 1}
if(mod[3]=='c')
{penal_e <- 1}
t_int <- max(c(T_m,T_d))-min(c(T_m,T_d))
delta_a <- matrix(0, knot_a+order-2-penal_a, knot_a+order-2)
for(i in 1:nrow(delta_a))
{
if(penal_a==2)
{delta_a[i, i:(i+2)] <- c(1,-2,1)}else{
delta_a[i, i:(i+1)] <- c(1,-1)
}
}
D_a <- t(delta_a)%*%delta_a
knots_a <- seq(from=min(T_m, T_d), to=max(T_m, T_d), length.out=knot_a)
interval_a <- knots_a[2] - knots_a[1]
knots_a <- c(c(min(T_m, T_d)-interval_a*2,min(T_m, T_d)-interval_a), knots_a)
knots_a <- c(knots_a, c(max(T_m, T_d)+interval_a,max(T_m, T_d)+interval_a*2))
B_des_a_m <- splineDesign(knots_a, x=T_m, ord=order)
B_des_a_d <- splineDesign(knots_a, x=T_d, ord=order)
ei_a <- eigen(D_a)
B_des_a_m <- B_des_a_m%*%ei_a$vectors
B_des_a_d <- B_des_a_d%*%ei_a$vectors
D_a <- diag(c(ei_a$values[1:(length(ei_a$values)-2)],0,0))
if(mod[1]=='l')
{
D_a <- matrix(0,2,2)
B_des_a_m <- B_des_a_m[,(ncol(B_des_a_m)-1):ncol(B_des_a_m)]
B_des_a_d <- B_des_a_d[,(ncol(B_des_a_d)-1):ncol(B_des_a_d)]
# knots_a <- c(min(T_m, T_d),max(T_m, T_d))
}
if(mod[1]=='c')
{
D_a <- matrix(0,1,1)
B_des_a_m <- matrix(1, num_m, 1)
B_des_a_d <- matrix(1, num_d, 1)
# knots_a <- c(min(T_m, T_d))
}
delta_c <- matrix(0, knot_c+order-2-penal_c, knot_c+order-2)
for(i in 1:nrow(delta_c))
{
if(penal_c==2)
{delta_c[i, i:(i+2)] <- c(1,-2,1)}else{
delta_c[i, i:(i+1)] <- c(1,-1)
}
}
D_c <- t(delta_c)%*%delta_c
knots_c <- seq(from=min(T_m, T_d), to=max(T_m, T_d), length.out=knot_c)
interval_c <- knots_c[2] - knots_c[1]
knots_c <- c(c(min(T_m, T_d)-interval_c*2,min(T_m, T_d)-interval_c), knots_c)
knots_c <- c(knots_c, c(max(T_m, T_d)+interval_c,max(T_m, T_d)+interval_c*2))
B_des_c_m <- splineDesign(knots_c, x=T_m, ord=order)
B_des_c_d <- splineDesign(knots_c, x=T_d, ord=order)
ei_c <- eigen(D_c)
B_des_c_m <- B_des_c_m%*%ei_c$vectors
B_des_c_d <- B_des_c_d%*%ei_c$vectors
D_c <- diag(c(ei_c$values[1:(length(ei_c$values)-2)],0,0))
if(mod[2]=='l')
{
D_c <- matrix(0,2,2)
B_des_c_m <- B_des_c_m[,(ncol(B_des_c_m)-1):ncol(B_des_c_m)]
B_des_c_d <- B_des_c_d[,(ncol(B_des_c_d)-1):ncol(B_des_c_d)]
# knots_c <- c(min(T_m, T_d),max(T_m, T_d))
}
if(mod[2]=='c')
{
B_des_c_m <- matrix(1, num_m, 1)
B_des_c_d <- matrix(1, num_d, 1)
D_c <- matrix(0,1,1)
# knots_c <- c(min(T_m, T_d))
}
delta_e <- matrix(0, knot_e+order-2-penal_e, knot_e+order-2)
for(i in 1:nrow(delta_e))
{
if(penal_e==2)
{delta_e[i, i:(i+2)] <- c(1,-2,1)}else{
delta_e[i, i:(i+1)] <- c(1,-1)
}
}
D_e <- t(delta_e)%*%delta_e
knots_e <- seq(from=min(T_m, T_d), to=max(T_m, T_d), length.out=knot_e)
interval_e <- knots_e[2] - knots_e[1]
knots_e <- c(c(min(T_m, T_d)-interval_e*2,min(T_m, T_d)-interval_e), knots_e)
knots_e <- c(knots_e, c(max(T_m, T_d)+interval_e,max(T_m, T_d)+interval_e*2))
B_des_e_m <- splineDesign(knots_e, x=T_m, ord=order)
B_des_e_d <- splineDesign(knots_e, x=T_d, ord=order)
ei_e <- eigen(D_e)
B_des_e_m <- B_des_e_m%*%ei_e$vectors
B_des_e_d <- B_des_e_d%*%ei_e$vectors
D_e <- diag(c(ei_e$values[1:(length(ei_e$values)-2)],0,0))
if(mod[3]=='l')
{
D_e <- matrix(0,2,2)
B_des_e_m <- B_des_e_m[,(ncol(B_des_e_m)-1):ncol(B_des_e_m)]
B_des_e_d <- B_des_e_d[,(ncol(B_des_e_d)-1):ncol(B_des_e_d)]
# knots_e <- c(min(T_m, T_d),max(T_m, T_d))
}
if(mod[3]=='c')
{
B_des_e_m <- matrix(1, num_m, 1)
B_des_e_d <- matrix(1, num_d, 1)
D_e <- matrix(0,1,1)
# knots_e <- c(min(T_m, T_d))
}
n_a <- ncol(B_des_a_m)
n_c <- ncol(B_des_c_m)
n_e <- ncol(B_des_e_m)
lower <- 0
var_b_a <- runif(1,min=0.5*abs(log(mag)),max=1.5*abs(log(mag)))
if(mod[1] %in% c('l','c'))
{
var_b_a <- lower
n_a <- ifelse(mod[1]=='l',2,1)
}
var_b_c <- runif(1,min=0.5*abs(log(mag)),max=1.5*abs(log(mag)))
if(mod[2] %in% c('l','c'))
{
var_b_c <- lower
n_c <- ifelse(mod[2]=='l',2,1)
}
var_b_e <- runif(1,min=0.5*abs(log(mag)),max=1.5*abs(log(mag)))
if(mod[3] %in% c('l','c'))
{
var_b_e <- lower
n_e <- ifelse(mod[3]=='l',2,1)
}
beta_a <- runif(n_a,min=-1,max=1)
beta_c <- runif(n_c,min=-1,max=1)
beta_e <- runif(n_e,min=-1,max=1)
lik <- 100000
lik_pre <- 200000
liks <- c()
betas <- matrix(0,0,n_a+n_c+n_e)
vars <- matrix(0,0,3)
if((mod[1]!='d')&(mod[2]!='d')&(mod[3]!='d'))
{
low_a <- -15
upp_a <- 15
low_c <- -15
upp_c <- 15
low_e <- -9
upp_e <- 9
result <- optim(c(beta_a,beta_c,beta_e), loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=0, var_b_c=0, var_b_e=0, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep(low_a,n_a),rep(low_c,n_c),rep(low_e,n_e)), upper = c(rep(upp_a,n_a),rep(upp_c,n_c),rep(upp_e,n_e)), method = "L-BFGS-B", control=list(maxit = 3000))
if(robust>0)
{
for(j in 1:ceiling(robust))
{
init <- runif(n_a+n_c+n_e, min=init_min, max=init_max)
result_r <- optim(init, loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=0, var_b_c=0, var_b_e=0, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep(low_a,n_a),rep(low_c,n_c),rep(low_e,n_e)), upper = c(rep(upp_a,n_a),rep(upp_c,n_c),rep(upp_e,n_e)), method = "L-BFGS-B", control=list(maxit = 3000))
if(result_r$value < result$value)
{
result <- result_r
}
}
}
beta_a <- result$par[1:n_a]
beta_c <- result$par[(1+n_a):(n_a+n_c)]
beta_e <- result$par[(1+n_a+n_c):(n_a+n_c+n_e)]
lik <- loglik_AtCtEt_epsp(c(0,0,0), pheno_m=pheno_m, pheno_d=pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, beta_a=beta_a, D_a=D_a, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, beta_c=beta_c, D_c=D_c, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, beta_e=beta_e, D_e=D_e)
AtCtEtp_model <- list(D_a = D_a, D_c = D_c, D_e = D_e, pheno_m = pheno_m, pheno_d = pheno_d, T_m = T_m, T_d = T_d, knot_a=knots_a, knot_c=knots_c, knot_e=knots_e, beta_a=beta_a, beta_c=beta_c, beta_e=beta_e, con=result$convergence, lik=lik/2, iter=lik/2, var_b_a=lower, var_b_c=lower, var_b_e=lower, mod=mod, bf = lik/2)
}else{
while(abs(lik-lik_pre)>eps)
{
lik_pre <- lik
if((mod[1]=='d')&(mod[2]=='d')&(mod[3]=='d'))
{
result <- optim(c(beta_a,beta_c,beta_e), loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=var_b_a, var_b_c=var_b_c, var_b_e=var_b_e, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep((-1)*limit,n_a+n_c),rep((-1)*limit_e,n_e)), upper = c(rep(limit,n_a+n_c),rep(limit_e,n_e)), method = "L-BFGS-B", control=list(maxit = 3000))
betas <- rbind(betas, result$par)
beta_a <- result$par[1:n_a]
beta_c <- result$par[(1+n_a):(n_a+n_c)]
beta_e <- result$par[(1+n_a+n_c):(n_a+n_c+n_e)]
result <- optim(c(var_b_a,var_b_c,var_b_e), loglik_AtCtEt_epsp, gr_AtCtEt_epsp, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, beta_a=beta_a, beta_c=beta_c, beta_e = beta_e, D_a=D_a, D_c=D_c, D_e=D_e, lower = rep(low_var,3), upper = rep(upp_var,3), method = "L-BFGS-B", control=list(maxit = 3000))
}else
{
v_a_t <- var_b_a
v_c_t <- var_b_c
v_e_t <- var_b_e
if(mod[1]!='d')
{v_a_t <- 0}
if(mod[2]!='d')
{v_c_t <- 0}
if(mod[3]!='d')
{v_e_t <- 0}
low_a <- (-1)*limit
upp_a <- limit
low_c <- (-1)*limit
upp_c <- limit
low_e <- rep((-1)*limit_e,n_e)
upp_e <- rep(limit_e,n_e)
result <- optim(c(beta_a,beta_c,beta_e), loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=v_a_t, var_b_c=v_c_t, var_b_e=v_e_t, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep(low_a,n_a),rep(low_c,n_c),low_e), upper = c(rep(upp_a,n_a),rep(upp_c,n_c),upp_e), method = "L-BFGS-B", control=list(maxit = 3000))
beta_a <- result$par[1:n_a]
beta_c <- result$par[(1+n_a):(n_a+n_c)]
beta_e <- result$par[(1+n_a+n_c):(n_a+n_c+n_e)]
betas <- rbind(betas,c(beta_a,beta_c,beta_e))
low_a <- low_c <- low_var
low_e <- low_var
upp_a <- upp_c <- upp_e <- upp_var
if((mod[1]=='l')|(mod[1]=='c'))
{low_a <- upp_a <- lower}
if((mod[2]=='l')|(mod[2]=='c'))
{low_c <- upp_c <- lower}
if((mod[3]=='l')|(mod[3]=='c'))
{low_e <- upp_e <- lower}
result <- optim(c(var_b_a,var_b_c,var_b_e), loglik_AtCtEt_epsp, gr_AtCtEt_epsp, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, beta_a=beta_a, beta_c=beta_c, beta_e=beta_e, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(low_a, low_c, low_e), upper = c(upp_a, upp_c, upp_e), method = "L-BFGS-B", control=list(maxit = 3000))
}
vars <- rbind(vars, result$par)
var_b_a <- result$par[1]
var_b_c <- result$par[2]
var_b_e <- result$par[3]
lik <- result$value
liks <- c(liks, result$value)
}
min_i <- match(min(liks), liks)
if(robust>0)
{
for(rob in 1:ceiling(robust))
{
lik <- 100000
lik_pre <- 200000
liks_r <- c()
betas_r <- matrix(0,0,n_a+n_c+n_e)
vars_r <- matrix(0,0,3)
beta_a <- runif(n_a,min=init_min,max=init_max)
beta_c <- runif(n_c,min=init_min,max=init_max)
beta_e <- runif(n_e,min=init_min,max=init_max)
if(var_b_a!=0)
{var_b_a <- runif(1,min=0.5*abs(log(mag)),max=1.5*abs(log(mag)))}
if(var_b_c!=0)
{var_b_c <- runif(1,min=0.5*abs(log(mag)),max=1.5*abs(log(mag)))}
if(var_b_e!=0)
{var_b_e <- runif(1,min=0.5*abs(log(mag)),max=1.5*abs(log(mag)))}
while(abs(lik-lik_pre)>eps)
{
lik_pre <- lik
if((mod[1]=='d')&(mod[2]=='d')&(mod[3]=='d'))
{
result <- optim(c(beta_a,beta_c,beta_e), loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=var_b_a, var_b_c=var_b_c, var_b_e=var_b_e, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep(-12,n_a+n_c),rep(-9,n_e)), upper = c(rep(12,n_a+n_c),rep(9,n_e)), method = "L-BFGS-B", control=list(maxit = 3000))
betas_r <- rbind(betas_r, result$par)
beta_a <- result$par[1:n_a]
beta_c <- result$par[(1+n_a):(n_a+n_c)]
beta_e <- result$par[(1+n_a+n_c):(n_a+n_c+n_e)]
result <- optim(c(var_b_a,var_b_c,var_b_e), loglik_AtCtEt_epsp, gr_AtCtEt_epsp, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, beta_a=beta_a, beta_c=beta_c, beta_e = beta_e, D_a=D_a, D_c=D_c, D_e=D_e, lower = rep(low_var,3), upper = rep(upp_var,3), method = "L-BFGS-B", control=list(maxit = 3000))
}else
{
v_a_t <- var_b_a
v_c_t <- var_b_c
v_e_t <- var_b_e
if(mod[1]!='d')
{v_a_t <- 0}
if(mod[2]!='d')
{v_c_t <- 0}
if(mod[3]!='d')
{v_e_t <- 0}
low_a <- (-1)*limit
upp_a <- limit
low_c <- (-1)*limit
upp_c <- limit
low_e <- rep((-1)*limit_e,n_e)
upp_e <- rep(limit_e,n_e)
result <- optim(c(beta_a,beta_c,beta_e), loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=v_a_t, var_b_c=v_c_t, var_b_e=v_e_t, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep(low_a,n_a),rep(low_c,n_c),low_e), upper = c(rep(upp_a,n_a),rep(upp_c,n_c),upp_e), method = "L-BFGS-B", control=list(maxit = 3000))
beta_a <- result$par[1:n_a]
beta_c <- result$par[(1+n_a):(n_a+n_c)]
beta_e <- result$par[(1+n_a+n_c):(n_a+n_c+n_e)]
betas_r <- rbind(betas_r,c(beta_a,beta_c,beta_e))
low_a <- low_c <- low_var
low_e <- low_var
upp_a <- upp_c <- upp_e <- upp_var
if((mod[1]=='l')|(mod[1]=='c'))
{low_a <- upp_a <- lower}
if((mod[2]=='l')|(mod[2]=='c'))
{low_c <- upp_c <- lower}
if((mod[3]=='l')|(mod[3]=='c'))
{low_e <- upp_e <- lower}
result <- optim(c(var_b_a,var_b_c,var_b_e), loglik_AtCtEt_epsp, gr_AtCtEt_epsp, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, beta_a=beta_a, beta_c=beta_c, beta_e=beta_e, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(low_a, low_c, low_e), upper = c(upp_a, upp_c, upp_e), method = "L-BFGS-B", control=list(maxit = 3000))
}
vars_r <- rbind(vars_r, result$par)
var_b_a <- result$par[1]
var_b_c <- result$par[2]
var_b_e <- result$par[3]
lik <- result$value
liks_r <- c(liks_r, result$value)
}
if(min(liks_r)<min(liks))
{
liks <- liks_r
vars <- vars_r
betas <- betas_r
}
}
min_i <- match(min(liks), liks)
}
if((mod[1]=='d')&(mod[2]=='d')&(mod[3]=='d'))
{
result <- optim(betas[min_i,], loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = matrix(pheno_m), pheno_d = matrix(pheno_d), B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=vars[min_i,1], var_b_c=vars[min_i,2], var_b_e=vars[min_i,3], D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep(-12,n_a+n_c),rep(-10,n_e)), upper = c(rep(12,n_a+n_c),rep(10,n_e)), method = "L-BFGS-B", control=list(maxit = 3000), hessian = TRUE)
}else{
v_a_t <- vars[min_i,1]
v_c_t <- vars[min_i,2]
v_e_t <- vars[min_i,3]
if(mod[1]!='d')
{v_a_t <- lower}
if(mod[2]!='d')
{v_c_t <- lower}
if(mod[3]!='d')
{v_e_t <- lower}
low_a <- (-1)*limit
upp_a <- limit
low_c <- (-1)*limit
upp_c <- limit
low_e <- rep((-1)*limit_e,n_e)
upp_e <- rep(limit_e,n_e)
result <- optim(betas[min_i,], loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = pheno_m, pheno_d = pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=v_a_t, var_b_c=v_c_t, var_b_e=v_e_t, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep(low_a,n_a),rep(low_c,n_c),low_e), upper = c(rep(upp_a,n_a),rep(upp_c,n_c),upp_e), method = "L-BFGS-B", control=list(maxit = 3000), hessian = TRUE)
}
beta_a <- result$par[1:n_a]
beta_c <- result$par[(1+n_a):(n_a+n_c)]
beta_e <- result$par[(1+n_a+n_c):(n_a+n_c+n_e)]
lik_a <- lik_c <- lik_e <- 0
if(mod[1]=='d')
{
D_a <- t(delta_a)%*%delta_a
#beta_a <- ei_a$vectors%*%beta_a
lik_a <- log(prod(ei_a$values[1:(length(ei_a$values)-2)]))
}
if(mod[2]=='d')
{
D_c <- t(delta_c)%*%delta_c
#beta_c <- ei_c$vectors%*%beta_c
lik_c <- log(prod(ei_c$values[1:(length(ei_c$values)-2)]))
}
if(mod[3]=='d')
{
D_e <- t(delta_e)%*%delta_e
#beta_e <- ei_e$vectors%*%beta_e
lik_e <- log(prod(ei_e$values[1:(length(ei_e$values)-2)]))
}
AtCtEtp_model <- list(D_a = D_a, D_c = D_c, D_e=D_e, pheno_m = pheno_m, pheno_d = pheno_d, T_m = T_m, T_d = T_d, knot_a=knots_a, knot_c=knots_c, knot_e=knots_e, beta_a=beta_a, beta_c=beta_c, beta_e=beta_e, con=result$convergence, lik=(min(liks)-lik_a-lik_c-lik_e)/2, iter=(liks)/2, var_b_a=vars[min_i,1], var_b_c=vars[min_i,2], var_b_e=vars[min_i,3], mod=mod, hessian = result$hessian)
}
class(AtCtEtp_model) <- 'AtCtEtp_model'
return(invisible(AtCtEtp_model))
}
# ==== end of file: R/AtCtEtp_2.R ====
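# AtDtEt: fits the ADE(t) model with B-spline curves for the additive (A),
# dominance (D) and unique-environment (E) variance components. 'mod'
# selects a dynamic ('d'), constant ('c') or absent ('n') component; 'loc'
# chooses equidistant ('e') or quantile-based interior knots; 'boot'
# additionally computes pointwise bootstrap confidence intervals.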
AtDtEt <-
function(data_m, data_d, mod = c('d','d','d'), knot_a=5, knot_d=5, knot_e=5, loc = c('e','e','e'), boot=FALSE, num_b = 100, init = rep(0,3), robust = 0)
{
pheno_m <- c(t(data_m[,1:2]))
pheno_d <- c(t(data_d[,1:2]))
T_m <- rep(data_m[,3], each=2)
T_d <- rep(data_d[,3], each=2)
mag <- var(pheno_m)
init_max <- log(mag)
init_min <- log(mag) - abs(log(mag))*1.3
if((is.vector(mod)==FALSE) | (length(mod)!=3) )
{stop('The \'mod\' argument must be a vector of length 3.')}
if(!(mod[1] %in% c('d','c','n')))
{stop('The \'mod\' argument for the A component must be \'d\'(dynamic), \'c\'(constant) or \'n\'(NA).')}
if(!(mod[2] %in% c('d','c','n')))
{stop('The \'mod\' argument for the D component must be \'d\'(dynamic), \'c\'(constant) or \'n\'(NA).')}
if(!(mod[3] %in% c('d','c')))
{stop('The \'mod\' argument for the E component must be \'d\'(dynamic) or \'c\'(constant).')}
if((is.vector(loc)==FALSE) | (length(loc)!=3) )
{stop('The \'loc\' argument must be a vector of length 3.')}
order <- 3
if(mod[1]=='d')
{
order <- 3
if(knot_a < 3)
{stop('The number of interior knots must be no less than 3.')}
}else
{
order <- 1
}
#knot <- 8
min_T <- min(T_m, T_d)
max_T <- max(T_m, T_d)
if(mod[1]=='d')
{
if(loc[1]=='e')
{
knots_a <- seq(from=min_T, to=max_T, length.out=knot_a)
interval_a <- knots_a[2] - knots_a[1]
knots_a <- c(c(min_T-interval_a*2,min_T-interval_a), knots_a)
knots_a <- c(knots_a, c(max_T+interval_a,max_T+interval_a*2))
}else{
knots_a <- quantile(unique(c(T_m,T_d)), probs = seq(from=0,to=1,length.out=knot_a))
knots_a <- c(knots_a[1], knots_a[1], knots_a)
knots_a <- c(knots_a, knots_a[knot_a+2], knots_a[knot_a+2])
}
B_des_a_m <- splineDesign(knots_a, x=T_m, ord=order)
B_des_a_d <- splineDesign(knots_a, x=T_d, ord=order)
}else{
knots_a <- c(min_T,max_T)
B_des_a_m <- splineDesign(knots_a, x=T_m, ord=order)
B_des_a_d <- splineDesign(knots_a, x=T_d, ord=order)
}
if(mod[2]=='d')
{
order <- 3
if(knot_d < 3)
{stop('The number of interior knots must be no less than 3.')}
}else
{
order <- 1
}
if(mod[2]=='d')
{
if(loc[2]=='e')
{
knots_d <- seq(from=min_T, to=max_T, length.out=knot_d)
interval_c <- knots_d[2] - knots_d[1]
knots_d <- c(c(min_T-interval_c*2,min_T-interval_c), knots_d)
knots_d <- c(knots_d, c(max_T+interval_c,max_T+interval_c*2))
}else{
knots_d <- quantile(unique(c(T_m,T_d)), probs = seq(from=0,to=1,length.out=knot_d))
knots_d <- c(knots_d[1], knots_d[1], knots_d)
knots_d <- c(knots_d, knots_d[knot_d+2], knots_d[knot_d+2])
}
B_des_d_m <- splineDesign(knots_d, x=T_m, ord=order)
B_des_d_d <- splineDesign(knots_d, x=T_d, ord=order)
}else{
knots_d <- c(min(T_m, T_d),max(T_m, T_d))
B_des_d_m <- splineDesign(knots_d, x=T_m, ord=order)
B_des_d_d <- splineDesign(knots_d, x=T_d, ord=order)
}
if(mod[3]=='d')
{
order <- 3
if(knot_e < 3)
{stop('The number of interior knots must be no less than 3.')}
}else
{
order <- 1
}
if(mod[3]=='d')
{
if(loc[3]=='e')
{
knots_e <- seq(from=min_T, to=max_T, length.out=knot_e)
interval_e <- knots_e[2] - knots_e[1]
knots_e <- c(c(min_T-interval_e*2,min_T-interval_e), knots_e)
knots_e <- c(knots_e, c(max_T+interval_e,max_T+interval_e*2))
}else{
knots_e <- quantile(unique(c(T_m,T_d)), probs = seq(from=0,to=1,length.out=knot_e))
knots_e <- c(knots_e[1], knots_e[1], knots_e)
knots_e <- c(knots_e, knots_e[knot_e+2], knots_e[knot_e+2])
}
B_des_e_m <- splineDesign(knots_e, x=T_m, ord=order)
B_des_e_d <- splineDesign(knots_e, x=T_d, ord=order)
}else{
knots_e <- c(min(T_m, T_d),max(T_m, T_d))
B_des_e_m <- splineDesign(knots_e, x=T_m, ord=order)
B_des_e_d <- splineDesign(knots_e, x=T_d, ord=order)
}
n_d <- ncol(B_des_d_m)
n_a <- ncol(B_des_a_m)
n_e <- ncol(B_des_e_m)
init_a <- rep(init[1],n_a)
init_d <- rep(init[2],n_d)
init_e <- rep(init[3],n_e)
up_a <- up_d <- up_e <- 10
lo_a <- lo_d <- -50
lo_e <- -15
if(mod[1]=='n')
{
up_a <- lo_a <- -50
init_a <- -50
}
if(mod[1]=='c')
{
up_a <- 20
lo_a <- -50
}
if(mod[2]=='n')
{
up_d <- lo_d <- -50
init_d <- -50
}
if(mod[2]=='c')
{
up_d <- 20
lo_d <- -50
}
if(mod[3]=='c')
{
up_e <- 20
lo_e <- -10
}
result <- optim(c(init_a,init_d,init_e), loglik_AtDtEt_esp, gr_AtDtEt_esp, pheno_m = matrix(pheno_m), pheno_d = matrix(pheno_d), B_des_a_m = B_des_a_m, B_des_a_d = B_des_a_d, B_des_d_m = B_des_d_m, B_des_d_d = B_des_d_d, B_des_e_m = B_des_e_m, B_des_e_d = B_des_e_d,lower = c(rep(lo_a, n_a),rep(lo_d, n_d),rep(lo_e, n_e)), upper = c(rep(up_a, n_a),rep(up_d, n_d),rep(up_e, n_e)), method = "L-BFGS-B", hessian = TRUE, control=list(maxit = 3000))
if(robust>0)
{
for(i in 1:ceiling(robust))
{
init <- runif(n_a+n_d+n_e,min=init_min,max=init_max)
if(mod[1]!='n')
{init_a <- init[1:n_a]}
if(mod[2]!='n')
{init_d <- init[(n_a+1):(n_a+n_d)]}
init_e <- init[(n_a+n_d+1):(n_a+n_d+n_e)]
result_r <- optim(c(init_a,init_d,init_e), loglik_AtDtEt_esp, gr_AtDtEt_esp, pheno_m = matrix(pheno_m), pheno_d = matrix(pheno_d), B_des_a_m = B_des_a_m, B_des_a_d = B_des_a_d, B_des_d_m = B_des_d_m, B_des_d_d = B_des_d_d, B_des_e_m = B_des_e_m, B_des_e_d = B_des_e_d,lower = c(rep(lo_a, n_a),rep(lo_d, n_d),rep(lo_e, n_e)), upper = c(rep(up_a, n_a),rep(up_d, n_d),rep(up_e, n_e)), method = "L-BFGS-B", hessian = TRUE, control=list(maxit = 3000))
if(result_r$value < result$value)
{
result <- result_r
}
}
}
res_a <- result$par[1:n_a]
res_d <- result$par[(1+n_a):(n_d+n_a)]
res_e <- result$par[(1+n_a+n_d):(n_e+n_d+n_a)]
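# The C routine returns the upper triangle of the Hessian row-wise;
# the loop below unpacks it into a full symmetric matrix.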
hes <- .Call('hessian_AtDtEt_esp_c', res_a, res_d, res_e, matrix(pheno_m), matrix(pheno_d), B_des_a_m, B_des_a_d, B_des_d_m, B_des_d_d, B_des_e_m, B_des_e_d)
n_t <- n_a+n_d+n_e
hes_m <- matrix(0, n_t, n_t)
k <- 1
for(i in 1:n_t)
{
for(j in i:n_t)
{
hes_m[i,j] <- hes[k]
k <- k + 1
}
}
hes_m_t <- t(hes_m)
diag(hes_m_t) <- 0
hes_m <- hes_m_t + hes_m
if(mod[1]=='n')
{res_a <- -Inf}
if(mod[2]=='n')
{res_d <- -Inf}
AtDtEt_model <- list(n_beta_a=n_a, n_beta_d=n_d, n_beta_e=n_e, beta_a=res_a, beta_d=res_d, beta_e=res_e, hessian_ap=result$hessian, hessian=hes_m, con=result$convergence, lik=result$value, knots_a=knots_a, knots_d=knots_d, knots_e=knots_e, min_t=min_T, max_t=max_T, boot=NULL)
class(AtDtEt_model) <- 'AtDtEt_model'
if(boot==TRUE)
{
boot_res <- AtDtEt_boot(res = AtDtEt_model, mod, data_m, data_d, knot_a, knot_d, knot_e, loc, B=num_b, alpha=0.05, m=500)
AtDtEt_model$boot <- boot_res
}
return(invisible(AtDtEt_model))
}
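# A minimal standalone sketch of the knot construction used in AtDtEt() with
# loc = 'e': equidistant interior knots padded by two extra knots on each
# side so that order-3 B-splines span [min_T, max_T]. Values are illustrative
# only.
if (FALSE) {
library(splines)
min_T <- 0; max_T <- 10; knot <- 5
knots <- seq(min_T, max_T, length.out = knot)
h <- knots[2] - knots[1]
knots <- c(min_T - 2 * h, min_T - h, knots, max_T + h, max_T + 2 * h)
B <- splineDesign(knots, x = seq(min_T, max_T, by = 0.5), ord = 3)
dim(B)  # length(knots) - ord = 6 basis functions, i.e. knot + order - 2
}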
# ==== end of file: R/AtDtEt.R ====
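# acetp_mcmc: draws MCMC samples of the spline coefficients given a fitted
# acetp object; at present only AtCtEtp_model objects are dispatched (to
# AtCtEtp_mcmc below).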
acetp_mcmc <- function(acetp, iter_num = 10000, sd = 0.1, burnin =1000)
{
if(!(class(acetp) %in% c('AtCtEp_model','AtEtp_model','AtCtEtp_model')))
{
stop('The first parameter must be an acetp object.')
}
if(burnin >= iter_num)
{
stop('The number of burnins must be smaller than the number of MCMC iterations.')
}
if(is(acetp,'AtCtEtp_model'))
{
res <- AtCtEtp_mcmc(acetp, iter_num, sd, burnin)
return(res)
}
# Note: 'AtEtp_model' objects pass the class check above, but no
# AtEtp-specific sampler is dispatched here at present.
}
AtCtEtp_mcmc <-
function(AtCtEtp, iter_num = 10000, sd = 0.1, burnin =1000)
{
if(!is(AtCtEtp,'AtCtEtp_model'))
{
stop('The first parameter must be an object obtained from the AtCtEtp function.')
}
T_m <- AtCtEtp$T_m
num_m <- length(T_m)
T_d <- AtCtEtp$T_d
num_d <- length(T_d)
max_t <- max(c(T_m,T_d))
min_t <- min(c(T_m,T_d))
t_int <- max_t-min_t
l_m_1 <- (max_t-T_m)/t_int
l_m_2 <- (T_m-min_t)/t_int
l_d_1 <- (max_t-T_d)/t_int
l_d_2 <- (T_d-min_t)/t_int
order <- 3
if(length(AtCtEtp$beta_a)>2)
{
B_des_a_m <- splineDesign(AtCtEtp$knot_a, x=AtCtEtp$T_m, ord=order)
B_des_a_d <- splineDesign(AtCtEtp$knot_a, x=AtCtEtp$T_d, ord=order)
}else{
if(length(AtCtEtp$beta_a)==2)
{
B_des_a_m <- matrix(NA, num_m, 2)
B_des_a_m[,1] <- l_m_1
B_des_a_m[,2] <- l_m_2
B_des_a_d <- matrix(NA, num_d, 2)
B_des_a_d[,1] <- l_d_1
B_des_a_d[,2] <- l_d_2
}else{
B_des_a_m <- matrix(1, num_m, 1)
B_des_a_d <- matrix(1, num_d, 1)
}
}
if(length(AtCtEtp$beta_c)>2)
{
B_des_c_m <- splineDesign(AtCtEtp$knot_c, x=AtCtEtp$T_m, ord=order)
B_des_c_d <- splineDesign(AtCtEtp$knot_c, x=AtCtEtp$T_d, ord=order)
}else{
if(length(AtCtEtp$beta_c)==2)
{
B_des_c_m <- matrix(NA, num_m, 2)
B_des_c_m[,1] <- l_m_1
B_des_c_m[,2] <- l_m_2
B_des_c_d <- matrix(NA, num_d, 2)
B_des_c_d[,1] <- l_d_1
B_des_c_d[,2] <- l_d_2
}else{
B_des_c_m <- matrix(1, num_m, 1)
B_des_c_d <- matrix(1, num_d, 1)
}
}
if(length(AtCtEtp$beta_e)>2)
{
B_des_e_m <- splineDesign(AtCtEtp$knot_e, x=AtCtEtp$T_m, ord=order)
B_des_e_d <- splineDesign(AtCtEtp$knot_e, x=AtCtEtp$T_d, ord=order)
}else{
if(length(AtCtEtp$beta_e)==2)
{
B_des_e_m <- matrix(NA, num_m, 2)
B_des_e_m[,1] <- l_m_1
B_des_e_m[,2] <- l_m_2
B_des_e_d <- matrix(NA, num_d, 2)
B_des_e_d[,1] <- l_d_1
B_des_e_d[,2] <- l_d_2
}else{
B_des_e_m <- matrix(1, num_m, 1)
B_des_e_d <- matrix(1, num_d, 1)
}
}
result <- mcmc_epsp_AtCtEt(AtCtEtp$pheno_m, AtCtEtp$pheno_d, B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, B_des_e_m, B_des_e_d, AtCtEtp$var_b_a, AtCtEtp$var_b_c, AtCtEtp$var_b_e, AtCtEtp$beta_a, AtCtEtp$beta_c, AtCtEtp$beta_e, AtCtEtp$D_a, AtCtEtp$D_c, AtCtEtp$D_e, iter_num, burnin, sd)
AtCtEtp_mc_mod <- list(beta_a_mc=result$beta_a_mc, beta_c_mc=result$beta_c_mc, beta_e_mc=result$beta_e_mc, cov_mc = result$cov, knots_a=AtCtEtp$knot_a, knots_c=AtCtEtp$knot_c, knots_e=AtCtEtp$knot_e, min_t = min(AtCtEtp$T_m, AtCtEtp$T_d), max_t = max(AtCtEtp$T_m, AtCtEtp$T_d))
class(AtCtEtp_mc_mod) <- 'AtCtEtp_mc_model'
return(AtCtEtp_mc_mod)
}
acetp_mcmc_2 <- function(AtCtEtp, iter_num = 5000, sd = 0.1, burnin =500)
{
if(!is(AtCtEtp,'AtCtEtp_model'))
{
stop('The first parameter must be an object obtained from the AtCtEtp function.')
}
T_m <- AtCtEtp$T_m
num_m <- length(T_m)
T_d <- AtCtEtp$T_d
num_d <- length(T_d)
t_int <- max(c(T_m,T_d))-min(c(T_m,T_d))
order <- 3
if(length(AtCtEtp$beta_a)>2)
{
ei_a <- eigen(AtCtEtp$D_a)
B_des_a_m <- splineDesign(AtCtEtp$knot_a, x=AtCtEtp$T_m, ord=order)
B_des_a_d <- splineDesign(AtCtEtp$knot_a, x=AtCtEtp$T_d, ord=order)
B_des_a_m <- B_des_a_m%*%ei_a$vectors
B_des_a_d <- B_des_a_d%*%ei_a$vectors
D_a <- diag(c(ei_a$values[1:(length(ei_a$values)-2)],0,0))
}else{
if(length(AtCtEtp$beta_a)==2)
{
k_a <- length(AtCtEtp$knot_a)-4
delta_a <- matrix(0, k_a+3-2-2, k_a+3-2)
for(i in 1:nrow(delta_a))
{
delta_a[i, i:(i+2)] <- c(1,-2,1)
}
D_a_n <- t(delta_a)%*%delta_a
ei_a <- eigen(D_a_n)
B_des_a_m <- splineDesign(AtCtEtp$knot_a, x=AtCtEtp$T_m, ord=order)
B_des_a_d <- splineDesign(AtCtEtp$knot_a, x=AtCtEtp$T_d, ord=order)
B_des_a_m <- B_des_a_m%*%ei_a$vectors
B_des_a_d <- B_des_a_d%*%ei_a$vectors
B_des_a_m <- B_des_a_m[,(ncol(B_des_a_m)-1):ncol(B_des_a_m)]
B_des_a_d <- B_des_a_d[,(ncol(B_des_a_d)-1):ncol(B_des_a_d)]
D_a <- AtCtEtp$D_a
}else{
B_des_a_m <- matrix(1, num_m, 1)
B_des_a_d <- matrix(1, num_d, 1)
D_a <- AtCtEtp$D_a
}
}
if(length(AtCtEtp$beta_c)>2)
{
ei_c <- eigen(AtCtEtp$D_c)
B_des_c_m <- splineDesign(AtCtEtp$knot_c, x=AtCtEtp$T_m, ord=order)
B_des_c_d <- splineDesign(AtCtEtp$knot_c, x=AtCtEtp$T_d, ord=order)
B_des_c_m <- B_des_c_m%*%ei_c$vectors
B_des_c_d <- B_des_c_d%*%ei_c$vectors
D_c <- diag(c(ei_c$values[1:(length(ei_c$values)-2)],0,0))
}else{
if(length(AtCtEtp$beta_c)==2)
{
k_c <- length(AtCtEtp$knot_c)-4
delta_c <- matrix(0, k_c+3-2-2, k_c+3-2)
for(i in 1:nrow(delta_c))
{
delta_c[i, i:(i+2)] <- c(1,-2,1)
}
D_c_n <- t(delta_c)%*%delta_c
ei_c <- eigen(D_c_n)
B_des_c_m <- splineDesign(AtCtEtp$knot_c, x=AtCtEtp$T_m, ord=order)
B_des_c_d <- splineDesign(AtCtEtp$knot_c, x=AtCtEtp$T_d, ord=order)
B_des_c_m <- B_des_c_m%*%ei_c$vectors
B_des_c_d <- B_des_c_d%*%ei_c$vectors
B_des_c_m <- B_des_c_m[,(ncol(B_des_c_m)-1):ncol(B_des_c_m)]
B_des_c_d <- B_des_c_d[,(ncol(B_des_c_d)-1):ncol(B_des_c_d)]
D_c <- AtCtEtp$D_c
}else{
B_des_c_m <- matrix(1, num_m, 1)
B_des_c_d <- matrix(1, num_d, 1)
D_c <- AtCtEtp$D_c
}
}
if(length(AtCtEtp$beta_e)>2)
{
ei_e <- eigen(AtCtEtp$D_e)
B_des_e_m <- splineDesign(AtCtEtp$knot_e, x=AtCtEtp$T_m, ord=order)
B_des_e_d <- splineDesign(AtCtEtp$knot_e, x=AtCtEtp$T_d, ord=order)
B_des_e_m <- B_des_e_m%*%ei_e$vectors
B_des_e_d <- B_des_e_d%*%ei_e$vectors
D_e <- diag(c(ei_e$values[1:(length(ei_e$values)-2)],0,0))
}else{
if(length(AtCtEtp$beta_e)==2)
{
k_e <- length(AtCtEtp$knot_e)-4
delta_e <- matrix(0, k_e+3-2-2, k_e+3-2)
for(i in 1:nrow(delta_e))
{
delta_e[i, i:(i+2)] <- c(1,-2,1)
}
D_e_n <- t(delta_e)%*%delta_e
ei_e <- eigen(D_e_n)
B_des_e_m <- splineDesign(AtCtEtp$knot_e, x=AtCtEtp$T_m, ord=order)
B_des_e_d <- splineDesign(AtCtEtp$knot_e, x=AtCtEtp$T_d, ord=order)
B_des_e_m <- B_des_e_m%*%ei_e$vectors
B_des_e_d <- B_des_e_d%*%ei_e$vectors
B_des_e_m <- B_des_e_m[,(ncol(B_des_e_m)-1):ncol(B_des_e_m)]
B_des_e_d <- B_des_e_d[,(ncol(B_des_e_d)-1):ncol(B_des_e_d)]
D_e <- AtCtEtp$D_e
}else{
B_des_e_m <- matrix(1, num_m, 1)
B_des_e_d <- matrix(1, num_d, 1)
D_e <- AtCtEtp$D_e
}
}
result <- mcmc_epsp_AtCtEt_2(AtCtEtp$pheno_m, AtCtEtp$pheno_d, B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, B_des_e_m, B_des_e_d, AtCtEtp$var_b_a, AtCtEtp$var_b_c, AtCtEtp$var_b_e, D_a, D_c, D_e, iter_num, burnin, sd)
AtCtEtp_mc_mod <- list(beta_a_mc=result$beta_a_mc, beta_c_mc=result$beta_c_mc, beta_e_mc=result$beta_e_mc, cov_mc = result$cov, knots_a=AtCtEtp$knot_a, knots_c=AtCtEtp$knot_c, knots_e=AtCtEtp$knot_e, min_t = min(AtCtEtp$T_m, AtCtEtp$T_d), max_t = max(AtCtEtp$T_m, AtCtEtp$T_d))
class(AtCtEtp_mc_mod) <- 'AtCtEtp_mc_model'
return(AtCtEtp_mc_mod)
}
# ==== end of file: R/acetp_mcmc.R ====
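# Low-level MCMC drivers. Each function flattens the design matrices and
# fitted parameters, calls the compiled sampler via .C, and unpacks the
# returned posterior means and (upper-triangle-packed) covariance matrices.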
mcmc_epsp <-
function(pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, var, var_b_a, var_b_c, D_a, D_c, iter=10000, burn=500, sd=0.1)
{
num_m <- length(pheno_m)
num_d <- length(pheno_d)
num_a <- ncol(B_des_a_m)
num_c <- ncol(B_des_c_m)
B_a_m <- t(B_des_a_m)
B_a_d <- t(B_des_a_d)
B_c_m <- t(B_des_c_m)
B_c_d <- t(B_des_c_d)
if(var_b_a <= 0.05)
{
ei_a <- eigen(D_a)
G_a <- t(ei_a$vectors)
}else{
G_a <- diag(rep(1,num_a))
}
if(var_b_c <= 0.05)
{
ei_c <- eigen(D_c)
G_c <- t(ei_c$vectors)
}else{
G_c <- diag(rep(1,num_c))
}
multResult <- rep(0,num_a+num_c+(num_a+1)*num_a/2+(num_c+1)*num_c/2+1)
output =.C("CWrapper_mcmc",
product = as.double(multResult),
num_p_mz = as.integer(num_m),
num_p_dz = as.integer(num_d),
num_col_a = as.integer(num_a),
num_col_c = as.integer(num_c),
ph_m = as.double(pheno_m),
ph_d = as.double(pheno_d),
B_des_a_m = as.double(B_a_m),
B_des_a_d = as.double(B_a_d),
B_des_c_m = as.double(B_c_m),
B_des_c_d = as.double(B_c_d),
G_a = as.double(G_a),
G_c = as.double(G_c),
var = as.double(var),
var_b_a = as.double(var_b_a),
var_b_c = as.double(var_b_c),
D_a = as.integer(D_a),
D_c = as.integer(D_c),
iter_n = as.integer(iter),
burn = as.integer(burn),
sd_mcmc = as.double(sd)
)
beta_a_mc <- output$product[1:num_a]
beta_c_mc <- output$product[(1+num_a):(num_a+num_c)]
k <- 1
cov_a <- matrix(0, num_a, num_a)
for(i in 1:num_a)
{
for(j in i:num_a)
{
cov_a[i,j] <- output$product[num_a+num_c+k]
cov_a[j,i] <- cov_a[i,j]
k <- k + 1
}
}
cov_c <- matrix(0, num_c, num_c)
for(i in 1:num_c)
{
for(j in i:num_c)
{
cov_c[i,j] <- output$product[num_a+num_c+k]
cov_c[j,i] <- cov_c[i,j]
k <- k + 1
}
}
return(list(beta_a_mc = beta_a_mc, beta_c_mc = beta_c_mc, cov_a = cov_a, cov_c = cov_c))
}
mcmc_epsp_AtEt <-
function(pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_e_m, B_des_e_d, var_b_a, var_b_e, D_a, D_e, iter=10000, burn=500, sd=0.1)
{
num_m <- length(pheno_m)
num_d <- length(pheno_d)
num_a <- ncol(B_des_a_m)
num_e <- ncol(B_des_e_m)
B_a_m <- t(B_des_a_m)
B_a_d <- t(B_des_a_d)
B_e_m <- t(B_des_e_m)
B_e_d <- t(B_des_e_d)
if(var_b_a <= 0.05)
{
ei_a <- eigen(D_a)
G_a <- t(ei_a$vectors)
}else{
G_a <- diag(rep(1,num_a))
}
if(var_b_e <= 0.05)
{
ei_e <- eigen(D_e)
G_e <- t(ei_e$vectors)
}else{
G_e <- diag(rep(1,num_e))
}
multResult <- rep(0,num_a+num_e+(num_a+1)*num_a/2+(num_e+1)*num_e/2+1)
var <- -1
output =.C("CWrapper_mcmc",
product = as.double(multResult),
num_p_mz = as.integer(num_m),
num_p_dz = as.integer(num_d),
num_col_a = as.integer(num_a),
num_col_c = as.integer(num_e),
ph_m = as.double(pheno_m),
ph_d = as.double(pheno_d),
B_des_a_m = as.double(B_a_m),
B_des_a_d = as.double(B_a_d),
B_des_c_m = as.double(B_e_m),
B_des_c_d = as.double(B_e_d),
G_a = as.double(G_a),
G_c = as.double(G_e),
var = as.double(var),
var_b_a = as.double(var_b_a),
var_b_c = as.double(var_b_e),
D_a = as.integer(D_a),
D_c = as.integer(D_e),
iter_n = as.integer(iter),
burn = as.integer(burn),
sd_mcmc = as.double(sd)
)
beta_a_mc <- output$product[1:num_a]
beta_e_mc <- output$product[(1+num_a):(num_a+num_e)]
k <- 1
cov_a <- matrix(0, num_a, num_a)
for(i in 1:num_a)
{
for(j in i:num_a)
{
cov_a[i,j] <- output$product[num_a+num_e+k]
cov_a[j,i] <- cov_a[i,j]
k <- k + 1
}
}
cov_e <- matrix(0, num_e, num_e)
for(i in 1:num_e)
{
for(j in i:num_e)
{
cov_e[i,j] <- output$product[num_a+num_e+k]
cov_e[j,i] <- cov_e[i,j]
k <- k + 1
}
}
return(list(beta_a_mc = beta_a_mc, beta_e_mc = beta_e_mc, cov_a = cov_a, cov_e = cov_e))
}
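# Joint sampler for the full AtCtEt model. For a dynamic component whose
# smoothing variance is effectively zero (<= 0.05), the coefficients are
# rotated into the eigenbasis of its penalty matrix before sampling.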
mcmc_epsp_AtCtEt <-
function(pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, B_des_e_m, B_des_e_d, var_b_a, var_b_c, var_b_e, beta_a, beta_c, beta_e, D_a, D_c, D_e, iter=10000, burn=500, sd=0.1)
{
num_m <- length(pheno_m)
num_d <- length(pheno_d)
num_a <- ncol(B_des_a_m)
num_c <- ncol(B_des_c_m)
num_e <- ncol(B_des_e_m)
B_a_m <- t(B_des_a_m)
B_a_d <- t(B_des_a_d)
B_c_m <- t(B_des_c_m)
B_c_d <- t(B_des_c_d)
B_e_m <- t(B_des_e_m)
B_e_d <- t(B_des_e_d)
if((var_b_a <= 0.05)&(num_a>2))
{
ei_a <- eigen(D_a)
G_a <- t(ei_a$vectors)
eiv_a <- ei_a$values
}else{
eiv_a <- rep(1, num_a)
G_a <- diag(rep(1,num_a))
}
if((var_b_c <= 0.05)&(num_c>2))
{
ei_c <- eigen(D_c)
G_c <- t(ei_c$vectors)
eiv_c <- ei_c$values
}else{
eiv_c <- rep(1, num_c)
G_c <- diag(rep(1,num_c))
}
if((var_b_e <= 0.05)&(num_e>2))
{
ei_e <- eigen(D_e)
G_e <- t(ei_e$vectors)
eiv_e <- ei_e$values
}else{
eiv_e <- rep(1, num_e)
G_e <- diag(rep(1,num_e))
}
beta_a_t <- G_a%*%beta_a
beta_c_t <- G_c%*%beta_c
beta_e_t <- G_e%*%beta_e
num_t <- num_a+num_c+num_e
multResult <- rep(0,num_t+(num_t+1)*num_t/2+1)
output =.C("CWrapper_mcmc_atctet",
product = as.double(multResult),
num_p_mz = as.integer(num_m),
num_p_dz = as.integer(num_d),
num_col_a = as.integer(num_a),
num_col_c = as.integer(num_c),
num_col_e = as.integer(num_e),
ph_m = as.double(pheno_m),
ph_d = as.double(pheno_d),
B_des_a_m = as.double(B_a_m),
B_des_a_d = as.double(B_a_d),
B_des_c_m = as.double(B_c_m),
B_des_c_d = as.double(B_c_d),
B_des_e_m = as.double(B_e_m),
B_des_e_d = as.double(B_e_d),
G_a = as.double(G_a),
G_c = as.double(G_c),
G_e = as.double(G_e),
ei_a = as.double(eiv_a),
ei_c = as.double(eiv_c),
ei_e = as.double(eiv_e),
var_b_a = as.double(var_b_a),
var_b_c = as.double(var_b_c),
var_b_e = as.double(var_b_e),
beta_a = as.double(beta_a_t),
beta_c = as.double(beta_c_t),
beta_e = as.double(beta_e_t),
D_a = as.integer(D_a),
D_c = as.integer(D_c),
D_e = as.integer(D_e),
iter_n = as.integer(iter),
burn = as.integer(burn),
sd_mcmc = as.double(sd)
)
beta_a_mc <- output$product[1:num_a]
beta_c_mc <- output$product[(1+num_a):(num_a+num_c)]
beta_e_mc <- output$product[(1+num_a+num_c):(num_a+num_c+num_e)]
k <- 1
cov_t <- matrix(0, num_t, num_t)
for(i in 1:num_t)
{
for(j in i:num_t)
{
cov_t[i,j] <- output$product[num_t+k]
cov_t[j,i] <- cov_t[i,j]
k <- k + 1
}
}
return(list(beta_a_mc = beta_a_mc, beta_c_mc = beta_c_mc, beta_e_mc = beta_e_mc, cov = cov_t))
}
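# Variant of mcmc_epsp_AtCtEt that skips the eigen-rotation and takes no
# initial coefficient values; the penalty matrices D_a, D_c and D_e are
# passed to the C sampler unchanged.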
mcmc_epsp_AtCtEt_2 <-
function(pheno_m, pheno_d, B_des_a_m, B_des_a_d, B_des_c_m, B_des_c_d, B_des_e_m, B_des_e_d, var_b_a, var_b_c, var_b_e, D_a, D_c, D_e, iter=10000, burn=500, sd=0.1)
{
num_m <- length(pheno_m)
num_d <- length(pheno_d)
num_a <- ncol(B_des_a_m)
num_c <- ncol(B_des_c_m)
num_e <- ncol(B_des_e_m)
B_a_m <- t(B_des_a_m)
B_a_d <- t(B_des_a_d)
B_c_m <- t(B_des_c_m)
B_c_d <- t(B_des_c_d)
B_e_m <- t(B_des_e_m)
B_e_d <- t(B_des_e_d)
num_t <- num_a+num_c+num_e
# multResult <- rep(0,num_a+num_c+num_e+(num_a+1)*num_a/2+(num_c+1)*num_c/2+(num_e+1)*num_e/2+1)
multResult <- rep(0,num_t+(num_t+1)*num_t/2+1)
output <- .C("CWrapper_mcmc_atctet_2",
product = as.double(multResult),
num_p_mz = as.integer(num_m),
num_p_dz = as.integer(num_d),
num_col_a = as.integer(num_a),
num_col_c = as.integer(num_c),
num_col_e = as.integer(num_e),
ph_m = as.double(pheno_m),
ph_d = as.double(pheno_d),
B_des_a_m = as.double(B_a_m),
B_des_a_d = as.double(B_a_d),
B_des_c_m = as.double(B_c_m),
B_des_c_d = as.double(B_c_d),
B_des_e_m = as.double(B_e_m),
B_des_e_d = as.double(B_e_d),
var_b_a = as.double(var_b_a),
var_b_c = as.double(var_b_c),
var_b_e = as.double(var_b_e),
D_a = as.double(D_a),
D_c = as.double(D_c),
D_e = as.double(D_e),
iter_n = as.integer(iter),
burn = as.integer(burn),
sd_mcmc = as.double(sd)
)
beta_a_mc <- output$product[1:num_a]
beta_c_mc <- output$product[(1+num_a):(num_a+num_c)]
beta_e_mc <- output$product[(1+num_a+num_c):(num_a+num_c+num_e)]
k <- 1
cov_t <- matrix(0, num_t, num_t)
for(i in 1:num_t)
{
for(j in i:num_t)
{
cov_t[i,j] <- output$product[num_t+k]
cov_t[j,i] <- cov_t[i,j]
k <- k + 1
}
}
return(list(beta_a_mc = beta_a_mc, beta_c_mc = beta_c_mc, beta_e_mc = beta_e_mc, cov = cov_t))
}
## ---- end of file: ACEt/R/mcmc_epsp.R ----
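# Plot the fitted variance curves of the A, C and E components of an
# AtCtEt_model object over the observed age range, with 95% pointwise
# confidence bands from the delta method (default) or from stored bootstrap
# results (boot = TRUE).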
plot_AtCtEt <- function(AtCtEt, boot=FALSE, xlab, ylab, main, col, legend)
{
# if(class(AtCtEt)!='AtCtEt_model')
if(!is(AtCtEt,'AtCtEt_model'))
{
stop('The first parameter must be an object obtained from the AtCtEt function.')
}
if(boot==TRUE)
{
if(is.null(AtCtEt$boot)==TRUE)
{
stop('Please first run the AtCtEt model with bootstrapping.')
}
}
model_cur <- AtCtEt
l_a <- model_cur$n_beta_a
l_c <- model_cur$n_beta_c
l_e <- model_cur$n_beta_e
#pheno_m <- c(t(data_m[,1:2]))
#pheno_d <- c(t(data_d[,1:2]))
#T_m <- rep(data_m[,3], each=2)
#T_d <- rep(data_d[,3], each=2)
n_beta <- 1:(l_a+l_c+l_e)
order <- 3
x <- seq(from=model_cur$min_t, to=model_cur$max_t, length.out=500)
if(model_cur$n_beta_a>1)
{
bb_a <- splineDesign(model_cur$knots_a, x = x, ord=order, outer.ok = TRUE)
}else{
bb_a <- splineDesign(model_cur$knots_a, x = x, ord=1, outer.ok = TRUE)
if(model_cur$beta_a[1]==-Inf)
{n_beta[1]=0}
}
if(model_cur$n_beta_c>1)
{
bb_c <- splineDesign(model_cur$knots_c, x = x, ord=order, outer.ok = TRUE)
}else{
bb_c <- splineDesign(model_cur$knots_c, x = x, ord=1, outer.ok = TRUE)
if(model_cur$beta_c[1]==-Inf)
{n_beta[model_cur$n_beta_a+1]=0}
}
if(model_cur$n_beta_e>1)
{
bb_e <- splineDesign(model_cur$knots_e, x = x, ord=order, outer.ok = TRUE)
}else{
bb_e <- splineDesign(model_cur$knots_e, x = x, ord=1, outer.ok = TRUE)
}
points_a <- exp(bb_a%*%model_cur$beta_a)
points_c <- exp(bb_c%*%model_cur$beta_c)
points_e <- exp(bb_e%*%model_cur$beta_e)
#fisher <- solve(model_cur$hessian[2:(1+l_a+l_c),2:(1+l_a+l_c)])
max_v <- max(points_c, points_a, points_e)*1.2
plot(range(x), c(0,max_v), type = "n", xlab = xlab, ylab = ylab, main = main)
index <- 1
# bb <- splineDesign(model_cur$knots_a, x = x, ord=order, outer.ok = TRUE)
lines(x, points_a, col = col[1], lwd = 2)
lines(x, points_c, col = col[2], lwd = 2)
lines(x, points_e, col = col[3], lwd = 2)
if(boot == TRUE)
{
lines(AtCtEt$boot$x, AtCtEt$boot$lower.ci_a, col = "orange" ,lty = 2 , lwd = 0.6)
lines(AtCtEt$boot$x, AtCtEt$boot$upper.ci_a, col = "orange" ,lty = 2 , lwd = 0.6)
polygon(c(AtCtEt$boot$x, rev(AtCtEt$boot$x)),c(AtCtEt$boot$upper.ci_a, rev(AtCtEt$boot$lower.ci_a)),col='grey',border = NA, lty=3, density=20)
lines(AtCtEt$boot$x, AtCtEt$boot$lower.ci_c, col = "green" ,lty = 2 , lwd = 0.6)
lines(AtCtEt$boot$x, AtCtEt$boot$upper.ci_c, col = "green" ,lty = 2 , lwd = 0.6)
polygon(c(AtCtEt$boot$x, rev(AtCtEt$boot$x)),c(AtCtEt$boot$upper.ci_c, rev(AtCtEt$boot$lower.ci_c)),col='grey',border = NA, lty=3, density=20)
lines(AtCtEt$boot$x, AtCtEt$boot$lower.ci_e, col = "yellow" ,lty = 2 , lwd = 0.6)
lines(AtCtEt$boot$x, AtCtEt$boot$upper.ci_e, col = "yellow" ,lty = 2 , lwd = 0.6)
polygon(c(AtCtEt$boot$x, rev(AtCtEt$boot$x)),c(AtCtEt$boot$upper.ci_e, rev(AtCtEt$boot$lower.ci_e)),col='grey',border = NA, lty=3, density=20)
}else{
fisher <- solve(model_cur$hessian[n_beta,n_beta])
if((l_a>1)|(model_cur$beta_a[1]!=-Inf))
{
fisher_a <- fisher[index:l_a,index:l_a]
lower <- rep(NA, length(x))
upper <- rep(NA, length(x))
sd <- rep(NA, length(x))
flag <- 0
for(i in 1:length(x))
{
delta <- t(bb_a[i,])%*%fisher_a%*%bb_a[i,]
if(delta>=0)
{
sd[i] <- sqrt(delta)
lower[i] <- sum(bb_a[i,]*model_cur$beta_a) - 1.96*sd[i]
upper[i] <- sum(bb_a[i,]*model_cur$beta_a) + 1.96*sd[i]
}else{flag <- 1}
}
#if(l_a>1)
#{
lower <- exp(lower)
upper <- exp(upper)
#}
lower <- ifelse(lower<0, 0, lower)
upper <- ifelse(upper>max_v, max_v, upper)
index <- index + l_a
if(flag == 0)
{
lines(x, lower, col = "orange" ,lty = 2 , lwd = 0.6)
lines(x, upper, col = "orange" ,lty = 2 , lwd = 0.6)
polygon(c(x, rev(x)),c(upper, rev(lower)),col='grey',border = NA, lty=3, density=20)
}else{
warning('The variance of one of the estimates from the Delta method for the A component is negative. Please try the bootstrap method for the confidence interval or use a different model.')
}
}
if((l_c>1)|(model_cur$beta_c[1]!=-Inf))
{
fisher_c <- fisher[index:(index+l_c-1),index:(index+l_c-1)]
lower <- rep(NA, length(x))
upper <- rep(NA, length(x))
sd <- rep(NA, length(x))
flag <- 0
for(i in 1:length(x))
{
delta <- t(bb_c[i,])%*%fisher_c%*%bb_c[i,]
if(delta>=0)
{
sd[i] <- sqrt(delta)
lower[i] <- sum(bb_c[i,]*model_cur$beta_c) - 1.96*sd[i]
upper[i] <- sum(bb_c[i,]*model_cur$beta_c) + 1.96*sd[i]
}else{flag <- 1}
}
#if(l_c>1)
#{
lower <- exp(lower)
upper <- exp(upper)
#}
lower <- ifelse(lower<0, 0, lower)
upper <- ifelse(upper>max_v, max_v, upper)
index <- index + l_c
if(flag == 0)
{
lines(x, lower, col = "green" ,lty = 2 , lwd = 0.6)
lines(x, upper, col = "green" ,lty = 2 , lwd = 0.6)
polygon(c(x, rev(x)),c(upper, rev(lower)),col='grey',border = NA, lty=3, density=20)
}else{warning('The variance of one of the estimates from the Delta method for the C component is negative. Please try the bootstrap method for the confidence interval or use a different model.')}
}
fisher_e <- fisher[index:(index+l_e-1),index:(index+l_e-1)]
lower <- rep(NA, length(x))
upper <- rep(NA, length(x))
sd <- rep(NA, length(x))
for(i in 1:length(x))
{
sd[i] <- sqrt(t(bb_e[i,])%*%fisher_e%*%bb_e[i,])
lower[i] <- sum(bb_e[i,]*model_cur$beta_e) - 1.96*sd[i]
upper[i] <- sum(bb_e[i,]*model_cur$beta_e) + 1.96*sd[i]
}
#if(l_e>1)
#{
lower <- exp(lower)
upper <- exp(upper)
#}
lower <- ifelse(lower<0, 0, lower)
upper <- ifelse(upper>max_v, max_v, upper)
lines(x, lower, col = "yellow" ,lty = 2 , lwd = 0.6)
lines(x, upper, col = "yellow" ,lty = 2 , lwd = 0.6)
polygon(c(x, rev(x)),c(upper, rev(lower)),col='grey',border = NA, lty=3, density=20)
} # boot == FALSE
if(legend==TRUE)
{
legend(x[1], max_v, c('Additive genetic component','Common environmental component', 'Unique environmental component'), col = col, lty=c(1,1,1), lwd=c(2,2,2))
}
}
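# Counterpart of plot_AtCtEt for an AtDtEt_model object: plots the variance
# curves of the A, D and E components with delta-method or bootstrap
# confidence bands (the argument is named AtCtEt for historical reasons but
# must be an AtDtEt_model).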
plot_AtDtEt <- function(AtCtEt, boot=FALSE, xlab, ylab, main, col, legend)
{
# if(class(AtCtEt)!='AtCtEt_model')
if(!is(AtCtEt,'AtDtEt_model'))
{
stop('The first parameter must be an object obtained from the AtDtEt function.')
}
if(boot==TRUE)
{
if(is.null(AtCtEt$boot)==TRUE)
{
stop('Please first run the AtDtEt model with bootstrapping.')
}
}
model_cur <- AtCtEt
l_a <- model_cur$n_beta_a
l_d <- model_cur$n_beta_d
l_e <- model_cur$n_beta_e
n_beta <- 1:(l_a+l_d+l_e)
order <- 3
x <- seq(from=model_cur$min_t, to=model_cur$max_t, length.out=500)
if(model_cur$n_beta_a>1)
{
bb_a <- splineDesign(model_cur$knots_a, x = x, ord=order, outer.ok = TRUE)
}else{
bb_a <- splineDesign(model_cur$knots_a, x = x, ord=1, outer.ok = TRUE)
if(model_cur$beta_a[1]==-Inf)
{n_beta[1]=0}
}
if(model_cur$n_beta_d>1)
{
bb_d <- splineDesign(model_cur$knots_d, x = x, ord=order, outer.ok = TRUE)
}else{
bb_d <- splineDesign(model_cur$knots_d, x = x, ord=1, outer.ok = TRUE)
if(model_cur$beta_d[1]==-Inf)
{n_beta[model_cur$n_beta_a+1]=0}
}
if(model_cur$n_beta_e>1)
{
bb_e <- splineDesign(model_cur$knots_e, x = x, ord=order, outer.ok = TRUE)
}else{
bb_e <- splineDesign(model_cur$knots_e, x = x, ord=1, outer.ok = TRUE)
}
points_a <- exp(bb_a%*%model_cur$beta_a)
points_d <- exp(bb_d%*%model_cur$beta_d)
points_e <- exp(bb_e%*%model_cur$beta_e)
#fisher <- solve(model_cur$hessian[2:(1+l_a+l_c),2:(1+l_a+l_c)])
max_v <- max(points_d, points_a, points_e)*1.2
plot(range(x), c(0,max_v), type = "n", xlab = xlab, ylab = ylab, main = main)
index <- 1
# bb <- splineDesign(model_cur$knots_a, x = x, ord=order, outer.ok = TRUE)
lines(x, points_a, col = col[1], lwd = 2)
lines(x, points_d, col = col[2], lwd = 2)
lines(x, points_e, col = col[3], lwd = 2)
if(boot == TRUE)
{
lines(AtCtEt$boot$x, AtCtEt$boot$lower.ci_a, col = "orange" ,lty = 2 , lwd = 0.6)
lines(AtCtEt$boot$x, AtCtEt$boot$upper.ci_a, col = "orange" ,lty = 2 , lwd = 0.6)
polygon(c(AtCtEt$boot$x, rev(AtCtEt$boot$x)),c(AtCtEt$boot$upper.ci_a, rev(AtCtEt$boot$lower.ci_a)),col='grey',border = NA, lty=3, density=20)
lines(AtCtEt$boot$x, AtCtEt$boot$lower.ci_d, col = "green" ,lty = 2 , lwd = 0.6)
lines(AtCtEt$boot$x, AtCtEt$boot$upper.ci_d, col = "green" ,lty = 2 , lwd = 0.6)
polygon(c(AtCtEt$boot$x, rev(AtCtEt$boot$x)),c(AtCtEt$boot$upper.ci_d, rev(AtCtEt$boot$lower.ci_d)),col='grey',border = NA, lty=3, density=20)
lines(AtCtEt$boot$x, AtCtEt$boot$lower.ci_e, col = "yellow" ,lty = 2 , lwd = 0.6)
lines(AtCtEt$boot$x, AtCtEt$boot$upper.ci_e, col = "yellow" ,lty = 2 , lwd = 0.6)
polygon(c(AtCtEt$boot$x, rev(AtCtEt$boot$x)),c(AtCtEt$boot$upper.ci_e, rev(AtCtEt$boot$lower.ci_e)),col='grey',border = NA, lty=3, density=20)
}else{
fisher <- solve(model_cur$hessian[n_beta,n_beta])
if((l_a>1)|(model_cur$beta_a[1]!=-Inf))
{
fisher_a <- fisher[index:l_a,index:l_a]
lower <- rep(NA, length(x))
upper <- rep(NA, length(x))
sd <- rep(NA, length(x))
flag <- 0
for(i in 1:length(x))
{
delta <- t(bb_a[i,])%*%fisher_a%*%bb_a[i,]
if(delta>=0)
{
sd[i] <- sqrt(delta)
lower[i] <- sum(bb_a[i,]*model_cur$beta_a) - 1.96*sd[i]
upper[i] <- sum(bb_a[i,]*model_cur$beta_a) + 1.96*sd[i]
}else{flag <- 1}
}
#if(l_a>1)
#{
lower <- exp(lower)
upper <- exp(upper)
#}
lower <- ifelse(lower<0, 0, lower)
upper <- ifelse(upper>max_v, max_v, upper)
index <- index + l_a
if(flag == 0)
{
lines(x, lower, col = "orange" ,lty = 2 , lwd = 0.6)
lines(x, upper, col = "orange" ,lty = 2 , lwd = 0.6)
polygon(c(x, rev(x)),c(upper, rev(lower)),col='grey',border = NA, lty=3, density=20)
}else{
warning('The variance of one of the estimates from the Delta method for the A component is negative. Please try the bootstrap method for the confidence interval or use a different model.')
}
}
if((l_d>1)|(model_cur$beta_d[1]!=-Inf))
{
fisher_d <- fisher[index:(index+l_d-1),index:(index+l_d-1)]
lower <- rep(NA, length(x))
upper <- rep(NA, length(x))
sd <- rep(NA, length(x))
flag <- 0
for(i in 1:length(x))
{
delta <- t(bb_d[i,])%*%fisher_d%*%bb_d[i,]
if(delta>=0)
{
sd[i] <- sqrt(delta)
lower[i] <- sum(bb_d[i,]*model_cur$beta_d) - 1.96*sd[i]
upper[i] <- sum(bb_d[i,]*model_cur$beta_d) + 1.96*sd[i]
}else{flag <- 1}
}
#if(l_c>1)
#{
lower <- exp(lower)
upper <- exp(upper)
#}
lower <- ifelse(lower<0, 0, lower)
upper <- ifelse(upper>max_v, max_v, upper)
index <- index + l_d
if(flag == 0)
{
lines(x, lower, col = "green" ,lty = 2 , lwd = 0.6)
lines(x, upper, col = "green" ,lty = 2 , lwd = 0.6)
polygon(c(x, rev(x)),c(upper, rev(lower)),col='grey',border = NA, lty=3, density=20)
}else{warning('The variance of one of the estimates from the Delta method for the D component is negative. Please try the bootstrap method for the confidence interval or use a different model.')}
}
fisher_e <- fisher[index:(index+l_e-1),index:(index+l_e-1)]
lower <- rep(NA, length(x))
upper <- rep(NA, length(x))
sd <- rep(NA, length(x))
for(i in 1:length(x))
{
sd[i] <- sqrt(t(bb_e[i,])%*%fisher_e%*%bb_e[i,])
lower[i] <- sum(bb_e[i,]*model_cur$beta_e) - 1.96*sd[i]
upper[i] <- sum(bb_e[i,]*model_cur$beta_e) + 1.96*sd[i]
}
#if(l_e>1)
#{
lower <- exp(lower)
upper <- exp(upper)
#}
lower <- ifelse(lower<0, 0, lower)
upper <- ifelse(upper>max_v, max_v, upper)
lines(x, lower, col = "yellow" ,lty = 2 , lwd = 0.6)
lines(x, upper, col = "yellow" ,lty = 2 , lwd = 0.6)
polygon(c(x, rev(x)),c(upper, rev(lower)),col='grey',border = NA, lty=3, density=20)
} # boot == FALSE
if(legend==TRUE)
{
legend(x[1], max_v, c('Additive genetic component','Dominance genetic component', 'Unique environmental component'), col = col, lty=c(1,1,1), lwd=c(2,2,2))
}
}
## ---- end of file: ACEt/R/plot_AtCtEt.R ----
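# Plot the dynamic (narrow-sense) heritability A(t)/(A(t)+C(t)+E(t)) for an
# AtCtEt_model or AtCtEtp_mc_model object, with pointwise confidence bands
# from the delta method or from stored bootstrap results.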
plot_AtCtEt_h <- function(AtCtEt, boot=FALSE, xlab, ylab, main, col)
{
# if((class(AtCtEt)!='AtCtEt_model')&(class(AtCtEt)!='AtCtEtp_mc_model'))
if(!(class(AtCtEt)%in%c('AtCtEt_model','AtCtEtp_mc_model')))
{
stop('The first parameter must be an object obtained from the AtCtEt or acetp_mcmc function.')
}
# if((boot==TRUE)&(class(AtCtEt)=='AtCtEt_model'))
if((boot==TRUE)&(is(AtCtEt,'AtCtEt_model')))
{
if(is.null(AtCtEt$boot)==TRUE)
{
stop('Please first run the AtCtEt model with bootstrapping.')
}
}
# if(class(AtCtEt)=='AtCtEt_model')
if(is(AtCtEt,'AtCtEt_model'))
{
model_cur <- AtCtEt
l_a <- model_cur$n_beta_a
l_c <- model_cur$n_beta_c
l_e <- model_cur$n_beta_e
if((l_a==1)&(model_cur$beta_a[1]==-Inf))
{stop('The current model has no additive genetic component.')}
#pheno_m <- c(t(data_m[,1:2]))
#pheno_d <- c(t(data_d[,1:2]))
#T_m <- rep(data_m[,3], each=2)
#T_d <- rep(data_d[,3], each=2)
n_beta <- 1:(l_a+l_c+l_e)
order <- 3
x <- seq(from=model_cur$min_t, to=model_cur$max_t, length.out=500)
if(model_cur$n_beta_a>1)
{
bb_a <- splineDesign(model_cur$knots_a, x = x, ord=order, outer.ok = TRUE)
}else{
bb_a <- splineDesign(model_cur$knots_a, x = x, ord=1, outer.ok = TRUE)
if(model_cur$beta_a[1]==-Inf)
{n_beta[1]=0}
}
if(model_cur$n_beta_c>1)
{
bb_c <- splineDesign(model_cur$knots_c, x = x, ord=order, outer.ok = TRUE)
}else{
bb_c <- splineDesign(model_cur$knots_c, x = x, ord=1, outer.ok = TRUE)
if(model_cur$beta_c[1]==-Inf)
{n_beta[model_cur$n_beta_a+1]=0}
}
if(model_cur$n_beta_e>1)
{
bb_e <- splineDesign(model_cur$knots_e, x = x, ord=order, outer.ok = TRUE)
}else{
bb_e <- splineDesign(model_cur$knots_e, x = x, ord=1, outer.ok = TRUE)
}
points_a <- exp(bb_a%*%model_cur$beta_a)
points_c <- exp(bb_c%*%model_cur$beta_c)
points_e <- exp(bb_e%*%model_cur$beta_e)
points_h <- points_a/(points_a+points_c+points_e)
#fisher <- solve(model_cur$hessian[2:(1+l_a+l_c),2:(1+l_a+l_c)])
max_v <- 1
plot(range(x), c(0,max_v), type = "n", xlab = xlab, ylab = ylab, main = main)
index <- 1
# bb <- splineDesign(model_cur$knots_a, x = x, ord=order, outer.ok = TRUE)
lines(x, points_h, col = col[1], lwd = 2)
#lines(x, points_c, col = "blue", lwd = 2)
#lines(x, points_e, col = "pink", lwd = 2)
if(boot == TRUE)
{
lines(AtCtEt$boot$x, AtCtEt$boot$lower.ci_h, col = "grey" ,lty = 2 , lwd = 0.6)
lines(AtCtEt$boot$x, AtCtEt$boot$upper.ci_h, col = "grey" ,lty = 2 , lwd = 0.6)
polygon(c(AtCtEt$boot$x, rev(AtCtEt$boot$x)),c(AtCtEt$boot$upper.ci_h, rev(AtCtEt$boot$lower.ci_h)),col='grey',border = NA, lty=3, density=20)
}else{
fisher <- solve(model_cur$hessian[n_beta,n_beta])
e_a <- rep(0,length(x))
if(model_cur$beta_c[1]!=-Inf)
{e_a <- exp(bb_c%*%model_cur$beta_c-bb_a%*%model_cur$beta_a)}
e_b <- exp(bb_e%*%model_cur$beta_e-bb_a%*%model_cur$beta_a)
lower <- rep(NA, length(x))
upper <- rep(NA, length(x))
sd <- rep(NA, length(x))
flag <- 0
for(i in 1:length(x))
{
if(model_cur$beta_c[1]!=-Inf)
{
P <- matrix(NA,2,l_a+l_c+l_e)
P[1,] <- c((-1)*bb_a[i,],bb_c[i,],rep(0,l_e))
P[2,] <- c((-1)*bb_a[i,],rep(0,l_c),bb_e[i,])
Sigma <- P%*%fisher%*%t(P)
delta <- t(c(e_a[i],e_b[i]))%*%Sigma%*%c(e_a[i],e_b[i])
delta <- delta*((1+e_a[i]+e_b[i])^(-4))
}else{
P <- matrix(NA,1,l_a+l_e)
P[1,] <- c((-1)*bb_a[i,],bb_e[i,])
Sigma <- P%*%fisher%*%t(P)
delta <- t(e_b[i])%*%Sigma%*%e_b[i]
delta <- delta*((1+e_b[i])^(-4))
}
if(delta>=0)
{
sd[i] <- sqrt(delta)
esti <- 1/(1+e_a[i]+e_b[i])
lower[i] <- esti - 1.96*sd[i]
upper[i] <- esti + 1.96*sd[i]
}else{flag <- 1}
}
lower <- ifelse(lower<0, 0, lower)
upper <- ifelse(upper>max_v, max_v, upper)
if(flag == 0)
{
lines(x, lower, col = "grey" ,lty = 2 , lwd = 0.6)
lines(x, upper, col = "grey" ,lty = 2 , lwd = 0.6)
polygon(c(x, rev(x)),c(upper, rev(lower)),col='grey',border = NA, lty=3, density=20)
}else{warning('Please try the bootstrap method for the confidence interval or use a different model.')}
} # boot == FALSE
# legend(x[1], max_v, c('Additive genetic component','Common environmental component', 'Unique environmental component'), col = c('red','blue','pink'), lty=c(1,1,1), lwd=c(2,2,2))
}else{
model_cur <- AtCtEt
l_a <- length(model_cur$beta_a_mc)
l_c <- length(model_cur$beta_c_mc)
l_e <- length(model_cur$beta_e_mc)
if((l_a==1)&(model_cur$beta_a_mc[1]==-Inf))
{stop('The current model has no additive genetic component.')}
order <- 3
p_n <- 500
x <- seq(from=model_cur$min_t, to=model_cur$max_t, length.out=p_n)
t_int <- model_cur$max_t-model_cur$min_t
l_m_1 <- (model_cur$max_t-x)/t_int
l_m_2 <- (x-model_cur$min_t)/t_int
if(l_a>2)
{
bb_a <- splineDesign(model_cur$knots_a, x = x, ord=order, outer.ok = TRUE)
}else{
if(l_a==2)
{
bb_a <- matrix(NA, p_n, 2)
bb_a[,1] <- l_m_1
bb_a[,2] <- l_m_2
}else{
bb_a <- matrix(1, p_n, 1)
}
}
points_a <- exp(bb_a%*%model_cur$beta_a_mc)
if(l_c>2)
{
bb_c <- splineDesign(model_cur$knots_c, x = x, ord=order, outer.ok = TRUE)
}else{
if(l_c==2)
{
bb_c <- matrix(NA, p_n, 2)
bb_c[,1] <- l_m_1
bb_c[,2] <- l_m_2
}else{
bb_c <- matrix(1, p_n, 1)
}
}
points_c <- exp(bb_c%*%model_cur$beta_c_mc)
if(l_e>2)
{
bb_e <- splineDesign(model_cur$knots_e, x = x, ord=order, outer.ok = TRUE)
}else{
if(l_e==2)
{
bb_e <- matrix(NA, p_n, 2)
bb_e[,1] <- l_m_1
bb_e[,2] <- l_m_2
}else{
bb_e <- matrix(1, p_n, 1)
}
}
points_e <- exp(bb_e%*%model_cur$beta_e_mc)
points_h <- points_a/(points_a+points_c+points_e)
fisher <- model_cur$cov_mc
#fisher <- matrix(0, l_a+l_c+l_e, l_a+l_c+l_e)
#fisher[1:l_a, 1:l_a] <- model_cur$cov_a
#fisher[(1+l_a):(l_a+l_c), (1+l_a):(l_a+l_c)] <- model_cur$cov_c
#fisher[(1+l_a+l_c):(l_a+l_c+l_e), (1+l_a+l_c):(l_a+l_c+l_e)] <- model_cur$cov_e
max_v <- 1
plot(range(x), c(0,max_v), type = "n", xlab = xlab, ylab = ylab, main = main)
lines(x, points_h, col = col[1], lwd = 2)
e_a <- exp(bb_c%*%model_cur$beta_c_mc-bb_a%*%model_cur$beta_a_mc)
e_b <- exp(bb_e%*%model_cur$beta_e_mc-bb_a%*%model_cur$beta_a_mc)
lower <- rep(NA, length(x))
upper <- rep(NA, length(x))
sd <- rep(NA, length(x))
flag <- 0
for(i in 1:length(x))
{
P <- matrix(NA,2,l_a+l_c+l_e)
P[1,] <- c((-1)*bb_a[i,],bb_c[i,],rep(0,l_e))
P[2,] <- c((-1)*bb_a[i,],rep(0,l_c),bb_e[i,])
Sigma <- P%*%fisher%*%t(P)
delta <- t(c(e_a[i],e_b[i]))%*%Sigma%*%c(e_a[i],e_b[i])
delta <- delta*((1+e_a[i]+e_b[i])^(-4))
if(delta>=0)
{
sd[i] <- sqrt(delta)
esti <- 1/(1+e_a[i]+e_b[i])
lower[i] <- esti - 1.96*sd[i]
upper[i] <- esti + 1.96*sd[i]
}else{flag <- 1}
}
lower <- ifelse(lower<0, 0, lower)
upper <- ifelse(upper>max_v, max_v, upper)
if(flag == 0)
{
lines(x, lower, col = "grey" ,lty = 2 , lwd = 0.6)
lines(x, upper, col = "grey" ,lty = 2 , lwd = 0.6)
polygon(c(x, rev(x)),c(upper, rev(lower)),col='grey',border = NA, lty=3, density=20)
}else{warning('Please try the bootstrap method for the confidence interval or use a different model.')}
}
}
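# Plot the broad-sense heritability (A(t)+D(t))/(A(t)+D(t)+E(t)) for an
# AtDtEt_model object, with delta-method or bootstrap confidence bands.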
plot_AtDtEt_h <- function(AtCtEt, boot=FALSE, xlab, ylab, main, col)
{
# if((class(AtCtEt)!='AtCtEt_model')&(class(AtCtEt)!='AtCtEtp_mc_model'))
if(!(class(AtCtEt)%in%c('AtDtEt_model')))
{
stop('The first parameter must be an object obtained from the AtDtEt function.')
}
# if((boot==TRUE)&(class(AtCtEt)=='AtCtEt_model'))
if((boot==TRUE)&(is(AtCtEt,'AtDtEt_model')))
{
if(is.null(AtCtEt$boot)==TRUE)
{
stop('Please first run the AtDtEt model with bootstrapping.')
}
}
# if(class(AtCtEt)=='AtCtEt_model')
if(is(AtCtEt,'AtDtEt_model'))
{
model_cur <- AtCtEt
l_a <- model_cur$n_beta_a
l_d <- model_cur$n_beta_d
l_e <- model_cur$n_beta_e
if((l_a==1)&(model_cur$beta_a[1]==-Inf))
{stop('The current model has no additive genetic component.')}
n_beta <- 1:(l_a+l_d+l_e)
order <- 3
x <- seq(from=model_cur$min_t, to=model_cur$max_t, length.out=500)
if(model_cur$n_beta_a>1)
{
bb_a <- splineDesign(model_cur$knots_a, x = x, ord=order, outer.ok = TRUE)
}else{
bb_a <- splineDesign(model_cur$knots_a, x = x, ord=1, outer.ok = TRUE)
if(model_cur$beta_a[1]==-Inf)
{n_beta[1]=0}
}
if(model_cur$n_beta_d>1)
{
bb_d <- splineDesign(model_cur$knots_d, x = x, ord=order, outer.ok = TRUE)
}else{
bb_d <- splineDesign(model_cur$knots_d, x = x, ord=1, outer.ok = TRUE)
if(model_cur$beta_d[1]==-Inf)
{n_beta[model_cur$n_beta_a+1]=0}
}
if(model_cur$n_beta_e>1)
{
bb_e <- splineDesign(model_cur$knots_e, x = x, ord=order, outer.ok = TRUE)
}else{
bb_e <- splineDesign(model_cur$knots_e, x = x, ord=1, outer.ok = TRUE)
}
points_a <- exp(bb_a%*%model_cur$beta_a)
points_d <- exp(bb_d%*%model_cur$beta_d)
points_e <- exp(bb_e%*%model_cur$beta_e)
points_h <- (points_a+points_d)/(points_a+points_d+points_e)
#fisher <- solve(model_cur$hessian[2:(1+l_a+l_c),2:(1+l_a+l_c)])
max_v <- 1
plot(range(x), c(0,max_v), type = "n", xlab = xlab, ylab = ylab, main = main)
index <- 1
# bb <- splineDesign(model_cur$knots_a, x = x, ord=order, outer.ok = TRUE)
lines(x, points_h, col = col[1], lwd = 2)
#lines(x, points_c, col = "blue", lwd = 2)
#lines(x, points_e, col = "pink", lwd = 2)
if(boot == TRUE)
{
lines(AtCtEt$boot$x, AtCtEt$boot$lower.ci_h, col = "grey" ,lty = 2 , lwd = 0.6)
lines(AtCtEt$boot$x, AtCtEt$boot$upper.ci_h, col = "grey" ,lty = 2 , lwd = 0.6)
polygon(c(AtCtEt$boot$x, rev(AtCtEt$boot$x)),c(AtCtEt$boot$upper.ci_h, rev(AtCtEt$boot$lower.ci_h)),col='grey',border = NA, lty=3, density=20)
}else{
fisher <- solve(model_cur$hessian[n_beta,n_beta])
e_a <- exp(bb_a%*%model_cur$beta_a-bb_e%*%model_cur$beta_e)
e_b <- rep(0,length(x))
if(model_cur$beta_d[1]!=-Inf)
{e_b <- exp(bb_d%*%model_cur$beta_d-bb_e%*%model_cur$beta_e)}
lower <- rep(NA, length(x))
upper <- rep(NA, length(x))
sd <- rep(NA, length(x))
flag <- 0
for(i in 1:length(x))
{
if(model_cur$beta_d[1]!=-Inf)
{
P <- matrix(NA,2,l_a+l_d+l_e)
P[1,] <- c(bb_a[i,],rep(0,l_d),(-1)*bb_e[i,])
P[2,] <- c(rep(0,l_a),bb_d[i,],(-1)*bb_e[i,])
Sigma <- P%*%fisher%*%t(P)
delta <- t(c(e_a[i],e_b[i]))%*%Sigma%*%c(e_a[i],e_b[i])
delta <- delta*((1+e_a[i]+e_b[i])^(-4))
}else{
P <- matrix(NA,1,l_a+l_e)
P[1,] <- c(bb_a[i,],(-1)*bb_e[i,])
Sigma <- P%*%fisher%*%t(P)
# when the D component is excluded, e_b is identically zero, so only e_a
# contributes to the delta-method variance
delta <- t(e_a[i])%*%Sigma%*%e_a[i]
delta <- delta*((1+e_a[i])^(-4))
}
if(delta>=0)
{
sd[i] <- sqrt(delta)
esti <- 1/(1+1/(e_a[i]+e_b[i]))
lower[i] <- esti - 1.96*sd[i]
upper[i] <- esti + 1.96*sd[i]
}else{flag <- 1}
}
lower <- ifelse(lower<0, 0, lower)
upper <- ifelse(upper>max_v, max_v, upper)
if(flag == 0)
{
lines(x, lower, col = "grey" ,lty = 2 , lwd = 0.6)
lines(x, upper, col = "grey" ,lty = 2 , lwd = 0.6)
polygon(c(x, rev(x)),c(upper, rev(lower)),col='grey',border = NA, lty=3, density=20)
}else{warning('Please try the bootstrap method for the confidence interval or use a different model.')}
} # boot == FALSE
# legend(x[1], max_v, c('Additive genetic component','Common environmental component', 'Unique environmental component'), col = c('red','blue','pink'), lty=c(1,1,1), lwd=c(2,2,2))
}
}
## ---- end of file: ACEt/R/plot_AtCtEt_h.R ----
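# Plot the posterior variance curves of the A, C and E components from an
# AtCtEtp_mc_model object returned by acetp_mcmc, with 95% pointwise bands
# based on the posterior covariance matrix of the spline coefficients.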
plot_AtCtEtp <- function(AtCtEtp_mcmc, xlab, ylab, main, col, legend)
{
#if(class(AtCtEtp_mcmc)!='AtCtEtp_mc_model')
if(!is(AtCtEtp_mcmc,'AtCtEtp_mc_model'))
{
stop('The first parameter must be an object obtained from the acetp_mcmc function.')
}
model_cur <- AtCtEtp_mcmc
#pheno_m <- c(t(data_m[,1:2]))
#pheno_d <- c(t(data_d[,1:2]))
#T_m <- rep(data_m[,3], each=2)
#T_d <- rep(data_d[,3], each=2)
p_n <- 500
order <- 3
x <- seq(from=model_cur$min_t, to=model_cur$max_t, length.out=p_n)
t_int <- model_cur$max_t-model_cur$min_t
l_m_1 <- (model_cur$max_t-x)/t_int
l_m_2 <- (x-model_cur$min_t)/t_int
n_a <- length(model_cur$beta_a_mc)
n_c <- length(model_cur$beta_c_mc)
n_e <- length(model_cur$beta_e_mc)
if(n_a>2)
{
bb_a <- splineDesign(model_cur$knots_a, x = x, ord=order, outer.ok = TRUE)
}else{
if(n_a==2)
{
bb_a <- matrix(NA, p_n, 2)
bb_a[,1] <- l_m_1
bb_a[,2] <- l_m_2
}else{
bb_a <- matrix(1, p_n, 1)
}
}
points_a <- exp(bb_a%*%model_cur$beta_a_mc)
if(n_c>2)
{
bb_c <- splineDesign(model_cur$knots_c, x = x, ord=order, outer.ok = TRUE)
}else{
if(n_c==2)
{
bb_c <- matrix(NA, p_n, 2)
bb_c[,1] <- l_m_1
bb_c[,2] <- l_m_2
}else{
bb_c <- matrix(1, p_n, 1)
}
}
points_c <- exp(bb_c%*%model_cur$beta_c_mc)
if(n_e>2)
{
bb_e <- splineDesign(model_cur$knots_e, x = x, ord=order, outer.ok = TRUE)
}else{
if(n_e==2)
{
bb_e <- matrix(NA, p_n, 2)
bb_e[,1] <- l_m_1
bb_e[,2] <- l_m_2
}else{
bb_e <- matrix(1, p_n, 1)
}
}
points_e <- exp(bb_e%*%model_cur$beta_e_mc)
plot(range(x), c(0,max(points_c, points_a, points_e)*1.2), type = "n", xlab = xlab, ylab = ylab, main = main)
#bb <- splineDesign(model_cur$knots_a, x = x, ord=order, outer.ok = TRUE)
lines(x, points_a, col = col[1], lwd = 2)
fisher_a <- model_cur$cov_mc[1:n_a,1:n_a]
lower <- rep(NA, length(x))
upper <- rep(NA, length(x))
sd <- rep(NA, length(x))
for(i in 1:length(x))
{
sd[i] <- sqrt((t(bb_a[i,])%*%fisher_a%*%bb_a[i,]))
lower[i] <- sum(bb_a[i,]*model_cur$beta_a_mc) - 1.96*sd[i]
upper[i] <- sum(bb_a[i,]*model_cur$beta_a_mc) + 1.96*sd[i]
}
lines(x, exp(lower), col = "orange" ,lty = 2 , lwd = 0.6)
lines(x, exp(upper), col = "orange" ,lty = 2 , lwd = 0.6)
polygon(c(x, rev(x)),c(exp(upper), rev(exp(lower))),col='grey',border = NA, lty=3, density=20)
lines(x, points_c, col = col[2], lwd = 2)
fisher_c <- model_cur$cov_mc[(n_a+1):(n_a+n_c),(n_a+1):(n_a+n_c)]
lower <- rep(NA, length(x))
upper <- rep(NA, length(x))
sd <- rep(NA, length(x))
for(i in 1:length(x))
{
sd[i] <- sqrt((t(bb_c[i,])%*%fisher_c%*%bb_c[i,]))
lower[i] <- sum(bb_c[i,]*model_cur$beta_c_mc) - 1.96*sd[i]
upper[i] <- sum(bb_c[i,]*model_cur$beta_c_mc) + 1.96*sd[i]
}
lines(x, exp(lower), col = "green" ,lty = 2 , lwd = 0.6)
lines(x, exp(upper), col = "green" ,lty = 2 , lwd = 0.6)
polygon(c(x, rev(x)),c(exp(upper), rev(exp(lower))),col='grey',border = NA, lty=3, density=20)
lines(x, points_e, col = col[3], lwd = 2)
fisher_e <- model_cur$cov_mc[(n_a+n_c+1):(n_a+n_c+n_e),(n_a+n_c+1):(n_a+n_c+n_e)]
lower <- rep(NA, length(x))
upper <- rep(NA, length(x))
sd <- rep(NA, length(x))
for(i in 1:length(x))
{
sd[i] <- sqrt((t(bb_e[i,])%*%fisher_e%*%bb_e[i,]))
lower[i] <- sum(bb_e[i,]*model_cur$beta_e_mc) - 1.96*sd[i]
upper[i] <- sum(bb_e[i,]*model_cur$beta_e_mc) + 1.96*sd[i]
}
lines(x, exp(lower), col = "yellow" ,lty = 2 , lwd = 0.6)
lines(x, exp(upper), col = "yellow" ,lty = 2 , lwd = 0.6)
polygon(c(x, rev(x)),c(exp(upper), rev(exp(lower))),col='grey',border = NA, lty=3, density=20)
if(legend==TRUE)
{
legend(x[1], max(points_c, points_a, points_e)*1.2, c('Additive genetic component','Common environmental component', 'Unique environmental component'), col = col, lty=c(1,1,1), lwd=c(2,2,2))
}
}
## ---- end of file: ACEt/R/plot_AtCtEtp.R ----
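# User-facing plotting interface: checks the class of the fitted object,
# fills in default axis labels, title and colours, and dispatches to the
# variance-curve or heritability plotting functions above.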
plot_acet <- function(acet, boot = FALSE, heri = FALSE, xlab, ylab, main, col, legend = TRUE)
{
if(!(class(acet) %in% c('AtCtEt_model', 'AtDtEt_model', 'AtCtEp_mc_model','AtEtp_mc_model','AtCtEtp_mc_model')))
{
stop('The first argument must be an object of class AtCtEt_model, AtDtEt_model, AtCtEp_mc_model, AtEtp_mc_model or AtCtEtp_mc_model.')
}
if(missing(xlab))
{
xlab_t <- 'Age'
}else{
xlab_t <- xlab
}
if(missing(ylab))
{
if(heri == FALSE)
{
ylab_t <- 'Variance'
}else{
ylab_t <- 'Heritability'
}
}else{
ylab_t <- ylab
}
if(missing(main))
{
if(heri == FALSE)
{
if(!is(acet,'AtDtEt_model'))
{main_t <- "Variance curves of the A, C, and E components"}else{
main_t <- "Variance curves of the A, D, and E components"
}
}else{
main_t <- "Dynamic heritability"
}
}else{
main_t <- main
}
if(missing(col))
{
if(heri == FALSE)
{
col <- c("red","blue","pink")
}else{
col <- "black"
}
}else
{
if(heri == FALSE)
{
if(length(col)<3)
{
stop('The \'col\' argument should have three elements.')
}
}
}
# if(class(acet)=='AtCtEt_model')
if(is(acet,'AtCtEt_model'))
{
if(heri == FALSE)
{
plot_AtCtEt(acet, boot, xlab=xlab_t, ylab=ylab_t, main=main_t, col=col, legend =legend)
}else{
plot_AtCtEt_h(acet, boot, xlab=xlab_t, ylab=ylab_t, main=main_t, col=col)
}
}
if(is(acet,'AtDtEt_model'))
{
if(heri == FALSE)
{
plot_AtDtEt(acet, boot, xlab=xlab_t, ylab=ylab_t, main=main_t, col=col, legend =legend)
}else{
plot_AtDtEt_h(acet, boot, xlab=xlab_t, ylab=ylab_t, main=main_t, col=col)
}
}
# if(class(acet)=='AtCtEtp_mc_model')
if(is(acet,'AtCtEtp_mc_model'))
{
if(heri==FALSE)
{
plot_AtCtEtp(acet, xlab=xlab_t, ylab=ylab_t, main=main_t, col=col, legend =legend)
}else{
plot_AtCtEt_h(acet, boot, xlab=xlab_t, ylab=ylab_t, main=main_t, col=col)
}
}
}
## ---- end of file: ACEt/R/plot_acet.R ----
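# Hypothesis tests for the ACE(t)-p model. For a spline ('d') component,
# tests log-linearity (null) against the spline alternative with a
# parametric-bootstrap LRT; for a log-linear component, tests constancy
# (null) against log-linearity with a chi-square test on the two boundary
# coefficients.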
test_acetp <-
function(acetp, comp, sim = 100, robust = 0, pe = TRUE, verbose = TRUE)
{
if(!(class(acetp) %in% c('AtCtEtp_model')))
{
stop('The first parameter must be an acetp object.')
}
if(!(comp %in% c('a','c','e')))
{
stop('The variable \'comp\' must be \'a\',\'c\' or \'e\' to specify which component to test linearity.')
}
if(acetp$mod[match(comp,c('a','c','e'))]=='c')
{
stop('The component to test is a constant.')
}
if(acetp$mod[match(comp,c('a','c','e'))]=='d')
{
if(verbose == TRUE)
{
cat("Model comparison: \n")
print("Log-linear (null) vs. Spline")
}
mod_a <- acetp$mod
mod_n <- mod_a
mod_n[match(comp,c('a','c','e'))] <- 'l'
k_a <- ifelse((length(acetp$beta_a)-1)<2,8,length(acetp$beta_a)-1)
k_c <- ifelse((length(acetp$beta_c)-1)<2,8,length(acetp$beta_c)-1)
k_e <- ifelse((length(acetp$beta_e)-1)<2,8,length(acetp$beta_e)-1)
data_m <- cbind(acetp$pheno_m[seq(from=1,to=nrow(acetp$pheno_m),by=2)],acetp$pheno_m[seq(from=2,to=nrow(acetp$pheno_m),by=2)])
data_m <- cbind(data_m,acetp$T_m[seq(from=1,to=length(acetp$T_m),by=2)])
data_d <- cbind(acetp$pheno_d[seq(from=1,to=nrow(acetp$pheno_d),by=2)],acetp$pheno_d[seq(from=2,to=nrow(acetp$pheno_d),by=2)])
data_d <- cbind(data_d,acetp$T_d[seq(from=1,to=length(acetp$T_d),by=2)])
m_a <- AtCtEtp_2(data_m, data_d, knot_a=k_a, knot_c=k_c, knot_e=k_e, mod=mod_a, robust=robust)
if(pe==FALSE)
{
p <- test_acetp_2(m_a, comp)
return(p)
}
m_n <- AtCtEtp_2(data_m, data_d, knot_a=k_a, knot_c=k_c, knot_e=k_e, mod=mod_n, robust=robust)
llr <- m_n$lik - m_a$lik
order <- 3
num_m <- nrow(data_m)
num_d <- nrow(data_d)
D_a_n <- m_n$D_a
D_c_n <- m_n$D_c
D_e_n <- m_n$D_e
if(m_n$mod[1]!='c')
{
delta_a <- matrix(0, k_a+3-2-2, k_a+3-2)
for(i in 1:nrow(delta_a))
{
delta_a[i, i:(i+2)] <- c(1,-2,1)
}
D_a_n <- t(delta_a)%*%delta_a
ei_a <- eigen(D_a_n)
bb_a_m <- splineDesign(m_n$knot_a, x = data_m[,3], ord=order, outer.ok = TRUE)
bb_a_d <- splineDesign(m_n$knot_a, x = data_d[,3], ord=order, outer.ok = TRUE)
bb_a_m <- bb_a_m%*%ei_a$vectors
bb_a_d <- bb_a_d%*%ei_a$vectors
}
if(m_n$mod[2]!='c')
{
delta_c <- matrix(0, k_c+3-2-2, k_c+3-2)
for(i in 1:nrow(delta_c))
{
delta_c[i, i:(i+2)] <- c(1,-2,1)
}
D_c_n <- t(delta_c)%*%delta_c
ei_c <- eigen(D_c_n)
bb_c_m <- splineDesign(m_n$knot_c, x = data_m[,3], ord=order, outer.ok = TRUE)
bb_c_d <- splineDesign(m_n$knot_c, x = data_d[,3], ord=order, outer.ok = TRUE)
bb_c_m <- bb_c_m%*%ei_c$vectors
bb_c_d <- bb_c_d%*%ei_c$vectors
}
if(m_n$mod[3]!='c')
{
delta_e <- matrix(0, k_e+3-2-2, k_e+3-2)
for(i in 1:nrow(delta_e))
{
delta_e[i, i:(i+2)] <- c(1,-2,1)
}
D_e_n <- t(delta_e)%*%delta_e
ei_e <- eigen(D_e_n)
bb_e_m <- splineDesign(m_n$knot_e, x = data_m[,3], ord=order, outer.ok = TRUE)
bb_e_d <- splineDesign(m_n$knot_e, x = data_d[,3], ord=order, outer.ok = TRUE)
bb_e_m <- bb_e_m%*%ei_e$vectors
bb_e_d <- bb_e_d%*%ei_e$vectors
}
sim_m <- data_m
sim_d <- data_d
llr_sim <- rep(NA, sim)
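# parametric bootstrap: simulate 'sim' data sets under the fitted log-linear
# null model, refit both models to each, and use the resulting LRT
# statistics as the null distribution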
for(i in 1:sim)
{
beta_a_n <- m_n$beta_a
beta_c_n <- m_n$beta_c
beta_e_n <- m_n$beta_e
var_b_a_n <- m_n$var_b_a
var_b_c_n <- m_n$var_b_c
var_b_e_n <- m_n$var_b_e
beta_a_1 <- m_a$beta_a
beta_c_1 <- m_a$beta_c
beta_e_1 <- m_a$beta_e
var_b_a_1 <- m_a$var_b_a
var_b_c_1 <- m_a$var_b_c
var_b_e_1 <- m_a$var_b_e
if(m_n$mod[1]=='d')
{
beta_a_n[1:(ncol(bb_a_m)-2)] <- rnorm(ncol(bb_a_m)-2, mean=0, sd=sqrt(var_b_a_n/ei_a$values[1:(ncol(bb_a_m)-2)]))
a_m <- exp(bb_a_m%*%beta_a_n)
a_d <- exp(bb_a_d%*%beta_a_n)
}else{
if(m_n$mod[1]=='l')
{
a_m <- exp(bb_a_m[,(ncol(bb_a_m)-1):ncol(bb_a_m)]%*%beta_a_n)
a_d <- exp(bb_a_d[,(ncol(bb_a_d)-1):ncol(bb_a_d)]%*%beta_a_n)
}else{
a_m <- exp(beta_a_n*matrix(1, num_m, 1))
a_d <- exp(beta_a_n*matrix(1, num_d, 1))
}
}
if(m_n$mod[2]=='d')
{
beta_c_n[1:(ncol(bb_c_m)-2)] <- rnorm(ncol(bb_c_m)-2, mean=0, sd=sqrt(var_b_c_n/ei_c$values[1:(ncol(bb_c_m)-2)]))
c_m <- exp(bb_c_m%*%beta_c_n)
c_d <- exp(bb_c_d%*%beta_c_n)
}else{
if(m_n$mod[2]=='l')
{
c_m <- exp(bb_c_m[,(ncol(bb_c_m)-1):ncol(bb_c_m)]%*%beta_c_n)
c_d <- exp(bb_c_d[,(ncol(bb_c_d)-1):ncol(bb_c_d)]%*%beta_c_n)
}else{
c_m <- exp(beta_c_n*matrix(1, num_m, 1))
c_d <- exp(beta_c_n*matrix(1, num_d, 1))
}
}
if(m_n$mod[3]=='d')
{
beta_e_n[1:(ncol(bb_e_m)-2)] <- rnorm(ncol(bb_e_m)-2, mean=0, sd=sqrt(var_b_e_n/ei_e$values[1:(ncol(bb_e_m)-2)]))
e_m <- exp(bb_e_m%*%beta_e_n)
e_d <- exp(bb_e_d%*%beta_e_n)
}else{
if(m_n$mod[3]=='l')
{
e_m <- exp(bb_e_m[,(ncol(bb_e_m)-1):ncol(bb_e_m)]%*%beta_e_n)
e_d <- exp(bb_e_d[,(ncol(bb_e_d)-1):ncol(bb_e_d)]%*%beta_e_n)
}else{
e_m <- exp(beta_e_n*matrix(1, num_m, 1))
e_d <- exp(beta_e_n*matrix(1, num_d, 1))
}
}
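# draw twin pairs from bivariate normals with the classical ACE covariance
# structure: cov(pair) = A + C for MZ twins and 0.5*A + C for DZ twins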
for(j in 1:nrow(sim_m))
{
sigma <- matrix(c(a_m[j]+c_m[j]+e_m[j],a_m[j]+c_m[j],a_m[j]+c_m[j],a_m[j]+c_m[j]+e_m[j]),2,2)
sim_m[j,1:2] <- mvrnorm(1, rep(0,2), sigma)
}
for(j in 1:nrow(sim_d))
{
sigma <- matrix(c(a_d[j]+c_d[j]+e_d[j],0.5*a_d[j]+c_d[j],0.5*a_d[j]+c_d[j],a_d[j]+c_d[j]+e_d[j]),2,2)
sim_d[j,1:2] <- mvrnorm(1, rep(0,2), sigma)
}
s_a <- AtCtEtp_2(sim_m, sim_d, knot_a=k_a, knot_c=k_c, knot_e=k_e, mod=mod_a, robust=robust)
s_n <- AtCtEtp_2(sim_m, sim_d, knot_a=k_a, knot_c=k_c, knot_e=k_e, mod=mod_n, robust=robust)
llr_sim[i] <- s_n$lik-s_a$lik
}
p <- sum(llr_sim>llr)/sim
test <- list(p = p, llr = llr, llr_sim=llr_sim)
}else{
if(verbose == TRUE)
{
cat("Model comparison: \n")
print("Constancy (null) vs. Log-linear")
}
re <- acetp_mcmc(acetp,iter_num=10000)
num_v <- 0
if(sum(acetp$mod=='d')>0)
{
ind <- c()
if(acetp$mod[1]=='d')
{ind <- c(ind, 1)}
if(acetp$mod[2]=='d')
{ind <- c(ind, 2)}
if(acetp$mod[3]=='d')
{ind <- c(ind, 3)}
hessian <- solve(acetp$hessian[ind,ind])
num_v <- nrow(hessian)
for(k in 1:num_v)
{
if(hessian[k,k]<0)
{
hessian[k,] <- 0
hessian[,k] <- 0
}
}
var <- c()
if(acetp$mod[1]=='d')
{var <- c(var, acetp$var_b_a)}
if(acetp$mod[2]=='d')
{var <- c(var, acetp$var_b_c)}
if(acetp$mod[3]=='d')
{var <- c(var, acetp$var_b_e)}
}
index <- c(1,2)
if(comp=='a')
{beta_t <- re$beta_a_mc}
if(comp=='c')
{
beta_t <- re$beta_c_mc
index <- index+length(re$beta_a_mc)
}
if(comp=='e')
{
beta_t <- re$beta_e_mc
index <- index+length(re$beta_a_mc)+length(re$beta_c_mc)
}
cov_cm <- matrix(0,2,2)
if(sum(acetp$mod=='d')>0)
{
cov_cor <- matrix(0,20,2)
for(j in 1:20)
{
var_t <- mvrnorm(1, var, hessian)
if(acetp$mod[1]=='d')
{
acetp$var_b_a <- var_t[1]
var_t <- var_t[-1]
}
if(acetp$mod[2]=='d')
{acetp$var_b_c <- var_t[1]
var_t <- var_t[-1]}
if(acetp$mod[3]=='d')
{acetp$var_b_e <- var_t[1]
var_t <- var_t[-1]}
re_b <- acetp_mcmc(acetp, iter_num = 4000, burnin = 500)
if(comp=='a')
{cov_cor[j,] <- re_b$beta_a_mc}
if(comp=='c')
{cov_cor[j,] <- re_b$beta_c_mc}
if(comp=='e')
{cov_cor[j,] <- re_b$beta_e_mc}
}
cov_cm <- cov(cov_cor)
}
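# Wald-type test of constancy: under the null the two log-linear boundary
# coefficients are equal, so compare (beta_1 - beta_2)^2 with its variance,
# which includes the extra variability propagated from the estimated
# smoothing variances (cov_cm)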
sigma <- c(1,-1)%*%(re$cov_mc[index,index]+cov_cm)%*%c(1,-1)
p <- pchisq((beta_t[1]-beta_t[2])^2/sigma,1,lower.tail=FALSE)
test <- list(p = p, chisq = (beta_t[1]-beta_t[2])^2/sigma)
}
if(verbose == TRUE)
{
return(test)
}else{
return(invisible(test))
}
}
## ---- end of file: ACEt/R/test_acetp.R ----
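# Point-estimate-based test of log-linearity used by test_acetp when
# pe = FALSE: refits the model with the penalties effectively removed and
# Wald-tests whether the coefficients of the penalized spline directions of
# the chosen component are jointly zero.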
test_acetp_2 <-
function(acetp, comp)
{
# re <- acetp_mcmc_2(acetp,iter_num=iter_num, sd = 0.05, burnin=1000)
order <- 3
penal_a <- 2
penal_c <- 2
penal_e <- 2
if(acetp$mod[1]=='c')
{penal_a <- 1}
if(acetp$mod[2]=='c')
{penal_c <- 1}
if(acetp$mod[3]=='c')
{penal_e <- 1}
num_m <- length(acetp$T_m)
num_d <- length(acetp$T_d)
delta_a <- matrix(0, length(acetp$knot_a)-4+order-2-penal_a, length(acetp$knot_a)-4+order-2)
for(i in 1:nrow(delta_a))
{
if(penal_a==2)
{delta_a[i, i:(i+2)] <- c(1,-2,1)}else{
delta_a[i, i:(i+1)] <- c(1,-1)
}
}
D_a <- t(delta_a)%*%delta_a
B_des_a_m <- splineDesign(acetp$knot_a, x=acetp$T_m, ord=order)
B_des_a_d <- splineDesign(acetp$knot_a, x=acetp$T_d, ord=order)
ei_a <- eigen(D_a)
B_des_a_m <- B_des_a_m%*%ei_a$vectors
B_des_a_d <- B_des_a_d%*%ei_a$vectors
D_a <- diag(c(ei_a$values[1:(length(ei_a$values)-2)],0,0))
if(acetp$mod[1]=='l')
{
D_a <- matrix(0,2,2)
B_des_a_m <- B_des_a_m[,(ncol(B_des_a_m)-1):ncol(B_des_a_m)]
B_des_a_d <- B_des_a_d[,(ncol(B_des_a_d)-1):ncol(B_des_a_d)]
}
if(acetp$mod[1]=='c')
{
D_a <- matrix(0,1,1)
B_des_a_m <- matrix(1, num_m, 1)
B_des_a_d <- matrix(1, num_d, 1)
}
delta_c <- matrix(0, length(acetp$knot_c)-4+order-2-penal_c, length(acetp$knot_c)-4+order-2)
for(i in 1:nrow(delta_c))
{
if(penal_c==2)
{delta_c[i, i:(i+2)] <- c(1,-2,1)}else{
delta_c[i, i:(i+1)] <- c(1,-1)
}
}
D_c <- t(delta_c)%*%delta_c
B_des_c_m <- splineDesign(acetp$knot_c, x=acetp$T_m, ord=order)
B_des_c_d <- splineDesign(acetp$knot_c, x=acetp$T_d, ord=order)
ei_c <- eigen(D_c)
B_des_c_m <- B_des_c_m%*%ei_c$vectors
B_des_c_d <- B_des_c_d%*%ei_c$vectors
D_c <- diag(c(ei_c$values[1:(length(ei_c$values)-2)],0,0))
if(acetp$mod[2]=='l')
{
D_c <- matrix(0,2,2)
B_des_c_m <- B_des_c_m[,(ncol(B_des_c_m)-1):ncol(B_des_c_m)]
B_des_c_d <- B_des_c_d[,(ncol(B_des_c_d)-1):ncol(B_des_c_d)]
}
if(acetp$mod[2]=='c')
{
D_c <- matrix(0,1,1)
B_des_c_m <- matrix(1, num_m, 1)
B_des_c_d <- matrix(1, num_d, 1)
}
delta_e <- matrix(0, length(acetp$knot_e)-4+order-2-penal_e, length(acetp$knot_e)-4+order-2)
for(i in 1:nrow(delta_e))
{
if(penal_e==2)
{delta_e[i, i:(i+2)] <- c(1,-2,1)}else{
delta_e[i, i:(i+1)] <- c(1,-1)
}
}
D_e <- t(delta_e)%*%delta_e
B_des_e_m <- splineDesign(acetp$knot_e, x=acetp$T_m, ord=order)
B_des_e_d <- splineDesign(acetp$knot_e, x=acetp$T_d, ord=order)
ei_e <- eigen(D_e)
B_des_e_m <- B_des_e_m%*%ei_e$vectors
B_des_e_d <- B_des_e_d%*%ei_e$vectors
D_e <- diag(c(ei_e$values[1:(length(ei_e$values)-2)],0,0))
if(acetp$mod[3]=='l')
{
D_e <- matrix(0,2,2)
B_des_e_m <- B_des_e_m[,(ncol(B_des_e_m)-1):ncol(B_des_e_m)]
B_des_e_d <- B_des_e_d[,(ncol(B_des_e_d)-1):ncol(B_des_e_d)]
}
if(acetp$mod[3]=='c')
{
D_e <- matrix(0,1,1)
B_des_e_m <- matrix(1, num_m, 1)
B_des_e_d <- matrix(1, num_d, 1)
}
n_a <- length(acetp$beta_a)
n_c <- length(acetp$beta_c)
n_e <- length(acetp$beta_e)
low_a <- -12
upp_a <- 12
low_c <- -12
upp_c <- 12
low_e <- rep(-8,n_e)
#low_e[c(n_e-1,n_e)] <- rep(-8,2)
#upp_e <- 10
upp_e <- rep(8,n_e)
#upp_e[c(n_e-1,n_e)] <- rep(8,2)
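# inflate the smoothing variances so that the penalty is effectively removed
# and the spline coefficients are freely estimated; log-linearity is then
# Wald-tested below by asking whether the penalized (non-linear) directions
# of the chosen component are jointly zero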
if(acetp$var_b_a>0)
{acetp$var_b_a <- 100000000}
if(acetp$var_b_c>0)
{acetp$var_b_c <- 100000000}
if(acetp$var_b_e>0)
{acetp$var_b_e <- 100000000}
result <- optim(runif(n_a+n_c+n_e,min=-0.5,max=0.5), loglik_AtCtEt_epsp_g, gr_AtCtEt_epsp_g, pheno_m = acetp$pheno_m, pheno_d = acetp$pheno_d, B_des_a_m=B_des_a_m, B_des_a_d=B_des_a_d, B_des_c_m=B_des_c_m, B_des_c_d=B_des_c_d, B_des_e_m=B_des_e_m, B_des_e_d=B_des_e_d, var_b_a=acetp$var_b_a, var_b_c=acetp$var_b_c, var_b_e=acetp$var_b_e, D_a=D_a, D_c=D_c, D_e=D_e, lower = c(rep(low_a,n_a),rep(low_c,n_c),low_e), upper = c(rep(upp_a,n_a),rep(upp_c,n_c),upp_e), method = "L-BFGS-B", control=list(maxit = 3000), hessian = TRUE)
if(comp=='a')
{
index <- 1:(n_a-2)
}
if(comp=='c')
{
index <- (1:(n_c-2))+n_a
}
if(comp=='e')
{
index <- (1:(n_e-2))+n_c+n_a
}
beta_t <- result$par[index]
sigma <- solve(result$hessian)[index,index]
chisq_t <- t(beta_t)%*%solve(sigma)%*%beta_t
p_1 <- pchisq(chisq_t,length(index),lower.tail=FALSE)
test <- list(p = p_1, chisq = chisq_t)
return(test)
}
## ---- end of file: ACEt/R/test_acetp_2.R ----
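## R code extracted from the package vignette acet-vignette.Rmd
## (knitr chunk markers retained)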
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----eval=FALSE---------------------------------------------------------------
# install.packages("ACEt")
## ----eval=FALSE---------------------------------------------------------------
# install.packages("devtools")
# library(devtools)
# install_github("lhe17/ACEt")
## -----------------------------------------------------------------------------
library(ACEt)
data(data_ace)
## -----------------------------------------------------------------------------
attributes(data_ace)
head(data_ace$mz)
head(data_ace$dz)
## -----------------------------------------------------------------------------
# fitting the ACE(t) model
re <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('d','d','c'), knot_a = 6, knot_c = 4)
summary(re)
## -----------------------------------------------------------------------------
# part of the expected information matrix
re$hessian[1:8,1:8]
# part the observed information matrix approximated by the L-BFGS algorithm
re$hessian_ap[1:8,1:8]
## -----------------------------------------------------------------------------
re_cc <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('d','c','c'), knot_a = 6, knot_c = 4)
p1 <- pchisq(2*(re_cc$lik-re$lik), 4, lower.tail=FALSE)
p1
re_ac <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('c','d','c'), knot_a = 6, knot_c = 4)
p2 <- pchisq(2*(re_ac$lik-re$lik), 6, lower.tail=FALSE)
p2
re_cn <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('d','n','c'), knot_a = 6, knot_c = 4)
p3 <- 0.5*pchisq(2*(re_cn$lik-re_cc$lik), 1, lower.tail=FALSE)
p3
## -----------------------------------------------------------------------------
plot_acet(re, ylab='Var', xlab='Age (1-50)')
## -----------------------------------------------------------------------------
## fitting an ACE(t) model with the CIs estimated by the bootstrap method
re_b <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('d','d','c'), knot_a = 6, knot_c = 4, boot = TRUE,
num_b = 60)
plot_acet(re_b, boot = TRUE)
## -----------------------------------------------------------------------------
## plot dynamic heritability with the CIs using the delta method
plot_acet(re_b, heri=TRUE, boot = FALSE)
## plot dynamic heritability with the CIs using the bootstrap method
plot_acet(re_b, heri=TRUE, boot = TRUE)
## ----eval=FALSE---------------------------------------------------------------
# ## fitting an ADE(t) model with the CIs estimated by the bootstrap method
# re_b <- AtDtEt(data_ace$mz, data_ace$dz, mod = c('d','d','c'), boot = TRUE, num_b = 60)
# plot_acet(re_b, boot = TRUE)
## -----------------------------------------------------------------------------
## fitting an ACE(t)-p model
re <- AtCtEtp(data_ace$mz, data_ace$dz, knot_a = 8, knot_c = 8, mod=c('d','d','l'))
summary(re)
## -----------------------------------------------------------------------------
re_mcmc <- acetp_mcmc(re, iter_num = 5000, burnin = 500)
summary(re_mcmc)
## -----------------------------------------------------------------------------
plot_acet(re_mcmc)
plot_acet(re_mcmc, heri=TRUE)
## ----knot_10, echo=FALSE, fig.cap="Plots of variance curves of the example data set fitted by the ACE(t) and ACE(t)-p model with 10 interior knots for each component. Left: the ACE(t) model. Right: the ACE(t)-p model."----
knitr::include_graphics("knot_10.jpg")
## -----------------------------------------------------------------------------
test <- test_acetp(re, comp = 'e')
test$p
## ----eval=FALSE---------------------------------------------------------------
# test <- test_acetp(re, comp = 'c', sim = 100, robust = 0)
# test$p
## ---- end of file: ACEt/inst/doc/acet-vignette.R ----
---
title: 'ACEt: An R package for estimating dynamic heritability and twin model comparison'
author: "Liang He"
date: '`r Sys.Date()`'
output:
html_document:
toc: yes
fig_caption: yes
md_document:
toc: yes
bibliography: bibliography.bib
vignette: |
%\VignetteIndexEntry{User guide for ACEt}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
# ACEt v1.9.0
## Installation
The R package can be installed from CRAN
```{r,eval=FALSE}
install.packages("ACEt")
```
The installation requires *Rcpp* (>= 0.11.1) and has been tested on *R-4.1.3*. Installing the *ACEt* package also requires the *BH* and *RcppArmadillo* packages.
Please contact hyx520101@gmail.com for more information.
### Most recent version
To install the latest version from GitHub:
```{r,eval=FALSE}
install.packages("devtools")
library(devtools)
install_github("lhe17/ACEt")
```
## Application to an example dataset
We illustrate how to use the *ACEt* R package with an example dataset that can be loaded with the following code. More detail about the methods is given in @he2016estimating and @He2019ACEtA.
```{r}
library(ACEt)
data(data_ace)
```
The example dataset contains two matrices, ```mz``` and ```dz```, for MZ and DZ twins, respectively. Each matrix includes 2500 twin pairs; the first two columns are the quantitative phenotypes of the twin pair, and the third column (```T_m``` or ```T_d```) is age.
```{r}
attributes(data_ace)
head(data_ace$mz)
head(data_ace$dz)
```
Age is distributed uniformly from 1 to 50 in both twin datasets, and the phenotypes are normally distributed with mean zero. As discussed in @He2019ACEtA, before being used as input for this package, the phenotype should be centered, for example by taking residuals from a linear regression model fitted with ```lm()```, in which covariates for the mean function can be included.
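As a minimal sketch of such pre-adjustment (the phenotype vector ```pheno```, the covariates ```sex``` and ```cohort```, and the data frame ```covar``` are hypothetical names used only for illustration, not part of ```data_ace```):
```{r,eval=FALSE}
# hypothetical sketch: regress out mean-function covariates and use the
# centered residuals as the phenotype columns passed to AtCtEt/AtCtEtp
resid_pheno <- residuals(lm(pheno ~ sex + cohort, data = covar))
```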
Fitting an ACE(t) model can be done by calling the ```AtCtEt``` function, in which users can specify a function (null, constant or splines) for each of the A, C, and E components independently through the ```mod``` argument.
```{r}
# fitting the ACE(t) model
re <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('d','d','c'), knot_a = 6, knot_c = 4)
summary(re)
```
In the above script, an ACE(t) model is fitted for the example dataset. The first two arguments specify the matrices of the phenotypes for MZ and DZ twins, respectively. The argument ```mod = c('d','d','c')``` specifies that we allow the variances of the A and C components to change dynamically and assume the variance of the E component to be constant over age. The ```mod``` argument is a vector of three elements corresponding to the A, C and E components; each element can be ```'d', 'c' or 'n'```, where ```'n'``` represents the exclusion of a component. For example, ```mod = c('d','n','c')``` indicates that we fit an AE model with a dynamic A component and a constant E component. It should be noted that the E component cannot be eliminated. We can also give the number of knots for each component, which is ignored if we choose ```'c'``` or ```'n'``` for that component. The number of randomly generated initial values for the estimation algorithm can be specified using the ```robust``` argument; multiple initial values can be attempted to minimize the risk of missing the global maximum. The ```AtCtEt``` function returns both an expected and an approximated observed Fisher information matrix (shown below), which are generally close to each other and can be used to compute pointwise CIs. Note that the expected information matrix is always positive (semi)definite, but the approximated one is not necessarily positive definite. The returned value ```lik``` is the negative log-likelihood, which can be used in likelihood ratio tests (LRTs) for the comparison of twin models.
```{r}
# part of the expected information matrix
re$hessian[1:8,1:8]
# part of the observed information matrix approximated by the L-BFGS algorithm
re$hessian_ap[1:8,1:8]
```
The ```AtCtEt``` function returns the negative log-likelihood evaluated at the estimates, which is needed for inference based on the LRT. For example, to test whether the A or C component has a constant variance with respect to age, we fit the corresponding null models and calculate the p-values based on $\chi^2$ distributions. It can be seen that the LRT has insufficient statistical power to reject the constancy of the C component at this sample size (```p1>0.05```). In addition, we test whether the C component can be ignored altogether by comparing ```re_cc``` and ```re_cn``` and compute the p-value (```p3```) based on a mixture of $\chi^2$ distributions.
```{r}
re_cc <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('d','c','c'), knot_a = 6, knot_c = 4)
p1 <- pchisq(2*(re_cc$lik-re$lik), 4, lower.tail=FALSE)
p1
re_ac <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('c','d','c'), knot_a = 6, knot_c = 4)
p2 <- pchisq(2*(re_ac$lik-re$lik), 6, lower.tail=FALSE)
p2
re_cn <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('d','n','c'), knot_a = 6, knot_c = 4)
p3 <- 0.5*pchisq(2*(re_cn$lik-re_cc$lik), 1, lower.tail=FALSE)
p3
```
After fitting the ACE(t) model, we can plot the estimated variance curves by calling the ```plot_acet``` function.
```{r}
plot_acet(re, ylab='Var', xlab='Age (1-50)')
```
By default, the 95% pointwise CIs are estimated using the delta method. Alternatively, we can choose the bootstrap method by setting ```boot=TRUE``` and specifying the number of bootstrap resamples, which defaults to 100.
```{r}
## fitting an ACE(t) model with the CIs estimated by the bootstrap method
re_b <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('d','d','c'), knot_a = 6, knot_c = 4, boot = TRUE,
num_b = 60)
plot_acet(re_b, boot = TRUE)
```
Next, we plot the age-specific heritability, $h^{2}(t)=\sigma^{2}_{A}(t)/(\sigma^{2}_{A}(t)+\sigma^{2}_{C}(t)+\sigma^{2}_{E}(t))$, by setting the argument ```heri=TRUE``` in the ```plot_acet``` function. Similarly, we can choose either the delta method or the bootstrap method to generate the CIs.
```{r}
## plot dynamic heritability with the CIs using the delta method
plot_acet(re_b, heri=TRUE, boot = FALSE)
## plot dynamic heritability with the CIs using the bootstrap method
plot_acet(re_b, heri=TRUE, boot = TRUE)
```
An ADE(t) model can be fitted and plotted similarly using the ```AtDtEt``` function as shown below.
```{r,eval=FALSE}
## fitting an ADE(t) model with the CIs estimated by the bootstrap method
re_b <- AtDtEt(data_ace$mz, data_ace$dz, mod = c('d','d','c'), boot = TRUE, num_b = 60)
plot_acet(re_b, boot = TRUE)
```
The ACE(t)-p model is a more stable model that reduces the sensitivity to the number of knots by using P-splines. It is implemented in the ```AtCtEtp``` function, in which users can choose the exponential of penalized splines, a log-linear function or a constant to model each component by setting the ```mod``` argument. Compared to the ACE(t) model, providing an excessive number of knots (the default number of interior knots is 8) is much less of a problem when using the ACE(t)-p model, as it is more important to ensure adequate knots for curves with more fluctuation than to avoid overfitting. Below, we fit the example dataset using the ```AtCtEtp``` function, in which the A and C components are modelled by B-splines with 8 interior knots and the E component by a log-linear function. As with the ```AtCtEt``` function, we can use the ```robust``` argument to specify the number of randomly generated initial values, which reduces the risk of the EM algorithm being stuck at a local maximum.
```{r}
## fitting an ACE(t)-p model
re <- AtCtEtp(data_ace$mz, data_ace$dz, knot_a = 8, knot_c = 8, mod=c('d','d','l'))
summary(re)
```
The ```AtCtEtp``` function finds the MLE of the variance $\sigma^{2}_{\beta^{A,C,E}}$ using the integrated likelihood and also provides estimates of the spline coefficients $\beta^{A,C,E}$ based on maximum a posteriori (MAP) estimation. For a log-linear variance component (the E component in this example), $\beta$ is a vector of two elements such that $\exp(\beta)$ gives the variances of this component at the minimum and the maximum age in the dataset. To obtain the empirical Bayes estimates of $\beta^{A,C,E}$ and the covariance matrix using the MCMC method, we then call the ```acetp_mcmc``` function by plugging in the result from the ```AtCtEtp``` function. We can also specify the numbers of MCMC iterations and burn-in.
```{r}
re_mcmc <- acetp_mcmc(re, iter_num = 5000, burnin = 500)
summary(re_mcmc)
```
Given the estimates together with their covariance matrix, we can plot the variance curves or the dynamic heritability by calling the ```plot_acet``` function. The ```boot``` option is ignored for the ACE(t)-p model.
```{r}
plot_acet(re_mcmc)
plot_acet(re_mcmc, heri=TRUE)
```
Assigning too many knots in the ACE(t)-p model is much less harmful than in the ACE(t) model. Comparing the following two plots, obtained by applying the two models with 10 interior knots for each component to the example data set, suggests that the ACE(t) model suffers from overfitting whereas the ACE(t)-p model behaves properly.
```{r knot_10, echo=FALSE, fig.cap="Plots of variance curves of the example data set fitted by the ACE(t) and ACE(t)-p model with 10 interior knots for each component. Left: the ACE(t) model. Right: the ACE(t)-p model."}
knitr::include_graphics("knot_10.jpg")
```
Finally, we give an example of testing for a log-linear or constant variance curve. The ```test_acetp``` function is dedicated to model comparison for the ACE(t)-p model and returns a p-value from an LRT using a resampling method when testing log-linearity, or from a $\chi^2$ distribution when testing constancy. First, the following code tests whether the E component is invariant with age. Before testing, we need to fit the data using the ```AtCtEtp``` function to obtain an ```AtCtEtp_model``` object ```re```. Note that when testing for constancy, the component must be specified as log-linear when fitting the model (as shown above).
```{r}
test <- test_acetp(re, comp = 'e')
test$p
```
The result suggests that the E component is time-invariant, as the p-value is larger than 0.05. Next, we test whether a log-linear model fits the C component better.
```{r,eval=FALSE}
test <- test_acetp(re, comp = 'c', sim = 100, robust = 0)
test$p
```
The result (p>0.05) shows that the null hypothesis of log-linearity is not rejected.
## References
## ---- end of file: ACEt/inst/doc/acet-vignette.Rmd ----
---
title: 'ACEt: An R package for estimating dynamic heritability and twin model comparison'
author: "Liang He"
date: '`r Sys.Date()`'
output:
html_document:
toc: yes
fig_caption: yes
md_document:
toc: yes
bibliography: bibliography.bib
vignette: |
%\VignetteIndexEntry{User guide for ACEt}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
# ACEt v1.9.0
## Installation
The R package can be installed from CRAN
```{r,eval=FALSE}
install.packages("ACEt")
```
The installation requires *Rcpp-0.11.1* and has been tested on *R-4.1.3*. The installation of the *ACEt* package also requires installing the *BH* and *RcppArmadillo* packages.
Please contact hyx520101@gmail.com for more information.
### Most recent version
To install the latest version from github:
```{r,eval=FALSE}
install.packages("devtools")
library(devtools)
install_github("lhe17/ACEt")
```
## Application to an example dataset
We illustrate how to utilize the *ACEt* R package with an example dataset that can be loaded with the following codes. More detail about the method is given in @he2016estimating and @He2019ACEtA.
```{r}
library(ACEt)
data(data_ace)
```
The example dataset contains two matrices ```mz``` and ```dz``` for MZ and DZ twins, respectively. Each matrix includes 2500 twin pairs, of which the first two columns are the quantitative phenotype of the twin pair and the third column (```T_m``` or ```T_d```) is age.
```{r}
attributes(data_ace)
head(data_ace$mz)
head(data_ace$dz)
```
The age is distributed uniformly from 1 to 50 in both twin datasets and the phenotypes are normally distributed with a mean equal to zero. As discussed in @He2019ACEtA, before used as an input for this package, the phenotype should be centered, for example, by using residuals from a linear regression model ```lm()``` in which covariates for the mean function can be included.
Fitting an ACE(t) model can be done by calling the ```AtCtEt``` function, in which users can specify a function (null, constant or splines) for each of the A, C, and E components independently through the ```mod``` argument.
```{r}
# fitting the ACE(t) model
re <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('d','d','c'), knot_a = 6, knot_c = 4)
summary(re)
```
In the above script, an ACE(t) model is fitted for the example dataset. The first two arguments specify the matrices of the phenotypes for MZ and DZ twins, respectively. The argument ```mod = c('d','d','c')``` specifies that we allow the variances of the A and C components to change dynamically and assume the variance of the E component to be a constant over age. The ```mod``` argument is a vector of three elements corresponding to the A, C and E components that can be ```'d', 'c' or 'n'```, in which ```'n'``` represents the exclusion of a component. For example, ```mod = c('d','n','c')``` indicates that we fit an AE model with a dynamic A component and a constant E component. It should be noted that the E component cannot be eliminated. We can also give the number of knots for each component, which is ignored if we choose ```'c'``` or ```'n'``` for that component. The number of randomly generated initial values for the estimation algorithm can be specified using the ```robust``` argument. Multiple initial values can be attempted to minimize the risk of missing the global maximum. The ```AtCtEt``` function returns both an expected and an approximate observed Fisher information matrices (shown below), which are close to each other in general and can be used to compute pointwise CIs. Note that the expected information matrix is always positive (semi)definite, but the approximated one is not necessarily positive definite. The returned value ```lik``` is the negative log-likelihood that can be used for LRT for the comparison of twin models.
```{r}
# part of the expected information matrix
re$hessian[1:8,1:8]
# part of the observed information matrix approximated by the L-BFGS algorithm
re$hessian_ap[1:8,1:8]
```
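As an illustrative sketch (not part of the documented workflow), pointwise standard errors of the coefficient estimates can be approximated by inverting the expected information matrix; these are the ingredients of the delta-method CIs used below.
```{r,eval=FALSE}
## Approximate covariance matrix of the estimates; square roots of the
## diagonal give the standard errors.
covmat <- solve(re$hessian)
se <- sqrt(diag(covmat))
```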
The ```AtCtEt``` function returns the minus log-likelihood evaluated at the estimates, which is needed for inference based on LRT. For example, to test whether the A or C component has a constant variance with respect to age, we fit the corresponding null models and calculate the p-values based on $\chi^2$ distributions. It can be seen that, at this sample size, the LRT does not have sufficient statistical power to reject the constancy of the C component (```p1>0.05```). In addition, we test whether the C component can be ignored by comparing ```re_cc``` and ```re_cn``` and compute the p-value (```p3```) based on a mixture of $\chi^2$ distributions.
```{r}
re_cc <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('d','c','c'), knot_a = 6, knot_c = 4)
p1 <- pchisq(2*(re_cc$lik-re$lik), 4, lower.tail=FALSE)
p1
re_ac <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('c','d','c'), knot_a = 6, knot_c = 4)
p2 <- pchisq(2*(re_ac$lik-re$lik), 6, lower.tail=FALSE)
p2
re_cn <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('d','n','c'), knot_a = 6, knot_c = 4)
p3 <- 0.5*pchisq(2*(re_cn$lik-re_cc$lik), 1, lower.tail=FALSE)
p3
```
After fitting the ACE(t) model, we can plot the estimated variance curves by calling the ```plot_acet``` function.
```{r}
plot_acet(re, ylab='Var', xlab='Age (1-50)')
```
By default, the 95% pointwise CIs are estimated using the delta method. Alternatively, we can choose the bootstrap method by setting ```boot=TRUE``` and specifying the number of bootstrap resamples, the default value of which is 100.
```{r}
## fitting an ACE(t) model with the CIs estimated by the bootstrap method
re_b <- AtCtEt(data_ace$mz, data_ace$dz, mod = c('d','d','c'), knot_a = 6, knot_c = 4, boot = TRUE,
num_b = 60)
plot_acet(re_b, boot = TRUE)
```
Next, we plot the age-specific heritability by setting the argument ```heri=TRUE``` in the ```plot_acet``` function. Similarly, we can choose either the delta method or the bootstrap method to generate the CIs.
```{r}
## plot dynamic heritability with the CIs using the delta method
plot_acet(re_b, heri=TRUE, boot = FALSE)
## plot dynamic heritability with the CIs using the bootstrap method
plot_acet(re_b, heri=TRUE, boot = TRUE)
```
An ADE(t) model can be fitted and plotted similarly using the ```AtDtEt``` function as shown below.
```{r,eval=FALSE}
## fitting an ADE(t) model with the CIs estimated by the bootstrap method
re_b <- AtDtEt(data_ace$mz, data_ace$dz, mod = c('d','d','c'), boot = TRUE, num_b = 60)
plot_acet(re_b, boot = TRUE)
```
The ACE(t)-p model is a more stable model, which reduces the sensitivity to the number of knots by using P-splines. It is implemented in the ```AtCtEtp``` function, in which users can choose an exponential of penalized splines, a log-linear function, or a constant to model each component by setting the ```mod``` argument. In contrast to the ACE(t) model, providing an excessive number of knots (the default number of interior knots is 8) is not a serious problem for the ACE(t)-p model: it is more important to ensure adequate knots for curves with more fluctuation than to avoid overfitting. Below, we fit the example dataset using the ```AtCtEtp``` function, in which the A and C components are modelled by B-splines with 8 interior knots and the E component by a log-linear function. Similar to the ```AtCtEt``` function, we can use the ```robust``` argument to specify the number of randomly generated initial values, which reduces the risk of the EM algorithm getting stuck at a local maximum.
```{r}
## fitting an ACE(t)-p model
re <- AtCtEtp(data_ace$mz, data_ace$dz, knot_a = 8, knot_c = 8, mod=c('d','d','l'))
summary(re)
```
The ```AtCtEtp``` function finds the MLE of the variance $\sigma^{2}_{\beta^{A,C,E}}$ using the integrated likelihood and also provides estimates of the spline coefficients, i.e. $\beta^{A,C,E}$, which are based on maximum a posteriori (MAP) estimation. For a log-linear variance component (the E component in this example), $\beta$ is a vector of two elements such that $\exp(\beta)$ gives the variances of this component at the minimum and maximum ages in the dataset. To obtain the empirical Bayes estimates of $\beta^{A,C,E}$ and the covariance matrix using the MCMC method, we then call the ```acetp_mcmc``` function, plugging in the result from the ```AtCtEtp``` function. We can also specify the number of MCMC iterations and the burn-in.
```{r}
re_mcmc <- acetp_mcmc(re, iter_num = 5000, burnin = 500)
summary(re_mcmc)
```
Given the estimates together with their covariance matrix, we can plot the variance curves or dynamic heritability by calling the ```plot_acet``` function. The ```boot``` option is ignored for the ACE(t)-p model.
```{r}
plot_acet(re_mcmc)
plot_acet(re_mcmc, heri=TRUE)
```
Assigning too many knots is much less harmful in the ACE(t)-p model than in the ACE(t) model. Comparing the following two plots, obtained by applying the two models with 10 knots for each component to the example data set, suggests that the ACE(t) model has an overfitting problem while the ACE(t)-p model works properly.
```{r knot_10, echo=FALSE, fig.cap="Plots of variance curves of the example data set fitted by the ACE(t) and ACE(t)-p model with 10 interior knots for each component. Left: the ACE(t) model. Right: the ACE(t)-p model."}
knitr::include_graphics("knot_10.jpg")
```
Finally, we give an example of testing a log-linear or constant variance curve. The ```test_acetp``` function is dedicated to model comparison for the ACE(t)-p model and returns a p-value from an LRT, using a resampling method for testing log-linearity or a $\chi^2$ distribution for testing constancy. First, the following code tests whether the E component is invariant with age. Before testing, we need to fit the data using the ```AtCtEtp``` function and obtain an ```AtCtEtp_model``` object ```re```. Note that when testing a constant component, the component must be specified as log-linear when fitting the model (as shown above).
```{r}
test <- test_acetp(re, comp = 'e')
test$p
```
The result suggests that the E component is time-invariant as the p-value is larger than 0.05. Next, we test whether a log-linear model would be fitted better for the C component.
```{r,eval=FALSE}
test <- test_acetp(re, comp = 'c', sim = 100, robust = 0)
test$p
```
The result (p>0.05) shows that the null hypothesis of log-linearity is not rejected.
## References
| /scratch/gouwar.j/cran-all/cranData/ACEt/vignettes/acet-vignette.Rmd |
## covr: skip=all
## Look for existing generic functions also in imported namespaces.
## This will affect whether setGenericS3() creates a generic function
## or not.
options("R.methodsS3:checkImports:setGenericS3"=TRUE)
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/000.R |
###########################################################################/**
# @RdocDocumentation "Non-documented objects"
#
# % Other missing docs
# @eval "t <- readLines('../incl/999.missingdocs.txt'); t <- trim(unlist(strsplit(t, split=' '))); t <- t[nchar(t) > 0]; t2 <- gsub('\\[', '\\\\[', t); t <- unique(t); t <- sprintf('\\alias{%s}', t); paste(t, collapse='\n')"
#
# \description{
# This page contains aliases for all "non-documented" objects that
# \code{R CMD check} detects in this package.
#
# Almost all of them are \emph{generic} functions that have specific
# document for the corresponding method coupled to a specific class.
# Other functions are re-defined by \code{setMethodS3()} to
# \emph{default} methods. Neither of these two classes are non-documented
# in reality.
# The rest are deprecated methods.
# }
#
# @author
#
# @keyword internal
#*/###########################################################################
############################################################################
# HISTORY:
# 2005-05-15
# o Created to please R CMD check.
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/999.NonDocumentedObjects.R |
#########################################################################/**
# @RdocPackage ACNE
#
# \description{
# @eval "getDescription(ACNE)"
# }
#
# \section{Installation and updates}{
# This package requires the \pkg{aroma.affymetrix} package.
# To install this package, do:
# \code{install.packages("ACNE")}
# }
#
# \section{To get started}{
# \enumerate{
# \item For a one-command pipeline, see the @see "doACNE" method.
# \item For other usages, see the @see "NmfPlm" class.
# }
# }
#
# @author "MO, HB, AR"
#
# \section{License}{
# @eval "getLicense(ACNE)"
# }
#
# \references{
# [1] @include "../incl/OrtizM_etal_2010.Rd" \cr
# }
#*/#########################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/999.package.R |
###########################################################################/**
# @RdocClass NmfPlm
#
# @title "The NmfPlm class"
#
# \description{
# @classhierarchy
#
# This class represents the NMF model of [1].
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Arguments passed to @see "aroma.affymetrix::ProbeLevelModel".}
# \item{maxIter}{The maximum number of iteration in the NMF step.}
# \item{maxIterRlm}{A positive @integer specifying the maximum number of
# iterations used in rlm.}
# \item{refs}{An index @vector (@integer or @logical) specifying the
# reference samples. If @NULL, all samples are used as a reference.}
# \item{flavor}{(Internal/developmental only)
# A @character string specifying which algorithm to use.}
# }
#
# \section{Fields and Methods}{
# @allmethods "public"
# }
#
# \seealso{
# Internally, for each SNP the NMF model is fitted using the
# @see "fitSnpNmf" function.
# }
#
# \references{
# [1] @include "../incl/OrtizM_etal_2010.Rd" \cr
# }
#
# @author
#*/###########################################################################
setConstructorS3("NmfPlm", function(..., maxIter=10L, maxIterRlm=20L, refs=NULL, flavor=c("v4", "v3", "v2", "v1")) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'maxIter':
maxIter <- Arguments$getInteger(maxIter, range=c(1L,999L));
# Argument 'maxIterRlm':
maxIterRlm <- Arguments$getInteger(maxIterRlm, range=c(1L,999L));
# Argument 'flavor':
flavor <- match.arg(flavor);
extend(ProbeLevelModel(...), "NmfPlm",
.maxIter = maxIter,
.maxIterRlm = maxIterRlm,
.refs = refs,
.flavor = flavor
)
})
setMethodS3("getAsteriskTags", "NmfPlm", function(this, collapse=NULL, ...) {
# Returns 'PLM[,<shift>]'
tags <- NextMethod("getAsteriskTags", collapse=NULL);
tags[1] <- "NMF";
flavor <- this$.flavor;
if (!is.null(flavor) && flavor != "v4") {
tags <- c(tags, flavor);
}
# Collapse
tags <- paste(tags, collapse=collapse);
tags;
})
setMethodS3("getProbeAffinityFile", "NmfPlm", function(this, ...) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get the probe affinities (and create files etc)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
paf <- NextMethod("getProbeAffinityFile");
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Update the encode and decode functions
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
setEncodeFunction(paf, function(groupData, ...) {
phi <- .subset2(groupData, "phiA");
stdvs <- .subset2(groupData, "phiB");
outliers <- .subset2(groupData, "phiOutliers");
# Encode outliers as the sign of 'pixels'; -1 = TRUE, +1 = FALSE
pixels <- sign(0.5 - as.integer(outliers));
list(intensities=phi, stdvs=stdvs, pixels=pixels);
})
setDecodeFunction(paf, function(groupData, ...) {
intensities <- .subset2(groupData, "intensities");
stdvs <- .subset2(groupData, "stdvs");
pixels <- .subset2(groupData, "pixels");
# Outliers are encoded by the sign of 'pixels'.
outliers <- as.logical(1-sign(pixels));
list(
phiA=intensities,
phiB=stdvs,
phiOutliers=outliers
);
})
paf;
}, private=TRUE)
setMethodS3("getFitUnitFunction", "NmfPlm", function(this,...) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get algorithm parameters
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Set the amount of shift to be used.
shift <- this$shift;
if (is.null(shift))
shift <- 0;
# Algorithm version
flavor <- this$.flavor;
if (is.null(flavor)) {
flavor <- "v4";
}
# Maximum number of iterations to fit.
maxIter <- this$.maxIter;
if (is.null(maxIter)) {
maxIter <- 10L;
}
# Maximum number of iterations to fit rlm.
maxIterRlm <- this$.maxIterRlm;
if (is.null(maxIterRlm)) {
maxIterRlm <- 20L;
}
# Reference samples.
refs <- this$.refs;
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Select the 'NMF' function to use
# (When adding a new version, add it here; not below)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
nmfFcn <- NULL;
if (flavor == "v1") {
nmfFcn <- get("NmfPinv", mode="function");
} else if (flavor == "v2") {
nmfFcn <- get("NmfFast", mode="function");
} else if (flavor == "v3") {
nmfFcn <- get("NmfPinvBeta", mode="function");
} else if (flavor == "v4") {
nmfFcn <- get("fitSnpNmf", mode="function");
} else {
throw("Unknown flavor: ", flavor);
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Setup the fit function
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fitUnits <- function(y, prevpaf=NULL,...){
SNPunit <- y;
groupNames <- names(SNPunit);
if (length(SNPunit) > 1L) {
SNPdata <- rbind(SNPunit[[1L]]$intensities, SNPunit[[2L]]$intensities);
SNPdata <- SNPdata + shift;
indNonPos <- (SNPdata <= 0);
SNPdata[indNonPos] <- 0.0001;
nbrGroups <- 2L;
} else {
SNPdata <- SNPunit[[1L]]$intensities;
nbrGroups <- 1L;
}
if (nbrGroups == 2L) {
NMFdata <- nmfFcn(SNPdata, maxIter=maxIter, maxIterRlm=maxIterRlm, refs=refs);
W <- NMFdata[[1L]];
H <- NMFdata[[2L]];
I <- dim(H)[2L];
K <- dim(W)[1L]/2;
# prepare returned data
# allele A
theta1 <- H[1L,];
sdTheta <- rep(1, times=I);
thetaOutliers <- logical(I);
phi1 <- W[1:K,1L];
sdPhi1 <- W[(K+1):(2*K),1L];
phiOutliers <- logical(K);
# allele B
theta2 <- H[2L,];
phi2 <- W[1:K,2L];
sdPhi2 <- W[(K+1):(2*K),2L];
# fitted unit
fitUU <- list(
A = list(theta=theta1, sdTheta=sdTheta, thetaOutliers=thetaOutliers, phiA=phi1, phiB=sdPhi1, phiOutliers=phiOutliers),
B = list(theta=theta2, sdTheta=sdTheta, thetaOutliers=thetaOutliers, phiA=phi2, phiB=sdPhi2, phiOutliers=phiOutliers)
);
} else {
I <- dim(SNPdata)[2L];
K <- dim(SNPdata)[1L];
theta1 <- rep(1, times=I);
sdTheta <- rep(1, times=I);
thetaOutliers <- logical(I);
phi1 <- double(K);
sdPhi1 <- double(K);
phiOutliers <- logical(K);
fitUU <- list(list(theta=theta1, sdTheta=sdTheta, thetaOutliers=thetaOutliers, phiA=phi1, phiB=sdPhi1, phiOutliers=phiOutliers));
}
names(fitUU) <- groupNames;
fitUU;
} # getFitUnitFunction()
fitUnits;
}, private=TRUE)
############################################################################
# HISTORY:
# 2010-09-28 [HB]
# o Now argument 'refs' defaults to NULL (not 0), which means all samples.
# 2010-06-04 [MO]
# o Added refs as argument.
# 2010-05-18 [MO]
# o Added maxIterRlm as argument.
# 2010-05-17 [HB]
# o Now a flavor tag is added to NmfPlm:s only if flavor != "v4" (default).
# 2009-11-18 [MO]
# o Removed internal save() in getFitUnitFunction() of NmfPlm.
# 2009-03-24 [HB]
# o Added Rdoc comments.
# 2009-01-28 [HB]
# o Made getFitUnitFunction() slightly faster. Cleaned up code. Added
# support for 'flavor' to specify which NMF fitting function to use.
# 2008-12-08 [HB]
# o Tidied up code.
# o Updated to make use of new ProbeLevelModel.R.
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/NmfPlm.R |
###########################################################################/**
# @RdocClass NmfSnpPlm
#
# @title "The NmfSnpPlm class"
#
# \description{
# @classhierarchy
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Arguments passed to @see "NmfPlm".}
# \item{mergeStrands}{If @TRUE, the sense and the anti-sense strands are
# fitted together, otherwise separately.}
# }
#
# \section{Fields and Methods}{
# @allmethods "public"
# }
#
# @author
#*/###########################################################################
setConstructorS3("NmfSnpPlm", function(..., mergeStrands=FALSE) {
extend(NmfPlm(...), c("NmfSnpPlm", uses(SnpPlm())),
mergeStrands=mergeStrands
)
})
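# Illustration (not run): typical usage on a calibrated and normalized
# AffymetrixCelSet 'csN', following the package test scripts.
if (FALSE) {
  plm <- NmfSnpPlm(csN, mergeStrands=TRUE);
  fit(plm, verbose=TRUE);
  ces <- getChipEffectSet(plm);
}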
setMethodS3("getAsteriskTags", "NmfSnpPlm", function(this, collapse=NULL, ...) {
tags <- NextMethod("getAsteriskTags", collapse=NULL);
# Add class specific parameter tags
if (!this$mergeStrands)
tags <- c(tags, "+-");
# Collapse
tags <- paste(tags, collapse=collapse);
tags;
}, protected=TRUE)
############################################################################
# HISTORY:
# 2009-03-24 [HB]
# o Added Rdoc comments.
# 2009-01-28 [HB]
# o Dropped argument 'prevpaf' from constructor. The new ProbeLevelModel
# class will support this.
# 2008-07-03 [MO]
# o Created.
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/NmfSnpPlm.R |
###########################################################################/**
# @set "class=SnpNmfFit"
# @RdocMethod plot
#
# @title "Generates a multi-panel plot summarizing the NMF SNP fit"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{lim, cnLim, epsLim}{The plot ranges for the probe data,
# the CN estimates, and the probe-affinity estimates.}
# \item{main}{A @character string to be the main title of the plot.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns nothing.
# }
#
# \seealso{
# See @see "fitSnpNmfArray".
# }
#
# @keyword internal
#*/###########################################################################
setMethodS3("plot", "SnpNmfFit", function(x, lim=c(0,2^16), cnLim=c(0,4), epsLim=c(-1,1)*2^12, main=NULL, ...) {
# To please R CMD check
fit <- x;
Y <- fit$Y;
W <- fit$W;
H <- fit$H;
Vest <- W %*% H;
Yest <- snpMatrixToArray(Vest);
E <- Y - Yest;
layout(matrix(1:4, ncol=2L, byrow=TRUE));
par(mar=c(3.5,3.5,4.5,0.5)+0.1, mgp=c(2.1,0.6,0));
xlab <- expression(y[A]);
ylab <- expression(y[B]);
plot(NA, xlim=lim, ylim=lim, xlab=xlab, ylab=ylab);
abline(a=0, b=1, lty=3);
title(main="Probe pair signals");
stext(side=3, pos=1, sprintf("Number of probe pairs: %d*%d=%d", dim(Y)[3L], dim(Y)[1L], dim(Y)[3L]*dim(Y)[1L]))
d <- apply(Y, MARGIN=3, FUN=points);
xlab <- expression(C[A]);
ylab <- expression(C[B]);
plot(NA, xlim=cnLim, ylim=cnLim, xlab=xlab, ylab=ylab);
abline(a=0, b=1, lty=3);
lines(x=c(0,2), y=c(2,0), lty=3);
title(main="ASCN estimates");
stext(side=3, pos=1, sprintf("Number of arrays: %d", dim(Y)[3]));
points(t(H));
xlab <- expression(phi[A]);
ylab <- expression(phi[B]);
plot(NA, xlim=lim, ylim=lim, xlab=xlab, ylab=ylab);
abline(a=0, b=1, lty=3);
title(main="AS probe affinity estimates");
stext(side=3, pos=1, sprintf("Number of affinities: 2*%d=%d", dim(Y)[1L], nrow(W)));
col <- matrix(c("red", "blue"), nrow=nrow(W)/2, ncol=2L, byrow=TRUE);
legend("topright", pch=par("pch"), col=col[1L,], c("PMA", "PMB"), cex=0.8, bg="white");
points(W, col=col);
xlab <- expression(epsilon[A]);
ylab <- expression(epsilon[B]);
plot(NA, xlim=epsLim, ylim=epsLim, xlab=xlab, ylab=ylab);
abline(a=0, b=1, lty=3);
title(main="AS errors");
stext(side=3, pos=1, sprintf("Number of error pairs: %d*%d=%d", dim(E)[3L], dim(E)[1L], dim(E)[3L]*dim(E)[1L]));
d <- apply(E, MARGIN=3L, FUN=points);
title(main=main, outer=TRUE, line=-1)
}) # plot()
############################################################################
# HISTORY:
# 2009-03-25 [HB]
# o Created.
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/SnpNmfFit.plot.R |
###########################################################################/**
# @RdocFunction WHInit
#
# @title "Initialization of the W and H matrices"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{V}{A KxI @matrix where I is the number of arrays and
# K is the number of probes where K should be even (K=2L).}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @list:
# \item{W}{A Kx2 @matrix of initial probe-affinity estimates.}
# \item{H}{A 2xI @matrix of initial allele-specific copy-number estimates.}
# \item{status}{An @integer specifying the status of the initialization:
# 0=normal case, 1=only one allele (either AA or BB), or
# 2=all samples are AB.
# }
# }
#
# \details{
# The allele-specific copy numbers are estimated using a
# naive genotyping algorithm.
# The probe-affinities are estimated using a pseudo inverse.
# }
#
# @keyword internal
#*/###########################################################################
WHInit <- function(V, ...) {
# Number of arrays
I <- ncol(V);
# Number of probes
K <- nrow(V);
# Number of probe pairs
L <- as.integer(K/2);
# A small positive value
eps <- 0.0001;
H <- matrix(0, nrow=2L, ncol=I);
W <- matrix(0, nrow=K, ncol=2L);
rrA <- 1:L;
rrB <- (L+1):K;
rrBA <- c(rrB, rrA);
PMA <- V[rrA,,drop=FALSE];
PMB <- V[rrB,,drop=FALSE];
  # We distinguish three genotype cases:
  # (1) AA (PMA > 2*PMB),
  # (2) AB (PMA < 2*PMB & PMB < 2*PMA), and
  # (3) BB (PMB > 2*PMA).
# We apply this test for each of the probes and we use majority voting.
H[1,] <- as.integer(colMeans(PMA > 0.5*PMB) > 0.5);
H[2,] <- as.integer(colMeans(PMB > 0.5*PMA) > 0.5);
summary <- 2*H[1L,] + H[2L,];
dummy <- unique(summary);
status <- 0L;
# If all the samples have the same genotype, it is a special case
if (length(dummy) == 1L) {
# We have only one Genotype
# Case AA or BB
if (prod(H[,1L]) == 0) {
#print('only one allele AA or BB');
# Use the median for the first column of W
W[,1L] <- rowMedians(V)/2;
# Flip it for the second column
W[,2L] <- W[rrBA,1L];
# Change both of them if it was BB
H <- H*2;
if (H[2L,1L] == 1){
# Was it BB?
W <- W[,c(2L,1L),drop=FALSE];
H <- H[c(2L,1L),,drop=FALSE];
}
status <- 1L;
} else {
#disp('only samples AB')
W[,1L] <- rowMedians(V);
W[,2L] <- W[,1L];
# In this case there is no way to compute the cross hybridization
      # We assume that there is no cross hybridization (just to assume
# something :-)
W[rrB,1L] <- eps;
W[rrA,2L] <- eps;
status <- 2L;
}
} else {
# Normal case
aux <- colSums(H);
aux <- rep(aux, times=2L);
dim(aux) <- c((length(aux)/2), 2);
aux <- t(aux);
H <- 2 * H/aux;
H[is.na(H)] <- 0;
W <- t(miqr.solve(t(H),t(V)));
W[W < 0] <- eps;
# Sometimes, there are errors in the genotyping... Check correlation
corDiff <- cor(W[,1L],W[rrBA,2L]) - cor(W[,1L],W[,2L]);
if (is.na(corDiff) || corDiff < 0.1) {
#print('Too large Correlation')
#print('Solving for one allele')
W0 <- W;
W[,1L] <- rowMedians(W0);
W[,2L] <- W0[rrBA,1L];
H <- miqr.solve(W, V);
H[H < 0] <- 0;
status <- 1L;
}
}
# Sanity check (may be removed in the future /HB 2009-03-24)
stopifnot(nrow(W) == K && ncol(W) == 2L);
stopifnot(nrow(H) == 2L && ncol(H) == I);
list(W=W, H=H, status=status);
} # WHInit()
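# Illustration (not run): initialize W and H from simulated probe signals
# with known genotypes. All numbers below are made up for illustration.
if (FALSE) {
  L <- 10L; I <- 40L;                        # probe pairs and arrays
  gt <- sample(0:2, size=I, replace=TRUE);   # number of B alleles per array
  H0 <- rbind(2-gt, gt);                     # true (C_A, C_B) per array
  W0 <- matrix(runif(2*L*2, min=100, max=1000), nrow=2*L, ncol=2);
  V <- W0 %*% H0 + matrix(rnorm(2*L*I, sd=10), nrow=2*L);
  init <- WHInit(V);
  init$status;                               # 0 in the typical mixed case
}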
############################################################################
# HISTORY:
# 2009-03-24 [HB]
# o Added Rdoc comments.
# o Cleaned up code.
# 2009-02-02 [MO]
# o Change some code and make it more efficient
# 2009-01-28 [HB]
# o BUG FIX: The code of WHInit() assumed 20 probes in one place.
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/WHInit.R |
###########################################################################/**
# @RdocDefault doACNE
# @alias doACNE.AffymetrixCelSet
#
# @title "(ACNE)"
#
# \description{
# @get "title" based on [1].
# The algorithm is processed in bounded memory, meaning virtually
# any number of arrays can be analyzed on also very limited computer
# systems.
# }
#
# \usage{
# @usage doACNE,AffymetrixCelSet
# @usage doACNE,default
# }
#
# \arguments{
# \item{csR, dataSet}{An @see "AffymetrixCelSet" (or the name of an
# @see "AffymetrixCelSet").}
# \item{fln}{If @TRUE, CRMAv2-style PCR fragment-length normalization
# is performed, otherwise not.}
# \item{drop}{If @TRUE, the RMA summaries are returned, otherwise
# a named @list of all intermediate and final results.}
# \item{verbose}{See @see "Verbose".}
# \item{...}{Additional arguments used to set up @see "AffymetrixCelSet"
# (when argument \code{dataSet} is specified).}
# }
#
# \value{
# Returns a named @list, iff \code{drop == FALSE}, otherwise
# a named @list of @see "aroma.core::AromaUnitTotalCnBinarySet"
# and @see "aroma.core::AromaUnitFracBCnBinarySet".
# }
#
# \references{
# [1] @include "../incl/OrtizM_etal_2010.Rd" \cr
# }
#
# @author "HB"
#*/###########################################################################
setMethodS3("doACNE", "AffymetrixCelSet", function(csR, fln=FALSE, drop=TRUE, verbose=FALSE, ...) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'csR':
className <- "AffymetrixCelSet";
if (!inherits(csR, className)) {
throw(sprintf("Argument 'csR' is not a %s: %s", className, class(csR)[1]));
}
# Argument 'fln':
  fln <- Arguments$getLogical(fln);
# Argument 'drop':
drop <- Arguments$getLogical(drop);
# Argument 'verbose':
verbose <- Arguments$getVerbose(verbose);
verbose && enter(verbose, "ACNE");
verbose && cat(verbose, "Data set");
verbose && print(verbose, csR);
# List of objects to be returned
res <- list();
if (!drop) {
res <- c(res, list(csR=csR));
}
verbose && enter(verbose, "ACNE/CRMAv2/Allelic crosstalk calibration");
acc <- AllelicCrosstalkCalibration(csR, model="CRMAv2");
verbose && print(verbose, acc);
csC <- process(acc, verbose=verbose);
verbose && print(verbose, csC);
verbose && exit(verbose);
if (!drop) {
res <- c(res, list(acc=acc, csC=csC));
}
# Clean up
csR <- acc <- NULL;
verbose && enter(verbose, "ACNE/CRMAv2/Base position normalization");
bpn <- BasePositionNormalization(csC, target="zero");
verbose && print(verbose, bpn);
csN <- process(bpn, verbose=verbose);
verbose && print(verbose, csN);
verbose && exit(verbose);
if (!drop) {
res <- c(res, list(bpn=bpn, csN=csN));
}
# Clean up
csC <- bpn <- NULL;
verbose && enter(verbose, "ACNE/Probe summarization");
plm <- NmfSnpPlm(csN, mergeStrands=TRUE);
verbose && print(verbose, plm);
if (length(findUnitsTodo(plm)) > 0L) {
# Fit CN probes quickly (~5-10s/array + some overhead)
units <- fitCnProbes(plm, verbose=verbose);
verbose && str(verbose, units);
# Fit remaining units, i.e. SNPs (~5-10min/array)
units <- fit(plm, verbose=verbose);
verbose && str(verbose, units);
units <- NULL;
}
# Clean up
csN <- NULL;
ces <- getChipEffectSet(plm);
verbose && print(verbose, ces);
verbose && exit(verbose);
if (!drop) {
res <- c(res, list(plm=plm));
}
# Clean up
plm <- NULL;
# PCR fragment-length normalization?
if (fln) {
verbose && enter(verbose, "ACNE/CRMAv2/PCR fragment-length normalization");
fln <- FragmentLengthNormalization(ces, target="zero");
verbose && print(verbose, fln);
cesN <- process(fln, verbose=verbose);
verbose && print(verbose, cesN);
verbose && exit(verbose);
if (!drop) {
res <- c(res, list(fln=fln, cesN=cesN));
}
# Clean up
fln <- ces <- NULL;
} else {
cesN <- ces;
if (!drop) {
res <- c(res, list(cesN=cesN));
}
}
verbose && enter(verbose, "ACNE/Export to technology-independent data files");
dsNList <- exportTotalAndFracB(cesN, verbose=verbose);
verbose && print(verbose, dsNList);
verbose && exit(verbose);
# Clean up
cesN <- NULL;
if (!drop) {
res <- c(res, list(dsNList=dsNList));
}
# Return only the final results?
if (drop) {
res <- dsNList;
}
verbose && exit(verbose);
res;
}) # doACNE()
setMethodS3("doACNE", "default", function(dataSet, ..., verbose=FALSE) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'dataSet':
dataSet <- Arguments$getCharacter(dataSet);
# Argument 'verbose':
verbose <- Arguments$getVerbose(verbose);
verbose && enter(verbose, "ACNE");
verbose && enter(verbose, "ACNE/Setting up CEL set");
csR <- AffymetrixCelSet$byName(dataSet, ..., verbose=less(verbose, 50),
.onUnknownArgs="ignore");
verbose && print(verbose, csR);
verbose && exit(verbose);
res <- doACNE(csR, ..., verbose=verbose);
# Clean up
csR <- NULL;
verbose && exit(verbose);
res;
}) # doACNE()
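# Illustration (not run): typical usage on a data set referenced by name,
# following the package test scripts.
if (FALSE) {
  res <- doACNE("GSE8605", chipType="Mapping10K_Xba142", verbose=TRUE);
  print(res);
}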
############################################################################
# HISTORY:
# 2013-10-17
# o CLEANUP: Removed all explicit calls to gc().
# o CLEANUP: Dropped argument 'ram' to fit() of doACNE().
# o Turned doACNE() for character into a default method.
# o Created from doCRMAv1() in aroma.affymetrix.
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/doACNE.R |
###########################################################################/**
# @RdocFunction fitSnpNmf
#
# @title "Non-negative matrix factorization (NMF) of a matrix containing SNP probe signals"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{V}{A KxI @matrix where I is the number of arrays and K is the
# number of probes where K should be even (K=2L).}
# \item{acc}{A positive @double specifying the convergence threshold. For
# more details on convergence, see below.}
# \item{maxIter}{A positive @integer specifying the maximum number of
# iterations used to calculate the decomposition.}
# \item{maxIterRlm}{A positive @integer specifying the maximum number of
# iterations used in rlm.}
# \item{refs}{An index @vector (@integer or @logical) specifying the
# reference samples. If @NULL, all samples are used as a reference.}
# }
#
# \value{
# Returns a @list:
# \item{W}{The Kx2 @matrix containing allele-specific affinity estimates.}
# \item{H}{A 2xI @matrix containing allele-specific copy number estimates.}
# \item{hasConverged}{@TRUE if the algorithm converged, otherwise @FALSE.
# If not applicable, it is @NA.}
# \item{nbrOfIterations}{The number of iteration ran before stopping.
# If not applicable, it is @NA.}
# }
#
# \details{
# The algorithm is considered to have converged when the maximum update
# of any allele-specific copy number of any array (\code{H}) is smaller
# than \code{acc}.
# }
#
# \seealso{
# @see "WHInit", @see "robustWInit", @see "robustHInit", and
# @see "removeOutliers".
# }
#
# @keyword internal
#*/###########################################################################
fitSnpNmf <- function(V, acc=0.02, maxIter=10, maxIterRlm=20, refs=NULL) {
I <- ncol(V);
K <- nrow(V);
# Argument 'refs':
if (is.null(refs)) {
refs <- seq_len(I);
} else if (!is.vector(refs)) {
throw("Argument 'refs' is not a vector: ", class(refs)[1L]);
} else if (is.logical(refs)) {
if (length(refs) != I) {
throw("The number of elements in argument 'refs' does not match the number of column in argument 'V': ", length(refs), " != ", I);
}
refs <- which(refs);
} else if (is.numeric(refs)) {
if (!all(1L <= refs & refs <= I)) {
throw("Some elements in argument 'refs' is out of range [1,", I, "].");
}
refs <- as.integer(refs);
} else {
throw("Argument 'refs' must be either a logical or a numeric vector: ", mode(refs));
}
# A small positive value
eps <- 1e-5;
# Another small positive value
eps2 <- 1e-9;
# Truncate negative values to a small positive value
V[V < eps] <- eps;
# Estimate the initial values of Affinities and Naive Genotyping calls
WHinit <- WHInit(V[,refs,drop=FALSE]);
status <- WHinit$status;
W <- WHinit$W; # Not really used
H <- WHinit$H;
W <- robustWInit(V[,refs,drop=FALSE], H=H);
H <- robustHInit(V, W=W);
V <- removeOutliers(V, W=W, H=H);
# If there is only one allele, no more to do...
# The algorithm (for one allele) is already a robust estimator
if (status == 1L || status == 2L) {
# Shrink average total copy numbers to be close to CN=2.
totalCNs <- colSums(H[,refs,drop=FALSE]);
b <- median(totalCNs)/2; # Scale factor
W <- b*W;
H <- H/b;
hasConverged <- NA;
iter <- NA_integer_;
} else {
onesA <- matrix(1, nrow=1L, ncol=I);
onesB <- matrix(1, nrow=K, ncol=1L);
ones2 <- matrix(1, nrow=K, ncol=I);
iter <- 1L;
hasConverged <- FALSE;
while (!hasConverged && iter < maxIter) {
# Remember H from previous iteration to test for convergence
Hprev <- H;
# Compute new W solving the system of equations
H[H < eps] <- eps;
W <- t(miqr.solve(t(H), t(V)));
W[W < eps] <- eps;
# Compute the H
H <- miqr.solve(W, V);
H[H < eps] <- eps;
# Normalizing the W
norms <- colSums(W);
norms <- norms + eps2; # Add a small positive value
W <- W %*% diag(1/norms);
H <- diag(norms) %*% H;
# Shrink average total copy numbers to be close to CN=2.
totalCNs <- colSums(H[,refs,drop=FALSE]);
b <- median(totalCNs)/2; # Scale factor
W <- b*W;
H <- H/b;
# Converged?
hasConverged <- (max(abs(Hprev - H)) < acc);
# Next iteration
iter <- iter + 1L;
} # while(...)
# Robust method for shrinking the average total copy number
# to close to CN=2.
Dmat <- rlm(t(H[,refs,drop=FALSE]), matrix(data=2, nrow=ncol(H[,refs,drop=FALSE]), ncol=1L), maxit=maxIterRlm);
coefs <- Dmat$coefficients;
H <- diag(coefs) %*% H;
W <- W %*% diag(1/coefs);
# Truncate non-positive estimate
H[H < eps] <- eps;
W[W < eps] <- eps;
} # if (status ...)
# Sanity check (may be removed in the future /HB 2009-03-24)
stopifnot(nrow(W) == K && ncol(W) == 2L);
stopifnot(nrow(H) == 2L && ncol(H) == I);
list(W=W, H=H, hasConverged=hasConverged, nbrOfIterations=iter);
} # fitSnpNmf()
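# Illustration (not run): fit the NMF model to simulated SNP probe signals.
# All numbers below are made up for illustration.
if (FALSE) {
  L <- 10L; I <- 40L;
  gt <- sample(0:2, size=I, replace=TRUE);
  H0 <- rbind(2-gt, gt);
  W0 <- matrix(runif(2*L*2, min=100, max=1000), nrow=2*L, ncol=2);
  V <- W0 %*% H0 + matrix(rnorm(2*L*I, sd=10), nrow=2*L);
  fit <- fitSnpNmf(V);
  fit$hasConverged;
  range(colSums(fit$H));   # total copy numbers; shrunk to be close to 2
}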
############################################################################
# HISTORY:
# 2010-09-28 [HB]
# o Now argument 'refs' defaults to NULL (not 0), which means all samples.
# o Clean up and robustification of recent edits.
# 2010-06-04 [MO]
# o Added refs as argument.
# 2010-05-18 [MO]
# o Added maxIterRlm as argument.
# 2009-11-18 [HB]
# o Removed internal save() in fitSnpNmf().
# 2009-03-24 [HB]
# o Renamed from Nmf() to fitSnpNmf(). The former name was to generic
# while our algorithm is rather specific to SNP data.
# o Added optional arguments and internal "constants".
# o Added Rdoc comments.
# o Cleanup.
# 2009-02-15 [MO]
# o Robust method to get the H close to copy number equal to 2.
# 2009-02-05 [MO]
# o Clean the code
# 2009-02-04 [MO]
# o Comment of the lines which try to get the columns of W to be similar
# 2009-01-30 [MO]
# o Robust estimation of W
# o Robust estimation of H
# o With the robust estimations no need to differenciate between status
# o W and H using systems of equations
# o Normalization of the columns of W in each iteration
# o Normalization of the columns of H close to two
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/fitSnpNmf.R |
###########################################################################/**
# @RdocFunction fitSnpNmfArray
#
# @title "Allele-specific copy number estimation using non-negative matrix factorization (NMF)"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{Y}{An Lx2xI @array where L is the number of probe pairs,
# 2 is the number of alleles (A and B),
# and I is the number of arrays.}
# \item{...}{Additional arguments passed to @see "fitSnpNmf".}
# }
#
# \value{
# Returns a @list of class \code{SnpNmfFit}:
# \item{Y}{The Lx2xI @array \code{Y}.}
# \item{W}{The Kx2 @matrix containing allele-specific affinity estimates
# where K=2L.}
# \item{H}{A 2xI @matrix containing allele-specific copy number estimates.}
# \item{hasConverged}{@TRUE if the algorithm converged, otherwise @FALSE.
# If not applicable, it is @NA.}
# \item{nbrOfIterations}{The number of iteration ran before stopping.
# If not applicable, it is @NA.}
# }
#
# \details{
# The algorithm is considered to have converged when the maximum update
# of any allele-specific copy number of any array (\code{H}) is smaller
# than \code{acc}.
# }
#
# @examples "../incl/fitSnpNmfArray.Rex"
#
# \seealso{
# Internally, the array is stacked into a 2LxI matrix and decomposed
# using @see "fitSnpNmf".
# See @see "plot.SnpNmfFit".
# }
#
# @keyword internal
#*/###########################################################################
fitSnpNmfArray <- function(Y, ...) {
# Argument 'Y':
dim <- dim(Y);
if (length(dim) != 3L) {
dimStr <- paste(dim, collapse="x");
stop("Argument 'Y' is not a three-dimensional array: ", dimStr);
}
if (dim[2] != 2L) {
stop("Second dimension of argument 'Y' is not of length 2: ", dim[2L]);
}
# Transform to a "stacked" matrix
V <- Y;
dim(V) <- c(dim[1L]*dim[2L], dim[3L]);
fit <- fitSnpNmf(V, ...);
fit$V <- V
fit$Y <- Y
W2 <- fit$W;
dim(W2) <- c(dim[1:2], dim(W2)[2L]);
fit$W2 <- W2;
fit$args <- list(...);
class(fit) <- c(class(fit), "SnpNmfFit");
fit;
} # fitSnpNmfArray()
############################################################################
# HISTORY:
# 2009-03-25 [HB]
# o Added fitSnpNmfArray() accepting a Lx2xI array instead of a 2LxI matrix.
# o Created.
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/fitSnpNmfArray.R |
miqr.solve <- function(a, b) {
x <- tryCatch({
qr.solve(a, b);
}, error = function(e) {
e;
}, finally = "");
if (!is.matrix(x)) {
x <- ginv(a) %*% b;
}
x;
} # miqr.solve()
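# Illustration (not run): qr.solve() handles the full-rank case; for a
# rank-deficient system the Moore-Penrose pseudoinverse (ginv) is used.
if (FALSE) {
  A <- matrix(c(1, 2, 2, 4), nrow=2);   # singular matrix
  b <- c(1, 2);
  x <- miqr.solve(A, b);                # falls back to ginv(A) %*% b
}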
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/miqr.solve.R |
plotFreqB <- function(pos, freqB, pch=176, ylim=c(0,1), xlab="Position (Mb)", ylab=expression(beta == theta[B]/(theta[A]+theta[B]))) {
xlim <- range(pos, na.rm=TRUE);
scale <- 0.04*diff(xlim);
xlim[1] <- xlim[1] - 3*scale;
xlim[2] <- xlim[2] + 1*scale;
x0 <- xlim[1];
# Plot raw data
plot(pos, freqB, pch=pch, xlim=xlim, ylim=ylim, xlab=xlab, ylab=ylab);
# Plot density
d <- density(na.omit(freqB), from=0, to=1);
d$y <- d$y / max(d$y, na.rm=TRUE);
x <- d$x;
d$x <- x0 + 2.5*scale*d$y;
d$y <- x;
lines(d, lwd=2, col="blue");
# Annotate
# abline(h=c(1/3,2/3), lty=2, lwd=2, col="blue");
adj <- c(-0.1, 0.5);
y <- 1/20;
for (ss in c("AA", "AB", "BB")) {
text(x=x0, y=y, adj=adj, ss, cex=1.3, col="blue");
y <- y + 9/20;
}
} # plotFreqB()
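# Illustration (not run): plot simulated allele B fractions along a
# chromosome. Positions are in Mb; all values are made up.
if (FALSE) {
  pos <- sort(runif(500, min=0, max=100));
  freqB <- sample(c(0, 0.5, 1), size=500, replace=TRUE) +
           rnorm(500, sd=0.03);
  plotFreqB(pos, freqB);
}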
############################################################################
# HISTORY:
# 2009-01-27 [HB]
# o Created.
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/plotFreqB.R |
###########################################################################/**
# @RdocFunction pseudoinverse
#
# @title "Calculates the pseudo inverse of a matrix"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{X}{A @numeric @matrix.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @matrix.
# }
#
# \seealso{
# Internally @see "base::svd" is used.
# }
#
# @keyword "internal"
#*/###########################################################################
pseudoinverse <- function(X, ...) {
svd <- svd(X)
d <- svd$d
if(length(d) == 0L) {
array(0, dim=dim(X)[2:1])
} else {
svd$v %*% (1/d * t(svd$u))
}
} # pseudoinverse()
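# Illustration (not run): the pseudoinverse X^+ satisfies X X^+ X = X.
if (FALSE) {
  X <- matrix(1:6, nrow=3);            # a 3x2 matrix
  Xinv <- pseudoinverse(X);
  max(abs(X %*% Xinv %*% X - X));      # close to zero
}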
############################################################################
# HISTORY:
# 2009-03-24 [HB]
# o Added Rdoc comments.
# o Tidied up code. Minor minor speed up.
# 2008-xx-xx
# o Created.
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/pseudoinverse.R |
###########################################################################/**
# @RdocFunction removeOutliers
#
# @title "Removes outliers in matrix containing SNP signals"
#
# \description{
# @get "title" by identifying outlier elements. The values of the
# elements that are outliers are substituted by the corresponding
# predicted values \code{Yest=W*H} from the current affinity (\code{W})
# and copy number (\code{H}) estimates.
# }
#
# @synopsis
#
# \arguments{
# \item{Y}{A KxI @matrix.}
# \item{W}{A Kx2 @matrix of probe-affinity estimates.}
# \item{H}{A 2xI @matrix of allele-specific copy-number estimates.}
# \item{tau}{A scalar specifying the threshold for identifying outliers.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a KxI @matrix where outliers have been "pruned".
# Outliers are substituted by the corresponding value of \code{Yest}.
# }
#
# @keyword internal
#*/###########################################################################
removeOutliers <- function(Y, W, H, tau=10, ...) {
# Number of arrays
I <- ncol(Y);
# Number of probes
K <- nrow(Y);
# Sanity check (may be removed in the future /HB 2009-03-24)
stopifnot(nrow(W) == K && ncol(W) == 2L);
stopifnot(nrow(H) == 2L && ncol(H) == I);
# Output matrix
Yprime <- Y;
# Calculating residuals (E) of model Y = W*H + E.
Yest <- W %*% H;
E <- Y - Yest;
# Identify outliers
rowMad <- rowMads(E);
outliers <- which(abs(E) > tau*rowMad);
# Replacing outliers
if (length(outliers) > 0L) {
Yprime[outliers] <- Yest[outliers];
}
# Sanity check (may be removed in the future /HB 2009-03-24)
stopifnot(nrow(Yprime) == K && ncol(Yprime) == I);
Yprime;
} # removeOutliers()
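# Illustration (not run): a single corrupted probe signal is replaced by
# its model prediction from W %*% H. All numbers are made up.
if (FALSE) {
  K <- 20L; I <- 40L;
  W <- matrix(runif(K*2, min=100, max=1000), nrow=K, ncol=2);
  gt <- sample(0:2, size=I, replace=TRUE);
  H <- rbind(2-gt, gt);
  Y <- W %*% H + matrix(rnorm(K*I, sd=10), nrow=K);
  Y[1,1] <- 1e6;                       # inject an outlier
  Yp <- removeOutliers(Y, W=W, H=H);
  Yp[1,1];                             # back near the predicted value
}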
############################################################################
# HISTORY:
# 2009-03-24 [HB]
# o Renamed from RemoveOutliers() to removeOutliers().
# o Added Rdoc comments.
# o Cleaning up code. Minor speed up.
# 2009-02-05 [MO]
# o Fix the value we assign to the outlier
# 2009-02-02 [MO]
# o More efficient code and different name for the indexes (i -> ii)
# 2009-01-30 [MO]
# o Change all the file. Faster and more appropiate for our case
# o Now we use the estimation of the W and the H
# 2009-01-28 [HB]
# o Added an explicit return value.
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/removeOutliers.R |
###########################################################################/**
# @RdocFunction robustHInit
#
# @title "Robust initialization of the H (copy number) matrix"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{V}{A KxI @matrix where I is the number of arrays and K is the
# number of probes where K should be even (K=2L).}
# \item{W}{A Kx2 @matrix of probe-affinity estimates.}
# \item{maxIter}{The maximum number of iteration.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a 2xI @matrix of robustified allele-specific copy-number estimates.
# }
#
# \details{
# This function utilized a random number generator.
# }
#
# @keyword internal
#*/###########################################################################
robustHInit <- function(V, W, maxIter=5L, ...) {
# Number of arrays
I <- ncol(V);
# Number of probes
K <- nrow(V);
# Sanity check (may be removed in the future /HB 2009-03-24)
stopifnot(nrow(W) == K && ncol(W) == 2L);
# V = W * H
#[Vi1... Vin; Vj1... Vjn] = W * H
  # Sample 2 random probes at a time for a specific number of iterations
H <- matrix(0, nrow=2L, ncol=I);
Haux <- matrix(0, nrow=maxIter*K, ncol=I);
contHaux <- 1L;
for (ii in 1:maxIter) {
probes <- sample(K);
    oddIdxs <- seq(from=1L, to=length(probes), by=2L); # pair up consecutive probes
for (jj in oddIdxs) {
pp <- c(probes[jj], probes[jj+1]);
Haux[c(contHaux,contHaux+1L),] <- miqr.solve(W[pp,],V[pp,]);
contHaux <- contHaux + 2L;
} # for (jj ...)
} # for (ii ...)
# Truncate non-positive values
Haux[Haux < 0] <- 0;
oddIdxs <- seq(from=1L, to=maxIter*K, by=2L);
evenIdxs <- seq(from=2L, to=maxIter*K, by=2L);
H[1L,] <- colMedians(Haux, rows = oddIdxs);
H[2L,] <- colMedians(Haux, rows = evenIdxs);
# Sanity check (may be removed in the future /HB 2009-03-24)
stopifnot(nrow(H) == 2L && ncol(H) == I);
H;
} # robustHInit()
############################################################################
# HISTORY:
# 2009-03-24 [HB]
# o Renamed from RobustHinit() to robustHinit().
# o Cleaned up code.
# o Added Rdoc comments.
# 2009-02-02 [MO]
# o Change some code to make more efficient and change the name of the
# indexes.
# 2009-01-30 [MO]
# o Created
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/robustHInit.R |
###########################################################################/**
# @RdocFunction robustWInit
#
# @title "Robust initialization of the W (affinity) matrix"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{V}{A KxI @matrix where I is the number of arrays and K is the
# number of probes where K should be even (K=2L).}
# \item{H}{A 2xI @matrix of allele-specific copy-number estimates.}
# \item{maxIter}{The maximum number of iterations.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a Kx2 @matrix of robustified probe-affinity estimates.
# }
#
# \details{
# This function utilized a random number generator.
# }
#
# @keyword internal
#*/###########################################################################
robustWInit <- function(V, H, maxIter=50L, ...) {
# Number of arrays
I <- ncol(V);
# Number of probes
K <- nrow(V);
# Number of probe pairs
L <- as.integer(K/2);
# Sanity check (may be removed in the future /HB 2009-03-24)
stopifnot(nrow(H) == 2L && ncol(H) == I);
# A small positive value
eps <- 1e-5;
Ws <- matrix(0, nrow=K, ncol=2*maxIter);
W <- matrix(0, nrow=K, ncol=2L);
# Create genotyping group of samples
AA <- which(2*H[2L,] < H[1L,]);
BB <- which(H[2L,] > 2*H[1L,]);
AB <- which(H[2L,] < 2*H[1L,] & 2*H[2L,] > H[1L,]);
nAA <- length(AA);
nAB <- length(AB);
nBB <- length(BB);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 1:
# In case most of the samples belong to only one group we twist some
# of them so we "have" signal from both alleles.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
percSamples <- floor(0.85*I);
if (nAA > percSamples || nAB > percSamples || nBB > percSamples) {
rrA <- 1:L;
rrB <- (L+1):K;
idxs <- sample(I, size=floor(I/2));
# Majority are heterozygotes?
if (nAB > percSamples) {
V[rrA,idxs] <- min(V);
V[rrB,idxs] <- V[rrB,idxs] * 2;
H[1L,idxs] <- 0;
H[2L,idxs] <- 2;
} else {
aux <- V[rrA,idxs];
V[rrA,idxs] <- V[rrB,idxs];
V[rrB,idxs] <- aux;
aux <- H[1L,idxs];
H[1L,idxs] <- H[2L,idxs];
H[2L,idxs] <- aux;
}
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 2:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Assign arrays into genotype groups (AA, AB, BB).
AA <- which(2*H[2L,] < H[1L,]);
BB <- which(H[2L,] > 2*H[1L,]);
AB <- which(H[2L,] < 2*H[1L,] & 2*H[2L,] > H[1L,]);
nAA <- length(AA);
nAB <- length(AB);
nBB <- length(BB);
hasAA <- (nAA > 0L);
hasAB <- (nAB > 0L);
hasBB <- (nBB > 0L);
cont <- 1L;
for (ii in 1:maxIter) {
# Select two random samples with different genotypes
groups <- sample(1:3, size=2L, replace=FALSE);
sampleAA <- 0L;
sampleBB <- 0L;
sampleAB <- 0L;
# Pick a random AA sample?
# FIXME: What if hasAA (nAA == 0) is FALSE?!? /HB 2014-04-27
if(!hasBB || !hasAB || (hasAA && (groups[1L] == 1L || groups[2L] == 1L))) {
idx <- sample(nAA, size=1L); # May return 0L? /HB 2014-04-27
sampleAA <- AA[idx]; # ...which then becomes integer(0)
}
# Pick a random BB sample?
# FIXME: What if hasBB (nBB == 0) is FALSE?!? /HB 2014-04-27
if(!hasAA || !hasAB || (hasBB && (groups[1L] == 2L || groups[2L] == 2L))) {
idx <- sample(nBB, size=1L); # May return 0L? /HB 2014-04-27
sampleBB <- BB[idx]; # ...which then becomes integer(0)
}
# Pick a random AB sample?
# FIXME: What if hasAB (nAB == 0) is FALSE?!? /HB 2014-04-27
if(!hasAA || !hasBB || (hasAB && (groups[1L] == 3L || groups[2L] == 3L))) {
idx <- sample(nAB, size=1L); # May return 0L? /HB 2014-04-27
sampleAB <- AB[idx]; # ...which then becomes integer(0)
}
# ...and here we get integer(0)*something => integer(0).
# Comparing (integer(0) > 0L) gives logical(0), which in turn
# gives an error in the if statements, e.g. if (logical(0)) {}
# => Error in if (logical(0)) { : argument is of length zero.
# FIXME: So, the above selection of two random samples is not
# fully correct/safe. /HB 2014-04-27
if (sampleAA*sampleBB > 0L) {
cc <- c(sampleAA, sampleBB);
} else if (sampleAB*sampleBB > 0L) {
cc <- c(sampleAB, sampleBB);
} else {
cc <- c(sampleAA, sampleAB);
}
dd <- c(cont, cont+1L);
Ws[,dd] <- t(miqr.solve(t(H[,cc]),t(V[,cc])));
cont <- cont + 2L;
} # for (ii ...)
oddIdxs <- seq(from=1L, to=2L*maxIter, by=2L);
evenIdxs <- seq(from=2L, to=2L*maxIter, by=2L);
mediansWA <- rowMedians(Ws, cols = oddIdxs);
mediansWB <- rowMedians(Ws, cols = evenIdxs);
# Truncate non-positive values
mediansWA[mediansWA < 0] <- eps;
mediansWB[mediansWB < 0] <- eps;
W[,1L] <- mediansWA;
W[,2L] <- mediansWB;
# Sanity check (may be removed in the future /HB 2009-03-24)
stopifnot(nrow(W) == K && ncol(W) == 2L);
W;
} # robustWInit()
############################################################################
# HISTORY:
# 2014-04-27 [HB]
# o Added comments to highlight potential problems with how the two random
# samples are choosen in the iteration of the 2nd step. See also
# issue report in thread 'CRMA v2 error with Mouse Diversity' on
# 2014-03-25 to the aroma.affymetrix mailing list. It reports on 'Error
# in if (sampleAA * sampleBB > 0L) { : argument is of length zero' with
# "Calls: fit ... fit.ProbeLevelModel -> <Anonymous> -> FUN -> nmfFcn ->
# robustWInit".
# o CLEANUP: Restructured the iteration of the 2nd step.
# 2009-02-24 [HB]
# o Added Rdoc comments.
# o Cleanig up and standarizing code.
# 2009-02-02 [MO]
# o Change some code to make it more efficient.
# 2009-01-30 [MO]
# o Created
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/robustWInit.R |
###########################################################################/**
# @RdocFunction snpArrayToMatrix
# @alias snpMatrixToArray
#
# @title "Reshapes SNP data in matrix form to array form and vice versa"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{Y}{A 2LxI @matrix or a Lx2xI @array,
# where L is the number of probe pairs and I is the number of arrays.}
# \item{dropNames}{If @TRUE, dimension names are dropped,
# otherwise preserved.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a Lx2xI @array or a 2LxI matrix.
# }
#
# @examples "../incl/snpArrayToMatrix.Rex"
#
# @keyword internal
#*/###########################################################################
snpArrayToMatrix <- function(Y, dropNames=TRUE, ...) {
# Argument 'Y':
if (!is.array(Y)) {
stop("Argument 'Y' is not an array: ", class(Y)[1L]);
}
dim <- dim(Y);
if (length(dim) != 3L) {
dimStr <- paste(dim, collapse="x");
stop("Argument 'Y' is not a three-dimensional array: ", dimStr);
}
if (dim[2] != 2L) {
stop("Second dimension of argument 'Y' is not of length 2: ", dim[2L]);
}
# Transform to a "stacked" matrix
dim <- c(dim[1L]*dim[2L], dim[3L]);
if (dropNames) {
dim(Y) <- dim;
} else {
dimnames <- list(NULL, dimnames(Y)[[3L]]);
dim(Y) <- dim;
dimnames(Y) <- dimnames;
}
Y;
} # snpArrayToMatrix()
snpMatrixToArray <- function(Y, dropNames=TRUE, ...) {
# Argument 'Y':
if (!is.matrix(Y)) {
stop("Argument 'Y' is not a matrix: ", class(Y)[1]);
}
dim <- dim(Y);
if (dim[1L] %% 2 != 0L) {
stop("The length of the first dimension of argument 'Y' is not even: ", dim[1]);
}
# Unstack to an array
dim <- c(dim[1L]/2, 2L, dim[2L]);
if (dropNames) {
dim(Y) <- dim;
} else {
dimnames <- list(NULL, c("A", "B"), dimnames(Y)[[2L]]);
dim(Y) <- dim;
dimnames(Y) <- dimnames;
}
Y;
} # snpMatrixToArray()
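# Illustration (not run): round-trip between the Lx2xI array form and the
# stacked 2LxI matrix form (allele A probes stacked on top of allele B).
if (FALSE) {
  Y <- array(rnorm(5*2*3), dim=c(5, 2, 3));  # 5 probe pairs, 3 arrays
  V <- snpArrayToMatrix(Y);                  # a 10x3 matrix
  Y2 <- snpMatrixToArray(V);
  stopifnot(all(Y2 == Y));
}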
############################################################################
# HISTORY:
# 2009-03-25 [HB]
# o Created.
############################################################################
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/snpArrayToMatrix.R |
## covr: skip=all
.onLoad <- function(libname, pkgname) {
ns <- getNamespace(pkgname)
pkg <- Package(pkgname)
assign(pkgname, pkg, envir=ns)
}
.onAttach <- function(libname, pkgname) {
pkg <- get(pkgname, envir=getNamespace(pkgname))
startupMessage(pkg)
}
| /scratch/gouwar.j/cran-all/cranData/ACNE/R/zzz.R |
##########################################################################
# Data set:
# GSE8605/
# Mapping10K_Xba142/
# GSM226867.CEL, ..., GSM226876.CEL [10 files]
# URL: http://www.ncbi.nlm.nih.gov/projects/geo/query/acc.cgi?acc=GSE8605
##########################################################################
library("aroma.affymetrix");
library("ACNE");
log <- Arguments$getVerbose(-8, timestamp=TRUE);
cdf <- AffymetrixCdfFile$byChipType("Mapping10K_Xba142");
print(cdf);
gi <- getGenomeInformation(cdf);
print(gi);
si <- getSnpInformation(cdf);
print(si);
csR <- AffymetrixCelSet$byName("GSE8605", cdf=cdf);
print(csR);
acc <- AllelicCrosstalkCalibration(csR, model="CRMAv2");
print(acc);
csC <- process(acc, verbose=log);
print(csC);
bpn <- BasePositionNormalization(csC, target="zero");
print(bpn);
csN <- process(bpn, verbose=log);
print(csN);
plm <- NmfSnpPlm(csN, mergeStrands=TRUE);
print(plm);
fit(plm, ram=0.05, verbose=log);
ces <- getChipEffectSet(plm);
fln <- FragmentLengthNormalization(ces, target="zero");
print(fln);
cesN <- process(fln, verbose=log);
print(cesN);
ceR <- getAverage(cesN, verbose=log);
ce <- getFile(cesN, 1);
fig <- sprintf("%s", getFullName(ce));
if (!devIsOpen(fig)) {
devSet(fig);
subplots(3*2, ncol=2);
par(mar=c(3,4,2,1)+0.1, pch=".");
for (chr in getChromosomes(gi)[1:3]) {
units <- getUnitsOnChromosome(gi, chr);
pos <- getPositions(gi, units=units) / 1e6;
thetaR <- extractTotalAndFreqB(ceR, units=units)[,"total"];
data <- extractTotalAndFreqB(ce, units=units);
data[,"total"] <- 2*data[,"total"] / thetaR;
cn <- RawCopyNumbers(data[,"total"], pos, chromosome=chr);
plot(cn, col="gray", cex=0.8, ylim=c(0,4));
cnS <- gaussianSmoothing(cn, xOut=seq(xMin(cn), xMax(cn), by=1/2), sd=1);
points(cnS, col="black");
stext(side=3, pos=0, getName(ce));
stext(side=3, pos=1, sprintf("Chr%d", chr));
plot(pos, data[,"freqB"], cex=3, ylim=c(0,1));
box(col="blue");
stext(side=3, pos=0, getTags(cesN, collapse=","));
} # for (chr ...)
devDone();
}
| /scratch/gouwar.j/cran-all/cranData/ACNE/inst/testScripts/system/chipTypes/Mapping10K_Xba142/test20090128,10K,NMF.R |
##########################################################################
# Data set:
# GSE8605/
# Mapping10K_Xba142/
# GSM226867.CEL, ..., GSM226876.CEL [10 files]
# URL: http://www.ncbi.nlm.nih.gov/projects/geo/query/acc.cgi?acc=GSE8605
##########################################################################
library("ACNE");
verbose <- Arguments$getVerbose(-8, timestamp=TRUE);
dataSet <- "GSE8605";
chipType <- "Mapping10K_Xba142";
res <- doACNE(dataSet, chipType=chipType, verbose=verbose);
print(res);
ds <- res$total;
dfR <- getAverageFile(ds, verbose=verbose);
df <- getFile(ds, 1);
baf <- getFile(res$fracB, 1);
ugp <- getAromaUgpFile(ds);
fig <- sprintf("%s", getFullName(df));
if (!devIsOpen(fig)) {
devSet(fig, width=10, height=5);
subplots(2*3, nrow=2, byrow=FALSE);
par(mar=c(3,4,2,1)+0.1, pch=".");
for (chr in 1:3) {
units <- getUnitsOnChromosome(ugp, chr);
pos <- getPositions(ugp, units=units);
beta <- extractMatrix(baf, units=units, drop=TRUE);
fracB <- RawAlleleBFractions(beta, pos, chromosome=chr);
theta <- extractMatrix(df, units=units, drop=TRUE);
thetaR <- extractMatrix(dfR, units=units, drop=TRUE);
C <- 2 * theta/thetaR;
cn <- RawCopyNumbers(C, pos, chromosome=chr);
plot(cn, col="gray", cex=0.8, ylim=c(0,4));
xOut <- seq(xMin(cn), xMax(cn), by=0.5e6);
cnS <- gaussianSmoothing(cn, xOut=xOut, sd=1e6);
points(cnS, col="black");
stext(side=3, pos=0, getName(df));
stext(side=3, pos=1, sprintf("Chr%d", chr));
plot(fracB, ylim=c(0,1));
box(col="blue");
stext(side=3, pos=0, getTags(ds, collapse=","));
stext(side=3, pos=1, sprintf("Chr%d", chr));
} # for (chr ...)
devDone();
}
| /scratch/gouwar.j/cran-all/cranData/ACNE/inst/testScripts/system/chipTypes/Mapping10K_Xba142/test20100517,10K,doACNE.R |