code
stringlengths 1
13.8M
|
---|
# Visual Predictive Check (VPC) for censored data, i.e. the probability that
# an observation falls below the LLOQ (left-censored) or above the ULOQ
# (right-censored).  Exactly one of `lloq` / `uloq` must be supplied.
#
# Returns a ggplot object by default, or the aggregated VPC database (a list
# consumable by plot_vpc()) when `vpcdb = TRUE`.
#
# Arguments (interface unchanged):
#   sim, obs       simulated / observed datasets (at least one required)
#   psn_folder     PsN run folder to read NONMEM tables from
#   bins           binning method (character) or numeric vector of separators
#   n_bins         number of bins for automatic binning
#   bin_mid        "mean" or "middle": how the bin midpoint is computed
#   ci             quantiles for the CI of the simulated P(<LOQ)
#   stratify, stratify_color  stratification column name(s)
#   vpcdb          return the aggregated data instead of a plot
#   verbose        emit progress/diagnostic messages
vpc_cens <- function(sim = NULL,
                     obs = NULL,
                     psn_folder = NULL,
                     bins = "jenks",
                     n_bins = 8,
                     bin_mid = "mean",
                     obs_cols = NULL,
                     sim_cols = NULL,
                     software = "auto",
                     show = NULL,
                     stratify = NULL,
                     stratify_color = NULL,
                     ci = c(0.05, 0.95),
                     uloq = NULL,
                     lloq = NULL,
                     plot = FALSE,
                     xlab = "Time",
                     ylab = "Probability of <LOQ",
                     title = NULL,
                     smooth = TRUE,
                     vpc_theme = NULL,
                     facet = "wrap",
                     labeller = NULL,
                     vpcdb = FALSE,
                     verbose = FALSE) {
  ## --- argument checks ------------------------------------------------------
  if (is.null(uloq) && is.null(lloq)) {
    stop("You have to specify either a lower limit of quantification (lloq=...) or an upper limit (uloq=...).")
  }
  if (!is.null(uloq) && !is.null(lloq)) {
    stop("You have to specify either a lower limit of quantification (lloq=...) or an upper limit (uloq=...), but you can't specify both.")
  }
  if (is.null(lloq)) {
    type <- "right-censored"
  }
  if (is.null(uloq)) {
    type <- "left-censored"
  }
  if (is.null(obs) && is.null(sim)) {
    stop("At least a simulation or an observation dataset are required to create a plot!")
  }
  ## --- optionally read tables from a PsN run folder -------------------------
  if (!is.null(psn_folder)) {
    # NOTE(review): these conditions re-read the PsN tables only when obs/sim
    # were already supplied, which looks inverted (is.null() would be the
    # natural test). Kept as-is to preserve existing behavior; confirm
    # against callers before changing.
    if (!is.null(obs)) {
      obs <- read_table_nm(paste0(psn_folder, "/m1/", dir(paste0(psn_folder, "/m1"), pattern = "original.npctab")[1]))
    }
    if (!is.null(sim)) {
      sim <- read_table_nm(paste0(psn_folder, "/m1/", dir(paste0(psn_folder, "/m1"), pattern = "simulation.1.npctab")[1]))
    }
    software <- "nonmem"
  }
  if (!is.null(obs)) {
    software_type <- guess_software(software, obs)
  } else {
    software_type <- guess_software(software, sim)
  }
  ## --- stratification column availability -----------------------------------
  if (!is.null(stratify)) {
    if (!is.null(obs)) {
      check_stratification_columns_available(obs, stratify, "observation")
    }
    if (!is.null(sim)) {
      check_stratification_columns_available(sim, stratify, "simulation")
    }
  }
  if (!is.null(stratify_color)) {
    if (!is.null(obs)) {
      check_stratification_columns_available(obs, stratify_color, "observation")
    }
    # FIX: previously tested `!is.null(obs)` here, so the simulation columns
    # were validated against a possibly-NULL `sim` (or skipped entirely).
    if (!is.null(sim)) {
      check_stratification_columns_available(sim, stratify_color, "simulation")
    }
  }
  show <- replace_list_elements(show_default, show)
  cols <- define_data_columns(sim, obs, sim_cols, obs_cols, software_type)
  ## --- tag inputs with the detected software class --------------------------
  if (!is.null(obs)) {
    old_class <- class(obs)
    class(obs) <- c(software_type, old_class)
  }
  if (!is.null(sim)) {
    old_class <- class(sim)
    class(sim) <- c(software_type, old_class)
  }
  if (!is.null(obs)) {
    obs <- filter_dv(obs, verbose)
    obs <- format_vpc_input_data(obs, cols$obs, lloq, uloq, stratify, bins, FALSE, 0, "observed", verbose)
  }
  if (!is.null(sim)) {
    sim <- filter_dv(sim, verbose)
    # No lloq/uloq here: censoring of the simulated data is applied when
    # computing ploq below, not during input formatting.
    sim <- format_vpc_input_data(sim, cols$sim, NULL, NULL, stratify, bins, FALSE, 0, "simulated", verbose)
    sim$sim <- add_sim_index_number(sim)
  }
  ## --- merge color stratification into the stratification set ---------------
  stratify_original <- stratify
  if (!is.null(stratify_color)) {
    if (is.null(stratify)) {
      stratify <- stratify_color
    }
    if (length(stratify_color) > 1) {
      stop("Error: please specify only 1 stratification variable for color!")
    }
    if (!stratify_color %in% stratify) {
      stratify_original <- stratify
      stratify <- c(stratify, stratify_color)
    }
  }
  ## --- binning ---------------------------------------------------------------
  # FIX: is.numeric() instead of class(bins) != "numeric" so that integer
  # vectors of bin separators are accepted as well.
  if (!is.numeric(bins)) {
    if (!is.null(obs)) {
      bins <- auto_bin(obs, type = bins, n_bins = n_bins)
    } else {
      bins <- auto_bin(sim, type = bins, n_bins = n_bins)
    }
    if (is.null(bins)) {
      msg("Automatic binning unsuccessful, try increasing the number of bins, or specify vector of bin separators manually.", verbose)
    }
  }
  bins <- unique(bins)
  if (!is.null(obs)) {
    obs <- bin_data(obs, bins, "idv")
  }
  if (!is.null(sim)) {
    sim <- bin_data(sim, bins, "idv")
  }
  ## --- resolve censoring direction and active limit --------------------------
  if (!is.null(lloq)) {
    cens <- "left"
    limit <- lloq
  } else {
    cens <- "right"
    limit <- uloq
  }
  ## --- simulated data: CI of P(<LOQ) per stratum/bin -------------------------
  if (!is.null(sim)) {
    tmp1 <- sim %>%
      dplyr::group_by(strat, sim, bin)
    vpc_dat <- tmp1 %>%
      dplyr::summarise(ploq = loq_perc(dv, limit = limit, cens = cens),
                       mn_idv = mean(idv)) %>%
      dplyr::group_by(strat, bin) %>%
      dplyr::summarise(q50.low = quantile(ploq, ci[1]),
                       q50.med = quantile(ploq, 0.5),
                       q50.up = quantile(ploq, ci[2]),
                       bin_mid = mean(mn_idv)) %>%
      dplyr::ungroup()
    vpc_dat$bin_min <- rep(bins[1:(length(bins) - 1)], length(unique(vpc_dat$strat)))[vpc_dat$bin]
    vpc_dat$bin_max <- rep(bins[2:length(bins)], length(unique(vpc_dat$strat)))[vpc_dat$bin]
    if (bin_mid == "middle") {
      vpc_dat$bin_mid <- apply(dplyr::bind_cols(vpc_dat$bin_min, vpc_dat$bin_max), 1, mean)
    }
  } else {
    vpc_dat <- NULL
  }
  ## --- observed data: P(<LOQ) per stratum/bin --------------------------------
  if (!is.null(obs)) {
    tmp <- obs %>%
      dplyr::group_by(strat, bin)
    aggr_obs <- tmp %>%
      # FIX: use the resolved `limit` (lloq OR uloq); previously `lloq` was
      # passed unconditionally, which is NULL for right-censored (uloq) VPCs.
      dplyr::summarise(obs50 = loq_perc(dv, limit = limit, cens = cens),
                       bin_mid = mean(idv)) %>%
      dplyr::ungroup()
    aggr_obs$bin_min <- rep(bins[1:(length(bins) - 1)], length(unique(aggr_obs$strat)))[aggr_obs$bin]
    aggr_obs$bin_max <- rep(bins[2:length(bins)], length(unique(aggr_obs$strat)))[aggr_obs$bin]
    if (bin_mid == "middle") {
      aggr_obs$bin_mid <- apply(dplyr::bind_cols(aggr_obs$bin_min, aggr_obs$bin_max), 1, mean)
    }
  } else {
    aggr_obs <- NULL
  }
  ## --- split the combined "a, b" strat label for two-way stratification ------
  if (!is.null(stratify_original)) {
    if (length(stratify) == 2) {
      vpc_dat$strat1 <- unlist(strsplit(as.character(vpc_dat$strat), ", "))[(1:length(vpc_dat$strat) * 2) - 1]
      vpc_dat$strat2 <- unlist(strsplit(as.character(vpc_dat$strat), ", "))[(1:length(vpc_dat$strat) * 2)]
      aggr_obs$strat1 <- unlist(strsplit(as.character(aggr_obs$strat), ", "))[(1:length(aggr_obs$strat) * 2) - 1]
      aggr_obs$strat2 <- unlist(strsplit(as.character(aggr_obs$strat), ", "))[(1:length(aggr_obs$strat) * 2)]
    }
  }
  ## --- plot-element defaults specific to censored VPCs ------------------------
  show$obs_dv <- FALSE
  show$obs_ci <- FALSE
  show$obs_median <- TRUE
  show$sim_median <- FALSE
  show$sim_median_ci <- TRUE
  show$pi_as_area <- FALSE
  show$pi_ci <- FALSE
  show$pi <- FALSE
  vpc_db <- list(sim = sim,
                 vpc_dat = vpc_dat,
                 stratify = stratify,
                 stratify_original = stratify_original,
                 stratify_color = stratify_color,
                 aggr_obs = aggr_obs,
                 obs = obs,
                 bins = bins,
                 facet = facet,
                 labeller = labeller,
                 type = "censored",
                 xlab = xlab,
                 ylab = ylab)
  if (vpcdb) {
    return(vpc_db)
  } else {
    pl <- plot_vpc(db = vpc_db,
                   show = show,
                   vpc_theme = vpc_theme,
                   smooth = smooth,
                   log_y = FALSE,
                   title = title)
    return(pl)
  }
}
# Present value based on discrete triangular moments: 1 plus the sum of the
# first `years - 1` yearly terms produced by triangular_moments_dis_U().
#
# FIX: the original summed `app[1:years-1]`, which parses as
# `app[(1:years)-1]` = `app[0:(years-1)]`; the 0 index is silently dropped,
# so the effective range is 1:(years-1). seq_len() makes that explicit and
# is also safe for years = 1 (empty sum).
PV_pre_triang_dis <- function(data, years = 10) {
  app <- vapply(seq_len(years),
                function(i) triangular_moments_dis_U(data, i),
                numeric(1))
  1 + sum(app[seq_len(years - 1)])
}
# Discriminant Neighborhood Embedding (DNE): supervised linear dimension
# reduction that pulls same-class neighbors together and pushes
# different-class neighbors apart.
#
# Arguments:
#   X          (n x p) data matrix
#   label      class labels, length n
#   ndim       target dimension (1 <= ndim <= p)
#   numk       neighborhood size for the kNN graph
#   preprocess preprocessing applied before embedding
# Returns a list with the embedding Y, preprocessing info, and the
# projection matrix.
do.dne <- function(X, label, ndim=2, numk=max(ceiling(nrow(X)/10),2),
                   preprocess=c("center","scale","cscale","decorrelate","whiten")){
  aux.typecheck(X)
  n = nrow(X)
  p = ncol(X)
  ndim = as.integer(ndim)
  # FIX: the stop() message below was truncated mid-string in the previous
  # revision (unterminated literal); reconstructed.
  if (!check_ndim(ndim,p)){stop("* do.dne : 'ndim' is a positive integer in [1,#(covariates)].")}
  numk = as.integer(numk)
  if (!check_NumMM(numk,1,n/2,compact=FALSE)){stop("* do.dne : 'numk' should be an integer in [2,nrow(X)/2).")}
  if (missing(preprocess)){ algpreprocess = "center" }
  else { algpreprocess = match.arg(preprocess) }
  label = check_label(label, n)
  ulabel = unique(label)
  if (any(is.na(label))||(any(is.infinite(label)))){stop("* Supervised Learning : any element of 'label' as NA or Inf will simply be considered as a class, not missing entries.") }
  # Preprocess the data (linear transform) and keep the transform info.
  tmplist = aux.preprocess.hidden(X,type=algpreprocess,algtype="linear")
  trfinfo = tmplist$info
  pX = tmplist$pX
  # Symmetrized kNN adjacency mask.
  nbdtype = c("knn",numk)
  nbdsymmetric = "union"
  nbdstruct = aux.graphnbd(pX,method="euclidean",
                           type=nbdtype,symmetric=nbdsymmetric)
  nbdmask = nbdstruct$mask
  # F(i,j) = +1 for same-class neighbors, -1 for different-class neighbors.
  matF = array(0,c(n,n))
  for (i in 1:(n-1)){
    for (j in (i+1):n){
      if (nbdmask[i,j]==TRUE){
        if (label[i]==label[j]){
          matF[i,j] = 1.0
          matF[j,i] = 1.0
        } else {
          matF[i,j] = -1.0
          matF[j,i] = -1.0
        }
      }
    }
  }
  # Graph-Laplacian-style objective; take the ndim smallest-real eigenvectors.
  matS = diag(rowSums(matF))-matF
  costobj = t(pX)%*%(matS-matF)%*%pX
  projection = aux.adjprojection(RSpectra::eigs(costobj, ndim, which="SR")$vectors)
  result = list()
  result$Y = pX%*%projection
  result$trfinfo = trfinfo
  result$projection = projection
  return(result)
}
# Attach attributes given in `...` to `.Data` and return it.  Mirrors base
# R's structure(): legacy dotted attribute names (.Dim, .Names, ...) are
# mapped to their modern equivalents, and assigning class "factor" to a
# double vector coerces its storage to integer first.
structure <- function(.Data, ...) {
  if (is.null(.Data))
    warning("Calling 'structure(NULL, *)' is deprecated, as NULL cannot have attributes.\n Consider 'structure(list(), *)' instead.")
  attrs <- list(...)
  if (length(attrs)) {
    # Translate S-compatibility names to the attribute names R uses today.
    legacy <- c(".Dim", ".Dimnames", ".Names", ".Tsp", ".Label")
    modern <- c("dim", "dimnames", "names", "tsp", "levels")
    hit <- match(names(attrs), legacy)
    translated <- !is.na(hit)
    if (any(translated)) {
      names(attrs)[translated] <- modern[hit[translated]]
    }
    # Factors store integer codes; coerce doubles before the class sticks.
    if (any(attrs[["class", exact = TRUE]] == "factor")
        && typeof(.Data) == "double") {
      storage.mode(.Data) <- "integer"
    }
    attributes(.Data) <- c(attributes(.Data), attrs)
  }
  .Data
}
library(bmp)

# Frame indices (within each image sequence) at which a catch event starts.
posey <- c(30, 167, 332, 457, 822, 1016, 1199,
           1437, 1621, 1770, 1924, 2101, 2251, 2442, 2594,
           2757, 2918, 3072, 3205, 3356, 3526, 3685,
           4068, 4217)
susac <- c(751, 1286, 1485, 1666, 2030,
           2187)

# Per-frame movement for one image directory: for each consecutive frame
# pair, count pixels in the 250x250 top-left window (RGB channels 2:4)
# whose absolute difference exceeds 1.  Element 1 is NA (no prior frame).
# File names begin with a numeric frame index terminated by "i", so sort
# numerically on that prefix.  Paths are built with file.path(), avoiding
# the setwd()/setwd("../") churn of the original.
frame_movement <- function(dir_path) {
  files <- list.files(dir_path)
  files <- files[order(as.numeric(unlist(lapply(strsplit(files, split = "i"), "[[", 1))))]
  frames <- lapply(file.path(dir_path, files), read.bmp)
  movement <- rep(NA_real_, length(frames))
  for (i in 2:length(frames)) {
    movement[i] <- sum(abs(frames[[i]][1:250, 1:250, 2:4] - frames[[i - 1]][1:250, 1:250, 2:4]) > 1)
    print(i)  # progress indicator (kept from original)
  }
  movement
}

# Extract the 101-frame movement window starting at each event frame.
event_windows <- function(movement, starts) {
  lapply(starts, function(s) movement[s:(s + 100)])
}

posz <- frame_movement("buster_posey_catching/")
posm <- event_windows(posz, posey)
susz <- frame_movement("andrew_susac_catching/")
susm <- event_windows(susz, susac)

# Average movement across events at each within-window time step (first 100).
posv <- vapply(1:100, function(i) mean(unlist(lapply(posm, "[[", i))), numeric(1))
susv <- vapply(1:100, function(i) mean(unlist(lapply(susm, "[[", i))), numeric(1))

# FIX: xaxt must be "n" (not "no") to suppress the default x axis before
# drawing the custom one below.
plot(posv[1:20], type = "l", col = "red", lwd = 3,
     ylim = c(80000, 120000), xlab = "Time", xaxt = "n",
     ylab = "Total Pixel Movement")
lines(susv[1:20], type = "l", col = "blue", lwd = 3)
axis(side = 1, at = c(0, 5, 10, 15, 20),
     labels = as.character(c(0, 5, 10, 15, 20) * .05))
# ---------------------------------------------------------------------------
# QA harness for RemixAutoML::AutoLightGBMFunnelCARMA (training + scoring).
# Iterates over every combination of grouping depth (0-3 group columns),
# exogenous-regressor set (0-3) and target transformation (TRUE/FALSE),
# recording "Success"/"Failure" per run.  Results are re-written to disk
# after every run so progress survives a crash.
#
# Refactor: the original repeated the same fread() branches ~32 times; the
# fixture selection is now centralized in load_qa_data() with identical
# file names and keys.
# ---------------------------------------------------------------------------

# Fixture / output locations (QA machine specific).
qa_root <- "C:/Users/Bizon/Documents/GitHub/RemixAutoML/tests"

# Map a Group code (0-3) to the grouping columns of that scenario.
group_vars_for <- function(group) {
  switch(
    as.character(group),
    "0" = NULL,
    "1" = "MarketingSegments",
    "2" = c("MarketingSegments", "MarketingSegments2"),
    "3" = c("MarketingSegments", "MarketingSegments2", "MarketingSegments3")
  )
}

# Load the ModelData / LeadsData fixture pair for one scenario.
# xregs == 0 selects the plain leads file; 1-3 select the -XREGS<k> variant.
load_qa_data <- function(group, xregs) {
  groupvars <- group_vars_for(group)
  label <- c("NoGroup", "OneGroup", "TwoGroup", "ThreeGroup")[group + 1]
  xreg_suffix <- if (xregs == 0) "" else paste0("-XREGS", xregs)
  keycols <- c(groupvars, "CalendarDateColumn")
  ModelData <- data.table::fread(
    file = file.path(qa_root, "QA_DataSets", paste0("ChainLadder-", label, "-ModelData.csv")),
    key = keycols
  )
  LeadsData <- data.table::fread(
    file = file.path(qa_root, "QA_DataSets", paste0("ChainLadder-", label, "-LeadsData", xreg_suffix, ".csv")),
    key = keycols
  )
  list(groupvars = groupvars, ModelData = ModelData, LeadsData = LeadsData)
}

# Full cross of QA scenarios; both outcomes start as "Failure".
QA_Results <- data.table::CJ(
  Group = c(0, 1, 2, 3),
  xregs = c(0, 1, 2, 3),
  Trans = c(TRUE, FALSE),
  Training = "Failure",
  Forecast = "Failure"
)

for (run in seq_len(QA_Results[, .N])) {
  # --- Load fixtures and merge lead measures into the model data ----------
  dat <- load_qa_data(QA_Results[run, Group], QA_Results[run, xregs])
  groupvars <- dat$groupvars
  ModelData <- dat$ModelData
  LeadsData <- dat$LeadsData
  keep <- setdiff(names(LeadsData), c(groupvars, "CalendarDateColumn"))
  ModelData[LeadsData, paste0(keep) := mget(paste0("i.", keep))]
  setwd("C:/Users/Bizon/Documents/GitHub")  # ModelPath = getwd() below
  # --- Train ---------------------------------------------------------------
  TestModel <- tryCatch(
    {
      RemixAutoML::AutoLightGBMFunnelCARMA(
        data = ModelData,
        GroupVariables = groupvars,
        BaseFunnelMeasure = keep,
        ConversionMeasure = "Appointments",
        ConversionRateMeasure = NULL,
        CohortPeriodsVariable = "CohortDays",
        CalendarDate = "CalendarDateColumn",
        CohortDate = "CohortDateColumn",
        PartitionRatios = c(0.70, 0.20, 0.10),
        TruncateDate = NULL,
        TimeUnit = "days",
        TransformTargetVariable = QA_Results[run, Trans],
        TransformMethods = c("Asinh", "Asin", "Log", "LogPlus1", "Sqrt", "Logit"),
        AnomalyDetection = list(tstat_high = 3, tstat_low = -2),
        Jobs = c("eval", "train"),
        SaveModelObjects = FALSE,
        ModelID = "ModelTest",
        ModelPath = getwd(),
        MetaDataPath = NULL,
        DebugMode = TRUE,
        NumOfParDepPlots = 1L,
        EncodingMethod = "credibility",
        NThreads = parallel::detectCores() / 2,
        CalendarTimeGroups = c("days", "weeks", "months"),
        CohortTimeGroups = c("days", "weeks"),
        CalendarVariables = c("wday", "mday", "yday", "week", "month", "quarter", "year"),
        HolidayGroups = c("USPublicHolidays", "EasterGroup", "ChristmasGroup", "OtherEcclesticalFeasts"),
        HolidayLookback = NULL,
        CohortHolidayLags = c(1L, 2L, 7L),
        CohortHolidayMovingAverages = c(3L, 7L),
        CalendarHolidayLags = c(1L, 2L, 7L),
        CalendarHolidayMovingAverages = c(3L, 7L),
        ImputeRollStats = -0.001,
        CalendarLags = list("day" = c(1L, 2L, 7L, 35L, 42L), "week" = c(5L, 6L, 10L, 12L, 25L, 26L)),
        CalendarMovingAverages = list("day" = c(7L, 14L, 35L, 42L), "week" = c(5L, 6L, 10L, 12L, 20L, 24L), "month" = c(6L, 12L)),
        CalendarStandardDeviations = NULL,
        CalendarSkews = NULL,
        CalendarKurts = NULL,
        CalendarQuantiles = NULL,
        CalendarQuantilesSelected = "q50",
        CohortLags = list("day" = c(1L, 2L, 7L, 35L, 42L), "week" = c(5L, 6L)),
        CohortMovingAverages = list("day" = c(7L, 14L, 35L, 42L), "week" = c(5L, 6L), "month" = c(1L, 2L)),
        CohortStandardDeviations = NULL,
        CohortSkews = NULL,
        CohortKurts = NULL,
        CohortQuantiles = NULL,
        CohortQuantilesSelected = "q50",
        PassInGrid = NULL,
        GridTune = FALSE,
        BaselineComparison = "default",
        MaxModelsInGrid = 25L,
        MaxRunMinutes = 180L,
        MaxRunsWithoutNewWinner = 10L,
        LossFunction = "regression",
        EvalMetric = "mae",
        GridEvalMetric = "mae",
        Device_Type = "CPU",
        Input_Model = NULL,
        Task = "train",
        Boosting = "gbdt",
        LinearTree = FALSE,
        Trees = 50,
        ETA = 0.10,
        Num_Leaves = 31,
        Deterministic = TRUE,
        Force_Col_Wise = FALSE,
        Force_Row_Wise = FALSE,
        Max_Depth = 6,
        Min_Data_In_Leaf = 20,
        Min_Sum_Hessian_In_Leaf = 0.001,
        Bagging_Freq = 1.0,
        Bagging_Fraction = 1.0,
        Feature_Fraction = 1.0,
        Feature_Fraction_Bynode = 1.0,
        Lambda_L1 = 0.0,
        Lambda_L2 = 0.0,
        Extra_Trees = FALSE,
        Early_Stopping_Round = 10,
        First_Metric_Only = TRUE,
        Max_Delta_Step = 0.0,
        Linear_Lambda = 0.0,
        Min_Gain_To_Split = 0,
        Drop_Rate_Dart = 0.10,
        Max_Drop_Dart = 50,
        Skip_Drop_Dart = 0.50,
        Uniform_Drop_Dart = FALSE,
        Top_Rate_Goss = FALSE,
        Other_Rate_Goss = FALSE,
        Monotone_Constraints = NULL,
        Monotone_Constraints_method = "advanced",
        Monotone_Penalty = 0.0,
        Forcedsplits_Filename = NULL,
        Refit_Decay_Rate = 0.90,
        Path_Smooth = 0.0,
        Max_Bin = 255,
        Min_Data_In_Bin = 3,
        Data_Random_Seed = 1,
        Is_Enable_Sparse = TRUE,
        Enable_Bundle = TRUE,
        Use_Missing = TRUE,
        Zero_As_Missing = FALSE,
        Two_Round = FALSE,
        Convert_Model = NULL,
        Convert_Model_Language = "cpp",
        Boost_From_Average = TRUE,
        Alpha = 0.90,
        Fair_C = 1.0,
        Poisson_Max_Delta_Step = 0.70,
        Tweedie_Variance_Power = 1.5,
        Lambdarank_Truncation_Level = 30,
        Is_Provide_Training_Metric = TRUE,
        Eval_At = c(1, 2, 3, 4, 5),
        Num_Machines = 1,
        Gpu_Platform_Id = -1,
        Gpu_Device_Id = -1,
        Gpu_Use_Dp = TRUE,
        Num_Gpu = 1
      )
    },
    error = function(x) NULL
  )
  if (!is.null(TestModel)) QA_Results[run, Training := "Success"]
  data.table::fwrite(QA_Results, file = file.path(qa_root, "Testing_Data", "AutoLightGBMFunnel_QA.csv"))
  # --- Score ---------------------------------------------------------------
  # Reload fresh (un-merged) fixtures for scoring, matching the original
  # harness, and truncate the forward-looking data.
  if (!is.null(TestModel)) {
    dat <- load_qa_data(QA_Results[run, Group], QA_Results[run, xregs])
    groupvars <- dat$groupvars
    ModelData <- dat$ModelData
    LeadsData <- dat$LeadsData[CalendarDateColumn < "2020-01-05"]
    Test <- tryCatch(
      {
        RemixAutoML::AutoLightGBMFunnelCARMAScoring(
          TrainData = ModelData,
          ForwardLookingData = LeadsData,
          TrainEndDate = ModelData[, max(CalendarDateColumn)],
          ForecastEndDate = LeadsData[, max(CalendarDateColumn)],
          TrainOutput = TestModel$ModelOutput,
          ArgsList = TestModel$ArgsList,
          ModelPath = NULL,
          MaxCohortPeriod = 15,
          DebugMode = TRUE
        )
      },
      error = function(x) NULL
    )
  } else {
    Test <- NULL
  }
  if (!is.null(Test)) QA_Results[run, Forecast := "Success"]
  rm(TestModel, Test)
  data.table::fwrite(QA_Results, file = file.path(qa_root, "Testing_Data", "AutoLightGBMFunnel_QA.csv"))
  Sys.sleep(5)
}
# Bootstrap adaptor: select the response (resp) or background (bkg) column,
# resample it by `index`, and hand it with the category column to `testFnc`.
# Extra arguments are forwarded to `testFnc`.
catEffectBootAdaptor <- function(df, index, testFnc = sumSqCat, useResp = TRUE, ...) {
  values <- if (useResp) df$resp else df$bkg
  testFnc(values[index], df$cat, ...)
}
# Thin R wrappers around inspectdf's compiled (C++) routines, dispatched via
# .Call to the package's registered native symbols.  The actual logic lives
# in the package's src/ directory; these functions only forward `x`.

# Tabulate level counts for a numeric vector (compiled implementation).
count_levels_num <- function(x) {
.Call('_inspectdf_count_levels_num', PACKAGE = 'inspectdf', x)
}
# Tabulate level counts for a character vector (compiled implementation).
count_levels_char <- function(x) {
.Call('_inspectdf_count_levels_char', PACKAGE = 'inspectdf', x)
}
# Missing-value scan for a numeric vector (compiled implementation).
na_numeric <- function(x) {
.Call('_inspectdf_na_numeric', PACKAGE = 'inspectdf', x)
}
# Missing-value scan for a character vector (compiled implementation).
na_character <- function(x) {
.Call('_inspectdf_na_character', PACKAGE = 'inspectdf', x)
}
# Missing-value scan for a logical vector (compiled implementation).
na_logical <- function(x) {
.Call('_inspectdf_na_logical', PACKAGE = 'inspectdf', x)
}
# Missing-value scan for an integer vector (compiled implementation).
na_integer <- function(x) {
.Call('_inspectdf_na_integer', PACKAGE = 'inspectdf', x)
}
# Demo: mean squared prediction error (MSPE) of forecasts with and without
# localization (window N == 0 means "use the full sample").
T <- 50        # sample size (note: masks base's alias T for TRUE in this script)
m <- 10        # length of the out-of-sample evaluation window
P <- 5         # maximum candidate model order
H <- 2         # maximum forecast horizon
N_min <- 20    # smallest non-zero localization window
X <- rnorm(T)  # white-noise demo series

mspe <- MSPE(X, m1 = T - m + 1, m2 = T, P = P, H = H, N = c(0, N_min:(T - m - H)))
N <- mspe$N
M <- mspe$mspe

h <- 1  # horizon to plot (legend only drawn for the first horizon)
plot(mspe, h, N_min = N_min, legend = (h == 1))

# Best model order for the full-sample (N == 0) forecast at horizon h, and a
# horizontal reference line at its MSPE.
idx1_s <- which(M[h, , N == 0] == min(M[h, , N == 0]), arr.ind = TRUE)[1]
abline(h = M[h, idx1_s, N == 0], col = idx1_s, lty = "dashed", lwd = 2)

# FIX: the loop below referenced an undefined `p_max`; the candidate orders
# run 1..P, so bind p_max to P explicitly.
p_max <- P
for (p in 1:p_max) {
  # NOTE(review): idx1_ls is computed but never used afterwards — kept for
  # parity with the original; confirm whether it was meant to drive a plot
  # element.
  idx1_ls <- which(M[h, , N != 0] == min(M[h, , N != 0]), arr.ind = TRUE)[1, ]
  # Localization window minimizing the MSPE for order p; mark it vertically.
  idx1_ls_p <- which(M[h, p, N != 0] == min(M[h, p, N != 0]), arr.ind = TRUE)[1]
  abline(v = N[idx1_ls_p], col = p, lty = "dotted")
}
# Defunct: the BHL GetUnpublishedItems API method was removed upstream.
# Any call now raises a defunct-function error via .Defunct(); arguments
# are accepted (via ...) only so old call sites fail with the defunct
# message rather than an argument error.
bhl_getunpublisheditems <- function(...) {
.Defunct(package = "rbhl", msg = "API method removed")
}
# Tests for insight's support of robustbase::lmrob models: metadata
# (model_info), predictor/response discovery, formula and term extraction,
# link functions, parameter listing, and statistic detection.
# requiet() is presumably a quiet require() wrapper from the test setup
# (returns FALSE when a package is unavailable) — the whole suite is
# skipped if any dependency is missing.
if (requiet("testthat") &&
requiet("insight") &&
requiet("robustbase")) {
data(mtcars)
# Robust linear model used as the fixture for every test below.
m1 <- lmrob(mpg ~ gear + wt + cyl, data = mtcars)
test_that("model_info", {
expect_true(model_info(m1)$is_linear)
})
test_that("find_predictors", {
expect_identical(find_predictors(m1), list(conditional = c("gear", "wt", "cyl")))
expect_identical(find_predictors(m1, flatten = TRUE), c("gear", "wt", "cyl"))
expect_null(find_predictors(m1, effects = "random"))
})
# lmrob has no random effects: find_random returns NULL, get_random warns.
test_that("find_random", {
expect_null(find_random(m1))
})
test_that("get_random", {
expect_warning(get_random(m1))
})
test_that("find_response", {
expect_identical(find_response(m1), "mpg")
})
test_that("get_response", {
expect_equal(get_response(m1), mtcars$mpg)
})
test_that("get_predictors", {
expect_equal(colnames(get_predictors(m1)), c("gear", "wt", "cyl"))
})
test_that("get_data", {
expect_equal(nrow(get_data(m1)), 32)
expect_equal(colnames(get_data(m1)), c("mpg", "gear", "wt", "cyl"))
})
test_that("find_formula", {
expect_length(find_formula(m1), 1)
expect_equal(
find_formula(m1),
list(conditional = as.formula("mpg ~ gear + wt + cyl")),
ignore_attr = TRUE
)
})
test_that("find_terms", {
expect_equal(find_terms(m1), list(
response = "mpg",
conditional = c("gear", "wt", "cyl")
))
expect_equal(find_terms(m1, flatten = TRUE), c("mpg", "gear", "wt", "cyl"))
})
test_that("n_obs", {
expect_equal(n_obs(m1), 32)
})
# Identity link for a linear model: link and inverse are both identity maps.
test_that("link_function", {
expect_equal(link_function(m1)(.2), .2, tolerance = 1e-5)
})
test_that("link_inverse", {
expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5)
})
test_that("find_parameters", {
expect_equal(
find_parameters(m1),
list(conditional = c("(Intercept)", "gear", "wt", "cyl"))
)
expect_equal(nrow(get_parameters(m1)), 4)
expect_equal(
get_parameters(m1)$Parameter,
c("(Intercept)", "gear", "wt", "cyl")
)
})
test_that("is_multivariate", {
expect_false(is_multivariate(m1))
})
# "SM" = the S-estimate + M-step algorithm reported by lmrob.
test_that("find_algorithm", {
expect_equal(find_algorithm(m1), list(algorithm = "SM"))
})
test_that("find_statistic", {
expect_identical(find_statistic(m1), "t-statistic")
})
}
# Fit the Clutter growth-and-yield model by two-stage least squares (2SLS)
# using systemfit, optionally per group.  Column names are passed as
# character strings.  `model = "full"` fits the standard Clutter system;
# `model = "mod"` drops the site term from the basal-area equation.
# Returns a data frame of coefficients (b0..b3, a0[, a1]) per group, with
# the fitted systemfit object kept in column `Reg` when keep_model = TRUE.
fit_clutter <- function(df, age, dh, basal_area, volume, site, plot, .groups=NA, model = "full", keep_model = FALSE){
# Bind names used non-standardly below so R CMD check does not flag them.
basal_area2<-basal_area1<-I1<-I2<-volume2<-.<-Reg<-NULL
# ---- argument validation: each string argument must name one existing column ----
if( missing(df) ){
stop("df not set", call. = F)
}else if(!is.data.frame(df)){
stop("df must be a dataframe", call.=F)
}else if(length(df)<=1 | nrow(df)<=1){
stop("Length and number of rows of 'df' must be greater than 1", call.=F)
}
if( missing(age) ){
stop("age not set", call. = F)
}else if( !is.character(age) ){
stop("'age' must be a character containing a variable name", call.=F)
}else if(length(age)!=1){
stop("Length of 'age' must be 1", call.=F)
}else if(forestmangr::check_names(df, age)==F){
stop(forestmangr::check_names(df, age, boolean=F), call.=F)
}
if( missing(dh) ){
stop("dh not set", call. = F)
}else if( !is.character(dh) ){
stop("'dh' must be a character containing a variable name", call.=F)
}else if(length(dh)!=1){
stop("Length of 'dh' must be 1", call.=F)
}else if(forestmangr::check_names(df, dh)==F){
stop(forestmangr::check_names(df, dh, boolean=F), call.=F)
}
if( missing(basal_area) ){
stop("basal_area not set", call. = F)
}else if( !is.character(basal_area) ){
stop("'basal_area' must be a character containing a variable name", call.=F)
}else if(length(basal_area)!=1){
stop("Length of 'basal_area' must be 1", call.=F)
}else if(forestmangr::check_names(df, basal_area)==F){
stop(forestmangr::check_names(df, basal_area, boolean=F), call.=F)
}
if( missing(volume) ){
stop("volume not set", call. = F)
}else if( !is.character(volume) ){
stop("'volume' must be a character containing a variable name", call.=F)
}else if(length(volume)!=1){
stop("Length of 'volume' must be 1", call.=F)
}else if(forestmangr::check_names(df, volume)==F){
stop(forestmangr::check_names(df, volume, boolean=F), call.=F)
}
if( missing(site) ){
stop("site not set", call. = F)
}else if( !is.character(site) ){
stop("'site' must be a character containing a variable name", call.=F)
}else if(length(site)!=1){
stop("Length of 'site' must be 1", call.=F)
}else if(forestmangr::check_names(df, site)==F){
stop(forestmangr::check_names(df, site, boolean=F), call.=F)
}
# plot may be omitted when df already carries dplyr groups (those are used instead).
if(missing(plot) && is.null(dplyr::groups(df)) ){
stop("plot not set. plot must be set if data doesn't have any groups", call. = F)
}else if(missing(plot) && !is.null(dplyr::groups(df)) ){
plot_syms <- rlang::syms(dplyr::groups(df))
}else if(!is.character(plot)){
stop("plot must be a character", call. = F)
}else if(! length(plot)%in% 1:10){
stop("Length of 'plot' must be between 1 and 10", call.=F)
}else if(forestmangr::check_names(df,plot)==F){
stop(forestmangr::check_names(df,plot, boolean=F), call.=F)
}else{
plot_syms <- rlang::syms(plot)
}
# .groups is optional; NA/NULL/FALSE/"" all mean "no extra grouping".
if(missing(.groups)||any(is.null(.groups))||any(is.na(.groups))||any(.groups==F)||any(.groups=="") ){
.groups_syms <- character()
}else if(!is.character(.groups)){
stop(".groups must be a character", call. = F)
}else if(! length(.groups)%in% 1:10){
stop("Length of '.groups' must be between 1 and 10", call.=F)
}else if(forestmangr::check_names(df,.groups)==F){
stop(forestmangr::check_names(df,.groups, boolean=F), call.=F)
}else{
.groups_syms <- rlang::syms(.groups)
}
if(!is.character( model )){
stop( "'model' must be character", call.=F)
}else if(length(model)!=1){
stop("Length of 'model' must be 1", call.=F)
}else if(! model %in% c('full', 'mod') ){
stop("'model' must be equal to 'full' or 'mod' ", call. = F)
}
if(! keep_model %in% c(TRUE, FALSE) ){
stop("keep_model must be equal to TRUE or FALSE", call. = F)
}
# ---- build the interval (age1 -> age2) dataset via lead() within each plot ----
age_sym <- rlang::sym(age)
dh_sym <- rlang::sym(dh)
basal_area_sym <- rlang::sym(basal_area)
volume_sym <- rlang::sym(volume)
site_sym <- rlang::sym(site)
suppressMessages(
struct_form_data <- df %>%
dplyr::group_by(!!!.groups_syms, !!!plot_syms, .add=T ) %>%
dplyr::transmute(
I1 = !!age_sym, I2 = dplyr::lead(!!age_sym),
dh = !!dh_sym, dh2 = dplyr::lead(!!dh_sym),
basal_area1 = !!basal_area_sym, basal_area2 = dplyr::lead(!!basal_area_sym),
volume1 = !!volume_sym, volume2 = dplyr::lead(!!volume_sym),
site = !!site_sym ) %>%
stats::na.omit() %>%
dplyr::mutate(
Y1 = log(basal_area2) ,
X1 = log(basal_area1) * (I1/I2),
X2 = 1 - I1/I2 ,
X3 = (1 - I1/I2) * site ,
Y2 = log(volume2) ,
X4 = 1 / I2 ,
X5 = site
) %>%
dplyr::ungroup()
)
# ---- fit the simultaneous system by restricted 2SLS ----
if(model == "full"){
eq1 <- Y2 ~ X4 + X5 + Y1
eq2 <- Y1 ~ X1 + X2 + X3
system <- list(Volume = eq1, AreaBasal = eq2)
inst <- ~ X4 + X5 + X1 + X2 + X3
# Restrictions: AreaBasal intercept = 0 and the X1 coefficient = 1
# (columns 5 and 6 of the stacked 8-coefficient vector).
restrict <- matrix(0, nrow=2, ncol=8)
restrict[1,5] <- 1
restrict[2,6] <- 1
restrict.rhs <- c(0, 1)
model_fit <- struct_form_data %>%
dplyr::group_by( !!!.groups_syms, .add=T ) %>%
dplyr::do(Reg = systemfit::systemfit(system, "2SLS", inst = inst, data = .,
restrict.matrix = restrict,
restrict.rhs = restrict.rhs)) %>%
dplyr::rowwise() %>%
dplyr::mutate(
b0 = stats::coef(Reg)[[1]],
b1 = stats::coef(Reg)[[2]],
b2 = stats::coef(Reg)[[3]],
b3 = stats::coef(Reg)[[4]],
a0 = stats::coef(Reg)[[7]],
a1 = stats::coef(Reg)[[8]] ) %>%
dplyr:: ungroup()
}else if(model == "mod" ){
# Modified Clutter: basal-area equation without the site term (X3).
eq1 <- Y2 ~ X4 + X5 + Y1
eq2 <- Y1 ~ X1 + X2
system <- list(Volume = eq1, AreaBasal = eq2)
inst <- ~ X4 + X5 + X1 + X2
restrict <- matrix(0, nrow=2, ncol=7)
restrict[1,5] <- 1
restrict[2,6] <- 1
restrict.rhs <- c(0, 1)
model_fit <- struct_form_data %>%
dplyr::group_by( !!!.groups_syms, .add=T ) %>%
dplyr::do(Reg = systemfit::systemfit(system, "2SLS", inst = inst, data = .,
restrict.matrix = restrict,
restrict.rhs = restrict.rhs)) %>%
dplyr::rowwise() %>%
dplyr::mutate(
b0 = stats::coef(Reg)[[1]],
b1 = stats::coef(Reg)[[2]],
b2 = stats::coef(Reg)[[3]],
b3 = stats::coef(Reg)[[4]],
a0 = stats::coef(Reg)[[7]] ) %>%
dplyr::ungroup()
}
# Drop the model object unless the caller asked to keep it.
if(keep_model == F){
model_fit <- as.data.frame(model_fit)
model_fit$Reg <- NULL
}
# NOTE(review): no column `A` is created above; this looks like leftover
# cleanup from an earlier version — confirm before removing.
model_fit$A <- NULL
return(model_fit)
}
# Generic: block the R session until a Slurm job finishes (S3 dispatch on x).
wait_slurm <- function(x, ...) UseMethod("wait_slurm")
# Method for slurm_job objects: extract the numeric job ID and delegate to
# the integer method. Extra arguments (timeout, freq, force) pass through.
wait_slurm.slurm_job <- function(x, ...) {
  wait_slurm.integer(get_job_id(x), ...)
}
# Poll Slurm until job `x` completes or a timeout elapses.
#
# x: integer job ID.
# timeout: maximum seconds to wait; a negative value (default) waits forever.
# freq: seconds to sleep between status polls.
# force: when TRUE, a -1 status (job not queryable yet/anymore) is ignored and
#   polling continues; when FALSE, a -1 status prints the status and stops.
# Returns invisible(NULL); called for its blocking side effect.
wait_slurm.integer <- function(x, timeout = -1, freq = 0.1, force = TRUE, ...) {
  if (opts_slurmR$get_debug()) {
    # Debug mode runs jobs locally, so there is nothing to wait on.
    warning("waiting is not available in debug mode.", call. = FALSE)
    return()
  } else if (!slurm_available())
    stopifnot_slurm()
  if (!is.finite(x))
    stop("The job ID is not an integer: ", x, ". Can't wait for non-integer job ids.", call. = FALSE)
  time0 <- Sys.time()
  while(TRUE) {
    Sys.sleep(freq)
    s <- status(x)
    if (force && s == -1L) {
      # Status unavailable; keep polling until the job becomes visible.
      next
    } else if (!force && s == -1L) {
      print(s)
      break
    }
    # Job arrays carry bookkeeping in attributes: finish only when every
    # element has either failed or completed.
    njobs <- attr(s, "njobs")
    if (njobs > 1L) {
      ncompleted <- length(attr(s, "failed")) + length(attr(s, "done"))
      if (ncompleted == njobs)
        break
    } else if (s %in% c(0L, 99L))
      # Single job: 0L and 99L are treated as terminal status codes here —
      # TODO(review): confirm their exact meaning against status().
      break
    if (timeout > 0) {
      seconds <- difftime(Sys.time(), time0, units = "secs")
      if (seconds > timeout) {
        warning("Timeout after ", seconds, " seconds.", call. = FALSE, immediate. = TRUE)
        return(invisible(NULL))
      }
    }
  }
  invisible(NULL)
}
test_that("compare state works correctly", {
loc <- tempfile("watcher")
dir.create(loc)
empty <- dir_state(loc)
expect_equal(length(empty), 0)
file.create(file.path(loc, "test-1.txt"))
one <- dir_state(loc)
expect_equal(length(one), 1)
expect_equal(basename(names(one)), "test-1.txt")
diff <- compare_state(empty, one)
expect_equal(diff$n, 1)
expect_equal(basename(diff$added), "test-1.txt")
write.table(mtcars, file.path(loc, "test-1.txt"))
diff <- compare_state(one, dir_state(loc))
expect_equal(diff$n, 1)
expect_equal(basename(diff$modified), "test-1.txt")
file.rename(file.path(loc, "test-1.txt"), file.path(loc, "test-2.txt"))
diff <- compare_state(one, dir_state(loc))
expect_equal(diff$n, 2)
expect_equal(basename(diff$deleted), "test-1.txt")
expect_equal(basename(diff$added), "test-2.txt")
diff <- compare_state(
c(file1 = "62da2", file2 = "e14a6", file3 = "6e6dd"),
c(file1 = "62da2", file2 = "e14a6", file21 = "532fa", file3 = "3f4sa")
)
expect_equal(diff$n, 2)
expect_equal(basename(diff$added), "file21")
expect_equal(basename(diff$modified), "file3")
})
test_that("watcher works correctly", {
skip_on_ci()
skip_on_os("windows")
skip_on_cran()
if (Sys.which("bash") == "") {
skip("bash not available")
}
if (system("bash -c 'which touch'", ignore.stdout = TRUE) != 0L) {
skip("touch (or which) not available")
}
loc <- tempfile("watcher")
dir.create(loc)
code_path <- file.path(loc, "R")
test_path <- file.path(loc, "tests")
dir.create(code_path)
dir.create(test_path)
delayed.bash.cmd <- function(command) {
system(paste0("bash -c 'sleep 1;", command, "'"), wait = FALSE)
}
add.code.file <- function(file.name) {
delayed.bash.cmd(paste0("touch ", file.path(code_path, file.name)))
}
remove.code.file <- function(file.name) {
delayed.bash.cmd(paste0("rm ", file.path(code_path, file.name)))
}
test.added <- function(added, deleted, modified) {
expect_equal(length(added), 1)
expect_equal(grepl("test1.R", added), TRUE)
expect_equal(length(deleted), 0)
expect_equal(length(modified), 0)
FALSE
}
test.removed <- function(added, deleted, modified) {
expect_equal(length(added), 0)
expect_equal(length(deleted), 1)
expect_equal(grepl("test1.R", deleted), TRUE)
expect_equal(length(modified), 0)
FALSE
}
add.code.file("test1.R")
watch(c(code_path, test_path), test.added)
remove.code.file("test1.R")
watch(c(code_path, test_path), test.removed)
}) |
# Automated binary classification with LightGBM: data prep, optional grid
# tuning, model training, train/test scoring, evaluation metrics, and
# evaluation plots. Arguments mirror LightGBM's parameters plus pipeline
# bookkeeping (paths, ModelID, OutputSelection, ...). When
# ReturnModelObjects = TRUE, returns a list with the fitted model, scored
# data, plots, metrics, variable importance, grid results, column names,
# factor levels, and the saved argument list.
#
# Fixes vs. previous revision:
#  - GridTuner call referenced the undefined `eval_metric`; it now passes the
#    `metric` argument.
#  - The train-data metrics save block wrote the (empty) TestData entry to a
#    "_Test_" CSV; it now writes the TrainData entry to a "_Train_" CSV.
#  - CatBoostPDF was invoked with ModelType = "regression" in a classifier.
#  - The warning option is restored to its prior value via on.exit() instead
#    of being hard-set to 1.
AutoLightGBMClassifier <- function(data = NULL,
                                   TrainOnFull = FALSE,
                                   ValidationData = NULL,
                                   TestData = NULL,
                                   TargetColumnName = NULL,
                                   FeatureColNames = NULL,
                                   PrimaryDateColumn = NULL,
                                   IDcols = NULL,
                                   WeightsColumnName = NULL,
                                   CostMatrixWeights = c(1,0,0,1),
                                   EncodingMethod = 'credibility',
                                   OutputSelection = c('Importances', 'EvalPlots', 'EvalMetrics', 'Score_TrainData'),
                                   model_path = NULL,
                                   metadata_path = NULL,
                                   DebugMode = FALSE,
                                   SaveInfoToPDF = FALSE,
                                   ModelID = 'TestModel',
                                   ReturnFactorLevels = TRUE,
                                   ReturnModelObjects = TRUE,
                                   SaveModelObjects = FALSE,
                                   NumOfParDepPlots = 3L,
                                   Verbose = 0L,
                                   GridTune = FALSE,
                                   grid_eval_metric = 'Utility',
                                   BaselineComparison = 'default',
                                   MaxModelsInGrid = 10L,
                                   MaxRunsWithoutNewWinner = 20L,
                                   MaxRunMinutes = 24L*60L,
                                   PassInGrid = NULL,
                                   input_model = NULL,
                                   task = 'train',
                                   device_type = 'CPU',
                                   NThreads = parallel::detectCores() / 2,
                                   objective = 'binary',
                                   metric = 'binary_logloss',
                                   boosting = 'gbdt',
                                   LinearTree = FALSE,
                                   Trees = 50L,
                                   eta = NULL,
                                   num_leaves = 31,
                                   deterministic = TRUE,
                                   force_col_wise = FALSE,
                                   force_row_wise = FALSE,
                                   max_depth = NULL,
                                   min_data_in_leaf = 20,
                                   min_sum_hessian_in_leaf = 0.001,
                                   bagging_freq = 0,
                                   bagging_fraction = 1.0,
                                   feature_fraction = 1.0,
                                   feature_fraction_bynode = 1.0,
                                   extra_trees = FALSE,
                                   early_stopping_round = 10,
                                   first_metric_only = TRUE,
                                   max_delta_step = 0.0,
                                   lambda_l1 = 0.0,
                                   lambda_l2 = 0.0,
                                   linear_lambda = 0.0,
                                   min_gain_to_split = 0,
                                   drop_rate_dart = 0.10,
                                   max_drop_dart = 50,
                                   skip_drop_dart = 0.50,
                                   uniform_drop_dart = FALSE,
                                   top_rate_goss = FALSE,
                                   other_rate_goss = FALSE,
                                   monotone_constraints = NULL,
                                   monotone_constraints_method = 'advanced',
                                   monotone_penalty = 0.0,
                                   forcedsplits_filename = NULL,
                                   refit_decay_rate = 0.90,
                                   path_smooth = 0.0,
                                   max_bin = 255,
                                   min_data_in_bin = 3,
                                   data_random_seed = 1,
                                   is_enable_sparse = TRUE,
                                   enable_bundle = TRUE,
                                   use_missing = TRUE,
                                   zero_as_missing = FALSE,
                                   two_round = FALSE,
                                   convert_model = NULL,
                                   convert_model_language = "cpp",
                                   boost_from_average = TRUE,
                                   is_unbalance = FALSE,
                                   scale_pos_weight = 1.0,
                                   is_provide_training_metric = TRUE,
                                   eval_at = c(1,2,3,4,5),
                                   num_machines = 1,
                                   gpu_platform_id = -1,
                                   gpu_device_id = -1,
                                   gpu_use_dp = TRUE,
                                   num_gpu = 1) {

  # Silence warnings during the run; restore the caller's setting on exit.
  op <- options(warn = -1)
  on.exit(options(op), add = TRUE)

  # Collapse the user-facing arguments into LightGBM's parameter list.
  params <- LightGBMArgs(input_model.=input_model, task.=tolower(task), objective.=objective, boosting.=boosting, LinearTree.=LinearTree, Trees.=Trees, eta.=eta, num_leaves.=num_leaves, NThreads.=NThreads, device_type.=tolower(device_type), deterministic.=deterministic, force_col_wise.=force_col_wise, force_row_wise.=force_row_wise, max_depth.=max_depth, min_data_in_leaf.=min_data_in_leaf, min_sum_hessian_in_leaf.=min_sum_hessian_in_leaf, bagging_freq.=bagging_freq, bagging_fraction.=bagging_fraction, feature_fraction.=feature_fraction, feature_fraction_bynode.=feature_fraction_bynode, extra_trees.=extra_trees, early_stopping_round.=early_stopping_round, first_metric_only.=first_metric_only, max_delta_step.=max_delta_step, lambda_l1.=lambda_l1, lambda_l2.=lambda_l2, linear_lambda.=linear_lambda, min_gain_to_split.=min_gain_to_split, drop_rate_dart.=drop_rate_dart, max_drop_dart.=max_drop_dart, skip_drop_dart.=skip_drop_dart, uniform_drop_dart.=uniform_drop_dart, top_rate_goss.=top_rate_goss, other_rate_goss.=other_rate_goss, monotone_constraints.=monotone_constraints, monotone_constraints_method.=monotone_constraints_method, monotone_penalty.=monotone_penalty, forcedsplits_filename.=forcedsplits_filename, refit_decay_rate.=refit_decay_rate, path_smooth.=path_smooth, max_bin.=max_bin, min_data_in_bin.=min_data_in_bin, data_random_seed.=data_random_seed, is_enable_sparse.=is_enable_sparse, enable_bundle.=enable_bundle, use_missing.=use_missing, zero_as_missing.=zero_as_missing, two_round.=two_round, convert_model.=convert_model, convert_model_language.=convert_model_language, boost_from_average.=boost_from_average, alpha.=NULL, fair_c.=NULL, poisson_max_delta_step.=NULL, tweedie_variance_power.=NULL, lambdarank_truncation_level.=NULL, is_unbalance.=is_unbalance, scale_pos_weight.=scale_pos_weight, multi_error_top_k.=NULL, is_provide_training_metric.=is_provide_training_metric, eval_at.=eval_at, gpu_platform_id.=gpu_platform_id, gpu_device_id.=gpu_device_id,
                          gpu_use_dp.=gpu_use_dp, num_gpu.=num_gpu)

  # Snapshot the call arguments for reproducibility; drop the data sets and
  # local bookkeeping before saving.
  ArgsList <- c(as.list(environment()))
  ArgsList[['data']] <- NULL
  ArgsList[['ValidationData']] <- NULL
  ArgsList[['TestData']] <- NULL
  ArgsList[['op']] <- NULL
  if(SaveModelObjects) {
    if(!is.null(metadata_path)) {
      save(ArgsList, file = file.path(metadata_path, paste0(ModelID, "_ArgsList.Rdata")))
    } else if(!is.null(model_path)) {
      save(ArgsList, file = file.path(model_path, paste0(ModelID, "_ArgsList.Rdata")))
    }
  }

  # Data prep: encoding, partitioning, and lgb.Dataset construction ----
  if(DebugMode) print("Data prep ----")
  Output <- XGBoostDataPrep(Algo="lightgbm", ModelType="classification", data.=data, ValidationData.=ValidationData, TestData.=TestData, TargetColumnName.=TargetColumnName, FeatureColNames.=FeatureColNames, WeightsColumnName.=WeightsColumnName, IDcols.=IDcols, TransformNumericColumns.=NULL, Methods.=NULL, ModelID.=ModelID, model_path.=model_path, TrainOnFull.=TrainOnFull, SaveModelObjects.=SaveModelObjects, ReturnFactorLevels.=ReturnFactorLevels, EncodingMethod.=EncodingMethod)
  TransformNumericColumns <- Output$TransformNumericColumns; Output$TransformNumericColumns <- NULL
  TransformationResults <- Output$TransformationResults; Output$TransformationResults <- NULL
  FactorLevelsList <- Output$FactorLevelsList; Output$FactorLevelsList <- NULL
  FinalTestTarget <- Output$FinalTestTarget; Output$FinalTestTarget <- NULL
  WeightsVector <- Output$WeightsVector; Output$WeightsVector <- NULL
  datavalidate <- Output$datavalidate; Output$datavalidate <- NULL
  TargetLevels <- Output$TargetLevels; Output$TargetLevels <- NULL
  TrainTarget <- Output$TrainTarget; Output$TrainTarget <- NULL
  TrainMerge <- Output$TrainMerge; Output$TrainMerge <- NULL
  ValidMerge <- Output$ValidMerge; Output$ValidMerge <- NULL
  TestTarget <- Output$TestTarget; Output$TestTarget <- NULL
  datatrain <- Output$datatrain; Output$datatrain <- NULL
  dataTrain <- Output$dataTrain; Output$dataTrain <- NULL
  TestMerge <- Output$TestMerge; Output$TestMerge <- NULL
  TestData <- Output$TestData; Output$TestData <- NULL
  datatest <- Output$datatest; Output$datatest <- NULL
  EvalSets <- Output$EvalSets; Output$EvalSets <- NULL
  dataTest <- Output$dataTest; Output$dataTest <- NULL
  IDcols <- Output$IDcols; Output$IDcols <- NULL
  Names <- Output$Names; rm(Output)

  if(!is.null(WeightsColumnName)) {
    # NOTE(review): this yields 0 whenever the weight column exists (index of
    # the match within the length-1 WeightsColumnName vector, minus 1). If a
    # 0-based column position is intended it should likely be
    # which(names(dataTrain) %chin% WeightsColumnName) - 1L — confirm against
    # LightGBM's weight_column semantics before changing.
    params[["weight_column"]] <- which(WeightsColumnName %chin% names(dataTrain)) - 1L
  }

  # Grid tuning (optional) ----
  ExperimentalGrid <- NULL; BestGrid <- NULL
  if(DebugMode) print("Grid tuning ----")
  if(GridTune) {
    # Fix: pass the `metric` argument (previously referenced the undefined
    # symbol `eval_metric`, which errored whenever GridTune = TRUE).
    Output <- LightGBMGridTuner(ModelType="classification", TrainOnFull.=TrainOnFull, DebugMode.=DebugMode, params.=params, num_iterations.=params$num_iterations, max_depth.=params$max_depth, eta.=params$eta, num_leaves.=params$num_leaves, min_data_in_leaf.=params$min_data_in_leaf, bagging_freq.=params$bagging_freq, bagging_fraction.=params$bagging_fraction, feature_fraction.=params$feature_fraction, feature_fraction_bynode.=params$feature_fraction_bynode, lambda_l1.=params$lambda_l1, lambda_l2.=params$lambda_l2, LossFunction=NULL, EvalMetric=metric, grid_eval_metric.=grid_eval_metric, CostMatrixWeights=CostMatrixWeights, TargetColumnName.=TargetColumnName, datatrain.=datatrain, dataTest.=dataTest, TestData.=TestData, EvalSets.=EvalSets, TestTarget.=TestTarget, FinalTestTarget.=FinalTestTarget, TargetLevels.=NULL, MaxRunsWithoutNewWinner=MaxRunsWithoutNewWinner, MaxModelsInGrid=MaxModelsInGrid, MaxRunMinutes=MaxRunMinutes, BaselineComparison.=BaselineComparison, SaveModelObjects=SaveModelObjects, metadata_path=metadata_path, model_path=model_path, ModelID=ModelID, NumLevels.=NULL)
    ExperimentalGrid <- Output$ExperimentalGrid
    BestGrid <- Output$BestGrid
  }

  # Merge tuned / passed-in parameters into the final parameter list ----
  if(DebugMode) print("Final Params ----")
  params <- LightGBMFinalParams(params.=params, GridTune.=GridTune, PassInGrid.=PassInGrid, TrainOnFull.=TrainOnFull, BestGrid.=BestGrid, Trees.=params[["Trees"]], eta.=params[["eta"]], num_leaves.=params[["num_leaves"]], max_depth.=params[["max_depth"]], min_data_in_leaf.=params[["min_data_in_leaf"]], bagging_freq.=params[["bagging_freq"]], bagging_fraction.=params[["bagging_fraction"]], feature_fraction.=params[["feature_fraction"]], feature_fraction_bynode.=params[["feature_fraction_bynode"]])

  # Train and optionally persist the model ----
  if(DebugMode) print("Build model ----")
  model <- lightgbm::lgb.train(params=params, data=datatrain, valids=EvalSets, nrounds = 5L)
  if(DebugMode) print("Save Model ----")
  if(SaveModelObjects) lightgbm::lgb.save(booster=model, filename=file.path(model_path, paste0(ModelID, ".txt")))

  # Score train (+ validation) data when requested ----
  if(DebugMode) print("TrainData + ValidationData Scoring + Shap ----")
  if("score_traindata" %chin% tolower(OutputSelection) && !TrainOnFull) {
    predict <- data.table::as.data.table(predict(model, as.matrix(dataTrain)))
    if(!is.null(datavalidate)) {
      predict_validate <- data.table::as.data.table(predict(model, as.matrix(dataTest)))
      predict <- data.table::rbindlist(list(predict, predict_validate))
      data.table::setnames(predict, names(predict), "Predict")
      rm(predict_validate)
    }
    Output <- XGBoostValidationData(model.=model, TestData.=NULL, ModelType="classification", TrainOnFull.=TRUE, TestDataCheck=FALSE, FinalTestTarget.=FinalTestTarget, TestTarget.=TestTarget, TrainTarget.=TrainTarget, TrainMerge.=TrainMerge, TestMerge.=TestMerge, dataTest.=dataTest, data.=dataTrain, predict.=predict, TargetColumnName.=TargetColumnName, SaveModelObjects. = SaveModelObjects, metadata_path.=metadata_path, model_path.=model_path, ModelID.=ModelID, LossFunction.=NULL, TransformNumericColumns.=TransformNumericColumns, GridTune.=GridTune, TransformationResults.=TransformationResults, TargetLevels.=NULL)
    TrainData <- Output$ValidationData; rm(Output)
    if(!"Predict" %chin% names(TrainData)) data.table::setnames(TrainData, "V1", "Predict")
  } else {
    TrainData <- NULL
  }

  # Score the holdout (TestData > validation > train, in that order) ----
  if(DebugMode) print("Grid Score Model ----")
  predict <- predict(object = model, if(!is.null(TestData)) as.matrix(TestData) else if(!is.null(ValidationData) && !TrainOnFull) as.matrix(dataTest) else as.matrix(dataTrain))

  if(DebugMode) print("Validation, Importance, Shap data ----")
  Output <- XGBoostValidationData(ModelType="classification", TestDataCheck=!is.null(TestData), TrainOnFull.=TrainOnFull, model.=model, TargetColumnName.=TargetColumnName, SaveModelObjects.=SaveModelObjects, metadata_path.=metadata_path, model_path.=model_path, ModelID.=ModelID, TestData.=TestData, TestTarget.=TestTarget, FinalTestTarget.=FinalTestTarget, TestMerge.=TestMerge, dataTest.=dataTest, TrainTarget.=TrainTarget, predict.=predict, TransformNumericColumns.=TransformNumericColumns, TransformationResults.=TransformationResults, GridTune.=GridTune, data.=dataTrain)
  VariableImportance <- Output$VariableImportance; Output$VariableImportance <- NULL
  ValidationData <- Output$ValidationData; rm(Output)

  # Evaluation metrics (threshold-based and binned) ----
  if(DebugMode) print("Running BinaryMetrics()")
  EvalMetricsList <- list()
  EvalMetrics2List <- list()
  if("evalmetrics" %chin% tolower(OutputSelection)) {
    if("score_traindata" %chin% tolower(OutputSelection) && !TrainOnFull) {
      EvalMetricsList[["TrainData"]] <- BinaryMetrics(ClassWeights.=NULL, CostMatrixWeights.=CostMatrixWeights, SaveModelObjects.=FALSE, ValidationData.=TrainData, TrainOnFull.=TrainOnFull, TargetColumnName.=TargetColumnName, ModelID.=ModelID, model_path.=model_path, metadata_path.=metadata_path, Method = "threshold")
      EvalMetrics2List[["TrainData"]] <- BinaryMetrics(ClassWeights.=NULL, CostMatrixWeights.=CostMatrixWeights, SaveModelObjects.=FALSE, ValidationData.=TrainData, TrainOnFull.=TrainOnFull, TargetColumnName.=TargetColumnName, ModelID.=ModelID, model_path.=model_path, metadata_path.=metadata_path, Method = "bins")
      if(SaveModelObjects) {
        # Fix: save the TrainData metrics under a "_Train_" file name
        # (previously wrote the not-yet-populated TestData entry to a
        # "_Test_" CSV).
        if(!is.null(metadata_path)) {
          data.table::fwrite(EvalMetricsList[['TrainData']], file = file.path(metadata_path, paste0(ModelID, "_Train_EvaluationMetrics.csv")))
        } else if(!is.null(model_path)) {
          data.table::fwrite(EvalMetricsList[['TrainData']], file = file.path(model_path, paste0(ModelID, "_Train_EvaluationMetrics.csv")))
        }
      }
    }
    EvalMetricsList[["TestData"]] <- BinaryMetrics(ClassWeights.=NULL, CostMatrixWeights.=CostMatrixWeights, SaveModelObjects.=FALSE, ValidationData.=ValidationData, TrainOnFull.=TrainOnFull, TargetColumnName.=TargetColumnName, ModelID.=ModelID, model_path.=model_path, metadata_path.=metadata_path, Method = "threshold")
    EvalMetrics2List[["TestData"]] <- BinaryMetrics(ClassWeights.=NULL, CostMatrixWeights.=CostMatrixWeights, SaveModelObjects.=FALSE, ValidationData.=ValidationData, TrainOnFull.=TrainOnFull, TargetColumnName.=TargetColumnName, ModelID.=ModelID, model_path.=model_path, metadata_path.=metadata_path, Method = "bins")
    if(SaveModelObjects) {
      if(!is.null(metadata_path)) {
        data.table::fwrite(EvalMetricsList[['TestData']], file = file.path(metadata_path, paste0(ModelID, "_Test_EvaluationMetrics.csv")))
      } else if(!is.null(model_path)) {
        data.table::fwrite(EvalMetricsList[['TestData']], file = file.path(model_path, paste0(ModelID, "_Test_EvaluationMetrics.csv")))
      }
    }
  }

  # Evaluation plots ----
  if(DebugMode) print("Running ML_EvalPlots()")
  PlotList <- list()
  if("evalplots" %chin% tolower(OutputSelection)) {
    if("score_traindata" %chin% tolower(OutputSelection) && !TrainOnFull) {
      Output <- ML_EvalPlots(ModelType="classification", DataType = 'Train', TrainOnFull.=TrainOnFull, ValidationData.=TrainData, NumOfParDepPlots.=NumOfParDepPlots, VariableImportance.=VariableImportance, TargetColumnName.=TargetColumnName, FeatureColNames.=FeatureColNames, SaveModelObjects.=SaveModelObjects, ModelID.=ModelID, metadata_path.=metadata_path, model_path.=model_path, LossFunction.=NULL, EvalMetric.=NULL, EvaluationMetrics.=NULL, predict.=NULL)
      PlotList[["Train_EvaluationPlot"]] <- Output$EvaluationPlot; Output$EvaluationPlot <- NULL
      PlotList[["Train_ParDepPlots"]] <- Output$ParDepPlots; Output$ParDepPlots <- NULL
      PlotList[["Train_GainsPlot"]] <- Output$GainsPlot; Output$GainsPlot <- NULL
      PlotList[["Train_LiftPlot"]] <- Output$LiftPlot; Output$LiftPlot <- NULL
      PlotList[["Train_ROC_Plot"]] <- Output$ROC_Plot; rm(Output)
    }
    Output <- ML_EvalPlots(ModelType="classification", DataType = 'Test', TrainOnFull.=TrainOnFull, ValidationData.=ValidationData, NumOfParDepPlots.=NumOfParDepPlots, VariableImportance.=VariableImportance, TargetColumnName.=TargetColumnName, FeatureColNames.=FeatureColNames, SaveModelObjects.=SaveModelObjects, ModelID.=ModelID, metadata_path.=metadata_path, model_path.=model_path, LossFunction.=NULL, EvalMetric.=NULL, EvaluationMetrics.=NULL, predict.=NULL)
    PlotList[["Test_EvaluationPlot"]] <- Output$EvaluationPlot; Output$EvaluationPlot <- NULL
    PlotList[["Test_ParDepPlots"]] <- Output$ParDepPlots; Output$ParDepPlots <- NULL
    PlotList[["Test_GainsPlot"]] <- Output$GainsPlot; Output$GainsPlot <- NULL
    PlotList[["Test_LiftPlot"]] <- Output$LiftPlot; Output$LiftPlot <- NULL
    PlotList[["Test_ROC_Plot"]] <- Output$ROC_Plot; rm(Output)
    # Use requireNamespace() instead of scanning installed.packages() (which
    # is slow and matched against every field of the matrix).
    if(!is.null(VariableImportance) && requireNamespace("plotly", quietly = TRUE)) PlotList[["Train_VariableImportance"]] <- plotly::ggplotly(VI_Plot(Type = "xgboost", VariableImportance)) else if(!is.null(VariableImportance)) PlotList[["Train_VariableImportance"]] <- VI_Plot(Type = "xgboost", VariableImportance)
  }

  # Optional PDF report ----
  if(DebugMode) print("Save PDF of model information ----")
  if("pdfs" %chin% tolower(OutputSelection) && SaveModelObjects) {
    # Fix: this is a classifier, not a regression model.
    CatBoostPDF(ModelType="classification", TrainOnFull.=TrainOnFull, SaveInfoToPDF.=SaveInfoToPDF, VariableImportance.=VariableImportance, Interaction.=NULL, model_path.=model_path, metadata_path.=metadata_path)
  }

  # Return objects ----
  if(!exists("FactorLevelsList")) FactorLevelsList <- NULL
  if(DebugMode) print("Return objects ----")
  if(ReturnModelObjects) {
    return(list(
      Model = model,
      TrainData = if(exists("TrainData")) TrainData else NULL,
      TestData = if(exists("ValidationData")) ValidationData else NULL,
      PlotList = if(exists("PlotList")) PlotList else NULL,
      EvaluationMetrics = if(exists("EvalMetricsList")) EvalMetricsList else NULL,
      EvaluationMetrics2 = if(exists("EvalMetrics2List")) EvalMetrics2List else NULL,
      VariableImportance = if(exists("VariableImportance")) VariableImportance else NULL,
      GridMetrics = if(exists("ExperimentalGrid") && !is.null(ExperimentalGrid)) data.table::setorderv(ExperimentalGrid, cols = "EvalMetric", order = -1L, na.last = TRUE) else NULL,
      ColNames = if(exists("Names")) Names else NULL,
      FactorLevelsList = if(exists("FactorLevelsList")) FactorLevelsList else NULL,
      ArgsList = ArgsList))
  }
}
.build_client <-
  function(api,
           encode,
           version = NULL,
           progress = NULL,
           pat = getOption("osfr.pat")) {
    # Construct a crul::HttpClient for either the OSF API ("osf") or the
    # Waterbutler file service ("wb"). When the OSF_SERVER environment
    # variable is set, the hostname is templated with that server name;
    # otherwise the production host is used.
    api <- match.arg(api, c("osf", "wb"))
    encode <- match.arg(encode, c("form", "multipart", "json", "raw"))

    server <- Sys.getenv("OSF_SERVER")
    on_alt_server <- nzchar(server)
    host <- switch(api,
      osf = if (on_alt_server) sprintf("api.%s.osf.io", server) else "api.osf.io",
      wb  = if (on_alt_server) sprintf("files.us.%s.osf.io", server) else "files.osf.io"
    )

    headers <- list(`User-Agent` = user_agent())
    if (!is.null(pat)) {
      # Authenticate with the user's personal access token.
      headers$Authorization <- sprintf("Bearer %s", pat)
    }
    if (api == "osf") {
      # Pin the JSON:API version for OSF requests.
      headers$`Accept-Header` <- sprintf(
        "application/vnd.api+json;version=%s",
        version
      )
    }

    crul::HttpClient$new(
      url = paste0("https://", host),
      opts = list(encode = encode),
      headers = headers,
      hooks = list(request = log_request, response = log_response),
      progress = progress
    )
  }
# BLOSUM62 encoding of amino-acid sequences: each residue is replaced by its
# 23-value column of the BLOSUM62 substitution matrix shipped with the
# package, yielding 23 features per sequence position.
#
# seqs: a FASTA file path or a character vector of protein sequences.
# label: optional class labels, filtered alongside the sequences.
# outFormat: "mat" returns a feature matrix (equal-length sequences only);
#   any other value streams one tab-separated line per sequence to
#   outputFileDist.
# outputFileDist: destination file for the streaming output mode.
BLOSUM62<-function(seqs,label=c(),outFormat="mat",outputFileDist=""){
  path.pack=system.file("extdata",package="ftrCOOL")
  if(length(seqs)==1&&file.exists(seqs)){
    # A single string naming an existing file: read as amino-acid FASTA.
    seqs<-fa.read(seqs,alphabet="aa")
    seqs_Lab<-alphabetCheck(seqs,alphabet = "aa",label)
    seqs<-seqs_Lab[[1]]
    label<-seqs_Lab[[2]]
  }
  else if(is.vector(seqs)){
    seqs<-sapply(seqs,toupper)
    seqs_Lab<-alphabetCheck(seqs,alphabet = "aa",label)
    seqs<-seqs_Lab[[1]]
    label<-seqs_Lab[[2]]
  }
  else {
    stop("ERROR: Input sequence is not in the correct format. It should be a FASTA file or a string vector.")
  }
  lenSeqs<-sapply(seqs, nchar)
  numSeqs<-length(seqs)
  # 23 features per position: 20 amino acids plus the B, Z, X ambiguity codes.
  featureMatrix<-matrix(0, nrow = numSeqs, ncol = (lenSeqs[1]*23))
  # Packaged BLOSUM62 table; the first CSV column is presumably the residue
  # name and is moved to row names below — confirm against the shipped file.
  adBlosum<-paste0(path.pack,"/BLOSUM62.csv")
  Blosum<-read.csv(adBlosum)
  row.names(Blosum)<-Blosum[,1]
  Blosum<-Blosum[,-1]
  Blosum<-as.matrix(Blosum)
  Blosum<-type.convert(Blosum)
  # Column labels: "aa<position> d<residue>" for each position/residue pair.
  nameP1<-rep((1:(lenSeqs[1])),each=23)
  nameP1<-paste0("aa",nameP1)
  aaVect<-c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y","B","Z","X")
  if(outFormat=="mat"){
    if(length(unique(lenSeqs))>1){
      stop("ERROR: All sequences should have the same length in 'mat' mode. For sequences with different lengths, please use 'txt' for outFormat parameter")
    }
    nameP2<-paste("d",rep(aaVect,lenSeqs[1]),sep = "")
    colnames(featureMatrix)<-paste(nameP1,nameP2)
    for(n in 1:numSeqs){
      seq=seqs[n]
      chars=unlist(strsplit(seq,""))
      # Select the BLOSUM column for every residue, then flatten column-wise
      # into one long feature row.
      featureVector<-Blosum[,chars]
      featureVector<-as.vector(featureVector)
      featureMatrix[n,]<-featureVector
    }
    if(length(label)==numSeqs){
      featureMatrix<-as.data.frame(featureMatrix)
      featureMatrix<-cbind(featureMatrix,label)
    }
    row.names(featureMatrix)<-names(seqs)
    return(featureMatrix)
  }
  else{
    # Streaming mode: one "name<TAB>values..." line per sequence appended to
    # outputFileDist; handles sequences of differing lengths.
    nameSeq<-names(seqs)
    for(n in 1:numSeqs){
      seq<-seqs[n]
      chars<-unlist(strsplit(seq,split = ""))
      featureVector<-Blosum[,chars]
      vect<-as.vector(featureVector)
      temp<-c(nameSeq[n],vect)
      temp<-paste(temp,collapse = "\t")
      write(temp,outputFileDist,append = TRUE)
    }
  }
}
list2matrix.bas <- function(x, what, which.models = NULL) {
  # Expand the ragged per-model list `x[[what]]` into a dense model-by-variable
  # matrix. Entry (i, j) holds the value for variable j in model i; variables
  # not included in a model remain 0.
  #
  # x: a bas-style fit with components `namesx` (variable names), `n.models`,
  #    `which` (0-based variable indices per model), and `x[[what]]`.
  # what: name of the list component to expand (e.g. "probne0").
  # which.models: optional subset of model indices; defaults to all models.
  # Returns: a numeric matrix, one row per selected model, columns named by
  #   variable.
  var.names <- x$namesx
  if (is.null(which.models)) {
    which.models <- seq_len(x$n.models)
  }
  values <- x[[what]][which.models]
  included <- x$which[which.models]
  out <- matrix(0,
    nrow = length(which.models),
    ncol = length(var.names),
    dimnames = list(NULL, var.names)
  )
  for (row in seq_along(which.models)) {
    # `which` stores 0-based indices, so shift by one to address columns.
    out[row, included[[row]] + 1] <- values[[row]]
  }
  out
}
list2matrix.which <- function(x, which.models = NULL) {
  # Convert the 0-based inclusion indices in `x$which` into a binary
  # model-by-variable indicator matrix (1 = variable included in the model).
  #
  # x: a bas-style fit with `namesx` and `which` components.
  # which.models: optional subset of model indices; NULL keeps them all.
  # Returns: a 0/1 matrix, one row per model, columns named by variable.
  inclusions <- x$which
  if (!is.null(which.models)) {
    inclusions <- inclusions[which.models]
  }
  n.vars <- length(x$namesx)
  to_flags <- function(idx) {
    flags <- rep(0, n.vars)
    flags[idx + 1] <- 1  # shift 0-based indices to 1-based columns
    flags
  }
  mat <- t(vapply(inclusions, to_flags, numeric(n.vars)))
  colnames(mat) <- x$namesx
  mat
}
which.matrix <- function(which, n.vars) {
  # Build a binary inclusion matrix from a list of 0-based variable indices:
  # row i has a 1 in column j exactly when variable j-1 appears in which[[i]].
  #
  # which: list of integer vectors (0-based variable indices per model).
  # n.vars: total number of variables (matrix column count).
  to_flags <- function(idx) {
    flags <- rep(0, n.vars)
    flags[idx + 1] <- 1  # 0-based indices -> 1-based columns
    flags
  }
  # vapply + t() mirrors the original simplification behavior exactly.
  t(vapply(which, to_flags, numeric(n.vars)))
}
context("elevation utils")
testthat::skip_on_cran()
raster_poa <- system.file("extdata/poa/poa_elevation.tif", package = "r5r")
data_path <- system.file("extdata/poa", package = "r5r")
r5r_core <- setup_r5(data_path = data_path, temp_dir = TRUE)
test_that("tobler_hiking", {
expect_error( tobler_hiking('bananas') )
expect_error( tobler_hiking() )
expect_identical( round( r5r:::tobler_hiking(1)), 33)
})
test_that("apply_elevation", {
if (requireNamespace("rgdal", quietly = TRUE)) {
expect_silent( r5r:::apply_elevation(r5r_core, raster_poa) )
expect_silent( r5r:::apply_elevation(r5r_core, c(raster_poa,raster_poa)) )
}
expect_error( r5r:::apply_elevation('bananas', raster_poa) )
expect_error( r5r:::apply_elevation(r5r_core, 'bananas') )
})
stop_r5(r5r_core) |
bbase.os <-
  function(x, K, bdeg = 3, eps = 1e-5, intercept = TRUE) {
    # B-spline basis for `x` with K + bdeg basis functions, via splines::bs().
    #
    # x: numeric vector of evaluation points.
    # K: controls basis size; the basis has K + bdeg columns.
    # bdeg: spline degree (default cubic).
    # eps: unused in this body — kept for interface compatibility; presumably
    #   a tolerance used by related basis constructors (TODO confirm).
    # intercept: passed through to bs(); TRUE keeps the full basis.
    # Returns: the basis matrix produced by bs().
    bs(x, degree = bdeg, df = K + bdeg, intercept = intercept)
  }
# Threshold-choice (stability) plot for GPD fits: refit a GPD at a grid of
# nt thresholds and plot the modified scale (scale - shape * u) and shape
# with normal-approximation confidence intervals. Estimates should be
# roughly constant above any valid threshold.
#
# data: numeric sample.
# u.range: threshold range; defaults to (min, 5th-largest observation).
# nt: number of thresholds; conf: CI level; which: which panels to draw.
# cmax, r, ulow, rlow: accepted but unused in this body — presumably
#   declustering options shared with a sibling method (TODO confirm).
# Returns (invisibly): list with the scale and shape CI matrices.
tcplot <- function (data, u.range, cmax = FALSE, r = 1,
    ulow = -Inf, rlow = 1, nt = 25, which = 1:npar, conf = 0.95,
    lty = 1, lwd = 1, type = "b", cilty = 1, ask = nb.fig < length(which) &&
        dev.interactive(), ...){
    n <- length(data)
    data <- sort(data)
    if (missing(u.range)) {
        # Default: keep at least 5 exceedances, and nudge just below the data
        # values to avoid ties at the threshold.
        u.range <- c(data[1], data[n - 4])
        u.range <- u.range - .Machine$double.eps^0.5
    }
    u <- seq(u.range[1], u.range[2], length = nt)
    locs <- scls <- shps <- matrix(NA, nrow = nt, ncol = 3)
    dimnames(locs) <- list(round(u, 2), c("lower", "loc", "upper"))
    dimnames(shps) <- list(round(u, 2), c("lower", "shape", "upper"))
    pname <- "mscale"
    npar <- 2
    dimnames(scls) <- list(round(u, 2), c("lower", pname, "upper"))
    z <- gpdmle(data, u[1], corr = TRUE, ...)
    stvals <- as.list(round(fitted(z), 3))
    for (i in 1:nt) {
        # Refit at each threshold and record estimate +/- normal CI.
        z <- gpdmle(data, u[i], corr = TRUE, ...)
        stvals <- as.list(fitted(z))
        mles <- fitted(z)
        stderrs <- z$std.err
        cnst <- qnorm((1 + conf)/2)
        shp <- mles["shape"]
        scl <- mles["scale"]
        shpse <- stderrs["shape"]
        sclse <- stderrs["scale"]
        # Modified scale sigma* = sigma - xi * u is threshold-invariant for a
        # true GPD, so its plot should be flat above an adequate threshold.
        scl <- scl - shp * u[i]
        covar <- z$corr[1, 2] * prod(stderrs)
        # Delta-method standard error of the modified scale.
        sclse <- sqrt(sclse^2 - 2 * u[i] * covar + (u[i] *
            shpse)^2)
        scls[i, ] <- c(scl - cnst * sclse, scl, scl + cnst *
            sclse)
        shps[i, ] <- c(shp - cnst * shpse, shp, shp + cnst *
            shpse)
    }
    show <- rep(FALSE, npar)
    show[which] <- TRUE
    nb.fig <- prod(par("mfcol"))
    if (ask) {
        # Prompt before each new page; restore the device setting on exit.
        op <- par(ask = TRUE)
        on.exit(par(op))
    }
    if (show[1]) {
        matplot(u, scls, type = "n", xlab = "Threshold",
            ylab = "Modified Scale")
        lines(u, scls[, 2], lty = lty, lwd = lwd, type = type)
        segments(u, scls[, 1], u, scls[, 3], lty = cilty)
    }
    if (show[2]) {
        matplot(u, shps, type = "n", xlab = "Threshold",
            ylab = "Shape")
        lines(u, shps[, 2], lty = lty, lwd = lwd, type = type)
        segments(u, shps[, 1], u, shps[, 3], lty = cilty)
    }
    rtlist <- list(scales = scls, shapes = shps)
    invisible(rtlist)
}
# Exploratory sanity checks on the merged Gapminder data (including the
# hand-added China 1952 row): read, coerce, and eyeball each variable in turn.
library(plyr)
suppressPackageStartupMessages(library(dplyr))
library(ggplot2)
library(readr)
gap_dat <- read_tsv("05_gap-merged-with-china-1952.tsv") %>%
  mutate(country = factor(country),
         continent = factor(continent))
gap_dat %>% str()
# Count missing values per column.
gap_dat %>%
  sapply(function(x) x %>% is.na() %>% sum())
# year: range, completeness of coverage, and distribution.
gap_dat$year %>% summary()
all.equal(gap_dat$year %>% unique() %>% sort(), 1950:2007)
ggplot(gap_dat, aes(x = year)) + geom_histogram(binwidth = 1)
# country: how many rows does each country contribute?
gap_dat$country %>% str()
country_freq <- gap_dat %>%
  count(country)
ggplot(country_freq, aes(x = country, y = n)) +
  geom_bar(stat = "identity")
(p <- ggplot(country_freq, aes(x = n)) + geom_histogram(binwidth = 1))
p + xlim(c(1, 16))
country_freq$n %>% table()
# continent: levels, counts, and whether any country switches continent.
gap_dat$continent %>% levels()
gap_dat$continent %>% summary()
tmp <- gap_dat %>%
  group_by(country) %>%
  summarize(n_continent = n_distinct(continent))
tmp$n_continent %>% table()
# pop, lifeExp, gdpPercap: summaries, extremes, and density shapes.
gap_dat$pop %>% summary(digits = 10)
gap_dat[which.min(gap_dat$pop),]
gap_dat[which.max(gap_dat$pop),]
ggplot(gap_dat,aes(x = pop)) + geom_density() + scale_x_log10()
gap_dat$lifeExp %>% summary()
ggplot(gap_dat,aes(x = lifeExp)) + geom_density()
gap_dat$gdpPercap %>% summary()
gap_dat[which.max(gap_dat$gdpPercap),]
ggplot(gap_dat,aes(x = gdpPercap)) + geom_density()
"sdtm_ae"
"sdtm_cm"
"sdtm_dm"
"sdtm_ds"
"sdtm_ex"
"sdtm_lb"
"sdtm_mh"
"sdtm_qs"
"sdtm_relrec"
"sdtm_sc"
"sdtm_se"
"sdtm_suppae"
"sdtm_suppdm"
"sdtm_suppds"
"sdtm_supplb"
"sdtm_sv"
"sdtm_ta"
"sdtm_te"
"sdtm_ti"
"sdtm_ts"
"sdtm_tv"
"sdtm_vs" |
# Add missing taxa to a dated tree and re-date the result.
#
# dated_tree: a dated phylo object (validated by tree_check).
# missing_taxa: taxa to add — a character vector, a data frame with a `taxon`
#   column, a tree, or NULL (validated by missing_taxa_check).
# dating_method: "mrbayes" (default) or "bladj".
# adding_criterion: how taxa are placed; invalid values fall back to "random".
# mrbayes_output_file: output file name for the MrBayes run.
# Returns: the re-dated phylo object.
tree_add_dates <- function(dated_tree = NULL,
                           missing_taxa = NULL,
                           dating_method = "mrbayes",
                           adding_criterion = "random",
                           mrbayes_output_file = "mrbayes_tree_add_dates.nexus") {
  dated_tree <- tree_check(tree = dated_tree, dated = TRUE)
  missing_taxa <- missing_taxa_check(missing_taxa = missing_taxa, dated_tree = dated_tree)
  dating_method <- match.arg(dating_method, c("bladj", "mrbayes"))
  # Any unrecognized adding_criterion silently degrades to "random".
  adding_criterion <- tryCatch(match.arg(adding_criterion, c("random", "taxonomy", "tree")), error = function(e) "random")
  if (dating_method == "bladj") {
    # bladj needs a topology that contains both the dated tips and the
    # missing taxa; build one from OToL unless a tree was supplied.
    if (inherits(missing_taxa, "phylo")) {
      missing_taxa_phy <- missing_taxa
    } else {
      if (is.data.frame(missing_taxa)) {
        all_taxa <- unique(c(dated_tree$tip.label, levels(missing_taxa$taxon)))
      }
      if (is.vector(missing_taxa)) {
        all_taxa <- unique(c(dated_tree$tip.label, missing_taxa))
      }
      missing_taxa_phy <- get_otol_synthetic_tree(input = all_taxa)
    }
    # Transfer node ages from the dated tree onto the expanded topology by
    # congruifying the two trees.
    constraint_tree <- suppressWarnings(geiger::congruify.phylo(
      reference = phylo_tiplabel_space_to_underscore(dated_tree),
      target = phylo_tiplabel_space_to_underscore(missing_taxa_phy), scale = NA,
      ncores = 1
    ))
    # Locate the MRCA node of each calibration pair in the target tree.
    dated_tree_nodes <- sapply(seq(nrow(constraint_tree$calibrations)), function(i) {
      phytools::findMRCA(
        tree = constraint_tree$target,
        tips = as.character(constraint_tree$calibrations[i, c("taxonA", "taxonB")]),
        type = "node"
      )
    })
    # Convert absolute node numbers to internal-node indices for node.label.
    dated_tree_nodes <- dated_tree_nodes - ape::Ntip(constraint_tree$target)
    missing_taxa_phy$node.label[dated_tree_nodes] <- paste0("cong", seq(nrow(constraint_tree$calibrations)))
    missing_taxa_phy <- tree_add_nodelabels(tree = missing_taxa_phy)
    # bladj is calibrated with the midpoint of each (MinAge, MaxAge) pair.
    new.phy <- make_bladj_tree(tree = missing_taxa_phy, nodenames = missing_taxa_phy$node.label[dated_tree_nodes], nodeages = sapply(seq(nrow(constraint_tree$calibrations)), function(i) sum(constraint_tree$calibrations[i, c("MinAge", "MaxAge")]) / 2))
  }
  if (dating_method == "mrbayes") {
    # A temporary outgroup anchors the MrBayes constraint tree; it is dropped
    # again after the run.
    dated_tree <- tree_add_outgroup(tree = dated_tree, outgroup = "an_outgroup")
    ncalibration <- tree_get_node_data(tree = dated_tree, node_data = c("node_age", "descendant_tips_label"))
    new.phy <- make_mrbayes_tree(constraint = dated_tree, ncalibration = ncalibration, missing_taxa = missing_taxa, mrbayes_output_file = mrbayes_output_file)
    new.phy <- ape::drop.tip(new.phy, "an_outgroup")
  }
  return(new.phy)
}
missing_taxa_check <- function(missing_taxa = NULL, dated_tree = NULL) {
  # Validate and normalize the 'missing_taxa' argument for tree_add_dates().
  # Accepts: a data frame with a 'taxon' column (plus 'clade' if multi-column),
  # a phylo object or newick string, a character vector of names, or NULL.
  # Returns the normalized value, or stops with an informative error.
  badformat <- TRUE
  if (is.data.frame(missing_taxa)) {
    if ("taxon" %in% names(missing_taxa)) {
      badformat <- FALSE
      # A multi-column data frame must also say which clade each taxon goes in.
      if (length(missing_taxa) > 1 & !"clade" %in% names(missing_taxa)) {
        badformat <- TRUE
      }
    }
  } else {
    missing_taxa_phy <- input_process(missing_taxa)
    if (inherits(missing_taxa_phy, "phylo")) {
      phylo_check(phy = dated_tree, dated = TRUE)
      # FIX: phylo objects store tips in 'tip.label' (not 'tip.labels'), and
      # the comparison must use the processed tree 'missing_taxa_phy' — the raw
      # input may be a newick string, on which `$` would fail.
      dtINmt <- dated_tree$tip.label %in% missing_taxa_phy$tip.label
      mtINdt <- missing_taxa_phy$tip.label %in% dated_tree$tip.label
      if (!all(dtINmt)) {
        warning("not all taxa from dated_tree are in missing_taxa tree")
      }
      # NOTE(review): as in the original code, the pruned tree is computed but
      # the unpruned processed tree is what gets returned — confirm intent.
      missing_taxa_pruned <- ape::drop.tip(missing_taxa_phy, missing_taxa_phy$tip.label[mtINdt])
      missing_taxa <- missing_taxa_phy
      badformat <- FALSE
    } else {
      missing_taxa <- as.character(missing_taxa)
      # Encode genuine NA entries as the literal string "NA".
      missing_taxa[which(is.na(missing_taxa))] <- "NA"
      badformat <- FALSE
    }
  }
  # An empty input is treated the same as NULL.
  if (length(missing_taxa) == 0) {
    missing_taxa <- NULL
    badformat <- FALSE
  }
  if (badformat) {
    stop("missing_taxa must be a character vector with species names,
    a data frame with taxonomic assignations, a newick character string, a phylo object, or NULL")
  }
  return(missing_taxa)
}
"dataAGGR" |
# Regression tests for amt::hr_overlap() across home-range estimators
# (MCP, LoCoH, KDE) and both calling conventions (two objects / a list).
library(amt)
data(amt_fisher)
set.seed(123)
# Shared raster template so both KDE home ranges live on the same grid.
tr <- make_trast(amt_fisher[1:50, ], res = 5)
mini_fisher <- amt_fisher[1:40, ]
mcp <- hr_mcp(mini_fisher)
loc <- hr_locoh(mini_fisher)
kde <- hr_kde(mini_fisher)
# A second, partially overlapping subset of relocations.
mini_fisher1 <- amt_fisher[11:50, ]
mcp1 <- hr_mcp(mini_fisher1)
loc1 <- hr_locoh(mini_fisher1)
kde1 <- hr_kde(mini_fisher1, tr = tr)
expect_inherits(hr_overlap(mcp, mcp1), "tbl_df")
expect_inherits(hr_overlap(list(mcp, mcp1)), "tbl_df")
expect_inherits(hr_overlap(kde, kde1), "tbl_df")
expect_inherits(hr_overlap(list(kde, kde1)), "tbl_df")
# The "vi" overlap index is not available here, so this must error.
expect_error(hr_overlap(kde, kde1, type = "vi"))
expect_error(hr_overlap(list(kde, kde1), type = "vi")) |
brute_IDs <- function(total.length, redundancy, alphabet, num.tries = 10, available.colors = NULL) {
  # Greedy random search for a set of color-band IDs of length 'total.length'
  # over an alphabet of 'alphabet' symbols such that every pair of IDs differs
  # in more than 'redundancy' positions (i.e. is robust to 'redundancy'
  # erasures). The best (largest) set over 'num.tries' randomized runs is
  # returned, mapped to color names via codes_to_colors().
  if (missing(alphabet)) {
    stop("Error: you need to enter an 'alphabet size,' e.g. the number of paint colors you have")
  }
  if (missing(total.length)) {
    stop("Error: you need to enter the total length of the ID, e.g. how many color bands or paint drops on each organism")
  }
  if (missing(redundancy)) {
    stop("Error: you need specify to how many erasure events the IDs should be robust. Note, an increase in robustness requires an increase in the total length of the ID. ")
  }
  if (redundancy >= total.length || redundancy == 0) {
    stop("Error: the code must be robust to at least one erasure. It also cannot be robust to a number of positions equal to or greater than the total length.")
  }
  if (!is.numeric(num.tries)) {
    stop(paste0("Error: the variable 'num.tries' must be of the class 'numeric,' not '", class(num.tries), ".'"))
  }
  # One randomized greedy pass: repeatedly pick a random remaining codeword,
  # keep it, and discard every codeword within Hamming distance 'redundancy'.
  tester <- function(total.length, redundancy, alphabet) {
    perms <- rep(list(seq_len(alphabet)), total.length)
    # All codewords over symbols 0..(alphabet-1), as a list of integer vectors.
    combos <- as.matrix(expand.grid(perms)) - 1
    combo.list <- split(combos, seq_len(nrow(combos)))
    names(combo.list) <- NULL
    x <- sample(seq_along(combo.list), 1)
    new.combs <- combo.list[x]
    names(new.combs) <- NULL
    combo.list <- combo.list[stringdist::seq_distmatrix(combo.list, new.combs, method = "hamming")[, length(new.combs)] > redundancy]
    names(combo.list) <- seq_along(combo.list)
    while (length(combo.list) > 0) {
      x <- sample(seq_along(combo.list), 1)
      new.combs[length(new.combs) + 1] <- (combo.list[x])
      combo.list <- combo.list[stringdist::seq_distmatrix(combo.list, new.combs, method = "hamming")[, length(new.combs)] > redundancy]
      if (length(combo.list) != 0) {
        names(combo.list) <- seq_along(combo.list)
      }
    }
    return(new.combs)
  }
  # Keep the largest code set found over all tries.
  # FIX: the original seeded the comparison with the scalar 0 (length 1), so a
  # run yielding a single codeword could hand a bare 0 to codes_to_colors().
  best <- NULL
  best_size <- 0
  for (i in seq_len(num.tries)) {
    candidate <- tester(total.length, redundancy, alphabet)
    if (length(candidate) > best_size) {
      best <- candidate
      best_size <- length(candidate)
    }
  }
  best <- codes_to_colors(best, available.colors)
  return(best)
}
# Tests for ash() fitted with half-uniform mixture priors: the fitted mixture
# components must sit entirely on the requested side of zero.
context("ashr with half-uniform mixture priors")
test_that("mixcompdist=+uniform gives all non-negative values for b and zero for a", {
  # +uniform components are intervals [0, b], so every a is 0 and every b >= 0.
  set.seed(1); z=rnorm(10); z.ash=ash(z,1,mixcompdist="+uniform")
  k = length(z.ash$fitted_g$pi)
  expect_true(all(z.ash$fitted_g$b >= rep(0,k)))
  expect_equal(z.ash$fitted_g$a,rep(0,k))
})
test_that("mixcompdist=-uniform gives all non-positive values for a and zero for b", {
set.seed(1); z=rnorm(10); z.ash=ash(z,1,mixcompdist="-uniform")
k = length(z.ash$fitted_g$pi)
expect_equal(z.ash$fitted_g$b,rep(0,k))
expect_true(all(z.ash$fitted_g$a <= 0))
}) |
# Teaching demo: the built-in 'women' data set, list construction, and base
# plotting. Intended to be run interactively line by line.
women
names(women)
# NOTE(review): this first bare 'height' only works if it already exists in
# the workspace — the column is attached two lines below.
height
attach(women)
height
weight
women$height
# Build a heterogeneous list mixing named and unnamed components.
g <- "My First List"
h <- c(25, 26, 18, 39)
j <- matrix(1:10, nrow=5)
k <- c("one", "two", "three")
mylist <- list(title=g, ages=h, j, k, women)
mylist
mylist[[2]]
mylist[[5]]
# Scatter plot of the attached columns with assorted graphical parameters.
plot(x=height, y=weight, type='b', lty=5, pch=11, fg='red', bg='green', col.axis='purple', cex=1.5, cex.axis=2)
title(main='Henry Harvin', sub=' MA Course') |
stratsamp <- function(n, distribution, parameters, p) {
  # Stratified rejection sampling: draw until every stratum defined by the
  # quantile probabilities 'p' holds exactly n samples. Returns a matrix with
  # one column per stratum.
  strata_bounds <- find_strata(p, distribution, parameters)
  n_strata <- length(strata_bounds) - 1
  samples <- matrix(NA_real_, nrow = n, ncol = length(p) - 1)
  filled <- rep(0, n_strata)
  while (any(filled < n)) {
    # Draw one value and accept it only if its stratum still has room.
    draw <- distribution_sampling(1, distribution, parameters)
    stratum <- findInterval(draw, strata_bounds)
    if (filled[stratum] < n) {
      filled[stratum] <- filled[stratum] + 1
      samples[filled[stratum], stratum] <- draw
    }
  }
  samples
}
# Tests for the mlc (maximum-likelihood classifier) fit and its caret hooks.
context("mlc")
suppressPackageStartupMessages(library(caret))
set.seed(1)
# 100 x 3 Gaussian feature matrix with a binary factor response.
mat <- matrix(rnorm(300), ncol = 3, nrow = 100)
colnames(mat) <- letters[1:3]
y <- sample(factor(c("a", "b")), 100, replace = TRUE)
test_that("fit mlc",{
  # One component per class plus the stored factor levels; each class model
  # carries mean (m), determinant (D) and inverse covariance (I) pieces.
  expect_is( mr <- mlc(mat,y), "list")
  expect_equal(names(mr), c("a", "b", "levels"))
  expect_equal(vapply(mr$a, length, numeric(1)), c(m=3,D=1,I=9))
})
test_that("predict mlc",{
  # Train through caret with no resampling, then check class and probability
  # prediction shapes.
  mod <- train( mat, y, method = mlcCaret, trControl = trainControl(method = "none"))
  expect_is(pred <- predict.mlc(mod, mat), "factor")
  expect_equal(length(pred), nrow(mat))
  expect_equal(levels(pred), c("a", "b"))
  expect_is(prob <- predict.mlc.prob(mod, mat), "matrix")
  expect_equal(nrow(prob), nrow(mat))
  expect_equal(ncol(prob), 2)
})
|
semprobit <- function(formula, W, data, subset, ...) {
  # Formula interface for the SEM probit model: build the model frame using
  # lm-style non-standard evaluation, then delegate to sem_probit_mcmc().
  cl <- match.call()
  mf <- match.call(expand.dots = FALSE)
  m <- match(c("formula", "data", "subset"), names(mf), 0L)
  mf <- mf[c(1L, m)]
  mf$drop.unused.levels <- TRUE
  mf[[1L]] <- as.name("model.frame")
  mf <- eval(mf, parent.frame())
  mt <- attr(mf, "terms")
  y <- model.response(mf, "numeric")
  # NOTE(review): this guard only fires when ALL four conditions hold, so a
  # dimension mismatch alone (with a numeric or sparse W) is never caught —
  # the intent was probably a disjunction of failure conditions; confirm.
  if (!is.null(W) && !is.numeric(W) && !inherits(W, "sparseMatrix") && nrow(W) != NROW(y))
    stop(gettextf("'W' must be a numeric square matrix, dimension %d should equal %d (number of observations)",
      NROW(W), NROW(y)), domain = NA)
  # NOTE(review): there is no local 'contrasts' argument, so this passes the
  # stats::contrasts function object as contrasts.arg — verify intended.
  X <- model.matrix(mt, mf, contrasts)
  sem_probit_mcmc(y, X, W, ...)
}
sem_probit_mcmc <- function(y, X, W, ndraw=1000, burn.in=100, thinning=1,
  prior=list(a1=1, a2=1, c=rep(0, ncol(X)), T=diag(ncol(X))*1e12,
  nu=0, d0=0, lflag = 0),
  start=list(rho=0.75, beta=rep(0, ncol(X)), sige=1),
  m=10, showProgress=FALSE, univariateConditionals=TRUE){
  # Gibbs/Metropolis sampler for the spatial-error-model (SEM) probit:
  #   z = X beta + u,  u = rho W u + eps,  y = 1{z >= 0}.
  # y: 0/1 response; X: design matrix; W: sparse spatial weights (zero diag).
  # ndraw/burn.in/thinning control the chain; 'prior' holds beta prior (c, T),
  # sige prior (nu, d0) and rho beta-prior parameters (a1, a2); 'start' gives
  # initial values; m is the burn-in of the multivariate TMVN draw; set
  # univariateConditionals=FALSE to sample z jointly instead of site-by-site.
  # Returns a "semprobit" object containing the draws and posterior means.
  timet <- Sys.time()
  n <- nrow( X )
  n1 <- nrow( X )
  n2 <- nrow( W )
  k <- ncol( X )
  I_n <- sparseMatrix(i=1:n, j=1:n, x=1)
  if (is.null(colnames(X))) colnames(X) <- paste("x",1:k,sep="")
  # ---- input validation -------------------------------------------------
  if( length(c(which(y == 0 ),which(y == 1))) != length( y ) ){
    stop("semprobit: not all y-values are 0 or 1")
  }
  if( n1 != n2 && n1 != n ){
    stop("semprobit: wrong size of spatial weight matrix W")
  }
  if (!inherits(W, "sparseMatrix") || any(diag(W) != 0)) {
    stop('sarprobit: spatial weights matrix W must be a sparse matrix with zeros in the main diagonal')
  }
  # Detect an intercept: a column summing to n must be the first column.
  ind <- match( n, apply(X,2,sum))
  if( is.na(ind) ){
    cflag <- 0
    p <- k
  }else if( ind == 1 ){
    cflag <- 1
    p <- k - 1
  }else{
    stop("semprobit: intercept term must be in first column of the X-matrix")
  }
  # ---- starting values and priors --------------------------------------
  rho <- start$rho
  beta <- start$beta
  sige <- start$sige
  c <- rep(0, k)
  # Beta prior covariance T: user-supplied if valid, else diffuse.
  if (is.matrix(prior$T) && ncol(prior$T) == k && isSymmetric(prior$T) && det(prior$T) > 0) {
    T <- prior$T
  } else {
    T <- diag(k)*1e12
  }
  if (is.numeric(prior$nu)) {
    nu <- prior$nu
  } else {
    nu <- 0
  }
  if (is.numeric(prior$d0)) {
    d0 <- prior$d0
  } else {
    d0 <- 0
  }
  TI <- solve(T)
  TIc <- TI%*%c
  S <- I_n - rho * W
  H <- t(S) %*% S / sige
  # Truncation bounds for the latent z implied by the observed y.
  lower <- ifelse(y > 0, 0, -Inf)
  upper <- ifelse(y > 0, Inf, 0)
  rmin <- -1
  rmax <- 1
  ldetflag <- 0
  # Precompute the log-determinant grid over rho for the M-H ratio.
  tmp <- sar_lndet(ldetflag, W, rmin, rmax)
  detval <- tmp$detval
  a1 <- 1.0
  a2 <- 1.0
  if(is.numeric(prior$a1)) a1 <- prior$a1
  if(is.numeric(prior$a2)) a2 <- prior$a2
  u <- runif(thinning * ndraw + burn.in)
  nrho <- nrow(detval)
  nmk <- (n-k)/2
  detval1 <- detval[,1]
  detval2 <- detval[,2]
  # Storage for retained draws: k betas + sige + rho per row.
  B <- matrix(NA, ndraw, k+2)
  colnames(B) <- c(colnames(X), "sige", "rho")
  # cc is the adaptive M-H tuning scale for rho proposals.
  cc <- 0.2
  acc <- 0
  acc_rate <- rep(NA, thinning * ndraw + burn.in)
  if (showProgress) {
    pb <- txtProgressBar(min=0, max=(thinning * ndraw + burn.in), initial=0, style=3)
  }
  if(cflag == 0) {
    namesNonConstantParams <- colnames(X)
  } else {
    namesNonConstantParams <- colnames(X)[-1]
  }
  z <- y
  ones <- rep(1, n)
  W2diag <- diag(t(W)%*%W)
  ind0 <- which(y == 0)
  ind1 <- which(y == 1)
  # ---- MCMC loop: negative i's are burn-in, positive i's are kept -------
  for (i in (1 - burn.in):(ndraw * thinning)) {
    # (1) Draw beta | z, rho, sige from its multivariate normal conditional.
    SX <- S %*% X
    tSX <- t(SX)
    tSXSX <- as.matrix(tSX %*% SX)
    AI <- solve(tSXSX + sige * TI)
    Sz <- as.double(S %*% z)
    b <- as.double(tSX %*% Sz + sige * TIc)
    b0 <- AI %*% b
    beta <- as.double(rmvnorm(n=1, mean=b0, sigma=sige*AI))
    # (2) Draw sige | z, beta, rho from its inverse-chi-square conditional.
    nu1 <- n + 2*nu
    e <- as.double(S %*% (z - X %*% beta))
    d1 <- 2*d0 + crossprod(e)
    chi <- rchisq(n=1,df=nu1)
    sige <- as.double(d1/chi)
    H <- t(S) %*% S / sige
    # (3) Draw the latent z, either site-by-site from univariate truncated
    # normals, or jointly via a truncated MVN Gibbs step.
    if (univariateConditionals) {
      dsig <- 1/sige * (ones - rho * rho * W2diag)
      zvar <- ones/dsig;
      mu <- X %*% beta
      zmu <- z - mu
      A <- (1/sige)* S %*% zmu
      B2 <- t(S) %*% A
      Cz <- zmu - zvar*B2
      zm <- mu + Cz;
      z[zvar < 0] <- 0
      z[ind0] <- rtnorm(mu=zm[ind0], sd=sqrt(zvar[ind0]), a=-Inf, b=0)
      z[ind1] <- rtnorm(mu=zm[ind1], sd=sqrt(zvar[ind1]), a=0, b=Inf)
      # Guard against numerically degenerate draws.
      z[is.infinite(z) | zvar < 0] <- 0
    }
    if (!univariateConditionals) {
      mu <- X %*% beta
      H <- (1/sige)*t(S)%*%S
      if (m==1) {
        z <- as.double(rtmvnorm.sparseMatrix(n=1, mean=mu, H=H,
          lower=lower, upper=upper, burn.in=m, start.value=z))
      } else {
        z <- as.double(rtmvnorm.sparseMatrix(n=1, mean=mu, H=H,
          lower=lower, upper=upper, burn.in=m))
      }
    }
    # (4) Metropolis-Hastings step for rho with a random-walk proposal kept
    # inside (rmin, rmax); c_sem evaluates the log conditional.
    rhox <- c_sem(rho,z,X,beta,sige,I_n,W,detval1,detval2,ones,a1,a2)
    accept <- 0
    rho2 <- rho + cc * rnorm(1)
    while(accept == 0) {
      if ((rho2 > rmin) & (rho2 < rmax)) {
        accept <- 1
      }
      else {
        rho2 <- rho + cc * rnorm(1)
      }
    }
    rhoy <- c_sem(rho2,z,X,beta,sige,I_n,W,detval1,detval2,ones,a1,a2)
    ru <- runif(1,0,1)
    # NOTE(review): comparing the log-ratio against exp(1) (not 0) mirrors the
    # original LeSage MATLAB overflow guard — confirm it is intended.
    if ((rhoy - rhox) > exp(1)) {
      p <- 1
    } else {
      ratio <- exp(rhoy-rhox)
      p <- min(1,ratio)
    }
    if (ru < p) {
      rho <- rho2
      acc <- acc + 1
    }
    # Adapt the proposal scale toward a 40-60% acceptance rate.
    iter <- i + burn.in
    acc_rate[iter] <- acc/iter
    if (acc_rate[iter] < 0.4) {
      cc <- cc/1.1;
    }
    if (acc_rate[iter] > 0.6) {
      cc <- cc*1.1;
    }
    S <- I_n - rho * W
    H <- t(S) %*% S / sige
    # Store post-burn-in draws, honoring thinning.
    if (i > 0) {
      if (thinning == 1) {
        ind <- i
      }
      else if (i%%thinning == 0) {
        ind <- i%/%thinning
      } else {
        next
      }
      B[ind,] <- c(beta, sige, rho)
    }
    if (showProgress) setTxtProgressBar(pb, i + burn.in)
  }
  if (showProgress) close(pb)
  # ---- posterior means and result object --------------------------------
  beta <- colMeans(B)[1:k]
  sige <- colMeans(B)[k+1]
  rho <- colMeans(B)[k+2]
  S <- (I_n - rho * W)
  fitted.values <- X %*% beta
  fitted.response <- as.numeric(fitted.values >= 0)
  results <- NULL
  results$time <- Sys.time() - timet
  results$nobs <- n
  results$nvar <- k
  results$y <- y
  results$zip <- n - sum(y)
  results$beta <- colMeans(B)[1:k]
  results$sige <- sige
  results$rho <- colMeans(B)[k+2]
  results$coefficients <- colMeans(B)
  results$fitted.values <- fitted.values
  results$fitted.response <- fitted.response
  results$ndraw <- ndraw
  results$nomit <- burn.in
  results$a1 <- a1
  results$a2 <- a2
  results$nu <- nu
  results$d0 <- d0
  results$rmax <- rmax
  results$rmin <- rmin
  results$tflag <- "plevel"
  results$lflag <- ldetflag
  results$cflag <- cflag
  results$lndet <- detval
  results$names <- c(colnames(X), "sige", "rho")
  results$B <- B
  results$bdraw <- B[,1:k]
  results$sdraw <- B[,k+1]
  results$pdraw <- B[,k+2]
  results$W <- W
  results$X <- X
  class(results) <- "semprobit"
  return(results)
}
c_sem <- function(rho,y,X,b,sige,I_n,W,detval1,detval2,vi,a1,a2) {
  # Log conditional density of rho for the SEM probit M-H step:
  # log-determinant (looked up on the precomputed grid) minus the weighted
  # residual quadratic form. a1/a2 are accepted for interface compatibility.
  pos <- findInterval(rho, detval1)
  if (pos == 0) {
    pos <- 1
  }
  log_det <- detval2[pos]
  # Residuals of the spatial error transform, weighted by sqrt(vi).
  S <- I_n - rho * W
  resid <- as.double(S %*% (y - X %*% b))
  wres <- resid * sqrt(vi)
  quad <- crossprod(wres) / (2 * sige)
  return(as.double(log_det - quad))
}
coef.semprobit <- function(object, ...) {
  # S3 coef() method: expose the posterior-mean coefficient vector.
  if (!inherits(object, "semprobit")) {
    stop("use only with \"semprobit\" objects")
  }
  object$coefficients
}
coefficients.semprobit <- function(object, ...) {
  # Alias: route coefficients() through the coef() generic so both accessors
  # dispatch to coef.semprobit().
  UseMethod("coef", object)
}
summary.semprobit <- function(object, var_names=NULL, file=NULL, digits = max(3, getOption("digits")-3), ...){
  # Posterior summary for a semprobit fit: means, sds, Bayes p-levels and
  # t-type statistics per parameter, written to 'file' ("" = console) and
  # returned invisibly as a matrix.
  if (!inherits(object, "semprobit"))
    stop("use only with \"semprobit\" objects")
  nobs <- object$nobs
  nvar <- object$nvar
  ndraw <- object$ndraw
  nomit <- object$nomit
  draws <- object$B
  bout_mean <- object$coefficients
  bout_sd <- apply(draws, 2, sd)
  # Bayes p-level: fraction of draws on the opposite side of zero from the mean.
  bout_sig <- 1 - apply(draws, 2, function(x) { ifelse (mean(x) > 0, sum(x > 0), sum(x < 0)) }) / ndraw
  bout_t <- bout_mean / bout_sd
  bout_tPval<- (1 - pt( abs(bout_t), nobs ))*2
  if( is.null(var_names)){
    bout_names<- as.matrix(object$names)
  }else{
    bout_names<- as.matrix(var_names)
  }
  if(is.null(file)){file <- ""}
  write(sprintf("--------MCMC probit with spatial errors ---------"), file, append=T)
  write(sprintf("Execution time = %6.3f %s", object$time, attr(object$time, "units")) , file, append=T)
  write(sprintf("N steps for TMVN= %6d" , object$nsteps), file, append=T)
  write(sprintf("N draws = %6d, N omit (burn-in)= %6d", ndraw, nomit), file, append=T)
  write(sprintf("N observations = %6d, K covariates = %6d", nobs, nvar) , file, append=T)
  # NOTE(review): the next line is truncated in this source (unterminated
  # format string) — restore the original sprintf() text before use.
  write(sprintf("
  write(sprintf("Min rho = % 6.3f, Max rho = % 6.3f", object$rmin, object$rmax), file, append=T)
  write(sprintf("--------------------------------------------------"), file, append=T)
  write(sprintf(""), file, append=T)
  coefficients <- cbind(bout_mean, bout_sd, bout_sig, bout_t, bout_tPval)
  dimnames(coefficients) <- list(bout_names,
    c("Estimate", "Std. Dev", "Bayes p-level", "t-value", "Pr(>|z|)"))
  printCoefmat(coefficients, digits = digits,
    signif.stars = getOption("show.signif.stars"))
  return(invisible(coefficients))
}
plot.semprobit <- function(x, which=c(1, 2, 3),
  ask = prod(par("mfcol")) < length(which) && dev.interactive(), ..., trueparam=NULL) {
  # MCMC diagnostics for a semprobit fit. 'which' selects: 1 = trace plots,
  # 2 = autocorrelation functions, 3 = posterior densities. If 'trueparam' is
  # given, true values are overlaid as red dashed lines.
  if (!inherits(x, "semprobit"))
    stop("use only with \"semprobit\" objects")
  if (!is.numeric(which) || any(which < 1) || any(which > 3))
    stop("'which' must be in 1:3")
  names <- x$names
  B <- x$B
  k <- ncol(B)
  show <- rep(FALSE, 3)
  show[which] <- TRUE
  # Prompt before each page when the device layout holds fewer plots than asked.
  if (ask) {
    oask <- devAskNewPage(TRUE)
    on.exit(devAskNewPage(oask))
  }
  if (show[1L]) {
    for (i in 1:k) {
      plot(1:nrow(B), B[,i], type="l", xlab="iteration", ylab=names[i], main=substitute("Trace plot of "*x, list(x=names[i])), ...)
      if (!is.null(trueparam)) abline(h=trueparam[i], col="red", lty=2)
    }
  }
  if (show[2L]) {
    for (i in 1:k) {
      acf(B[,i], main=substitute("ACF of "*x, list(x=names[i])), ...)
    }
  }
  if (show[3L]) {
    for (i in 1:k) {
      plot(density(B[,i]), main=substitute("Posterior distribution of "*x, list(x=names[i])), ...)
      if (!is.null(trueparam)) abline(v=trueparam[i], col="red", lty=2)
    }
  }
}
logLik.semprobit <- function(object, ...) {
  # Probit log-likelihood evaluated at the posterior means, accounting for the
  # SEM error covariance via the marginal standard deviations of S S'.
  X <- object$X
  y <- object$y
  n_obs <- nrow(X)
  n_coef <- ncol(X)
  W <- object$W
  beta <- object$beta
  rho <- object$rho
  sige <- object$sige
  I_n <- sparseMatrix(i = seq_len(n_obs), j = seq_len(n_obs), x = 1)
  S <- I_n - rho * W
  # Scale each row of X by 1 / marginal sd of its spatial error term.
  D <- diag(1 / sqrt(sige * diag(S %*% t(S))))
  Xs <- D %*% X
  prob <- pnorm(as.double(Xs %*% beta))
  lnL <- sum(log(prob[y == 1])) + sum(log((1 - prob[y == 0])))
  out <- lnL
  class(out) <- "logLik"
  # Degrees of freedom: k betas plus sige and rho.
  attr(out, "df") <- n_coef + 2
  return(out)
}
fitted.semprobit <- function(object, ...) {
  # Fitted latent values (X %*% beta at posterior means).
  # FIX: use the exact component name; the original "fitted.value" only
  # worked through `$` partial matching against "fitted.values".
  object$fitted.values
}
tar_resources_url <- function(
  handle = NULL
) {
  # Construct a validated URL resources object for targets.
  resources <- resources_url_init(handle = handle)
  resources_validate(resources)
  resources
}
export_amira.path<-function(vertices,filename,Lines=c(1:(dim(vertices)[1]-1)-1,-1),path)
{
  # Write a polyline ("HxLineSet") to Amira ASCII .am format.
  # vertices: matrix of 3D coordinates (one row per vertex);
  # Lines: 0-based vertex indices terminated by -1 (Amira convention);
  # path: output directory. If <filename>.am already exists there, a "(k)"
  # suffix is appended until the name is free.
  # FIX: the original loop reset i to 1 on every pass and re-suffixed the
  # already-suffixed name, producing "name(1)(1)..."; the counter is now
  # initialized once and the suffix rebuilt from the base name.
  # NOTE(review): several header strings below are truncated in this source
  # (content after '#' lost in extraction) — restore before use.
  i<-1
  base_name<-filename
  while(tolower(paste(filename,".am",sep=""))%in%list.files(path)){
    filename<-paste(base_name,"(",i,")",sep="")
    i<-i+1
  }
  cat(paste("
  file = paste(path, "/", filename, ".am", sep = ""),
  append = TRUE, sep = "",eol="\n")
  cat(paste("define Lines ", length(Lines), "\n",
    sep = ""), file = paste(path, "/", filename, ".am",
    sep = ""), append = TRUE, sep = "")
  cat(paste("nVertices ", dim(vertices)[1], "\n\n",
    sep = ""), file = paste(path, "/", filename, ".am",
    sep = ""), append = TRUE, sep = "")
  cat(paste("Parameters {","\n",sep=""), file = paste(path, "/", filename, ".am",
    sep = ""), append = TRUE, sep = "")
  cat(paste('    ContentType "HxLineSet"',"\n",sep=""), file = paste(path, "/", filename, ".am",
    sep = ""), append = TRUE, sep = "")
  cat(paste("}", "\n\n", sep = ""), file = paste(path, "/", filename, ".am",
    sep = ""), append = TRUE, sep = "")
  cat(paste("Lines { int LineIdx } @1","\n",sep=""), file = paste(path, "/", filename, ".am",
    sep = ""), append = TRUE, sep = "")
  cat(paste("Vertices { float[3] Coordinates } @2", "\n\n", sep = ""), file = paste(path, "/", filename, ".am",
    sep = ""), append = TRUE, sep = "")
  cat(paste("
    sep = ""), append = TRUE, sep = "")
  cat(paste(Lines,"\n",sep=" "), file = paste(path, "/", filename, ".am",
    sep = ""), append = TRUE, sep = "")
  cat(paste("\n","@2","\n",sep=""), file = paste(path, "/", filename, ".am",
    sep = ""), append = TRUE, sep = "")
  write.table(format(vertices, scientific = F, trim = T),
    file = paste(path, "/", filename, ".am", sep = ""),
    sep = " ", append = TRUE, quote = FALSE, row.names = FALSE,
    col.names = FALSE, na = "")
  cat(paste("\n",sep=" "), file = paste(path, "/", filename, ".am",
    sep = ""), append = TRUE, sep = "")
}
# Tests for translation helpers (pow, phi, logit/ilogit and their replacement
# forms, as provided by the package under test).
test_that("translations", {
  expect_identical(pow(5, 2), 25)
  # phi() is the standard normal CDF.
  expect_identical(phi(0), 0.5)
  expect_equal(phi(2), 0.9772499, tolerance = 0.0000001)
  # Replacement form: log(x) <- v assigns exp(v) to x.
  x <- NA
  log(x) <- log(5)
  expect_equal(x, 5)
  expect_equal(logit(0.5), 0)
  expect_equal(logit(1), Inf)
  # Replacement form: logit(x) <- v assigns ilogit(v) to x.
  x <- NA
  logit(x) <- logit(0.75)
  expect_equal(x, 0.75)
  # ilogit() and invlogit() are inverse-logit synonyms.
  expect_equal(ilogit(logit(0.67)), 0.67)
  expect_equal(invlogit(logit(0.67)), 0.67)
})
test_that("translations2", {
x <- seq(0, 1, by = 0.25)
expect_identical(logit(x), qlogis(x))
expect_identical(ilogit(logit(x)), x)
expect_identical(invlogit(logit(x)), x)
logit(x) <- c(0.5, 1)
expect_identical(x, ilogit(c(0.5, 1)))
log(x) <- c(0.5, 1)
expect_identical(x, exp(c(0.5, 1)))
expect_identical(pow(3, 4), 3^4)
expect_equal(phi(0:2), c(0.5, 0.8413447, 0.9772499), tolerance = 0.0000001)
}) |
read.cross.csv <-
function(dir, file, na.strings=c("-","NA"),
         genotypes=c("A","H","B","D","C"),
         estimate.map=TRUE, rotate=FALSE, ...)
{
    # Read QTL cross data from a single comma-delimited file laid out as:
    # row 1 = phenotype names + marker names, row 2 = chromosome IDs (blank
    # over phenotype columns), optional row 3 = map positions, remaining rows
    # = individuals. Returns list(cross, estmap).
    # FIX: corrected the misspelled 'colapse' argument in the missing-map-
    # position error message; the typo made paste() treat "," as data and the
    # indices were never joined into one string.
    if(missing(file)) file <- "data.csv"
    if(!missing(dir) && dir != "") {
        file <- file.path(dir, file)
    }
    args <- list(...)
    # "" in na.strings would make blank-column detection impossible.
    if("" %in% na.strings) {
        na.strings <- na.strings[na.strings != ""]
        warning("Including \"\" in na.strings will cause problems; omitted.")
    }
    if(length(args) > 0 && "dec" %in% names(args)) {
        dec <- args[["dec"]]
    }
    else dec <- "."
    # Read everything as character; honor user-supplied sep/comment.char.
    if(length(args) < 1 || !("sep" %in% names(args))) {
        if(length(args) < 1 || !("comment.char" %in% names(args)))
            data <- read.table(file, sep=",", na.strings=na.strings,
                               colClasses="character", fill=TRUE,
                               stringsAsFactors=TRUE,
                               blank.lines.skip=TRUE, comment.char="", ...)
        else
            data <- read.table(file, sep=",", na.strings=na.strings,
                               colClasses="character", fill=TRUE,
                               stringsAsFactors=TRUE,
                               blank.lines.skip=TRUE, ...)
    }
    else {
        if(length(args) < 1 || !("comment.char" %in% names(args)))
            data <- read.table(file, na.strings=na.strings,
                               colClasses="character", fill=TRUE,
                               stringsAsFactors=TRUE,
                               blank.lines.skip=TRUE, comment.char="", ...)
        else
            data <- read.table(file, na.strings=na.strings,
                               colClasses="character", fill=TRUE,
                               stringsAsFactors=TRUE,
                               blank.lines.skip=TRUE, ...)
    }
    if(rotate)
        data <- as.data.frame(t(data), stringsAsFactors=FALSE)
    # Blank cells in row 2 mark phenotype columns; markers carry chromosome IDs.
    empty <- grep("^\\s*$", data[2, ])
    if( ! 1 %in% empty)
        stop("You must include at least one phenotype (e.g., an index). ",
             "There was this value in the first column of the second row '",
             data[2,1],"' where was supposed to be nothing.",sep="")
    if(length(empty)==ncol(data))
        stop("Second row has all blank cells; you need to include chromosome IDs for the markers.")
    n.phe <- min((1:ncol(data))[-empty])-1
    # Blank cells in row 3 over the phenotypes signal that map positions follow.
    empty <- rep(FALSE, n.phe)
    empty[grep("^\\s*$", data[3,1:n.phe])] <- TRUE
    if(all(empty)) {
        map.included <- TRUE
        map <- asnumericwithdec(unlist(data[3,-(1:n.phe)]), dec=dec)
        if(any(is.na(map))) {
            temp <- unique(unlist(data[3,-(1:n.phe)])[is.na(map)])
            stop("There are missing marker positions.\n",
                 "    In particular, we see these value(s): ",
                 paste("\"",paste(temp,collapse="\",\"",sep=""),"\"",collapse=" ",sep=""),
                 " at position(s): ",
                 paste(which(is.na(map)),collapse=",",sep=""),sep="")
        }
        nondatrow <- 3
    }
    else {
        map.included <- FALSE
        map <- rep(0,ncol(data)-n.phe)
        nondatrow <- 2
    }
    # Normalize genuinely empty cells to NA.
    data <- sapply(data,function(a) { a[!is.na(a) & a==""] <- NA; a })
    pheno <- as.data.frame(data[-(1:nondatrow),1:n.phe,drop=FALSE], stringsAsFactors=TRUE)
    colnames(pheno) <- data[1,1:n.phe]
    mnames <- data[1,-(1:n.phe)]
    if(any(is.na(mnames)))
        stop("There are missing marker names. Check column(s) ",paste(which(is.na(mnames))+1+n.phe,collapse=","),sep="")
    chr <- data[2,-(1:n.phe)]
    if(any(is.na(chr)))
        stop("There are missing chromosome IDs. Check column(s) ",paste(which(is.na(chr))+1+n.phe,collapse=","),sep="")
    # Translate genotype codes to integers; unexpected codes become NA.
    if(length(genotypes) > 0) {
        temp <- unique(as.character(data[-(1:nondatrow),-(1:n.phe),drop=FALSE]))
        temp <- temp[!is.na(temp)]
        wh <- !(temp %in% genotypes)
        if(any(wh)) {
            warn <- "The following unexpected genotype codes were treated as missing.\n    "
            ge <- paste("|", paste(temp[wh],collapse="|"),"|",sep="")
            warn <- paste(warn,ge,"\n",sep="")
            warning(warn)
        }
        allgeno <- matrix(match(data[-(1:nondatrow),-(1:n.phe)],genotypes),
                          ncol=ncol(data)-n.phe)
    }
    else
        allgeno <- matrix(as.numeric(data[-(1:nondatrow),-(1:n.phe)]),
                          ncol=ncol(data)-n.phe)
    oldpheno <- pheno
    pheno <- data.frame(lapply(pheno, sw2numeric, dec=dec), stringsAsFactors=TRUE)
    # Order markers by chromosome (X last among numeric IDs) and position.
    if(all(chr %in% c(1:999,"X","x"))) {
        tempchr <- chr
        tempchr[chr=="X" | chr=="x"] <- 1000
        tempchr <- as.numeric(tempchr)
        if(map.included) neworder <- order(tempchr, map)
        else neworder <- order(tempchr)
    }
    else {
        tempchr <- factor(chr, levels=unique(chr))
        if(map.included) neworder <- order(tempchr, map)
        else neworder <- order(tempchr)
    }
    chr <- chr[neworder]
    map <- map[neworder]
    allgeno <- allgeno[,neworder,drop=FALSE]
    mnames <- mnames[neworder]
    # Without a map, place markers every 5 units along each chromosome.
    if(!map.included) {
        map <- split(rep(0,length(chr)),chr)[unique(chr)]
        map <- unlist(lapply(map,function(a) seq(0,length=length(a),by=5)))
        names(map) <- NULL
    }
    # Assemble the per-chromosome geno list; "X"/"x" gets class "X".
    uchr <- unique(chr)
    n.chr <- length(uchr)
    geno <- vector("list",n.chr)
    names(geno) <- uchr
    min.mar <- 1
    allautogeno <- NULL
    for(i in 1:n.chr) {
        temp.map <- map[chr==uchr[i]]
        names(temp.map) <- mnames[chr==uchr[i]]
        data <- allgeno[,min.mar:(length(temp.map)+min.mar-1),drop=FALSE]
        min.mar <- min.mar + length(temp.map)
        colnames(data) <- names(temp.map)
        geno[[i]] <- list(data=data,map=temp.map)
        if(uchr[i] == "X" || uchr[i] == "x")
            class(geno[[i]]) <- "X"
        else {
            class(geno[[i]]) <- "A"
            if(is.null(allautogeno)) allautogeno <- data
            else allautogeno <- cbind(allautogeno,data)
        }
    }
    if(is.null(allautogeno)) allautogeno <- allgeno
    # Consistency checks between phenotype and genotype dimensions.
    n.mar1 <- sapply(geno,function(a) ncol(a$data))
    n.mar2 <- sapply(geno,function(a) length(a$map))
    n.phe <- ncol(pheno)
    n.ind1 <- nrow(pheno)
    n.ind2 <- sapply(geno,function(a) nrow(a$data))
    if(any(n.ind1 != n.ind2)) {
        cat(n.ind1,n.ind2,"\n")
        stop("Number of individuals in genotypes and phenotypes do not match.")
    }
    if(any(n.mar1 != n.mar2)) {
        cat(n.mar1,n.mar2,"\n")
        stop("Numbers of markers in genotypes and marker names files do not match.")
    }
    cat(" --Read the following data:\n")
    cat("\t",n.ind1," individuals\n")
    cat("\t",sum(n.mar1)," markers\n")
    cat("\t",n.phe," phenotypes\n")
    if(all(is.na(allgeno))) warning("There is no genotype data!\n")
    # Infer cross type from the largest autosomal genotype code.
    if(all(is.na(allautogeno)) || max(allautogeno,na.rm=TRUE)<=2) type <- "bc"
    else if(max(allautogeno,na.rm=TRUE)<=5) type <- "f2"
    else type <- "4way"
    cross <- list(geno=geno,pheno=pheno)
    class(cross) <- c(type,"cross")
    if(type=="f2") max.gen <- 5
    else if(type=="bc") max.gen <- 2
    else max.gen <- 14
    # Ensure map positions are non-decreasing within each chromosome.
    for(i in 1:n.chr) {
        if(any(diff(cross$geno[[i]]$map)<0)) {
            o <- order(cross$geno[[i]]$map)
            cross$geno[[i]]$map <- cross$geno[[i]]$map[o]
            cross$geno[[i]]$data <- cross$geno[[i]]$data[,o,drop=FALSE]
        }
    }
    # Estimate the map later only if none was provided.
    if(estimate.map && !map.included) estmap <- TRUE
    else estmap <- FALSE
    list(cross,estmap)
}
gbm.perf <- function(object,
                     plot.it=TRUE,
                     oobag.curve=FALSE,
                     overlay=TRUE,
                     method,
                     main="") {
  # Estimate (and optionally plot) the optimal number of boosting iterations.
  # 'method' may be missing, in which case gbmt_performance() guesses it.
  if (!is.logical(plot.it) || (length(plot.it)) > 1 || is.na(plot.it)) {
    stop("plot.it must be a logical - excluding NA")
  }
  perf <- gbmt_performance(object, method)
  if (plot.it) {
    plot(perf,
         out_of_bag_curve=oobag.curve,
         overlay=overlay,
         main=main)
  }
  as.numeric(perf)
}
gbmt_performance <- function(gbm_fit_obj, method) {
  # Find the best iteration by the requested error method ("OOB", "cv" or
  # "test"), guessing a sensible method when none is supplied. Returns a
  # "GBMTPerformance" value carrying the method and fit in its attributes.
  check_if_gbm_fit(gbm_fit_obj)
  if (missing(method)) {
    method <- guess_error_method(gbm_fit_obj)
    message("Using ", method, " method...")
  }
  best_iter <- switch(method,
                      OOB=best_iter_out_of_bag(gbm_fit_obj),
                      cv=best_iter_cv(gbm_fit_obj),
                      test=best_iter_test(gbm_fit_obj),
                      stop("method must be cv, test, or OOB"))
  attr(best_iter, 'decoration') <- list(method=method,
                                        gbm_fit_obj=gbm_fit_obj)
  class(best_iter) <- "GBMTPerformance"
  best_iter
}
as.double.GBMTPerformance <- function(x, ...) {
  # Strip the class so the stored best-iteration number converts cleanly.
  as.double(unclass(x))
}
print.GBMTPerformance <- function(x, ...) {
  # Report the selected best iteration with a human-readable method name.
  meta <- attr(x, 'decoration')
  label <- switch(meta$method,
                  cv="cross-validation",
                  test="test-set",
                  OOB="out-of-bag",
                  stop("Unknown method."))
  cat("The best ", label, " iteration was ", x, ".\n",
      sep="")
  invisible(x)
}
plot.GBMTPerformance <- function(x,
                                 out_of_bag_curve=FALSE,
                                 overlay=TRUE,
                                 main="", ...) {
  # Delegate to perf_plot() using the fit object stashed on the value.
  meta <- attr(x, 'decoration')
  perf_plot(meta$gbm_fit_obj, x,
            out_of_bag_curve, overlay,
            meta$method,
            main)
}
best_iter_test <- function(gbm_fit_obj) {
  # Iteration minimizing the validation-set error.
  check_if_gbm_fit(gbm_fit_obj)
  which.min(iteration_error(gbm_fit_obj, 'valid'))
}
best_iter_cv <- function(gbm_fit_obj) {
  # Iteration minimizing the cross-validation error; requires cv_folds > 1.
  check_if_gbm_fit(gbm_fit_obj)
  if (!has_cross_validation(gbm_fit_obj)) {
    stop('In order to use method="cv" gbm must be called with cv_folds>1.')
  }
  which.min(iteration_error(gbm_fit_obj, 'cv'))
}
best_iter_out_of_bag <- function(gbm_fit_obj) {
  # Iteration maximizing cumulative smoothed out-of-bag improvement; only
  # meaningful when bagging was used and finite OOB estimates exist.
  check_if_gbm_fit(gbm_fit_obj)
  if (gbm_fit_obj$params$bag_fraction == 1) {
    stop("Cannot compute OOB estimate or the OOB curve when bag_fraction=1")
  }
  if (all(!is.finite(gbm_fit_obj$oobag.improve))) {
    stop("Cannot compute OOB estimate or the OOB curve. No finite OOB estimates of improvement")
  }
  message("OOB generally underestimates the optimal number of iterations although predictive performance is reasonably competitive.
Using cv_folds>1 when calling gbm usually results in improved predictive performance.")
  smoother <- generate_smoother_oobag(gbm_fit_obj)
  # Minimizing the negative cumulative sum = maximizing cumulative improvement.
  smoother$x[which.min(-cumsum(smoother$y))]
}
generate_smoother_oobag <- function(gbm_fit_obj) {
  # Loess-smooth the per-iteration OOB improvement estimates; the smoothed
  # values and iteration index are exposed as $y and $x on the loess object.
  check_if_gbm_fit(gbm_fit_obj)
  x <- seq_len(gbm_fit_obj$params$num_trees)
  fit <- loess(gbm_fit_obj$oobag.improve~x,
               enp.target=min(max(4,length(x)/10),50))
  fit$y <- fit$fitted
  fit$x <- x
  fit
}
guess_error_method <- function(gbm_fit_obj) {
  # Pick the best available error method: test split, then CV, then OOB.
  if (has_train_test_split(gbm_fit_obj)) {
    return("test")
  }
  if (has_cross_validation(gbm_fit_obj)) {
    return("cv")
  }
  "OOB"
}
test_that("gives warning markers are not correct", {
expect_warning(style_text(c(
"1+1",
"
"
)))
})
test_that("trailing spaces are stripped when checking marker and written back", {
expect_equal(
style_text(c(
"
"1+1",
"
)) %>%
as.character(),
c("
)
})
test_that("last stopping marker can be omitted", {
expect_equal(
style_text(c(
"
"1+1"
)) %>%
as.character(),
c("
)
})
test_that("last stopping marker can be omitted", {
expect_equal(
style_text(c(
"
"call( 1)",
"
"call(2 +0)",
"
"x=2"
)) %>%
as.character(),
c(
"
"
)
)
})
test_that("works for one line", {
expect_equal(
style_text(c(
"1+1",
"1+1
"1+1"
)) %>%
as.character(),
c("1 + 1", "1+1
)
})
test_that("works with other markers", {
expect_equal(
withr::with_options(
list(styler.ignore_start = "
{
style_text(c(
"1+1",
"1+1
"1+1"
)) %>%
as.character()
}
),
c("1 + 1", "1+1
)
})
test_that("works for multiple markers inline", {
withr::local_options(styler.ignore_start = "
expect_equal(
style_text(c(
"1+1",
"1+1
"1+1"
)) %>%
as.character(),
c("1 + 1", "1+1
)
})
test_that("works for multiple markers inline on one line", {
withr::local_options(styler.ignore_start = "nolint start|styler: off")
expect_equal(
style_text(c(
"1+1",
"1+1
"1+1"
)) %>%
as.character(),
c("1 + 1", "1+1
)
})
test_that("works with other markers", {
expect_warning(
withr::with_options(
list(styler.ignore_start = "
{
style_text(c(
"1+1",
"
"1+1",
"1+1",
"
)) %>%
as.character()
}
),
"Invalid stylerignore sequence"
)
})
# Collection-based regression tests: each runs a stylerignore fixture through
# style_text() and asserts that no warning is raised (expect_warning(..., NA)).
test_that("Simple example works", {
  expect_warning(test_collection("stylerignore", "simple",
    transformer = style_text
  ), NA)
})
test_that("stylerignore does not need coincidence with top-level expressions", {
  expect_warning(test_collection("stylerignore", "crossing",
    transformer = style_text
  ), NA)
})
test_that("token adding or removing works in stylerignore", {
  expect_warning(test_collection("stylerignore", "adding-removing",
    transformer = style_text
  ), NA)
})
test_that("no token added or removed in complex case", {
  expect_warning(test_collection("stylerignore", "braces",
    transformer = style_text
  ), NA)
})
test_that("stylerignore sequences are respected in alignment detection", {
expect_warning(test_collection("stylerignore", "alignment",
transformer = style_text
), NA)
}) |
genscorestat <- function(scores, group, correct = 0) {
  # General two-sample score test with a normal approximation. The statistic
  # is the sum of scores in the first group level; 'correct' is a continuity
  # correction applied toward the null mean. Returns an "htest" object, or NA
  # (with a message) when 'group' does not have exactly two levels.
  n_total <- length(group)
  counts <- table(group)
  ref_level <- names(counts)[1]
  if (is.numeric(group)) ref_level <- as.numeric(ref_level)
  if (length(counts) != 2) {
    message("genscorestat works only for two groups")
    result <- NA
  } else {
    score_mean <- mean(scores)
    score_meansq <- mean(scores^2)
    # Null mean and variance of the reference-group score sum.
    vv <- counts[2] * counts[1] * (score_meansq - score_mean^2) / (n_total - 1)
    ee <- counts[1] * score_mean
    stat <- structure(sum(scores[ref_level == group]), .Names = "Gaussian")
    # Apply the continuity correction in the direction of the deviation.
    correct <- abs(correct) * sign(stat - ee)
    params <- structure(c(ee, vv), .Names = c("mean", "variance"))
    result <- list(
      null.value = structure(0, .Names = "median difference"),
      alternative = "two-sided", method = "General Score Test", data.name = NULL,
      statistic = stat, parameters = params,
      p.value = 2 * pnorm(-abs(stat - ee - correct) / sqrt(vv))
    )
    class(result) <- "htest"
  }
  return(result)
}
"dccm.nma" <-
  function(x, nmodes=NULL, ncore=NULL, progress = NULL, ...) {
  # Dynamical cross-correlation matrix (DCCM) from a normal mode analysis
  # object. Accumulates per-mode outer products weighted by 1/frequency^2
  # (optionally in parallel over 'ncore' workers), then normalizes by the
  # per-atom fluctuation amplitudes. Returns a "dccm" matrix.
  nma <- x
  if (missing(nma))
    stop("dccm.nma: must supply a 'nma' object, i.e. from 'nma'")
  if(!"nma" %in% class(nma))
    stop("dccm.nma: must supply 'nma' object, i.e. from 'nma'")
  ncore <- setup.ncore(ncore, bigmem = FALSE)
  # Fetch the parallel backend lazily so 'parallel' is only required if used.
  if(ncore > 1) {
    mcparallel <- get("mcparallel", envir = getNamespace("parallel"))
    mccollect <- get("mccollect", envir = getNamespace("parallel"))
  }
  # Row-by-row inner products between two (natoms x 3) mode matrices.
  cross.inner.prod <- function(a, b) {
    mat <- apply(a, 1, "%*%", t(b))
    return(mat)
  }
  # Accumulate the 1/freq^2-weighted mode contributions for one worker's modes.
  corrmats <- function(r.inds, core.id, nma, corr.mat, freqs, progress) {
    for ( i in r.inds ) {
      mode <- matrix(nma$U[,i], ncol=3, byrow=TRUE)
      corr.mat <- corr.mat + (cross.inner.prod(mode, mode) / (freqs[i]**2))
      # Only worker 1 drives the text progress bar.
      if(core.id==1) {
        setTxtProgressBar(pb, i)
      }
      if(!is.null(progress)) {
        if(i %% 20 == 0) {
          progress$set(i)
        }
      }
    }
    return(corr.mat)
  }
  # Weight modes by frequencies when available, else by force constants.
  if(!is.null(nma$frequencies)) {
    freqs <- nma$frequencies
  }
  else {
    freqs <- nma$force.constants
  }
  # Default to all modes; user-requested counts exclude the trivial modes.
  if(is.null(nmodes))
    nmodes <- length(nma$L)
  else {
    nmodes <- nmodes + nma$triv.modes
    if(nmodes>length(nma$L)) {
      warning("'nmodes' larger than the number of modes")
      nmodes <- length(nma$L)
    }
  }
  pbmax <- nmodes + nma$natoms
  pb <- txtProgressBar(min=(nma$triv.modes+1), max=pbmax, style=3)
  corr.mat <- matrix(0, nma$natoms, nma$natoms)
  mode.inds <- (nma$triv.modes+1):nmodes
  # Round-robin assignment of modes to workers.
  core.ids <- rep(1:ncore, length.out=length( mode.inds ))
  if(ncore>1)
    jobs <- list()
  for ( i in 1:ncore ) {
    rinds <- mode.inds[ which(core.ids==i) ]
    if(ncore>1) {
      q <- mcparallel(corrmats(rinds, i, nma, corr.mat, freqs, progress))
      jobs[[i]] <- q
    }
    else
      corr.mat <- corrmats(rinds, i, nma, corr.mat, freqs, progress)
  }
  # Collect and sum the partial matrices from all workers.
  if(ncore>1) {
    res <- mccollect(jobs, wait=TRUE)
    for ( job in res ) {
      corr.mat <- corr.mat + job
    }
  }
  # Per-atom amplitudes: sum squared mode components (x,y,z per atom),
  # weighted by 1/freq^2, then square-rooted for normalization.
  a <- vector('numeric', length=nrow(corr.mat))
  k <- length(mode.inds)
  inds <- rep(1:nrow(corr.mat), each=3)
  for ( j in (nma$triv.modes+1):nmodes ) {
    v <- nma$U[, j] * nma$U[, j]
    a <- a + ( tapply( v, inds, sum) / (freqs[j]**2))
    k <- k+1
    setTxtProgressBar(pb, k)
    if(!is.null(progress)) {
      if(j %% 20 == 0) {
        progress$set(k)
      }
    }
  }
  close(pb)
  a <- sqrt(a)
  bn <- a%o%a
  # Normalize covariances to correlations.
  corr.mat <- corr.mat / bn
  class(corr.mat) <- c("dccm", "matrix")
  return(corr.mat)
} |
stat.entropyFunction <- function(bitString) {
  # Shannon entropy (in bits) of a binary 0/1 vector, treating the fraction
  # of ones as the success probability of a Bernoulli variable.
  #
  # Args:
  #   bitString: numeric or logical vector of 0s and 1s.
  # Returns: entropy in [0, 1]; 0 for a constant (or empty) vector.
  if (length(bitString) == 0) {
    # Guard: the original divided by length 0, producing NaN and then an
    # "argument is not TRUE/FALSE" error below; define empty entropy as 0.
    return(0)
  }
  pT <- sum(bitString) / length(bitString)
  pF <- 1 - pT
  if (pT == 1 || pT == 0) {
    # Degenerate distribution carries no information.
    e <- 0
  } else {
    e <- -pT * log2(pT) - pF * log2(pF)
  }
  return(e)
}
# ChickWeight: longitudinal data -- body weights of 50 chicks measured
# repeatedly under 4 diets, stored as an nlme grouped-data object with
# display formula weight ~ Time | Chick and outer grouping ~ Diet.
"ChickWeight" <-
structure(list(
# weight: body weight (units attribute below says grams).
weight = c(42, 51, 59, 64, 76, 93, 106, 125, 149,
171, 199, 205, 40, 49, 58, 72, 84, 103, 122, 138, 162, 187, 209,
215, 43, 39, 55, 67, 84, 99, 115, 138, 163, 187, 198, 202, 42,
49, 56, 67, 74, 87, 102, 108, 136, 154, 160, 157, 41, 42, 48,
60, 79, 106, 141, 164, 197, 199, 220, 223, 41, 49, 59, 74, 97,
124, 141, 148, 155, 160, 160, 157, 41, 49, 57, 71, 89, 112, 146,
174, 218, 250, 288, 305, 42, 50, 61, 71, 84, 93, 110, 116, 126,
134, 125, 42, 51, 59, 68, 85, 96, 90, 92, 93, 100, 100, 98, 41,
44, 52, 63, 74, 81, 89, 96, 101, 112, 120, 124, 43, 51, 63, 84,
112, 139, 168, 177, 182, 184, 181, 175, 41, 49, 56, 62, 72, 88,
119, 135, 162, 185, 195, 205, 41, 48, 53, 60, 65, 67, 71, 70,
71, 81, 91, 96, 41, 49, 62, 79, 101, 128, 164, 192, 227, 248,
259, 266, 41, 49, 56, 64, 68, 68, 67, 68, 41, 45, 49, 51, 57,
51, 54, 42, 51, 61, 72, 83, 89, 98, 103, 113, 123, 133, 142,
39, 35, 43, 48, 55, 62, 65, 71, 82, 88, 106, 120, 144, 157, 41,
47, 54, 58, 65, 73, 77, 89, 98, 107, 115, 117, 40, 50, 62, 86,
125, 163, 217, 240, 275, 307, 318, 331, 41, 55, 64, 77, 90, 95,
108, 111, 131, 148, 164, 167, 43, 52, 61, 73, 90, 103, 127, 135,
145, 163, 170, 175, 42, 52, 58, 74, 66, 68, 70, 71, 72, 72, 76,
74, 40, 49, 62, 78, 102, 124, 146, 164, 197, 231, 259, 265, 42,
48, 57, 74, 93, 114, 136, 147, 169, 205, 236, 251, 39, 46, 58,
73, 87, 100, 115, 123, 144, 163, 185, 192, 39, 46, 58, 73, 92,
114, 145, 156, 184, 207, 212, 233, 39, 48, 59, 74, 87, 106, 134,
150, 187, 230, 279, 309, 42, 48, 59, 72, 85, 98, 115, 122, 143,
151, 157, 150, 42, 53, 62, 73, 85, 102, 123, 138, 170, 204, 235,
256, 41, 49, 65, 82, 107, 129, 159, 179, 221, 263, 291, 305,
39, 50, 63, 77, 96, 111, 137, 144, 151, 146, 156, 147, 41, 49,
63, 85, 107, 134, 164, 186, 235, 294, 327, 341, 41, 53, 64, 87,
123, 158, 201, 238, 287, 332, 361, 373, 39, 48, 61, 76, 98, 116,
145, 166, 198, 227, 225, 220, 41, 48, 56, 68, 80, 83, 103, 112,
135, 157, 169, 178, 41, 49, 61, 74, 98, 109, 128, 154, 192, 232,
280, 290, 42, 50, 61, 78, 89, 109, 130, 146, 170, 214, 250, 272,
41, 55, 66, 79, 101, 120, 154, 182, 215, 262, 295, 321, 42, 51,
66, 85, 103, 124, 155, 153, 175, 184, 199, 204, 42, 49, 63, 84,
103, 126, 160, 174, 204, 234, 269, 281, 42, 55, 69, 96, 131,
157, 184, 188, 197, 198, 199, 200, 42, 51, 65, 86, 103, 118,
127, 138, 145, 146, 41, 50, 61, 78, 98, 117, 135, 141, 147, 174,
197, 196, 40, 52, 62, 82, 101, 120, 144, 156, 173, 210, 231,
238, 41, 53, 66, 79, 100, 123, 148, 157, 168, 185, 210, 205,
39, 50, 62, 80, 104, 125, 154, 170, 222, 261, 303, 322, 40, 53,
64, 85, 108, 128, 152, 166, 184, 203, 233, 237, 41, 54, 67, 84,
105, 122, 155, 175, 205, 234, 264, 264),
# Time: measurement day (0-21); some chicks have fewer records.
Time = c(0, 2, 4, 6,
8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16,
18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2,
4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14,
16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0,
2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12,
14, 16, 18, 20, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0,
2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12,
14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21,
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10,
12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6,
8, 10, 12, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 0,
2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12,
14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21,
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10,
12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8,
10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6,
8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16,
18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2,
4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14,
16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0,
2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12,
14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21,
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10,
12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8,
10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 0, 2, 4, 6, 8, 10,
12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8,
10, 12, 14, 16, 18, 20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
20, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 21),
# Chick: ordered factor identifying each bird (50 levels; the `labels`
# vector below maps the internal codes 1:50 to the published chick ids --
# the ordering rationale is not visible here, TODO confirm from nlme docs).
Chick = ordered(c(15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 18,
18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 20, 20, 20, 20, 20, 20, 20, 20, 20,
20, 20, 20, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 19,
19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 3, 3, 3, 3, 3, 3,
3, 3, 2, 2, 2, 2, 2, 2, 2, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1, 1, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 30, 30, 30, 30, 30, 30, 30, 30, 30,
30, 30, 30, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 21, 21, 21, 21, 21,
21, 21, 21, 21, 21, 21, 21, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 34,
34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 37, 37, 37, 37, 37,
37, 37, 37, 37, 37, 37, 37, 31, 31, 31, 31, 31, 31, 31, 31, 31,
31, 31, 31, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 33, 33, 33, 33, 33,
33, 33, 33, 33, 33, 33, 33, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 35,
35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 38, 38, 38, 38, 38,
38, 38, 38, 38, 38, 38, 38, 44, 44, 44, 44, 44, 44, 44, 44, 44,
44, 44, 44, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 43,
43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 41, 41, 41, 41, 41,
41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
42, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 45, 45, 45,
45, 45, 45, 45, 45, 45, 45, 45, 45, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
46, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48), levels=1:50,
labels = c("18", "16", "15", "13", "9", "20", "10",
"8", "17", "19", "4", "6", "11", "3", "1", "12", "2", "5", "14",
"7", "24", "30", "22", "23", "27", "28", "26", "25", "29", "21",
"33", "37", "36", "31", "39", "38", "32", "40", "34", "35", "44",
"45", "43", "41", "47", "49", "46", "50", "42", "48")),
# Diet: experimental diet group, factor with levels "1".."4".
Diet = factor(c(1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4), levels=1:4,
labels = c("1", "2", "3", "4"))),
# groupedData metadata: formula/outer/labels/units are attributes consumed
# by nlme's plotting and modeling helpers.
row.names = 1:578,
class = c("nfnGroupedData", "nfGroupedData", "groupedData", "data.frame"),
formula = weight ~ Time | Chick,
outer = ~ Diet,
labels = list(x = "Time", y = "Body weight"),
units = list(x = "(days)", y = "(gm)"))
# Replace the environments captured by the stored formulas with emptyenv()
# so the dataset does not retain a reference to its creation scope.
environment(attr(ChickWeight, "formula")) <- emptyenv()
environment(attr(ChickWeight, "outer")) <- emptyenv() |
NULL
dcmp <- function(x, lambda, nu, log = FALSE) {
  # Density of the Conway-Maxwell-Poisson distribution at `x`.
  # Parameters are expanded/validated by prep.zicmp(); the computation is
  # delegated to compiled code. `log = TRUE` returns log-densities.
  par <- prep.zicmp(length(x), lambda, nu)
  dcmp_cpp(x, par$lambda, par$nu, take_log = log)
}
rcmp <- function(n, lambda, nu) {
  # Draw `n` CMP random variates; the support is truncated at the
  # package-level option "COMPoissonReg.ymax".
  par <- prep.zicmp(n, lambda, nu)
  upper <- getOption("COMPoissonReg.ymax")
  rcmp_cpp(n, par$lambda, par$nu, ymax = upper)
}
pcmp <- function(x, lambda, nu) {
  # Cumulative distribution function of the CMP distribution at `x`.
  par <- prep.zicmp(length(x), lambda, nu)
  pcmp_cpp(x, par$lambda, par$nu)
}
qcmp <- function(q, lambda, nu, log.p = FALSE) {
  # Quantile function of the CMP distribution. Probabilities `q` may be
  # given on the log scale (log.p = TRUE); the compiled routine always
  # receives log-probabilities. Search is truncated at option
  # "COMPoissonReg.ymax".
  par <- prep.zicmp(length(q), lambda, nu)
  log.q <- if (log.p) q else log(q)
  upper <- getOption("COMPoissonReg.ymax")
  qcmp_cpp(log.q, par$lambda, par$nu, ymax = upper)
}
cmp.expected.value <- function(lambda, nu) {
  # Expected value of the CMP distribution, computed as
  # lambda * d/d(lambda) log Z(lambda, nu) via a forward-difference gradient
  # of the (hybrid) normalizing constant. Recycles lambda/nu to a common
  # length through prep.zicmp().
  #
  # Args:
  #   lambda, nu: CMP parameters (vectors are recycled).
  # Returns: numeric vector of means, one per parameter pair.
  n <- max(length(lambda), length(nu))
  par <- prep.zicmp(n, lambda, nu)
  res <- numeric(n)
  # seq_len() is safe for n == 0, unlike 1:n which iterates over c(1, 0).
  for (i in seq_len(n)) {
    res[i] <- par$lambda[i] *
      grad.fwd(z_hybrid, par$lambda[i], nu = par$nu[i], take_log = TRUE)
  }
  return(res)
}
groupAndRename <- function(obj, var, before, after, addNA = FALSE) {
  # User-facing wrapper: forwards all arguments to the S4 generic.
  groupAndRenameX(
    obj = obj,
    var = var,
    before = before,
    after = after,
    addNA = addNA
  )
}
# S4 generic behind groupAndRename(); methods below exist for factor,
# data.frame and sdcMicroObj inputs.
setGeneric("groupAndRenameX", function(obj, var, before, after, addNA=FALSE) {
  standardGeneric("groupAndRenameX")
})
setMethod(f="groupAndRenameX", signature=c("factor"),
  definition=function(obj, var, before, after, addNA=FALSE) {
    # Collapse all levels listed in `before` into the single level `after`.
    # `var` is unused for plain factors; when addNA is TRUE, missing values
    # are also recoded to `after`.
    if (!all(before %in% levels(obj))) {
      stop("some elements of 'before' are not valid levels in the input factor!\n")
    }
    if (anyDuplicated(before) > 0) {
      stop("each level from the original factor must be listed only once in argument 'before'!")
    }
    lv <- levels(obj)
    lv[lv %in% before] <- after
    levels(obj) <- lv
    if (addNA) {
      obj[is.na(obj)] <- after
    }
    obj
  })
setMethod(f="groupAndRenameX", signature=c("data.frame"),
  definition=function(obj, var, before, after, addNA=FALSE) {
    # Apply the factor recoding to a single column `var` of a data.frame.
    if (length(var) != 1) {
      stop("length of input 'var' != 1!\n")
    }
    if (!var %in% colnames(obj)) {
      stop("variable specified in 'var' is not available in 'obj'!\n")
    }
    # (removed an unused local copy of obj[[var]]; it was never referenced)
    if (!is.factor(obj[[var]])) {
      stop("check input, we do not have a factor here!\n")
    }
    obj[[var]] <- groupAndRename(obj[[var]], var=NULL, before=before, after=after, addNA=addNA)
    obj
  })
setMethod(f="groupAndRenameX", signature=c("sdcMicroObj"),
  definition=function(obj, var, before, after, addNA=FALSE) {
    # Recode a manipulated key variable inside an sdcMicroObj, then refresh
    # the disclosure-risk measures.
    obj <- nextSdcObj(obj)
    key.df <- get.sdcMicroObj(obj, type="manipKeyVars")
    if (!var %in% colnames(key.df)) {
      stop("variable specified in 'var' is not available in 'obj'!\n")
    }
    key.df[[var]] <- groupAndRename(key.df[[var]], var=var, before=before, after=after, addNA=addNA)
    obj <- set.sdcMicroObj(obj, type="manipKeyVars", input=list(key.df))
    obj <- calcRisks(obj)
    obj
  })
# Profit/loss test: buy 1 at t=2/price 1, sell 1 at t=3/price 2, with a
# valuation timeline (along.timestamp) and per-period valuation prices.
# Expects the timeline to be sorted ascending and a cumulative P/L of 0,1,1.
ans <- pl(amount = c(1, -1),
          timestamp = c(2, 3),
          price = c(1, 2),
          along.timestamp = 3:1,
          vprice = 1:3)
expect_equal(ans[[1]]$timestamp , 1:3)
expect_equal(unname(ans[[1]]$pl), c(0,1,1)) |
REND <- function(TPDc = NULL, TPDs = NULL){
  # REND: computes the three primary components of functional diversity --
  # Richness, Evenness and Divergence -- from trait probability densities,
  # for communities (TPDc), species/populations (TPDs), or both.
  #
  # Args:
  #   TPDc: object of class "TPDcomm", or NULL.
  #   TPDs: object of class "TPDsp", or NULL.
  # Returns: a list with components "communities" and/or
  #   "species"/"populations", each holding named numeric vectors
  #   FRichness, FEvenness and FDivergence.
  if (is.null(TPDc) && is.null(TPDs)) {
    stop("At least one of 'TPDc' or 'TPDs' must be supplied")
  }
  # inherits() instead of class(x) != "...": class() may return a vector of
  # length > 1, which errors (or warns) inside a scalar if() comparison.
  if (!is.null(TPDc) && !inherits(TPDc, "TPDcomm")){
    stop("The class of one object do not match the expectations,
         Please, specify if your object is a TPDc or a TPDs")
  }
  if (!is.null(TPDs) && !inherits(TPDs, "TPDsp")){
    stop("The class of one object do not match the expectations,
         Please, specify if your object is a TPDc or a TPDs")
  }
  results <- list()
  # FRichness: volume of occupied trait space (number of cells with positive
  # probability times the cell volume).
  Calc_FRich <- function(x) {
    results_FR <- numeric()
    if (inherits(x, "TPDcomm")) {
      TPD <- x$TPDc$TPDc
      names_aux <- names(x$TPDc$TPDc)
      cell_volume <- x$data$cell_volume
    }
    if (inherits(x, "TPDsp")) {
      TPD <- x$TPDs
      names_aux <- names(x$TPDs)
      cell_volume <- x$data$cell_volume
    }
    for (i in seq_along(TPD)) {
      TPD_aux <- TPD[[i]]
      TPD_aux[TPD_aux > 0] <- cell_volume
      results_FR[i] <- sum(TPD_aux)
    }
    names(results_FR) <- names_aux
    return(results_FR)
  }
  # FEvenness: overlap between the observed TPD and a perfectly even
  # distribution over the same occupied cells.
  Calc_FEve <- function(x) {
    results_FE <- numeric()
    if (inherits(x, "TPDcomm")) {
      TPD <- x$TPDc$TPDc
      names_aux <- names(x$TPDc$TPDc)
      cell_volume <- x$data$cell_volume
    }
    if (inherits(x, "TPDsp")) {
      TPD <- x$TPDs
      names_aux <- names(x$TPDs)
      cell_volume <- x$data$cell_volume
    }
    for (i in seq_along(TPD)) {
      TPD_aux <- TPD[[i]][TPD[[i]] > 0]
      TPD_eve <- rep((1 / length(TPD_aux)), times = length(TPD_aux))
      results_FE[i] <- sum(pmin(TPD_aux, TPD_eve))
    }
    names(results_FE) <- names_aux
    return(results_FE)
  }
  # FDivergence: probability-weighted deviation of occupied cells from their
  # center of gravity, with each trait axis rescaled to [0, 1].
  Calc_FDiv <- function(x) {
    results_FD <- numeric()
    if (inherits(x, "TPDcomm")) {
      TPD <- x$TPDc$TPDc
      evaluation_grid <- x$data$evaluation_grid
      names_aux <- names(x$TPDc$TPDc)
      cell_volume <- x$data$cell_volume
    }
    if (inherits(x, "TPDsp")) {
      TPD <- x$TPDs
      evaluation_grid <- x$data$evaluation_grid
      names_aux <- names(x$TPDs)
      cell_volume <- x$data$cell_volume
    }
    for (i in seq_along(TPD)) {
      functional_volume <- evaluation_grid[TPD[[i]] > 0, , drop = FALSE]
      # Rescale every trait dimension to [0, 1] within the occupied volume.
      for (j in seq_len(ncol(functional_volume))) {
        functional_volume[, j] <-
          (functional_volume[, j] - min(functional_volume[, j])) /
          (max(functional_volume[, j]) - min(functional_volume[, j]))
      }
      TPD_aux <- TPD[[i]][TPD[[i]] > 0]
      COG <- colMeans(functional_volume, na.rm = TRUE)
      dist_COG <- function(x, COG) {
        result_aux <- stats::dist(rbind(x, COG))
        return(result_aux)
      }
      COGDist <- apply(functional_volume, 1, dist_COG, COG)
      meanCOGDist <- mean(COGDist)
      distDeviances <- COGDist - meanCOGDist
      AWdistDeviances <- sum(TPD_aux * distDeviances)
      absdistDeviances <- abs(COGDist - meanCOGDist)
      AWabsdistDeviances <- sum(TPD_aux * absdistDeviances)
      results_FD[i] <- (AWdistDeviances + meanCOGDist) /
        (AWabsdistDeviances + meanCOGDist)
    }
    names(results_FD) <- names_aux
    return(results_FD)
  }
  if (!is.null(TPDc)) {
    results$communities <- list()
    message("Calculating FRichness of communities")
    results$communities$FRichness <- Calc_FRich(TPDc)
    message("Calculating FEvenness of communities")
    results$communities$FEvenness <- Calc_FEve(TPDc)
    message("Calculating FDivergence of communities")
    results$communities$FDivergence <- Calc_FDiv(TPDc)
  }
  if (!is.null(TPDs)) {
    # Result slot name depends on whether the TPDs describe species or
    # populations (scalar string comparison -> short-circuit ||).
    if (TPDs$data$type == "One population_One species" ||
        TPDs$data$type == "One population_Multiple species") {
      results$species <- list()
      message("Calculating FRichness of species")
      results$species$FRichness <- Calc_FRich(TPDs)
      message("Calculating FEvenness of species")
      results$species$FEvenness <- Calc_FEve(TPDs)
      message("Calculating FDivergence of species")
      results$species$FDivergence <- Calc_FDiv(TPDs)
    } else {
      results$populations <- list()
      message("Calculating FRichness of populations")
      results$populations$FRichness <- Calc_FRich(TPDs)
      message("Calculating FEvenness of populations")
      results$populations$FEvenness <- Calc_FEve(TPDs)
      message("Calculating FDivergence of populations")
      results$populations$FDivergence <- Calc_FDiv(TPDs)
    }
    if (TPDs$data$method == "mean") {
      message("WARNING: When TPDs are calculated using the TPDsMean function, Evenness
            and Divergence are meaningless!!")
    }
  }
  return(results)
}
r1sd <- function(x, na = TRUE) {
  # Standardize a numeric vector: subtract the mean and divide by one
  # standard deviation (a z-score). `na` is forwarded as na.rm to both
  # mean() and sd().
  centered <- x - mean(x, na.rm = na)
  centered / sd(x, na.rm = na)
}
# QRSimul: simultaneous non-crossing quantile regression for longitudinal
# data. Covariates are rescaled to [0, 1], penalty parameters are selected
# by SIC (lambdak_simul), the simultaneous fit is computed by ncqr_simul,
# and the coefficient curves are transformed back to the original covariate
# scale. Returns W, alpha, intercept curves (hat_bt0), slope curves
# (hat_btk) and fitted quantile curves (qhat_h).
QRSimul <-
function(VecX, tau, times, subj, X, y, d, kn, degree, lambda, gam){
dim = length(subj)
X = matrix(X, nrow=dim)
H = length(tau)
px = ncol(X)
n = length(unique(subj))
if(px != length(VecX))
stop("the length of VecX and the number of covariate(s) must match")
# Keep an unscaled copy for the back-transformation at the end.
XX = as.matrix(X)
# Min-max rescale each covariate (and the evaluation point VecX) to [0, 1];
# an all-ones first column (intercept) is left untouched.
if(all(X[,1]==1)) VecX[1]=1 else VecX[1] = (VecX[1] - min(X[,1]))/(max(X[,1])-min(X[,1]))
if(all(X[,1]==1)) X[,1]=X[,1] else X[,1] = (X[,1] - min(X[,1]))/(max(X[,1])-min(X[,1]))
for(k in 2:px){
VecX[k] = (VecX[k] - min(X[,k]))/(max(X[,k])-min(X[,k]))
X[,k] = (X[,k] - min(X[,k]))/(max(X[,k])-min(X[,k]))
}
# Penalty selection (SIC) and the simultaneous non-crossing fit.
lambda_all = lambdak_simul(times, subj, X, y, d, tau, kn, degree, lambda, gam)
lambdasicr = lambda_all$lambdasicr
lambdasic = lambda_all$lambdasic
simul = ncqr_simul(times, subj, y, X, tau, kn, degree, lambda=lambdasicr, lambcross=lambdasic, d)
W = simul$W
alpha = simul$alpha
hat_bt = simul$hat_bt
# hat_bt holds H blocks of px coefficient curves; Hpx maps each column of
# hat_bt to its covariate index.
Hpx = rep(seq(1,px), H)
Xbeta = matrix(NA, dim, H*px)
for(h in 1:(H*px))
{
Xbeta[,h]=hat_bt[,h]*VecX[Hpx[h]]
}
# Fitted quantile curve for each tau: sum of the px weighted curves.
qhat_h = matrix(NA, dim, H)
for(h in 1:H){
qhat_h[,h] = rowSums(Xbeta[,((h-1)*px+1):(px*h)])
}
# Column indices of the intercept curve within each tau block.
HpxB = rep(px,H)
cum_HpxB = cumsum(HpxB)
cum_HpxA = c(1, c(cum_HpxB[seq(1:(H-1))]+1))
hat_bt0_ori = hat_bt[,cum_HpxA]
# NOTE(review): seq(2, px) selects slope columns of the FIRST tau block
# only; for H > 1 this looks inconsistent with cum_HpxA -- TODO confirm.
hat_btk_ori = hat_bt[,seq(2,px)]
hat_btk = matrix(NA, dim, (px-1))
hat_btk0 = matrix(0, dim, (px-1))
# Undo the min-max scaling so the slopes refer to original covariate units.
for(k in 1:(px-1)){
hat_btk[,k] = hat_btk_ori[,k]/(max(XX[,(k+1)])-min(XX[,(k+1)]))
hat_btk0[,k] = hat_btk_ori[,k]*min(XX[,(k+1)])/(max(XX[,(k+1)])-min(XX[,(k+1)]))
}
hat_bt0 = hat_bt0_ori - rowSums(hat_btk0)
out = list(W=W, alpha=alpha, hat_bt0=hat_bt0, hat_btk=hat_btk, qhat_h = qhat_h)
return(out)
} |
writeEnvelope <- function(obj, centerfun = mean) {
  # Dispatch to the appropriate *Envelope() writer based on the class of obj.
  #
  # Bug fix: `centerfun` was accepted but the hard-coded `mean` was forwarded
  # to the list/lines writers; the user-supplied function is now passed on.
  # (SpatialPointsEnvelope/SpatialPolygonsEnvelope take no centerfun here --
  # presumably they do not need one; TODO confirm their signatures.)
  if(inherits(obj, c("SpatialPoints", "SpatialPointsDataFrame"), which = FALSE)) {
    SpatialPointsEnvelope(obj)
  } else if(inherits(obj, "list") && length(obj) > 0 &&
            all(vapply(
              X = obj,
              FUN = inherits,
              FUN.VALUE = logical(1),
              c("SpatialPoints", "SpatialPointsDataFrame"))
            )
  ) {
    ListOfSpatialPointsEnvelope(obj, centerfun = centerfun)
  } else if(inherits(obj, c("SpatialLines", "SpatialLinesDataFrame"), which = FALSE)) {
    SpatialLinesEnvelope(obj, centerfun = centerfun)
  } else if(inherits(obj, c("SpatialPolygons", "SpatialPolygonsDataFrame"), which = FALSE)) {
    SpatialPolygonsEnvelope(obj)
  } else {
    stop("obj must be an object of class SpatialPoints, SpatialPointsDataFrame, ",
         "SpatialLines, SpatialLinesDataFrame, SpatialPolygons, ",
         "or SpatialPolygonsDataFrame, or a list of objects of class ",
         "SpatialPoints or SpatialPointsDataFrame")
  }
}
# Create a browsing session that persists cookies and config across
# requests, then perform the initial GET. Returns an "rvest_session".
session <- function(url, ...) {
  session <- structure(
    list(
      handle = httr::handle(url),
      config = c(..., httr::config(autoreferer = 1L)),
      response = NULL,
      url = NULL,
      # Navigation history stacks (most recent first).
      back = character(),
      forward = character(),
      cache = new_environment()
    ),
    class = "rvest_session"
  )
  session_get(session, url)
}
is.session <- function(x) {
  # TRUE when `x` was produced by session().
  inherits(x, "rvest_session")
}
# Print method: current URL plus status/type/size of the last response.
print.rvest_session <- function(x, ...) {
  cat("<session> ", x$url, "\n", sep = "")
  cat("  Status: ", httr::status_code(x), "\n", sep = "")
  cat("  Type:   ", httr::headers(x)$`Content-Type`, "\n", sep = "")
  cat("  Size:   ", length(x$response$content), "\n", sep = "")
  invisible(x)
}
# GET `url` with the session's config and shared handle, storing the result.
session_get <- function(x, url, ...) {
  resp <- httr::GET(url, x$config, ..., handle = x$handle)
  session_set_response(x, resp)
}
# Record a response on the session: warn on HTTP errors, update the current
# URL, and reset the per-page cache. Returns the updated session.
session_set_response <- function(x, response) {
  httr::warn_for_status(response)
  x$response <- response
  x$url <- response$url
  x$cache <- new_environment()
  x
}
# Navigate to `url` (resolved against the current page); pushes the old URL
# onto the back stack and clears the forward stack.
session_jump_to <- function(x, url, ...) {
  check_session(x)
  url <- xml2::url_absolute(url, x$url)
  last_url <- x$url
  x <- session_get(x, url, ...)
  x$back <- c(last_url, x$back)
  x$forward <- character()
  x
}
# Follow a link identified by index/text (`i`), `css` or `xpath`.
session_follow_link <- function(x, i, css, xpath, ...) {
  check_session(x)
  url <- find_href(x, i = i, css = css, xpath = xpath)
  inform(paste0("Navigating to ", url))
  session_jump_to(x, url, ...)
}
find_href <- function(x, i, css, xpath) {
  # Locate one <a> element on the session's page and return its href.
  # Exactly one selector must be supplied:
  #   i:     numeric position among all links, or a string matched (fixed,
  #          substring) against the link text (first match wins);
  #   css/xpath: forwarded to html_elements(); first hit is used.
  if (sum(!missing(i), !missing(css), !missing(xpath)) != 1) {
    abort("Must supply exactly one of `i`, `css`, or `xpath`")
  }
  if (!missing(i)) {
    stopifnot(length(i) == 1)
    a <- html_elements(x, "a")
    if (is.numeric(i)) {
      out <- a[[i]]
    } else if (is.character(i)) {
      text <- html_text(a)
      match <- grepl(i, text, fixed = TRUE)
      if (!any(match)) {
        stop("No links have text '", i, "'", call. = FALSE)
      }
      out <- a[[which(match)[[1]]]]
    } else {
      # Fixed grammar in the error message ("must a" -> "must be a").
      abort("`i` must be a string or integer")
    }
  } else {
    a <- html_elements(x, css = css, xpath = xpath)
    if (length(a) == 0) {
      abort("No links matched `css`/`xpath`")
    }
    out <- a[[1]]
  }
  html_attr(out, "href")
}
# Re-fetch the previous URL; the current URL moves to the forward stack.
session_back <- function(x) {
  check_session(x)
  if (length(x$back) == 0) {
    abort("Can't go back any further")
  }
  url <- x$back[[1]]
  x$back <- x$back[-1]
  old_url <- x$url
  x <- session_get(x, url)
  x$forward <- c(old_url, x$forward)
  x
}
# Re-fetch the next URL from the forward stack; mirror image of session_back.
session_forward <- function(x) {
  check_session(x)
  if (length(x$forward) == 0) {
    abort("Can't go forward any further")
  }
  url <- x$forward[[1]]
  old_url <- x$url
  x <- session_get(x, url)
  x$forward <- x$forward[-1]
  x$back <- c(old_url, x$back)
  x
}
# Print the navigation history, marking the current page with "- ".
session_history <- function(x) {
  check_session(x)
  urls <- c(rev(x$back), x$url, x$forward)
  prefix <- rep(c("  ", "- ", "  "), c(length(x$back), 1, length(x$forward)))
  cat_line(prefix, urls)
}
# Submit `form` (optionally via a named submit button) and store the
# response on the session.
session_submit <- function(x, form, submit = NULL, ...) {
  check_session(x)
  check_form(form)
  subm <- submission_build(form, submit)
  resp <- submission_submit(subm, x$config, ..., handle = x$handle)
  session_set_response(x, resp)
}
# Parse the current response as HTML; memoised in the session's per-page
# cache so repeated extraction calls reuse the parsed document.
read_html.rvest_session <- function(x, ...) {
  if (!is_html(x$response)) {
    abort("Page doesn't appear to be html.")
  }
  env_cache(x$cache, "html", read_html(x$response, ..., base_url = x$url))
}
# TRUE when the response's Content-Type denotes an HTML document.
is_html <- function(x) {
  type <- httr::headers(x)$`Content-Type`
  if (is.null(type)) return(FALSE)
  parsed <- httr::parse_media(type)
  parsed$complete %in% c("text/html", "application/xhtml+xml")
}
# The methods below adapt the usual rvest extractors to sessions by first
# parsing the cached HTML of the current page.
html_form.rvest_session <- function(x, base_url = NULL) {
  html_form(read_html(x), base_url = base_url)
}
html_table.rvest_session <- function(x,
                                     header = NA,
                                     trim = TRUE,
                                     fill = deprecated(),
                                     dec = ".",
                                     na.strings = "NA",
                                     convert = TRUE) {
  html_table(
    read_html(x),
    header = header,
    trim = trim,
    fill = fill,
    dec = dec,
    na.strings = na.strings,
    convert = convert
  )
}
html_element.rvest_session <- function(x, css, xpath) {
  html_element(read_html(x), css, xpath)
}
html_elements.rvest_session <- function(x, css, xpath) {
  html_elements(read_html(x), css, xpath)
}
# httr-style accessors delegating to the stored response.
status_code.rvest_session <- function(x) {
  status_code(x$response)
}
headers.rvest_session <- function(x) {
  headers(x$response)
}
cookies.rvest_session <- function(x) {
  cookies(x$response)
}
# Guard: `form` must come from html_form().
check_form <- function(x) {
  if (!inherits(x, "rvest_form")) {
    abort("`form` must be a single form produced by html_form()")
  }
}
# Guard: `x` must come from session().
check_session <- function(x) {
  if (!inherits(x, "rvest_session")) {
    abort("`x` must be produced by session()")
  }
} |
# Tests for user-defined spatial early-warning indicators.
context('Test the creation of custom indicators')
data(forestgap)
data(serengeti)
# Two dataset lists of different lengths to exercise multi-matrix handling.
datasets <- list(forestgap[3:4],
                 forestgap[1:2])
# Helper: checks that print/summary mention `teststring` and that the
# as.data.frame method yields one row per matrix in the input.
test_methods <- function(teststring, datalength, obj) {
  ok_print <- any(grepl(teststring, capture.output(print(obj))))
  expect_true(ok_print)
  ok_summary <- any(grepl(teststring, capture.output(summary(obj))))
  expect_true(ok_summary)
  ok_as_df <- nrow(as.data.frame(obj)) == datalength
  expect_true(ok_as_df)
  return(TRUE)
}
test_that('Custom indicators work', {
  skip_on_cran()
  for (dataset in datasets) {
    # A trivial indicator: the top-left cell of the matrix.
    testindic <- function(mat) {
      mat[1, 1]
    }
    indicator_mp <- create_indicator(testindic)
    a <- indicator_mp(dataset)
    test_methods("Spatial Early-Warning:", length(dataset), a)
    test_methods("Spatial Early-Warning:", 1, a[[1]])
    if ( length(dataset) > 1 ) {
      suppressWarnings( plot(a) )
    }
    # indictest: significance against null matrices (nulln permutations).
    indictest(a[[1]], nulln = 9)
    b <- indictest(a, nulln = 9)
    test_methods("Spatial Early-Warning:", length(dataset), b)
    test_methods("Spatial Early-Warning:", 1, b[[1]])
    if (length(dataset) > 1) {
      suppressWarnings( plot(b) )
    }
  }
  # create_indicator() and compute_indicator() must agree.
  indicator_mp <- create_indicator(testindic)
  a <- indicator_mp(dataset)
  expect_true({
    all.equal(a, compute_indicator(dataset, fun = testindic))
  })
})
test_that('Custom indicators handles anonymous functions correctly', {
  # Anonymous functions have no deparseable name: a warning is expected
  # unless an explicit taskname is supplied.
  expect_warning(
    anon_fun_indic <- create_indicator(function(mat) mean(mat))
  )
  expect_true({
    anon_fun_indic <- create_indicator(function(mat) mean(mat),
                                       taskname = "TestTask")
    TRUE
  })
}) |
# Integration tests for the H2O AutoML engine; every block is skipped on
# CRAN because it needs a running local H2O cluster.
testthat::context("H2O AUTOML TEST")
test_that("Fire up H2O", {
  testthat::skip_on_cran()
  h2o.init(
    nthreads = -1,
    ip = 'localhost',
    port = 54321
  )
  # <<- publishes the spec to the enclosing env so later tests can reuse it.
  model_spec <<- automl_reg(mode = 'regression') %>%
    set_engine(
      engine = 'h2o',
      max_runtime_secs = 5,
      max_runtime_secs_per_model = 4,
      nfolds = 5,
      max_models = 3,
      exclude_algos = c("DeepLearning"),
      seed = 786
    )
})
test_that("automl_reg: Parsnip Test", {
  testthat::skip_on_cran()
  model_fit <<- model_spec %>%
    fit(value ~ ., data = training(m750_splits))
  predictions_tbl <- model_fit %>%
    modeltime_calibrate(testing(m750_splits)) %>%
    modeltime_forecast(new_data = testing(m750_splits))
  testthat::expect_s3_class(model_fit$fit, "automl_fit_impl")
  testthat::expect_s3_class(model_fit$fit$data, "tbl_df")
  testthat::expect_equal(names(model_fit$fit$data)[1], "date")
  testthat::expect_equal(model_fit$preproc$y_var, "value")
  testthat::expect_identical(nrow(testing(m750_splits)), nrow(predictions_tbl))
  testthat::expect_identical(testing(m750_splits)$date, predictions_tbl$.index)
  # Loose accuracy bounds: residuals only need to stay in a sane range.
  resid <- testing(m750_splits)$value - predictions_tbl$.value
  testthat::expect_lte(max(abs(resid)), 5000)
  testthat::expect_lte(mean(abs(resid)), 2500)
})
test_that("automl_reg: Workflow Test", {
  testthat::skip_on_cran()
  recipe_spec <- recipe(value ~ date, data = training(m750_splits)) %>%
    step_log(value, skip = FALSE) %>%
    step_date(date, features = "month") %>%
    step_mutate(date_num = as.numeric(date))
  wflw <- workflow() %>%
    add_recipe(recipe_spec) %>%
    add_model(model_spec)
  wflw_fit <<- wflw %>%
    fit(training(m750_splits))
  # Forecast on the log scale, then invert the step_log transform.
  predictions_tbl <- wflw_fit %>%
    modeltime_calibrate(testing(m750_splits)) %>%
    modeltime_forecast(
      new_data = testing(m750_splits),
      actual_data = training(m750_splits)
    ) %>%
    mutate_at(vars(.value), exp)
  testthat::expect_s3_class(wflw_fit$fit$fit$fit, "automl_fit_impl")
  testthat::expect_s3_class(wflw_fit$fit$fit$fit$data, "tbl_df")
  testthat::expect_equal(names(wflw_fit$fit$fit$fit$data)[1], "date")
  mld <- wflw_fit %>% workflows::pull_workflow_mold()
  testthat::expect_equal(names(mld$outcomes), "value")
  full_data <- bind_rows(training(m750_splits), testing(m750_splits))
  testthat::expect_identical(nrow(full_data), nrow(predictions_tbl))
  testthat::expect_identical(full_data$date, predictions_tbl$.index)
  predictions_tbl <- predictions_tbl %>% filter(.key == "prediction")
  resid <- testing(m750_splits)$value - predictions_tbl$.value
  testthat::expect_lte(max(abs(resid)), 10000)
  testthat::expect_lte(mean(abs(resid)), 5000)
})
test_that("automl_leaderboard() works.", {
  testthat::skip_on_cran()
  expect_s3_class(automl_leaderboard(model_fit), "tbl_df")
  expect_s3_class(automl_leaderboard(wflw_fit), "tbl_df")
  # Unfitted / invalid inputs must error.
  expect_error(
    automl_leaderboard(workflow())
  )
  expect_error(
    workflow() %>%
      add_model(automl_reg() %>% set_engine("h2o")) %>%
      automl_leaderboard()
  )
  expect_error(
    automl_leaderboard(automl_reg())
  )
  expect_error(
    automl_leaderboard("a")
  )
})
test_that("automl_update_model() works.", {
  testthat::skip_on_cran()
  # Swap the leader model for the runner-up and check it is wired through.
  model_ids <- automl_leaderboard(model_fit) %>% pull(model_id)
  model_id_1 <- model_ids[1]
  model_id_2 <- model_ids[2]
  model_fit_swapped <- automl_update_model(model_fit, model_id_2)
  model_2 <- h2o.getModel(model_id_2)
  expect_equal(model_fit_swapped$fit$models$model_1, model_2)
  expect_equal(
    model_fit_swapped$fit$desc,
    stringr::str_glue('H2O AutoML - {stringr::str_to_title(model_2@algorithm)}')
  )
  # Same swap through the workflow interface.
  model_ids <- automl_leaderboard(wflw_fit) %>% pull(model_id)
  model_id_1 <- model_ids[1]
  model_id_2 <- model_ids[2]
  model_fit_swapped <- automl_update_model(wflw_fit, model_id_2)
  model_2 <- h2o.getModel(model_id_2)
  expect_equal(model_fit_swapped$fit$fit$fit$models$model_1, model_2)
  expect_equal(
    model_fit_swapped$fit$fit$fit$desc,
    stringr::str_glue('H2O AutoML - {stringr::str_to_title(model_2@algorithm)}')
  )
  expect_error(
    automl_update_model()
  )
  expect_error(
    automl_update_model("a")
  )
  expect_error(
    automl_update_model(wflw_fit, "A")
  )
})
testthat::test_that("Shutdown H2O", {
  testthat::skip_on_cran()
  h2o.shutdown(prompt = FALSE)
}) |
library(pcalg)
# Regression test for pcalg::compareGraphs(): build two 5-node DAGs from
# adjacency matrices and check the true/false positive and true discovery
# rates against their hand-computed values (rounded to 5 decimals).
amat1 <- t(cbind(c(0,1,0,1,0),c(0,0,1,0,1),c(0,0,0,1,1),c(0,0,0,0,1),c(0,0,0,0,0)))
amat2 <- t(cbind(c(0,1,0,1,1),c(0,0,0,0,1),c(0,0,0,0,1),c(0,0,0,0,1),c(0,0,0,0,0)))
g1 <- as(amat1, "graphNEL")
g2 <- as(amat2, "graphNEL")
res <- compareGraphs(g1, g2)
# Scalar comparisons: short-circuit || is the idiomatic operator here
# (element-wise | worked only because each operand has length 1).
if ((round(res["tpr"], 5) != 0.83333) ||
    (round(res["fpr"], 5) != 0.5) ||
    (round(res["tdr"], 5) != 0.71429)) {
  stop("Test of compareGraphs: Theoretical values not matched!")
}
# Shared setup for the ENMevaluate test suite. The skip_* flags let the
# long-running algorithms be toggled individually.
skip_tests_for_cran <- TRUE
skip_maxnet <- FALSE
skip_maxent.jar <- TRUE
skip_bioclim <- TRUE
skip_simDiff <- TRUE
library(dplyr)
# NOTE(review): globally suppresses warnings for the whole suite; consider
# suppressWarnings() around the specific noisy calls instead.
options(warn=-1)
set.seed(48)
# Occurrence coordinates and environmental rasters from dismo's example data.
occs <- read.csv(file.path(system.file(package="dismo"), "/ex/bradypus.csv"))[,2:3]
envs.orig <- raster::stack(list.files(path=paste(system.file(package='dismo'), '/ex', sep=''),
                                      pattern='grd', full.names=TRUE))
occs.z <- cbind(occs, raster::extract(envs.orig, occs))
occs.z$biome <- factor(occs.z$biome)
# 1000 random background points with the same environmental covariates.
bg <- as.data.frame(dismo::randomPoints(envs.orig, 1000))
names(bg) <- names(occs)
bg.z <- cbind(bg, raster::extract(envs.orig, bg))
bg.z$biome <- factor(bg.z$biome)
algorithms <- c("maxnet", "maxent.jar", "bioclim")
no.iter <- 5
# Exercise ENMevaluate / ENMnulls / plotting helpers for each algorithm
# across every supported partitioning scheme. The test_* helpers are defined
# elsewhere in the test suite — TODO confirm their signatures against that file.
for(alg in algorithms) {
if(alg == "maxnet" & skip_maxnet == TRUE) next
if(alg == "maxent.jar" & skip_maxent.jar == TRUE) next
if(alg == "bioclim" & skip_bioclim == TRUE) next
# bioclim cannot use categorical predictors or extrapolate, so drop the
# biome layer (index 9) and its columns for that algorithm only.
if(alg == "bioclim") {
envs <- envs.orig[[-9]]
cats1 <- NULL
occs.z$biome <- NULL
bg.z$biome <- NULL
extrap <- FALSE
}else{
envs <- envs.orig
cats1 <- "biome"
extrap <- TRUE
}
# Algorithm-specific tuning grids; mset picks the first setting of each
# tuning argument for the null-model runs.
if(alg == "bioclim") tune.args <- list(tails = c("low", "high", "both"))
if(alg %in% c("maxnet", "maxent.jar")) tune.args <- list(fc = c("L"), rm = 2:3)
mset <- lapply(tune.args, function(x) x[1])
# ---- block partitions (4 groups for both occs and bg) -------------------
context(paste("Testing ENMevaluate for", alg, "with block partitions..."))
e <- ENMevaluate(occs, envs, bg, tune.args = tune.args, partitions = "block", algorithm = alg, categoricals = cats1, overlap = TRUE, quiet = TRUE)
test_ENMevaluation(e, alg, "block", tune.args, 4, 4)
context(paste("Testing evalplot.stats for", alg, "with block partitions..."))
test_evalplot.stats(e)
grps <- get.block(occs, bg)
context(paste("Testing evalplot.envSim.hist for", alg, "with block partitions..."))
test_evalplot.envSim.hist(e, occs.z, bg.z, grps$occs.grp, grps$bg.grp)
context(paste("Testing evalplot.envSim.map for", alg, "with block partitions..."))
test_evalplot.envSim.map(e, envs, occs.z, bg.z, grps$occs.grp, grps$bg.grp, skip_simDiff = skip_simDiff)
context(paste("Testing ENMnulls for", alg, "with block partitions..."))
ns <- ENMnulls(e, mod.settings = mset, no.iter = no.iter, quiet = TRUE)
test_ENMnulls(e, ns, no.iter, alg, "block", mset, 4, 4)
context(paste("Testing evalplot.nulls for", alg, "with block partitions..."))
test_evalplot.nulls(ns)
# ---- checkerboard1 partitions (2 occ groups, 2 bg groups; CRAN-skipped) --
if(skip_tests_for_cran == FALSE) {
context(paste("Testing ENMevaluate for", alg, "with checkerboard1 partitions..."))
e <- ENMevaluate(occs, envs, bg, tune.args = tune.args, partitions = "checkerboard1", algorithm = alg, categoricals = cats1, overlap = TRUE, quiet = TRUE)
test_ENMevaluation(e, alg, "checkerboard1", tune.args, 2, 2)
context(paste("Testing evalplot.stats for", alg, "with checkerboard1 partitions..."))
test_evalplot.stats(e)
grps <- get.checkerboard1(occs, envs, bg, aggregation.factor = 2)
context(paste("Testing evalplot.envSim.hist for", alg, "with checkerboard1 partitions..."))
test_evalplot.envSim.hist(e, occs.z, bg.z, grps$occs.grp, grps$bg.grp)
context(paste("Testing evalplot.envSim.map for", alg, "with checkerboard1 partitions..."))
test_evalplot.envSim.map(e, envs, occs.z, bg.z, grps$occs.grp, grps$bg.grp, skip_simDiff = skip_simDiff)
context(paste("Testing ENMnulls for", alg, "with checkerboard1 partitions..."))
ns <- ENMnulls(e, mod.settings = mset, no.iter = no.iter, quiet = TRUE)
test_ENMnulls(e, ns, no.iter, alg, "checkerboard1", mset, 2, 2)
context(paste("Testing ENMnulls plotting function for", alg, "with checkerboard1 partitions..."))
test_evalplot.nulls(ns)
}
# ---- checkerboard2 partitions (4 groups; CRAN-skipped) ------------------
if(skip_tests_for_cran == FALSE) {
context(paste("Testing ENMevaluate for", alg, "with checkerboard2 partitions..."))
e <- ENMevaluate(occs, envs, bg, tune.args = tune.args, partitions = "checkerboard2", algorithm = alg, categoricals = cats1, overlap = TRUE, quiet = TRUE)
test_ENMevaluation(e, alg, "checkerboard2", tune.args, 4, 4)
context(paste("Testing evalplot.stats for", alg, "with checkerboard2 partitions..."))
test_evalplot.stats(e)
grps <- get.checkerboard2(occs, envs, bg, aggregation.factor = 2)
context(paste("Testing evalplot.envSim.hist for", alg, "with checkerboard2 partitions..."))
test_evalplot.envSim.hist(e, occs.z, bg.z, grps$occs.grp, grps$bg.grp)
context(paste("Testing evalplot.envSim.map for", alg, "with checkerboard2 partitions..."))
test_evalplot.envSim.map(e, envs, occs.z, bg.z, grps$occs.grp, grps$bg.grp, skip_simDiff = skip_simDiff)
context(paste("Testing ENMnulls for", alg, "with checkerboard2 partitions..."))
ns <- ENMnulls(e, mod.settings = mset, no.iter = no.iter, quiet = TRUE)
test_ENMnulls(e, ns, no.iter, alg, "checkerboard2", mset, 4, 4)
context(paste("Testing ENMnulls plotting function for", alg, "with checkerboard2 partitions..."))
test_evalplot.nulls(ns)
}
# ---- random k-fold partitions (5 folds; bg all in group 0; CRAN-skipped) -
if(skip_tests_for_cran == FALSE) {
context(paste("Testing ENMevaluate for", alg, "with random 5-fold partitions..."))
e <- ENMevaluate(occs, envs, bg, tune.args = tune.args, partitions = "randomkfold", algorithm = alg, categoricals = cats1, overlap = TRUE, quiet = TRUE)
test_ENMevaluation(e, alg, "randomkfold", tune.args, 5, 1)
context(paste("Testing evalplot.stats for", alg, "with random 5-fold partitions..."))
test_evalplot.stats(e)
grps <- get.randomkfold(occs, bg, kfolds = 5)
context(paste("Testing evalplot.envSim.hist for", alg, "with random 5-fold partitions..."))
test_evalplot.envSim.hist(e, occs.z, bg.z, grps$occs.grp, grps$bg.grp, bg.sel = 0)
context(paste("Testing evalplot.envSim.map for", alg, "with random 5-fold partitions..."))
test_evalplot.envSim.map(e, envs, occs.z, bg.z, grps$occs.grp, grps$bg.grp, bg.sel = 0, skip_simDiff = skip_simDiff)
context(paste("Testing ENMnulls for", alg, "with random 5-fold partitions..."))
ns <- ENMnulls(e, mod.settings = mset, no.iter = no.iter, quiet = TRUE)
test_ENMnulls(e, ns, no.iter, alg, "randomkfold", mset, 5, 1)
context(paste("Testing ENMnulls plotting function for", alg, "with random 5-fold partitions..."))
test_evalplot.nulls(ns)
}
# ---- jackknife partitions (leave-one-out on first 10 occs; CRAN-skipped) -
if(skip_tests_for_cran == FALSE) {
context(paste("Testing ENMevaluate for", alg, "with jackknife partitions..."))
e <- ENMevaluate(occs[1:10,], envs, bg, tune.args = tune.args, partitions = "jackknife", algorithm = alg, overlap = TRUE, quiet = TRUE)
test_ENMevaluation(e, alg, "jackknife", tune.args, nrow(e@occs), 1)
context(paste("Testing evalplot.stats for", alg, "with testing partition..."))
test_evalplot.stats(e)
context(paste("Testing ENMnulls for", alg, "with jackknife partitions..."))
ns <- ENMnulls(e, mod.settings = mset, no.iter = no.iter, quiet = TRUE)
test_ENMnulls(e, ns, no.iter, alg, "jackknife", mset, nrow(e@occs), 1)
context(paste("Testing ENMnulls plotting function for", alg, "with jackknife partitions..."))
test_evalplot.nulls(ns)
}
# ---- fully withheld testing partition (occs split 1:100 / 101:end) ------
context(paste("Testing ENMevaluate for", alg, "with testing partition..."))
e <- ENMevaluate(occs[1:100,], envs, bg, tune.args = tune.args, partitions = "testing", algorithm = alg, categoricals = cats1, occs.testing = occs[101:nrow(occs),], overlap = TRUE, quiet = TRUE)
test_ENMevaluation(e, alg, "testing", tune.args, 1, 1)
context(paste("Testing evalplot.stats for", alg, "with testing partition..."))
test_evalplot.stats(e)
grps <- list(occs.grp = rep(0, nrow(occs)), bg.grp = rep(0, nrow(bg)))
context(paste("Testing evalplot.envSim.hist for", alg, "with testing partition..."))
test_evalplot.envSim.hist(e, occs.z, bg.z, grps$occs.grp, grps$bg.grp, bg.sel = 0, occs.testing.z = e@occs.testing)
context(paste("Testing evalplot.envSim.map for", alg, "with testing partition..."))
test_evalplot.envSim.map(e, envs, occs.z, bg.z, grps$occs.grp, grps$bg.grp, bg.sel = 0, occs.testing.z = e@occs.testing, skip_simDiff = skip_simDiff)
context(paste("Testing ENMnulls for", alg, "with testing partitions..."))
ns <- ENMnulls(e, mod.settings = mset, no.iter = no.iter, quiet = TRUE)
test_ENMnulls(e, ns, no.iter, alg, "testing", mset, 1, 1)
context(paste("Testing ENMnulls plotting function for", alg, "with testing partition..."))
test_evalplot.nulls(ns)
# ---- no partitions (CRAN-skipped) ---------------------------------------
if(skip_tests_for_cran == FALSE) {
context(paste("Testing ENMevaluate for", alg, "with no partitions..."))
e <- ENMevaluate(occs, envs, bg, tune.args = tune.args, partitions = "none", algorithm = alg, categoricals = cats1, overlap = TRUE, quiet = TRUE)
test_ENMevaluation(e, alg, "none", tune.args, 1, 1)
context(paste("Testing ENMnulls for", alg, "with no partitions..."))
ns <- ENMnulls(e, mod.settings = mset, no.iter = no.iter, quiet = TRUE)
test_ENMnulls(e, ns, no.iter, alg, "none", mset, 1, 1)
context(paste("Testing ENMnulls plotting function for", alg, "with no partitions..."))
test_evalplot.nulls(ns)
}
# ---- user-supplied partitions (random groups 1..4) ----------------------
context(paste("Testing ENMevaluate for", alg, "with user partitions..."))
user.grp <- list(occs.grp = round(runif(nrow(occs), 1, 4)), bg.grp = round(runif(nrow(bg), 1, 4)))
e <- ENMevaluate(occs, envs, bg, tune.args = tune.args, partitions = "user", algorithm = alg, categoricals = cats1, user.grp = user.grp, overlap = TRUE, quiet = TRUE)
test_ENMevaluation(e, alg, "user", tune.args, 4, 4)
context(paste("Testing evalplot.stats for", alg, "with user partitions..."))
test_evalplot.stats(e)
context(paste("Testing evalplot.envSim.hist for", alg, "with user partitions..."))
test_evalplot.envSim.hist(e, occs.z, bg.z, user.grp$occs.grp, user.grp$bg.grp)
context(paste("Testing evalplot.envSim.map for", alg, "with user partitions..."))
test_evalplot.envSim.map(e, envs, occs.z, bg.z, user.grp$occs.grp, user.grp$bg.grp, skip_simDiff = skip_simDiff)
context(paste("Testing ENMnulls for", alg, "with user partitions..."))
ns <- ENMnulls(e, mod.settings = mset, no.iter = no.iter, user.eval.type = "kspatial", quiet = TRUE)
test_ENMnulls(e, ns, no.iter, alg, "user", mset, 4, 4)
context(paste("Testing ENMnulls plotting function for", alg, "with user partitions..."))
test_evalplot.nulls(ns)
# ---- SWD mode: occurrence/bg tables only, no rasters (CRAN-skipped) -----
if(skip_tests_for_cran == FALSE) {
context(paste("Testing ENMevaluate for", alg, "with random 5-fold partitions and no raster environmental variables..."))
e <- ENMevaluate(occs.z, bg = bg.z, tune.args = tune.args, partitions = "randomkfold", algorithm = alg, categoricals = cats1, quiet = TRUE)
test_ENMevaluation(e, alg, "randomkfold", tune.args, 5, 1, type = "swd")
context(paste("Testing evalplot.stats for", alg, "with random 5-fold partitions and no raster environmental variables..."))
test_evalplot.stats(e)
grps <- get.randomkfold(occs, bg, kfolds = 5)
context(paste("Testing evalplot.envSim.hist for", alg, "with random 5-fold partitions and no raster environmental variables..."))
test_evalplot.envSim.hist(e, occs.z, bg.z, grps$occs.grp, grps$bg.grp, bg.sel = 0, categoricals = cats1)
context(paste("Testing evalplot.envSim.map for", alg, "with random 5-fold partitions and no raster environmental variables..."))
test_evalplot.envSim.map(e, envs, occs.z, bg.z, grps$occs.grp, grps$bg.grp, bg.sel = 0, skip_simDiff = skip_simDiff)
context(paste("Testing ENMnulls for", alg, "with random 5-fold partitions and no raster environmental variables..."))
ns <- ENMnulls(e, mod.settings = mset, no.iter = no.iter, quiet = TRUE)
test_ENMnulls(e, ns, no.iter, alg, "randomkfold", mset, 5, 1)
context(paste("Testing ENMnulls plotting function for", alg, "with random 5-fold partitions and no raster environmental variables..."))
test_evalplot.nulls(ns)
}
# ---- background auto-sampled by ENMevaluate (n.bg; CRAN-skipped) --------
if(skip_tests_for_cran == FALSE) {
context(paste("Testing ENMevaluate for", alg, "with random 5-fold partitions and no input background data..."))
e <- ENMevaluate(occs, envs, tune.args = tune.args, partitions = "randomkfold", algorithm = alg, n.bg = 1000, categoricals = cats1, overlap = TRUE, quiet = TRUE)
test_ENMevaluation(e, alg, "randomkfold", tune.args, 5, 1)
context(paste("Testing evalplot.stats for", alg, "with random 5-fold partitions and no input background data..."))
test_evalplot.stats(e)
grps <- get.randomkfold(occs, bg, kfolds = 5)
context(paste("Testing evalplot.envSim.hist for", alg, "with random 5-fold partitions and no input background data..."))
test_evalplot.envSim.hist(e, occs.z, bg.z, grps$occs.grp, grps$bg.grp, bg.sel = 0)
context(paste("Testing evalplot.envSim.map for", alg, "with random 5-fold partitions and no input background data..."))
test_evalplot.envSim.map(e, envs, occs.z, bg.z, grps$occs.grp, grps$bg.grp, bg.sel = 0, skip_simDiff = skip_simDiff)
context(paste("Testing ENMnulls for", alg, "with random 5-fold partitions and no input background data..."))
ns <- ENMnulls(e, mod.settings = mset, no.iter = no.iter, quiet = TRUE)
test_ENMnulls(e, ns, no.iter, alg, "randomkfold", mset, 5, 1)
context(paste("Testing ENMnulls plotting function for", alg, "with random 5-fold partitions and no input background data..."))
test_evalplot.nulls(ns)
}
# ---- two categorical variables (second biome layer synthesized) ---------
if(skip_tests_for_cran == FALSE | alg != "bioclim") {
envs.2cat <- raster::addLayer(envs, envs$biome * round(runif(raster::ncell(envs), min = 0, max = 5)))
occs.z.2cat <- cbind(occs, raster::extract(envs.2cat, occs))
occs.z.2cat$biome.1 <- factor(occs.z.2cat$biome.1)
occs.z.2cat$biome.2 <- factor(occs.z.2cat$biome.2)
bg.z.2cat <- cbind(bg, raster::extract(envs.2cat, bg))
bg.z.2cat$biome.1 <- factor(bg.z.2cat$biome.1)
bg.z.2cat$biome.2 <- factor(bg.z.2cat$biome.2)
context(paste("Testing ENMevaluate for", alg, "with random 5-fold partitions and two categorical variables..."))
e.2cat <- ENMevaluate(occs, envs.2cat, bg, tune.args = tune.args, partitions = "randomkfold", algorithm = alg, n.bg = 1000, categoricals = c("biome.1", "biome.2"), overlap = TRUE, quiet = TRUE)
test_ENMevaluation(e.2cat, alg, "randomkfold", tune.args, 5, 1)
context(paste("Testing ENMevaluate for", alg, "with random 5-fold partitions and two categorical variables and no env data..."))
e.2cat.z <- ENMevaluate(occs.z.2cat, bg = bg.z.2cat, tune.args = tune.args, partitions = "randomkfold", algorithm = alg, n.bg = 1000, categoricals = c("biome.1", "biome.2"), overlap = TRUE, quiet = TRUE)
test_ENMevaluation(e.2cat.z, alg, "randomkfold", tune.args, 5, 1, type = "swd")
context(paste("Testing evalplot.stats for", alg, "with random 5-fold partitions and two categorical variables..."))
test_evalplot.stats(e.2cat)
grps <- get.randomkfold(occs, bg, kfolds = 5)
context(paste("Testing evalplot.envSim.hist for", alg, "with random 5-fold partitions and two categorical variables..."))
test_evalplot.envSim.hist(e.2cat, occs.z.2cat, bg.z.2cat, grps$occs.grp, grps$bg.grp, bg.sel = 0, categoricals = c("biome.1", "biome.2"))
context(paste("Testing evalplot.envSim.map for", alg, "with random 5-fold partitions and two categorical variables..."))
test_evalplot.envSim.map(e.2cat, envs.2cat, occs.z.2cat, bg.z.2cat, grps$occs.grp, grps$bg.grp, bg.sel = 0, categoricals = c("biome.1", "biome.2"), skip_simDiff = skip_simDiff)
context(paste("Testing ENMnulls for", alg, "with random 5-fold partitions and two categorical variables..."))
ns <- ENMnulls(e.2cat, mod.settings = mset, no.iter = no.iter, quiet = TRUE)
test_ENMnulls(e.2cat, ns, no.iter, alg, "randomkfold", mset, 5, 1)
context(paste("Testing ENMnulls plotting function for", alg, "with random 5-fold partitions and two categorical variables..."))
test_evalplot.nulls(ns)
}
# ---- clamping (uses `e` and `e.2cat` from the sections above) -----------
context(paste("Testing clamping function for", alg, "with..."))
test_clamp(e, envs, occs.z, bg.z, categoricals = cats1, canExtrapolate = extrap)
context(paste("Testing clamping function for", alg, "with two categorical variables..."))
if(skip_tests_for_cran == FALSE | alg != "bioclim") test_clamp(e.2cat, envs.2cat, occs.z.2cat, bg.z.2cat, categoricals = c("biome.1", "biome.2"))
}
# Monte-Carlo estimate of the prior expectation of FUN(par), drawing
# parameters from `prior` and maintaining a running (online) mean and
# variance. Stops after Nsim draws, or earlier once Nsim.min draws are done
# and the relative standard error of every component falls below `precision`.
#
# Arguments:
#   Nsim          maximum number of Monte-Carlo draws.
#   prior         sampler: prior(type="r", n=1, Hpar=, dimData=) returns one
#                 parameter draw.
#   Hpar, dimData forwarded verbatim to `prior`.
#   FUN           statistic of the parameter; default flattens it.
#   store         keep each FUN value (rows of stored.vals); forced FALSE
#                 when FUN returns a true multi-dimensional array.
#   show.progress iteration indices at which a progress line is printed.
#   Nsim.min      minimum number of draws before the precision stop applies.
#   precision     relative-standard-error threshold (0 = always run Nsim).
#   ...           extra arguments passed to FUN.
#
# Returns a list: stored.vals (matrix of kept values, or 0 when store=FALSE),
# elapsed timing, nsim (draws actually used), emp.mean, emp.stdev,
# est.error (std. error of the mean), not.finite (count of rejected draws).
MCpriorIntFun <-
function(Nsim=200,
prior,
Hpar,
dimData,
FUN=function(par,...){as.vector(par)},
store=TRUE,
show.progress = floor(seq(1, Nsim, length.out = 20 ) ),
Nsim.min=Nsim,
precision = 0,
...)
{
start.time=proc.time()
not.finite=0
# One pilot draw to learn the shape of FUN's output.
param = prior(type = "r", n=1, Hpar=Hpar, dimData=dimData)
temp.res=FUN(param,...)
dim.res=dim(temp.res)
# Vector-like results (no dim, or a degenerate 1-wide dim) keep `store`;
# genuine multi-dimensional arrays cannot be stored row-wise.
if(is.null(dim.res) || (sum(dim.res!=1) ==1) )
{
emp.mean=rep(0,length(temp.res))
}
else
{
store=FALSE
emp.mean=array(0,dim=dim.res)
}
emp.variance= emp.mean
emp.variance.unNorm=emp.variance
if(store)
{
stored.vals=matrix(0,nrow=Nsim,ncol=length(emp.mean))
}
nsim=1
# Main loop: run until Nsim, or until the componentwise relative standard
# error drops below `precision` (only checked once nsim > Nsim.min).
while((nsim<=Nsim) &&
( (nsim<=Nsim.min) ||
(max( sqrt(emp.variance/(nsim-1)) /
abs(emp.mean) ) > precision) )
)
{
if(any(nsim==show.progress))
{
cat(paste((nsim-1), "iterations done", "\n", sep = " " ))
}
# Rejection loop: redraw until FUN yields all-finite values, giving up
# after 50 consecutive failures.
flag=TRUE
count = 0
while(flag & (count<=50))
{
param = prior(type = "r", n=1, Hpar=Hpar, dimData=dimData)
temp.res=FUN(param,...)
flag = (any(sapply(as.vector(temp.res),
function(x){ ! is.finite(x) } ) ) )
if(flag)
{
not.finite = not.finite+1
}
count = count+1
}
if(flag)
stop("more than 50 non finite values produced in a row")
cur.res=temp.res
# Welford-style online update: mean first, then the un-normalized
# sum of squares using both old and new means (order matters here).
new.emp.mean=emp.mean+1/nsim*(cur.res-emp.mean)
emp.variance.unNorm=emp.variance.unNorm +
(cur.res-new.emp.mean)* (cur.res- emp.mean)
emp.variance = emp.variance.unNorm/(nsim-1)
emp.mean = new.emp.mean
if(store)
{
stored.vals[nsim,]= as.vector(cur.res)
}
nsim=nsim+1
}
end.time = proc.time()
elapsed=end.time-start.time
print(elapsed)
# Trim the preallocated storage to the rows actually filled.
if(store)
{
returned.vals=stored.vals[1:(nsim-1),]
}
else
{
returned.vals=0
}
return(list( stored.vals= returned.vals,
elapsed=elapsed,
nsim = nsim-1,
emp.mean=emp.mean,
emp.stdev=sqrt(emp.variance),
est.error=sqrt(emp.variance/(nsim-1)),
not.finite = not.finite))
}
NULL
# Data object stub: names the dataset "ghp100k" so roxygen attaches the
# preceding documentation block to it — the roxygen block itself is not
# visible in this chunk; TODO confirm against the package's data/ directory.
"ghp100k"
kkmeans <- function(K, parameters) {
  # Kernel k-means via spectral relaxation: take the top eigenvectors of the
  # kernel matrix K, row-normalize them, and cluster the rows with ordinary
  # k-means (`parameters$cluster_count` clusters). Returns a list with the
  # clustering, the spectral objective, the input parameters, and timing.
  state <- list()
  state$time <- system.time({
    k <- parameters$cluster_count
    H <- eigen(K, symmetric = TRUE)$vectors[, 1:k]
    # Row-normalize H to the unit sphere; rows with zero norm are zeroed
    # afterwards (the division leaves NaN there first, as in kmeans papers).
    row_norm <- sqrt(rowSums(H^2))
    H_unit <- H / row_norm  # column-major recycling divides row i by row_norm[i]
    H_unit[row_norm == 0, ] <- 0
    # Deliberately re-randomize the RNG so repeated calls explore different
    # k-means starts.
    set.seed(NULL)
    km_fit <- stats::kmeans(H_unit,
                            centers = k,
                            iter.max = 1000,
                            nstart = 10)
    state$clustering <- km_fit$cluster
    # trace(H' K H) - trace(K): the (negated) spectral clustering objective.
    state$objective <- sum(diag(t(H) %*% K %*% H)) - sum(diag(K))
    state$parameters <- parameters
  })
  state
}
translogEla <- function(xNames, data, coef, coefCov = NULL,
                        dataLogged = FALSE) {
  # Elasticities of a translog function: since the translog is quadratic in
  # the logged regressors, they are the derivatives of the quadratic form
  # evaluated on log-transformed data.
  #
  # xNames     names of the exogenous variables in `data`.
  # data       data set (already in logs when dataLogged = TRUE).
  # coef       translog coefficients; needs 1 + n + n(n+1)/2 of them.
  # coefCov    optional covariance matrix of `coef` (for std. errors).
  # dataLogged set TRUE when `data` is already log-transformed.
  checkNames(c(xNames), names(data))

  nExog <- length(xNames)
  minCoef <- 1 + nExog + nExog * (nExog + 1) / 2
  if (minCoef > length(coef)) {
    stop("a translog function with ", nExog, " exogenous variables",
         " must have at least ", minCoef, " coefficients")
  }

  logData <- if (dataLogged) {
    data
  } else {
    logDataSet(data = data, varNames = xNames)
  }

  quadFuncDeriv(xNames = xNames, data = logData, coef = coef,
                coefCov = coefCov)
}
# Unpack (once) the base RSuite test project template and seed its local
# repository with the `logging` package. Returns the template directory;
# subsequent calls short-circuit when the directory already exists.
.init_base_test_templ <- function() {
  templ_dir <- file.path(get_templ_dir(), "BaseTestProjectTemplate")
  if (dir.exists(templ_dir)) {
    return(templ_dir)
  }
  unzip(file.path("data", "BaseTestProjectTemplate.zip"), exdir = get_templ_dir())
  # Temporary build project used only to upload packages into the template's
  # repository; removed again via on.exit.
  build_prj <- RSuite::prj_start("BaseTestProjectBuild", skip_rc = TRUE, path = templ_dir, tmpl = templ_dir)
  params <- build_prj$load_params()
  on.exit({
    unlink(params$prj_path, recursive = TRUE, force = TRUE)
  },
  add = TRUE)
  dst_rmgr <- RSuite::repo_mng_start("Dir",
                                     path = normalizePath(file.path(templ_dir, "project", "repository")),
                                     rver = params$r_ver,
                                     types = params$bin_pkgs_type)
  RSuite::repo_upload_ext_packages(dst_rmgr,
                                   pkgs = c("logging"),
                                   prj = build_prj,
                                   pkg_type = params$bin_pkgs_type)
  RSuite::repo_mng_stop(dst_rmgr)
  return(templ_dir)
}
# Module-level registry mapping template names to their on-disk directories;
# written by register_project_templ() and read by get_project_templ().
.templ_env <- new.env()
assign("templates", list(), envir = .templ_env)
# Create (if absent) and register a named project template derived from the
# base test template. `init_f(prj)` customizes a throwaway build project
# whose repository is then moved into the template; the template path is
# recorded in the .templ_env registry under `templ_name`.
register_project_templ <- function(templ_name, init_f) {
  eval_managed(sprintf("Registering project template %s", templ_name), {
    .init_base_test_templ()
    base_dir <- get_templ_dir()
    templ_dir <- file.path(base_dir, templ_name)
    if (!dir.exists(templ_dir)) {
      RSuite::tmpl_start(templ_name, path = base_dir, add_pkg = FALSE,
                         base_tmpl = file.path(base_dir, "BaseTestProjectTemplate"))
      # Throwaway project: init_f populates its repository, which then
      # replaces the template's repository; the project itself is deleted.
      prj <- RSuite::prj_start(paste0(templ_name, "_Build"), skip_rc = T, path = base_dir, tmpl = templ_dir)
      on.exit({
        unlink(prj$path, recursive = TRUE, force = TRUE)
      },
      add = TRUE)
      init_f(prj)
      unlink(file.path(templ_dir, "project", "repository"), recursive = TRUE, force = TRUE)
      file.rename(file.path(prj$path, "repository"), file.path(templ_dir, "project", "repository"))
    }
    # Record the template (idempotent: re-registration just rewrites entry).
    templs <- get("templates", envir = .templ_env)
    templs[[templ_name]] <- templ_dir
    assign("templates", templs, envir = .templ_env)
  })
}
get_project_templ <- function(templ_name) {
  # Look up a project template previously registered with
  # register_project_templ(); fail loudly for unknown names.
  registered <- get("templates", envir = .templ_env)
  if (!(templ_name %in% names(registered))) {
    stop(sprintf("Requested non registered project template: %s", templ_name))
  }
  registered[[templ_name]]
}
# Create a fresh test project in the workspace from a template (base template
# by default), configure its repo adapters, strip the bundled `logging`
# library and the SnapshotDate PARAMETERS entry, and schedule cleanup on
# test exit. Returns the RSuite project handle.
init_test_project <- function(repo_adapters = c("Dir"), name = "TestProject", tmpl = NULL,
                              skip_rc = T) {
  if (is.null(tmpl)) {
    tmpl <- .init_base_test_templ()
  }
  RSuite::prj_load()
  prj <- RSuite::prj_start(name, skip_rc = skip_rc, path = get_wspace_dir(), tmpl = tmpl)
  RSuite::prj_config_set_repo_adapters(repos = repo_adapters, prj = prj)
  # Remove pre-installed logging so dependency installation is exercised.
  unlink(file.path(prj$path, "deployment", "libs", "logging"),
         recursive = T, force = T)
  # Drop SnapshotDate so tests are not pinned to the template's date.
  params_path <- file.path(prj$path, "PARAMETERS")
  params_df <- data.frame(read.dcf(file = params_path))
  params_df$SnapshotDate <- NULL
  write.dcf(params_df, file = params_path)
  on_test_exit(function() {
    unlink(prj$path, recursive = T, force = T)
  })
  return(prj)
}
remove_package_from_lrepo <- function(pkg_file, prj, type = .Platform$pkgType) {
  # Delete one package file from the project's local repository and rebuild
  # the repository's PACKAGES index so the removal is visible to clients.
  repo_path <- .get_local_repo_path(prj, type)
  unlink(file.path(repo_path, pkg_file), recursive = TRUE, force = TRUE)
  RSuite:::rsuite_write_PACKAGES(repo_path, type = type)
}
create_test_package <- function(name, prj, ver = "1.0", deps = "",
                                imps = "", sysreqs = "", tmpl = "builtin",
                                skip_rc = TRUE) {
  # Create a package skeleton inside the test project and patch its
  # DESCRIPTION: Version is always set; Depends/Imports/SystemRequirements
  # only when non-empty values are supplied. Returns the package path
  # invisibly.
  #
  # Fixes vs. previous version: `skip_rc` default spelled TRUE (not the
  # reassignable `T`), and empty strings are filtered out of deps/imps so a
  # vector like c("a", "") no longer yields a malformed "a, " field.
  RSuite::prj_start_package(name, prj = prj, skip_rc = skip_rc, tmpl = tmpl)
  pkg_path <- file.path(prj$path, "packages", name)
  pkg_desc_fname <- file.path(pkg_path, "DESCRIPTION")
  if (file.exists(pkg_desc_fname)) {
    pkg_desc <- data.frame(read.dcf(file = pkg_desc_fname))
    pkg_desc$Version <- ver
    deps <- trimws(deps)
    deps <- deps[nchar(deps) > 0]
    if (length(deps) > 0) {
      pkg_desc$Depends <- paste(deps, collapse = ", ")
    }
    imps <- trimws(imps)
    imps <- imps[nchar(imps) > 0]
    if (length(imps) > 0) {
      pkg_desc$Imports <- paste(imps, collapse = ", ")
    }
    sysreqs <- trimws(sysreqs)
    if (sum(nchar(sysreqs)) > 0) {
      pkg_desc$SystemRequirements <- sysreqs
    }
    write.dcf(pkg_desc, file = pkg_desc_fname)
  }
  invisible(pkg_path)
}
set_test_package_ns_imports <- function(name, prj, imps) {
  # Write roxygen `@import` declarations for the given packages into the
  # test package's R/packages_import.R so the generated NAMESPACE imports
  # them. NOTE(review): the original line was truncated mid-string in this
  # chunk (unterminated `writeLines(c(sprintf("`); this body reconstructs
  # the evident intent — confirm against the original RSuite test sources.
  imp_path <- file.path(prj$path, "packages", name, "R", "packages_import.R")
  writeLines(c(sprintf("#' @import %s", imps), "NULL"), con = imp_path)
}
create_test_master_script <- function(code, prj) {
  # Write `code` (a character vector of lines) into a freshly named master
  # script under <project>/R; returns the script path invisibly.
  script_path <- tempfile(pattern = "test_", fileext = ".R",
                          tmpdir = file.path(prj$path, "R"))
  con <- file(script_path, "w")
  writeLines(code, con = con)
  close(con)
  invisible(script_path)
}
# Create a test package, build it through the project's internal repository,
# and copy the built file into the project's local repository (refreshing
# its PACKAGES index). The package sources and installed libs are cleaned
# up on exit so only the repository artifact remains.
create_package_deploy_to_lrepo <- function(name, prj, ver = "1.0", type = .Platform$pkgType,
                                           deps = "", sysreqs = "", imps = "logging") {
  pkg_path <- create_test_package(name, prj, ver, deps = deps, imps = imps, sysreqs = sysreqs)
  set_test_package_ns_imports(name, prj, unlist(strsplit(imps, ",")))
  params <- prj$load_params()
  on.exit({
    unlink(pkg_path, recursive = T, force = T)
    unlink(file.path(params$lib_path, "*"), recursive = T, force = T)
  }, add = T)
  loc_repo <- .get_local_repo_path(prj, type)
  prj_install_deps(prj, clean = T)
  prj_build(prj, type = type)
  # Locate the freshly built file in the internal repo and publish it to the
  # local repo.
  int_path <- RSuite:::rsuite_contrib_url(repos = params$get_intern_repo_path(), type = type)
  avails <- data.frame(available.packages(sprintf("file:///%s", int_path), type = type),
                       stringsAsFactors = F)
  pkg_file <- avails[avails$Package == name, "File"]
  file.copy(from = file.path(int_path, pkg_file), to = loc_repo)
  RSuite:::rsuite_write_PACKAGES(loc_repo, type = type)
}
remove_test_packages <- function(prj) {
  # Drop every package directory from the project's packages/ folder.
  pkgs_glob <- file.path(prj$path, "packages", "*")
  unlink(pkgs_glob, recursive = TRUE, force = TRUE)
}
set_test_package_deps <- function(name, prj, deps = NULL, sugs = NULL) {
  # Overwrite the Depends/Suggests fields in a test package's DESCRIPTION.
  # NULL leaves a field untouched; a missing DESCRIPTION is a silent no-op.
  #
  # Fix: previously write.dcf() sat outside the file.exists() guard, so a
  # missing DESCRIPTION raised "object 'pkg_desc' not found" instead of
  # being skipped (contrast create_test_package, which guards the write).
  params <- prj$load_params()
  pkg_desc_fname <- file.path(params$pkgs_path, name, "DESCRIPTION")
  if (!file.exists(pkg_desc_fname)) {
    return(invisible(NULL))
  }
  pkg_desc <- data.frame(read.dcf(file = pkg_desc_fname))
  if (!is.null(deps)) {
    pkg_desc$Depends <- paste(deps, collapse = ", ")
  }
  if (!is.null(sugs)) {
    pkg_desc$Suggests <- paste(sugs, collapse = ", ")
  }
  write.dcf(pkg_desc, file = pkg_desc_fname)
}
.get_local_repo_path <- function(prj, type) {
  # Resolve the contrib URL of the project's local repository for the given
  # package type; the directory must already exist.
  repo_url <- RSuite:::rsuite_contrib_url(repos = file.path(prj$path, "repository"),
                                          type = type)
  stopifnot(dir.exists(repo_url))
  normalizePath(repo_url)
}
# testthat expectation: the project's deployment library (sbox when
# supports=TRUE) contains exactly `names`, optionally at `versions`
# (NA entries skip the version check). Returns installed names invisibly.
expect_that_packages_installed <- function(names, prj, versions = NULL, supports = FALSE) {
  stopifnot(is.null(versions) || length(names) == length(versions))
  if (supports) {
    lib_path <- file.path(prj$path, "deployment", "sbox")
  } else {
    lib_path <- file.path(prj$path, "deployment", "libs")
  }
  installed <- installed.packages(lib.loc = lib_path, noCache = T)[, "Package"]
  pass <- setequal(installed, names)
  # Build the failure message: missing packages take precedence over
  # unexpected ones; both empty with unequal sets should be impossible.
  if (pass) {
    msg <- ""
  } else if (length(setdiff(names, installed)) > 0) {
    msg <- sprintf("Package(s) %s failed to install", paste(setdiff(names, installed), collapse = ", "))
  } else if (length(setdiff(installed, names)) > 0) {
    msg <- sprintf("Unexpected package(s) %s installed", paste(setdiff(installed, names), collapse = ", "))
  } else {
    stop(sprintf("Unexpected condition occured: %s != %s!!!", paste(names, collapse = ", "), paste(installed, collapse = ", ")))
  }
  # Sets match: additionally verify the requested versions (NA = any).
  if (pass && !is.null(versions)) {
    inst_vers <- as.data.frame(installed.packages(lib.loc = lib_path, noCache = T), stringsAsFactors = F)[, c("Package", "Version")]
    expt_vers <- data.frame(Package = names, Expected = versions)
    failed_vers <- merge(x = inst_vers, y = expt_vers, by.x = "Package", by.y = "Package")
    failed_vers <- failed_vers[!is.na(failed_vers$Expected) & failed_vers$Version != failed_vers$Expected, ]
    pass <- nrow(failed_vers) == 0
    if (!pass) {
      msg <- sprintf("Unexpected versions installed ([pkg]ver!=exp): %s",
                     paste(sprintf("[%s]%s!=%s", failed_vers$Package, failed_vers$Version, failed_vers$Expected),
                           collapse = ", "))
    }
  }
  expect(pass, msg)
  invisible(installed)
}
expect_that_has_docs <- function(topics, pkg_name, prj) {
  # testthat expectation: every entry of `topics` appears in the installed
  # package's help index (help/AnIndex, tab-separated "topic<TAB>file").
  #
  # Fixes vs. previous version: TRUE/FALSE instead of reassignable T/F, and
  # the "Documetation" typo in the failure message corrected.
  doc_path <- file.path(prj$path, "deployment", "libs", pkg_name, "help", "AnIndex")
  if (!file.exists(doc_path)) {
    pass <- FALSE
    msg <- sprintf("No documentation index found for %s", pkg_name)
  } else {
    lines <- readLines(doc_path)
    # First tab-separated field of each AnIndex line is the topic name.
    all_topics <- unlist(lapply(strsplit(lines, "\t"), function(ent) { ent[1] }))
    pass <- all(topics %in% all_topics)
    if (!pass) {
      msg <- sprintf("Documentation topics not found in %s: %s",
                     pkg_name, paste(setdiff(topics, all_topics), collapse = ", "))
    } else {
      msg <- ""
    }
  }
  expect(pass, msg)
}
expect_that_packages_locked <- function(expects, params) {
  # testthat expectation: the project's lock file contains exactly the
  # packages named by `names(expects)` at the versions given by the values
  # (NA skips the version check for that package).
  #
  # Fix vs. previous version: the version-mismatch message is now only built
  # when the check fails, matching expect_that_packages_installed (before,
  # `msg` was overwritten unconditionally; harmless since expect() ignores
  # the message on success, but inconsistent).
  lock_data <- data.frame(read.dcf(params$lock_path), stringsAsFactors = FALSE)
  expected_data <- data.frame(Package = names(expects), Expected = expects)
  locked <- lock_data$Package
  pass <- setequal(locked, expected_data$Package)
  if (pass) {
    msg <- ""
  } else if (length(setdiff(expected_data$Package, locked)) > 0) {
    msg <- sprintf("Package(s) %s failed to lock",
                   paste(setdiff(expected_data$Package, locked), collapse = ", "))
  } else if (length(setdiff(locked, expected_data$Package)) > 0) {
    msg <- sprintf("Unexpected package(s) %s locked",
                   paste(setdiff(locked, expected_data$Package), collapse = ", "))
  } else {
    stop(sprintf("Unexpected condition occured: %s != %s!!!",
                 paste(expected_data$Package, collapse = ", "),
                 paste(locked, collapse = ", ")))
  }
  if (pass) {
    # Same-set case: additionally verify the locked versions.
    failed_vers <- merge(x = lock_data, y = expected_data, by.x = "Package", by.y = "Package")
    failed_vers <- failed_vers[!is.na(failed_vers$Expected) & failed_vers$Version != failed_vers$Expected, ]
    pass <- nrow(failed_vers) == 0
    if (!pass) {
      msg <- sprintf("Unexpected versions locked ([pkg]ver!=exp): %s",
                     paste(sprintf("[%s]%s!=%s", failed_vers$Package, failed_vers$Version, failed_vers$Expected),
                           collapse = ", "))
    }
  }
  expect(pass, msg)
}
getGVGenotype <- function(ped) {
  # Extract the genotype columns (id, first, second) from a pedigree data
  # frame when genotype data is present; otherwise return NULL.
  if (!hasGenotype(ped)) {
    return(NULL)
  }
  ped[, c("id", "first", "second")]
}
End of preview. Expand
in Dataset Viewer.
No dataset card yet
- Downloads last month
- 53