id (stringlengths 40) | repo_name (stringlengths 5-110) | path (stringlengths 2-233) | content (stringlengths 0-1.03M ⌀) | size (int32 0-60M ⌀) | license (stringclasses 15 values)
---|---|---|---|---|---|
31ea9fcd32a4abbe86e6955e8439ecbc1ab20520 | chendaniely/multidisciplinary-diffusion-model-experiments | staging/recurrent/src/helper-goodness.R | ################################################################################
#
# Goodness calculation functions
#
################################################################################
#' placeholder: external input to a unit (currently always 0)
get_input_i <- function(unit_number){
    input <- 0
    return(input)
}
#' placeholder: bias term for a unit (currently always 1)
get_bias_i <- function(i_j_index_number){
    bias <- 1
    return(bias)
}
#' get index values for all other nodes in the
#' 'input', 'hidden', and 'inputmirror' banks
get_ks <- function(i_or_j, i_value, j_value, num_units_per_bank_0,
num_units_per_hidden_bank_0){
all_same_bank_number <- c(0:num_units_per_bank_0)
    same_bank <- all_same_bank_number[!all_same_bank_number %in% c(i_value, j_value)]
opposite_bank <- ifelse(i_or_j == 'i', i_value, j_value)
hidden_bank <- c(0:num_units_per_hidden_bank_0)
return(list(same_bank = same_bank,
opposite_bank = opposite_bank,
hidden_bank = hidden_bank))
}
# NOTE: a_i_index, a_j_index and num_units_per_bank_0 are assumed to be
# defined earlier in the session; this top-level call fails otherwise
k_ai <- get_ks('i', a_i_index, a_j_index, num_units_per_bank_0, 9)
k_ai
#' get activation value for a_k
#' currently returns 0.5
get_a_k <- function(){
return(0.5)
}
#' get the weight w_ij for unit k (stub, not yet implemented)
get_w_ij_k <- function(index_in_unlist_k_ai){
}
#' First term of the Goodness function on the 2 activation units
#' 2 activation units share 1 weight between them
#' \sum_{i}\sum_{j > i} w_{ij} a_i a_j
calculate_goodness_t1 <- function(ai, aj, a_i_pu_index, a_j_pu_index,
same_bank_values){
w <- same_bank_weight_matrix[
row.names(same_bank_weight_matrix) ==
a_i_pu_index,
colnames(same_bank_weight_matrix) ==
a_j_pu_index]
t1 <- w * ai * aj
if (is.na(t1)){
stop("t1 value is NA")
}
    return(t1)
}
calculate_goodness_t2 <- function(ai, input_i){
t2 <- ai * input_i
    if (is.na(t2)){
        stop("t2 value is NA")
    }
return(t2)
}
calculate_goodness <- function(ai_aj_set, a_i_pu_index, a_j_pu_index,
same_bank_values,
opposite_bank_values,
hidden_bank_values){
ai <- ai_aj_set[1]
aj <- ai_aj_set[2]
t1 <- calculate_goodness_t1(ai, aj, a_i_pu_index, a_j_pu_index,
same_bank_values)
t2 <- calculate_goodness_t2(ai, 1)
return(sum(t1, t2))
}
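# --- Editor's hedged usage sketch (not part of the original file) ---
# calculate_goodness_t1() looks the weight up in a global
# `same_bank_weight_matrix`, so one must exist with unit indices as dimnames.
# All values below are illustrative assumptions, not project defaults.
if (FALSE) {
    same_bank_weight_matrix <- matrix(0.1, nrow = 10, ncol = 10,
                                      dimnames = list(0:9, 0:9))
    banks <- get_ks('i', i_value = 1, j_value = 2,
                    num_units_per_bank_0 = 9, num_units_per_hidden_bank_0 = 9)
    calculate_goodness(c(get_a_k(), get_a_k()),
                       a_i_pu_index = 1, a_j_pu_index = 2,
                       same_bank_values = banks$same_bank,
                       opposite_bank_values = banks$opposite_bank,
                       hidden_bank_values = banks$hidden_bank)  # 0.1*0.5*0.5 + 0.5*1
}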
| 2,494 | mit |
e22f543b37e4be7ccff548b5b2cf69d75c4c16b3 | Monash-RNA-Systems-Biology-Laboratory/patseqers | Shiny_tutorial/helper.R | # Generates a data frame of random numbers
make_df <- function(number_of_points){
x <- rnorm(number_of_points, mean = 20, sd = 5)
y <- rnorm(number_of_points, mean = 50, sd = 50)
df <- data.frame(x,y)
return(df)
}
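# Editor's hedged usage example (not part of the original file):
if (FALSE) {
  df <- make_df(100)   # 100 draws from N(20, 5) and N(50, 50)
  plot(df$x, df$y)
}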
| 251 | gpl-2.0 |
b4681f5d4cccf3c2e7478898ef64c0940770a574 | bwilbertz/kaggle_allen_ai | R/runFullModelPipeline.R | # The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Benedikt Wilbertz
###############################################################################
# input
source("createInputFile.R")
source("createInputFile_NVAO.R")
# feature generation
source("createBasicStatFeatures.R")
# IR
source("createQueryData.R")
source("createQueryFeatures.R")
# PMI
source("createPMIFeatures.R")
source("createPMIFeaturesNVAO.R")
# FeatureHashing
source("createHashFeatures.R")
# model prediction
source("runModel.R")
| 1,547 | mit |
849f3460ee05af84e210dd8c74a1ce45cc189065 | eclee25/flu-SDI-exploratory-age | mapping_code/zipcode_maps_R_code/map-zipcodes_ECLedit.R | ## Name: Elizabeth Lee
## Date: 7/19/13
## Function:
### 1. draw OR map per season, popsize as bubble size
### 1b. draw log(OR map) per season 7/31/13, popsize as bubble size
### 1c. draw OR map per season, incidence as bubble size
### 2. draw incidence map per season
### 3) incidence maps by week 7/23/13
## Note: need 11 color bins because that is the max that the diverging color brewer palette will take
## Input Filenames: lat/long- mapping_code/cleanedmapdata/zip3_ll.txt; 1)
#### OR data by season (includes only 545 zip3s where data is present for all 10 seasons) - mapping_code/cleanedmapdata/zip3_OR_season.txt
#### incidence data by season OR popstat data for weekly incdence maps (includes only 843 zip3s where data is present for all 10 seasons) - mapping_code/cleanedmapdata/zip3_incid_season.txt
#### incidence data by week (includes only 843 zip3s where there is popstat data for all 10 seasons) - mapping_code/cleanedmapdata/zip3_incid_week.txt
## Output Filenames:
## Data Source: SDI, mapping_code/Coord3digits.csv (lat/long data)
##
library(ggplot2)
dfsumm <- function(x) {
  if (!class(x)[1] %in% c("data.frame", "matrix"))
    stop("You can't use dfsumm on ", class(x), " objects!")
  cat("\n", nrow(x), "rows and", ncol(x), "columns")
  cat("\n", nrow(unique(x)), "unique rows\n")
  s <- matrix(NA, nrow = 6, ncol = ncol(x))
  for (i in 1:ncol(x)) {
    iclass <- class(x[, i])[1]
    s[1, i] <- paste(class(x[, i]), collapse = " ")
    y <- x[, i]
    yc <- na.omit(y)
    if (iclass %in% c("factor", "ordered"))
      s[2:3, i] <- levels(yc)[c(1, length(levels(yc)))] else
    if (iclass == "numeric")
      s[2:3, i] <- as.character(signif(c(min(yc), max(yc)), 3)) else
    if (iclass == "logical")
      s[2:3, i] <- as.logical(c(min(yc), max(yc))) else
      s[2:3, i] <- as.character(c(min(yc), max(yc)))
    s[4, i] <- length(unique(yc))
    s[5, i] <- sum(is.na(y))
    s[6, i] <- !is.unsorted(yc)
  }
  s <- as.data.frame(s)
  rownames(s) <- c("Class", "Minimum", "Maximum", "Unique (excld. NA)",
                   "Missing values", "Sorted")
  colnames(s) <- colnames(x)
  print(s)
}
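# Editor's hedged example (not part of the original script): dfsumm() works on
# any data frame, e.g. the built-in iris data.
if (FALSE) {
  dfsumm(iris)  # prints class, min/max, unique count, NA count and sortedness per column
}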
##############################################################################################
# 1) OR maps by season
#communities file should have a list of nodes and data (nodes = zipcodes, data = OR or incidence)
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/mapping_code/cleanedmapdata')
communities <- read.csv('zip3_OR_season.txt', header=F, sep=",", colClasses='character') # includes zip3s that are present for all 10 seasons
names(communities)<-c('season','zip3','OR')
communities$OR<-as.numeric(communities$OR)
latlong <- read.csv('zip3_ll.txt', header=F, sep=',', colClasses='character') # file for source of lat/longs
names(latlong)<-c('zip3', 'latitude', 'longitude')
latlong$latitude<-as.numeric(latlong$latitude)
latlong$longitude<-as.numeric(latlong$longitude)
mergeddata = merge(communities, latlong, by.x='zip3', by.y='zip3')
# ORs are floats, so they need to be binned
# how many bins should there be?
hist(communities$OR, breaks=50, freq=FALSE)
hist(communities$OR, breaks=50, freq=FALSE,xlim=c(0,30))
hist(communities$OR, breaks=50, freq=FALSE,xlim=c(15,65), ylim=c(0, 0.03))
quantile(communities$OR) # 0% (0.3044887) 25% (2.2441670) 50% (3.4471814) 75% (5.4550541) 100% (64.2965544)
# explore the large ORs
highOR<-communities[communities$OR>20,] # seems to include both urban and rural communities
mergeddata$OR_bin<-cut(mergeddata$OR, breaks=c(seq(0,16, by=2), 20, 30, 65)) # bin the ORs
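# Editor's hedged illustration (not part of the original script): cut() maps
# each OR into a left-open, right-closed interval, e.g.
if (FALSE) {
  cut(c(0.3, 3.4, 19, 64), breaks = c(seq(0, 16, by = 2), 20, 30, 65))
  # -> (0,2] (2,4] (16,20] (30,65]
}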
popstat<-read.csv('zip3_incid_season.txt', header=T, sep=",", colClasses='character')
popstat6<-popstat[popstat$season=='6',] # use popstat values from season 6 since it is in the middle of the dataset
# are all zip3s from mergeddata present in popstat6?
sum(unique(mergeddata$zip3) %in% popstat6$zip3) # 843 zip3s
length(unique(mergeddata$zip3)) # 843 zip3s - all zip3s from mergeddata are present in popstat6
mergethree <- merge(mergeddata, popstat6[,2:4], by = 'zip3')
mergethree$popstat<-as.numeric(mergethree$popstat)
mergethree$OR_bin<-factor(mergethree$OR_bin, rev(levels(mergethree$OR_bin)))
# 7/29/13 unused factors are not dropped
for (i in 1:10){
Sdat<-mergethree[mergethree$season==as.character(i),]
g <- ggplot(data=Sdat, aes(size=popstat))
g <- g + labs(title = paste("Odds Ratio, Season", i))
g <- g + scale_size_continuous(range=c(1,5))
g <- g + scale_size("population size")
g <- g + geom_point(aes(x=longitude, y=latitude, color=OR_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("odds ratio", type="div", palette=7, labels=sort(unique(mergethree$OR_bin)), drop=FALSE)
# ggsave(g, width=6, height=4, filename=paste("OR_map_S",i,".png", sep=''))
}
## Continental US only ##
# remove Alaska and Hawaii dots - continental US only
AKHI<-c('995', '996', '997', '998', '999', '967', '968')
mergefour<-mergethree[!(mergethree$zip3 %in% AKHI),]
for (i in 1:10){
Sdat<-mergefour[mergefour$season==as.character(i),]
g <- ggplot(data=Sdat, aes(size=popstat))
g <- g + labs(title = paste("Odds Ratio, Season", i))
g <- g + scale_size_continuous(range=c(1,5))
g <- g + scale_size("population size")
g <- g + geom_point(aes(x=longitude, y=latitude, color=OR_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("odds ratio", type="div", palette=7, labels=sort(unique(mergefour$OR_bin)), drop=FALSE)
ggsave(g, width=6, height=4, filename=paste("OR_continentalmap_S",i,".png", sep=''))
}
############# check that the maps are drawing the same thing ############
mergethree[mergethree$zip3=='331',] # Miami, check that bins and colors and legend seem to match
mergethree[mergethree$zip3=='900',] # LA
mergethree[mergethree$zip3=='770',] # Houston
# test with a few cities since there are many different OR bins
Houston<-mergethree[(mergethree$zip3=='770' | mergethree$zip3=='945' | mergethree$zip3=='200' | mergethree$zip3=='900' | mergethree$zip3=='600' | mergethree$zip3=='331'),] # Houston & Norcal & DC & LA & Chicago
for (i in 1:5){
Sdat<-Houston[Houston$season==as.character(i),]
g <- ggplot(data=Sdat, aes(size=popstat))
g <- g + labs(title = paste("Odds Ratio, Season", i))
g <- g + scale_size_continuous(range=c(9,10))
g <- g + scale_size("population size")
g <- g + geom_point(aes(x=longitude, y=latitude, color=OR_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("odds ratio", type="div", palette=7, labels=sort(unique(mergethree$OR_bin)), drop=FALSE)
}
Houston[Houston$season=="5",]
############ end checks ################
# 7/19/13 plots, labels were wrong
# for (i in 1:10){
# Sdat<-mergetwo[mergetwo$season==as.character(i),]
# g <- ggplot(data=Sdat)
# g <- g + geom_point(aes(x=longitude, y=latitude, color=OR_legend), size=1)
# g <- g + labs(x=NULL, y=NULL)
# g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
# g <- g + scale_color_brewer(type="div", palette=7, labels=sort(unique(mergeddata$OR_bin), decreasing=TRUE))
# # ggsave(g, width=6, height=4, filename=paste("OR_map_S",i,".png", sep=''))
# }
########################################################################################
# 1a) Normalized OR maps by season
#communities file should have a list of nodes and data (nodes = zipcodes, data = OR or incidence)
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/mapping_code/cleanedmapdata')
communities <- read.csv('zip3_OR_season.txt', header=F, sep=",", colClasses='character') # includes zip3s that are present for all 10 seasons
names(communities)<-c('season','zip3','OR')
communities$OR<-as.numeric(communities$OR)
latlong <- read.csv('zip3_ll.txt', header=F, sep=',', colClasses='character') # file for source of lat/longs
names(latlong)<-c('zip3', 'latitude', 'longitude')
latlong$latitude<-as.numeric(latlong$latitude)
latlong$longitude<-as.numeric(latlong$longitude)
# import reference dataset that has season ORs -- use for normalizing zip3 ORs
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Reference_datasets')
seasOR <- read.csv('pk6OR-allzip3_season.csv', header = TRUE, sep = ',', colClasses = 'character')
mergeddata = merge(communities, latlong, by.x = 'zip3', by.y = 'zip3')
mergetwo <- merge(mergeddata, seasOR, by.x = 'season', by.y = 'season') # add ref OR to dataset
mergetwo$pk6_OR <- as.numeric(mergetwo$pk6_OR)
mergetwo$OR_norm <- mergetwo$OR/mergetwo$pk6_OR
# ORs are floats, so they need to be binned
# how many bins should there be?
hist(mergetwo$OR_norm, breaks=50, freq=FALSE)
hist(mergetwo$OR_norm, breaks=50, freq=FALSE,xlim=c(0,10))
hist(mergetwo$OR_norm, breaks=100, freq=FALSE,xlim=c(0,4))
quantile(mergetwo$OR_norm) # 0% 0.0615112 25% 0.5976017 50% 0.8529960 75% 1.2638488 100% 19.3109002
mergetwo$ORnorm_bin<-cut(mergetwo$OR_norm, breaks=c(seq(0, 2.2, by = 0.3), 3, 4, 20)) # bin the ORs
# bubble size = popsize
popstat<-read.csv('zip3_incid_season.txt', header=T, sep=",", colClasses='character')
popstat6<-popstat[popstat$season=='6',] # use popstat values from season 6 since it is in the middle of the dataset
# are all zip3s from mergetwo present in popstat6?
sum(unique(mergetwo$zip3) %in% popstat6$zip3) # 545 zip3s
length(unique(mergetwo$zip3)) # 545 zip3s - all zip3s from mergetwo are present in popstat6
mergethree <- merge(mergetwo, popstat6[,2:4], by = 'zip3')
mergethree$popstat<-as.numeric(mergethree$popstat)
mergethree$ORnorm_bin<-factor(mergethree$ORnorm_bin, rev(levels(mergethree$ORnorm_bin)))
## Continental US only ##
# remove Alaska and Hawaii dots - continental US only
AKHI<-c('995', '996', '997', '998', '999', '967', '968')
mergefour<-mergethree[!(mergethree$zip3 %in% AKHI),]
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/mapping_code/cleanedmapdata/mapoutputs')
for (i in 1:10){
Sdat<-mergefour[mergefour$season==as.character(i),]
g <- ggplot(data=Sdat, aes(size=popstat))
g <- g + labs(title = paste("Odds Ratio, Season", i))
g <- g + scale_size_continuous(range=c(1,5))
g <- g + scale_size("population size")
g <- g + geom_point(aes(x=longitude, y=latitude, color=ORnorm_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("normalized OR", type="div", palette=7, labels=sort(unique(mergefour$ORnorm_bin)), drop=FALSE)
ggsave(g, width=6, height=4, filename=paste("ORnorm_continentalmap_S0",i,".png", sep=''))
}
####################################################################################
# 1b) log OR maps by season
#communities file should have a list of nodes and data (nodes = zipcodes, data = OR or incidence)
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/mapping_code/cleanedmapdata')
communities <- read.csv('zip3_OR_season.txt', header=F, sep=",", colClasses='character') # includes zip3s that are present for all 10 seasons
names(communities)<-c('season','zip3','OR')
communities$OR<-as.numeric(communities$OR)
communities$logOR<-log(communities$OR)
latlong <- read.csv('zip3_ll.txt', header=F, sep=',', colClasses='character') # file for source of lat/longs
names(latlong)<-c('zip3', 'latitude', 'longitude')
latlong$latitude<-as.numeric(latlong$latitude)
latlong$longitude<-as.numeric(latlong$longitude)
mergeddata = merge(communities, latlong, by.x='zip3', by.y='zip3')
# ORs are continuous, so they need to be binned
# how many bins should there be?
hist(communities$logOR, breaks=50, freq=FALSE)
quantile(communities$logOR) # 0% (-1.1891212) 25% (0.8083344) 50% (1.2375569) 75% (1.6965425) 100% (4.1635060)
# explore the large ORs
highOR<-mergeddata[mergeddata$logOR>3,] # seem to be mostly rural areas
mergeddata$logOR_bin<-cut(mergeddata$logOR, breaks=c(seq(-1.5,3.5, by=0.5), 4.5)) # bin the ORs
popstat<-read.csv('zip3_incid_season.txt', header=T, sep=",", colClasses='character')
popstat6<-popstat[popstat$season=='6',] # use popstat values from season 6 since it is in the middle of the dataset
# are all zip3s from mergeddata present in popstat6?
sum(unique(mergeddata$zip3) %in% popstat6$zip3) # 843 zip3s
length(unique(mergeddata$zip3)) # 843 zip3s - all zip3s from mergeddata are present in popstat6
mergethree <- merge(mergeddata, popstat6[,2:4], by = 'zip3')
mergethree$popstat<-as.numeric(mergethree$popstat)
mergethree$logOR_bin<-factor(mergethree$logOR_bin, levels=c("(-1.5,-1]", "(-1,-0.5]", "(-0.5,0]", "(0,0.5]", "(0.5,1]", "(1,1.5]", "(1.5,2]", "(2,2.5]", "(2.5,3]", "(3,3.5]", "(3.5,4.5]"))
mergethree$logOR_bin<-factor(mergethree$logOR_bin, levels=rev(levels(mergethree$logOR_bin)))
for (i in 1:10){
Sdat<-mergethree[mergethree$season==as.character(i),]
g <- ggplot(data=Sdat, aes(size=popstat))
g <- g + labs(title = paste("Log Odds Ratio, Season", i))
g <- g + scale_size_continuous(range=c(1,5))
g <- g + scale_size("population size")
g <- g + geom_point(aes(x=longitude, y=latitude, color=logOR_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("log odds ratio", type="div", palette=7, labels=sort(unique(mergethree$logOR_bin)), drop=FALSE)
# ggsave(g, width=6, height=4, filename=paste("logOR_map_S",i,".png", sep=''))
}
## Continental US only ##
# remove Alaska and Hawaii dots - continental US only
AKHI<-c('995', '996', '997', '998', '999', '967', '968')
mergefour<-mergethree[!(mergethree$zip3 %in% AKHI),]
for (i in 1:10){
Sdat<-mergefour[mergefour$season==as.character(i),]
g <- ggplot(data=Sdat, aes(size=popstat))
g <- g + labs(title = paste("Log Odds Ratio, Season", i))
g <- g + scale_size_continuous(range=c(1,5))
g <- g + scale_size("population size")
g <- g + geom_point(aes(x=longitude, y=latitude, color=logOR_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("log odds ratio", type="div", palette=7, labels=sort(unique(mergethree$logOR_bin)), drop=FALSE)
ggsave(g, width=6, height=4, filename=paste("logOR_continentalmap_S",i,".png", sep=''))
}
#############################################################################################
# 1c. draw OR map per season, incidence as bubble size
#communities file should have a list of nodes and data (nodes = zipcodes, data = OR or incidence)
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/mapping_code/cleanedmapdata')
communities <- read.csv('zip3_OR_season.txt', header=F, sep=",", colClasses='character') # includes zip3s that are present for all 10 seasons
names(communities)<-c('season','zip3','OR')
communities$OR<-as.numeric(communities$OR)
latlong <- read.csv('zip3_ll.txt', header=F, sep=',', colClasses='character') # file for source of lat/longs
names(latlong)<-c('zip3', 'latitude', 'longitude')
latlong$latitude<-as.numeric(latlong$latitude)
latlong$longitude<-as.numeric(latlong$longitude)
mergeddata = merge(communities, latlong, by.x='zip3', by.y='zip3')
# ORs are floats, so they need to be binned
# how many bins should there be?
hist(communities$OR, breaks=50, freq=FALSE)
hist(communities$OR, breaks=50, freq=FALSE,xlim=c(0,30))
hist(communities$OR, breaks=50, freq=FALSE,xlim=c(15,65), ylim=c(0, 0.03))
quantile(communities$OR) # 0% (0.3044887) 25% (2.2441670) 50% (3.4471814) 75% (5.4550541) 100% (64.2965544)
# explore the large ORs
highOR<-communities[communities$OR>20,] # seems to include both urban and rural communities
mergeddata$OR_bin<-cut(mergeddata$OR, breaks=c(seq(0,16, by=2), 20, 30, 65)) # bin the ORs
popstat<-read.csv('zip3_incid_season.txt', header=T, sep=",", colClasses='character')
# 9/13/13 attack rate was only shown for season6 but we want to show different attack rates by season
# create a uq ID combining season number and zip3 - this will be used to merge the dataset with the ORs
popstat$uqid <- paste(popstat$season, popstat$zip3, sep = '')
mergeddata$uqid <- paste(mergeddata$season, mergeddata$zip3, sep = '')
# are all of the zip3s in mergeddata also in popstat? - check before merging
# there are more zip3s in popstat than in mergeddata, so merging on uqid will
# keep only the season-zip3 pairs common to both
sum(unique(mergeddata$zip3) %in% popstat$zip3) # 545 zip3s
length(unique(mergeddata$zip3)) # 545 zip3s - all zip3s from mergeddata are present in popstat
# create attack rate variable in popstat
popstat$AR1000 <- as.numeric(popstat$ILI)/as.numeric(popstat$popstat)*1000
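# Editor's hedged worked example (illustrative numbers only, not from the
# data): 500 ILI cases in a zip3 with popstat 250000 gives an attack rate of
# 2 per 1000.
if (FALSE) {
  500 / 250000 * 1000  # = 2
}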
mergethree <- merge(mergeddata, popstat[,5:6], by = 'uqid')
mergethree$OR_bin<-factor(mergethree$OR_bin, rev(levels(mergethree$OR_bin)))
# 7/29/13 unused factors are not dropped
for (i in 1:10){
Sdat<-mergethree[mergethree$season==as.character(i),]
g <- ggplot(data=Sdat, aes(size=AR1000))
g <- g + labs(title = paste("Odds Ratio, Season", i))
g <- g + scale_size_continuous(range=c(1,5))
g <- g + scale_size("attack rate per 1000")
g <- g + geom_point(aes(x=longitude, y=latitude, color=OR_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("odds ratio", type="div", palette=7, labels=sort(unique(mergethree$OR_bin)), drop=FALSE)
ggsave(g, width=6, height=4, filename=paste("OR_map_S0",i,"_ARsize.png", sep=''))
}
## Continental US only ##
# remove Alaska and Hawaii dots - continental US only
AKHI<-c('995', '996', '997', '998', '999', '967', '968')
mergefour<-mergethree[!(mergethree$zip3 %in% AKHI),]
for (i in 1:10){
Sdat<-mergefour[mergefour$season==as.character(i),]
g <- ggplot(data=Sdat, aes(size=AR1000))
g <- g + labs(title = paste("Odds Ratio, Season", i))
g <- g + scale_size_continuous(range=c(1,5))
g <- g + scale_size("attack rate per 1000")
g <- g + geom_point(aes(x=longitude, y=latitude, color=OR_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("odds ratio", type="div", palette=7, labels=sort(unique(mergefour$OR_bin)), drop=FALSE)
ggsave(g, width=6, height=4, filename=paste("OR_continentalmap_S0",i,"_ARsize.png", sep=''))
}
#############################################################################################
# 1d) normalized OR by season, attack rate as bubble size
#communities file should have a list of nodes and data (nodes = zipcodes, data = OR or incidence)
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/mapping_code/cleanedmapdata')
communities <- read.csv('zip3_OR_season.txt', header=F, sep=",", colClasses='character') # includes zip3s that are present for all 10 seasons
names(communities)<-c('season','zip3','OR')
communities$OR<-as.numeric(communities$OR)
latlong <- read.csv('zip3_ll.txt', header=F, sep=',', colClasses='character') # file for source of lat/longs
names(latlong)<-c('zip3', 'latitude', 'longitude')
latlong$latitude<-as.numeric(latlong$latitude)
latlong$longitude<-as.numeric(latlong$longitude)
# import reference dataset that has season ORs -- use for normalizing zip3 ORs
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Reference_datasets')
seasOR <- read.csv('pk6OR-allzip3_season.csv', header = TRUE, sep = ',', colClasses = 'character')
mergeddata = merge(communities, latlong, by.x = 'zip3', by.y = 'zip3')
mergetwo <- merge(mergeddata, seasOR, by.x = 'season', by.y = 'season') # add ref OR to dataset
mergetwo$pk6_OR <- as.numeric(mergetwo$pk6_OR)
mergetwo$OR_norm <- mergetwo$OR/mergetwo$pk6_OR
# ORs are floats, so they need to be binned
# how many bins should there be?
hist(mergetwo$OR_norm, breaks=50, freq=FALSE)
hist(mergetwo$OR_norm, breaks=50, freq=FALSE,xlim=c(0,10))
hist(mergetwo$OR_norm, breaks=100, freq=FALSE,xlim=c(0,4))
quantile(mergetwo$OR_norm) # 0% 0.0615112 25% 0.5976017 50% 0.8529960 75% 1.2638488 100% 19.3109002
mergetwo$ORnorm_bin<-cut(mergetwo$OR_norm, breaks=c(seq(0, 2.2, by = 0.3), 3, 4, 20)) # bin the ORs
popstat<-read.csv('zip3_incid_season.txt', header=T, sep=",", colClasses='character')
# create a uq ID combining season number and zip3 - this will be used to merge the dataset with the ORs
popstat$uqid <- paste(popstat$season, popstat$zip3, sep = '')
mergetwo$uqid <- paste(mergetwo$season, mergetwo$zip3, sep = '')
# are all of the zip3s in mergetwo also in popstat? - check before merging
# there are more zip3s in popstat than in mergetwo, so merging on uqid will
# keep only the season-zip3 pairs common to both
sum(unique(mergetwo$zip3) %in% popstat$zip3) # 545 zip3s
length(unique(mergetwo$zip3)) # 545 zip3s - all zip3s from mergetwo are present in popstat
# create attack rate variable in popstat
popstat$AR1000 <- as.numeric(popstat$ILI)/as.numeric(popstat$popstat)*1000
mergethree <- merge(mergetwo, popstat[,5:6], by = 'uqid')
mergethree$ORnorm_bin<-factor(mergethree$ORnorm_bin, rev(levels(mergethree$ORnorm_bin)))
## Continental US only ##
# remove Alaska and Hawaii dots - continental US only
AKHI<-c('995', '996', '997', '998', '999', '967', '968')
mergefour<-mergethree[!(mergethree$zip3 %in% AKHI),]
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/mapping_code/cleanedmapdata/mapoutputs')
for (i in 1:10){
Sdat<-mergefour[mergefour$season==as.character(i),]
g <- ggplot(data=Sdat, aes(size=AR1000))
g <- g + labs(title = paste("Odds Ratio, Season", i))
g <- g + scale_size_continuous(range=c(1,5))
g <- g + scale_size("attack rate per 1000")
g <- g + geom_point(aes(x=longitude, y=latitude, color=ORnorm_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("normalized OR", type="div", palette=7, labels=sort(unique(mergefour$ORnorm_bin)), drop=FALSE)
ggsave(g, width=6, height=4, filename=paste("ORnorm_continentalmap_S0",i,"_ARsize.png", sep=''))
}
#############################################################################################
# 1e) normalized OR by season, normalized attack rate as bubble size
#communities file should have a list of nodes and data (nodes = zipcodes, data = OR or incidence)
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/mapping_code/cleanedmapdata')
communities <- read.csv('zip3_OR_season.txt', header=F, sep=",", colClasses='character') # includes zip3s that are present for all 10 seasons
names(communities)<-c('season','zip3','OR')
communities$OR<-as.numeric(communities$OR)
latlong <- read.csv('zip3_ll.txt', header=F, sep=',', colClasses='character') # file for source of lat/longs
names(latlong)<-c('zip3', 'latitude', 'longitude')
latlong$latitude<-as.numeric(latlong$latitude)
latlong$longitude<-as.numeric(latlong$longitude)
# import reference dataset that has season ORs -- use for normalizing zip3 ORs
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Reference_datasets')
seasOR <- read.csv('pk6OR-allzip3_season.csv', header = TRUE, sep = ',', colClasses = 'character')
mergeddata = merge(communities, latlong, by.x = 'zip3', by.y = 'zip3')
mergetwo <- merge(mergeddata, seasOR, by.x = 'season', by.y = 'season') # add ref OR to dataset
mergetwo$pk6_OR <- as.numeric(mergetwo$pk6_OR)
mergetwo$OR_norm <- mergetwo$OR/mergetwo$pk6_OR
# ORs are floats, so they need to be binned
# how many bins should there be?
hist(mergetwo$OR_norm, breaks=50, freq=FALSE)
hist(mergetwo$OR_norm, breaks=50, freq=FALSE,xlim=c(0,10))
hist(mergetwo$OR_norm, breaks=100, freq=FALSE,xlim=c(0,4))
quantile(mergetwo$OR_norm) # 0% 0.0615112 25% 0.5976017 50% 0.8529960 75% 1.2638488 100% 19.3109002
mergetwo$ORnorm_bin<-cut(mergetwo$OR_norm, breaks=c(seq(0, 2.2, by = 0.3), 3, 4, 20)) # bin the ORs
# import attack rate for bubble size
popstat<-read.csv('zip3_incid_season.txt', header=T, sep=",", colClasses='character')
# create a uq ID combining season number and zip3 - this will be used to merge the dataset with the ORs
popstat$uqid <- paste(popstat$season, popstat$zip3, sep = '')
mergetwo$uqid <- paste(mergetwo$season, mergetwo$zip3, sep = '')
# are all of the zip3s in mergetwo also in popstat? - check before merging
# there are more zip3s in popstat than in mergetwo, so merging on uqid will
# keep only the season-zip3 pairs common to both
sum(unique(mergetwo$zip3) %in% popstat$zip3) # 545 zip3s
length(unique(mergetwo$zip3)) # 545 zip3s - all zip3s from mergetwo are present in popstat
# create attack rate variable in popstat
popstat$AR <- as.numeric(popstat$ILI)/as.numeric(popstat$popstat)
mergethree <- merge(mergetwo, popstat[,5:6], by = 'uqid')
mergethree$ORnorm_bin<-factor(mergethree$ORnorm_bin, rev(levels(mergethree$ORnorm_bin)))
# import seasonal attack rate reference
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Reference_datasets')
AR_ref <- read.csv('AR-allzip3_season.csv', header = TRUE, colClasses = 'character')
mergethree <- merge(mergethree, AR_ref, by = 'season')
mergethree$attackrate <- as.numeric(mergethree$attackrate)
mergethree$AR_norm <- mergethree$AR / mergethree$attackrate
# bin normalized attack rates bc they are floats
# how many bins should there be?
hist(mergethree$AR_norm, breaks=50, freq=FALSE)
quantile(mergethree$AR_norm) # 0.02399209 0.45435468 0.76870995 1.26884483 6.74975292
mergethree$ARnorm_bin<-cut(mergethree$AR_norm, breaks=c(seq(0, 2.2, by = 0.3), 3, 4, 7)) # bin the ORs
## Continental US only ##
# remove Alaska and Hawaii dots - continental US only
AKHI<-c('995', '996', '997', '998', '999', '967', '968')
mergefour<-mergethree[!(mergethree$zip3 %in% AKHI),]
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/mapping_code/cleanedmapdata/mapoutputs')
for (i in 1:10){
Sdat<-mergefour[mergefour$season==as.character(i),]
g <- ggplot(data=Sdat, aes(size=AR_norm))
g <- g + labs(title = paste("Odds Ratio, Season", i))
g <- g + scale_size_continuous(range=c(1,5))
g <- g + scale_size("normalized attack rate")
g <- g + geom_point(aes(x=longitude, y=latitude, color=ORnorm_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("normalized OR", type="div", palette=7, labels=sort(unique(mergefour$ORnorm_bin)), drop=FALSE)
ggsave(g, width=6, height=4, filename=paste("ORnorm_continentalmap_S0",i,"_ARnorm.png", sep=''))
}
#############################################################################################
# 2) incidence maps by season
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/mapping_code/cleanedmapdata')
communities <- read.csv('zip3_incid_season.txt', header=T, sep=",", colClasses='character') # includes zip3s that are present for all 10 seasons
communities$ILI<-as.numeric(communities$ILI)
communities$popstat<-as.numeric(communities$popstat)
communities$attack1000<- communities$ILI/communities$popstat*1000 # attack rate per 1000
latlong <- read.csv('zip3_ll.txt', header=F, sep=',', colClasses='character') # file for source of lat/longs
names(latlong)<-c('zip3', 'latitude', 'longitude')
latlong$latitude<-as.numeric(latlong$latitude)
latlong$longitude<-as.numeric(latlong$longitude)
mergeddata = merge(communities, latlong, by.x='zip3', by.y='zip3')
# attack1000 are floats, so they need to be binned
# how many bins should there be?
hist(communities$attack1000, breaks=50, freq=FALSE)
hist(communities$attack1000, breaks=50, freq=FALSE,xlim=c(0,30))
hist(communities$attack1000, breaks=50, freq=FALSE,xlim=c(15,65), ylim=c(0, 0.03))
quantile(communities$attack1000) # 0% (0.000000) 25% (1.087103) 50% (2.913078) 75% (6.270440) 100% (57.018699)
# explore the large attack1000s
highattack1000<-communities[communities$attack1000>30,] # seems to include both urban and rural communities
mergeddata$attack1000_bin<-cut(mergeddata$attack1000, breaks=c(seq(0,20, by=2), 60), right=FALSE) # bin the attack1000s
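# Editor's hedged note (not part of the original script): right=FALSE makes the
# bins left-closed, so the many zero attack rates land in [0,2) instead of
# being dropped as they would be with the default right-closed intervals:
if (FALSE) {
  cut(0, breaks = c(seq(0, 20, by = 2), 60), right = FALSE)  # -> [0,2)
  cut(0, breaks = c(seq(0, 20, by = 2), 60))                 # -> <NA>
}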
popstat<-read.csv('zip3_incid_season.txt', header=T, sep=",", colClasses='character')
popstat6<-popstat[popstat$season=='6',]
# are all zip3s from mergeddata present in popstat6?
sum(unique(mergeddata$zip3) %in% popstat6$zip3) # 843 zip3s
length(unique(mergeddata$zip3)) # 843 zip3s - all zip3s from mergeddata are present in popstat6
mergethree <- merge(mergeddata, popstat6[,2:4], by = 'zip3')
mergethree$popstat.y<-as.numeric(mergethree$popstat.y) # when popstat dataset was merged, popstat.y represents the popstat value in season 6.
mergethree$attack1000_bin<-factor(mergethree$attack1000_bin, levels=rev(levels(mergethree$attack1000_bin)))
for (i in 1:10){
Sdat<-mergethree[mergethree$season==as.character(i),]
g <- ggplot(data=Sdat, aes(size=popstat.y))
g <- g + labs(title = paste("Incidence per 1000, Season", i))
g <- g + scale_size_continuous(range=c(1,5))
g <- g + scale_size("population size")
g <- g + geom_point(aes(x=longitude, y=latitude, color=attack1000_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("incidence", type="div", palette=7, labels=levels(mergethree$attack1000_bin), drop=FALSE)
ggsave(g, width=6, height=4, filename=paste("Incid_map_S",i,".png", sep=''))
}
## Continental US only ##
# remove Alaska and Hawaii dots - continental US only
AKHI<-c('995', '996', '997', '998', '999', '967', '968')
mergefour<-mergethree[!(mergethree$zip3 %in% AKHI),]
for (i in 1:10){
Sdat<-mergefour[mergefour$season==as.character(i),]
g <- ggplot(data=Sdat, aes(size=popstat.y))
g <- g + labs(title = paste("Incidence per 1000, Season", i))
g <- g + scale_size_continuous(range=c(1,5))
g <- g + scale_size("population size")
g <- g + geom_point(aes(x=longitude, y=latitude, color=attack1000_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("incidence", type="div", palette=7, labels=levels(mergethree$attack1000_bin), drop=FALSE)
ggsave(g, width=6, height=4, filename=paste("Incid_continentalmap_S0",i,".png", sep=''))
}
############# check that the maps are drawing the same thing ############
mergethree[mergethree$zip3=='331',] # Miami, check that bins and colors and legend seem to match
mergethree[mergethree$zip3=='900',] # LA
mergethree[mergethree$zip3=='770',] # Houston
# test with a few cities since there are many different OR bins
Houston<-mergethree[(mergethree$zip3=='770' | mergethree$zip3=='945' | mergethree$zip3=='200' | mergethree$zip3=='900' | mergethree$zip3=='600' | mergethree$zip3=='331'),] # Houston & Norcal & DC & LA & Chicago
for (i in 1:3){
Sdat<-Houston[Houston$season==as.character(i),]
g <- ggplot(data=Sdat, aes(size=popstat.y))
g <- g + labs(title = paste("Incidence, Season", i))
g <- g + scale_size_continuous(range=c(9,10))
g <- g + scale_size("population size")
g <- g + geom_point(aes(x=longitude, y=latitude, color=attack1000_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("incidence", type="div", palette=7, labels=levels(mergethree$attack1000_bin), drop=FALSE)
}
Houston[Houston$season=="3",]
############ end checks ################
##############################################################################################
# 3) incidence maps by week 7/31/13
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/mapping_code/cleanedmapdata')
d<-read.csv('zip3_incid_week_cl.txt', header=FALSE, colClasses='character')
names(d)<-c('season','zip3','week','attack') # attack rate per 10,000 individuals
d$week<-as.Date(d$week, format="%Y-%m-%d")
d$attack<-as.numeric(d$attack)
dfsumm(d)
latlong <- read.csv('zip3_ll.txt', header=F, sep=',', colClasses='character') # file for source of lat/longs
names(latlong)<-c('zip3', 'latitude', 'longitude')
latlong$latitude<-as.numeric(latlong$latitude)
latlong$longitude<-as.numeric(latlong$longitude)
mergeddata <- merge(d, latlong, by.x='zip3', by.y='zip3')
# attack are floats, so they need to be binned
# how many bins should there be?
hist(mergeddata$attack, breaks=1000, freq=FALSE)
hist(mergeddata$attack, breaks=1000, freq=FALSE,xlim=c(0,4), ylim=c(0, .1))
hist(mergeddata$attack, breaks=50, freq=FALSE,xlim=c(15,65), ylim=c(0, 0.03))
quantile(mergeddata$attack) # 0% (0.000000) 25% (0.000000) 50% (0.000000) 75% (0.000000) 100% (1074.533)
# explore the large attack10000s
highattack<-mergeddata[mergeddata$attack>8,] # 786 instances - relatively few, and most fall during the 2008-2009 season
hist(highattack$attack, freq=FALSE)
mergeddata$attack_bin<-cut(mergeddata$attack, breaks=c(-Inf, seq(0,2, by=0.25), 5, 30, 110), right=TRUE) # bin the attack rates
mergeddata[(mergeddata$attack_bin=='(-Inf,0]' & mergeddata$attack != 0),] # examine new bins, all of the incidences for the first bin are 0.
uqwk <- sort(unique(mergeddata$week))
### change marker size based on size of urban area ### 7/24/13
# # approach 1: use RUCC bin means
# rucc<-read.csv('zip3_RUCC2013avg_crosswalk.csv', header=T, colClasses='character')
# # are all zip3s from mergeddata present in rucc?
# sum(unique(mergeddata$zip3) %in% rucc$zip3) # 843 zip3s
# length(unique(mergeddata$zip3)) # 843 zip3s - all zip3s from mergeddata are present in rucc
# mergethree <- merge(mergeddata, rucc, by.x = 'zip3', by.y = 'zip3') # RUCCavg_m: 1 = metro urban, 2 = nonmetro urban, 3, rural
# mergethree$RUCCavg_m<-as.numeric(mergethree$RUCCavg_m)
# approach 2: use popstat values in season 6 (2005)
popstat<-read.csv('zip3_incid_season.txt', header=T, sep=",", colClasses='character')
popstat6<-popstat[popstat$season=='6',]
# are all zip3s from mergeddata present in popstat6?
sum(unique(mergeddata$zip3) %in% popstat6$zip3) # 843 zip3s
length(unique(mergeddata$zip3)) # 843 zip3s - all zip3s from mergeddata are present in popstat6
mergethree <- merge(mergeddata, popstat6, by.x = 'zip3', by.y = 'zip3')
mergethree$popstat<-as.numeric(mergethree$popstat)
mergethree[mergethree$attack_bin=="(-Inf,0]",]$attack_bin<-NA
mergethree$attack_bin<-factor(mergethree$attack_bin) # drop (-Inf,0] level since it no longer occurs
mergethree$attack_bin<-factor(mergethree$attack_bin, levels=rev(levels(mergethree$attack_bin))) # reversed so low values are blue and high values are red
for (i in 1:length(uqwk)){ # length(uqwk)
Wdat<-mergethree[mergethree$week==uqwk[i],]
g <- ggplot(data=Wdat, aes(size=popstat))
g <- g + labs(title = paste("Incidence per 10,000:", uqwk[i]))
g <- g + scale_size_continuous(range=c(1,5))
g <- g + scale_size("population size")
g <- g + geom_point(aes(x=longitude, y=latitude, color=attack_bin)) #, size=RUCCavg_m
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g<- g + scale_color_brewer("incidence", type="div", palette=7, labels=levels(mergethree$attack_bin), drop=FALSE, na.value="grey85")
ggsave(g, width=6, height=4, filename=paste("Incid_map_",uqwk[i],".png", sep=''))
print(i)
}
## Continental US only ##
# remove Alaska and Hawaii dots - continental US only
AKHI<-c('995', '996', '997', '998', '999', '967', '968')
mergefour<-mergethree[!(mergethree$zip3 %in% AKHI),]
for (i in 1:length(uqwk)){ # length(uqwk)
Wdat<-mergefour[mergefour$week==uqwk[i],]
g <- ggplot(data=Wdat, aes(size=popstat))
g <- g + labs(title = paste("Incidence per 10,000:", uqwk[i]))
g <- g + scale_size_continuous(range=c(1,5))
g <- g + scale_size("population size")
g <- g + geom_point(aes(x=longitude, y=latitude, color=attack_bin)) #, size=RUCCavg_m
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g<- g + scale_color_brewer("incidence", type="div", palette=7, labels=levels(mergefour$attack_bin), drop=FALSE, na.value="grey85")
ggsave(g, width=6, height=4, filename=paste("Incid_continental_map_",uqwk[i],".png", sep=''))
print(i)
}
############# check that the maps are drawing the same thing ############
mergethree[mergethree$zip3=='331',] # Miami, check that bins and colors and legend seem to match
mergethree[mergethree$zip3=='900',] # LA
mergethree[mergethree$zip3=='770',] # Houston
# test with a few cities since there are many different OR bins
Houston<-mergethree[(mergethree$zip3=='770' | mergethree$zip3=='945' | mergethree$zip3=='200' | mergethree$zip3=='900' | mergethree$zip3=='600' | mergethree$zip3=='331'),] # Houston & Norcal & DC & LA & Chicago
for (i in 1:10){
Sdat<-Houston[Houston$week==uqwk[i],]
g <- ggplot(data=Sdat, aes(size=popstat))
g <- g + labs(title = paste("Incidence, Week", uqwk[i]))
g <- g + scale_size_continuous(range=c(9,10))
g <- g + scale_size("population size")
g <- g + geom_point(aes(x=longitude, y=latitude, color=attack_bin))
g <- g + labs(x=NULL, y=NULL)
g <- g + theme(panel.background = element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
g <- g + scale_color_brewer("incidence", type="div", palette=7, labels=sort(unique(mergethree$attack_bin)), drop=FALSE)
}
Houston[Houston$week==uqwk[i],]
############ end check on maps ################
############ checks that attack rates seem reasonable #############
setwd('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/mapping_code/cleanedmapdata')
d2<-read.csv('zip3_incid_week.txt', header=TRUE, colClasses='character')
d2$ILI<-as.numeric(d2$ILI)
d2$popstat<-as.numeric(d2$popstat)
d2$week<-as.Date(d2$week)
AR <- rep(0,10)
for (i in 1:10){
ili<-sum(d2[d2$season==as.character(i),]$ILI)
pstat<-303615090 # hard-coded denominator; appears to be the total popstat summed across all zip3s
print(ili)
print(pstat)
AR[i]<-ili/pstat*100
}
# compare AR in week and season data
d3 <- read.csv('zip3_incid_season.txt', header=T, sep=",", colClasses='character') # includes zip3s that are present for all 10 seasons
d3$ILI<-as.numeric(d3$ILI)
d3$popstat<-as.numeric(d3$popstat)
ARs <- rep(0,10)
for (i in 1:10){
ili <-sum(d3[d3$season==as.character(i),]$ILI)
pstat<-sum(d3[d3$season==as.character(i),]$popstat)
ARs[i]<-ili/pstat*100
}
############## end checks on attack rates ###############
#### pre-existing snippets of code, saved for syntax reference ####
# b <- c(0,1,2,3,5,6,7,8,10,15,16,17,18,28) # test set of data values (previously called community ids)
# b <- scan('zipcode_unique_commid.txt') # list of unique data values
# g = g + labs(x=NULL, y=NULL)
# g = g + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "top right", axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank())
# g = g + scale_color_brewer(type="div", palette=7, labels=sort(unique(mergeddata$OR_bin))
# ggsave(g, width=6, height=4, filename="zipcode_weighted_S1.png")
#b <- c(0,1,2,3,5,6,7,8,10,15,16,17,18,28)
#g = ggplot(data=mergeddata) + geom_point(aes(x=longitude, y=latitude, colour=V2, group=V2))
# g = g + theme_bw() + scale_x_continuous(limits = c(-125, -66), breaks=NA)
# g = g + scale_y_continuous(limits=c(25,50), breaks=NA)
# r <- scan('colors.txt', what="")
# r <- c("#FF0000","#FFFFFF","#00FFFF","#C0C0C0","#0000FF","#808080","#0000A0","#000000","#FF0080","#FFA500","#800080","#A52A2A","#FFF00","#800000","#00FF00","#008000","#FF00FF","#808000","#56A5EC")
# g = g + scale_colour_manual(values=r, breaks=b)
# g = g + scale_colour_manual(values=r)
# g = g + scale_colour_brewer(palette="Spectral", breaks=b)
# g = g + scale_colour_gradientn(breaks=b, labels=format(b), colours=rainbow(40))
# g = g + labs(x=NULL, y=NULL)
# ggsave(g, width=6, height=4, filename="zips.png")
| 42,043 | mit |
a3b9adf94893b3719f72a2540819286d0145e061 | nextgenusfs/amptk | amptk/check_version.R | #!/usr/bin/env Rscript
is.installed <- function(mypkg){
is.element(mypkg, installed.packages()[,1])
}
Rversion <- R.Version()$version.string
if (!is.installed("dada2")){
dadaversion <- '0.0.0'
} else {
dadaversion <- packageVersion("dada2")
}
if (!is.installed("phyloseq")){
phyloseqversion <- '0.0.0'
} else {
phyloseqversion <- packageVersion("phyloseq")
}
if (!is.installed("lulu")){
luluversion <- '0.0.0'
} else {
luluversion <- packageVersion("lulu")
}
parts <- strsplit(Rversion, ' ')
Rvers <- parts[[1]][3]
output <- paste(Rvers, dadaversion, phyloseqversion, luluversion, sep=',')
cat(output,"\n")
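# Editor's hedged usage note (not part of the original script): run from the
# shell; prints comma-separated versions, with 0.0.0 standing in for packages
# that are not installed, e.g.
# $ Rscript check_version.R
# 3.4.1,1.6.0,1.20.0,0.0.0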
| 645 | bsd-2-clause |
e18dbca9e8c5d29c051cdf7f720c4babd14c660a | Tutuchan/morrisjs | R/morrisjs.R | #' morris.js plot
#'
#' This function prepares the widget to be drawn.
#'
#' In the case of a \code{data.frame} or a \code{tbl_df}, the first column must
#' be an object that can be interpreted as a \code{Date}, with the other
#' columns holding the data values.
#'
#' @param data the data to be drawn, can be a \code{ts}, \code{mts}, \code{xts},
#' \code{data.frame} or \code{tbl_df} object.
#' @param width the width of the widget (in pixels),
#' @param height the height of the widget (in pixels).
#'
#' @importFrom htmlwidgets createWidget shinyWidgetOutput shinyRenderWidget
#' @export
morrisjs <- function(data, width = NULL, height = NULL) {
# forward options using x
x = list(
data = data,
resize = TRUE,
hideHover = TRUE
)
# create widget
createWidget(
name = 'morrisjs',
x,
width = width,
height = height,
package = 'morrisjs'
)
}
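# Editor's hedged usage sketch (not part of the original file; the data frame
# shape follows the documentation above - first column a Date):
if (FALSE) {
  df <- data.frame(date = as.Date("2020-01-01") + 0:9, value = rnorm(10))
  morrisjs(df)
}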
#' Widget output function for use in Shiny
#'
#' @param outputId a character, the output variable to read from
#' @param width the width of the widget (must be valid CSS),
#' @param height the height of the widget (must be valid CSS)
#' @export
morrisjsOutput <- function(outputId, width = '100%', height = '400px'){
shinyWidgetOutput(outputId, 'morrisjs', width, height, package = 'morrisjs')
}
#' Widget render function for use in Shiny
#'
#' @param expr an R expression that evaluates to a \code{\link{morrisjs}} object,
#' @param env the environment in which to evaluate \code{expr},
#' @param quoted a logical: is \code{expr} a quoted expression?
#'
#' @export
renderMorrisjs <- function(expr, env = parent.frame(), quoted = FALSE) {
if (!quoted) { expr <- substitute(expr) } # force quoted
shinyRenderWidget(expr, morrisjsOutput, env, quoted = TRUE)
}
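# Editor's hedged Shiny sketch (not part of the original file) wiring the two
# functions above together; the data frame shape is an assumption:
if (FALSE) {
  library(shiny)
  ui <- fluidPage(morrisjsOutput("chart"))
  server <- function(input, output) {
    output$chart <- renderMorrisjs({
      morrisjs(data.frame(date = as.Date("2020-01-01") + 0:9, value = rnorm(10)))
    })
  }
  shinyApp(ui, server)
}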
| 1,751 | mit |
a3b9adf94893b3719f72a2540819286d0145e061 | nextgenusfs/ufits | amptk/check_version.R | #!/usr/bin/env Rscript
is.installed <- function(mypkg){
is.element(mypkg, installed.packages()[,1])
}
Rversion <- R.Version()$version.string
if (!is.installed("dada2")){
dadaversion <- '0.0.0'
} else {
dadaversion <- packageVersion("dada2")
}
if (!is.installed("phyloseq")){
phyloseqversion <- '0.0.0'
} else {
phyloseqversion <- packageVersion("phyloseq")
}
if (!is.installed("lulu")){
luluversion <- '0.0.0'
} else {
luluversion <- packageVersion("lulu")
}
parts <- strsplit(Rversion, ' ')
Rvers <- parts[[1]][3]
output <- paste(Rvers, dadaversion, phyloseqversion, luluversion, sep=',')
cat(output,"\n")
| 645 | bsd-2-clause |
c0900916ad10eaaf2acb8707ce3fde8d6294d5c9 | andymememe/datasciencecoursera | R Programming/makepower.R | # make.power() returns a closure: the inner
# function raises its argument to the power n captured from the enclosing call
make.power <- function(n) {
    pow <- function(x) {
        x ^ n
    }
    pow
}
# e.g. cube <- make.power(3); cube(2) == 8
| 73 | gpl-2.0 |
74a3a14eb41cec2ddaf4fcc310849e749244c614 | SMHendryx/quantifyBiomassFromPointClouds | R/watershedSegmentTrees.R | # Script segments a point cloud into clusters using watershed segmentation on rasterized point cloud
# Clear workspace:
rm(list=ls())
library(lidR)
library(feather)
library(data.table)
#terminal output coloring
library(crayon)
error <- red $ bold
warn <- magenta $ underline
note <- cyan
#cat(error("Error: subscript out of bounds!\n"))
#cat(warn("Warning: shorter argument was recycled.\n"))
#cat(note("Note: no such directory.\n"))
#bug in lasnormalize as of 11/2/17 related to :Error: 546 points were not normalizable because the DTM contained NA values. Process aborded
# no NAs in dtm
# so load an old version:
source("~/githublocal/quantifyBiomassFromPointClouds/quantifyBiomassFromPointClouds/R/lasNormalize.R")
#library(lidR, lib.loc = "/Users/seanmhendryx/githublocal/lidR/")
#read in las file
setwd("/Users/seanmhendryx/Data/thesis/Processed_Data/A-lidar/rerunWatershed/")
las = readLAS("Rectangular_UTMAZ_Tucson_2011_000564.las")
# get header:
oheader = las@header
#groundPoints = las %>% lasfilter(Classification == 2)
#plot(groundPoints)
#then change working directory for writing output:
setwd("/Users/seanmhendryx/Data/thesis/Processed_Data/A-lidar/rerunWatershed/output_20171103")
#make dtm:
dtm = grid_terrain(las, res = .1, method = "knnidw")
dtmRaster = raster::as.raster(dtm)
raster::plot(dtmRaster)
#lasRaster = raster::as.raster(las)
lasnorm = lasNormalizeR(las, dtm)
# compute a canopy image
chm = grid_canopy(lasnorm, res = 0.1, na.fill = "knnidw", k = 8)
chm = raster::as.raster(chm)
dev.new()
raster::plot(chm)
dev.off()
# smoothing post-process (e.g. 2x mean)
kernel = matrix(1,3,3)
schm = raster::focal(chm, w = kernel, fun = mean)
#schm = raster::focal(chm, w = kernel, fun = mean)
dev.new()
raster::plot(schm, col = height.colors(50)) # check the image
quartz.save("A-lidar_SCHM.png")
# save smoothed canopy height model as tif
raster::writeRaster(schm, "A-lidar_OPTICS_Outliers_Removed_Smoothed_CHM_no_edge_stretch.tif", format = "GTiff", overwrite = TRUE)
# tree segmentation
# 'th': Numeric. Height value below which a pixel cannot be part of a crown.
# Default 2.
crowns = lastrees(lasnorm, "watershed", schm, th = 1, extra = TRUE)
# Plotting point cloud of trees only:
# without rendering points that are not assigned to a tree
tree = lasfilter(lasnorm, !is.na(treeID))
# this would be a good plot to play on rotate using ImageMagick:
plot(tree, color = "treeID", colorPalette = pastel.colors(100))
#save tree point cloud (clustered point cloud):
writeLAS(tree, "A-lidar_Clustered_By_Watershed_Segmentation.las")
#write.csv(tree@data, "all20TilesGroundClassified_and_Clustered_By_Watershed_Segmentation.csv")
write_feather(tree@data, "A-lidar_Clustered_By_Watershed_Segmentation.feather")
# Plotting raster with delineated crowns:
library(raster)
contour = rasterToPolygons(crowns, dissolve = TRUE)
dev.new()
plot(schm, col = height.colors(50))
plot(contour, add = T)
quartz.save("A-lidar Segmented SCHM - MCC-Lidar Classing & KNN-IDW Rasterization.png")
#dev.off()
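# Editor's hedged note (not part of the original script): the clustered points
# written above can be reloaded later without re-running the segmentation, e.g.
if (FALSE) {
  tree <- readLAS("A-lidar_Clustered_By_Watershed_Segmentation.las")
  dt <- read_feather("A-lidar_Clustered_By_Watershed_Segmentation.feather")
}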
| 3,011 | gpl-3.0 |
dd2bd46f4e6d1172285b0e9f0bfe588705613428 | andrewdefries/andrewdefries.github.io | FDA_Pesticide_Glossary/CMU.R | library("knitr")
library("rgl")
#knit("CMU.Rmd")
#markdownToHTML('CMU.md', 'CMU.html', options=c("use_xhtml"))
#system("pandoc -s CMU.html -o CMU.pdf")
knit2html('CMU.Rmd')
| 174 | mit |
263ffbd199b804c501306857ec77c01624ef8651 | amschwinn/data_mining_lab | Lab 1/Schwinn_DataMining_Lab_1_Ex2.R | #############################
#Data Mining Practical Session
#Lab 1 Exercise 2
#
#Subject: Clustering with Iris Dataset.
#K-Means & Hierarchical Clustering
#
#Author: Austin Schwinn
#
#Jan 17, 2017
#############################
#install.packages('rstudioapi')
library(rstudioapi)
#Set working directory
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
#Load in the data
data("iris")
irisFeat <- iris[1:4]
irisLabel <- as.matrix(iris[5])
####
#Data Preparation
#Normalize data with z-score transformation
normalize <- function(row) { (row - mean(row))/ sd(row)}
iris_zs <- data.frame(apply(irisFeat,2,normalize))
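# Editor's hedged sanity check (not part of the original script): after
# z-scoring, every column should have mean ~0 and sd 1.
if (FALSE) {
  round(colMeans(iris_zs), 10)  # all ~0
  apply(iris_zs, 2, sd)         # all 1
}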
####
#K-Means
#Unprocessed Cluster
cl <- kmeans(irisFeat, 3)
print(cl)
plot(irisFeat[3:4], col = cl$cluster)
#Normalizaed Cluster
cl_norm <- kmeans(iris_zs, 3)
print(cl_norm)
plot(iris_zs[3:4], col = cl_norm$cluster)
#The results seem less stable with normalized data than with non-preprocessed
#data, likely because z-scoring gives the first two (sepal) features more
#influence on the distances, adding noise to the cluster structure
####
#Hierarchical Clustering
#Using euclidean distance and average for linkage criteria
hc <- hclust(dist(irisFeat, method="euclidean"), "ave")
print(hc)
plot(hc)
#Cluster with ward linkage criteria and manhattan distance
hc2 <- hclust(dist(irisFeat, method="manhattan"), "ward")
plot(hc2)
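# Editor's hedged follow-up (not part of the original script): cut the
# dendrogram into 3 clusters and cross-tabulate against the true species.
if (FALSE) {
  table(cutree(hc2, k = 3), irisLabel)
}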
#Manhattan distance with ward linkage performed better for this
#use case. This H.Clus dendrogram has a clear 3 way split that
#matches the 3 plant species present in this use case
| 1,510 | mit |
cadfe7ab0699d8f30e3e85674ab4e70336aef4e2 | PFgimenez/PhD | R-files/data_renault_small_header.R | library(ggplot2)
#--------------------------------------------------------------------------------------------
#Global parameters
dataset_name = "renault_small_header"
taille_img_x = 1024/2
taille_img_y = 720/2
#end of global parameters
#--------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------
#data
#result
oracle = c(0.68502658003544,0.7502584170112226,0.7979548139397519,0.8242395156526875,0.8485307147076196,0.8646264028352038,0.8746308328411104,0.8875886001181335,0.8910956881275842,0.8964855286473715,0.9063053750738335,0.91313496751329,0.912322799763733,0.9178972238629651,0.9199276432368576,0.9201122268163024,0.926055818074424,0.9288245717660957,0.9296736562315416,0.9353957471943296,0.9353588304784406,0.9369462492616657,0.940711754282339,0.9426683402244537,0.9481320141760189,0.9475782634376846,0.9487595983461311,0.9501624335499114,0.9549985233313645,0.9559583579444773,0.9571766095688128,0.9594654459539279,0.9613851151801536,0.9631571175428234,0.9637847017129356,0.9664796219728293,0.966738038984052,0.9675871234494979,0.9679562906083875,0.9702820437093916,0.9718694624926166,0.9729031305375074,0.9752658003544005,0.9743428824571766,0.9768532191376255,0.978920555227407,0.9788098050797401,0.9800649734199646)
Naif = c(0.6878322504430006,0.7516612522150029,0.7948168930891908,0.8258269344359126,0.8426609568812758,0.8545112226816303,0.8633712344949793,0.8685764914353219,0.8756275841701122,0.8799468399291199,0.8792823390431187,0.8850782634376846,0.88559509746013,0.8845614294152392,0.8867764323685765,0.8880316007088009,0.8918340224453633,0.8910956881275842,0.8911326048434731,0.8922770230360307,0.8948981098641465,0.8963378617838157,0.8986636148848198,0.8929784406379209,0.897777613703485,0.8962640283520378,0.9004725339633786,0.8990696987595983,0.9011739515652688,0.9020968694624926,0.9001033668044891,0.9034627879503839,0.9013954518606024,0.9042011222681631,0.9020599527466037,0.9008786178381571,0.9043487891317188,0.9037950383933845,0.9027982870643827,0.9052347903130538,0.9051609568812758,0.9054193738924985,0.9071175428233904,0.9035366213821618,0.9091848789131719,0.9049025398700532,0.9078927938570585,0.90748670998228)
jointree = c(0.6878322504430006,0.7526580035440047,0.7941893089190786,0.8277835203780272,0.8470171293561725,0.8625590667454224,0.8761444181925576,0.8808697578263438,0.8924616066154755,0.8967808623744832,0.9006940342587123,0.9083727111636148,0.9097386296515062,0.9083727111636148,0.912322799763733,0.9146485528647371,0.9214412285883048,0.9216627288836385,0.9231024808033077,0.9257235676314235,0.9273479031305375,0.9287507383343178,0.9312241582988777,0.931814825753101,0.9340667454223273,0.9353588304784406,0.9365770821027761,0.9389028352037803,0.9375,0.9397519196692262,0.9406010041346722,0.9424468399291199,0.9441450088600118,0.9435174246898996,0.9455109273479031,0.9453632604843473,0.9486857649143532,0.948058180744241,0.9498301831069108,0.9525989367985824,0.9511591848789132,0.952931187241583,0.9543709391612523,0.9520451860602481,0.9551092734790313,0.9554415239220319,0.956179858239811,0.9568074424099232)
wmv = c(0.5945067926757236,0.7494093325457767,0.7923434731246308,0.8279681039574719,0.8467217956290608,0.8623744831659775,0.8753322504430006,0.8807959243945659,0.8900251033668045,0.8955995274660367,0.9003987005316008,0.9068591258121678,0.9080404607206143,0.9065637920850561,0.9089633786178382,0.9133933845245127,0.9174911399881867,0.9165682220909628,0.9196323095097461,0.9231024808033077,0.9227333136444182,0.9259081512108683,0.9279385705847608,0.9265357353809806,0.9298582398109864,0.9296736562315416,0.931814825753101,0.9323316597755463,0.9346204961606616,0.9357279976373302,0.9343620791494389,0.938312167749557,0.9372784997046663,0.9390135853514472,0.9396780862374483,0.9395673360897815,0.9434435912581217,0.9434435912581217,0.9425945067926758,0.9445510927347903,0.9440711754282339,0.9436650915534555,0.9462492616656822,0.9441450088600118,0.9460646780862374,0.9465445953927939,0.9479843473124631,0.9473198464264619)
vpop = c(0.6563792085056114,0.7331659775546367,0.7830035440047254,0.8160809214412286,0.8404459539279385,0.8577968103957472,0.8686134081512109,0.875849084465446,0.8849305965741288,0.8914648552864737,0.8955995274660367,0.9011739515652688,0.9044226225634967,0.9072652096869462,0.9088157117542823,0.9107722976963969,0.9135779681039575,0.9173803898405198,0.918820141760189,0.923693148257531,0.9219949793266391,0.9268310691080921,0.930190490253987,0.927790903721205,0.9305965741287655,0.9303012404016539,0.9348050797401063,0.9357649143532192,0.9384967513290018,0.9380537507383343,0.9374261665682221,0.9419669226225635,0.9418192557590077,0.9427790903721205,0.9432959243945659,0.9438865918487891,0.9457693443591259,0.9466553455404607,0.9487965150620201,0.9499040165386887,0.9484642646190194,0.9510853514471352,0.9507531010041347,0.9492395156526875,0.9530419373892498,0.9518975191966923,0.9546662728883638,0.9532634376845835)
vnaif = c(0.6740992321323095,0.7423582398109864,0.7871751329001772,0.8200679267572357,0.8422917897223863,0.8615623154164206,0.871788245717661,0.8802052569403426,0.8884746012994684,0.8947873597164796,0.8981836975782634,0.9059362079149439,0.9070806261075015,0.9101447135262847,0.9121382161842883,0.9143901358535145,0.918007974010632,0.9190785587714116,0.9221426461901949,0.9272002362669817,0.925317483756645,0.9302274069698759,0.9307442409923213,0.9324424099232133,0.9342144122858831,0.9352849970466627,0.9372784997046663,0.9390135853514472,0.9407855877141169,0.9408225044300059,0.9418561724748966,0.9451048434731246,0.9440342587123449,0.9461754282339043,0.9462123449497932,0.9476890135853514,0.9478366804489072,0.9492395156526875,0.9522666863555818,0.9531157708210277,0.9514914353219137,0.9530788541051388,0.9547401063201417,0.9528573538098051,0.9564382752510336,0.9560691080921441,0.9566966922622564,0.9572504430005907)
lextree = c(0.5280197873597164, 0.6412433549911399, 0.7050723567631424, 0.7468989958653278, 0.783261961015948, 0.8031231541642055, 0.8203263437684584, 0.8330995274660367, 0.846574128765505, 0.850708800945068, 0.8615623154164206, 0.8674689899586533, 0.8723419964559953, 0.8771042528056704, 0.8828632604843473, 0.8862965150620201, 0.8887699350265801, 0.8901727702303603, 0.8942336089781453, 0.8995865327820437, 0.902613703484938, 0.9030567040756055, 0.9059731246308328, 0.9053824571766096, 0.9110676314235086, 0.9092587123449498, 0.9129503839338452, 0.9157929710572947, 0.9184878913171884, 0.9184878913171884, 0.9205183106910809, 0.9215150620200827, 0.922917897223863, 0.923693148257531, 0.9270525694034258, 0.9264249852333136, 0.930153573538098, 0.9315564087418783, 0.9322209096278795, 0.9321470761961016, 0.9330699940933255, 0.9338083284111045, 0.935100413467218, 0.9380906674542233, 0.9377953337271117, 0.9392720023626698, 0.9392720023626698, 0.9418561724748966)
# MEDIUM
oracle = c(0.7603138103611524,0.7950764236439876,0.8167861490599215,0.8414716623833356,0.8583119166779386,0.8778574327066143,0.8894224266197754,0.9013932097930475,0.9117408359258758,0.9216826727985933,0.9276342486135534,0.9362234546192344,0.9372379277695116,0.9435276613012309,0.9493439740294873,0.9523873934803192,0.9559718652779656,0.9586094954686866,0.9601650209657785,0.964087650480184,0.9654402813472204,0.9662518598674422,0.9686189638847559,0.9705802786419586,0.9692952793182741,0.9717300148789395,0.9709184363587177,0.9730150142026242,0.9708508048153659,0.9744352766130123,0.9759231705667524,0.9736913296361422,0.9767347490869742,0.9753144866765859,0.9769376437170296,0.9772758014337887,0.9773434329771405,0.9776139591505478,0.9801163262545651,0.9798458000811578,0.9806573786013797,0.977140538347085,0.9796429054511024,0.979034221560936)
Naif = c(0.7603138103611524,0.7944677397538212,0.8170566752333288,0.8389016637359664,0.8511429730826457,0.8691329636142296,0.8769782226430407,0.8863790071689436,0.8961855809549574,0.9049776815906939,0.9145137292033004,0.9193832003246314,0.919518463411335,0.9237116190991479,0.9309481942377925,0.9306776680643852,0.9373055593128635,0.9371702962261599,0.936020559989179,0.9375084539429189,0.9422426619775464,0.9433247666711755,0.9454889760584336,0.9458947653185446,0.9440687136480455,0.9454213445150819,0.9461652914919518,0.9471121330988773,0.9452184498850263,0.9498173948329501,0.9515758149600974,0.951711078046801,0.9502231840930611,0.951372920330042,0.951711078046801,0.9511700256999864,0.9518463411335046,0.9510347626132828,0.9506289733531719,0.9557689706479102,0.9571892330582984,0.9521844988502638,0.9519139726768565,0.9546868659542811)
jointree = c(0.7603138103611524,0.794873529013932,0.8178682537535507,0.8404571892330583,0.8554713918571621,0.8743405924523198,0.8861084809955363,0.8957797916948465,0.9066008386311376,0.9147842553767077,0.9223589882321115,0.9274313539834979,0.930407141890978,0.9350060868389016,0.9423102935208981,0.9433923982145273,0.9496145002028946,0.9498173948329501,0.9490058163127283,0.9534018666305966,0.9542810766941702,0.9571892330582984,0.9586094954686866,0.9579331800351684,0.9576626538617611,0.9617205464628703,0.9600973894224266,0.9612471256594075,0.9597592317056676,0.9613823887461111,0.9664547544974976,0.964425808196943,0.962193967266333,0.9642905451102394,0.9665900175842013,0.9648315974570539,0.9648315974570539,0.9640200189368321,0.9652373867171649,0.9674015961044231,0.9676044907344785,0.9635465981333694,0.9664547544974976,0.9667252806709049)
wmv = c(0.6979575273907751,0.7939266874070067,0.8170566752333288,0.8351819288516164,0.854389287163533,0.8700121736778034,0.8816447991343163,0.8905721628567564,0.899702421209252,0.9056539970242121,0.9149871500067631,0.9184363587177059,0.9215474097118896,0.9254700392262951,0.9314892465846071,0.9313539834979034,0.9352766130123089,0.9385905586365481,0.9368997700527526,0.94312187204112,0.9402137156769917,0.9440010821046936,0.9454889760584336,0.9470445015555254,0.9469092384688219,0.94758555390234,0.9462329230353037,0.9492087109427837,0.9464358176653591,0.950967131069931,0.9513052887866901,0.9491410793994319,0.9485323955092655,0.9505613418098201,0.9517787095901529,0.9503584471797646,0.9514405518733937,0.9532666035438929,0.9545516028675775,0.9549573921276884,0.953604761260652,0.9508318679832274,0.9534018666305966,0.9517787095901529)
vpop = c(0.7324496145002029,0.7777627485459219,0.8017719464358176,0.8291627214933045,0.8490463952387394,0.8655484918165832,0.8830650615447044,0.8893547950764237,0.902340051399973,0.910929257405654,0.9199918842147978,0.9272960908967942,0.930407141890978,0.9358176653591235,0.9419721358041391,0.9437981874746382,0.9499526579196538,0.9517787095901529,0.9510347626132828,0.9548221290409847,0.9569187068848911,0.9592858109022048,0.9600973894224266,0.9615852833761667,0.9617881780062221,0.9636142296767212,0.9638171243067767,0.9634789665900176,0.9632760719599621,0.9672663330177195,0.9686189638847559,0.9650344920871095,0.9660489652373867,0.9649668605437576,0.968213174624645,0.9663871229541459,0.9684837007980522,0.9686865954281076,0.9698363316650886,0.9729473826592723,0.9706479101853104,0.9677397538211822,0.9696334370350331,0.9699039632084404)
vnaif = c(0.7487488164479913,0.7824293250371973,0.8092114162045178,0.8332206140944136,0.8528337616664412,0.8722440146084134,0.8844853239550926,0.8928040037873665,0.9056539970242121,0.9155282023535777,0.9216826727985933,0.930745299607737,0.9320979304747734,0.9373055593128635,0.9424455566076018,0.9452860814283782,0.94907344785608,0.950967131069931,0.9520492357635602,0.9559042337346139,0.958068443121872,0.9579331800351684,0.9603679155958339,0.9626673880697958,0.961855809549574,0.9642905451102394,0.9642905451102394,0.9652373867171649,0.9632084404166104,0.9653726498038685,0.9679426484512377,0.9657784390639794,0.9663871229541459,0.9660489652373867,0.9689571216015149,0.9669958068443122,0.9691600162315704,0.9691600162315704,0.9705802786419586,0.9728121195725686,0.972676856485865,0.968213174624645,0.969701068578385,0.9706479101853104)
lextree = c(0.5476126065196808, 0.6316786149059922, 0.6855133234140403, 0.7189909373731909, 0.7560530231299878, 0.7674827539564453, 0.7875693223319357, 0.8029893142161504, 0.8101582578114432, 0.8223319356147707, 0.8269308805626945, 0.8285540376031381, 0.8424861355336128, 0.8464087650480184, 0.8439740294873529, 0.8531042878398485, 0.8537806032733667, 0.8601379683484377, 0.8594616529149195, 0.8649398079264169, 0.8680508589206005, 0.8718382253483025, 0.8766400649262817, 0.8745434870823752, 0.8815095360476126, 0.8856350601920736, 0.8885432165562018, 0.8909779521168673, 0.8863790071689436, 0.8932097930474774, 0.8923982145272555, 0.9001758420127147, 0.898214527255512, 0.902678209116732, 0.9055187339375085, 0.9074124171513593, 0.9050453131340457, 0.9114026782091167, 0.9087650480183957, 0.9168808332206141, 0.9190450426078723, 0.921750304341945, 0.919180305694576, 0.927025564723387)
# BIG
oracle = c(0.8348566914917626,0.8585533739562176,0.8612615662378695,0.8746896863010607,0.888004965019183,0.8962423832092079,0.9022793951703905,0.9117580681561724,0.9126607989167231,0.920503272399007,0.9255247122545701,0.929981945384789,0.9330850823741819,0.9390656736628301,0.9395170390431054,0.9383886255924171,0.9461182577296321,0.9457797336944256,0.9480929812683367,0.9497856014443692,0.9514218009478673,0.950575490859851,0.9551455653351388,0.9552019860076733,0.9588693297224103,0.9575152335815843,0.9621417287294065,0.9614646806589935,0.9648499210110585,0.9666553825321598,0.9664296998420221,0.9657526517716091,0.9686865267433988,0.9704355675919657,0.9693071541412773,0.9721846084405326,0.9707176709546378,0.9743850146693749,0.976980365605958,0.9752877454299256,0.9755698487925976,0.9757955314827352,0.9781651997291808,0.9795192958700067,0.9804220266305574,0.981437598736177,0.9803091852854886,0.9836380049650192,0.9827352742044685,0.9807041299932295,0.9852742044685172,0.9853306251410516,0.9866283006093433,0.9849356804333108,0.9869104039720153,0.9865718799368088,0.9867411419544121,0.9872489280072219,0.9895621755811329,0.988772286165651,0.9893929135635297,0.9892236515459264,0.9892800722184608,0.9897878582712706,0.9902392236515459,0.9917061611374408,0.9914804784473031,0.9896750169262017,0.9914240577747687,0.9919882645001128,0.9918190024825095,0.992552471225457,0.9922703678627849,0.9926088918979914,0.9940194087113519,0.99322951929587,0.9933423606409388,0.9938501466937486,0.9940194087113519,0.9946400361092305,0.9939629880388174,0.9941886707289551,0.9942450914014895,0.9950914014895057,0.9946400361092305,0.9954863461972466,0.994583615436696)
Naif = c(0.8348566914917626,0.8585533739562176,0.8613744075829384,0.8726585420898217,0.8860302414804785,0.8930828255472806,0.8965809072444143,0.9057210561949899,0.9040848566914917,0.9109681787406906,0.9151433085082374,0.9193184382757843,0.920108327691266,0.9219137892123674,0.9189799142405778,0.9174565560821485,0.9268223877228616,0.9205596930715414,0.9233807266982622,0.9211238997968856,0.9203904310539381,0.9170051907018731,0.9243398781313473,0.920108327691266,0.9258632362897766,0.9185849695328369,0.923493568043331,0.9231550440081245,0.9247348228390883,0.9224779959377116,0.923888512751072,0.9223651545926428,0.9229293613179869,0.9223651545926428,0.9207853757616791,0.9206725344166102,0.9226472579553149,0.9223651545926428,0.9233243060257278,0.9250169262017603,0.9209546377792823,0.9170051907018731,0.9212931618144888,0.9250733468742948,0.9232678853531934,0.9259760776348455,0.922534416610246,0.9282893252087565,0.9208417964342135,0.9208982171067479,0.9237756714060031,0.9224215752651772,0.9273865944482058,0.9223651545926428,0.92118032046942,0.9225908372827805,0.9248476641841571,0.9249040848566915,0.9208417964342135,0.9227600993003836,0.9276686978108779,0.9250733468742948,0.925242608891898,0.9258068156172422,0.9281764838636877,0.9243398781313473,0.9268223877228616,0.925919656962311,0.9271044910855337,0.9255811329271045,0.9258068156172422,0.9251297675468292,0.9300947867298578,0.9268223877228616,0.9278943805010156,0.9251861882193636,0.9273301737756714,0.9268223877228616,0.9285150078988942,0.9276686978108779,0.9275558564658091,0.9229293613179869,0.9270480704129993,0.9293613179869104,0.9281200631911533,0.9260324983073798,0.9251861882193636)
jointree = c(0.8348566914917626,0.8549988715865493,0.8540397201534642,0.8636876551568494,0.8755924170616114,0.8810652222974498,0.8837734145791017,0.8911081020085759,0.8937598736176935,0.8960731211916046,0.903577070638682,0.9057774768675243,0.9092191378921237,0.9108553373956217,0.9113631234484315,0.9102347099977431,0.9174001354096141,0.9143534190927556,0.919826224328594,0.9146355224554277,0.9152561498533063,0.9146919431279621,0.9203340103814037,0.917795080117355,0.9240577747686752,0.9212367411419544,0.9251861882193636,0.9237192507334687,0.9261453396524486,0.9273301737756714,0.9251861882193636,0.9246784021665538,0.9271044910855337,0.9300947867298578,0.9273301737756714,0.9287406905890319,0.9284585872263598,0.9318438275784248,0.9338749717896637,0.9309410968178741,0.9304333107650643,0.9291356352967728,0.9320695102685624,0.9360753780185059,0.9316745655608215,0.9372037914691943,0.9306025727826676,0.9382193635748138,0.9361882193635748,0.9350033852403521,0.9365267433987813,0.9354547506206274,0.9405890318212593,0.9365267433987813,0.9409275558564658,0.9377679981945385,0.9414353419092756,0.9400248250959151,0.9386707289550892,0.9368088467614534,0.9410403972015347,0.940419769803656,0.9398555630783119,0.9423380726698263,0.9418867072895509,0.9429587000677048,0.9393477770255021,0.9443127962085308,0.9461746784021665,0.9457797336944256,0.9430151207402392,0.9459489957120288,0.9453847889866848,0.9448205822613406,0.9513089596027985,0.9448205822613406,0.9457233130218913,0.9459489957120288,0.9439178515007899,0.9447641615888062,0.9470774091627172,0.9480365605958023,0.9473030918528549,0.949221394719025,0.9470209884901828,0.9477544572331302,0.9487136086662153)
wmv = c(0.7950236966824644,0.8583276912660799,0.8601895734597157,0.8716993906567366,0.8851275107199278,0.8905438952832317,0.8966373279169487,0.9034078086210787,0.9054953735048522,0.909388399909727,0.9153125705258407,0.9190363349131122,0.920503272399007,0.9225908372827805,0.9206161137440758,0.9208417964342135,0.9276686978108779,0.9282893252087565,0.9278379598284812,0.9271609117580681,0.9289099526066351,0.9254118709095013,0.9317874069058903,0.9286842699164974,0.9334236064093884,0.9291920559693072,0.9347777025502144,0.9334800270819228,0.9343263371699391,0.9359625366734371,0.9335928684269916,0.933649289099526,0.9346648612051456,0.9378244188670729,0.9341006544798014,0.9368088467614534,0.9372037914691943,0.9390656736628301,0.9395734597156398,0.9385578876100203,0.9393477770255021,0.935793274655834,0.9402505077860528,0.939742721733243,0.9403633491311216,0.9422816519972919,0.9396298803881742,0.9422252313247574,0.9375987361769352,0.9379372602121417,0.9412096592191379,0.9369781087790566,0.9432972241029113,0.9391220943353645,0.943522906793049,0.9430715414127736,0.9443127962085308,0.943522906793049,0.9422252313247574,0.9403069284585872,0.9463439404197698,0.9417738659444821,0.9434664861205145,0.9436921688106522,0.9479237192507335,0.942168810652223,0.9401940871135184,0.9473030918528549,0.9460618370570977,0.9466260437824419,0.9441435341909276,0.9454412096592192,0.9449334236064094,0.945554051004288,0.950293387497179,0.9457233130218913,0.9466260437824419,0.9464003610923042,0.944877002933875,0.9451026856240127,0.9433536447754457,0.9469645678176484,0.9448205822613406,0.9486007673211465,0.9461746784021665,0.9473030918528549,0.9466824644549763)
vpop = c(0.806872037914692,0.8334461746784022,0.8447867298578199,0.8617129316181449,0.8754231550440081,0.8833220491988264,0.8934777702550214,0.9020537124802528,0.9027871812232002,0.910347551342812,0.913055743624464,0.9170616113744076,0.9228729406454524,0.9276686978108779,0.9272173324306026,0.9255247122545701,0.9312796208530806,0.9338185511171293,0.9344955991875423,0.9351162265854209,0.9371473707966599,0.935793274655834,0.940814714511397,0.9402505077860528,0.9433536447754457,0.9429587000677048,0.9449334236064094,0.9478672985781991,0.9474723538704581,0.9486571879936809,0.9523809523809523,0.9488264500112842,0.9493906567366283,0.9537914691943128,0.9487700293387498,0.9534529451591063,0.9549763033175356,0.9549198826450012,0.955991875423155,0.9567817648386369,0.955991875423155,0.9552584066802077,0.9570074475287745,0.9602234258632363,0.9589257503949447,0.9619160460392688,0.9614646806589935,0.9642857142857143,0.9581922816519973,0.9585872263597383,0.9615211013315279,0.9614082599864591,0.9658090724441435,0.9624238320920785,0.9637215075603701,0.9628187767998194,0.9654141277364027,0.9662040171518844,0.9663168584969533,0.962762356127285,0.9652448657187994,0.96614759647935,0.9650191830286617,0.964116452268111,0.9669939065673663,0.9663732791694877,0.9663168584969533,0.9680094786729858,0.968855788761002,0.9689122094335364,0.967219589257504,0.9669939065673663,0.9674452719476416,0.9669939065673663,0.9714511396975852,0.9679530580004514,0.9681223200180546,0.9686301060708644,0.9689122094335364,0.9678402166553826,0.9670503272399007,0.9699277815391559,0.9699842022116903,0.9683480027081923,0.9691943127962085,0.9694199954863462,0.9669374858948319)
vnaif = c(0.813247573911081,0.8426991649740465,0.8523470999774317,0.8663958474385015,0.8825885804558791,0.8895283231776123,0.8969194312796208,0.9077522004062288,0.9070751523358158,0.915086887835703,0.9195441209659219,0.9218009478672986,0.9260889189799142,0.9322387722861657,0.9314488828706838,0.9304897314375987,0.9373730534867976,0.9369781087790566,0.9391785150078988,0.938106522229745,0.9409839765290002,0.9377115775220041,0.9435793274655834,0.9396863010607086,0.9457233130218913,0.9444820582261341,0.9478672985781991,0.9474159331979237,0.9484315053035433,0.9498420221169036,0.9503498081697134,0.9480365605958023,0.950970435567592,0.9533965244865719,0.9496163394267659,0.9517603249830738,0.9523809523809523,0.9555969307154141,0.9552019860076733,0.9537914691943128,0.9539043105393816,0.9545813586097947,0.954355675919657,0.9561047167682238,0.9568946061837057,0.9586436470322727,0.956668923493568,0.9605055292259084,0.9558226134055517,0.9571767095463778,0.9596592191378921,0.9576844955991876,0.9623674114195441,0.9594335364477544,0.9623109907470097,0.9609568946061837,0.9621417287294065,0.9622545700744752,0.9609004739336493,0.9590950124125479,0.9640036109230422,0.9610133152787181,0.9618032046942,0.9620853080568721,0.9654141277364027,0.9628751974723538,0.9632137215075603,0.9643985556307831,0.9658090724441435,0.9669374858948319,0.9638907695779734,0.9650756037011962,0.9631008801624915,0.9645678176483864,0.9686301060708644,0.9650756037011962,0.9658654931166779,0.9655833897540058,0.9655833897540058,0.9648499210110585,0.9646242383209208,0.9649063416835929,0.9653012863913338,0.9670503272399007,0.9660911758068156,0.9666553825321598,0.9646242383209208)
lextree = c(0.7424611910810048, 0.765379621789444, 0.7834942139429861, 0.8010668924640135, 0.8163872424499012, 0.8279762912785775, 0.8401749929438329, 0.8500141123341801, 0.8590008467400508, 0.8656675134067174, 0.8728760937058989, 0.8791419700818516, 0.8845554614733276, 0.8876319503245836, 0.8940728196443691, 0.8959074230877787, 0.8986226361840248, 0.9025063505503811, 0.9032853513971211, 0.9054925204628845, 0.9074174428450466, 0.9084165961049958, 0.9105108664973186, 0.9119051651143099, 0.912125317527519, 0.9132317245272368, 0.9145695738075078, 0.9151679367767429, 0.9163985323172452, 0.9175726785210274, 0.918650860852385, 0.917888794806661, 0.9189556872706746, 0.9199040361275755, 0.9213773638159751, 0.9213096246119108, 0.9213265594129268, 0.9214451030200396, 0.9231329381879763, 0.9231216483206323, 0.9250014112334181, 0.9257239627434377, 0.9262094270392323, 0.9265707027942421, 0.9276601749929438, 0.9275246965848152, 0.9280045159469377, 0.9283657917019476, 0.9296528365791702, 0.929427039232289, 0.9302229748800451, 0.9295455828394016, 0.9308439175839683, 0.9294552639006491, 0.931927744848998, 0.93193338978267, 0.9328252893028507, 0.9330285069150438, 0.9328027095681626, 0.9351566469093988, 0.9336325148179508, 0.9352977702511995, 0.9361332204346599, 0.9349026248941575, 0.9370251199548405, 0.9373525261078183, 0.9368557719446796, 0.9378267005362687, 0.9378323454699408, 0.9387806943268416, 0.937894439740333, 0.9395032458368614, 0.9404346598927462, 0.9399040361275754, 0.9416991250352809, 0.94152977702512, 0.9420604007902907, 0.941930567315834, 0.9419700818515383, 0.942303132938188, 0.9430200395145357, 0.9435393734123624, 0.9445611064069998, 0.9447699689528648, 0.944990121366074, 0.9449336720293536, 0.9460795935647757)
#times (recommendation times, in the unit plotted below); the
#SMALL/MEDIUM/BIG blocks overwrite each other, as above
Naiftemps = c(6.130709539279385E-4,0.0010267396264028352,0.0013801202008269344,0.0017326179858239811,0.0020518761444181925,0.002346206364441819,0.002692112743650325,0.002958922474896633,0.003319325125516834,0.003587738038984052,0.003918995643827525,0.004165597903130537,0.0044631892350856465,0.0047999383490844654,0.0050834096647962195,0.005496392166272888,0.005642942336089782,0.005919015468103957,0.00625843687241583,0.006549908446544596,0.006807933069994093,0.007017995976077968,0.007500659111045482,0.007785269750443,0.007988596204961607,0.008440687869167159,0.008594035624630833,0.00897847844063792,0.00915021902687537,0.009574535513880685,0.00982102030419374,0.010038620016243355,0.010323273922031896,0.010534935727997637,0.011071919115475488,0.011099380094506792,0.011406020230360307,0.011662656083874779,0.01210107704518606,0.012279355212640284,0.01246057553160071,0.01273974667749557,0.013162413319551093,0.013605502362669816,0.013554199940933254,0.0139077325753101,0.014163541088304784,0.014468727148552865)
jointreetemps = c(0.05651038696101595,0.05548537341258122,0.06120626594802126,0.0623701068738925,0.062498095688127586,0.06209152920112227,0.061362323464264616,0.061881492247489664,0.059686154865623155,0.058089586200531604,0.056820131681925574,0.05545843233165978,0.0545152713747785,0.052729507567926756,0.05129026760927348,0.050216916014471355,0.04846936488481984,0.04752081940342587,0.0458392634746013,0.0454242981024808,0.0435369152023036,0.042790418266391024,0.0409652832250443,0.03991381951417602,0.03865888249409333,0.037593355286473715,0.03649868366066155,0.03593367609273479,0.0345449454001772,0.03359148778056704,0.03271498050797401,0.03201600616509155,0.031172961237448317,0.030205463046367394,0.02980450838009451,0.0286410023257531,0.02801563363851152,0.02784097995422327,0.026706283298877732,0.0259410680744241,0.02541740527170703,0.024615321249261666,0.024441794853809803,0.023971850634967512,0.02294020736119315,0.02211592062906084,0.021499690231836975,0.020839556445658595)
wmvtemps = c(1.4945923339855287,1.5503172643975192,1.5912161551978736,1.6307620040977555,1.6681299061946249,1.7010002079887774,1.731601563792085,1.757977097164796,1.7833442033003544,1.8081031587049616,1.8335790233682812,1.8605553988112817,1.8899077695289428,1.9132010174985232,1.9398820778942705,1.9662830562979918,1.9924716027392204,2.0200019507900175,2.0456657402170704,2.071098103440638,2.097055232944477,2.122521191523922,2.1478210756054343,2.1732904908446544,2.198280694735676,2.223008907892794,2.247929881792676,2.2736685642350856,2.2997701271411697,2.3253274643384523,2.351896904348789,2.3767740933254577,2.4029979824276433,2.4299604074497934,2.456764699461016,2.483868777945954,2.5118990184214414,2.5411084656305376,2.571386206733609,2.603331608239811,2.6376037483387478,2.676942267018606,2.722561480249557,2.7757107274808033,2.8423695671145897,2.9231620128101006,3.0207718459465447,3.132521652281453)
vpoptemps = c(0.619880177753987,0.6692052204666273,0.7317778304415239,0.7835428034554046,0.8393094951269935,0.8925847076934436,0.9401803382309509,0.9840903001698169,1.026910106024808,1.0680262242321323,1.1092238419595393,1.1498626991656822,1.1903069023552866,1.2316474228440637,1.2740687191376254,1.316035523405198,1.3584633255685175,1.400294085757531,1.4422439237669817,1.4846440511665682,1.5258952243797992,1.568353883601595,1.6096364962714116,1.6515207696396927,1.6928072715224454,1.7338529835720615,1.774978728440638,1.8153042948538098,1.8561382867321323,1.8965240490992321,1.9361281432737745,1.9757038995126994,2.0158091659775548,2.0561014703558773,2.094314386961016,2.1337520829149437,2.172147336163615,2.2094566018163024,2.246534808992912,2.282568659664796,2.318733150361784,2.35474384476521,2.3906763707914944,2.4295996256645007,2.4761857712640283,2.536152407966627,2.6127072540977556,2.7319425257678676)
vnaiftemps = c(0.5654878922031896,0.6056752266317188,0.6490572558697578,0.6829557048139397,0.720100048471648,0.7555922484494979,0.7874808443591258,0.8166435704370939,0.8435650142498523,0.8696579058254578,0.8955631147002363,0.919717621492912,0.9444845379134672,0.9698594909184879,0.9959814455478441,1.0217668153425872,1.048007879023922,1.0745154691007088,1.1007386338230951,1.1269935481024809,1.15379970492469,1.1801322384819846,1.2061039145747194,1.2331453213600119,1.2602134606836977,1.287519607722977,1.3146178301092735,1.3419053928307738,1.3691115496898996,1.3972928465002954,1.4253422753987006,1.4536974013216184,1.4830541429415238,1.513018030936208,1.542581987263733,1.57262709757088,1.6032094773331365,1.6335512685321913,1.6641784893310692,1.6960786059878914,1.729621013511518,1.7641430136961016,1.8012120921441228,1.8432790326343769,1.8957278415534553,1.962086357981394,2.0506174140209685,2.171628945658594)
lextreetemps = c(9.385951491435322E-4, 9.206999003248672E-4, 9.574053750738335E-4, 0.0010206393532191376, 0.0010126803049320733, 0.0010660335240696987, 0.0010511686909332545, 0.0011193464153868872, 0.0011249905973124631, 0.0011314190527170703, 0.001175395976077968, 0.0011822279976373303, 0.0012078975413467219, 0.0012309438275251033, 0.0012884719137625518, 0.0013359795407560542, 0.0013247912655050206, 0.0013227320326343768, 0.001407098006497342, 0.0013673426055818075, 0.0013887386001181334, 0.0014351453189604252, 0.0014999887588600118, 0.001494079758564678, 0.0014659799320732427, 0.0014828400472533963, 0.001497552425428234, 0.001513701391760189, 0.0015271906453041936, 0.0015380687020082693, 0.001590135713230951, 0.0015552598272297697, 0.0016029215667454223, 0.0015638766870939161, 0.0016160766723272298, 0.00157890957250443, 0.0016145971020378026, 0.0015807919706142942, 0.0015811648663614885, 0.0016260900915534553, 0.0016257978108387477, 0.0015790943738924986, 0.0015788565305670408, 0.0015757875738334319, 0.0016151606024808033, 0.0015604985639397519, 0.0015971071544595393, 0.001537642295481394)
# MEDIUM
Naiftemps = c(0.0010116810496415528,0.0016883997700527526,0.0023949896523738673,0.0029658426213986203,0.0036375064926281618,0.004223179629379142,0.004630473285540376,0.005313823278777221,0.005876314892465846,0.006559031719193832,0.00682051251183552,0.007690403422156093,0.008060764439334506,0.008814894968213175,0.009339008318679832,0.009729382524009197,0.010218030907615312,0.010800554646287028,0.011279527728932775,0.01216870681725957,0.012532031786825375,0.013121461179494117,0.013982032530772352,0.014095038617611255,0.014546872987961585,0.01565805593128635,0.015851718652779657,0.01617400730420668,0.01710188137427296,0.017532312525361828,0.018411986744217502,0.019141338090085214,0.01930081678614906,0.019518399837684296,0.01986956134180982,0.0209797400919789,0.021716014878939538,0.022102245502502366,0.022544349925605302,0.022829389625321248,0.023375756932233194,0.024719914581360747,0.02521486602191262,0.02549994609765995)
jointreetemps = c(0.0691693280806168,0.06654608068443121,0.07256938495874476,0.07377797687001217,0.07320432787772217,0.0722793575679697,0.07092746354659814,0.06932008832679562,0.06753988286216692,0.0655458321385094,0.06349193764371704,0.06123165345597187,0.05893382436088192,0.056715938996347894,0.05464607669417016,0.052414928310564046,0.050042244690923846,0.048058093128635196,0.04610830853510077,0.04415676281617747,0.04236206627891249,0.04499164405518734,0.03902247227106723,0.037351015622886515,0.03585822920330042,0.034600085215744626,0.03333552772893277,0.03199651021236304,0.03092518605437576,0.02984158014337887,0.028999750642499662,0.028218356824022725,0.027235291965372648,0.02678806763154335,0.026055907953469497,0.02523585979981063,0.024701852022183148,0.02511995157581496,0.023732737454348708,0.023295820167726227,0.022987125659407547,0.02264744406871365,0.0223533163803598,0.02221581962667388)
wmvtemps = c(0.7965478627755985,0.8160240042607873,0.8311722452996078,0.8465396813877992,0.8619417899364263,0.8776536955227918,0.8927007136480455,0.9082907774922224,0.9244003205735155,0.9405550520086569,0.9563681658325442,0.9733636394562424,0.9900202305559312,1.0068131076017854,1.0233010451102393,1.039834390707426,1.0561597244014609,1.0720394964155282,1.0884614358852969,1.1044725541728662,1.1198347618017044,1.1354672159475179,1.1509304223589882,1.1660183666305965,1.181659499864737,1.1970076608954416,1.2124630060868389,1.227697067969701,1.2432045641823346,1.2584507239956715,1.2742634885702693,1.2899736402001893,1.3053130770999595,1.3215953077911538,1.3380518186798322,1.3556662739077505,1.374397483565535,1.3950147795211687,1.419355865075071,1.4481907822940618,1.484407602799946,1.5242677865548493,1.5660133616258622,1.6091389751792236)
vpoptemps = c(0.3373159754497498,0.35954324266197757,0.3859007524685513,0.4125853442445557,0.4421256462870283,0.471702110036521,0.500916622480725,0.530880220816989,0.5615572399567158,0.5926065827133775,0.6239073823211145,0.6555718758284864,0.6879136757067497,0.719780246584607,0.7520238882726904,0.7841105075071013,0.8152354380495063,0.8460459151224131,0.876229896320844,0.9068226852427972,0.9363751338428243,0.9659165105505207,0.9933383679832274,1.0202956827404301,1.046768951711078,1.0735229916136886,1.099179509333153,1.1252547484782902,1.1511916317462465,1.1774401381712432,1.2030660683754903,1.2285443556066549,1.25452294609766,1.280623670160963,1.3069319787636955,1.3323488160422021,1.3577788343027188,1.383842335452455,1.4113812024212093,1.4397572673474908,1.4700707158798865,1.5061081802380631,1.5506332132422562,1.6056343209793047)
vnaiftemps = c(0.28934941850399026,0.3064006830109563,0.3251501990396321,0.34224332693088055,0.35937942066819967,0.37544259786284323,0.39022054964155284,0.40448112538888137,0.4178912758014338,0.43131828635195457,0.44431559008521576,0.45747393473556064,0.4703594849181658,0.48379601271473016,0.4974150645881239,0.5110497538888138,0.5242245560665494,0.5376566951846341,0.5510735487623427,0.5647517916948465,0.5781106949141079,0.5918209430542405,0.6054222003922629,0.6188148186798322,0.6324439959421074,0.6463513863790071,0.6603340891383741,0.6741525128499932,0.6881400547139186,0.7022769139726769,0.7166760831867983,0.7312503485729744,0.7452351604220209,0.759776839307453,0.7747295884620587,0.7892993313945624,0.8029099446773975,0.8174296370891384,0.8327835777762749,0.8494901621804409,0.8676641988367374,0.8881704997971054,0.9103823529013932,0.9354209683484377)
lextreetemps = c(9.871808940890032E-4, 0.0010919204247260923, 0.0012270306370891384, 0.0012618103543892872, 0.0013648988570269173, 0.001442163539834979, 0.0016064685783849589, 0.0016502073786013797, 0.0017350571757067496, 0.0018341723319356149, 0.0018537329974300013, 0.001952052380630326, 0.002101698146895712, 0.0022774710536994456, 0.0024094375557960233, 0.0025170279183010956, 0.0026160373123224672, 0.0026916030434194506, 0.0028298945421344514, 0.0030196141147030976, 0.0030911308196943056, 0.0034702625388881372, 0.0035230289124847827, 0.003624732307588259, 0.0037039085824428514, 0.003917542986608955, 0.003986748248343028, 0.00413833606790207, 0.00439140528878669, 0.004361716184228324, 0.004411312586230218, 0.004512773583119167, 0.0046960397538211825, 0.00472514135668876, 0.004590834113350466, 0.004897660739889084, 0.0047370618490463956, 0.004694337772216962, 0.004398824624644935, 0.004273979230353037, 0.0038618103002840526, 0.0034346460503178683, 0.0029147185919112676, 0.0021245620925199513)
# BIG
Naiftemps = c(0.00469632012866091,0.00678952480108346,0.010033462953557925,0.013950615597313922,0.013854764178093787,0.016783919361209865,0.01984493064725467,0.022964014954009366,0.02519751498222448,0.026734815868178996,0.026873487613565825,0.031001877941425428,0.03304656119857796,0.03416763122848598,0.03846027137294735,0.039743153208058234,0.042773104339484225,0.04369320518029456,0.042934489024321426,0.04974415800462728,0.05624536854579313,0.0549238879860053,0.055812429095423506,0.057765659951470004,0.056513454376163876,0.05996653975509283,0.07009525111449692,0.06365193798318379,0.06876009203769538,0.0780441651148355,0.07275697449353874,0.08198136843293267,0.07704852389819987,0.08212766198295807,0.09007219005699453,0.08353381299023757,0.095925197731505,0.08518363077704419,0.09295438237119802,0.09361875887365273,0.09468717346650866,0.10357683099147903,0.10507326956718019,0.1009855371028723,0.1132588661475086,0.10426615811748773,0.12378699255121042,0.10007425297669431,0.1178453370012979,0.12116236595000282,0.11260484481688392,0.11937027504091191,0.11974064996332036,0.129627940804695,0.13540990141639864,0.12586783059646747,0.13246467857344393,0.14363830370746572,0.13832812194571412,0.13938311116754135,0.1345764880925508,0.14589631591422123,0.14171031202031603,0.15190489057562076,0.16036516416478555,0.15490840411963883,0.15825513888261852,0.1580144130925508,0.1575681183972912,0.16545660536117382,0.15969180874717834,0.16350425163656884,0.16294880428893904,0.17905278132054175,0.158539725,0.1891270586343115,0.174996566986456,0.18032123572234762,0.18605601224604967,0.17935054232505643,0.18371200090293455,0.18591498634311512,0.19241401495485327,0.1880498059255079,0.19844988448081263,0.20547052731376975,0.210128034255079)
jointreetemps = c(27.246312220077872,27.102715229783872,32.85797541239208,34.416307678234865,34.413268514192204,33.77221092274702,32.76229998685176,31.542794389932848,30.23325599983071,28.915683370859433,27.645367995993453,26.341602487444277,25.11493569775972,23.9426225814006,22.81615690158569,21.702456376051014,20.634195804017832,19.642536831950792,18.752658712149426,17.880941349641667,17.045190713616613,16.28752611319903,15.576991844816884,14.853247893459738,14.230431429264714,13.644606318266463,13.042544451216072,12.510861039219005,12.026990894362621,11.568933852886406,11.155899998645674,10.729652745838271,10.343556190790588,9.991670337791321,9.651479521189549,9.334602300547374,9.040392692906721,8.76525572315332,8.494856722589018,8.259694301619547,8.041163151966593,7.837130728852774,7.646287331188985,7.458310030867333,7.291952925963546,7.143948537215732,6.9897277505784094,6.848951528807629,6.718358433891993,6.589039963376784,6.46887727193725,6.353449732577168,6.24240912606512,6.12952212595226,6.029689841036059,5.932614609051408,5.8386968129902375,5.756244473110998,5.6623857901924275,5.573599088821172,5.492725137020316,5.40615928476298,5.327414508239277,5.2520847597065465,5.17176913244921,5.091933485835215,5.0233337465575625,4.9437326619074495,4.867635777765237,4.79375961506772,4.718916469638827,4.636837774435666,4.567317421726862,4.489804258295711,4.420138081038375,4.354781099379233,4.29003522037246,4.231350004232506,4.201858342155756,4.138456707223476,4.096267666873589,4.0839125694695255,4.0599902200338605,4.057317436173815,4.076606144300226,4.125554272799097,4.224577781376976)
wmvtemps = c(1.3014854312232003,1.4293006351839315,1.5217993793726021,1.6177837793387497,1.7165966678515008,1.7916713598510494,1.8508638076055066,1.8927339502933875,1.9215107408034304,1.940394192281652,1.9544777582374182,1.9648255922477995,1.9731129533401037,1.9775470077296322,1.983538067197021,1.987533917964342,1.995052105958023,2.0031658032046944,2.0091116354660348,2.0185100530918527,2.028649178345746,2.036527794403069,2.0443198671857368,2.054498832035658,2.0618487954186415,2.071772400304672,2.082963361938614,2.092457092360641,2.1021747085872264,2.11278116108102,2.1230016150417512,2.131670333728278,2.141553279282329,2.153808752369668,2.1659780175468293,2.177783284134507,2.1910185012976755,2.203252730309185,2.2150114293048975,2.2289482340893705,2.244422028436019,2.256098636425186,2.270281563924622,2.284246749097269,2.298065818381855,2.3114004930038368,2.3264456029113068,2.3389780284924395,2.355831651602347,2.3709314108553374,2.3871641346197245,2.402832263258858,2.419853930038366,2.436817791469194,2.4559093885691716,2.4738666230534867,2.4910949904649065,2.5098773295531482,2.527000736007673,2.5417984347777027,2.5597529294741594,2.580133362107876,2.5969212523132477,2.616967046659896,2.6356342306477094,2.654149471394719,2.673957175863236,2.692568846422929,2.7145501565109456,2.7330755097043555,2.751333355958023,2.770747666666667,2.7902277879711126,2.8138241220379148,2.835876202606635,2.861596143308508,2.88782152014218,2.913498345802302,2.9430133494696458,2.9720352645565336,3.0073289522681113,3.0449059916497405,3.0781286727036785,3.11233659732566,3.1506251038140376,3.1928383696682463,3.2362516150417515)
vpoptemps = c(0.7227277617919206,0.8176607521439856,0.8685920144436922,0.9502131319115323,1.0216973851275106,1.0768979904084857,1.122970468291582,1.160496698149402,1.1933421155495374,1.2239136979237193,1.252892573403295,1.281925575829384,1.311307916779508,1.340418865436696,1.3704302575603702,1.4022050484653577,1.4339764114759648,1.4657812242157526,1.4993304166666668,1.5340441478785827,1.570551124746107,1.6053883865380276,1.6435179111938614,1.6811516784585872,1.7186142176709547,1.7574324353419093,1.7949052927104492,1.8314419151997292,1.8706251108102008,1.90894174413225,1.9504140219476416,1.9909565009591514,2.032009888907696,2.071109803373956,2.111297498984428,2.152150255359964,2.192158244470774,2.234125353531934,2.273793709489957,2.316473826957797,2.3571149058338974,2.397940707289551,2.4398795717670954,2.480520919036335,2.5215542206612502,2.5621895197472355,2.6030560216091176,2.6435434206161137,2.68447965690589,2.7264281204581358,2.767640745147822,2.8107752989731436,2.8542455574362444,2.8974845194087115,2.941577401320244,2.983495113179869,3.027405131121643,3.0708076930151207,3.1148969090498757,3.1577836491762583,3.203265552189122,3.2486963406680207,3.296698244865719,3.343202899176258,3.392217123674114,3.4391654427330174,3.4891256319115325,3.5382426043218236,3.589984524994358,3.6454421313473255,3.7022196170728954,3.760034158824193,3.8207523094673888,3.8792767903972014,3.946767140769578,4.013047620232453,4.087033631798691,4.160871990972693,4.244349295982848,4.329059756149853,4.425155779056646,4.510295066463552,4.615544396468066,4.701517446118258,4.805407352572782,4.852121004062289,4.838807259252991)
vnaiftemps = c(0.6451137929361318,0.7004251516023471,0.7402862512412548,0.7941943547731889,0.8436578763258858,0.8850182477431731,0.9191662471789663,0.9456509149176259,0.9681622384337621,0.9869423571428572,1.0011106430828256,1.0154165848566914,1.0262460703565786,1.0352890938275785,1.0451006760324983,1.053796864421124,1.0625801736064093,1.073303820300158,1.0817336954412096,1.0911937024937937,1.1003672994809297,1.109127040340781,1.11878846812232,1.129006776686978,1.1406527800722184,1.1524109413789212,1.162298409896186,1.172779033795983,1.1848031659896185,1.1972309004739337,1.209998938670729,1.2215383002708193,1.2339877545136537,1.2478261321936357,1.262392099413225,1.2763035444030693,1.2883153586097946,1.302996368539833,1.3156883779620854,1.3307283744640037,1.3441463018505981,1.3648075778605282,1.373427721902505,1.3884467777589709,1.4039782082487022,1.418676180546152,1.434322146129542,1.4494948827578424,1.4646719453283683,1.4792495922477995,1.4947454961069735,1.5097882015910629,1.5263681480478448,1.54355282046942,1.5574965721620402,1.57437514596028,1.5904318558451818,1.6071270486910405,1.624394095971564,1.642454921575265,1.6590645144436922,1.6750880820920786,1.6930401014443692,1.7099707239900699,1.7283097175581132,1.7450942695779734,1.7636017050891446,1.7834564677273752,1.8007157356127286,1.8210072708756488,1.8392085387045813,1.8621039884901829,1.8897202618483413,1.905506202775897,1.9295180107199277,1.9544357783795983,1.9819774249605056,2.0094258825885802,2.040351750507786,2.072305510212142,2.1026803915594674,2.139019198995712,2.173030381516588,2.2094696697698035,2.2442522798465356,2.2808345163055743,2.3180854916497404)
lextreetemps = c(0.0016982109850409257, 0.0016183629805249788, 0.0016514960824160316, 0.0016693256223539374, 0.001678907248094835, 0.0016918056562235393, 0.001712892734970364, 0.0017309370702794243, 0.0017356804403048263, 0.0017490602314422805, 0.0017630764775613886, 0.001768132633361558, 0.001769494541349139, 0.0017885119390347163, 0.00179574688681908, 0.0017999977533163986, 0.001802400344340954, 0.0018416144510302005, 0.001827193355913068, 0.001834829844764324, 0.0018491389048828676, 0.0018589804064352243, 0.0018688644538526673, 0.001878152204346599, 0.0018913192040643523, 0.0019033213491391476, 0.0019160837764606265, 0.001925194405870731, 0.0019385509060118543, 0.0019486479424216765, 0.0019615007112616426, 0.0019732338131526954, 0.0019885149364944964, 0.001999172876093706, 0.002052725475585662, 0.0020244470505221563, 0.0020302198927462604, 0.0020494604346598926, 0.0020594874682472483, 0.0020678437256562237, 0.0020803426192492237, 0.0020900380976573525, 0.0020992857239627433, 0.0021098250127011007, 0.00211570282811177, 0.002121823838554897, 0.0021366507479537115, 0.0021429323736946093, 0.002146472548687553, 0.002155329156082416, 0.0021833488060965284, 0.0021642386113463166, 0.002166257888794807, 0.0021760188202088627, 0.002174500208862546, 0.002175848190798758, 0.0021818443240191926, 0.002179670657634773, 0.0021839889923793397, 0.002183961343494214, 0.002182636150155236, 0.0021819025740897544, 0.0021802319559695174, 0.0021853985661868473, 0.0021834517979113744, 0.00218234516511431, 0.002184271769686706, 0.0021765833135760655, 0.00217332661021733, 0.002173064578041208, 0.002174775591306802, 0.0021725151453570422, 0.0021662781484617557, 0.002163636748518205, 0.002160738729889924, 0.002160683974033305, 0.002159173096246119, 0.0021577892915608243, 0.002167361501552357, 0.002146954541349139, 0.0021496048941574935, 0.0021383544679650012, 0.0021277644707874683, 0.0021259769291560823, 0.0021188804233700253, 0.0021153694270392323, 0.0020956159243578887)
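#Quick consistency check (a sketch): print the length of each series so a
#copy-paste mismatch between the SMALL/MEDIUM/BIG blocks is easy to spot
#before plotting.
print(sapply(list(oracle = oracle, Naif = Naif, jointree = jointree,
                  wmv = wmv, vpop = vpop, vnaif = vnaif, lextree = lextree,
                  Naiftemps = Naiftemps, jointreetemps = jointreetemps,
                  wmvtemps = wmvtemps, vpoptemps = vpoptemps,
                  vnaiftemps = vnaiftemps, lextreetemps = lextreetemps),
             length))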
#--------------------------------------------------------------------------------------------
#plot parameters
# -- manual
x_pas_erreur = 4
y_pas_erreur = 3
x_legend_erreur = 0
y_legend_erreur = 50
y_padding_erreur = 1
x_pas_time = 4
y_pas_time = 3
x_legend_time = 0
y_legend_time = 0.013
y_padding_time = 0.01
# -- automatically computed
#Convert accuracy (fraction in [0,1]) to error rate (%):
#e.g. an accuracy of 0.95 becomes a 5% error rate.
oracle = 100-100*oracle
Naif = 100-100*Naif
jointree = 100-100*jointree
wmv = 100-100*wmv
vpop = 100-100*vpop
vnaif = 100-100*vnaif
lextree = 100-100*lextree
#The c_* confidence vectors are not defined anywhere in this script, so
#the corresponding conversions are commented out to keep the file runnable.
#c_oracle = 100*c_oracle
#c_Naif = 100*c_Naif
#c_jointree = 100*c_jointree
#c_wmv = 100*c_wmv
#c_vpop = 100*c_vpop
#c_vnaif = 100*c_vnaif
#end of accuracy-to-error conversion
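#Minimal sanity check of the conversion (sketch): all error rates should
#now lie in [0, 100].
stopifnot(all(oracle >= 0 & oracle <= 100),
          all(Naif >= 0 & Naif <= 100),
          all(lextree >= 0 & lextree <= 100))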
nb_val = length(oracle)  # all series share the same length
#The vectors already hold error rates after the conversion above, so the
#axis bounds are taken over the converted values directly (lextree included,
#since it is plotted too).
min_val_erreur = min(oracle, Naif, jointree, wmv, vpop, vnaif, lextree)
min_val_erreur = ceiling(min_val_erreur)
max_val_erreur = max(oracle, Naif, jointree, wmv, vpop, vnaif, lextree)
max_val_erreur = floor(max_val_erreur)
size = 0:(nb_val-1)
x_lim_erreur = c(0,(nb_val-1))
if(min_val_erreur-y_padding_erreur <= 0){
min_val_erreur = min_val_erreur+y_padding_erreur+0.00001
}
y_lim_erreur = c((min_val_erreur-y_padding_erreur),(max_val_erreur+y_padding_erreur))
x_axp_erreur = c(0, (nb_val-1), x_pas_erreur)
y_axp_erreur = c((min_val_erreur-y_padding_erreur),(max_val_erreur+y_padding_erreur),y_pas_erreur)
#Time-axis bounds, taken over the timing series directly (the times are
#already in the plotted unit, so no rescaling is applied).
min_val_time = min(Naiftemps, jointreetemps, wmvtemps, vpoptemps, vnaiftemps, lextreetemps)
max_val_time = max(Naiftemps, jointreetemps, wmvtemps, vpoptemps, vnaiftemps, lextreetemps)
x_lim_time = c(0,(nb_val-1))
if(min_val_time-y_padding_time <= 0){
min_val_time = min_val_time+y_padding_time+0.00001
}
y_lim_time = c((min_val_time-y_padding_time),(max_val_time+y_padding_time))
x_axp_time = c(0, (nb_val-1), x_pas_erreur)
y_axp_time = c((min_val_time-y_padding_time),(max_val_time+y_padding_time),y_pas_time)
#end of plot parameters
#--------------------------------------------------------------------------------------------
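#The x_axp_*/y_axp_* vectors follow base R's par("xaxp")/par("yaxp")
#convention c(low, high, intervals); they are not consumed by the ggplot2
#code below. A hypothetical base-graphics rendering of one error curve
#could use them like this (sketch only; the oracle series stands in for
#any of the methods):
plot(size, oracle, type = "b", xlim = x_lim_erreur, ylim = y_lim_erreur,
     xaxt = "n", xlab = "Number of configured attributes",
     ylab = "Error rate (%)")
axis(1, at = seq(x_axp_erreur[1], x_axp_erreur[2], by = x_pas_erreur))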
(ggplot(NULL, aes(size)) + #scale_y_log10(breaks = round(seq(0, 100, by = 5),1)) + annotation_logticks(sides="l") +
ylab("Error rate (%)") + xlab("Number of configurated attributes") + theme_bw() #+ theme(legend.position="bottom")
+geom_line(aes(y=Naif, colour="Naive Bayes"), colour="turquoise2", linetype = "dotted") + geom_point(aes(y=Naif, shape="Naive Bayes"), colour="turquoise2", fill="turquoise2")
+geom_line(aes(y=wmv, colour="Weighted Majority Voter"), colour="deeppink2", linetype = "dotted") + geom_point(aes(y=wmv, shape="Weighted Majority Voter"), colour="deeppink2", fill="deeppink2")
+geom_line(aes(y=vpop, colour="Most Popular Choice"), colour="gold2", linetype = "dotted") + geom_point(aes(y=vpop, shape="Most Popular Choice"), colour="gold2", fill="gold2")
+geom_line(aes(y=vnaif, colour="Naive Bayes Voter"), colour="firebrick3", linetype = "dotted") + geom_point(aes(y=vnaif, shape="Naive Bayes Voter"), colour="firebrick3", fill="firebrick3")
+geom_line(aes(y=jointree, colour="Classical Bayes"), colour="springgreen4", linetype = "dotted") + geom_point(aes(y=jointree, shape="Classical Bayes"), colour="springgreen4", fill="springgreen4")
+geom_line(aes(y=lextree, colour="2-LP-tree (3 cl.)"), colour="dodgerblue4", linetype = "dotted") + geom_point(aes(y=lextree, shape="2-LP-tree (3 cl.)"), colour="dodgerblue4", fill="dodgerblue4")
+geom_line(aes(y=oracle, colour="Oracle"), colour="black", linetype = "dotted") + geom_point(aes(y=oracle, shape="Oracle"), colour="black", fill="black")
# + theme(legend.position=c(0.75,0.65), legend.background = element_rect(fill=alpha('blue', 0)))
+ theme(legend.position=c(0.75,0.72), legend.background = element_rect(fill=alpha('blue', 0)))
+ scale_colour_manual(name = 'Legend', guide = 'legend',
limits = c(NULL
,'Classical Bayes' #jointree
,'Naive Bayes' #Naif
,'Weighted Majority Voter' #wmv
,'Most Popular Choice' #vpop
,'Naive Bayes Voter' #vnaif
,'2-LP-tree (3 cl.)'
,'Oracle' #oracle
),
values =c(NULL
,'Classical Bayes'='springgreen4' #jointree
,'Naive Bayes'='turquoise2' #Naif
,'Weighted Majority Voter'='deeppink2' #wmv
,'Most Popular Choice'='gold2' #vpop
,'Naive Bayes Voter'='firebrick3' #vnaif
,'2-LP-tree (3 cl.)'='dodgerblue4'
,'Oracle'='black' #oracle
))
+ scale_shape_manual(name = 'Legend', guide = 'legend',
limits = c(NULL
,'Classical Bayes' #jointree
,'Naive Bayes' #Naif
,'Weighted Majority Voter' #wmv
,'Most Popular Choice' #vpop
,'Naive Bayes Voter' #vnaif
,'2-LP-tree (3 cl.)'
,'Oracle' #oracle
),
values =c(NULL
,'Classical Bayes'=21 #jointree
,'Naive Bayes'=24 #Naif
,'Weighted Majority Voter'=25 #wmv
,'Most Popular Choice'=22 #vpop
,'Naive Bayes Voter'=23 #vnaif
,'2-LP-tree (3 cl.)'=1
,'Oracle'=4 #oracle
))
+ guides(shape = guide_legend(override.aes = list(colour = c(NULL
,'Classical Bayes'='springgreen4' #jointree
,'Naive Bayes'='turquoise2' #Naif
,'Weighted Majority Voter'='deeppink2' #wmv
,'Most Popular Choice'='gold2' #vpop
,'Naive Bayes Voter'='firebrick3' #vnaif
,'2-LP-tree (3 cl.)'='dodgerblue4'
,'Oracle'='black' #oracle
),
fill = c(NULL
,'Classical Bayes'='springgreen4' #jointree
,'Naive Bayes'='turquoise2' #Naif
,'Weighted Majority Voter'='deeppink2' #wmv
,'Most Popular Choice'='gold2' #vpop
,'Naive Bayes Voter'='firebrick3' #vnaif
,'2-LP-tree (3 cl.)'='dodgerblue4'
,'Oracle'='black' #oracle
))))
)
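#Exporting the error plot at the globally defined image size. Sketch only:
#it assumes taille_img_x/taille_img_y are pixel dimensions, a 96-dpi
#conversion is acceptable, and the plot above was just printed (ggsave
#defaults to the last displayed plot).
ggsave(paste0(dataset_name, "_error.pdf"),
       width = taille_img_x / 96, height = taille_img_y / 96)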
(ggplot(NULL, aes(size))
+ scale_y_log10(
breaks = scales::trans_breaks("log10", function(x) 10^x),
labels = scales::trans_format("log10", function(x) round(10^x,3)))
+ annotation_logticks(sides="l")
+ theme_bw() +
ylab("Recommendation time (ms)") + xlab("Number of configurated attributes") #+ theme(legend.position="bottom")
+geom_line(aes(y=jointreetemps, colour="Classical Bayes"), colour="springgreen4", linetype = "dotted") + geom_point(aes(y=jointreetemps, shape="Classical Bayes"), colour="springgreen4", fill="springgreen4")
+geom_line(aes(y=Naiftemps, colour="Naive Bayes"), colour="turquoise2", linetype = "dotted") + geom_point(aes(y=Naiftemps, shape="Naive Bayes"), colour="turquoise2", fill="turquoise2")
+geom_line(aes(y=wmvtemps, colour="Weighted Majority Voter"), colour="deeppink2", linetype = "dotted") + geom_point(aes(y=wmvtemps, shape="Weighted Majority Voter"), colour="deeppink2", fill="deeppink2")
+geom_line(aes(y=vpoptemps, colour="Most Popular Choice"), colour="gold2", linetype = "dotted") + geom_point(aes(y=vpoptemps, shape="Most Popular Choice"), colour="gold2", fill="gold2")
+geom_line(aes(y=vnaiftemps, colour="Naive Bayes Voter"), colour="firebrick3", linetype = "dotted") + geom_point(aes(y=vnaiftemps, shape="Naive Bayes Voter"), colour="firebrick3", fill="firebrick3")
+geom_line(aes(y=lextreetemps, colour="2-LP-tree (3 cl.)"), colour="dodgerblue4", linetype = "dotted") + geom_point(aes(y=lextreetemps, shape="2-LP-tree (3 cl.)"), colour="dodgerblue4", fill="dodgerblue4")
# + theme(legend.position=c(0.79,0.66), legend.background = element_rect(fill=alpha('blue', 0)))
# + theme(legend.position=c(0.79,0.2), legend.background = element_rect(fill=alpha('blue', 0)))
+ theme(legend.position="bottom")
+ scale_colour_manual(name = 'Legend', guide = 'legend',
limits = c(NULL
,'Classical Bayes' #jointree
,'Naive Bayes' #Naif
,'Weighted Majority Voter' #wmv
,'Most Popular Choice' #vpop
,'Naive Bayes Voter' #vnaif
,'2-LP-tree (3 cl.)'
),
values =c(NULL
,'Classical Bayes'='springgreen4' #jointree
,'Naive Bayes'='turquoise2' #Naif
,'Weighted Majority Voter'='deeppink2' #wmv
,'Most Popular Choice'='gold2' #vpop
,'Naive Bayes Voter'='firebrick3' #vnaif
,'2-LP-tree (3 cl.)'='dodgerblue4'
))
+ scale_shape_manual(name = 'Legend', guide = 'legend',
limits = c(NULL
,'Classical Bayes' #jointree
,'Naive Bayes' #Naif
,'Weighted Majority Voter' #wmv
,'Most Popular Choice' #vpop
,'Naive Bayes Voter' #vnaif
,'2-LP-tree (3 cl.)'
),
values =c(NULL
,'Classical Bayes'=21 #jointree
,'Naive Bayes'=24 #Naif
,'Weighted Majority Voter'=25 #wmv
,'Most Popular Choice'=22 #vpop
,'Naive Bayes Voter'=23 #vnaif
,'2-LP-tree (3 cl.)'=1
))
+ guides(shape = guide_legend(override.aes = list(colour = c(NULL
,'Classical Bayes'='springgreen4' #jointree
,'Naive Bayes'='turquoise2' #Naif
,'Weighted Majority Voter'='deeppink2' #wmv
,'Most Popular Choice'='gold2' #vpop
,'Naive Bayes Voter'='firebrick3' #vnaif
,'2-LP-tree (3 cl.)'='dodgerblue4'
),
fill = c(NULL
,'Classical Bayes'='springgreen4' #jointree
,'Naive Bayes'='turquoise2' #Naif
,'Weighted Majority Voter'='deeppink2' #wmv
,'Most Popular Choice'='gold2' #vpop
,'Naive Bayes Voter'='firebrick3' #vnaif
,'2-LP-tree (3 cl.)'='dodgerblue4'
))))
)
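#The plots above add one hard-coded layer per method, which is why the
#manual colour/shape scales and the guides() override are needed. An
#equivalent, more idiomatic construction (sketch; assumes the reshape2
#package is available, and shows only two of the series) puts the data in
#long format so a single mapping drives colour and shape:
df <- data.frame(size = size,
                 `Naive Bayes` = Naiftemps,
                 `Classical Bayes` = jointreetemps,
                 check.names = FALSE)
long <- reshape2::melt(df, id.vars = "size",
                       variable.name = "method", value.name = "time")
(ggplot(long, aes(size, time, colour = method, shape = method))
 + geom_line(linetype = "dotted") + geom_point()
 + scale_y_log10() + theme_bw()
 + xlab("Number of configured attributes")
 + ylab("Recommendation time (ms)"))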
| 58,938 | gpl-3.0 |
cadfe7ab0699d8f30e3e85674ab4e70336aef4e2 | PFgimenez/thesis | R-files/data_renault_small_header.R | library(ggplot2)
#--------------------------------------------------------------------------------------------
#Parametres globaux
dataset_name = "renault_small_header"
taille_img_x = 1024/2
taille_img_y = 720/2
#fin parametres globaux
#--------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------
#data
#result
oracle = c(0.68502658003544,0.7502584170112226,0.7979548139397519,0.8242395156526875,0.8485307147076196,0.8646264028352038,0.8746308328411104,0.8875886001181335,0.8910956881275842,0.8964855286473715,0.9063053750738335,0.91313496751329,0.912322799763733,0.9178972238629651,0.9199276432368576,0.9201122268163024,0.926055818074424,0.9288245717660957,0.9296736562315416,0.9353957471943296,0.9353588304784406,0.9369462492616657,0.940711754282339,0.9426683402244537,0.9481320141760189,0.9475782634376846,0.9487595983461311,0.9501624335499114,0.9549985233313645,0.9559583579444773,0.9571766095688128,0.9594654459539279,0.9613851151801536,0.9631571175428234,0.9637847017129356,0.9664796219728293,0.966738038984052,0.9675871234494979,0.9679562906083875,0.9702820437093916,0.9718694624926166,0.9729031305375074,0.9752658003544005,0.9743428824571766,0.9768532191376255,0.978920555227407,0.9788098050797401,0.9800649734199646)
Naif = c(0.6878322504430006,0.7516612522150029,0.7948168930891908,0.8258269344359126,0.8426609568812758,0.8545112226816303,0.8633712344949793,0.8685764914353219,0.8756275841701122,0.8799468399291199,0.8792823390431187,0.8850782634376846,0.88559509746013,0.8845614294152392,0.8867764323685765,0.8880316007088009,0.8918340224453633,0.8910956881275842,0.8911326048434731,0.8922770230360307,0.8948981098641465,0.8963378617838157,0.8986636148848198,0.8929784406379209,0.897777613703485,0.8962640283520378,0.9004725339633786,0.8990696987595983,0.9011739515652688,0.9020968694624926,0.9001033668044891,0.9034627879503839,0.9013954518606024,0.9042011222681631,0.9020599527466037,0.9008786178381571,0.9043487891317188,0.9037950383933845,0.9027982870643827,0.9052347903130538,0.9051609568812758,0.9054193738924985,0.9071175428233904,0.9035366213821618,0.9091848789131719,0.9049025398700532,0.9078927938570585,0.90748670998228)
jointree = c(0.6878322504430006,0.7526580035440047,0.7941893089190786,0.8277835203780272,0.8470171293561725,0.8625590667454224,0.8761444181925576,0.8808697578263438,0.8924616066154755,0.8967808623744832,0.9006940342587123,0.9083727111636148,0.9097386296515062,0.9083727111636148,0.912322799763733,0.9146485528647371,0.9214412285883048,0.9216627288836385,0.9231024808033077,0.9257235676314235,0.9273479031305375,0.9287507383343178,0.9312241582988777,0.931814825753101,0.9340667454223273,0.9353588304784406,0.9365770821027761,0.9389028352037803,0.9375,0.9397519196692262,0.9406010041346722,0.9424468399291199,0.9441450088600118,0.9435174246898996,0.9455109273479031,0.9453632604843473,0.9486857649143532,0.948058180744241,0.9498301831069108,0.9525989367985824,0.9511591848789132,0.952931187241583,0.9543709391612523,0.9520451860602481,0.9551092734790313,0.9554415239220319,0.956179858239811,0.9568074424099232)
wmv = c(0.5945067926757236,0.7494093325457767,0.7923434731246308,0.8279681039574719,0.8467217956290608,0.8623744831659775,0.8753322504430006,0.8807959243945659,0.8900251033668045,0.8955995274660367,0.9003987005316008,0.9068591258121678,0.9080404607206143,0.9065637920850561,0.9089633786178382,0.9133933845245127,0.9174911399881867,0.9165682220909628,0.9196323095097461,0.9231024808033077,0.9227333136444182,0.9259081512108683,0.9279385705847608,0.9265357353809806,0.9298582398109864,0.9296736562315416,0.931814825753101,0.9323316597755463,0.9346204961606616,0.9357279976373302,0.9343620791494389,0.938312167749557,0.9372784997046663,0.9390135853514472,0.9396780862374483,0.9395673360897815,0.9434435912581217,0.9434435912581217,0.9425945067926758,0.9445510927347903,0.9440711754282339,0.9436650915534555,0.9462492616656822,0.9441450088600118,0.9460646780862374,0.9465445953927939,0.9479843473124631,0.9473198464264619)
vpop = c(0.6563792085056114,0.7331659775546367,0.7830035440047254,0.8160809214412286,0.8404459539279385,0.8577968103957472,0.8686134081512109,0.875849084465446,0.8849305965741288,0.8914648552864737,0.8955995274660367,0.9011739515652688,0.9044226225634967,0.9072652096869462,0.9088157117542823,0.9107722976963969,0.9135779681039575,0.9173803898405198,0.918820141760189,0.923693148257531,0.9219949793266391,0.9268310691080921,0.930190490253987,0.927790903721205,0.9305965741287655,0.9303012404016539,0.9348050797401063,0.9357649143532192,0.9384967513290018,0.9380537507383343,0.9374261665682221,0.9419669226225635,0.9418192557590077,0.9427790903721205,0.9432959243945659,0.9438865918487891,0.9457693443591259,0.9466553455404607,0.9487965150620201,0.9499040165386887,0.9484642646190194,0.9510853514471352,0.9507531010041347,0.9492395156526875,0.9530419373892498,0.9518975191966923,0.9546662728883638,0.9532634376845835)
vnaif = c(0.6740992321323095,0.7423582398109864,0.7871751329001772,0.8200679267572357,0.8422917897223863,0.8615623154164206,0.871788245717661,0.8802052569403426,0.8884746012994684,0.8947873597164796,0.8981836975782634,0.9059362079149439,0.9070806261075015,0.9101447135262847,0.9121382161842883,0.9143901358535145,0.918007974010632,0.9190785587714116,0.9221426461901949,0.9272002362669817,0.925317483756645,0.9302274069698759,0.9307442409923213,0.9324424099232133,0.9342144122858831,0.9352849970466627,0.9372784997046663,0.9390135853514472,0.9407855877141169,0.9408225044300059,0.9418561724748966,0.9451048434731246,0.9440342587123449,0.9461754282339043,0.9462123449497932,0.9476890135853514,0.9478366804489072,0.9492395156526875,0.9522666863555818,0.9531157708210277,0.9514914353219137,0.9530788541051388,0.9547401063201417,0.9528573538098051,0.9564382752510336,0.9560691080921441,0.9566966922622564,0.9572504430005907)
lextree = c(0.5280197873597164, 0.6412433549911399, 0.7050723567631424, 0.7468989958653278, 0.783261961015948, 0.8031231541642055, 0.8203263437684584, 0.8330995274660367, 0.846574128765505, 0.850708800945068, 0.8615623154164206, 0.8674689899586533, 0.8723419964559953, 0.8771042528056704, 0.8828632604843473, 0.8862965150620201, 0.8887699350265801, 0.8901727702303603, 0.8942336089781453, 0.8995865327820437, 0.902613703484938, 0.9030567040756055, 0.9059731246308328, 0.9053824571766096, 0.9110676314235086, 0.9092587123449498, 0.9129503839338452, 0.9157929710572947, 0.9184878913171884, 0.9184878913171884, 0.9205183106910809, 0.9215150620200827, 0.922917897223863, 0.923693148257531, 0.9270525694034258, 0.9264249852333136, 0.930153573538098, 0.9315564087418783, 0.9322209096278795, 0.9321470761961016, 0.9330699940933255, 0.9338083284111045, 0.935100413467218, 0.9380906674542233, 0.9377953337271117, 0.9392720023626698, 0.9392720023626698, 0.9418561724748966)
# MEDIUM
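# NB: the MEDIUM and BIG blocks reuse the same variable names, so each block
# overwrites the previous one and the plots at the end use the last block evaluated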
oracle = c(0.7603138103611524,0.7950764236439876,0.8167861490599215,0.8414716623833356,0.8583119166779386,0.8778574327066143,0.8894224266197754,0.9013932097930475,0.9117408359258758,0.9216826727985933,0.9276342486135534,0.9362234546192344,0.9372379277695116,0.9435276613012309,0.9493439740294873,0.9523873934803192,0.9559718652779656,0.9586094954686866,0.9601650209657785,0.964087650480184,0.9654402813472204,0.9662518598674422,0.9686189638847559,0.9705802786419586,0.9692952793182741,0.9717300148789395,0.9709184363587177,0.9730150142026242,0.9708508048153659,0.9744352766130123,0.9759231705667524,0.9736913296361422,0.9767347490869742,0.9753144866765859,0.9769376437170296,0.9772758014337887,0.9773434329771405,0.9776139591505478,0.9801163262545651,0.9798458000811578,0.9806573786013797,0.977140538347085,0.9796429054511024,0.979034221560936)
Naif = c(0.7603138103611524,0.7944677397538212,0.8170566752333288,0.8389016637359664,0.8511429730826457,0.8691329636142296,0.8769782226430407,0.8863790071689436,0.8961855809549574,0.9049776815906939,0.9145137292033004,0.9193832003246314,0.919518463411335,0.9237116190991479,0.9309481942377925,0.9306776680643852,0.9373055593128635,0.9371702962261599,0.936020559989179,0.9375084539429189,0.9422426619775464,0.9433247666711755,0.9454889760584336,0.9458947653185446,0.9440687136480455,0.9454213445150819,0.9461652914919518,0.9471121330988773,0.9452184498850263,0.9498173948329501,0.9515758149600974,0.951711078046801,0.9502231840930611,0.951372920330042,0.951711078046801,0.9511700256999864,0.9518463411335046,0.9510347626132828,0.9506289733531719,0.9557689706479102,0.9571892330582984,0.9521844988502638,0.9519139726768565,0.9546868659542811)
jointree = c(0.7603138103611524,0.794873529013932,0.8178682537535507,0.8404571892330583,0.8554713918571621,0.8743405924523198,0.8861084809955363,0.8957797916948465,0.9066008386311376,0.9147842553767077,0.9223589882321115,0.9274313539834979,0.930407141890978,0.9350060868389016,0.9423102935208981,0.9433923982145273,0.9496145002028946,0.9498173948329501,0.9490058163127283,0.9534018666305966,0.9542810766941702,0.9571892330582984,0.9586094954686866,0.9579331800351684,0.9576626538617611,0.9617205464628703,0.9600973894224266,0.9612471256594075,0.9597592317056676,0.9613823887461111,0.9664547544974976,0.964425808196943,0.962193967266333,0.9642905451102394,0.9665900175842013,0.9648315974570539,0.9648315974570539,0.9640200189368321,0.9652373867171649,0.9674015961044231,0.9676044907344785,0.9635465981333694,0.9664547544974976,0.9667252806709049)
wmv = c(0.6979575273907751,0.7939266874070067,0.8170566752333288,0.8351819288516164,0.854389287163533,0.8700121736778034,0.8816447991343163,0.8905721628567564,0.899702421209252,0.9056539970242121,0.9149871500067631,0.9184363587177059,0.9215474097118896,0.9254700392262951,0.9314892465846071,0.9313539834979034,0.9352766130123089,0.9385905586365481,0.9368997700527526,0.94312187204112,0.9402137156769917,0.9440010821046936,0.9454889760584336,0.9470445015555254,0.9469092384688219,0.94758555390234,0.9462329230353037,0.9492087109427837,0.9464358176653591,0.950967131069931,0.9513052887866901,0.9491410793994319,0.9485323955092655,0.9505613418098201,0.9517787095901529,0.9503584471797646,0.9514405518733937,0.9532666035438929,0.9545516028675775,0.9549573921276884,0.953604761260652,0.9508318679832274,0.9534018666305966,0.9517787095901529)
vpop = c(0.7324496145002029,0.7777627485459219,0.8017719464358176,0.8291627214933045,0.8490463952387394,0.8655484918165832,0.8830650615447044,0.8893547950764237,0.902340051399973,0.910929257405654,0.9199918842147978,0.9272960908967942,0.930407141890978,0.9358176653591235,0.9419721358041391,0.9437981874746382,0.9499526579196538,0.9517787095901529,0.9510347626132828,0.9548221290409847,0.9569187068848911,0.9592858109022048,0.9600973894224266,0.9615852833761667,0.9617881780062221,0.9636142296767212,0.9638171243067767,0.9634789665900176,0.9632760719599621,0.9672663330177195,0.9686189638847559,0.9650344920871095,0.9660489652373867,0.9649668605437576,0.968213174624645,0.9663871229541459,0.9684837007980522,0.9686865954281076,0.9698363316650886,0.9729473826592723,0.9706479101853104,0.9677397538211822,0.9696334370350331,0.9699039632084404)
vnaif = c(0.7487488164479913,0.7824293250371973,0.8092114162045178,0.8332206140944136,0.8528337616664412,0.8722440146084134,0.8844853239550926,0.8928040037873665,0.9056539970242121,0.9155282023535777,0.9216826727985933,0.930745299607737,0.9320979304747734,0.9373055593128635,0.9424455566076018,0.9452860814283782,0.94907344785608,0.950967131069931,0.9520492357635602,0.9559042337346139,0.958068443121872,0.9579331800351684,0.9603679155958339,0.9626673880697958,0.961855809549574,0.9642905451102394,0.9642905451102394,0.9652373867171649,0.9632084404166104,0.9653726498038685,0.9679426484512377,0.9657784390639794,0.9663871229541459,0.9660489652373867,0.9689571216015149,0.9669958068443122,0.9691600162315704,0.9691600162315704,0.9705802786419586,0.9728121195725686,0.972676856485865,0.968213174624645,0.969701068578385,0.9706479101853104)
lextree = c(0.5476126065196808, 0.6316786149059922, 0.6855133234140403, 0.7189909373731909, 0.7560530231299878, 0.7674827539564453, 0.7875693223319357, 0.8029893142161504, 0.8101582578114432, 0.8223319356147707, 0.8269308805626945, 0.8285540376031381, 0.8424861355336128, 0.8464087650480184, 0.8439740294873529, 0.8531042878398485, 0.8537806032733667, 0.8601379683484377, 0.8594616529149195, 0.8649398079264169, 0.8680508589206005, 0.8718382253483025, 0.8766400649262817, 0.8745434870823752, 0.8815095360476126, 0.8856350601920736, 0.8885432165562018, 0.8909779521168673, 0.8863790071689436, 0.8932097930474774, 0.8923982145272555, 0.9001758420127147, 0.898214527255512, 0.902678209116732, 0.9055187339375085, 0.9074124171513593, 0.9050453131340457, 0.9114026782091167, 0.9087650480183957, 0.9168808332206141, 0.9190450426078723, 0.921750304341945, 0.919180305694576, 0.927025564723387)
# BIG
oracle = c(0.8348566914917626,0.8585533739562176,0.8612615662378695,0.8746896863010607,0.888004965019183,0.8962423832092079,0.9022793951703905,0.9117580681561724,0.9126607989167231,0.920503272399007,0.9255247122545701,0.929981945384789,0.9330850823741819,0.9390656736628301,0.9395170390431054,0.9383886255924171,0.9461182577296321,0.9457797336944256,0.9480929812683367,0.9497856014443692,0.9514218009478673,0.950575490859851,0.9551455653351388,0.9552019860076733,0.9588693297224103,0.9575152335815843,0.9621417287294065,0.9614646806589935,0.9648499210110585,0.9666553825321598,0.9664296998420221,0.9657526517716091,0.9686865267433988,0.9704355675919657,0.9693071541412773,0.9721846084405326,0.9707176709546378,0.9743850146693749,0.976980365605958,0.9752877454299256,0.9755698487925976,0.9757955314827352,0.9781651997291808,0.9795192958700067,0.9804220266305574,0.981437598736177,0.9803091852854886,0.9836380049650192,0.9827352742044685,0.9807041299932295,0.9852742044685172,0.9853306251410516,0.9866283006093433,0.9849356804333108,0.9869104039720153,0.9865718799368088,0.9867411419544121,0.9872489280072219,0.9895621755811329,0.988772286165651,0.9893929135635297,0.9892236515459264,0.9892800722184608,0.9897878582712706,0.9902392236515459,0.9917061611374408,0.9914804784473031,0.9896750169262017,0.9914240577747687,0.9919882645001128,0.9918190024825095,0.992552471225457,0.9922703678627849,0.9926088918979914,0.9940194087113519,0.99322951929587,0.9933423606409388,0.9938501466937486,0.9940194087113519,0.9946400361092305,0.9939629880388174,0.9941886707289551,0.9942450914014895,0.9950914014895057,0.9946400361092305,0.9954863461972466,0.994583615436696)
Naif = c(0.8348566914917626,0.8585533739562176,0.8613744075829384,0.8726585420898217,0.8860302414804785,0.8930828255472806,0.8965809072444143,0.9057210561949899,0.9040848566914917,0.9109681787406906,0.9151433085082374,0.9193184382757843,0.920108327691266,0.9219137892123674,0.9189799142405778,0.9174565560821485,0.9268223877228616,0.9205596930715414,0.9233807266982622,0.9211238997968856,0.9203904310539381,0.9170051907018731,0.9243398781313473,0.920108327691266,0.9258632362897766,0.9185849695328369,0.923493568043331,0.9231550440081245,0.9247348228390883,0.9224779959377116,0.923888512751072,0.9223651545926428,0.9229293613179869,0.9223651545926428,0.9207853757616791,0.9206725344166102,0.9226472579553149,0.9223651545926428,0.9233243060257278,0.9250169262017603,0.9209546377792823,0.9170051907018731,0.9212931618144888,0.9250733468742948,0.9232678853531934,0.9259760776348455,0.922534416610246,0.9282893252087565,0.9208417964342135,0.9208982171067479,0.9237756714060031,0.9224215752651772,0.9273865944482058,0.9223651545926428,0.92118032046942,0.9225908372827805,0.9248476641841571,0.9249040848566915,0.9208417964342135,0.9227600993003836,0.9276686978108779,0.9250733468742948,0.925242608891898,0.9258068156172422,0.9281764838636877,0.9243398781313473,0.9268223877228616,0.925919656962311,0.9271044910855337,0.9255811329271045,0.9258068156172422,0.9251297675468292,0.9300947867298578,0.9268223877228616,0.9278943805010156,0.9251861882193636,0.9273301737756714,0.9268223877228616,0.9285150078988942,0.9276686978108779,0.9275558564658091,0.9229293613179869,0.9270480704129993,0.9293613179869104,0.9281200631911533,0.9260324983073798,0.9251861882193636)
jointree = c(0.8348566914917626,0.8549988715865493,0.8540397201534642,0.8636876551568494,0.8755924170616114,0.8810652222974498,0.8837734145791017,0.8911081020085759,0.8937598736176935,0.8960731211916046,0.903577070638682,0.9057774768675243,0.9092191378921237,0.9108553373956217,0.9113631234484315,0.9102347099977431,0.9174001354096141,0.9143534190927556,0.919826224328594,0.9146355224554277,0.9152561498533063,0.9146919431279621,0.9203340103814037,0.917795080117355,0.9240577747686752,0.9212367411419544,0.9251861882193636,0.9237192507334687,0.9261453396524486,0.9273301737756714,0.9251861882193636,0.9246784021665538,0.9271044910855337,0.9300947867298578,0.9273301737756714,0.9287406905890319,0.9284585872263598,0.9318438275784248,0.9338749717896637,0.9309410968178741,0.9304333107650643,0.9291356352967728,0.9320695102685624,0.9360753780185059,0.9316745655608215,0.9372037914691943,0.9306025727826676,0.9382193635748138,0.9361882193635748,0.9350033852403521,0.9365267433987813,0.9354547506206274,0.9405890318212593,0.9365267433987813,0.9409275558564658,0.9377679981945385,0.9414353419092756,0.9400248250959151,0.9386707289550892,0.9368088467614534,0.9410403972015347,0.940419769803656,0.9398555630783119,0.9423380726698263,0.9418867072895509,0.9429587000677048,0.9393477770255021,0.9443127962085308,0.9461746784021665,0.9457797336944256,0.9430151207402392,0.9459489957120288,0.9453847889866848,0.9448205822613406,0.9513089596027985,0.9448205822613406,0.9457233130218913,0.9459489957120288,0.9439178515007899,0.9447641615888062,0.9470774091627172,0.9480365605958023,0.9473030918528549,0.949221394719025,0.9470209884901828,0.9477544572331302,0.9487136086662153)
wmv = c(0.7950236966824644,0.8583276912660799,0.8601895734597157,0.8716993906567366,0.8851275107199278,0.8905438952832317,0.8966373279169487,0.9034078086210787,0.9054953735048522,0.909388399909727,0.9153125705258407,0.9190363349131122,0.920503272399007,0.9225908372827805,0.9206161137440758,0.9208417964342135,0.9276686978108779,0.9282893252087565,0.9278379598284812,0.9271609117580681,0.9289099526066351,0.9254118709095013,0.9317874069058903,0.9286842699164974,0.9334236064093884,0.9291920559693072,0.9347777025502144,0.9334800270819228,0.9343263371699391,0.9359625366734371,0.9335928684269916,0.933649289099526,0.9346648612051456,0.9378244188670729,0.9341006544798014,0.9368088467614534,0.9372037914691943,0.9390656736628301,0.9395734597156398,0.9385578876100203,0.9393477770255021,0.935793274655834,0.9402505077860528,0.939742721733243,0.9403633491311216,0.9422816519972919,0.9396298803881742,0.9422252313247574,0.9375987361769352,0.9379372602121417,0.9412096592191379,0.9369781087790566,0.9432972241029113,0.9391220943353645,0.943522906793049,0.9430715414127736,0.9443127962085308,0.943522906793049,0.9422252313247574,0.9403069284585872,0.9463439404197698,0.9417738659444821,0.9434664861205145,0.9436921688106522,0.9479237192507335,0.942168810652223,0.9401940871135184,0.9473030918528549,0.9460618370570977,0.9466260437824419,0.9441435341909276,0.9454412096592192,0.9449334236064094,0.945554051004288,0.950293387497179,0.9457233130218913,0.9466260437824419,0.9464003610923042,0.944877002933875,0.9451026856240127,0.9433536447754457,0.9469645678176484,0.9448205822613406,0.9486007673211465,0.9461746784021665,0.9473030918528549,0.9466824644549763)
vpop = c(0.806872037914692,0.8334461746784022,0.8447867298578199,0.8617129316181449,0.8754231550440081,0.8833220491988264,0.8934777702550214,0.9020537124802528,0.9027871812232002,0.910347551342812,0.913055743624464,0.9170616113744076,0.9228729406454524,0.9276686978108779,0.9272173324306026,0.9255247122545701,0.9312796208530806,0.9338185511171293,0.9344955991875423,0.9351162265854209,0.9371473707966599,0.935793274655834,0.940814714511397,0.9402505077860528,0.9433536447754457,0.9429587000677048,0.9449334236064094,0.9478672985781991,0.9474723538704581,0.9486571879936809,0.9523809523809523,0.9488264500112842,0.9493906567366283,0.9537914691943128,0.9487700293387498,0.9534529451591063,0.9549763033175356,0.9549198826450012,0.955991875423155,0.9567817648386369,0.955991875423155,0.9552584066802077,0.9570074475287745,0.9602234258632363,0.9589257503949447,0.9619160460392688,0.9614646806589935,0.9642857142857143,0.9581922816519973,0.9585872263597383,0.9615211013315279,0.9614082599864591,0.9658090724441435,0.9624238320920785,0.9637215075603701,0.9628187767998194,0.9654141277364027,0.9662040171518844,0.9663168584969533,0.962762356127285,0.9652448657187994,0.96614759647935,0.9650191830286617,0.964116452268111,0.9669939065673663,0.9663732791694877,0.9663168584969533,0.9680094786729858,0.968855788761002,0.9689122094335364,0.967219589257504,0.9669939065673663,0.9674452719476416,0.9669939065673663,0.9714511396975852,0.9679530580004514,0.9681223200180546,0.9686301060708644,0.9689122094335364,0.9678402166553826,0.9670503272399007,0.9699277815391559,0.9699842022116903,0.9683480027081923,0.9691943127962085,0.9694199954863462,0.9669374858948319)
vnaif = c(0.813247573911081,0.8426991649740465,0.8523470999774317,0.8663958474385015,0.8825885804558791,0.8895283231776123,0.8969194312796208,0.9077522004062288,0.9070751523358158,0.915086887835703,0.9195441209659219,0.9218009478672986,0.9260889189799142,0.9322387722861657,0.9314488828706838,0.9304897314375987,0.9373730534867976,0.9369781087790566,0.9391785150078988,0.938106522229745,0.9409839765290002,0.9377115775220041,0.9435793274655834,0.9396863010607086,0.9457233130218913,0.9444820582261341,0.9478672985781991,0.9474159331979237,0.9484315053035433,0.9498420221169036,0.9503498081697134,0.9480365605958023,0.950970435567592,0.9533965244865719,0.9496163394267659,0.9517603249830738,0.9523809523809523,0.9555969307154141,0.9552019860076733,0.9537914691943128,0.9539043105393816,0.9545813586097947,0.954355675919657,0.9561047167682238,0.9568946061837057,0.9586436470322727,0.956668923493568,0.9605055292259084,0.9558226134055517,0.9571767095463778,0.9596592191378921,0.9576844955991876,0.9623674114195441,0.9594335364477544,0.9623109907470097,0.9609568946061837,0.9621417287294065,0.9622545700744752,0.9609004739336493,0.9590950124125479,0.9640036109230422,0.9610133152787181,0.9618032046942,0.9620853080568721,0.9654141277364027,0.9628751974723538,0.9632137215075603,0.9643985556307831,0.9658090724441435,0.9669374858948319,0.9638907695779734,0.9650756037011962,0.9631008801624915,0.9645678176483864,0.9686301060708644,0.9650756037011962,0.9658654931166779,0.9655833897540058,0.9655833897540058,0.9648499210110585,0.9646242383209208,0.9649063416835929,0.9653012863913338,0.9670503272399007,0.9660911758068156,0.9666553825321598,0.9646242383209208)
lextree = c(0.7424611910810048, 0.765379621789444, 0.7834942139429861, 0.8010668924640135, 0.8163872424499012, 0.8279762912785775, 0.8401749929438329, 0.8500141123341801, 0.8590008467400508, 0.8656675134067174, 0.8728760937058989, 0.8791419700818516, 0.8845554614733276, 0.8876319503245836, 0.8940728196443691, 0.8959074230877787, 0.8986226361840248, 0.9025063505503811, 0.9032853513971211, 0.9054925204628845, 0.9074174428450466, 0.9084165961049958, 0.9105108664973186, 0.9119051651143099, 0.912125317527519, 0.9132317245272368, 0.9145695738075078, 0.9151679367767429, 0.9163985323172452, 0.9175726785210274, 0.918650860852385, 0.917888794806661, 0.9189556872706746, 0.9199040361275755, 0.9213773638159751, 0.9213096246119108, 0.9213265594129268, 0.9214451030200396, 0.9231329381879763, 0.9231216483206323, 0.9250014112334181, 0.9257239627434377, 0.9262094270392323, 0.9265707027942421, 0.9276601749929438, 0.9275246965848152, 0.9280045159469377, 0.9283657917019476, 0.9296528365791702, 0.929427039232289, 0.9302229748800451, 0.9295455828394016, 0.9308439175839683, 0.9294552639006491, 0.931927744848998, 0.93193338978267, 0.9328252893028507, 0.9330285069150438, 0.9328027095681626, 0.9351566469093988, 0.9336325148179508, 0.9352977702511995, 0.9361332204346599, 0.9349026248941575, 0.9370251199548405, 0.9373525261078183, 0.9368557719446796, 0.9378267005362687, 0.9378323454699408, 0.9387806943268416, 0.937894439740333, 0.9395032458368614, 0.9404346598927462, 0.9399040361275754, 0.9416991250352809, 0.94152977702512, 0.9420604007902907, 0.941930567315834, 0.9419700818515383, 0.942303132938188, 0.9430200395145357, 0.9435393734123624, 0.9445611064069998, 0.9447699689528648, 0.944990121366074, 0.9449336720293536, 0.9460795935647757)
# times: per-algorithm recommendation times (in ms), same SMALL / MEDIUM / BIG ordering as above
Naiftemps = c(6.130709539279385E-4,0.0010267396264028352,0.0013801202008269344,0.0017326179858239811,0.0020518761444181925,0.002346206364441819,0.002692112743650325,0.002958922474896633,0.003319325125516834,0.003587738038984052,0.003918995643827525,0.004165597903130537,0.0044631892350856465,0.0047999383490844654,0.0050834096647962195,0.005496392166272888,0.005642942336089782,0.005919015468103957,0.00625843687241583,0.006549908446544596,0.006807933069994093,0.007017995976077968,0.007500659111045482,0.007785269750443,0.007988596204961607,0.008440687869167159,0.008594035624630833,0.00897847844063792,0.00915021902687537,0.009574535513880685,0.00982102030419374,0.010038620016243355,0.010323273922031896,0.010534935727997637,0.011071919115475488,0.011099380094506792,0.011406020230360307,0.011662656083874779,0.01210107704518606,0.012279355212640284,0.01246057553160071,0.01273974667749557,0.013162413319551093,0.013605502362669816,0.013554199940933254,0.0139077325753101,0.014163541088304784,0.014468727148552865)
jointreetemps = c(0.05651038696101595,0.05548537341258122,0.06120626594802126,0.0623701068738925,0.062498095688127586,0.06209152920112227,0.061362323464264616,0.061881492247489664,0.059686154865623155,0.058089586200531604,0.056820131681925574,0.05545843233165978,0.0545152713747785,0.052729507567926756,0.05129026760927348,0.050216916014471355,0.04846936488481984,0.04752081940342587,0.0458392634746013,0.0454242981024808,0.0435369152023036,0.042790418266391024,0.0409652832250443,0.03991381951417602,0.03865888249409333,0.037593355286473715,0.03649868366066155,0.03593367609273479,0.0345449454001772,0.03359148778056704,0.03271498050797401,0.03201600616509155,0.031172961237448317,0.030205463046367394,0.02980450838009451,0.0286410023257531,0.02801563363851152,0.02784097995422327,0.026706283298877732,0.0259410680744241,0.02541740527170703,0.024615321249261666,0.024441794853809803,0.023971850634967512,0.02294020736119315,0.02211592062906084,0.021499690231836975,0.020839556445658595)
wmvtemps = c(1.4945923339855287,1.5503172643975192,1.5912161551978736,1.6307620040977555,1.6681299061946249,1.7010002079887774,1.731601563792085,1.757977097164796,1.7833442033003544,1.8081031587049616,1.8335790233682812,1.8605553988112817,1.8899077695289428,1.9132010174985232,1.9398820778942705,1.9662830562979918,1.9924716027392204,2.0200019507900175,2.0456657402170704,2.071098103440638,2.097055232944477,2.122521191523922,2.1478210756054343,2.1732904908446544,2.198280694735676,2.223008907892794,2.247929881792676,2.2736685642350856,2.2997701271411697,2.3253274643384523,2.351896904348789,2.3767740933254577,2.4029979824276433,2.4299604074497934,2.456764699461016,2.483868777945954,2.5118990184214414,2.5411084656305376,2.571386206733609,2.603331608239811,2.6376037483387478,2.676942267018606,2.722561480249557,2.7757107274808033,2.8423695671145897,2.9231620128101006,3.0207718459465447,3.132521652281453)
vpoptemps = c(0.619880177753987,0.6692052204666273,0.7317778304415239,0.7835428034554046,0.8393094951269935,0.8925847076934436,0.9401803382309509,0.9840903001698169,1.026910106024808,1.0680262242321323,1.1092238419595393,1.1498626991656822,1.1903069023552866,1.2316474228440637,1.2740687191376254,1.316035523405198,1.3584633255685175,1.400294085757531,1.4422439237669817,1.4846440511665682,1.5258952243797992,1.568353883601595,1.6096364962714116,1.6515207696396927,1.6928072715224454,1.7338529835720615,1.774978728440638,1.8153042948538098,1.8561382867321323,1.8965240490992321,1.9361281432737745,1.9757038995126994,2.0158091659775548,2.0561014703558773,2.094314386961016,2.1337520829149437,2.172147336163615,2.2094566018163024,2.246534808992912,2.282568659664796,2.318733150361784,2.35474384476521,2.3906763707914944,2.4295996256645007,2.4761857712640283,2.536152407966627,2.6127072540977556,2.7319425257678676)
vnaiftemps = c(0.5654878922031896,0.6056752266317188,0.6490572558697578,0.6829557048139397,0.720100048471648,0.7555922484494979,0.7874808443591258,0.8166435704370939,0.8435650142498523,0.8696579058254578,0.8955631147002363,0.919717621492912,0.9444845379134672,0.9698594909184879,0.9959814455478441,1.0217668153425872,1.048007879023922,1.0745154691007088,1.1007386338230951,1.1269935481024809,1.15379970492469,1.1801322384819846,1.2061039145747194,1.2331453213600119,1.2602134606836977,1.287519607722977,1.3146178301092735,1.3419053928307738,1.3691115496898996,1.3972928465002954,1.4253422753987006,1.4536974013216184,1.4830541429415238,1.513018030936208,1.542581987263733,1.57262709757088,1.6032094773331365,1.6335512685321913,1.6641784893310692,1.6960786059878914,1.729621013511518,1.7641430136961016,1.8012120921441228,1.8432790326343769,1.8957278415534553,1.962086357981394,2.0506174140209685,2.171628945658594)
lextreetemps = c(9.385951491435322E-4, 9.206999003248672E-4, 9.574053750738335E-4, 0.0010206393532191376, 0.0010126803049320733, 0.0010660335240696987, 0.0010511686909332545, 0.0011193464153868872, 0.0011249905973124631, 0.0011314190527170703, 0.001175395976077968, 0.0011822279976373303, 0.0012078975413467219, 0.0012309438275251033, 0.0012884719137625518, 0.0013359795407560542, 0.0013247912655050206, 0.0013227320326343768, 0.001407098006497342, 0.0013673426055818075, 0.0013887386001181334, 0.0014351453189604252, 0.0014999887588600118, 0.001494079758564678, 0.0014659799320732427, 0.0014828400472533963, 0.001497552425428234, 0.001513701391760189, 0.0015271906453041936, 0.0015380687020082693, 0.001590135713230951, 0.0015552598272297697, 0.0016029215667454223, 0.0015638766870939161, 0.0016160766723272298, 0.00157890957250443, 0.0016145971020378026, 0.0015807919706142942, 0.0015811648663614885, 0.0016260900915534553, 0.0016257978108387477, 0.0015790943738924986, 0.0015788565305670408, 0.0015757875738334319, 0.0016151606024808033, 0.0015604985639397519, 0.0015971071544595393, 0.001537642295481394)
# MEDIUM
Naiftemps = c(0.0010116810496415528,0.0016883997700527526,0.0023949896523738673,0.0029658426213986203,0.0036375064926281618,0.004223179629379142,0.004630473285540376,0.005313823278777221,0.005876314892465846,0.006559031719193832,0.00682051251183552,0.007690403422156093,0.008060764439334506,0.008814894968213175,0.009339008318679832,0.009729382524009197,0.010218030907615312,0.010800554646287028,0.011279527728932775,0.01216870681725957,0.012532031786825375,0.013121461179494117,0.013982032530772352,0.014095038617611255,0.014546872987961585,0.01565805593128635,0.015851718652779657,0.01617400730420668,0.01710188137427296,0.017532312525361828,0.018411986744217502,0.019141338090085214,0.01930081678614906,0.019518399837684296,0.01986956134180982,0.0209797400919789,0.021716014878939538,0.022102245502502366,0.022544349925605302,0.022829389625321248,0.023375756932233194,0.024719914581360747,0.02521486602191262,0.02549994609765995)
jointreetemps = c(0.0691693280806168,0.06654608068443121,0.07256938495874476,0.07377797687001217,0.07320432787772217,0.0722793575679697,0.07092746354659814,0.06932008832679562,0.06753988286216692,0.0655458321385094,0.06349193764371704,0.06123165345597187,0.05893382436088192,0.056715938996347894,0.05464607669417016,0.052414928310564046,0.050042244690923846,0.048058093128635196,0.04610830853510077,0.04415676281617747,0.04236206627891249,0.04499164405518734,0.03902247227106723,0.037351015622886515,0.03585822920330042,0.034600085215744626,0.03333552772893277,0.03199651021236304,0.03092518605437576,0.02984158014337887,0.028999750642499662,0.028218356824022725,0.027235291965372648,0.02678806763154335,0.026055907953469497,0.02523585979981063,0.024701852022183148,0.02511995157581496,0.023732737454348708,0.023295820167726227,0.022987125659407547,0.02264744406871365,0.0223533163803598,0.02221581962667388)
wmvtemps = c(0.7965478627755985,0.8160240042607873,0.8311722452996078,0.8465396813877992,0.8619417899364263,0.8776536955227918,0.8927007136480455,0.9082907774922224,0.9244003205735155,0.9405550520086569,0.9563681658325442,0.9733636394562424,0.9900202305559312,1.0068131076017854,1.0233010451102393,1.039834390707426,1.0561597244014609,1.0720394964155282,1.0884614358852969,1.1044725541728662,1.1198347618017044,1.1354672159475179,1.1509304223589882,1.1660183666305965,1.181659499864737,1.1970076608954416,1.2124630060868389,1.227697067969701,1.2432045641823346,1.2584507239956715,1.2742634885702693,1.2899736402001893,1.3053130770999595,1.3215953077911538,1.3380518186798322,1.3556662739077505,1.374397483565535,1.3950147795211687,1.419355865075071,1.4481907822940618,1.484407602799946,1.5242677865548493,1.5660133616258622,1.6091389751792236)
vpoptemps = c(0.3373159754497498,0.35954324266197757,0.3859007524685513,0.4125853442445557,0.4421256462870283,0.471702110036521,0.500916622480725,0.530880220816989,0.5615572399567158,0.5926065827133775,0.6239073823211145,0.6555718758284864,0.6879136757067497,0.719780246584607,0.7520238882726904,0.7841105075071013,0.8152354380495063,0.8460459151224131,0.876229896320844,0.9068226852427972,0.9363751338428243,0.9659165105505207,0.9933383679832274,1.0202956827404301,1.046768951711078,1.0735229916136886,1.099179509333153,1.1252547484782902,1.1511916317462465,1.1774401381712432,1.2030660683754903,1.2285443556066549,1.25452294609766,1.280623670160963,1.3069319787636955,1.3323488160422021,1.3577788343027188,1.383842335452455,1.4113812024212093,1.4397572673474908,1.4700707158798865,1.5061081802380631,1.5506332132422562,1.6056343209793047)
vnaiftemps = c(0.28934941850399026,0.3064006830109563,0.3251501990396321,0.34224332693088055,0.35937942066819967,0.37544259786284323,0.39022054964155284,0.40448112538888137,0.4178912758014338,0.43131828635195457,0.44431559008521576,0.45747393473556064,0.4703594849181658,0.48379601271473016,0.4974150645881239,0.5110497538888138,0.5242245560665494,0.5376566951846341,0.5510735487623427,0.5647517916948465,0.5781106949141079,0.5918209430542405,0.6054222003922629,0.6188148186798322,0.6324439959421074,0.6463513863790071,0.6603340891383741,0.6741525128499932,0.6881400547139186,0.7022769139726769,0.7166760831867983,0.7312503485729744,0.7452351604220209,0.759776839307453,0.7747295884620587,0.7892993313945624,0.8029099446773975,0.8174296370891384,0.8327835777762749,0.8494901621804409,0.8676641988367374,0.8881704997971054,0.9103823529013932,0.9354209683484377)
lextreetemps = c(9.871808940890032E-4, 0.0010919204247260923, 0.0012270306370891384, 0.0012618103543892872, 0.0013648988570269173, 0.001442163539834979, 0.0016064685783849589, 0.0016502073786013797, 0.0017350571757067496, 0.0018341723319356149, 0.0018537329974300013, 0.001952052380630326, 0.002101698146895712, 0.0022774710536994456, 0.0024094375557960233, 0.0025170279183010956, 0.0026160373123224672, 0.0026916030434194506, 0.0028298945421344514, 0.0030196141147030976, 0.0030911308196943056, 0.0034702625388881372, 0.0035230289124847827, 0.003624732307588259, 0.0037039085824428514, 0.003917542986608955, 0.003986748248343028, 0.00413833606790207, 0.00439140528878669, 0.004361716184228324, 0.004411312586230218, 0.004512773583119167, 0.0046960397538211825, 0.00472514135668876, 0.004590834113350466, 0.004897660739889084, 0.0047370618490463956, 0.004694337772216962, 0.004398824624644935, 0.004273979230353037, 0.0038618103002840526, 0.0034346460503178683, 0.0029147185919112676, 0.0021245620925199513)
# BIG
Naiftemps = c(0.00469632012866091,0.00678952480108346,0.010033462953557925,0.013950615597313922,0.013854764178093787,0.016783919361209865,0.01984493064725467,0.022964014954009366,0.02519751498222448,0.026734815868178996,0.026873487613565825,0.031001877941425428,0.03304656119857796,0.03416763122848598,0.03846027137294735,0.039743153208058234,0.042773104339484225,0.04369320518029456,0.042934489024321426,0.04974415800462728,0.05624536854579313,0.0549238879860053,0.055812429095423506,0.057765659951470004,0.056513454376163876,0.05996653975509283,0.07009525111449692,0.06365193798318379,0.06876009203769538,0.0780441651148355,0.07275697449353874,0.08198136843293267,0.07704852389819987,0.08212766198295807,0.09007219005699453,0.08353381299023757,0.095925197731505,0.08518363077704419,0.09295438237119802,0.09361875887365273,0.09468717346650866,0.10357683099147903,0.10507326956718019,0.1009855371028723,0.1132588661475086,0.10426615811748773,0.12378699255121042,0.10007425297669431,0.1178453370012979,0.12116236595000282,0.11260484481688392,0.11937027504091191,0.11974064996332036,0.129627940804695,0.13540990141639864,0.12586783059646747,0.13246467857344393,0.14363830370746572,0.13832812194571412,0.13938311116754135,0.1345764880925508,0.14589631591422123,0.14171031202031603,0.15190489057562076,0.16036516416478555,0.15490840411963883,0.15825513888261852,0.1580144130925508,0.1575681183972912,0.16545660536117382,0.15969180874717834,0.16350425163656884,0.16294880428893904,0.17905278132054175,0.158539725,0.1891270586343115,0.174996566986456,0.18032123572234762,0.18605601224604967,0.17935054232505643,0.18371200090293455,0.18591498634311512,0.19241401495485327,0.1880498059255079,0.19844988448081263,0.20547052731376975,0.210128034255079)
jointreetemps = c(27.246312220077872,27.102715229783872,32.85797541239208,34.416307678234865,34.413268514192204,33.77221092274702,32.76229998685176,31.542794389932848,30.23325599983071,28.915683370859433,27.645367995993453,26.341602487444277,25.11493569775972,23.9426225814006,22.81615690158569,21.702456376051014,20.634195804017832,19.642536831950792,18.752658712149426,17.880941349641667,17.045190713616613,16.28752611319903,15.576991844816884,14.853247893459738,14.230431429264714,13.644606318266463,13.042544451216072,12.510861039219005,12.026990894362621,11.568933852886406,11.155899998645674,10.729652745838271,10.343556190790588,9.991670337791321,9.651479521189549,9.334602300547374,9.040392692906721,8.76525572315332,8.494856722589018,8.259694301619547,8.041163151966593,7.837130728852774,7.646287331188985,7.458310030867333,7.291952925963546,7.143948537215732,6.9897277505784094,6.848951528807629,6.718358433891993,6.589039963376784,6.46887727193725,6.353449732577168,6.24240912606512,6.12952212595226,6.029689841036059,5.932614609051408,5.8386968129902375,5.756244473110998,5.6623857901924275,5.573599088821172,5.492725137020316,5.40615928476298,5.327414508239277,5.2520847597065465,5.17176913244921,5.091933485835215,5.0233337465575625,4.9437326619074495,4.867635777765237,4.79375961506772,4.718916469638827,4.636837774435666,4.567317421726862,4.489804258295711,4.420138081038375,4.354781099379233,4.29003522037246,4.231350004232506,4.201858342155756,4.138456707223476,4.096267666873589,4.0839125694695255,4.0599902200338605,4.057317436173815,4.076606144300226,4.125554272799097,4.224577781376976)
wmvtemps = c(1.3014854312232003,1.4293006351839315,1.5217993793726021,1.6177837793387497,1.7165966678515008,1.7916713598510494,1.8508638076055066,1.8927339502933875,1.9215107408034304,1.940394192281652,1.9544777582374182,1.9648255922477995,1.9731129533401037,1.9775470077296322,1.983538067197021,1.987533917964342,1.995052105958023,2.0031658032046944,2.0091116354660348,2.0185100530918527,2.028649178345746,2.036527794403069,2.0443198671857368,2.054498832035658,2.0618487954186415,2.071772400304672,2.082963361938614,2.092457092360641,2.1021747085872264,2.11278116108102,2.1230016150417512,2.131670333728278,2.141553279282329,2.153808752369668,2.1659780175468293,2.177783284134507,2.1910185012976755,2.203252730309185,2.2150114293048975,2.2289482340893705,2.244422028436019,2.256098636425186,2.270281563924622,2.284246749097269,2.298065818381855,2.3114004930038368,2.3264456029113068,2.3389780284924395,2.355831651602347,2.3709314108553374,2.3871641346197245,2.402832263258858,2.419853930038366,2.436817791469194,2.4559093885691716,2.4738666230534867,2.4910949904649065,2.5098773295531482,2.527000736007673,2.5417984347777027,2.5597529294741594,2.580133362107876,2.5969212523132477,2.616967046659896,2.6356342306477094,2.654149471394719,2.673957175863236,2.692568846422929,2.7145501565109456,2.7330755097043555,2.751333355958023,2.770747666666667,2.7902277879711126,2.8138241220379148,2.835876202606635,2.861596143308508,2.88782152014218,2.913498345802302,2.9430133494696458,2.9720352645565336,3.0073289522681113,3.0449059916497405,3.0781286727036785,3.11233659732566,3.1506251038140376,3.1928383696682463,3.2362516150417515)
vpoptemps = c(0.7227277617919206,0.8176607521439856,0.8685920144436922,0.9502131319115323,1.0216973851275106,1.0768979904084857,1.122970468291582,1.160496698149402,1.1933421155495374,1.2239136979237193,1.252892573403295,1.281925575829384,1.311307916779508,1.340418865436696,1.3704302575603702,1.4022050484653577,1.4339764114759648,1.4657812242157526,1.4993304166666668,1.5340441478785827,1.570551124746107,1.6053883865380276,1.6435179111938614,1.6811516784585872,1.7186142176709547,1.7574324353419093,1.7949052927104492,1.8314419151997292,1.8706251108102008,1.90894174413225,1.9504140219476416,1.9909565009591514,2.032009888907696,2.071109803373956,2.111297498984428,2.152150255359964,2.192158244470774,2.234125353531934,2.273793709489957,2.316473826957797,2.3571149058338974,2.397940707289551,2.4398795717670954,2.480520919036335,2.5215542206612502,2.5621895197472355,2.6030560216091176,2.6435434206161137,2.68447965690589,2.7264281204581358,2.767640745147822,2.8107752989731436,2.8542455574362444,2.8974845194087115,2.941577401320244,2.983495113179869,3.027405131121643,3.0708076930151207,3.1148969090498757,3.1577836491762583,3.203265552189122,3.2486963406680207,3.296698244865719,3.343202899176258,3.392217123674114,3.4391654427330174,3.4891256319115325,3.5382426043218236,3.589984524994358,3.6454421313473255,3.7022196170728954,3.760034158824193,3.8207523094673888,3.8792767903972014,3.946767140769578,4.013047620232453,4.087033631798691,4.160871990972693,4.244349295982848,4.329059756149853,4.425155779056646,4.510295066463552,4.615544396468066,4.701517446118258,4.805407352572782,4.852121004062289,4.838807259252991)
vnaiftemps = c(0.6451137929361318,0.7004251516023471,0.7402862512412548,0.7941943547731889,0.8436578763258858,0.8850182477431731,0.9191662471789663,0.9456509149176259,0.9681622384337621,0.9869423571428572,1.0011106430828256,1.0154165848566914,1.0262460703565786,1.0352890938275785,1.0451006760324983,1.053796864421124,1.0625801736064093,1.073303820300158,1.0817336954412096,1.0911937024937937,1.1003672994809297,1.109127040340781,1.11878846812232,1.129006776686978,1.1406527800722184,1.1524109413789212,1.162298409896186,1.172779033795983,1.1848031659896185,1.1972309004739337,1.209998938670729,1.2215383002708193,1.2339877545136537,1.2478261321936357,1.262392099413225,1.2763035444030693,1.2883153586097946,1.302996368539833,1.3156883779620854,1.3307283744640037,1.3441463018505981,1.3648075778605282,1.373427721902505,1.3884467777589709,1.4039782082487022,1.418676180546152,1.434322146129542,1.4494948827578424,1.4646719453283683,1.4792495922477995,1.4947454961069735,1.5097882015910629,1.5263681480478448,1.54355282046942,1.5574965721620402,1.57437514596028,1.5904318558451818,1.6071270486910405,1.624394095971564,1.642454921575265,1.6590645144436922,1.6750880820920786,1.6930401014443692,1.7099707239900699,1.7283097175581132,1.7450942695779734,1.7636017050891446,1.7834564677273752,1.8007157356127286,1.8210072708756488,1.8392085387045813,1.8621039884901829,1.8897202618483413,1.905506202775897,1.9295180107199277,1.9544357783795983,1.9819774249605056,2.0094258825885802,2.040351750507786,2.072305510212142,2.1026803915594674,2.139019198995712,2.173030381516588,2.2094696697698035,2.2442522798465356,2.2808345163055743,2.3180854916497404)
lextreetemps = c(0.0016982109850409257, 0.0016183629805249788, 0.0016514960824160316, 0.0016693256223539374, 0.001678907248094835, 0.0016918056562235393, 0.001712892734970364, 0.0017309370702794243, 0.0017356804403048263, 0.0017490602314422805, 0.0017630764775613886, 0.001768132633361558, 0.001769494541349139, 0.0017885119390347163, 0.00179574688681908, 0.0017999977533163986, 0.001802400344340954, 0.0018416144510302005, 0.001827193355913068, 0.001834829844764324, 0.0018491389048828676, 0.0018589804064352243, 0.0018688644538526673, 0.001878152204346599, 0.0018913192040643523, 0.0019033213491391476, 0.0019160837764606265, 0.001925194405870731, 0.0019385509060118543, 0.0019486479424216765, 0.0019615007112616426, 0.0019732338131526954, 0.0019885149364944964, 0.001999172876093706, 0.002052725475585662, 0.0020244470505221563, 0.0020302198927462604, 0.0020494604346598926, 0.0020594874682472483, 0.0020678437256562237, 0.0020803426192492237, 0.0020900380976573525, 0.0020992857239627433, 0.0021098250127011007, 0.00211570282811177, 0.002121823838554897, 0.0021366507479537115, 0.0021429323736946093, 0.002146472548687553, 0.002155329156082416, 0.0021833488060965284, 0.0021642386113463166, 0.002166257888794807, 0.0021760188202088627, 0.002174500208862546, 0.002175848190798758, 0.0021818443240191926, 0.002179670657634773, 0.0021839889923793397, 0.002183961343494214, 0.002182636150155236, 0.0021819025740897544, 0.0021802319559695174, 0.0021853985661868473, 0.0021834517979113744, 0.00218234516511431, 0.002184271769686706, 0.0021765833135760655, 0.00217332661021733, 0.002173064578041208, 0.002174775591306802, 0.0021725151453570422, 0.0021662781484617557, 0.002163636748518205, 0.002160738729889924, 0.002160683974033305, 0.002159173096246119, 0.0021577892915608243, 0.002167361501552357, 0.002146954541349139, 0.0021496048941574935, 0.0021383544679650012, 0.0021277644707874683, 0.0021259769291560823, 0.0021188804233700253, 0.0021153694270392323, 0.0020956159243578887)
#--------------------------------------------------------------------------------------------
# graph parameters
# -- set manually
x_pas_erreur = 4
y_pas_erreur = 3
x_legend_erreur = 0
y_legend_erreur = 50
y_padding_erreur = 1
x_pas_time = 4
y_pas_time = 3
x_legend_time = 0
y_legend_time = 0.013
y_padding_time = 0.01
# -- computed automatically
# CONVERT TO ERROR RATE (accuracy -> error, in %)
oracle = 100-100*oracle
Naif = 100-100*Naif
jointree = 100-100*jointree
wmv = 100-100*wmv
vpop = 100-100*vpop
vnaif = 100-100*vnaif
lextree = 100-100*lextree
c_oracle = 100*c_oracle
c_Naif = 100*c_Naif
c_jointree = 100*c_jointree
c_wmv = 100*c_wmv
c_vpop = 100*c_vpop
c_vnaif = 100*c_vnaif
# END OF CONVERSION TO ERROR RATE
# the original reassigned nb_val once per series; only the last assignment took effect
nb_val = length(vnaif)
min_val_erreur = 30000
# the series already hold error rates after the conversion above, so they are used
# directly here (re-applying 100-100*x, as the original did, would double-convert)
min_val_erreur = min(min_val_erreur,oracle)
min_val_erreur = min(min_val_erreur,Naif)
min_val_erreur = min(min_val_erreur,jointree)
min_val_erreur = min(min_val_erreur,wmv)
min_val_erreur = min(min_val_erreur,vpop)
min_val_erreur = min(min_val_erreur,vnaif)
min_val_erreur = ceiling(min_val_erreur)
max_val_erreur = 0 # start low so the running max can track the data (the original 30000 pinned it)
max_val_erreur = max(max_val_erreur,oracle)
max_val_erreur = max(max_val_erreur,Naif)
max_val_erreur = max(max_val_erreur,jointree)
max_val_erreur = max(max_val_erreur,wmv)
max_val_erreur = max(max_val_erreur,vpop)
max_val_erreur = max(max_val_erreur,vnaif)
max_val_erreur = floor(max_val_erreur)
size = 0:(nb_val-1)
x_lim_erreur = c(0,(nb_val-1))
if(min_val_erreur-y_padding_erreur <= 0){
min_val_erreur = min_val_erreur+y_padding_erreur+0.00001
}
y_lim_erreur = c((min_val_erreur-y_padding_erreur),(max_val_erreur+y_padding_erreur))
x_axp_erreur = c(0, (nb_val-1), x_pas_erreur)
y_axp_erreur = c((min_val_erreur-y_padding_erreur),(max_val_erreur+y_padding_erreur),y_pas_erreur)
min_val_time = 30000
# fixed copy-paste bugs: the original compared against min_val_erreur and
# re-applied the 100-100* error conversion, which has no meaning for times
min_val_time = min(min_val_time,Naiftemps)
min_val_time = min(min_val_time,jointreetemps)
min_val_time = min(min_val_time,wmvtemps)
min_val_time = min(min_val_time,vpoptemps)
min_val_time = min(min_val_time,vnaiftemps)
min_val_time = ceiling(min_val_time)
max_val_time = 0 # start at 0 (times are positive); the original 30000 pinned the max
max_val_time = max(max_val_time,Naiftemps)
max_val_time = max(max_val_time,jointreetemps)
max_val_time = max(max_val_time,wmvtemps)
max_val_time = max(max_val_time,vpoptemps)
max_val_time = max(max_val_time,vnaiftemps)
max_val_time = floor(max_val_time)
x_lim_time = c(0,(nb_val-1))
if(min_val_time-y_padding_time <= 0){
min_val_time = min_val_time+y_padding_time+0.00001
}
size = 0:(nb_val-1)
y_lim_time = c((min_val_time-y_padding_time),(max_val_time+y_padding_time))
x_axp_time = c(0, (nb_val-1), x_pas_time) # was x_pas_erreur (copy-paste)
y_axp_time = c((min_val_time-y_padding_time),(max_val_time+y_padding_time),y_pas_time)
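# NB: the limit/axis variables computed above (x_lim_erreur, y_axp_time, ...) are
# not actually passed to the ggplot calls below, which rely on defaults and the log scale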
# end of graph parameters
#--------------------------------------------------------------------------------------------
(ggplot(NULL, aes(size)) + #scale_y_log10(breaks = round(seq(0, 100, by = 5),1)) + annotation_logticks(sides="l") +
ylab("Error rate (%)") + xlab("Number of configurated attributes") + theme_bw() #+ theme(legend.position="bottom")
+geom_line(aes(y=Naif, colour="Naive Bayes"), colour="turquoise2", linetype = "dotted") + geom_point(aes(y=Naif, shape="Naive Bayes"), colour="turquoise2", fill="turquoise2")
+geom_line(aes(y=wmv, colour="Weighted Majority Voter"), colour="deeppink2", linetype = "dotted") + geom_point(aes(y=wmv, shape="Weighted Majority Voter"), colour="deeppink2", fill="deeppink2")
+geom_line(aes(y=vpop, colour="Most Popular Choice"), colour="gold2", linetype = "dotted") + geom_point(aes(y=vpop, shape="Most Popular Choice"), colour="gold2", fill="gold2")
+geom_line(aes(y=vnaif, colour="Naive Bayes Voter"), colour="firebrick3", linetype = "dotted") + geom_point(aes(y=vnaif, shape="Naive Bayes Voter"), colour="firebrick3", fill="firebrick3")
+geom_line(aes(y=jointree, colour="Classical Bayes"), colour="springgreen4", linetype = "dotted") + geom_point(aes(y=jointree, shape="Classical Bayes"), colour="springgreen4", fill="springgreen4")
+geom_line(aes(y=lextree, colour="2-LP-tree (3 cl.)"), colour="dodgerblue4", linetype = "dotted") + geom_point(aes(y=lextree, shape="2-LP-tree (3 cl.)"), colour="dodgerblue4", fill="dodgerblue4")
+geom_line(aes(y=oracle, colour="Oracle"), colour="black", linetype = "dotted") + geom_point(aes(y=oracle, shape="Oracle"), colour="black", fill="black")
# + theme(legend.position=c(0.75,0.65), legend.background = element_rect(fill=alpha('blue', 0)))
+ theme(legend.position=c(0.75,0.72), legend.background = element_rect(fill=alpha('blue', 0)))
+ scale_colour_manual(name = 'Legend', guide = 'legend',
limits = c(NULL
,'Classical Bayes' #jointree
,'Naive Bayes' #Naif
,'Weighted Majority Voter' #wmv
,'Most Popular Choice' #vpop
,'Naive Bayes Voter' #vnaif
,'2-LP-tree (3 cl.)'
,'Oracle' #oracle
),
values =c(NULL
                       ,'Classical Bayes'='springgreen4' #jointree (matched to the series colour)
,'Naive Bayes'='turquoise2' #Naif
,'Weighted Majority Voter'='deeppink2' #wmv
,'Most Popular Choice'='gold2' #vpop
,'Naive Bayes Voter'='firebrick3' #vnaif
,'2-LP-tree (3 cl.)'='dodgerblue4'
,'Oracle'='black' #oracle
))
+ scale_shape_manual(name = 'Legend', guide = 'legend',
limits = c(NULL
,'Classical Bayes' #jointree
,'Naive Bayes' #Naif
,'Weighted Majority Voter' #wmv
,'Most Popular Choice' #vpop
,'Naive Bayes Voter' #vnaif
,'2-LP-tree (3 cl.)'
,'Oracle' #oracle
),
values =c(NULL
,'Classical Bayes'=21 #jointree
,'Naive Bayes'=24 #Naif
,'Weighted Majority Voter'=25 #wmv
,'Most Popular Choice'=22 #vpop
,'Naive Bayes Voter'=23 #vnaif
,'2-LP-tree (3 cl.)'=1
,'Oracle'=4 #oracle
))
+ guides(shape = guide_legend(override.aes = list(colour = c(NULL
                                                ,'Classical Bayes'='springgreen4' #jointree
,'Naive Bayes'='turquoise2' #Naif
,'Weighted Majority Voter'='deeppink2' #wmv
,'Most Popular Choice'='gold2' #vpop
,'Naive Bayes Voter'='firebrick3' #vnaif
,'2-LP-tree (3 cl.)'='dodgerblue4'
,'Oracle'='black' #oracle
),
fill = c(NULL
                                                ,'Classical Bayes'='springgreen4' #jointree
,'Naive Bayes'='turquoise2' #Naif
,'Weighted Majority Voter'='deeppink2' #wmv
,'Most Popular Choice'='gold2' #vpop
,'Naive Bayes Voter'='firebrick3' #vnaif
,'2-LP-tree (3 cl.)'='dodgerblue4'
,'Oracle'='black' #oracle
))))
)
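# Second figure: recommendation time (ms) on a log10 y axis against the number of configured attributes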
(ggplot(NULL, aes(size))
+ scale_y_log10(
breaks = scales::trans_breaks("log10", function(x) 10^x),
labels = scales::trans_format("log10", function(x) round(10^x,3)))
+ annotation_logticks(sides="l")
+ theme_bw() +
ylab("Recommendation time (ms)") + xlab("Number of configurated attributes") #+ theme(legend.position="bottom")
+geom_line(aes(y=jointreetemps, colour="Classical Bayes"), colour="springgreen4", linetype = "dotted") + geom_point(aes(y=jointreetemps, shape="Classical Bayes"), colour="springgreen4", fill="springgreen4")
+geom_line(aes(y=Naiftemps, colour="Naive Bayes"), colour="turquoise2", linetype = "dotted") + geom_point(aes(y=Naiftemps, shape="Naive Bayes"), colour="turquoise2", fill="turquoise2")
+geom_line(aes(y=wmvtemps, colour="Weighted Majority Voter"), colour="deeppink2", linetype = "dotted") + geom_point(aes(y=wmvtemps, shape="Weighted Majority Voter"), colour="deeppink2", fill="deeppink2")
+geom_line(aes(y=vpoptemps, colour="Most Popular Choice"), colour="gold2", linetype = "dotted") + geom_point(aes(y=vpoptemps, shape="Most Popular Choice"), colour="gold2", fill="gold2")
+geom_line(aes(y=vnaiftemps, colour="Naive Bayes Voter"), colour="firebrick3", linetype = "dotted") + geom_point(aes(y=vnaiftemps, shape="Naive Bayes Voter"), colour="firebrick3", fill="firebrick3")
+geom_line(aes(y=lextreetemps, colour="2-LP-tree (3 cl.)"), colour="dodgerblue4", linetype = "dotted") + geom_point(aes(y=lextreetemps, shape="2-LP-tree (3 cl.)"), colour="dodgerblue4", fill="dodgerblue4")
# + theme(legend.position=c(0.79,0.66), legend.background = element_rect(fill=alpha('blue', 0)))
# + theme(legend.position=c(0.79,0.2), legend.background = element_rect(fill=alpha('blue', 0)))
+ theme(legend.position="bottom")
+ scale_colour_manual(name = 'Legend', guide = 'legend',
limits = c(NULL
,'Classical Bayes' #jointree
,'Naive Bayes' #Naif
,'Weighted Majority Voter' #wmv
,'Most Popular Choice' #vpop
,'Naive Bayes Voter' #vnaif
,'2-LP-tree (3 cl.)'
),
values =c(NULL
                       ,'Classical Bayes'='springgreen4' #jointree (matched to the series colour)
,'Naive Bayes'='turquoise2' #Naif
,'Weighted Majority Voter'='deeppink2' #wmv
,'Most Popular Choice'='gold2' #vpop
,'Naive Bayes Voter'='firebrick3' #vnaif
,'2-LP-tree (3 cl.)'='dodgerblue4'
))
+ scale_shape_manual(name = 'Legend', guide = 'legend',
limits = c(NULL
,'Classical Bayes' #jointree
,'Naive Bayes' #Naif
,'Weighted Majority Voter' #wmv
,'Most Popular Choice' #vpop
,'Naive Bayes Voter' #vnaif
,'2-LP-tree (3 cl.)'
),
values =c(NULL
,'Classical Bayes'=21 #jointree
,'Naive Bayes'=24 #Naif
,'Weighted Majority Voter'=25 #wmv
,'Most Popular Choice'=22 #vpop
,'Naive Bayes Voter'=23 #vnaif
,'2-LP-tree (3 cl.)'=1
))
+ guides(shape = guide_legend(override.aes = list(colour = c(NULL
                                                ,'Classical Bayes'='springgreen4' #jointree
,'Naive Bayes'='turquoise2' #Naif
,'Weighted Majority Voter'='deeppink2' #wmv
,'Most Popular Choice'='gold2' #vpop
,'Naive Bayes Voter'='firebrick3' #vnaif
,'2-LP-tree (3 cl.)'='dodgerblue4'
),
fill = c(NULL
                                                ,'Classical Bayes'='springgreen4' #jointree
,'Naive Bayes'='turquoise2' #Naif
,'Weighted Majority Voter'='deeppink2' #wmv
,'Most Popular Choice'='gold2' #vpop
,'Naive Bayes Voter'='firebrick3' #vnaif
,'2-LP-tree (3 cl.)'='dodgerblue4'
))))
)
| 58,938 | gpl-3.0 |
29c062e99ea01825f3faf03a4bc83ed93dbb4827 | rho-devel/rho | src/extra/testr/filtered-test-suite/format/tc_format_34.R | expected <- eval(parse(text="structure(c(\"213198964\", \" 652425\"), .Names = c(\"null.deviance\", \"deviance\"))"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(213198964, 652424.52183908), .Names = c(\"null.deviance\", \"deviance\")), FALSE, 5L, 0L, NULL, 3L, TRUE, NA)"));
.Internal(format(argv[[1]], argv[[2]], argv[[3]], argv[[4]], argv[[5]], argv[[6]], argv[[7]], argv[[8]]));
}, o=expected);
| 479 | gpl-2.0 |
c710fd01f5b72f465ccba907066bc88f2fd967de | realviacauchy/shiny-court-grapher | reports/TDO_Qtrly_Long_FY08-FY15_PLOT.R | ##################
# Draws quarterly graph of adult TDOs from eMagistrate data
# One line, with fiscal quarters on the x axis and counts on the y axis
##################
#source("reports/emagistrate_prep.R")
library(dplyr)
library(pander)
library(ggplot2)
library(tidyr) # unite() below comes from tidyr, which was not loaded
TDO <-
emags %>%
filter(Type=="TDO", FYear>2007)
TDO_Qtrly <- TDO %>%
group_by(FQtr, FYear) %>%
summarise(count = sum(Process.Count))
TDO_Qtrly$FQtr <- factor(TDO_Qtrly$FQtr)
TDO_Qtrly_Long<-
unite(TDO_Qtrly, Fyear_FQtr, FYear, FQtr, sep="-")
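# unite() collapses FYear and FQtr into one label per quarter (e.g. "2008-1") for the x axis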
TDO_Qtrly_Long_Plot <-
ggplot(TDO_Qtrly_Long, aes(x=Fyear_FQtr, y=count, group=1)) + geom_line() +
ylab("Number of TDOs") + xlab("Fiscal Quarter")
#TDO_Qtrly_Long_Plot + ylim(1000,max(TDO_Qtrly_Long$count)) + geom_line(size=1.2) + ggtitle("Quarterly TDO Trends (Adults Only), FY2008-FY2015") + scale_x_discrete(labels=c("08-1", "08-2", "08-3", "08-4", "09-1", "09-2", "09-3", "09-4", "10-1", "10-2", "10-3", "10-4", "11-1", "11-2", "11-3", "11-4", "12-1", "12-2", "12-3", "12-4", "13-1", "13-2", "13-3", "13-4", "14-1", "14-2", "14-3", "14-4", "15-1", "15-2","15-3", "15-4")) + theme(axis.text.x = element_text(angle=90))
| 1,149 | mit |
29c062e99ea01825f3faf03a4bc83ed93dbb4827 | cxxr-devel/cxxr | src/extra/testr/filtered-test-suite/format/tc_format_34.R | expected <- eval(parse(text="structure(c(\"213198964\", \" 652425\"), .Names = c(\"null.deviance\", \"deviance\"))"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(213198964, 652424.52183908), .Names = c(\"null.deviance\", \"deviance\")), FALSE, 5L, 0L, NULL, 3L, TRUE, NA)"));
.Internal(format(argv[[1]], argv[[2]], argv[[3]], argv[[4]], argv[[5]], argv[[6]], argv[[7]], argv[[8]]));
}, o=expected);
| 479 | gpl-2.0 |
c710fd01f5b72f465ccba907066bc88f2fd967de | zhuoaprilfu/demo_fork | reports/TDO_Qtrly_Long_FY08-FY15_PLOT.R | ##################
# Draws quarterly graph of adult TDOs from eMagistrate data
# One line, with fiscal quarters on the x axis and counts on the y axis
##################
#source("reports/emagistrate_prep.R")
library(dplyr)
library(pander)
library(ggplot2)
library(tidyr) # unite() below comes from tidyr, which was not loaded
TDO <-
emags %>%
filter(Type=="TDO", FYear>2007)
TDO_Qtrly <- TDO %>%
group_by(FQtr, FYear) %>%
summarise(count = sum(Process.Count))
TDO_Qtrly$FQtr <- factor(TDO_Qtrly$FQtr)
TDO_Qtrly_Long<-
unite(TDO_Qtrly, Fyear_FQtr, FYear, FQtr, sep="-")
TDO_Qtrly_Long_Plot <-
ggplot(TDO_Qtrly_Long, aes(x=Fyear_FQtr, y=count, group=1)) + geom_line() +
ylab("Number of TDOs") + xlab("Fiscal Quarter")
#TDO_Qtrly_Long_Plot + ylim(1000,max(TDO_Qtrly_Long$count)) + geom_line(size=1.2) + ggtitle("Quarterly TDO Trends (Adults Only), FY2008-FY2015") + scale_x_discrete(labels=c("08-1", "08-2", "08-3", "08-4", "09-1", "09-2", "09-3", "09-4", "10-1", "10-2", "10-3", "10-4", "11-1", "11-2", "11-3", "11-4", "12-1", "12-2", "12-3", "12-4", "13-1", "13-2", "13-3", "13-4", "14-1", "14-2", "14-3", "14-4", "15-1", "15-2","15-3", "15-4")) + theme(axis.text.x = element_text(angle=90))
| 1,149 | mit |
c710fd01f5b72f465ccba907066bc88f2fd967de | zhuoaprilfu/shiny-court-grapher | reports/TDO_Qtrly_Long_FY08-FY15_PLOT.R | ##################
# Draws quarterly graph of adult TDOs from eMagistrate data
# One line, with fiscal quarters on the x axis and counts on the y axis
##################
#source("reports/emagistrate_prep.R")
library(dplyr)
library(pander)
library(ggplot2)
library(tidyr) # unite() below comes from tidyr, which was not loaded
TDO <-
emags %>%
filter(Type=="TDO", FYear>2007)
TDO_Qtrly <- TDO %>%
group_by(FQtr, FYear) %>%
summarise(count = sum(Process.Count))
TDO_Qtrly$FQtr <- factor(TDO_Qtrly$FQtr)
TDO_Qtrly_Long<-
unite(TDO_Qtrly, Fyear_FQtr, FYear, FQtr, sep="-")
TDO_Qtrly_Long_Plot <-
ggplot(TDO_Qtrly_Long, aes(x=Fyear_FQtr, y=count, group=1)) + geom_line() +
ylab("Number of TDOs") + xlab("Fiscal Quarter")
#TDO_Qtrly_Long_Plot + ylim(1000,max(TDO_Qtrly_Long$count)) + geom_line(size=1.2) + ggtitle("Quarterly TDO Trends (Adults Only), FY2008-FY2015") + scale_x_discrete(labels=c("08-1", "08-2", "08-3", "08-4", "09-1", "09-2", "09-3", "09-4", "10-1", "10-2", "10-3", "10-4", "11-1", "11-2", "11-3", "11-4", "12-1", "12-2", "12-3", "12-4", "13-1", "13-2", "13-3", "13-4", "14-1", "14-2", "14-3", "14-4", "15-1", "15-2","15-3", "15-4")) + theme(axis.text.x = element_text(angle=90))
| 1,149 | mit |
29c062e99ea01825f3faf03a4bc83ed93dbb4827 | kmillar/cxxr | src/extra/testr/filtered-test-suite/format/tc_format_34.R | expected <- eval(parse(text="structure(c(\"213198964\", \" 652425\"), .Names = c(\"null.deviance\", \"deviance\"))"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(213198964, 652424.52183908), .Names = c(\"null.deviance\", \"deviance\")), FALSE, 5L, 0L, NULL, 3L, TRUE, NA)"));
.Internal(format(argv[[1]], argv[[2]], argv[[3]], argv[[4]], argv[[5]], argv[[6]], argv[[7]], argv[[8]]));
}, o=expected);
| 479 | gpl-2.0 |
8a4578e2cbad84471373f96e3bb8e75bfca317a9 | ADIRSE/maddata | app/global_functions.R |
#######################
# GLOBAL FUNCS
#######################
# load air quality measure points
fixStationCodes <- function(code) {
# code <- as.character(code)
if (code<10) {
station_code <- paste('2807900',
code, sep='')
}
else {
station_code <- paste('280790',
code, sep='')
}
station_code
}
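# Sketch of a more compact equivalent, assuming numeric station codes below 100
# (sprintf zero-pads the station number to three digits):
# fixStationCodes <- function(code) sprintf("28079%03d", code)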
# BOA
getBOAData <- function(url){
temp <- getURL(URLencode(url), ssl.verifypeer = FALSE)
  data <- fromJSON(temp, simplifyVector=FALSE)
  data # return explicitly, matching the style of the other helpers in this file
}
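# (getBOAData assumes RCurl's getURL and a fromJSON implementation are attached elsewhere in the app)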
# IMPALA
connectImpala <- function(){
rimpala.init(libs ="lib/impala/impala-jdbc-0.5-2/")
# connect
rimpala.connect("54.171.4.239", port = "21050", principal = "user=guest;password=maddata")
rimpala.usedatabase("bod_pro")
}
disconnectImpala <- function(){
rimpala.close()
}
# identif <- 'PM20742'
# "SELECT * FROM md_trafico_madrid WHERE identif = \"PM20742\" AND fecha > \"2014-09-15\" LIMIT 100 LIMIT 100 LIMIT 100"
getImpalaQuery <- function(identif = 0, date_start = 0, date_end = 0){
  limit <- 100
  query <- "SELECT * FROM md_trafico_madrid"
  # build the WHERE clause from whichever filters were given, so the query
  # stays valid SQL even when identif is omitted but a date filter is set
  conds <- character(0)
  if (identif != 0) {
    conds <- c(conds, paste("identif = ", "\"", identif, "\"", sep = ''))
  }
  if (date_start != 0) {
    conds <- c(conds, paste("fecha > ", "\"", date_start, "\"", sep = ''))
  }
  if (date_end != 0) {
    conds <- c(conds, paste("fecha < ", "\"", date_end, "\"", sep = ''))
  }
  if (length(conds) > 0) {
    query <- paste(query, " WHERE ", paste(conds, collapse = " AND "), sep = '')
  }
  query <- paste(query, " ORDER BY fecha", sep = '')
  if (limit != 0) {
    query <- paste(query, " LIMIT ", as.character(limit), sep = '')
  }
  query
}
getImpalaData <- function(query){
data <- rimpala.query(query)
data
}
getSUMsDataTable <- function(date) {
month_start <- as.Date(date)
day(month_start) <- 1
month_end <- as.Date(date)
day(month_end) <- 1
month(month_end) <- month(month_end) + 1
# month_name <- strptime(date, format = "%M")
# print(month_name)
# sums_data <- rimpala.query("SELECT identif, sum(vmed) as vmed, sum(intensidad) as intensidad FROM md_trafico_madrid WHERE fecha >= \"2014-06-20\" and fecha < \"2014-06-23\" group by identif order by intensidad desc")
query <- paste("SELECT identif, avg(vmed) as velocidad_media, avg(carga) as carga_media, sum(intensidad) as intensidad_total FROM md_trafico_madrid WHERE fecha >= \"",
month_start,
"\" and fecha < \"",
month_end,
"\" group by identif order by carga_media desc",
sep = '')
sums_data <- rimpala.query(query)
sums_data
}
getTrafficPointsChoicesImpala <- function(limit = 0) {
choices <- rimpala.query("SELECT DISTINCT identif FROM md_trafico_madrid")
choices
}
getIDTrafPoint <- function(name){
code <- df_traffic_measure_points[df_traffic_measure_points$name == name, 3]
code
}
getAirQualityPoints <- function() {
num_decimals <- 3
# load air quality measure points
l_airq_measure_points <- read.csv2('data/est_airq_madrid.csv')
df_airq_measure_points <- as.data.frame(l_airq_measure_points)
df_airq_measure_points$Long2 <- as.numeric(as.character(df_airq_measure_points$Long2))
df_airq_measure_points$Lat2 <- as.numeric(as.character(df_airq_measure_points$Lat2))
df_airq_measure_points$Long2 <- round(df_airq_measure_points$Long2, digits = num_decimals)
df_airq_measure_points$Lat2 <- round(df_airq_measure_points$Lat2, digits = num_decimals)
return (df_airq_measure_points)
}
getKMLData <- function () {
kml_url = 'http://datos.madrid.es/egob/catalogo/202088-0-trafico-camaras.kml'
kml_file = 'data/202088-0-trafico-camaras.kml'
  if (!file.exists(kml_file)) {
    # only download when there is no local copy yet
    download(url=kml_url, destfile = kml_file)
  }
# print(toGeoJSON(kml_file))
# toGeoJSON(data=quakes, name="quakes", dest=tempdir(), lat.lon=c(1,2))
# return (toGeoJSON(kml_file))
return (kml_file)
}
addColVis <- function(data) {
nrows <- nrow(data)
data$fillColor <- rgb(runif(nrows),runif(nrows),runif(nrows))
return (data)
}
getCenter <- function(nm, networks){
net_ = networks[[nm]]
lat = as.numeric(net_$lat)/10^6;
lng = as.numeric(net_$lng)/10^6;
return(list(lat = lat, lng = lng))
}
plotMap <- function(num_measure_points = nrow(df_traffic_measure_points),
width = 1600,
height = 800) {
map <- Leaflet$new()
map$tileLayer(provide='Stamen.TonerLite')
# init map
# map$setView(c(40.41, -3.70), zoom = 12, size = c(20, 20))
map$setView(c(40.41, -3.70), zoom = 12)
# filter data points
sub_traffic_measure_points <- df_traffic_measure_points[1:num_measure_points,]
data_ <- sub_traffic_measure_points[,c("lat", "long")]
data_ <- addColVis(data_)
colnames(data_) <- c('lat', 'lng', 'fillColor')
output_geofile <- paste(getwd(), '/data/', sep='')
map$geoJson(
leafletR::toGeoJSON(data_,
lat.lon = c('lat', 'lng'),
dest=output_geofile),
pointToLayer = "#! function(feature, latlng){
return L.circleMarker(latlng, {
radius: 6,
fillColor: 'green',
color: '#333',
weight: 1,
fillOpacity: 0.8
})
} !#"
)
# append markers and popup texts
for(i in 1:num_measure_points) {
html_text <- paste("<h6> Punto de medida del tráfico </h6>")
html_text <- paste(html_text, "<p>", sub_traffic_measure_points$name[i]," </p>")
map$marker(c(sub_traffic_measure_points$lat[i],
sub_traffic_measure_points$long[i]),
bindPopup = html_text)
}
# append markers and popup texts
for(i in 1:nrow(df_airq_measure_points)) {
html_text <- paste("<h6> Estación de calidad del Aire </h6>")
html_text <- paste(html_text, "<p>", df_airq_measure_points$Estacion[i]," </p>")
map$marker(c(df_airq_measure_points$Lat2[i],
df_airq_measure_points$Long2[i]),
bindPopup = html_text)
}
map$enablePopover(TRUE)
map$fullScreen(TRUE)
return(map)
}
# SERIES CHART FOR ONE TRAFFIC MEASURE POINT
getTrafficSeriesChart <- function (traf_point = 'PM20742', date_start, date_end) {
# print(traf_point)
# print(date_start)
# print(date_end)
# print("===========")
query <- getImpalaQuery(traf_point, date_start, date_end)
# get data from impala
data <- getImpalaData(query)
data
} | 7,213 | mit |
29c062e99ea01825f3faf03a4bc83ed93dbb4827 | ArunChauhan/cxxr | src/extra/testr/filtered-test-suite/format/tc_format_34.R | expected <- eval(parse(text="structure(c(\"213198964\", \" 652425\"), .Names = c(\"null.deviance\", \"deviance\"))"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(213198964, 652424.52183908), .Names = c(\"null.deviance\", \"deviance\")), FALSE, 5L, 0L, NULL, 3L, TRUE, NA)"));
.Internal(format(argv[[1]], argv[[2]], argv[[3]], argv[[4]], argv[[5]], argv[[6]], argv[[7]], argv[[8]]));
}, o=expected);
| 479 | gpl-2.0 |
29c062e99ea01825f3faf03a4bc83ed93dbb4827 | kmillar/rho | src/extra/testr/filtered-test-suite/format/tc_format_34.R | expected <- eval(parse(text="structure(c(\"213198964\", \" 652425\"), .Names = c(\"null.deviance\", \"deviance\"))"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(213198964, 652424.52183908), .Names = c(\"null.deviance\", \"deviance\")), FALSE, 5L, 0L, NULL, 3L, TRUE, NA)"));
.Internal(format(argv[[1]], argv[[2]], argv[[3]], argv[[4]], argv[[5]], argv[[6]], argv[[7]], argv[[8]]));
}, o=expected);
| 479 | gpl-2.0 |
5d622a1e4f8df46cbb031e94bf5701f6dbf5edc7 | chichinabo/popyramids_shiny_apps | apps/resources/auto_check_and_install.R | #A short script to help installing packages on the go
#Most useful if you are distributing a set of script files to people who may not be aware that the needed packages are not installed
#Also useful if you use many packages and want to organise their loading at the beginning of a script
need<-c("shiny", "shinydashboard","leaflet","RJSONIO","epade","png","grid","RPostgreSQL") #needed packages
ins<-installed.packages()[,1] #find out which packages are installed
(Get<-need[which(is.na(match(need,ins)))]) # check if the needed packages are installed
if(length(Get)>0){install.packages(Get,repos='https://cran.rstudio.com/')} #install the needed packages if they are not-installed
eval(parse(text=paste("library(",need,")")))#load the needed packages
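# An equivalent, arguably more idiomatic way to attach the packages without
# the eval/parse round-trip (sketch, not part of the original script):
# invisible(lapply(need, library, character.only = TRUE))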
| 754 | gpl-3.0 |
5d622a1e4f8df46cbb031e94bf5701f6dbf5edc7 | chichinabo/shiny_popyramids | apps/resources/auto_check_and_install.R | #A short script to help installing packages on the go
#Most useful if you are distributing a set of script files to people who may not be aware that the needed packages are not installed
#Also useful if you use many packages and want to organise their loading at the beginning of a script
need<-c("shiny", "shinydashboard","leaflet","RJSONIO","epade","png","grid","RPostgreSQL") #needed packages
ins<-installed.packages()[,1] #find out which packages are installed
(Get<-need[which(is.na(match(need,ins)))]) # check if the needed packages are installed
if(length(Get)>0){install.packages(Get,repos='https://cran.rstudio.com/')} #install the needed packages if they are not-installed
eval(parse(text=paste("library(",need,")")))#load the needed packages
| 754 | gpl-3.0 |
29c062e99ea01825f3faf03a4bc83ed93dbb4827 | krlmlr/cxxr | src/extra/testr/filtered-test-suite/format/tc_format_34.R | expected <- eval(parse(text="structure(c(\"213198964\", \" 652425\"), .Names = c(\"null.deviance\", \"deviance\"))"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(213198964, 652424.52183908), .Names = c(\"null.deviance\", \"deviance\")), FALSE, 5L, 0L, NULL, 3L, TRUE, NA)"));
.Internal(format(argv[[1]], argv[[2]], argv[[3]], argv[[4]], argv[[5]], argv[[6]], argv[[7]], argv[[8]]));
}, o=expected);
| 479 | gpl-2.0 |
9af149866568158694b0fb109721e6b128f6f663 | cxxr-devel/cxxr-svn-mirror | src/library/stats/R/ppr.R | # File src/library/stats/R/ppr.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1998 B. D. Ripley
# Copyright (C) 2000-12 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
ppr <- function(x, ...) UseMethod("ppr")
ppr.formula <-
function(formula, data, weights, subset,
na.action, contrasts = NULL, ..., model = FALSE)
{
call <- match.call()
m <- match.call(expand.dots = FALSE)
m$contrasts <- m$... <- NULL
m[[1L]] <- quote(stats::model.frame)
m <- eval(m, parent.frame())
Terms <- attr(m, "terms")
attr(Terms, "intercept") <- 0L
X <- model.matrix(Terms, m, contrasts)
Y <- model.response(m)
w <- model.weights(m)
if(length(w) == 0L) w <- rep(1, nrow(X))
fit <- ppr.default(X, Y, w, ...)
fit$na.action <- attr(m, "na.action")
fit$terms <- Terms
## fix up call to refer to the generic, but leave arg name as `formula'
call[[1L]] <- as.name("ppr")
fit$call <- call
fit$contrasts <- attr(X, "contrasts")
fit$xlevels <- .getXlevels(Terms, m)
if(model) fit$model <- m
structure(fit, class=c("ppr.form", "ppr"))
}
ppr.default <-
function(x, y, weights=rep(1,n), ww=rep(1,q), nterms, max.terms=nterms,
optlevel=2, sm.method=c("supsmu", "spline", "gcvspline"),
bass=0, span=0, df=5, gcvpen=1, ...)
{
call <- match.call()
call[[1L]] <- as.name("ppr")
sm.method <- match.arg(sm.method)
ism <- switch(sm.method, supsmu=0, spline=1, gcvspline=2)
if(missing(nterms)) stop("'nterms' is missing with no default")
mu <- nterms; ml <- max.terms
x <- as.matrix(x)
y <- as.matrix(y)
if(!is.numeric(x) || !is.numeric(y))
stop("'ppr' applies only to numerical variables")
n <- nrow(x)
if(nrow(y) != n) stop("mismatched 'x' and 'y'")
p <- ncol(x)
q <- ncol(y)
if(!is.null(dimnames(x))) xnames <- dimnames(x)[[2L]]
else xnames <- paste0("X", 1L:p)
if(!is.null(dimnames(y))) ynames <- dimnames(y)[[2L]]
else ynames <- paste0("Y", 1L:q)
msmod <- ml*(p+q+2*n)+q+7+ml+1 # for asr
nsp <- n*(q+15)+q+3*p
ndp <- p*(p+1)/2+6*p
.Fortran(C_setppr,
as.double(span), as.double(bass), as.integer(optlevel),
as.integer(ism), as.double(df), as.double(gcvpen)
)
Z <- .Fortran(C_smart,
as.integer(ml), as.integer(mu),
as.integer(p), as.integer(q), as.integer(n),
as.double(weights),
as.double(t(x)),
as.double(t(y)),
as.double(ww),
smod=double(msmod), as.integer(msmod),
double(nsp), as.integer(nsp),
double(ndp), as.integer(ndp),
edf=double(ml)
)
smod <- Z$smod
ys <- smod[q+6]
tnames <- paste("term", 1L:mu)
alpha <- matrix(smod[q+6L + 1L:(p*mu)],p, mu,
dimnames=list(xnames, tnames))
beta <- matrix(smod[q+6L+p*ml + 1L:(q*mu)], q, mu,
dimnames=list(ynames, tnames))
fitted <- drop(matrix(.Fortran(C_pppred,
as.integer(nrow(x)),
as.double(x),
as.double(smod),
y = double(nrow(x)*q),
double(2*smod[4L]))$y,
ncol=q, dimnames=dimnames(y)))
jt <- q + 7 + ml*(p+q+2*n)
gof <- smod[jt] * n * ys^2
gofn <- smod[jt+1L:ml] * n * ys^2
## retain only terms for the size of model finally fitted
jf <- q+6+ml*(p+q)
smod <- smod[c(1L:(q+6+p*mu), q+6+p*ml + 1L:(q*mu),
jf + 1L:(mu*n), jf+ml*n + 1L:(mu*n))]
smod[1L] <- mu
structure(list(call=call, mu=mu, ml=ml, p=p, q=q,
gof=gof, gofn=gofn,
df=df, edf=Z$edf[1L:mu],
xnames=xnames, ynames=ynames,
alpha=drop(alpha), beta=ys*drop(beta),
yb=smod[5+1L:q], ys=ys,
fitted.values=fitted, residuals=drop(y-fitted),
smod=smod),
class="ppr")
}
print.ppr <- function(x, ...)
{
if(!is.null(cl <- x$call)) {
cat("Call:\n")
dput(cl, control=NULL)
}
mu <- x$mu; ml <- x$ml
cat("\nGoodness of fit:\n")
gof <- setNames(x$gofn, paste(1L:ml, "terms"))
print(format(gof[mu:ml], ...), quote=FALSE)
invisible(x)
}
summary.ppr <- function(object, ...)
{
class(object) <- "summary.ppr"
object
}
print.summary.ppr <- function(x, ...)
{
print.ppr(x, ...)
mu <- x$mu
cat("\nProjection direction vectors:\n")
print(format(x$alpha, ...), quote=FALSE)
cat("\nCoefficients of ridge terms:\n")
print(format(x$beta, ...), quote=FALSE)
if(any(x$edf >0)) {
cat("\nEquivalent df for ridge terms:\n")
edf <- setNames(x$edf, paste("term", 1L:mu))
print(round(edf,2), ...)
}
invisible(x)
}
plot.ppr <- function(x, ask, type="o", ...)
{
ppr.funs <- function(obj)
{
## cols for each term
p <- obj$p; q <- obj$q
sm <- obj$smod
n <- sm[4L]; mu <- sm[5L]; m <- sm[1L]
jf <- q+6+m*(p+q)
jt <- jf+m*n
f <- matrix(sm[jf+1L:(mu*n)],n, mu)
t <- matrix(sm[jt+1L:(mu*n)],n, mu)
list(x=t, y=f)
}
obj <- ppr.funs(x)
if(!missing(ask)) {
oask <- devAskNewPage(ask)
on.exit(devAskNewPage(oask))
}
for(i in 1L:x$mu) {
ord <- order(obj$x[ ,i])
plot(obj$x[ord, i], obj$y[ord, i], type = type,
xlab = paste("term", i), ylab = "", ...)
}
invisible()
}
predict.ppr <- function(object, newdata, ...)
{
if(missing(newdata)) return(fitted(object))
if(!is.null(object$terms)) {
newdata <- as.data.frame(newdata)
rn <- row.names(newdata)
# work hard to predict NA for rows with missing data
Terms <- delete.response(object$terms)
m <- model.frame(Terms, newdata, na.action = na.omit,
xlev = object$xlevels)
if(!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m)
keep <- match(row.names(m), rn)
x <- model.matrix(Terms, m, contrasts.arg = object$contrasts)
} else {
x <- as.matrix(newdata)
keep <- seq_len(nrow(x))
rn <- dimnames(x)[[1L]]
}
if(ncol(x) != object$p) stop("wrong number of columns in 'x'")
res <- matrix(NA, length(keep), object$q,
dimnames = list(rn, object$ynames))
res[keep, ] <- matrix(.Fortran(C_pppred,
as.integer(nrow(x)),
as.double(x),
as.double(object$smod),
y = double(nrow(x)*object$q),
double(2*object$smod[4L])
)$y, ncol=object$q)
drop(res)
}
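## Illustrative use (not part of the R sources; mirrors the example on the
## ppr help page):
## rock.ppr <- ppr(log(perm) ~ area + peri + shape, data = rock,
##                 nterms = 2, max.terms = 5)
## summary(rock.ppr)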
| 6,929 | gpl-2.0 |
9af149866568158694b0fb109721e6b128f6f663 | glycerine/bigbird | r-3.0.2/src/library/stats/R/ppr.R | # File src/library/stats/R/ppr.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1998 B. D. Ripley
# Copyright (C) 2000-12 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
ppr <- function(x, ...) UseMethod("ppr")
ppr.formula <-
function(formula, data, weights, subset,
na.action, contrasts = NULL, ..., model = FALSE)
{
call <- match.call()
m <- match.call(expand.dots = FALSE)
m$contrasts <- m$... <- NULL
m[[1L]] <- quote(stats::model.frame)
m <- eval(m, parent.frame())
Terms <- attr(m, "terms")
attr(Terms, "intercept") <- 0L
X <- model.matrix(Terms, m, contrasts)
Y <- model.response(m)
w <- model.weights(m)
if(length(w) == 0L) w <- rep(1, nrow(X))
fit <- ppr.default(X, Y, w, ...)
fit$na.action <- attr(m, "na.action")
fit$terms <- Terms
## fix up call to refer to the generic, but leave arg name as `formula'
call[[1L]] <- as.name("ppr")
fit$call <- call
fit$contrasts <- attr(X, "contrasts")
fit$xlevels <- .getXlevels(Terms, m)
if(model) fit$model <- m
structure(fit, class=c("ppr.form", "ppr"))
}
ppr.default <-
function(x, y, weights=rep(1,n), ww=rep(1,q), nterms, max.terms=nterms,
optlevel=2, sm.method=c("supsmu", "spline", "gcvspline"),
bass=0, span=0, df=5, gcvpen=1, ...)
{
call <- match.call()
call[[1L]] <- as.name("ppr")
sm.method <- match.arg(sm.method)
ism <- switch(sm.method, supsmu=0, spline=1, gcvspline=2)
if(missing(nterms)) stop("'nterms' is missing with no default")
mu <- nterms; ml <- max.terms
x <- as.matrix(x)
y <- as.matrix(y)
if(!is.numeric(x) || !is.numeric(y))
stop("'ppr' applies only to numerical variables")
n <- nrow(x)
if(nrow(y) != n) stop("mismatched 'x' and 'y'")
p <- ncol(x)
q <- ncol(y)
if(!is.null(dimnames(x))) xnames <- dimnames(x)[[2L]]
else xnames <- paste0("X", 1L:p)
if(!is.null(dimnames(y))) ynames <- dimnames(y)[[2L]]
else ynames <- paste0("Y", 1L:q)
msmod <- ml*(p+q+2*n)+q+7+ml+1 # for asr
nsp <- n*(q+15)+q+3*p
ndp <- p*(p+1)/2+6*p
.Fortran(C_setppr,
as.double(span), as.double(bass), as.integer(optlevel),
as.integer(ism), as.double(df), as.double(gcvpen)
)
Z <- .Fortran(C_smart,
as.integer(ml), as.integer(mu),
as.integer(p), as.integer(q), as.integer(n),
as.double(weights),
as.double(t(x)),
as.double(t(y)),
as.double(ww),
smod=double(msmod), as.integer(msmod),
double(nsp), as.integer(nsp),
double(ndp), as.integer(ndp),
edf=double(ml)
)
smod <- Z$smod
ys <- smod[q+6]
tnames <- paste("term", 1L:mu)
alpha <- matrix(smod[q+6L + 1L:(p*mu)],p, mu,
dimnames=list(xnames, tnames))
beta <- matrix(smod[q+6L+p*ml + 1L:(q*mu)], q, mu,
dimnames=list(ynames, tnames))
fitted <- drop(matrix(.Fortran(C_pppred,
as.integer(nrow(x)),
as.double(x),
as.double(smod),
y = double(nrow(x)*q),
double(2*smod[4L]))$y,
ncol=q, dimnames=dimnames(y)))
jt <- q + 7 + ml*(p+q+2*n)
gof <- smod[jt] * n * ys^2
gofn <- smod[jt+1L:ml] * n * ys^2
## retain only terms for the size of model finally fitted
jf <- q+6+ml*(p+q)
smod <- smod[c(1L:(q+6+p*mu), q+6+p*ml + 1L:(q*mu),
jf + 1L:(mu*n), jf+ml*n + 1L:(mu*n))]
smod[1L] <- mu
structure(list(call=call, mu=mu, ml=ml, p=p, q=q,
gof=gof, gofn=gofn,
df=df, edf=Z$edf[1L:mu],
xnames=xnames, ynames=ynames,
alpha=drop(alpha), beta=ys*drop(beta),
yb=smod[5+1L:q], ys=ys,
fitted.values=fitted, residuals=drop(y-fitted),
smod=smod),
class="ppr")
}
print.ppr <- function(x, ...)
{
if(!is.null(cl <- x$call)) {
cat("Call:\n")
dput(cl, control=NULL)
}
mu <- x$mu; ml <- x$ml
cat("\nGoodness of fit:\n")
gof <- setNames(x$gofn, paste(1L:ml, "terms"))
print(format(gof[mu:ml], ...), quote=FALSE)
invisible(x)
}
summary.ppr <- function(object, ...)
{
class(object) <- "summary.ppr"
object
}
print.summary.ppr <- function(x, ...)
{
print.ppr(x, ...)
mu <- x$mu
cat("\nProjection direction vectors:\n")
print(format(x$alpha, ...), quote=FALSE)
cat("\nCoefficients of ridge terms:\n")
print(format(x$beta, ...), quote=FALSE)
if(any(x$edf >0)) {
cat("\nEquivalent df for ridge terms:\n")
edf <- setNames(x$edf, paste("term", 1L:mu))
print(round(edf,2), ...)
}
invisible(x)
}
plot.ppr <- function(x, ask, type="o", ...)
{
ppr.funs <- function(obj)
{
## cols for each term
p <- obj$p; q <- obj$q
sm <- obj$smod
n <- sm[4L]; mu <- sm[5L]; m <- sm[1L]
jf <- q+6+m*(p+q)
jt <- jf+m*n
f <- matrix(sm[jf+1L:(mu*n)],n, mu)
t <- matrix(sm[jt+1L:(mu*n)],n, mu)
list(x=t, y=f)
}
obj <- ppr.funs(x)
if(!missing(ask)) {
oask <- devAskNewPage(ask)
on.exit(devAskNewPage(oask))
}
for(i in 1L:x$mu) {
ord <- order(obj$x[ ,i])
plot(obj$x[ord, i], obj$y[ord, i], type = type,
xlab = paste("term", i), ylab = "", ...)
}
invisible()
}
predict.ppr <- function(object, newdata, ...)
{
if(missing(newdata)) return(fitted(object))
if(!is.null(object$terms)) {
newdata <- as.data.frame(newdata)
rn <- row.names(newdata)
# work hard to predict NA for rows with missing data
Terms <- delete.response(object$terms)
m <- model.frame(Terms, newdata, na.action = na.omit,
xlev = object$xlevels)
if(!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m)
keep <- match(row.names(m), rn)
x <- model.matrix(Terms, m, contrasts.arg = object$contrasts)
} else {
x <- as.matrix(newdata)
keep <- seq_len(nrow(x))
rn <- dimnames(x)[[1L]]
}
if(ncol(x) != object$p) stop("wrong number of columns in 'x'")
res <- matrix(NA, length(keep), object$q,
dimnames = list(rn, object$ynames))
res[keep, ] <- matrix(.Fortran(C_pppred,
as.integer(nrow(x)),
as.double(x),
as.double(object$smod),
y = double(nrow(x)*object$q),
double(2*object$smod[4L])
)$y, ncol=object$q)
drop(res)
}
| 6,929 | bsd-2-clause |
f27dff1e9c5600ef69ad527852fcbad68ac143ad | sc-camp/2016-R-data-analysis | course_data_modeling/courses/Mod_8_mixed_mods/Mixed effects models.R | #Mixed effects models
#AdvInR
#Script developed by Nils Bunnefeld, and modified by Luc Bussière November 2014
# clear workspace
rm(list=ls())
# analysis of grouse
# import data file
GROUSE<-read.csv("grouse_shooting_mod.csv")
str(GROUSE)
# first will want to divide nr_shot by drive size to facilitate contrasts
GROUSE$shotperkm<-GROUSE$nr_shot/GROUSE$km2
# then examine distribution
hist(GROUSE$shotperkm)
# variable is bounded below and count data
# to keep things simple, let's see if a log transform will help
hist(log(GROUSE$shotperkm))
# looks like this will be better even if not perfect
# let's store the vector for later use
GROUSE$shot<-log(GROUSE$shotperkm)
# the counts will similarly need a log transform
GROUSE$count<-log(GROUSE$totalcount)
# now plot the relationship and predict effects
plot(shot~count,data=GROUSE)
# looks like a positive linear relationship (in log-log space), with slope approx 1
plot(shot~jitter(prev),data=GROUSE)
# looks like a negative relationship, maybe with slope around -0.5??
# but recall that we would really like to know ahead of time whether there is an interaction
# let's build a plot that separately illustrates the effects of previous shooting number on count
library(ggplot2)
ggplot(GROUSE, aes(x = count, y = shot)) +
geom_point() +
stat_smooth(method = "lm") +
facet_grid(. ~ prev)
# there is no strong evidence from these plots for an interaction, because the lines are more or less parallel
# however, it does look like the numbers of shot grouse are highest in the first shooting, followed by the second and third
# to start, let's keep things simple with the maximal linear model
g1<-lm(shot~count*prev,data=GROUSE)
par(mfrow=c(2,2))
plot(g1)
par(mfrow=c(1,1))
# QQ plot looks a bit weak, others OK
summary(g1)
# guess at coefficient for count not bad, but other suffers -- however interaction still in so model needs simplifying
g2<-update(g1,~. -count:prev)
anova(g1,g2)
# simpler model better, as expected
par(mfrow=c(2,2))
plot(g2)
par(mfrow=c(1,1))
# QQ maybe slightly better, still a bit poor
summary(g2)
# now guess for coefficients quite good, and supported statistically
# can't simplify further, because p-vals for linear models are already for p-on-deletion
# problems is that this linear model features pseudoreplication
# both moor and drive visited repeatedly, and would like to soak up the variance assoc with these factors
# but having done the fixed version of the model gives us a good idea of what to expect from a successful mixed mode, so it was worth starting there!
#Extending the linear model to account for pseudoreplication
#We know that we also have repeated measurements for some drives (hunting area) and drive is nested within moor (management unit owned by a single land owner). We need to address this by specifying the random effects in the model.
# What if we didn't use mixed effects models and added moor and drive as fixed effects? Let's try it out.
g4<-lm(shot~moor+drive+prev+count,data=GROUSE)
summary(g4)
#Why are there NA estimates in the summary output?
# Try these tables which tell you how many of each combination of moor and drive (and previous numbers of shootings) there are
xtabs(~moor+drive,data=GROUSE)
xtabs(~moor+drive+prev,data=GROUSE)
# most combinations are poorly represented, and the small overall sample means we don't have the degrees of freedom to estimate all the coefficients needed for a fixed model
#Let's try the same analysis in a mixed effect model framework. In mixed effects models, we need to specify the random part of the model.
# I prefer the syntax of lme4 to that of nlme, but both are OK
library(lme4)
g1.mixed<-lmer(shot~prev*count+(1|moor)+(1|drive),data=GROUSE)
summary(g1.mixed)
# notice how there are no p-values, because it's not clear what denominator df are in mixed models
# but R will let us assess significance by simplifying, even though there is substantial debate about whether this is a good method for judging significance of terms
g2.mixed<-update(g1.mixed,~. - prev:count)
anova(g1.mixed,g2.mixed)
# notice the message in red telling us that R has refit the models using a different estimation procedure -- we compare models fit with ML, but report coefficients fit with REML
# the simpler model is preferred (if we believe the likelihood ratio test method for testing significance)
summary(g2.mixed)
# can we simplify further?
g3.mixed<-update(g2.mixed,~. - prev)
anova(g3.mixed,g2.mixed)
# can't lose prev
g4.mixed<-update(g2.mixed,~. - count)
anova(g4.mixed,g2.mixed)
# can't lose count either
# if we care about p-values, the two LR tests have provided some for us
# so min adequate model is g2.mixed
summary(g2.mixed)
# coeffs similar but not identical to those in fixed model
# also similar to published table, but note that Bunnefeld et al model shooting event number as a factor
# They may have reasoned that because the 3 or 4 shootings category was binned, modelling prev as a factor did not constrain the outcome of that group's coefficient as much as if it were a continuous predictor. In fact, there is little difference between models.
# why are df known for model simp, but not for tests of coefs?
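# One common answer (assumption: the lmerTest package is available) is to refit
# with lmerTest, which adds Satterthwaite denominator df and p-values to the
# coefficient table:
# library(lmerTest)
# summary(lmer(shot ~ prev + count + (1|moor) + (1|drive), data = GROUSE))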
# could also compare models with AIC
# because sample is small, may prefer AICc
# may need to install MuMIn, which has one implementation of AICc
library(MuMIn)
AICc(g1.mixed,g2.mixed,g3.mixed,g4.mixed)
# methods are in agreement
# best model is g2.mixed
#The summary looks like the one from linear models, but we have additional parts here. The most important are the variances of the random effects and th enumber of observations to check that we have done the right thing. Try to understand the three different parts, the random effects, the fixed effects and the data structure (Number of obs).
#Now do a variance components analysis, which is basically extracting the variance from the random effects and calculating the relative contribution of each term of the random effects to the total variation explained by the random effects. Extract the variance for all levels plus the residual variance from the summary().
vars<- c(0.043589,0.084859,0.131216)
vars/sum(vars)
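# A less error-prone alternative is to pull the variances straight from the
# fitted model instead of retyping them from the summary (sketch; the vcov
# column comes from lme4's VarCorr data-frame method, and includes the
# residual row):
# vc <- as.data.frame(VarCorr(g2.mixed))
# vc$vcov / sum(vc$vcov)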
#report the predictions of the best fitting model using REML=TRUE because REML gives more robust estimates then ML; but REML is not suited for model comparison.
# could plot all three lines and all data on a single panel
plot(shot~count,data=GROUSE,
ylab="Grouse shot per km2 (log)",
xlab="Grouse counted per km2 (log)",
type="n")
# in the previous line, type ="n" suppresses plotting anything, so I can add points by group and better control how the plot looks
points(shot[prev=="1"]~count[prev=="1"],data=GROUSE, pch=20)
points(shot[prev=="2"]~count[prev=="2"],col="blue",data=GROUSE,
pch=20)
points(shot[prev=="3"]~count[prev=="3"],col="red",data=GROUSE,
pch=20)
abline(-3.98,1.4)
abline(-3.98-0.53,1.41,lty=3,col="blue")
abline(-3.98-0.94,1.41,lty=4,col="red")
#IMO this graph looks a bit messy. Use mfrow() to split the graphics area into three separate plotting areas, or use ggplot to make a publication quality figure
par(mfrow=c(1,3))
plot(shot~count,data=GROUSE,type="n",xlab="Grouse counted (log)", ylab="Number of grouse shot (log)",cex.lab=1.2)
title(main="First shooting event")
points(shot[prev=="1"]~count[prev=="1"],data=GROUSE,col="purple",pch=20)
abline(-3.98,1.4,col="purple")
plot(shot~count,data=GROUSE,type="n",xlab=" Grouse counted (log)",ylab="",cex.lab=1.2)
title(main="Second shooting event")
points(shot[prev=="2"]~count[prev=="2"],col="blue",data=GROUSE,
pch=20)
abline(-3.98-0.53,1.41,lty=3,col="blue")
plot(shot~count,data=GROUSE,type="n",xlab=" Grouse counted (log)", ylab="", cex.lab=1.2 )
title(main="Third/fourth shooting event")
points(shot[prev=="3"]~count[prev=="3"],col="red",data=GROUSE,
pch=20)
abline(-3.98-0.94,1.41,lty=4,col="red")
par(mfrow=c(1,1))
# SuppEx
# clear workspace
rm(list=ls())
# import data file
FLIES<-read.csv("KatieFlies4R.csv")
str(FLIES)
# recode family ID as factor
FLIES$Maternal.Fam<-as.factor(FLIES$Maternal.Fam)
FLIES$Paternal.Fam<-as.factor(FLIES$Paternal.Fam)
names(FLIES)
# select males
MFLIES<-FLIES[FLIES$Sex=="M",]
# want to play with data on mass so need to check dist
hist(MFLIES$Wet.Mass)
# looks great
# visualize effects of temp on male mass by pop
library(ggplot2)
ggplot(MFLIES, aes(x = Temp, y = Wet.Mass)) +
geom_point() +
stat_smooth(method="lm") +
facet_grid(.~ Population)
# looks like temp has a negative effect on mass, but maybe in only some sex:pop combinations
# build a fixed model first
Mfixed.mod1<-lm(Wet.Mass~Temp,data=MFLIES)
par(mfrow=c(2,2))
plot(Mfixed.mod1)
par(mfrow=c(1,1))
# looks good
summary(Mfixed.mod1)
# support for interaction, but pseudorep
# could add in Population as fixed factor
Mfixed.mod2<-lm(Wet.Mass~Temp*Population,data=MFLIES)
par(mfrow=c(2,2))
plot(Mfixed.mod2)
par(mfrow=c(1,1))
# problems with QQ plot at lower end
summary(Mfixed.mod2)
# seem to be some strong Pop effects, but model the same
# can I remove Int?
anova(Mfixed.mod2,Mfixed.mod1)
# nope
# F test says we need to keep int, but this is still pseudoreplicated
# so now try mixed model, first without "interaction" between temp and Pop
names(MFLIES)
library(lme4)
Mmixed.mod1<-lmer(Wet.Mass~Temp*Population+(1|Maternal.Fam)+(1|Paternal.Fam),data=MFLIES)
summary(Mmixed.mod1)
vcov(Mmixed.mod1)
# population not soaking up much variance
# is int important?
Mmixed.mod2<-update(Mmixed.mod1,~. - Temp:Population)
anova(Mmixed.mod1,Mmixed.mod2)
# yup, so far
# OK now, let's try fitting pop as a random effect
Mmixed.mod3<-lmer(Wet.Mass~Temp+(1|Population)+(1|Maternal.Fam)+(1|Paternal.Fam),data=MFLIES)
summary(Mmixed.mod3)
# simpler output, but no temp by pop int
# now can we get effects of temp to vary by population?
Mmixed.mod4<-lmer(Wet.Mass~Temp+(Temp|Population)+(1|Maternal.Fam)+(1|Paternal.Fam),data=MFLIES)
anova(Mmixed.mod4,Mmixed.mod3)
# mod 4 slightly better than mod3
summary(Mmixed.mod4)
# now how to illustrate effects?
str(Mmixed.mod4)
str(MFLIES)
NEWTEMP<-expand.grid(Temp=seq(18,22,length=10),Population=levels(MFLIES$Population),Maternal.Fam=levels(MFLIES$Maternal.Fam),Paternal.Fam=levels(MFLIES$Paternal.Fam))
PREDMASS<-predict(Mmixed.mod4,newdata=NEWTEMP,re.form=~(Temp|Population))
PREDSFRAME<-cbind(NEWTEMP,PREDMASS)
head(PREDSFRAME)
ggplot(PREDSFRAME, aes(x = Temp, y = PREDMASS)) +
geom_line() +
facet_grid(. ~ Population)
# way cool, and actually shows effects
# could also visualize combined effects of Maternal Fam and Pop
PREDMASS<-predict(Mmixed.mod4,newdata=NEWTEMP,re.form=~(Temp|Population)+(1|Maternal.Fam))
PREDSFRAME<-cbind(NEWTEMP,PREDMASS)
head(PREDSFRAME)
ggplot(PREDSFRAME, aes(x = Temp, y = PREDMASS)) +
geom_line() +
facet_grid(Maternal.Fam ~ Population)
# note that now the family and pop combinations include many that were not sampled
| 10,963 | gpl-3.0 |
b4e8232ce9a45f411ea82f491da7548a69152464 | vinhqdang/my_mooc | MOOC-work/coursera/FINISHED/compdata-004 Computing for Data Analysis/Coursera-Computing-for-Data-Analysis-master/Week3/Assignment3.R | # Part 1
outcome <- read.csv(file="/.../data/outcome-of-care-measures.csv", colClasses = "character")
# "..." is the directory in your computer; masked here for privacy
head(outcome)
names(outcome)
outcome[, 11] <- as.numeric(outcome[, 11])
hist(outcome[, 11], xlab="30-day Death Rate", main="Heart Attack 30-day Death Rate")
library(googleVis)
gvt = gvisTable(outcome, options = list(showRowNumber = T, height = 800,width=1200))
plot(gvt)
# Part 2
outcome[, 17] <- as.numeric(outcome[, 17])
outcome[, 23] <- as.numeric(outcome[, 23])
par(mfrow=c(3, 1))
hist(outcome[, 11], xlab="30-day Death Rate", main="Heart Attack")
hist(outcome[, 17], xlab="30-day Death Rate", main="Heart Failure")
hist(outcome[, 23], xlab="30-day Death Rate", main="Pneumonia")
m1 = median(outcome[, 11], na.rm = T)
m2 = median(outcome[, 17], na.rm = T)
m3 = median(outcome[, 23], na.rm = T)
par(mfrow=c(1, 3))
hist(outcome[, 11], xlab="30-day Death Rate", main=substitute(bar(X) == k, list(k = mean(outcome[, 11], na.rm = T))))
abline(v = m1, col = 2)
hist(outcome[, 17], xlab="30-day Death Rate", main="Heart Failure")
abline(v = m2, col = 2)
hist(outcome[, 23], xlab="30-day Death Rate", main="Pneumonia")
abline(v = m3, col = 2)
# Part 3
# outcome2 <- outcome[(table(outcome$State)>=20)[outcome$State],]
table(outcome$State) < 20
table(outcome$State)[table(outcome$State) < 20]
exclu = c("AK","DC","DE","GU","HI","RI","VI","VT")
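# the hard-coded vector above could equally be derived from the table itself:
# exclu <- names(which(table(outcome$State) < 20))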
outcome2 <- outcome[!outcome$State %in% exclu, ]
outcome2$State
table(outcome2$State)
table(outcome$State)
death <- outcome2[, 11]
state <- outcome2$State
boxplot(death ~ state, ylab="30-day Death Rate", main="Heart Attack 30-day Death Rate by State")
# Challenge!
by.m<-reorder(state, death, median, na.rm=T)
boxplot(death ~ by.m, ylab="30-day Death Rate", main="Heart Attack 30-day Death Rate by State", las=2, cex.axis=0.7, xaxt="n")
axis(1, by.m, paste0(by.m,"(",table(outcome2$State)[outcome2$State],")"), las=2, cex.axis=0.7)
# Part 4
hospital <- read.csv(file="/.../data/hospital-data.csv", colClasses = "character")
# "..." is the directory in your computer; masked here for privacy
head(hospital)
outcome.hospital <- merge(outcome, hospital, by = "Provider.Number")
death <- as.numeric(outcome.hospital[, 11]) ## Heart attack outcome
npatient <- as.numeric(outcome.hospital[, 15])
owner <- factor(outcome.hospital$Hospital.Ownership)
library(lattice)
xyplot(death ~ npatient | owner, xlab="Number of Patients Seen", ylab="30-day Death Rate", main="Heart Attack 30-day Death Rate by Ownership",panel = function(x, y, ...){
panel.xyplot(x, y, ...)
panel.lmline(x, y, lwd = 2)
#fit <- lm(y ~ x)
#panel.abline(fit, lwd = 2)
})
#######---------------------------------------
source("best.R")
best("TX", "heart attack")
best("MD", "heart attack")
best("TX", "heart failure")
best("MD", "pneumonia")
source("http://spark-public.s3.amazonaws.com/compdata/scripts/submitscript.R")
submit()
source("rankhospital.R")
rankhospital("TX", "heart failure", 4)
rankhospital("MD", "heart attack", "worst")
rankhospital("MN", "heart attack", 5000)
rankhospital("TX", "heart attack")
source("rankall.R")
head(rankall("heart attack", 20), 10)
tail(rankall("pneumonia", "worst"), 3)
tail(rankall("heart failure"), 10)
rankall("heart failure", 10) | 3,251 | mit |
515fe2e7bfb53872808761cc68b8fe1c0a089df8 | cxxr-devel/cxxr-svn-mirror | src/library/Recommended/Matrix/R/lsCMatrix.R | #### Logical Symmetric Sparse Matrices in Compressed column-oriented format
### contains = "lsparseMatrix"
setAs("lsCMatrix", "matrix",
function(from) as(as(from, "generalMatrix"), "matrix"))
setAs("lsCMatrix", "lgCMatrix",
function(from) .Call(Csparse_symmetric_to_general, from))
## needed for indexing (still ?)
setAs("lsCMatrix", "lgTMatrix",
function(from) as(as(from, "generalMatrix"), "lgTMatrix"))
aslsC.by.lgC <- function(from) as(as(from, "lgCMatrix"), "symmetricMatrix")
setAs("lgTMatrix", "lsCMatrix", aslsC.by.lgC) # <-> needed for Matrix()
setAs("matrix", "lsCMatrix", aslsC.by.lgC)
## Specific conversions, should they be necessary. Better to convert as
## as(x, "TsparseMatrix") or as(x, "denseMatrix")
setAs("lsCMatrix", "lsTMatrix",
function(from) .Call(Csparse_to_Tsparse, from, FALSE))
setAs("lsCMatrix", "dsCMatrix",
function(from) new("dsCMatrix", i = from@i, p = from@p,
x = as.double(from@x), uplo = from@uplo,
Dim = from@Dim, Dimnames = from@Dimnames))
if(FALSE) # needed ?
setAs("lsCMatrix", "dgTMatrix",
function(from) as(as(from, "dsCMatrix"), "dgTMatrix"))
## have rather tril() and triu() methods than
## setAs("lsCMatrix", "ltCMatrix", ....)
setMethod("tril", "lsCMatrix",
function(x, k = 0, ...) {
if(x@uplo == "L" && k == 0)
## same internal structure + diag
new("ltCMatrix", uplo = x@uplo, i = x@i, p = x@p,
x = x@x, Dim = x@Dim, Dimnames = x@Dimnames)
else tril(as(x, "lgCMatrix"), k = k, ...)
})
setMethod("triu", "lsCMatrix",
function(x, k = 0, ...) {
if(x@uplo == "U" && k == 0)
new("ltCMatrix", uplo = x@uplo, i = x@i, p = x@p,
x = x@x, Dim = x@Dim, Dimnames = x@Dimnames)
else triu(as(x, "lgCMatrix"), k = k, ...)
})
setMethod("chol", signature(x = "lsCMatrix"),
function(x, pivot=FALSE, ...)
chol(as(x, "dgCMatrix"), pivot=pivot, ...))
## Use more general method from CsparseMatrix class
## setMethod("t", signature(x = "lsCMatrix"),
## function(x)
## .Call(lsCMatrix_trans, x),
## valueClass = "lsCMatrix")
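## Illustrative sketch (not part of the Matrix sources): coercing a symmetric
## logical sparse matrix to the symmetric class and back to general storage.
## sym <- as(Matrix(c(TRUE, TRUE, TRUE, TRUE), 2, 2, sparse = TRUE),
##           "symmetricMatrix")   # an "lsCMatrix"
## as(sym, "lgCMatrix")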
| 2,155 | gpl-2.0 |
3083df7c840fe797c50f376957c908c0c7cb00c5 | sjbonner/Truncated_CJS | TruncatedCJS/R/simulate_ms.R | simulateMS <- function(nstate,u,T,phi,p,debug=FALSE){
if(debug)
browser()
## Generate complete state matrix
Wfull <- sapply(1:(T-1),function(t){
Wfull.tmp <- matrix(NA,sum(u[,t]),T)
Wfull.tmp[,t] <- rep(1:nstate,u[,t])
for(s in t:(T-1)){
for(m in 1:nstate){
tmp <- which(Wfull.tmp[,s]==m)
if(length(tmp) > 0)
Wfull.tmp[tmp,s+1] <- sample(nstate,length(tmp),replace=TRUE,psi[s,m,])
}
}
Wfull.tmp
})
## Generate survival matrix
S <- sapply(1:(T-1),function(t){
S.tmp <- matrix(NA,sum(u[,t]),T)
S.tmp[,t] <- 1
for(s in t:(T-1))
S.tmp[,s+1] <- 1*(runif(sum(u[,t])) < S.tmp[,s] * phi[Wfull[[t]][,s],s])
S.tmp
})
## Generate capture matrix
W <- do.call("rbind",sapply(1:(T-1),function(t){
W.tmp <- matrix(0,sum(u[,t]),T)
W.tmp[,t] <-Wfull[[t]][,t]
for(s in t:(T-1)){
p.tmp <- S[[t]][,s+1] * p[Wfull[[t]][,s+1],s]
W.tmp[,s+1] <- Wfull[[t]][,s+1]*(runif(sum(u[,t])) < p.tmp)
}
W.tmp
}))
return(W)
}
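## Hypothetical usage sketch (toy dimensions; argument layout inferred from
## the indexing above):
## nstate <- 2; T <- 4
## u   <- matrix(10, nstate, T)                       # releases per state/occasion
## phi <- matrix(0.8, nstate, T - 1)                  # survival probabilities
## p   <- matrix(0.5, nstate, T - 1)                  # capture probabilities
## psi <- array(0.5, dim = c(T - 1, nstate, nstate))  # transition probabilities
## W <- simulateMS(nstate, u, T, phi, p, psi)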
| 1,240 | gpl-3.0 |
5c6604e62c883df04b4c1f172f3211e1b5f876c7 | freestone-lab/TSLibrary | TSExperiment/R/old_tsexperiment.R | ##TODO(David): TSprotocol should attach a specific protocol to a subject/session
#
## Note all outward facing functions take input as Data, all inward facing functions
## take input as df.
#
##' Workhorse for creating and updating a TSExperiment
##'
##' @param ExperimentPath The path to the experiment folder
##' @param EventCodePaths The paths to the event code files
##' @param DataPaths The paths to the data files
##' @param ProtocolPaths The paths to the protocols
##' @importFrom magrittr %>%
##' @return NULL
##' @export
##' @examples
#TSupdate <- function(ExperimentPath, DataPaths=NULL, EventCodePaths=NULL, ProtocolPaths=NULL){
#
# dir.create(file.path(ExperimentPath, "TSData"), showWarnings = FALSE)
#
# # Read user paths, or create if userpaths.csv doesn't exist.
# if (file.exists(TSuserpaths(ExperimentPath))){
# userpaths = readr::read_csv(TSuserpaths(ExperimentPath), col_types="cc")
# } else{
# if (is.null(EventCodePaths) | is.null(DataPaths)){
# stop("There is no userpaths.csv file. You must supply Event Code and Data Paths so we know where to look")
# }
# userpaths = dplyr::bind_rows(list(dplyr::data_frame(PathType = "EventCode", Path = EventCodePaths),
# dplyr::data_frame(PathType = "Data", Path = DataPaths),
# dplyr::data_frame(PathType = "Protocol", Path = ProtocolPaths)))
# readr::write_csv(userpaths, TSuserpaths(ExperimentPath))
# }
#
#
# #TODO(David): Allow the user to overwrite or append? Currently OVERWRITES
# # Uncomment the 3 lines in the below three segments to change this
# # from OVERWRITES to APPENDS
# #
# # The drawback to this is that the user must now supply the paths
# # everytime, because it will rewrite the userpaths for every new
# # computer that runs the code with different paths.
# if (!is.null(DataPaths)){
# userpaths = dplyr::data_frame(PathType = "Data", Path = DataPaths) %>%
## dplyr::setdiff(userpaths) %>%
## dplyr::bind_rows(list(userpaths, .)) %>%
## dplyr::distinct() %>%
# dplyr::arrange(PathType)
# readr::write_csv(userpaths, TSuserpaths(ExperimentPath))
# } # DataPaths
#
# if (!is.null(EventCodePaths)){
# userpaths = dplyr::data_frame(PathType = "EventCode", Path = EventCodePaths) %>%
## dplyr::setdiff(userpaths) %>%
## dplyr::bind_rows(list(userpaths, .)) %>%
## dplyr::distinct() %>%
# dplyr::arrange(PathType)
# readr::write_csv(userpaths, TSuserpaths(ExperimentPath))
# } # EventCodePaths
#
# if (!is.null(ProtocolPaths)){
# userpaths = dplyr::data_frame(PathType = "Protocol", Path = ProtocolPaths) %>%
## dplyr::setdiff(userpaths) %>%
## dplyr::bind_rows(list(userpaths, .)) %>%
## dplyr::distinct() %>%
# dplyr::arrange(PathType)
# readr::write_csv(userpaths, TSuserpaths(ExperimentPath))
# } # ProtocolPaths
#
# EventCodes = userpaths %>%
# filter(PathType == "EventCode") %>%
# select(Path)
# DataFiles = userpaths %>%
# filter(PathType == "Data") %>%
# select(Path) %>%
# sapply(Sys.glob)
#
# if (file.exists(TSDataFiles(ExperimentPath))){
# pFiles = readr::read_csv(TSDataFiles(ExperimentPath), col_names="File", col_types="c")$File
# pFiles = sapply(pFiles, basename, USE.NAMES=FALSE)
# nFiles = sapply(DataFiles, basename, USE.NAMES=FALSE)
# DataFiles = DataFiles[!(nFiles %in% pFiles)]
# } # file.exists
#
# Data = dplyr::data_frame()
# if (length(DataFiles)>0){
# Data = DataFiles %>%
# mpc_load_files() %>%
# mpc_tidy(files=EventCodes$Path)
#
# TScheckprotocol(ExperimentPath)
# TSchecksubjects(ExperimentPath, Data)
# TSsave(ExperimentPath, Data)
# write(DataFiles, TSDataFiles(ExperimentPath), sep=",", append=TRUE)
# }
# return(NULL)
#} # TSrun
#
##' Load an TSExperiment Data File
##'
##' @param ExperimentPath The path to the experiment folder
##' @return A TSData frame
##' @importFrom magrittr %>%
##' @export
##' @examples
#TSload <- function(ExperimentPath){
# Data = dplyr::data_frame()
# if (file.exists(TSDataFile(ExperimentPath))){
# Data = readr::read_csv(TSDataFile(ExperimentPath), col_types="ccdcd") %>%
# dplyr::mutate(subject=factor(subject),
# date=factor(date),
# event=factor(event))
# TSchecksubjects(ExperimentPath, Data)
# } # file.exists
# return(Data)
#}
#
##' Add subject information to the TSData frame
##'
##' @param ExperimentPath The path to the experiment folder
##' @param Data The TSData frame
##' @return A TSData frame with the subject information added
##' @importFrom magrittr %>%
##' @export
##' @examples
#TSaddSubjectData <- function(ExperimentPath, Data){
# SubjectInfo = readr::read_csv(TSSubjectFile(ExperimentPath), col_types="ccccccccccc") %>%
# dplyr::mutate(subject=factor(subject))
# df = dplyr::left_join(Data, SubjectInfo, by="subject")
# return(df)
#}
#
##' Defines the trial
##'
##' @param df The data
##' @param trialname The name to give to the trial
##' @param trial The pattern to search for
##' @return Data with a column of trial information
##' @importFrom magrittr %>%
##' @export
##' @examples
#TSdefinetrial <- function(df, trialname, trial){
# # TODO(David): And multiple trials at the same time?
# trialname = paste0("trial_", trialname)
# trialval <- lazyeval::interp(~ trialdef(event, trial), trial=trial)
# return(Data %>%
# dplyr::group_by(subject, date) %>%
# dplyr::mutate_(.dots=setNames(list(trialval), trialname)))
#}
#
##' Lists unique dates in TSData
##'
##' @param Data The data
##' @return The unique dates
##' @export
##' @examples
#TSlistdate <- function(Data){
# return(as.character(unique(Data$date)))
#}
#
##' Lists unique sessions in TSData
##'
##' @param Data The data
##' @return The unique sessions
##' @export
##' @examples
#TSlistsession <- function(Data){
# return(as.character(unique(Data$session)))
#}
#
##' Lists unique subjects in TSData
##'
##' @param Data The data
##' @return The unique subjects
##' @export
##' @examples
#TSlistsubject <- function(Data){
# return(as.character(unique(Data$subject)))
#}
#
##' Lists unique trials in TSData
##'
##' @param Data The data
##' @return The unique trials
##' @export
##' @examples
#TSlisttrials <- function(Data){
# cols = colnames(Data)
# return(gsub("trial_", "", cols[(startsWith(colnames(Data), "trial_"))]))
#}
#
##' Lists unique trial numbers from a trial in TSData
##'
##' @param Data The data
##' @param trialname The trial name to list
##' @return The unique trial numbers
##' @export
##' @examples
#TSlisttrialnumbers <- function(Data, trialname){
# trialname = paste0("trial_", trialname)
# return(as.character(unique(Data[[trialname]])))
#}
#
##' Returns the time event record from a single trial (time is relative)
##'
##' @param Data The data
##' @param Subject The subject
##' @param Date The date
##' @param Trial The trial name
##' @param Trialnumber The trial number
##' @return The time event record from a single trial
##' @importFrom magrittr %>%
##' @export
##' @examples
#TSlisttrialdata <- function(Data, Subject, Date, Trial, Trialnumber){
# trialname = paste0("trial_", Trial)
# subjectfilter = lazyeval::interp(~subject == Subject, Subject = Subject)
# datefilter = lazyeval::interp(~date == Date, Date = Date)
# trialfilter = lazyeval::interp(~trialname == Trialnumber, trialname=as.name(trialname))
# return(Data %>%
# dplyr::ungroup() %>%
# dplyr::filter_(subjectfilter, datefilter, trialfilter) %>%
# dplyr::mutate(time = time - time[1]) %>%
# dplyr::select(c(time, event)))
#}
#
##' Checks to see if there's been a change to the protocol
##'
##' @param ExperimentPath The path to the experiment
##' @return NULL
##' @importFrom magrittr %>%
##' @export
##' @examples
#TScheckprotocol <- function(ExperimentPath){
# protocol = readr::read_csv(TSuserpaths(ExperimentPath), col_types="cc") %>%
# dplyr::filter(PathType == "Protocol") %>%
# dplyr::select(Path)
# protocol = protocol$Path
#
# hash = protocol%>%
# readLines(encoding = "UTF-8") %>%
# digest::sha1()
#
# if (file.exists(TSprotocolFileList(ExperimentPath))){
# protocols = readr::read_csv(TSprotocolFileList(ExperimentPath), col_types="cc")
# if (!(hash %in% protocols$hash)){
# TSaddprotocol(ExperimentPath, protocol, hash)
# }
# } else{
# TSaddprotocol(ExperimentPath, protocol, hash)
# }
#}
#
##' Adds a protocol
##'
##' @param ExperimentPath The path to the experiment
##' @param protocol The path to the new protocol
##' @param hash The hash (checksum) of the new protocol
##' @return NULL
##' @importFrom magrittr %>%
##' @export
##' @examples
#TSaddprotocol <- function(ExperimentPath, protocol, hash){
# filenameWithExt = basename(protocol)
# filenameWithoutExt = filenameWithExt %>% substr(1, nchar(.)-4)
# extension = filenameWithExt %>% substr(nchar(.)-3, nchar(.))
#
# newfilename = file.path(TSprotocolPath(ExperimentPath),
# paste0(filenameWithoutExt, "_", hash, extension))
#
# dir.create(TSprotocolPath(ExperimentPath), showWarnings = FALSE)
# file.copy(protocol, newfilename, copy.date=TRUE)
#
# cols = !file.exists(TSprotocolFileList(ExperimentPath))
# readr::write_csv(dplyr::data_frame(hash=hash, file=basename(newfilename)),
# TSprotocolFileList(ExperimentPath),
# append=TRUE, col_names=cols)
#}
#
#
#
#
## Internal Functions -----------------------------------------------------------
#TSuserpaths <- function(ExperimentPath){
# return(file.path(ExperimentPath, "TSData", "UserPaths.csv"))
#}
#
#TSEventCodeFile <- function(ExperimentPath){
# return(file.path(ExperimentPath, "TSData", "EventCodes.csv"))
#}
#
#TSDataFiles <- function(ExperimentPath){
# return(file.path(ExperimentPath, "TSData", "DataFiles.csv"))
#}
#
#TSDataFile <- function(ExperimentPath){
# return(file.path(ExperimentPath, "TSData", "Data.csv"))
#}
#
#TSSubjectFile <- function(ExperimentPath){
# return(file.path(ExperimentPath, "TSData", "Subjects.csv"))
#}
#
#TSprotocolPath <- function(ExperimentPath){
# return(file.path(ExperimentPath, "TSData", "Protocols"))
#}
#
#TSprotocolFileList <- function(ExperimentPath){
# return(file.path(ExperimentPath, "TSData", "Protocols.csv"))
#}
#
#TSsave <- function(ExperimentPath, Data){
# colnames = !file.exists(TSDataFile(ExperimentPath))
# readr::write_csv(Data, TSDataFile(ExperimentPath), append=TRUE, col_names=colnames)
# return(NULL)
#}
#
#TSsubject <- function(ExperimentPath, subjects){
# df = dplyr::data_frame(subject=subjects,
# laboratory=NA,
# experiment_name=NA,
# protocol=NA,
# supplier=NA,
# species=NA,
# strain=NA,
# sex=NA,
# arrival_date=NA,
# arrival_weight=NA,
# analysis=NA)
# colnames = !file.exists(TSSubjectFile(ExperimentPath))
# readr::write_csv(df, TSSubjectFile(ExperimentPath), na="", append=TRUE, col_names=colnames)
#}
#
#TSchecksubjects <- function(ExperimentPath, Data){
# if (!file.exists(TSSubjectFile(ExperimentPath))){
# TSsubject(ExperimentPath, unique(Data$subject))
# } else{
# SubjectInfo = readr::read_csv(TSSubjectFile(ExperimentPath), col_types="ccccccccccc")
# SubjectsInData = unique(Data$subject)
# SubjectsOnFile = factor(SubjectInfo$subject)
#
# pna = sum(is.na(SubjectInfo)) / prod(dim(SubjectInfo))
# if (pna>0.5){
# warning("Most of the Subjects file has not been filled out. Fix this.")
# }
#
# SubjectsnotOnFile = setdiff(SubjectsInData, SubjectsOnFile)
# if (length(SubjectsnotOnFile)>0){
# TSsubject(ExperimentPath, SubjectsnotOnFile)
# warning("At least one subject in the data but not in the subjects file. These were added to the subjects file")
# }
#
# SubjectsnotInData = setdiff(SubjectsOnFile, SubjectsInData)
# if (length(SubjectsnotInData)>0){
# warning("At least one subject in the subjects file but not in the data. Fix this.")
# }
# }
#}
#
#TSdiagnostics <- function(Data){
# # Checks common Errors like a on without an off
# # This is as easy as a series of trialdefs that go from on to on without an
# # off in between. This will return the start, stop, number, and everthing in
# # between. The user can decide what to do with this information.
# return(NULL)
#}
#
#TSstartsession <- function(){
## Starts a session in the 24/7 chamber?
# return(NULL)
#}
#
#TSendsession <- function(){
## Starts a session in the 24/7 chamber?
# return(NULL)
#}
#
#TSactive <- function(){
# # A file with a list of active experiments (ActiveExperiments)
# # Maybe we can use this to do away with the dependency on "ExperimentPath"
# return(NULL)
#}
#
#TSAnalysis <- function(){
# # Runs the automated analysis associated with each subject
# return(NULL)
#}
#
#TSroomtemperature <- function(){
# # Allow for the room temperature input on certain days (High/Low)
# return(NULL)
#}
#
#TSraster <- function(){
# # Plots a raster.
# # (should be moved to analysis.R or plots.R)
# return(NULL)
#}
#
#TScumulativerecord <- function(){
# # Plots a cumulative record.
# # (should be moved to analysis.R or plots.R)
# return(NULL)
#}
#
#TScdf <- function(){
# # Plots a cumulative distribution function
# # (should be moved to analysis.R or plots.R)
# return(NULL)
#}
#
#TSorderevents <- function(){
# # Reorders events with the same timestampe based on some priority of events.
# return(NULL)
#}
#
#TSeventname <- function(){
# # returns the event name associated with an event code
# # (this is redundant with codesfor, but backward. Maybe remove?)
# return(NULL)
#}
#
#TSedit <- function(){
# # Adds or removes an event code based on matching codes.
# return(NULL)
#}
#
#TSaddeventcodes <- function(){
# # Adds event codes to the list of event codes
# # (may be redundant with codes already in TSLib)
# return(NULL)
#}
#
#TSbegin <- function(){
# # Sets up an experiment structure
# # (probably redundant with TSupdate)
# return(NULL)
#}
#
#TSaddlog <- function(){
# # Adds notes (to specific date/time/subject?)
# return(NULL)
#}
#
#TSemail <- function(){
# # Emails user
# # (redundant with email in TSLib?)
# return(NULL)
#}
#
#TSprotocol <- function(){
# # Adds a protocol for a particular subject for a particular session?
# return(NULL)
#}
#
## Notes on the following functions ---------------------------------------------
##
## The following functions played prominantly in Randy's TSLib Matlab toolbox,
## but do not have a place here. combineover is only useful if the data is stored
## in a Matlab structure. Both functions are superseced by mutate and summarize.
#TScombineover <- function(){
# # Combines the result over one layer of the Matlab structure
# # Its basically summarize
# # See notes above
# return(NULL)
#}
#
#TSapplystat <- function(){
# # Applies a statistic to the same level of the Matlab structure, can input
# # more than one stat to apply
# # Its basically mutate
# # See notes above
# return(NULL)
#}
| 15,510 | mit |
d7a7a36ef05138fb86d705f79b430449b66fbb85 | SchlossLab/Sze_FollowUps_Microbiome_2017 | code/srn/srn_run_36_RF.R | ### Build the best lesion model possible
### Try XG-Boost, RF, Logit (GLM), C5.0, SVM
### Find the best based on Jenna Wiens suggestions on test and training
## Marc Sze
#Load needed libraries
source('code/functions.R')
loadLibs(c("dplyr", "caret","scales", "doMC"))
load("exploratory/srn_RF_model_setup.RData")
# Set i variable
i = 36
#################################################################################
# #
# #
# Model Training and Parameter Tuning #
# #
#################################################################################
# Call number of processors to use
registerDoMC(cores = 4)
#Set up lists to store the data
test_tune_list <- list()
test_predictions <- list()
#Get test data
train_test_data <- test_data[eighty_twenty_splits[, i], ]
#Train the model
train_name <- paste("data_split", i, sep = "")
set.seed(3457)
test_tune_list[[paste("data_split", i, sep = "")]] <- assign(train_name,
train(lesion ~ ., data = train_test_data,
method = "rf",
ntree = 2000,
trControl = fitControl,
metric = "ROC",
verbose = FALSE))
test_test_data <- test_data[-eighty_twenty_splits[, i], ]
test_predictions[[paste("data_split", i, sep = "")]] <-
predict(test_tune_list[[paste("data_split", i, sep = "")]],
test_test_data)
# Save image with data and relevant parameters
save.image(paste("exploratory/srn_RF_model_", i, ".RData", sep=""))
| 1,758 | mit |
02fe2ac01b2df3fa0ed87b8810834402cfbcf554 | KellyBlack/R-Object-Oriented-Programming | chapter12/monteCarloS3.R |
######################################################################
# Create the Monte Carlo class
#
# This class is used to make many simulations
MonteCarlo <- function()
{
# Define the slots
me = list(
## First define the parameters for the stochastic model
N = 0,
T = 0,
x0 = 0,
y0 = 0,
alpha = 0,
beta = 0,
gamma = 0,
delta = 0,
noiseOne = 0,
noiseTwo = 0,
## Define the data to track and the number of trials
xData = 0,
yData = 0
)
## Set the name for the class
class(me) <- append(class(me),"MonteCarlo")
return(me)
}
## Define the function used to perform the Monte Carlo simulations.
getParams <- function(monteCarlo)
{
UseMethod("getParams",monteCarlo)
}
getParams.default <- function(monteCarlo)
{
print("getParams.default not defined!")
return(NA)
}
## define the function to get the parameters as a vector
getParams.MonteCarlo <- function(monteCarlo)
{
## return the values of all of the parameters
return(c(monteCarlo$N,monteCarlo$T,monteCarlo$x0,monteCarlo$y0,
monteCarlo$alpha,monteCarlo$beta,monteCarlo$gamma,monteCarlo$delta,
monteCarlo$noiseOne,monteCarlo$noiseTwo))
}
setParams <- function(monteCarlo,N,T,x0,y0,alpha,beta,gamma,delta,noiseOne,noiseTwo)
{
UseMethod("setParams",monteCarlo)
}
setParams.default <- function(monteCarlo,N,T,x0,y0,alpha,beta,gamma,delta,noiseOne,noiseTwo)
{
## Do not know what to do here, so do not make any changes.
return(monteCarlo)
}
# Define the method to set all of the parameters to use in a
# simulation at once.
setParams.MonteCarlo <- function(
monteCarlo,
N,T,x0,y0,alpha,beta,gamma,delta,noiseOne,noiseTwo)
{
## Set the values of all of the parameters
monteCarlo$N <- N
monteCarlo$T <- T
monteCarlo$x0 <- x0
monteCarlo$y0 <- y0
monteCarlo$alpha <- alpha
monteCarlo$beta <- beta
monteCarlo$gamma <- gamma
monteCarlo$delta <- delta
monteCarlo$noiseOne <- noiseOne
monteCarlo$noiseTwo <- noiseTwo
return(monteCarlo)
}
# Define the method used to initialize the data prior to a run.
prepare <- function(monteCarlo,number)
{
UseMethod("prepare",monteCarlo)
}
prepare.default <- function(monteCarlo,number)
{
## Not sure what to do here. So do nuthn!
return(monteCarlo)
}
prepare.MonteCarlo <- function(monteCarlo,number)
{
## Set the number of trials and initialize the values to
## zeroes.
monteCarlo$xData <- double(number)
monteCarlo$yData <- double(number)
return(monteCarlo)
}
# Define the method to set the value for a single data pair
setValue <- function(monteCarlo,x,y,i)
{
UseMethod("setValue",monteCarlo)
}
setValue.default <- function(monteCarlo,x,y,i)
{
## Not sure what to do so do nuthing
return(monteCarlo)
}
setValue.MonteCarlo <- function(monteCarlo,x,y,i)
{
## Set the number of trials and initialize the values
monteCarlo$xData[i] <- x
monteCarlo$yData[i] <- y
return(monteCarlo)
}
## Define the method to get all of the data as a matrix
getValues <- function(monteCarlo)
{
UseMethod("getValues",monteCarlo)
}
getValues.default <- function(monteCarlo)
{
## Not sure what to do. Return NA
return(NA)
}
getValues.MonteCarlo <- function(monteCarlo)
{
    ## Return the recorded (x, y) pairs as a two-column matrix
return(matrix(c(monteCarlo$xData,monteCarlo$yData),ncol=2))
}
## Define the function used to perform the Monte Carlo simulations.
simulations <- function(monteCarlo,number,simulation)
{
UseMethod("simulations",monteCarlo)
}
simulations.default <- function(monteCarlo,number,simulation)
{
print("simulations.default not defined!")
return(simulation)
}
simulations.MonteCarlo <- function(monteCarlo,number,simulation)
{
## Set the number of trials and initialize the values
monteCarlo <- prepare(monteCarlo,number)
params <- getParams(monteCarlo) # get the parameters
## Perform the simulations
lupe <- 0
while(lupe < number)
{
lupe <- lupe + 1 # increment the count
## Perform a single simulation.
simulation <- singleSimulation(
simulation,
params[1],params[2],params[3],params[4],params[5],
params[6],params[7],params[8],params[9],params[10])
## Get the last values of the simulation and record them.
values <- getFinalValues(simulation)
monteCarlo <- setValue(monteCarlo,values[1],values[2],lupe)
}
return(monteCarlo)
}
# the methods to plot the results
hist.MonteCarlo <- function(x,main="",...)
{
    oldPar <- par(mfrow=c(2,1))
    on.exit(par(oldPar))  # restore the previous plot layout on exit
    values <- getValues(x)
    ## element-wise & (not &&) so every row is screened; is.finite()
    ## rejects NA, NaN and +/-Inf in either coordinate
    isValid <- is.finite(values[,1]) & is.finite(values[,2])
    hist(values[isValid,1],xlab="x",main=main,...)
    hist(values[isValid,2],xlab="y",main="",...)
}
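## Hedged usage sketch (not part of the original file): simulations() above
## expects a companion object that implements singleSimulation() and
## getFinalValues(); the 'sim' object below stands in for one and is an
## assumption, so the block is guarded against execution on source().
if (FALSE) {
    mc <- MonteCarlo()
    mc <- setParams(mc, N = 1000, T = 10, x0 = 1, y0 = 1,
                    alpha = 0.1, beta = 0.2, gamma = 0.3, delta = 0.4,
                    noiseOne = 0.05, noiseTwo = 0.05)
    mc <- simulations(mc, 500, sim)   # run 500 trials
    hist(mc, main = "Final states over 500 trials")
}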
| 5,248 | mit |
69e851e89c73fc9f2bb94f65f29138769e4df026 | zedoul/buildEssential | R/buildr.R | #' Get packages
#'
#' @param description_path path to the DESCRIPTION file
#' @importFrom desc desc_get_deps
get_packages <- function(description_path) {
stopifnot(file.exists(description_path))
  # miniCRAN::getCranDescription is deliberately not used here, since this
  # function must support both CRAN and local packages.
deps <- desc::desc_get_deps(description_path)
target_deps <- deps[, "type"] %in% c("Imports", "Suggests", "LinkingTo")
unique(deps[target_deps, "package"])
}
#' Setup Your Build Essentials
#'
#' @param description_path path to the DESCRIPTION file
#' @param minicran_path path to the local miniCRAN repository
#' @param cran_url URL of the CRAN mirror to download packages from
#' @param package_type type of package to download, e.g. 'source'
#' @importFrom miniCRAN pkgAvail
#' @importFrom RCurl url.exists
#' @importFrom utils available.packages contrib.url installed.packages
#' @export
setup <- function(description_path,
minicran_path,
cran_url = 'http://cran.us.r-project.org',
package_type = 'source') {
stopifnot(file.exists(description_path))
stopifnot(dir.exists(minicran_path))
if (!grepl("DESCRIPTION", description_path)) {
warning("It seems not a DESCRIPTION file\n")
}
packages_to_install <- get_packages(description_path)
cat("- DESCRIPTION:", description_path, "\n")
cat("- miniCRAN:", minicran_path,"\n")
cat("- CRAN:", cran_url,"\n")
cat("- R library:", paste("\n ", .libPaths()), "\n")
cat("- package type:", package_type,"\n")
cat("Start to setup miniCRAN...\n")
stopifnot(RCurl::url.exists(cran_url))
# Add miniCRAN packages
.mpkgs <- row.names(miniCRAN::pkgAvail(repos = minicran_path,
type = package_type))
  # Pass the type to contrib.url() so the source/binary URL matches
  .cpkgs <- as.data.frame(available.packages(
    contriburl = contrib.url(cran_url, package_type),
    type = package_type))
  for (i in seq_along(packages_to_install)) {  # robust when nothing to install
package_name <- packages_to_install[i]
cat(paste0("[", i, "/", length(packages_to_install), "]: "),
"Add", package_name, "to miniCRAN ... ")
if (all(!(package_name %in% .mpkgs),
!is.na(as.character(.cpkgs[package_name, "Package"])))) {
cat("\n")
tryCatch({
add_cran_pkg(package_name,
minicran_path,
cran_url,
package_type)
}, error = function(err) {
warning(err)
})
} else {
cat("already exists\n")
}
}
cat(paste("All done. Installed packages in the R library are as follows:\n"))
print(installed.packages()[, c("Package", "Version")])
}
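## Hedged usage sketch (not part of the original file); the paths below are
## illustrative placeholders, not values shipped with the package.
if (FALSE) {
  setup(description_path = "~/myproject/DESCRIPTION",
        minicran_path = "~/miniCRAN",
        cran_url = "http://cran.us.r-project.org",
        package_type = "source")
}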
| 2,435 | mit |
8b6cc736b0a4854f6aad575aff47792a3c844fd4 | ChiWang/r-source | src/library/stats/R/arima.R | # File src/library/stats/R/arima.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 2002-2015 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
arima <- function(x, order = c(0L, 0L, 0L),
seasonal = list(order = c(0L, 0L, 0L), period = NA),
xreg = NULL, include.mean = TRUE,
transform.pars = TRUE, fixed = NULL, init = NULL,
method = c("CSS-ML", "ML", "CSS"), n.cond,
SSinit = c("Gardner1980", "Rossignol2011"),
optim.method = "BFGS",
optim.control = list(), kappa = 1e6)
{
"%+%" <- function(a, b) .Call(C_TSconv, a, b)
SSinit <- match.arg(SSinit)
SS.G <- SSinit == "Gardner1980"
## helper of armafn(), called by optim()
upARIMA <- function(mod, phi, theta)
{
p <- length(phi); q <- length(theta)
mod$phi <- phi; mod$theta <- theta
r <- max(p, q + 1L)
if(p > 0) mod$T[1L:p, 1L] <- phi
if(r > 1L)
mod$Pn[1L:r, 1L:r] <-
if(SS.G) .Call(C_getQ0, phi, theta)
else .Call(C_getQ0bis, phi, theta, tol = 0)# tol=0: less checking
else
mod$Pn[1L, 1L] <- if (p > 0) 1/(1 - phi^2) else 1
mod$a[] <- 0
mod
}
arimaSS <- function(y, mod)
{
## next call changes mod components a, P, Pn so beware!
.Call(C_ARIMA_Like, y, mod, 0L, TRUE)
}
## the objective function called by optim()
armafn <- function(p, trans)
{
par <- coef
par[mask] <- p
trarma <- .Call(C_ARIMA_transPars, par, arma, trans)
if(is.null(Z <- tryCatch(upARIMA(mod, trarma[[1L]], trarma[[2L]]),
error = function(e) NULL)))
return(.Machine$double.xmax)# bad parameters giving error, e.g. in solve(.)
if(ncxreg > 0) x <- x - xreg %*% par[narma + (1L:ncxreg)]
## next call changes Z components a, P, Pn so beware!
res <- .Call(C_ARIMA_Like, x, Z, 0L, FALSE)
s2 <- res[1L]/res[3L]
0.5*(log(s2) + res[2L]/res[3L])
}
armaCSS <- function(p)
{
par <- as.double(fixed)
par[mask] <- p
trarma <- .Call(C_ARIMA_transPars, par, arma, FALSE)
if(ncxreg > 0) x <- x - xreg %*% par[narma + (1L:ncxreg)]
res <- .Call(C_ARIMA_CSS, x, arma, trarma[[1L]], trarma[[2L]],
as.integer(ncond), FALSE)
0.5 * log(res)
}
arCheck <- function(ar)
{
p <- max(which(c(1, -ar) != 0)) - 1
if(!p) return(TRUE)
all(Mod(polyroot(c(1, -ar[1L:p]))) > 1)
}
maInvert <- function(ma)
{
## polyroot can't cope with leading zero.
q <- length(ma)
q0 <- max(which(c(1,ma) != 0)) - 1L
if(!q0) return(ma)
roots <- polyroot(c(1, ma[1L:q0]))
ind <- Mod(roots) < 1
if(all(!ind)) return(ma)
if(q0 == 1) return(c(1/ma[1L], rep.int(0, q - q0)))
roots[ind] <- 1/roots[ind]
x <- 1
for (r in roots) x <- c(x, 0) - c(0, x)/r
c(Re(x[-1L]), rep.int(0, q - q0))
}
series <- deparse(substitute(x))
if(NCOL(x) > 1L)
stop("only implemented for univariate time series")
method <- match.arg(method)
x <- as.ts(x)
if(!is.numeric(x))
stop("'x' must be numeric")
storage.mode(x) <- "double" # a precaution
dim(x) <- NULL
n <- length(x)
if(!missing(order))
if(!is.numeric(order) || length(order) != 3L || any(order < 0))
stop("'order' must be a non-negative numeric vector of length 3")
if(!missing(seasonal))
if(is.list(seasonal)) {
if(is.null(seasonal$order))
stop("'seasonal' must be a list with component 'order'")
if(!is.numeric(seasonal$order) || length(seasonal$order) != 3L
|| any(seasonal$order < 0L))
stop("'seasonal$order' must be a non-negative numeric vector of length 3")
} else if(is.numeric(order)) {
if(length(order) == 3L) seasonal <- list(order=seasonal)
else ("'seasonal' is of the wrong length")
} else stop("'seasonal' must be a list with component 'order'")
if (is.null(seasonal$period) || is.na(seasonal$period)
||seasonal$period == 0) seasonal$period <- frequency(x)
arma <- as.integer(c(order[-2L], seasonal$order[-2L], seasonal$period,
order[2L], seasonal$order[2L]))
narma <- sum(arma[1L:4L])
xtsp <- tsp(x)
tsp(x) <- NULL
Delta <- 1.
for(i in seq_len(order[2L])) Delta <- Delta %+% c(1., -1.)
for(i in seq_len(seasonal$order[2L]))
Delta <- Delta %+% c(1, rep.int(0, seasonal$period-1), -1)
Delta <- - Delta[-1L]
nd <- order[2L] + seasonal$order[2L]
n.used <- sum(!is.na(x)) - length(Delta)
if (is.null(xreg)) {
ncxreg <- 0L
} else {
nmxreg <- deparse(substitute(xreg))
if (NROW(xreg) != n) stop("lengths of 'x' and 'xreg' do not match")
ncxreg <- NCOL(xreg)
xreg <- as.matrix(xreg)
storage.mode(xreg) <- "double"
}
class(xreg) <- NULL
if (ncxreg > 0L && is.null(colnames(xreg)))
colnames(xreg) <-
if(ncxreg == 1L) nmxreg else paste0(nmxreg, 1L:ncxreg)
if (include.mean && (nd == 0L)) {
xreg <- cbind(intercept = rep(1, n), xreg = xreg)
ncxreg <- ncxreg + 1L
}
if(method == "CSS-ML") {
anyna <- anyNA(x)
if(ncxreg) anyna <- anyna || anyNA(xreg)
if(anyna) method <- "ML"
}
if (method == "CSS" || method == "CSS-ML") {
ncond <- order[2L] + seasonal$order[2L] * seasonal$period
ncond1 <- order[1L] + seasonal$period * seasonal$order[1L]
ncond <- ncond + if(!missing(n.cond)) max(n.cond, ncond1) else ncond1
} else ncond <- 0
if (is.null(fixed)) fixed <- rep(NA_real_, narma + ncxreg)
else if(length(fixed) != narma + ncxreg) stop("wrong length for 'fixed'")
mask <- is.na(fixed)
## if(!any(mask)) stop("all parameters were fixed")
no.optim <- !any(mask)
if(no.optim) transform.pars <- FALSE
if(transform.pars) {
ind <- arma[1L] + arma[2L] + seq_len(arma[3L])
if (any(!mask[seq_len(arma[1L])]) || any(!mask[ind])) {
warning("some AR parameters were fixed: setting transform.pars = FALSE")
transform.pars <- FALSE
}
}
init0 <- rep.int(0, narma)
parscale <- rep(1, narma)
if (ncxreg) {
cn <- colnames(xreg)
orig.xreg <- (ncxreg == 1L) || any(!mask[narma + 1L:ncxreg])
if (!orig.xreg) {
S <- svd(na.omit(xreg))
xreg <- xreg %*% S$v
}
dx <- x
dxreg <- xreg
if(order[2L] > 0L) {
dx <- diff(dx, 1L, order[2L])
dxreg <- diff(dxreg, 1L, order[2L])
}
if(seasonal$period > 1L & seasonal$order[2L] > 0) {
dx <- diff(dx, seasonal$period, seasonal$order[2L])
dxreg <- diff(dxreg, seasonal$period, seasonal$order[2L])
}
fit <- if(length(dx) > ncol(dxreg))
lm(dx ~ dxreg - 1, na.action = na.omit)
else list(rank = 0L)
if(fit$rank == 0L) {
## Degenerate model. Proceed anyway so as not to break old code
fit <- lm(x ~ xreg - 1, na.action = na.omit)
}
isna <- is.na(x) | apply(xreg, 1L, anyNA)
n.used <- sum(!isna) - length(Delta)
init0 <- c(init0, coef(fit))
ses <- summary(fit)$coefficients[, 2L]
parscale <- c(parscale, 10 * ses)
}
if (n.used <= 0) stop("too few non-missing observations")
if(!is.null(init)) {
if(length(init) != length(init0))
stop("'init' is of the wrong length")
if(any(ind <- is.na(init))) init[ind] <- init0[ind]
if(method == "ML") {
## check stationarity
if(arma[1L] > 0)
if(!arCheck(init[1L:arma[1L]]))
stop("non-stationary AR part")
if(arma[3L] > 0)
if(!arCheck(init[sum(arma[1L:2L]) + 1L:arma[3L]]))
stop("non-stationary seasonal AR part")
if(transform.pars)
init <- .Call(C_ARIMA_Invtrans, as.double(init), arma)
}
} else init <- init0
coef <- as.double(fixed)
if(!("parscale" %in% names(optim.control)))
optim.control$parscale <- parscale[mask]
if(method == "CSS") {
res <- if(no.optim)
list(convergence=0L, par=numeric(), value=armaCSS(numeric()))
else
optim(init[mask], armaCSS, method = optim.method, hessian = TRUE,
control = optim.control)
if(res$convergence > 0)
warning(gettextf("possible convergence problem: optim gave code = %d",
res$convergence), domain = NA)
coef[mask] <- res$par
## set model for predictions
trarma <- .Call(C_ARIMA_transPars, coef, arma, FALSE)
mod <- makeARIMA(trarma[[1L]], trarma[[2L]], Delta, kappa, SSinit)
if(ncxreg > 0) x <- x - xreg %*% coef[narma + (1L:ncxreg)]
arimaSS(x, mod)
val <- .Call(C_ARIMA_CSS, x, arma, trarma[[1L]], trarma[[2L]],
as.integer(ncond), TRUE)
sigma2 <- val[[1L]]
var <- if(no.optim) numeric() else solve(res$hessian * n.used)
} else {
if(method == "CSS-ML") {
res <- if(no.optim)
list(convergence=0L, par=numeric(), value=armaCSS(numeric()))
else
optim(init[mask], armaCSS, method = optim.method,
hessian = FALSE, control = optim.control)
if(res$convergence == 0) init[mask] <- res$par
## check stationarity
if(arma[1L] > 0)
if(!arCheck(init[1L:arma[1L]]))
stop("non-stationary AR part from CSS")
if(arma[3L] > 0)
if(!arCheck(init[sum(arma[1L:2L]) + 1L:arma[3L]]))
stop("non-stationary seasonal AR part from CSS")
ncond <- 0L
}
if(transform.pars) {
init <- .Call(C_ARIMA_Invtrans, init, arma)
## enforce invertibility
if(arma[2L] > 0) {
ind <- arma[1L] + 1L:arma[2L]
init[ind] <- maInvert(init[ind])
}
if(arma[4L] > 0) {
ind <- sum(arma[1L:3L]) + 1L:arma[4L]
init[ind] <- maInvert(init[ind])
}
}
trarma <- .Call(C_ARIMA_transPars, init, arma, transform.pars)
mod <- makeARIMA(trarma[[1L]], trarma[[2L]], Delta, kappa, SSinit)
res <- if(no.optim)
list(convergence = 0, par = numeric(),
value = armafn(numeric(), as.logical(transform.pars)))
else
optim(init[mask], armafn, method = optim.method,
hessian = TRUE, control = optim.control,
trans = as.logical(transform.pars))
if(res$convergence > 0)
warning(gettextf("possible convergence problem: optim gave code = %d",
res$convergence), domain = NA)
coef[mask] <- res$par
if(transform.pars) {
## enforce invertibility
if(arma[2L] > 0L) {
ind <- arma[1L] + 1L:arma[2L]
if(all(mask[ind]))
coef[ind] <- maInvert(coef[ind])
}
if(arma[4L] > 0L) {
ind <- sum(arma[1L:3L]) + 1L:arma[4L]
if(all(mask[ind]))
coef[ind] <- maInvert(coef[ind])
}
if(any(coef[mask] != res$par)) { # need to re-fit
oldcode <- res$convergence
res <- optim(coef[mask], armafn, method = optim.method,
hessian = TRUE,
control = list(maxit = 0L,
parscale = optim.control$parscale),
trans = TRUE)
res$convergence <- oldcode
coef[mask] <- res$par
}
## do it this way to ensure hessian was computed inside
## stationarity region
A <- .Call(C_ARIMA_Gradtrans, as.double(coef), arma)
A <- A[mask, mask]
var <- crossprod(A, solve(res$hessian * n.used, A))
coef <- .Call(C_ARIMA_undoPars, coef, arma)
} else var <- if(no.optim) numeric() else solve(res$hessian * n.used)
trarma <- .Call(C_ARIMA_transPars, coef, arma, FALSE)
mod <- makeARIMA(trarma[[1L]], trarma[[2L]], Delta, kappa, SSinit)
val <- if(ncxreg > 0L)
arimaSS(x - xreg %*% coef[narma + (1L:ncxreg)], mod)
else arimaSS(x, mod)
sigma2 <- val[[1L]][1L]/n.used
}
value <- 2 * n.used * res$value + n.used + n.used * log(2 * pi)
aic <- if(method != "CSS") value + 2*sum(mask) + 2 else NA
nm <- NULL
if (arma[1L] > 0L) nm <- c(nm, paste0("ar", 1L:arma[1L]))
if (arma[2L] > 0L) nm <- c(nm, paste0("ma", 1L:arma[2L]))
if (arma[3L] > 0L) nm <- c(nm, paste0("sar", 1L:arma[3L]))
if (arma[4L] > 0L) nm <- c(nm, paste0("sma", 1L:arma[4L]))
if (ncxreg > 0L) {
nm <- c(nm, cn)
if(!orig.xreg) {
ind <- narma + 1L:ncxreg
coef[ind] <- S$v %*% coef[ind]
A <- diag(narma + ncxreg)
A[ind, ind] <- S$v
A <- A[mask, mask]
var <- A %*% var %*% t(A)
}
}
names(coef) <- nm
if(!no.optim) dimnames(var) <- list(nm[mask], nm[mask])
resid <- val[[2L]]
tsp(resid) <- xtsp
class(resid) <- "ts"
structure(list(coef = coef, sigma2 = sigma2, var.coef = var, mask = mask,
loglik = -0.5 * value, aic = aic, arma = arma,
residuals = resid, call = match.call(), series = series,
code = res$convergence, n.cond = ncond, nobs = n.used,
model = mod),
class = "Arima")
}
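## Hedged usage sketch (not part of the original source); guarded so that
## sourcing this file does not run it.  USAccDeaths is a monthly series
## shipped with base R, so the call below needs no extra data.
if (FALSE) {
    fit <- arima(USAccDeaths, order = c(0L, 1L, 1L),
                 seasonal = list(order = c(0L, 1L, 1L)))
    print(fit)   # dispatches to print.Arima() defined below
}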
print.Arima <-
function (x, digits = max(3L, getOption("digits") - 3L), se = TRUE, ...)
{
cat("\nCall:", deparse(x$call, width.cutoff = 75L), "", sep = "\n")
if (length(x$coef)) {
cat("Coefficients:\n")
coef <- round(x$coef, digits = digits)
## use NROW as if all coefs are fixed there are no var.coef's
if (se && NROW(x$var.coef)) {
ses <- rep.int(0, length(coef))
ses[x$mask] <- round(sqrt(diag(x$var.coef)), digits = digits)
coef <- matrix(coef, 1L, dimnames = list(NULL, names(coef)))
coef <- rbind(coef, s.e. = ses)
}
print.default(coef, print.gap = 2)
}
cm <- x$call$method
if(is.null(cm) || cm != "CSS")
cat("\nsigma^2 estimated as ", format(x$sigma2, digits = digits),
": log likelihood = ", format(round(x$loglik, 2L)),
", aic = ", format(round(x$aic, 2L)), "\n", sep = "")
else
cat("\nsigma^2 estimated as ",
format(x$sigma2, digits = digits),
": part log likelihood = ", format(round(x$loglik,2)),
"\n", sep = "")
invisible(x)
}
predict.Arima <-
function (object, n.ahead = 1L, newxreg = NULL, se.fit = TRUE, ...)
{
myNCOL <- function(x) if (is.null(x)) 0 else NCOL(x)
rsd <- object$residuals
xr <- object$call$xreg
xreg <- if (!is.null(xr)) eval.parent(xr) else NULL
ncxreg <- myNCOL(xreg)
if (myNCOL(newxreg) != ncxreg)
stop("'xreg' and 'newxreg' have different numbers of columns")
class(xreg) <- NULL
xtsp <- tsp(rsd)
n <- length(rsd)
arma <- object$arma
coefs <- object$coef
narma <- sum(arma[1L:4L])
if (length(coefs) > narma) {
if (names(coefs)[narma + 1L] == "intercept") {
xreg <- cbind(intercept = rep(1, n), xreg)
newxreg <- cbind(intercept = rep(1, n.ahead), newxreg)
ncxreg <- ncxreg + 1L
}
xm <- if(narma == 0) drop(as.matrix(newxreg) %*% coefs)
else drop(as.matrix(newxreg) %*% coefs[-(1L:narma)])
}
else xm <- 0
if (arma[2L] > 0L) {
ma <- coefs[arma[1L] + 1L:arma[2L]]
if (any(Mod(polyroot(c(1, ma))) < 1))
warning("MA part of model is not invertible")
}
if (arma[4L] > 0L) {
ma <- coefs[sum(arma[1L:3L]) + 1L:arma[4L]]
if (any(Mod(polyroot(c(1, ma))) < 1))
warning("seasonal MA part of model is not invertible")
}
z <- KalmanForecast(n.ahead, object$model)
pred <- ts(z[[1L]] + xm, start = xtsp[2L] + deltat(rsd),
frequency = xtsp[3L])
if (se.fit) {
se <- ts(sqrt(z[[2L]] * object$sigma2),
start = xtsp[2L] + deltat(rsd),
frequency = xtsp[3L])
list(pred=pred, se=se)
}
else pred
}
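## Hedged continuation of the sketch above (again guarded): n.ahead-step
## forecasts with standard errors from the Kalman recursion.
if (FALSE) {
    fc <- predict(fit, n.ahead = 12L, se.fit = TRUE)
    fc$pred + 1.96 * fc$se   # rough upper limit of a 95% forecast band
}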
makeARIMA <- function(phi, theta, Delta, kappa = 1e6,
SSinit = c("Gardner1980", "Rossignol2011"),
tol = .Machine$double.eps)
{
if(anyNA(phi)) warning(gettextf("NAs in '%s'", "phi"), domain=NA)
if(anyNA(theta)) warning(gettextf("NAs in '%s'", "theta"), domain=NA)
p <- length(phi); q <- length(theta)
r <- max(p, q + 1L); d <- length(Delta)
rd <- r + d
Z <- c(1., rep.int(0, r-1L), Delta)
T <- matrix(0., rd, rd)
if(p > 0) T[1L:p, 1L] <- phi
if(r > 1L) {
ind <- 2:r
T[cbind(ind-1L, ind)] <- 1
}
if(d > 0L) {
T[r+1L, ] <- Z
if(d > 1L) {
ind <- r + 2:d
T[cbind(ind, ind-1)] <- 1
}
}
if(q < r - 1L) theta <- c(theta, rep.int(0, r-1L-q))
R <- c(1, theta, rep.int(0, d))
V <- R %o% R
h <- 0.
a <- rep(0., rd)
Pn <- P <- matrix(0., rd, rd)
if(r > 1L)
Pn[1L:r, 1L:r] <- switch(match.arg(SSinit),
"Gardner1980" = .Call(C_getQ0, phi, theta),
"Rossignol2011" = .Call(C_getQ0bis, phi, theta, tol),
stop("invalid 'SSinit'"))
else Pn[1L, 1L] <- if(p > 0) 1/(1 - phi^2) else 1
if(d > 0L) Pn[cbind(r+1L:d, r+1L:d)] <- kappa
list(phi=phi, theta=theta, Delta=Delta, Z=Z, a=a, P=P, T=T, V=V,
h=h, Pn=Pn)
}
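## Hedged illustration (not part of the original source): makeARIMA() builds
## the state-space representation consumed by the Kalman filter routines.
if (FALSE) {
    mod <- makeARIMA(phi = 0.5, theta = numeric(), Delta = numeric())
    str(mod)   # Z, T, V, a, P and Pn define the filter for an AR(1)
}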
coef.Arima <- function (object, ...) object$coef
vcov.Arima <- function (object, ...) object$var.coef
logLik.Arima <- function (object, ...) {
res <- if(is.na(object$aic)) NA
else structure(object$loglik, df = sum(object$mask) + 1, nobs = object$nobs)
class(res) <- "logLik"
res
}
## arima.sim() is in ./ts.R
| 18,997 | gpl-2.0 |
20769a023b830ddef7c35ab7c099b5ac260e9f87 | yufree/democode | plot/mass.R | source("http://bioconductor.org/biocLite.R")
biocLite("mzR")
library(mzR)
all <- openMSfile('./FULL200.CDF')
df <- header(all)
bb <- peaks(all)
aaaa <- sapply(bb,as.data.frame)
oddvals <- seq(1, ncol(aaaa), by=2)
aaaaa <- unlist(aaaa[oddvals])
ccc <- unique(c(aaaaa))
ccc <- ccc[order(ccc)]
# bbb <- sapply(bb, "[",250:700)
# ddd <- unique(c(bbb))
# dddd <- ddd[ddd<700]
time <- df$retentionTime
df2 <- matrix(0, nrow = length(ccc), ncol = length(time))
rownames(df2) <- ccc
colnames(df2) <- time
rm(aaaa)
rm(aaaaa)
rm(oddvals)
rm(df)
rm(all)
gc()
for(i in 1:length(time)){
temp <- bb[[i]]
index <- which(ccc%in%temp[,1])
df2[index,i] <- temp[,2]
}
ddd <- as.integer(ccc)
library(data.table)
dt = data.table(df2)
dt$fac <- ddd
df3 <- dt[,lapply(.SD, sum), by=ddd ]
df3 <- as.matrix(df3)
df7 <- df3[,2000:3000]
heatmap(df7)
library(rARPACK)
df4 <- svds(df3,2)
df5 <- df4$u %*% diag(df4$d) %*% t(df4$v)
rownames(df5) <- ddd
colnames(df5) <- time
df6 <- df5[,2000:3000]
heatmap(df6)
df8 <- as.data.frame(df5)
df9 <- as.data.frame(t(df8))
rownames(df8) <- ddd
colnames(df9) <- time
write.table(df3,'df3.txt')
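## Hedged add-on (not in the original script): relative Frobenius error of
## the rank-2 reconstruction, as a quick check on how much signal survives.
sqrt(sum((df3 - df5)^2) / sum(df3^2))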
| 1,185 | mit |
8b6cc736b0a4854f6aad575aff47792a3c844fd4 | jagdeesh109/RRO | R-src/src/library/stats/R/arima.R | # File src/library/stats/R/arima.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 2002-2015 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
arima <- function(x, order = c(0L, 0L, 0L),
seasonal = list(order = c(0L, 0L, 0L), period = NA),
xreg = NULL, include.mean = TRUE,
transform.pars = TRUE, fixed = NULL, init = NULL,
method = c("CSS-ML", "ML", "CSS"), n.cond,
SSinit = c("Gardner1980", "Rossignol2011"),
optim.method = "BFGS",
optim.control = list(), kappa = 1e6)
{
"%+%" <- function(a, b) .Call(C_TSconv, a, b)
SSinit <- match.arg(SSinit)
SS.G <- SSinit == "Gardner1980"
## helper of armafn(), called by optim()
upARIMA <- function(mod, phi, theta)
{
p <- length(phi); q <- length(theta)
mod$phi <- phi; mod$theta <- theta
r <- max(p, q + 1L)
if(p > 0) mod$T[1L:p, 1L] <- phi
if(r > 1L)
mod$Pn[1L:r, 1L:r] <-
if(SS.G) .Call(C_getQ0, phi, theta)
else .Call(C_getQ0bis, phi, theta, tol = 0)# tol=0: less checking
else
mod$Pn[1L, 1L] <- if (p > 0) 1/(1 - phi^2) else 1
mod$a[] <- 0
mod
}
arimaSS <- function(y, mod)
{
## next call changes mod components a, P, Pn so beware!
.Call(C_ARIMA_Like, y, mod, 0L, TRUE)
}
## the objective function called by optim()
armafn <- function(p, trans)
{
par <- coef
par[mask] <- p
trarma <- .Call(C_ARIMA_transPars, par, arma, trans)
if(is.null(Z <- tryCatch(upARIMA(mod, trarma[[1L]], trarma[[2L]]),
error = function(e) NULL)))
return(.Machine$double.xmax)# bad parameters giving error, e.g. in solve(.)
if(ncxreg > 0) x <- x - xreg %*% par[narma + (1L:ncxreg)]
## next call changes Z components a, P, Pn so beware!
res <- .Call(C_ARIMA_Like, x, Z, 0L, FALSE)
s2 <- res[1L]/res[3L]
0.5*(log(s2) + res[2L]/res[3L])
}
armaCSS <- function(p)
{
par <- as.double(fixed)
par[mask] <- p
trarma <- .Call(C_ARIMA_transPars, par, arma, FALSE)
if(ncxreg > 0) x <- x - xreg %*% par[narma + (1L:ncxreg)]
res <- .Call(C_ARIMA_CSS, x, arma, trarma[[1L]], trarma[[2L]],
as.integer(ncond), FALSE)
0.5 * log(res)
}
arCheck <- function(ar)
{
p <- max(which(c(1, -ar) != 0)) - 1
if(!p) return(TRUE)
all(Mod(polyroot(c(1, -ar[1L:p]))) > 1)
}
maInvert <- function(ma)
{
## polyroot can't cope with leading zero.
q <- length(ma)
q0 <- max(which(c(1,ma) != 0)) - 1L
if(!q0) return(ma)
roots <- polyroot(c(1, ma[1L:q0]))
ind <- Mod(roots) < 1
if(all(!ind)) return(ma)
if(q0 == 1) return(c(1/ma[1L], rep.int(0, q - q0)))
roots[ind] <- 1/roots[ind]
x <- 1
for (r in roots) x <- c(x, 0) - c(0, x)/r
c(Re(x[-1L]), rep.int(0, q - q0))
}
series <- deparse(substitute(x))
if(NCOL(x) > 1L)
stop("only implemented for univariate time series")
method <- match.arg(method)
x <- as.ts(x)
if(!is.numeric(x))
stop("'x' must be numeric")
storage.mode(x) <- "double" # a precaution
dim(x) <- NULL
n <- length(x)
if(!missing(order))
if(!is.numeric(order) || length(order) != 3L || any(order < 0))
stop("'order' must be a non-negative numeric vector of length 3")
if(!missing(seasonal))
if(is.list(seasonal)) {
if(is.null(seasonal$order))
stop("'seasonal' must be a list with component 'order'")
if(!is.numeric(seasonal$order) || length(seasonal$order) != 3L
|| any(seasonal$order < 0L))
stop("'seasonal$order' must be a non-negative numeric vector of length 3")
} else if(is.numeric(order)) {
if(length(order) == 3L) seasonal <- list(order=seasonal)
else ("'seasonal' is of the wrong length")
} else stop("'seasonal' must be a list with component 'order'")
if (is.null(seasonal$period) || is.na(seasonal$period)
||seasonal$period == 0) seasonal$period <- frequency(x)
arma <- as.integer(c(order[-2L], seasonal$order[-2L], seasonal$period,
order[2L], seasonal$order[2L]))
narma <- sum(arma[1L:4L])
xtsp <- tsp(x)
tsp(x) <- NULL
Delta <- 1.
for(i in seq_len(order[2L])) Delta <- Delta %+% c(1., -1.)
for(i in seq_len(seasonal$order[2L]))
Delta <- Delta %+% c(1, rep.int(0, seasonal$period-1), -1)
Delta <- - Delta[-1L]
nd <- order[2L] + seasonal$order[2L]
n.used <- sum(!is.na(x)) - length(Delta)
if (is.null(xreg)) {
ncxreg <- 0L
} else {
nmxreg <- deparse(substitute(xreg))
if (NROW(xreg) != n) stop("lengths of 'x' and 'xreg' do not match")
ncxreg <- NCOL(xreg)
xreg <- as.matrix(xreg)
storage.mode(xreg) <- "double"
}
class(xreg) <- NULL
if (ncxreg > 0L && is.null(colnames(xreg)))
colnames(xreg) <-
if(ncxreg == 1L) nmxreg else paste0(nmxreg, 1L:ncxreg)
if (include.mean && (nd == 0L)) {
xreg <- cbind(intercept = rep(1, n), xreg = xreg)
ncxreg <- ncxreg + 1L
}
if(method == "CSS-ML") {
anyna <- anyNA(x)
if(ncxreg) anyna <- anyna || anyNA(xreg)
if(anyna) method <- "ML"
}
if (method == "CSS" || method == "CSS-ML") {
ncond <- order[2L] + seasonal$order[2L] * seasonal$period
ncond1 <- order[1L] + seasonal$period * seasonal$order[1L]
ncond <- ncond + if(!missing(n.cond)) max(n.cond, ncond1) else ncond1
} else ncond <- 0
if (is.null(fixed)) fixed <- rep(NA_real_, narma + ncxreg)
else if(length(fixed) != narma + ncxreg) stop("wrong length for 'fixed'")
mask <- is.na(fixed)
## if(!any(mask)) stop("all parameters were fixed")
no.optim <- !any(mask)
if(no.optim) transform.pars <- FALSE
if(transform.pars) {
ind <- arma[1L] + arma[2L] + seq_len(arma[3L])
if (any(!mask[seq_len(arma[1L])]) || any(!mask[ind])) {
warning("some AR parameters were fixed: setting transform.pars = FALSE")
transform.pars <- FALSE
}
}
init0 <- rep.int(0, narma)
parscale <- rep(1, narma)
if (ncxreg) {
cn <- colnames(xreg)
orig.xreg <- (ncxreg == 1L) || any(!mask[narma + 1L:ncxreg])
if (!orig.xreg) {
S <- svd(na.omit(xreg))
xreg <- xreg %*% S$v
}
dx <- x
dxreg <- xreg
if(order[2L] > 0L) {
dx <- diff(dx, 1L, order[2L])
dxreg <- diff(dxreg, 1L, order[2L])
}
if(seasonal$period > 1L & seasonal$order[2L] > 0) {
dx <- diff(dx, seasonal$period, seasonal$order[2L])
dxreg <- diff(dxreg, seasonal$period, seasonal$order[2L])
}
fit <- if(length(dx) > ncol(dxreg))
lm(dx ~ dxreg - 1, na.action = na.omit)
else list(rank = 0L)
if(fit$rank == 0L) {
## Degenerate model. Proceed anyway so as not to break old code
fit <- lm(x ~ xreg - 1, na.action = na.omit)
}
isna <- is.na(x) | apply(xreg, 1L, anyNA)
n.used <- sum(!isna) - length(Delta)
init0 <- c(init0, coef(fit))
ses <- summary(fit)$coefficients[, 2L]
parscale <- c(parscale, 10 * ses)
}
if (n.used <= 0) stop("too few non-missing observations")
if(!is.null(init)) {
if(length(init) != length(init0))
stop("'init' is of the wrong length")
if(any(ind <- is.na(init))) init[ind] <- init0[ind]
if(method == "ML") {
## check stationarity
if(arma[1L] > 0)
if(!arCheck(init[1L:arma[1L]]))
stop("non-stationary AR part")
if(arma[3L] > 0)
if(!arCheck(init[sum(arma[1L:2L]) + 1L:arma[3L]]))
stop("non-stationary seasonal AR part")
if(transform.pars)
init <- .Call(C_ARIMA_Invtrans, as.double(init), arma)
}
} else init <- init0
coef <- as.double(fixed)
if(!("parscale" %in% names(optim.control)))
optim.control$parscale <- parscale[mask]
if(method == "CSS") {
res <- if(no.optim)
list(convergence=0L, par=numeric(), value=armaCSS(numeric()))
else
optim(init[mask], armaCSS, method = optim.method, hessian = TRUE,
control = optim.control)
if(res$convergence > 0)
warning(gettextf("possible convergence problem: optim gave code = %d",
res$convergence), domain = NA)
coef[mask] <- res$par
## set model for predictions
trarma <- .Call(C_ARIMA_transPars, coef, arma, FALSE)
mod <- makeARIMA(trarma[[1L]], trarma[[2L]], Delta, kappa, SSinit)
if(ncxreg > 0) x <- x - xreg %*% coef[narma + (1L:ncxreg)]
arimaSS(x, mod)
val <- .Call(C_ARIMA_CSS, x, arma, trarma[[1L]], trarma[[2L]],
as.integer(ncond), TRUE)
sigma2 <- val[[1L]]
var <- if(no.optim) numeric() else solve(res$hessian * n.used)
} else {
if(method == "CSS-ML") {
res <- if(no.optim)
list(convergence=0L, par=numeric(), value=armaCSS(numeric()))
else
optim(init[mask], armaCSS, method = optim.method,
hessian = FALSE, control = optim.control)
if(res$convergence == 0) init[mask] <- res$par
## check stationarity
if(arma[1L] > 0)
if(!arCheck(init[1L:arma[1L]]))
stop("non-stationary AR part from CSS")
if(arma[3L] > 0)
if(!arCheck(init[sum(arma[1L:2L]) + 1L:arma[3L]]))
stop("non-stationary seasonal AR part from CSS")
ncond <- 0L
}
if(transform.pars) {
init <- .Call(C_ARIMA_Invtrans, init, arma)
## enforce invertibility
if(arma[2L] > 0) {
ind <- arma[1L] + 1L:arma[2L]
init[ind] <- maInvert(init[ind])
}
if(arma[4L] > 0) {
ind <- sum(arma[1L:3L]) + 1L:arma[4L]
init[ind] <- maInvert(init[ind])
}
}
trarma <- .Call(C_ARIMA_transPars, init, arma, transform.pars)
mod <- makeARIMA(trarma[[1L]], trarma[[2L]], Delta, kappa, SSinit)
res <- if(no.optim)
list(convergence = 0, par = numeric(),
value = armafn(numeric(), as.logical(transform.pars)))
else
optim(init[mask], armafn, method = optim.method,
hessian = TRUE, control = optim.control,
trans = as.logical(transform.pars))
if(res$convergence > 0)
warning(gettextf("possible convergence problem: optim gave code = %d",
res$convergence), domain = NA)
coef[mask] <- res$par
if(transform.pars) {
## enforce invertibility
if(arma[2L] > 0L) {
ind <- arma[1L] + 1L:arma[2L]
if(all(mask[ind]))
coef[ind] <- maInvert(coef[ind])
}
if(arma[4L] > 0L) {
ind <- sum(arma[1L:3L]) + 1L:arma[4L]
if(all(mask[ind]))
coef[ind] <- maInvert(coef[ind])
}
if(any(coef[mask] != res$par)) { # need to re-fit
oldcode <- res$convergence
res <- optim(coef[mask], armafn, method = optim.method,
hessian = TRUE,
control = list(maxit = 0L,
parscale = optim.control$parscale),
trans = TRUE)
res$convergence <- oldcode
coef[mask] <- res$par
}
## do it this way to ensure hessian was computed inside
## stationarity region
A <- .Call(C_ARIMA_Gradtrans, as.double(coef), arma)
A <- A[mask, mask]
var <- crossprod(A, solve(res$hessian * n.used, A))
coef <- .Call(C_ARIMA_undoPars, coef, arma)
} else var <- if(no.optim) numeric() else solve(res$hessian * n.used)
trarma <- .Call(C_ARIMA_transPars, coef, arma, FALSE)
mod <- makeARIMA(trarma[[1L]], trarma[[2L]], Delta, kappa, SSinit)
val <- if(ncxreg > 0L)
arimaSS(x - xreg %*% coef[narma + (1L:ncxreg)], mod)
else arimaSS(x, mod)
sigma2 <- val[[1L]][1L]/n.used
}
value <- 2 * n.used * res$value + n.used + n.used * log(2 * pi)
aic <- if(method != "CSS") value + 2*sum(mask) + 2 else NA
nm <- NULL
if (arma[1L] > 0L) nm <- c(nm, paste0("ar", 1L:arma[1L]))
if (arma[2L] > 0L) nm <- c(nm, paste0("ma", 1L:arma[2L]))
if (arma[3L] > 0L) nm <- c(nm, paste0("sar", 1L:arma[3L]))
if (arma[4L] > 0L) nm <- c(nm, paste0("sma", 1L:arma[4L]))
if (ncxreg > 0L) {
nm <- c(nm, cn)
if(!orig.xreg) {
ind <- narma + 1L:ncxreg
coef[ind] <- S$v %*% coef[ind]
A <- diag(narma + ncxreg)
A[ind, ind] <- S$v
A <- A[mask, mask]
var <- A %*% var %*% t(A)
}
}
names(coef) <- nm
if(!no.optim) dimnames(var) <- list(nm[mask], nm[mask])
resid <- val[[2L]]
tsp(resid) <- xtsp
class(resid) <- "ts"
structure(list(coef = coef, sigma2 = sigma2, var.coef = var, mask = mask,
loglik = -0.5 * value, aic = aic, arma = arma,
residuals = resid, call = match.call(), series = series,
code = res$convergence, n.cond = ncond, nobs = n.used,
model = mod),
class = "Arima")
}
print.Arima <-
function (x, digits = max(3L, getOption("digits") - 3L), se = TRUE, ...)
{
cat("\nCall:", deparse(x$call, width.cutoff = 75L), "", sep = "\n")
if (length(x$coef)) {
cat("Coefficients:\n")
coef <- round(x$coef, digits = digits)
## use NROW as if all coefs are fixed there are no var.coef's
if (se && NROW(x$var.coef)) {
ses <- rep.int(0, length(coef))
ses[x$mask] <- round(sqrt(diag(x$var.coef)), digits = digits)
coef <- matrix(coef, 1L, dimnames = list(NULL, names(coef)))
coef <- rbind(coef, s.e. = ses)
}
print.default(coef, print.gap = 2)
}
cm <- x$call$method
if(is.null(cm) || cm != "CSS")
cat("\nsigma^2 estimated as ", format(x$sigma2, digits = digits),
": log likelihood = ", format(round(x$loglik, 2L)),
", aic = ", format(round(x$aic, 2L)), "\n", sep = "")
else
cat("\nsigma^2 estimated as ",
format(x$sigma2, digits = digits),
": part log likelihood = ", format(round(x$loglik,2)),
"\n", sep = "")
invisible(x)
}
predict.Arima <-
function (object, n.ahead = 1L, newxreg = NULL, se.fit = TRUE, ...)
{
myNCOL <- function(x) if (is.null(x)) 0 else NCOL(x)
rsd <- object$residuals
xr <- object$call$xreg
xreg <- if (!is.null(xr)) eval.parent(xr) else NULL
ncxreg <- myNCOL(xreg)
if (myNCOL(newxreg) != ncxreg)
stop("'xreg' and 'newxreg' have different numbers of columns")
class(xreg) <- NULL
xtsp <- tsp(rsd)
n <- length(rsd)
arma <- object$arma
coefs <- object$coef
narma <- sum(arma[1L:4L])
if (length(coefs) > narma) {
if (names(coefs)[narma + 1L] == "intercept") {
xreg <- cbind(intercept = rep(1, n), xreg)
newxreg <- cbind(intercept = rep(1, n.ahead), newxreg)
ncxreg <- ncxreg + 1L
}
xm <- if(narma == 0) drop(as.matrix(newxreg) %*% coefs)
else drop(as.matrix(newxreg) %*% coefs[-(1L:narma)])
}
else xm <- 0
if (arma[2L] > 0L) {
ma <- coefs[arma[1L] + 1L:arma[2L]]
if (any(Mod(polyroot(c(1, ma))) < 1))
warning("MA part of model is not invertible")
}
if (arma[4L] > 0L) {
ma <- coefs[sum(arma[1L:3L]) + 1L:arma[4L]]
if (any(Mod(polyroot(c(1, ma))) < 1))
warning("seasonal MA part of model is not invertible")
}
z <- KalmanForecast(n.ahead, object$model)
pred <- ts(z[[1L]] + xm, start = xtsp[2L] + deltat(rsd),
frequency = xtsp[3L])
if (se.fit) {
se <- ts(sqrt(z[[2L]] * object$sigma2),
start = xtsp[2L] + deltat(rsd),
frequency = xtsp[3L])
list(pred=pred, se=se)
}
else pred
}
makeARIMA <- function(phi, theta, Delta, kappa = 1e6,
SSinit = c("Gardner1980", "Rossignol2011"),
tol = .Machine$double.eps)
{
if(anyNA(phi)) warning(gettextf("NAs in '%s'", "phi"), domain=NA)
if(anyNA(theta)) warning(gettextf("NAs in '%s'", "theta"), domain=NA)
p <- length(phi); q <- length(theta)
r <- max(p, q + 1L); d <- length(Delta)
rd <- r + d
Z <- c(1., rep.int(0, r-1L), Delta)
T <- matrix(0., rd, rd)
if(p > 0) T[1L:p, 1L] <- phi
if(r > 1L) {
ind <- 2:r
T[cbind(ind-1L, ind)] <- 1
}
if(d > 0L) {
T[r+1L, ] <- Z
if(d > 1L) {
ind <- r + 2:d
T[cbind(ind, ind-1)] <- 1
}
}
if(q < r - 1L) theta <- c(theta, rep.int(0, r-1L-q))
R <- c(1, theta, rep.int(0, d))
V <- R %o% R
h <- 0.
a <- rep(0., rd)
Pn <- P <- matrix(0., rd, rd)
if(r > 1L)
Pn[1L:r, 1L:r] <- switch(match.arg(SSinit),
"Gardner1980" = .Call(C_getQ0, phi, theta),
"Rossignol2011" = .Call(C_getQ0bis, phi, theta, tol),
stop("invalid 'SSinit'"))
else Pn[1L, 1L] <- if(p > 0) 1/(1 - phi^2) else 1
if(d > 0L) Pn[cbind(r+1L:d, r+1L:d)] <- kappa
list(phi=phi, theta=theta, Delta=Delta, Z=Z, a=a, P=P, T=T, V=V,
h=h, Pn=Pn)
}
coef.Arima <- function (object, ...) object$coef
vcov.Arima <- function (object, ...) object$var.coef
logLik.Arima <- function (object, ...) {
res <- if(is.na(object$aic)) NA
else structure(object$loglik, df = sum(object$mask) + 1, nobs = object$nobs)
class(res) <- "logLik"
res
}
## arima.sim() is in ./ts.R
| 18,997 | gpl-2.0 |
8b6cc736b0a4854f6aad575aff47792a3c844fd4 | limeng12/r-source | src/library/stats/R/arima.R | # File src/library/stats/R/arima.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 2002-2015 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
arima <- function(x, order = c(0L, 0L, 0L),
seasonal = list(order = c(0L, 0L, 0L), period = NA),
xreg = NULL, include.mean = TRUE,
transform.pars = TRUE, fixed = NULL, init = NULL,
method = c("CSS-ML", "ML", "CSS"), n.cond,
SSinit = c("Gardner1980", "Rossignol2011"),
optim.method = "BFGS",
optim.control = list(), kappa = 1e6)
{
"%+%" <- function(a, b) .Call(C_TSconv, a, b)
SSinit <- match.arg(SSinit)
SS.G <- SSinit == "Gardner1980"
## helper of armafn(), called by optim()
upARIMA <- function(mod, phi, theta)
{
p <- length(phi); q <- length(theta)
mod$phi <- phi; mod$theta <- theta
r <- max(p, q + 1L)
if(p > 0) mod$T[1L:p, 1L] <- phi
if(r > 1L)
mod$Pn[1L:r, 1L:r] <-
if(SS.G) .Call(C_getQ0, phi, theta)
else .Call(C_getQ0bis, phi, theta, tol = 0)# tol=0: less checking
else
mod$Pn[1L, 1L] <- if (p > 0) 1/(1 - phi^2) else 1
mod$a[] <- 0
mod
}
arimaSS <- function(y, mod)
{
## next call changes mod components a, P, Pn so beware!
.Call(C_ARIMA_Like, y, mod, 0L, TRUE)
}
## the objective function called by optim()
armafn <- function(p, trans)
{
par <- coef
par[mask] <- p
trarma <- .Call(C_ARIMA_transPars, par, arma, trans)
if(is.null(Z <- tryCatch(upARIMA(mod, trarma[[1L]], trarma[[2L]]),
error = function(e) NULL)))
return(.Machine$double.xmax)# bad parameters giving error, e.g. in solve(.)
if(ncxreg > 0) x <- x - xreg %*% par[narma + (1L:ncxreg)]
## next call changes Z components a, P, Pn so beware!
res <- .Call(C_ARIMA_Like, x, Z, 0L, FALSE)
s2 <- res[1L]/res[3L]
0.5*(log(s2) + res[2L]/res[3L])
}
armaCSS <- function(p)
{
par <- as.double(fixed)
par[mask] <- p
trarma <- .Call(C_ARIMA_transPars, par, arma, FALSE)
if(ncxreg > 0) x <- x - xreg %*% par[narma + (1L:ncxreg)]
res <- .Call(C_ARIMA_CSS, x, arma, trarma[[1L]], trarma[[2L]],
as.integer(ncond), FALSE)
0.5 * log(res)
}
arCheck <- function(ar)
{
p <- max(which(c(1, -ar) != 0)) - 1
if(!p) return(TRUE)
all(Mod(polyroot(c(1, -ar[1L:p]))) > 1)
}
maInvert <- function(ma)
{
## polyroot can't cope with leading zero.
q <- length(ma)
q0 <- max(which(c(1,ma) != 0)) - 1L
if(!q0) return(ma)
roots <- polyroot(c(1, ma[1L:q0]))
ind <- Mod(roots) < 1
if(all(!ind)) return(ma)
if(q0 == 1) return(c(1/ma[1L], rep.int(0, q - q0)))
roots[ind] <- 1/roots[ind]
x <- 1
for (r in roots) x <- c(x, 0) - c(0, x)/r
c(Re(x[-1L]), rep.int(0, q - q0))
}
series <- deparse(substitute(x))
if(NCOL(x) > 1L)
stop("only implemented for univariate time series")
method <- match.arg(method)
x <- as.ts(x)
if(!is.numeric(x))
stop("'x' must be numeric")
storage.mode(x) <- "double" # a precaution
dim(x) <- NULL
n <- length(x)
if(!missing(order))
if(!is.numeric(order) || length(order) != 3L || any(order < 0))
stop("'order' must be a non-negative numeric vector of length 3")
if(!missing(seasonal))
if(is.list(seasonal)) {
if(is.null(seasonal$order))
stop("'seasonal' must be a list with component 'order'")
if(!is.numeric(seasonal$order) || length(seasonal$order) != 3L
|| any(seasonal$order < 0L))
stop("'seasonal$order' must be a non-negative numeric vector of length 3")
} else if(is.numeric(order)) {
if(length(order) == 3L) seasonal <- list(order=seasonal)
else ("'seasonal' is of the wrong length")
} else stop("'seasonal' must be a list with component 'order'")
if (is.null(seasonal$period) || is.na(seasonal$period)
||seasonal$period == 0) seasonal$period <- frequency(x)
arma <- as.integer(c(order[-2L], seasonal$order[-2L], seasonal$period,
order[2L], seasonal$order[2L]))
narma <- sum(arma[1L:4L])
xtsp <- tsp(x)
tsp(x) <- NULL
Delta <- 1.
for(i in seq_len(order[2L])) Delta <- Delta %+% c(1., -1.)
for(i in seq_len(seasonal$order[2L]))
Delta <- Delta %+% c(1, rep.int(0, seasonal$period-1), -1)
Delta <- - Delta[-1L]
nd <- order[2L] + seasonal$order[2L]
n.used <- sum(!is.na(x)) - length(Delta)
if (is.null(xreg)) {
ncxreg <- 0L
} else {
nmxreg <- deparse(substitute(xreg))
if (NROW(xreg) != n) stop("lengths of 'x' and 'xreg' do not match")
ncxreg <- NCOL(xreg)
xreg <- as.matrix(xreg)
storage.mode(xreg) <- "double"
}
class(xreg) <- NULL
if (ncxreg > 0L && is.null(colnames(xreg)))
colnames(xreg) <-
if(ncxreg == 1L) nmxreg else paste0(nmxreg, 1L:ncxreg)
if (include.mean && (nd == 0L)) {
xreg <- cbind(intercept = rep(1, n), xreg = xreg)
ncxreg <- ncxreg + 1L
}
if(method == "CSS-ML") {
anyna <- anyNA(x)
if(ncxreg) anyna <- anyna || anyNA(xreg)
if(anyna) method <- "ML"
}
if (method == "CSS" || method == "CSS-ML") {
ncond <- order[2L] + seasonal$order[2L] * seasonal$period
ncond1 <- order[1L] + seasonal$period * seasonal$order[1L]
ncond <- ncond + if(!missing(n.cond)) max(n.cond, ncond1) else ncond1
} else ncond <- 0
if (is.null(fixed)) fixed <- rep(NA_real_, narma + ncxreg)
else if(length(fixed) != narma + ncxreg) stop("wrong length for 'fixed'")
mask <- is.na(fixed)
## if(!any(mask)) stop("all parameters were fixed")
no.optim <- !any(mask)
if(no.optim) transform.pars <- FALSE
if(transform.pars) {
ind <- arma[1L] + arma[2L] + seq_len(arma[3L])
if (any(!mask[seq_len(arma[1L])]) || any(!mask[ind])) {
warning("some AR parameters were fixed: setting transform.pars = FALSE")
transform.pars <- FALSE
}
}
init0 <- rep.int(0, narma)
parscale <- rep(1, narma)
if (ncxreg) {
cn <- colnames(xreg)
orig.xreg <- (ncxreg == 1L) || any(!mask[narma + 1L:ncxreg])
if (!orig.xreg) {
S <- svd(na.omit(xreg))
xreg <- xreg %*% S$v
}
dx <- x
dxreg <- xreg
if(order[2L] > 0L) {
dx <- diff(dx, 1L, order[2L])
dxreg <- diff(dxreg, 1L, order[2L])
}
if(seasonal$period > 1L & seasonal$order[2L] > 0) {
dx <- diff(dx, seasonal$period, seasonal$order[2L])
dxreg <- diff(dxreg, seasonal$period, seasonal$order[2L])
}
fit <- if(length(dx) > ncol(dxreg))
lm(dx ~ dxreg - 1, na.action = na.omit)
else list(rank = 0L)
if(fit$rank == 0L) {
## Degenerate model. Proceed anyway so as not to break old code
fit <- lm(x ~ xreg - 1, na.action = na.omit)
}
isna <- is.na(x) | apply(xreg, 1L, anyNA)
n.used <- sum(!isna) - length(Delta)
init0 <- c(init0, coef(fit))
ses <- summary(fit)$coefficients[, 2L]
parscale <- c(parscale, 10 * ses)
}
if (n.used <= 0) stop("too few non-missing observations")
if(!is.null(init)) {
if(length(init) != length(init0))
stop("'init' is of the wrong length")
if(any(ind <- is.na(init))) init[ind] <- init0[ind]
if(method == "ML") {
## check stationarity
if(arma[1L] > 0)
if(!arCheck(init[1L:arma[1L]]))
stop("non-stationary AR part")
if(arma[3L] > 0)
if(!arCheck(init[sum(arma[1L:2L]) + 1L:arma[3L]]))
stop("non-stationary seasonal AR part")
if(transform.pars)
init <- .Call(C_ARIMA_Invtrans, as.double(init), arma)
}
} else init <- init0
coef <- as.double(fixed)
if(!("parscale" %in% names(optim.control)))
optim.control$parscale <- parscale[mask]
if(method == "CSS") {
res <- if(no.optim)
list(convergence=0L, par=numeric(), value=armaCSS(numeric()))
else
optim(init[mask], armaCSS, method = optim.method, hessian = TRUE,
control = optim.control)
if(res$convergence > 0)
warning(gettextf("possible convergence problem: optim gave code = %d",
res$convergence), domain = NA)
coef[mask] <- res$par
## set model for predictions
trarma <- .Call(C_ARIMA_transPars, coef, arma, FALSE)
mod <- makeARIMA(trarma[[1L]], trarma[[2L]], Delta, kappa, SSinit)
if(ncxreg > 0) x <- x - xreg %*% coef[narma + (1L:ncxreg)]
arimaSS(x, mod)
val <- .Call(C_ARIMA_CSS, x, arma, trarma[[1L]], trarma[[2L]],
as.integer(ncond), TRUE)
sigma2 <- val[[1L]]
var <- if(no.optim) numeric() else solve(res$hessian * n.used)
} else {
if(method == "CSS-ML") {
res <- if(no.optim)
list(convergence=0L, par=numeric(), value=armaCSS(numeric()))
else
optim(init[mask], armaCSS, method = optim.method,
hessian = FALSE, control = optim.control)
if(res$convergence == 0) init[mask] <- res$par
## check stationarity
if(arma[1L] > 0)
if(!arCheck(init[1L:arma[1L]]))
stop("non-stationary AR part from CSS")
if(arma[3L] > 0)
if(!arCheck(init[sum(arma[1L:2L]) + 1L:arma[3L]]))
stop("non-stationary seasonal AR part from CSS")
ncond <- 0L
}
if(transform.pars) {
init <- .Call(C_ARIMA_Invtrans, init, arma)
## enforce invertibility
if(arma[2L] > 0) {
ind <- arma[1L] + 1L:arma[2L]
init[ind] <- maInvert(init[ind])
}
if(arma[4L] > 0) {
ind <- sum(arma[1L:3L]) + 1L:arma[4L]
init[ind] <- maInvert(init[ind])
}
}
trarma <- .Call(C_ARIMA_transPars, init, arma, transform.pars)
mod <- makeARIMA(trarma[[1L]], trarma[[2L]], Delta, kappa, SSinit)
res <- if(no.optim)
list(convergence = 0, par = numeric(),
value = armafn(numeric(), as.logical(transform.pars)))
else
optim(init[mask], armafn, method = optim.method,
hessian = TRUE, control = optim.control,
trans = as.logical(transform.pars))
if(res$convergence > 0)
warning(gettextf("possible convergence problem: optim gave code = %d",
res$convergence), domain = NA)
coef[mask] <- res$par
if(transform.pars) {
## enforce invertibility
if(arma[2L] > 0L) {
ind <- arma[1L] + 1L:arma[2L]
if(all(mask[ind]))
coef[ind] <- maInvert(coef[ind])
}
if(arma[4L] > 0L) {
ind <- sum(arma[1L:3L]) + 1L:arma[4L]
if(all(mask[ind]))
coef[ind] <- maInvert(coef[ind])
}
if(any(coef[mask] != res$par)) { # need to re-fit
oldcode <- res$convergence
res <- optim(coef[mask], armafn, method = optim.method,
hessian = TRUE,
control = list(maxit = 0L,
parscale = optim.control$parscale),
trans = TRUE)
res$convergence <- oldcode
coef[mask] <- res$par
}
## do it this way to ensure hessian was computed inside
## stationarity region
A <- .Call(C_ARIMA_Gradtrans, as.double(coef), arma)
A <- A[mask, mask]
var <- crossprod(A, solve(res$hessian * n.used, A))
coef <- .Call(C_ARIMA_undoPars, coef, arma)
} else var <- if(no.optim) numeric() else solve(res$hessian * n.used)
trarma <- .Call(C_ARIMA_transPars, coef, arma, FALSE)
mod <- makeARIMA(trarma[[1L]], trarma[[2L]], Delta, kappa, SSinit)
val <- if(ncxreg > 0L)
arimaSS(x - xreg %*% coef[narma + (1L:ncxreg)], mod)
else arimaSS(x, mod)
sigma2 <- val[[1L]][1L]/n.used
}
value <- 2 * n.used * res$value + n.used + n.used * log(2 * pi)
aic <- if(method != "CSS") value + 2*sum(mask) + 2 else NA
nm <- NULL
if (arma[1L] > 0L) nm <- c(nm, paste0("ar", 1L:arma[1L]))
if (arma[2L] > 0L) nm <- c(nm, paste0("ma", 1L:arma[2L]))
if (arma[3L] > 0L) nm <- c(nm, paste0("sar", 1L:arma[3L]))
if (arma[4L] > 0L) nm <- c(nm, paste0("sma", 1L:arma[4L]))
if (ncxreg > 0L) {
nm <- c(nm, cn)
if(!orig.xreg) {
ind <- narma + 1L:ncxreg
coef[ind] <- S$v %*% coef[ind]
A <- diag(narma + ncxreg)
A[ind, ind] <- S$v
A <- A[mask, mask]
var <- A %*% var %*% t(A)
}
}
names(coef) <- nm
if(!no.optim) dimnames(var) <- list(nm[mask], nm[mask])
resid <- val[[2L]]
tsp(resid) <- xtsp
class(resid) <- "ts"
structure(list(coef = coef, sigma2 = sigma2, var.coef = var, mask = mask,
loglik = -0.5 * value, aic = aic, arma = arma,
residuals = resid, call = match.call(), series = series,
code = res$convergence, n.cond = ncond, nobs = n.used,
model = mod),
class = "Arima")
}
print.Arima <-
function (x, digits = max(3L, getOption("digits") - 3L), se = TRUE, ...)
{
cat("\nCall:", deparse(x$call, width.cutoff = 75L), "", sep = "\n")
if (length(x$coef)) {
cat("Coefficients:\n")
coef <- round(x$coef, digits = digits)
## use NROW as if all coefs are fixed there are no var.coef's
if (se && NROW(x$var.coef)) {
ses <- rep.int(0, length(coef))
ses[x$mask] <- round(sqrt(diag(x$var.coef)), digits = digits)
coef <- matrix(coef, 1L, dimnames = list(NULL, names(coef)))
coef <- rbind(coef, s.e. = ses)
}
print.default(coef, print.gap = 2)
}
cm <- x$call$method
if(is.null(cm) || cm != "CSS")
cat("\nsigma^2 estimated as ", format(x$sigma2, digits = digits),
": log likelihood = ", format(round(x$loglik, 2L)),
", aic = ", format(round(x$aic, 2L)), "\n", sep = "")
else
cat("\nsigma^2 estimated as ",
format(x$sigma2, digits = digits),
": part log likelihood = ", format(round(x$loglik,2)),
"\n", sep = "")
invisible(x)
}
predict.Arima <-
function (object, n.ahead = 1L, newxreg = NULL, se.fit = TRUE, ...)
{
myNCOL <- function(x) if (is.null(x)) 0 else NCOL(x)
rsd <- object$residuals
xr <- object$call$xreg
xreg <- if (!is.null(xr)) eval.parent(xr) else NULL
ncxreg <- myNCOL(xreg)
if (myNCOL(newxreg) != ncxreg)
stop("'xreg' and 'newxreg' have different numbers of columns")
class(xreg) <- NULL
xtsp <- tsp(rsd)
n <- length(rsd)
arma <- object$arma
coefs <- object$coef
narma <- sum(arma[1L:4L])
if (length(coefs) > narma) {
if (names(coefs)[narma + 1L] == "intercept") {
xreg <- cbind(intercept = rep(1, n), xreg)
newxreg <- cbind(intercept = rep(1, n.ahead), newxreg)
ncxreg <- ncxreg + 1L
}
xm <- if(narma == 0) drop(as.matrix(newxreg) %*% coefs)
else drop(as.matrix(newxreg) %*% coefs[-(1L:narma)])
}
else xm <- 0
if (arma[2L] > 0L) {
ma <- coefs[arma[1L] + 1L:arma[2L]]
if (any(Mod(polyroot(c(1, ma))) < 1))
warning("MA part of model is not invertible")
}
if (arma[4L] > 0L) {
ma <- coefs[sum(arma[1L:3L]) + 1L:arma[4L]]
if (any(Mod(polyroot(c(1, ma))) < 1))
warning("seasonal MA part of model is not invertible")
}
z <- KalmanForecast(n.ahead, object$model)
pred <- ts(z[[1L]] + xm, start = xtsp[2L] + deltat(rsd),
frequency = xtsp[3L])
if (se.fit) {
se <- ts(sqrt(z[[2L]] * object$sigma2),
start = xtsp[2L] + deltat(rsd),
frequency = xtsp[3L])
list(pred=pred, se=se)
}
else pred
}
makeARIMA <- function(phi, theta, Delta, kappa = 1e6,
SSinit = c("Gardner1980", "Rossignol2011"),
tol = .Machine$double.eps)
{
if(anyNA(phi)) warning(gettextf("NAs in '%s'", "phi"), domain=NA)
if(anyNA(theta)) warning(gettextf("NAs in '%s'", "theta"), domain=NA)
p <- length(phi); q <- length(theta)
r <- max(p, q + 1L); d <- length(Delta)
rd <- r + d
Z <- c(1., rep.int(0, r-1L), Delta)
T <- matrix(0., rd, rd)
if(p > 0) T[1L:p, 1L] <- phi
if(r > 1L) {
ind <- 2:r
T[cbind(ind-1L, ind)] <- 1
}
if(d > 0L) {
T[r+1L, ] <- Z
if(d > 1L) {
ind <- r + 2:d
T[cbind(ind, ind-1)] <- 1
}
}
if(q < r - 1L) theta <- c(theta, rep.int(0, r-1L-q))
R <- c(1, theta, rep.int(0, d))
V <- R %o% R
h <- 0.
a <- rep(0., rd)
Pn <- P <- matrix(0., rd, rd)
if(r > 1L)
Pn[1L:r, 1L:r] <- switch(match.arg(SSinit),
"Gardner1980" = .Call(C_getQ0, phi, theta),
"Rossignol2011" = .Call(C_getQ0bis, phi, theta, tol),
stop("invalid 'SSinit'"))
else Pn[1L, 1L] <- if(p > 0) 1/(1 - phi^2) else 1
if(d > 0L) Pn[cbind(r+1L:d, r+1L:d)] <- kappa
list(phi=phi, theta=theta, Delta=Delta, Z=Z, a=a, P=P, T=T, V=V,
h=h, Pn=Pn)
}
coef.Arima <- function (object, ...) object$coef
vcov.Arima <- function (object, ...) object$var.coef
logLik.Arima <- function (object, ...) {
res <- if(is.na(object$aic)) NA
else structure(object$loglik, df = sum(object$mask) + 1, nobs = object$nobs)
class(res) <- "logLik"
res
}
## arima.sim() is in ./ts.R
| 18,997 | gpl-2.0 |
20769a023b830ddef7c35ab7c099b5ac260e9f87 | jpgroup/democode | plot/mass.R | source("http://bioconductor.org/biocLite.R")
biocLite("mzR")
library(mzR)
all <- openMSfile('./FULL200.CDF')
df <- header(all)
bb <- peaks(all)
aaaa <- sapply(bb,as.data.frame)
oddvals <- seq(1, ncol(aaaa), by=2)
aaaaa <- unlist(aaaa[oddvals])
ccc <- unique(c(aaaaa))
ccc <- ccc[order(ccc)]
# bbb <- sapply(bb, "[",250:700)
# ddd <- unique(c(bbb))
# dddd <- ddd[ddd<700]
time <- df$retentionTime
df2 <- matrix(0, nrow = length(ccc), ncol = length(time))
rownames(df2) <- ccc
colnames(df2) <- time
rm(aaaa)
rm(aaaaa)
rm(oddvals)
rm(df)
rm(all)
gc()
for(i in 1:length(time)){
temp <- bb[[i]]
index <- which(ccc%in%temp[,1])
df2[index,i] <- temp[,2]
}
ddd <- as.integer(ccc)
library(data.table)
dt = data.table(df2)
dt$fac <- ddd
df3 <- dt[,lapply(.SD, sum), by=ddd ]
df3 <- as.matrix(df3)
df7 <- df3[,2000:3000]
heatmap(df7)
library(rARPACK)
df4 <- svds(df3,2)
df5 <- df4$u %*% diag(df4$d) %*% t(df4$v)
rownames(df5) <- ddd
colnames(df5) <- time
df6 <- df5[,2000:3000]
heatmap(df6)
df8 <- as.data.frame(df5)
df9 <- as.data.frame(t(df8))
rownames(df8) <- ddd
colnames(df9) <- time
write.table(df3,'df3.txt')
| 1,185 | mit |
8b6cc736b0a4854f6aad575aff47792a3c844fd4 | hadley/r-source | src/library/stats/R/arima.R | # File src/library/stats/R/arima.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 2002-2015 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
arima <- function(x, order = c(0L, 0L, 0L),
seasonal = list(order = c(0L, 0L, 0L), period = NA),
xreg = NULL, include.mean = TRUE,
transform.pars = TRUE, fixed = NULL, init = NULL,
method = c("CSS-ML", "ML", "CSS"), n.cond,
SSinit = c("Gardner1980", "Rossignol2011"),
optim.method = "BFGS",
optim.control = list(), kappa = 1e6)
{
"%+%" <- function(a, b) .Call(C_TSconv, a, b)
SSinit <- match.arg(SSinit)
SS.G <- SSinit == "Gardner1980"
## helper of armafn(), called by optim()
upARIMA <- function(mod, phi, theta)
{
p <- length(phi); q <- length(theta)
mod$phi <- phi; mod$theta <- theta
r <- max(p, q + 1L)
if(p > 0) mod$T[1L:p, 1L] <- phi
if(r > 1L)
mod$Pn[1L:r, 1L:r] <-
if(SS.G) .Call(C_getQ0, phi, theta)
else .Call(C_getQ0bis, phi, theta, tol = 0)# tol=0: less checking
else
mod$Pn[1L, 1L] <- if (p > 0) 1/(1 - phi^2) else 1
mod$a[] <- 0
mod
}
arimaSS <- function(y, mod)
{
## next call changes mod components a, P, Pn so beware!
.Call(C_ARIMA_Like, y, mod, 0L, TRUE)
}
## the objective function called by optim()
armafn <- function(p, trans)
{
par <- coef
par[mask] <- p
trarma <- .Call(C_ARIMA_transPars, par, arma, trans)
if(is.null(Z <- tryCatch(upARIMA(mod, trarma[[1L]], trarma[[2L]]),
error = function(e) NULL)))
return(.Machine$double.xmax)# bad parameters giving error, e.g. in solve(.)
if(ncxreg > 0) x <- x - xreg %*% par[narma + (1L:ncxreg)]
## next call changes Z components a, P, Pn so beware!
res <- .Call(C_ARIMA_Like, x, Z, 0L, FALSE)
s2 <- res[1L]/res[3L]
0.5*(log(s2) + res[2L]/res[3L])
}
armaCSS <- function(p)
{
par <- as.double(fixed)
par[mask] <- p
trarma <- .Call(C_ARIMA_transPars, par, arma, FALSE)
if(ncxreg > 0) x <- x - xreg %*% par[narma + (1L:ncxreg)]
res <- .Call(C_ARIMA_CSS, x, arma, trarma[[1L]], trarma[[2L]],
as.integer(ncond), FALSE)
0.5 * log(res)
}
arCheck <- function(ar)
{
p <- max(which(c(1, -ar) != 0)) - 1
if(!p) return(TRUE)
all(Mod(polyroot(c(1, -ar[1L:p]))) > 1)
}
maInvert <- function(ma)
{
## polyroot can't cope with leading zero.
q <- length(ma)
q0 <- max(which(c(1,ma) != 0)) - 1L
if(!q0) return(ma)
roots <- polyroot(c(1, ma[1L:q0]))
ind <- Mod(roots) < 1
if(all(!ind)) return(ma)
if(q0 == 1) return(c(1/ma[1L], rep.int(0, q - q0)))
roots[ind] <- 1/roots[ind]
x <- 1
for (r in roots) x <- c(x, 0) - c(0, x)/r
c(Re(x[-1L]), rep.int(0, q - q0))
}
series <- deparse(substitute(x))
if(NCOL(x) > 1L)
stop("only implemented for univariate time series")
method <- match.arg(method)
x <- as.ts(x)
if(!is.numeric(x))
stop("'x' must be numeric")
storage.mode(x) <- "double" # a precaution
dim(x) <- NULL
n <- length(x)
if(!missing(order))
if(!is.numeric(order) || length(order) != 3L || any(order < 0))
stop("'order' must be a non-negative numeric vector of length 3")
if(!missing(seasonal))
if(is.list(seasonal)) {
if(is.null(seasonal$order))
stop("'seasonal' must be a list with component 'order'")
if(!is.numeric(seasonal$order) || length(seasonal$order) != 3L
|| any(seasonal$order < 0L))
stop("'seasonal$order' must be a non-negative numeric vector of length 3")
} else if(is.numeric(order)) {
if(length(order) == 3L) seasonal <- list(order=seasonal)
else ("'seasonal' is of the wrong length")
} else stop("'seasonal' must be a list with component 'order'")
if (is.null(seasonal$period) || is.na(seasonal$period)
||seasonal$period == 0) seasonal$period <- frequency(x)
arma <- as.integer(c(order[-2L], seasonal$order[-2L], seasonal$period,
order[2L], seasonal$order[2L]))
narma <- sum(arma[1L:4L])
xtsp <- tsp(x)
tsp(x) <- NULL
Delta <- 1.
for(i in seq_len(order[2L])) Delta <- Delta %+% c(1., -1.)
for(i in seq_len(seasonal$order[2L]))
Delta <- Delta %+% c(1, rep.int(0, seasonal$period-1), -1)
Delta <- - Delta[-1L]
nd <- order[2L] + seasonal$order[2L]
n.used <- sum(!is.na(x)) - length(Delta)
if (is.null(xreg)) {
ncxreg <- 0L
} else {
nmxreg <- deparse(substitute(xreg))
if (NROW(xreg) != n) stop("lengths of 'x' and 'xreg' do not match")
ncxreg <- NCOL(xreg)
xreg <- as.matrix(xreg)
storage.mode(xreg) <- "double"
}
class(xreg) <- NULL
if (ncxreg > 0L && is.null(colnames(xreg)))
colnames(xreg) <-
if(ncxreg == 1L) nmxreg else paste0(nmxreg, 1L:ncxreg)
if (include.mean && (nd == 0L)) {
xreg <- cbind(intercept = rep(1, n), xreg = xreg)
ncxreg <- ncxreg + 1L
}
if(method == "CSS-ML") {
anyna <- anyNA(x)
if(ncxreg) anyna <- anyna || anyNA(xreg)
if(anyna) method <- "ML"
}
if (method == "CSS" || method == "CSS-ML") {
ncond <- order[2L] + seasonal$order[2L] * seasonal$period
ncond1 <- order[1L] + seasonal$period * seasonal$order[1L]
ncond <- ncond + if(!missing(n.cond)) max(n.cond, ncond1) else ncond1
} else ncond <- 0
if (is.null(fixed)) fixed <- rep(NA_real_, narma + ncxreg)
else if(length(fixed) != narma + ncxreg) stop("wrong length for 'fixed'")
mask <- is.na(fixed)
## if(!any(mask)) stop("all parameters were fixed")
no.optim <- !any(mask)
if(no.optim) transform.pars <- FALSE
if(transform.pars) {
ind <- arma[1L] + arma[2L] + seq_len(arma[3L])
if (any(!mask[seq_len(arma[1L])]) || any(!mask[ind])) {
warning("some AR parameters were fixed: setting transform.pars = FALSE")
transform.pars <- FALSE
}
}
init0 <- rep.int(0, narma)
parscale <- rep(1, narma)
if (ncxreg) {
cn <- colnames(xreg)
orig.xreg <- (ncxreg == 1L) || any(!mask[narma + 1L:ncxreg])
if (!orig.xreg) {
S <- svd(na.omit(xreg))
xreg <- xreg %*% S$v
}
dx <- x
dxreg <- xreg
if(order[2L] > 0L) {
dx <- diff(dx, 1L, order[2L])
dxreg <- diff(dxreg, 1L, order[2L])
}
if(seasonal$period > 1L & seasonal$order[2L] > 0) {
dx <- diff(dx, seasonal$period, seasonal$order[2L])
dxreg <- diff(dxreg, seasonal$period, seasonal$order[2L])
}
fit <- if(length(dx) > ncol(dxreg))
lm(dx ~ dxreg - 1, na.action = na.omit)
else list(rank = 0L)
if(fit$rank == 0L) {
## Degenerate model. Proceed anyway so as not to break old code
fit <- lm(x ~ xreg - 1, na.action = na.omit)
}
isna <- is.na(x) | apply(xreg, 1L, anyNA)
n.used <- sum(!isna) - length(Delta)
init0 <- c(init0, coef(fit))
ses <- summary(fit)$coefficients[, 2L]
parscale <- c(parscale, 10 * ses)
}
if (n.used <= 0) stop("too few non-missing observations")
if(!is.null(init)) {
if(length(init) != length(init0))
stop("'init' is of the wrong length")
if(any(ind <- is.na(init))) init[ind] <- init0[ind]
if(method == "ML") {
## check stationarity
if(arma[1L] > 0)
if(!arCheck(init[1L:arma[1L]]))
stop("non-stationary AR part")
if(arma[3L] > 0)
if(!arCheck(init[sum(arma[1L:2L]) + 1L:arma[3L]]))
stop("non-stationary seasonal AR part")
if(transform.pars)
init <- .Call(C_ARIMA_Invtrans, as.double(init), arma)
}
} else init <- init0
coef <- as.double(fixed)
if(!("parscale" %in% names(optim.control)))
optim.control$parscale <- parscale[mask]
if(method == "CSS") {
res <- if(no.optim)
list(convergence=0L, par=numeric(), value=armaCSS(numeric()))
else
optim(init[mask], armaCSS, method = optim.method, hessian = TRUE,
control = optim.control)
if(res$convergence > 0)
warning(gettextf("possible convergence problem: optim gave code = %d",
res$convergence), domain = NA)
coef[mask] <- res$par
## set model for predictions
trarma <- .Call(C_ARIMA_transPars, coef, arma, FALSE)
mod <- makeARIMA(trarma[[1L]], trarma[[2L]], Delta, kappa, SSinit)
if(ncxreg > 0) x <- x - xreg %*% coef[narma + (1L:ncxreg)]
arimaSS(x, mod)
val <- .Call(C_ARIMA_CSS, x, arma, trarma[[1L]], trarma[[2L]],
as.integer(ncond), TRUE)
sigma2 <- val[[1L]]
var <- if(no.optim) numeric() else solve(res$hessian * n.used)
} else {
if(method == "CSS-ML") {
res <- if(no.optim)
list(convergence=0L, par=numeric(), value=armaCSS(numeric()))
else
optim(init[mask], armaCSS, method = optim.method,
hessian = FALSE, control = optim.control)
if(res$convergence == 0) init[mask] <- res$par
## check stationarity
if(arma[1L] > 0)
if(!arCheck(init[1L:arma[1L]]))
stop("non-stationary AR part from CSS")
if(arma[3L] > 0)
if(!arCheck(init[sum(arma[1L:2L]) + 1L:arma[3L]]))
stop("non-stationary seasonal AR part from CSS")
ncond <- 0L
}
if(transform.pars) {
init <- .Call(C_ARIMA_Invtrans, init, arma)
## enforce invertibility
if(arma[2L] > 0) {
ind <- arma[1L] + 1L:arma[2L]
init[ind] <- maInvert(init[ind])
}
if(arma[4L] > 0) {
ind <- sum(arma[1L:3L]) + 1L:arma[4L]
init[ind] <- maInvert(init[ind])
}
}
trarma <- .Call(C_ARIMA_transPars, init, arma, transform.pars)
mod <- makeARIMA(trarma[[1L]], trarma[[2L]], Delta, kappa, SSinit)
res <- if(no.optim)
list(convergence = 0, par = numeric(),
value = armafn(numeric(), as.logical(transform.pars)))
else
optim(init[mask], armafn, method = optim.method,
hessian = TRUE, control = optim.control,
trans = as.logical(transform.pars))
if(res$convergence > 0)
warning(gettextf("possible convergence problem: optim gave code = %d",
res$convergence), domain = NA)
coef[mask] <- res$par
if(transform.pars) {
## enforce invertibility
if(arma[2L] > 0L) {
ind <- arma[1L] + 1L:arma[2L]
if(all(mask[ind]))
coef[ind] <- maInvert(coef[ind])
}
if(arma[4L] > 0L) {
ind <- sum(arma[1L:3L]) + 1L:arma[4L]
if(all(mask[ind]))
coef[ind] <- maInvert(coef[ind])
}
if(any(coef[mask] != res$par)) { # need to re-fit
oldcode <- res$convergence
res <- optim(coef[mask], armafn, method = optim.method,
hessian = TRUE,
control = list(maxit = 0L,
parscale = optim.control$parscale),
trans = TRUE)
res$convergence <- oldcode
coef[mask] <- res$par
}
## do it this way to ensure hessian was computed inside
## stationarity region
A <- .Call(C_ARIMA_Gradtrans, as.double(coef), arma)
A <- A[mask, mask]
var <- crossprod(A, solve(res$hessian * n.used, A))
coef <- .Call(C_ARIMA_undoPars, coef, arma)
} else var <- if(no.optim) numeric() else solve(res$hessian * n.used)
trarma <- .Call(C_ARIMA_transPars, coef, arma, FALSE)
mod <- makeARIMA(trarma[[1L]], trarma[[2L]], Delta, kappa, SSinit)
val <- if(ncxreg > 0L)
arimaSS(x - xreg %*% coef[narma + (1L:ncxreg)], mod)
else arimaSS(x, mod)
sigma2 <- val[[1L]][1L]/n.used
}
value <- 2 * n.used * res$value + n.used + n.used * log(2 * pi)
aic <- if(method != "CSS") value + 2*sum(mask) + 2 else NA
nm <- NULL
if (arma[1L] > 0L) nm <- c(nm, paste0("ar", 1L:arma[1L]))
if (arma[2L] > 0L) nm <- c(nm, paste0("ma", 1L:arma[2L]))
if (arma[3L] > 0L) nm <- c(nm, paste0("sar", 1L:arma[3L]))
if (arma[4L] > 0L) nm <- c(nm, paste0("sma", 1L:arma[4L]))
if (ncxreg > 0L) {
nm <- c(nm, cn)
if(!orig.xreg) {
ind <- narma + 1L:ncxreg
coef[ind] <- S$v %*% coef[ind]
A <- diag(narma + ncxreg)
A[ind, ind] <- S$v
A <- A[mask, mask]
var <- A %*% var %*% t(A)
}
}
names(coef) <- nm
if(!no.optim) dimnames(var) <- list(nm[mask], nm[mask])
resid <- val[[2L]]
tsp(resid) <- xtsp
class(resid) <- "ts"
structure(list(coef = coef, sigma2 = sigma2, var.coef = var, mask = mask,
loglik = -0.5 * value, aic = aic, arma = arma,
residuals = resid, call = match.call(), series = series,
code = res$convergence, n.cond = ncond, nobs = n.used,
model = mod),
class = "Arima")
}
print.Arima <-
function (x, digits = max(3L, getOption("digits") - 3L), se = TRUE, ...)
{
cat("\nCall:", deparse(x$call, width.cutoff = 75L), "", sep = "\n")
if (length(x$coef)) {
cat("Coefficients:\n")
coef <- round(x$coef, digits = digits)
## use NROW as if all coefs are fixed there are no var.coef's
if (se && NROW(x$var.coef)) {
ses <- rep.int(0, length(coef))
ses[x$mask] <- round(sqrt(diag(x$var.coef)), digits = digits)
coef <- matrix(coef, 1L, dimnames = list(NULL, names(coef)))
coef <- rbind(coef, s.e. = ses)
}
print.default(coef, print.gap = 2)
}
cm <- x$call$method
if(is.null(cm) || cm != "CSS")
cat("\nsigma^2 estimated as ", format(x$sigma2, digits = digits),
": log likelihood = ", format(round(x$loglik, 2L)),
", aic = ", format(round(x$aic, 2L)), "\n", sep = "")
else
cat("\nsigma^2 estimated as ",
format(x$sigma2, digits = digits),
": part log likelihood = ", format(round(x$loglik,2)),
"\n", sep = "")
invisible(x)
}
predict.Arima <-
function (object, n.ahead = 1L, newxreg = NULL, se.fit = TRUE, ...)
{
myNCOL <- function(x) if (is.null(x)) 0 else NCOL(x)
rsd <- object$residuals
xr <- object$call$xreg
xreg <- if (!is.null(xr)) eval.parent(xr) else NULL
ncxreg <- myNCOL(xreg)
if (myNCOL(newxreg) != ncxreg)
stop("'xreg' and 'newxreg' have different numbers of columns")
class(xreg) <- NULL
xtsp <- tsp(rsd)
n <- length(rsd)
arma <- object$arma
coefs <- object$coef
narma <- sum(arma[1L:4L])
if (length(coefs) > narma) {
if (names(coefs)[narma + 1L] == "intercept") {
xreg <- cbind(intercept = rep(1, n), xreg)
newxreg <- cbind(intercept = rep(1, n.ahead), newxreg)
ncxreg <- ncxreg + 1L
}
xm <- if(narma == 0) drop(as.matrix(newxreg) %*% coefs)
else drop(as.matrix(newxreg) %*% coefs[-(1L:narma)])
}
else xm <- 0
if (arma[2L] > 0L) {
ma <- coefs[arma[1L] + 1L:arma[2L]]
if (any(Mod(polyroot(c(1, ma))) < 1))
warning("MA part of model is not invertible")
}
if (arma[4L] > 0L) {
ma <- coefs[sum(arma[1L:3L]) + 1L:arma[4L]]
if (any(Mod(polyroot(c(1, ma))) < 1))
warning("seasonal MA part of model is not invertible")
}
z <- KalmanForecast(n.ahead, object$model)
pred <- ts(z[[1L]] + xm, start = xtsp[2L] + deltat(rsd),
frequency = xtsp[3L])
if (se.fit) {
se <- ts(sqrt(z[[2L]] * object$sigma2),
start = xtsp[2L] + deltat(rsd),
frequency = xtsp[3L])
list(pred=pred, se=se)
}
else pred
}
makeARIMA <- function(phi, theta, Delta, kappa = 1e6,
SSinit = c("Gardner1980", "Rossignol2011"),
tol = .Machine$double.eps)
{
if(anyNA(phi)) warning(gettextf("NAs in '%s'", "phi"), domain=NA)
if(anyNA(theta)) warning(gettextf("NAs in '%s'", "theta"), domain=NA)
p <- length(phi); q <- length(theta)
r <- max(p, q + 1L); d <- length(Delta)
rd <- r + d
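## state-space form in the style of Gardner et al. (1980): Z is the
## observation vector, T the transition matrix, and V = R %o% R the
## innovations covariance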
Z <- c(1., rep.int(0, r-1L), Delta)
T <- matrix(0., rd, rd)
if(p > 0) T[1L:p, 1L] <- phi
if(r > 1L) {
ind <- 2:r
T[cbind(ind-1L, ind)] <- 1
}
if(d > 0L) {
T[r+1L, ] <- Z
if(d > 1L) {
ind <- r + 2:d
T[cbind(ind, ind-1)] <- 1
}
}
if(q < r - 1L) theta <- c(theta, rep.int(0, r-1L-q))
R <- c(1, theta, rep.int(0, d))
V <- R %o% R
h <- 0.
a <- rep(0., rd)
Pn <- P <- matrix(0., rd, rd)
if(r > 1L)
Pn[1L:r, 1L:r] <- switch(match.arg(SSinit),
"Gardner1980" = .Call(C_getQ0, phi, theta),
"Rossignol2011" = .Call(C_getQ0bis, phi, theta, tol),
stop("invalid 'SSinit'"))
else Pn[1L, 1L] <- if(p > 0) 1/(1 - phi^2) else 1
if(d > 0L) Pn[cbind(r+1L:d, r+1L:d)] <- kappa
list(phi=phi, theta=theta, Delta=Delta, Z=Z, a=a, P=P, T=T, V=V,
h=h, Pn=Pn)
}
coef.Arima <- function (object, ...) object$coef
vcov.Arima <- function (object, ...) object$var.coef
logLik.Arima <- function (object, ...) {
res <- if(is.na(object$aic)) NA
else structure(object$loglik, df = sum(object$mask) + 1, nobs = object$nobs)
class(res) <- "logLik"
res
}
## arima.sim() is in ./ts.R
| 18,997 | gpl-2.0 |
b15aa7e269200525f9dc0ef7208abbcc33437c48 | Monash-RNA-Systems-Biology-Laboratory/patseqers | adele/dev_timepoint_app/ui.R | library(shiny)
source("helper.R")
shinyUI(pageWithSidebar(
headerPanel(h3("Changes in gene expression after C.albicans infection")),
sidebarPanel(
selectInput("select.experiment", label = "Select experiment",
choices = list.dirs(full.names=F, recursive =F),
selected = list.dirs(full.names=F, recursive =F)[2]),
radioButtons("org.sel", "Choose which organism to plot",
choices= list("C.albicans" = 1, "Mouse" = 2),
selected = 1),
helpText("This is important when changing the dataset to look at. If one of the samples is not
displaying on the graph, check that the correct organism is selected"),
numericInput("num.filter", label = "Minimum count filter", value = 25, min = 0),
uiOutput("gene.select"),
radioButtons("plot.view", "View just the gene or the gene of interest in comparison with expression changes
overall",
choices= list("Only gene of interest" = 1, "Comparison with overall expression change" = 2),
selected = 1),
downloadButton("download.eps", label = "Download eps file")
),
mainPanel(
plotOutput("time.plot", width = "600px", height = "600px", click = "plot_click"),
#tableOutput("test.table"),
verbatimTextOutput("info.txt"),
helpText("You can click on the graph to get X & Y values. Note that the only measured data occur at the given
timepoints."),
helpText("Gene expressions refers to the mean of the normalised counts for the replicates of either condition
at each time point. The varistran package was used to normalised the raw counts.")
)
))
| 1,746 | gpl-2.0 |
feb4037aed2d3e385f3bda1e581e3c02520b9716 | uzh/ezRun | script/debug-bwa.R | # p2578
setwd("/scratch/gtan/debug/quickDebug")
library(ezRun)
param = list()
param[['cores']] = '8'
param[['ram']] = '16'
param[['scratch']] = '100'
param[['node']] = ''
param[['process_mode']] = 'SAMPLE'
param[['samples']] = 'C8102P,C5401P,C6701P,C8902-7P,C8301P'
param[['refBuild']] = 'Homo_sapiens/Ensembl/GRCh38.p10/Annotation/Release_91-2018-02-26'
param[['paired']] = 'true'
param[['algorithm']] = 'mem'
param[['cmdOptions']] = ''
param[['trimAdapter']] = 'false'
param[['trimLeft']] = '0'
param[['trimRight']] = '0'
param[['minTailQuality']] = '0'
param[['minAvgQuality']] = '0'
param[['minReadLength']] = '20'
param[['specialOptions']] = ''
param[['mail']] = 'ge.tan@fgcz.ethz.ch'
param[['dataRoot']] = '/srv/gstore/projects'
param[['resultDir']] = 'p2578/BWA_26020_2018-11-10--19-40-49'
output = list()
output[['Name']] = 'C8902-7P'
output[['BAM [File]']] = 'p2578/BWA_26020_2018-11-10--19-40-49/C8902-7P.bam'
output[['BAI [File]']] = 'p2578/BWA_26020_2018-11-10--19-40-49/C8902-7P.bam.bai'
output[['IGV Starter [Link]']] = 'p2578/BWA_26020_2018-11-10--19-40-49/C8902-7P-igv.jnlp'
output[['Species']] = 'Homo sapiens (human)'
output[['refBuild']] = 'Homo_sapiens/Ensembl/GRCh38.p10/Annotation/Release_91-2018-02-26'
output[['paired']] = 'true'
output[['refFeatureFile']] = ''
output[['strandMode']] = ''
output[['Read Count']] = '27955406'
output[['IGV Starter [File]']] = 'p2578/BWA_26020_2018-11-10--19-40-49/C8902-7P-igv.jnlp'
output[['IGV Session [File]']] = 'p2578/BWA_26020_2018-11-10--19-40-49/C8902-7P-igv.xml'
output[['PreprocessingLog [File]']] = 'p2578/BWA_26020_2018-11-10--19-40-49/C8902-7P_preprocessing.log'
output[['Condition [Factor]']] = ''
output[['Sample Id [B-Fabric]']] = 'bfs_164963'
output[['FragmentSize [Characteristic]']] = '0'
output[['SampleConc [Characteristic]']] = '85'
output[['Tube [Characteristic]']] = 'p2578_3757/16'
output[['Index [Characteristic]']] = 'TCCTGAGC'
output[['PlatePosition [Characteristic]']] = 'usermade_caf_'
output[['LibConc_100_800bp [Characteristic]']] = '0'
output[['LibConc_qPCR [Characteristic]']] = '0'
output[['InputAmount [Characteristic]']] = '0'
input = list()
input[['Name']] = 'C8902-7P'
input[['Condition']] = ''
input[['Read1']] = 'p2578/HiSeq2500_20171103_RUN396_o3757/20171103.B-C8902-7P_R1.fastq.gz'
input[['Read2']] = 'p2578/HiSeq2500_20171103_RUN396_o3757/20171103.B-C8902-7P_R2.fastq.gz'
input[['Species']] = 'Homo sapiens (human)'
input[['FragmentSize']] = '0'
input[['SampleConc']] = '85'
input[['Tube']] = 'p2578_3757/16'
input[['Index']] = 'TCCTGAGC'
input[['PlatePosition']] = 'usermade_caf_'
input[['LibConc_100_800bp']] = '0'
input[['LibConc_qPCR']] = '0'
input[['Adapter1']] = 'NA'
input[['Adapter2']] = 'NA'
input[['strandMode']] = 'NA'
input[['LibraryPrepKit']] = 'ATAC '
input[['EnrichmentMethod']] = 'None'
input[['InputAmount']] = '0'
input[['Read Count']] = '27955406'
input[['Sample Id']] = 'bfs_164963'
EzAppBWA$new()$run(input=input, output=output, param=param)
| 2,964 | gpl-3.0 |
a539dc810a518e58cf0368de42f854b0040d960e | debarros/CSIAccountabilityWkbk | crdc.R | #crdc.R
# This prepares data for the CRDC (Civil Rights Data Collection)
BEDSday = BedsDate(year = 2017)
EndDay = schoolYear(x = "end", y = BEDSday) - 29
# Read in the last Student Lite file from the relevant year
StudentLiteExtract = read.csv(file = file.choose(), header = F, stringsAsFactors = F)
colnames(StudentLiteExtract) = GetNiceColumnNames("STUDENT LITE", templates)[1:ncol(StudentLiteExtract)]
# Read in the last School Enrollment file from the relevant year
EnrollExt = read.csv(file = file.choose(), header = F, stringsAsFactors = F)
colnames(EnrollExt) = GetNiceColumnNames("SCHOOL ENTRY EXIT", templates)
EnrollExt$SCHOOLEXITDATEENROLLMENTEXITDATE[EnrollExt$SCHOOLEXITDATEENROLLMENTEXITDATE == ""] = as.character(schoolYear(x = "end", y = BEDSday))
# Calculate the reporting race
StudentLiteExtract$CRDC_Race = NA
StudentLiteExtract$CRDC_Race = StudentLiteExtract$ETHNICCODESHORTRACE1CODE
StudentLiteExtract$CRDC_Race[StudentLiteExtract$RACE2CODE != ""] = "M"
StudentLiteExtract$CRDC_Race[StudentLiteExtract$HISPANICETHNICITYINDICATORHISPANICLATINOETHNICITYINDICATOR == "Y"] = "H"
summary(factor(StudentLiteExtract$CRDC_Race))
# Add the PowerSchool ID
StudentLiteExtract$ID = powerschoolraw$ID[match(x = StudentLiteExtract$STUDENTIDSCHOOLDISTRICTSTUDENTID, table = powerschoolraw$student_number)]
# In the enrollment file, mark whether the enrollment covered BEDS day
EnrollExt$IncludesBEDS = T
EnrollExt$IncludesBEDS[EnrollExt$SCHOOLENTRYDATEENROLLMENTENTRYDATE > BEDSday] = F
EnrollExt$IncludesBEDS[EnrollExt$SCHOOLEXITDATEENROLLMENTEXITDATE < BEDSday] = F
# In the demographics file, mark whether the student was enrolled on beds day
StudentLiteExtract$EnrolledOnBEDS = F
for(i in 1:nrow(StudentLiteExtract)){
thisOne = sum(EnrollExt$IncludesBEDS & EnrollExt$STUDENTIDSCHOOLDISTRICTSTUDENTID == StudentLiteExtract$STUDENTIDSCHOOLDISTRICTSTUDENTID[i])
StudentLiteExtract$EnrolledOnBEDS[i] = thisOne > 0
}
# In the demographics file, add info about IDEA, 504, and ELL
StudentLiteExtract$IDEA = Workbook$IEP[match(StudentLiteExtract$STUDENTIDSCHOOLDISTRICTSTUDENTID, Workbook$`Local.ID.(optional)`)]
StudentLiteExtract[is.na(StudentLiteExtract$IDEA),4:8]
StudentLiteExtract$Five04 = Workbook$`504.plan?`[match(StudentLiteExtract$STUDENTIDSCHOOLDISTRICTSTUDENTID, Workbook$`Local.ID.(optional)`)]
StudentLiteExtract[is.na(StudentLiteExtract$Five04),4:8]
StudentLiteExtract$ELL = Workbook$LEP.Status[match(StudentLiteExtract$STUDENTIDSCHOOLDISTRICTSTUDENTID, Workbook$`Local.ID.(optional)`)]
StudentLiteExtract[is.na(StudentLiteExtract$ELL),4:8]
# Get an enrollment file from the following year
NextLite = read.csv(file = file.choose(), header = F, stringsAsFactors = F)
colnames(NextLite) = GetNiceColumnNames("STUDENT LITE", templates)[1:ncol(NextLite)]
# Add in subsequent grade levels to StudentLiteExtract
StudentLiteExtract$NextGrade = NextLite$CURRENTGRADELEVELGRADELEVEL[match(StudentLiteExtract$STUDENTIDSCHOOLDISTRICTSTUDENTID, NextLite$STUDENTIDSCHOOLDISTRICTSTUDENTID)]
# Mark whether students were retained
StudentLiteExtract$Retained = StudentLiteExtract$CURRENTGRADELEVELGRADELEVEL == StudentLiteExtract$NextGrade
StudentLiteExtract$Retained[is.na(StudentLiteExtract$Retained)] = F
# Mark whether the students were still enrolled as of the arbitrary end of year date
StudentLiteExtract$EndActive = T
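# betterMax() is assumed to come from the workbook's helper functions and to
# return the latest date while ignoring NA/blank values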
for(i in 1:nrow(StudentLiteExtract)){
curID = StudentLiteExtract$STUDENTIDSCHOOLDISTRICTSTUDENTID[i]
endDates = EnrollExt$SCHOOLEXITDATEENROLLMENTEXITDATE[EnrollExt$STUDENTIDSCHOOLDISTRICTSTUDENTID == curID]
endDates = betterMax(endDates)
StudentLiteExtract$EndActive[i] = endDates > EndDay
}
outfile = StudentLiteExtract[,c("STUDENTIDSCHOOLDISTRICTSTUDENTID", "LASTNAMESHORTSTUDENTSLASTNAME", "FIRSTNAMESHORTSTUDENTSFIRSTNAME",
"CURRENTGRADELEVELGRADELEVEL", "ID","GENDERCODEGENDERDESCRIPTION", "CRDC_Race", "IDEA", "Five04",
"ELL", "Retained", "EnrolledOnBEDS", "EndActive")]
write.csv(x = outfile, file = paste0(OutFolder, "crdc student table.csv"))
| 4,087 | gpl-3.0 |
03eda555017ea544b50990732518082e4c9fd391 | shraddhapai/Rutils | makeColorLighter.R | # #########################################
# makeColorLighter.R
# An R utility to make an RGB colour lighter. Useful in situations where we want to
# plot a lighter confidence interval band across the mean trendline of a dataseries.
#
# Requires: hsv2rgb.R also part of Rcolorutil
#
# Example usage:
# > pal <- c("#D7191C","#FDAE61","#ABDDA4","#2B83BA")
# > y <- sapply(pal, makeLighter)
# > par(mfrow=c(2,1))
# > barplot(1:length(pal), col=pal,main="Original colours") ## original colours
# > barplot(1:length(y),col=y, main="Lighter colours")
# Copyright 2014 Shraddha Pai <Shraddha.Pai@camh.ca>
#
# LICENSE:
# makeColorLighter.R is part of "Rcolorutil"
# "Rcolorutil" is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "Rcolorutil" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
makeLighter <- function(rgb_col) {
source("hsv2rgb.R")
x <- c(
strtoi(paste("0x",substr(rgb_col,2,3),sep="")),
strtoi(paste("0x",substr(rgb_col,4,5),sep="")),
strtoi(paste("0x",substr(rgb_col,6,7),sep=""))
);
#cat(sprintf("RGB = { %i, %i, %i} \n", x[1],x[2],x[3]))
y <- rgb2hsv(x[1],x[2],x[3],max=255)
#cat("HSV : Before")
#print(y)
y[3] <- 1 # min(1,y[3] + 0.4);
y[2] <- y[2]/3 # increase brightness, lower saturation
#cat("HSV: After")
#print(y)
z <- hsv2rgb(y[1]*360, y[2],y[3])
z
### (RGB) returns lighter RGB object
} | 1,728 | gpl-3.0 |
80434261e701646071de7e79686f821a526936a7 | andeek/conclique-gibbs-sampler | simulation/big-network/DSATUR.R | getNColors <- function(x) {
if (is(x, "matrix")) {
adj_mat <- x
diag(adj_mat) <- FALSE
} else if (is(x, "SpatialPolygons")) {
adj_mat <- getAM(x)
} else {
stop("x must be an adjacency matrix or a SpatialPolygons* object.")
}
nColors <- length(unique(dsatur(adj_mat)))
return(nColors)
}
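## usage sketch (hypothetical 4-cycle graph, whose chromatic number is 2):
## adj <- matrix(FALSE, 4, 4)
## adj[cbind(1:4, c(2, 3, 4, 1))] <- TRUE
## adj <- adj | t(adj)
## getNColors(adj)  # expected: 2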
## Get neighboring vertices
getNeighbors <- function(adj_mat, node_index) {
nb <- which(adj_mat[node_index,])
nb <- nb[!(nb==node_index)]
return(nb)
}
## Count occurrences of color in given nodes
getAmountColor <- function(node_indexes, color_number, coloring) {
node_colors <- coloring[node_indexes]
return(sum(node_colors==color_number))
}
## Greedy DSATUR graph coloring algorithm
# Reference: D.Brelaz (1979) - New Methods to color the vertices of a graph. Communications of the ACM: 22(4).
# Ported from Python implementation by Andrei Novikov (pyclustering@yandex.ru)
# Under GNU Public license
dsatur <- function(x, coloring=NULL) {
if (is.null(coloring)) { # Set up vertex coloring from scratch
color_counter = 1
adj_mat <- x
diag(adj_mat) <- FALSE
degrees = list()
saturation_degrees = rep(0, nrow(adj_mat))
coloring = rep(0, nrow(adj_mat))
uncolored_vertices = 1:nrow(adj_mat)
index_maximum_degree = 0
maximum_degree = 0
for (index_node in 1:nrow(adj_mat)) {
# Fill degree of nodes in the input graph
degrees[[length(degrees)+1]] <- c(sum(adj_mat[index_node,]), index_node)
# And find node with maximal degree at the same time.
if ((degrees[[index_node]])[1] > maximum_degree) {
maximum_degree <- (degrees[[index_node]])[1]
index_maximum_degree <- index_node
}
}
# Update saturation
neighbors = getNeighbors(adj_mat, index_maximum_degree)
for (index_neighbor in neighbors){
saturation_degrees[index_neighbor] <- saturation_degrees[index_neighbor] + 1
}
# Coloring the first node
coloring[index_maximum_degree] = color_counter
uncolored_vertices <- uncolored_vertices[-index_maximum_degree]
} else { # Set up vertex coloring given input coloring
color_counter = max(coloring)
adj_mat <- x
diag(adj_mat) <- FALSE
degrees = list()
saturation_degrees = rep(0, nrow(adj_mat))
uncolored_vertices = 1:nrow(adj_mat)
uncolored_vertices <- uncolored_vertices[coloring==0]
# Fill degree of nodes in the input graph and update saturation
for (index_node in 1:nrow(adj_mat)) {
# Set degree
degrees[[length(degrees)+1]] <- c(sum(adj_mat[index_node,]), index_node)
# Set saturation
index_neighbors <- getNeighbors(adj_mat, index_node)
index_saturation <- 0
for (number_color in 1:color_counter) {
if (getAmountColor(index_neighbors, number_color, coloring) > 0) {
index_saturation <- index_saturation + 1
}
}
saturation_degrees[index_node] <- index_saturation
}
}
  # Color the remaining vertices
while(length(uncolored_vertices) > 0) {
# Get maximum saturation degree
maximum_satur_degree = -1
for (index in uncolored_vertices) {
if (saturation_degrees[index] > maximum_satur_degree) {
maximum_satur_degree = saturation_degrees[index]
}
}
# Get list of indexes with maximum saturation degree
indexes_maximum_satur_degree <- c()
for (index in uncolored_vertices) {
if (saturation_degrees[index] == maximum_satur_degree) {
indexes_maximum_satur_degree <- c(indexes_maximum_satur_degree, index)
}
}
coloring_index = indexes_maximum_satur_degree[1]
    if (length(indexes_maximum_satur_degree) > 1) { # There is more than one node with maximum saturation
# Find node with maximum degree
maximum_degree = -1
for (index in indexes_maximum_satur_degree) {
degree <- (degrees[[index]])[1]
node_index <- (degrees[[index]])[2]
if (degree > maximum_degree) {
coloring_index = node_index
maximum_degree = degree
}
}
}
# Coloring
node_index_neighbors = getNeighbors(adj_mat, coloring_index)
for (number_color in 1:(color_counter)) {
if (getAmountColor(node_index_neighbors, number_color, coloring) == 0) {
coloring[coloring_index] = number_color
break;
}
}
# If it has not been colored then
if (coloring[coloring_index] == 0) {
color_counter <- color_counter + 1 # Add new color
coloring[coloring_index] = color_counter
}
# Remove node from uncolored set
uncolored_vertices <- uncolored_vertices[!(uncolored_vertices==coloring_index)]
# Update degree of saturation
for (index_neighbor in node_index_neighbors) {
subneighbors = getNeighbors(adj_mat, index_neighbor)
if (getAmountColor(subneighbors, coloring[coloring_index], coloring) == 1) {
saturation_degrees[index_neighbor] <- saturation_degrees[index_neighbor] + 1
}
}
}
# Return final coloring
return(coloring)
} | 5,249 | gpl-3.0 |
b513e4ecbab9a10ddfc2b80b99d94caf2b8c7c1c | malachig/genome | lib/perl/Genome/Model/Tools/R/CallR.pm.U_test.R | ########################################
########## Mann-Whitney U test #########
########################################
utest <- function(name1=NULL,name2=NULL,nameAll=NULL,normalize=0,normalizedFile=NULL){
############################ read data ##############################
data1 <- read.table(name1,sep='\t')
data2 <- read.table(name2,sep='\t')
if(normalize == 1){
a <- median(data2$V3)
data1_ <- 2*data1$V3/a
data2_ <- 2*data2$V3/a
}
else{
data1_ <- data1$V1
data2_ <- data2$V1
}
u <- sprintf("%e",wilcox.test(data1_,data2_,alternative=c("two.sided"))$p.value)
write.table(u,file=nameAll,append=FALSE,row.names=FALSE,col.names=FALSE,eol="")
if(normalize == 1){
write.table(data1_,file=normalizedFile,append=FALSE,row.names=FALSE,col.names=FALSE)
}
}
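## usage sketch (hypothetical file names; with normalize = 1 both inputs are
## expected to carry read counts in column V3, otherwise values in column V1):
## utest("tumor.cn", "normal.cn", "utest_pvalue.txt",
##       normalize = 1, normalizedFile = "tumor_normalized.txt")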
| 789 | lgpl-3.0 |
b68c3075f75d204aa26d808b5ce7a6d5879ec0f9 | cran/eqtl | R/drop.peakfeat.R | #####################################################################
#
# drop.peakfeat.R
#
# copyright (c) 2008-3, Ahmid A Khalili
#
# last modified Jul, 2008
# first written Mar, 2008
# Licensed under the GNU General Public License version 2 (June, 1991)
#
# Part of the R/eqtl package
# Contains: drop.peakfeat
#
######################################################################
######################################################################
#
# drop.peakfeat: Erase chosen peak features informations from
# a \code{peak} object.
#
######################################################################
`drop.peakfeat` <-
function(peak,feat)
{
require(qtl)
if ( !all(attr(peak,'class',exact=TRUE) %in% c('peak','list')) )
stop("Input should have class \"peak\".")
if ( missing(peak) )
stop("Argument 'peak' unspecified.")
if ( !is.vector(feat) || !any(feat %in% attr(peak,'features',exact=TRUE)) )
stop("Argument 'peak' misspecified: Expecting a vector containing the 'peak' features attributes (",attr(peak,'features',exact=TRUE),")")
for (i in 1:length(peak)){
for (y in 1:length(peak[[i]])){
if (!is.na(peak[[i]][y])){
for ( f in feat ){
if ( f %in% names(peak[[i]][[y]] ) ){
col <- grep(f,names(peak[[i]][[y]]))
peak[[i]][[y]] <- peak[[i]][[y]][-col]
} else print("feat is not defined in peak")
}
}
}
}
attributes(peak)$features <- attributes(peak)$features[ ! attr(peak,'features') %in% feat ]
try(return(peak),silent=FALSE)
}
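## usage sketch (hypothetical 'peak' object and feature names):
## peak2 <- drop.peakfeat(peak, feat = c("peak.bp", "si.quality"))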
| 1,534 | gpl-2.0 |
14bf36635e0da79c482c2de97a23c12c59e71fd4 | rahuldhote/jpmml-evaluator | pmml-rattle/src/test/R/ClusteringTest.R | library("amap")
library("pmml")
library("rattle")
irisData = readCsv("csv/Iris.csv")
irisData$Species = NULL
writeIris = function(clusters, affinities, file){
result = data.frame("predictedValue" = clusters)
if(!is.null(affinities)){
result = data.frame(result, affinities)
}
writeCsv(result, file)
}
generateHierarchicalClusteringIris = function(){
hcluster = hcluster(irisData)
centers = centers.hclust(irisData, hcluster, 10)
saveXML(pmml(hcluster, centers = centers), "pmml/HierarchicalClusteringIris.pmml")
clusters = predict(hcluster, irisData, irisData, 10)
writeIris(clusters, NULL, "csv/HierarchicalClusteringIris.csv")
}
generateKMeansIris = function(){
set.seed(42)
kmeans = kmeans(irisData, 3)
kmeansPmml = pmml(kmeans)
clusteringModelNode = kmeansPmml["ClusteringModel"][[1]]
outputNode = xmlNode("Output")
outputFieldNodes = list(
xmlNode(name = "OutputField", attrs = list(name = "predictedValue", feature = "predictedValue")),
xmlNode(name = "OutputField", attrs = list(name = "affinity_1", feature = "affinity", value = "1")),
xmlNode(name = "OutputField", attrs = list(name = "affinity_2", feature = "affinity", value = "2")),
xmlNode(name = "OutputField", attrs = list(name = "affinity_3", feature = "affinity", value = "3"))
)
outputNode = append.xmlNode(outputNode, outputFieldNodes)
clusteringModelNode["Output"][[1]] = outputNode
kmeansPmml["ClusteringModel"][[1]] = clusteringModelNode
saveXML(kmeansPmml, "pmml/KMeansIris.pmml")
affinity = function(center){
return (colSums(apply(irisData, 1, function(x) { ((x - center) ^ 2) })))
}
clusters = predict(kmeans, irisData)
affinities = data.frame("affinity_1" = affinity(kmeans$centers[1, ]), "affinity_2" = affinity(kmeans$centers[2, ]), "affinity_3" = affinity(kmeans$centers[3, ]))
writeIris(clusters, affinities, "csv/KMeansIris.csv")
}
generateHierarchicalClusteringIris()
generateKMeansIris() | 1,926 | agpl-3.0 |
96d8ad61b7d641c7edd8bf06f0401ce63acc774b | USGS-VIZLAB/gages-through-ages | scripts/fetch/getData_siteRecords.R | #Run this independent of vizlab - will require 8 + hours of DL time
#generates csv output currently retrieved from sciencebase
#one part is parellized
#get # years of record for every site
library(dplyr)
library(stringr)
library(data.table)
library(dataRetrieval)
library(lubridate)
library(parallel)
source('scripts/fetch/helperFunctions.R')
dir.create('chunks',showWarnings = FALSE)
#loop over HUC regions
hucs <- str_pad(1:21, width = 2, pad = "0")
setAccess("internal")
allDF <- data.frame()
for(h in hucs) {
#dataRet call
hucDF <- select(readNWISdata(huc = h, hasDataTypeCd = "dv", service = "site",
seriesCatalogOutput = TRUE, parameterCd = "00060"),
agency_cd, site_no, site_tp_cd, station_nm, dec_lat_va, dec_long_va,
huc_cd, data_type_cd, parm_cd, begin_date, end_date, count_nu)
hucDF <- filter(hucDF, data_type_cd == "dv", parm_cd == "00060")
hucDF <- mutate(hucDF, begin_date = as.character(begin_date),
end_date=as.character(end_date))
#filter & append
allDF <- bind_rows(allDF, hucDF)
}
#convert to dates
allDF <- mutate(allDF, begin_date = as.Date(begin_date),
end_date = as.Date(end_date),
dayRange = as.numeric(end_date - begin_date),
intDaysRecord = count_nu - 2, #assuming record exists on first and last days
intDaysAll = end_date - begin_date - 1,
diff = intDaysAll - intDaysRecord)
# x <- allDF[c(duplicated(allDF$site_no),duplicated(allDF$site_no, fromLast = TRUE)),]
#get sites where ratio days/ years is off
saveRDS(allDF, "allDF.rds")
allDF <- readRDS("allDF.rds")
completeSiteDF <- filter(allDF,
diff <= 10,
count_nu > 355) #can't have less than 355 days in any non-start/end year
incompleteSiteDF <- filter(allDF, diff > 10)
incompleteSiteDF <- filter(incompleteSiteDF, count_nu >= 355)
#want long df with row for each site/year
#For complete sites, check if start and end years should be counted
longOK <- checkCompleteYears(completeSiteDF)
longOK <- longOK[!duplicated(longOK),]
# fwrite(longOK, file = 'sitesYearsComplete_latLon_355_NEW.csv')
# deal with multiple lines for some sites in allDF; allDF_oneLineSite and
# longOK_join are needed again below when joining lat/lon onto the incomplete sites
allDF_oneLineSite <- allDF[!duplicated(allDF$site_no),]
longOK_join <- left_join(longOK, allDF_oneLineSite, by = "site_no")
fwrite(longOK_join, file = 'sitesYearsComplete_latLon_355.csv')
#get data for incomplete sites
#check what sites I already downloaded - save 6000 some sites
dlSites <- checkDownloadedSites('old_chunks')
toDownloadDF <- filter(incompleteSiteDF, !site_no %in% dlSites)
#don't repeat - there are sites with multiple measurement points
toDownloadSites <- unique(toDownloadDF$site_no)
#chunk by 200 sites
reqBks <- seq(1,length(toDownloadSites),by=200)
for(i in reqBks) {
sites <- na.omit(toDownloadSites[i:(i+199)])
print(paste('starting', i))
  all_sites <- tryCatch({
    currentSitesDF <- readNWISdv(siteNumber = sites, parameterCd = "00060")
    fwrite(currentSitesDF, file = file.path('chunks', paste0('newChunk', i)))
  },
  error=function(cond) {
    message("***************Errored on ", i, "***********\n")
    return(NULL)  # skip this chunk; earlier chunks are already on disk
  })
print(paste("Finished sites", i, "through", i+199))
}
files <- list.files(c('chunks','old_chunks'), full.names = TRUE)

# serial alternative (slow; equivalent to the parallel version below):
# completeFromIncomplete <- data.frame()
# for(i in files){
#   completeFromIncomplete <- bind_rows(completeFromIncomplete, yearsFunc(i))
# }

cl <- makeCluster(4)
allIncompleteYears <- clusterApply(cl, fun = yearsFunc, x = files)
stopCluster(cl)
#reassemble to DF, write
allIncompleteDF <- do.call("bind_rows", allIncompleteYears)
incomplete_lat_lon <- left_join(allIncompleteDF, allDF_oneLineSite, by = "site_no")
fwrite(incomplete_lat_lon, file = "incomplete_lat_lon_355.csv")
allSites_355 <- bind_rows(longOK_join, incomplete_lat_lon)
fwrite(allSites_355, file = "allSitesYears_355.csv", quote = TRUE)
| 4,016 | cc0-1.0 |
5c16d744f245ac0a371890b1a8c6e58cae77e480 | sebastianueckert/mirt | R/createGroup.R | #' Create a user defined group-level object with correct generic functions
#'
#' Initializes the proper S4 class and methods necessary for mirt functions to use in estimation for defining
#' customized group-level functions. To use the defined objects pass to the
#' \code{mirt(..., customGroup = OBJECT)} command, and ensure that the class parameters are properly labeled.
#'
#' @aliases createGroup
#' @param par a named vector of the starting values for the parameters
#' @param est a logical vector indicating which parameters should be freely estimated by default
#' @param den the probability density function given the Theta/ability values.
#' First input contains a vector of all the defined parameters and the second input
#' must be a matrix called \code{Theta}.
#' Function also must return a \code{numeric} vector object corresponding to the associated densities for
#' each row in the \code{Theta} input
#' @param nfact number of factors required for the model. E.g., for unidimensional models with only one
#' dimension of integration \code{nfact = 1}
#' @param standardize logical; use standardization of the quadrature table method proposed by
#' Woods and Thissen (2006)? If TRUE, the logical elements named \code{'MEAN_1'} and \code{'COV_11'}
#' can be included in the parameter vector, and when these values are set to FALSE in the \code{est}
#' input the E-table will be standardized to these fixed values (e.g.,
#' \code{par <- c(a1=1, d=0, MEAN_1=0, COV_11=1)} with \code{est <- c(TRUE, TRUE, FALSE, FALSE)} will
#' standardize the E-table to have a 0 mean and unit variance)
#' @param gr gradient function (vector of first derivatives) of the log-likelihood used in
#' estimation. The function must be of the form \code{gr(x, Theta)}, where \code{x} is the object
#' defined by \code{createGroup()} and \code{Theta} is a matrix of latent trait parameters
#' @param hss Hessian function (matrix of second derivatives) of the log-likelihood used in
#' estimation. If not specified a numeric approximation will be used.
#' The input is identical to the \code{gr} argument
#' @param gen a function used when \code{GenRandomPars = TRUE} is passed to the estimation function
#' to generate random starting values. Function must be of the form \code{function(object) ...}
#' and must return a vector with properties equivalent to the \code{par} object. If NULL,
#' parameters will remain at the defined starting values by default
#' @param lbound optional vector indicating the lower bounds of the parameters. If not specified
#' then the bounds will be set to -Inf
#' @param ubound optional vector indicating the lower bounds of the parameters. If not specified
#' then the bounds will be set to Inf
#' @param derivType if the \code{gr} or \code{hss} terms are not specified this type will be used to
#' obtain them numerically. Default is 'Richardson'
#'
#' @author Phil Chalmers \email{rphilip.chalmers@@gmail.com}
#' @references
#' Chalmers, R., P. (2012). mirt: A Multidimensional Item Response Theory
#' Package for the R Environment. \emph{Journal of Statistical Software, 48}(6), 1-29.
#' \doi{10.18637/jss.v048.i06}
#' @keywords createGroup
#' @export createGroup
#' @examples
#'
#' # normal density example, N(mu, sigma^2)
#' den <- function(obj, Theta) dnorm(Theta, obj@par[1], sqrt(obj@par[2]))
#' par <- c(mu = 0, sigma2 = .5)
#' est <- c(FALSE, TRUE)
#' lbound <- c(-Inf, 0)
#' grp <- createGroup(par, est, den, nfact = 1, lbound=lbound)
#'
#' dat <- expand.table(LSAT6)
#' mod <- mirt(dat, 1, 'Rasch')
#' modcustom <- mirt(dat, 1, 'Rasch', customGroup=grp)
#'
#' coef(mod)
#' coef(modcustom)
#'
createGroup <- function(par, est, den, nfact, standardize = FALSE,
gr = NULL, hss = NULL, gen = NULL,
lbound = NULL, ubound = NULL, derivType = 'Richardson'){
if(missing(par)) missingMsg('par')
if(missing(est)) missingMsg('est')
if(missing(den)) missingMsg('den')
if(missing(nfact)) missingMsg('nfact')
safe_den <- function(obj, Theta){
d <- obj@den(obj, Theta)
d <- ifelse(d < 1e-300, 1e-300, d)
as.vector(d)
}
names(est) <- names(par)
dummyfun <- function(...) return(NULL)
usegr <- usehss <- TRUE
if(is.null(gr)){
gr <- dummyfun
usegr <- FALSE
}
if(is.null(hss)){
hss <- dummyfun
usehss <- FALSE
}
if(is.null(gen))
gen <- function(object) object@par
lbound <- if(!is.null(lbound)) lbound else rep(-Inf, length(par))
ubound <- if(!is.null(ubound)) ubound else rep(Inf, length(par))
Nans <- rep(NaN,length(par))
if(any(names(par) %in% c('g', 'u', 'PI')) || any(names(est) %in% c('g', 'u', 'PI')))
stop('Parameter names cannot be \'g\', \'u\', or \'PI\', please change.', call.=FALSE)
return(new('GroupPars', par=par, est=est, parnames=names(est), dentype='custom',
den=den, safe_den=safe_den, nfact=as.integer(nfact), standardize=standardize,
itemclass= -999L, any.prior=FALSE, lbound=lbound, usegr=usegr, usehss=usehss,
ubound=ubound, gr=gr, hss=hss, gen=gen, derivType=derivType,
prior.type=rep(0L, length(par)), prior_1=Nans, prior_2=Nans))
}
| 5,276 | gpl-2.0 |
b56af4788dcf2b74e723ba98e729eb182424722c | himalayajung/neuroR | data/caret_feature_selection.R | require(caret)
require(randomForest)
require(doParallel)
require(foreach)
source('../Rfunctions.R') # R helper functions
## Settings
set.seed(123)
# brain morphometric properties
features=list('_volume','_area','_thickness$','_thicknessstd','_foldind','_meancurv','_gauscurv','_all')
# features=list('_volume')
scale_by_TIV=1 # should you normalize the volume features by TIV??
data_splitting=0
## Data
df=read.csv('../FS.csv',row.names=1)
df_imputed=read.csv('../FS_imputed.csv',row.names=1)
df=df[df$sex==1,] # select only male subjects
df_imputed=df_imputed[df_imputed$sex==1,]
df$ADOS=df_imputed$ADOS
df$control=as.factor(df$control)
df=df[df$site=="KKI",] # choose individual site if necessary
df_imputed=df_imputed[df_imputed$site=="KKI",]
phenotype=df_imputed[c("age","sex","VIQ","PIQ","FIQ")]
## setup parallel backend to use multiple processors
n_cluster=22
cl = makeCluster(n_cluster)
registerDoParallel(cl)
# Loop along the features
for (feature in features){
print(paste('************************',feature,'************************'))
# creating feature specific data
if (feature=="_all") {
d = df[11:length(df)]
col_vol = grep("_volume",names(d))
TIV = df$EstimatedTotalIntraCranialVol_volume
d[col_vol] = as.data.frame(sweep(data.matrix(d[col_vol]),1,TIV,'/'))
d=d[-length(d)]
} else{
d=df[grep(feature,names(df))] }
if (feature=="_volume" && scale_by_TIV==1){
TIV = d$EstimatedTotalIntraCranialVol_volume
d = as.data.frame(sweep(data.matrix(d),1,TIV,'/'))
d = d[-length(d)]}
## Preprocessing
# Remove near zero variance predictors
nzv = nearZeroVar(d)
if(length(nzv)>0){
d = d[, -nzv]}
# Identifying Correlated Predictors
highlyCor = findCorrelation(cor(d),cutoff=0.99)
if(length(highlyCor)>0){
d = d[,-highlyCor]}
# Linear Dependencies --commented for individual sites
# comboInfo = findLinearCombos(as.matrix(d))
# print(comboInfo)
# if(length(comboInfo$remove)>0){
# d=d[,-comboInfo$remove]}
## Data Splitting
if(data_splitting==1){
trainIndex = createDataPartition(as.factor(df$control), p=0.8,list = FALSE) # as.factor makes sure that the subjects are evenly sampled from each ADOS score
Train = d[trainIndex,]
Test = d[-trainIndex,]
yTrain = df$control[trainIndex]
yTest = df$control[-trainIndex]} else{
Train = d
yTrain = df$control
}
# define the control using a random forest selection function
control = rfeControl(functions=rfFuncs, method = "LOOCV")
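  # sizes=10 asks RFE to score a 10-predictor subset (plus the full set);
  # rfFuncs ranks predictors by random-forest importance under LOOCV resampling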
# run the RFE algorithm
rfProfile = rfe(Train, yTrain, sizes=10, rfeControl=control)
# save(rfProfile,file=paste0('feature_selection/kki/','RFE',feature,'_male.Rdata'))
# list the chosen features
print(predictors(rfProfile))
cat('Resamples')
print(head(rfProfile$resample))
# plot the results
p = plot(rfProfile, type=c("g", "o"),main=feature)
print(p)
}
stopCluster(cl)
| 2,948 | mit |
781a41d935d1390df1c2d68220a4af4f06c75a08 | BlueGranite/Microsoft-R-Resources | Microsoft-R-Open/WebinarPurl.R | library(rmarkdown)
library(knitr)
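# knit() renders the Rmd; purl() extracts only its R code chunks into a standalone .R script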
knit("BlueGranite R Demo.Rmd")
purl("BlueGranite R Demo.Rmd")
| 104 | mit |
bfeeebd655e70fd0f00a71cfc1b665663812b367 | mizumot/chi | ui.R | library(shiny)
library(shinyAce)
shinyUI(bootstrapPage(
headerPanel("Chi-square Test"),
########## Adding loading message #########
tags$head(tags$style(type="text/css", "
#loadmessage {
position: fixed;
top: 0px;
left: 0px;
width: 100%;
padding: 10px 0px 10px 0px;
text-align: center;
font-weight: bold;
font-size: 100%;
color: #000000;
background-color: #CCFF66;
z-index: 105;
}
")),
conditionalPanel(condition="$('html').hasClass('shiny-busy')",
tags$div("Loading...",id="loadmessage")),
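    # the conditionalPanel above shows the banner whenever Shiny reports a busy state ('shiny-busy' class on <html>)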
########## Added up until here ##########
mainPanel(
tabsetPanel(position = "left", selected = "Test of Independence (Tabulated data)",
tabPanel("Test of goodness of fit (Raw data)",
h2("Test of goodness of fit (Raw data)"),
h4("One nominal variable"),
p('Note: Input values must be separated by tabs. Copy and paste from Excel/Numbers.'),
p(HTML("<b><div style='background-color:#FADDF2;border:1px solid black;'>Your data needs to have the header (variable names) in the first row. Missing values should be indicated by a period (.) or NA.</div></b>")),
aceEditor("text1", value="L1\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nJapanese\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nThai\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese\nChinese", mode="r", theme="cobalt"),
br(),
h3("Contingency table"),
verbatimTextOutput("data1.out"),
br(),
h3("Test result"),
verbatimTextOutput("test1.out"),
br(),
h3("Plot"),
plotOutput("pPlot1"),
br(),
br(),
strong('R session info'),
verbatimTextOutput("info1.out")
),
tabPanel("Test of goodness of fit (Tabulated data)",
h2("Test of goodness of fit (Tabulated data)"),
h4("One nominal variable"),
p('Note: Input values must be separated by tabs. Copy and paste from Excel/Numbers.'),
p(HTML("<b><div style='background-color:#FADDF2;border:1px solid black;'>Your data needs to have the header (variable names) in the first row. Missing values should be indicated by a period (.) or NA.</div></b>")),
aceEditor("text2", value="Japanese\tThai\tChinese\n18\t24\t48", mode="r", theme="cobalt"),
br(),
h3("Contingency table"),
verbatimTextOutput("data2.out"),
br(),
h3("Test result"),
verbatimTextOutput("test2.out"),
br(),
h3("Plot"),
plotOutput("pPlot2"),
br(),
br(),
strong('R session info'),
verbatimTextOutput("info2.out")
),
tabPanel("Test of Independence (Raw data)",
h2("Test of Independence (Raw data)"),
h4("Two or more than two nominal variables"),
p('Note: Input values must be separated by tabs. Copy and paste from Excel/Numbers.'),
p(HTML("<b><div style='background-color:#FADDF2;border:1px solid black;'>Your data needs to have the header (variable names) in the first row. Missing values should be indicated by a period (.) or NA.</div></b>")),
aceEditor("text3", value="Sex\tEffect\nM\tNo\nW\tNo\nW\tNo\nM\tNo\nM\tYes\nM\tYes\nM\tYes\nM\tNo\nW\tYes\nM\tNo\nW\tYes\nM\tNo\nM\tYes\nM\tNo\nM\tNo\nM\tYes\nW\tYes\nW\tYes\nW\tYes\nW\tYes\nW\tYes\nM\tYes\nM\tNo\nM\tNo\nM\tYes\nM\tYes\nW\tYes\nM\tNo\nM\tYes\nW\tYes\nM\tNo\nM\tNo\nW\tYes\nW\tYes\nW\tYes\nW\tYes\nM\tNo\nW\tNo\nW\tYes\nM\tYes\nW\tYes\nM\tNo\nM\tYes\nW\tYes\nM\tYes\nW\tYes\nM\tYes\nM\tNo\nM\tNo\nW\tNo\nW\tNo\nM\tYes\nW\tNo\nM\tYes\nW\tYes\nW\tYes\nM\tNo\nM\tNo\nM\tYes\nW\tYes\nM\tNo\nW\tYes\nW\tYes\nM\tYes\nW\tNo\nW\tYes\nM\tNo\nW\tYes\nW\tNo\nM\tYes",mode="r", theme="cobalt"),
br(),
h3("Contingency table"),
verbatimTextOutput("data3.out"),
br(),
h3("Test result"),
verbatimTextOutput("test3.out"),
br(),
h3("Plot"),
plotOutput("pPlot3"),
br(),
plotOutput("mPlot3", height = "550px"),
br(),
br(),
strong('R session info'),
verbatimTextOutput("info3.out")
),
tabPanel("Test of Independence (Tabulated data)",
h2("Test of Independence (Tabulated data)"),
h4("Two or more than two nominal variables"),
p('Note: Input values must be separated by tabs. Copy and paste from Excel/Numbers.'),
p(HTML("<b><div style='background-color:#FADDF2;border:1px solid black;'>Your data needs to have the header (variable names) in the first row. Missing values should be indicated by a period (.) or NA.</div></b>")),
aceEditor("text4", value="\tNo\tYes\nM\t20\t18\nW\t8\t24", mode="r", theme="cobalt"),
br(),
h3("Contingency table"),
verbatimTextOutput("data4.out"),
br(),
h3("Test result"),
verbatimTextOutput("test4.out"),
br(),
h3("Plot"),
plotOutput("pPlot4"),
br(),
plotOutput("mPlot4", height = "550px"),
br(),
br(),
strong('R session info'),
verbatimTextOutput("info4.out")
),
tabPanel("About",
strong('Note'),
p('This web application is developed with',
a("Shiny.", href="http://www.rstudio.com/shiny/", target="_blank"),
''),
br(),
strong('List of Packages Used'), br(),
code('library(shiny)'),br(),
code('library(shinyAce)'),br(),
code('library(pwr)'),br(),
code('library(vcd)'),br(),
br(),
strong('Code'),
p('Source code for this application is based on',
a('"The handbook of Research in Foreign Language Learning and Teaching" (Takeuchi & Mizumoto, 2012).', href='http://mizumot.com/handbook/', target="_blank")),
p('The code for this web application is available at',
a('GitHub.', href='https://github.com/mizumot/chi', target="_blank")),
p('If you want to run this code on your computer (in a local R session), run the code below:',
br(),
code('library(shiny)'),br(),
code('runGitHub("chi","mizumot")')
),
p('I referred to',
a("js-STAR", href="http://www.kisnet.or.jp/nappa/software/star/", target="_blank"),
'for some parts of the codes. I would like to thank the authors of js-STAR, the very fast and excellent online software.'),
br(),
strong('Citation in Publications'),
p('Mizumoto, A. (2015). Langtest (Version 1.0) [Web application]. Retrieved from http://langtest.jp'),
br(),
strong('Article'),
p('Mizumoto, A., & Plonsky, L. (2015).', a("R as a lingua franca: Advantages of using R for quantitative research in applied linguistics.", href='http://applij.oxfordjournals.org/content/early/2015/06/24/applin.amv025.abstract', target="_blank"), em('Applied Linguistics,'), 'Advance online publication. doi:10.1093/applin/amv025'),
br(),
strong('Recommended'),
p('To learn more about R, I suggest this excellent and free e-book (pdf),',
a("A Guide to Doing Statistics in Second Language Research Using R,", href="http://cw.routledge.com/textbooks/9780805861853/guide-to-R.asp", target="_blank"),
'written by Dr. Jenifer Larson-Hall.'),
p('Also, if you are a cool Mac user and want to use R with GUI,',
a("MacR", href="https://sites.google.com/site/casualmacr/", target="_blank"),
'is definitely the way to go!'),
br(),
strong('Author'),
p(a("Atsushi MIZUMOTO,", href="http://mizumot.com", target="_blank"),' Ph.D.',br(),
'Associate Professor of Applied Linguistics',br(),
'Faculty of Foreign Language Studies /',br(),
'Graduate School of Foreign Language Education and Research,',br(),
'Kansai University, Osaka, Japan'),
br(),
a(img(src="http://i.creativecommons.org/p/mark/1.0/80x15.png"), target="_blank", href="http://creativecommons.org/publicdomain/mark/1.0/"),
p(br())
)
)
)
)) | 9,215 | unlicense |
96d8ad61b7d641c7edd8bf06f0401ce63acc774b | jread-usgs/gages-through-ages | scripts/fetch/getData_siteRecords.R | #Run this independently of vizlab; it requires 8+ hours of download time
#generates the csv output currently retrieved from ScienceBase
#one part is parallelized
#computes the number of years of record for every site
library(dplyr)
library(stringr)
library(data.table)
library(dataRetrieval)
library(lubridate)
library(parallel)
source('scripts/fetch/helperFunctions.R')
dir.create('chunks',showWarnings = FALSE)
#loop over HUC regions
hucs <- str_pad(1:21, width = 2, pad = "0")
setAccess("internal")
allDF <- data.frame()
for(h in hucs) {
#dataRet call
hucDF <- select(readNWISdata(huc = h, hasDataTypeCd = "dv", service = "site",
seriesCatalogOutput = TRUE, parameterCd = "00060"),
agency_cd, site_no, site_tp_cd, station_nm, dec_lat_va, dec_long_va,
huc_cd, data_type_cd, parm_cd, begin_date, end_date, count_nu)
hucDF <- filter(hucDF, data_type_cd == "dv", parm_cd == "00060")
hucDF <- mutate(hucDF, begin_date = as.character(begin_date),
end_date=as.character(end_date))
#filter & append
allDF <- bind_rows(allDF, hucDF)
}
#convert to dates
allDF <- mutate(allDF, begin_date = as.Date(begin_date),
end_date = as.Date(end_date),
dayRange = as.numeric(end_date - begin_date),
intDaysRecord = count_nu - 2, #assuming record exists on first and last days
intDaysAll = end_date - begin_date - 1,
diff = intDaysAll - intDaysRecord)
# x <- allDF[c(duplicated(allDF$site_no),duplicated(allDF$site_no, fromLast = TRUE)),]
#get sites where ratio days/ years is off
saveRDS(allDF, "allDF.rds")
allDF <- readRDS("allDF.rds")
completeSiteDF <- filter(allDF,
diff <= 10,
count_nu > 355) #can't have less than 355 days in any non-start/end year
incompleteSiteDF <- filter(allDF, diff > 10)
incompleteSiteDF <- filter(incompleteSiteDF, count_nu >= 355)
#want long df with row for each site/year
#For complete sites, check if start and end years should be counted
longOK <- checkCompleteYears(completeSiteDF)
longOK <- longOK[!duplicated(longOK),]
# fwrite(longOK, file = 'sitesYearsComplete_latLon_355_NEW.csv')
#need to deal with multiple lines for some sites in allDF;
#allDF_oneLineSite and longOK_join are used further down, so they must stay live
allDF_oneLineSite <- allDF[!duplicated(allDF$site_no),]
longOK_join <- left_join(longOK, allDF_oneLineSite, by = "site_no")
# fwrite(longOK_join, file = 'sitesYearsComplete_latLon_355.csv')
#get data for incomplete sites
#check which sites were already downloaded; saves ~6,000 site downloads
dlSites <- checkDownloadedSites('old_chunks')
toDownloadDF <- filter(incompleteSiteDF, !site_no %in% dlSites)
#don't repeat - there are sites with multiple measurement points
toDownloadSites <- unique(toDownloadDF$site_no)
#chunk by 200 sites
reqBks <- seq(1,length(toDownloadSites),by=200)
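#indices past the end of toDownloadSites come back as NA and are dropped by na.omit below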
for(i in reqBks) {
sites <- na.omit(toDownloadSites[i:(i+199)])
print(paste('starting', i))
all_sites <- tryCatch({
currentSitesDF <- readNWISdv(siteNumber = sites, parameterCd = "00060")
fwrite(currentSitesDF, file = file.path('chunks', paste0('newChunk', i)))
},
  error=function(cond) {
    message("***************Errored on ", i, "***********\n")
    return(NULL) # 'all_sites' may not exist yet on a first-iteration failure
})
print(paste("Finished sites", i, "through", i+199))
}
files <- list.files(c('chunks','old_chunks'), full.names = TRUE) #must be defined before either loop below
#serial version; gives the same per-file results as the parallel clusterApply below
completeFromIncomplete <- data.frame()
for(i in files){
  complete <- yearsFunc(i)
  completeFromIncomplete <- bind_rows(completeFromIncomplete, complete)
}
cl <- makeCluster(4)
allIncompleteYears <- clusterApply(cl, fun = yearsFunc, x = files)
stopCluster(cl)
#reassemble to DF, write
allIncompleteDF <- do.call("bind_rows", allIncompleteYears)
incomplete_lat_lon <- left_join(allIncompleteDF, allDF_oneLineSite, by = "site_no")
fwrite(incomplete_lat_lon, file = "incomplete_lat_lon_355.csv")
allSites_355 <- bind_rows(longOK_join, incomplete_lat_lon)
fwrite(allSites_355, file = "allSitesYears_355.csv", quote = TRUE)
| 4,016 | cc0-1.0 |
3b0b6b06cf20d57423b736735ba3567ea7038eae | flaneuse/llamar | website_examples.R | library(dplyr)
library(haven)
library(RColorBrewer)
# data sets ---------------------------------------------------------------
dhs_orig = read_dta('~/Documents/USAID/Rwanda/processeddata/DHS_2010_2015_analysis.dta')
dhs = removeAttributes(dhs_orig) %>%
filter(year == 2014)
dhs = factorize(dhs, dhs_orig, 'lvdzone', 'lz')
df2 = data.frame(avg = sample(-100:100, 10)/100, region = letters[1:10], ci = sample(1:100, 20)/1000) %>% mutate(lb = avg - ci, ub = avg + ci)
# plot_dot ----------------------------------------------------------------
plot_avg_dot(dhs, value_var = 'stunted2', by_var = 'lz',
percent_vals = TRUE, weight_var = 'cweight',
dot_size = 9, include_n = FALSE, x_limits = c(0.15, 0.65), x_breaks = seq(0.2, 0.6, by = 0.2),
dot_fill_cont = rev(brewer.pal(11, 'Spectral')[1:6]), sat_threshold = 0.65) +
theme_stroke()
save_plot('~/GitHub/llamar/img/plot_dot1.png', width = 8, height = 8)
plot_dot(df2, by_var = 'region', value_var = 'avg', ref_line = 0,
ref_text = 'no change', label_ref = FALSE, lollipop = TRUE, value_label_offset = .075,
dot_fill_cont = brewer.pal(10, 'RdYlBu'), percent_vals = TRUE) +
theme_stroke()
save_plot('~/GitHub/llamar/img/plot_dot2.png', width = 5, height = 5)
# plot_avg_dot ------------------------------------------------------------
p = plot_avg_dot(dhs, value_var = 'stunted2', by_var = 'lz',
percent_vals = TRUE, weight_var = 'cweight',
dot_size = 9, include_n = TRUE, x_breaks = c(0.2, 0.4, 0.6),
dot_fill_cont = rev(brewer.pal(11, 'Spectral')[1:6]), sat_threshold = 0.65)
save_plot('~/GitHub/llamar/img/plot_avg_dot.png',p, width = 8, height = 8)
# DHS musings -------------------------------------------------------------
plot_avg_dot(dhs, value_var = 'improvedWater', by_var = 'lz',
percent_vals = TRUE, weight_var = 'cweight',
dot_size = 9, include_n = TRUE,
dot_fill_cont = (brewer.pal(9, "Blues")), sat_threshold = 0.65) +
theme_stroke()
w = read_dta('~/Documents/USAID/Rwanda/rawdata/RW_2014-15_DHS/rwir70dt/RWIR70FL.DTA')
w = w %>% mutate(contrac = ifelse(v313 == 3, 1,
ifelse(is.na(v313), NA, 0)))
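#v313 == 3 flags modern-method contraceptive use in DHS recodes (assumption based on standard DHS coding)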
w = factorize(w, w, 'sdistrict', 'dist')
plot_avg_dot(w, value_var = 'contrac', by_var = 'dist',
percent_vals = TRUE,
dot_size = 9, include_n = TRUE,
dot_fill_cont = (brewer.pal(9, "Blues")), sat_threshold = 0.65) +
theme_stroke()
| 2,536 | mit |
96d8ad61b7d641c7edd8bf06f0401ce63acc774b | ldecicco-USGS/gages-through-ages | scripts/fetch/getData_siteRecords.R | #Run this independently of vizlab; it requires 8+ hours of download time
#generates the csv output currently retrieved from ScienceBase
#one part is parallelized
#computes the number of years of record for every site
library(dplyr)
library(stringr)
library(data.table)
library(dataRetrieval)
library(lubridate)
library(parallel)
source('scripts/fetch/helperFunctions.R')
dir.create('chunks',showWarnings = FALSE)
#loop over HUC regions
hucs <- str_pad(1:21, width = 2, pad = "0")
setAccess("internal")
allDF <- data.frame()
for(h in hucs) {
#dataRet call
hucDF <- select(readNWISdata(huc = h, hasDataTypeCd = "dv", service = "site",
seriesCatalogOutput = TRUE, parameterCd = "00060"),
agency_cd, site_no, site_tp_cd, station_nm, dec_lat_va, dec_long_va,
huc_cd, data_type_cd, parm_cd, begin_date, end_date, count_nu)
hucDF <- filter(hucDF, data_type_cd == "dv", parm_cd == "00060")
hucDF <- mutate(hucDF, begin_date = as.character(begin_date),
end_date=as.character(end_date))
#filter & append
allDF <- bind_rows(allDF, hucDF)
}
#convert to dates
allDF <- mutate(allDF, begin_date = as.Date(begin_date),
end_date = as.Date(end_date),
dayRange = as.numeric(end_date - begin_date),
intDaysRecord = count_nu - 2, #assuming record exists on first and last days
intDaysAll = end_date - begin_date - 1,
diff = intDaysAll - intDaysRecord)
# x <- allDF[c(duplicated(allDF$site_no),duplicated(allDF$site_no, fromLast = TRUE)),]
#get sites where ratio days/ years is off
saveRDS(allDF, "allDF.rds")
allDF <- readRDS("allDF.rds")
completeSiteDF <- filter(allDF,
diff <= 10,
count_nu > 355) #can't have less than 355 days in any non-start/end year
incompleteSiteDF <- filter(allDF, diff > 10)
incompleteSiteDF <- filter(incompleteSiteDF, count_nu >= 355)
#want long df with row for each site/year
#For complete sites, check if start and end years should be counted
longOK <- checkCompleteYears(completeSiteDF)
longOK <- longOK[!duplicated(longOK),]
# fwrite(longOK, file = 'sitesYearsComplete_latLon_355_NEW.csv')
#need to deal with multiple lines for some sites in allDF;
#allDF_oneLineSite and longOK_join are used further down, so they must stay live
allDF_oneLineSite <- allDF[!duplicated(allDF$site_no),]
longOK_join <- left_join(longOK, allDF_oneLineSite, by = "site_no")
# fwrite(longOK_join, file = 'sitesYearsComplete_latLon_355.csv')
#get data for incomplete sites
#check which sites were already downloaded; saves ~6,000 site downloads
dlSites <- checkDownloadedSites('old_chunks')
toDownloadDF <- filter(incompleteSiteDF, !site_no %in% dlSites)
#don't repeat - there are sites with multiple measurement points
toDownloadSites <- unique(toDownloadDF$site_no)
#chunk by 200 sites
reqBks <- seq(1,length(toDownloadSites),by=200)
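#indices past the end of toDownloadSites come back as NA and are dropped by na.omit below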
for(i in reqBks) {
sites <- na.omit(toDownloadSites[i:(i+199)])
print(paste('starting', i))
all_sites <- tryCatch({
currentSitesDF <- readNWISdv(siteNumber = sites, parameterCd = "00060")
fwrite(currentSitesDF, file = file.path('chunks', paste0('newChunk', i)))
},
  error=function(cond) {
    message("***************Errored on ", i, "***********\n")
    return(NULL) # 'all_sites' may not exist yet on a first-iteration failure
})
print(paste("Finished sites", i, "through", i+199))
}
files <- list.files(c('chunks','old_chunks'), full.names = TRUE) #must be defined before either loop below
#serial version; gives the same per-file results as the parallel clusterApply below
completeFromIncomplete <- data.frame()
for(i in files){
  complete <- yearsFunc(i)
  completeFromIncomplete <- bind_rows(completeFromIncomplete, complete)
}
cl <- makeCluster(4)
allIncompleteYears <- clusterApply(cl, fun = yearsFunc, x = files)
stopCluster(cl)
#reassemble to DF, write
allIncompleteDF <- do.call("bind_rows", allIncompleteYears)
incomplete_lat_lon <- left_join(allIncompleteDF, allDF_oneLineSite, by = "site_no")
fwrite(incomplete_lat_lon, file = "incomplete_lat_lon_355.csv")
allSites_355 <- bind_rows(longOK_join, incomplete_lat_lon)
fwrite(allSites_355, file = "allSitesYears_355.csv", quote = TRUE)
| 4,016 | cc0-1.0 |
f6f70a59a134f07a54ba970122ed982675715fc6 | rho-devel/rho | src/extra/testr/filtered-test-suite/unlist/tc_unlist_17.R | expected <- eval(parse(text="c(TRUE, TRUE, TRUE, TRUE)"));
test(id=0, code={
argv <- eval(parse(text="list(list(TRUE, TRUE, TRUE, TRUE), FALSE, TRUE)"));
.Internal(`unlist`(argv[[1]], argv[[2]], argv[[3]]));
}, o=expected);
| 260 | gpl-2.0 |
f6f70a59a134f07a54ba970122ed982675715fc6 | ArunChauhan/cxxr | src/extra/testr/filtered-test-suite/unlist/tc_unlist_17.R | expected <- eval(parse(text="c(TRUE, TRUE, TRUE, TRUE)"));
test(id=0, code={
argv <- eval(parse(text="list(list(TRUE, TRUE, TRUE, TRUE), FALSE, TRUE)"));
.Internal(`unlist`(argv[[1]], argv[[2]], argv[[3]]));
}, o=expected);
| 260 | gpl-2.0 |
3dd40a8734a58418bd5208f7d01dc4196bb9490f | carlganz/survey | R/twophase.R | ##
##
twophase<-function(id,strata=NULL, probs=NULL, weights=NULL, fpc=NULL,
subset, data, method=c("full","approx","simple")){
method<-match.arg(method)
if(method=="full") {
if (!is.null(weights)) stop("weights not accepted by method='full'")
return(twophase2(id=id, strata=strata, probs=probs, fpc=fpc,subset=subset,data=data))
}
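  ## methods "approx"/"simple": build the phase-1 design on the full data, then take the phase-2 subset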
d1<-svydesign(ids=id[[1]],strata=strata[[1]],weights=weights[[1]],
probs=probs[[1]],fpc=fpc[[1]],data=data)
if(inherits(subset,"formula"))
subset<-eval.parent(model.frame(subset,data=data,na.action=na.pass))[[1]]
  if(!is.logical(subset) && all(sort(unique(subset))==c(0,1))) # all() keeps the condition length-one
subset<-as.logical(subset)
if (any(is.na(subset))) stop("missing values in 'subset'")
d1s<-svydesign(ids=id[[1]],strata=strata[[1]],weights=weights[[1]],
probs=probs[[1]],fpc=fpc[[1]],data=data[subset,])
d1s$prob<-d1$prob[subset]
d1s$allprob<-d1$allprob[subset,,drop=FALSE]
##if (NCOL(d1s$allprob)>1)
## stop("Can't handle multistage sampling at phase 1 (yet)")
## work out phase-two fpc
if (is.null(fpc[[2]])){
complete.vars<-names(data)[apply(data, 2, function(v) all(!is.na(v)))]
if (all(c(all.vars(id[[2]]), all.vars(strata[[2]])) %in% complete.vars)){
dfpc<-svydesign(ids=id[[2]], strata=strata[[2]], data=data, probs=NULL)
popsize<-mapply(function(s,i) ave(!duplicated(i),s,FUN=sum), dfpc$strata, dfpc$cluster)
rm(dfpc)
} else {
warning("Second-stage fpc not specified and not computable")
popsize<-NULL
}
} else popsize<-NULL
d2<-svydesign(ids=id[[2]], strata=strata[[2]], probs=probs[[2]],
weights=weights[[2]], fpc=fpc[[2]], data=data[subset,])
## ugly hack to get nicer labels
if(!is.null(fpc[[2]])){
d2call<-bquote(svydesign(ids=.(id[[2]]),strata=.(strata[[2]]), probs=.(probs[[2]]),
weights=.(weights[[2]]), fpc=.(fpc[[2]])))
} else{
d2call<-bquote(svydesign(ids=.(id[[2]]),strata=.(strata[[2]]), probs=.(probs[[2]]),
weights=.(weights[[2]]), fpc=`*phase1*`))
}
for(i in names(d2call)[-1])
d2call[[i]]<-d2call[[i]]
d2$call<-d2call
d1call<-bquote(svydesign(ids=.(id[[1]]), strata=.(strata[[1]]), probs=.(probs[[1]]),
weights=.(weights[[1]]), fpc=.(fpc[[1]])))
for(i in names(d1call)[-1])
d1call[[i]]<-d1call[[i]]
d1$call<-d1call
## Add phase 2 fpc and probs if they were computed rather than specified.
if (!is.null(popsize))
d2$fpc<-as.fpc(popsize[subset,,drop=FALSE],d2$strata,d2$cluster)
if(is.null(probs[[2]]) && is.null(weights[[2]]) && !is.null(d2$fpc$popsize)){
d2$allprob<-1/weights(d2$fpc,final=FALSE)
d2$prob<-apply(as.data.frame(d2$allprob),1,prod)
}
d2$variables<-NULL
rval<-list(phase1=list(full=d1,sample=d1s),
phase2=d2,
subset=subset)
rval$prob<-rval$phase1$sample$prob
## Are phase 2 PSUs the same as Phase 1 USUs, or smaller?
rval$samescale<- !any(duplicated(d1s$cluster[,NCOL(d1s$cluster)][!duplicated(d2$cluster[,1])]))
## For each phase 1 sampling unit, need probability of being represented
## at phase 2.
nunique<-function(x) sum(!duplicated(x))
m<-NCOL(rval$phase1$sample$cluster)
if(d2$has.strata){
if (inherits(strata[[2]],"formula"))
sa<-eval(attr(terms(strata[[2]]),"variables")[[2]],d1$variables)
else
sa<-d1$strata[,1]
cm<-rval$phase1$full$cluster[,m]
if (nunique(sa)!=nunique(sa[subset]))
stop("Some phase-2 strata have zero sampling fraction")
rval$usu<-ave(cm[subset],sa[subset],FUN=nunique)/ave(cm,sa,FUN=nunique)[subset]
} else {
rval$usu<-drop(with(rval$phase1$sample,ave(cluster[,m], strata[,m], FUN=nunique))/rval$phase1$full$fpc$sampsize[rval$subset])
}
## if (any(rval$usu<1) && any(duplicated(d1$cluster[,1])))
## stop("Phase 1 design must either be element sampling or have all phase 1 sampling units in phase 2")
if (length(rval$phase1$sample$prob)==length(d2$prob))
rval$prob<-rval$phase1$sample$prob*d2$prob
else{
rval$prob<-rep(Inf,length(rval$phase1$sample$prob))
rval$prob[subset]<-rval$prob[subset]*d2$prob
}
rval$call<-sys.call()
class(rval) <- c("twophase","survey.design")
rval
}
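## Usage sketch (hypothetical data; 'case' and 'insample' are illustrative column names):
## des <- twophase(id = list(~1, ~1), strata = list(NULL, ~case),
##                 subset = ~insample, data = mydata)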
print.twophase<-function(x,...){
cat("Two-phase design: ")
print(x$call)
cat("Phase 1:\n")
print(x$phase1$full)
cat("Phase 2:\n")
print(x$phase2)
invisible(x)
}
summary.twophase<-function(object,...){
class(object)<-"summary.twophase"
object
}
print.summary.twophase<-function(x,...,varnames=TRUE){
cat("Two-phase design: ")
print(x$call)
cat("Phase 1:\n")
print(x$phase1$full,design.summaries=TRUE,varnames=FALSE)
cat("Phase 2:\n")
print(x$phase2,design.summaries=TRUE, varnames=FALSE)
if (varnames){
cat("Data variables:\n")
print(names(x$phase1$full$variables))
}
invisible(x)
}
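## Two-phase variance splits into a phase-1 term (sampling from the population)
## and a phase-2 term (subsampling the phase-1 sample); both are computed below
## and returned in the "phases" attribute.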
twophasevar<-function(x,design){
d1 <- design$phase1$sample
if (NROW(x)==length(design$usu)){
ph2pr<-design$usu
if (any(design$prob==Inf))
x[is.na(x)]<-0
}else{
x[is.na(x)]<-0
ph2pr<-rep(1,NROW(x))
ph2pr[design$subset]<-design$usu
}
## compute phase 1 variance
vphase1 <- svyrecvar.phase1(x,d1$cluster, d1$strata, d1$fpc,
postStrata=d1$postStrata,
ph2prob=ph2pr,
nPSUfull=design$phase1$full$fpc$sampsize[design$subset,,drop=FALSE])
## is phase 2 sampling whole phase 1 units or subsampling within units?
if (design$samescale)
u2<-x
else
u2<-x*sqrt(d1$prob)
u2[is.na(u2)]<-0
## compute phase 2 variance
vphase2 <- with(design, svyrecvar(u2, phase2$cluster, phase2$strata,
phase2$fpc, postStrata=phase2$postStrata))
rval <- vphase1+vphase2
attr(rval, "phases")<-list(phase1=vphase1, phase2=vphase2)
rval
}
svyrecvar.phase1<-function(x, clusters, stratas, fpcs, postStrata=NULL,
lonely.psu=getOption("survey.lonely.psu"),
one.stage=getOption("survey.ultimate.cluster"),
ph2prob, nPSUfull){
x<-as.matrix(x)
cal<-NULL
## FIXME: calibration of phase 1 not yet implemented.
## Remove post-stratum means, which may cut across clusters
## Also center the data using any "g-calibration" models
if(!is.null(postStrata)){
stop("calibration of phase 1 not yet implemented")
for (psvar in postStrata){
if (inherits(psvar, "greg_calibration")) {
if (psvar$stage==0){
## G-calibration at population level
x<-qr.resid(psvar$qr,x/psvar$w)*psvar$w
} else {
## G-calibration within clusters
cal<-c(cal, list(psvar))
}
} else {
## ordinary post-stratification
psw<-attr(psvar, "weights")
postStrata<-as.factor(psvar)
psmeans<-rowsum(x/psw,psvar,reorder=TRUE)/as.vector(table(factor(psvar)))
x<- x-psmeans[match(psvar,sort(unique(psvar))),]*psw
}
}
}
multistage.phase1(x, clusters,stratas,fpcs$sampsize, fpcs$popsize,
lonely.psu=getOption("survey.lonely.psu"),
one.stage=one.stage,stage=1,cal=cal,ph2prob=ph2prob,
nPSUfull=nPSUfull)
}
multistage.phase1<-function(x, clusters, stratas, nPSUs, fpcs,
lonely.psu=getOption("survey.lonely.psu"),
one.stage=FALSE,stage,cal,ph2prob, nPSUfull){
n<-NROW(x)
v <- onestage.phase1(x,stratas[,1], clusters[,1], nPSUs[,1],
fpcs[,1], lonely.psu=lonely.psu,stage=stage,cal=cal,
ph2prob=ph2prob, nPSUfull=nPSUfull[,1])
if (one.stage!=TRUE && !is.null(fpcs) && NCOL(clusters)>1) {
v.sub<-by(1:n, list(as.numeric(clusters[,1])), function(index){
## residuals for G-calibration using population information
## only on clusters at this stage.
for(cali in cal){
if (cali$stage != stage)
next
j<-match(clusters[index,1],cali$index)
if (length(unique(j))!=1)
stop("Internal problem in g-calibration data: stage",stage,
", cluster", j)
j<-j[[1]]
x[index,]<-qr.resid(cali$qr[[j]], x[index,,drop=FALSE]/cali$w[[j]])*cali$w[[j]]
}
multistage.phase1(x[index,,drop=FALSE], clusters[index,-1,drop=FALSE],
stratas[index,-1,drop=FALSE], nPSUs[index,-1,drop=FALSE],
fpcs[index,-1,drop=FALSE],
lonely.psu=lonely.psu,one.stage=one.stage-1,
stage=stage+1,cal=cal,ph2prob=ph2prob[index],
nPSUfull=nPSUfull[index,-1,drop=FALSE])*nPSUfull[index[1],1]/fpcs[index[1],1]
})
for(i in 1:length(v.sub))
v<-v+v.sub[[i]]
}
v
}
onestrat.phase1<-function(x,cluster,nPSU,fpc, lonely.psu,stratum=NULL,
stage=1,cal,ph2prob, nPSUfull){
x<-rowsum(x, cluster)
ph2prob<-ph2prob[!duplicated(cluster)]
nsubset<-nrow(x)
if (nsubset<nPSU)
x<-rbind(x,matrix(0,ncol=ncol(x),nrow=nPSU-nrow(x)))
ph2prob<-c(ph2prob,rep(1,nPSU-nsubset))
xcenter<-colMeans(x*nPSU/nPSUfull)
x<-x*ph2prob
if (is.null(fpc))
f<-1
else
f<-ifelse(fpc==Inf, 1, (fpc-nPSUfull)/fpc)
if (lonely.psu!="adjust" || nsubset>1 ||
(nPSU>1 && !getOption("survey.adjust.domain.lonely")))
x<-sweep(x, 2, xcenter, "-")
if (nPSU>1)
scale<-f*nPSUfull/(nPSUfull-1)
else
scale<-f
if (nsubset==1 && nPSU>1){
warning("Stratum (",stratum,") has only one PSU at stage ",stage)
if (lonely.psu=="average" && getOption("survey.adjust.domain.lonely"))
scale<-NA
}
if (nPSU>1){
return(crossprod(x/sqrt(ph2prob))*scale)
} else if (f<0.0000001) ## certainty PSU
return(0*crossprod(x/sqrt(ph2prob)))
else {
rval<-switch(lonely.psu,
certainty=scale*crossprod(x/sqrt(ph2prob)),
remove=scale*crossprod(x/sqrt(ph2prob)),
adjust=scale*crossprod(x/sqrt(ph2prob)),
average=NA*crossprod(x/sqrt(ph2prob)),
fail= stop("Stratum (",stratum,") has only one PSU at stage ",stage),
stop("Can't handle lonely.psu=",lonely.psu)
)
rval
}
}
onestage.phase1<-function(x, strata, clusters, nPSU, fpc,
lonely.psu=getOption("survey.lonely.psu"),stage=0,
cal,ph2prob, nPSUfull){
stratvars<-tapply(1:NROW(x), list(factor(strata)), function(index){
onestrat.phase1(x[index,,drop=FALSE], clusters[index],
nPSU[index][1], fpc[index][1],
lonely.psu=lonely.psu,stratum=strata[index][1], stage=stage,cal=cal,
ph2prob=ph2prob[index], nPSUfull=nPSUfull[index][1])
})
p<-NCOL(x)
nstrat<-length(unique(strata))
nokstrat<-sum(sapply(stratvars,function(m) !any(is.na(m))))
apply(array(unlist(stratvars),c(p,p,length(stratvars))),1:2,sum,na.rm=TRUE)*nstrat/nokstrat
}
svytotal.twophase<-function(x,design, na.rm=FALSE, deff=FALSE,...){
if (inherits(x,"formula")){
## do the right thing with factors
mf<-model.frame(x,design$phase1$sample$variables,
na.action=na.pass)
xx<-lapply(attr(terms(x),"variables")[-1],
function(tt) model.matrix(eval(bquote(~0+.(tt))),mf))
cols<-sapply(xx,NCOL)
x<-matrix(nrow=NROW(xx[[1]]),ncol=sum(cols))
scols<-c(0,cumsum(cols))
for(i in 1:length(xx)){
x[,scols[i]+1:cols[i]]<-xx[[i]]
}
colnames(x)<-do.call("c",lapply(xx,colnames))
} else {
if(typeof(x) %in% c("expression","symbol"))
x<-eval(x, design$variables)
else {
if(is.data.frame(x) && any(sapply(x,is.factor))){
xx<-lapply(x, function(xi) {if (is.factor(xi)) 0+(outer(xi,levels(xi),"==")) else xi})
cols<-sapply(xx,NCOL)
scols<-c(0,cumsum(cols))
cn<-character(sum(cols))
for(i in 1:length(xx))
cn[scols[i]+1:cols[i]]<-paste(names(x)[i],levels(x[[i]]),sep="")
x<-matrix(nrow=NROW(xx[[1]]),ncol=sum(cols))
for(i in 1:length(xx)){
x[,scols[i]+1:cols[i]]<-xx[[i]]
}
colnames(x)<-cn
}
}
}
x<-as.matrix(x)
if (na.rm){
nas<-rowSums(is.na(x))
design<-design[nas==0,]
if(length(nas)>length(design$prob))
x<-x[nas==0,,drop=FALSE]
else
x[nas>0,]<-0
}
N<-sum(1/design$prob)
total <- colSums(x/as.vector(design$prob),na.rm=na.rm)
class(total)<-"svystat"
attr(total, "var")<-v<-twophasevar(x/design$prob,design)
attr(total,"statistic")<-"total"
if (is.character(deff) || deff){
nobs<-NROW(design$cluster)
if (deff=="replace")
vsrs<-svyvar(x,design,na.rm=na.rm)*sum(weights(design)^2)*(N-nobs)/N
else
vsrs<-svyvar(x,design,na.rm=na.rm)*sum(weights(design)^2)
attr(total, "deff")<-v/vsrs
}
return(total)
}
svymean.twophase<-function(x,design, na.rm=FALSE,deff=FALSE,...){
if (inherits(x,"formula")){
## do the right thing with factors
mf<-model.frame(x,design$phase1$sample$variables
,na.action=na.pass)
xx<-lapply(attr(terms(x),"variables")[-1],
function(tt) model.matrix(eval(bquote(~0+.(tt))),mf))
cols<-sapply(xx,NCOL)
x<-matrix(nrow=NROW(xx[[1]]),ncol=sum(cols))
scols<-c(0,cumsum(cols))
for(i in 1:length(xx)){
x[,scols[i]+1:cols[i]]<-xx[[i]]
}
colnames(x)<-do.call("c",lapply(xx,colnames))
}
else {
if(typeof(x) %in% c("expression","symbol"))
x<-eval(x, design$variables)
else {
if(is.data.frame(x) && any(sapply(x,is.factor))){
xx<-lapply(x, function(xi) {if (is.factor(xi)) 0+(outer(xi,levels(xi),"==")) else xi})
cols<-sapply(xx,NCOL)
scols<-c(0,cumsum(cols))
cn<-character(sum(cols))
for(i in 1:length(xx))
cn[scols[i]+1:cols[i]]<-paste(names(x)[i],levels(x[[i]]),sep="")
x<-matrix(nrow=NROW(xx[[1]]),ncol=sum(cols))
for(i in 1:length(xx)){
x[,scols[i]+1:cols[i]]<-xx[[i]]
}
colnames(x)<-cn
}
}
}
x<-as.matrix(x)
if (na.rm){
nas<-rowSums(is.na(x))
design<-design[nas==0,]
if(length(nas)>length(design$prob))
x<-x[nas==0,,drop=FALSE]
else
x[nas>0,]<-0
}
pweights<-1/design$prob
psum<-sum(pweights)
average<-colSums(x*pweights/psum)
x<-sweep(x,2,average)
v<-twophasevar(x*pweights/psum,design)
attr(average,"var")<-v
attr(average,"statistic")<-"mean"
class(average)<-"svystat"
if (is.character(deff) || deff){
nobs<-NROW(design$cluster)
if(deff=="replace"){
vsrs<-svyvar(x,design,na.rm=na.rm)/(nobs)
} else {
if(psum<nobs) {
vsrs<-NA*v
warning("Sample size greater than population size: are weights correctly scaled?")
} else{
vsrs<-svyvar(x,design,na.rm=na.rm)*(psum-nobs)/(psum*nobs)
}
}
attr(average, "deff")<-v/vsrs
}
return(average)
}
model.frame.twophase<-function(formula,phase=2,...){
if (phase==1)
formula$phase1$full$variables
else
formula$phase1$sample$variables
}
svyratio.twophase<-function(numerator=formula, denominator, design, separate=FALSE,na.rm=FALSE,formula,...){
if (separate){
strats<-sort(unique(design$phase2$strata[,1]))
if (!design$phase2$has.strata)
warning("Separate and combined ratio estimators are the same for unstratified designs")
rval<-list(ratios=lapply(strats,
function(s) {
tmp<-svyratio(numerator, denominator,
subset(design, design$phase2$strata[,1] %in% s),
separate=FALSE,...)
attr(tmp,"call")<-bquote(Stratum==.(s))
tmp}))
names(rval$ratios)<-strats
class(rval)<-c("svyratio_separate")
rval$call<-sys.call()
rval$strata<-strats
return(rval)
}
if (inherits(numerator,"formula"))
numerator<-model.frame(numerator,model.frame(design),na.action=na.pass)
else if(typeof(numerator) %in% c("expression","symbol"))
numerator<-eval(numerator, design$variables)
if (inherits(denominator,"formula"))
denominator<-model.frame(denominator,model.frame(design),na.action=na.pass)
else if(typeof(denominator) %in% c("expression","symbol"))
denominator<-eval(denominator, model.frame(design))
nn<-NCOL(numerator)
nd<-NCOL(denominator)
all<-cbind(numerator,denominator)
nas<-!complete.cases(all)
if (na.rm){
design<-design[!nas,]
all<-all[!nas,,drop=FALSE]
numerator<-numerator[!nas,,drop=FALSE]
denominator<-denominator[!nas,,drop=FALSE]
}
allstats<-svytotal(all, design)
rval<-list(ratio=outer(allstats[1:nn],allstats[nn+1:nd],"/"))
vars<-matrix(ncol=nd,nrow=nn)
for(i in 1:nn){
for(j in 1:nd){
r<-(numerator[,i]-rval$ratio[i,j]*denominator[,j])/sum(denominator[,j]/design$prob)
vars[i,j]<-twophasevar(r*1/design$prob, design)
}
}
colnames(vars)<-names(denominator)
rownames(vars)<-names(numerator)
rval$var<-vars
attr(rval,"call")<-sys.call()
class(rval)<-"svyratio"
rval
}
"[.twophase"<-function (x,i, ..., drop=TRUE){
if (!missing(i)){
if (is.calibrated(x$phase1$full) || is.calibrated(x$phase2) || !drop){
## Set weights to zero: no memory saving possible
## There should be an easier way to complement a subscript..
if (is.logical(i)){
x$prob[!i]<-Inf
x$phase2$prob[!i]<-Inf
} else if (is.numeric(i) && length(i)){
x$prob[-i]<-Inf
x$phase2$prob[-i]<-Inf
} else {
tmp<-x$prob[i,]
x$prob<-rep(Inf, length(x$prob))
x$prob[i,]<-tmp
}
index<-is.finite(x$prob)
psu<-!duplicated(x$phase2$cluster[index,1])
tt<-table(x$phase2$strata[index,1][psu])
if(any(tt==1)){
warning(sum(tt==1)," strata have only one PSU in this subset.")
}
} else {
## subset everything.
x$prob<-x$prob[i]
if (is.logical(i))
x$subset[x$subset]<- i
else if (is.numeric(i) && length(i))
x$subset[which(x$subset)[-i]]<- FALSE
else
x$subset<-FALSE & x$subset
x$usu<-x$usu[i]
x$phase1$sample<-x$phase1$sample[i,...,drop=TRUE]
x$phase2<-x$phase2[i,...,drop=TRUE]
}
} else {
x$phase1$full<-x$phase1$full[,...]
x$phase1$sample<-x$phase1$sample[,...]
x$phase2<-x$phase2[,...]
}
x
}
dim.twophase<-function(x,...){
dim(x$phase1$sample$variables)
}
na.fail.twophase<-function(object,...){
tmp<-na.fail(object$phase1$sample$variables,...)
object
}
na.omit.twophase<-function(object,...){
tmp<-na.omit(object$phase1$sample$variables,...)
omit<-attr(tmp,"na.action")
if (length(omit)){
object<-object[-omit,]
object$phase1$sample$variables<-tmp
attr(object,"na.action")<-omit
}
object
}
na.exclude.twophase<-function(object,...){
tmp<-na.exclude(object$phase1$sample$variables,...)
exclude<-attr(tmp,"na.action")
if (length(exclude)){
object<-object[-exclude,]
object$phase1$sample$variables<-tmp
attr(object,"na.action")<-exclude
}
object
}
update.twophase<-function(object,...){
dots<-substitute(list(...))[-1]
newnames<-names(dots)
for(j in seq(along=dots)){
object$phase1$sample$variables[,newnames[j]]<-eval(dots[[j]], object$phase1$sample$variables, parent.frame())
object$phase1$full$variables[,newnames[j]]<-eval(dots[[j]], object$phase1$full$variables, parent.frame())
}
object$call<-sys.call(-1)
object
}
subset.twophase<-function(x,subset,...){
e <- substitute(subset)
r <- eval(e, x$phase1$sample$variables, parent.frame())
r <- r & !is.na(r)
x<-x[r,]
x$call<-sys.call(-1)
x
}
calibrate.twophase<-function(design, phase=2, formula, population,
calfun=c("linear","raking","logit","rrz"),...){
if (phase==1){
stop("phase 1 calibration not yet implemented")
phase1<-calibrate(design$phase1$full,formula, population, ...)
design$phase1$full<-phase1
design$phase1$sample<-phase1[design$subset,]
} else if(phase==2){
if (is.character(calfun)) calfun<-match.arg(calfun)
if (is.character(calfun) && calfun=="rrz"){
design<-estWeights(design, formula,...)
design$call<-sys.call(-1)
return(design)
}
if (missing(population) || is.null(population)){
## calibrate to phase 1 totals
population<-colSums(model.matrix(formula,
model.frame(formula, design$phase1$full$variables)))
}
phase2<-design$phase2
phase2$variables<-design$phase1$sample$variables
phase2<-calibrate(phase2,formula,population,calfun=calfun,...)
g<-design$phase2$prob/phase2$prob
phase2$variables<-NULL
design$phase2<-phase2
design$usu<-design$usu/g
} else stop("`phase' must be 1 or 2")
if (length(design$phase1$sample$prob)==length(design$phase2$prob))
design$prob<-design$phase1$sample$prob*design$phase2$prob
else{
design$prob<-rep(Inf,length(design$phase1$sample$prob))
    design$prob[design$subset]<-design$prob[design$subset]*design$phase2$prob # 'subset' alone is not in scope here
}
design$call<-sys.call(-1)
design
}
postStratify.twophase<-function(design, ...) {
stop("postStratify not implemented for two-phase designs. Use calibrate()")
}
estWeights<-function(data, formula, ...) UseMethod("estWeights")
estWeights.twophase<-function(data, formula=NULL, working.model=NULL,...){
if (!xor(is.null(formula), is.null(working.model)))
stop("Must specify one of formula, working.model")
certainty<-rep(FALSE,nrow(data$phase1$full$variables))
certainty[data$subset]<-data$phase2$fpc$popsize==data$phase2$fpc$sampsize
if (!is.null(formula)){
ff<-data$subset~rhs
ff[[3]]<-formula[[2]]
if(!attr(terms(ff),"intercept")) stop("formula must have an intercept")
model<-glm(ff, data=data$phase1$full$variables, family=binomial(),
subset=!certainty, na.action=na.fail)
} else {
xx<-estfun(working.model)
model<-glm(data$subset~xx,family=binomial(), subset=!certainty, na.action=na.fail)
}
fitp<-as.numeric(certainty[data$subset])
fitp[!certainty[data$subset]]<-fitted(model)[data$subset[!certainty]]
g<- (1/fitp)/(1/data$phase2$prob)
mm<-model.matrix(model)[data$subset[!certainty],,drop=FALSE]
if (any(certainty)){
mm1<-matrix(0,ncol=ncol(mm)+1,nrow=sum(data$subset))
mm1[,1]<-as.numeric(certainty[data$subset])
mm1[!certainty[data$subset],-1]<-mm
mm<-mm1
}
whalf<-sqrt(1/data$phase2$prob)
caldata<-list(qr=qr(mm*whalf), w=g*whalf, stage=0, index=NULL)
class(caldata) <- c("greg_calibration","gen_raking")
data$phase2$prob<-fitp
data$usu<-data$usu/g
data$phase2$postStrata <- c(data$phase2$postStrata, list(caldata))
if (length(data$phase1$sample$prob)==length(data$phase2$prob))
data$prob<-data$phase1$sample$prob*data$phase2$prob
else{
data$prob<-rep(Inf,length(data$phase1$sample$prob))
    data$prob[data$subset]<-data$prob[data$subset]*data$phase2$prob # 'subset' alone is not in scope here
}
data$call <- sys.call(-1)
data
}
estfun<-function(model,...) UseMethod("estfun")
estfun.coxph<-function(model, ...) resid(model,"score")
estfun.glm<-function(model){
xmat<-model.matrix(model)
residuals(model,"working")*model$weights*xmat
}
estfun.lm<-function(model,...){
model.matrix(model)*resid(model)
}
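## e.g. estfun(lm(y ~ x)) returns per-observation score contributions x_i * r_i,
## the influence pieces used when calibrating on a working model (illustrative call)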
estWeights.data.frame<-function(data,formula=NULL, working.model=NULL,
subset=NULL, strata=NULL,...){
if (is.null(subset)){
subset<-complete.cases(data)
if (all(subset))
stop("No missing data.")
}
if(is.null(strata)){
des<-twophase(id=list(~1,~1), subset=subset, data=data)
} else{
des<-twophase(id=list(~1,~1), subset=subset, data=data,
strata=list(NULL,strata))
}
rval<-estWeights(des,formula=formula,working.model=working.model)
rval$call<-sys.call(-1)
rval
}
| 25,145 | gpl-3.0 |
f6f70a59a134f07a54ba970122ed982675715fc6 | kmillar/cxxr | src/extra/testr/filtered-test-suite/unlist/tc_unlist_17.R | expected <- eval(parse(text="c(TRUE, TRUE, TRUE, TRUE)"));
test(id=0, code={
argv <- eval(parse(text="list(list(TRUE, TRUE, TRUE, TRUE), FALSE, TRUE)"));
.Internal(`unlist`(argv[[1]], argv[[2]], argv[[3]]));
}, o=expected);
| 260 | gpl-2.0 |
f6f70a59a134f07a54ba970122ed982675715fc6 | cxxr-devel/cxxr | src/extra/testr/filtered-test-suite/unlist/tc_unlist_17.R | expected <- eval(parse(text="c(TRUE, TRUE, TRUE, TRUE)"));
test(id=0, code={
argv <- eval(parse(text="list(list(TRUE, TRUE, TRUE, TRUE), FALSE, TRUE)"));
.Internal(`unlist`(argv[[1]], argv[[2]], argv[[3]]));
}, o=expected);
| 260 | gpl-2.0 |
f6f70a59a134f07a54ba970122ed982675715fc6 | kmillar/rho | src/extra/testr/filtered-test-suite/unlist/tc_unlist_17.R | expected <- eval(parse(text="c(TRUE, TRUE, TRUE, TRUE)"));
test(id=0, code={
argv <- eval(parse(text="list(list(TRUE, TRUE, TRUE, TRUE), FALSE, TRUE)"));
.Internal(`unlist`(argv[[1]], argv[[2]], argv[[3]]));
}, o=expected);
| 260 | gpl-2.0 |
f6f70a59a134f07a54ba970122ed982675715fc6 | krlmlr/cxxr | src/extra/testr/filtered-test-suite/unlist/tc_unlist_17.R | expected <- eval(parse(text="c(TRUE, TRUE, TRUE, TRUE)"));
test(id=0, code={
argv <- eval(parse(text="list(list(TRUE, TRUE, TRUE, TRUE), FALSE, TRUE)"));
.Internal(`unlist`(argv[[1]], argv[[2]], argv[[3]]));
}, o=expected);
| 260 | gpl-2.0 |
4f184a67481c62e86136bba02143e616bd46f94f | SoftFx/FDK | FDK2R/FdkRLib/RPackage/R/FdkBarPair.R |
#' Gets the bars' quote history as requested
#'
#' @param symbol Symbol looked
#' @param barPeriodStr (default 'M1') values like: S1, S10, M1, M5, M15, M30, H1, H4, D1, W1, MN1
#' @param startTime R time as start of interval
#' @param endTime R time as end of interval
#' @param barCountDbl Bar count
#' @export
ttBarsQuotesHistory <- function(symbol, barPeriodStr = "M1",
startTime = ttTimeZero(), endTime = ttNow(),
barCountDbl = 10000){
bars = ComputeGetPairBars(symbol, barPeriodStr, startTime, endTime, barCountDbl)
getBarPairFrame(bars)
}
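# Usage sketch (symbol/period/count are placeholder values):
# bars <- ttBarsQuotesHistory("EURUSD", "H1", ttTimeZero(), ttNow(), 500)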
#' Extracts bar pair array data as a full data frame
#'
#' @param bars Bars array variable
getBarPairFrame <- function (bars){
askHigh = GetBarsAskHigh(bars)
askLow = GetBarsAskLow(bars)
askopen = GetBarsAskOpen(bars)
askClose = GetBarsAskClose(bars)
askVolume = GetBarsAskVolume(bars)
bidHigh = GetBarsBidHigh(bars)
bidLow = GetBarsBidLow(bars)
bidOpen = GetBarsBidOpen(bars)
bidClose = GetBarsBidClose(bars)
bidVolume = GetBarsBidVolume(bars)
from = GetBarsAskFrom(bars)
to = GetBarsAskTo(bars)
UnregisterVar(bars)
data.table(askHigh, askLow, askopen, askClose, askVolume,
bidHigh, bidLow, bidOpen, bidClose, bidVolume,
from, to)
}
#' Gets the bars pairs as requested
#'
#' @param symbol Symbol looked
#' @param barPeriodStr (default 'M1') values like: S1, S10, M1, M5, M15, M30, H1, H4, D1, W1, MN1
#' @param startTime R time as start of interval
#' @param endTime R time as end of interval
#' @param barCount Bar count
ComputeGetPairBars <- function(symbol, barPeriodStr, startTime, endTime, barCount) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'ComputeGetPairBars', symbol, barPeriodStr, startTime, endTime, barCount)
}
#' Gets the bars' ask high as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsAskHigh <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsAskHigh', barsPairVar)
}
#' Gets the bars' ask low as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsAskLow <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsAskLow', barsPairVar)
}
#' Gets the bars' ask open as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsAskOpen <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsAskOpen', barsPairVar)
}
#' Gets the bars' ask close as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsAskClose <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsAskClose', barsPairVar)
}
#' Gets the bars' ask volume as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsAskVolume <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsAskVolume', barsPairVar)
}
#' Gets the bars' ask 'from' timestamps as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsAskFrom <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsAskFrom', barsPairVar)
}
#' Gets the bars' ask 'to' timestamps as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsAskTo <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsAskTo', barsPairVar)
}
#' Gets the bars' bid high as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsBidHigh <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsBidHigh', barsPairVar)
}
#' Gets the bars' bid low as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsBidLow <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsBidLow', barsPairVar)
}
#' Gets the bars' bid open as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsBidOpen <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsBidOpen', barsPairVar)
}
#' Gets the bars' bid close as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsBidClose <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsBidClose', barsPairVar)
}
#' Gets the bars' bid volume as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsBidVolume <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsBidVolume', barsPairVar)
}
#' Gets the bars' bid 'from' timestamps as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsBidFrom <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsBidFrom', barsPairVar)
}
#' Gets the bars' bid 'to' timestamps as requested
#'
#' @param barsPairVar RHost variable that stores quotes array
GetBarsBidTo <- function(barsPairVar) {
rClr::clrCallStatic('RHost.FdkBarPairs', 'GetBarsBidTo', barsPairVar)
}
| 4,834 | mit |
f403d3cc80d96bb9d032068cabb16d96792776ad | tijoseymathew/mlr | todo-files/test_regr_randomUniformForest.R | context("regr_randomUniformForest")
test_that("regr_randomUniformForest", {
  skip_on_travis() # FIXME: I don't know why this breaks on travis
requirePackages("randomUniformForest", default.method = "load")
parset.list = list(
list(ntree = 5, mtry = 4)
)
tsk.train = makeRegrTask(data = regr.train, target = regr.target)
tsk.test = makeRegrTask(data = regr.test, target = regr.target)
for (i in seq_along(parset.list)) {
parset = c(list(formula = regr.formula, data = regr.train, OOB = FALSE,
importance = FALSE, unsupervised = FALSE, threads = 1L), parset.list[[i]])
set.seed(getOption("mlr.debug.seed"))
m = do.call(randomUniformForest::randomUniformForest, parset)
old.predicts = predict(m, regr.test)
lrn = do.call("makeLearner", c("regr.randomUniformForest", parset.list[[i]]))
set.seed(getOption("mlr.debug.seed"))
trained.mod = train(lrn, tsk.train)
new.predicts = predict(trained.mod, tsk.test)$data$response
    #randomUniformForest is so randomized that even the same seed will produce different results on
    #the same data; see vignette("randomUniformForestsOverview"), page 22.
expect_true(length(old.predicts) == length(new.predicts))
}
})
| 1,224 | bsd-2-clause |
f403d3cc80d96bb9d032068cabb16d96792776ad | vinaywv/mlr | todo-files/test_regr_randomUniformForest.R | context("regr_randomUniformForest")
test_that("regr_randomUniformForest", {
  skip_on_travis() # FIXME: I don't know why this breaks on travis
requirePackages("randomUniformForest", default.method = "load")
parset.list = list(
list(ntree = 5, mtry = 4)
)
tsk.train = makeRegrTask(data = regr.train, target = regr.target)
tsk.test = makeRegrTask(data = regr.test, target = regr.target)
for (i in seq_along(parset.list)) {
parset = c(list(formula = regr.formula, data = regr.train, OOB = FALSE,
importance = FALSE, unsupervised = FALSE, threads = 1L), parset.list[[i]])
set.seed(getOption("mlr.debug.seed"))
m = do.call(randomUniformForest::randomUniformForest, parset)
old.predicts = predict(m, regr.test)
lrn = do.call("makeLearner", c("regr.randomUniformForest", parset.list[[i]]))
set.seed(getOption("mlr.debug.seed"))
trained.mod = train(lrn, tsk.train)
new.predicts = predict(trained.mod, tsk.test)$data$response
    #randomUniformForest is so randomized that even the same seed will produce different results on
    #the same data; see vignette("randomUniformForestsOverview"), page 22.
expect_true(length(old.predicts) == length(new.predicts))
}
})
| 1,224 | bsd-2-clause |
71c5bb1c16e31e50b5a38baa1e64dc6ed53116de | guiastrennec/xpose | R/plot_residuals.R | #' Residuals plotted against population predictions
#'
#' @description Model residuals plotted against population predictions (PRED).
#'
#' The residuals can be one of:
#' \itemize{
#' \item RES: model residuals
#' \item WRES: weighted model residuals
#' \item CWRES: conditional weighted model residuals
#' \item EWRES/ECWRES: Monte Carlo based model residuals
#' \item NPDE: Normalized prediction distribution error
#' }
#'
#' @inheritParams dv_vs_pred
#' @param res Type of residual to be used. Default is "CWRES".
#' @inheritSection xplot_scatter Layers mapping
#' @inheritSection xplot_scatter Faceting
#' @inheritSection xplot_scatter Template titles
#' @seealso \code{\link{xplot_scatter}}
#' @examples
#' # Standard residual
#' res_vs_pred(xpdb_ex_pk, res = c('IWRES', 'CWRES'))
#'
#' # Absolute value of the residuals
#' absval_res_vs_pred(xpdb_ex_pk, res = 'CWRES')
#'
#' @export
res_vs_pred <- function(xpdb,
mapping = NULL,
res = 'CWRES',
group = 'ID',
type = 'pls',
title = '@y vs. @x | @run',
subtitle = 'Ofv: @ofv',
caption = '@dir',
tag = NULL,
log = NULL,
guide = TRUE,
facets,
.problem,
quiet,
...) {
# Check input
check_xpdb(xpdb, check = 'data')
if (missing(.problem)) .problem <- default_plot_problem(xpdb)
check_problem(.problem, .subprob = NULL, .method = NULL)
if (missing(quiet)) quiet <- xpdb$options$quiet
if (length(res) > 1) {
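    # several residual types requested: reshape to long format and facet on 'variable'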
if (missing(facets)) facets <- add_facet_var(facets = xpdb$xp_theme$facets,
variable = 'variable')
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet),
tidy = TRUE, value_col = res)
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'pred')$col,
y = 'value'), mapping)
} else {
if (missing(facets)) facets <- xpdb$xp_theme$facets
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet))
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'pred')$col,
y = toupper(res)), mapping)
}
xplot_scatter(xpdb = xpdb, group = group, quiet = quiet,
opt = opt, mapping = vars,
type = type, guide = guide, facets = facets,
xscale = check_scales('x', log),
yscale = check_scales('y', log),
title = title, subtitle = subtitle, caption = caption,
tag = tag, plot_name = as.character(match.call()[[1]]),
guide_slope = 0, ...)
}
#' @rdname res_vs_pred
#' @export
absval_res_vs_pred <- function(xpdb,
mapping = NULL,
res = 'CWRES',
group = 'ID',
type = 'pls',
title = '@y vs. @x | @run',
subtitle = 'Ofv: @ofv',
caption = '@dir',
tag = NULL,
log = NULL,
guide = FALSE,
facets,
.problem,
quiet,
...) {
# Check input
check_xpdb(xpdb, check = 'data')
if (missing(.problem)) .problem <- default_plot_problem(xpdb)
check_problem(.problem, .subprob = NULL, .method = NULL)
if (missing(quiet)) quiet <- xpdb$options$quiet
if (length(res) > 1) {
if (missing(facets)) facets <- add_facet_var(facets = xpdb$xp_theme$facets,
variable = 'variable')
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet),
tidy = TRUE, value_col = res)
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'pred')$col,
y = 'abs(value)'), mapping)
} else {
if (missing(facets)) facets <- xpdb$xp_theme$facets
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet))
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'pred')$col,
y = stringr::str_c('abs(', toupper(res), ')')), mapping)
}
xplot_scatter(xpdb = xpdb, group = group, quiet = quiet,
opt = opt, mapping = vars,
type = type, guide = guide, facets = facets,
xscale = check_scales('x', log),
yscale = check_scales('y', log),
title = title, subtitle = subtitle, caption = caption,
tag = tag, plot_name = as.character(match.call()[[1]]),
guide_slope = 0, ...)
}
#' Residuals plotted against the independent variable
#'
#' @description Model residuals plotted against the independent variable (IDV).
#'
#' The residuals can be one of:
#' \itemize{
#' \item RES: model residuals
#' \item WRES: weighted model residuals
#' \item CWRES: conditional weighted model residuals
#' \item EWRES/ECWRES: Monte Carlo based model residuals
#' \item NPDE: Normalized prediction distribution error
#' }
#'
#' @inheritParams dv_vs_pred
#' @param res Type of residual to be used. Default is "CWRES".
#' @inheritSection xplot_scatter Layers mapping
#' @inheritSection xplot_scatter Template titles
#' @seealso \code{\link{xplot_scatter}}
#' @examples
#' # Standard residual
#' res_vs_idv(xpdb_ex_pk, res = c('IWRES', 'CWRES'))
#'
#' # Absolute value of the residuals
#' absval_res_vs_idv(xpdb_ex_pk, res = 'CWRES')
#'
#' @export
res_vs_idv <- function(xpdb,
mapping = NULL,
res = 'CWRES',
group = 'ID',
type = 'pls',
title = '@y vs. @x | @run',
subtitle = 'Ofv: @ofv',
caption = '@dir',
tag = NULL,
log = NULL,
guide = TRUE,
facets,
.problem,
quiet,
...) {
# Check input
check_xpdb(xpdb, check = 'data')
if (missing(.problem)) .problem <- default_plot_problem(xpdb)
check_problem(.problem, .subprob = NULL, .method = NULL)
if (missing(quiet)) quiet <- xpdb$options$quiet
if (length(res) > 1) {
if (missing(facets)) facets <- add_facet_var(facets = xpdb$xp_theme$facets,
variable = 'variable')
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet),
tidy = TRUE, value_col = res)
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'idv')$col,
y = 'value'), mapping)
} else {
if (missing(facets)) facets <- xpdb$xp_theme$facets
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet))
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'idv')$col,
y = toupper(res)), mapping)
}
xplot_scatter(xpdb = xpdb, group = group, quiet = quiet,
opt = opt, mapping = vars,
type = type, guide = guide, facets = facets,
xscale = check_scales('x', log),
yscale = check_scales('y', log),
title = title, subtitle = subtitle, caption = caption,
tag = tag, plot_name = as.character(match.call()[[1]]),
guide_slope = 0, ...)
}
#' @rdname res_vs_idv
#' @export
absval_res_vs_idv <- function(xpdb,
mapping = NULL,
res = 'CWRES',
group = 'ID',
type = 'pls',
title = '@y vs. @x | @run',
subtitle = 'Ofv: @ofv',
caption = '@dir',
tag = NULL,
log = NULL,
guide = FALSE,
facets,
.problem,
quiet,
...) {
# Check input
check_xpdb(xpdb, check = 'data')
if (missing(.problem)) .problem <- default_plot_problem(xpdb)
check_problem(.problem, .subprob = NULL, .method = NULL)
if (missing(quiet)) quiet <- xpdb$options$quiet
if (length(res) > 1) {
if (missing(facets)) facets <- add_facet_var(facets = xpdb$xp_theme$facets,
variable = 'variable')
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet),
tidy = TRUE, value_col = res)
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'idv')$col,
y = 'abs(value)'), mapping)
} else {
if (missing(facets)) facets <- xpdb$xp_theme$facets
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet))
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'idv')$col,
y = stringr::str_c('abs(', toupper(res), ')')), mapping)
}
xplot_scatter(xpdb = xpdb, group = group, quiet = quiet,
opt = opt, mapping = vars,
type = type, guide = guide, facets = facets,
xscale = check_scales('x', log),
yscale = check_scales('y', log),
title = title, subtitle = subtitle, caption = caption,
tag = tag, plot_name = as.character(match.call()[[1]]),
guide_slope = 0, ...)
}
| 10,260 | lgpl-3.0 |
71c5bb1c16e31e50b5a38baa1e64dc6ed53116de | guiastrennec/ggxpose | R/plot_residuals.R | #' Residuals plotted against population predictions
#'
#' @description Model residuals plotted against population predictions (PRED).
#'
#' The residuals can be one of:
#' \itemize{
#' \item RES: model residuals
#' \item WRES: weighted model residuals
#' \item CWRES: conditional weighted model residuals
#' \item EWRES/ECWRES: Monte Carlo based model residuals
#' \item NPDE: Normalized prediction distribution error
#' }
#'
#' @inheritParams dv_vs_pred
#' @param res Type of residual to be used. Default is "CWRES".
#' @inheritSection xplot_scatter Layers mapping
#' @inheritSection xplot_scatter Faceting
#' @inheritSection xplot_scatter Template titles
#' @seealso \code{\link{xplot_scatter}}
#' @examples
#' # Standard residual
#' res_vs_pred(xpdb_ex_pk, res = c('IWRES', 'CWRES'))
#'
#' # Absolute value of the residuals
#' absval_res_vs_pred(xpdb_ex_pk, res = 'CWRES')
#'
#' @export
res_vs_pred <- function(xpdb,
mapping = NULL,
res = 'CWRES',
group = 'ID',
type = 'pls',
title = '@y vs. @x | @run',
subtitle = 'Ofv: @ofv',
caption = '@dir',
tag = NULL,
log = NULL,
guide = TRUE,
facets,
.problem,
quiet,
...) {
# Check input
check_xpdb(xpdb, check = 'data')
if (missing(.problem)) .problem <- default_plot_problem(xpdb)
check_problem(.problem, .subprob = NULL, .method = NULL)
if (missing(quiet)) quiet <- xpdb$options$quiet
if (length(res) > 1) {
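    # several residual types requested: reshape to long format and facet on 'variable'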
if (missing(facets)) facets <- add_facet_var(facets = xpdb$xp_theme$facets,
variable = 'variable')
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet),
tidy = TRUE, value_col = res)
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'pred')$col,
y = 'value'), mapping)
} else {
if (missing(facets)) facets <- xpdb$xp_theme$facets
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet))
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'pred')$col,
y = toupper(res)), mapping)
}
xplot_scatter(xpdb = xpdb, group = group, quiet = quiet,
opt = opt, mapping = vars,
type = type, guide = guide, facets = facets,
xscale = check_scales('x', log),
yscale = check_scales('y', log),
title = title, subtitle = subtitle, caption = caption,
tag = tag, plot_name = as.character(match.call()[[1]]),
guide_slope = 0, ...)
}
#' @rdname res_vs_pred
#' @export
absval_res_vs_pred <- function(xpdb,
mapping = NULL,
res = 'CWRES',
group = 'ID',
type = 'pls',
title = '@y vs. @x | @run',
subtitle = 'Ofv: @ofv',
caption = '@dir',
tag = NULL,
log = NULL,
guide = FALSE,
facets,
.problem,
quiet,
...) {
# Check input
check_xpdb(xpdb, check = 'data')
if (missing(.problem)) .problem <- default_plot_problem(xpdb)
check_problem(.problem, .subprob = NULL, .method = NULL)
if (missing(quiet)) quiet <- xpdb$options$quiet
if (length(res) > 1) {
if (missing(facets)) facets <- add_facet_var(facets = xpdb$xp_theme$facets,
variable = 'variable')
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet),
tidy = TRUE, value_col = res)
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'pred')$col,
y = 'abs(value)'), mapping)
} else {
if (missing(facets)) facets <- xpdb$xp_theme$facets
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet))
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'pred')$col,
y = stringr::str_c('abs(', toupper(res), ')')), mapping)
}
xplot_scatter(xpdb = xpdb, group = group, quiet = quiet,
opt = opt, mapping = vars,
type = type, guide = guide, facets = facets,
xscale = check_scales('x', log),
yscale = check_scales('y', log),
title = title, subtitle = subtitle, caption = caption,
tag = tag, plot_name = as.character(match.call()[[1]]),
guide_slope = 0, ...)
}
#' Residuals plotted against the independent variable
#'
#' @description Model residuals plotted against the independent variable (IDV).
#'
#' The residuals can be one of:
#' \itemize{
#' \item RES: model residuals
#' \item WRES: weighted model residuals
#' \item CWRES: conditional weighted model residuals
#' \item EWRES/ECWRES: Monte Carlo based model residuals
#' \item NPDE: Normalized prediction distribution error
#' }
#'
#' @inheritParams dv_vs_pred
#' @param res Type of residual to be used. Default is "CWRES".
#' @inheritSection xplot_scatter Layers mapping
#' @inheritSection xplot_scatter Template titles
#' @seealso \code{\link{xplot_scatter}}
#' @examples
#' # Standard residual
#' res_vs_idv(xpdb_ex_pk, res = c('IWRES', 'CWRES'))
#'
#' # Absolute value of the residuals
#' absval_res_vs_idv(xpdb_ex_pk, res = 'CWRES')
#'
#' @export
res_vs_idv <- function(xpdb,
mapping = NULL,
res = 'CWRES',
group = 'ID',
type = 'pls',
title = '@y vs. @x | @run',
subtitle = 'Ofv: @ofv',
caption = '@dir',
tag = NULL,
log = NULL,
guide = TRUE,
facets,
.problem,
quiet,
...) {
# Check input
check_xpdb(xpdb, check = 'data')
if (missing(.problem)) .problem <- default_plot_problem(xpdb)
check_problem(.problem, .subprob = NULL, .method = NULL)
if (missing(quiet)) quiet <- xpdb$options$quiet
if (length(res) > 1) {
if (missing(facets)) facets <- add_facet_var(facets = xpdb$xp_theme$facets,
variable = 'variable')
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet),
tidy = TRUE, value_col = res)
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'idv')$col,
y = 'value'), mapping)
} else {
if (missing(facets)) facets <- xpdb$xp_theme$facets
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet))
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'idv')$col,
y = toupper(res)), mapping)
}
xplot_scatter(xpdb = xpdb, group = group, quiet = quiet,
opt = opt, mapping = vars,
type = type, guide = guide, facets = facets,
xscale = check_scales('x', log),
yscale = check_scales('y', log),
title = title, subtitle = subtitle, caption = caption,
tag = tag, plot_name = as.character(match.call()[[1]]),
guide_slope = 0, ...)
}
#' @rdname res_vs_idv
#' @export
absval_res_vs_idv <- function(xpdb,
mapping = NULL,
res = 'CWRES',
group = 'ID',
type = 'pls',
title = '@y vs. @x | @run',
subtitle = 'Ofv: @ofv',
caption = '@dir',
tag = NULL,
log = NULL,
guide = FALSE,
facets,
.problem,
quiet,
...) {
# Check input
check_xpdb(xpdb, check = 'data')
if (missing(.problem)) .problem <- default_plot_problem(xpdb)
check_problem(.problem, .subprob = NULL, .method = NULL)
if (missing(quiet)) quiet <- xpdb$options$quiet
if (length(res) > 1) {
if (missing(facets)) facets <- add_facet_var(facets = xpdb$xp_theme$facets,
variable = 'variable')
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet),
tidy = TRUE, value_col = res)
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'idv')$col,
y = 'abs(value)'), mapping)
} else {
if (missing(facets)) facets <- xpdb$xp_theme$facets
opt <- data_opt(.problem = .problem,
filter = only_obs(xpdb, .problem, quiet))
vars <- aes_c(aes_string(x = xp_var(xpdb, .problem, type = 'idv')$col,
y = stringr::str_c('abs(', toupper(res), ')')), mapping)
}
xplot_scatter(xpdb = xpdb, group = group, quiet = quiet,
opt = opt, mapping = vars,
type = type, guide = guide, facets = facets,
xscale = check_scales('x', log),
yscale = check_scales('y', log),
title = title, subtitle = subtitle, caption = caption,
tag = tag, plot_name = as.character(match.call()[[1]]),
guide_slope = 0, ...)
}
| 10,260 | gpl-3.0 |
34a4a870fd7a95ac605c7b072f6fbd031e1341a1 | magrai/URBAN-MV-VIE_UniBw | fun/computeActivityRates.R |
# Objective ---------------------------------------------------------------
## Compute glance rates for each direction in two versions
## 1) As percentage of first glances
## 2) As percentage of overall glances
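##
## A minimal usage sketch, kept commented so sourcing stays side-effect free.
## The data frame and the column/level names below are assumptions for
## illustration only (they are not taken from this repository):
## rates <- computeActivityRates(dat = glance_data,
##                               col_name_act_level = "glance_direction",
##                               col_name_am = "am_m",
##                               col_name_case_id = "subject_id",
##                               unique_level = "ahead")
## head(rates$dat_full_1st)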
computeActivityRates <- function(dat,
col_name_act_level,
col_name_am,
col_name_case_id,
col_name_time = "time_s",
unique_level) {
outputFunProc(R)
## Replace missing values with 0
dat[is.na(dat)] <- 0
## Detect number of cases with available glance data
cases_n <-
dat %>%
#filter_(paste("!is.na(", col_name_act_level, ")")) %>%
distinct_(col_name_case_id) %>%
summarise(n = n()) %>%
pull(n)
# Create template for data completion -------------------------------------
template <-
dat %>%
select_(col_name_am) %>%
distinct() %>%
arrange_(col_name_am)
# Create activity ids -----------------------------------------------------
dat <-
dat %>%
select_(col_name_am,
col_name_time,
col_name_case_id,
col_name_act_level) %>%
## Order by case and time
## Create row numbers over complete data frame
arrange_(col_name_case_id, col_name_time) %>%
mutate(row_nr = row_number()) %>%
## Create row numbers per case and activity level
## Create id for current glance sequence
group_by_(col_name_case_id, col_name_act_level) %>%
mutate(act_row_nr = row_number()) %>%
## Create activity id
mutate(act_id = (row_nr - act_row_nr)) %>%
mutate(row_nr = NULL,
act_row_nr = NULL)
act_n <- length(unique(dat$act_id))
  ## In case of uncertainty, run the following lines to verify the uniqueness of the ids
## View(dat %>%
## group_by(subject_id, act_id) %>%
## summarise(n_unique_act_ids = length(unique(act_id))))
# Computations related to longitudinal reference --------------------------
dat_am <-
dat %>%
## Minimum AM for each activity
group_by(act_id) %>%
mutate_(.dots = setNames(list(interp(~ min(v), v = as.name(col_name_am))),
"am_min_PER_act_id")) %>%
## Minimum AM for each activity per case and activity level
group_by_(col_name_case_id, col_name_act_level) %>%
mutate(am_min_PER_case_id_and_act_level = min(am_min_PER_act_id)) %>%
## Minimum AM for each activity level
group_by_(col_name_act_level) %>%
mutate(am_min_PER_act_level = min(am_min_PER_act_id)) %>%
## Summarise by case and activity (only one row per activity id)
group_by_(col_name_case_id, "act_id", col_name_act_level) %>%
summarise(am_min_PER_act_id = min(am_min_PER_act_id),
am_min_PER_case_id_and_act_level = min(am_min_PER_case_id_and_act_level),
am_min_PER_act_level = min(am_min_PER_act_level)) %>%
arrange_(col_name_act_level, "am_min_PER_act_id")
# Enumerate and count all activities --------------------------------------
dat_n <-
dat_am %>%
    ## Enumerate and count activities per case
group_by_(col_name_case_id) %>%
arrange_(col_name_case_id, "am_min_PER_act_id") %>%
mutate(act_nr_PER_case_id = row_number(),
act_n_PER_case_id = length(unique(act_id))) %>%
    ## Enumerate and count activities per case and activity level
group_by_(col_name_case_id, col_name_act_level) %>%
mutate(act_nr_PER_case_id_and_act_level = row_number(),
act_n_PER_case_id_and_act_level = length(unique(act_id))) %>%
    ## Enumerate and count activities per activity level
group_by_(col_name_act_level) %>%
arrange_(col_name_act_level, "am_min_PER_act_id") %>%
mutate(act_nr_PER_act_level = row_number(),
act_n_PER_act_level = length(unique(act_id)))
# Enumerate and count first activities ------------------------------------
dat_n_1st <-
dat_n %>%
## Filter for first activity per case and activity level
filter(act_nr_PER_case_id_and_act_level == 1) %>%
## Enumerate and count first activities by activity level
group_by_(col_name_act_level) %>%
arrange_(col_name_act_level, "am_min_PER_act_id") %>%
mutate(act_nr_PER_act_level_1st = row_number(),
act_n_PER_act_level_1st = n())
# Compute ratio for all activities ----------------------------------------
dat_n_ratio <-
dat_n %>% mutate(
## Rate of activity number per case
## ... on total activities per case
act_nr_PER_case_id_ON_act_n_PER_case_id =
act_nr_PER_case_id / act_n_PER_case_id * 100,
## Rate of activity number per case and activity level
## ... on total activities per case
act_nr_PER_case_id_and_act_level_ON_act_n_PER_case_id =
act_nr_PER_case_id_and_act_level / act_n_PER_case_id * 100,
## Rate of activity number per case and activity level
## ... on total activities per activity level
act_nr_PER_case_id_and_act_level_ON_act_n_PER_act_level =
act_nr_PER_case_id_and_act_level / act_n_PER_act_level * 100,
## Rate of activity number per case and activity level
## ... on total activities
act_nr_PER_case_id_and_act_level_ON_act_n =
act_nr_PER_case_id_and_act_level / act_n * 100,
## Rate of activity number per case
## ... on total activities
act_nr_PER_case_id_ON_act_n =
act_nr_PER_case_id / act_n * 100,
## Rate of activity number per activity level
## ... on total activities per activity level
act_nr_PER_act_level_ON_act_n_PER_act_level =
act_nr_PER_act_level / act_n_PER_act_level * 100,
## Rate of activity number per activity level
## on total activities
act_nr_PER_act_level_ON_act_n =
act_nr_PER_act_level / act_n * 100)
# Compute ratio for first activities --------------------------------------
  ## Results won't add up to 100 % for each activity level,
  ## ... as not all cases necessarily show these activity levels
dat_n_1st_ratio <-
dat_n_1st %>% mutate(
## Rate of 1st activity number per activity level
## ... on total 1st activities per activity level
act_nr_PER_act_level_1st_ON_act_n_PER_act_level_1st =
act_nr_PER_act_level_1st / act_n_PER_act_level_1st * 100,
## Rate of 1st activity number per activity level
## ... on total activities per activity level
act_nr_PER_act_level_1st_ON_act_n_PER_act_level =
act_nr_PER_act_level_1st / act_n_PER_act_level * 100,
## Rate of 1st activity number per activity level
## ... on total activities
act_nr_PER_act_level_1st_ON_act_n =
act_nr_PER_act_level_1st / act_n * 100,
## Rate of 1st activity number per activity level
## ... on total cases
act_nr_PER_act_level_1st_ON_cases_n =
act_nr_PER_act_level_1st / cases_n * 100
)
# LOCF over longitudinal reference for all activities ---------------------
dat_full <-
extendActivityRatesOnFullData(
dat_n_ratio,
col_name_am = col_name_am,
col_name_ref_related = "am_min_PER_act_id",
col_name_act_level = col_name_act_level,
col_names_ratio_related = grep("ON", names(dat_n_ratio), value = T),
dat_long_ref = template,
unique_level
) %>%
## Summarize per AM to maximum
group_by_(col_name_am) %>%
summarise_all(max)
# LOCF over longitudinal reference for first activities -------------------
dat_full_1st <-
extendActivityRatesOnFullData(
dat_n_1st_ratio,
col_name_am = col_name_am,
col_name_ref_related = "am_min_PER_case_id_and_act_level",
col_name_act_level = col_name_act_level,
col_names_ratio_related = grep("ON", names(dat_n_1st_ratio), value = T),
dat_long_ref = template,
unique_level
) %>%
## Summarize per AM to maximum
group_by_(col_name_am) %>%
summarise_all(max)
# Return list of data frames ----------------------------------------------
dat_return <- list(dat = data.frame(dat),
dat_am = data.frame(dat_am),
dat_n = data.frame(dat_n),
dat_n_1st = data.frame(dat_n_1st),
dat_n_ratio = data.frame(dat_n_ratio),
dat_n_1st_ratio = data.frame(dat_n_1st_ratio),
dat_full = data.frame(dat_full),
dat_full_1st = data.frame(dat_full_1st))
outputDone()
return(dat_return)
}
| 8,837 | gpl-3.0 |
69dc476ae6309c2f6c8cfc9ca510157ed1ba3ad1 | eddelbuettel/nanotime | tests/simpleTests.R |
library(nanotime)
z <- RcppCCTZ:::parseDouble("1970-01-01T00:00:00.000000001+00:00")
cat("z is: ")
print(z)
x <- nanotime("1970-01-01T00:00:00.000000001+00:00")
cat("x is: ")
print(x)
format(x)
cat("x+1 is: ")
x <- x + 1
print(x)
format(x)
cat("y is: ")
y <- nanotime(z)
print(y)
#print(class(y))
format(y)
cat("y+1 is: ")
y <- y + 1
print(y)
format(y)
print(x == y)
od <- getOption("digits.secs")
options("digits.secs"=6)
as.POSIXct(x)
as.POSIXct(x+1000)
as.POSIXlt(x)
as.POSIXlt(x+1000)
as.Date(x)
options("digits.secs"=od)
y <- nanotime(1L) # integer, may dispatch via nanotime.numeric
print(y)
y <- nanotime(1) # numeric
print(y)
## v <- nanotime:::nanotime.default(1) # forced call, gets imprecise value
## print(v)
options("nanotimeFormat"="%Y-%m-%d %H:%M:%S")
format(x <- nanotime("1970-01-01 00:00:00"))
options("nanotimeFormat"="%Y-%m-%d %H:%M:%E*S")
format(x <- nanotime("1970-01-01 00:00:00.123456789"))
options("nanotimeFormat"="%Y-%m-%d %H:%M:%E*S%Ez") # default
cat("Done\n")
| 1,016 | gpl-2.0 |
b10d1a211c120106c80fdad3d262cc257e4753eb | environmentalinformatics-marburg/Rsenal | R/roc.R | #' Calculation of ROC and AUC
#'
#' @param pred An object of type "RasterLayer" containing the probability that each pixel belongs to the class of investigation.
#' @param obs An object of type "RasterLayer" containing 1 and 0 values with 1 indicating that a pixel belongs to the class of investigation.
#' @param mask An object of type "RasterLayer" containing 1 and 0 values with 0 indicating that the pixel cannot be classified into the class of investigation.
#' @param plot Logical. Indicates whether a plot of the ROC curve should be drawn or not
#' @param th The number of thresholds used to calculate the ROC-curve
#'
#' @details
#' The probability map is ranked in descending order. For each threshold, the corresponding percentage of pixels in the ranked probability map is classified as 1; the pixels with lower probability are classified as 0. This classification is then compared to the observed classification, and the false positives and true positives are calculated.
#' The AUC is the integral of a natural spline interpolation. An AUC of 0.5 indicates a ROC-curve associated with a random classification.
#' This function might be used to compare the performance of different models, to assess the threshold-independent performance of a model, or to find the best threshold for your model.
#'
#' @return
#' A list whose first element is the calculated AUC and whose second element is a matrix containing, for each threshold, the percentage of pixels classified as 1, the true positive rate and the false positive rate.
#'
#' @author
#' Hanna Meyer
#'
#' @note
#' The number of pixels is divided by the number of thresholds. If the number of pixels cannot be divided equally by the number of thresholds, the number of thresholds is adjusted accordingly. So don't get confused if the result has one row more or less than the specified number of thresholds.
#'
#' @seealso
#' For further functions related to model validation see \code{\link{ctab}} and \code{\link{kstat}}
#'
#' @examples
#' #### Example 1: Calculate the ROC curve from a model of the growth of the town "Marburg".
#' library(raster)
#' library(rgdal)
#'
#' #load data
#' #Use a probability map assuming high potential for city expansion is just
#' #resulting from proximity to current urban area:
#' pred <- raster(system.file("probability.rst", package="Rsenal"))
#' #observed city growth between 1990 and 2006
#' obs <- raster(system.file("citygrowth.tif", package="Rsenal"))
#' #masking current urban area since these pixels have no potential for change
#' mask <- raster(system.file("citymask.tif", package="Rsenal"))
#'
#' #plot to get an impression:
#' par(mfrow=c(1,3))
#' plot(pred,main="Probability for urban expansion")
#' plot(obs,main="Urban expansion from 1990 to 2006")
#' plot(mask,main="Mask: Urban area 1990")
#'
#' #calculate ROC
#' roc(pred,obs,mask,th=25)
#'
#' @aliases roc
#' @export roc
roc=function(pred,obs,mask=NA,plot=TRUE,th=100) {
if (!inherits(pred, "RasterLayer") || !inherits(obs, "RasterLayer")){
stop ("Input data must be of type 'RasterLayer'")
}
if (inherits(mask, "RasterLayer")){
pred <- pred*mask
}
va <- values(pred)
vb <- values(obs)
pixel <- length(va)
vasort <- sort(va,decreasing=TRUE,index.return=TRUE)
vbsort <- vb[vasort$ix]
vasort <- vasort$x
th <- th-2 #adjust the number of thresholds since 0 and the number of pixels (=max. threshold) are added
result <- matrix(nrow=length(c(0,seq(round(pixel/th,0),pixel,by=round(pixel/th,0)),pixel)),ncol=3)
for (i in c(0,seq(round(pixel/th,0),pixel,by=round(pixel/th,0)),pixel)){
vasort[] <- 0
vasort[1:i] <- 1 #classify the probability map according to current threshold
A <- sum(vasort==1&vbsort==1) #calculate contingency table
B <- sum(vasort==1&vbsort==0)
C <- sum(vasort==0&vbsort==1)
D <- sum(vasort==0&vbsort==0)
#calculate the false positive rate and the true positive rate
result[which(c(0,seq(round(pixel/th,0),pixel,by=round(pixel/th,0)),pixel)==i),1] <- i/pixel
result[which(c(0,seq(round(pixel/th,0),pixel,by=round(pixel/th,0)),pixel)==i),3] <- A/(A+C)
result[which(c(0,seq(round(pixel/th,0),pixel,by=round(pixel/th,0)),pixel)==i),2] <- B/(B+D)
}
AUC <- integrate(splinefun(result[,2],result[,3],method="natural"),0,1)$value
if (plot==TRUE){
plot(result[,2],result[,3],type="l",xlab="False positive rate",ylab="True positive rate",xlim=c(0,1),ylim=c(0,1))
lines(c(0,1),c(0,1),col="grey50")
legend("topleft",legend=paste("AUC = ",round(AUC,3)),bty="n")
}
colnames(result) <- c("threshold","falsePositives","truePositives")
result2 <- list()
result2[[1]] <- AUC
result2[[2]] <- result
return (result2)
} | 4,668 | gpl-3.0 |
e89793f1135a32879f08c483eb5de406cfc88fc2 | keithschulze/supr | tests/testthat/test-localk-multi.R | context("Local multitype Ripley's K")
test_that("the mean value of cross-type local K values should be the same as Kcross for a given r", {
kc <- spatstat::Kcross(spatstat::amacrine, correction="isotropic", r=c(0, 0.05, 0.1, 0.15))
lkc0.05 <- local_k_cross(spatstat::amacrine, correction="isotropic", rvalue=0.05, verbose=FALSE)
lkc0.1 <- local_k_cross(spatstat::amacrine, correction="isotropic", rvalue=0.1, verbose=FALSE)
lkc0.15 <- local_k_cross(spatstat::amacrine, correction="isotropic", rvalue=0.15, verbose=FALSE)
expect_equal(mean(lkc0.05), kc$iso[2])
expect_equal(mean(lkc0.1), kc$iso[3])
expect_equal(mean(lkc0.15), kc$iso[4])
})
| 656 | mit |
f7b3e2e05f8ab250e5f20a1abb8df06b8c6e0b85 | dcgerard/vicar | tests/testthat/test_ash.R | library(vicar)
context("ASH Wrappers")
test_that("ash_ruv4 works", {
set.seed(21)
n <- 11
p <- 113
k <- 2
q <- 3
pi_vals <- c(0.5, 0.3, 0.2)
sd_seq <- c(0, 1, 2)
X <- cbind(rep(1, n), sample(c(0, 1), size = n, replace = TRUE))
beta <- matrix(NA, nrow = k, ncol = p)
beta[1, ] <- stats::rnorm(p)
beta[2, ] <- rmixnorm(n = p, pi_vals = pi_vals, sd_seq = sd_seq)
alpha <- matrix(stats::rnorm(q * p), ncol = p)
Z <- matrix(stats::rnorm(n * q), nrow = n)
sig_diag <- stats::rchisq(p, 5) / 5
E <- matrix(rnorm(n * p), nrow = n) %*% diag(sqrt(sig_diag))
Y <- X %*% beta + Z %*% alpha + E
which_null <- beta[2, ] == 0
ctl <- which_null
ncontrol <- 31
ctl[ctl][sample(1:sum(ctl), size = sum(ctl) - ncontrol)] <- FALSE
vout <- vruv4(Y = Y, X = X, ctl = ctl, k = q, likelihood = "normal")
vashout <- ash_ruv4(Y = Y, X = X, ctl = ctl, k = q, likelihood = "normal")
expect_equal(vout, vashout$ruv4)
## had a bug where t likelihood and k not specified gave error
vout <- vruv4(Y = Y, X = X, ctl = ctl, likelihood = "t")
vout <- ash_ruv4(Y = Y, X = X, ctl = ctl, likelihood = "t")
}
)
| 1,182 | gpl-3.0 |
ba7f54862c519e204f06e99f3ee8b06c8fff3741 | citiususc/voila | demo/ornstein.R | library("voila")
# simulate Ornstein-Uhlenbeck time series ---------------------------------
h = 0.001
set.seed(1234)
drift = "-x"
diffusion = "sqrt(1.5)"
x = simulate_sde(drift, diffusion, samplingPeriod = 0.001, tsLength = 20000)
plot.ts(x, ylab = "x(t)", xlab = "Time t", main = "Ornstein-Uhlenbeck process")
# do inference ----------------------------------------------------------
m = 10
uncertainty = 5
targetIndex = 1
inputDim = 1
driftLengthScale = 1
diffLengthScale = 1.5
epsilon = 1e-4
relTol = 1e-6
xm = matrix(seq(min(x), max(x), len = m), ncol = 1)
diffParams = select_diffusion_parameters(x, h, priorOnSd = uncertainty)
v = diffParams$v
# create the kernels through the 'new' function: note that the resulting
# objects are C++ pointers
driftKer = new(exp_kernel, inputDim, uncertainty, driftLengthScale, epsilon)
diffKer = new(exp_kernel, inputDim, diffParams$kernelAmplitude, diffLengthScale, epsilon)
# create other kernels using 'sde_kernel' interface. The resulting
# objects hide the use of C++ kernels
driftKer2 = sde_kernel("clamped_exp_lin_kernel",
list('maxAmplitude' = uncertainty,
'linAmplitude' = uncertainty/ 3 / max((x - median(x)) ^ 2),
'linCenter' = median(x),
'lengthScales' = driftLengthScale),
inputDim, epsilon)
diffKer2 = sde_kernel("exp_const_kernel",
list('maxAmplitude' = diffParams$kernelAmplitude,
'expAmplitude' = diffParams$kernelAmplitude * 1e-5,
'lengthScales' = diffLengthScale),
inputDim, epsilon)
# perform the inference using the different kernels
inference1 = sde_vi(targetIndex, x, h, xm, driftKer, diffKer,
v, 10, relTol = relTol)
inference2 = sde_vi(targetIndex, x, h, xm, driftKer2, diffKer2,
v, 10, relTol = relTol)
# check results -----------------------------------------------------------
# check convergence
oldPar = par(mfrow = c(2,1))
plot(inference1$likelihoodLowerBound[-1],
main = "Lower Bound (Estimate 1)", ylab = "L", xlab = "Iteration")
plot(inference2$likelihoodLowerBound[-1],
main = "Lower Bound (Estimate 2)", ylab = "L", xlab = "Iteration")
par(oldPar)
# get predictions for plotting
predictionSupport = matrix(seq(quantile(x,0.05), quantile(x,0.95), len = 100),
ncol = 1)
driftPred = predict(inference1$drift, predictionSupport)
diffPred = predict(inference1$diff, predictionSupport, log = TRUE)
driftPred2 = predict(inference2$drift, predictionSupport)
diffPred2 = predict(inference2$diff, predictionSupport, log = TRUE)
# plot drift
realDrift = eval(parse(text = drift), list(x = predictionSupport))
plot(predictionSupport, realDrift,
ylim = range(c(realDrift, driftPred$qs, driftPred2$qs)), type = "l",
main = "Drift")
lines(driftPred, col = 2, lty = 2)
lines(driftPred2, col = 3, lty = 3)
legend("topright", lty = 1:3, col = 1:3,
legend = c("Real", "Estimate 1", "Estimate 2"), bty = "n")
# plot diff
realDiff = eval(parse(text = diffusion), list(x = predictionSupport)) ^ 2
if (length(realDiff) == 1) {
realDiff = rep(realDiff, length(predictionSupport))
}
plot(predictionSupport, realDiff,
ylim = range(c(realDiff, diffPred$qs, diffPred2$qs)) * c(1, 1.05),
type = "l", main = "Diffusion")
lines(diffPred, col = 2, lty = 2)
lines(diffPred2, col = 3, lty = 3)
legend("topright", lty = 1:3, col = 1:3,
legend = c("Real", "Estimate 1", "Estimate 2"), bty = "n")
| 3,606 | gpl-3.0 |
caf19087e19219c9ae2dc7c2f1242d73fafe9944 | jefferis/nat.examples | 07-insectbraindb/01-download-neurons.R | ## This script assumes that you have run the file "07-insectbraindb/00-setup.R"
## Functions to talk to insectbraindb.org, a site primarily curated by Prof. Stanley Heinze, are wrapped up in the package neuromorphr
## So let's use that
## What neurons does insectbraindb.org host?
available.neurons = insectbraindb_neuron_info()
## Let's just download all of the neurons in the database to play with;
## there are not very many:
nrow(available.neurons)
## First, we call the read neurons function, with ids set to NULL
insect.neurons = insectbraindb_read_neurons(ids = NULL)
## Hmm, let's see how many neurons we have per species
table(insect.neurons[,"common_name"])
## So, there are a good number of Monarch Butterfly reconstructions,
## maybe let's just have those
butterfly.neurons = subset(insect.neurons, common_name == "Monarch Butterfly")
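## As a quick sanity check we could visualise them in 3D. This line is only a
## sketch: it assumes the nat package attached in 00-setup.R provides plot3d()
## for neuronlists:
## plot3d(butterfly.neurons, soma = TRUE)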
| 861 | mit |
86432a8c5ab3285ee12fa606aefa1e4a25fa1102 | mmuscarella/StarvationTraits | analyses/tests/GrowthCurveTest.R | ################################################################################
# #
# Test Script for Analysis of Exponential Growth Curve Data #
# This uses ModifiedGomp.R Version 2.0 #
# Written By: Mario Muscarella #
# Last Update: 29 Jan 2015 #
# #
# Use this file to analyze Synergy MX Growth Curve data #
# #
################################################################################
setwd("~/GitHub/StarvationTraits/")
rm(list=ls())
# Import the functions from the source files
source("./bin/GrowthCurveInteractiveRegression.R")
source("./bin/ModifiedGomp.R")
# Create Directory For Output
dir.create("./data/GrowthCurves/output", showWarnings = FALSE)
################################################################################
# Example ######################################################################
################################################################################
# Run Example with Test Data
growth.modGomp("./data/GrowthCurves/GrowthCurveExample.txt", "test", skip=31)
| 1,429 | gpl-3.0 |
653696c813f91dc586ae31cdac73bc430ae093c7 | JoshuaSlocum/model-log | R/generate_entry.R | # Header ------------------------------------------------------------------
# Created: 1/25/2016
# Author: Joshua Slocum
# Purpose: Combine results to make entry
#' Combine results from helper functions to create an entry for the model
#'
#' @param object The fitted model object to be logged
#' @return Named list of values to be used as a line entry in the model log.
#' @export
#'
#'
generate_entry <- function(object){
# Time Stamp Entry ----------
# For simplicity, assume the model was fitted when the log was entered
# Precise times are, so far, unimportant for this
log_ts <- as.character(Sys.time())
# Model Info -----------------
model_info <- parse_model(object)
# User Info ------------------
user_info <- get_user()
# Create Output -------------
model_entry <- list(
"model_info" = model_info
,"user_info" = user_info
,"time_entered" = log_ts
)
return(model_entry)
}
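# Usage Sketch ---------------------------------------------------------------
# A hypothetical call, kept commented; it assumes parse_model() can handle the
# fitted model's class, and the lm() fit below is illustrative only:
# fit <- lm(mpg ~ wt, data = mtcars)
# entry <- generate_entry(fit)
# str(entry)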
| 951 | mit |
3fb1d9c1a0a65557146796daff0ad8e65a5ec253 | gersteinlab/exceRpt | mergePipelineRuns_functions.R | ##########################################################################################
## ##
## Functions to combine pipeline runs for individual samples into something more useful ##
## ##
## Author: Rob Kitchen (r.r.kitchen@gmail.com) ##
## ##
## Version 4.6.3 (2016-10-08) ##
## ##
##########################################################################################
##
## Main function to read and plot exceRpt output in a given directory
##
processSamplesInDir = function(data.dir, output.dir=data.dir, scriptDir="~/Dropbox/Work/YALE/exRNA/exceRpt"){
## Look for samples to merge
printMessage(c("Searching for valid exceRpt pipeline output in ",data.dir))
samplePathList = unique(SearchForSampleData(data.dir,""))
## get sample names and remove duplicates:
sampleIDs = sapply(samplePathList, function(path){ tmp=unlist(strsplit(path,"/")); tmp[length(tmp)] })
samplePathList = samplePathList[!duplicated(sampleIDs)]
## -- Kill script if we do not have any samples to process
NumberOfCompatibleSamples = length(samplePathList)
stopifnot(NumberOfCompatibleSamples > 0)
printMessage(c("Found ",NumberOfCompatibleSamples," valid samples"))
## reads, normalises, and saves individual sample results
sampleIDs = readData(samplePathList, output.dir)
## do we have sample groups?
sampleGroups = data.frame(sampleID=sampleIDs, sampleGroup=rep("noGroup",length(sampleIDs)),stringsAsFactors=F)
if("exceRpt_sampleGroupDefinitions.txt" %in% dir(output.dir)){
## read new groups
groups.tmp = read.table(paste(output.dir,"/exceRpt_sampleGroupDefinitions.txt",sep=""), stringsAsFactors=F,header=T)
## remove samples that are not present in this sample set
groups.tmp = groups.tmp[groups.tmp$sampleID %in% sampleGroups$sampleID, ]
## apply new sample groups to these samples
sampleGroups[match(groups.tmp$sampleID, sampleGroups$sampleID), ]$sampleGroup = groups.tmp$sampleGroup
## write the table back in case there are unassigned / new samples
write.table(sampleGroups, file=paste(output.dir,"/exceRpt_sampleGroupDefinitions.txt",sep=""), sep="\t",row.names=F,col.names=T,quote=F)
}else{
# if not, write a template
write.table(sampleGroups, file=paste(output.dir,"/exceRpt_sampleGroupDefinitions.txt",sep=""), sep="\t",row.names=F,col.names=T,quote=F)
}
## if there's only one sample group, don't bother
if(length(unique(sampleGroups$sampleGroup))==1){
sampleGroups = NA
}
## plot the data
#PlotData(sampleIDs, output.dir, taxonomyPath=paste(scriptDir,"/NCBI_Taxonomy.RData",sep=""), sampleGroups)
PlotData(sampleIDs, output.dir, sampleGroups)
## output warnings
w = warnings()
if(!is.null(w)){
printMessage("Warning messages:")
print(w)
}
}
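##
## A minimal usage sketch (the paths are illustrative assumptions; scriptDir
## should point at the directory holding this script):
## source("mergePipelineRuns_functions.R")
## processSamplesInDir(data.dir = "~/exceRpt_runs",
##                     output.dir = "~/exceRpt_runs/merged",
##                     scriptDir = "~/exceRpt")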
##
## check dependencies
##
#baseURL = "https://cran.us.r-project.org"
baseURL = "https://cran.r-project.org"
if(!"plyr" %in% rownames(installed.packages())) { install.packages("plyr",repos=baseURL) }
if(!"gplots" %in% rownames(installed.packages())) { install.packages("gplots",repos=baseURL) }
if(!"marray" %in% rownames(installed.packages())) { source("http://bioconductor.org/biocLite.R"); biocLite("marray",ask=F) }
if(!"reshape2" %in% rownames(installed.packages())) { install.packages("reshape2",repos=baseURL) }
if(!"ggplot2" %in% rownames(installed.packages())) { install.packages("ggplot2",repos=baseURL) }
if(!"tools" %in% rownames(installed.packages())) { install.packages("tools",repos=baseURL) }
if(!"Rgraphviz" %in% rownames(installed.packages())) { source("http://bioconductor.org/biocLite.R"); biocLite("Rgraphviz",ask=F) }
if(!"scales" %in% rownames(installed.packages())) { install.packages("scales",repos=baseURL) }
## update
update.packages(repos=baseURL,ask=F)
## load
require(plyr)
require(gplots)
require(marray)
require(reshape2)
require(ggplot2)
require(tools)
require(Rgraphviz)
require(scales)
##
## Function to recursively search a given directory for pipeline output
##
SearchForSampleData = function(base.dir, directory=""){
to.return = NULL
dir.use = paste(base.dir, directory, sep = "/")
subdirs = dir(dir.use)
if(length(subdirs) > 0){
i.stats = grep("\\.stats$", subdirs, perl=T)
i.zip = grep("\\.zip$", subdirs, perl=T)
i.tar = grep("\\.tgz$|\\.tar.gz$", subdirs, perl=T)
## handle decompressed pipeline output
if(length(i.stats) > 0){
tmp = gsub("\\.stats$","",subdirs[i.stats])
to.return = c(to.return, paste(dir.use,tmp,sep="/"))
}
## handle zipped pipeline output
if(length(i.zip) > 0){
for(x in i.zip){
tmp.contents = unzip(paste(dir.use,subdirs[x],sep="/"), list=T)[,1]
if(length(grep("\\.stats$", tmp.contents, perl=T)) > 0){
try(unzip(paste(dir.use,subdirs[x],sep="/"), exdir=gsub("\\.zip","",file_path_as_absolute(paste(dir.use,subdirs[x],sep="/"))), overwrite=FALSE), silent=T)
to.return = c(to.return, paste(dir.use,gsub("\\.zip$","",subdirs[x]),sep="/",gsub("\\.stats$","",tmp.contents[grep("\\.stats$", tmp.contents)])))
}
}
}
## handle [tar] gzipped pipeline output
if(length(i.tar) > 0){
for(x in i.tar){
tmp.dir = paste(dir.use,subdirs[x],sep="/")
tmp.contents = untar(tmp.dir, list=T, tar="tar")
if(length(grep("\\.stats$", tmp.contents, perl=T)) > 0){
try(untar(tmp.dir, exdir=gsub("\\.tgz$|\\.tar.gz$","",file_path_as_absolute(tmp.dir)), tar="tar"), silent=T)
to.return = c(to.return, paste(dir.use, gsub("\\.tgz$|\\.tar.gz$","",subdirs[x]), gsub("\\.stats$","",tmp.contents[grep("\\.stats$", tmp.contents)]),sep="/"))
}
}
}
## handle unknown directories
i.known = c(i.stats,i.zip,i.tar)
if(length(i.known) == 0){ # there are no .stats, .zip, or .tgz/.tar.gz files in the directory!
i.unknown = 1:length(subdirs)
}else{
i.unknown = (1:length(subdirs))[-i.known]
}
if(length(i.unknown) > 0){
for(x in subdirs[i.unknown]){
to.return = c(to.return, SearchForSampleData(dir.use, x))
}
}
return(unique(to.return))
}
}
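##
## Illustrative call (the path is an assumption): returns the path prefix of
## every sample for which a "<sample>.stats" file was found, unpacking any
## .zip or .tgz/.tar.gz archives encountered along the way:
## samplePaths <- SearchForSampleData("~/exceRpt_runs", "")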
##
## Prints the given message with a timestamp
##
printMessage = function(message=""){
cat(as.character(Sys.time()),": ",paste(message,sep=""),"\n",sep="")
}
##
## Plots a taxonomy tree with a given set of weights
##
plotTree = function(rEG, taxonomyInfo, counts_uniq, counts_cum, title="", what){
## node parameters
nNodes = length(nodes(rEG))
nA <- list()
nA$shape = rep("circle",nNodes)
nA$fixedSize<-rep(FALSE, nNodes)
nA$height <- nA$width <- rescale(sqrt(counts_cum/10), to=c(0.25,7))
nA$color <- rep(rgb(0,0,0,0.25),nNodes)
nA$style <- rep("bold", nNodes)
if(what == "exogenousRibosomal"){
nA$fillcolor <- sapply(counts_uniq*10, function(val){ if(val>100){val=100}; rgb(100-val,100,100-val,maxColorValue=100)})
}else{
nA$fillcolor <- sapply(counts_uniq*10, function(val){ if(val>100){val=100}; rgb(100-val,100-val,100,maxColorValue=100)})
}
newNodeIDs = sapply(taxonomyInfo[match(as.numeric(nodes(rEG)), taxonomyInfo$ID), ]$name, function(id){ newID=unlist(strsplit(id," ")); if(length(newID) == 1){id}else{paste(newID[1], "\n", paste(newID[-1],collapse=" "), sep="") }})
nA$label <- paste(newNodeIDs,"\n",round(counts_cum*10)/10,"%",sep="")
nA <- lapply(nA, function(x) { names(x) <- nodes(rEG); x})
## edge parameters
eA <- list(arrowsize=rep(0.1,length(names(rEG@edgeData))), arrowhead=rep("none",length(names(rEG@edgeData))))
eA <- lapply(eA, function(x) { names(x) <- names(rEG@edgeData); x})
## layout the graph
tmp = layoutGraph(rEG, nodeAttrs=nA, edgeAttrs=eA)
## hack to make sure the node labels are visible!
sizes = rescale(tmp@renderInfo@nodes$rWidth, to=c(0.2,1.5))
names(sizes) = nodes(rEG)
nodeRenderInfo(tmp) <- list(cex=sizes)
graphRenderInfo(tmp) <- list(main=title)
## plot the graph
renderGraph(tmp)
}
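##
## Toy sketch of the expected inputs (all values and names below are made up,
## and the call is kept commented): a small directed graphNEL plus a
## taxonomyInfo frame whose ID and name columns match the node labels:
## g <- new("graphNEL", nodes = c("1", "2"), edgemode = "directed")
## g <- addEdge("1", "2", g, 1)
## taxInfo <- data.frame(ID = c(1, 2), name = c("root", "Primates"))
## plotTree(g, taxInfo, counts_uniq = c(10, 90), counts_cum = c(100, 90),
##          title = "toy example", what = "exogenousGenomes")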
#
# ##
# ##
# ##
# getIntermediateNodeIDs = function(nodeID, edges, intermediates){
# parentID = edges[edges$tax_id == nodeID, ]$parent_tax_id
# if(nodeID == parentID){
# # this is root, add it just in case
# intermediates = c(intermediates, parentID)
# }
#
# if(parentID %in% intermediates | nodeID == parentID){
# return(intermediates)
# }else{
# return(getIntermediateNodeIDs(parentID, edges, c(intermediates, parentID)))
# }
# }
##
## Plot exogenous genomes
##
plotExogenousTaxonomyTrees = function(counts, cumcounts, what, output.dir, taxonomyInfo, fontScale=2, sampleGroups=NA, minPercent=0.5){
# counts = exprs.exogenousGenomes_specific
# cumcounts = exprs.exogenousGenomes_cumulative
# taxonomyInfo = taxonomyInfo.exogenous_genomes
#
# counts = exprs.exogenousRibosomal_specific
# cumcounts = exprs.exogenousRibosomal_cumulative
# taxonomyInfo = taxonomyInfo.exogenous_rRNA
## add direct count to the cumulative counts matrix
cumcounts = cumcounts+counts
#counts.norm = t(t(counts*100)/colSums(counts))
counts.norm = apply(counts, 2, function(col){ col*100/sum(col) })
cumcounts.norm = apply(cumcounts, 2, function(col){ col*100/col[1] })
dim(counts)
  ## remove nodes with < minPercent % of all reads (0.5 % by default)
#minPercent = 1
keepRows = which(apply(counts.norm, 1, max) >= minPercent)
keepRows = sort(unique(c(keepRows, which(apply(cumcounts.norm, 1, max) >= minPercent))))
# use only paths through the tree that capture above a certain fraction of reads
counts = counts[keepRows, , drop=F]
cumcounts = cumcounts[keepRows, , drop=F]
nrow(counts)
#data_uniq = counts.norm[keepRows, , drop=F]
#data_cum = cumcounts.norm[keepRows, , drop=F]
#nrow(data_cum)
## Re-scale the node percentages after trimming branches to make the numbers make more sense - shouldn't make much diff to the cumcounts
data_uniq = apply(counts, 2, function(col){ col*100/sum(col) })
data_cum = apply(cumcounts, 2, function(col){ col*100/col[1] })
#if("significantDEX" %in% names(combinedSamples)){
# significant = combinedSamples$significantDEX[keepRows]
# foldChange = combinedSamples$foldChange[keepRows]
#}
## remove edges with no useable counts (based on minPercent threshold)
taxonomyInfo = taxonomyInfo[taxonomyInfo$ID %in% rownames(data_cum), ]
## Build the graph object
rEG <<- new("graphNEL", nodes=as.character(taxonomyInfo$ID), edgemode="directed")
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
apply(taxonomyInfo[-1,], 1, function(row){
from = trim(as.character(row[4]));
if(from %in% taxonomyInfo$ID){ rEG <<- addEdge(trim(as.character(row[4])), trim(as.character(row[3])), rEG, 1) }
NULL })
data_uniq = data_uniq[match(taxonomyInfo$ID, rownames(data_uniq)), , drop=F]
data_cum = data_cum[match(taxonomyInfo$ID, rownames(data_cum)), , drop=F]
data_uniq[is.na(data_uniq)] = 0
data_cum[is.na(data_cum)] = 0
##
## Write to PDF
##
## plot an average tree over all samples
printMessage(c("Plotting a taxonomy tree based on the average of all samples "))
pdf(file=paste(output.dir,"/exceRpt_",what,"_TaxonomyTrees_aggregateSamples.pdf",sep=""),height=7,width=15)
plotTree(rEG, taxonomyInfo, apply(data_uniq, 1, max), rowMeans(data_cum), what=what)
dev.off()
## plot samples individually
printMessage(c("Plotting a separate taxonomy tree for each sample"))
pdf(file=paste(output.dir,"/exceRpt_",what,"_TaxonomyTrees_perSample.pdf",sep=""), height=7, width=15)
for(i in 1:ncol(data_uniq))
plotTree(rEG, taxonomyInfo, data_uniq[,i], data_cum[,i], title=paste(colnames(data_uniq)[i]," (total reads: ",cumcounts[1,i],")", sep=""), what=what)
dev.off()
## if there are groups of samples
if(is.data.frame(sampleGroups)){
printMessage(c("Plotting a separate taxonomy tree for each sample-group"))
pdf(file=paste(output.dir,"/exceRpt_",what,"_TaxonomyTrees_perGroup.pdf",sep=""), height=7, width=15)
for(thisgroup in levels(as.factor(sampleGroups$sampleGroup))){
tmpDat_uniq = rowMeans(data_uniq[, match(sampleGroups[sampleGroups$sampleGroup %in% thisgroup, ]$sampleID, colnames(data_uniq)), drop=F])
tmpDat_cum = rowMeans(data_cum[, match(sampleGroups[sampleGroups$sampleGroup %in% thisgroup, ]$sampleID, colnames(data_cum)), drop=F])
plotTree(rEG, taxonomyInfo, tmpDat_uniq, tmpDat_cum, title=paste(thisgroup,sep=""), what=what)
}
dev.off()
}
}
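##
## Illustrative call using objects created by readData() below; the output
## directory is an assumption:
## plotExogenousTaxonomyTrees(counts = exprs.exogenousGenomes_specific,
##                            cumcounts = exprs.exogenousGenomes_cumulative,
##                            what = "exogenousGenomes", output.dir = ".",
##                            taxonomyInfo = taxonomyInfo.exogenous_genomes)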
##
## Read each sample's pipeline output and assemble the combined expression matrices, mapping stats, and QC summaries
##
readData = function(samplePathList, output.dir){
##
## Create objects to contain the data
##
sample.data = vector(mode="list",length=length(samplePathList))
allIDs.calibrator = NULL
allIDs.miRNA = NULL
allIDs.tRNA = NULL
allIDs.piRNA = NULL
allIDs.gencode = NULL
allIDs.circularRNA = NULL
allIDs.exogenous_miRNA = NULL
allIDs.exogenous_rRNA = NULL
allIDs.exogenous_genomes = NULL
taxonomyInfo.exogenous_rRNA = NULL
taxonomyInfo.exogenous_genomes = NULL
mapping.stats = matrix(0,nrow=length(samplePathList),ncol=30, dimnames=list(1:length(samplePathList), c("input","successfully_clipped","failed_quality_filter","failed_homopolymer_filter","calibrator","UniVec_contaminants","rRNA","reads_used_for_alignment","genome","miRNA_sense","miRNA_antisense","miRNAprecursor_sense","miRNAprecursor_antisense","tRNA_sense","tRNA_antisense","piRNA_sense","piRNA_antisense","gencode_sense","gencode_antisense","circularRNA_sense","circularRNA_antisense","not_mapped_to_genome_or_libs","repetitiveElements","endogenous_gapped","input_to_exogenous_miRNA","exogenous_miRNA","input_to_exogenous_rRNA","exogenous_rRNA","input_to_exogenous_genomes","exogenous_genomes")))
qc.results = matrix(0,nrow=length(samplePathList),ncol=5, dimnames=list(1:length(samplePathList), c("InputReads","GenomeReads","TranscriptomeReads","TranscriptomeGenomeRatio","TranscriptomeComplexity")))
maxReadLength = 10000
read.lengths = matrix(0,nrow=length(samplePathList),ncol=maxReadLength+1,dimnames=list(1:length(samplePathList), 0:maxReadLength))
##
## Loop through all samples and read the pipeline output
##
printMessage(c("Reading sample data..."))
removeSamples = NULL
for(i in 1:length(samplePathList)){
## Parse the sampleID from the path:
tmp = unlist(strsplit(samplePathList[i], "/"))
thisSampleID = tmp[length(tmp)]
## Get timings and check this sample finished successfully
tmp.stats = read.table(paste(samplePathList[i],".stats",sep=""), stringsAsFactors=F, fill=T, header=F, sep="\t",skip=0,comment.char="")
x.start = grep("#STATS",tmp.stats[,1])
x.end = grep("#END OF STATS",tmp.stats[,1])
if(length(x.start) > 0 && length(x.end) > 0){
tmp.start = strptime(unlist(strsplit(tmp.stats[x.start[1],1],"Run started at "))[2],"%Y-%m-%d--%H:%M:%S")
tmp.end = strptime(unlist(strsplit(tmp.stats[x.end[1],1],"Run completed at "))[2],"%Y-%m-%d--%H:%M:%S")
runTiming = data.frame(start=tmp.start, completed=tmp.end, duration=difftime(tmp.end,tmp.start), duration_secs=as.numeric(difftime(tmp.end,tmp.start,units="secs")))
continue = T
}else{
continue = F
removeSamples = c(removeSamples, i)
printMessage(c("[",i,"/",length(samplePathList),"] WARNING: Incomplete run for sample \'",thisSampleID,"\', ignoring"))
}
if(continue == T){
##
## Read sample mapping stats
##
tmp.stats = read.table(paste(samplePathList[i],".stats",sep=""), stringsAsFactors=F, fill=T, header=T, sep="\t",skip=0)
tmp.stats[tmp.stats[,1] %in% "clipped", 1] = "successfully_clipped"
#mapping.stats[i, match(tmp.stats[,1], colnames(mapping.stats))] = as.numeric(tmp.stats[,2])
mapping.stats[i, match(tmp.stats[,1], colnames(mapping.stats))] = as.numeric(tmp.stats[,2])
rownames(mapping.stats)[i] = thisSampleID
##
## Read the QC result
##
adapterConfidence = NA
qcOutcome = NA
if(file.exists(paste(samplePathList[i],".qcResult",sep=""))){
tmp.qc = read.table(paste(samplePathList[i],".qcResult",sep=""), stringsAsFactors=F, fill=T, header=F, sep=" ",skip=0)
if(tmp.qc[1,1] == "Adapter_confidence:"){
adapterConfidence = tmp.qc[1,2]
tmp.qc = tmp.qc[-1,]
}
qcOutcome = tmp.qc[1,2]
qc.results[i, match(gsub(":$","",tmp.qc[-1,1]), colnames(qc.results))] = as.numeric(tmp.qc[-1,2])
#qc.results[i, ] = as.numeric(tmp.qc[-1,2])
rownames(qc.results)[i] = thisSampleID
}
##
## Read the adapter sequence
##
if(paste(thisSampleID,".adapterSeq",sep="") %in% dir(samplePathList[i])){
tmp.seq = try(read.table(paste(samplePathList[i],"/",thisSampleID,".adapterSeq",sep="")), silent=T)
if(class(tmp.seq) == "try-error"){
adapterSeq = NA
}else{
adapterSeq = as.character(tmp.seq[1,1])
}
}
##
## Read the calibrator counts, if available
##
calibratorCounts = NULL
if(paste(thisSampleID,".clipped.trimmed.filtered.calibratormapped.counts",sep="") %in% dir(samplePathList[i])){
calibratorCounts = try(read.table(paste(samplePathList[i],"/",thisSampleID,".clipped.trimmed.filtered.calibratormapped.counts",sep=""), stringsAsFactors=F)[,2:1], silent=T)
if(class(calibratorCounts) == "try-error"){
calibratorCounts = NULL
}else{
colnames(calibratorCounts) = c("calibratorID","readCount")
}
}
##
## Read the clipped read lengths
##
if(length(grep(".readLengths.txt$", dir(samplePathList[i]))) == 1){
tmp = read.table(paste(samplePathList[i], dir(samplePathList[i])[grep(".readLengths.txt$", dir(samplePathList[i]))], sep="/"))
read.lengths[i, 1:ncol(tmp)] = as.numeric(tmp[2,])
rownames(read.lengths)[i] = thisSampleID
}
##
## Read sample data
##
availableFiles = dir(samplePathList[i])
miRNA_sense=miRNA_antisense = tRNA_sense=tRNA_antisense = piRNA_sense=piRNA_antisense = gencode_sense=gencode_antisense = circRNA_sense=circRNA_antisense = NULL
if("readCounts_miRNAmature_sense.txt" %in% availableFiles){
miRNA_sense = read.table(paste(samplePathList[i],"readCounts_miRNAmature_sense.txt",sep="/"), header=T, sep="\t", comment.char="", stringsAsFactors=F,colClasses=c("character","numeric","numeric","numeric","numeric"), row.names=1)
miRNA_sense = cbind(miRNA_sense, ID=sapply(rownames(miRNA_sense), function(id){ multiID = unlist(strsplit(id,"\\|")); multiIDs = sapply(multiID, function(idPart){unlist(strsplit(idPart,":"))[1]}); if(length(multiIDs) == 1){ multiIDs }else{ paste(sort(multiIDs),collapse="|") } }))
}
if("readCounts_miRNAmature_antisense.txt" %in% availableFiles){
miRNA_antisense = read.table(paste(samplePathList[i],"readCounts_miRNAmature_antisense.txt",sep="/"), header=T, sep="\t", comment.char="", stringsAsFactors=F,colClasses=c("character","numeric","numeric","numeric","numeric"), row.names=1)
miRNA_antisense = cbind(miRNA_antisense, ID=sapply(rownames(miRNA_antisense), function(id){ multiID = unlist(strsplit(id,"\\|")); multiIDs = sapply(multiID, function(idPart){unlist(strsplit(idPart,":"))[1]}); if(length(multiIDs) == 1){ multiIDs }else{ paste(sort(multiIDs),collapse="|") } }))
}
if("readCounts_tRNA_sense.txt" %in% availableFiles){
tmp = read.table(paste(samplePathList[i],"readCounts_tRNA_sense.txt",sep="/"), header=T, sep="\t", comment.char="", stringsAsFactors=F,colClasses=c("character","numeric","numeric","numeric","numeric"), row.names=1)
tmp = cbind(tmp, ID=sapply(rownames(tmp), function(id){ unlist(strsplit(id,"-"))[2] }))
tRNA_sense = ddply(tmp, "ID", function(mat){ c(as.numeric(mat[1,1:2]),sum(mat$multimapAdjustedReadCount),sum(mat$multimapAdjustedBarcodeCount)) })
colnames(tRNA_sense)[-1] = colnames(tmp)[1:4]
tRNA_sense = tRNA_sense[order(tRNA_sense$multimapAdjustedReadCount,decreasing=T), ]
}
if("readCounts_tRNA_antisense.txt" %in% availableFiles){
tmp = read.table(paste(samplePathList[i],"readCounts_tRNA_antisense.txt",sep="/"), header=T, sep="\t", comment.char="", stringsAsFactors=F,colClasses=c("character","numeric","numeric","numeric","numeric"), row.names=1)
tmp = cbind(tmp, ID=sapply(rownames(tmp), function(id){ unlist(strsplit(id,"-"))[2] }))
tRNA_antisense = ddply(tmp, "ID", function(mat){ c(as.numeric(mat[1,1:2]),sum(mat$multimapAdjustedReadCount),sum(mat$multimapAdjustedBarcodeCount)) })
colnames(tRNA_antisense)[-1] = colnames(tmp)[1:4]
tRNA_antisense = tRNA_antisense[order(tRNA_antisense$multimapAdjustedReadCount,decreasing=T), ]
}
if("readCounts_piRNA_sense.txt" %in% availableFiles){
piRNA_sense = read.table(paste(samplePathList[i],"readCounts_piRNA_sense.txt",sep="/"), header=T, sep="\t", comment.char="", stringsAsFactors=F,colClasses=c("character","numeric","numeric","numeric","numeric"), row.names=1)
}
if("readCounts_piRNA_antisense.txt" %in% availableFiles){
piRNA_antisense = read.table(paste(samplePathList[i],"readCounts_piRNA_antisense.txt",sep="/"), header=T, sep="\t", comment.char="", stringsAsFactors=F,colClasses=c("character","numeric","numeric","numeric","numeric"), row.names=1)
}
makeGeneID = function(id){
bits=unlist(strsplit(id,":")); geneNameBits=unlist(strsplit(bits[3],"-"));
geneName = geneNameBits[1]
if(length(geneNameBits) > 2){ geneName=paste(geneNameBits[-length(geneNameBits)],collapse="-") }
paste(geneName,bits[2],sep=":")
}
if("readCounts_gencode_sense.txt" %in% availableFiles){
tmp = read.table(paste(samplePathList[i],"readCounts_gencode_sense.txt",sep="/"), header=T, sep="\t", comment.char="", stringsAsFactors=F,colClasses=c("character","numeric","numeric","numeric","numeric"), row.names=1)
tmp = cbind(tmp, ID=sapply(rownames(tmp), makeGeneID))
gencode_sense = ddply(tmp, "ID", function(mat){ c(as.numeric(mat[1,1:2]),sum(mat$multimapAdjustedReadCount),sum(mat$multimapAdjustedBarcodeCount)) })
colnames(gencode_sense)[-1] = colnames(tmp)[1:4]
gencode_sense = gencode_sense[order(gencode_sense$multimapAdjustedReadCount,decreasing=T), ]
}
if("readCounts_gencode_antisense.txt" %in% availableFiles){
tmp = read.table(paste(samplePathList[i],"readCounts_gencode_antisense.txt",sep="/"), header=T, sep="\t", comment.char="", stringsAsFactors=F,colClasses=c("character","numeric","numeric","numeric","numeric"), row.names=1)
tmp = cbind(tmp, ID=sapply(rownames(tmp), makeGeneID))
gencode_antisense = ddply(tmp, "ID", function(mat){ c(as.numeric(mat[1,1:2]),sum(mat$multimapAdjustedReadCount),sum(mat$multimapAdjustedBarcodeCount)) })
colnames(gencode_antisense)[-1] = colnames(tmp)[1:4]
gencode_antisense = gencode_antisense[order(gencode_antisense$multimapAdjustedReadCount,decreasing=T), ]
}
if("readCounts_circRNA_sense.txt" %in% availableFiles){
circRNA_sense = read.table(paste(samplePathList[i],"readCounts_circRNA_sense.txt",sep="/"), header=T, sep="\t", comment.char="", stringsAsFactors=F,colClasses=c("character","numeric","numeric","numeric","numeric"), row.names=1)
}
if("readCounts_circRNA_antisense.txt" %in% availableFiles){
circRNA_antisense = read.table(paste(samplePathList[i],"readCounts_circRNA_antisense.txt",sep="/"), header=T, sep="\t", comment.char="", stringsAsFactors=F,colClasses=c("character","numeric","numeric","numeric","numeric"), row.names=1)
}
##
## Read exogenous miRNA alignments (if applicable)
##
exogenous_miRNA_sense = NA
exogenous_miRNA_IDs = NULL
if("EXOGENOUS_miRNA" %in% availableFiles){
tmp.dir = paste(samplePathList[i],"EXOGENOUS_miRNA",sep="/")
if("readCounts_miRNAmature_sense.txt" %in% dir(tmp.dir)){
exogenous_miRNA_sense = read.table(paste(tmp.dir,"readCounts_miRNAmature_sense.txt",sep="/"), header=T, sep="\t", comment.char="", stringsAsFactors=F,colClasses=c("character","numeric","numeric","numeric","numeric"), row.names=1)
exogenous_miRNA_IDs = rownames(exogenous_miRNA_sense)
}
if("readCounts_miRNAmature_antisense.txt" %in% dir(tmp.dir)){
exogenous_miRNA_antisense = read.table(paste(tmp.dir,"readCounts_miRNAmature_antisense.txt",sep="/"), header=T, sep="\t", comment.char="", stringsAsFactors=F,colClasses=c("character","numeric","numeric","numeric","numeric"), row.names=1)
}
}
##
## Read exogenous rRNA alignments (if applicable)
##
exogenous_rRNA = NA
exogenous_rRNA_IDs = NULL
if("EXOGENOUS_rRNA" %in% availableFiles){
tmp.dir = paste(samplePathList[i],"EXOGENOUS_rRNA",sep="/")
if("ExogenousRibosomalAlignments.result.taxaAnnotated.txt" %in% dir(tmp.dir)){
exogenous_rRNA = try(read.table(paste(tmp.dir,"/ExogenousRibosomalAlignments.result.taxaAnnotated.txt",sep=""), sep="\t", stringsAsFactors = F, quote="", comment.char="",header=T), silent=T)
if(class(exogenous_rRNA) == "try-error"){
exogenous_rRNA = NULL
}#else{
# colnames(exogenous_rRNA) = c("indent","distFromRoot","level","name","uniqueReads","allSumReads")
#}
          ## remove the 'Bacteria' stick insect (a phasmid genus whose name clashes with the bacterial superkingdom)!
i.toRemove = which(exogenous_rRNA$name == "Bacteria" & exogenous_rRNA$level == "genus")
if(length(i.toRemove) > 0)
exogenous_rRNA = exogenous_rRNA[-i.toRemove, ]
exogenous_rRNA_IDs = exogenous_rRNA$name
taxonomyInfo.exogenous_rRNA = unique(rbind(taxonomyInfo.exogenous_rRNA, exogenous_rRNA[,1:5]))
}
}
##
## Read exogenous genome alignments (if applicable)
##
exogenous_genomes = NA
exogenous_genomes_IDs = NULL
if("EXOGENOUS_genomes" %in% availableFiles){
tmp.dir = paste(samplePathList[i],"EXOGENOUS_genomes",sep="/")
if("ExogenousGenomicAlignments.result.taxaAnnotated.txt" %in% dir(tmp.dir)){
exogenous_genomes = read.table(paste(tmp.dir,"/ExogenousGenomicAlignments.result.taxaAnnotated.txt",sep=""), sep="\t", stringsAsFactors = F, quote="", comment.char="", header=T)
if(class(exogenous_genomes) == "try-error"){
exogenous_genomes = NULL
}#else{
# colnames(exogenous_genomes) = c("indent","distFromRoot","level","name","uniqueReads","allSumReads")
#}
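        ## as above, drop the stick-insect genus 'Bacteria' to avoid a clash
        ## with the bacterial superkingdom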
i.toRemove = which(exogenous_genomes$name == "Bacteria" & exogenous_genomes$level == "genus")
if(length(i.toRemove) > 0)
exogenous_genomes = exogenous_genomes[-i.toRemove, ]
exogenous_genomes_IDs = exogenous_genomes$name
taxonomyInfo.exogenous_genomes = unique(rbind(taxonomyInfo.exogenous_genomes, exogenous_genomes[,1:5]))
}
}
# Update list of detected smallRNA IDs
allIDs.calibrator = unique(c(allIDs.calibrator, as.character(calibratorCounts$calibratorID)))
allIDs.miRNA = unique(c(allIDs.miRNA, as.character(miRNA_sense$ID)))
allIDs.tRNA = unique(c(allIDs.tRNA, as.character(tRNA_sense$ID)))
allIDs.piRNA = unique(c(allIDs.piRNA, rownames(piRNA_sense)))
allIDs.gencode = unique(c(allIDs.gencode, as.character(gencode_sense$ID)))
allIDs.circularRNA = unique(c(allIDs.circularRNA, rownames(circRNA_sense)))
allIDs.exogenous_miRNA = unique(c(allIDs.exogenous_miRNA, exogenous_miRNA_IDs))
allIDs.exogenous_rRNA = unique(c(allIDs.exogenous_rRNA, exogenous_rRNA_IDs))
allIDs.exogenous_genomes = unique(c(allIDs.exogenous_genomes, exogenous_genomes_IDs))
sample.data[[i]] = list("miRNA_sense"=miRNA_sense,"miRNA_antisense"=miRNA_antisense, "tRNA_sense"=tRNA_sense,"tRNA_antisense"=tRNA_antisense, "piRNA_sense"=piRNA_sense,"piRNA_antisense"=piRNA_antisense, "gencode_sense"=gencode_sense,"gencode_antisense"=gencode_antisense, "circRNA_sense"=circRNA_sense,"circRNA_antisense"=circRNA_antisense, "exogenous_miRNA_sense"=exogenous_miRNA_sense, "exogenous_rRNA"=exogenous_rRNA, "exogenous_genomes"=exogenous_genomes, "adapterSeq"=adapterSeq, "adapterConfidence"=adapterConfidence, "qcOutcome"=qcOutcome, "runTiming"=runTiming, "calibratorCounts"=calibratorCounts)
names(sample.data)[i] = thisSampleID
printMessage(c("[",i,"/",length(samplePathList),"] Added sample \'",thisSampleID,"\'"))
}
}
##
## Remove failed/incomplete samples
##
stopifnot(length(removeSamples) < length(sample.data))
if(length(removeSamples) > 0){
read.lengths = read.lengths[-removeSamples, ]
sample.data = sample.data[-removeSamples]
mapping.stats = mapping.stats[-removeSamples,]
qc.results = qc.results[-removeSamples,]
}
##
## Trim read-length matrix
##
read.lengths = read.lengths[,0:(max(as.numeric(colnames(read.lengths[, colSums(read.lengths) > 0, drop=F])))+1), drop=F]
#read.lengths = read.lengths[,colSums(read.lengths) > 0, drop=F]
##
## Collect IDs
##
allIDs = list("calibrator"=allIDs.calibrator, "miRNA_sense"=allIDs.miRNA, "tRNA_sense"=allIDs.tRNA, "piRNA_sense"=allIDs.piRNA, "gencode_sense"=allIDs.gencode, "circRNA_sense"=allIDs.circularRNA, "exogenous_miRNA"=allIDs.exogenous_miRNA, "exogenous_rRNA"=allIDs.exogenous_rRNA, "exogenous_genomes"=allIDs.exogenous_genomes)
##
## Convert sample data to large per-smallRNA expression matrices
##
printMessage("Creating raw read-count matrices for available libraries")
#run.duration = data.frame(runDuration_string=rep("",length(sample.data)), runDuration_secs=rep(0,length(sample.data)),stringsAsFactors = F)
run.duration = data.frame(runDuration_secs=rep(0,length(sample.data)),stringsAsFactors = F)
rownames(run.duration) = names(sample.data)
exprs.calibrator = matrix(0,ncol=length(sample.data),nrow=length(allIDs$calibrator), dimnames=list(allIDs$calibrator, names(sample.data)))
exprs.miRNA = matrix(0,ncol=length(sample.data),nrow=length(allIDs$miRNA_sense), dimnames=list(allIDs$miRNA_sense, names(sample.data)))
exprs.tRNA = matrix(0,ncol=length(sample.data),nrow=length(allIDs$tRNA_sense), dimnames=list(allIDs$tRNA_sense, names(sample.data)))
exprs.piRNA = matrix(0,ncol=length(sample.data),nrow=length(allIDs$piRNA_sense), dimnames=list(allIDs$piRNA_sense, names(sample.data)))
exprs.gencode = matrix(0,ncol=length(sample.data),nrow=length(allIDs$gencode_sense), dimnames=list(allIDs$gencode_sense, names(sample.data)))
exprs.circRNA = matrix(0,ncol=length(sample.data),nrow=length(allIDs$circRNA_sense), dimnames=list(allIDs$circRNA_sense, names(sample.data)))
exprs.exogenous_miRNA = matrix(0,ncol=length(sample.data),nrow=length(allIDs$exogenous_miRNA), dimnames=list(allIDs$exogenous_miRNA, names(sample.data)))
if(is.null(taxonomyInfo.exogenous_rRNA))
tmp.nrow = 0
else
tmp.nrow = nrow(taxonomyInfo.exogenous_rRNA)
exprs.exogenousRibosomal_specific = matrix(0,ncol=length(sample.data),nrow=tmp.nrow, dimnames=list(taxonomyInfo.exogenous_rRNA$ID, names(sample.data)))
exprs.exogenousRibosomal_cumulative = matrix(0,ncol=length(sample.data),nrow=tmp.nrow, dimnames=list(taxonomyInfo.exogenous_rRNA$ID, names(sample.data)))
if(is.null(taxonomyInfo.exogenous_genomes))
tmp.nrow = 0
else
tmp.nrow = nrow(taxonomyInfo.exogenous_genomes)
exprs.exogenousGenomes_specific = matrix(0,ncol=length(sample.data),nrow=tmp.nrow, dimnames=list(taxonomyInfo.exogenous_genomes$ID, names(sample.data)))
exprs.exogenousGenomes_cumulative = matrix(0,ncol=length(sample.data),nrow=tmp.nrow, dimnames=list(taxonomyInfo.exogenous_genomes$ID, names(sample.data)))
for(i in 1:length(sample.data)){
run.duration[i,] = sample.data[[i]]$runTiming[1,4,drop=F]
if(!is.null(nrow(sample.data[[i]]$calibratorCounts)))
exprs.calibrator[match(sample.data[[i]]$calibratorCounts$calibratorID, rownames(exprs.calibrator)),i] = as.numeric(sample.data[[i]]$calibratorCounts$readCount)
exprs.miRNA[match(sample.data[[i]]$miRNA_sense$ID, rownames(exprs.miRNA)),i] = as.numeric(sample.data[[i]]$miRNA_sense$multimapAdjustedReadCount)
exprs.tRNA[match(sample.data[[i]]$tRNA_sense$ID, rownames(exprs.tRNA)),i] = as.numeric(sample.data[[i]]$tRNA_sense$multimapAdjustedReadCount)
exprs.piRNA[match(rownames(sample.data[[i]]$piRNA_sense), rownames(exprs.piRNA)),i] = as.numeric(sample.data[[i]]$piRNA_sense$multimapAdjustedReadCount)
exprs.gencode[match(sample.data[[i]]$gencode_sense$ID, rownames(exprs.gencode)),i] = as.numeric(sample.data[[i]]$gencode_sense$multimapAdjustedReadCount)
exprs.circRNA[match(rownames(sample.data[[i]]$circRNA_sense), rownames(exprs.circRNA)),i] = as.numeric(sample.data[[i]]$circRNA_sense$multimapAdjustedReadCount)
## Exogenous miRNA
if(!is.null(nrow(sample.data[[i]]$exogenous_miRNA)))
exprs.exogenous_miRNA[match(rownames(sample.data[[i]]$exogenous_miRNA), rownames(exprs.exogenous_miRNA)),i] = as.numeric(sample.data[[i]]$exogenous_miRNA$multimapAdjustedReadCount)
## Exogenous rRNA
if(!is.null(nrow(sample.data[[i]]$exogenous_rRNA))){
exprs.exogenousRibosomal_specific[match(sample.data[[i]]$exogenous_rRNA$ID, rownames(exprs.exogenousRibosomal_specific)),i] = as.numeric(sample.data[[i]]$exogenous_rRNA$readCount_direct)
exprs.exogenousRibosomal_cumulative[match(sample.data[[i]]$exogenous_rRNA$ID, rownames(exprs.exogenousRibosomal_cumulative)),i] = as.numeric(sample.data[[i]]$exogenous_rRNA$readCount_inherited)
}
## Exogenous Genomes
if(!is.null(nrow(sample.data[[i]]$exogenous_genomes))){
exprs.exogenousGenomes_specific[match(sample.data[[i]]$exogenous_genomes$ID, rownames(exprs.exogenousGenomes_specific)),i] = as.numeric(sample.data[[i]]$exogenous_genomes$readCount_direct)
exprs.exogenousGenomes_cumulative[match(sample.data[[i]]$exogenous_genomes$ID, rownames(exprs.exogenousGenomes_cumulative)),i] = as.numeric(sample.data[[i]]$exogenous_genomes$readCount_inherited)
}
}
##
## Calculate the total number of mapped reads to the rRNA, genome, and exogenous sequences
##
mapping.stats[is.na(mapping.stats)] = 0
mapping.stats = as.data.frame(mapping.stats)
libSizes = list()
libSizes$input = mapping.stats[,colnames(mapping.stats) %in% c("input")]
libSizes$successfully_clipped = mapping.stats[,colnames(mapping.stats) %in% c("successfully_clipped")]
libSizes$reads_used_for_alignment = mapping.stats[,colnames(mapping.stats) %in% c("reads_used_for_alignment")]
libSizes$all = rowSums(mapping.stats[,colnames(mapping.stats) %in% c("rRNA","genome","miRNA_exogenous_sense")])
libSizes$endogenous = rowSums(mapping.stats[,colnames(mapping.stats) %in% c("rRNA","genome")])
libSizes$genome = mapping.stats[,colnames(mapping.stats) %in% "genome"]
libSizes$smRNA = mapping.stats[,grep("sense",colnames(mapping.stats))]
libSizes$miRNA = colSums(exprs.miRNA)
libSizes$exogenous_miRNA = colSums(exprs.exogenous_miRNA)
libSizes$exogenous_rRNA = exprs.exogenousRibosomal_cumulative[rownames(exprs.exogenousRibosomal_cumulative)=="1",]
libSizes$exogenous_genomes = exprs.exogenousGenomes_cumulative[rownames(exprs.exogenousGenomes_cumulative)=="1",]
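## 'libSizes' now holds per-sample read totals at successive pipeline stages
## (input -> adapter-clipped -> used for alignment) plus per-library mapped totals;
## the exogenous entries take the cumulative count at taxonomy node "1" (the NCBI
## root), i.e. all reads assigned anywhere in the taxonomy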
##
## Save the raw count data
##
printMessage("Saving raw data to disk")
#save(exprs.miRNA, exprs.tRNA, exprs.piRNA, exprs.gencode, exprs.circRNA, exprs.exogenous_miRNA, exprs.exogenous_genomes, mapping.stats, libSizes, read.lengths, file=paste(output.dir, "exceRpt_smallRNAQuants_ReadCounts.RData", sep="/"))
save(exprs.miRNA, exprs.tRNA, exprs.piRNA, exprs.gencode, exprs.circRNA, exprs.exogenous_miRNA,
     exprs.exogenousRibosomal_specific, exprs.exogenousRibosomal_cumulative, taxonomyInfo.exogenous_rRNA,
     exprs.exogenousGenomes_specific, exprs.exogenousGenomes_cumulative, taxonomyInfo.exogenous_genomes,
     mapping.stats, qc.results, libSizes, read.lengths, run.duration, exprs.calibrator,
     file=paste(output.dir, "exceRpt_smallRNAQuants_ReadCounts.RData", sep="/"))
if(nrow(exprs.calibrator) > 0)
write.table(exprs.calibrator, file=paste(output.dir, "exceRpt_CALIBRATOR_ReadCounts.txt", sep="/"), sep="\t", col.names=NA, quote=F)
if(nrow(exprs.miRNA) > 0)
write.table(exprs.miRNA, file=paste(output.dir, "exceRpt_miRNA_ReadCounts.txt", sep="/"), sep="\t", col.names=NA, quote=F)
if(nrow(exprs.tRNA) > 0)
write.table(exprs.tRNA, file=paste(output.dir, "exceRpt_tRNA_ReadCounts.txt", sep="/"), sep="\t", col.names=NA, quote=F)
if(nrow(exprs.piRNA) > 0)
write.table(exprs.piRNA, file=paste(output.dir, "exceRpt_piRNA_ReadCounts.txt", sep="/"), sep="\t", col.names=NA, quote=F)
if(nrow(exprs.gencode) > 0)
write.table(exprs.gencode, file=paste(output.dir, "exceRpt_gencode_ReadCounts.txt", sep="/"), sep="\t", col.names=NA, quote=F)
if(nrow(exprs.circRNA) > 0)
write.table(exprs.circRNA, file=paste(output.dir, "exceRpt_circularRNA_ReadCounts.txt", sep="/"), sep="\t", col.names=NA, quote=F)
if(nrow(exprs.exogenous_miRNA) > 0)
write.table(exprs.exogenous_miRNA, file=paste(output.dir, "exceRpt_exogenous_miRNA_ReadCounts.txt", sep="/"), sep="\t", col.names=NA, quote=F)
if(nrow(exprs.exogenousRibosomal_specific) > 0){
tmp = cbind(taxonomyInfo.exogenous_rRNA[match(rownames(exprs.exogenousRibosomal_specific), taxonomyInfo.exogenous_rRNA$ID), ], exprs.exogenousRibosomal_specific)
write.table(tmp, file=paste(output.dir, "exceRpt_exogenousRibosomal_taxonomySpecific_ReadCounts.txt", sep="/"), sep="\t", row.names=F, quote=F)
tmp = cbind(taxonomyInfo.exogenous_rRNA[match(rownames(exprs.exogenousRibosomal_cumulative), taxonomyInfo.exogenous_rRNA$ID), ], exprs.exogenousRibosomal_cumulative)
write.table(tmp, file=paste(output.dir, "exceRpt_exogenousRibosomal_taxonomyCumulative_ReadCounts.txt", sep="/"), sep="\t", row.names=F, quote=F)
}
if(nrow(exprs.exogenousGenomes_specific) > 0){
tmp = cbind(taxonomyInfo.exogenous_genomes[match(rownames(exprs.exogenousGenomes_specific), taxonomyInfo.exogenous_genomes$ID), ], exprs.exogenousGenomes_specific)
write.table(tmp, file=paste(output.dir, "exceRpt_exogenousGenomes_taxonomySpecific_ReadCounts.txt", sep="/"), sep="\t", row.names=F, quote=F)
tmp = cbind(taxonomyInfo.exogenous_genomes[match(rownames(exprs.exogenousGenomes_cumulative), taxonomyInfo.exogenous_genomes$ID), ], exprs.exogenousGenomes_cumulative)
write.table(tmp, file=paste(output.dir, "exceRpt_exogenousGenomes_taxonomyCumulative_ReadCounts.txt", sep="/"), sep="\t", row.names=F, quote=F)
}
write.table(read.lengths, file=paste(output.dir, "exceRpt_ReadLengths.txt", sep="/"), sep="\t", col.names=NA, quote=F)
##
## Keep a record of the adapter sequences for QC
##
adapterSeq = unlist(lapply(sample.data, function(l){ l$adapterSeq }))
write.table(as.data.frame(adapterSeq), file=paste(output.dir, "exceRpt_adapterSequences.txt", sep="/"), sep="\t", col.names=NA, quote=F)
##
## Write the numbers of reads mapping at each stage and the QC results
##
write.table(mapping.stats, file=paste(output.dir,"exceRpt_readMappingSummary.txt",sep="/"), sep="\t", col.names=NA, quote=F)
write.table(qc.results, file=paste(output.dir,"exceRpt_QCresults.txt",sep="/"), sep="\t", col.names=NA, quote=F)
##
## Calculate reads per million (RPM)
##
printMessage("Normalising to RPM")
#libSize.use = libSizes$all
#libSize.use = libSizes$miRNA
libSize.use = libSizes$genome
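## RPM = 10^6 * count / librarySize; the t(...)/t(...) idiom divides each column
## (sample) by its own library size, e.g. 50 reads in a library of 2 million
## genome-mapped reads -> 25 RPM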
exprs.miRNA.rpm = t(10^6 * t(exprs.miRNA) / libSize.use)
exprs.tRNA.rpm = t(10^6 * t(exprs.tRNA) / libSize.use)
exprs.piRNA.rpm = t(10^6 * t(exprs.piRNA) / libSize.use)
exprs.gencode.rpm = t(10^6 * t(exprs.gencode) / libSize.use)
exprs.circRNA.rpm = t(10^6 * t(exprs.circRNA) / libSize.use)
exprs.exogenous_miRNA.rpm = t(10^6 * t(exprs.exogenous_miRNA) / libSizes$exogenous_miRNA)
exprs.exogenousRibosomal_specific.rpm = exprs.exogenousRibosomal_specific
exprs.exogenousRibosomal_cumulative.rpm = exprs.exogenousRibosomal_cumulative
if(nrow(exprs.exogenousRibosomal_specific) > 0){
exprs.exogenousRibosomal_specific.rpm = t(10^6 * t(exprs.exogenousRibosomal_specific) / libSizes$exogenous_rRNA)
exprs.exogenousRibosomal_cumulative.rpm = t(10^6 * t(exprs.exogenousRibosomal_cumulative) / libSizes$exogenous_rRNA)
}
exprs.exogenousGenomes_specific.rpm = exprs.exogenousGenomes_specific
exprs.exogenousGenomes_cumulative.rpm = exprs.exogenousGenomes_cumulative
if(nrow(exprs.exogenousGenomes_specific) > 0){
exprs.exogenousGenomes_specific.rpm = t(10^6 * t(exprs.exogenousGenomes_specific) / libSizes$exogenous_genomes)
exprs.exogenousGenomes_cumulative.rpm = t(10^6 * t(exprs.exogenousGenomes_cumulative) / libSizes$exogenous_genomes)
}
##
## Save the RPM normalised data
##
printMessage("Saving normalised data to disk")
save(exprs.miRNA.rpm, exprs.tRNA.rpm, exprs.piRNA.rpm, exprs.gencode.rpm, exprs.circRNA.rpm,
     exprs.exogenous_miRNA.rpm, exprs.exogenousRibosomal_specific.rpm, exprs.exogenousRibosomal_cumulative.rpm,
     exprs.exogenousGenomes_specific.rpm, exprs.exogenousGenomes_cumulative.rpm,
     file=paste(output.dir, "exceRpt_smallRNAQuants_ReadsPerMillion.RData", sep="/"))
if(nrow(exprs.miRNA.rpm) > 0)
write.table(exprs.miRNA.rpm, file=paste(output.dir, "exceRpt_miRNA_ReadsPerMillion.txt", sep="/"), sep="\t", col.names=NA, quote=F)
if(nrow(exprs.tRNA.rpm) > 0)
write.table(exprs.tRNA.rpm, file=paste(output.dir, "exceRpt_tRNA_ReadsPerMillion.txt", sep="/"), sep="\t", col.names=NA, quote=F)
if(nrow(exprs.piRNA.rpm) > 0)
write.table(exprs.piRNA.rpm, file=paste(output.dir, "exceRpt_piRNA_ReadsPerMillion.txt", sep="/"), sep="\t", col.names=NA, quote=F)
if(nrow(exprs.gencode.rpm) > 0)
write.table(exprs.gencode.rpm, file=paste(output.dir, "exceRpt_gencode_ReadsPerMillion.txt", sep="/"), sep="\t", col.names=NA, quote=F)
if(nrow(exprs.circRNA.rpm) > 0)
write.table(exprs.circRNA.rpm, file=paste(output.dir, "exceRpt_circularRNA_ReadsPerMillion.txt", sep="/"), sep="\t", col.names=NA, quote=F)
if(nrow(exprs.exogenous_miRNA.rpm) > 0)
write.table(exprs.exogenous_miRNA.rpm, file=paste(output.dir, "exceRpt_exogenous_miRNA_ReadsPerMillion.txt", sep="/"), sep="\t", col.names=NA, quote=F)
if(nrow(exprs.exogenousRibosomal_specific) > 0){
tmp = cbind(taxonomyInfo.exogenous_rRNA[match(rownames(exprs.exogenousRibosomal_specific.rpm), taxonomyInfo.exogenous_rRNA$ID), ], exprs.exogenousRibosomal_specific.rpm)
write.table(tmp, file=paste(output.dir, "exceRpt_exogenousRibosomal_taxonomySpecific_ReadsPerMillion.txt", sep="/"), sep="\t", row.names=F, quote=F)
tmp = cbind(taxonomyInfo.exogenous_rRNA[match(rownames(exprs.exogenousRibosomal_cumulative.rpm), taxonomyInfo.exogenous_rRNA$ID), ], exprs.exogenousRibosomal_cumulative.rpm)
write.table(tmp, file=paste(output.dir, "exceRpt_exogenousRibosomal_taxonomyCumulative_ReadsPerMillion.txt", sep="/"), sep="\t", row.names=F, quote=F)
}
if(nrow(exprs.exogenousGenomes_specific) > 0){
tmp = cbind(taxonomyInfo.exogenous_genomes[match(rownames(exprs.exogenousGenomes_specific.rpm), taxonomyInfo.exogenous_genomes$ID), ], exprs.exogenousGenomes_specific.rpm)
write.table(tmp, file=paste(output.dir, "exceRpt_exogenousGenomes_taxonomySpecific_ReadsPerMillion.txt", sep="/"), sep="\t", row.names=F, quote=F)
tmp = cbind(taxonomyInfo.exogenous_genomes[match(rownames(exprs.exogenousGenomes_cumulative.rpm), taxonomyInfo.exogenous_genomes$ID), ], exprs.exogenousGenomes_cumulative.rpm)
write.table(tmp, file=paste(output.dir, "exceRpt_exogenousGenomes_taxonomyCumulative_ReadsPerMillion.txt", sep="/"), sep="\t", row.names=F, quote=F)
}
return(rownames(mapping.stats))
}
##
##
##
PlotData = function(sampleIDs, output.dir, sampleGroups=NA, minPercent_exogenousRibosomal=0.5, minPercent_exogenousGenomes=0.5){
load(paste(output.dir, "exceRpt_smallRNAQuants_ReadCounts.RData", sep="/"))
load(paste(output.dir, "exceRpt_smallRNAQuants_ReadsPerMillion.RData", sep="/"))
##
## Order samples based on similarity of mapping statistics
##
sampleOrder = 1
if(nrow(mapping.stats) > 1){
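## distance = 1 - Pearson correlation between the samples' mapping-stat profiles;
## h$order then places similarly-behaving samples next to each other in the plots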
h = hclust(dist(1-cor(t(mapping.stats))))
sampleOrder = h$order
}
##
## Open PDF for diagnostic plots
##
printMessage("Creating QC plots")
pdf(paste(output.dir,"exceRpt_DiagnosticPlots.pdf",sep="/"), height=10, width=20)
#tiff(paste(output.dir,"DiagnosticPlots.tiff",sep="/"))
if(ncol(read.lengths) > 1){
printMessage("Plotting read-length distributions")
##
## plot distribution of clipped read lengths - read count
##
tmp = melt(read.lengths); colnames(tmp) = c("sample","length","count")
if(is.data.frame(sampleGroups)){ tmp$sampleGroup = sampleGroups[match(tmp$sample, sampleGroups$sampleID), 2] }
maxX = min(c(100,max(tmp$length)))
p = ggplot(tmp, aes(x=length, y=count, colour=sample)) +geom_line(alpha=0.75) +xlab("read length (nt)") +ylab("# reads") +ggtitle("read-length distributions: raw read count") +scale_x_continuous(limits=c(15,maxX), minor_breaks=1:maxX, breaks=seq(15,maxX,by=5))
if(nrow(read.lengths) > 30){ p = p +guides(colour=FALSE) }
if(is.data.frame(sampleGroups)){ p = p +facet_wrap(~sampleGroup,ncol=1)}
print(p)
#ggplot(tmp, aes(x=as.factor(length), y=count)) +geom_violin()
#ggplot(tmp, aes(x=as.factor(length), y=count)) +geom_boxplot()
##
## plot distribution of clipped read lengths - fraction
##
tmp = melt(t(apply(read.lengths, 1, function(row){ row/sum(row) }))); colnames(tmp) = c("sample","length","fraction")
if(is.data.frame(sampleGroups)){ tmp$sampleGroup = sampleGroups[match(tmp$sample, sampleGroups$sampleID), 2] }
maxX = min(c(100,max(tmp$length)))
p = ggplot(tmp, aes(x=length, y=fraction, colour=sample)) +geom_line(alpha=0.75) +xlab("read length (nt)") +ylab("fraction of reads") +ggtitle("read-length distributions: normalised read fraction") +scale_x_continuous(limits=c(15,maxX), minor_breaks=1:maxX, breaks=seq(15,maxX,by=5))
if(nrow(read.lengths) > 30){ p = p +guides(colour=FALSE) }
if(is.data.frame(sampleGroups)){ p = p +facet_wrap(~sampleGroup,ncol=1)}
print(p)
}
##
## Plot run duration of each sample
##
printMessage("Plotting run-duration")
tmp=melt(as.matrix(run.duration))
colnames(tmp) = c("sampleID","stuff","runDuration_seconds")
tmp = cbind(tmp, category=.bincode(tmp[,3], breaks=c(0,as.numeric(quantile(tmp[,3],probs=c(0.10,0.90,1))))))
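## .bincode bins each sample's run time at the 10th and 90th percentiles;
## bins 1/2/3 are relabelled fast/normal/slow below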
tmp = cbind(tmp, colour=tmp$category)
tmp = cbind(tmp, inputReadCount=mapping.stats$input)
tmp$category[tmp$category == 1] = "fast"
tmp$category[tmp$category == 2] = "normal"
tmp$category[tmp$category == 3] = "slow"
tmp$colour[tmp$colour == 1] = "red"
tmp$colour[tmp$colour == 2] = "green"
tmp$colour[tmp$colour == 3] = "blue"
tmp$runDuration_minutes = tmp$runDuration_seconds/60
tmp$runDuration_hours = tmp$runDuration_minutes/60
p = ggplot(tmp, aes(x=sampleID,y=runDuration_hours,fill=colour)) +geom_bar(stat="identity") +facet_grid(~category,scales="free_x",space="free_x") +guides(fill=FALSE) +theme(axis.text.x=element_text(angle=60, hjust=1.0, vjust=1)) +ggtitle("Duration of exceRpt run for each sample") +ylab("Run duration (hours)")
print(p)
if(is.data.frame(sampleGroups)){ tmp$sampleGroup = sampleGroups[match(tmp$sampleID, sampleGroups$sampleID), 2] }
p = ggplot(tmp, aes(x=inputReadCount,y=runDuration_hours,colour=colour)) +geom_point(size=5) +guides(colour=FALSE) +scale_y_log10(limits=c(0.1,10^ceiling(log10(max(tmp$runDuration_hours)))), breaks=c(0.1,1,10^seq(0:ceiling(log10(max(tmp$runDuration_hours)))))) +scale_x_log10(limits=c(min(c(100000,10^floor(log10(min(tmp$inputReadCount+1))))),10^ceiling(log10(max(tmp$inputReadCount)))), breaks=10^seq(min(c(100000,floor(log10(min(tmp$inputReadCount+1))))),ceiling(log10(max(tmp$inputReadCount))))) +ggtitle("Duration of exceRpt run per sequencing yield") +ylab("Run duration (hours)") +xlab("Total number of reads input")
#if(is.data.frame(sampleGroups)){ p = p +facet_wrap(~sampleGroup,ncol=1)}
print(p)
##
## plot distribution of # mapped reads per sample
##
printMessage("Plotting # mapped reads")
tmp = log10(libSizes$all)
hist(tmp, breaks=seq(0,ceiling(max(tmp)), by=0.1), col="grey", border="white", xlab="log10(# mapped reads)", main="Library size (all mapped reads)", ylab="# samples")
##
## Plot the rRNA contamination
##
#par(mfrow=c(1,2))
#hist((mapping.stats$UniVec_contaminants / libSizes$all), breaks=seq(0,1,by=0.05), col="grey", border="white", xlim=c(0,1), main="UniVec contaminant signal",xlab="fraction contaminant reads",ylab="# samples")
#hist((mapping.stats$rRNA / libSizes$all), breaks=seq(0,1,by=0.05), col="grey", border="white", xlim=c(0,1), main="rRNA signal",xlab="fraction rRNA reads",ylab="# samples")
mapping.stats.orig = mapping.stats
mapping.stats = mapping.stats[,-grep("input_to_",colnames(mapping.stats))]
## remove the exogenous stuff from the stats if this wasn't used in the run
if(sum(mapping.stats[,23:27]) == 0)
mapping.stats = mapping.stats[, -c(23:27)]
##
## Plot heatmap of mapping percentages through the pipeline
##
printMessage("Plotting mapping stats heatmap (1/3)")
toplot = melt(as.matrix(mapping.stats / mapping.stats$input)); colnames(toplot) = c("Sample","Stage","ReadFraction")
toplot$Stage = with(toplot, factor(Stage, levels = rev(levels(Stage))))
toplot$Sample = factor(as.character(toplot$Sample), levels=rownames(mapping.stats)[sampleOrder])
if(is.data.frame(sampleGroups)){ toplot$sampleGroup = sampleGroups[match(toplot$Sample, sampleGroups$sampleID), 2] }
p = ggplot(toplot, aes(x=Sample, y=Stage, group=Sample, fill=ReadFraction, label=sprintf("%1.1f%%",ReadFraction*100))) +geom_tile() +scale_fill_gradient2(low="white",high="yellow",mid="steelblue", midpoint=0.5) +theme(axis.text.x=element_text(angle=40, hjust=1.0, vjust=1)) +ggtitle("fraction aligned reads (normalised by # input reads)")
if(nrow(mapping.stats) < 50){ p = p +geom_text(size=3) }
if(is.data.frame(sampleGroups)){ p = p +facet_grid(~sampleGroup, scales="free_x",space="free_x")}
print(p)
##
## Plot heatmap of mapping percentages through the pipeline
##
printMessage("Plotting mapping stats heatmap (2/3)")
if(max(mapping.stats$successfully_clipped) > 0){
tmp = mapping.stats
i.toFix = which(tmp$successfully_clipped == 0)
if(length(i.toFix) > 0)
tmp$successfully_clipped[i.toFix] = tmp$input[i.toFix]
toplot = melt(as.matrix(tmp / tmp$successfully_clipped)[,-1,drop=F]); colnames(toplot) = c("Sample","Stage","ReadFraction")
toplot$Stage = with(toplot, factor(Stage, levels = rev(levels(Stage))))
toplot$Sample = factor(as.character(toplot$Sample), levels=rownames(mapping.stats)[sampleOrder])
if(is.data.frame(sampleGroups)){ toplot$sampleGroup = sampleGroups[match(toplot$Sample, sampleGroups$sampleID), 2] }
p = ggplot(toplot, aes(x=Sample, y=Stage, group=Sample, fill=ReadFraction, label=sprintf("%1.1f%%",ReadFraction*100))) +geom_tile() +scale_fill_gradient2(low="white",high="yellow",mid="steelblue", midpoint=0.5) +theme(axis.text.x=element_text(angle=40, hjust=1.0, vjust=1)) +ggtitle("fraction aligned reads (normalised by # adapter-clipped reads)")
if(nrow(mapping.stats) < 50){ p = p +geom_text(size=3) }
if(is.data.frame(sampleGroups)){ p = p +facet_grid(~sampleGroup, scales="free_x",space="free_x")}
print(p)
}
##
## Plot heatmap of mapping percentages through the pipeline
##
printMessage("Plotting mapping stats heatmap (3/3)")
toplot = melt(as.matrix(mapping.stats / mapping.stats$reads_used_for_alignment)[,-c(1:7),drop=F]); colnames(toplot) = c("Sample","Stage","ReadFraction")
toplot$Stage = with(toplot, factor(Stage, levels = rev(levels(Stage))))
toplot$Sample = factor(as.character(toplot$Sample), levels=rownames(mapping.stats)[sampleOrder])
if(is.data.frame(sampleGroups)){ toplot$sampleGroup = sampleGroups[match(toplot$Sample, sampleGroups$sampleID), 2] }
p = ggplot(toplot, aes(x=Sample, y=Stage, group=Sample, fill=ReadFraction, label=sprintf("%1.1f%%",ReadFraction*100))) +geom_tile() +scale_fill_gradient2(low="white",high="yellow",mid="steelblue", midpoint=0.5) +theme(axis.text.x=element_text(angle=40, hjust=1.0, vjust=1)) +ggtitle("fraction aligned reads (normalised by # non-contaminant reads)")
if(nrow(mapping.stats) < 50){ p = p +geom_text(size=3) }
if(is.data.frame(sampleGroups)){ p = p +facet_grid(~sampleGroup, scales="free_x",space="free_x")}
print(p)
##
## Plot QC results
##
printMessage("Plotting QC result")
toplot = as.data.frame(qc.results)
toplot$Sample = factor(rownames(toplot), levels=rownames(mapping.stats)[sampleOrder])
p = ggplot(toplot, aes(x=TranscriptomeReads, y=TranscriptomeGenomeRatio))
if(is.data.frame(sampleGroups)){
toplot$sampleGroup = sampleGroups[match(toplot$Sample, sampleGroups$sampleID), 2]
p = ggplot(toplot, aes(x=TranscriptomeReads, y=TranscriptomeGenomeRatio, colour=sampleGroup))
}
minX = floor(log10(min(toplot$TranscriptomeReads)+0.001))
maxX = ceiling(log10(max(toplot$TranscriptomeReads)+0.001))
p = p +scale_x_log10(breaks=10^c(minX:maxX)) +coord_cartesian(xlim=c(10^(minX),10^(maxX)),ylim=c(0,1)) +geom_vline(xintercept=100000,col="red",alpha=0.5) +geom_hline(yintercept=0.5,col="red",alpha=0.5) +annotate("rect",xmin=0,xmax=Inf,ymin=-1,ymax=0.5,alpha=0.2,fill="red") +annotate("rect",xmin=0,xmax=100000,ymin=-1,ymax=1.1,alpha=0.2,fill="red") +ylab("# transcriptome reads / # genome reads") +xlab("# transcriptome reads (log10)") +ggtitle("QC result: overall")
print(p +geom_point(size=4) )
if(is.data.frame(sampleGroups)){ print(p +facet_wrap(~sampleGroup) +theme(legend.position="none") +geom_point(size=2)) }
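## the shaded regions mark the QC fail zone: fewer than 1e5 transcriptome reads
## (vertical line) or a transcriptome/genome read ratio below 0.5 (horizontal line)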
##
## Heatmap
##
qc.results[,4] = round(qc.results[,4], 2)
qc.results[,5] = round(qc.results[,5], 3)
tmp.mat = qc.results
tmp.mat[,1] = rep(1,nrow(tmp.mat))
tmp.mat[,2] = rep(1,nrow(tmp.mat))
tmp.mat[,5] = rep(1,nrow(tmp.mat))
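## apply the same hard QC thresholds as the overall plot above: >= 1e5 transcriptome
## reads and a transcriptome/genome ratio >= 0.5 count as a pass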
tmp.pass=tmp.mat[,3] >= 100000; tmp.mat[tmp.pass,3] = "pass"; tmp.mat[!tmp.pass,3] = "fail"
tmp.pass=tmp.mat[,4] >= 0.5; tmp.mat[tmp.pass,4] = "pass"; tmp.mat[!tmp.pass,4] = "fail"
toplot=cbind(melt(tmp.mat), Actual=melt(qc.results)[,3]); colnames(toplot)[1:3]=c("Sample","Stage","Value")
#toplot$Stage = with(toplot, factor(Stage, levels = rev(levels(Stage))))
toplot$Sample = factor(as.character(toplot$Sample), levels=rownames(mapping.stats)[sampleOrder])
if(is.data.frame(sampleGroups)){ toplot$sampleGroup = sampleGroups[match(toplot$Sample, sampleGroups$sampleID), 2] }
p = ggplot(toplot, aes(y=Sample, x=Stage, fill=Value, label=Actual)) +scale_fill_manual(values=c("fail"="red","pass"="palegreen","1"="lightgrey")) +geom_label() +theme(plot.background=element_rect(fill="white"),panel.background=element_rect(fill=rgb(0.97,0.97,0.97)), axis.text.x=element_text(angle=20, hjust=1, vjust=1), legend.position="none") +ggtitle("QC result: per-sample results") +xlab("") +ylab("")
#if(is.data.frame(sampleGroups)){ p = p +facet_wrap(~sampleGroup, scales="free_y", ncol=1)}
print(p)
##
## Plot breakdown of counts in each biotype
##
printMessage("Plotting biotype counts")
require(plyr)
sampleTotals=matrix(NA,ncol=nrow(mapping.stats),nrow=0); colnames(sampleTotals) = rownames(mapping.stats)
if(nrow(exprs.miRNA) > 0){
sampleTotals = rbind(sampleTotals, colSums(exprs.miRNA))
rownames(sampleTotals)[nrow(sampleTotals)] = "miRNA"
}
if(nrow(exprs.tRNA) > 0){
sampleTotals = rbind(sampleTotals, colSums(exprs.tRNA))
rownames(sampleTotals)[nrow(sampleTotals)] = "tRNA"
}
if(nrow(exprs.piRNA) > 0){
sampleTotals = rbind(sampleTotals, colSums(exprs.piRNA))
rownames(sampleTotals)[nrow(sampleTotals)] = "piRNA"
}
if(nrow(exprs.gencode) > 0){
tmp = data.frame(biotype=sapply(rownames(exprs.gencode), function(id){bits=unlist(strsplit(id,":")); bits[length(bits)]}), exprs.gencode)
tmp = ddply(tmp, "biotype", function(mat){ colSums(mat[,-1,drop=F]) })
rownames(tmp) = tmp[,1]; tmp = tmp[,-1,drop=F]
colnames(tmp) = colnames(sampleTotals)
## add gencode miRNA to the existing count...
if("miRNA" %in% rownames(tmp) && "miRNA" %in% rownames(sampleTotals)){
i = which(rownames(tmp) %in% "miRNA")
j = which(rownames(sampleTotals) %in% "miRNA")
for(x in 1:ncol(sampleTotals)){
sampleTotals[j,x] = sampleTotals[j,x] + tmp[i,x]
}
tmp = tmp[-i,,drop=F]
}
sampleTotals = rbind(sampleTotals, tmp)
}
if(nrow(exprs.circRNA) > 0){
sampleTotals = rbind(sampleTotals, colSums(exprs.circRNA))
rownames(sampleTotals)[nrow(sampleTotals)] = "circularRNA"
}
## these columns are absent from mapping.stats when the exogenous steps were not run
if(!is.null(mapping.stats$exogenous_miRNA))
sampleTotals = rbind(sampleTotals, exogenous_miRNA=mapping.stats$exogenous_miRNA)
if(!is.null(mapping.stats$exogenous_rRNA))
sampleTotals = rbind(sampleTotals, exogenous_rRNA=mapping.stats$exogenous_rRNA)
if(!is.null(mapping.stats$exogenous_genomes))
sampleTotals = rbind(sampleTotals, exogenous_genomes=mapping.stats$exogenous_genomes)
sampleTotals = sampleTotals[order(apply(sampleTotals, 1, median, na.rm=T), decreasing=F), ,drop=F]
tmp = melt(as.matrix(sampleTotals))
colnames(tmp) = c("biotype","sampleID","readCount")
if(is.data.frame(sampleGroups)){ tmp$sampleGroup = sampleGroups[match(tmp$sampleID, sampleGroups$sampleID), 2] }
p = ggplot(na.omit(tmp), aes(y=readCount,x=biotype, colour=biotype)) +geom_hline(aes(yintercept=1),linetype="dashed") +geom_boxplot() +scale_y_log10(breaks=c(0.01,0.1,1,10,100,1000,10000,100000,1000000,10000000,100000000)) +guides(colour=FALSE) +coord_flip() +ggtitle("Biotypes: distributions, raw read-counts")
if(is.data.frame(sampleGroups)){ p = p +facet_grid(~sampleGroup, scales="free_x")}
print(p)
## save the biotype counts
write.table(sampleTotals[order(apply(sampleTotals, 1, median, na.rm=T), decreasing=T), ,drop=F], file=paste(output.dir, "exceRpt_biotypeCounts.txt", sep="/"), sep="\t", col.names=NA, quote=F)
## plot biotype breakdown as RPM:
tmp = melt(as.matrix(apply(sampleTotals, 2, function(col){ col*1000000/sum(col) })))
colnames(tmp) = c("biotype","sampleID","readPerMillion")
if(is.data.frame(sampleGroups)){ tmp$sampleGroup = sampleGroups[match(tmp$sampleID, sampleGroups$sampleID), 2] }
p = ggplot(na.omit(tmp), aes(y=readPerMillion,x=biotype, colour=biotype)) +geom_hline(aes(yintercept=1),linetype="dashed") +geom_boxplot() +scale_y_log10(breaks=c(0.01,0.1,1,10,100,1000,10000,100000,1000000,10000000,100000000)) +guides(colour=FALSE) +coord_flip() +ggtitle("Biotypes: distributions, normalised")
if(is.data.frame(sampleGroups)){ p = p +facet_grid(~sampleGroup, scales="free_x")}
print(p)
## plot top N biotypes for each sample as a barplot - normalised to INPUT reads
N = 7
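## keep the N most-abundant biotypes, collapse the rest into 'other', and show the
## remainder of each library (up to 1e6 RPM) as 'unmapped'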
tmp = sampleTotals
for(i in 1:ncol(sampleTotals))
tmp[,i] = sampleTotals[,i]*1000000/libSizes$reads_used_for_alignment[i]
biotypeOrder = order(apply(tmp, 1, mean), decreasing=T)
tmp = tmp[biotypeOrder, , drop=F]
tmp = as.matrix(rbind(tmp[1:N, , drop=F], other=colSums(tmp[-c(1:N), , drop=F])))
tmp = melt(rbind(tmp, unmapped=1000000-colSums(tmp)))
#tmp = melt(tmp)
colnames(tmp) = c("biotype","sampleID","readsPerMillion")
if(is.data.frame(sampleGroups)){ tmp$sampleGroup = sampleGroups[match(tmp$sampleID, sampleGroups$sampleID), 2] }
p = ggplot(na.omit(tmp), aes(y=readsPerMillion,x=sampleID,fill=biotype)) +geom_bar(stat="identity") +scale_fill_brewer(palette = "Paired") +theme(axis.text.x=element_text(angle=50, hjust=1.0, vjust=1)) +ggtitle("Biotypes: per-sample, normalised") +ylab("reads per million reads used for alignment") +xlab("") +ylim(0, 1E6) #+scale_fill_discrete(rich.colors(10))
if(is.data.frame(sampleGroups)){ p = p +facet_grid(~sampleGroup, scales="free_x",space="free_x")}
suppressWarnings(print(p))
## plot top N biotypes for each sample as a barplot - normalised to MAPPED reads
N = 7
tmp = as.matrix(apply(sampleTotals, 2, function(col){ col*1000000/sum(col) }))
tmp = tmp[biotypeOrder, , drop=F]
tmp = melt(rbind(tmp[1:N, , drop=F], other=colSums(tmp[-c(1:N), , drop=F])))
colnames(tmp) = c("biotype","sampleID","readsPerMillion")
if(is.data.frame(sampleGroups)){ tmp$sampleGroup = sampleGroups[match(tmp$sampleID, sampleGroups$sampleID), 2] }
p = ggplot(na.omit(tmp), aes(y=readsPerMillion,x=sampleID,fill=biotype)) +geom_bar(stat="identity") +theme(axis.text.x=element_text(angle=50, hjust=1.0, vjust=1)) +ggtitle("Biotypes: per-sample, normalised") +ylab("reads per million mapped reads") +xlab("") +scale_fill_brewer(palette = "Paired") #+scale_fill_manual( values = c(colorRampPalette( brewer.pal( 6 , "Paired" ) )(8), "grey") )
if(is.data.frame(sampleGroups)){ p = p +facet_grid(~sampleGroup, scales="free_x",space="free_x")}
suppressWarnings(print(p))
## Plot miRNA expression distributions
if(nrow(exprs.miRNA) > 0){
printMessage("Plotting miRNA expression distributions")
tmp = melt(exprs.miRNA)
colnames(tmp) = c("miRNA","sample","abundance")
if(is.data.frame(sampleGroups)){ tmp$sampleGroup = sampleGroups[match(tmp$sample, sampleGroups$sampleID), 2] }
p = ggplot(tmp, aes(y=abundance, x=sample, colour=sample)) +geom_violin() +geom_boxplot(alpha=0.2) +ylab("Read count") +ggtitle("miRNA abundance distributions (raw counts)") +scale_y_log10() +guides(colour=FALSE)
if(ncol(exprs.miRNA) < 30){
p = p +theme(axis.text.x=element_text(angle=50, hjust=1.0, vjust=1))
}else{
p = p+theme(axis.ticks = element_blank(), axis.text.x = element_blank())
}
if(is.data.frame(sampleGroups)){ p = p +facet_grid(~sampleGroup, scales="free_x",space="free_x")}
print(p)
p = ggplot(tmp, aes(x=abundance, colour=sample)) +geom_density() +xlab("Read count") +ggtitle("miRNA abundance distributions (raw counts)") +scale_x_log10()
if(ncol(exprs.miRNA.rpm) > 30){ p = p +guides(colour=FALSE) }
if(is.data.frame(sampleGroups)){ p = p +facet_grid(~sampleGroup)}
print(p)
tmp = melt(exprs.miRNA.rpm)
colnames(tmp) = c("miRNA","sample","abundance")
if(is.data.frame(sampleGroups)){ tmp$sampleGroup = sampleGroups[match(tmp$sample, sampleGroups$sampleID), 2] }
p = ggplot(tmp, aes(y=abundance, x=sample, colour=sample)) +geom_violin() +geom_boxplot(alpha=0.2) +ylab("Reads per million (RPM)") +ggtitle("miRNA abundance distributions (RPM)") +theme(axis.ticks = element_blank(), axis.text.x = element_blank()) +scale_y_log10() +guides(colour=FALSE)
if(ncol(exprs.miRNA.rpm) < 30){
p = p +theme(axis.text.x=element_text(angle=50, hjust=1.0, vjust=1))
}else{
p = p+theme(axis.ticks = element_blank(), axis.text.x = element_blank())
}
if(is.data.frame(sampleGroups)){ p = p +facet_grid(~sampleGroup, scales="free_x",space="free_x")}
print(p)
p = ggplot(tmp, aes(x=abundance, colour=sample)) +geom_density() +xlab("Reads per million (RPM)") +ggtitle("miRNA abundance distributions (RPM)") +scale_x_log10()
if(ncol(exprs.miRNA.rpm) > 30){ p = p +guides(colour=FALSE) }
if(is.data.frame(sampleGroups)){ p = p +facet_grid(~sampleGroup)}
print(p)
}
##
## Finally, plot exogenous if there are any
##
if(nrow(exprs.exogenousGenomes_specific) > 0){
printMessage("Plotting exogenous counts")
par(oma=c(20,2,0,0))
barplot(exprs.exogenousGenomes_cumulative[1,,drop=F], las=2, main="Total # reads mapped to NCBI taxonomy")
## if we have more than one sample, plot some heatmaps
if(ncol(exprs.exogenousGenomes_specific) > 1){
par(oma=c(8,0,0,20))
maxRow = 50; if(nrow(exprs.exogenousGenomes_specific) < maxRow){ maxRow = nrow(exprs.exogenousGenomes_specific) }
tmp.order = order(apply(t(t(exprs.exogenousGenomes_specific)/colSums(exprs.exogenousGenomes_specific)), 1, median), decreasing=T)
tmp = t(log10(t(t(exprs.exogenousGenomes_specific)*1000000/colSums(exprs.exogenousGenomes_specific))[tmp.order, ][1:maxRow,]+0.1))
colnames(tmp) = taxonomyInfo.exogenous_genomes[match(colnames(tmp), taxonomyInfo.exogenous_genomes$ID), ]$name
heatmap.2(tmp,trace="none",main="top taxa nodes: specific normalised read count", symbreaks=F,col=rich.colors(50))
tmp.order = order(apply(t(t(exprs.exogenousGenomes_specific)), 1, median), decreasing=T)
tmp = t(log10(exprs.exogenousGenomes_specific[tmp.order, ][1:maxRow,]+0.1))
colnames(tmp) = taxonomyInfo.exogenous_genomes[match(colnames(tmp), taxonomyInfo.exogenous_genomes$ID), ]$name
heatmap.2(tmp,trace="none",main="top taxa nodes: specific absolute read count", symbreaks=F,col=rich.colors(50))
maxRow = 50; if(nrow(exprs.exogenousGenomes_cumulative) < maxRow){ maxRow = nrow(exprs.exogenousGenomes_cumulative) }
tmp.order = order(apply(t(t(exprs.exogenousGenomes_cumulative)/libSizes$exogenous_genomes), 1, median), decreasing=T)
tmp = t(log10(t(t(exprs.exogenousGenomes_cumulative)*1000000/libSizes$exogenous_genomes)[tmp.order, ][1:maxRow,]+0.1))
colnames(tmp) = taxonomyInfo.exogenous_genomes[match(colnames(tmp), taxonomyInfo.exogenous_genomes$ID), ]$name
heatmap.2(tmp,trace="none",main="top taxa nodes: cumulative normalised read count", symbreaks=F,col=rich.colors(50))
tmp.order = order(apply(t(t(exprs.exogenousGenomes_cumulative)), 1, median), decreasing=T)
tmp = t(log10(exprs.exogenousGenomes_cumulative[tmp.order, ][1:maxRow,]+0.1))
colnames(tmp) = taxonomyInfo.exogenous_genomes[match(colnames(tmp), taxonomyInfo.exogenous_genomes$ID), ]$name
heatmap.2(tmp,trace="none",main="top taxa nodes: cumulative absolute read count", symbreaks=F,col=rich.colors(50))
}
}
dev.off()
##
## Plot exogenous rRNAs if there are any
##
if(nrow(exprs.exogenousRibosomal_specific) > 0 && ncol(exprs.exogenousRibosomal_specific) > 0){
printMessage("Making taxonomy trees using exogenous rRNA counts")
plotExogenousTaxonomyTrees(exprs.exogenousRibosomal_specific, exprs.exogenousRibosomal_cumulative, what="exogenousRibosomal", output.dir, taxonomyInfo.exogenous_rRNA, sampleGroups=sampleGroups, minPercent=minPercent_exogenousRibosomal)
}
##
## Finally, plot exogenous genomes if there are any
##
if(nrow(exprs.exogenousGenomes_specific) > 0 && ncol(exprs.exogenousGenomes_specific) > 0){
printMessage("Making taxonomy trees using exogenous genomes counts")
plotExogenousTaxonomyTrees(exprs.exogenousGenomes_specific, exprs.exogenousGenomes_cumulative, what="exogenousGenomes", output.dir, taxonomyInfo.exogenous_genomes, sampleGroups=sampleGroups, minPercent=minPercent_exogenousGenomes)
}
printMessage("All done!")
}
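##
## Example invocation (a minimal sketch; the output directory and the optional
## 'sampleGroups' data.frame below are hypothetical and must match your own run):
##
#  sampleIDs = <the vector of sample IDs returned by the merge step above>
#  sampleGroups = data.frame(sampleID=sampleIDs, sampleGroup="case")
#  PlotData(sampleIDs, output.dir="/path/to/exceRpt_output", sampleGroups=sampleGroups)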
| 67,683 | gpl-3.0 |
86432a8c5ab3285ee12fa606aefa1e4a25fa1102 | jaytlennon/StarvationTraits | analyses/tests/GrowthCurveTest.R | ################################################################################
# #
# Test Script for Analysis of Exponential Growth Curve Data #
# This uses ModifiedGomp.R Version 2.0 #
# Written By: Mario Muscarella #
# Last Update: 29 Jan 2015 #
# #
# Use this file to analyze Synergy MX Growth Curve data #
# #
################################################################################
setwd("~/GitHub/StarvationTraits/")
rm(list=ls())
# Inport the function from source file
source("./bin/GrowthCurveInteractiveRegression.R")
source("./bin/ModifiedGomp.R")
# Create Directory For Output
dir.create("./data/GrowthCurves/output", showWarnings = FALSE)
################################################################################
# Example ######################################################################
################################################################################
# Run Example with Test Data
growth.modGomp("./data/GrowthCurves/GrowthCurveExample.txt", "test", skip=31)
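# NB: 'skip = 31' is assumed to skip the instrument header block at the top of the
# Synergy MX export; adjust it if your export format differs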
| 1,429 | gpl-3.0 |
502a51fe90d821322c9ccacb5bcbbda69ae0852f | XiaodanLyu/Lognormal-Extension | 2_Zeros/Code/overlaysd_parallel.R | rm(list = ls(all = T))
setwd("C:/Users/lyux/Box Sync/SAEZeros")
library("multidplyr")
library("parallel")
library("tidyverse")
# library("raster")
# library("sp")
geo.proj <- "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs +ellps=GRS80 +towgs84=0,0,0"
## CDL raster and dbf
cdl.sd <- raster::raster("Data/CDL/CDL_2006_46.tif")
cdl.sd.dbf <- foreign::read.dbf("Data/CDL/CDL_2006_46.tif.vat.dbf")
count.col <- cdl.sd.dbf %>% filter(VALUE>0, !is.na(CLASS_NAME)) %>%
dplyr::select(VALUE) %>% unlist %>% unname
format(object.size(cdl.sd), units = "Kb")
get_CDLoverMU_tables <- function(areasym){
## SD Mapunit shape
mu.sd <- rgdal::readOGR(dsn = paste0("G:/Grads/lyux/mu_by_areasym/", areasym),
layer = "sdmus.subA1")
mukeyname <- levels(mu.sd@data$MUKEY)
## table CDL pixels by MUKEY
result <- matrix(nrow = length(mukeyname), ncol = length(count.col))
foldername <- paste0("Figures/CDLOverMU/", areasym)
dir.create(foldername)
iter <- 0
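## for each map unit: crop the CDL raster to the unit's bounding box, keep pixels
## in the crop categories, project them, overlay on the map-unit polygon, and
## tabulate the in-polygon pixel counts per CDL category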
repeat{
iter <- iter + 1
musub <- subset(mu.sd, MUKEY == mukeyname[iter])
cdlsub <- raster::crop(cdl.sd, sp::bbox(musub))
cdlpoint <- raster::rasterToPoints(cdlsub, spatial = T,
fun = function(x) x %in% count.col)
cdlpoint.proj <- sp::spTransform(cdlpoint, sp::CRS(projargs = geo.proj))
overlay <- sp::over(cdlpoint.proj, sp::geometry(musub))
CDLinMU <- cdlpoint.proj[!is.na(overlay), ]
## plot CDLOverMU ####
musub.frame <- ggplot2::fortify(musub)
imagename <- paste0(foldername, "/MU", mukeyname[iter], ".png")
if(nrow(CDLinMU@data) > 0) {
data.frame(CDLinMU) %>% mutate(x = round(x), y = round(y)) %>% ggplot() +
geom_tile(aes(x = x, y= y, fill = factor(CDL_2006_46))) +
geom_path(aes(x = long, y = lat, group = factor(group)), data = musub.frame)
}
if(nrow(CDLinMU@data) == 0) {
musub.frame %>% ggplot(aes(x = long, y = lat, group = factor(group))) + geom_path()
}
ggsave(filename = imagename)
result[iter,] <- CDLinMU@data$CDL_2006_46 %>% factor(levels = count.col) %>% table
if(iter == length(mukeyname)) {break}
}
## save result ####
colnames(result) <- paste("Category", count.col, sep = "_")
result <- result %>% as_tibble() %>%
mutate(MUKEY = mukeyname) %>%
dplyr::select(MUKEY, everything())
filename <- paste0("Tables/", areasym, "_MU_CDL_Table.csv")
write.csv(result, file = filename, row.names = F)
return(result)
# return()
}
num_core <- detectCores()-4
num_core
mulist <- read.csv("Data/MAPUNITTABLE.csv")
areasym <- levels(mulist$AREASYM)
workflow <- read.csv("Data/Workflow.csv")
## restrict to area symbols with status == 1 in the workflow table (this overrides the full list read above)
areasym <- as.character(subset(workflow, status==1)$areasym)
group <- rep(1:num_core, length.out = length(areasym))
areasym <- data.frame(tibble(group), areasym)
areasym %>% glimpse
cluster <- create_cluster(cores = num_core)
cluster
by_group <- areasym %>%
partition(group, cluster = cluster)
by_group
by_group %>%
cluster_library("tidyverse") %>%
cluster_library("raster") %>%
cluster_library("rgdal") %>%
cluster_library("sp") %>%
cluster_assign_value("geo.proj", geo.proj) %>%
cluster_assign_value("cdl.sd", cdl.sd) %>%
cluster_assign_value("cdl.sd.dbf", cdl.sd.dbf) %>%
cluster_assign_value("count.col", count.col) %>%
cluster_assign_value("get_CDLoverMU_tables", get_CDLoverMU_tables)
cluster_get(by_group, "geo.proj")[[1]]
allCDLoverMU <- by_group %>%
mutate(
CDLoverMU = map(.x = areasym, .f = ~get_CDLoverMU_tables(areasym = .x))
) %>% collect() %>% as_tibble() %>% unnest()
allCDLoverMU %>% glimpse
## read tables and then collect
read_tables <- function(areasym){
filename <- paste0("C:/Users/lyux/Box Sync/SAEZeros/Tables/Rest_2016/", areasym,
"_MU_CDL_Table.csv")
tb <- read.csv(filename, header = T) %>% as_tibble()
tb
}
by_group %>%
cluster_library("tidyverse") %>%
cluster_assign_value("read_tables", read_tables)
tables <- by_group %>%
mutate(
tb = map(.x = areasym, .f = ~read_tables(areasym = .x))
) %>% collect() %>% as_tibble() %>% unnest()
write.csv(tables, file = "Tables/Rest_2016/SDRest_MU_CDL_Table.csv", row.names = F)
| 4,254 | mit |