[ { "ID": 1, "Comment": "exclude NAs from data", "Code": "data <- data[complete.cases(data$average), ] summary(data) nrow(data) ", "Label": "Data Variable", "Source": "https://osf.io/qhaf8/", "File": "pupillometry_tutorial_calignano.R" },
{ "ID": 2, "Comment": " 2. compare model indices (ICC, conditional and marginal R²) ", "Code": "tab_model(model0.1, model1, model1_full) tab_model(model0.1, model2, model2_full) tab_model(model0.2, model3, model3_full) ", "Label": "Statistical Modeling", "Source": "https://osf.io/8edp7/", "File": "Social Factors COVID-19_Konrad.R" },
{ "ID": 3, "Comment": "Plot robust model 1: get predicted values for different levels of rsa_socialresources", "Code": "gg_model1 <- ggpredict(model1_robust, c(\"daycount[0, 25, 50, 75]\", \"rsa_socialresources[meansd]\", \"diagnosis\")) ", "Label": "Visualization", "Source": "https://osf.io/8edp7/", "File": "Social Factors COVID-19_Konrad.R" },
{ "ID": 4, "Comment": "count of articles that generated data using experimental techniques (includes articles that use both; percentage calculated using total empirical articles) ", "Code": "GenerateData[2,2] <- sum(MMCPSR_emp$EHPdata) GenerateData[2,3] <- sum(MMCPSR_emp$EHPdata)/nrow(MMCPSR_emp) ", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "AnalysisPost-PAP.R" },
{ "ID": 5, "Comment": "Data organization: rename columns/variables to simpler variable names", "Code": "colnames(srmadata) srmadata <- srmadata %>% rename(assessors = 2, journal = 3, pubyear = 4, pubmonth = 5, study.title = 6, pmid = 7, regist = 8, regist.num = 9, protocol = 10, title.ident = 11, ab.sources = 12, ab.eleg.crit = 13, ab.particip = 14, ab.interv = 15, ab.effect = 16, ab.included = 17, ab.outcome = 18, in.picos = 19, me.database = 20, me.search.avai = 21, me.grey.lit = 22, me.date.just = 23, me.lang.num = 24, me.picos.desc = 25, me.sele.dup = 26, me.extr.dup = 27, me.rob.desc = 28, me.rob.dup = 29, me.stat.desc = 30, me.heterog = 31, item.removed01 = 32, re.flowdia = 33, re.ssizes = 34, re.picos.desc = 35, re.lengths = 36, re.estim.desc = 37, re.meta.studies = 38, re.rob = 39, re.deviations = 40, di.spin = 41, di.rob.studies = 42, di.limitations = 43, data.statem = 44, fund.statem = 45, funders = 46, coi.statem = 47) ", "Label": "Data Variable", "Source": "https://osf.io/ntw7d/", "File": "SRMA2019_analyses.R" },
{ "ID": 6, "Comment": "Redefine column types to factor", "Code": "srmadata <- data.frame(srmadata) srmadata %>% mutate_if(is.character, as.factor) %>% str() ", "Label": "Data Variable", "Source": "https://osf.io/ntw7d/", "File": "SRMA2019_analyses.R" },
{ "ID": 7, "Comment": " Add labels to variables in participant dataframe; then, generate table ", "Code": "table1::label(participant$ab.particip) <- \"Description of participants (ab)\" table1::label(participant$re.picos.desc) <- \"Detailed studies' characteristics\" table1::table1(~ab.particip + re.picos.desc, data = participant) ", "Label": "Data Variable", "Source": "https://osf.io/ntw7d/", "File": "SRMA2019_analyses.R" },
{ "ID": 8, "Comment": " HISTOGRAM: generate a histogram of the density of scores achieved by the 104 assessed studies. Create a 0/1 dataset, where No=0 and Yes=1, with respective study IDs (variable \"id\") ", "Code": "id <- (1:104) allbinary <- data.frame(id, transparency, completenessbinary, participant, intervention, outcome, rigorbinary, appraisalbinary) allbinary[allbinary == \"Yes\"] <- \"1\" allbinary[allbinary == \"No\"] <- \"0\" lapply(allbinary, as.numeric) ", "Label": "Visualization", "Source": "https://osf.io/ntw7d/", "File": "SRMA2019_analyses.R" },
{ "ID": 9, "Comment": " Set the dataframe as numeric so that we can sum up recommended practices for each study (variable \"yes.score\") ", "Code": "allbinary[] <- lapply(allbinary, function(x) as.numeric(as.character(x))) ", "Label": "Data Variable", "Source": "https://osf.io/ntw7d/", "File": "SRMA2019_analyses.R" },
{ "ID": 10, "Comment": "Create a new dataframe (binary.scoredf) with a new variable (yes.score) that reflects the number of recommended practices from each study", "Code": "binary.scoredf <- allbinary %>% mutate(yes.score = regist + protocol + me.search.avai + data.statem + title.ident + ab.sources + ab.eleg.crit + ab.included + in.picos + me.picos.desc + re.flowdia + re.ssizes + re.lengths + fund.statem + coi.statem + ab.particip + re.picos.desc + ab.interv + re.picos.desc + ab.outcome + me.stat.desc + me.heterog + re.estim.desc + re.meta.studies + me.grey.lit + me.date.just + me.lang.num + me.sele.dup + me.extr.dup + me.rob.desc + me.rob.dup + re.rob + re.deviations + di.spin + di.rob.studies + di.limitations)", "Label": "Data Variable", "Source": "https://osf.io/ntw7d/", "File": "SRMA2019_analyses.R" },
{ "ID": 11, "Comment": "Create the histogram", "Code": "histplot.score <- ggplot(binary.scoredf, aes(x=yes.score)) + geom_histogram(binwidth=1, color=\"black\", fill=\"lightblue\") histplot.score + scale_x_continuous(name=\"Number of recommended practices (max: 36 items)\", breaks=seq(0,36,2)) + scale_y_continuous(name=\"Frequency of publications\", limits=c(0, 20)) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = \"black\")) ", "Label": "Visualization", "Source": "https://osf.io/ntw7d/", "File": "SRMA2019_analyses.R" },
{ "ID": 12, "Comment": " Load data: data file \"hfp\" contains individual-level foraging return data (kcal) at the daily level along with the date, unique camp id, age, sex, and unique person id. ", "Code": "hfp <- read.csv(\"food_pro_data.csv\", as.is=T) h <- hfp[hfp$age > 5,] h$age_z <- (h$age-mean(h$age, na.rm=T))/sd(h$age, na.rm=T) h$month <- month(ymd(h$date)) ", "Label": "Data Variable", "Source": "https://osf.io/92e6c/", "File": "hadza_returns_model.R" },
{ "ID": 13, "Comment": "Plot the marginal/conditional effects for predicted values and probability of zero-days", "Code": "p <- conditional_effects(hadza_lognormal_returns_model, effects=c(\"age_z:sex\")) # includes the hurdle component p2 <- conditional_effects(hadza_lognormal_returns_model, effects=c(\"age_z:sex\"), dpar=\"hu\") # to see the hurdle component (Pr zero-days) plot(p)[[1]] + scale_x_continuous(breaks = c(-1, 0, 1, 2), labels= round(c(-1, 0, 1, 2)*sd(h$age, na.rm=T) + mean(h$age, na.rm=T))) + ggplot2::labs(x=\"Age\", y=\"kcal/day\") + ggplot2::lims(y=c(0,6500)) + theme_classic() plot(p2)[[1]] + ggplot2::lims(y=c(0,1)) + labs(x=\"Age\", y=\"Probability of zero day\\n(hurdle component of model)\") + theme_classic() + scale_x_continuous(breaks = c(-1, 0, 1, 2), labels= round(c(-1, 0, 1, 2)*sd(h$age, na.rm=T) + mean(h$age, na.rm=T))) ", "Label": "Visualization", "Source": "https://osf.io/92e6c/", "File": "hadza_returns_model.R" },
{ "ID": 14, "Comment": "Calculate overall mean standard length (Ls)", "Code": "Ls <- mean(standard.length, na.rm = TRUE) ", "Label": "Data Variable", "Source": "https://osf.io/6ukwg/", "File": "stuart.R" },
{ "ID": 15, "Comment": "Generate Psi of an AR(1) model", "Code": "Psi = diag(p) diag(Psi) = runif(p,b.ar.min,b.ar.max) return(Psi) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/rs6un/", "File": "Psi.PS.AR.Matrix.R" },
{ "ID": 16, "Comment": "We use the R package qgraph to visualize our matrix", "Code": "if (!require(qgraph)) install.packages(\"qgraph\"); require(qgraph) ", "Label": "Visualization", "Source": "https://osf.io/3kem6/", "File": "Rcode_Figure2.R" },
{ "ID": 17, "Comment": "VISUALIZE YOUR NETWORKS: make sure you have the right package, qgraph", "Code": "if (!require(qgraph)) install.packages(\"qgraph\"); require(qgraph) ", "Label": "Visualization", "Source": "https://osf.io/3kem6/", "File": "Rcode_Figure2.R" },
{ "ID": 18, "Comment": " load source functions and data; generates four data sets: FF, FS, SF, SS ", "Code": "source(\"prep-Exp1-data.r\") source(\"pmwg-DIC.r\") ", "Label": "Data Variable", "Source": "https://osf.io/wbyj7/", "File": "Exp1-LBA-null.r" },
{ "ID": 19, "Comment": "estimate model independently for each condition in the experiment", "Code": "for(condition in names(all.data)) { cat(\"\\n\\n\\n\\nEstimating model for: \", condition, \"\\n\\n\") fnam <- paste0(\"Exp1-LBA-\", model.par, \"-\", condition, \".RData\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/wbyj7/", "File": "Exp1-LBA-null.r" },
{ "ID": 20, "Comment": " Then write a loop to go through all Scopus IDs, adding the output to the first dataframe. NOTE: create an 'output' folder in your working directory before running the following code ", "Code": "for (i in 1:length(authors)){ tryCatch({ #using tryCatch() to go around errors, see https://stackoverflow.com/questions/14748557/skipping-error-in-for-loop. So here it skips the authorIDs with an error and keeps going res = retrievalByAuthorID(authors[i], apik) M2 = res$M output <- rbind(M,M2) write.csv(output, paste0(output$AU_ID[1],\".csv\"), row.names=F) #creates separate csv's for each Scopus ID }, error=function(e){}) } ", "Label": "Data Variable", "Source": "https://osf.io/7v4ep/", "File": "Collaboration boosts career progression_part" },
{ "ID": 21, "Comment": "Select first letter of first name + surname", "Code": "for(i in names(x)){ firstname = word(x[[i]],-1) initial = substring(firstname, 1, 1) surname = word(x[[i]],-2) surname[is.na(surname)] <- \"\" tot = as.data.frame(paste(initial, surname, sep = \".\")) tot[tot==\".\"]=\"\" x[[i]] <- tot #join first and last name with a full stop } ", "Label": "Data Variable", "Source": "https://osf.io/7v4ep/", "File": "Collaboration boosts career progression_part" },
{ "ID": 22, "Comment": " Add a column per author, indicating which papers belong to them (0 or 1) ", "Code": "authors = as.character(unique(authorswithAPInotworking$author.name)) x[is.na(x)]=\"\" for(i in (authors)){ x$i = rowSums(x == i) colnames(x)[colnames(x) == 'i'] <- i } ncol.new = ncol(x) ", "Label": "Data Variable", "Source": "https://osf.io/7v4ep/", "File": "Collaboration boosts career progression_part" },
{ "ID": 23, "Comment": "Have a look at the distribution of first and last years, to look for outliers ", "Code": "hist(as.numeric(new$first.year)) hist(as.numeric(new$last.year))", "Label": "Visualization", "Source": "https://osf.io/7v4ep/", "File": "Collaboration boosts career progression_part" },
{ "ID": 24, "Comment": "Add position of word in sentence (prefabricated list made in Python)", "Code": "positionlist <- read.delim(\"U:/surfdriveRU/Thesis analyse/LMM analyse/positionlist.txt\", header = FALSE) eyetrackingdata$position <- positionlist$V1 ", "Label": "Data Variable", "Source": "https://osf.io/qynhu/", "File": "combinealldata.R" },
{ "ID": 25, "Comment": "density plotting function", "Code": "denschart3 <- function (x, labels = NULL, groups = NULL, gdata = NULL, cex = par(\"cex\"), pt.cex = cex, bg = par(\"bg\"), color = \"grey20\", colorHPDI =\"grey60\", HPDI=0.9, vline = NULL, gcolor = par(\"fg\"), lcolor = \"gray\", xlim = range(unlist(x)), yvals = 1:length(x), yextra=0.7, main = NULL, xlab = NULL, ylab = NULL, height=0.7 , border=NA, adjust=1, ...) { opar <- par(\"mai\", \"mar\", \"cex\", \"yaxs\") on.exit(par(opar)) par(cex = cex, yaxs = \"i\") if (!is.list(x)) stop(\"'x' must be a list of vectors or matrices\") n <- length(x) glabels <- NULL if (is.list(x)) { if (is.null(labels)) labels <- names(x) if (is.null(labels)) labels <- as.character(1L:n) labels <- rep_len(labels, n) ", "Label": "Visualization", "Source": "https://osf.io/a3yd4/", "File": "Functions_IRT.R" },
{ "ID": 26, "Comment": " Data wrangling: recode values for missing data (-9, -999) in the whole dataset as NA ", "Code": "data <- data %>% mutate_all(~na_if(., -999)) data <- data %>% mutate_all(~na_if(., -9)) ", "Label": "Data Variable", "Source": "https://osf.io/r4wg2/", "File": "stadyl_analyses.R" },
{ "ID": 27, "Comment": "Moderated regression: center variables", "Code": "data$ls.1c <- data$ls.1 - 3.57 data$sc.c <- scale(data$sc, center = T, scale = F) data$mob.c <- scale(data$mob, center = T, scale = F) data$age.c <- scale(data$age, center = T, scale = F) ", "Label": "Statistical Modeling", "Source": "https://osf.io/r4wg2/", "File": "stadyl_analyses.R" },
{ "ID": 28, "Comment": "get standardised regression coefficients", "Code": "lm.beta(fit1) lm.beta(fit2)", "Label": "Statistical Modeling", "Source": "https://osf.io/r4wg2/", "File": "stadyl_analyses.R" },
{ "ID": 29, "Comment": "Plots: moderated regression", "Code": "psych::describe(data[c(\"sc\", \"mob\")]) interact_plot(fit1, pred = mob.c, modx = sc.c, #partial.residuals = TRUE, interval = TRUE, int.width = 0.95, modx.values = c(-1.48, 0, 1.48), colors = c(\"#D9ED92\", \"#52B69A\", \"#1E6091\"), modx.labels = c(\"Low (- 1SD)\", \"Middle (Mean)\", \"High (+ 1SD)\"), legend.main = \"Perceived status loss\") + labs(y = \"Predicted life satisfaction\", x = \"Upward mobility beliefs\") + guides(fill = guide_legend(title = \"Perceived status loss\")) + coord_cartesian(ylim = c(1,7)) + scale_y_continuous(expand = c(0, 0), breaks = c(1,2,3,4,5,6,7)) + scale_x_continuous(expand = c(0.05, 0.05), breaks = c(-2.94,-1.94, -0.94, 0.06, 1.06, 2.06, 3.06), labels = c(0,1,2,3,4,5,6)) + theme_classic(base_size = 15) + theme( axis.title = element_text(colour = \"black\", size = 19, margin = margin(t = 0, r = 15, b = 0, l = 0)), axis.text.x = element_text(colour = \"black\", size = 19, margin = margin(t = 15, r = 0, b = 0, l = 0)), axis.text.y = element_text(colour = \"black\", size = 19), legend.position = c(0.7,0.85), legend.title = element_text(size = 17), legend.text = element_text(size = 17), legend.key.width = unit(1,\"cm\") ) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) ", "Label": "Visualization", "Source": "https://osf.io/r4wg2/", "File": "stadyl_analyses.R" },
{ "ID": 30, "Comment": "get diffs: get row numbers in order to get the corresponding RecordingTime", "Code": "rownums_parent <- fi_pairs$fiend[latencies[[curr_colname]][[curr_hitname]]$fi_pairs[[1]]] rownums_successor <- fi_pairs$fistart[latencies[[curr_colname]][[curr_hitname]]$fi_pairs[[2]]]", "Label": "Data Variable", "Source": "https://osf.io/mp9td/", "File": "get_gazeshift_latency.R" },
{ "ID": 31, "Comment": "Funnel plot: carry out trim-and-fill analysis", "Code": "taf<-trimfill(m.random,main=\"\", ma.fixed = FALSE, fixed = FALSE, random = TRUE, label=F) ", "Label": "Visualization", "Source": "https://osf.io/dqjyh/", "File": "Script.R" },
{ "ID": 32, "Comment": "draw funnel plot with missing studies filled in", "Code": "funnel(taf, legend=TRUE) summary(trimfill(m.random)) ", "Label": "Visualization", "Source": "https://osf.io/dqjyh/", "File": "Script.R" },
{ "ID": 33, "Comment": "create container variable to store results", "Code": "if(exists(\"data.full\") == 0){ subject <- c(1:50) mean.3SD <- NA sd.3SD <- NA min.3SD <- NA max.3SD <- NA container.3SD <- data.frame(subject, mean.3SD, sd.3SD, min.3SD, max.3SD) } container.3SD$mean.3SD[i] <- mean(data.3SD$rt) container.3SD$sd.3SD[i] <- sd(data.3SD$rt) container.3SD$min.3SD[i] <- container.3SD$mean.3SD[i] - 3*container.3SD$sd.3SD[i] container.3SD$max.3SD[i] <- container.3SD$mean.3SD[i] + 3*container.3SD$sd.3SD[i] ", "Label": "Data Variable", "Source": "https://osf.io/5yvnb/", "File": "analyse_final_Exp5_OSF.R" },
{ "ID": 34, "Comment": "setting up colors and line types for plots", "Code": "ns=unique(qms$name) nl=length(ns) disp=data.frame(name=ns,lty=rep(1:9,nl)[1:nl]) disp$col=ifelse(grepl('Hit|Rod',disp$name),'#880101','#aaaaaa') disp$lty[grepl('Hit|Rod',disp$name)]=1:2 ", "Label": "Visualization", "Source": "https://osf.io/vwq9p/", "File": "analysis.R" },
{ "ID": 35, "Comment": "plot the results of the vector fits; color ages >= 85 in red", "Code": "col <- c(\"gray\", \"red\")[1 + (age >= 85)] main <- \"PCoA of Pollen: pollen and independent vectors fitted onto ordination\" plot(poln.ord$points, pch = 21, type = \"p\", xlab = paste0(\"PCoA I (\", round(var.expl[1] * 100, 1), \"%)\"), ylab = paste0(\"PCoA II (\", round(var.expl[2] * 100, 1), \"%)\"), main = main, col = \"black\", bg = col, cex = 1.5, xaxt = \"n\", yaxt = \"n\", bty = \"n\") axis(1, at = seq(-0.5, 1, by = 0.25)) axis(2, at = seq(-1, 0.25, by = 0.25)) legend(0.5, -0.85, pch = c(21, 21, NA), lty = c(NA, NA, 1), lwd = c(NA, NA, 2), col = c(\"black\", \"black\", \"light blue\"), pt.bg = c(\"gray\",\"red\", NA), cex = 1.25, legend = c(\"Post 85k\", \"Pre 85k\", \"Lake Level\")) ", "Label": "Visualization", "Source": "https://osf.io/7h94n/", "File": "Malawi_ordination.R" },
{ "ID": 36, "Comment": "create categorical variables", "Code": "share <- to_factor( share, select = c(\"health_past3months\", \"wave\", \"gender\", \"covid_affected\", \"partnerinhh\", \"covid_regime_si3\", \"covid_regime_ch3\") ) share$stringency_index <- share$global_covid_regime_si3 ", "Label": "Data Variable", "Source": "https://osf.io/cht59/", "File": "01_tables_1_2.r" },
{ "ID": 37, "Comment": "Overall mean of Social factors", "Code": "share_all <- data_filter(share, !is.na(health_past3months)) ", "Label": "Data Variable", "Source": "https://osf.io/cht59/", "File": "01_tables_1_2.r" },
{ "ID": 38, "Comment": " Model comparison: apply hierarchical versions of CC, EWA, LMM, and motivational EWA models ", "Code": "setwd(\"SET TO MODEL FILE DIRECTORY\") data <- list(\"groupSize\", \"ngroups\", \"ntrials\", \"ntokens\", \"pi\", \"vals\",\"c\",\"Gc\",\"c_choice_index\",\"Ga\") #data inputted into jags params <- c(\"mu_c\") #parameters we'll track in jags samplesCC <- jags(data, inits=NULL, params, model.file =\"CC_group.txt\", n.chains=3, n.iter=5000, n.burnin=1000, n.thin=1) params <- c(\"mu_c\") #parameters we'll track in jags samplesEWA <- jags(data, inits=NULL, params, model.file =\"EWA_group.txt\", n.chains=3, n.iter=5000, n.burnin=1000, n.thin=1) params <- c(\"nu_c\") #parameters we'll track in jags samplesLMM <- jags(data, inits=NULL, params, model.file =\"LMM_group.txt\", n.chains=3, n.iter=5000, n.burnin=1000, n.thin=1) ", "Label": "Statistical Modeling", "Source": "https://osf.io/meh5w/", "File": "modelComparison.R" },
{ "ID": 39, "Comment": "Number of subjects in each group: create a group variable according to each cluster condition", "Code": "if (size == 1){ N.size = N/K n.group = unlist(lapply(1:K, function(k) rep(k,N.size))) } if (size == 2){ if (K == 2){ N.1 = 0.10*N N.2 = N - N.1 n.group = c(rep(1,N.1),rep(2,N.2)) } if (K == 4){ N.1 = 0.10*N N.rest = N - N.1 N.size = N.rest/(K-1) n.group = c(rep(1,N.1),rep(2,N.size),rep(3,N.size),rep(4,N.size)) }} if (size == 3){ if (K == 2){ N.1 = 0.6*N N.2 = N - N.1 n.group = c(rep(1,N.1),rep(2,N.2)) } if (K == 4){ if (N == 20){ N.1 = 0.6*N N.rest = N - N.1 N.size = floor(N.rest/(K-1)) n.group = c(rep(1,N.1),rep(2,N.size),rep(3,N.size+1),rep(4,N.size+1)) } else{ N.1 = 0.6*N N.rest = N - N.1 N.size = N.rest/(K-1) n.group = c(rep(1,N.1),rep(2,N.size),rep(3,N.size),rep(4,N.size)) }}} ", "Label": "Data Variable", "Source": "https://osf.io/rs6un/", "File": "Data.Cluster.VAR.R" },
{ "ID": 40, "Comment": "Create variable subjno; scramble data so people appear unordered in the final dataset", "Code": "ID = expand.grid(1:T,1:N)[,2] data = cbind(data,ID) p = ncol(Psi[[1]]) Y = matrix(0,nrow(data),p) colnames(Y) = sprintf(\"Y%d\",seq(1:p)) ", "Label": "Data Variable", "Source": "https://osf.io/rs6un/", "File": "Data.Cluster.VAR.R" },
{ "ID": 41, "Comment": "Frequency plot", "Code": "top.freqs = sort(colSums(data.table), decreasing = T)[1:40] cex.val = 1.6 cairo_pdf('pdfs/top_freq_segments.pdf', width = 16, height = 10) par(family = \"Doulos SIL\") plot(top.freqs ~ seq_along(top.freqs), xlab = 'Frequency rank', ylab = 'Frequency', xlim = c(1, 40), type = 'n', xaxt = 'n', cex.lab = cex.val, cex.axis = cex.val) axis(1, at = seq_along(top.freqs), labels = seq_along(top.freqs), cex.axis = 1.1) lines(seq_along(top.freqs), top.freqs, lty = 2, col = 'grey') text(seq_along(top.freqs), top.freqs, labels = names(top.freqs), cex = 2) dev.off() ", "Label": "Visualization", "Source": "https://osf.io/2qjn5/", "File": "redraw_figures.R" },
{ "ID": 42, "Comment": "Contrast between countries as a new variable", "Code": "d$CountC<-as.numeric(as.integer(as.factor(d$Country))-1.5) table(d$Country,d$CountC) ", "Label": "Data Variable", "Source": "https://osf.io/fr5ed/", "File": "01_data_prepare.R" },
{ "ID": 43, "Comment": "Estimated Marginal Means: Model 3", "Code": "m1 <- ggemmeans(model3, c(\"welle [1:4 by=.2]\", \"isced\")) m2 <- ggemmeans(model3, c(\"welle [1:4 by=.2]\", \"aee_oecd_between_z2\")) m3 <- ggemmeans(model3, c(\"welle [1:4 by=.2]\", \"lone6_between_z2\")) m1$Model = \"ISCED\" levels(m1$group) <- c(\"low\", \"middle\", \"high\") m2$Model = \"Income\" levels(m2$group) <- c(\"-1 SD\", \"Mean\", \"+1 SD\") m3$Model = \"Loneliness\" levels(m3$group) <- c(\"-1 SD\", \"Mean\", \"+1 SD\") create_plot(m1, title = \"(a) PF and Education\") create_plot(m2, title = \"(b) PF and Income\") create_plot(m3, title = \"(c) PF and Loneliness\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/dcw4x/", "File": "04_figures_estimates_marginal_means.R" },
{ "ID": 44, "Comment": "Matrix with correlations and p-values", "Code": "Cormatrix1 <- data.frame(matrix(NA,nrow = 15, ncol = 15)) for(i in 1:14){ for(j in (i+1):15){ Cormatrix1[i,j] <- round(cor(Cordata[,i], Cordata[,j]), digits=2) Cormatrix1[j,i] <- round(cor.test(Cordata[,i], Cordata[,j])$p.value, digits=2) } } Cormatrix1 ", "Label": "Data Variable", "Source": "https://osf.io/t93pf/", "File": "Simulation-Random-Significance.r" },
{ "ID": 45, "Comment": "Matrix with correlations and significance stars", "Code": "Cormatrix2 <- data.frame(matrix(NA,nrow = 15, ncol = 15)) for(i in 1:14){ for(j in (i+1):15){
Cormatrix2[i,j] <- round(cor(Cordata[,i], Cordata[,j]), digits=2) Cormatrix2[j,i] <- ifelse(cor.test(Cordata[,i], Cordata[,j])$p.value<=0.05, \"*\", \"no\") } } Cormatrix2 ", "Label": "Visualization", "Source": "https://osf.io/t93pf/", "File": "Simulation-Random-Significance.r" },
{ "ID": 46, "Comment": "Generate Psi matrix for each cluster", "Code": "for (n in 1:length(N)){ for (t in 1:length(T)){ for (p in 1:length(P)){ for (k in 1:length(K)){ for (d in 1:length(diff)){ for (r in 1:R){ Psi.list = lapply(1:K[k], function(kl) Psi.Matrix.Diff(P[p],b.ar.min,b.ar.max,b.cr.min,b.cr.max,d)) ", "Label": "Data Variable", "Source": "https://osf.io/rs6un/", "File": "Code_Simulation_Part_III_MVAR_RE.R" },
{ "ID": 47, "Comment": "Load training and testing set", "Code": "load(file = paste(\"Data_Block_Cluster_MVAR_RE_N_\",N[n],\"_T_\",T[t],\"_P_\",P[p],\"_K_\",K[k], \"_Diff_\",diff[d],\"_size_\",size[s],\"_R_\",r,\".RData\",sep = \"\")) MSE.MVAR.PS.Sys = MSE.Sys(data.list,P[p]) save(MSE.MVAR.PS.Sys, file = paste(\"MSE_MVAR_Cluster_MVAR_RE_N_\",N[n],\"_T_\",T[t],\"_P_\",P[p],\"_K_\",K[k], \"_Diff_\",diff[d],\"_size_\",size[s],\"_R_\",r,\".RData\",sep = \"\")) ", "Label": "Data Variable", "Source": "https://osf.io/rs6un/", "File": "Code_Simulation_Part_III_MVAR_RE.R" },
{ "ID": 48, "Comment": " standardize the model inputs, excluding the response and random effects ", "Code": "d_std <- stand(d, cols = f2) # use the fitting function for convenience ", "Label": "Statistical Modeling", "Source": "https://osf.io/3gfqn/", "File": "VADIS_particles_InnerC-only.R" },
{ "ID": 49, "Comment": " > Left panel: QQ-plot (uniform distribution) > Right panel: residuals against predicted values, shaded (due to sample size) with extreme residuals colored red; and 3) MAIN EFFECTS OF KEY VARIABLES: FormatInfo ", "Code": "emmip(CorResult, ~FormatInfo, type = \"response\", CIs = TRUE) (emm <- emmeans(CorResult, specs = ~FormatInfo, type = \"response\")) pairs(emm) EffPlotData_CorrFormat <- summary(emm) ## Data for Fig. 3 ", "Label": "Visualization", "Source": "https://osf.io/2sz48/", "File": "Model_Correctness.R" },
{ "ID": 50, "Comment": "Rescale order of variables on y-axis (for BRT figures)", "Code": "BRT.plot.label.limits <- c(\"site.centrality\", \"mean.annual.flow\", \"basin.area\", \"site.long\", \"site.lat\", \"pct.ISC\", \"pct.urb\", \"pct.ag\", \"pct.for\", \"ALG.cover\", \"NAT.cover\", \"LWD.reach\", \"DOC\", \"cond\", \"pH.lab\", \"total.P\", \"NH4\", \"NO3\") ", "Label": "Visualization", "Source": "https://osf.io/62je8/", "File": "DMS-NRSA-CA-QC-Figures.R" },
{ "ID": 51, "Comment": "Rename variable names on y-axis (for BRT figures)", "Code": "BRT.plot.labels <- c(\"site.centrality\" = \"Cent\", \"mean.annual.flow\" = \"Flow\", \"basin.area\" = \"Area\", \"site.long\" = \"Long\", \"site.lat\" = \"Lat\", \"pct.ISC\" = \"ISC\", \"pct.urb\" = \"Urb\", \"pct.ag\" = \"Ag\", \"pct.for\" = \"For\", \"ALG.cover\" = \"Alg\", \"NAT.cover\" = \"Nat\", \"LWD.reach\" = \"LWD\", \"DOC\" = \"DOC\", \"cond\" = \"Cond\", \"pH.lab\" = \"pH\", \"total.P\" = \"TP\", \"NH4\" = expression(NH[4]), \"NO3\" = expression(NO[3])) ", "Label": "Visualization", "Source": "https://osf.io/62je8/", "File": "DMS-NRSA-CA-QC-Figures.R" },
{ "ID": 52, "Comment": "Sample posterior and prior for graphical comparison", "Code": "post1<-extract.samples(m1) set.seed(42) prio1<-extract.prior(m1,n=10000) save.image(file=\"posterior_samples_single.RData\") ", "Label": "Visualization", "Source": "https://osf.io/fr5ed/", "File": "02_analysis_single_estimate.R" },
{ "ID": 53, "Comment": "correlation matrix for the DVs", "Code": "judcorMat2 <- lowerCor(judgmentRatings2) corr.test(judgmentRatings2) corrplot(judcorMat2, method=\"color\", type = 'lower',tl.col=\"black\", addCoef.col = \"black\", tl.srt = 45) ", "Label": "Statistical Modeling", "Source": "https://osf.io/dhmjx/", "File": "Experiment4a-Analyses.R" },
{ "ID": 54, "Comment": "Subset data that includes only partner cooperation means within 0.4 to 0.6", "Code": "bound_data <- subset(expt1_data, expt1_data$cooplc_means >= 0.4 & expt1_data$cooplc_means <= 0.6 & expt1_data$readlc_means >= 0.4 & expt1_data$readlc_means <= 0.6) ", "Label": "Data Variable", "Source": "https://osf.io/zcv4m/", "File": "winke_stevens_2017_rcode.R" },
{ "ID": 55, "Comment": "Plot histogram of chances that partner's choice was positive for all data", "Code": "coop_hist_ggplot <- ggplot(all_pc, aes(x = alllc_means * 100)) + geom_histogram(aes(fill = included), bins = 50) + # plot histogram scale_fill_manual(values = c(\"black\", \"grey50\"), name=\"\", label=c(\"Included\", \"Not included\")) + # color values inside and outside of 0.4-0.6 differently labs(x = \"Percent partner positive actions\", y = \"Number of participants\") + # label axes theme_classic() + # use classic theme theme(axis.title=element_text(size=45), axis.text=element_text(size=30), legend.text=element_text(size=30), legend.position = c(0.25, 0.9), legend.key.size = unit(2.5, 'lines')) png(file = \"figures/partner_action_histogram.png\", width = 1200, height = 750) # open device plot(coop_hist_ggplot) # plot figure dev.off() # close device ", "Label": "Visualization", "Source": "https://osf.io/zcv4m/", "File": "winke_stevens_2017_rcode.R" },
{ "ID": 56, "Comment": " Analyze accuracy as a function of payoff scheme (Standard or Costly), context (Cooperation or Newspaper), and partner action (Cooperate or Defect). Conduct binomial GLMM of payoff scheme * partner action + context for memory accuracy ", "Code": "accuracy_glmer_full <- glmer(accuracy ~
payoff_scheme * partner_action * context + (1 | subject), bound_data, family = binomial(link = \"logit\")) # calculate GLMM of full model ", "Label": "Statistical Modeling", "Source": "https://osf.io/zcv4m/", "File": "winke_stevens_2017_rcode.R" },
{ "ID": 57, "Comment": "Conduct binomial GLMMs for memory accuracy to calculate BIC values to transform to Bayes factors", "Code": "accuracy_glmer_null <- summary(glmer(accuracy ~ (1 | subject), bound_data, family = binomial(link = \"logit\"))) # calculate GLMM for null model accuracy_glmer_payoff <- summary(glmer(accuracy ~ payoff_scheme + (1 | subject), bound_data, family = binomial(link = \"logit\"))) # calculate GLMM for payoff scheme accuracy_glmer_context <- summary(glmer(accuracy ~ context + (1 | subject), bound_data, family = binomial(link = \"logit\"))) # calculate GLMM for context accuracy_glmer_action <- summary(glmer(accuracy ~ partner_action + (1 | subject), bound_data, family = binomial(link = \"logit\"))) # calculate GLMM for partner action accuracy_glmer_payoff_action <- summary(glmer(accuracy ~ payoff_scheme + partner_action + (1 | subject), bound_data, family = binomial(link = \"logit\"))) # calculate GLMM for payoff_scheme + partner_action accuracy_glmer_payoff_action_inter <- summary(glmer(accuracy ~ payoff_scheme * partner_action + (1 | subject), bound_data, family = binomial(link = \"logit\"))) # calculate GLMM for payoff_scheme * partner_action accuracy_glmer_action_context <- summary(glmer(accuracy ~ partner_action + context + (1 | subject), bound_data, family = binomial(link = \"logit\"))) # calculate GLMM for partner_action + context accuracy_glmer_action_context_inter <- summary(glmer(accuracy ~ partner_action * context + (1 | subject), bound_data, family = binomial(link = \"logit\"))) # calculate GLMM for partner_action * context accuracy_glmer_payoff_action_context <- summary(glmer(accuracy ~ payoff_scheme + partner_action + context + (1 | subject), bound_data, family = binomial(link = \"logit\"))) # calculate GLMM for payoff_scheme + partner_action + context last_accuracy_glmer_null <- summary(glmer(accuracy ~ (1 | subject), last_data, family = binomial(link = \"logit\"))) # calculate GLMM for null model last_accuracy_glmer_payoff <- summary(glmer(accuracy ~ payoff_scheme + (1 | subject), last_data, family = binomial(link = \"logit\"))) # calculate GLMM for payoff scheme last_accuracy_glmer_action <- summary(glmer(accuracy ~ partner_action + (1 | subject), last_data, family = binomial(link = \"logit\"))) # calculate GLMM for partner action last_accuracy_glmer_payoff_action <- summary(glmer(accuracy ~ payoff_scheme + partner_action + (1 | subject), last_data, family = binomial(link = \"logit\"))) # calculate GLMM for payoff_scheme + partner_action last_accuracy_glmer_payoff_action_inter <- summary(glmer(accuracy ~ payoff_scheme * partner_action + (1 | subject), last_data, family = binomial(link = \"logit\"))) # calculate GLMM for payoff_scheme * partner_action ", "Label": "Statistical Modeling", "Source": "https://osf.io/zcv4m/", "File": "winke_stevens_2017_rcode.R" },
{ "ID": 58, "Comment": "Conduct binomial GLMM of payoff scheme * partner action for memory accuracy (last-trial data)", "Code": "last_accuracy_glmer_full <- glmer(accuracy ~ payoff_scheme * partner_action + (1 | subject), last_data, family = binomial(link = \"logit\")) # calculate GLMM of full model ", "Label": "Statistical Modeling", "Source": "https://osf.io/zcv4m/", "File": "winke_stevens_2017_rcode.R" },
{ "ID": 59, "Comment": "Calculate correlation between mean number of contacts and mean memory accuracy", "Code": "coop_contacts_cor2 <- cor.test(expt2_data_subj$accuracy, expt2_data_subj$contacts) # calculate network size/accuracy correlation coop_contacts2_bfdf <- data.frame(accuracy = expt2_data_subj$accuracy, contacts = expt2_data_subj$contacts) # create new data frame for Bayesian analysis coop_contacts2_lmbf <- lmBF(accuracy ~ contacts, data = coop_contacts2_bfdf) # calculate Bayes regression coop_contacts2_bf <- extractBF(coop_contacts2_lmbf)$bf # extract Bayes factor ", "Label": "Statistical Test", "Source": "https://osf.io/zcv4m/", "File": "winke_stevens_2017_rcode.R" },
{ "ID": 60, "Comment": " Create dummy variable for ethnicity: 0 = option 4 only (Anglo/White), 1 = any other option / combination of options ", "Code": "table(survey$ethnic_group[!duplicated(survey$id)], useNA = \"ifany\") survey$ethnicity <- ifelse(survey$ethnic_group == \"4\", 0, 1) table(survey$ethnicity[!duplicated(survey$id)], useNA = \"ifany\") ", "Label": "Data Variable", "Source": "https://osf.io/jpxts/", "File": "Data_Prep_S1S2.R" },
{ "ID": 61, "Comment": " Create dummy variable for SES: 1 = mother or father completed at least some college (4 = some college), 0 = otherwise ", "Code": "table(survey$mother_educationlevel[!duplicated(survey$id)], useNA = \"ifany\") table(survey$father_educationlevel[!duplicated(survey$id)], useNA = \"ifany\") survey$SES <- ifelse(survey$mother_educationlevel >= 4 | survey$father_educationlevel >= 4, 1, 0) table(survey$SES[!duplicated(survey$id)], useNA = \"ifany\") ", "Label": "Data Variable", "Source": "https://osf.io/jpxts/", "File": "Data_Prep_S1S2.R" },
{ "ID": 62, "Comment": " Weekend: create dummy variable for weekend: 1 = weekend, 0 = weekday ", "Code": "survey$weekend <- ifelse(weekdays(survey$StartDate, abbr = TRUE) %in% c(\"Sat\", \"Sun\"), 1, 0) ", "Label": "Data Variable", "Source": "https://osf.io/jpxts/", "File": "Data_Prep_S1S2.R" },
{ "ID": 63, "Comment": "Create dummy variable for mixed interaction partners", "Code": "survey$no_partners <- apply(survey[c(\"close_peers\", \"family\", \"weak_ties\")], 1, sum) survey$mixed_partner <- ifelse(survey$no_partners > 1, 1, 0) table(survey$mixed_partner, useNA = \"ifany\") # number of observations with mixed interaction partners: 11479 (S1) / 5797 (S2) table(apply(survey[c(\"close_peers\", \"family\", \"weak_ties\")], 1, sum), useNA = \"ifany\") survey$close_peers_all <- survey$close_peers survey$family_all <- survey$family survey$weak_ties_all <- survey$weak_ties survey$close_peers <- ifelse(survey$mixed_partner == 1, NA, survey$close_peers) survey$family <- ifelse(survey$mixed_partner == 1, NA, survey$family) survey$weak_ties <- ifelse(survey$mixed_partner == 1, NA, survey$weak_ties) table(survey$close_peers, useNA = \"ifany\") # number of observations with interactions with close peers ONLY: 17194 (S1) / 9406 (S2) table(survey$interacting_people[survey$close_peers == 1]) table(survey$family, useNA = \"ifany\") # number of observations with interactions with family ONLY: 2450 (S1) / 1946 (S2) table(survey$interacting_people[survey$family == 1]) table(survey$weak_ties, useNA = \"ifany\") # number of observations with interactions with weak ties ONLY: 4336 (S1) / 2823 (S2) table(survey$interacting_people[survey$weak_ties == 1]) table(apply(survey[c(\"close_peers\", \"family\", \"weak_ties\")], 1, sum), useNA = \"ifany\") # 23980 (S1) / 14175 (S2) length(which(survey$mixed_mode == 1 | survey$SKIP2 == 1 | survey$OTHER2 == 1 | is.na(survey$interacting_people) | survey$mixed_partner == 1)) table(apply(survey[c(\"close_peers_all\", \"family_all\", \"weak_ties_all\")], 1, sum), useNA = \"ifany\") # 23980, 10176, 1303 (S1) / 14175, 5116, 681 (S2) length(which(survey$mixed_mode == 1 | survey$SKIP2 == 1 | survey$OTHER2 == 1 | is.na(survey$interacting_people))) survey$no_partners2 <- apply(survey[c(\"friends_roommates\", \"significant_other\", \"family_all\", \"weak_ties_all\")], 1, sum) survey$mixed_partner2 <- ifelse(survey$no_partners2 > 1, 1, 0) table(survey$mixed_partner2, useNA = \"ifany\") # number of observations with mixed interaction partners: 13456 (S1) / 6847 (S2) table(apply(survey[c(\"friends_roommates\", \"significant_other\", \"family_all\", \"weak_ties_all\")], 1, sum), useNA = \"ifany\") survey$friends_roommates2 <- ifelse(survey$mixed_partner2 == 1, NA, survey$friends_roommates) survey$significant_other2 <- ifelse(survey$mixed_partner2 == 1, NA, survey$significant_other) survey$family2 <- ifelse(survey$mixed_partner2 == 1, NA, survey$family_all) survey$weak_ties2 <- ifelse(survey$mixed_partner2 == 1, NA, survey$weak_ties_all) table(survey$friends_roommates2, useNA = \"ifany\") # number of observations with interactions with friends and roommates ONLY: 12882 (S1) / 6865 (S2) table(survey$interacting_people[survey$friends_roommates2 == 1]) table(survey$significant_other2, useNA = \"ifany\") # number of observations with interactions with significant others ONLY: 2335 (S1) / 1491 (S2) table(survey$interacting_people[survey$significant_other2 == 1]) table(survey$family2, useNA = \"ifany\") # number of observations with interactions with family ONLY: 2450 (S1) / 1946 (S2) table(survey$interacting_people[survey$family2 == 1]) table(survey$weak_ties2, useNA = \"ifany\") # number of observations with interactions with weak ties ONLY: 4336 (S1) / 2823 (S2) table(survey$interacting_people[survey$weak_ties2 == 1]) table(apply(survey[c(\"friends_roommates2\", \"significant_other2\", \"family2\", \"weak_ties2\")], 1, sum), useNA = \"ifany\") # 22003 (S1) / 13125 (S2) length(which(survey$mixed_mode == 1 | survey$SKIP2 == 1 | survey$OTHER2 == 1 | is.na(survey$interacting_people) | survey$mixed_partner2 == 1)) ", "Label": "Data Variable", "Source": "https://osf.io/jpxts/", "File": "Data_Prep_S1S2.R" },
{ "ID": 64, "Comment": "Create dummy variable for significant others", "Code": "survey$significant_other <- ifelse(survey$mixed_mode == 1 | survey$SKIP2 == 1 | survey$OTHER2 == 1 | is.na(survey$interacting_people), NA, ifelse(survey$Significant_other == 1, 1, 0)) table(survey$significant_other, useNA = \"ifany\") # number of observations with interactions with significant others: 6681 (S1) / 3822 (S2) table(survey$interacting_people[survey$significant_other == 1]) ", "Label": "Data Variable", "Source": "https://osf.io/jpxts/", "File": "Data_Prep_S1S2.R" },
{ "ID": 65, "Comment": "converting PPID and Statement to factors", "Code": "Data$PPID <- as.factor(Data$PPID) Data$Statement <- as.factor(Data$Statement)", "Label": "Data Variable", "Source": "https://osf.io/dh32q/", "File": "PositivityratingsRscript.R" },
{ "ID": 66, "Comment": "find Q1, Q3, and interquartile range for values in DV2 ", "Code": "Q1 <- quantile(DV2, .25) Q3 <- quantile(DV2, .75) IQR2 <- IQR(DV2) ", "Label": "Data Variable", "Source": "https://osf.io/dzwct/", "File": "Fisher_Z_3PERIODS_std.R" },
{ "ID": 67, "Comment": "only keep rows in dataframe that have values within 1.5*IQR of Q1 and Q3", "Code": "no_outliers2 <- subset(df, DV2> (Q1 - 1.5*IQR2) & DV2< (Q3 + 1.5*IQR2))
no_outliers3 <- subset(df, DV3> (Q1 - 1.5*IQR3) & DV3< (Q3 + 1.5*IQR3)) ", "Label": "Data Variable", "Source": "https://osf.io/dzwct/", "File": "Fisher_Z_3PERIODS_std.R" },
{ "ID": 68, "Comment": "Plot beta weights for interaction", "Code": "betalms # called \"beta\" but represents \"b\" betalms <- betalms[,1:2] names(betalms) <- c(\"b\", \"SE\") ", "Label": "Visualization", "Source": "https://osf.io/k853j/", "File": "ESS_openess_2018_perCountry.R" },
{ "ID": 69, "Comment": "Plot beta weights for engagement", "Code": "betalms_eng betalms_eng <- betalms_eng[,1:2] names(betalms_eng) <- c(\"b\", \"SE\") ", "Label": "Visualization", "Source": "https://osf.io/k853j/", "File": "ESS_openess_2018_perCountry.R" },
{ "ID": 70, "Comment": "Arrange b coeff plots", "Code": "grid.arrange(betalms_ope_p, betalms_eng_p, betalms_p, ncol = 3, nrow = 1) ", "Label": "Visualization", "Source": "https://osf.io/k853j/", "File": "ESS_openess_2018_perCountry.R" },
{ "ID": 71, "Comment": "Plot slopes", "Code": "listofcharts = list() # create empty list for charts index = 0 # zero the index for (df in listofdfs) { index = index + 1 xlab_str = paste0(\"Economic beliefs in \", names(listofdfs)[index]) listofcharts[[index]] <- ggpredict(listoflms[[index]], terms = c(\"conservation2_s_c\", \"polit_eng_c[-0.14,0.14]\"), type = \"fe\") %>% plot(colors = \"bw\") + ggtitle(names(listoflms[index])) + xlab(\"NSC\") + ylab(\"Economic beliefs\") + labs(linetype = \"Political \\nengagement\") + scale_linetype_manual(values=c(\"solid\", \"dashed\"), labels = c(\"Low\", \"High\")) + theme_classic() + theme(legend.position = \"none\") + ", "Label": "Visualization", "Source": "https://osf.io/k853j/", "File": "ESS_openess_2018_perCountry.R" },
{ "ID": 72, "Comment": "Does the start model fit the data significantly better as compared to a model without random intercepts over items?", "Code": "tic(); start_min_item_intercepts <- glmer(bin_score ~ input*testmoment*learningtype + (1|participant), family = 'binomial', data = data, control = glmerControl(optimizer = \"bobyqa\", optCtrl=list(maxfun=1e5))); toc() anova(start_min_item_intercepts, start) # Start model is significantly better (p < .001); AIC difference of +- 240 ", "Label": "Statistical Modeling", "Source": "https://osf.io/938ye/", "File": "Statistical_models_no_T3_criticals.R" },
{ "ID": 73, "Comment": "Random slope of input over item", "Code": "tic(); input_item <- glmer(bin_score ~ input*testmoment*learningtype + (1+input|item) + (1|participant), family = 'binomial', data = data, control = glmerControl(optimizer = \"bobyqa\", optCtrl=list(maxfun=1e5))); toc() anova(start, input_item) # Significant improvement (p = .03); small AIC difference (3). summary(rePCA(input_item)) # All dimensions are supported by the data. >> KEEP ", "Label": "Data Variable", "Source": "https://osf.io/938ye/", "File": "Statistical_models_no_T3_criticals.R" },
{ "ID": 74, "Comment": "Investigate model fit: inspect residuals with a binned residual plot", "Code": "residualsplot <- binnedplot(fitted(final), resid(final, type = \"response\"), cex.pts=1, col.int=\"black\", xlab = \"Estimated score (as probability)\") ", "Label": "Visualization", "Source": "https://osf.io/938ye/", "File": "Statistical_models_no_T3_criticals.R" },
{ "ID": 75, "Comment": "model 1 is just baseline PHQ-9 severity in a simple logistic regression", "Code": "HI_prognostic_model1 = glm(y ~ PHQ9_first, data = X_HI_only, family = \"binomial\") summary(HI_prognostic_model1) ", "Label": "Statistical Modeling", "Source": "https://osf.io/wxgzu/", "File": "outcome_evaluation_code_v5.R" },
{ "ID": 76, "Comment": "model 3 is a logistic regression built using elastic net regularization; use elastic net to build this model", "Code": "set.seed(12345678) fit_HI = glmnet(data.matrix(X_HI_only_forEN), y_HI_only, family=\"binomial\", alpha=.5) plot(fit_HI,label=TRUE) HI_prognostic_model3 = cv.glmnet(data.matrix(X_HI_only_forEN), y_HI_only, family=\"binomial\", alpha=.5) plot(HI_prognostic_model3) HI_prognostic_model3$lambda.min HI_prognostic_model3$lambda.1se print(coef(HI_prognostic_model3, s = \"lambda.min\")) print(coef(HI_prognostic_model3, s = \"lambda.1se\")) ", "Label": "Statistical Modeling", "Source": "https://osf.io/wxgzu/", "File": "outcome_evaluation_code_v5.R" },
{ "ID": 77, "Comment": "calculate deviance statistic from the log-likelihood: sum from i=1 to N of [ Yi*ln(P(Yi)) + (1-Yi)*ln(1-P(Yi)) ]; calculate Brier score: (1/n)*sum((pi-oi)^2)", "Code": "log_likelihood_calculator_HI_1 = rep(NA,dim(X_HI_only_hold_out)[1]) brier_score_calculator_HI_1 = rep(NA,dim(X_HI_only_hold_out)[1]) log_likelihood_calculator_HI_2 = rep(NA,dim(X_HI_only_hold_out)[1]) brier_score_calculator_HI_2 = rep(NA,dim(X_HI_only_hold_out)[1]) log_likelihood_calculator_HI_3 = rep(NA,dim(X_HI_only_hold_out)[1]) brier_score_calculator_HI_3 = rep(NA,dim(X_HI_only_hold_out)[1]) log_likelihood_calculator_HI_4 = rep(NA,dim(X_HI_only_hold_out)[1]) brier_score_calculator_HI_4 = rep(NA,dim(X_HI_only_hold_out)[1]) log_likelihood_calculator_HI_5 = rep(NA,dim(X_HI_only_hold_out)[1]) brier_score_calculator_HI_5 = rep(NA,dim(X_HI_only_hold_out)[1]) for (i in 1:dim(X_HI_only_hold_out)[1]){ log_likelihood_calculator_HI_1[i] = y_HI_only_hold_out[i]*log(HI_prognosis_1[i])+(1-y_HI_only_hold_out[i])*log(1-HI_prognosis_1[i]) brier_score_calculator_HI_1[i] = (HI_prognosis_1[i]-y_HI_only_hold_out[i])^2 log_likelihood_calculator_HI_2[i] = y_HI_only_hold_out[i]*log(HI_prognosis_2[i])+(1-y_HI_only_hold_out[i])*log(1-HI_prognosis_2[i]) brier_score_calculator_HI_2[i] = (HI_prognosis_2[i]-y_HI_only_hold_out[i])^2 log_likelihood_calculator_HI_3[i] = y_HI_only_hold_out[i]*log(HI_prognosis_3[i])+(1-y_HI_only_hold_out[i])*log(1-HI_prognosis_3[i]) brier_score_calculator_HI_3[i] = (HI_prognosis_3[i]-y_HI_only_hold_out[i])^2 log_likelihood_calculator_HI_4[i] = y_HI_only_hold_out[i]*log(HI_prognosis_4[i])+(1-y_HI_only_hold_out[i])*log(1-HI_prognosis_4[i]) brier_score_calculator_HI_4[i] = (HI_prognosis_4[i]-y_HI_only_hold_out[i])^2 log_likelihood_calculator_HI_5[i] = y_HI_only_hold_out[i]*log(HI_prognosis_5[i])+(1-y_HI_only_hold_out[i])*log(1-HI_prognosis_5[i]) brier_score_calculator_HI_5[i] = (HI_prognosis_5[i]-y_HI_only_hold_out[i])^2 } deviance_for_HI_model_1 = sum(log_likelihood_calculator_HI_1,na.rm=TRUE)
brier_score_for_HI_model_1 = (1/dim(X_HI_only_hold_out)[1])*sum(brier_score_calculator_HI_1,na.rm=TRUE) deviance_for_HI_model_2 = sum(log_likelihood_calculator_HI_2,na.rm=TRUE) brier_score_for_HI_model_2 = (1/dim(X_HI_only_hold_out)[1])*sum(brier_score_calculator_HI_2,na.rm=TRUE) deviance_for_HI_model_3 = sum(log_likelihood_calculator_HI_3,na.rm=TRUE) brier_score_for_HI_model_3 = (1/dim(X_HI_only_hold_out)[1])*sum(brier_score_calculator_HI_3,na.rm=TRUE) deviance_for_HI_model_4 = sum(log_likelihood_calculator_HI_4,na.rm=TRUE) brier_score_for_HI_model_4 = (1/dim(X_HI_only_hold_out)[1])*sum(brier_score_calculator_HI_4,na.rm=TRUE) deviance_for_HI_model_5 = sum(log_likelihood_calculator_HI_5,na.rm=TRUE) brier_score_for_HI_model_5 = (1/dim(X_HI_only_hold_out)[1])*sum(brier_score_calculator_HI_5,na.rm=TRUE) print(round(deviance_for_HI_model_1,1)) print(round(brier_score_for_HI_model_1,3)) print(round(deviance_for_HI_model_2,1)) print(round(brier_score_for_HI_model_2,3)) print(round(deviance_for_HI_model_3,1)) print(round(brier_score_for_HI_model_3,3)) print(round(deviance_for_HI_model_4,1)) print(round(brier_score_for_HI_model_4,3)) print(round(deviance_for_HI_model_5,1)) print(round(brier_score_for_HI_model_5,3)) ", "Label": "Statistical Test", "Source": "https://osf.io/wxgzu/", "File": "outcome_evaluation_code_v5.R" },
{ "ID": 78, "Comment": "here we create a PAI-style model that includes variables and their interaction with tx; we use this model as a demonstration:", "Code": "differential_model_6 = glm(y ~ tx*(PHQ9_first+WSAS_first+Employment_binary+Ethnicity_binary+GAD7_first+Phobia_Q3_first), data = X_training, family = \"binomial\") summary(differential_model_6) ", "Label": "Statistical Modeling", "Source": "https://osf.io/wxgzu/", "File": "outcome_evaluation_code_v5.R" },
{ "ID": 79, "Comment": "grab the indices of which individuals got tx=0 (LI) and tx=1 (HI)", "Code": "tx_HI_i = which(X_hold_out$tx==1, arr.ind = TRUE) tx_LI_i = which(X_hold_out$tx==0, arr.ind = TRUE) step_size = 150 window_size = 300 bin_number = ceiling((dim(X_hold_out)[1]-window_size)/step_size) ", "Label": "Data Variable", "Source": "https://osf.io/wxgzu/", "File": "outcome_evaluation_code_v5.R" },
{ "ID": 80, "Comment": "the below command can be used to see what y limits and x limits should be used to standardize all plots", "Code": "cat(\"y_limits\",round(c(min(observed_differential_response),max(observed_differential_response)),3),\"\\n\") cat(\"x_limits\",round(c(min(predicted_differential_response),max(predicted_differential_response)),3),\"\\n\") cat(\"predicted avg differential response (full sample avg):\",round(mean(differential_prediction),3),\"\\n\") cat(\"predicted avg differential response (binned avg):\",round(mean(predicted_differential_response),3),\"\\n\") predicted_avg_diff = mean(differential_prediction) plot(predicted_differential_response,observed_differential_response,'p',ylim=c(-.1,.3), xlim = c(-.1,.3),xlab=x_label) plot(predicted_differential_response,observed_differential_response,'p',ylim=c(-.1,.3), xlim = c(min(predicted_differential_response),max(predicted_differential_response)),xlab=x_label) cor_windows = cor(1:bin_number,observed_differential_response) cor_pred_diff = cor(predicted_differential_response,observed_differential_response) cat(\"windows correlation = \",round(cor_windows,3),\"\\n\") cat(\"predicted difference correlation = \",round(cor_pred_diff,3),\"\\n\") ", "Label": "Visualization", "Source": "https://osf.io/wxgzu/", "File": "outcome_evaluation_code_v5.R" },
{ "ID": 81, "Comment": "we can use the t-statistic to adjust for the error around the slope; the t-value is the coefficient divided by its standard error", "Code": "cat(\"slope t-stat = \",round(sc_coefs_summary$t.value[2],3),\"\\n\") print(round(sc_coefs_summary,3)) cat(\"\\n\") observed_range = max(observed_differential_response) - min(observed_differential_response) predicted_range = max(predicted_differential_response) - min(predicted_differential_response) tstat = sc_coefs_summary$t.value[2] slope = sc_coefs_summary$Estimate[2] model_evaluations[k,] = c(tstat, slope, observed_range, predicted_range, predicted_avg_diff, cor_pred_diff) model_predictions[k,] = predicted_differential_response model_results[k,] = observed_differential_response } print(round(model_evaluations,3)) ", "Label": "Statistical Test", "Source": "https://osf.io/wxgzu/", "File": "outcome_evaluation_code_v5.R" },
{ "ID": 82, "Comment": "comparisons to average for each trait", "Code": "summary(lmer(RATINGc ~ TSELFc* EX + SMEANc* EX + SDMEAN* EX +PSELFc* EX + (TSELFc + SMEANc + SDMEANc +PSELFc | PID),data= subset(fmimlm, fmimlm$motive == 0 ) )) summary(lmer(RATINGc ~ TSELFc* OP + SMEANc* OP + SDMEAN* OP +PSELFc*OP+ (TSELFc + SMEANc + SDMEANc +PSELFc | PID),data= subset(fmimlm, fmimlm$motive == 0 ) )) summary(lmer(RATINGc ~ TSELFc* AG + SMEANc* AG + SDMEAN* AG +PSELFc*AG + (TSELFc + SMEANc + SDMEANc +PSELFc | PID),data= subset(fmimlm, fmimlm$motive == 0 ) )) summary(lmer(RATINGc ~ TSELFc* NE + SMEANc* NE + SDMEAN* NE +PSELFc*NE+ (TSELFc + SMEANc + SDMEANc +PSELFc | PID),data= subset(fmimlm, fmimlm$motive == 0 ) )) summary(lmer(RATINGc ~ TSELFc* CO + SMEANc* CO + SDMEAN* CO+PSELFc*CO+ (TSELFc + SMEANc + SDMEANc +PSELFc | PID), data= subset(fmimlm,fmimlm$motive == 0 ) )) ", "Label": "Data Variable", "Source": "https://osf.io/ns4h9/", "File": "RScript_motives.R" },
{ "ID": 83, "Comment": " Conduct RSA: we want to estimate the RSA model in which the intercept is allowed to vary across states.
The equation for individual i living in state j (z = self-esteem, x = IV, y = SV) is: zij = b0 + b1*xij + b2*yj + b3*xij^2 + b4*xij*yj + b5*yj^2 + uj + eij. Specify and estimate this model: ", "Code": "m.c <- lmer(selfesteem ~ IV.c + SV.c + IV2.c + IVSV.c + SV2.c + (1 | state), data = df) summary(m.c) ", "Label": "Statistical Modeling", "Source": "https://osf.io/jhyu9/", "File": "example_Rcode_mlrsa_osf_oneL1pred.R" },
{ "ID": 84, "Comment": "Plot the average surface using MLRSA_AverageSurfacePlot:", "Code": "MLRSA_AverageSurfacePlot(m.c, name_vars=c(\"IV.c\",\"SV.c\",\"IV2.c\",\"IVSV.c\",\"SV2.c\"), outcome=\"selfesteem\", data=df, xlab=\"Individual-level values\", ylab=\"State-level values\", zlab=\"Self-esteem\") ", "Label": "Visualization", "Source": "https://osf.io/jhyu9/", "File": "example_Rcode_mlrsa_osf_oneL1pred.R" },
{ "ID": 85, "Comment": "only keeping latest reported dates for each patient", "Code": "data_lab_res_dcr <- data_lab[!duplicated(data_lab$shcsid,fromLast=TRUE),] data_drug_res_dcr <- data_drug[!duplicated(data_drug$shcsid,fromLast=TRUE),] data_dis_res_dcr <- data_dis[order(data_dis$shcsid,data_dis$newdate),] data_dis_res_dcr <- data_dis_res_dcr[!duplicated(data_dis_res_dcr$shcsid,fromLast=TRUE),] ", "Label": "Data Variable", "Source": "https://osf.io/gy5vm/", "File": "preprocess_SHCS.R" },
{ "ID": 86, "Comment": "Plots with H(A|M), non-normalized conditional entropy. Getting mean, median and standard deviation across different motifs per period ", "Code": "N <- aggregate(CEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = length) MEAN <- aggregate(CEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = mean) MEDIAN <- aggregate(CEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = median) SD <- aggregate(CEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = sd) resultsmotifs_summary <- cbind.data.frame(N, MEAN$CEAuthoritiesMotifs, MEDIAN$CEAuthoritiesMotifs, SD$CEAuthoritiesMotifs) colnames(resultsmotifs_summary) <- c(\"DATE\",\"N\",\"MEAN\",\"MEDIAN\",\"SD\") resultsmotifs_summary$SE <- resultsmotifs_summary$SD / sqrt(resultsmotifs_summary$N) ", "Label": "Visualization", "Source": "https://osf.io/uckzx/", "File": "P1_motif-by-motif_newbins.R" },
{ "ID": 87, "Comment": "Plots with H(A|M)/H(A), normalized conditional entropy", "Code": "resultsmotifs <- as.data.frame(rbind(results330,results350,results370,results390,results405,results415,results425, results435,results455,results470,results490,results600)) ", "Label": "Visualization", "Source": "https://osf.io/uckzx/", "File": "P1_motif-by-motif_newbins.R" },
{ "ID": 88, "Comment": "Getting mean, median and standard deviation across different motifs per period ", "Code": "N <- aggregate(NormCEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = length) MEAN <- aggregate(NormCEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = mean) MEDIAN <- aggregate(NormCEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = median) SD <- aggregate(NormCEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = sd) Nresultsmotifs_summary <- cbind.data.frame(N, MEAN$NormCEAuthoritiesMotifs, MEDIAN$NormCEAuthoritiesMotifs, SD$NormCEAuthoritiesMotifs) colnames(Nresultsmotifs_summary) <- c(\"DATE\",\"N\",\"MEAN\",\"MEDIAN\",\"SD\") Nresultsmotifs_summary$SE <- Nresultsmotifs_summary$SD / sqrt(Nresultsmotifs_summary$N) ", "Label": "Statistical Modeling", "Source": "https://osf.io/uckzx/", "File": "P1_motif-by-motif_newbins.R" },
{ "ID": 89, "Comment": "Plot mean and median H(A|M)/H(A) per motif per period", "Code": "require(ggplot2) ggmean <-
ggplot(Nresultsmotifs_summary,aes(x=DATE,y=MEAN)) + labs(title = \"P1: Mean normalized conditional entropy of authorities given motifs across motifs\", x = \"Year BCE\", y = \"mean H(A|M)/H(A) across motifs\") + scale_x_reverse() + geom_errorbar(aes(ymin=Nresultsmotifs_summary$MEAN-Nresultsmotifs_summary$SE, ymax=Nresultsmotifs_summary$MEAN+Nresultsmotifs_summary$SE),width=.1) + geom_line() + geom_point() ggmean ggmedian <- ggplot(Nresultsmotifs_summary,aes(x=DATE,y=MEDIAN)) + labs(title = \"P1: Median normalized conditional entropy of authorities given motifs across motifs\", x = \"Year BCE\", y = \"median H(A|M)/H(A) across motifs\") + scale_x_reverse() + geom_line() + geom_point() ggmedian ", "Label": "Visualization", "Source": "https://osf.io/uckzx/", "File": "P1_motif-by-motif_newbins.R" }, { "ID": 90, "Comment": "In renamed \"gender\" variable replace \"son\" with \"Male\" etc. ", "Code": "StataGDIM$gender <- gsub('son', 'Male', StataGDIM$gender) StataGDIM$gender <- gsub('daughter', 'Female', StataGDIM$gender) StataGDIM$gender <- gsub('all', 'Mixed', StataGDIM$gender) ", "Label": "Data Variable", "Source": "https://osf.io/pk9my/", "File": "PNAS_Social_Mobility_public.R" }, { "ID": 91, "Comment": "Create cohort (decade of birth) variable in ACE data", "Code": "StataACE <- StataACE %>% mutate(cohort = as.integer(dob_midrange/10)*10)", "Label": "Data Variable", "Source": "https://osf.io/pk9my/", "File": "PNAS_Social_Mobility_public.R" }, { "ID": 92, "Comment": "Create short cohort ID by concatenating two character vectors. cohortIDshort is created to reduce clutter in multipanel figures", "Code": "ETmerged$cohort_short <- substring(ETmerged$cohort, 3) ETmerged$cohortIDshort <- paste(ETmerged$wbcode, ETmerged$cohort_short, sep = \"\", collapse = NULL) ", "Label": "Data Variable", "Source": "https://osf.io/pk9my/", "File": "PNAS_Social_Mobility_public.R" }, { "ID": 93, "Comment": "Inverse variance weightings for variance of h2, c2 and e2 ", "Code": "ETmerged<-ETmerged %>% mutate (h2weight = 1/h2var) ETmerged<-ETmerged %>% mutate (c2weight = 1/c2var) ETmerged<-ETmerged %>% mutate (e2weight = 1/e2var)", "Label": "Statistical Modeling", "Source": "https://osf.io/pk9my/", "File": "PNAS_Social_Mobility_public.R" }, { "ID": 94, "Comment": "Manually derive pvalues from tvalues for Alt_Norway (for a twosided ttest):", "Code": "2*pt(abs(-1.0320512), df=14,lower.tail=FALSE) 2*pt(abs(2.48367345), df=14,lower.tail=FALSE) 2*pt(abs(-2.828329), df=14,lower.tail=FALSE) 2*pt(abs(1.2811099), df=14,lower.tail=FALSE) 2*pt(abs(3.4124371), df=14,lower.tail=FALSE) 2*pt(abs(0.6433726), df=14,lower.tail=FALSE) 2*pt(abs(-1.0321), df=14,lower.tail=FALSE) 2*pt(abs(2.4837), df=14,lower.tail=FALSE) 2*pt(abs(-2.8283), df=14,lower.tail=FALSE) ", "Label": "Statistical Test", "Source": "https://osf.io/pk9my/", "File": "PNAS_Social_Mobility_public.R" }, { "ID": 95, "Comment": "correlations separately for each partner partner 1", "Code": "chart.Correlation(data_dyadic[ , c(\"pes_1\",\"p_self_1\",\"d_self_1\",\"p_other_1\",\"d_other_1\")], use = \"pairwise.complete.obs\", pch = 20, histogram = TRUE) ", "Label": "Data Variable", "Source": "https://osf.io/sb3kw/", "File": "Study3B_analyses.R" }, { "ID": 96, "Comment": " Remove\" AoI > 1200 or < 50 ms ", "Code": "prevalues_gaze <- sum(PAST_O_19$gazedur > 0, na.rm=T)+sum(PAST_M_19$gazedur > 0, na.rm=T)+ sum(PRES_M_19$gazedur > 0, na.rm=T)+sum(PRES_O_19$gazedur > 0, na.rm=T) prevalues_fix <- sum(PAST_O_19$fixdur > 0, na.rm=T)+sum(PAST_M_19$fixdur > 0, na.rm=T)+ 
sum(PRES_M_19$fixdur > 0, na.rm=T)+sum(PRES_O_19$fixdur > 0, na.rm=T) PAST_M_19$gazedur[PAST_M_19$gazedur > 1200 | PAST_M_19$gazedur < 50] <- NaN PAST_M_19$fixdur[PAST_M_19$fixdur > 1200 | PAST_M_19$fixdur < 50] <- NaN PRES_O_19$gazedur[PRES_O_19$gazedur > 1200 | PRES_O_19$gazedur < 50] <- NaN PRES_O_19$fixdur[PRES_O_19$fixdur > 1200 | PRES_O_19$fixdur < 50] <- NaN PRES_M_19$gazedur[PRES_M_19$gazedur > 1200 | PRES_M_19$gazedur < 50] <- NaN PRES_M_19$fixdur[PRES_M_19$fixdur > 1200 | PRES_M_19$fixdur < 50] <- NaN PAST_O_19$gazedur[PAST_O_19$gazedur > 1200 | PAST_O_19$gazedur < 50] <- NaN PAST_O_19$fixdur[PAST_O_19$fixdur > 1200 | PAST_O_19$fixdur < 50] <- NaN postvalues_gaze <- sum(PAST_O_19$gazedur > 0, na.rm=T)+sum(PAST_M_19$gazedur > 0, na.rm=T)+ sum(PRES_M_19$gazedur > 0, na.rm=T)+sum(PRES_O_19$gazedur > 0, na.rm=T) postvalues_fix <- sum(PAST_O_19$fixdur > 0, na.rm=T)+sum(PAST_M_19$fixdur > 0, na.rm=T)+ sum(PRES_M_19$fixdur > 0, na.rm=T)+sum(PRES_O_19$fixdur > 0, na.rm=T) dataloss_fix <- prevalues_fix-postvalues_fix dataloss_fix_percentage <- (1-(postvalues_fix/prevalues_fix))*100 dataloss_gaze <- prevalues_gaze-postvalues_gaze dataloss_gaze_percentage <- (1-(postvalues_gaze/prevalues_gaze))*100 ", "Label": "Data Variable", "Source": "https://osf.io/qynhu/", "File": "subject19.R" }, { "ID": 97, "Comment": "Distributional properties of the individual items for the German-speaking sample", "Code": "jmv::descriptives( data = DataGerman, vars = vars(DMW1, DMW2, DMW3, DMW4, SMW1, SMW2, SMW3, SMW4, SBPS1, SBPS2, SBPS3, SBPS4, SBPS5, SBPS6, SBPS7, SBPS8), freq = TRUE, hist = TRUE, violin = TRUE, skew = TRUE, kurt = TRUE) ", "Label": "Data Variable", "Source": "https://osf.io/tg3fq/", "File": "syntax_SDMWS&SBPS.R" }, { "ID": 98, "Comment": "Distributional properties of the individual items for the US sample", "Code": "jmv::descriptives( data = DataUS, vars = vars(DMW1, DMW2, DMW3, DMW4, SMW1, SMW2, SMW3, SMW4, SBPS1, SBPS2, SBPS3, SBPS4, SBPS5, SBPS6, SBPS7, SBPS8), freq = TRUE, hist = TRUE, violin = TRUE, skew = TRUE, kurt = TRUE) ", "Label": "Data Variable", "Source": "https://osf.io/tg3fq/", "File": "syntax_SDMWS&SBPS.R" }, { "ID": 99, "Comment": "Network analysis using EBICglasso", "Code": "n1<-estimateNetwork(DataAll, default= \"EBICglasso\") plot(n1, groups = gr, nodeNames = names, legend.cex=.35) centrality_auto(n1, weighted = TRUE, signed = TRUE) centralityPlot(n1, include =c(\"Betweenness\",\"Closeness\", \"Strength\")) print(n1) ", "Label": "Statistical Modeling", "Source": "https://osf.io/tg3fq/", "File": "syntax_SDMWS&SBPS.R" }, { "ID": 100, "Comment": "Network based on correlations", "Code": "n1a<-estimateNetwork(DataAll, default= \"cor\") plot(n1a, groups = gr, legend.cex=.35) ", "Label": "Statistical Modeling", "Source": "https://osf.io/tg3fq/", "File": "syntax_SDMWS&SBPS.R" }, { "ID": 101, "Comment": "Network analysis using EBICglasso with the German-speaking sample, including the open-mindedness scale", "Code": "gr2<- list('Deliberate MW'=c(1:4), 'Spontaneous MW'=c(5:8), 'Boredom'=c(9:16), 'Open-Mindedness'=c(17:22)) names2<-c(\"I allow my thoughts to wander on purpose\", \"I enjoy mind-wandering\", \"I find mind-wandering is a good way to cope with boredom\", \"I allow myself to get absorbed in pleasant fantasy\", \"I find my thoughts wandering spontaneously\", \"When I mind-wander my thoughts tend to be pulled from topic to topic\", \"It feels like I don’t have control over when my mind wanders\", \"I mind wander even when I’m supposed to be doing something else\", \"I often find
myself at “loose ends,” not knowing what to do\", \"I find it hard to entertain myself\", \"Many things I have to do are repetitive and monotonous\", \"It takes more stimulation to get me going than most people\", \"I don’t feel motivated by most things that I do\", \"In most situations, it is hard for me to find something to do or see to keep me interested\", \"Much of the time, I just sit around doing nothing\", \"Unless I am doing something exciting, even dangerous, I feel half-dead and dull\", \"Has few artistic interests\", \"Is complex, a deep thinker\", \"Is original, comes up with new ideas\", \"Is fascinated by art, music, or literature\", \"Has little interest in abstract ideas\", \"Has little creativity\") n2<-estimateNetwork(DataGerman, default= \"EBICglasso\") plot(n2, groups = gr2, nodeNames = names2, legend.cex=.35) centrality_auto(n2, weighted = TRUE, signed = TRUE) centralityPlot(n2, include =c(\"Betweenness\",\"Closeness\", \"Strength\")) print(n2) ", "Label": "Statistical Modeling", "Source": "https://osf.io/tg3fq/", "File": "syntax_SDMWS&SBPS.R" }, { "ID": 102, "Comment": "Extract the vectors of analysed characteristics of Partner and Non-biological father from the dataset.", "Code": "text<-paste(\"trait_p<-data$Partner_\",char,sep=\"\") eval(parse(text=text)) text<-paste(\"trait_f<-data$Nonbiol_\",char,sep=\"\") eval(parse(text=text)) text<-paste(\"trait_pb<-dbiol$Partner_\",char,sep=\"\") eval(parse(text=text)) text<-paste(\"trait_b<-dbiol$Biol_\",char,sep=\"\") eval(parse(text=text)) ", "Label": "Data Variable", "Source": "https://osf.io/greqt/", "File": "functions2.R" }, { "ID": 103, "Comment": "Cohen's d. First create the equivalent arrangement for the estimation of differences if the father is biological and present", "Code": "muB<-rep(mean(diffb,na.rm=T),15) sdB<-rep(sd(diffb,na.rm=T),15) nB<-rep(length(diffb[!is.na(diffb)]),15) ", "Label": "Statistical Test", "Source": "https://osf.io/greqt/", "File": "functions2.R" }, { "ID": 104, "Comment": "Plots also when there is no interaction, and also squared terms (potentially involved in interactions). Create a data frame storing the observed mean values per combination of the values of the gridded covariate and factor levels in the data; create vectors denoting the grid-cell center values for the covariate:", "Code": "if(!is.na(grid.resol)){ xvar=seq(min(plot.data[,covariate]), max(plot.data[,covariate]), length.out=grid.resol) bin.x=cut(x=plot.data[,covariate], breaks=xvar, include.lowest=T, labels=F) bin.x=min(xvar)+diff(xvar[1:2])/2+(bin.x-1)*diff(xvar[1:2]) }else{ bin.x=plot.data[, covariate] } ", "Label": "Visualization", "Source": "https://osf.io/vjeb3/", "File": "draw.2.w.int.bw.1.cov.and.1.fac.r" }, { "ID": 105, "Comment": "remove rows where wkl_uuid is NA:", "Code": "VS <- VS[!is.na(VS$wkl_uuid), ] ", "Label": "Data Variable", "Source": "https://osf.io/w7pjy/", "File": "format_captWKLquality.R" }, { "ID": 106, "Comment": "Generate data assuming an underlying VAR(1) process. Function inputs: N, the number of samples; T.days and T.beeps, the number of days and the number of beeps ESM is conducted (total number of assessments is T.days x T.beeps); Psi, a matrix with the fixed regression weights; mu, a vector with the fixed intercepts; var.Psi, the variance of the random regression weights (i.e., assumed to be equal). For each participant, it is checked whether the matrix Psi conforms to the assumption of a stationary time series, with the absolute value of the
maximum eigenvalue smaller than 1. The distribution of the random effects resembles a truncated multivariate normal distribution. This function simulates data from a VAR(1) model ", "Code": "Data.VAR.Fixed = function(N,T,Psi,cor.Sigma){ p = ncol(Psi) ", "Label": "Statistical Modeling", "Source": "https://osf.io/rs6un/", "File": "Data.VAR.Fixed.R" }, { "ID": 107, "Comment": " remove layers with gtype_class \"MF\" (only three distinct MF layers in data set!) ", "Code": "toDiscard <- which(CM$gtype_class == \"MF\") if (length(toDiscard) > 0) CM <- CM[-toDiscard, ] ", "Label": "Data Variable", "Source": "https://osf.io/w7pjy/", "File": "format_confusionMatrix.R" }, { "ID": 108, "Comment": "Testing participant heterogeneity: chi-square test:", "Code": "testHetChi(freq = \"data/data_retrieval.csv\", tree = c(\"E\",\"E\",\"E\", \"U\",\"U\",\"U\", \"N\",\"N\",\"N\") ) testHetChi(freq = \"data/data_encoding.csv\", tree = c(\"E\",\"E\",\"E\", \"U\",\"U\",\"U\", \"N\",\"N\",\"N\") ) ", "Label": "Statistical Test", "Source": "https://osf.io/s82bw/", "File": "01_TreeBUGS_with_csv_files.R" }, { "ID": 109, "Comment": "Fitting a betaMPT model", "Code": "m.retrieval.beta <- betaMPT(eqnfile=\"model/2htsm.eqn\", data = \"data/data_retrieval.csv\", restrictions = \"model/restrictions.txt\", modelfilename = \"results/2htsm_betaMPT.jags\", transformedParameters = list(\"deltaDd=D1-d1\"), parEstFile = \"results/results_retrieval_betaMPT.txt\", n.chain = 4, n.iter = 50000, n.adapt = 10000, n.burnin = 10000, n.thin = 10, ppp = 5000, dic = TRUE ) summary(m.retrieval.beta) ", "Label": "Statistical Modeling", "Source": "https://osf.io/s82bw/", "File": "01_TreeBUGS_with_csv_files.R" }, { "ID": 110, "Comment": " Within-subject tests. Example: two-high-threshold model (2HTM, included in TreeBUGS) ", "Code": "htm <- system.file(\"MPTmodels/2htm.eqn\", package=\"TreeBUGS\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/s82bw/", "File": "01_TreeBUGS_with_csv_files.R" }, { "ID": 111, "Comment": "Plot ranked correlations", "Code": "cor_dat <- corr_cross(ogdatasub, max_pvalue = 0.05, # display only significant correlations (at 5% level) pvalue =T, plot = F, top = 10 # display top 10 correlations (by correlation coefficient) ) |> arrange(abs(corr)) |> mutate(lab = paste(key, mix, sep = \" + \"), lab = factor(lab)) cor_dat |> ggplot(aes(y = lab, x = abs(corr), fill = as.factor(sign(corr)))) + geom_col() + scale_fill_manual(values = c(\"#E64B3D\", \"#0092B2\")) + labs(title=\"\", subtitle = \"\", caption=\"Note: Red bars indicate a negative relationship\") + geom_text(aes(label = signif(.data$corr, 2)), colour = \"#FFFFFF\", size = 3.5, hjust = 1.1) + scale_x_continuous(limits=c(0, .4), position=\"top\") + scale_y_discrete(labels=c(\"fst + cst\" = \"Difficult Financial Situation & Cost of Food\", \"rsp + spp\" = \"Loved Ones' Needs & Lack of Support\", \"pdd + ppd\" = \"Positive Experiences with Veg*nism & Positive Elements Post-Diet\", \"eft + prc\" = \"Meal Complexity & Need for Specialized Equipment or Planning\", \"fds + oth\" = \"Discontent and Cravings for Animal Products & Other Obstacles\", \"rsp + eft\" = \"Loved Ones' Needs & Meal Complexity\", \"fph + cst\" = \"Challenging Purchase Requirements & Cost of Food\", \"avl + fph\" = \"Limited Access to Veg*n Options & Challenging Purchase Requirements\", \"med + con\" = \"Professional Medical Advice & Difficulty Managing Health\", \"con + com\" = \"Difficulty Managing Health & Commitment Difficulties\", \"fcn + prc\" = \"Lack of Veg*n Knowledge & Need for
Specialized Equipment or Planning\")) + theme_minimal() + theme(axis.title = element_blank(), legend.position = \"none\") cor_dat ggsave(\"Correlations.png\", width=8, height=6) ", "Label": "Visualization", "Source": "https://osf.io/q2zrp/", "File": "Graphs.R" }, { "ID": 112, "Comment": "Predictive performance plot (model-size selection plot)", "Code": "stat_pretty <- setNames(nm = proj_evalstats) stat_pretty <- toupper(stat_pretty) stopifnot(identical(proj_evalstats, \"mlpd\")) ggeval <- plot(C_cvvs, stats = proj_evalstats, deltas = TRUE, ranking_nterms_max = NA) ggeval <- ggeval + facet_null() ggeval <- ggeval + scale_y_continuous( sec.axis = sec_axis(~ exp(.), name = bquote(Lambda*\" \"*\"GMPD\")) ) ggeval <- ggeval + labs(y = bquote(Delta*\" \"*.(stat_pretty))) print(ggeval) ggsave(file.path(\"output\", out_folder, paste0(plot_prefix, \"projpred_search_deltas.jpeg\")), width = 7, height = 7 * 0.618) saveRDS(last_plot(), file = file.path(\"output\", out_folder, paste0(plot_prefix, \"projpred_search_deltas.rds\"))) ", "Label": "Visualization", "Source": "https://osf.io/emwgp/", "File": "projpred.R" }, { "ID": 113, "Comment": "save attribute level for option i in trial row.id", "Code": "wide.data[[paste0(\"opt\",i,att.names[k])]][row.id]=feat.ij ", "Label": "Data Variable", "Source": "https://osf.io/tbczv/", "File": "01-readData.r" }, { "ID": 114, "Comment": "save position of attribute j", "Code": "wide.data[[paste0(\"pos\",att.names[k])]][row.id]=j ", "Label": "Data Variable", "Source": "https://osf.io/tbczv/", "File": "01-readData.r" }, { "ID": 115, "Comment": "save attribute of position k", "Code": "wide.data[[paste0(\"pos\",j)]][row.id]=att.names[k] } } } if(y$selected[i]==1) wide.data$response[row.id]=i } ", "Label": "Data Variable", "Source": "https://osf.io/tbczv/", "File": "01-readData.r" }, { "ID": 116, "Comment": "1. recode RT for trials with slow (missed) response as NA", "Code": "data$rt[data$trialError == \" Slow\"] <- NA ", "Label": "Data Variable", "Source": "https://osf.io/tbczv/", "File": "01-readData.r" }, { "ID": 117, "Comment": "t-tests to compare composite 'animal protection behaviors' in people who have vs.
have not experienced each (16 types total)", "Code": "t.test(beh_animalprotect_comp ~ graphic_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp~ nongraphic_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp ~ person_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp ~ leaflet_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp ~ news_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp ~ social_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp ~ humaneed_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp ~ documentary_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp ~ book_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp ~ celebrity_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp ~ ad_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp ~ challenge_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp ~ labels_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp~ labeled_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp ~ ndisprotest_exp_buc, data=data, var.equal = TRUE) t.test(beh_animalprotect_comp ~ disprotest_exp_buc, data=data, var.equal = TRUE) ", "Label": "Statistical Test", "Source": "https://osf.io/3aryn/", "File": "7AnimalProtectionandConsumerBehaviors_Spanish.R" }, { "ID": 118, "Comment": "FDR correction for t-tests that compared composite 'animal protection behaviors' between people who have and have not experienced each advocacy type", "Code": "adjusted_pvalues_protectionbehaviors <- data %>% summarise(graphic_exp_buc = t.test(beh_animalprotect_comp ~ graphic_exp_buc, var.equal = TRUE)$p.value, nongraphic_exp_buc = t.test(beh_animalprotect_comp ~ nongraphic_exp_buc, var.equal = TRUE)$p.value, person_exp_buc = t.test(beh_animalprotect_comp ~ person_exp_buc, var.equal = TRUE)$p.value, leaflet_exp_buc = t.test(beh_animalprotect_comp ~ leaflet_exp_buc, var.equal = TRUE)$p.value, news_exp_buc = t.test(beh_animalprotect_comp ~ news_exp_buc, var.equal = TRUE)$p.value, social_exp_buc = t.test(beh_animalprotect_comp ~ social_exp_buc, var.equal = TRUE)$p.value, humaneed_exp_buc = t.test(beh_animalprotect_comp ~ humaneed_exp_buc, var.equal = TRUE)$p.value, documentary_exp_buc = t.test(beh_animalprotect_comp ~ documentary_exp_buc, var.equal = TRUE)$p.value, book_exp_buc = t.test(beh_animalprotect_comp ~ book_exp_buc, var.equal = TRUE)$p.value, celebrity_exp_buc = t.test(beh_animalprotect_comp ~ celebrity_exp_buc, var.equal = TRUE)$p.value, ad_exp_buc = t.test(beh_animalprotect_comp ~ ad_exp_buc, var.equal = TRUE)$p.value, challenge_exp_buc = t.test(beh_animalprotect_comp ~ challenge_exp_buc, var.equal = TRUE)$p.value, labels_exp_buc = t.test(beh_animalprotect_comp ~ labels_exp_buc, var.equal = TRUE)$p.value, labeled_exp_buc = t.test(beh_animalprotect_comp ~ labeled_exp_buc, var.equal = TRUE)$p.value, ndisprotest_exp_buc = t.test(beh_animalprotect_comp ~ ndisprotest_exp_buc, var.equal = TRUE)$p.value, disprotest_exp_buc = t.test(beh_animalprotect_comp ~ disprotest_exp_buc, var.equal = TRUE)$p.value) %>% gather(\"Advocacy\",\"p_value\") %>% mutate(p_fdr = p.adjust(p_value, method = \"fdr\", n = length(p_value))) %>% print() ", "Label": "Statistical Test", "Source": "https://osf.io/3aryn/", "File": "7AnimalProtectionandConsumerBehaviors_Spanish.R" }, { "ID": 119, "Comment": "t-tests to compare composite 'animal consumer behaviors' in people who have vs.
have not experienced each (16 types total)", "Code": "t.test(beh_consumer_comp ~ graphic_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp~ nongraphic_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ person_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ leaflet_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ news_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ social_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ humaneed_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ documentary_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ book_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ celebrity_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ ad_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ challenge_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ labels_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ labeled_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ ndisprotest_exp_buc, data=data, var.equal = TRUE) t.test(beh_consumer_comp ~ disprotest_exp_buc, data=data, var.equal = TRUE) ", "Label": "Statistical Test", "Source": "https://osf.io/3aryn/", "File": "7AnimalProtectionandConsumerBehaviors_Spanish.R" }, { "ID": 120, "Comment": "prepare animal protection behaviors for stacked graph", "Code": "mean_CI_protect <- data %>% group_by(advocacytype, experienced) %>% summarize(n = n(), protect_mean = mean(beh_animalprotect_comp), protect_lci = t.test(beh_animalprotect_comp, conf.level = 0.95)$conf.int[1], protect_uci = t.test(beh_animalprotect_comp, conf.level = 0.95)$conf.int[2]) ", "Label": "Visualization", "Source": "https://osf.io/3aryn/", "File": "7AnimalProtectionandConsumerBehaviors_Spanish.R" }, { "ID": 121, "Comment": "add asterisks to advocacy types that had sig different scores between experienced vs. 
not experienced from t-tests", "Code": "mean_CI_protect <- mean_CI_protect %>% mutate(advocacytype = case_when(advocacytype == \"ad\" ~ \"Anuncio o Valla Publicitaria*\", advocacytype == \"book\" ~ \"Libro*\", advocacytype == \"celebrity\" ~ \"Celebridad*\", advocacytype == \"challenge\" ~ \"Reto de Evitar la Carne*\", advocacytype == \"disprotest\" ~ \"Protesta Disruptiva*\", advocacytype == \"graphic\" ~ \"Video Gráfico*\", advocacytype == \"humaneed\" ~ \"Educación en el Aula de Clases*\", advocacytype == \"labeled\" ~ \"Información Educativa Sobre las Etiquetas de Bienestar*\", advocacytype == \"labels\" ~ \"Etiquetas Vegano/De Base Vegetal*\", advocacytype == \"ndisprotest\" ~ \"Protesta No Disruptiva*\", advocacytype == \"news\" ~ \"Artículo de Noticias*\", advocacytype == \"nongraphic\" ~ \"Video No Gráfico*\", advocacytype == \"person\" ~ \"Divulgación Boca a Boca *\", advocacytype == \"social\" ~\"Publicación en Redes Sociales o en un Blog*\", advocacytype == \"documentary\" ~ \"Documental*\", advocacytype == \"leaflet\" ~ \"Folleto o Volante*\")) mean_CI_consume <- mean_CI_consume %>% mutate(advocacytype = case_when(advocacytype == \"ad\" ~ \"Anuncio o Valla Publicitaria*\", advocacytype == \"book\" ~ \"Libro*\", advocacytype == \"celebrity\" ~ \"Celebridad*\", advocacytype == \"challenge\" ~ \"Reto de Evitar la Carne*\", advocacytype == \"disprotest\" ~ \"Protesta Disruptiva*\", advocacytype == \"graphic\" ~ \"Video Gráfico*\", advocacytype == \"humaneed\" ~ \"Educación en el Aula de Clases*\", advocacytype == \"labeled\" ~ \"Información Educativa Sobre las Etiquetas de Bienestar*\", advocacytype == \"labels\" ~ \"Etiquetas Vegano/De Base Vegetal*\", advocacytype == \"ndisprotest\" ~ \"Protesta No Disruptiva*\", advocacytype == \"news\" ~ \"Artículo de Noticias*\", advocacytype == \"nongraphic\" ~ \"Video No Gráfico*\", advocacytype == \"person\" ~ \"Divulgación Boca a Boca *\", advocacytype == \"social\" ~\"Publicación en Redes Sociales o en un Blog*\", advocacytype == \"documentary\" ~ \"Documental*\", advocacytype == \"leaflet\" ~ \"Folleto o Volante*\")) ", "Label": "Statistical Test", "Source": "https://osf.io/3aryn/", "File": "7AnimalProtectionandConsumerBehaviors_Spanish.R" }, { "ID": 122, "Comment": "factorize advocacy type for graph", "Code": "mean_CI_protect <- mean_CI_protect %>% mutate(advocacytype = factor(advocacytype, bar_order)) mean_CI_consume <- mean_CI_consume %>% mutate(advocacytype = factor(advocacytype, bar_order)) ", "Label": "Data Variable", "Source": "https://osf.io/3aryn/", "File": "7AnimalProtectionandConsumerBehaviors_Spanish.R" }, { "ID": 123, "Comment": "grouped bar graph for animal protection behaviors", "Code": "ggplot(mean_CI_protect, aes(x= advocacytype, fill=experienced, y=protect_mean)) + geom_col(width = 0.8, position=\"dodge\") + coord_flip(ylim=c(1,5)) + geom_errorbar(aes(x= advocacytype, ymin = protect_lci, ymax = protect_uci), width=0.4, colour = \"black\", position=position_dodge(.8)) + geom_text(aes(label = format(round(protect_mean, 1)), y = protect_uci), hjust = -0.2,# nudge_y = 2, size = 3, position = position_dodge(width = 1)) + labs(y= \"Puntuación Media del Comportamiento de Protección de los Animales\", x = \"Tipo de Defensa\", caption = \"Un asterisco (*) indica que hubo una diferencia estadísticamente significativa entre los grupos (todos los ps < 0,05) después de haber corregido mediante FDR.
Para más detalles sobre cómo se llevaron a cabo estos análisis, véase el Material Complementario.\") + theme(legend.title = element_blank(), legend.position = \"bottom\", panel.background = element_rect(\"white\"), panel.border = element_rect(fill = NA), panel.grid.major.x = element_line(\"grey\")) + scale_fill_manual(values = c(\"Experimentado\" = \"#c47020\", \"No Experimentado\" = \"#F68D29\")) ", "Label": "Visualization", "Source": "https://osf.io/3aryn/", "File": "7AnimalProtectionandConsumerBehaviors_Spanish.R" }, { "ID": 124, "Comment": "We create a list of parameters to visualize and fill in the labels etc. in the .txt file in any table editor (e.g., Excel)", "Code": "write.table(data.frame(nam=rep.par2),\"params_empty.txt\",row.names=F,sep=\"\\t\") ", "Label": "Visualization", "Source": "https://osf.io/fr5ed/", "File": "03_posterior_visualization_country_contrast.R" }, { "ID": 125, "Comment": "plot contrasts estimates", "Code": "plot(NULL,ylim=c(max(dec$y)+const1+const2,min(dec$y)-const1),type=\"n\",xaxs=\"i\",yaxs=\"i\",xaxt=\"n\",yaxt=\"n\",xlim=xlims[[block]],bty=\"n\") abline(h=dec$y,col=col.grid,lty=1,lwd=lwd.grid) segments(axes[[block]],max(dec$y)+const1,axes[[block]],max(dec$y)+tic+const1,lwd=lwd.ax,col=col.ax) text(axes[[block]],max(dec$y)+tic+const1+ofs,labels=axes[[block]],col=col.ax,cex=0.9,font=2) lines(range(axes[[block]]),rep(max(dec$y)+const1,2),lwd=lwd.ax,col=col.ax) segments(axes[[block]],min(dec$y)-const1,axes[[block]],max(dec$y)+const1,lwd=lwd.v,col=col.ax,lty=3) segments(0,min(dec$y)-const1,0,max(dec$y)+const1,lwd=lwd.v,col=col.ax,lty=1) ", "Label": "Visualization", "Source": "https://osf.io/fr5ed/", "File": "03_posterior_visualization_country_contrast.R" }, { "ID": 126, "Comment": "Draw density polygons; density areas are scaled within each block.", "Code": "area<-0.25*diff(xlims[[block]]) for(i in 1:nrow(dec)){ thispost<-postsc[,dec$n[i]] dens<-density(thispost) polX<-c(dens$x,rev(dens$x)) polY<-c(dens$y,rev(-dens$y)) ar1<-abs(polyarea(polX,polY)) perc<-area/ar1 polygon(polX,polY*perc+dec$y[i],col=dec$hex[i],border=col.pol,lwd=lwd.pol) ", "Label": "Visualization", "Source": "https://osf.io/fr5ed/", "File": "03_posterior_visualization_country_contrast.R" }, { "ID": 127, "Comment": "correlations between csd_mean and averaged csds for Big Five traits (mentioned in the text in Appendix A):", "Code": "psych::corr.test( df2$n_csd,df2$csd_mean_n ) psych::corr.test( df2$e_csd,df2$csd_mean_e ) psych::corr.test( df2$o_csd,df2$csd_mean_o ) psych::corr.test( df2$a_csd,df2$csd_mean_a ) psych::corr.test( df2$c_csd,df2$csd_mean_c ) ", "Label": "Data Variable", "Source": "https://osf.io/tajd9/", "File": "Flip_MainDataAnalyses.R" }, { "ID": 128, "Comment": "correlations between variability and well-being measures:", "Code": "names_var <- c(\"csd_mean_n\",\"csd_mean_e\",\"csd_mean_o\",\"csd_mean_a\",\"csd_mean_c\", \"sccs\",\"csd_nob\",\"simpson_csd\",\"rses\",\"swls\",\"pa\",\"na\") psych::corr.test( df2[,names_var] ) ", "Label": "Statistical Modeling", "Source": "https://osf.io/tajd9/", "File": "Flip_MainDataAnalyses.R" }, { "ID": 129, "Comment": "catch situation where Sx is missing so we cannot assess response to treatment", "Code": "if ( nrow( thisSx ) < 1 ) { thisRow$status.sx <- NA noFurtherFlag <- 1 ## tell rest of loop to not bother thisRow$missing.Sx <- 1 }", "Label": "Data Variable", "Source": "https://osf.io/5y27d/", "File": "tabulate_cases.R" }, { "ID": 130, "Comment": "Descriptive analysis: counting occurrence of body-based units and coded themes. Step 1.
Analyse frequencies of body-based units of measure. Ensure data is in character format", "Code": "df$`Body dimension` <- as.character(df$`Body dimension`)", "Label": "Data Variable", "Source": "https://osf.io/fegvr/", "File": "Analysis.R" }, { "ID": 131, "Comment": " Repeating confirmatory analyses with extra exclusions: in addition to excluding participants who did not complete the study, and excluding time-until-guess and arithmetic-solving times more than 5 standard deviations away from the mean, this analysis also excludes participants who removed 0 tiles when guessing. Exclude participants who did not complete the study ", "Code": "d.conf_1_2.complete <- d.conf_1_2.b[complete.cases(d.conf_1_2.b),] d.conf_3_math.complete <- d.conf_3_math.b[complete.cases(d.conf_3_math.b),] ", "Label": "Data Variable", "Source": "https://osf.io/7vbj9/", "File": "Analyses_Exploratory.R" }, { "ID": 132, "Comment": "remove rows with time-until-guess values that are more than 5 standard deviations away from the mean", "Code": "agg <- aggregate(ElapsedTime_Guess ~ Guess_Number + ID_Player + Effort + Competition, data=d.conf_1_2.complete, FUN = mean) sd5.times <- mean(agg$ElapsedTime_Guess) + (5 * sd(agg$ElapsedTime_Guess)) nrow(agg[agg$ElapsedTime_Guess > sd5.times,]) ", "Label": "Statistical Test", "Source": "https://osf.io/7vbj9/", "File": "Analyses_Exploratory.R" }, { "ID": 133, "Comment": "remove rows with arithmetic-solving times that are more than 5 standard deviations away from the mean", "Code": "sd5.times <- mean(d.conf_3_math.complete$ElapsedTime_Math) + (5 * sd(d.conf_3_math.complete$ElapsedTime_Math)) nrow(d.conf_3_math.complete[d.conf_3_math.complete$ElapsedTime_Math > sd5.times,]) ", "Label": "Statistical Test", "Source": "https://osf.io/7vbj9/", "File": "Analyses_Exploratory.R" }, { "ID": 134, "Comment": "Exclude participants who revealed 0 tiles", "Code": "d.conf_1_2.complete_no0 <- d.conf_1_2.complete[d.conf_1_2.complete$TilesRevealed > 0,] d.conf_3_math.complete_no0 <- d.conf_3_math.complete[d.conf_3_math.complete$TilesRevealed > 0,] ", "Label": "Data Variable", "Source": "https://osf.io/7vbj9/", "File": "Analyses_Exploratory.R" }, { "ID": 135, "Comment": "compare simpler models with sex and sex*effort", "Code": "df <- compare(m_accuracy_guess_nointer, m_accuracy_componly, m_accuracy_sex, m_accuracy_sex_effort_inter, m_accuracy_sex_effort_inter_CEinter) df <- round(df@output, 2) df %>% kable(caption = \"ACCURACY\") %>% kable_styling(bootstrap_options = c(\"striped\", \"condensed\", \"responsive\"), full_width = TRUE) %>% column_spec(1:5, width = \"2cm\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/7vbj9/", "File": "Analyses_Exploratory.R" }, { "ID": 136, "Comment": "aggregate data for time elapsed per guess, by first removing duplicate times, then summing up total times per player ", "Code": "d.reward.agg <- aggregate(ElapsedTime_Guess ~ Effort + Competition + Sex + Guess_Number + ID_Player, data = d.reward, FUN = mean) d.reward.agg <- aggregate(ElapsedTime_Guess ~ Effort + Competition + Sex + ID_Player, data = d.reward.agg, FUN = sum) ", "Label": "Data Variable", "Source": "https://osf.io/7vbj9/", "File": "Analyses_Exploratory.R" }, { "ID": 137, "Comment": "Bayes factors: load additional libraries; subset data into separate objects for frequentist analyses", "Code": "d.math.agg.f <- d.math.agg d.math.agg.f$Competition <- as.factor(d.math.agg.f$Competition) d.math.agg.f$ID_Player <- as.factor(d.math.agg.f$ID_Player) d.math.agg.f$Sex <- as.factor(d.math.agg.f$Sex) d.conf.agg.f <-
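# copy the aggregated confidence data, then coerce the design variables to factors for the frequentist analyses below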
d.conf.agg d.conf.agg.f$Competition <- as.factor(d.conf.agg.f$Competition) d.conf.agg.f$Effort <- as.factor(d.conf.agg.f$Effort) d.conf.agg.f$ID_Player <- as.factor(d.conf.agg.f$ID_Player) d.conf.agg.f$Sex <- as.factor(d.conf.agg.f$Sex)", "Label": "Data Variable", "Source": "https://osf.io/7vbj9/", "File": "Analyses_Exploratory.R" }, { "ID": 138, "Comment": "Plot AUC", "Code": "ggplot(plot.data, aes(x = Target, y = AUC, color = Algorithm, fill = Algorithm, shape = Algorithm)) + geom_boxplot(width = 0.3,lwd = 1, aes(color = Algorithm, fill = Algorithm), alpha = 0.3, outlier.shape=NA, position=position_dodge(0.5)) + geom_point(position=position_jitterdodge(jitter.width = 0.1, dodge.width = 0.5), size = 0.5) + coord_flip() + geom_hline(yintercept = 0.5, color=\"black\", linetype = \"dashed\", size = 1) + theme_classic() + theme(text = element_text(size = 18), axis.title.y = element_blank(), legend.position = c(0.85, 0.5)) + scale_color_manual(values = c(\"#440154\", \"#5ec962\")) + scale_fill_manual(values = c(\"#440154\", \"#5ec962\")) + ylim(0.25, 1) + scale_x_discrete(labels=rev(c(\"Sociality\" = expression(paste(\"* \", bold(\"S\"), \"ociality\")), \"Deception\" = expression(paste(bold(\"D\"), \"eception\")), \"Negativity\" = expression(paste(bold(\"N\"), \"egativity\")), \"Positivity\" = expression(paste(\"* p\", bold(\"O\"), \"sitivity\")), \"Mating\" = expression(paste(\"* \", bold(\"M\"), \"ating\")), \"Adversity\" = expression(paste(bold(\"A\"), \"dversity\")), \"Intellectuality\" = expression(paste(\"* \", bold(\"I\"), \"ntellect\")), \"Duty\" = expression(paste(bold(\"D\"), \"uty\"))))) ", "Label": "Visualization", "Source": "https://osf.io/b7krz/", "File": "01_ML_bmr_resultsSummary.R" }, { "ID": 139, "Comment": " Step 4*: Visualization of the ROC curves for \"core benchmark\" for the Online Supplemental Material: Duty ", "Code": "p_Duty = autoplot(bmr$clone(deep = TRUE)$filter(task_ids = \"Duty\", learner_ids = c(\"lasso_Duty\", \"rf_Duty\")), type = \"roc\") + theme_classic() + theme(title = element_blank(), legend.position = \"none\", axis.title = element_text(size = 18), axis.text = element_text(size = 16)) + labs(y = \"Sensitivity\", x = \"1 - Specifity\") + geom_abline(intercept = 0, slope = 1, color = \"black\", linetype = \"dotted\", size = 0.8) + annotate(geom = \"text\", x = 0, y = 1, label = \"Duty\", hjust = 0, vjust = 1, size = 10) png(\"Figures_Tables/Figures/ROC_Curves/Duty.png\", width = 450, height = 400) p_Duty dev.off() ", "Label": "Visualization", "Source": "https://osf.io/b7krz/", "File": "01_ML_bmr_resultsSummary.R" }, { "ID": 140, "Comment": "Plot ROC curves and calculate AUC (if desired)", "Code": "plotIt <- FALSE currAUC <- plotAUC(allPs,allBFs,dt,numStudies,plotIt) #currently does not save the AUCs ai <- ai+1 aucs[ai,1] <- sampleSizes[1] aucs[ai,2] <- effectSizes[2] aucs[ai,3] <- i #runNumber aucs[ai,4] <- currAUC[1] aucs[ai,5] <- currAUC[2] plotIt <- FALSE currAUC <- plotAUC(allPs2,allBFs2,dt,numStudies,plotIt) #currently does not save the AUCs aucs$aucPhacked[ai] <- currAUC[1] aucs$aucBFhacked[ai] <- currAUC[2] if (currAUC[1] != currAUC[2]) { print(paste(effectSizes,i)) print(currAUC) } } #end for i runTimes } #end for ssa allDt <- allDt[-1,] #get rid of initial dummy row that was just used to init allDt ", "Label": "Visualization", "Source": "https://osf.io/hzncs/", "File": "Witt_SDT_Simulations_OptionalStopping.R" }, { "ID": 141, "Comment": " Stacked plots by SDT outcome. Plot by whether hacked or not: plot hit, FA, miss, corrRejection for each
effect size/sample size combo ", "Code": "ap <- aggregate(hits ~ criterion+sampleSize+effectSizes+withHack, data=allDt,mean) ap2 <- aggregate(fa ~ criterion+sampleSize+effectSizes+withHack, data=allDt,mean) hFA <- merge(ap,ap2,by=c(\"criterion\",\"sampleSize\",\"effectSizes\",\"withHack\")) ap2 <- aggregate(miss ~ criterion+sampleSize+effectSizes+withHack, data=allDt,mean) hFA <- merge(hFA,ap2,by=c(\"criterion\",\"sampleSize\",\"effectSizes\",\"withHack\")) ap2 <- aggregate(corrRej ~ criterion+sampleSize+effectSizes+withHack, data=allDt,mean) hFA <- merge(hFA,ap2,by=c(\"criterion\",\"sampleSize\",\"effectSizes\",\"withHack\")) hFA$hits <- hFA$hits / numStudies hFA$fa <- hFA$fa / numStudies hFA$corrRej <- hFA$corrRej / numStudies hFA$miss <- hFA$miss / numStudies ", "Label": "Visualization", "Source": "https://osf.io/hzncs/", "File": "Witt_SDT_Simulations_OptionalStopping.R" }, { "ID": 142, "Comment": "plot BF vs p-values", "Code": "plot(log(allPs),log(allBFs),bty=\"l\") ", "Label": "Visualization", "Source": "https://osf.io/hzncs/", "File": "Witt_SDT_Simulations_OptionalStopping.R" }, { "ID": 143, "Comment": "Create matrix of input variables with population-level effects:", "Code": "C_datMM <- model.matrix( as.formula(paste(\"~\", paste(vpreds_noInt, collapse = \" + \"))), data = C_dat ) ", "Label": "Data Variable", "Source": "https://osf.io/emwgp/", "File": "ppfs.R" }, { "ID": 144, "Comment": " Set the latent regression coefficients for booklets 1, 5, 7 and 10 for reading and booklets 4, 6, 9 and 11 for science to 0 ", "Code": "betas <- data.frame(var = c(which(names(con_dat) %in% paste0(\"bookid.\", c(1, 5, 7, 10))), which(names(con_dat) %in% paste0(\"bookid.\", c(4, 6, 9, 11)))), dim = rep(2:3, each = 4), value = 0) ", "Label": "Data Variable", "Source": "https://osf.io/8fzns/", "File": "4H_PV_helper.R" }, { "ID": 145, "Comment": "Compute latent regression of the latent ability on the conditioning variables (excl.
the first column, which is the student ID)", "Code": "latreg <- tam.latreg(likeli, Y = con_dat[, -1], pid = pid.sele, control = list(maxiter = iter.2, acceleration = \"Ramsay\"), beta.fixed = as.matrix(betas)) } else { latreg <- tam.latreg(likeli, Y = con_dat[, -1], pid = pid.sele, control = list(maxiter = iter.2, acceleration = \"Ramsay\")) } latreg <- tam.latreg(likeli, Y = con_dat1[, -1], pid = pid.sele, control = list(maxiter = iter.2, acceleration = \"Ramsay\"), beta.fixed = as.matrix(betas)) } else { latreg <- tam.latreg(likeli, Y = con_dat1[, -1], pid = pid.sele, control = list(maxiter = iter.2, acceleration = \"Ramsay\")) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/8fzns/", "File": "4H_PV_helper.R" }, { "ID": 146, "Comment": "Draw 5 plausible values for each student out of the resulting distribution, which is assumed to be normally distributed", "Code": "pvs <- tam.pv(latreg, nplausible = 5, normal.approx = T, samp.regr = samp.regr.opt) } else { pvs <- tam.pv(latreg_md, nplausible = 5, normal.approx = T, samp.regr = samp.regr.opt) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/8fzns/", "File": "4H_PV_helper.R" }, { "ID": 147, "Comment": " Extract the regression coefficients of the conditioning variables, because they are fixed for the core domains in the next step at that value ", "Code": "reg.coefs <- cbind(rep(1:dim(latreg$beta)[1], 3), rep(1:3, each = dim(latreg$beta)[1]), c(latreg$beta[, 1], latreg$beta[, 2], latreg$beta[, 3])) ", "Label": "Statistical Modeling", "Source": "https://osf.io/8fzns/", "File": "4H_PV_helper.R" }, { "ID": 148, "Comment": " Extract IRT likelihood of the second model (math, read and scie plus digital domains) ", "Code": "likeli_md <- IRT.likelihood(mod2)", "Label": "Statistical Modeling", "Source": "https://osf.io/8fzns/", "File": "4H_PV_helper.R" }, { "ID": 149, "Comment": "Compute latent regression of the latent ability on the conditioning variables (excl. the first column, which is the student ID). But this time only the regression coefficients of the digital domains are computed freely.
The rest is fixed at the values of the first model", "Code": "latreg_md <- tam.latreg(likeli_md, Y = con_dat[, -1], pid = pid.sele, control = list(maxiter = iter.2, acceleration = \"Ramsay\"), beta.fixed = reg.coefs) ", "Label": "Statistical Modeling", "Source": "https://osf.io/8fzns/", "File": "4H_PV_helper.R" }, { "ID": 150, "Comment": "Calculate the number of surveys completed per person", "Code": "N <- as.data.frame(table(dat$id)) names(N) <- c(\"id\", \"N\") dat <- merge(dat, N, by = \"id\", all.x = T) table(dat$N, useNA = \"ifany\") ", "Label": "Data Variable", "Source": "https://osf.io/nxyh3/", "File": "01a_DataPrep_Study1.R" }, { "ID": 151, "Comment": " DOWNSAMPLING: loop to downsample data based on minimum time and distance between points. The time parameter is set in \"while(diff < time in minutes | dist > in meters)\"; downsamples points that were less than 60 min apart unless the frog moved more than 20 m during the time window ", "Code": "tracks_dsmpl <- data.frame() ids <- unique(tracks$id) for(i in ids) { traj = subset(tracks, tracks$id == i) for(i in 1:nrow(traj)) { diff <- difftime(traj$dt[i+1], traj$dt[i], units = \"mins\") delta_x <- traj$x_utm[i+1] - traj$x_utm[i] delta_y <- traj$y_utm[i+1] - traj$y_utm[i] dist <- sqrt(delta_x^2+delta_y^2) if(is.na(diff)) {break} while(diff <= 60 & dist < 20) { traj <- traj[-(i+1),] diff <- difftime(traj$dt[i+1], traj$dt[i], units = \"mins\") delta_x <- traj$x_utm[i+1] - traj$x_utm[i] delta_y <- traj$y_utm[i+1] - traj$y_utm[i] dist <- sqrt(delta_x^2+delta_y^2) if(is.na(diff)) {break} } } tracks_dsmpl <- bind_rows(tracks_dsmpl, traj) } tracks_dsmpl <- tracks_dsmpl %>% group_by(id) %>% mutate(time_diff = difftime(dt, lag(dt, n = 1L), units = \"min\"), delta_x = x_utm - lag(x_utm, n = 1L), delta_y = y_utm - lag(y_utm, n = 1L), dist = sqrt(delta_x^2+delta_y^2)) tracks <- tracks_dsmpl ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "all_spaceuse_dataproc.R" }, { "ID": 152, "Comment": " Remove first row (tagging date) and last row (untagging day) per individual. Sort the daily data, group by id, and add an index of id+date as a new variable ", "Code": "daily %>% arrange(id) %>% group_by(id) %>% mutate(id_day = paste(id, date)) -> daily_grouped ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "all_spaceuse_dataproc.R" }, { "ID": 153, "Comment": "Join with summary stats", "Code": "tracks_sum <- left_join(tracks_sum, duration) tracks_sum <- left_join(tracks_sum, relocs) tracks_sum <- left_join(tracks_sum, days_tracked) ", "Label": "Visualization", "Source": "https://osf.io/3bpn6/", "File": "all_spaceuse_dataproc.R" }, { "ID": 154, "Comment": "Calculate average daily movement by behavioral category per individual for plotting", "Code": "daily_beh_mean <- daily_beh %>% group_by(id, behavior) %>% dplyr::summarize(daily_dist = mean(daily_dist), max_dist = mean(max_dist), sex = first(sex), species = first(species)) ", "Label": "Visualization", "Source": "https://osf.io/3bpn6/", "File": "all_spaceuse_dataproc.R" }, { "ID": 155, "Comment": "MCPs: home range with 95% minimum convex polygon (MCP95)", "Code": "mcp95<- mcp(data_sp[\"id\"], percent = 95, unout = \"m2\") ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "all_spaceuse_dataproc.R" }, { "ID": 156, "Comment": "95% contour with the Hpi plug-in method. Define contour level", "Code": "cont=c(95)", "Label": "Visualization", "Source": "https://osf.io/3bpn6/", "File": "all_spaceuse_dataproc.R" }, { "ID": 157, "Comment": "Initiate empty df to store the metadata",
"Code": "allfrogs_info <- data.frame(ID = numeric(0), Cont = numeric(0), frog_id = character(0)) allfrogs_info <- data.frame(ID = numeric(0), Cont = numeric(0), frog_id = character(0)) ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "all_spaceuse_dataproc.R" }, { "ID": 158, "Comment": "BOXPLOTS: DAILY MOVEMENT FIGURE EXPORT: daily movement by sex", "Code": "daily_plot <-tracks_sum%>% ggplot(aes(x= sex, y=log(mean_cumul), fill = species)) + theme_bw(20) + geom_boxplot(width= 0.6, outlier.shape = NA) + geom_jitter(aes(group = sex), position=position_jitterdodge(0.4), shape = 21, stroke = 1, color = \"black\", size = 2, alpha = 0.6) + scale_color_manual(values=c(\"black\", \"black\")) + scale_fill_manual(values=c(\"#E7B800\", \"#0072B2\",\"#FC4E07\")) + theme(legend.position=\"none\") + labs(y = \"ln Daily movement (m)\") + facet_wrap(~species, labeller = labeller(species=c(\"afemo\" = \"A. femoralis\", \"dtinc\" = \"D. tinctorius\", \"osylv\" = \"O. sylvatica\"))) + theme(axis.title.x = element_blank(), axis.text.x = element_text(color = \"black\", size = 18), strip.text = element_text(face = \"italic\"), aspect.ratio = 4) + scale_x_discrete(labels= c(\"F\", \"M\"), expand = expansion(add = 1)) daily_plot ", "Label": "Visualization", "Source": "https://osf.io/3bpn6/", "File": "all_spaceuse_dataproc.R" }, { "ID": 159, "Comment": "Daily movement by sex and species, log transformed violin plots ", "Code": "ggplot(daily_select, aes(x= sex, y=log(daily_dist))) + theme_bw(20) + geom_violin(aes(fill = species)) + scale_fill_manual(values=c(\"#E7B800\", \"#0072B2\",\"#FC4E07\")) + theme(legend.position=\"none\") + labs(y = \"ln Daily movement (m)\") + geom_jitter(aes(shape=sex, fill = species), position=position_jitterdodge(0.2), size = 4, alpha=0.2) + facet_wrap(~species, labeller = labeller( species=c(afemo =\"A. femoralis\", dtinc = \"D. tinctorius\", osylv = \"O. 
sylvatica\"))) + theme(axis.title.x = element_blank(), axis.text.x = element_text(color = \"black\", size = 18), strip.text = element_text(face = \"italic\")) + scale_x_discrete(labels= c(\"F\", \"M\")) ", "Label": "Visualization", "Source": "https://osf.io/3bpn6/", "File": "all_spaceuse_dataproc.R" }, { "ID": 160, "Comment": "convert attributes from character to numeric", "Code": "for(i in 1:3) { data[[paste0(\"opt\",i,\"cost\")]] <- as.numeric(substring(data[[paste0(\"opt\",i,\"cost\")]], 3, 7)) data[[paste0(\"opt\",i,\"sides\")]] <- as.numeric(substring(data[[paste0(\"opt\",i,\"sides\")]], 1, 2)) data[[paste0(\"opt\",i,\"deliveryTime\")]] <- as.numeric(substring(data[[paste0(\"opt\",i,\"deliveryTime\")]], 2, 3)) } ", "Label": "Data Variable", "Source": "https://osf.io/tbczv/", "File": "exp1aDead-MNL-SAT-A.r" }, { "ID": 161, "Comment": " Step 9: Create time difference variable, and the final dependent variable (media_success) calculating the time difference between each documentpair ", "Code": "result$date_diff <- as.Date(as.character(result$news_date), format=\"%Y-%m-%d\")- as.Date(as.character(result$date), format=\"%Y-%m-%d\") ", "Label": "Data Variable", "Source": "https://osf.io/hfy4k/", "File": "prep.analysis.data.R" }, { "ID": 162, "Comment": " Remove trials with RT < 4710 ms (i.e., presses before disambiguating information in sentence) ", "Code": "dataset_exp1 <- dataset_exp1[which(dataset_exp1$rt > 4710), ] ", "Label": "Data Variable", "Source": "https://osf.io/37rfb/", "File": "prediction_analyses.R" }, { "ID": 163, "Comment": " Add 'trackloss' column (if not looking at IA_1 or IA_2, then trackloss 1) ", "Code": "dataset_exp1 <- within(dataset_exp1, { trackloss <- ifelse(average_target_sample_count_proportion == 0 & average_distractor_sample_count_proportion == 0, 1, 0) }) ", "Label": "Data Variable", "Source": "https://osf.io/37rfb/", "File": "prediction_analyses.R" }, { "ID": 164, "Comment": "Ttests (using participantaveraged data)", "Code": "num_sub_exp1 <- length(unique((eyetrackingr.data.exp1$participant_number))) threshold_t_exp1 <- qt(p = 1 - .05 / 2, df = num_sub_exp1 - 1) # Pick threshold for t based on alpha = .05, two-tailed df_timeclust_exp1 <- make_time_cluster_data(response_time_exp1, test = \"t.test\", paired = TRUE, predictor_column = \"trial_condition_new\", threshold = threshold_t_exp1 ) ", "Label": "Statistical Test", "Source": "https://osf.io/37rfb/", "File": "prediction_analyses.R" }, { "ID": 165, "Comment": "MSE for each participant", "Code": "MSE.Sys.VAR.i = lapply(1:fold, function(k) MSE.k[[k]]$MSE.ki) ", "Label": "Data Variable", "Source": "https://osf.io/rs6un/", "File": "MSE.VAR.Sys.R" }, { "ID": 166, "Comment": "use mutate and the dffit fit information to make the data for mal", "Code": "mal_fit <- fit_num %>% mutate(fit = (s)/(1.196506 + s)) dex_fit <- fit_num %>% mutate(fit = (s)/(62.736799 + s)) lac_fit <- fit_num %>% mutate(fit = (s)/(43.839063 + s)) maldex_fit <- fit_num %>% mutate(fit = (s)/(7.253739 + s))", "Label": "Statistical Modeling", "Source": "https://osf.io/9e3cu/", "File": "titration_answers.R" }, { "ID": 167, "Comment": "Logistic Mixed Effects Regression for Original Critical Targets", "Code": "log_reg_data <- cards_long %>% filter(card == \"card_1\" | card == \"card_3\" | card == \"card_6\" | card == \"card_7\" | card == \"card_9\" | card == \"card_14\" ) %>% mutate( distance = case_when( card == \"card_1\" | card == \"card_3\" | card == \"card_6\" ~ 0, card == \"card_7\" | card == \"card_9\" | card == \"card_14\" ~ 1 ), 
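# mean-center the comfort/appropriateness covariates (scale = FALSE centers without rescaling)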
comf_acquaint = scale(comf_acquaint, scale = FALSE), comf_close = scale(comf_close, scale = FALSE), approp_acquaint = scale(approp_acquaint, scale = FALSE), approp_close = scale(approp_close, scale = FALSE) ) log_reg_data$amit_wording <- factor(log_reg_data$amit_wording, levels = c(\"original\", \"new\")) h1_melogreg <- glmer(choice ~ distance + amit_wording + (1 + distance|id) + (1|card), data = log_reg_data, family = binomial(link = \"logit\")) h1_melogreg_inter <- glmer(choice ~ distance * amit_wording + (1 + distance|id) + (1|card), data = log_reg_data, family = binomial(link = \"logit\")) h1_model_comparison <- anova(h1_melogreg, h1_melogreg_inter) ", "Label": "Statistical Modeling", "Source": "https://osf.io/bkuwa/", "File": "main_analysis_code.R" }, { "ID": 168, "Comment": " Highest density interval ' ' This is a function that will calculate the highest density interval from a ' posterior sample. ' ' The default is to calculate the highest 95 percent interval. It can be used ' with any numeric vector instead of having to use one of the specific MCMC ' classes. This function has been adapted from John K. Kruschke (2011). Doing ' Bayesian Data Analysis: A Tutorial with R and BUGS. ' ' @param x Numeric vector of a distribution of data, typically a posterior ' sample ' @param prob Width of the interval from some distribution. Must be in the ' range of `[0,1]`. Defaults to `0.95`. ' @param warn Option to turn off the multiple-sample warning message ' @return Numeric range ' @export ' @examples ' x <- qnorm(seq(1e-04, .9999, length.out=1001)) ' hdi_95 <- hdi(x, .95) ' hdi_50 <- hdi(x, .50) ' ' hist(x, br=50) ' abline(v=hdi_95, col=\"red\") ' abline(v=hdi_50, col=\"green\") ' ' x <- exp(seq(pi * (1 - (1/16)), pi, len = 1000)) ' x <- c(x, rev(x)[1]) ' x <- c(x, x) ' plot(sort(x), type=\"l\") ' plot(density(x, adjust=0.25)) ' abline(v=hdi(x, p=.49), col=2) ' abline(v=hdi(x, p=.50), col=3) ", "Code": "hdi <- function(x, prob=0.95, warn=TRUE) { if (anyNA(x)) { stop(\"HDI: \", \"x must not contain any NA values.\", call.=FALSE) } N <- length(x) if (N < 3) { if (warn) { warning(\"HDI: \", \"length of `x` < 3.\", \" Returning NAs\", call.=FALSE) } return(c(NA_integer_, NA_integer_)) } x_sort <- sort(x) window_size <- as.integer(floor(prob * length(x_sort))) if (window_size < 2) { if (warn) { warning(\"HDI: \", \"window_size < 2.\", \" `prob` is too small or x does not \", \"contain enough data points.\", \" Returning NAs.\", call.=FALSE) } return(c(NA_integer_, NA_integer_)) } lower <- seq_len(N - window_size) upper <- window_size + lower ", "Label": "Statistical Modeling", "Source": "https://osf.io/nd9yr/", "File": "ordinal_helper_functions.R" }, { "ID": 169, "Comment": "test whether Condition predicts exclusion probability", "Code": "Cond_exclude <- table(data$Cond, data$exclude) chisq.test(Cond_exclude) rm(Cond_exclude) ", "Label": "Data Variable", "Source": "https://osf.io/sb3kw/", "File": "Study2B_analyses.R" }, { "ID": 170, "Comment": " Fix the following script by: Take our dataset (df), and then group it by our explanatory variable (Condition), and then summarise this dataset by creating a new variable in this summary dataset called Mean. Mean takes the mean of our Response variable, and ignores NA. Repeat for standard deviation. We've added on one more column in our summary dataset, N. N is the number of observations per group. To do so, we want to add up how many times our DV is not missing (per group). When you think it is complete, save the script. The name of the script above will go from red to black.
That is how you know you've saved it. You can use ctrl+S to save, or click the floppy disk above. We want to know how the means and standard deviations and Ns of Hire_Rating differed. ", "Code": "sum_hire <-", "Label": "Data Variable", "Source": "https://osf.io/9vr6q/", "File": "summarise2.R" }, { "ID": 171, "Comment": "Calculate R-squared for each model", "Code": "r.squaredGLMM(m1) #indegree r.squaredGLMM(m2) #outdegree r.squaredGLMM(m3) #betweenness r.squaredGLMM(m4) #outcloseness r.squaredGLMM(m5) #incloseness r.squaredGLMM(m6) #local clustering r.squaredGLMM(m7) #average shortest path length r.squaredGLMM(m8) #eigenvector r.squaredGLMM(m9) #outstrength r.squaredGLMM(m10) #instrength", "Label": "Statistical Test", "Source": "https://osf.io/wc3nq/", "File": "6) agr_summer_model.R" }, { "ID": 172, "Comment": "split the data into two halves", "Code": "d$ItemNo = parse_number(as.character(d$ItemNo)) de = d %>% filter(ItemNo %% 2 == 0) do = d %>% filter(ItemNo %% 2 == 1)", "Label": "Data Variable", "Source": "https://osf.io/cd5r8/", "File": "R_Code_Buffinton_MorganShort.R" }, { "ID": 173, "Comment": "Z-score transform RTs, compute zRT difference, remove NAs for the 2 groups ", "Code": "de <- de |> group_by(Subject) |> mutate (zRT = scale(Reaction.Time, center = T, scale = T), meanRT = mean(Reaction.Time)) |> select (1,2,4,5) |> pivot_wider(names_from = Condition, values_from = zRT) |> mutate (zrt.diff = R-P) |> filter(!is.na(zrt.diff)) do <- do |> group_by(Subject) |> mutate (zRT = scale(Reaction.Time, center = T, scale = T), meanRT = mean(Reaction.Time)) |> select (1,2,4,5) |> pivot_wider(names_from = Condition, values_from = zRT) |> mutate (zrt.diff = R-P) |> filter(!is.na(zrt.diff)) ", "Label": "Data Variable", "Source": "https://osf.io/cd5r8/", "File": "R_Code_Buffinton_MorganShort.R" }, { "ID": 174, "Comment": "fit models for even- and odd-numbered items (Bayesian & Generalized LMM)", "Code": "m_de = blmer(-1/Reaction.Time ~ Condition + (1+Condition|Subject) + (1+Condition|ItemNo), data = de, control=lmerControl(optimizer = \"nloptwrap\", optCtrl = list(algorithm = \"NLOPT_LN_NELDERMEAD\", maxit = 2e5))) m_do = blmer(-1/Reaction.Time ~ Condition + (1+Condition|Subject) + (1+Condition|ItemNo), data = do, control=lmerControl(optimizer = \"nloptwrap\", optCtrl = list(algorithm = \"NLOPT_LN_NELDERMEAD\", maxit = 2e5))) ", "Label": "Statistical Modeling", "Source": "https://osf.io/cd5r8/", "File": "R_Code_Buffinton_MorganShort.R" }, { "ID": 175, "Comment": "fit LMM to the whole dataset", "Code": "m <- blmer(-1/Reaction.Time ~ Condition + (1+Condition|Subject) + (1+Condition|ItemNo), data = d_trimmed, control=lmerControl(optimizer = \"nloptwrap\", optCtrl = list(algorithm = \"NLOPT_LN_NELDERMEAD\", maxit = 2e5))) summary(m) ", "Label": "Statistical Modeling", "Source": "https://osf.io/cd5r8/", "File":
"R_Code_Buffinton_MorganShort.R" }, { "ID": 176, "Comment": "calculate shared and unique variance", "Code": "x1 = rsq - rx2y^2 x2 = rsq - rx1y^2 x1_x2 = rsq - x1 - x2 c(x1, x2, x1_x2, rsq) } semipartial_corr = function(r12, r13, r23) { ", "Label": "Statistical Modeling", "Source": "https://osf.io/sqfnt/", "File": "Goal" }, { "ID": 177, "Comment": "create empty df", "Code": "df <- data.frame( `rx1y` = numeric(), `rx2y` = numeric(), `rx1x2` = numeric(), `rx1y_x2` = numeric(), `rx2y_x1` = numeric(), `x1` = numeric(), `x2` = numeric(), `Common` = numeric(), `Total` = numeric() ) ", "Label": "Data Variable", "Source": "https://osf.io/sqfnt/", "File": "Goal" }, { "ID": 178, "Comment": " > Left panel: QQplot (uniform distribution) > Right panel: Residuals against predicted values;; shaded (due to sample size) with extreme residuals colored red, and 3) MAIN EFFECTS OF KEY VARIABLES AspElevForm ", "Code": "emmip(PrefRate, ~AspElevForm, type = \"response\", CIs = TRUE) (emm <- emmeans(PrefRate, specs = ~AspElevForm, type = \"response\")) pairs(emm) ", "Label": "Visualization", "Source": "https://osf.io/2sz48/", "File": "Model_Preference.R" }, { "ID": 179, "Comment": "Operator on x, rescale to be in y unit ", "Code": "Hlist <- list() for(i in seq(nt)){ Hlist[[paste0(\"H\",i)]] <- with(data, t(apply(data[[i]]$sdfp, 1, function(x) x*data[[i]]$prior)) %*% A) } H<-bdiag(unlist(Hlist)) strl <- function(x){ x[x < max(x)/1000] <- 0 return(x) } H <- as(t(apply(H, 1, strl)), 'dgCMatrix') #Make sparser ", "Label": "Data Variable", "Source": "https://osf.io/53w96/", "File": "INLA_GHG_GMD.R" }, { "ID": 180, "Comment": " Creates the variable 'distances', a 540 X 3 distance matrix between stimuli and category prototypes. ", "Code": "distances = as.matrix(pdist(coordinates, prototypes)) ", "Label": "Data Variable", "Source": "https://osf.io/hrf5t/", "File": "runPrototype.R" }, { "ID": 181, "Comment": "calculate similarity coefficient (mean of signif_line$similarity.scores)", "Code": "mean(signif_line$similarity.scores[,2]) mean(coef_line$similarity.scores[,2]) mean(varimp_line$similarity.scores[,2]) ", "Label": "Statistical Modeling", "Source": "https://osf.io/3gfqn/", "File": "VADIS_genitives_outer_circle.R" }, { "ID": 182, "Comment": "Calculate Priestley Taylor Ep", "Code": "data$PT.Ep <- 1.26/lambda.w*deltav/(deltav+rho.air)*Rn", "Label": "Data Variable", "Source": "https://osf.io/5ezfk/", "File": "ETfun.R" }, { "ID": 183, "Comment": "plot hit, FA, miss, corrRejection for each effect size/sample size combo ", "Code": "ap <- aggregate(hits ~ criterion+sampleSize+effectSizes, data=allDt,mean) ap2 <- aggregate(fa ~ criterion+sampleSize+effectSizes, data=allDt,mean) hFA <- merge(ap,ap2,by=c(\"criterion\",\"sampleSize\",\"effectSizes\")) ap2 <- aggregate(miss ~ criterion+sampleSize+effectSizes, data=allDt,mean) hFA <- merge(hFA,ap2,by=c(\"criterion\",\"sampleSize\",\"effectSizes\")) ap2 <- aggregate(corrRej ~ criterion+sampleSize+effectSizes, data=allDt,mean) hFA <- merge(hFA,ap2,by=c(\"criterion\",\"sampleSize\",\"effectSizes\")) hFA$hits <- hFA$hits / numStudies hFA$fa <- hFA$fa / numStudies hFA$corrRej <- hFA$corrRej / numStudies hFA$miss <- hFA$miss / numStudies ", "Label": "Visualization", "Source": "https://osf.io/hzncs/", "File": "Witt_SDT_Simulations_SeveralTests.R" }, { "ID": 184, "Comment": " plot errors only (FA and misses, separately) Plot ROC dist ", "Code": "aa <- aggregate(ROCdist ~ criterion, data=allDt,mean) ab <- aggregate(ROCdist ~ criterion, data=allDt,sd) ab$ci <- qnorm(.975) * ab$ROCdist / 
sqrt(numStudies) titleText <- ifelse(length(sSizesAll) < 2, paste(\"N=\",sSizesAll[1],\"d=\",effSzAll[1]), paste(\"firstES:\",effSzAll[1])) plot(seq(1:length(crits)),aa$ROCdist,bty=\"l\",xaxt=\"n\",xlab=\"Criterion for Statistical Significance\",ylab=\"Distance to Perfection\",pch=19,col=rainbow(length(crits)),cex=2,ylim=c(0,max(aa$ROCdist)+max(ab$ci)),main = titleText) axis(side=1,at=seq(1:length(crits)),labels = crits) for(i in 1:length(crits)) { segments(i,aa$ROCdist[i] - ab$ci[i],i,aa$ROCdist[i]+ab$ci[i]) } ", "Label": "Visualization", "Source": "https://osf.io/hzncs/", "File": "Witt_SDT_Simulations_SeveralTests.R" }, { "ID": 185, "Comment": "calculate the variance and bias for the log(DR)", "Code": "log.var <- ((res$se.tau2)^2)/4*1/(sum(w.star))^2*(sum(1/(res$vi + res$tau2)^2))^2 log.sd <- sqrt(log.var) bias <- 1/2*(res$se.tau2)^2*(1/2/sum(w.star)^2 - 1/sum(w.star)*sum(1/(res$vi + res$tau2)^3))", "Label": "Statistical Modeling", "Source": "https://osf.io/gwn4y/", "File": "Hospital_Stay_of_Stroke_Patients_forest_plot.R" }, { "ID": 186, "Comment": "create the forest plot including extra information (study names, weights, observed effects and associated variances). ", "Code": "f <- forest(res, col = \"blue\", border = \"blue\", ylim = c(-8,12), xlim = c(-4,5), pch = 19, slab = dat$study, showweights = FALSE, addfit = FALSE, refline = FALSE, ilab = cbind(paste(format(round(res$yi,2), nsmall = 2)) , paste(format(round(res$vi, 2), nsmall = 2)), paste(format(round(res$ni, 2))), paste(format(weights, nsmall = 2)), paste(format(weights.f, nsmall = 2))) , ilab.xpos = c(-3.15,-2.75, -2.35,-1.5,-0.75), ilab.pos = 2, efac = 0, digits = 2) ", "Label": "Visualization", "Source": "https://osf.io/gwn4y/", "File": "Hospital_Stay_of_Stroke_Patients_forest_plot.R" }, { "ID": 187, "Comment": "STEP 2: Filter the data in overall.data table according to preregistered inclusion criteria; replace 0 with NA in outcome column", "Code": "overall.data$LTScreenOut[overall.data$LTScreenOut == 0] <- NA #replace 0 with NA for averaging in the next steps overall.data$LTObjectOut[overall.data$LTObjectOut == 0] <- NA #replace 0 with NA for averaging in the next steps overall.data$FirstLookDurationObjectOut[overall.data$FirstLookDurationObjectOut == 0] <- NA #replace 0 with NA for averaging in the next steps", "Label": "Data Variable", "Source": "https://osf.io/mp9td/", "File": "TablePrep_Third.R" }, { "ID": 188, "Comment": "fit indices function for the SEM and FA analyses", "Code": "fa.CFI<-function(x){ nombre<-paste(x,\"CFI\",sep = \".\") nombre<- ((x$null.chisq-x$null.dof)-(x$STATISTIC-x$dof))/(x$null.chisq-x$null.dof) return(nombre) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/bhrwx/", "File": "evaluation_data_analysis.R" }, { "ID": 189, "Comment": "Check descriptive statistics: gender breakdown", "Code": "df <- eval_data %>% group_by(gender) %>% summarise(counts = n()) df", "Label": "Statistical Test", "Source": "https://osf.io/bhrwx/", "File": "evaluation_data_analysis.R" }, { "ID": 190, "Comment": "Decompose variance: estimate multilevel model (without predictors)", "Code": "m_logistic <- lmer(estimate ~ 1 + (1|controls) + (1|age) + (1|year) + (1|age:year) + (1|age:controls) + (1|year:controls), data = results %>% rename(age = age_group)) ", "Label": "Statistical Modeling", "Source": "https://osf.io/m72gb/", "File": "analysis_privacysetting.r" }, { "ID": 191, "Comment": "Visualize the first four principal components", "Code": "fig.df <- data.frame(HR = c(FPCAdense$phi[,1], FPCAdense$phi[,2],
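# stack the first four eigenfunctions (phi columns) into long format, with a component index and time grid, for faceting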
FPCAdense$phi[,3], FPCAdense$phi[,4]), pc = c(rep(1, 51), rep(2, 51), rep(3, 51), rep(4, 51)), Time= rep(FPCAdense$workGrid, 4)) all.four <- ggplot(fig.df, aes(x=Time, y=HR))+geom_line()+ geom_vline(xintercept=.20, linetype=\"dashed\")+ facet_grid(.~pc)+ theme_bw() all.four ggsave(\"../figures/figure5.png\", width=6, height=4) ", "Label": "Visualization", "Source": "https://osf.io/qj86m/", "File": "8_fda_socaccount.R" }, { "ID": 192, "Comment": "SPATIOTEMPORAL VARIABLES AND HOMING SUCCESS Join territory center points to the dataframe to calculate translocation distance and homing success", "Code": "trajectory <- trajectory.df %>% left_join(territory_centers, by = \"id\")%>% mutate(trans_dist = sqrt((x_home - first(x_utm))^2+(y_home - first(y_utm))^2), trans_group = ifelse(trans_dist > 100, \"200m\", \"50m\"))%>% dplyr::select(id, sex, trans_group, trans_dist, dt, x_utm, y_utm, x_home, y_home, time_lag_min = time_diff, dist) ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "dt_homing_dataproc.R" }, { "ID": 193, "Comment": "Tally the number of relocations for each individual per day", "Code": "trajectory.df %>% group_by(id, date) %>% count(id) -> relocs.perday ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "dt_homing_dataproc.R" }, { "ID": 194, "Comment": "Plot full circular info by sex: start with a blank plot, then add group-specific points ", "Code": "dev.new(); par(mai = c(1, 1, 0.1,0.1)) par(mar=c(0.5, 0.5, 0.5, 0.5)) pdf(\"dt_circ_50m.pdf\") plot(corient50_m, bg = rgb(0, 0.749, 0.769), pch = 21, cex = 1.5, lwd = 2, stack = TRUE, bin = 60, xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2), sep = 0.05, shrink = 1, tcl.text = -0.125, control.circle=circle.control(lwd = 2)) ticks.circular(circular(seq(0,2*pi,pi/2)), tcl=0.075) par(new = T) plot(corient50_f, bg=rgb(0.973, 0.463, 0.427), pch = 21, cex=1.5, lwd = 2, stack=T, bins=60, sep = -0.05, shrink= 1.3, axes = FALSE, control.circle=circle.control(lwd = 1)) arrows.circular(mean(corient50_m), y = rho.circular(corient50_m), col = rgb(0, 0.749, 0.769), lwd = 5) arrows.circular(mean(corient50_f), y = rho.circular(corient50_f), col = rgb(0.973, 0.463, 0.427), lwd = 5) par(new = T) plot(corient50_m, col = NA, shrink= 2.5, axes = FALSE, control.circle=circle.control(lty = 2, lwd = 1)) ticks.circular(circular(seq(0,2*pi,pi/8)), tcl=0.2) lines(density.circular(corient50_m, bw=30), shrink= 1, col = rgb(0, 0.749, 0.769, 0.7), lwd=2, lty=1) lines(density.circular(corient50_f, bw=30), col = rgb(0.973, 0.463, 0.427, 0.7), lwd=2, lty=1) dev.new(); par(mai = c(1, 1, 0.1,0.1)) par(mar=c(0.5, 0.5, 0.5, 0.5)) pdf(\"dt_circ_200m.pdf\") plot(corient200_m, bg = rgb(0, 0.749, 0.769), pch = 21, cex = 1.5, lwd = 2, stack = TRUE, bin = 60, xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2), sep = 0.05, shrink = 1, tcl.text = -0.125, control.circle=circle.control(lwd = 2)) ticks.circular(circular(seq(0,2*pi,pi/2)), tcl=0.075) par(new = T) plot(corient200_f, bg=rgb(0.973, 0.463, 0.427), pch = 21, cex=1.5, lwd = 2, stack=T, bins=60, sep = -0.05, shrink= 1.3, axes = FALSE, control.circle=circle.control(lwd = 1)) arrows.circular(mean(corient200_m), y = rho.circular(corient200_m), col = rgb(0, 0.749, 0.769), lwd = 5) arrows.circular(mean(corient200_f), y = rho.circular(corient200_f), col = rgb(0.973, 0.463, 0.427), lwd = 5) par(new = T) plot(corient200_m, col = NA, shrink= 2.5, axes = FALSE, control.circle=circle.control(lty = 2, lwd = 1)) ticks.circular(circular(seq(0,2*pi,pi/8)), tcl=0.2) lines(density.circular(corient200_m, bw=30), shrink= 1,
col = rgb(0, 0.749, 0.769, 0.7), lwd=2, lty=1) lines(density.circular(corient200_f, bw=30), col = rgb(0.973, 0.463, 0.427, 0.7), lwd=2, lty=1) ", "Label": "Visualization", "Source": "https://osf.io/3bpn6/", "File": "dt_homing_dataproc.R" }, { "ID": 195, "Comment": "Helper function to prepare raw keyboard data. #' @author F. Bemmann #' @family Preprocessing function #' @description this function unfolds JSON data into one column per key-value pair #' @export", "Code": "parseJsonColumnSensing = function(df, column_name){ parseJsonColumn = function(x){ str_c(\"[ \", str_c(x, collapse = \",\", sep=\" \"), \" ]\") %>% jsonlite::fromJSON(flatten = T) %>% as_tibble() } df2 = df %>% select(user_uuid,client_event_id,!!column_name) %>% filter(!is.na(!!rlang::sym(column_name))) %>% map_dfc(.f = parseJsonColumn) %>% distinct() colnames(df2)[1:2] = c(\"user_uuid\", \"client_event_id\") df = left_join(df, df2, by = c(\"user_uuid\", \"client_event_id\")) df[,column_name] = NULL return(df) } ", "Label": "Data Variable", "Source": "https://osf.io/b7krz/", "File": "helper_JsonFormat.R" }, { "ID": 196, "Comment": "Separate ANOVAs for each subscale; pairwise t-tests with adjusted alpha level of .05/4 = .0125 (because there are four relevant comparisons)", "Code": "summary(aov(value~Fan*time+Error(VP_t0/(time)),data=dat[dat$scale==\"Attentiveness\",])) pairwise.t.test(dat[dat$scale==\"Attentiveness\",]$value,paste(dat[dat$scale==\"Attentiveness\",]$Fan,dat[dat$scale==\"Attentiveness\",]$time),p.adj=\"none\") ", "Label": "Statistical Test", "Source": "https://osf.io/xcthg/", "File": "0-Script.R" }, { "ID": 197, "Comment": " Models. Contrasts: (Reverse) Helmert for cond_speech, cond_mask; Treatment for speech_mask; Sum for group. Factor variables ", "Code": "df <- df %>% mutate( group = factor(group), # oc, pd gender = factor(intake_gender), # f, m cond_mask = factor(cond_mask, levels = c(\"nm\",\"sm\",\"kn\")), cond_speech = factor(cond_speech, levels = c(\"habitual\",\"clear\",\"loud\")) ) %>% mutate(speech_mask = paste(cond_speech,cond_mask,sep=\"_\")) levels(df$group) # oc, pd levels(df$gender) # f, m levels(df$cond_speech) # habitual, clear, loud levels(df$cond_mask) # nm, sm, kn ", "Label": "Statistical Modeling", "Source": "https://osf.io/5s34w/", "File": "simpd_helper.R" }, { "ID": 198, "Comment": "4) Descriptives and data inspection: total duration on survey", "Code": "stat.desc(working_file %>% group_by(pp) %>% slice(1) %>% pull(duration))", "Label": "Data Variable", "Source": "https://osf.io/g8kbu/", "File": "dataAnalysisRewardAppsSurvey.R" }, { "ID": 199, "Comment": "Bayesian t-tests", "Code": "ttestBF(wide$high, wide$neutral, paired = TRUE) ttestBF(wide$low, wide$neutral, paired = TRUE) ttestBF(wide$high, wide$low, paired = TRUE) formatC(3057348376, format = \"e\", digits = 2) ", "Label": "Statistical Test", "Source": "https://osf.io/g8kbu/", "File": "dataAnalysisRewardAppsSurvey.R" }, { "ID": 200, "Comment": "Confidence interval as a vector", "Code": "result <- c(\"lower\" = vec_mean - error, \"upper\" = vec_mean + error) return(result) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/92e6c/", "File": "fill_summary_table.R" }, { "ID": 201, "Comment": "Transform academic background and job category variables from character to factor", "Code": "class(expertsample$acadgroup) expertsample$acadgroup<-as.factor(expertsample$acadgroup) class(expertsample$job_cat) expertsample$job_cat <- as.factor(expertsample$job_cat)", "Label": "Data Variable", "Source": "https://osf.io/u9hkj/", "File":
"expertsurvey_CDRCCS.R" }, { "ID": 202, "Comment": "graphical check for normal distribution of differences support (mean)", "Code": "diff_support <- bothexpert$BECCS_support-bothexpert$DACCS_support hist(diff_support)", "Label": "Visualization", "Source": "https://osf.io/u9hkj/", "File": "expertsurvey_CDRCCS.R" }, { "ID": 203, "Comment": "Wilcoxon tests support (mean)", "Code": "wilcox.test(bothexpert$BECCS_support, bothexpert$DACCS_support, paired=TRUE) ", "Label": "Statistical Test", "Source": "https://osf.io/u9hkj/", "File": "expertsurvey_CDRCCS.R" }, { "ID": 204, "Comment": "correlations separately for each condition", "Code": "data_con <- data[data$Cond == \"Control\", ] data_ent <- data[data$Cond == \"Entitlement\", ] chart.Correlation(data_con[ , c(\"pes\",\"prestige\",\"dominance\",\"benign\",\"malicious\",\"pain\")], use = \"pairwise.complete.obs\", pch = 20, histogram = TRUE) chart.Correlation(data_ent[ , c(\"pes\",\"prestige\",\"dominance\",\"benign\",\"malicious\",\"pain\")], use = \"pairwise.complete.obs\", pch = 20, histogram = TRUE) rm(data_con,data_ent) ", "Label": "Data Variable", "Source": "https://osf.io/sb3kw/", "File": "Study2A_analyses.R" }, { "ID": 205, "Comment": "Estimate partial correlation matrices using fiml (partial cors and not glasso b/c different sample sizes leads to different glasso penalties)", "Code": "net1hi <- cor1.edhi$cor %>% EBICglasso(., n = cor1.edhi$n) net2hi <- cor2.edhi$cor %>% EBICglasso(., n = cor2.edhi$n) net3hi <- cor3.edhi$cor %>% EBICglasso(., n = cor3.edhi$n) net4hi <- cor4.edhi$cor %>% EBICglasso(., n = cor4.edhi$n) net5hi <- cor5.edhi$cor %>% EBICglasso(., n = cor5.edhi$n) net6hi <- cor6.edhi$cor %>% EBICglasso(., n = cor6.edhi$n) net7hi <- cor7.edhi$cor %>% EBICglasso(., n = cor7.edhi$n) net1lo <- cor1.edlo$cor %>% EBICglasso(., n = cor1.edlo$n) net2lo <- cor2.edlo$cor %>% EBICglasso(., n = cor2.edlo$n) net3lo <- cor3.edlo$cor %>% EBICglasso(., n = cor3.edlo$n) net4lo <- cor4.edlo$cor %>% EBICglasso(., n = cor4.edlo$n) net5lo <- cor5.edlo$cor %>% EBICglasso(., n = cor5.edlo$n) net6lo <- cor6.edlo$cor %>% EBICglasso(., n = cor6.edlo$n) net7lo <- cor7.edlo$cor %>% EBICglasso(., n = cor7.edlo$n) ", "Label": "Statistical Modeling", "Source": "https://osf.io/mj5nh/", "File": "educationnetworksatleast6.R" }, { "ID": 206, "Comment": " 5. Confirmatory Factor analysis To replicate our results without item 1, delete yts_1 from the analyses 5.1 yes/no RESIST ", "Code": "dataCFA1 <- na.omit(data.frame(data$id,data$yts_1, data$yts_2, data$yts_3, data$yts_4, data$yts_5, data$yts_6, data$yts_7, data$yts_8, data$yts_9, data$yts_10, data$yts_11, data$yts_12, data$yts_13)) colnames (dataCFA1) <-c(\"id\",\"yts_1\",\"yts_2\",\"yts_3\",\"yts_4\",\"yts_5\",\"yts_6\", \"yts_7\",\"yts_8\",\"yts_9\",\"yts_10\",\"yts_11\",\"yts_12\", \"yts_13\") model1 <- 'EA =~ yts_1 + yts_2 + yts_3 + yts_4 + yts_5 + yts_6 + yts_7 + yts_8 + yts_9 + yts_10 + yts_11 + yts_12 + yts_13' fit1 <- cfa(model1, data=dataCFA1, meanstructure=T, std.lv=T, estimator = \"WLSMV\", ordered = c(\"yts_1\", \"yts_2\", \"yts_3\", \"yts_4\", \"yts_5\", \"yts_6\", \"yts_7\", \"yts_8\", \"yts_9\", \"yts_10\", \"yts_11\", \"yts_12\", \"yts_13\")) summary(fit1, fit.measures=TRUE, standardized = T) fitMeasures(fit1,c(\"chisq\",\"df\",\"pvalue\",\"cfi\",\"tli\",\"rmsea\", \"wrmr\",'rmsea.ci.lower','rmsea.ci.upper')) modindices(fit1, sort. 
= T) ", "Label": "Statistical Modeling", "Source": "https://osf.io/g2nkw/", "File": "YCAS_Scale_development.R" }, { "ID": 207, "Comment": "get factor scores and include them in original data", "Code": "dataCFA1$factorscore1 <- predict(fit1) data <- left_join(data, dataCFA1, by = \"id\") data$factorscore1 dataCFA3$factorscore3 <- predict(fit3) data1 <- left_join(data1, dataCFA3, by = \"record_id\") data1$factorscore3 dataCFA4$factorscore4 <- predict(fit4) data1 <- left_join(data1, dataCFA4, by = \"record_id\") data1$factorscore4 ", "Label": "Data Variable", "Source": "https://osf.io/g2nkw/", "File": "YCAS_Scale_development.R" }, { "ID": 208, "Comment": "join behDat and demDat", "Code": "allData = inner_join(behDat, demDat, by = \"userCode\") ", "Label": "Data Variable", "Source": "https://osf.io/wcfj3/", "File": "1_dataPrep.R" }, { "ID": 209, "Comment": "Standardize variables: z-scoring", "Code": "df_overall = df df_overall$sWM = scale(df_overall$sWM) df_overall$rtWM = scale(df_overall$rtWM) df_overall$rtDivAtt = scale(df_overall$rtDivAtt) df_overall$rtAlert = scale(df_overall$rtAlert) df_overall$sRWT = scale(df_overall$sRWT)", "Label": "Data Variable", "Source": "https://osf.io/wcfj3/", "File": "1_dataPrep.R" }, { "ID": 210, "Comment": " Remove time points of 29 and above, because not everyone attended that often ", "Code": "df_overall = df_overall %>% filter(Time < 29) %>% ungroup()", "Label": "Data Variable", "Source": "https://osf.io/wcfj3/", "File": "1_dataPrep.R" }, { "ID": 211, "Comment": "ensure that userCode is a factor", "Code": "df_overall$userCode = as.factor(df_overall$userCode)", "Label": "Data Variable", "Source": "https://osf.io/wcfj3/", "File": "1_dataPrep.R" }, { "ID": 212, "Comment": "We can also try to cluster the languages based on the feature values they have. We currently use hierarchical clustering. This will normally also reflect the PCA visualization. The output can also be compared with the family grouping of the languages. generate the clusters", "Code": "hclust_avg <- hclust(daisy(con_data)) hclust_avg <- hclust(daisy(con_data))", "Label": "Visualization", "Source": "https://osf.io/6hx2n/", "File": "DM_a_k.R" }, { "ID": 213, "Comment": "add squares around the clusters", "Code": "rect.hclust(hclust_avg , k = 4, border = 1:4) rect.hclust(hclust_avg , k = 4, border = 1:4) ", "Label": "Visualization", "Source": "https://osf.io/6hx2n/", "File": "DM_a_k.R" }, { "ID": 214, "Comment": " Extracting distances We can extract the pairwise distance between the points of the data set. First, we can visualize the distances in a two-dimensional space. Normally, we expect that this visualization matches the output of the PCA.
change the content to a distance matrix ", "Code": "distances <- con_data %>% dist(method = \"euclidean\") distances <- con_data %>% dist(method = \"euclidean\") ", "Label": "Visualization", "Source": "https://osf.io/6hx2n/", "File": "DM_a_k.R" }, { "ID": 215, "Comment": "recodes ethnicity (Decline/Other category is set to missing)", "Code": "sambdif<-samb %>% select(1:19, SAmb_tot, gender, ethnic) %>% mutate(ethnic_fac = rec(ethnic, rec = \"1=0; 6=NA; else=1\")) %>% convert(fct(gender, ethnic_fac)) table(sambdif$gender) # 0=women, 1=men table(sambdif$ethnic_fac) #0=EuroAm, 1=PoC ", "Label": "Data Variable", "Source": "https://osf.io/ztycp/", "File": "Schizotypal Ambivalence.R" }, { "ID": 216, "Comment": "reorder the list of results to match the alignment of the dendrogram", "Code": "list.res.reo<-list.res[order.dendrogram(dend1)] layd<-c(10,1) tiff(\"Figure_hclust2.tif\",width=sum(layd),height=11.5,units=\"cm\",res=600,compression=\"lzw\") layout(matrix(1:2,nrow=1),widths=layd) par(mar=c(3.5,1,1.5,14),mgp=c(2,0.8,0)) plot(dend1, cex = 0.8, horiz=T,xlab=\"Height\") ", "Label": "Visualization", "Source": "https://osf.io/greqt/", "File": "04_clusters2_tvals.R" }, { "ID": 217, "Comment": "function to read in cleaned data and format it as a tibble that can be used for plotting", "Code": "read_data_for_plotting <- function(data_path) { dat <- read_csv(data_path, col_types = cols()) ", "Label": "Visualization", "Source": "https://osf.io/fb5tw/", "File": "plotting_style.R" }, { "ID": 218, "Comment": "create a matrix of relevant item loadings to calculate non-refined factor (scale) scores", "Code": "scaleMatrix <- data.frame(data = matrix(EFA$loadings, ncol = 6)) scaleMatrix <- lapply(scaleMatrix, function(x) ifelse(x >= 0.4, 1, ifelse(x <= -0.4, -1, NA))) scaleMatrix <- data.frame(data = scaleMatrix, row.names = colnames(mainData[, c(2:56)])) colnames(scaleMatrix) <- c(\"Sensory\", \"CognitiveDemand\", \"ThreatToSelf\", \"CrossSettings\", \"Safety\", \"States\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/2j47e/", "File": "Cluster analysis non-refined factor scores.R" }, { "ID": 219, "Comment": "get number of patients (from 'table_overview_of_studies.R')", "Code": "df_long <- left_join(df_long, tab_study_overview %>% select(study_name, N_Patients) %>% rename(Study = study_name) %>% mutate(N_Patients = as.numeric(str_extract(N_Patients, '(\\\\d*)'))), by = 'Study') ", "Label": "Data Variable", "Source": "https://osf.io/fykpt/", "File": "03_table_comorbidities_by_study.R" }, { "ID": 220, "Comment": "add error bars with standard error", "Code": "plot <- plot + geom_errorbar(aes(ymin=lower,ymax=upper),width=.2) plot <- plot + ggtitle(\"2a) Average accuracy across four Cue Type conditions\") ", "Label": "Visualization", "Source": "https://osf.io/bfq39/", "File": "Code_LMMs_BestPractice_Example_withOutput.R" }, { "ID": 221, "Comment": "Capture Age for Each Participant", "Code": "ParticipantAge <- summarize(Summary, count = n(), Age = mean(Age, na.rm = T)) ", "Label": "Data Variable", "Source": "https://osf.io/2uf8j/", "File": "Negativity R.R" }, { "ID": 222, "Comment": "EVENT VALENCE: Compare Rated Strength of Positive & Negative Events CHECKING FOR OUTLIERS Figure: Boxplot Order Conditions", "Code": "EventValenceRatings$Valence <- factor(EventValenceRatings$Valence, levels = c(\"Positive\", \"Negative\")) BoxplotFig <- ggplot(EventValenceRatings, aes(x=Valence, y=Strength)) + theme_bw() BoxplotFig <- BoxplotFig + geom_boxplot(aes(color = Valence, fill = Valence), outlier.size = 2.5,
alpha = 0.5) ", "Label": "Visualization", "Source": "https://osf.io/2uf8j/", "File": "Negativity R.R" }, { "ID": 223, "Comment": "Dotplot Valence Figure (Winsorized & Reverse-Coded)", "Code": "EventValenceRatings_MEANFig <- ggplot(data=EventValenceRatings_ByParticipant_MEAN, aes(x=Valence, y=Strength_ReverseCoded)) EventValenceRatings_MEANFig <- EventValenceRatings_MEANFig + stat_summary(fun=mean, geom=\"bar\", position =\"dodge\", alpha=0.5, aes(fill=Valence)) EventValenceRatings_MEANFig <- EventValenceRatings_MEANFig + theme_bw() EventValenceRatings_MEANFig <- EventValenceRatings_MEANFig + geom_violin(alpha = 0.5, color='grey50') ", "Label": "Visualization", "Source": "https://osf.io/2uf8j/", "File": "Negativity R.R" }, { "ID": 224, "Comment": "Mixed Effects Model with EventPosition as a Covariate (Linear & Quadratic). Random Effects Structure simplified to converge: ChainPosition removed from ChainID; SocialContext removed from Event", "Code": "Analysis1GLMER <- glmer (Present~ SocialContext.C*Valence.C*ChainPosition.C + poly(EventPosition.C, 2) + (1 + Valence.C | ChainID) + (1 | Event), data=Analysis1, family=binomial) summary(Analysis1GLMER) Analysis2GLMER <- glmer (Present~ SocialContext.C*Valence.C*ChainPosition.C + poly(EventPosition.C, 2) + (1 + Valence.C | ChainID) + (1 | Event), data=Analysis2, family=binomial) summary(Analysis2GLMER) ", "Label": "Statistical Modeling", "Source": "https://osf.io/2uf8j/", "File": "Negativity R.R" }, { "ID": 225, "Comment": "Mixed Effects Model with EventPosition as a Covariate (Linear & Quadratic). Random Effects Structure simplified to converge: Valence & ChainPosition removed from ChainID; SocialContext removed from Event", "Code": "Analysis3GLMER <- glmer (Present~ SocialContext.C*Valence.C*ChainPosition.C + poly(EventPosition.C, 2) + (1 | ChainID) + (1 | Event), data=Analysis3, family=binomial) summary(Analysis3GLMER) ", "Label": "Statistical Modeling", "Source": "https://osf.io/2uf8j/", "File": "Negativity R.R" }, { "ID": 226, "Comment": " VISUALISE BIAS (POSITIVE, NEGATIVE) IN EACH SocialContext Subtract Mean Positive From Mean Negative Survival Score for each SocialContext Prediction is that Preference for Negativity Decreases first with Communicative Intent and then further with Social Interaction Calculate Means for Positive & Negative Information Calculate Mean Positive - Mean Negative for Each Chain Plot Filter ", "Code": "Bias <- filter(NegativityData, Valence %in% c(\"Positive\", \"Negative\")) ", "Label": "Visualization", "Source": "https://osf.io/2uf8j/", "File": "Negativity R.R" }, { "ID": 227, "Comment": "Create a Vector of colours based on each Participant's Mean Bias Score (Positive - Negative) Use this to Color the dot points by Value", "Code": "wideBiasMEAN <- mutate(wideBiasMEAN, ColourFig = ifelse(Bias >1, \"#006A40FF\", ifelse(Bias <0, \"#F08892FF\", \"#95828DFF\"))) NegativityBiasFig <- ggplot(data=wideBiasMEAN, aes(x=SocialContext, y=Bias)) NegativityBiasFig <- NegativityBiasFig + stat_summary(fun.y=mean, geom=\"bar\", position =\"dodge\", alpha=0.5, fill=\"red\") NegativityBiasFig <- NegativityBiasFig +
theme_bw() NegativityBiasFig <- NegativityBiasFig + geom_violin(alpha = 0.5, color='grey50') ", "Label": "Visualization", "Source": "https://osf.io/2uf8j/", "File": "Negativity R.R" }, { "ID": 228, "Comment": "Geom_dotplot with points coloured by Value", "Code": "NegativityBiasFig <- NegativityBiasFig + geom_dotplot(aes(x=SocialContext, y=Bias, fill=ColourFig), binaxis='y', stackdir='center', dotsize=0.9, alpha = 1, stroke=0.75, colour='grey15') + scale_fill_identity() ", "Label": "Visualization", "Source": "https://osf.io/2uf8j/", "File": "Negativity R.R" }, { "ID": 229, "Comment": "Add Violin Geom to give an indication of data normality", "Code": "ResolutionCollapsedFig <- ResolutionCollapsedFig + geom_violin(alpha = 0.0, colour='grey50') ResolutionALLPosNegFig <- ResolutionALLPosNegFig + geom_violin(alpha = 0.5, color='grey50') ", "Label": "Visualization", "Source": "https://osf.io/2uf8j/", "File": "Negativity R.R" }, { "ID": 230, "Comment": " Run IRT model: 3-dim model for math, read and scie, no weights. Possibility to set seed and iteration number in the function parameters ", "Code": "mod <- run.irt(pa12_resp_s, items = item.dif) ", "Label": "Statistical Modeling", "Source": "https://osf.io/8fzns/", "File": "3_IRT.R" }, { "ID": 231, "Comment": "function to create tick marks on a log scale", "Code": "log10Tck <- function(side, type){ lim <- switch(side, x = par('usr')[1:2], y = par('usr')[3:4], stop('side argument must be \"x\" or \"y\"')) at <- floor(lim[1]) : ceiling(lim[2]) at <- 0:8 return(switch(type, minor = outer(1:9, 10^(min(at):max(at)))[,1:8], major = 10^at, stop('type argument must be \"major\" or \"minor\"') )) } if(saveFigures) cairo_ps(file = '../R_Output/Images/OriginalBFScatter.eps', onefile = TRUE, fallback_resolution = 600, width = 7.3, height = 4.16) ", "Label": "Visualization", "Source": "https://osf.io/x72cy/", "File": "ConfirmatoryAnalyses.R" }, { "ID": 232, "Comment": "Plot: original Bayes factor and replication effect size", "Code": "plot(original.bf, replication.effectsizes, xlim = c(1, 10^8), ylim = c(-0.2, 1), axes = FALSE, log = 'x', cex = 2, cex.lab = 1.6, pch = 21, bg = c('grey36', 'grey')[replication.outcomes], xlab = 'Bayes Factor Original Study', ylab = 'Replication Effect Size (r)') ", "Label": "Visualization", "Source": "https://osf.io/x72cy/", "File": "ConfirmatoryAnalyses.R" }, { "ID": 233, "Comment": " take the x-axis indices and add a jitter, proportional to the N in each level ", "Code": "myjitter <- jitter(rep(i, length(thisvalues)), amount=levelProportions[i]/6) points(myjitter, thisvalues, pch=1, col=rgb(0,0,0,.9), cex = 1.5, lwd = 1.5) } graphics.off() rm(myjitter, thislevel, thisvalues, mylevels, levelProportions, nsubjects, subjects) ", "Label": "Data Variable", "Source": "https://osf.io/x72cy/", "File": "ConfirmatoryAnalyses.R" }, { "ID": 234, "Comment": "Multiple Regression Analysis", "Code": "multreg.second(Speaking ~ Vocabulary+Grammar+Writing+Reading, corr=correl, n=100) multreg.second(Y~ X1+X2+X3, corr=correl, n=100) multreg.second(Score~ Wordcount+CLI+Commas+Stopwords+Linking+WordsSentence, corr=correl, n=200) multreg(Score~ Wordcount+CLI+Commas+Stopwords+Linking+WordsSentence, data=dat1) lm.out <- lm(Score ~., dat1) ", "Label": "Statistical Modeling", "Source": "https://osf.io/uxdwh/", "File": "code.R" }, { "ID": 235, "Comment": "Dominance Analysis (using dominanceanalysis package)", "Code": "lm.cov <- lmWithCov(Score ~ Wordcount+CLI+Commas+Stopwords+Linking+WordsSentence, correl) da <- dominanceAnalysis(lm.cov) print(da) ", "Label":
"Statistical Modeling", "Source": "https://osf.io/uxdwh/", "File": "code.R" }, { "ID": 236, "Comment": "FUnctions for Cleaning Function to generate a table with all observations within a variable and its corresponding count", "Code": "check_observation <- function (df, column) { require (tidyverse) check <- df %>% group_by (as.vector(unlist(df[, column]))) %>% count () colnames(check)[1] <- column return (check) } recur.collapse <- function(df, n = ncol(df)) { #collapse function combine <- function(x,y){ if(!is.na(x)){ return(x) } else if (!is.na(y)) { return(y) } else { return(NA) } } if(n==1){ return(df[,1]) } else { return(mapply(combine, df[,n], recur.collapse(df, n-1))) } } multi_spread <- function(df, id, key, var){ require(gtools) require(tidyverse) df.list <- lapply(var, function(x){ df.temp <- spread(df[, c(id, key, x)], key, x) # spread 1 variable cname <- mixedsort(unique(as.vector(unlist(df[,key])))) # sort colnames in alphanumeric order df.temp <- df.temp[, c(id, cname)] # rename column names colnames(df.temp)[2:ncol(df.temp)] <- paste(x, colnames(df.temp[,cname]), sep= \"_\") #add key to column names return(df.temp) }) df.output <- df.list[[1]] # initialise for(i in 1:(length(var)-1)){ # combine matrices together df.output <- full_join(df.output, df.list[[i+1]]) } return(as.data.frame(df.output)) } ", "Label": "Data Variable", "Source": "https://osf.io/6qej7/", "File": "ECoach Functions.R" }, { "ID": 237, "Comment": "Extract posteriors of effects from Bayesian ANOVA", "Code": "bestmod <- modBF[1] chains <- posterior(bestmod, iterations=50000, columnFilter=\"^Subject$\") colnames(chains) ", "Label": "Statistical Modeling", "Source": "https://osf.io/8abj4/", "File": "Exp2.R" }, { "ID": 238, "Comment": "STATISTICAL TEST: Nonparametric Spearman's correlation nonnormalized conditional entropy", "Code": "cor.test(results$DATE, results$CEDenominationsMotifs, method = \"spearman\") rdates <- rev(dates) #dates = centuries BCE plot(results$DATE, results$CEDenominationsMotifs, xlim = c(6,4), xaxt='n', xlab = \"Century BCE\", ylab = \"H(D|d)\", main = \"P2: Conditional entropy of denominantions given designs\") axis(1, at = rdates, labels = rdates) ", "Label": "Statistical Test", "Source": "https://osf.io/uckzx/", "File": "P2_analysis_oldbins.R" }, { "ID": 239, "Comment": "____________________________________________________________ Plots with H(D|d) nonnormalized conditional entropy Getting statistics: mean, meadian, standard deviation among authorities per period for nonnormalized conditional entropy of denomination given designs ", "Code": "N <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = length) MEAN <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = mean) MEDIAN <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = median) SD <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = sd) resultspoleis_summary <- cbind.data.frame(N, MEAN$CEDenominationsMotifs, MEDIAN$CEDenominationsMotifs, SD$CEDenominationsMotifs) colnames(resultspoleis_summary) <- c(\"DATE\",\"N\",\"MEAN\",\"MEDIAN\",\"SD\") resultspoleis_summary$SE <- resultspoleis_summary$SD / sqrt(resultspoleis_summary$N) ", "Label": "Visualization", "Source": "https://osf.io/uckzx/", "File": "P2_analysis_oldbins.R" }, { "ID": 240, "Comment": "Getting statistics: mean, meadian, standard deviation among authorities per period for normalized conditional entropy of denomination given designs ", "Code": "N <- aggregate(NormCEDenominationsMotifs ~ DATE, data = 
resultspoleis, FUN = length) MEAN <- aggregate(NormCEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = mean) MEDIAN <- aggregate(NormCEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = median) SD <- aggregate(NormCEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = sd) Nresultspoleis_summary <- cbind.data.frame(N, MEAN$NormCEDenominationsMotifs, MEDIAN$NormCEDenominationsMotifs, SD$NormCEDenominationsMotifs) colnames(Nresultspoleis_summary) <- c(\"DATE\",\"N\",\"MEAN\",\"MEDIAN\",\"SD\") Nresultspoleis_summary$SE <- Nresultspoleis_summary$SD / sqrt(Nresultspoleis_summary$N) ", "Label": "Statistical Modeling", "Source": "https://osf.io/uckzx/", "File": "P2_analysis_oldbins.R" }, { "ID": 241, "Comment": " aggregate data, z-standardize all predictor variables for the model ", "Code": "recogfreq <- aggregate(cbind(correct, intrusion, new) ~ id+setsize+rsizeList+rsizeNPL, data=recogdat, FUN=sum) recogfreq$zsetsize <- (recogfreq$setsize - mean(recogfreq$setsize))/sd(recogfreq$setsize) recogfreq$zrsizeList <- (recogfreq$rsizeList - mean(recogfreq$rsizeList))/sd(recogfreq$rsizeList) recogfreq$zrsizeNPL <- (recogfreq$rsizeNPL - mean(recogfreq$rsizeNPL))/sd(recogfreq$rsizeNPL) for (sc in 1:length(scales)) { prior <- paste0(\"cauchy(0, \", as.character(scales[sc]), \")\") fixefPrior <- c(set_prior(prior, class=\"b\")) ranefPrior <- set_prior(\"gamma(1,0.04)\", class=\"sd\") ", "Label": "Data Variable", "Source": "https://osf.io/qy5sd/", "File": "PairsBindingRSS_Stats.R" }, { "ID": 242, "Comment": "Cronbach's alpha (ingroup)", "Code": "iden.ing.woOutliers.a <- c(\"ingroup.value\", \"ingroup.like\", \"ingroup.connected\") iden.ing.woOutliers <- data.all[iden.ing.woOutliers.a] cronbach(iden.ing.woOutliers) ", "Label": "Statistical Test", "Source": "https://osf.io/9tnmv/", "File": "Exp1a_Election_post.R" }, { "ID": 243, "Comment": " min and max age, mean and sd age, percentage of men and women ", "Code": "minAge = min(df_preQ$age) maxAge = max(df_preQ$age) meanAge = mean(df_preQ$age) sdAge = sd(df_preQ$age) females = length(which(df_preQ$gender == \"female\")) males = length(which(df_preQ$gender == \"male\")) other = length(which(df_preQ$gender == \"other\")) ", "Label": "Data Variable", "Source": "https://osf.io/xh36s/", "File": "quest_analyses.R" }, { "ID": 244, "Comment": "Check normality with QQ plot and Shapiro-Wilk test. Build the linear model", "Code": "model <- lm(FW ~ condition, data = df_postQ) ", "Label": "Statistical Test", "Source": "https://osf.io/xh36s/", "File": "quest_analyses.R" }, { "ID": 245, "Comment": "Question 4: Pearson correlation between the different subscales", "Code": "df_cor <- data.frame(df_postQ$FW, df_postQ$DU, df_postQ$DET) colnames(df_cor) <- c('FW', 'DU', 'DET') res_cor <- rcorr(as.matrix(df_cor)) ", "Label": "Statistical Test", "Source": "https://osf.io/xh36s/", "File": "quest_analyses.R" }, { "ID": 246, "Comment": "Visualize correlations. Insignificant correlations are left blank", "Code": "corrplot(res_cor$r, method = 'number', type=\"upper\", order=\"hclust\", p.mat = res_cor$P, sig.level = 0.05, insig = \"blank\") ", "Label": "Visualization", "Source": "https://osf.io/xh36s/", "File": "quest_analyses.R" }, { "ID": 247, "Comment": "Stepwise regression model", "Code": "step.model <- stepAIC(full.model, direction = \"both\", trace = FALSE) summary(step.model) step.model <- stepAIC(full.model, direction = \"both\", trace = FALSE) summary(step.model) step.model <- stepAIC(full.model, direction = \"both\", trace = FALSE) summary(step.model) ", "Label":
"Statistical Modeling", "Source": "https://osf.io/xh36s/", "File": "quest_analyses.R" }, { "ID": 248, "Comment": "Subset the data set to use base2 instead of base1 for the individuals that have the 2nd baseline Extract values of frogs that have two baselines", "Code": "base2_frogs <-filter(hormones, condition == \"base_02\") ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "os_testosterone_analysis.R" }, { "ID": 249, "Comment": "LMER testosterone by sex and condition with interaction", "Code": "m1_osT <- lmer(log(t_conc_corr) ~ sex*condition + (1|id), data = hormones) ", "Label": "Statistical Modeling", "Source": "https://osf.io/3bpn6/", "File": "os_testosterone_analysis.R" }, { "ID": 250, "Comment": "Sex difference in Tlevel Model plot wit sjPlot", "Code": "plot_model(m1_osT_1, title = \"\", axis.title = \"Fixed effect estimates\", axis.labels = c(\"Time point\\n (back home)\", \"Sex \\n(male)\"), dot.size = 5, line.size = 2, transform = NULL, sort.est = TRUE, colors = \"gs\", vline.color = \"darkgrey\")+ ylim(-0.5, 1) + theme_sjplot2(24) ", "Label": "Visualization", "Source": "https://osf.io/3bpn6/", "File": "os_testosterone_analysis.R" }, { "ID": 251, "Comment": "now the random slopes part of the model including random intercepts and slopes and their correlation:", "Code": "pot.terms.with.corr=paste(c(xnames, fe.me[modes[fe.me]!=\"factor\"]), collapse=\"+\")#get all fixed effects terms together... pot.terms.with.corr=paste(paste(\"(1+\", pot.terms.with.corr, \"|\", re, \")\", sep=\"\"), collapse=\"+\")#... and paste random effects, brackets and all that pot.terms.with.corr=paste(c(xnames, fe.me[modes[fe.me]!=\"factor\"]), collapse=\"+\")#get all fixed effects terms together... pot.terms.with.corr=paste(paste(\"(1+\", pot.terms.with.corr, \"|\", re, \")\", sep=\"\"), collapse=\"+\")#... and paste random effects, brackets and all that ", "Label": "Statistical Modeling", "Source": "https://osf.io/vjeb3/", "File": "diagnostic_fcns.r" }, { "ID": 252, "Comment": " Supplemental Material: To check whether the participants followed gaze at all, we tested the gaze following score against chance level by running a one sample test against zero. Children followed gaze in the additional gazefollowing task (M 3;; SD 1.66;; t(29) 9.89, p .00, d 1.8). 
Correlation GF & Difference Score ", "Code": "Data$DiffSocCon <- as.numeric(Data$DiffSocCon) cor.test(Data$DS, Data$DiffSocCon, method = \"pearson\") #only younger ", "Label": "Statistical Test", "Source": "https://osf.io/4a9b6/", "File": "EXPLORATORY_Gaze Following_ANALYSES.R" }, { "ID": 253, "Comment": "use network data only for students in classrooms with at least 50% response rate", "Code": "dat$nwinclude <- 0 dat[which(dat$prop_part > .50 & !is.na(dat$prop_part)),\"nwinclude\"] <- 1 length(unique(dat[which(dat$nwinclude == 1), \"IDTESTGROUP_FDZ\"])) # 1708 classes with at least 50% response rate length(unique(dat[which(dat$nwinclude == 0), \"IDTESTGROUP_FDZ\"])) # 290 classes excluded for network related analyses dat[which(dat$nwinclude == 0), c(nw_covariates, grep(\"deg\", names(dat), value = T))] <- NA #recode sociometric variables to NA for classes with low level of completeness length(unique(dat[which(dat$nwinclude == 0), \"IDSTUD_FDZ\"])) # 5450 students without sociometric data length(unique(dat[which(dat$nwinclude == 1), \"IDSTUD_FDZ\"])) # 36,920 students with sociometric data ", "Label": "Data Variable", "Source": "https://osf.io/hu2n8/", "File": "01_get_gads.R" }, { "ID": 254, "Comment": "Reorder factor levels: (this will be useful once we create the plot legend)", "Code": "dataset$trial_condition <- factor(dataset$trial_condition, levels = c(\"diff_3sg\", \"diff_3pl\", \"same_3sg\", \"same_3pl\")) dataset$trial_condition_new <- factor(dataset$trial_condition_new, levels = c(\"different number\", \"same number\")) ", "Label": "Visualization", "Source": "https://osf.io/37rfb/", "File": "prediction_plots.R" }, { "ID": 255, "Comment": "Prepare dataframe for eyetracking analysis & graphs (package eyetrackingR required)", "Code": "eyetrackingr.data <- make_eyetrackingr_data(dataset, participant_column = \"participant_number\", trial_column = \"item_nr\", time_column = \"time_ms_absolute\", trackloss_column = \"trackloss\", aoi_columns = c(\"average_target_sample_count_proportion\", \"average_distractor_sample_count_proportion\"), treat_non_aoi_looks_as_missing = TRUE ) ", "Label": "Visualization", "Source": "https://osf.io/37rfb/", "File": "prediction_plots.R" }, { "ID": 256, "Comment": "Remove trackloss per trial > 50% (removed 4 trials)", "Code": "eyetrackingr.data <- clean_by_trackloss(data = eyetrackingr.data, trial_prop_thresh = .5) ", "Label": "Data Variable", "Source": "https://osf.io/37rfb/", "File": "prediction_plots.R" }, { "ID": 257, "Comment": "check histogram for binning", "Code": "hist(quop_use$t1_eff) quop_use$bins<-cut(quop_use$t1_eff,quantile(quop_use$t1_eff,p=seq(0,1,length=29),na.rm=T)) ", "Label": "Visualization", "Source": "https://osf.io/vphyt/", "File": "Sentence_Level.R" }, { "ID": 258, "Comment": "visualize with forest plot: compare corona vs. pre-corona", "Code": "forest(lvd$est[1:16],sei=lvd$se[1:16], ilab = cbind(c(\"Pre-pandemic Cohorts vs. First Pandemic Cohort\",rep(\"\",7),\"Pre-pandemic Cohorts vs.
Second Pandemic Cohort\",rep(\"\",7)), paste0(\"T\",rep(1:8,2)), c(\"Schools Open\",\"???\",\"???\",\"???\",\"Schools Closed\",\"???\",\"???\",\"Alternating Lessons\", \"Schools Open\",\"???\",\"Alternating Lessons\",\"???\",\"???\",\"???\",\"???\",\"Schools Open\")), ilab.xpos = c(-1.35,-.95,-.65), ylim = c(0,22), xlim = c(-1.7,1), slab = NA, rows = c(18:11, 8:1), at = c(-.7,-.35,0,.35,.7), xlab = \"Latent Variance Differences\", col=c(\"gray\",\"gray\", \"gray\",\"gray\", \"black\", \"black\", \"black\", \"black\", \"gray\", \"gray\", \"black\", \"black\", \"black\", \"black\", \"black\", \"gray\")) forest(lmd$est[1:16],sei=lmd$se[1:16], ilab = cbind(c(\"Pre-pandemic Cohorts vs. First Pandemic Cohort\",rep(\"\",7),\"Pre-pandemic Cohorts vs. Second Pandemic Cohort\",rep(\"\",7)), paste0(\"T\",rep(1:8,2)), c(\"Schools Open\",\"???\",\"???\",\"???\",\"Schools Closed\",\"???\",\"???\",\"Alternating Lessons\", \"Schools Open\",\"???\",\"Alternating Lessons\",\"???\",\"???\",\"???\",\"???\",\"Schools Open\")), ilab.xpos = c(-1.35,-.95,-.65), ylim = c(0,22), xlim = c(-1.7,1), slab = NA, rows = c(18:11, 8:1), at = c(-.7,-.35,0,.35,.7), xlab = \"Standardized Latent Mean Differences\", col=c(\"gray\",\"gray\", \"gray\",\"gray\", \"black\", \"black\", \"black\", \"black\", \"gray\", \"gray\", \"black\", \"black\", \"black\", \"black\", \"black\", \"gray\")) ", "Label": "Visualization", "Source": "https://osf.io/vphyt/", "File": "Sentence_Level.R" }, { "ID": 259, "Comment": "to see how well the distances are measured", "Code": "stressplot(species_matrix_short.nmds) #R2 = 0.961, linear R2 = 0.891 data.scores <- as.data.frame(scores(species_matrix_short.nmds$points)) data.scores$site <- substr(rownames(data.scores),1,6) data.scores$plots <- substr(rownames(data.scores),8,15) species.scores <- as.data.frame(scores(species_matrix_short.nmds, \"species\")) species.scores$species <- rownames(species.scores) ", "Label": "Statistical Test", "Source": "https://osf.io/uq3cv/", "File": "5_NMDS_comparing_dawn_and_morning_assemblage_compositions.R" }, { "ID": 260, "Comment": "now statistical test to see if communities are statistically different from one another", "Code": "species_matrix2 <- species_matrix %>% mutate_if(is.numeric, ~1 * (. > 0)) sp_matrix_with_group <- species_matrix2 rownames(sp_matrix_with_group) <- rownames(species_matrix) sp_matrix_with_group$grouping <- substr(rownames(sp_matrix_with_group), 7,13) ano <- anosim(species_matrix2, distance = 'euclidean', grouping = sp_matrix_with_group$grouping) summary(ano) ", "Label": "Statistical Test", "Source": "https://osf.io/uq3cv/", "File": "5_NMDS_comparing_dawn_and_morning_assemblage_compositions.R" }, { "ID": 261, "Comment": " SEM Analysis Section The following section sets up a series of path models. Note that this is a highly revised version of the SEM section, which originally tried to incorporate a complex latent variable model to simultaneously test all hypotheses. I decided to break it down into smaller, simpler models, and to focused on observedvariableonly models using the composite meausres instead. I'm not sure if that's the \"best\" approach or not, but I think it at least makes for a simpler approach. The code below is thus a mixture of what I originally wrote and the revised models. 
Let's check differences between the two groups: ", "Code": "t.test(Measures$Age~Measures$Normative) t.test(Measures$Adj_Income~Measures$Normative) t.test(Measures$Parent_Edu~Measures$Normative) t.test(Measures$Political~Measures$Normative) t.test(Measures$Crit_Reflection_Mean~Measures$Normative) t.test(Measures$Efficacy_Mean~Measures$Normative) t.test(Measures$Crit_Action_Mean~Measures$Normative) t.test(Measures$BLM_Activism~Measures$Normative) t.test(Measures$Militarism_Mean~Measures$Normative) t.test(Measures$EDNhComp~Measures$Normative) t.test(Measures$ACEs_Sum~Measures$Normative) t.test(Measures$MEQ_Total~Measures$Normative) t.test(Measures$Discrim_Major_Sum~Measures$Normative) t.test(Measures$Discrim_Everyday_Mean~Measures$Normative) t.test(Measures$BNSS_Mean~Measures$Normative) t.test(Measures$BNSSh_Mean~Measures$Normative) t.test(Measures$Zero_Sum_Mean~Measures$Normative) sjmisc::frq(Measures$Race, out = \"v\") Measures %>% group_by(Race) %>% summarise(BLM_Mean = mean(BLM_Activism)) data <- Measures %>% filter(Race %in% c(\"Black or African-American\",\"White or Caucasian\") & !is.na(BLM_Activism)) %>% mutate(Black = ifelse(Race == \"Black or African-American\", 1,0)) ", "Label": "Statistical Modeling", "Source": "https://osf.io/xhrw6/", "File": "4_sem_models.R" }, { "ID": 262, "Comment": " To assess BLM Activism by race, keep in mind that the BLM measure is count data (with a lot of zeros). A t-test therefore is not appropriate. We will use \"zero-inflated Poisson regression\" ", "Code": "ggplot(Measures, aes(BLM_Activism)) + geom_histogram() model.zi = zeroinfl(BLM_Activism ~ Black, data = data, dist = \"poisson\") summary(model.zi) Descriptives3 <- Measures %>% select(Normative,Age,Adj_Income,Parent_Edu,Political,ACEs_Sum,EDNhComp,Discrim_Major_Sum,Discrim_Everyday_Mean,MEXQ_Exp, Crit_Reflection_Mean,Efficacy_Mean,Crit_Action_Mean,BLM_Activism, Militarism_Mean) %>% psych::describeBy(group=\"Normative\") %>% as.data.frame() %>% rownames_to_column(var=\"Measure\") %>% select(-vars) DescrNonNorm<- as.data.frame(Descriptives3[1]) %>% rownames_to_column(var=\"Measure\") DescrNorm<- as.data.frame(Descriptives3[2]) %>% rownames_to_column(var=\"Measure\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/xhrw6/", "File": "4_sem_models.R" }, { "ID": 263, "Comment": "Write cleaned covariate files to xlsx", "Code": "cleaned_group_covariates_data <- list( \"cleaned_group_participant\" = cleaned_group_participant_dat, \"cleaned_group_instructional\" = cleaned_group_instructional_dat, \"cleaned_group_DV\" = cleaned_group_DV_dat, \"cleaned_group_race_ethnicity\" = race_eth ) write_xlsx(cleaned_group_covariates_data, path = \"Cleaned data/Group covariates data tables.xlsx\") ", "Label": "Data Variable", "Source": "https://osf.io/b5ydr/", "File": "3-Cleancovariatesforgroupdesigns.R" }, { "ID": 264, "Comment": "Weighted regression model for Ordinary People subscale score (mean)", "Code": "m1.ppl.svyglm.fit <- svyglm(scipopppl ~ age + gender + education.comp + education.uni + sciprox.score + urbanity.log + languageregion.ger + languageregion.ita, design = bar.design.scipopppl, family = gaussian, na.action = na.omit) m2.ppl.svyglm.fit <- svyglm(scipopppl ~ age + gender + education.comp + education.uni + sciprox.score + urbanity.log + languageregion.ger + languageregion.ita + polorientation + religiosity, design = bar.design.scipopppl, family = gaussian, na.action = na.omit) m3.ppl.svyglm.fit <- svyglm(scipopppl ~ age + gender + education.comp + education.uni + sciprox.score + urbanity.log +
languageregion.ger + languageregion.ita + polorientation + religiosity + interestscience + sciliteracy + trustscience + trustscientists, design = bar.design.scipopppl, family = gaussian, na.action = na.omit) ", "Label": "Statistical Modeling", "Source": "https://osf.io/qj4xr/", "File": "03_explaining-scipop-attitudes.R" }, { "ID": 265, "Comment": " Model assumption checks Do some assumption checks for multiple linear regression (e.g., see Field, 2012, p. 292). These are: (1) Multicollinearity (2) Nonnormality/heteroscedasticity of residuals Assumption checks (1): Multicollinearity Specify model and inspect GVIFs Note: The VIF (usual collinearity diagnostic) may not be applicable to models with dummy regressors constructed from a polytomous categorical variable or polynomial regressors (Fox, 2016: 357). Fox and Monette (1992) introduced the generalized variance inflation factor (GVIF) for these cases. As education.uni/education.comp and languageregion.ger/languageregion.ita are such dummy regressors based on polytomous categorical variables (i.e. education and languageregion), we should compute GVIFs. car::vif() does that automatically. GVIFs of SciPop Score models ", "Code": "car::vif(m1.scipopgoertz.svyglm.fit) %>% as.data.frame %>% rename(GVIF = deparse(substitute(.))) car::vif(m2.scipopgoertz.svyglm.fit) %>% as.data.frame %>% rename(GVIF = deparse(substitute(.))) car::vif(m3.scipopgoertz.svyglm.fit) %>% as.data.frame %>% rename(GVIF = deparse(substitute(.)))", "Label": "Statistical Test", "Source": "https://osf.io/qj4xr/", "File": "03_explaining-scipop-attitudes.R" }, { "ID": 266, "Comment": "Nonnormality/heteroscedasticity of residuals of ppl Score models", "Code": "distributionchecks(m1.ppl.svyglm.fit) distributionchecks(m2.ppl.svyglm.fit) distributionchecks(m3.ppl.svyglm.fit)", "Label": "Statistical Modeling", "Source": "https://osf.io/qj4xr/", "File": "03_explaining-scipop-attitudes.R" }, { "ID": 267, "Comment": "ADDITIONAL ANALYSIS B: TESTING A U-SHAPED RELATIONSHIP BETWEEN SCIPOP AND POLITICAL ORIENTATION 1st test: Quadratic regression on weighted data", "Code": "bar2019pop$polorientation2 <- bar2019pop$polorientation^2 bar.design <- svydesign(id = ~0, data = bar2019pop, weights = ~weight) quad.scipopgoertz.svyglm.fit <- svyglm(scipopgoertz ~ age + gender + education.comp + education.uni + sciprox.score + urbanity.log + languageregion.ger + languageregion.ita + polorientation + polorientation2 + religiosity + interestscience + sciliteracy + trustscience + trustscientists, design = bar.design, family = gaussian, na.action = na.omit) summ(model = quad.scipopgoertz.svyglm.fit, binary.inputs = \"full\", n.sd = 2, transform.response = F, confint = F, vifs = F, scale = F, model.info = T, model.fit = T, digits = 4) quad.scipopgoertz.svyglm.fit$aic m0.scipopgoertz.svyglm.fit <- svyglm(scipopgoertz ~ 1, bar.design, family = gaussian, na.action = na.omit) anova(m0.scipopgoertz.svyglm.fit, quad.scipopgoertz.svyglm.fit, test = \"F\", method = \"Wald\") ", "Label": "Statistical Test", "Source": "https://osf.io/qj4xr/", "File": "03_explaining-scipop-attitudes.R" }, { "ID": 268, "Comment": "chi-square to compare vegan in experienced vs. not experienced for each advocacy type: graphic video", "Code": "data %>% xtabs(~ graphic_exp_buc + vegn_bin, .)
%>% print() %>% proportions(\"graphic_exp_buc\") graphic_tmp <- chisq.test(xtabs(~ graphic_exp_buc + vegn_bin, data)) ", "Label": "Statistical Test", "Source": "https://osf.io/3aryn/", "File": "5chisquare_Spanish.R" }, { "ID": 269, "Comment": "Boosted Regression Trees CUS BRT plot", "Code": "CUS.BRT.plot <- ggplot(data = CUS.BRT, aes(x = predictors, y = influence, fill = predictor.type)) + geom_bar(stat = \"identity\") + geom_hline(yintercept = 5, linetype = 3, size = 1, colour = \"gray60\") + scale_fill_manual(values = viridis.3) + coord_flip() + labs(x = \"Predictor\", y = \"Relative Influence\") + scale_y_continuous(breaks = c(0, 10, 20, 30, 40), limits = c(0, 40)) + scale_x_discrete(limits = BRT.plot.label.limits, labels = BRT.plot.labels) + theme_pubr() + theme(legend.position = c(0.85, 0.825), legend.title = element_blank()) ", "Label": "Visualization", "Source": "https://osf.io/62je8/", "File": "DMS-NRSA-CA-QC-Figures.R" }, { "ID": 270, "Comment": "Sets limits of the plot based on user choice", "Code": "limits <- matrix(0,2,2) limits <- if (lim == 1){ limits <-u.plot.limit(min.age,max.age,lim,min.ylim,max.ylim) } else if (lim == 2){ limits <-u.plot.limit(min.age,max.age,lim,min.ylim,max.ylim) } else { limits <-u.plot.limit(min.xlim,max.xlim,lim,min.ylim,max.ylim) } limconc <-uconcplot(limits[1,1],limits[1,2],int) #sets the concordia line based on plot limits plot_limconc <- limconc[2:(nrow(limconc)-1),] ", "Label": "Visualization", "Source": "https://osf.io/p46mb/", "File": "U-PbGeochronologyScripts.R" }, { "ID": 271, "Comment": "BELOW CALCULATES THE ELLIPSES: calculate x/y coords that define the ellipse based on input data including rho", "Code": "ell.coords=list() #sets up list to write results to for(n in 1:n){ covmat <-cor2cov2((Pb7Ue[n]/2),(Pb6Ue[n]/2),rho[n]) nn <- 75 cutoff <- stats::qchisq(1-0.05,2) e <- eigen(covmat) a <- sqrt(cutoff*abs(e$values[1])) # major axis b <- sqrt(cutoff*abs(e$values[2])) # minor axis v <- e$vectors[,1] beta <- atan(v[2]/v[1]) theta <- seq(0, 2 * pi, length=nn) out <- matrix(0,nrow=nn,ncol=3) out[,1] <- Pb7U[n] + a * cos(theta) * cos(beta) - b * sin(theta) * sin(beta) out[,2] <- Pb6U[n] + a * cos(theta) * sin(beta) + b * sin(theta) * cos(beta) out[,3] <- n ", "Label": "Visualization", "Source": "https://osf.io/p46mb/", "File": "U-PbGeochronologyScripts.R" }, { "ID": 272, "Comment": "Functions for plotting models Extracts significance from models for chart legend", "Code": "extractSign <- function (model, modeltype, MtnRanges, InclMIA = TRUE) { coef <- summary(model)$coefficients$cond SignDF <- data.frame(mtnrng = MtnRanges, main = NA, int_ao = NA, int_po = NA) if (InclMIA) { if (modeltype == \"int\") { if (coef[row.names(coef) == \"AO:MtnRange1\", 4] < 0.05) {SignDF$int_ao[SignDF$mtnrng == \"Coast - North\"] <- \"I\"} if (coef[row.names(coef) == \"AO:MtnRange2\", 4] < 0.05) {SignDF$int_ao[SignDF$mtnrng == \"Coast - South\"] <- \"I\"} if (coef[row.names(coef) == \"AO:MtnRange3\", 4] < 0.05) {SignDF$int_ao[SignDF$mtnrng == \"Columbia - North\"] <- \"I\"} if (coef[row.names(coef) == \"AO:MtnRange4\", 4] < 0.05) {SignDF$int_ao[SignDF$mtnrng == \"Columbia - South\"] <- \"I\"} if (coef[row.names(coef) == \"AO:MtnRange5\", 4] < 0.05) {SignDF$int_ao[SignDF$mtnrng == \"Rockies - North\"] <- \"I\"} if (coef[row.names(coef) == \"MtnRange1:PO\", 4] < 0.05) {SignDF$int_po[SignDF$mtnrng == \"Coast - North\"] <- \"I\"} if (coef[row.names(coef) == \"MtnRange2:PO\", 4] < 0.05) {SignDF$int_po[SignDF$mtnrng == \"Coast - South\"] <- \"I\"} if (coef[row.names(coef) ==
\"MtnRange3:PO\", 4] < 0.05) {SignDF$int_po[SignDF$mtnrng == \"Columbia - North\"] <- \"I\"} if (coef[row.names(coef) == \"MtnRange4:PO\", 4] < 0.05) {SignDF$int_po[SignDF$mtnrng == \"Columbia - South\"] <- \"I\"} if (coef[row.names(coef) == \"MtnRange5:PO\", 4] < 0.05) {SignDF$int_po[SignDF$mtnrng == \"Rockies - North\"] <- \"I\"} } } SignAOMain <- paste(\"AO: \", format(round(coef[row.names(coef) == \"AO\", 1], 3), nsmall=3), \" (\", format(round(coef[row.names(coef) == \"AO\", 4], 3), nsmall=3),\")\") SignPOMain <- paste(\"PO: \", format(round(coef[row.names(coef) == \"PO\", 1], 3), nsmall=3), \" (\", format(round(coef[row.names(coef) == \"PO\", 4], 3), nsmall=3),\")\") return(list(SignAOMain = SignAOMain, SignPOMain = SignPOMain, SignDF = SignDF)) } ", "Label": "Visualization", "Source": "https://osf.io/7xsfj/", "File": "Fig05To08.R" }, { "ID": 273, "Comment": "outlier coding M and SD per subject and item depending on distractor condition and soa (independent variables)", "Code": "subj.M.SD = ddply(rawdat,.(subj,cond,soa), summarize, subj.M=mean(RT, na.rm=T), subj.SD=sd(RT, na.rm=T)) tar.M.SD = ddply(rawdat,.(targ_ID,cond,soa), summarize, tar.M=mean(RT, na.rm=T), tar.SD=sd(RT, na.rm=T)) rawdat = merge(rawdat, subj.M.SD, by=c(\"subj\", \"cond\", \"soa\")) rawdat = merge(rawdat, tar.M.SD, by=c(\"targ_ID\", \"cond\", \"soa\")) rawdat$subj.min = (rawdat$subj.M - 2*(rawdat$subj.SD)) rawdat$subj.max = (rawdat$subj.M + 2*(rawdat$subj.SD)) rawdat$tar.min = (rawdat$tar.M - 2*(rawdat$tar.SD)) rawdat$tar.max = (rawdat$tar.M + 2*(rawdat$tar.SD)) ", "Label": "Data Variable", "Source": "https://osf.io/c93vs/", "File": "exp01_prep.R" }, { "ID": 274, "Comment": "calculate the value of the closest peer estimate and store in 'closest'", "Code": "closest<-min(abs(si-firstEstimate))", "Label": "Data Variable", "Source": "https://osf.io/rmcuy/", "File": "Fig." }, { "ID": 275, "Comment": "define Gaussians for updating prior self (firstEstimate)", "Code": "density_self <- log(dnorm(x, firstEstimate, own_sd )) ", "Label": "Statistical Modeling", "Source": "https://osf.io/rmcuy/", "File": "Fig." }, { "ID": 276, "Comment": "1.1. 
Sample 1 PTSD Symptoms (PSSI) Run linear mixed effect model", "Code": "a1_model_01_sg_pssi_outcome <- lme(pssi_end ~ pssi_s0 + sg, random = ~ 1 | id, method = \"ML\", na.action = na.omit, data = data_a1_pssi) ", "Label": "Statistical Modeling", "Source": "https://osf.io/dgt8x/", "File": "04-revisions.R" }, { "ID": 277, "Comment": "Check assumption of normality of the residuals", "Code": "qqnorm(resid(a1_model_01_sg_pssi_outcome)) qqnorm(resid(a2_model_01_sg_pssi_outcome))", "Label": "Statistical Modeling", "Source": "https://osf.io/dgt8x/", "File": "04-revisions.R" }, { "ID": 278, "Comment": " Calculate contrasts specified in the contrast matrix \"contrast_outcome\" ", "Code": "a1_model_01_sg_pssi_outcome_contrasts <- glht(a1_model_01_sg_pssi_outcome, contrast_outcome) a2_model_01_sg_pssi_outcome_contrasts <- glht(a2_model_01_sg_pssi_outcome, contrast_outcome) ", "Label": "Statistical Modeling", "Source": "https://osf.io/dgt8x/", "File": "04-revisions.R" }, { "ID": 279, "Comment": "Show contrasts without adjusting p-values for multiple comparisons", "Code": "summary(a1_model_01_sg_pssi_outcome_contrasts, test = adjusted(\"none\")) summary(a2_model_01_sg_pssi_outcome_contrasts, test = adjusted(\"none\")) ", "Label": "Statistical Test", "Source": "https://osf.io/dgt8x/", "File": "04-revisions.R" }, { "ID": 280, "Comment": " Select estimates, standard error and p-value ", "Code": "table_a1_model_01_sg_pssi_outcome_full <- table_a1_model_01_sg_pssi_outcome_raw %>% dplyr::select(beta = estimate, se = std.error, p = p.value) table_a2_model_01_sg_pssi_outcome_full <- table_a2_model_01_sg_pssi_outcome_raw %>% dplyr::select(beta = estimate, se = std.error, p = p.value) ", "Label": "Data Variable", "Source": "https://osf.io/dgt8x/", "File": "04-revisions.R" }, { "ID": 281, "Comment": "Calculate pooled standard deviation at baseline", "Code": "sd_a1_pssi_s0 <- sd(data_a1_pssi$pssi_s0, na.rm = TRUE) sd_a2_pssi_s0 <- sd(data_a2_pssi$pssi_s0, na.rm = TRUE) ", "Label": "Statistical Modeling", "Source": "https://osf.io/dgt8x/", "File": "04-revisions.R" }, { "ID": 282, "Comment": "Calculate Cohen's d using the absolute value of the baseline adjusted difference from the linear mixed effect model", "Code": "cohens_d_a1_pssi_end <- a1_pssi_end_diff / sd_a1_pssi_s0 cohens_d_a2_pssi_end <- a2_pssi_end_diff / sd_a2_pssi_s0", "Label": "Statistical Test", "Source": "https://osf.io/dgt8x/", "File": "04-revisions.R" }, { "ID": 283, "Comment": "Peer Rank Means and Standard Deviations of Conditions Unspecified Peer Rank", "Code": "round(mean(unspecified$peer.rank), 2) round(sd(unspecified$peer.rank), 2) ", "Label": "Data Variable", "Source": "https://osf.io/9tnmv/", "File": "Exp4_buddhist_post.R" }, { "ID": 284, "Comment": "determines and rounds (to digits as specified in digits) mean and sd of x, and then collapses them into a single entry ", "Code": "xx=as.character(round(c(mean(x, na.rm=T), sd(x, na.rm=T)), digits=digits)) if(any(grepl(x=xx, pattern=\".\", fixed=T))>0){ xx[!grepl(x=xx, pattern=\".\", fixed=T)]=paste(xx[!grepl(x=xx, pattern=\".\", fixed=T)], \"0\", sep=\".\") xx=matrix(unlist(strsplit(as.character(xx), split=\".\", fixed=T)), ncol=2, byrow=T) xx[, 2]=unlist(lapply(xx[, 2], function(x){ paste(c(x, paste(c(rep(\"0\", times=digits-nchar(x))), collapse=\"\")), collapse=\"\") })) xx=apply(xx, 1, paste, collapse=\".\") } paste(xx, collapse=sep) } c.tab<-function(x, digits=NA, n.spaces=1, add.hash=T, incl.fst=F, incl.rownames=T){ ", "Label": "Data Variable", "Source": "https://osf.io/vjeb3/", "File":
"helpers.r" }, { "ID": 285, "Comment": "wrapper for savePlot with file \"clipboard\" and type \"wmf\" (default;; others are possible) ", "Code": "savePlot(file=\"clipboard\", type=type) } overdisp.correction<-function(coeffs, disp.param){ coeffs[, \"Std. Error\"]=coeffs[, \"Std. Error\"]*sqrt(disp.param) coeffs[, \"z value\"]=coeffs[, \"Estimate\"]/coeffs[, \"Std. Error\"] coeffs[, \"Pr(>|z|)\"]=2*pnorm(q=-abs(coeffs[, \"z value\"]), mean=0, sd=1) return(coeffs) } merge.ests.ci.stab<-function(coeffs, ci=NULL, tests=NULL, stab=NULL){ ires=coeffs if(!is.null(ci)){ coeffs=cbind(coeffs, ci[rownames(coeffs), ]) } if(!is.null(tests)){ xx=outer(rownames(tests), rownames(coeffs), Vectorize(function(tt, e){ if(nchar(tt)>nchar(e)){ return(0) }else{ ", "Label": "Visualization", "Source": "https://osf.io/vjeb3/", "File": "helpers.r" }, { "ID": 286, "Comment": "Add the starttime (intra_scope_window[1]) to every start position (scope$start) ... ... of the recording timestamp to get the actual start window in milliseconds", "Code": "starting_times <- df$RecordingTimestamp[scope$start] + as.numeric(intra_scope_window[1]) } if (intra_scope_window[2] != \"end\") { ", "Label": "Data Variable", "Source": "https://osf.io/mp9td/", "File": "get_looks.R" }, { "ID": 287, "Comment": "set to current trial duration to 0", "Code": "current_trial_total_duration[[hn]] <- 0 } current_trial_total_looks[[hn]] <- 0 } current_first_look_duration[[hn]] <- 0 current_first_look_ending_reason[[hn]] <- \"\" first_looks_collection[[hn]]$found_first <- FALSE first_looks_collection[[hn]]$forced_stop <- FALSE } ", "Label": "Data Variable", "Source": "https://osf.io/mp9td/", "File": "get_looks.R" }, { "ID": 288, "Comment": "Return sum log likelihood. Include protection against log(0) problems", "Code": "return(sum(log(pmax(like, 1e-10)))) } else { ", "Label": "Statistical Modeling", "Source": "https://osf.io/tbczv/", "File": "exp1bDead-MNL-SAT-A.r" }, { "ID": 289, "Comment": "Plot posterior fit and forward simulation prediction", "Code": "plot_fits <- function(stan_fit_ex, N_t, N_p, obs_times, pred_times, data_vector, S_0, D_0, M_0, filename) { sigma_hat <- median(stan_fit_ex$sigma) CO2_flux_ratios_hat_median <- rep(NA, N_t) #Pre-allocate vector for median model fit CO2_flux_ratios_pred_median <- rep(NA, N_p) sigma_hat <- median(stan_fit_ex$sigma) for (t in 1:N_t) { CO2_flux_ratios_hat_median[t] <- median(stan_fit_ex$CO2_flux_ratios_hat_vector[,t]) } for (t in 1:N_p) { CO2_flux_ratios_pred_median[t] <- median(stan_fit_ex$CO2_flux_ratios_new_vector[,t]) } ", "Label": "Visualization", "Source": "https://osf.io/7mey8/", "File": "stan_CON_adriana_pools5i.r" }, { "ID": 290, "Comment": "fit linear mixed effects models null model", "Code": "fitlmer0 <- lmer(rating ~ 1 + (1|subject), data = dat.long1, REML = FALSE) summary(fitlmer0) ", "Label": "Statistical Modeling", "Source": "https://osf.io/eg6w5/", "File": "experiment1c_analyses.R" }, { "ID": 291, "Comment": "the mean of each parameter across iterations. Keep dimensions for parameters and subjects", "Code": "mean.params <- t(apply(sampled$samples$alpha[,,keep],1:2,mean)) ", "Label": "Statistical Modeling", "Source": "https://osf.io/wbyj7/", "File": "pmwg-DIC.r" }, { "ID": 292, "Comment": "plot the calibration curve (Abs vs. 
{ "ID": 293, "Comment": "generate MPT data:", "Code": "gendat <- genMPT(theta = pnorm(theta), restrictions = \"model/restrictions.txt\", numItems = numItems, eqnfile=\"model/2htsm.eqn\") ", "Label": "Data Variable", "Source": "https://osf.io/s82bw/", "File": "04_simulation_continuous_predictor.R" },
{ "ID": 294, "Comment": "Recode confirm (oppose/confirm) and won (win/lost) to factors", "Code": "DF$confirm <- recode(DF$confirm, `0` = \"Confirmed\", `1` = \"Opposed\") DF$won <- recode(DF$won, `0` = \"Lost\", `1` = \"Won\") DF <- DF %>% mutate(confirm = as_factor(confirm)) %>% mutate(treatment = as_factor(treatment)) %>% mutate(won = as_factor(won)) %>% mutate(majmin = as_factor(majmin)) %>% mutate(experiment = as_factor(experiment)) %>% mutate(evidence = as_factor(evidence)) summary(DF) ", "Label": "Data Variable", "Source": "https://osf.io/9gjyc/", "File": "BRMS + Figure4.R" },
{ "ID": 295, "Comment": "create unique id number for each unique individual", "Code": "mutate(uniq.id = group_indices(., id, experiment)) %>% mutate(uniq.id = as_factor(uniq.id)) summary(Ind_E1) M_Ind_E1 <- brm(changed ~ won*confirm + trial + (1|uniq.id), data = Ind_E1, family = \"bernoulli\", iter = 6000, chains = 3, cores = 3, save_all_pars = TRUE, file = \"Ind_E1\") summary(M_Ind_E1) plot(M_Ind_E1, ask = FALSE) pp_check(M_Ind_E1, check = \"distributions\") pp_check(M_Ind_E1, check = \"residuals\") pp_check(M_Ind_E1, \"error_scatter_avg\") pp_check(M_Ind_E1, check = \"scatter\") ", "Label": "Data Variable", "Source": "https://osf.io/9gjyc/", "File": "BRMS + Figure4.R" },
{ "ID": 296, "Comment": "generates 95% confidence intervals for each beta coefficient", "Code": "m.p1_decision.CI <- round(confint(m.p1_decision, parm = \"beta_\"), 3) ", "Label": "Statistical Test", "Source": "https://osf.io/uygpq/", "File": "Within-paradigm.R" },
{ "ID": 297, "Comment": "exclude people who don't meet the inclusion criteria of having data for at least 3 days in each week", "Code": "person_weekly_entries <- dat2019 %>% group_by(identity_id, woy) %>% summarise(n_days = length(identity_id)) person_weekly_entries <- as.data.frame(person_weekly_entries) person_weekly_entries <- subset(person_weekly_entries, n_days >= 3) ", "Label": "Data Variable", "Source": "https://osf.io/wyrav/", "File": "Dataprep-ParticipantResponses.R" },
{ "ID": 298, "Comment": " construct a maximal glmer() model. This model contains a fixed within-subjects effect of Ambiguity (effect-coded with 0.5 = amb), and between-subjects fixed effects for Vocabulary and ART test scores, plus random effects by participants and items. ", "Code": "Acc.max <- glmer(accuracy ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CohOnly, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) ", "Label": "Statistical Modeling", "Source": "https://osf.io/hn3bu/", "File": "AnalysisCode.R" },
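Maximal glmer() models like Acc.max in ID 298 frequently fail to converge or return singular fits; a common diagnostic step afterwards (an editorial sketch, not part of the source script) is:

library(lme4)
isSingular(Acc.max)                 # TRUE flags an overparameterized random-effects structure
Acc.max@optinfo$conv$lme4$messages  # convergence warnings, if any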
{ "ID": 299, "Comment": " construct a maximal lmer() model. This model contains a fixed within-subjects effect of Ambiguity (effect-coded with 0.5 = amb), and between-subjects fixed effects for Vocabulary and ART test scores, plus random effects by participants and items. Maximal model with interactions ", "Code": "TrialDwellTime.max <- lmer(logTrialDwellTime ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE) ", "Label": "Statistical Modeling", "Source": "https://osf.io/hn3bu/", "File": "AnalysisCode.R" },
{ "ID": 300, "Comment": " _first-fixation duration: construct a maximal lmer() model. Because not all the necessary models converge, remove correlations between random effects for all models for the first-fixation duration measure: ", "Code": "AOIKey.FF.reduced <- lmer(logFirstFixDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 | RECORDING_SESSION_LABEL) + (0 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 | item) + (0 + Vocab.Cent | item) + (0 + ART.Cent | item), data = Data.CorrTrials, REML=FALSE) AOICohcue.FF.reduced <- lmer(logFirstFixDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 | RECORDING_SESSION_LABEL) + (0 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 | item) + (0 + Vocab.Cent | item) + (0 + ART.Cent | item), data = Data.CorrTrials, REML=FALSE) ", "Label": "Statistical Modeling", "Source": "https://osf.io/hn3bu/", "File": "AnalysisCode.R" },
{ "ID": 301, "Comment": "_gaze duration: construct a maximal lmer() model", "Code": "AOIKey.GazeDur.max <- lmer(logGazeDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE) AOICohcue.GazeDur.max <- lmer(logGazeDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE) AOISpillover.GazeDur.max <- lmer(logGazeDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE) ", "Label": "Statistical Modeling", "Source": "https://osf.io/hn3bu/", "File": "AnalysisCode.R" },
{ "ID": 302, "Comment": "_regressions out: construct a maximal glmer() model", "Code": "AOIKey.RegrOut.max <- glmer(IA_REGRESSION_OUT ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) AOICohcue.RegrOut.max <- glmer(IA_REGRESSION_OUT ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) AOISpillover.RegrOut.max <- glmer(IA_REGRESSION_OUT ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) ", "Label": "Statistical Modeling", "Source": "https://osf.io/hn3bu/", "File": "AnalysisCode.R" },
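On the "remove correlations" device in IDs 300 and 303: in lme4 syntax, (1 + x | g) estimates a correlation between random intercepts and slopes, whereas (1 | g) + (0 + x | g) fixes that correlation to zero. For numeric covariates the double-bar form is an equivalent shorthand (schematic; y, x, g, and d are placeholders, not names from the source):

library(lme4)
m1 <- lmer(y ~ x + (1 | g) + (0 + x | g), data = d)  # explicit uncorrelated terms, as in ID 300
m2 <- lmer(y ~ x + (1 + x || g), data = d)           # double-bar shorthand, same model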
{ "ID": 303, "Comment": "_first-fixation duration: construct a maximal lmer() model", "Code": "AOISpillover.FF.max <- lmer(logFirstFixDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE) AOIWrapUp.FF.max <- lmer(logFirstFixDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE) AOICohcue.FF.max <- lmer(logFirstFixDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE) ", "Label": "Statistical Modeling", "Source": "https://osf.io/hn3bu/", "File": "AnalysisCode.R" },
{ "ID": 304, "Comment": "Creating subsets of coins by time period and obtaining measures for each of the time periods", "Code": "for (i in dates) { dsub <- (subset(df, df$DATE == i)) motifs <- cbind(dsub[318:681]) denom <- cbind(dsub[256:309]) DATE[i] <- i HDenomination[i] <- entropy(denom) HMotifs[i] <- entropy(motifs) CEDenominationsMotifs[i] <- condentropy(denom, motifs) NormCEDenominationsMotifs[i] <- condentropy(denom, motifs) / entropy(denom) CEMotifsDenominations[i] <- condentropy(motifs, denom) NormCEMotifsDenominations[i] <- condentropy(motifs, denom) / entropy(motifs) MI[i] <- mutinformation(denom, motifs) NDenominations[i] <- length(unique(dsub$DENOMINATION)) NCoins[i] <- length(unique(dsub$ID)) } ", "Label": "Data Variable", "Source": "https://osf.io/uckzx/", "File": "P2_analysis_newbins.R" },
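The entropy(), condentropy(), and mutinformation() calls in ID 304 are from the infotheo package, whose plug-in estimators satisfy H(D|d) = H(D) - I(D;d) exactly. A toy sanity check (editorial sketch, simulated data):

library(infotheo)
set.seed(1)
denom <- sample(1:4, 200, replace = TRUE)   # stand-ins for the coin variables
motifs <- sample(1:6, 200, replace = TRUE)
all.equal(condentropy(denom, motifs),
          entropy(denom) - mutinformation(denom, motifs))  # TRUE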
{ "ID": 305, "Comment": "STATISTICAL TESTING: non-parametric Spearman's correlation, non-normalized conditional entropy", "Code": "cor.test(results$DATE, results$CEDenominationsMotifs, method = \"spearman\") rdates <- rev(dates) #dates = years BCE plot(results$DATE, results$CEDenominationsMotifs, xlim = c(600,330), xaxt='n', xlab = \"YEAR BCE\", ylab = \"H(D|d)\", main = \"P2: Conditional entropy of denomination given designs\") axis(1, at = rdates, labels = rdates) ", "Label": "Statistical Test", "Source": "https://osf.io/uckzx/", "File": "P2_analysis_newbins.R" },
{ "ID": 306, "Comment": " Plots with H(D|d), non-normalized conditional entropy. Getting mean, median, and standard deviation across different authorities per period for non-normalized conditional entropy of denomination given designs ", "Code": "N <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = length) MEAN <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = mean) MEDIAN <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = median) SD <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = sd) resultspoleis_summary <- cbind.data.frame(N, MEAN$CEDenominationsMotifs, MEDIAN$CEDenominationsMotifs, SD$CEDenominationsMotifs) colnames(resultspoleis_summary) <- c(\"DATE\",\"N\",\"MEAN\",\"MEDIAN\",\"SD\") resultspoleis_summary$SE <- resultspoleis_summary$SD / sqrt(resultspoleis_summary$N) ", "Label": "Visualization", "Source": "https://osf.io/uckzx/", "File": "P2_analysis_newbins.R" },
{ "ID": 307, "Comment": "Plot mean and median H(D|d) across authorities per period", "Code": "require(ggplot2) ggmean <- ggplot(resultspoleis_summary,aes(x=DATE,y=MEAN)) + labs(title = \"P2: Mean conditional entropy of denominations given designs across authorities\", x = \"Year BCE\", y = \"mean H(D|d) across authorities\") + scale_x_reverse() + geom_errorbar(aes(ymin=resultspoleis_summary$MEAN-resultspoleis_summary$SE, ymax=resultspoleis_summary$MEAN+resultspoleis_summary$SE),width=.1) + geom_line() + geom_point() ggmean ggmedian <- ggplot(resultspoleis_summary,aes(x=DATE,y=MEDIAN)) + labs(title = \"P2: Median conditional entropy of denominations given designs across authorities\", x = \"Year BCE\", y = \"median H(D|d) across authorities\") + scale_x_reverse() + geom_line() + geom_point() ggmedian ", "Label": "Visualization", "Source": "https://osf.io/uckzx/", "File": "P2_analysis_newbins.R" },
{ "ID": 308, "Comment": "REGRESSION ANALYSIS: GROUPING BY AUTHORITIES, non-normalized CE", "Code": "resultspoleis$AUTHORITIES <- rownames(resultspoleis) resultspoleis$DATE <- as.numeric(as.character(resultspoleis$DATE))", "Label": "Statistical Modeling", "Source": "https://osf.io/uckzx/", "File": "P2_analysis_newbins.R" },
{ "ID": 309, "Comment": " Mean-imputes missing values for a vector x. NOTE: see http://www.mail-archive.com/r-help@r-project.org/msg58289.html Args: x: numeric vector. Returns: x, with missing values replaced by mean(x) ", "Code": "return(replace(x, is.na(x), mean(x, na.rm = T))) } makeConstructMatrix <- function(net, a) { ", "Label": "Data Variable", "Source": "https://osf.io/2phst/", "File": "mse_values_for_density_smoother.R" },
{ "ID": 310, "Comment": "how many missing data points for WVSES questions?", "Code": "sum(is.na(df21$WVSE1)) sum(is.na(df21$WVSE2))", "Label": "Data Variable", "Source": "https://osf.io/9jzfr/", "File": "20180714Study2analysisscriptextradata.R" },
{ "ID": 311, "Comment": "To log-transform Years", "Code": "TEST_data <- TEST_data %>% mutate(Yearslog = log(Years))", "Label": "Data Variable", "Source": "https://osf.io/4g2tu/", "File": "custom_functions.R" },
{ "ID": 312, "Comment": "Making factors for reasons based on factor analysis. This makes a new data frame that also has plans", "Code": "TEST_data_withplans <- dplyr::select( TEST_data, c( 'Hierarchy':'Cooperate', 'plan_materials', 'plan_MS', 'self_MS_prestige', 'self_MS_cooper', 'self_materials_prestige', 'self_materials_cooper' ) ) ", "Label": "Data Variable", "Source": "https://osf.io/4g2tu/", "File": "custom_functions.R" },
{ "ID": 313, "Comment": "Word cloud ggplot", "Code": "ggwordcloudc <- function(x,y){ freqt<-count(x,y) set.seed(42) ggplot(freqt, aes( label = y, size = n, color=factor(sample.int(10,nrow(x),replace=TRUE)))) + geom_text_wordcloud(area_corr = TRUE) + scale_size_area(max_size = 24) + theme_minimal() } ", "Label": "Visualization", "Source": "https://osf.io/4g2tu/", "File": "custom_functions.R" },
{ "ID": 314, "Comment": " Calculate quantiles for eigenvalues, but only store those from simulated CF model in percentile1 ", "Code": "percentile <- apply(parallel$values, 2, function(x) quantile(x, .95)) min <- as.numeric(nrow(obs)) min <- (4 * min) - (min - 1) max <- as.numeric(nrow(obs)) max <- 4 * max percentile1 <- percentile[min:max] ", "Label": "Data Variable", "Source": "https://osf.io/4g2tu/", "File": "custom_functions.R" },
{ "ID": 315, "Comment": "Label the y-axis 'Eigenvalue'", "Code": "scale_y_continuous(name = 'Eigenvalue') +", "Label": "Visualization", "Source": "https://osf.io/4g2tu/", "File": "custom_functions.R" },
{ "ID": 316, "Comment": "Label the x-axis 'Factor Number', and ensure that it ranges from 1 to the max number of factors, increasing by one with each 'tick' mark. ", "Code": "scale_x_continuous(name = 'Factor Number', breaks = min(eigendat$num):max(eigendat$num)) + ", "Label": "Visualization", "Source": "https://osf.io/4g2tu/", "File": "custom_functions.R" },
{ "ID": 317, "Comment": "drop rows without FID data and without distance-to-burrow data", "Code": "fid <- fid[!is.na(fid$FID),] fid <- fid[!is.na(fid$dist_burrow),] ", "Label": "Data Variable", "Source": "https://osf.io/3wy58/", "File": "bivariate_model_summer_revision.R" },
{ "ID": 318, "Comment": "get probability contours for plot", "Code": "kd <- ks::kde(plot_data, compute.cont=TRUE) contour_90 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont[\"10%\"])[[1]]) contour_90 <- data.frame(contour_90) contour_80 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont[\"20%\"])[[1]]) contour_80 <- data.frame(contour_80) contour_70 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont[\"30%\"])[[1]]) contour_70 <- data.frame(contour_70) contour_60 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont[\"40%\"])[[1]]) contour_60 <- data.frame(contour_60) contour_50 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont[\"50%\"])[[1]]) contour_50 <- data.frame(contour_50) contour_40 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont[\"60%\"])[[1]]) contour_40 <- data.frame(contour_40) contour_30 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont[\"70%\"])[[1]]) contour_30 <- data.frame(contour_30) contour_20 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont[\"80%\"])[[1]]) contour_20 <- data.frame(contour_20) contour_10 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont[\"90%\"])[[1]]) contour_10 <- data.frame(contour_10) ", "Label": "Visualization", "Source": "https://osf.io/3wy58/", "File": "bivariate_model_summer_revision.R" },
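The nine near-identical blocks in ID 318 follow one pattern; an equivalent loop over the probability levels (a sketch assuming the same kd object from ks::kde):

contours <- lapply(seq(10, 90, by = 10), function(p) {
  data.frame(with(kd, contourLines(x = eval.points[[1]], y = eval.points[[2]],
                                   z = estimate, levels = cont[paste0(p, "%")])[[1]]))
})
names(contours) <- paste0("contour_", seq(90, 10, by = -10))  # cont["10%"] -> contour_90, etc.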
{ "ID": 319, "Comment": "p-curve: pcurve(m.cor). Funnel plot, symmetry test (Egger's regression), fail-safe N ", "Code": "pdf(\"Self_control_Funnel.pdf\",width=5,height=5) funnel(m.cor,xlab = \"Correlation\") dev.off() eggers.test(x = m.cor) fsn(yi=m.cor$TE, sei=m.cor$seTE, type=\"Rosenthal\") plot(copas(m.cor)) copas(m.cor) ", "Label": "Visualization", "Source": "https://osf.io/sqfnt/", "File": "Goal" },
{ "ID": 320, "Comment": " H3a: stress ~ trust. Set Cauchy prior (0, 1) as stated in the preregistration ", "Code": "prior.coef <- brms::prior(cauchy(0,1),class='b') ", "Label": "Statistical Modeling", "Source": "https://osf.io/z39us/", "File": "Posthoc_H3b.R" },
{ "ID": 321, "Comment": "Fit logistic model to each individual's data to estimate PSEs", "Code": "jdat$PSE <- 500 for(i in as.numeric(levels(jdat$Subject))){ dat <- subset(jdat, Subject==i) if(dat$cannot_fit[1]==0){ fit.glm <- glm(faster ~ Distort, family=binomial, data=dat) jdat$PSE[jdat$Subject==i] <- -coef(fit.glm)[1]/coef(fit.glm)[2] #PSE is -b0/b1 } } ", "Label": "Statistical Modeling", "Source": "https://osf.io/wxgm5/", "File": "Exp2_judgment.R" },
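Why PSE = -b0/b1 in ID 321: the model sets logit P(faster) = b0 + b1 * Distort, and the point of subjective equality is where P = .5, i.e. where the linear predictor is zero, giving Distort = -b0/b1. A quick check against the fitted object from the record:

pse <- -coef(fit.glm)[1] / coef(fit.glm)[2]
plogis(coef(fit.glm)[1] + coef(fit.glm)[2] * pse)  # 0.5 by construction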
{ "ID": 322, "Comment": "Linear mixed-effects model for graded judgments", "Code": "mod_full <- lmer(GradedJudge ~ Context*(GMSI_Gen_Z + Order*Distort) + (1|Subject), data=jdat2) summary(mod_full) Anova(mod_full, type=3, test='Chisq') ", "Label": "Statistical Modeling", "Source": "https://osf.io/wxgm5/", "File": "Exp2_judgment.R" },
{ "ID": 323, "Comment": "Custom function for weighted Likert plot", "Code": "panel <- function(...){ HH::panel.likert(...) vals <- list(...) df <- data.frame(x = vals$x, y = vals$y, groups = vals$groups) grps <- as.character(df$groups) for(i in 1:length(colnames(likertscipop.df))){ grps <- sub(paste0('^', colnames(likertscipop.df)[i]), i, grps) } df <- df[order(df$y,grps),] df$correctX <- ave(df$x, df$y, FUN = function(x){ x[x < 0] <- rev(cumsum(rev(x[x < 0]))) - x[x < 0]/2 x[x > 0] <- cumsum(x[x > 0]) - x[x > 0]/2 return(x) }) subs <- sub(' Positive$', '', df$groups) collapse <- subs[-1] == subs[-length(subs)] & df$y[-1] == df$y[-length(df$y)] df$abs <- abs(df$x) df$abs[c(collapse, F)] <- df$abs[c(collapse, F)] + df$abs[c(F, collapse)] df$correctX[c(collapse, F)] <- 0 df <- df[c(T, !collapse),] df$perc <- round(ave(df$abs, df$y, FUN = function(x){x/sum(x) * 100}), 1) df$perc <- paste0(df$perc,'%') df$perc[df$perc == \"0%\"] <- \"\" lattice::panel.text(x = df$correctX, y = df$y, label = df$perc, cex = 1.2, font = 1, col = \"white\") } ", "Label": "Visualization", "Source": "https://osf.io/qj4xr/", "File": "02_prevalence-of-scipop.R" },
{ "ID": 324, "Comment": "calculate mean and sd in deduplicated data set", "Code": "means <- c(means, paste0(Round(mean(desc[!duplicated(desc$id), i])), \" (\", Round(sd(desc[!duplicated(desc$id), i])), \")\")) } } ", "Label": "Data Variable", "Source": "https://osf.io/nxyh3/", "File": "02b_Descriptives_Study2.R" },
{ "ID": 325, "Comment": " For each person, combine component scores F with loading matrix B and error values E to obtain the latent structure. For each person, standardize the data using the within-person mean and standard deviation ", "Code": "Y = matrix(0,nrow(data),P) colnames(Y) = sprintf(\"Y%d\",seq(1:P)) Error.var = matrix(0,nrow(data),P) colnames(Error.var) = sprintf(\"E%d\",seq(1:P)) FB.var = matrix(0,nrow(data),P) colnames(FB.var) = sprintf(\"FB%d\",seq(1:P)) for (i in 1:N){ n.i = which(data$ID==i) E = sqrt(var.E)*mvrnorm(length(n.i), rep(0,P), diag(P)) Y[n.i,] = F[n.i,] %*% t(B) + E Error.var[n.i,] = E FB.var[n.i,] = F[n.i,] %*% t(B) } data = cbind(data,Y) return(list(Sigma.Psi=NULL,Psi=Psi,Psi.i=NULL,lambda.max.list=NULL,Components=F,data=data, Error.var=Error.var,FB.var=FB.var)) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/rs6un/", "File": "Data.PC.VAR.Fixed.R" },
{ "ID": 326, "Comment": " Statistical analysis: make GramGender a factor with two levels (feminine, masculine) ", "Code": "subset.for.plot$GramGender <- as.factor(subset.for.plot$GramGender)", "Label": "Data Variable", "Source": "https://osf.io/przvy/", "File": "study1-rscript.R" },
{ "ID": 327, "Comment": "Summary statistics on the reported frequency of the structures", "Code": "data_struc_names <- as.data.frame(sort(table(data$structures_names))) data_struc_names range(table(data$structures_names)) mean(table(data$structures_names)) sd(table(data$structures_names)) median(table(data$structures_names)) sort(table(data$structures_names))", "Label": "Data Variable", "Source": "https://osf.io/fwc2p/", "File": "Keuken_etal_UHF_MRI_review_analysis_script.r" },
{ "ID": 328, "Comment": "Exclude subjects who have missing values on the ability test or on self-viewed ability", "Code": "mst2 <- subset(mst2, (Raven_self != \"NA\") & (Raven_obj != \"NA\") ) ", "Label": "Data Variable", "Source": "https://osf.io/m6pb2/", "File": "Data_preparation_Sample_A.R" },
{ "ID": 329, "Comment": " DESCRIPTIVE STATISTICS: compute and save sample statistics (age distribution, number of females) ", "Code": "age <- round(select(psych::describe(mst2_descr$age), n, min, max, mean, sd),2) age$n <- nrow(mst2_descr) sampstats <- mutate(age, female=plyr::count(mst2_descr$sex)[plyr::count(mst2_descr$sex)[,1]==\"1\",][\"freq\"] ) write.table(sampstats, file=\"Descriptives/age_sex_Sample_A_mst2.dat\", sep=\"\\t\", row.names=FALSE) ", "Label": "Data Variable", "Source": "https://osf.io/m6pb2/", "File": "Data_preparation_Sample_A.R" },
{ "ID": 330, "Comment": "get estimated variance of tanh^-1(p hat)", "Code": "dvartanh <- (1-(pe^2))^2 vartanh <- v/dvartanh vartanh", "Label": "Statistical Modeling", "Source": "https://osf.io/9jzfr/", "File": "metaBigFiveextraversion.R" },
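The two lines of ID 330 are the delta method applied to Fisher's z: with z = atanh(p) and dz/dp = 1/(1 - p^2), the approximation Var(z) ≈ Var(p) / (1 - p^2)^2 is exactly v divided by dvartanh. The same thing as a one-liner (pe and v as in the record):

vartanh <- v / (1 - pe^2)^2  # delta-method variance of atanh(pe)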
"Label": "Statistical Modeling", "Source": "https://osf.io/3uyjt/", "File": "tree.R" }, { "ID": 338, "Comment": "then determine which response was given unbiased guessing probablity of 1/N", "Code": "p.guess <- rep(1/n.acc, n.acc) out$R[timer.replace] <- sample(1:n.acc, size=sum(timer.replace), replace=TRUE, prob=p.guess) } out } ", "Label": "Statistical Modeling", "Source": "https://osf.io/tbczv/", "File": "TRDM-functions.r" }, { "ID": 339, "Comment": " BassAckward EFA We use bassAckward() to get the correlations between factors from successive solutions, so that we can create the hierarchical diagrams. The correlations are contained in the \"bass.ack\" result (See capture.output below). In multifactor solutions, the factors obtained from fa() and bassAckward() can be matched by their sequential order (i.e., first factor in fa() is the same as the first factor from bassAckward()). Note: BassAckwards is consistent with faCor but not factor.scores ", "Code": "bass.usm=bassAckward(usm, nfactors=7, fm='ml', cut = .45, lr=F, items=F, plot=T) bass.uss=bassAckward(uss, nfactors=8, fm='ml', cut = .45, lr=F, items=F, plot=T) bass.its=bassAckward(its, nfactors=9, fm='ml', cut = .45, lr=F, items=F, plot=T) bassAckward.diagram(bass.its, lr=T, items=F, cut=.6) capture.output(bass.usm[[\"bass.ack\"]],file='bassAck USM.csv') capture.output(bass.uss[[\"bass.ack\"]],file='bassAck USS.csv') capture.output(bass.its[[\"bass.ack\"]],file='bassAck ITS.csv') ", "Label": "Data Variable", "Source": "https://osf.io/w7afh/", "File": "EFA script.R" }, { "ID": 340, "Comment": " In the loop below, users define the datasets and the number of solutions to obtain, and the script returns the fa() results, the structure matrices with item labels, and factor congruence coefficients across samples. res stores complete fa results load stores structure matrices pv stores percent of variance accounted by the factors in each solution fcong stores matrices with factor congruence coefficients ", "Code": "for (i in 1:9) { # i = number of solutions we want for(s in seq_along(dl)) { # for each sample in \"dl\"... 
id=c('M','U','I')[s] # letter identifier for each sample: M = Mturk, U = US students, I = Italian students fa1=fa(dl[[s]],nfactors=i,fm='ml') # conduct EFA (oblimin rotation, ML estimation) pv[[s]][[i]]=fa1$Vaccounted[2,] # proportion variance accounted for by each factor colnames(fa1$Structure)=sub('ML',id,colnames(fa1$Structure)) # rename the factor labels according to sample res[[s]][[i]]=fa1 # append fa results to list load[[s]][[i]]=merge(unclass(fa.sort(fa1$Structure)), labels, by='row.names', sort=F) # attach item labels to structure loadings and append to list write.table(rbind(load[[s]][[i]],''), file=paste0(\"load\",s,\".csv\"), row.names=F, sep=',', append=T) # write matrix of structure loadings to file } fcong[[paste0(i,'_USM_USS')]]=fa.congruence(res[[1]][[i]],res[[2]][[i]],structure=T) # append factor congruence coefficients for usm and uss fcong[[paste0(i,'_USM_ITS')]]=fa.congruence(res[[1]][[i]],res[[3]][[i]],structure=T) fcong[[paste0(i,'_USS_ITS')]]=fa.congruence(res[[2]][[i]],res[[3]][[i]],structure=T) } capture.output(fcong,file='fcong.csv') capture.output(pv,file='pvaccounted.csv') rm(fa1,fcong,load,pv,res,i,s,id) # cleaning afterwards ", "Label": "Statistical Modeling", "Source": "https://osf.io/w7afh/", "File": "EFA script.R" }, { "ID": 341, "Comment": " Onefactor EFA Here, we obtain the correlations among the four factors (obliquely rotated) and use them to find the factor loadings on a general factor We also compute and save the factor scores for the 4 factors and the general factor 4factor EFA ", "Code": "fa4.usm=fa(usm[,(items),with=F], nfactors=4, fm='ml') fa4.uss=fa(uss[,(items),with=F], nfactors=4, fm='ml') fa4.its=fa(its[,(items),with=F], nfactors=4, fm='ml') ", "Label": "Statistical Modeling", "Source": "https://osf.io/w7afh/", "File": "EFA script.R" }, { "ID": 342, "Comment": "1factor EFA using the factor scores (we need these scores in order to obtain the general factor scores)", "Code": "fa1.usm=fa(usm[,(fnames),with=F], nfactors=1, fm='ml') fa1.uss=fa(uss[,(fnames),with=F], nfactors=1, fm='ml') fa1.its=fa(its[,(fnames),with=F], nfactors=1, fm='ml') ", "Label": "Statistical Modeling", "Source": "https://osf.io/w7afh/", "File": "EFA script.R" }, { "ID": 343, "Comment": " in case xlim is set to 0, and some value has been given to xrange, center plot symmetrically on zero, using maximal extension in case range is set to 0, and given range otherwise ", "Code": "if (xlim[1]==0 & !is.null(xrange)) { if (xrange==0) { maxext <- max(abs(min(paramSampleVec)), abs(max(paramSampleVec))) #largest extension into positive or negative range xlim = c(-maxext, maxext) #centers plot symmetrically on zero } else { xlim = c(-xrange, xrange) } } ", "Label": "Visualization", "Source": "https://osf.io/qy5sd/", "File": "plotPostKO.R" }, { "ID": 344, "Comment": " compute and save correlation table of selfrated and objective ability measures and outcome aggregates, ", "Code": "varnames <- c(\"Raven\",\"reasoning_self\",\"MWTB\",\"vocabulary_self\",\"global_selfevaluation\",\"well_being\",\"agency_self\",\"communion_self\",\"agency_peer\",\"communion_peer\") outcomes_pils <- dplyr::select(pils, Z_Raven_obj, Z_Raven_self, Z_MWTB_obj, Z_MWTB_self, Z_global_selfeval, Z_well_being, Z_agency_self, Z_comm_self, Z_agency_peer, Z_comm_peer) names(outcomes_pils) <- varnames cor_aggr <- corcons(outcomes_pils) write.table(cor_aggr, file=\"Descriptives/correlations_aggr_Sample_B_pils.dat\", sep=\"\\t\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/m6pb2/", "File": 
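The one-liner in ID 345 works because under the null hypothesis each qnorm(pp) is standard normal, so the sum of k of them divided by sqrt(k) is again N(0,1). Toy usage (editorial sketch):

stouffer <- function(pp) sum(qnorm(pp), na.rm = TRUE) / sqrt(sum(!is.na(pp)))
set.seed(1)
z <- stouffer(runif(20))  # ~ N(0,1) when all nulls are true
pnorm(z)                  # combined one-sided p-value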
"Data_preparation_Sample_B.R" }, { "ID": 345, "Comment": "Function 5 Stouffer test for a vector of ppvalues", "Code": "stouffer=function(pp) sum(qnorm(pp),na.rm=TRUE)/sqrt(sum(!is.na(pp))) ", "Label": "Statistical Test", "Source": "https://osf.io/ujpyn/", "File": "pcurve_app4.052.r" }, { "ID": 346, "Comment": "1.4 Create family to turn t>F and z>chi2", "Code": "family=test family=ifelse(test==\"t\",\"f\",family) family=ifelse(test==\"z\",\"c\",family) ", "Label": "Statistical Test", "Source": "https://osf.io/ujpyn/", "File": "pcurve_app4.052.r" }, { "ID": 347, "Comment": "1.9 Take value after equal sign, the value of the teststatistic, and put it in vector \"equal\" ", "Code": "equal=abs(as.numeric(substring(raw,eq+1)))", "Label": "Data Variable", "Source": "https://osf.io/ujpyn/", "File": "pcurve_app4.052.r" }, { "ID": 348, "Comment": "Compute ppvalues for the half", "Code": "pp33.half=ifelse(family==\"f\" & p<.025, (1/prop25)*( pf(value,df1=df1,df2=df2,ncp=ncp33)-(1-prop25)),NA) pp33.half=ifelse(family==\"c\" & p<.025, (1/prop25)*(pchisq(value,df=df1, ncp=ncp33)-(1-prop25)),pp33.half) pp33.half=pbound(pp33.half) ", "Label": "Statistical Modeling", "Source": "https://osf.io/ujpyn/", "File": "pcurve_app4.052.r" }, { "ID": 349, "Comment": "remove lower triangle of correlation matrix", "Code": "else if(removeTriangle[1]==\"lower\"){ Rnew <- as.matrix(Rnew) Rnew[lower.tri(Rnew, diag = TRUE)] <- \"\" Rnew <- as.data.frame(Rnew) } ", "Label": "Data Variable", "Source": "https://osf.io/xhrw6/", "File": "corstars.R" }, { "ID": 350, "Comment": "mean RTs by condition", "Code": "pretrain_study_rt <- pretrain_study %>% filter(block == 3) %>% group_by(id, condition) %>% summarise(mean_rt = mean(rt)) %>% ungroup() ", "Label": "Data Variable", "Source": "https://osf.io/xgwzf/", "File": "exp3_analysis.R" }, { "ID": 351, "Comment": "show get descriptive stats (how many/what proportion of participants reach each accuracy level)", "Code": "pretrain_test_acc %>% group_by(test_rep) %>% summarise(n_80 = sum(mean_acc > 0.8), n_100 = sum(mean_acc == 1), prop_100 = sum(mean_acc == 1) / length(mean_acc), prop_80 = sum(mean_acc > 0.8) / length(mean_acc)) pretrain_test_acc %>% filter(test_rep == max(pretrain_test_acc$test_rep)) %>% summarise(group_mean_acc = mean(mean_acc), group_sd_acc = sd(mean_acc)) ", "Label": "Visualization", "Source": "https://osf.io/xgwzf/", "File": "exp3_analysis.R" }, { "ID": 352, "Comment": "P1 COUNTED as 6 seconds until sound event to normalize across trials P2 COUNTED AS FROM SOUND ONSENT (6SECONDS) plus 9 SECONDS 15 SECONDS P3 COUNTED AS 20 SECONDS FROM P2 TO ALMOST END OF THE TRIAL ORDER PAPAMETER 'OPA', FISHER Z TRANSFORMED ORDER PARAMETER 'FOPA' P2 processing ", "Code": "OPAmedianP2<-read.csv(file=\"OPAmedianP2.csv\", sep = \"\", header=FALSE) colnames(OPAmedianP2)<-c(colheaderChronos) DW_OPAmedianP2=add_column(OPAmedianP2, GR, .before = 1) FOPAmedianP2=FisherZ(OPAmedianP2) DW_FOPAmedianP2=add_column(FOPAmedianP2, GR, .before = 1) DL_OPAmedianP2=melt(DW_OPAmedianP2, id=c(\"GR\"), measured=c(\"colheaderChronos\")) DL_FOPAmedianP2=melt(DW_FOPAmedianP2, id=c(\"GR\"), measured=c(\"colheaderChronos\")) colnames(DL_OPAmedianP2)<-c(\"GrNr\", \"TrialNr\",\"OPAmedianP2\") colnames(DL_FOPAmedianP2)<-c(\"GrNr\", \"TrialNr\",\"FOPAmedianP2\") ", "Label": "Data Variable", "Source": "https://osf.io/dzwct/", "File": "Fisher_Z_3PERIODS_median.R" }, { "ID": 353, "Comment": "plotting correlations with qgraph", "Code": "cor_graph <- qgraph(correlations)", "Label": "Visualization", "Source": 
"https://osf.io/8akru/", "File": "workshop_example.R" }, { "ID": 354, "Comment": "estimate regularized logistic nodewise regression network define where to binarize variables eLASSO (LASSO with EBIC model selection) listwise deletion of missing values (pairwise not possible for regressions)", "Code": "Ising_net <- estimateNetwork(data, default = \"IsingFit\", split = split, missing = \"listwise\", rule = \"OR\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/8akru/", "File": "workshop_example.R" }, { "ID": 355, "Comment": "standardize the variables (to obtain standardized coefficients)", "Code": "dfAD[,c(\"S\",\"R\",\"H\")] <- data.frame(apply(dfAD[,c(\"S\",\"R\",\"H\")], 2, scale)) dfBE[,c(\"S\",\"R\",\"H\")] <- data.frame(apply(dfBE[,c(\"S\",\"R\",\"H\")], 2, scale)) dfCF[,c(\"S\",\"R\",\"H\")] <- data.frame(apply(dfCF[,c(\"S\",\"R\",\"H\")], 2, scale)) ", "Label": "Statistical Modeling", "Source": "https://osf.io/fbshg/", "File": "ComF_SOM_Rcode.R" }, { "ID": 356, "Comment": " run logistic regression on each subset, and predict the probability of outcome for each country at each wave. these data points are used for the metaanalysis ", "Code": "response <- deparse(form[[2]]) data_list <- lapply(1:nrow(sub_data), function(i) { dat <- sub_data$data[[i]] if (!all_na(dat$migrant) && !all_na(dat[[response]]) && nlevels(droplevels(dat$migrant)) > 1) { m <- glm(form, family = binomial(), data = dat) pr <- ggemmeans(m, \"migrant\") pr$wave <- sub_data$wave[i] pr$country <- sub_data$country[i] pr } else { NA } }) ", "Label": "Statistical Modeling", "Source": "https://osf.io/7wd8e/", "File": "06-Trends.R" }, { "ID": 357, "Comment": "Binomial test for significant improvement in accuracy.", "Code": "ifelse(rf_probs > .5, 0, 1) %>% sum %>% # count number of continuous responses binom.test(., nrow(pv), p = baseline) ", "Label": "Statistical Test", "Source": "https://osf.io/x8vyw/", "File": "02_random_forest_analysis.R" }, { "ID": 358, "Comment": "Multiple item parameters and thresholds were in one cell. 
Split them into separate columns", "Code": "item.dif <- cbind(item.dif, str_split_fixed(item.dif$orig.parameter, \" \", 3)) item.dif <- cbind(item.dif, str_split_fixed(item.dif$orig.threshold, \" \", 2)) item.dif2 <- cbind(item.dif2, str_split_fixed(item.dif2$orig.parameter, \" \", 3)) item.dif2$Tau3 <- ifelse(item.dif2$item == \"CR021Q08\", 0.74200, NA) item.dif2 <- cbind(item.dif2, str_split_fixed(item.dif2$orig.threshold, \" \", 3)) ", "Label": "Data Variable", "Source": "https://osf.io/8fzns/", "File": "1_Get_Item-Params_TR.R" }, { "ID": 359, "Comment": "interrater reliability create dataframe of reliability statistic for each of the 10 pairs of data", "Code": "interraterReliability <- data.frame(row.names = row.names(interraterData)) for (i in c(1:10)) { interraterReliability[i,1] <- cohen.kappa(cbind(t(interraterData[i ,c(1:55)]), t(interraterData[i ,c(56:110)])))$weighted.kappa } ", "Label": "Data Variable", "Source": "https://osf.io/2j47e/", "File": "Reliability.R" }, { "ID": 360, "Comment": "fill empty matrix with the counts significant (p < .05) per lag (rows) and per individual (column)", "Code": "for (j in unique(dataset_imp$Participant)){ tempacf <- acf(dataset_imp[dataset_imp$Participant == j , -c(1,8:13)], lag.max = 60) acfmatrix[,paste0(\"participant\", j)] <- 0 for(i in as.numeric(rownames(acfmatrix))){ tempmatrix <- tempacf$acf[i,,] tempvector <- tempmatrix[upper.tri(tempmatrix, diag = T)] acfmatrix[i,paste0(\"participant\", j)] <- length(which(tempvector > 0.25 | tempvector < -0.25)) } } ", "Label": "Statistical Test", "Source": "https://osf.io/tfbps/", "File": "R_Script_Idiographic_network_analyses.R" }, { "ID": 361, "Comment": "Create list with partial correlation matrices per window", "Code": "pcorlist <- list() for (d in seq_len(30)) { pcorlist[[d]] <- pcor(temp_data[temp_data$Time %in% d:(d+Window),c(\"Happy\", \"Worrying\",\"Nervous\",\"Act_later_regret\",\"Act_without_thinking\",\"Restless\")])$estimate } pcorlist <- rapply(pcorlist,function(x) ifelse(x==0.00000000,0.00000001,x), how = \"replace\") # will be pruned ", "Label": "Data Variable", "Source": "https://osf.io/tfbps/", "File": "R_Script_Idiographic_network_analyses.R" }, { "ID": 362, "Comment": "Smooth with (bayesian) logistic regression", "Code": "(mym<-mean(nus)) d<-list(nus=nus-mym,fur=prob.fur*10000) m<-quap(alist( fur~dbinom(10000,p), logit(p)<-a+b*nus, a~dnorm(0,1), b~dnorm(0,1) ),data=d) ", "Label": "Statistical Modeling", "Source": "https://osf.io/pvyhe/", "File": "Prob_between.R" }, { "ID": 363, "Comment": " For home (emotional) climate items, recode values of \"99\" to missing. ", "Code": "mutate_at(vars(contains(\"EDNh_Emotion\")), ~ifelse(. 
== 99, NA, .)) %>% # (Not sure exactly how this code works, but it does) ", "Label": "Data Variable", "Source": "https://osf.io/xhrw6/", "File": "1_create_composite_measures.R" }, { "ID": 364, "Comment": "figure < annotate_figure(figure, top text_grob(str_c(Q_text[1]), size 8, color\"black\")) figure ", "Code": "ggsave(str_c(ordinal_y[QN],\"_random.pdf\"),plot=figure,width=12,height=14) return(figure) } ", "Label": "Visualization", "Source": "https://osf.io/nd9yr/", "File": "ordinal_plot_functions.R" }, { "ID": 365, "Comment": "ordered_logistic stacked plot", "Code": "N_cat <- max(d[,var_y],na.rm=T) grn <- 250 zLogM <- seq(-5,4,length.out=grn) P <- matrix(NA, nrow=grn, ncol=N_cat) for(i in 1:grn){ P[i,] <- ordered_logistic(fixef['b_X']*zLogM[i],cutpoints) } if(QN==1 | QN==4 | QN==5 | QN==8){ axis_labels <- c('fraction of responses','M-ratio') }else{ axis_labels <- c(' ',' ') } data.frame(P) -> P colnames(P) <- x_lab #str_c(\"p_\",1:N_cat) P$zlogM <- zLogM P$logM <- P$zlogM * 2*sd(log(d$M_cov)) + mean(log(d$M_cov)) P$M <- exp(P$logM) P %>% pivot_longer(cols=x_lab, #starts_with(\"p_\"), values_to=\"p\", names_to=\"k\") %>% mutate(k = as_factor(k), k = fct_rev(k)) %>% ggplot(aes(x=M, y=p, fill=k)) + scale_fill_viridis_d(name=\"\",labels = wrap_format(20), option=\"rocket\") + geom_area(alpha=0.8 , size=0.4, colour=\"black\")+ themeXstack + labs(y=axis_labels[1],x=axis_labels[2])+ scale_y_continuous(breaks=seq(0,1,0.1))+ coord_cartesian(xlim=c(0.5,1.3),ylim=c(0+0.045,1-0.045))+ ggtitle(label=str_wrap(sel_Q_text,22)) -> pl_stack ", "Label": "Visualization", "Source": "https://osf.io/nd9yr/", "File": "ordinal_plot_functions.R" }, { "ID": 366, "Comment": "calculate threshold for lowest quartile", "Code": "quantile(tau_squared_self)[2]] tau2_thres_publ <- dat[as_factor(primary_data) == \"yes\" & k_publ > k_thres, quantile(tau_squared_self_publ)[2]] ", "Label": "Data Variable", "Source": "https://osf.io/dqc3y/", "File": "analysis_MASTER.R" }, { "ID": 367, "Comment": "now we add up all corresponding answers to create one row per participant, as in a typical ANOVA design analysis ", "Code": "sdt.agg <- sdt.agg %>% group_by(Participant, type) %>% summarise(count = n()) %>% spread(type, count) sdt.agg #this is a new dataframe for the aggregate data, you can delete after. 
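ID 367 stops at the aggregated count table. A typical next step in a probit SDT analysis (an assumption, not shown in the source; the column names hit/miss/fa/cr produced by spread() are hypothetical) is to compute rates and d':

# assumes dplyr is loaded, as in the record; hit/miss/fa/cr are hypothetical column names
sdt.agg <- sdt.agg %>%
  mutate(hit_rate = hit / (hit + miss),
         fa_rate  = fa / (fa + cr),
         dprime   = qnorm(hit_rate) - qnorm(fa_rate))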
", "Label": "Statistical Modeling", "Source": "https://osf.io/abts4/", "File": "DECEPTION-ProbitSDT-v1.R" }, { "ID": 368, "Comment": " function pcor2beta gives you data from a partial correlation/network input pcor a partial correlation matrix / network output a matrix of betas, each column corresponds to a dependent variable so that you can get predicted values by a matrix multiplication in the form betas %*% data ", "Code": "pcor2beta <- function(pcor) { require(psych) require(corpcor) diag(pcor) <- 1 p <- ncol(pcor) betas <- matrix(0, ncol = p, nrow = p) for(i in 1:p) betas[-i,i] <- matReg(y = i, x = seq(p)[-i], C = pcor2cor(pcor))$beta betas[abs(betas) < 1e-13] <- 0 betas } ", "Label": "Statistical Modeling", "Source": "https://osf.io/ywm3r/", "File": "predictability.R" }, { "ID": 369, "Comment": "extract posterior samples of omega", "Code": "posterior.omega <- c(posterior.omega.check[, 1], posterior.omega.check[, 2]) posterior.omega.description.only <- c(posterior.omega.check[, 1], posterior.omega.check[, 2]) posterior.omega.description.plus.stats <- c(posterior.omega.check[, 1], posterior.omega.check[, 2]) ", "Label": "Statistical Modeling", "Source": "https://osf.io/x72cy/", "File": "AnalyzeDummyData.R" }, { "ID": 370, "Comment": "MEAN & SD FOR AMOUNT OF REM SLEEP (min) BETWEEN BASELINE AND ISOLATION", "Code": "aggregate(REM~Place, data=REM_GNS, FUN=mean) aggregate(REM~Place, data=REM_GNS, FUN=sd) ", "Label": "Data Variable", "Source": "https://osf.io/sx6yf/", "File": "2021-6-7_No_Man_Is_An_Island_analyses.R" }, { "ID": 371, "Comment": "calculating sociality bias ratio variable Converting the amount of dream interactions to same scale as the amount of wake time interactions was reported", "Code": "IN$int_cat<-ifelse(IN$Interactions == 0, 1, ifelse(IN$Interactions >= 1 & IN$Interactions <= 5, 2, ifelse(IN$Interactions >= 6 & IN$Interactions <= 15, 3, ifelse(IN$Interactions >= 16 & IN$Interactions <= 25, 4,5)))) IN$dream_per_sos<-IN$int_cat/IN$sos_int_prev_day hist(IN$dream_per_sos) qqnorm(IN$dream_per_sos) #skewed hist(log(IN$dream_per_sos)) qqnorm(log(IN$dream_per_sos)) #looks more normally distributed after log-transformed ", "Label": "Data Variable", "Source": "https://osf.io/sx6yf/", "File": "2021-6-7_No_Man_Is_An_Island_analyses.R" }, { "ID": 372, "Comment": "join predicted to test data", "Code": "test_pred <- left_join(test,prediction,by=\"id\") test_pred <- as.data.frame(test_pred) test_pred$class_var <- as.factor(test_pred$class_var) test_pred$class_var_pred <- as.factor(test_pred$class_var_pred) ", "Label": "Data Variable", "Source": "https://osf.io/cqsr8/", "File": "boosting_xgbDART.R" }, { "ID": 373, "Comment": "Computing SMD (g) and its variance", "Code": "smd <- escalc(measure = \"SMD\", m1i = wm, m2i = mm, sd1i = wsd, sd2i = msd, n1i = wn, n2i = mn, data = smd.means, append = TRUE) smd #Two columns (yi and vi) have been added# ", "Label": "Statistical Modeling", "Source": "https://osf.io/rbxzs/", "File": "Script_R.R" }, { "ID": 374, "Comment": "Computing Fisher's z and its variance", "Code": "zcor <- escalc(measure = \"ZCOR\", ri = cor, ni = sample, data = zcor.correlations, append = TRUE) zcor ", "Label": "Statistical Modeling", "Source": "https://osf.io/rbxzs/", "File": "Script_R.R" }, { "ID": 375, "Comment": "Obtaining the forest plot", "Code": "forest(res)", "Label": "Visualization", "Source": "https://osf.io/rbxzs/", "File": "Script_R.R" }, { "ID": 376, "Comment": "Analog to ANOVA: If we want to know the mean ES for each 'random' level: yes/no", "Code": "res_r <- 
{ "ID": 375, "Comment": "Obtaining the forest plot", "Code": "forest(res)", "Label": "Visualization", "Source": "https://osf.io/rbxzs/", "File": "Script_R.R" },
{ "ID": 376, "Comment": "Analog to ANOVA: if we want to know the mean ES for each 'random' level (yes/no)", "Code": "res_r <- rma(yi = g, vi = var, mods = ~ factor(random)-1, data = dat) res_r ", "Label": "Statistical Test", "Source": "https://osf.io/rbxzs/", "File": "Script_R.R" },
{ "ID": 377, "Comment": "Obtaining a funnel plot (model without moderators)", "Code": "funnel(res, main = \"Random-Effects Model\") ", "Label": "Visualization", "Source": "https://osf.io/rbxzs/", "File": "Script_R.R" },
{ "ID": 378, "Comment": " inspect potential multicollinearity using the variance inflation factor (VIF), for the example of the criterion variable outcome_sqd (see Fox, 2016 for a discussion of VIFs and their cutoffs) ", "Code": "lm_sqd <- lm(outcome_sqd ~ X + Y + X2 + XY + Y2, data=df) vif(lm_sqd) ", "Label": "Statistical Modeling", "Source": "https://osf.io/yvw93/", "File": "R_code_test_congruence_effects.R" },
{ "ID": 379, "Comment": "Mixed-effects logistic regression", "Code": "logistic_model_1 <- glmer(instructions ~ distance + (1|ID) + (1|target) + (1|task), data = choices, family = binomial(link = \"logit\")) logistic_model_2 <- glmer(instructions ~ distance + (1 + distance|ID) + (1|target) + (1|task), data = choices, family = binomial(link = \"logit\")) logistic_model_3 <- glmer(instructions ~ distance + (1 + distance|ID) + (1|target) + (1 + distance|task), data = choices, family = binomial(link = \"logit\"), control = glmerControl(optimizer = \"bobyqa\", optCtrl = list(maxfun = 100000))) model_comparison <- anova(logistic_model_1, logistic_model_2, logistic_model_3) ", "Label": "Statistical Modeling", "Source": "https://osf.io/hbju7/", "File": "study-2_main-analysis-code.R" },
{ "ID": 380, "Comment": "GLMMs: results of models with year as continuous are in Table 3. Model with year as continuous variable", "Code": "mod1ADI<-lmer(ADImS~LAT+LONG+YEAR1+(1|STATE)+(1|ROUTE2)+(1|SITE)+(1|YEAR1),dat=datUS,REML=F) mod1ADI<-lmer(ADImS~LAT+LONG+YEAR1+(1|SITE)+(1|COUNTRY)+(1|YEAR1),dat=datEU,REML=F) ", "Label": "Statistical Modeling", "Source": "https://osf.io/jyuxk/", "File": "Analysis_and_source_code_table_3_and_figure_3.R" },
{ "ID": 381, "Comment": "familiarity x female x BL: check effect of baseline on the familiarity x gender (self) interaction", "Code": "HR_test_BL = lmer(HR_EMA ~ SI_familiarity_cw * female * HR_BL_cb + female * SI_gender_partner + SIAS_cb + State_SI_Anxiety_cw + SI_count + SI_type_simple + SI_duration_cw + SI_caffeine + SI_nicotin + SI_alcohol + accel_EMA_cw + (1|Participant), data=df_interact) summary(HR_test_BL) Anova(HR_test_BL, type=3) anova(HR_A, HR_test_BL) # no difference RMSSD_test_BL = lmer(ln_RMSSD_EMA ~ SI_familiarity_cw * female * ln_RMSSD_BL_cb + female * SI_gender_partner + SIAS_cb + State_SI_Anxiety_cw + SI_count + SI_type_simple + SI_duration_cw + SI_caffeine + SI_nicotin + SI_alcohol + accel_EMA_cw + (1|Participant), data=df_interact) summary(RMSSD_test_BL) Anova(RMSSD_test_BL, type=3) anova(RMSSD_A, RMSSD_test_BL) # better ", "Label": "Statistical Modeling", "Source": "https://osf.io/d3tg5/", "File": "Manuscript_main analyses.R" },
{ "ID": 382, "Comment": "let's try to create a boxplot of mpg by number of cylinders (cyl)", "Code": "ggplot(mtcars, aes(factor(cyl), mpg)) + geom_boxplot() ", "Label": "Visualization", "Source": "https://osf.io/6g4js/", "File": "Graphics_Section_5.R" },
{ "ID": 383, "Comment": "Likelihood Ratio Tests for nested model comparison", "Code": "pair1_rand_anova <- anova(pair1_rand_full, pair1_rand2, pair1_rand3) # empty model = pair1_rand2 pair1_rand_anova_tidy <- tidy(pair1_rand_anova) pair2_rand_anova <- anova(pair2_rand_full, pair2_rand2, pair2_rand3) pair2_rand_anova_tidy <- tidy(pair2_rand_anova) # create tidy table of model parameters pair2_fixed_anova <- anova(pair2_full, pair2_fixed, pair2_fixed2, pair2_fixed3, pair2_rand2) pair2_fixed_anova_tidy <- tidy(pair2_fixed_anova) # create tidy table of model parameters pair2_fixed_lmer <- tidy(pair2_fixed2) # create tidy table of model parameters group1_rand_anova <- anova(group1_rand_full, group1_rand2, group1_rand3, group1_rand4) # a random intercept for each pair is significantly better than empty, otherwise all other random effects tested were not warranted group1_rand_anova_tidy <- tidy(group1_rand_anova) # create tidy table of model parameters group1_rand_lmer <- tidy(group1_rand3) # create tidy table of model parameters group2_rand_anova <- anova(group2_rand_full, group2_rand2, group2_rand3, group2_rand4, group2_rand5) group2_rand_anova_tidy <- tidy(group2_rand_anova) # create tidy table of model parameters group2_rand_lmer <- tidy(group2_rand2) # create tidy table of model parameters ", "Label": "Statistical Test", "Source": "https://osf.io/67ncp/", "File": "duque_etal_2019_rcode.R" },
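On ID 383: anova() on nested (g)lmer fits runs likelihood-ratio chi-square tests. When the comparison involves fixed effects, the models must be fitted with ML (REML = FALSE) for the deviances to be comparable; lme4 refits with ML automatically and notes this in the output. Schematic (y, x, pair, and d are placeholders, not names from the source):

library(lme4)
m0 <- lmer(y ~ 1 + (1 | pair), data = d, REML = FALSE)
m1 <- lmer(y ~ x + (1 | pair), data = d, REML = FALSE)
anova(m0, m1)  # LRT for the fixed effect of x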
{ "ID": 384, "Comment": "Fills in missing values of x with the mean of x. Args: x: a numeric vector. Returns: x with missing values filled in", "Code": "return(replace(x, is.na(x), mean(x, na.rm = T))) } mseOfMatchingColumns <- function(nm, mat, dt) { ", "Label": "Data Variable", "Source": "https://osf.io/2phst/", "File": "mse_values_latent_space_diffusion_slurm.R" },
{ "ID": 385, "Comment": " **** 0.3.4) logisticPseudoR2s calculates logistic pseudo-R²s (from Field et al., 2013); input: glm object ", "Code": "logisticPseudoR2s <- function(LogModel) { dev <- LogModel$deviance nullDev <- LogModel$null.deviance modelN <- length(LogModel$fitted.values) R.l <- 1 - dev / nullDev R.cs <- 1- exp ( -(nullDev - dev) / modelN) R.n <- R.cs / ( 1 - ( exp (-(nullDev / modelN)))) outdat <- data.table(R.l = R.l, R.cs = R.cs, R.n = R.n) outdat[, r.sq.print := paste(round(R.l, 3), \"/\", round(R.cs, 3), \"/\", round(R.n, 3), sep = \"\")] return(outdat) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/dqc3y/", "File": "prep_functions.R" },
{ "ID": 386, "Comment": "for loop goes through all the files in myLists and applies addMultiChoiceSame to each row in the file", "Code": "for (k in 1:length(myLists)) { i <- paste(\"list\", k, \".txt\", sep = \"\") o <- paste(\"upload\", k, \".txt\", sep = \"\") f <- read.delim(i, header = FALSE, stringsAsFactors = FALSE) write(\"[[AdvancedFormat]]\", o) apply(f, 1, addMultiChoiceSame, o) } rm(list=ls()) ", "Label": "Data Variable", "Source": "https://osf.io/t2jka/", "File": "multipleChoiceSame.R" },
{ "ID": 387, "Comment": " compute mean LT per condition, group and accuracy ", "Code": "LT <- ddply(data, .(rctype,group,acc_lab), summarize, mean.rt = mean(rt), se.lower = mean.rt - se(rt), se.upper = mean.rt + se(rt)) ", "Label": "Data Variable", "Source": "https://osf.io/kdjqz/", "File": "Lissonetal2021-script.R" },
{ "ID": 388, "Comment": " Fit uni.cfa and save output in uni.cfa.fit. Use fixed factor (std.lv = TRUE) and FIML for missing data ", "Code": "uni.cfa.fit = cfa(uni.cfa, data = SSSSdat, std.lv = TRUE, missing = \"ML\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/mbf32/", "File": "SSSS-Latent Variable Analysis Made Easy (1.0) .R" },
{ "ID": 389, "Comment": "Request summary output from model, including fit indexes, standardized estimates, and R^2/communalities ", "Code": "summary(uni.cfa.fit, fit.measures = TRUE, standardized = TRUE, rsquare=TRUE) summary(five.cfa.fit, fit.measures = TRUE, standardized = TRUE, rsquare=TRUE) summary(five.sem.parcel.corr.fit, fit.measures = TRUE, standardized = TRUE, rsquare=TRUE) summary(five.sem.parcel.reg.fit, fit.measures = TRUE, standardized = TRUE, rsquare=TRUE) ", "Label": "Statistical Modeling", "Source": "https://osf.io/mbf32/", "File": "SSSS-Latent Variable Analysis Made Easy (1.0) .R" },
{ "ID": 390, "Comment": "We need to get an idea of how many factors are likely needed. Parallel analysis can help. Save output in object \"parallel\", using the psych fa.parallel function. Variables 1-25 (the BFI items) are to be analyzed, using maximum likelihood (ml) common factors (fa). Simulate 50 other samples of \"garbage factors\", using R^2s (SMC) as initial communality estimates, and compare observed eigenvalues to the 95th quantile of simulated \"garbage factor\" eigenvalues ", "Code": "parallel = fa.parallel(SSSSdat[1:25], fm = 'ml', fa = 'fa', n.iter = 50, SMC = TRUE, quant = .95) ", "Label": "Statistical Modeling", "Source": "https://osf.io/mbf32/", "File": "SSSS-Latent Variable Analysis Made Easy (1.0) .R" },
{ "ID": 391, "Comment": "Fit measurement invariance models based on five.cfa model, distinguishing by group levels of \"gender\". Save output in invar.output object ", "Code": "invar.output = measurementInvariance(five.cfa, data = SSSSdat, group = \"gender\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/mbf32/", "File": "SSSS-Latent Variable Analysis Made Easy (1.0) .R" },
{ "ID": 392, "Comment": "Create function to calculate ICC from fitted model: ICC = between-family variance / total family variance", "Code": "calc.icc <- function(y) { sumy <- summary(y) (sumy$varcor$famnumber[1]) / (sumy$varcor$famnumber[1] + sumy$sigma^2) }", "Label": "Statistical Modeling", "Source": "https://osf.io/9vn68/", "File": "Syntax_Sibpaper1_final_2021-02-12.R" },
{ "ID": 393, "Comment": "** 5.2) Poisson regression **** 5.2.1) Full set model 1: summary effect (absolute value)", "Code": "pred.nbias.1 <- glm(nmeth_bias ~ scale(abs(MA_ES_self)), data = dat[!is.na(nmeth_bias) & k_self > k_thres, ], family = poisson) summary(pred.nbias.1) rsq.pred.nbias.1 <- logisticPseudoR2s(pred.nbias.1) ", "Label": "Statistical Modeling", "Source": "https://osf.io/dqc3y/", "File": "analysis_mmreg.R" },
{ "ID": 394, "Comment": " ANOVA. Dependent variable: drift rate; independent variable: masking time condition; letters task, pretest ", "Code": "cv_pre_drift = cogito %>% gather(key = \"maskingtime\", value = \"drift\", cvt1_v1,cvt1_v2,cvt1_v3,cvt1_v4) %>% convert_as_factor(obs, maskingtime) aov_cv_pre <- anova_test(data = cv_pre_drift , dv = drift, wid = obs, within = maskingtime, type=3, detailed=T) get_anova_table(aov_cv_pre, correction=\"none\") ", "Label": "Data Variable", "Source": "https://osf.io/5qx7e/", "File": "Descriptives_Tables_1_2_S1_S2.R" },
{ "ID": 395, "Comment": " subsample of the PG without patients with schizophrenia or other psychotic disorders (F20. or F23.)", "Code": "subsample_PGwithoutF20_23 <- subsample_PG[subsample_PG$F20_23==0, ] ", "Label": "Data Variable", "Source": "https://osf.io/73y8p/", "File": "RAQ-R_reliability after exclusion.R" },
schizophrenia or other psychotic disorders (F20. or F23.)", "Code": "subsample_PGwithoutF20_23 <- subsample_PG[subsample_PG$F20_23==0, ] ", "Label": "Data Variable", "Source": "https://osf.io/73y8p/", "File": "RAQ-R_reliability after exclusion.R" }, { "ID": 396, "Comment": " function returns raster of posterior probabilities for bivariate normal data. x: the unknown tissue of interest; will have two values, one for each isotope. m: a 2D vector, all the values in the raster for each isotope. v: the same as m, but for variances. r: a single number, the covariance; can be a vector if estimated as non-stationary. ras: a raster that will serve as a template for the final product ", "Code": "calcCellProb2D <- function(x,m,v,r,ras) { pd <- 1/(2*pi*sqrt(v[,1])*sqrt(v[,2])*sqrt(1-r^2))*exp(-(1/(2*(1-r^2)))* ((x[1]-m[,1])^2/v[,1]+(x[2]-m[,2])^2/v[,2]-(2*r*(x[1]-m[,1])* (x[2]-m[,2]))/(sqrt(v[,1])*sqrt(v[,2])))) pdras <- setValues(ras,pd) return(pdras) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/ynx3m/", "File": "WoCP_publication_script.R" }, { "ID": 397, "Comment": "function returns raster of posterior probability distribution", "Code": "calcPostProb <- function(x){ pp <- x/cellStats(x,sum) return(pp) } ", "Label": "Data Variable", "Source": "https://osf.io/ynx3m/", "File": "WoCP_publication_script.R" }, { "ID": 398, "Comment": "reproject the site coordinates to get the site to plot (thanks to P. Schauer)", "Code": "myproj <- proj4string(origins) #get the CRS string iso.data.sp <- iso.data #make a copy coordinates(iso.data.sp) <- ~ Long + Lat #specify the coordinates proj4string(iso.data.sp) <- CRS(\"+proj=longlat +datum=WGS84\") #project first iso.data.sp <- spTransform(iso.data.sp, myproj) #then re-project using the projection from the base map wrld_simpl2 <- wrld_simpl #make a copy wrld_simpl2 <- spTransform(wrld_simpl2, myproj) #this is to add in country borders etc. if you want to as they have the same projection issues as the site coords ", "Label": "Visualization", "Source": "https://osf.io/ynx3m/", "File": "WoCP_publication_script.R" }, { "ID": 399, "Comment": "cut the dendrogram into 3 clusters", "Code": "clus3 = cutree(hcOSrenamel, 3) Women_WOCP_cluster<-plot(as.phylo(hcOSrenamel), type = \"fan\", tip.color = cbbPalette[clus3]) Women_WOCP_cluster ", "Label": "Data Variable", "Source": "https://osf.io/ynx3m/", "File": "WoCP_publication_script.R" }, { "ID": 400, "Comment": "calculate intercorrelations between diamonds on the between-person level with 95% bootstrapped CIs", "Code": "between.person.diamonds = diamonds %>% group_by(user_id) %>% summarise(across(where(is.numeric), ~ mean(.x))) between.person.diamonds = between.person.diamonds %>% dplyr::select(diamonds_duty, diamonds_intellect, diamonds_adversity, diamonds_mating, diamonds_positivity, diamonds_negativity, diamonds_deception, diamonds_sociality) cors.diamonds = round(cor(between.person.diamonds, method = \"pearson\"),2) cors.diamonds = as.data.frame(cors.diamonds) ", "Label": "Statistical Test", "Source": "https://osf.io/b7krz/", "File": "Descriptives_Selfreports.R" }, { "ID": 401, "Comment": "Analysis: Linear Probability Model on Impact Factor", "Code": "fit_ev_if <- lm(X1.year.Impact.Factor ~ diff_ev + factor(year), data=FullDataset) summary(fit_ev_if) # show results fit_ex_if <- lm(X1.year.Impact.Factor ~ diff_ex + factor(year), data=FullDataset) summary(fit_ex_if) # show results ", "Label": "Statistical Modeling", "Source": "https://osf.io/jh47m/", "File": "Gatekeeper analysis.r" }, { "ID": 402, "Comment": "Tobit model as robustness check: assign value 0 to impact factors for unpublished studies", "Code": "FullDataset$X1.year.Impact.Factor[FullDataset$published == \"No\"] = 0 tobit_ev_if <- censReg( X1.year.Impact.Factor ~ diff_ev + factor(year), data = FullDataset, left = 0) summary(tobit_ev_if) tobit_ex_if <- censReg( X1.year.Impact.Factor ~ diff_ex + factor(year), data = FullDataset, left = 0) summary(tobit_ex_if) ", "Label": "Statistical Modeling", "Source": "https://osf.io/jh47m/", "File": "Gatekeeper analysis.r" }, { "ID": 403, "Comment": "initialize standard error function", "Code": "stderr <- function(x, na.rm=FALSE) { if (na.rm) x <- na.omit(x) sqrt(var(x)/length(x)) } ", "Label": "Data Variable", "Source": "https://osf.io/wyrav/", "File": "AnalysisScript-ResearchQuestion1.R" }, { "ID": 404, "Comment": "2019 data: z-standardize mood scores for each person", "Code": "dat2019_scaled <- dat2019_complete %>% dplyr::select(identity_id, country, doy, mood, weekday) %>% group_by(identity_id) %>% mutate_at(vars(-identity_id,-doy,-country, -weekday), scale) ", "Label": "Data Variable", "Source": "https://osf.io/wyrav/", "File": "AnalysisScript-ResearchQuestion1.R" }, { "ID": 405, "Comment": "Run regression analyses and save as table (one table with mood and depression; see depression section)", "Code": "m1_het <- lm(mood_change ~ gender + age + education_level + race, data = dat_het_prepost) m2_het <- lm(depression_change ~ gender + age + education_level + race, data = dat_het_prepost) tab_model(m1_het, m2_het, show.est = F, show.std = T, digits = 3, file=\"Mood_Depression_PrePost_Demog_std.doc\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/wyrav/", "File": "AnalysisScript-ResearchQuestion1.R" },
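The per-person z-standardization in ID 404 generalizes to any set of numeric columns; a minimal sketch on made-up data (identity_id and mood are stand-ins for the original columns):

library(dplyr)
toy <- data.frame(identity_id = rep(1:3, each = 10), mood = rnorm(30))
toy_scaled <- toy %>%
  group_by(identity_id) %>%
  mutate(mood_z = as.numeric(scale(mood))) %>%  # mean 0, SD 1 within each person
  ungroup()

{ "ID": 406, "Comment": "Run multilevel model analyses predicting depression scores from the dummy-coded month variable", "Code": "dat2020_complete$month <-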
as.factor(dat2020_complete$month) summary(m1 <- lmer(depression ~ month + (1|identity_id), data = subset(dat2020_complete, country == \"United States\"))) summary(m2 <- lmer(depression ~ month + (1|identity_id), data = subset(dat2020_complete, country == \"Germany\"))) summary(m3 <- lmer(depression ~ month + (1|identity_id), data = subset(dat2020_complete, country == \"United Kingdom\"))) tab_model(m1,m2,m3, show.est = F, show.std = T, digits = 3, file=\"Depression_2020_Months.doc\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/wyrav/", "File": "AnalysisScript-ResearchQuestion1.R" }, { "ID": 407, "Comment": "Step 2: Create one-hot encodings (dummy variables)", "Code": "testData3 <- predict(dummies_model, testData2) ", "Label": "Data Variable", "Source": "https://osf.io/wyrav/", "File": "ThesisMLROCCode.R" }, { "ID": 408, "Comment": " function to generate output csv files for a list of response IDs. Inputs: dataset, list of response IDs. Outputs: csv files named with response IDs ", "Code": "multi_response <- function(dataset, IDs_list) { for (i in IDs_list) { participant_response <- single_response(dataset, i) csv_name <- str_c(i, \".csv\", sep = \"\") write.csv(participant_response, csv_name) } } ", "Label": "Visualization", "Source": "https://osf.io/3bn9u/", "File": "4_4_identification.R" }, { "ID": 409, "Comment": "Means and SDs of proportional looking time to novel object in the preferential looking phase, condition 1", "Code": "mean(dfX_base$PrefLook_LT_Object_Nov_PROP[which(dfX_base$Condition == \"Con1\")], na.rm = TRUE) ", "Label": "Data Variable", "Source": "https://osf.io/yfegm/", "File": "PREPROC_script_Experiment1.r" }, { "ID": 410, "Comment": "calculate mode of a vector", "Code": "mode.knn = function(x){ uniq.x = unique(x) uniq.x = uniq.x[which(!is.na(uniq.x))] knn = uniq.x[which.max(tabulate(match(x, uniq.x)))] return(knn) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/b7krz/", "File": "helper_variables.R" }, { "ID": 411, "Comment": "find [k] nearest neighbors and impute by their mode", "Code": "impute.knn = function(y, k){ t = which(is.na(y)) if(length(t) == 0){ return(y) }else{ is = 1:length(t) for(i in is){ if(i > k){ look.at = y[(t[i]-k):(t[i]+k)] y[t[i]] = mode.knn(look.at) } if(i <= k){ look.at = y[1:(t[i]+k)] y[t[i]] = mode.knn(look.at) } } return(y) } } ", "Label": "Statistical Modeling", "Source": "https://osf.io/b7krz/", "File": "helper_variables.R" }, { "ID": 412, "Comment": "get categorical data information, then look up the process variables in categorical_data to get the number of categories for each variable in the process; this is achieved by categories[cat_indices[i]] for variable i in the process", "Code": "categories <- as.integer(mplus.get.group.attribute(file,'categorical_data','categories')) catvars <- mplus.get.group.attribute(file,'categorical_data','var_names') vartypes <- as.integer(mplus.get.group.attribute(file,'categorical_data','vtype')) if (series) { ", "Label": "Data Variable", "Source": "https://osf.io/nxyh3/", "File": "mplus.R" },
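A quick usage sketch for mode.knn and impute.knn above (IDs 410/411) on toy vectors; note that for the first k missing positions impute.knn widens the window to start at position 1:

x <- c("a", "b", "b", NA, "b", "a")
mode.knn(x)  # "b": the most frequent non-NA value

y <- c("a", "a", NA, "a", "b", "b", NA, "b")
impute.knn(y, k = 2)  # each NA is replaced by the mode of a local window

{ "ID": 413, "Comment":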
"get indices and names of the variables in the series", "Code": "var_indices <- mplus.get.group.attribute(file,cstr2,'var_indices') var_names <- mplus.get.group.attribute(file,cstr2,'var_names') cat_indices <- pmatch(var_names, catvars, nomatch=0) cat_indices <- as.integer(cat_indices) var_indices <- mplus.get.group.attribute(file,cstr2,'var_indices') var_names <- mplus.get.group.attribute(file,cstr2,'var_names') cat_indices <- as.integer(pmatch(var_names, catvars, nomatch=0)) ", "Label": "Data Variable", "Source": "https://osf.io/nxyh3/", "File": "mplus.R" }, { "ID": 414, "Comment": "extract correlations between selfreported scores (observational level!)", "Code": "self.reported.scores = c(\"duty\", \"intellect\", \"mating\", \"positivity\", \"sociality\") l = \"mating\" for(i in self.reported.scores[1:length(self.reported.scores)-1]){ data_phi = ftable(data[,c(paste0(\"diamonds_\", l), paste0(\"diamonds_\", self.reported.scores[which(self.reported.scores == i)+1]))]) print(paste(l, \"and\", self.reported.scores[which(self.reported.scores == i)+1])) print(phi(data_phi)) } ", "Label": "Data Variable", "Source": "https://osf.io/b7krz/", "File": "02_IML_LASSO_FeatImp.R" }, { "ID": 415, "Comment": "generate unique study identifier, format and annotate data ", "Code": "d$id <- paste0(d$First_Author,\", \", d$Year) d$Year <- as.numeric(substr(d$Year,1,4)) d$ids <- NA for(i in d$id) d[d$id == i,\"ids\"] <- 1:dim(d[d$id == i,\"ids\"])[1] d$ids <- as.character(d$ids) d$Age <- (as.numeric(d$Age_M) - 30) / 20 d$energy_renew <- d$energy_renew1 + d$energy_renew2 find_cCode <- Vectorize(function(i) which(unlist(Map(function(x) sum(d[i,grepl(\"SVS\",names(d))] - f[x,grepl(\"SVS\",names(f))]), 1:dim(f)[1])) == 0)) d$Ccode <- f$Ccode[find_cCode(1:dim(d)[1])] ", "Label": "Data Variable", "Source": "https://osf.io/qxf5t/", "File": "TSST_Meta.R" }, { "ID": 416, "Comment": " to make sure that effects are tested from complex to simple, we reverse the order of the vector containing the to be tested effects: ", "Code": "effects_to_test = rev(effects_to_test)", "Label": "Data Variable", "Source": "https://osf.io/dpkyb/", "File": "create_model_formulas.R" }, { "ID": 417, "Comment": "combine model matrix and data to include participant id and DV:", "Code": "new_data = data.frame(dplyr::select(data, contains(c(group, DV_variables, by))), model_matrix) ", "Label": "Data Variable", "Source": "https://osf.io/dpkyb/", "File": "create_model_formulas.R" }, { "ID": 418, "Comment": "check if retention is a number from 0 to 1", "Code": "if (retention < 0 || retention > 1) { stop('Retention value is not a number from 0 to 1.') }", "Label": "Data Variable", "Source": "https://osf.io/a9bv6/", "File": "sanet_2.R" }, { "ID": 419, "Comment": " construct a maximal glmer() model This model contains code for Ambiguity effect, plus random effects by participants and items. 
", "Code": "Acc.Modality.max <- glmer(Correct ~ 1 + Ambiguity.code + (1 + Ambiguity.code | ï..ID) + (1 | Item), data = Data.DisambTask.AmbvsUnamb, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) ", "Label": "Statistical Modeling", "Source": "https://osf.io/m87vg/", "File": "Exp2_BehaviouralAnalyses_Code.R" }, { "ID": 420, "Comment": "create a histogram of the residuals", "Code": "hist(rawResiduals) hist(invResiduals) hist(logResiduals)", "Label": "Visualization", "Source": "https://osf.io/m87vg/", "File": "Exp2_BehaviouralAnalyses_Code.R" }, { "ID": 421, "Comment": "construct a maximal lmer() model This model contains a fixed withinsubjects effect of Ambiguity (effectcoded with 0.5 amb) plus random effects by participants and items.", "Code": "RT.max <- lmer(logRT ~ 1 + Ambiguity.code + (1 + Ambiguity.code | ï..ID) + (1 | Item), data = Data.DisambTask.AmbvsUnamb, REML=FALSE) ", "Label": "Statistical Modeling", "Source": "https://osf.io/m87vg/", "File": "Exp2_BehaviouralAnalyses_Code.R" }, { "ID": 422, "Comment": " construct a maximal glmer() model This model contains codes for Modality effects, plus random effects by participants and items. ", "Code": "Acc.Modality.max <- glmer(Correct ~ 1 + Modality.code1 + Modality.code2 + (1 + Modality.code1 + Modality.code2 | ï..ID) + (1 | Item), data = Data.DisambTask.Amb, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) Acc.Modality.max <- glmer(True.Positive ~ 1 + Modality.code2 + (1 + Modality.code2 | ID) + (1 | Item), data = Data.ListAndRead, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) Acc.Modality.max <- glmer(True.Positive ~ 1 + Modality.code3 + (1 + Modality.code3 | ID) + (1 | Item), data = Data.ListAndRSVP, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) Acc.Modality.max <- glmer(True.Positive ~ 1 + Modality.code3 + (1 + Modality.code3 | ID) + (1 | Item), data = Data.ReadAndRSVP, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) ", "Label": "Statistical Modeling", "Source": "https://osf.io/m87vg/", "File": "Exp2_BehaviouralAnalyses_Code.R" }, { "ID": 423, "Comment": " construct a maximal glmer() model This model contains codes for run effects, plus random effects by participants and items. 
", "Code": "Acc.Run.max <- glmer(Correct ~ 1 + Run.code1 + Run.code2 + (1 + Run.code1 + Run.code2 | ï..ID) + (1 | Item), data = Data.DisambTask.Amb, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) Acc.Run.max <- glmer(True.Positive ~ 1 + Run.code1 + Run.code2 + (1 + Run.code1 + Run.code2 | ID) + (1 | Item), data = Data.RecMem, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) ", "Label": "Statistical Modeling", "Source": "https://osf.io/m87vg/", "File": "Exp2_BehaviouralAnalyses_Code.R" }, { "ID": 424, "Comment": "run a onesample ttest comparing accuracy to chance level (0.5)", "Code": "t.test(Data.List$True.Positive, mu=0.5) t.test(Data.Read$True.Positive, mu=0.5) t.test(Data.RSVP$True.Positive, mu=0.5) ", "Label": "Statistical Test", "Source": "https://osf.io/m87vg/", "File": "Exp2_BehaviouralAnalyses_Code.R" }, { "ID": 425, "Comment": "abbreviate first names to match with abbreviations in publication list", "Code": "stats_persons <- lapply(stats_persons, function(x){ x[2]<-str_sub(x[2], 1, 1) x }) stats_persons <- lapply(stats_persons, function(x)paste(x[1], x[2], sep = \",\")) ", "Label": "Data Variable", "Source": "https://osf.io/rf6zu/", "File": "scrape_web_pages.R" }, { "ID": 426, "Comment": "Load the data", "Code": "load('../Relative Effectiveness Data - final.Rdata')", "Label": "Data Variable", "Source": "https://osf.io/3aryn/", "File": "6speciesismgraphs.R" }, { "ID": 427, "Comment": "Print a txt and csv file with the results ' ' @param stats_to_print the tibble/table to print ' @param name_file name of the file to write ' ' @return 2 files with the results (txt and csv)", "Code": "print_result <- function(stats_to_print, name_file){ knitr::kable( stats_to_print, format = \"rst\") %>% cat( file = here('results', str_c(name_file, '.txt', sep = \"\")), sep = \"\\n\") stats_to_print %>% write_csv(file = here('results', str_c(name_file, '.csv', sep = \"\"))) } ", "Label": "Visualization", "Source": "https://osf.io/4fvwe/", "File": "print_result.R" }, { "ID": 428, "Comment": "convert degrees of freedom into numeric variables and store in new variable", "Code": "ScienceStatus$df.numerator.value = as.numeric(as.character(ScienceStatus$df.numerator)) ScienceStatus$df.denominator.value = as.numeric(as.character(ScienceStatus$df.denominator))", "Label": "Data Variable", "Source": "https://osf.io/he8mu/", "File": "Study2_Load_Analysis_Post_Review_11-14-16_Final.R" }, { "ID": 429, "Comment": "Assigning numeric values to variable labels which were stored in CSV", "Code": "ScienceStatus$coding.difficulty_r = revalue(ScienceStatus$coding.difficulty, c(\"Very Easy\"=\"1\", \"Moderately Easy\"=\"2\", \"Slightly Easy\"=\"3\", \"Neither Difficult nor Easy\"=\"4\", \"Slightly Difficult\"=\"5\", \"Moderately Difficult\"=\"6\", \"Very Difficult\"=\"7\")) ScienceStatus$coding.difficulty_r = as.numeric(as.character(ScienceStatus$coding.difficulty_r)) ", "Label": "Data Variable", "Source": "https://osf.io/he8mu/", "File": "Study2_Load_Analysis_Post_Review_11-14-16_Final.R" }, { "ID": 430, "Comment": "Calcuate EXACT pvalue from the stats reported in the paper (Posthoc)", "Code": "ScienceStatus_J$Calc.Pvalue<-(rowSums(cbind(ScienceStatus_J$T.pvalue_calc, ScienceStatus_J$F.pvalue_calc, ScienceStatus_J$rg.pvalue_calc, ScienceStatus_J$r.pvalue_calc,ScienceStatus_J$chi.pvalue_calc), na.rm = TRUE) + ifelse(is.na(ScienceStatus_J$T.pvalue_calc) & is.na(ScienceStatus_J$F.pvalue_calc) & is.na(ScienceStatus_J$rg.pvalue_calc) & is.na(ScienceStatus_J$r.pvalue_calc) & 
is.na(ScienceStatus_J$chi.pvalue_calc), NA, 0)) ", "Label": "Statistical Test", "Source": "https://osf.io/he8mu/", "File": "Study2_Load_Analysis_Post_Review_11-14-16_Final.R" }, { "ID": 431, "Comment": "P-curve graph calculated from statistic and DF, by paper", "Code": "P.By.Paper<-ddply(ScienceStatus_SP, .(article.id,yearcat), summarize, Pmean.calc = stouffer.P(Calc.Pvalue.Clean), PMedian.calc = median(Calc.Pvalue.Clean)) ", "Label": "Visualization", "Source": "https://osf.io/he8mu/", "File": "Study2_Load_Analysis_Post_Review_11-14-16_Final.R" }, { "ID": 432, "Comment": " R-Index: calculate mean, median, peak ", "Code": "Rindex.Reults<-BCa.Boot.CI(ScienceStatus_SP,Calc.Z,r.index.calc.boot,LogT=FALSE,splitter=yearcat, StatType=\"AR\") Rindex.Reults ", "Label": "Statistical Modeling", "Source": "https://osf.io/he8mu/", "File": "Study2_Load_Analysis_Post_Review_11-14-16_Final.R" }, { "ID": 433, "Comment": "FIGURE 3: Length in phonemes (RD). Fit a model with fixed effects and with the random effect of subjects on intercepts and the slopes for length", "Code": "Cue.Subj.lmer = glmer(ACC ~ CueCondition + zLengthPh + zFreq + (1+zLengthPh|Subject), data = NamingData, family = \"binomial\", control = glmerControl(optimizer=\"bobyqa\")) summary(Cue.Subj.lmer) ", "Label": "Statistical Modeling", "Source": "https://osf.io/bfq39/", "File": "Code_LMMs_Code_BestPractice_Example.R" }, { "ID": 434, "Comment": "extract fitted values and align with original data for plotting", "Code": "a<-fitted.values(Cue.Subj.lmer) length(a) Dat<-na.omit(NamingData) #remove NAs from original data so aligns with model values Dat$fitted<-a names(Dat)", "Label": "Visualization", "Source": "https://osf.io/bfq39/", "File": "Code_LMMs_Code_BestPractice_Example.R" }, { "ID": 435, "Comment": "FIGURE 4a: Plot average slope for effect of Frequency", "Code": "p <- ggplot(data=Dat,aes(x=zFreq, y=fitted)) p <- p + geom_smooth(method = \"glm\") p <- p + xlab(\"Frequency (z score)\") + ylab(\"Accuracy (model fit)\") p <- p + ggtitle(\"4a) Average (group) effect of Frequency\") p <- p + ylim(0,1) p <- p + theme_bw() + theme(text=element_text(size=12)) p ", "Label": "Visualization", "Source": "https://osf.io/bfq39/", "File": "Code_LMMs_Code_BestPractice_Example.R" }, { "ID": 436, "Comment": "Main effects model for predicting inversion questions during alignment sessions.", "Code": "inversion_main_effects_model<-glmer(inversion_dv ~ prime_type + WMC + modality + inversion_production_pre + (1 + prime_type + trial_order | subject) + (1|verb), priming, family=\"binomial\"(link=\"logit\"),glmerControl(optimizer = \"bobyqa\", optCtrl = list(maxfun = 100000))) summary(inversion_main_effects_model) ", "Label": "Statistical Modeling", "Source": "https://osf.io/f3qrh/", "File": "Kim Skalicky and Jung - R syntax.R" }, { "ID": 437, "Comment": "Odds ratios for main effects model", "Code": "inversion.me.CI<-confint(inversion_main_effects_model, parm=\"beta_\", level=0.90, method=\"Wald\") inversion.me.tab <- cbind(est = fixef(inversion_main_effects_model),inversion.me.CI) inversion.me.tab <- exp(inversion.me.tab) inversion.me.tab <- as.data.frame(inversion.me.tab) inversion.me.tab <- rownames_to_column(inversion.me.tab) inversion.me.tab ", "Label": "Statistical Modeling", "Source": "https://osf.io/f3qrh/", "File": "Kim Skalicky and Jung - R syntax.R" },
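stouffer.P in ID 431 is a project-specific helper; the underlying method is the standard Stouffer combination. A minimal sketch of the textbook formula (my implementation, not the authors'):

stouffer_p <- function(p) {
  z <- qnorm(1 - p)              # one z-score per p-value
  Z <- sum(z) / sqrt(length(p))  # combined z-statistic
  1 - pnorm(Z)                   # combined one-sided p-value
}
stouffer_p(c(0.04, 0.20, 0.01))

{ "ID": 438, "Comment": "Visualize the interaction between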
prime type and modality. Save the specific effect to a variable", "Code": "effect2 <-effect(\"prime_type*modality\",inversion_full_model_sig_int_only) summary(effect2) plot(effect2) ", "Label": "Visualization", "Source": "https://osf.io/f3qrh/", "File": "Kim Skalicky and Jung - R syntax.R" }, { "ID": 439, "Comment": "post hoc analyses to compare all levels of test order between groups; indirect production main effect only", "Code": "ind_prod_me_between_groups <- emmeans(indirect_production_me,c(\"test_order\"), type = \"response\") pairs(ind_prod_me_between_groups, reverse = F, type = 'response', adjust = 'none') plot(ind_prod_me_between_groups) ", "Label": "Statistical Test", "Source": "https://osf.io/f3qrh/", "File": "Kim Skalicky and Jung - R syntax.R" }, { "ID": 440, "Comment": "Test assumptions for MANOVA: test whether residuals are normally distributed", "Code": "df$pc1.residuals = lm(pc1~condition.socaccount, data=df)$residuals df$pc2.residuals = lm(pc2~condition.socaccount, data=df)$residuals df$pc3.residuals = lm(pc3~condition.socaccount, data=df)$residuals df$pc4.residuals = lm(pc4~condition.socaccount, data=df)$residuals shapiro.test(df$pc1.residuals) shapiro.test(df$pc2.residuals) shapiro.test(df$pc3.residuals) shapiro.test(df$pc4.residuals) ", "Label": "Statistical Test", "Source": "https://osf.io/qj86m/", "File": "9_manova_fda_socaccount.R" }, { "ID": 441, "Comment": "Only include tweets with at least 3 words", "Code": "final_twitter_data_ex <- final_twitter_data %>% filter(wc>=3) final_twitter_data_ex <- final_twitter_data_ex %>% filter(bryscore>=0.0001)", "Label": "Data Variable", "Source": "https://osf.io/qxwsz/", "File": "anxiety_abstraction_s2.R" }, { "ID": 442, "Comment": "Means and SDs in fear tweets and anxiety tweets", "Code": "final_twitter_data_ex %>% group_by(fearVSanx) %>% summarise_at(vars(bryscore,i,we,they,focusfuture), list(mean=mean, sd=sd)) ", "Label": "Data Variable", "Source": "https://osf.io/qxwsz/", "File": "anxiety_abstraction_s2.R" }, { "ID": 443, "Comment": "Independent-samples t-test: Brysbaert concreteness score as DV", "Code": "t.test_bci <- final_twitter_data_ex %>% rstatix::t_test(bryscore ~ fearVSanx, var.equal = TRUE, detailed=TRUE) %>% rstatix::add_significance() t.test_bci ", "Label": "Statistical Test", "Source": "https://osf.io/qxwsz/", "File": "anxiety_abstraction_s2.R" }, { "ID": 444, "Comment": "convert age from months to years", "Code": "demog.tab$interview_age <- demog.tab$interview_age / 12", "Label": "Data Variable", "Source": "https://osf.io/5y27d/", "File": "load_all_tables.R" }, { "ID": 445, "Comment": "multinomial CI (these are simultaneous CIs, although they assume full pooling)", "Code": "obs_p_CI <- DescTools::MultinomCI(pl_d$N, conf.level=1-0.05/2, method=\"goodman\") *4 pl_d$obs_p_lb <- obs_p_CI[,2] pl_d$obs_p_ub <- obs_p_CI[,3] pl_d %>% mutate(resp_true = ifelse(k>=4,1,0), fill_group = str_c(item, resp_true, type), line_group = str_c(item, type), item = factor(item, levels=c(\"true\",\"false\"))) %>% ggplot(aes(x=k,y=obs_p))+ facet_grid(item ~ type) + nice_theme + geom_col(aes(fill=fill_group))+ geom_errorbar(aes(y=obs_p,ymin=obs_p_lb, ymax=obs_p_ub),color=\"dark grey\",width=0,lwd=0.8)+ ", "Label": "Statistical Modeling", "Source": "https://osf.io/nd9yr/", "File": "make_fig1_main_text.R" },
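DescTools::MultinomCI (used in ID 445 with method = "goodman") returns simultaneous confidence intervals for multinomial cell proportions; a standalone example with made-up counts:

library(DescTools)
counts <- c(12, 30, 18, 40)  # hypothetical cell counts
MultinomCI(counts, conf.level = 0.95, method = "goodman")
# columns: estimated proportion, lower and upper simultaneous bounds

{ "ID": 446, "Comment": "M-ratio posterior density plot", "Code": "log_mu_sci <- unlist(c(output[,grep('mu_logMratio_1',varnames(output))])) log_mu_cov <- unlist(c(output[,grep('mu_logMratio_2',varnames(output))])) d_groupM <- data.frame(logM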
=c(log_mu_sci,log_mu_cov), type=c(rep('science',length(log_mu_sci)),rep('Covid-19',length(log_mu_cov)))) d_groupM %>% mutate(M=exp(logM)) %>% ggplot(aes(x=M,y=type,fill=type))+ nice_theme + geom_vline(xintercept = 1, lty=2,size=0.4)+ stat_halfeye(.width = c(.95, .95),aes(fill=type),alpha=0.8)+ ", "Label": "Visualization", "Source": "https://osf.io/nd9yr/", "File": "make_fig1_main_text.R" }, { "ID": 447, "Comment": "Convert `campaign_week` from a character string to a factor ordered chronologically (11 campaign weeks plus Election Day).", "Code": "Facebook_Users$campaign_week <- factor(Facebook_Users$campaign_week, levels = c(\"Week 1\", \"Week 2\", \"Week 3\", \"Week 4\", \"Week 5\", \"Week 6\", \"Week 7\", \"Week 8\", \"Week 9\", \"Week 10\", \"Week 11\", \"Election Day\")) ", "Label": "Data Variable", "Source": "https://osf.io/3fnjq/", "File": "facebook_users.R" }, { "ID": 448, "Comment": "Convert `candidate_page` from a character string to a factor with 3 levels. (Facilitates data visualization and data exploration.)", "Code": "Facebook_Users$candidate_page <- factor(Facebook_Users$candidate_page, levels = c (\"Harper\", \"Trudeau\", \"Mulcair\")) ", "Label": "Visualization", "Source": "https://osf.io/3fnjq/", "File": "facebook_users.R" }, { "ID": 449, "Comment": "Adds a new column to identify the Facebook page being `liked`.", "Code": "Harper_User_Likes$partisanship <- \"Conservative\" Trudeau_User_Likes$partisanship <- \"Liberal\" Mulcair_User_Likes$partisanship <- \"Social Democrat\" ", "Label": "Data Variable", "Source": "https://osf.io/3fnjq/", "File": "facebook_users.R" }, { "ID": 450, "Comment": "Match partisan assignment from Facebook_User_Likes to the Facebook Users Dataset using `user_id`.", "Code": "Facebook_Users$partisanship <- NA Facebook_Users$partisanship <- Facebook_User_Likes$partisanship[match(Facebook_Users$user_id, Facebook_User_Likes$user_id)] ", "Label": "Data Variable", "Source": "https://osf.io/3fnjq/", "File": "facebook_users.R" }, { "ID": 451, "Comment": "Convert `partisanship` from a character string to a factor with 3 levels. (Facilitates data visualization and data exploration.)", "Code": "Facebook_Users$partisanship <- factor(Facebook_Users$partisanship, levels = c (\"Conservative\", \"Liberal\", \"Social Democrat\")) ", "Label": "Data Variable", "Source": "https://osf.io/3fnjq/", "File": "facebook_users.R" }, { "ID": 452, "Comment": "split the data and convert the ratings to numeric", "Code": "ARTE <- as.numeric(data[which(data$TV.Station == 'ARTE'),]$Rating) Degeto <- as.numeric(data[which(data$TV.Station == 'Degeto Film'),]$Rating) ", "Label": "Data Variable", "Source": "https://osf.io/8fsbd/", "File": "IMDB_analysis.r" }, { "ID": 453, "Comment": "run the one-sided t-test with alpha = 0.1", "Code": "t.test(ARTE, Degeto, alternative = \"greater\", conf.level = 0.90) ", "Label": "Statistical Test", "Source": "https://osf.io/8fsbd/", "File": "IMDB_analysis.r" }, { "ID": 454, "Comment": "Create list of dataframes per country", "Code": "countries <- unique(data8$cntry_full) index = 0 listofdfs <- list()", "Label": "Data Variable", "Source": "https://osf.io/k853j/", "File": "ESS_openness_2016_perCountry.R" },
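The match() idiom in ID 450 is a base-R left join keyed on user_id; an equivalent dplyr version on toy data (frame and column names illustrative):

library(dplyr)
users <- data.frame(user_id = 1:4)
likes <- data.frame(user_id = c(2, 4), partisanship = c("Liberal", "Conservative"))
left_join(users, likes, by = "user_id")  # NA where no match, like match() with nomatch = NA

{ "ID": 455, "Comment": "Draw correlation plots", "Code": "char.col<-c(\"#228822\",\"#663388\",\"#006688\") groupn<-c(4,11,6,3) left<--8 top<-25 corrplot.mixed(cortabNB, upper = \"ellipse\",tl.pos=\"lt\",tl.col=rep(char.col,groupn[1:3]),number.cex = .7) text(left,top,\"Non-biological\\nfathers\",cex=1.2,pos=4) nc<-0.5 w<-16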
tiff(\"Figure_S3_corr1.tif\",width=w,height=2*w,units=\"cm\",res=600,compression=\"lzw\") par(mfrow=c(2,1)) corrplot.mixed(cortabNB, upper = \"ellipse\",tl.pos=\"lt\",number.cex = nc,tl.col=rep(char.col,groupn[1:3])) text(left,top,\"Non-biological\\nfathers\",cex=1.2,pos=4) corrplot.mixed(cortabPA, upper = \"ellipse\",tl.pos=\"lt\",number.cex = nc,tl.col=rep(char.col,groupn[1:3])) text(left,top,\"Partners\",cex=1.2,pos=4) dev.off() tiff(\"Figure_S4_corr2.tif\",width=w,height=2*w,units=\"cm\",res=600,compression=\"lzw\") par(mfrow=c(2,1)) corrplot.mixed(cortabLO, upper = \"ellipse\",tl.pos=\"lt\",number.cex = nc,tl.col=rep(char.col,groupn[1:3])) text(left,top,\"Sensitive\\nperiod\\nlog-odds\",cex=1.2,pos=4) corrplot.mixed(cortabD, upper = \"ellipse\",tl.pos=\"lt\",number.cex = nc,tl.col=rep(char.col,groupn[1:3])) text(left,top,\"Relative\\nsimilarity\\nt value\",cex=1.2,pos=4) dev.off() ", "Label": "Visualization", "Source": "https://osf.io/greqt/", "File": "07_correlation_structure.R" }, { "ID": 456, "Comment": " function to calculate proporiton of studies within each time period for continuous variables that were transformed in a cutoff x character string of the variable name character string of the name that will appear in the table time character string of the time period variable method either \"chisq\" or \"fisher\" depending on the type of the test wanted ", "Code": "CAT<-function(x, name, time, method) { tabtot <- table(DATA[,x]) sumtot <- sum(tabtot) tabtime <- table(DATA[,x], DATA[,time]) sumtime <- apply(tabtime, 2, sum) proptot <- tabtot[2]/sum(tabtot) CItot <- exactci(tabtot[2], sum(tabtot), conf.level = 0.95) proptime <- tabtime[2,]/sumtime CItime <- map2(tabtime[2,], sumtime, function(x, y){exactci(x, y, conf.level = 0.95)}) propCItime <- map2(proptime, CItime, function(x,y){paste0(round(x*100,0),\" (\", round(y[[\"conf.int\"]][1]*100,0),\"-\", round(y[[\"conf.int\"]][2]*100,0),\")\")}) if(method == \"fisher\"){ p <- fisher.test(tabtime) } else if(method == \"chisq\"){ p<-chisq.test(tabtime) } N_propCItime <- NULL for(i in 1:length(propCItime)){ N_propCItime <- c(N_propCItime, c(sumtime[i], propCItime[[i]])) } res=c(name, sumtot, paste0(round(proptot*100,0),\" (\",round(CItot$conf.int[1]*100,0),\"-\", round(CItot$conf.int[2]*100,0),\")\"), N_propCItime, round(p$p.value[1],4)) return(res) } ", "Label": "Statistical Test", "Source": "https://osf.io/cxv5k/", "File": "R_functions_Kimmoun_et_al_final.R" }, { "ID": 457, "Comment": "function to calculate overall effect size for death or readmission rates (output has results + model to do model checks) var character string of the variable to calculate the effect size for", "Code": "rate_ES <- function(var){ dat <- na.omit(DATA[,c(\"number_follow_up\", var)]) colnames(dat)<-c(\"Ni\",\"Ei\") dat_es <- escalc( xi= Ei, ni =Ni, data = dat, measure = \"PLO\", to=\"if0all\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/cxv5k/", "File": "R_functions_Kimmoun_et_al_final.R" }, { "ID": 458, "Comment": " function to get weighted logistic regression with continuous X variable and proportion Y variable and ggplot showing the predicted relationship x X variable as character y Y variable as character Xpred vector of X values to get predictions for labX X label as character labY Y label as character limY axis limits for Y axis, vector of 2 values title plot title as character ypos y relative position of text with OR and pvalue ", "Code": "wtlogis_plot <- function(NB, x, y, Xpred, labX, labY, limY, title, ypos, unit){ dat <- 
data.frame(count=as.integer(DATA[,y]), Ntot=DATA[,NB], t=DATA[,x]) %>% mutate(prop = count/Ntot, perc = prop*100, wt = log(Ntot), t_div = t/10) dat <- na.omit(dat) ", "Label": "Visualization", "Source": "https://osf.io/cxv5k/", "File": "R_functions_Kimmoun_et_al_final.R" }, { "ID": 459, "Comment": "model with time divided by 10 to get OR for 10-year increment", "Code": "timediv_mod <- glm(prop ~ t_div, family = \"binomial\", weights = wt, data = dat) ORdiv <- tidy(timediv_mod, conf.int = TRUE) %>% mutate(OR_CI = paste0(round(exp(estimate),2), \" (\", round(exp(conf.low),2),\"-\", round(exp(conf.high),2), \")\")) ", "Label": "Statistical Modeling", "Source": "https://osf.io/cxv5k/", "File": "R_functions_Kimmoun_et_al_final.R" }, { "ID": 460, "Comment": " function to get weighted linear regression with continuous X variable and continuous Y variable, and ggplot showing the predicted relationship. x: X variable as character; y: Y variable as character; Xpred: vector of X values to get predictions for; labX: X label as character; labY: Y label as character; limY: axis limits for Y axis, vector of 2 values; title: plot title as character; ypos: relative y position of text with OR and p-value ", "Code": "wtlin_plot <- function(NB, x, y, Xpred, labX, labY, limY, title, ypos, unit){ dat <- data.frame(y=as.integer(DATA[,y]), Ntot=DATA[,NB], t=DATA[,x]) %>% mutate(wt = log(Ntot), t_div = t/10) dat <- na.omit(dat) ", "Label": "Statistical Modeling", "Source": "https://osf.io/cxv5k/", "File": "R_functions_Kimmoun_et_al_final.R" }, { "ID": 461, "Comment": "function to get OR and CI from the model with 10-year increment. obj: model with x variable divided by 10; p: p-value from the LRT", "Code": "OR_div <- function(obj, p){ OR_CI <- paste0(round(exp(obj$beta[2]),2), \" (\", round(exp(obj$ci.lb[2]),2),\"-\", round(exp(obj$ci.ub[2]),2), \")\") pval_clean <- ifelse(p[[\"pval\"]] < 0.001, \"p<0.001\", paste0(\"p=\",round(p[[\"pval\"]],3))) OR_p <- c(OR_CI, p, pval_clean) return(OR_p) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/cxv5k/", "File": "R_functions_Kimmoun_et_al_final.R" }, { "ID": 462, "Comment": " function to get metaregression model with continuous X variable and proportion Y variable, and ggplot showing the predicted relationship. x: X variable as character; y: Y variable as character; Xpred: vector of X values to get predictions for; labX: X label as character; labY: Y label as character; limY: axis limits for Y axis, vector of 2 values; title: plot title as character; ypos: relative y position of text with OR and p-value ", "Code": "MA_prop_plot <- function(NB, x, y, Xpred, labX, labY, limY, title, ypos){ prop_dat <- na.omit(DATA[,c(NB, y, x)]) colnames(prop_dat)<-c(\"Ni\",\"Ei\",\"X\") prop_dat$prop <- (prop_dat$Ei/prop_dat$Ni) * 100 prop_dat$X_div <- prop_dat$X/10 es_prop_dat <- escalc( xi= Ei, ni =Ni, data = prop_dat, measure = \"PLO\", to=\"if0all\") ", "Label": "Visualization", "Source": "https://osf.io/cxv5k/", "File": "R_functions_Kimmoun_et_al_final.R" },
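Dividing the predictor by 10 (the t_div/X_div pattern used throughout these functions) rescales a logistic coefficient so that exp(beta) is the odds ratio per 10-unit, here 10-year, increment; a self-contained sketch on simulated data:

set.seed(2)
year <- runif(200, 1990, 2020)
p <- plogis(-2 + 0.05 * (year - 1990))  # true log-odds slope: 0.05 per year
yout <- rbinom(200, 1, p)
fit <- glm(yout ~ I(year / 10), family = binomial)
exp(coef(fit)[2])  # OR per 10-year increment, i.e. exp(10 * per-year coefficient)

{ "ID": 463, "Comment": "function to get OR for subgroup variables with 2 categories. x: character string for the x variable; y: character string for the y variable; group: character string for the group variable; newref: character string for the level of the group variable that is not the reference level", "Code": "OR_group_fun <- function(x, y, group, newref){ OR_data <- data.frame(Group = rep(NA,2), N = rep(NA,2), OR = rep(NA,2), lowCI = rep(NA,2), upCI = rep(NA,2), p = rep(NA,2), I2 = rep(NA,2)) group_dat <- na.omit(DATA[,c(\"number_follow_up\", y, x, group)])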
colnames(group_dat)<-c(\"Ni\",\"Ei\",\"X\",\"group\") group_dat$X_div <- group_dat$X/10 OR_data$Group <- levels(group_dat$group) OR_data$N <- as.integer(table(group_dat$group)) es_group_dat <- escalc( xi= Ei, ni =Ni, data = group_dat, measure = \"PLO\", to=\"if0all\") inter_mod1 <- rma(yi=yi,mods = ~ X_div * group, vi=vi, data=es_group_dat, method = \"REML\") inter_mod2 <- rma(yi=yi,mods = ~ X_div * I(relevel(group, newref)) , vi=vi, data=es_group_dat, method = \"REML\") ", "Label": "Data Variable", "Source": "https://osf.io/cxv5k/", "File": "R_functions_Kimmoun_et_al_final.R" }, { "ID": 464, "Comment": " function to get metaregression model with continuous X1 and X2 variables, 2 group variables, and proportion Y variable, and ggplot showing the predicted relationship with x1 for a median value of x2. x1: X1 variable as character; x2: X2 variable as character; group1: group1 variable (2-level factor) as character; group2: group2 variable (2-level factor) as character; y: Y variable as character; x1pred: vector of values for which to predict x1; x2val: median value of X2 for which to get predictions; labX: X label as character; labY: Y label as character; limY: axis limits for Y axis, vector of 2 values; title: plot title as character; ypos: relative y position for text of OR and p-value ", "Code": "MA_prop_plot_adj <- function(NB, x1, x2, group1, group2, y, x1pred, x2val, labX, labY, limY, title, ypos){ prop_dat <- na.omit(DATA[,c(NB, y, x1, x2, group1, group2)]) colnames(prop_dat)<-c(\"Ni\",\"Ei\",\"X1\", \"X2\", \"group1\", \"group2\") prop_dat$prop <- (prop_dat$Ei/prop_dat$Ni) * 100 prop_dat$X1_div <- prop_dat$X1/10 es_prop_dat <- escalc( xi= Ei, ni =Ni, data = prop_dat, measure = \"PLO\", to=\"if0all\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/cxv5k/", "File": "R_functions_Kimmoun_et_al_final.R" }, { "ID": 465, "Comment": " rayleigh_test Rayleigh test for the uniformity of directional data, as described by Mardia et al. (1979: chapter 15) and Mardia & Jupp (1999). Note that there would probably be better alternatives on CRAN ", "Code": "rayleigh_test <- function(X, correction = TRUE, check = TRUE, convert = TRUE) { if(check) { if(!isTRUE(all.equal(diag(tcrossprod(X)), rep(1, nrow(X))))) { if(convert) { X <- X / sqrt(diag(tcrossprod(X))) warning(\"X was converted into directional data\") } else { stop(\"Directional data (in coordinates) are assumed as X\") } } } p <- ncol(X) n <- nrow(X) Mean <- colMeans(X) Norm_Mean_2 <- drop(crossprod(Mean)) S <- p * n * Norm_Mean_2 if(correction) { S <- S * (1 - 1 / 2 / n) + S ^ 2 / (2 * n * (p + 2)) } P_value <- pchisq(S, p, lower.tail = FALSE) list(Norm_Mean = sqrt(Norm_Mean_2), n = n, p = p, Statistics = S, P_value = P_value) } ", "Label": "Statistical Test", "Source": "https://osf.io/6ukwg/", "File": "utility_functions.R" }, { "ID": 466, "Comment": "Compute ICC: run null/unconditional model and use intercept and residual variances to solve for ICC (i.e., Intercept Variance / (Intercept Variance + Residual Variance)) ", "Code": "Model.Null <- lmer(VerbPhysAggSum ~ 1 + (1 | ID), data = LineBisectionStudy) summary(Model.Null) ICC <- (18.69/(18.69+7.76)) print(ICC) ", "Label": "Statistical Modeling", "Source": "https://osf.io/vprwb/", "File": "MLM--lmerTest--JPNpsyUpdated.R" }, { "ID": 467, "Comment": "Function to extract legend (from: https://gist.github.com/crsh/be88be19233f1df4542aca900501f0fb#file-gglegend-r-L7), otherwise plots without tracks would have no legend", "Code": "gglegend <- function(x){ tmp <- ggplot_gtable(ggplot_build(x)) leg <- which(sapply(tmp$grobs, function(y) y$name) == \"guide-box\") tmp$grobs[[leg]] } legend = gglegend(gi) ", "Label": "Visualization", "Source": "https://osf.io/amd3r/", "File": "D_Wind_support_and_track_animation.R" }, { "ID": 468, "Comment": "define function doing the bootstrap:", "Code": "boot.fun<-function(x, xcall., data., rv.name., m., discard.non.conv., save.path., extract.all.){ xdone=F while(!xdone){ done2=F while(!done2){ data.[, rv.name.]=simulate(object=m.)[, 1] if(xcall[[\"xfam\"]][[\"family\"]]!=\"beta\" | (xcall[[\"xfam\"]][[\"family\"]]==\"beta\" & min(data.[, rv.name.])>0 & max(data.[, rv.name.])<1)){ done2=T } } i.res=try(update(m., data=data.), silent=T) ", "Label": "Data Variable", "Source": "https://osf.io/vjeb3/", "File": "boot_glmm.r" }, { "ID": 469, "Comment": "prepare model frame for conditional model for prediction:", "Code": "model=attr(terms(as.formula(xcall[[\"cond.form\"]])), \"term.labels\") model=model[!grepl(x=model, pattern=\"|\", fixed=T)] if(length(model)==0){model=\"1\"} cond.m.mat=model.matrix(object=as.formula(paste(c(\"~\", paste(model, collapse=\"+\")), collapse=\"\")), data=new.data) if(set.circ.var.to.zero){ cond.m.mat[,paste(c(\"sin(\", circ.var.name, \")\"), collapse=\"\")]=0 cond.m.mat[,paste(c(\"cos(\", circ.var.name, \")\"), collapse=\"\")]=0 } ", "Label": "Statistical Modeling", "Source": "https://osf.io/vjeb3/", "File": "boot_glmm.r" },
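A quick sanity check of rayleigh_test above on uniformly distributed directions, where the test should typically not reject; the unit-vector construction is mine:

set.seed(3)
X <- matrix(rnorm(200 * 3), ncol = 3)  # 200 random 3-D directions
X <- X / sqrt(rowSums(X^2))            # normalize rows to unit length
rayleigh_test(X)$P_value               # usually large under uniformity

{ "ID": 470, "Comment": "Function that draws any bell curve in the form and shape of the ones used in the paper", "Code": "docurve<-function( sd=2, dist=1, xcent=10, ycent=0.6, paroff=0.15, offoff=0.1, percoff=0.06, percoff2=0.03, thresh.vis=0.01, bellims=15, botlims=8, pardown=0.01, xoff=0, par.col=\"#0097BD\", off.col=\"#FF0066\", off.in.col=\"#FF0066\", off.out.col=\"#FF6600\", fading=\"33\", col.bot=\"#555555\", lwd.par=1.2, lwd.bot=1.2){ off.in.col2<-paste(off.in.col,fading,sep=\"\") off.out.col2<-paste(off.out.col,fading,sep=\"\") p1<-xcent-dist/2 p2<-xcent+dist/2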
c1<-gen.curve(sd,average=0,bellims=bellims) polygon(replast(c1[[1]][c1[[1]]<(-dist/2)])+xcent,c(c1[[2]][c1[[1]]<(-dist/2)],0)+ycent,col=off.out.col2,border=NA) polygon(repfirst(c1[[1]][c1[[1]]>(dist/2)])+xcent,c(0,c1[[2]][c1[[1]]>(dist/2)])+ycent,col=off.out.col2,border=NA) polygon(repboth(c1[[1]][c1[[1]]>=(-dist/2)&c1[[1]]<=(dist/2)])+xcent,c(0,c1[[2]][c1[[1]]>=(-dist/2)&c1[[1]]<=(dist/2)],0)+ycent,col=off.in.col2,border=NA) lines(c(xcent-botlims,xcent+botlims),c(ycent,ycent),col=col.bot,lwd=lwd.bot) lines(c(p1,p1),c(ycent-pardown,ycent+paroff),col=par.col,lwd=lwd.par) lines(c(p2,p2),c(ycent-pardown,ycent+paroff),col=par.col,lwd=lwd.par) outprob<-sum(c1[[2]][c1[[1]]>dist/2])/sum(c1[[2]]) inprob<-sum(c1[[2]][c1[[1]]<=dist/2&c1[[1]]>=-dist/2])/sum(c1[[2]]) offtail<-mean(c(c1[[1]][c1[[1]]>dist/2][1], c1[[1]][c1[[1]]>dist/2][!(c1[[2]][c1[[1]]>dist/2]>thresh.vis)][1])) text(xcent,ycent+percoff,paste(round(inprob*100),\"%\",sep=\"\"),col=off.in.col,cex=0.8) text(c(xcent-offtail,xcent+offtail)+c(-1,1)*xoff,ycent+percoff2,paste(round(outprob*100),\"%\",sep=\"\"),col=off.out.col,cex=0.8) text(c(p1,p2),ycent+paroff,c(expression(t[p[1]]),expression(t[p[2]])),pos=3,col=par.col,offset=0.15) } ", "Label": "Visualization", "Source": "https://osf.io/pvyhe/", "File": "Figure1.R" }, { "ID": 471, "Comment": "log-likelihood for each subject using their mean parameter vector", "Code": "mean_pars_ll <- numeric(ncol(mean_pars)) data <- transform(sampled$data, subject = match(subject, unique(subject))) for (j in 1:nsubj) { mean_pars_ll[j] <- sampled$ll_func(mean_pars[j, ], data = data[data$subject == j,], sample = FALSE) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/tbczv/", "File": "pmwgDIC.r" }, { "ID": 472, "Comment": "Create the plots and populate them. The yaxt = \"n\" argument serves to remove axis labels from the y-axis, while the axis function serves to add axis labels in line with APS standards for graph publication. The lines and points functions add the connecting lines and symbols to the graphs. We repeat the process three times to create the three graphs corresponding to different values of tau.
", "Code": "plot(k, end.data.cov[ ,1], type = \"l\", ylab = \"Coverage\", main = \"A: Tau = 0.2\", yaxt = \"n\", ylim = c(0.9, 1),xlab = expression(\"Number of Studies \" (italic(k)))) axis(side = 2, at = c(0.92,0.94,0.96,0.98,1),labels = c(\".92\",\".94\",\".96\",\".98\",\"1.0\")) points(k, end.data.cov[ ,1], pch = 0) lines(k, end.data.cov[ ,2]) points(k, end.data.cov[ ,2], pch = 3) lines(k, end.data.cov[ ,7]) points(k, end.data.cov[ ,7], pch = 5) lines(k, end.data.cov[ ,14]) points(k, end.data.cov[ ,14], pch = 4) legend(\"bottomright\", c(\"bWT-DL\",\"bWT-REML\", \"bWT-HE\", \"Sub-Q\"), pch = c(0,5,4,3), cex = 0.9, ncol = 2) plot(k, end.data.cov[ ,3], type = \"l\", ylab = \"Coverage\", main = \"B: Tau = 0.4\", yaxt = \"n\", ylim = c(0.9, 1),xlab = expression(\"Number of Studies \" (italic(k)))) axis(side = 2, at = c(0.92,0.94,0.96,0.98,1),labels = c(\".92\",\".94\",\".96\",\".98\",\"1.0\")) points(k, end.data.cov[ ,3], pch = 0) lines(k, end.data.cov[ ,4]) points(k, end.data.cov[ ,4], pch = 3) lines(k, end.data.cov[ ,9]) points(k, end.data.cov[ ,9], pch = 5) lines(k, end.data.cov[ ,17]) points(k, end.data.cov[ ,17], pch = 4) legend(\"bottomright\", c(\"bWT-DL\",\"bWT-REML\", \"bWT-HE\", \"Sub-Q\"), pch = c(0,5,4,3), cex = 0.9, ncol = 2) plot(k, end.data.cov[ ,5], type = \"l\", ylab = \"Coverage\", main = \"C: Tau = 0.6\", yaxt = \"n\", ylim = c(0.9, 1),xlab = expression(\"Number of Studies \" (italic(k)))) axis(side = 2, at = c(0.92,0.94,0.96,0.98,1),labels = c(\".92\",\".94\",\".96\",\".98\",\"1.0\")) points(k, end.data.cov[ ,5], pch = 0) lines(k, end.data.cov[ ,6]) points(k, end.data.cov[ ,6], pch = 3) lines(k, end.data.cov[ ,11]) points(k, end.data.cov[ ,11], pch = 5) lines(k, end.data.cov[ ,20]) points(k, end.data.cov[ ,20], pch = 4) legend(\"topright\", c(\"bWT-DL\",\"bWT-REML\", \"bWT-HE\", \"Sub-Q\"), pch = c(0,5,4,3), cex = 0.9, ncol = 2) ", "Label": "Visualization", "Source": "https://osf.io/gwn4y/", "File": "Line_plots.R" }, { "ID": 473, "Comment": "Define the initial column name for x/y coordinates you want to use", "Code": "xy_columns = list( x = \"GazePointX (ADCSpx)\", y = \"GazePointY (ADCSpx)\" ), ", "Label": "Data Variable", "Source": "https://osf.io/mp9td/", "File": "interface.R" }, { "ID": 474, "Comment": "compute cutoff for guessing by taking the 99% quantile from the binomial distribution (given by guessing probability and number of tests)", "Code": "mutate(guessing_probability = 1/19, cut_off = qbinom(p = .99, size = number_tests, prob = guessing_probability)/number_tests, guessing_check = ifelse(mean_acc > cut_off, TRUE, FALSE)) %>% group_by(participant_id, condition) %>% summarize(guessing_check = as.logical(min(guessing_check))) %>% ungroup() ", "Label": "Statistical Modeling", "Source": "https://osf.io/dpkyb/", "File": "data-processing.R" }, { "ID": 475, "Comment": "split the data into different data frames depending on the intended analysis create dataframe with data on the hebb effect for the main analysis of the learning data", "Code": "data_hebb_task = data_filtered %>% filter(phase == \"WM\") %>% select(participant_id, condition, phase, block, trial_number, hebb_trial, presentation_order:duration) %>% ungroup() ", "Label": "Data Variable", "Source": "https://osf.io/dpkyb/", "File": "data-processing.R" }, { "ID": 476, "Comment": "check cluster agreement using hierarchical clustering on raw items instead of factors", "Code": "HCheck.HClusterD <- dist(EFA.mainData[, c(2:56)], method = \"euclidean\") HCheck.HClusterFit <- hclust(HCheck.HClusterD, 
method=\"ward.D2\") HCheck.HCluster <- cutree(HCheck.HClusterFit, k = 3) HCheckCluster <- data.frame(Factor = HCluster, Item = HCheck.HCluster) HCheckCluster <- cbind(HCheckCluster, ifelse(HCheckCluster[1] == HCheckCluster[2], 1, 0)) colnames(HCheckCluster)[3] <- 'Agreement' HCheckClusterAgreement <- HCheckCluster[, c(2:3)] %>% group_by(Item) %>% summarise(total = n(), n_agree = sum(Agreement)) %>% rename(Cluster = Item) ", "Label": "Statistical Modeling", "Source": "https://osf.io/2j47e/", "File": "Cluster analysis refined factor scores.R" }, { "ID": 477, "Comment": "create dataframe with factor scores and cluster membership", "Code": "KClusterScores <- cbind(EFA.mainData[, c(111:116)], KClusterFit$cluster) colnames(KClusterScores)[7] <- \"KCluster\" ", "Label": "Data Variable", "Source": "https://osf.io/2j47e/", "File": "Cluster analysis refined factor scores.R" }, { "ID": 478, "Comment": "testing homogeneity of variance in factors", "Code": "KClusterScores %>% pivot_longer(c(Sensory, CognitiveDemand, ThreatToSelf, CrossSettings, Safety, States), names_to = c(\"Factor\")) %>% group_by(Factor) %>% levene_test(value ~ as.factor(KCluster)) ", "Label": "Statistical Test", "Source": "https://osf.io/2j47e/", "File": "Cluster analysis refined factor scores.R" }, { "ID": 479, "Comment": "univariate Welch's ANOVA test with Bonferroni correction", "Code": "table1 <- KClusterScores %>% pivot_longer(c(Sensory, CognitiveDemand, ThreatToSelf, CrossSettings, Safety, States), names_to = c(\"Factor\")) %>% mutate(Factor = factor(Factor, levels = c(\"Sensory\", \"CognitiveDemand\", \"ThreatToSelf\", \"CrossSettings\", \"Safety\", \"States\"))) %>% group_by(Factor) %>% welch_anova_test(value ~ as.factor(KCluster)) %>% adjust_pvalue(method = \"bonferroni\") ", "Label": "Statistical Test", "Source": "https://osf.io/2j47e/", "File": "Cluster analysis refined factor scores.R" }, { "ID": 480, "Comment": "manually calculating omega squared and adjusted confidence intervals for each test", "Code": "table1$omegasq <- apply(table1, 1, function(x) omega.F(as.numeric(x[5]), as.numeric(x[6]), as.numeric(x[4]), as.numeric(x[3]), 0.05/6)$omega) table1$omegalow <- apply(table1, 1, function(x) omega.F(as.numeric(x[5]), as.numeric(x[6]), as.numeric(x[4]), as.numeric(x[3]), 0.05/6)$omegalow) table1$omegahigh <- apply(table1, 1, function(x) omega.F(as.numeric(x[5]), as.numeric(x[6]), as.numeric(x[4]), as.numeric(x[3]), 0.05/6)$omegahigh) ", "Label": "Statistical Test", "Source": "https://osf.io/2j47e/", "File": "Cluster analysis refined factor scores.R" }, { "ID": 481, "Comment": "Linear mixed effect model (accuracy)", "Code": "LDTnonword_ACC_LME = glmer(accuracy ~ DISHARc + (1+DISHARc|item) + (1|subject), data = byTrial, family=binomial, control=glmerControl(optimizer=\"bobyqa\", optCtrl=list(maxfun=2e5))) summary(LDTnonword_ACC_LME) ", "Label": "Statistical Modeling", "Source": "https://osf.io/gztxa/", "File": "Vowel_Harmony_LDT_Exp3.R" }, { "ID": 482, "Comment": "Estimate d from t and df", "Code": "dest <-function (t,df) { r2 <- (t^2) / (t^2 + df) d.est <- (2*(r2^.5)) / (1-r2) } ", "Label": "Data Variable", "Source": "https://osf.io/he8mu/", "File": "Custom_Functions.R" }, { "ID": 483, "Comment": "Stimuli categorized according to a multinomial distribution", "Code": "y[i,] ~ dmulti(r[i,], t[i]) predy[i,1:ncat] ~ dmulti(r[i,], t[i]) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/hrf5t/", "File": "Prototype.R" }, { "ID": 484, "Comment": "Denominator is just the sum of the numerator values", "Code": 
"denominator[i] = sum(numerator[i,]) } ", "Label": "Data Variable", "Source": "https://osf.io/hrf5t/", "File": "Prototype.R" }, { "ID": 485, "Comment": "annotate lines so that each has a unique id for ggplot overplotting (else two lines from the same draw but different replicates can get confused with each other)", "Code": "func_samples_surface <- func_samples_surface %>% mutate(line_id = as.numeric(rownames(func_samples_surface))) func_samples_aer <- func_samples_aer %>% mutate(line_id = as.numeric(rownames(func_samples_aer)))", "Label": "Visualization", "Source": "https://osf.io/fb5tw/", "File": "figure_main.R" }, { "ID": 486, "Comment": "cross product decay_rates with x (time) values and calculate y (titer) values", "Code": "cat('setting up x values...\\n') to_plot_surface <- func_samples_surface %>% crossing(surface_plot_times) to_plot_aer <- func_samples_aer %>% crossing(aer_plot_times) to_plot_surface <- to_plot_surface %>% mutate(predicted_titer = 10^(intercept - decay_rate * time)) to_plot_aer <- to_plot_aer %>% mutate(predicted_titer = convert_mL_media_to_L_air * 10^(intercept - decay_rate * time)) max_nonzero_time <- to_plot_surface %>% filter(log10(predicted_titer) > lowest_log_titer) %>% select(time) %>% max() surface_xlim <- c(0, max_nonzero_time) aer_xlim <- c(0, aer_max_x) print(aer_xlim) aer_jitwid <- 3/100 fit_panel_surface <- to_plot_surface %>% ggplot(aes( x = time, y = predicted_titer, color = virus, group = line_id)) + geom_line(alpha = line_alpha, size = line_size) + scale_colour_manual(values = unlist(virus_colors)) + geom_point(aes(x = time, y = 10^(log10_titer), group = trial_unique_id), data = surface_dat, color = pointborder, fill = pointfill, alpha = pointalpha, size = pointsize, stroke = pointstroke, position = position_jitter( width = jitwid, height = jith, seed = 5)) + geom_hline( data = experiment_dat_virus_surface, aes(yintercept = detection_limit), linetype = detection_linestyle, size = detection_linesize) + scale_y_continuous(trans = ytrans, breaks = ybreaks, labels = yformat) + coord_cartesian(ylim = surface_ylim, xlim = surface_xlim) + facet_grid(vars(virus), vars(material), drop = TRUE) ", "Label": "Data Variable", "Source": "https://osf.io/fb5tw/", "File": "figure_main.R" }, { "ID": 487, "Comment": " group adjustment for sigma with prior 0,0.1 for the beta ", "Code": "ACT.BF.sigma.prior01 <- stan(data=ACT.data,file=\"./BayesFactor/models/ACT/ACT.BF.sigma.prior01.stan\", iter=40000, warmup =1000, chains=3,control = list(adapt_delta = 0.9)) ACT.BF.sigma.prior01.bridge <- bridge_sampler(ACT.BF.sigma.prior01) saveRDS(ACT.BF.sigma.prior01.bridge, \"./BayesFactor/marginal_lik/ACT/ACT.BF.sigma.prior01.rds\") DA.BF.sigma.prior01 <- stan(data=DA.standata,file=\"./BayesFactor/models/DA/DA.BF.sigma01.stan\", iter=40000, warmup =1000, chains=3) DA.BF.sigma.prior01.bridge <- bridge_sampler(DA.BF.sigma.prior01) saveRDS(DA.BF.sigma.prior01.bridge, \"./BayesFactor/marginal_lik/DA/DA.BF.sigma.prior01.rds\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/kdjqz/", "File": "BF_LissonEtAl2020.R" }, { "ID": 488, "Comment": "increase in rsq when using elastic net with items instead of linear regression with sum scores (without covariates)", "Code": "tab_delta_rsq <- merge(tab[learner.id == \"elastic net\" & predictors == \"items\" & control == \"excluded\", .(elasticnet_items = rsq.test.mean), by = c(\"target\", \"design\")], tab[learner.id == \"linear regr\" & predictors == \"sum scores\" & control == \"excluded\", .(linearregr_sumscores = rsq.test.mean), by 
= c(\"target\", \"design\")]) tab_delta_rsq[, .(mean_delta_rsq = mean(elasticnet_items - linearregr_sumscores)), by = \"design\"] ", "Label": "Statistical Modeling", "Source": "https://osf.io/t7a28/", "File": "collect_results.R" }, { "ID": 489, "Comment": "Calculate Cohen's d_z", "Code": "return_Cohen_d_z <- function(variable_1, variable_2){ mean_differences <- mean(variable_1) - mean(variable_2) sd_var1 <- sd(variable_1) sd_var2 <- sd(variable_2) cor_var1_var2 <- cor(variable_1, variable_2) Cohen_d_z <- abs(mean_differences) / sqrt( (sd_var1 ^ 2) + (sd_var2 ^ 2) - (2 * sd_var1 * sd_var2 * cor_var1_var2) ) return(Cohen_d_z) } ", "Label": "Statistical Test", "Source": "https://osf.io/5te7n/", "File": "return_effect_sizes.R" }, { "ID": 490, "Comment": "visualizing the data add +scale_x_discrete(guide guide_axis(angle 45)) to the plot to change the angle of the lables on the X axis", "Code": "ggplot(mydata_p6_la, aes(x = Condition, y = Voltage)) + geom_boxplot() + facet_grid(.~ Specificity,)+ scale_x_discrete(guide = guide_axis(angle = 45)) ggplot(mydata_p6_ra, aes(x = Condition, y = Voltage)) + geom_boxplot() + facet_grid(.~ Specificity,)+ scale_x_discrete(guide = guide_axis(angle = 45)) ggplot(mydata_p6_lp, aes(x = Condition, y = Voltage)) + geom_boxplot() + facet_grid(.~ Specificity,)+ scale_x_discrete(guide = guide_axis(angle = 45)) ggplot(mydata_p6_rp, aes(x = Condition, y = Voltage)) + geom_boxplot() + facet_grid(.~ Specificity,)+ scale_x_discrete(guide = guide_axis(angle = 45)) ", "Label": "Visualization", "Source": "https://osf.io/p7zwr/", "File": "P600.R" }, { "ID": 491, "Comment": "fill output matrix with the calculated social measure", "Code": "socialmeasures[,3] <- measure1 socialmeasures[,4] <- measure2 socialmeasures[,5] <- measure3 socialmeasures[,6] <- measure4 socialmeasures[,7] <- measure5 socialmeasures[,8] <- measure6 socialmeasures[,10] <- measure8 socialmeasures[,11] <- measure9 socialmeasures[,12] <- measure10 socialmeasures[,3] <- measure1 socialmeasures[,4] <- measure2 socialmeasures[,5] <- measure3 socialmeasures[,6] <- measure4 socialmeasures[,7] <- measure5 socialmeasures[,8] <- measure6 socialmeasures[,10] <- measure8 socialmeasures[,11] <- measure9 socialmeasures[,12] <- measure10 ", "Label": "Data Variable", "Source": "https://osf.io/wc3nq/", "File": "2) soc_measures_code.R" }, { "ID": 492, "Comment": "7 calculate the AVERAGE SHORTEST PATH of each node of the matrix", "Code": "measure7 <- mean_distance(graphN, directed = TRUE, unconnected = TRUE) print(measure7) measure7 <- mean_distance(graphN, directed = TRUE, unconnected = TRUE) print(measure7) ", "Label": "Statistical Modeling", "Source": "https://osf.io/wc3nq/", "File": "2) soc_measures_code.R" }, { "ID": 493, "Comment": " Add Age Bins This acts to help preserve privacy and allows easy plotting: e.g., ggplot(Data, aes(x Agebins) ) + geom_bar() ", "Code": "Agebin = c('< 20', '20-29', '30-39','40-49','50-59','60-69','70-79','80+') Data %<>% mutate(Agebins = case_when( Age < 20 ~ Agebin[1], Age >= 20 & Age < 30 ~ Agebin[2], Age >= 30 & Age < 40 ~ Agebin[3], Age >= 40 & Age < 50 ~ Agebin[4], Age >= 50 & Age < 60 ~ Agebin[5], Age >= 60 & Age < 70 ~ Agebin[6], Age >= 70 & Age < 80 ~ Agebin[7], Age >= 80 ~ Agebin[8] ) ) %>% apply_labels(Agebins = c('< 20' = 1, '20-29' = 2, '30-39' = 3, '40-49' = 4,'50-59' = 5,'60-69' = 6, '70-79' = 7,'80+' = 8)) Agebin = c('< 20', '20-29', '30-39','40-49','50-59','60-69','70-79','80+') Data %<>% mutate(Agebins = case_when( Age < 20 ~ Agebin[1], Age >= 20 & Age < 30 ~ Agebin[2], 
", "Label": "Statistical Test", "Source": "https://osf.io/5te7n/", "File": "return_effect_sizes.R" }, { "ID": 490, "Comment": "visualizing the data: add + scale_x_discrete(guide = guide_axis(angle = 45)) to the plot to change the angle of the labels on the x-axis", "Code": "ggplot(mydata_p6_la, aes(x = Condition, y = Voltage)) + geom_boxplot() + facet_grid(. ~ Specificity) + scale_x_discrete(guide = guide_axis(angle = 45)) ggplot(mydata_p6_ra, aes(x = Condition, y = Voltage)) + geom_boxplot() + facet_grid(. ~ Specificity) + scale_x_discrete(guide = guide_axis(angle = 45)) ggplot(mydata_p6_lp, aes(x = Condition, y = Voltage)) + geom_boxplot() + facet_grid(. ~ Specificity) + scale_x_discrete(guide = guide_axis(angle = 45)) ggplot(mydata_p6_rp, aes(x = Condition, y = Voltage)) + geom_boxplot() + facet_grid(. ~ Specificity) + scale_x_discrete(guide = guide_axis(angle = 45)) ", "Label": "Visualization", "Source": "https://osf.io/p7zwr/", "File": "P600.R" }, { "ID": 491, "Comment": "fill output matrix with the calculated social measures", "Code": "socialmeasures[,3] <- measure1 socialmeasures[,4] <- measure2 socialmeasures[,5] <- measure3 socialmeasures[,6] <- measure4 socialmeasures[,7] <- measure5 socialmeasures[,8] <- measure6 socialmeasures[,10] <- measure8 socialmeasures[,11] <- measure9 socialmeasures[,12] <- measure10 ", "Label": "Data Variable", "Source": "https://osf.io/wc3nq/", "File": "2) soc_measures_code.R" }, { "ID": 492, "Comment": "7) calculate the AVERAGE SHORTEST PATH of the graph", "Code": "measure7 <- mean_distance(graphN, directed = TRUE, unconnected = TRUE) print(measure7) ", "Label": "Statistical Modeling", "Source": "https://osf.io/wc3nq/", "File": "2) soc_measures_code.R" }, { "ID": 493, "Comment": " Add Age Bins This acts to help preserve privacy and allows easy plotting: e.g., ggplot(Data, aes(x = Agebins)) + geom_bar() ", "Code": "Agebin = c('< 20', '20-29', '30-39','40-49','50-59','60-69','70-79','80+') Data %<>% mutate(Agebins = case_when( Age < 20 ~ Agebin[1], Age >= 20 & Age < 30 ~ Agebin[2], Age >= 30 & Age < 40 ~ Agebin[3], Age >= 40 & Age < 50 ~ Agebin[4], Age >= 50 & Age < 60 ~ Agebin[5], Age >= 60 & Age < 70 ~ Agebin[6], Age >= 70 & Age < 80 ~ Agebin[7], Age >= 80 ~ Agebin[8] ) ) %>% apply_labels(Agebins = c('< 20' = 1, '20-29' = 2, '30-39' = 3, '40-49' = 4,'50-59' = 5,'60-69' = 6, '70-79' = 7,'80+' = 8)) ", "Label": "Visualization", "Source": "https://osf.io/sw7rq/", "File": "Functions.R" }, { "ID": 494, "Comment": "Chi-squared Gender: create dataframe of gender and cluster membership, then exclude participants with 'Other' as response to analyse with chi-squared test ", "Code": "data.OtherExcluded <- demographics %>% select(Gender, KCluster) %>% filter(Gender != 'Other') data.OtherExcluded$Gender <- droplevels(data.OtherExcluded$Gender) ", "Label": "Statistical Test", "Source": "https://osf.io/2j47e/", "File": "Demographics.R" }, { "ID": 495, "Comment": "post-hoc pairwise chi-squared with Bonferroni correction", "Code": "pairwiseNominalIndependence(ASD.contable, compare = \"row\", fisher = FALSE, gtest = FALSE, chisq = TRUE, method = \"bonferroni\", digits = 3) ", "Label": "Statistical Test", "Source": "https://osf.io/2j47e/", "File": "Demographics.R" }, { "ID": 496, "Comment": "compile parameter estimates and fit of population ODE/SDE models", "Code": "modt <- readRDS(\"PSM_transits_ODE.RDS\") mods <- readRDS(\"PSM_population_ODE.RDS\") i <- length(mods) pars <- c(round(modt[[4]]$THETA,1), \"OMEGA_stress\"=1.5, \"OMEGA_ke\"=.05, \"OMEGA_kt\"=.15, \"OMEGA_init\"=.15) pars[c(\"init\",\"sigma\")] <- c(.001,.001) parA <- list(LB=pars*.2, Init=pars, UB=pars*2.5) #bounds + inits npars <- Vectorize(function(x) sum(parA$Init != round(mods[[x]]$THETA,3)))(1:i) res <- Vectorize(function(x) round(mods[[x]]$THETA,3))(1:i) #parameter estimates for each fitted model res <- rbind(res, \"Mtt\"= Vectorize(function(x) round(4/mods[[x]]$THETA[\"kt\"],3))(1:i)) #add mean transit time for each model res <- rbind(res, \"LL\"= -Vectorize(function(x) mods[[x]]$NegLogL)(1:i)) res <- rbind(res, \"AIC\"= 2*npars +2*Vectorize(function(x) mods[[x]]$NegLogL)(1:i)) res <- rbind(res, \"R2\"= Vectorize(function(x) 1 - mods[[x]]$THETA[\"S\"]/var(PKdata$DV))(1:i)) colnames(res) <- letters[1:i] print(res) ", "Label": "Statistical Modeling", "Source": "https://osf.io/ecjy6/", "File": "CortStressResponse.R" }, { "ID": 497, "Comment": " define a fixed grid of cutoff (threshold) values from [1, 0] with length `resolution` ", "Code": "resolution <- 500 cutoff_out <- seq(1, 0, length.out = resolution) ", "Label": "Data Variable", "Source": "https://osf.io/w7pjy/", "File": "bootstrapConfusionMatrix.R" }, { "ID": 498, "Comment": " Compute F1 skill score... ' ...for a given cutoff threshold. ' @param sim vector of numeric values between [0, 1] (e.g., proportion of unstable grid points) ' @param obs vector of logicals (TRUE/FALSE) stating whether the layer was observed (of concern) or not.
' @param cutoff in percentage within (0, 1] ' @return numeric value of skill score ' @export ", "Code": "calculateF1Score <- function(sim, obs, cutoff) { tp <- length(sim[sim >= cutoff & obs]) fn <- length(sim[sim < cutoff & obs]) fp <- length(sim[sim >= cutoff & !obs]) f1 <- 2*tp / (2*tp + fp + fn) return(f1) }
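# --- Hedged usage sketch (editor's addition): toy inputs, not project data ---
sim <- c(0.9, 0.2, 0.7, 0.1, 0.6)        # proportion of unstable grid points
obs <- c(TRUE, FALSE, TRUE, FALSE, FALSE) # layer observed / of concern
calculateF1Score(sim, obs, cutoff = 0.5)  # 2*2 / (2*2 + 1 + 0) = 0.8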
", "Label": "Data Variable", "Source": "https://osf.io/w7pjy/", "File": "bootstrapConfusionMatrix.R" }, { "ID": 499, "Comment": " Underscore split: split up the \"variable\" column in order to get the different factors (verbtype, input, moment) ", "Code": "participantsdata.long <- cbind(participantsdata.long, colsplit(participantsdata.long$variable, \"_\", names = c(\"verbtype\", \"input\", \"testmoment\"))) ", "Label": "Data Variable", "Source": "https://osf.io/938ye/", "File": "Descriptive_statistics.R" }, { "ID": 500, "Comment": "LDA: training the model", "Code": "model_lda = train(class ~ ., data=trainSet, method='lda', trControl = trainControl(method = \"cv\")) fitted <- predict(model_lda) ", "Label": "Statistical Modeling", "Source": "https://osf.io/xuz8d/", "File": "ThesisMLRCode.R" }, { "ID": 501, "Comment": " function to extract the sequence \"Obj_xxx\" until a period \".\" appears ", "Code": "slice_object_string <- function(string) { gsub(\".*Obj_(.*)\\\\..*\", \"\\\\1\", c(string)) } familiar_objects <- unlist(lapply(string_list, slice_object_string)) ", "Label": "Data Variable", "Source": "https://osf.io/yfegm/", "File": "getObjects.r" }, { "ID": 502, "Comment": "Chi-square tests: relations between diet type and major themes", "Code": "chisq.test(data$Diet, data$Health) #p = 0.94, chi-squared = 0.01 chisq.test(data$Diet, data$Food) #p = 0.44, chi-squared = 0.59 chisq.test(data$Diet, data$Social) #p = 0.80, chi-squared = 0.06 chisq.test(data$Diet, data$Logistic) #p = 0.17, chi-squared = 1.90 chisq.test(data$Diet, data$Finance) #p = 0.06, chi-squared = 3.61 chisq.test(data$Diet, data$Motivat.) #p = 0.76, chi-squared = 0.10 chisq.test(data$Diet, data$Diet.Cons.) #p = 0.05, chi-squared = 3.97 chisq.test(data$Diet, data$Other) #p = 1, chi-squared = 8.90e-29 chisq.test(data$Diet, data$Positive) #p = 0.16, chi-squared = 1.94 ", "Label": "Statistical Test", "Source": "https://osf.io/q2zrp/", "File": "Chi-squaretests.R" }, { "ID": 503, "Comment": "Hypotheses: fit multivariate regression model", "Code": "moltenformula <- as.formula(\"value ~ v - 1 + v:scipopgoertz + v:age + v:sex + v:lingreg_D + v:lingreg_I + v:urbanity_log + v:edu_uni + v:edu_com + v:sciprox_score + v:sciliteracy + v:rel + v:pol + v:inter_science + v:trust_science + v:trust_scientists + v:scimedia_att + v:scimedia_sat\") m_inx <- svyglm(moltenformula, design = svydsgn_melt_inx) ", "Label": "Statistical Modeling", "Source": "https://osf.io/yhmbd/", "File": "02_main-analysis.R" }, { "ID": 504, "Comment": "estimate bifactor model (maximum likelihood)", "Code": "fit <- cfa(model, dat,estimator = \"MLM\", std.lv = TRUE) ", "Label": "Statistical Modeling", "Source": "https://osf.io/qk3bf/", "File": "iip_estimate_bifactor.R" }, { "ID": 505, "Comment": "draw random factor scores from a multivariate normal distribution", "Code": "lat_scores <- mvrnorm(n = n, mu = mu, Sigma = sigma) ", "Label": "Statistical Modeling", "Source": "https://osf.io/qk3bf/", "File": "iip_estimate_bifactor.R" }, { "ID": 506, "Comment": "function to create data summaries in figures", "Code": "data_summary <- function(x) { m <- mean(x) ymin <- m-ci(x) ymax <- m+ci(x) return(c(y=m,ymin=ymin,ymax=ymax)) } ", "Label": "Visualization", "Source": "https://osf.io/mj5nh/", "File": "educationestimatecentralitydefaults.R" }, { "ID": 507, "Comment": "to get numbers for confidence intervals, click on the item in the global environment and expand;; plot 95% credible intervals", "Code": "x<-plot(me, plot = FALSE)[[1]] + scale_color_grey() + scale_fill_grey() p<-x+ylim(0,1)+ theme(panel.grid.major = element_line(colour=\"gray\"), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_blank(),panel.grid.major.x = element_blank())+xlab(\"\")+ylab(\"\") p ", "Label": "Visualization", "Source": "https://osf.io/a8htx/", "File": "SSK_Cleaned_copy.R" }, { "ID": 508, "Comment": " 1 = Never, 2 = I tried it but don't do it systematically, 3 = I do it when it feels convenient, 4 = I do it for most research projects/studies, 5 = I do it for every research project/study.
Make dataframe 'Wish' with intentions to engage in open science practices ", "Code": "wish = data[, 17:25] wish_complete = wish[,-9] # remove missing data wish = apply(wish, 2, as.numeric) wish = as.data.frame(unlist(wish)) colnames(wish) = c(\"preregister\", \"sharedatas\", \"sharecode\", \"openaccess\", \"preprint\", \"openpeer\", \"opensw\", \"replicate\", \"other\") wish = t(wish) rowMeans(wish, na.rm = T) # mean apply(wish, 1, sd, na.rm = T) # standard deviation ", "Label": "Data Variable", "Source": "https://osf.io/zv3uc/", "File": "analysis.R" }, { "ID": 509, "Comment": "generates 95% confidence intervals for each beta coefficient", "Code": "m.stakes_decision.CI <- round(confint(m.stakes_decision, parm = \"beta_\"), 3) ", "Label": "Statistical Test", "Source": "https://osf.io/uygpq/", "File": "Cross-paradigm.R" }, { "ID": 510, "Comment": "2.3 Loss tangents: we draw a diagram of loss tangents with logarithmic scale on the x-axis", "Code": "plot(graphData$Freq[graphData$site == \"wreck\"], graphData$tan[graphData$site == \"wreck\"], log = \"x\", axes = FALSE, xlab = \"\", ylab = \"\", type = \"b\", pch = PCH, col = col_wreck, xlim = c(0.01, 100), ylim = c(0, 15)) par(new = TRUE, ps = PS, mar = MAR) plot(graphData$Freq[graphData$site == \"beach\"], graphData$tan[graphData$site == \"beach\"], type = \"b\", log = \"x\", xlab = expression(\"Frequency [rad s\"^-1 *\"]\"), ylab = expression(paste(\"tan \", delta)), pch = PCH, col = col_beach, axes = FALSE, xlim = c(0.01, 100), ylim = c(0, 15)) axis(1, at = c(0, 0.1, 1, 10, 100, 1000), labels = c(0, 0.1, 1, 10, 100, 1000)) axis(2, at = c(-1 , 0, 3, 6, 9, 12, 15)) text(x = 0.01, y = 15, label = expression(bold(\"c\"))) abline(1, 0) ", "Label": "Visualization", "Source": "https://osf.io/9jxzs/", "File": "07_analysis_rheology.R" }, { "ID": 511, "Comment": "estimate mean average difference between observed and corrected values;; check for sign reversals", "Code": "dat_analysis[sign(MA_ES_self_corr) == sign(MA_ES_self_obs), .N] # 46 (no sign rev) ", "Label": "Statistical Modeling", "Source": "https://osf.io/dqc3y/", "File": "REL_corr_obs_descr.R" }, { "ID": 512, "Comment": "Step 3: Fit varying-intercept ESEM model", "Code": "esem_anti_fit_alt1F <- cfa(esem_anti_model_alt1F, esem_anti_data_alt, group = \"country_iso\", estimator = \"MLM\", group.equal = \"loadings\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/w4gey/", "File": "02_measures.R" }, { "ID": 513, "Comment": "performing the chi-squared comparisons across all the possible pairs", "Code": "pairwise.table(chi_squared_paired_comparison_1, rownames(hypotheses_per_research_area), p.adjust.method=\"none\") pairwise.table(chi_squared_paired_comparison_2, rownames(hypotheses_per_research_area), p.adjust.method=\"none\") ", "Label": "Statistical Test", "Source": "https://osf.io/4ya6x/", "File": "R Code First Year Paper Cas Goos Analysis.R" }, { "ID": 514, "Comment": "chi-squared test of criteria met by association type", "Code": "f_statistic <- chisq.test(data_hypotheses_long$association_type, data_hypotheses_long$reporting_criteria_met) f_statistic ", "Label": "Statistical Test", "Source": "https://osf.io/4ya6x/", "File": "R Code First Year Paper Cas Goos Analysis.R" }, { "ID": 515, "Comment": "Hashtags: wordcloud of most common hashtags", "Code": "frequenthashes <- hashtable[hashtable>floor(quantile(hashtable,.999))] hashvec <- unlist(lapply(frequenthashes,function(x) rep(names(x),x))) stretched <- log10(frequenthashes-min(frequenthashes)+1)/max(log10(frequenthashes-min(frequenthashes)+1))
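# --- Editor's note (hedged): stretched rescales the hashtag counts to [0, 1] on a
# log scale;; rgb(1 - stretched, 0, 0) below then shades the most frequent hashtags
# toward black and the rarest toward bright red in the wordcloud ---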
cols <- rgb(1- stretched,0,0) png(\"hashcloud.png\",width = 12,height=12,units = \"cm\",res=300) wordcloud(names(frequenthashes),frequenthashes,random.order = F,colors=cols) dev.off() ", "Label": "Visualization", "Source": "https://osf.io/u3wby/", "File": "twitter_analyze.R" }, { "ID": 516, "Comment": "Fit the model using unweighted least squares (ULS)", "Code": "m1.fit <- sem( m1, sample.cov = S, sample.nobs = 200, estimator = \"ULS\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/vy763/", "File": "Acloserlookatfixed-effectsregressioninsem_supplementarymaterials.R" }, { "ID": 517, "Comment": "Remove items that do not occur in the country", "Code": "items <- items[items$item %in% colnames(x_country), ] ", "Label": "Data Variable", "Source": "https://osf.io/8fzns/", "File": "3H_IRT_helper.R" }, { "ID": 518, "Comment": "multilevel model (repeated measures) correlation using package rmcorr:", "Code": "td_corr <- rmcorr(subject, trust, distrust, td) td_corr ", "Label": "Statistical Modeling", "Source": "https://osf.io/kwp6n/", "File": "Everyday_Trust_Rcode.R" }, { "ID": 519, "Comment": "Robust growth model: MLR estimator", "Code": "rctBoot <- growth(rctModel, data=rctWide, se = \"bootstrap\") summary(rctBoot) rctMLR <- growth(rctModel, data=rctWide, estimator = \"MLR\") summary(rctMLR) ", "Label": "Statistical Modeling", "Source": "https://osf.io/fbj3z/", "File": "R Script for Field & Wilcox (2017).R" }, { "ID": 520, "Comment": "create a scatterplot of sleep quality and insomnia scores (FIRST). Add appropriate labels", "Code": "plot(Sleep_and_COVID19$PSQI_global, Sleep_and_COVID19$FIRST, main = \"Main title\", xlab = \"X axis title\", ylab = \"Y axis title\", frame = FALSE) ", "Label": "Visualization", "Source": "https://osf.io/94jyp/", "File": "Ex2_BasicAnalysis_Answers.R" }, { "ID": 521, "Comment": "4) T-test: examine whether there is a significant difference in sleep quality in those who may have symptoms or been diagnosed with COVID", "Code": "t.test(PSQI_global ~ SymptomsOrPositive, data = Sleep_and_COVID19) ", "Label": "Statistical Test", "Source": "https://osf.io/94jyp/", "File": "Ex2_BasicAnalysis_Answers.R" }, { "ID": 522, "Comment": "Examine whether there is a significant difference in mean sleep difficulty scores in those who undertake shiftwork", "Code": "t.test(PSQI_global ~ ShiftWork, data = Sleep_and_COVID19) ?t.test ", "Label": "Statistical Test", "Source": "https://osf.io/94jyp/", "File": "Ex2_BasicAnalysis_Answers.R" }, { "ID": 523, "Comment": " 6) Regression Analysis: using multiple regression analysis, examine the influence of age, hours worked, physical health, coffee drank, alertness and insomnia proneness on sleep quality ", "Code": "model1 <- lm(PSQI_global ~ Age + WorkHour + Health + CoffeeToday + StanfordSleepinessScale + FIRST, data = Sleep_and_COVID19) summary(model1) ", "Label": "Statistical Modeling", "Source": "https://osf.io/94jyp/", "File": "Ex2_BasicAnalysis_Answers.R" }, { "ID": 524, "Comment": "Draw samples setting heart_rate_traj above as mean with appropriate constant variance (10 beats/min)", "Code": "heart_rate <- rnorm(n = length(heart_rate_traj), mean = heart_rate_traj, sd = 10) ", "Label": "Statistical Modeling", "Source": "https://osf.io/skp56/", "File": "CSEP_DataScienceExPhys_DataGen_Akerman.R" }, { "ID": 525, "Comment": "Computing p-values based on the F-distribution", "Code": "p_durable <- pf(q=Femp_durable, df1=Df_aest_durable, df2=Df_residual, lower.tail = F)
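# --- Hedged sketch (editor's addition): the same upper-tail F-test pattern with
# made-up inputs, not project values ---
pf(q = 4.2, df1 = 3, df2 = 96, lower.tail = FALSE)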
p_nond <- pf(q=Femp_nond, df1=Df_aest_nond, df2=Df_residual, lower.tail = F) ", "Label": "Statistical Test", "Source": "https://osf.io/n3zfp/", "File": "Aesthetic-Fidelity-Effect-Statistical Analyses.r" }, { "ID": 526, "Comment": "Logistic Regression Model: creating null model and a logistic regression model based on all variables", "Code": "lr.null <- glm(Treatment ~ 1, data = dat2, family = binomial(link=\"logit\")) lrm <- glm(Treatment ~ Sq + Ssk + Sku + Sp + Sv + Sz + Sa,data = dat2, family = binomial(link=\"logit\")) summary(lrm) ", "Label": "Statistical Modeling", "Source": "https://osf.io/fvw2k/", "File": "Murrayetal_SurfaceR_Code.R" }, { "ID": 527, "Comment": "Filter and select variables of interest", "Code": "d <- d_2011 %>% subset(sns == 1) %>% # indicated to use social media subset(!is.na(beh_disclose_soc)) %>% select(id, country, year, gender, age, cons_record, cons_zweck, cons_target, contains(\"soc\"), v143:v156, # beh_setting, int_home, int_work, sns, nth_arg) %>% mutate(cons_mean = (cons_record + cons_zweck + cons_target)/3, cons_mean_bin = ifelse(cons_mean > median(cons_mean, na.rm = T), 1, ifelse(is.na(cons_mean), NA, 0))) %>% mutate(age_group = ifelse(age <= 32, \"younger than 32 years\", \"older than 32 years\")) ", "Label": "Data Variable", "Source": "https://osf.io/m72gb/", "File": "analysis_disclosure.r" }, { "ID": 528, "Comment": "Confirmatory factor analyses: CFA for privacy concerns (tau-equivalent)", "Code": "cfa.model <- \" priv_con =~ 1*cons_record + 1*cons_zweck + 1*cons_target \" fit <- cfa(cfa.model, d_2011) summary(fit, std = T, fit = T) reliability(fit) ", "Label": "Statistical Modeling", "Source": "https://osf.io/m72gb/", "File": "analysis_disclosure.r" }, { "ID": 529, "Comment": "Estimate models across all specifications: customized model functions to include random country effect", "Code": "linear <- function(formula, data) { pb$tick() # set tick for progress bar lmer(paste(formula, \"+ (1|country)\"), data = data) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/m72gb/", "File": "analysis_disclosure.r" }, { "ID": 530, "Comment": "Decomposing variance: estimate multilevel model (without predictors)", "Code": "m_lin <- lmer(stdcoef ~ 1 + (1|x) + (1|controls) + (1|subsets) + (1|x:controls) + (1|x:subsets) + (1|subsets:controls), data = results) summary(m_lin) ", "Label": "Statistical Modeling", "Source": "https://osf.io/m72gb/", "File": "analysis_disclosure.r" }, { "ID": 531, "Comment": "run polynomial regression as an OLS linear model", "Code": "f <- paste0(DV, \" ~ \", paste(\"1\", IV1, IV2, IV12, IV_IA, IV22, sep=\" + \"), controlvariables) lm.full <- lm(f, data=df, na.action=na.exclude) ", "Label": "Statistical Modeling", "Source": "https://osf.io/m6pb2/", "File": "helpers.R" }, { "ID": 532, "Comment": " Matrix of phenotypic change trajectories X and Z, interlineage correlation matrix C, and intertrait cross-product matrix A, as well as the number of lineages n (= 13) and dimensionality of the vectors p (= 76) ", "Code": "X <- f.mean.diff(data.sc, fishID) Z <- X / sqrt(diag(tcrossprod(X))) C <- tcrossprod(Z) A <- crossprod(Z) n <- nrow(Z) p <- ncol(Z) - 4 # -4 accounting for the loss of d.f. from Procrustes alignment
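# --- Editor's note (hedged): with each row of Z scaled to unit length,
# C = tcrossprod(Z) holds the pairwise cosines between lineage trajectories, so
# acos(C[i, j]) is the angle between trajectories i and j;; A = crossprod(Z) is the
# corresponding trait-by-trait cross-product matrix ---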
", "Label": "Data Variable", "Source": "https://osf.io/6ukwg/", "File": "codes_reanalysis.R" }, { "ID": 533, "Comment": " Histograms comparing dissimilarity between the original and bootstrap replicates between \"Moo\" and \"Pac\" (largest and smallest dispersion). This shows that the heteroscedasticity is real (not due to visual distortion) ", "Code": "hist(acos(Z[10, ] %*% Zb[10, , ]), breaks = seq(0, pi / 2, 0.1), col = \"#FF000080\", main = \"Pac (red) versus Moo (blue)\", xlab = \"Angle between original and bootstrap vectors (radian)\") hist(acos(Z[7, ] %*% Zb[7, , ]), add = TRUE, breaks = seq(0, pi / 2, 0.1), col = \"#0000FF80\") ", "Label": "Visualization", "Source": "https://osf.io/6ukwg/", "File": "codes_reanalysis.R" }, { "ID": 534, "Comment": " Use kNNdistplot(d.cor.umap, k = 3) to determine the value for eps ", "Code": "d.cor.dbscan <- dbscan(d.cor.umap, minPts = 3, eps = 1.8) data.table.large.cluster <- data.table[,d.cor.dbscan$cluster == 1] large.cluster.cor.metric <- as.dist(acos(cor(data.table.large.cluster))) large.umap <- umap( large.cluster.cor.metric, min_dist = 1, n_neighbors = 3 ) large.dbscan <- dbscan(large.umap, minPts = 3, eps = 1) large.cols <- rainbow(length(unique(large.dbscan$cluster)), start = 2/6) write.csv( d.cor.umap, 'd_acos_cor_fixed.csv', row.names = F ) write.csv( d.cor.dbscan$cluster, 'dbscan_clustering_fixed.csv' ) write.csv( large.umap, 'd_acos_cor_fixed_large_cluster.csv', row.names = F ) write.csv( large.dbscan$cluster, 'dbscan_clustering_of_large_cluster_fixed.csv' ) ", "Label": "Visualization", "Source": "https://osf.io/2qjn5/", "File": "dump_csv.R" }, { "ID": 535, "Comment": " Chi-square test: difference in preference based on outcome, by condition", "Code": "chisq.test(matrix(c(19,5,5,17), ncol = 2, byrow = T)) oddsratio.wald.outcome <- oddsratio.wald(matrix(c(19,5,5,17), ncol = 2, byrow = T))$measure[2] # Note: I reorganized the matrix so that r is of the same sign as that of Experiment 2 esc_2x2( grp1yes = 19, grp1no = 5, grp2yes = 5, grp2no = 17, es.type = \"or\" ) # this confirms Wald's odds ratio ", "Label": "Statistical Test", "Source": "https://osf.io/qrvje/", "File": "beliefhelp15-220615.R" }, { "ID": 536, "Comment": " Two-sample t-test: difference in proportionate looking based on outcome, by condition", "Code": "twosamplet.outcome <- t.test(pPositiveO ~ Condition, data = bh2zoom, alternative = \"two.sided\") twosampled.outcome <- cohen.d(pPositiveO ~ Condition, data = bh2zoom)$estimate ", "Label": "Statistical Test", "Source": "https://osf.io/qrvje/", "File": "beliefhelp15-220615.R" }, { "ID": 537, "Comment": " Assess how well model captured PWLs ' ' This function calculates several indicators that help to assess how well the model captured captWKLs beyond simple layer structure.
' It gets called by the overarching evaluation script `evaluate_main.R` ' ' So, for each WKL the function computes ' ' * correlation factors for likelihood/distribution/sensitivity versus proportion of unstable grid points ' * the maximum proportion of unstable grid points during the lifetime of the WKL ' * the temporal lag between first forecaster concern (> avy problem) and first time that grid points are unstable in that layer ' different timing instances possible: first time (i) at least one grid point unstable, (ii) the majority of grid points is unstable, (iii) more than 50 % of max proportion unstable ' * the temporal lag between latest forecaster concern (> last day of avy problem) and last time that grid points are unstable in that layer ' need to find rules for becoming dormant and waking up again versus becoming inactive ' ' @param VData object;; don't include both primary/secondary and tertiary gtype_ranks! ' ' @export ", "Code": "assessQualityOfcaptWKL <- function(VData, band, stabilityindex = \"p_unstable\") { gtype_rank <- unique(VData$vframe$gtype_rank) if (any(c(\"primary\", \"secondary\") %in% gtype_rank) & \"tertiary\" %in% gtype_rank) stop(\"You can only provide either primary and/or secondary, or tertiary gtype_ranks in your VData object to this function!\") possible_ranks <- c(\"tertiary\", \"secondary\", \"primary\") gtype_rank <- possible_ranks[possible_ranks %in% gtype_rank][1] nrow_max <- length(VData$wkl$wkl_uuid) OUTnames <- c( \"wkl_uuid\", \"wkl_iscrust\", \"wkldate\", \"wkltag\", \"nPDays\", \"nPDays_anyunstable\", \"nPDays_median\", \"nPDays_halfofmax\", \"nPDays_20\", \"rho_llhd\", \"rho_dist\", \"rho_sens\", \"p_llhd\", \"p_dist\", \"p_sens\", \"pu_max\", \"offset_maxes\", \"offset_mean\", \"pcapt_max\", \"lagA_anyunstable\", \"lagA_median\", \"lagA_halfofmax\", \"lagA_20\", \"lagZ_anyunstable\", \"lagZ_median\", \"lagZ_halfofmax\", \"lagZ_20\", \"band\", \"stabilityindex\", \"gtype_rank\" ) OUT <- matrix(nrow = max(1, nrow_max), ncol = length(OUTnames), dimnames = list(seq(max(1, nrow_max)), OUTnames)) for (i in seq_along(VData$wkl$wkl_uuid)) { wuid <- VData$wkl$wkl_uuid[i] wkldate <- as.character(as.Date(VData$wkl$datetag[i])) wkltag <- paste(format(as.Date(VData$wkl$datetag[i]), \"%b %d\"), substr(gtype_rank, start = 1, stop = 4)) wkl_iscrust <- as.logical(VData$wkl$iscrust[i]) ", "Label": "Statistical Modeling", "Source": "https://osf.io/w7pjy/", "File": "assessQualityOfcaptWKL.R" }, { "ID": 538, "Comment": "Correlation of life satisfaction and depression across all measurement occasions", "Code": "d.all %>% select(contains(\"fsat\"), contains(\"depr\"), id) %>% gather(key, value, -id) %>% separate(key, c(\"time\", \"variable\")) %>% spread(variable, value) %>% select(depr, fsat) %>% zero_order_corr(print = T, digits = 3) %>% select(`1`) %>% slice(2) ", "Label": "Statistical Test", "Source": "https://osf.io/fdp39/", "File": "analysis.R" }, { "ID": 539, "Comment": "check if latent covariances are equal across groups", "Code": "est_svc <- cfa(mod_s,quop_use, estimator = \"MLR\", missing = \"FIML\", group = \"year\", group.equal = c(\"loadings\",\"intercepts\",\"residuals\", \"lv.variances\",\"lv.covariances\"), cluster = \"class\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/vphyt/", "File": "Pandemic_Cohorts_vs_Pre_Pandemic_Cohorts.R" }, { "ID": 540, "Comment": "get factor scores from model est_svcm_sl", "Code": "fs <- lavPredict(est_svcm_sl, method = \"bartlett\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/vphyt/",
"File": "Pandemic_Cohorts_vs_Pre_Pandemic_Cohorts.R" }, { "ID": 541, "Comment": " Plot for distribution of emotions Arguments: original ddbb, type of emotion, plot title ", "Code": "create.emotion.plot <- function(case_ddbb, emotion=\"emotion\", plot_title=NULL){ ", "Label": "Visualization", "Source": "https://osf.io/unxj2/", "File": "functions_1.R" }, { "ID": 542, "Comment": " Table and plot for effect sizes Arguments: original ddbb, plot title ", "Code": "create.effectsize.tableplot <- function(case_ddbb, plot_title = NULL){ ", "Label": "Visualization", "Source": "https://osf.io/unxj2/", "File": "functions_1.R" }, { "ID": 543, "Comment": "Convert average time into a ordinal variable", "Code": "Tbl2$DurSecAvg1Cat <- cut(Tbl2$DurSecAvg1, breaks = quantile(Tbl2$DurSecAvg1, c(0, 1/3, 2/3, 1)), include.lowest = T) aggregate(Tbl2$DurSecAvg1, list(Tbl2$DurSecAvg1Cat), summary) levels(Tbl2$DurSecAvg1Cat) <- c(\"Less\", \"Avg\", \"More\") ", "Label": "Data Variable", "Source": "https://osf.io/n5j3w/", "File": "2021-12-03_AnalysisCode.R" }, { "ID": 544, "Comment": "Plotting Figure 3", "Code": "png(filename = \"Fig03_Demographics.png\", width = 19, height = 10, units = \"cm\", res = 500, pointsize = 7) par(mfrow=c(2,3)) ", "Label": "Visualization", "Source": "https://osf.io/n5j3w/", "File": "2021-12-03_AnalysisCode.R" }, { "ID": 545, "Comment": "Predict node for checking differences in distributions with chisquared test", "Code": "Tbl2$NodeRate <- predict(Engage, newdata = Tbl2, type = \"node\") table(Tbl2$NodeRate) (temp <- chisq.test(Tbl2$NodeRate, Tbl2$FeedbackEngage)) temp$observed round(100*prop.table(temp$observed, 1), 1) mosaic(temp$observed, shade = T) Tbl2$NodeRate <- predict(FeedTree, newdata = Tbl2, type = \"node\") table(Tbl2$NodeRate) (temp <- chisq.test(Tbl2$NodeRate, Tbl2$Useful_FeedbackMiddle)) temp$observed round(100*prop.table(temp$observed, 1), 1) mosaic(temp$observed, shade = T) Tbl2$NodeRate <- predict(ExTree, newdata = Tbl2, type = \"node\") table(Tbl2$NodeRate) (temp <- chisq.test(Tbl2$NodeRate, Tbl2$Useful_FeedbackMiddle)) temp$observed round(100*prop.table(temp$observed, 1), 1) mosaic(temp$observed, shade = T) head(Tbl2$RankTaskPerform) ", "Label": "Statistical Test", "Source": "https://osf.io/n5j3w/", "File": "2021-12-03_AnalysisCode.R" }, { "ID": 546, "Comment": "Convert `from_name` to factor with three levels. 
(Facilitates data visualization).", "Code": "Campaign_Messages$from_name <- factor(Campaign_Messages$from_name, levels = c (\"Stephen Harper\", \"Justin Trudeau\", \"Tom Mulcair\")) ", "Label": "Data Variable", "Source": "https://osf.io/3fnjq/", "File": "campaign_messages.R" }, { "ID": 547, "Comment": "change the title text to size 20 and bold", "Code": "axis.title = element_text(size = 20, face = \"bold\"), ", "Label": "Visualization", "Source": "https://osf.io/9e3cu/", "File": "visualization_code.R" }, { "ID": 548, "Comment": " change the axis label text to size 14, bold, and black color ", "Code": "axis.text.x = element_text(size = 14, face = \"bold\", color = \"black\"), axis.text.y = element_text(size = 14, face = \"bold\", color = \"black\")) + scale_y_continuous(limits = c(0, 17), expand = c(0,0)) + ", "Label": "Visualization", "Source": "https://osf.io/9e3cu/", "File": "visualization_code.R" }, { "ID": 549, "Comment": "Identify all neighbours within 2km", "Code": "sp_nb <- spdep::dnearneigh(coords, d1 = 0, d2 = km, row.names = row.names(sp), longlat = TRUE) ", "Label": "Data Variable", "Source": "https://osf.io/hfjgw/", "File": "00-utils.r" }, { "ID": 550, "Comment": "take the inverse of the distances", "Code": "W <- lapply(dsts, function(x) 1 / x) ", "Label": "Data Variable", "Source": "https://osf.io/hfjgw/", "File": "00-utils.r" }, { "ID": 551, "Comment": "recode base price and overage as numeric (without $ sign)", "Code": "x$b1=as.numeric(sapply(strsplit(as.character(x$b1),\"\\\\$\"),function(y) y[2])) x$b2=as.numeric(sapply(strsplit(as.character(x$b2),\"\\\\$\"),function(y) y[2])) x$b3=as.numeric(sapply(strsplit(as.character(x$b3),\"\\\\$\"),function(y) y[2])) x$c1=as.numeric(sapply(strsplit(as.character(x$c1),\"\\\\$\"),function(y) y[2])) x$c2=as.numeric(sapply(strsplit(as.character(x$c2),\"\\\\$\"),function(y) y[2])) x$c3=as.numeric(sapply(strsplit(as.character(x$c3),\"\\\\$\"),function(y) y[2])) ", "Label": "Data Variable", "Source": "https://osf.io/wbyj7/", "File": "read-data-Exp1.r" }, { "ID": 552, "Comment": " create dataset with possible k values, their prior and likelihood probabilities;; likelihood is drawn from 1000 random samples from a beta distribution ", "Code": "df <- tibble(k = hypo_k) %>% mutate(model_vote = 0.0727^k) %>% mutate(occur = map_int(.$model_vote, ~ length(which(polls$value >= .x))), prior = prob_k) %>% mutate(like = map(.$occur,~ rbeta(trials, .x, polls_len - .x))) ", "Label": "Statistical Modeling", "Source": "https://osf.io/d4hjq/", "File": "02_parameter_estimation.R" }, { "ID": 553, "Comment": "visualise normalised posterior on a graph", "Code": "p <- df %>% ggplot(aes(k, norm_posterior)) + geom_point() + geom_line() p + labs( title = \"Probability of k exponent in the interval <0.89;; 0.99>\", x = \"Value of k exponent\", y = \"Normalised probability\" ) + theme_minimal() ", "Label": "Visualization", "Source": "https://osf.io/d4hjq/", "File": "02_parameter_estimation.R" }, { "ID": 554, "Comment": "3) CALCULATION OF ESTIMATED MARGINAL MEANS: tests for differences in response to jargon for plot", "Code": "at_jarg <- list(Jargon = c(\"More\", \"Less\"), UnderN = \"Easy\", RecognN = \"Fairly\", TrvlAdv_Atten = \"Cons\", cut = \"3|4\") grid_jarg <- ref_grid(clmm_usejar, mode = \"exc.prob\", at = at_jarg) (emm_jarg <- emmeans(grid_jarg, specs = pairwise ~ Jargon, by = \"BackgrAvTraining\"))
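# --- Editor's note (hedged): specs = pairwise ~ Jargon makes emmeans() return both
# the estimated marginal exceedance probabilities ($emmeans) and the More-vs-Less
# pairwise contrasts ($contrasts), split by BackgrAvTraining ---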
plot_jarg <- summary(emm_jarg$emmeans) cont_jarg <- summary(emm_jarg$contrasts) ", "Label": "Statistical Modeling", "Source": "https://osf.io/aczx5/", "File": "220423_Fig04_Use_JargExpl.R" }, { "ID": 555, "Comment": " Gradient of FDR with respect to alpha (and its implied power) across ncp for a one-sided z-test. Note that the gradient is increasing towards $\infty$ from both sides at ncp = 0 and alpha = 0, but is equal to 0 at that point. The figure does not allow depicting this properly and it seems as if it peaked at ncp around .5 and was decreasing towards 0. Green corresponds to $P(H_0) = .2$, blue to $P(H_0) = .5$, and red to $P(H_0) = .8$. ", "Code": "alpha <- seq( 0, 1, length.out = 500) ncp <- seq(-5, 5, length.out = 500) FDR.2od <- fill_gradFDR.alpha(.2, alpha, ncp, FALSE) FDR.5od <- fill_gradFDR.alpha(.5, alpha, ncp, FALSE) FDR.8od <- fill_gradFDR.alpha(.8, alpha, ncp, FALSE) plot_ly(x = ~alpha, y = ~ncp, z = ~FDR.5od) %>% add_surface(contours = list( y = list( show = TRUE, project = list(y = TRUE), usecolormap = FALSE, color = \"blue\" ), x = list( show = TRUE, project = list(x = TRUE), usecolormap = FALSE, color = \"blue\" ) ), opacity = .4) %>% layout(scene = list( xaxis = list( tickvals = c(0, .05, .1, .2, .5, 1) ), yaxis = list( tickvals = pretty(ncp) ), zaxis = list( tickvals = seq(0, 1, .2), title = list(text = \"FDR'\") ) )) %>% add_surface(x = ~alpha, y = ~ncp, z = ~FDR.8od, opacity = .4, contours = list( y = list( show = TRUE, project = list(y = TRUE), color = \"red\" ), x = list( show = TRUE, project = list(x = TRUE), color = \"red\" ) )) %>% add_surface(x = ~alpha, y = ~ncp, z = ~FDR.2od, opacity = .4, contours = list( y = list( show = TRUE, project = list(y = TRUE), color = \"green\" ), x = list( show = TRUE, project = list(x = TRUE), color = \"green\" ) )) ", "Label": "Visualization", "Source": "https://osf.io/kbjw9/", "File": "3DFDRPlot.R" }, { "ID": 556, "Comment": "List that will include table (each element should be vector of 3):", "Code": "Table <- list( c(\"Network Characteristics\",title,\"\"), c(\"Comparing Global Characteristics\",nameOrig, nameRepl) ) ", "Label": "Data Variable", "Source": "https://osf.io/akywf/", "File": "splithalf_table_function.R" }, { "ID": 557, "Comment": " The row with \"def_cond\" is moved to a new column and duplicated (this is done with reference to \"cond_to_group\") ", "Code": "df_tmp_2 <- full_join( df_tmp_1 %>% filter({{cond_to_compare}} != def_cond), df_tmp_1 %>% filter({{cond_to_compare}} == def_cond), by = col ) ", "Label": "Data Variable", "Source": "https://osf.io/4fvwe/", "File": "return_BF_ttests.R" }, { "ID": 558, "Comment": "Repeating the analyses with closeness as covariate", "Code": "cor.test(data_3A$closeness_1,data_3A$closeness_2) cor.test(data_3B$closeness_1,data_3B$closeness_2) cor.test(data_3C$closeness_1,data_3C$closeness_2) ", "Label": "Statistical Modeling", "Source": "https://osf.io/sb3kw/", "File": "Meta_Analysis.R" }, { "ID": 559, "Comment": "check if the number of processed partners is smaller than the number of listed partners as saved in the counter variable", "Code": "nrow(Relationship_details) < My_EHC_survey$count_SP", "Label": "Data Variable", "Source": "https://osf.io/y5gr9/", "File": "Skip_Backwards.R" }, { "ID": 560, "Comment": "run ANOVAs to check interaction between specificity and condition LA", "Code": "anova_N4_la_sp = summary(aov(Voltage ~ Condition * Specificity, data = mydata_n4_la)) anova_N4_la_sp
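# --- Hedged sketch (editor's addition): the same sequential ANOVA table can also be
# reproduced with anova() on the equivalent linear model ---
anova(lm(Voltage ~ Condition * Specificity, data = mydata_n4_la))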
Test", "Source": "https://osf.io/p7zwr/", "File": "N400.R" }, { "ID": 561, "Comment": "Visualize grid cluststers", "Code": "mgp.u<-c(2.3,0.6,0.5) if(tifit==T){ tiff(tifname,width=14,height=12.5,units=\"cm\",res=600,compression=\"lzw\") } par(oma=c(0,4,4,4)) layout(matrix(1:9,ncol=3,byrow=T),widths=c(1.28,1,1),heights=c(1,1,1.28)) doFloorC(n=1000,eta=1,char=char,mar1=c(gap,3.5,gap,gap),mgp1=mgp.u,xax=F,yax=T,xl=\"\",col2=col2,seq.s=seq.s,seq.c=seq.c,contour=contour) ", "Label": "Visualization", "Source": "https://osf.io/pvyhe/", "File": "grid_visualize_3by3.R" }, { "ID": 562, "Comment": "Create the dataset for visualizations Summarize the means (and sd for clusters)", "Code": "means.cl<-tapply(res$clust,paste(res$n,res$h,res$nu,res$eta),mean,na.rm=T) sd.cl<-tapply(res$clust,paste(res$n,res$h,res$nu,res$eta),sd,na.rm=T) means.dim<-tapply(res$explained3D,paste(res$n,res$h,res$nu,res$eta),mean,na.rm=T) means.avg<-tapply(log(res$avgdist),paste(res$n,res$h,res$nu,res$eta),mean,na.rm=T) vars<-strsplit(names(means.cl),\" \") ", "Label": "Data Variable", "Source": "https://osf.io/pvyhe/", "File": "grid_visualize_3by3.R" }, { "ID": 563, "Comment": "Graph of the distribution of pvalues for each effect", "Code": "pvalue.plot <- ggplot(melt_pvalue,aes(x = value)) + facet_wrap(~variable, ncol = 5) + geom_histogram() ", "Label": "Visualization", "Source": "https://osf.io/unxj2/", "File": "functions_2.R" }, { "ID": 564, "Comment": "Graph of the distribution of standard estimates for each effect", "Code": "est.std.plot <- ggplot(melt_est.std,aes(x = value)) + facet_wrap(~variable, ncol = 5) + geom_histogram() list.result <- list(\"dist_pvalue.plot\" = pvalue.plot, \"dist_est.std.plot\" = est.std.plot) return(list.result) ", "Label": "Visualization", "Source": "https://osf.io/unxj2/", "File": "functions_2.R" }, { "ID": 565, "Comment": "returns plot of pvalue disttribution, plot of standard estimates distribution ", "Code": "list.result <- list(\"dist_pvalue.plot\" = pvalue.plot, \"dist_est.std.plot\" = est.std.plot) return(list.result) ", "Label": "Visualization", "Source": "https://osf.io/unxj2/", "File": "functions_2.R" }, { "ID": 566, "Comment": " Model estimates plot Arguments: standard estimates distribution, pvalues distributions, use sign level TREU/FALSE, level of signicance, plot title ", "Code": "create.model_estimates.plot <- function (est.std_dist, pvalue_dist, significant = FALSE, sig_level=.05, title = NULL){ ", "Label": "Visualization", "Source": "https://osf.io/unxj2/", "File": "functions_2.R" }, { "ID": 567, "Comment": "Median of each standard estimates and TRUE/FALSE values if mean(pvalues) < sig_level", "Code": "est.std_All <- data.frame(cbind(colMeans(est.std_dist), colMeans(pvalue_dist) <= sig_level)) ", "Label": "Statistical Test", "Source": "https://osf.io/unxj2/", "File": "functions_2.R" }, { "ID": 568, "Comment": " Change the participantID to a shorter & uniform name \"Subject\" ", "Code": "names(d)[names(d) == \"Participant.Public.ID\"] <- \"Subject\" ", "Label": "Data Variable", "Source": "https://osf.io/cd5r8/", "File": "R_Code_Hui_et_al.R" }, { "ID": 569, "Comment": " For trimmed set, we remove any RT that is too long (>2500ms) ", "Code": "d_trimmed = d %>% filter (acc == 2, Reaction.Time > 300, Reaction.Time < 2500) ", "Label": "Data Variable", "Source": "https://osf.io/cd5r8/", "File": "R_Code_Hui_et_al.R" }, { "ID": 570, "Comment": "Plotting create a dataset that contains means of both oddnumbered & evennumbered items' RTs", "Code": "raw_untrimmed<-inner_join(x= mean.o, 
", "Label": "Visualization", "Source": "https://osf.io/cd5r8/", "File": "R_Code_Hui_et_al.R" }, { "ID": 571, "Comment": "AVERAGE BY PARTICIPANT", "Code": "mean.e = de %>% group_by(Subject) %>% summarise(mean(rt.diff)) mean.o = do %>% group_by(Subject) %>% summarise(mean(rt.diff)) mean.e = de %>% group_by(Subject) %>% summarise(mean(zrt.diff)) mean.o = do %>% group_by(Subject) %>% summarise(mean(zrt.diff))", "Label": "Data Variable", "Source": "https://osf.io/cd5r8/", "File": "R_Code_Hui_et_al.R" }, { "ID": 572, "Comment": "combine them into one data frame", "Code": "person <- inner_join(person_rand_e, person_rand_o, by = \"Subject\", suffix = c(\"_even\", \"_odd\")) ", "Label": "Data Variable", "Source": "https://osf.io/cd5r8/", "File": "R_Code_Hui_et_al.R" }, { "ID": 573, "Comment": "Wilcoxon rank-sum test on widths of unbroken artefacts:", "Code": "results <- wilcox.test(An2entire$Width,Gmeentire$Width) Z <- qnorm(results$p.value) ", "Label": "Statistical Test", "Source": "https://osf.io/tgf3q/", "File": "SI_4_Statistical_analyses.R" }, { "ID": 574, "Comment": "Wilcoxon rank-sum test on lengths of unbroken artefacts:", "Code": "results <- wilcox.test(An2entire$Length,Gmeentire$Length) Z <- qnorm(results$p.value) ", "Label": "Statistical Test", "Source": "https://osf.io/tgf3q/", "File": "SI_4_Statistical_analyses.R" }, { "ID": 575, "Comment": "DATA DOWNSAMPLING: add columns with delta distance and delta time for downsampling", "Code": "trajectory.df <- trajectory.df %>% group_by(id) %>% mutate(time_diff = difftime(dt, lag(dt, n = 1L), units = \"min\"), delta_x = x_utm - lag(x_utm, n = 1L), delta_y = y_utm - lag(y_utm, n = 1L), dist = sqrt(delta_x^2+delta_y^2)) ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "af_homing_dataproc.R" }, { "ID": 576, "Comment": "Calculate daily cumulative distances and time lags. Calculate speed. Join with relocation info. Add consecutive number per individual to indicate tracking day", "Code": "daily <- trajectory.df %>% group_by(id, date) %>% filter(!dist == \"NA\") %>% dplyr::summarize(daily_dist = sum(dist), sex = first(sex), time_lag_min = sum(time_lag_min)) %>% mutate(id_day = paste(id, date), daily_speed = (daily_dist/as.numeric(time_lag_min) * 60)) %>% left_join(relocs.perday) %>% group_by(id) %>% mutate(trans_day = row_number()) ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "af_homing_dataproc.R" }, { "ID": 577, "Comment": "Split up the trajectories into 50m and 200m translocations", "Code": "trajectory50m <- trajectory.df %>% filter(trans_group == \"50m\") trajectory200m <- trajectory.df %>% filter(trans_group == \"200m\") ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "af_homing_dataproc.R" }, { "ID": 578, "Comment": "Plot daily movement by daily temp & sex", "Code": "ggplot(data = daily, aes(x = temp, y = daily_dist, group = sex, color = sex)) + geom_point() + stat_smooth(method=lm) + theme_bw() ", "Label": "Visualization", "Source": "https://osf.io/3bpn6/", "File": "af_homing_dataproc.R" }, { "ID": 579, "Comment": "Plot daily movement by daytime rainfall & sex", "Code": "ggplot(data = daily, aes(x = rain_daytime, y = daily_dist, group = sex, color = sex)) + geom_point() + stat_smooth(method=lm) + theme_bw() ", "Label": "Visualization", "Source": "https://osf.io/3bpn6/",
"File": "af_homing_dataproc.R" }, { "ID": 580, "Comment": "Plot daily movement by cumulative rainfall & sex", "Code": "ggplot(data = daily, aes(x = rain_cumul, y = daily_dist, group = sex, color = sex)) + geom_point() + stat_smooth(method=lm) + theme_bw() ", "Label": "Visualization", "Source": "https://osf.io/3bpn6/", "File": "af_homing_dataproc.R" }, { "ID": 581, "Comment": " x is a matrix containing the data method : correlation method. \"pearson\"\" or \"spearman\"\" is supported removeTriangle : remove upper or lower triangle results : if \"html\" or \"latex\" the results will be displayed in html or latex format ", "Code": "corstars <-function(x, method=c(\"pearson\", \"spearman\"), removeTriangle=c(\"upper\", \"lower\"), result=c(\"none\", \"html\", \"latex\")){ ", "Label": "Statistical Test", "Source": "https://osf.io/wcfj3/", "File": "funs.R" }, { "ID": 582, "Comment": "Diagonal matrix Tau used to scale the z_w", "Code": "Tau_w <- matrix(c(DA.mp$`tau_w[1]`,0,0, 0,DA.mp$`tau_w[2]`,0, 0,0,DA.mp$`tau_w[3]` ),nrow=3,ncol=3) ", "Label": "Data Variable", "Source": "https://osf.io/kdjqz/", "File": "sim_data_LissonEtAl2020.R" }, { "ID": 583, "Comment": "Run fixed effect model", "Code": "life <- rma(yi = corrs$cor, vi = corrs$vi, measure = \"COR\", method = \"FE\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/9jzfr/", "File": "20180806funnelplotlifesatisfaction.R" }, { "ID": 584, "Comment": "Set max Y for graph", "Code": "y_max<-max(d_dist)+1", "Label": "Visualization", "Source": "https://osf.io/ha4q8/", "File": "p_curve_d_distribution_power_app_Lakens.R" }, { "ID": 585, "Comment": " This does the summary. For each group's data frame, return a vector with N, mean, median, and sd ", "Code": "datac <- ddply(data, groupvars, .drop=.drop, .fun = function(xx, col) { c(N = length2(xx[[col]], na.rm=na.rm), mean = mean (xx[[col]], na.rm=na.rm), median = median (xx[[col]], na.rm=na.rm), sd = sd (xx[[col]], na.rm=na.rm) ) }, measurevar ) ", "Label": "Data Variable", "Source": "https://osf.io/gk6jh/", "File": "summarySE.R" }, { "ID": 586, "Comment": " Rename the \"mean\" and \"median\" columns ", "Code": "datac <- rename(datac, c(\"mean\" = paste(measurevar, \"_mean\", sep = \"\"))) datac <- rename(datac, c(\"median\" = paste(measurevar, \"_median\", sep = \"\"))) datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean ", "Label": "Data Variable", "Source": "https://osf.io/gk6jh/", "File": "summarySE.R" }, { "ID": 587, "Comment": " A function that reads in one line in the csv file, add tags and texts around it, and write the output line into an output file ", "Code": "add_tags <- function(l,o){ image_no <- strsplit(as.character(l[1]),\"\\\\.\")[[1]][1] image_URL <- paste(\"\",sep=\"\") write(paste(\"[[Block:\", image_no, \"]] \\n [[Question:DB]] \\n [[ID:\", image_no, \"-image]] \\n\", image_URL, \" \\n [[Question:TE:SingleLine]] \\n [[ID:\", image_no, \"-TE]] \\n\", TE, \" \\n [[Question:MC:SingleAnswer:Vertical]] \\n [[ID:\", image_no, \"-MC]] \\n\", MC, \" \\n [[Choices]] \\n\", choices_formatted, \"\\n [[Question:Matrix]] \\n [[ID:\", image_no, \"-ratings]] \\n \",rating_prompt,\" \\n [[AdvancedChoices]] \\n \", rating_statements_formatted, \"\\n [[AdvancedAnswers]] \\n [[Answer]] \\n\",\"1 - Strongly Disagree\", \"\\n [[Answer]] \\n 2 \\n [[Answer]] \\n 3 \\n [[Answer]] \\n 4 \\n [[Answer]] \\n 5 \\n [[Answer]] \\n 6 \\n [[Answer]] \\n\", \"7 - Strongly Agree\",\" \\n [[PageBreak]] \\n\", sep = \"\"), o, append = TRUE) } ", "Label": "Data Variable", "Source": 
"https://osf.io/t2jka/", "File": "batchUploadImages.R" }, { "ID": 588, "Comment": "Generate a plot for posterior predictive check (evaluate whether the ' posterior predictive data look more or less similar to the observed data) ' ' @param df dataframe with the data ' @param mod Bayesian model ' ' @return plot with posterior predictive check", "Code": "get_pp_check <- function(df, mod) { map(list(df), ~brms::pp_check(mod, ", "Label": "Visualization", "Source": "https://osf.io/5te7n/", "File": "save_get_pp_check.R" }, { "ID": 589, "Comment": " Simulate a single between group study given sample size n, true (fixed) effect size delta, and heterogeneity (random effect) tau. Generate data for the experimental group (y_e) and for the control group (y_c). ", "Code": "y_e = rnorm(n, 0, 1) + delta + rnorm(1, 0, tau) y_c = rnorm(n, 0, 1) ", "Label": "Statistical Modeling", "Source": "https://osf.io/mg3ny/", "File": "1_sim_functions.R" }, { "ID": 590, "Comment": " calculate pooled variance S, standardized mean difference d, the variance of d, the pvalue, and N. ", "Code": "S = sqrt(((n - 1) * v_e + (n - 1) * v_c) / df) d = (m_e - m_c) / S var.d = (n + n)/(n * n) + (d^2 / (2 * (n + n))) se.d = sqrt(var.d) N = n + n", "Label": "Data Variable", "Source": "https://osf.io/mg3ny/", "File": "1_sim_functions.R" }, { "ID": 591, "Comment": "Box and Whisker Plots to Compare Models", "Code": "scales <- list(x=list(relation=\"free\"), y=list(relation=\"free\")) bwplot(results, scales=scales) ", "Label": "Visualization", "Source": "https://osf.io/uxdwh/", "File": "code.R" }, { "ID": 592, "Comment": "stimuli specify correlations for rnorm_multi (one of several methods)", "Code": "stim_cors = stim_i_cor stim = rnorm_multi( n = stim_n, vars = 2, r = stim_cors, mu = 0, # means of random intercepts and slopes are always 0 sd = c(stim_sd, stim_version_sd), varnames = c(\"stim_i\", \"stim_version_slope\") ) %>% mutate( stim_id = 1:stim_n ) stim_cors = stim_i_cor stim = rnorm_multi( n = stim_n, vars = 2, r = stim_cors, mu = 0, # means of random intercepts and slopes are always 0 sd = c(stim_sd, stim_version_sd), varnames = c(\"stim_i\", \"stim_version_slope\") ) %>% mutate( stim_id = 1:stim_n ) stim_cors = stim_i_cor stim = rnorm_multi( n = stim_n, vars = 2, r = stim_cors, mu = 0, # means of random intercepts and slopes are always 0 sd = c(stim_sd, stim_version_sd), varnames = c(\"stim_i\", \"stim_version_slope\") ) %>% mutate( stim_id = 1:stim_n ) ", "Label": "Data Variable", "Source": "https://osf.io/cd5r8/", "File": "Sim_Function.R" }, { "ID": 593, "Comment": "calculate trialspecific effects by adding overall effects and slopes", "Code": "version_eff = stim_version_eff + stim_version_slope + sub_version_slope, version_eff = stim_version_eff + stim_version_slope + sub_version_slope, version_eff = stim_version_eff + stim_version_slope + sub_version_slope, ", "Label": "Statistical Modeling", "Source": "https://osf.io/cd5r8/", "File": "Sim_Function.R" }, { "ID": 594, "Comment": " Take the list of csv files from above, and read them all into R (purrr::map function) This will download 100 csv files into a list The reduce function will then bind them all together into a dataframe ", "Code": "large_df <- csv_files %>% purrr::map(function(x) { read.csv(x) }) %>% purrr::reduce(cbind)", "Label": "Data Variable", "Source": "https://osf.io/skp56/", "File": "CSEP_DataScienceExPhys_Akerman2021.R" }, { "ID": 595, "Comment": " Re structure this dataframe to turn it to long format (pivot_longer) Keep the X columns in, these are the 
row values we can use to denote a sample. The column names contain the participant (P) and the session number (S). It will display with each 1st sample of each session for a given participant, so need to reorder to get sorted by participant, session, then second sample. The names_pattern uses regular expressions to denote that the names will come from the specified area, i.e., after the P, which could contain any values which occur before the underscore, and likewise after the S ", "Code": "large_df <- large_df %>% tidyr::pivot_longer(cols = !contains(\"X\"), names_to = c(\"participant\", \"session\"), names_pattern = c(\"P(.*)_S(.*)\"), values_to = \"heart_rate\") %>% dplyr::rename(seconds = \"X\") %>% dplyr::arrange(participant, session, seconds) %>% dplyr::mutate(participant = as.factor(participant), session = as.factor(session)) ", "Label": "Data Variable", "Source": "https://osf.io/skp56/", "File": "CSEP_DataScienceExPhys_Akerman2021.R" }, { "ID": 596, "Comment": "pool \"exp_buc\" columns into one column, so that advocacy type is one variable, and create new \"experienced\" column for each level of advocacy type ", "Code": "data <- data %>% pivot_longer(names_to = \"advocacytype\", values_to = \"experienced\", graphic_exp_buc:disprotest_exp_buc) ", "Label": "Data Variable", "Source": "https://osf.io/3aryn/", "File": "9Graphingspeciesism_Spanish.R" }, { "ID": 597, "Comment": "Create variables for residue that is 'present' vs 'absent'. This is used for the tables/descriptives AND for color/filling the figures", "Code": "data <- dplyr::mutate(data, oro_zero = if_else(valleculae_severity_rating == 0, \"Absent\", \"Present\")) data <- dplyr::mutate(data, hypo_zero = if_else(piriform_sinus_severity_rating == 0,\"Absent\", \"Present\")) data <- dplyr::mutate(data, epi_zero = if_else(epiglottis_severity_rating == 0, \"Absent\", \"Present\")) data <- dplyr::mutate(data, lv_zero = if_else(laryngeal_vestibule_severity_rating == 0, \"Absent\", \"Present\")) data <- dplyr::mutate(data, vf_zero = if_else(vocal_folds_severity_rating == 0, \"Absent\", \"Present\")) data <- dplyr::mutate(data, sg_zero = if_else(subglottis_severity_rating == 0, \"Absent\", \"Present\")) data <- dplyr::mutate(data, pas_zero = if_else(pas_max < 2, \"Absent\", \"Present\")) ", "Label": "Data Variable", "Source": "https://osf.io/4anzm/", "File": "norms_code.R" }, { "ID": 598, "Comment": "Diagnostic tests to determine appropriate number of factors in EFA: parallel analysis (includes scree plot) and Very Simple Structure test;; optimal number of factors is 6", "Code": "fa.parallel(mainData[, c(2:56)], fm = 'ml', fa = 'fa') vss(mainData[, c(2:56)], n = 8, rotate = 'oblimin', fm = 'mle') ", "Label": "Statistical Test", "Source": "https://osf.io/2j47e/", "File": "Factor analysis.R" }, { "ID": 599, "Comment": "replace missing values (-9 and 99) with NA", "Code": "pilot<- na_if(pilot,99) pilot<- na_if(pilot,-9) ", "Label": "Data Variable", "Source": "https://osf.io/6579b/", "File": "03_Supplement.R" }, { "ID": 600, "Comment": "subset of participants with fewer than 4 missings", "Code": "pilot<- subset(pilot, pilot$missings_a < 4) pilot_c <- pilot[,c(\"SJT_kb_00007\",\"SJT_kb_00028\",\"SJT_kb_00058\", \"SJT_kb_00054\",\"SJT_kb_00072\",\"SJT_kb_00026\", \"SJT_kb_00060\",\"SJT_kb_00053\",\"SJT_kb_00039\", \"SJT_kb_00027\",\"SJT_kb_00015\",\"SJT_kb_00038\", \"SJT_kb_00035\",\"SJT_kb_00055\",\"SJT_kb_00010\")] pilot$missings_c <- apply(pilot_c,1,function(x) sum(is.na(x))) pilot<- subset(pilot, pilot$missings_c < 4) pilot_es <-
pilot[,c(\"SJT_kb_00103\",\"SJT_kb_00197\",\"SJT_kb_00136\", \"SJT_kb_00134\",\"SJT_kb_00113\",\"SJT_kb_00128\", \"SJT_kb_00117\",\"SJT_kb_00127\",\"SJT_kb_00104\", \"SJT_kb_00108\",\"SJT_kb_00094\",\"SJT_kb_00143\", \"SJT_kb_00116\",\"SJT_kb_00202\",\"SJT_kb_00175\")] pilot$missings_es <- apply(pilot_es,1,function(x) sum(is.na(x))) pilot<- subset(pilot, pilot$missings_es < 4) ", "Label": "Data Variable", "Source": "https://osf.io/6579b/", "File": "03_Supplement.R" }, { "ID": 601, "Comment": "Coding past experiences into binary variables", "Code": "data <- data %>% mutate_at(vars(\"graphic_exp\":\"attentioncheck_exp\"), list(buc = bucketing_experienced)) %>% glimpse ", "Label": "Data Variable", "Source": "https://osf.io/3aryn/", "File": "3VariableCreation.R" }, { "ID": 602, "Comment": "Coding behaviors into binary variables 0 unselected, 1 selected ", "Code": "data <- data %>% mutate_at(vars(\"behavior_donation\":\"behavior_none\"), list(bin = binary.behavior)) %>% glimpse ", "Label": "Data Variable", "Source": "https://osf.io/3aryn/", "File": "3VariableCreation.R" }, { "ID": 603, "Comment": "Creating a combined race/ethnicity variable", "Code": "data <- data %>% mutate(race.eth = case_when(hispanic == \"Yes\" ~ \"Hispanic or Latinx\", race == \"Caucasian/White\" & hispanic == \"No\" ~ \"White, Non-Latinx\", race == \"Black or African-American\" & hispanic == \"No\" ~ \"Black, Non-Latinx\", race == \"Asian\" & hispanic == \"No\" ~ \"Asian\", race == \"Multi-Racial\" & hispanic == \"No\" ~ \"Multi-Racial\", TRUE ~ \"Other Races/Ethnicities\")) ", "Label": "Data Variable", "Source": "https://osf.io/3aryn/", "File": "3VariableCreation.R" }, { "ID": 604, "Comment": "correlations between csd_mean and averaged csds for Big Five traits (not mentioned in the text):", "Code": "psych::corr.test( df2$e_csd,df2$csd_mean_e ) psych::corr.test( df2$a_csd,df2$csd_mean_a ) psych::corr.test( df2$pa_csd,df2$csd_mean_pa ) psych::corr.test( df2$na_csd,df2$csd_mean_na ) ", "Label": "Data Variable", "Source": "https://osf.io/tajd9/", "File": "Flux_MainDataAnalyses.R" }, { "ID": 605, "Comment": " Participants could select all response options that applied to them in any given situation. Here, we create one dummy variable for every response option. 
", "Code": "for (i in 1:length(survey$interacting_by)) { v <- unlist(strsplit(survey$interacting_by[i], \",\")) if (is.element(\"1\", v)) { survey$TiP[i] <- 1 } if (is.element(\"2\", v)) { survey$ToP[i] <- 1 } if (is.element(\"3\", v)) { survey$TM[i] <- 1 } if (is.element(\"4\", v)) { survey$ChattingWHATSAPP[i] <- 1 } if (is.element(\"5\", v)) { survey$ChattingDATING[i] <- 1 } if (is.element(\"6\", v)) { survey$Emailing[i] <- 1 } if (is.element(\"7\", v)) { survey$Videochatting[i] <- 1 } if (is.element(\"8\", v)) { survey$Facebook[i] <- 1 } if (is.element(\"9\", v)) { survey$Instagram[i] <- 1 } if (is.element(\"10\", v)) { survey$Snapchat[i] <- 1 } if (is.element(\"11\", v)) { survey$Twitter[i] <- 1 } if (is.element(\"12\", v)) { survey$OTHER[i] <- 1 } if (is.element(\"0\", v)) { survey$NoInteraction[i] <- 1 } if (is.element(\"999\", v)) { survey$SKIP[i] <- 1 } } for (i in 1:length(survey$interacting_people)){ v <- unlist(strsplit(survey$interacting_people[i], \",\")) if (is.element(\"1\", v)) { survey$Classmates[i] <- 1 } if (is.element(\"2\", v)) { survey$Coworkers[i] <- 1 } if (is.element(\"3\", v)) { survey$Family[i] <- 1 } if (is.element(\"4\", v)) { survey$Friends[i] <- 1 } if (is.element(\"5\", v)) { survey$Roommates[i] <- 1 } if (is.element(\"6\", v)) { survey$Significant_other[i] <- 1 } if (is.element(\"7\", v)) { survey$Strangers[i] <- 1 } if (is.element(\"8\", v)) { survey$OTHER2[i] <- 1 } if (is.element(\"999\", v)) { survey$SKIP2[i] <- 1 } } ", "Label": "Data Variable", "Source": "https://osf.io/nxyh3/", "File": "Data_Prep_S1S2.R" }, { "ID": 606, "Comment": "Rescore so high values are what we expect to be positive predictors (e.g. female 2 male 1)", "Code": "df$Nationality2 <- df$Nationality df$Nationality2 <- as.integer(df$Nationality2 %in% \"Dutch\") df$Nationality2[df$Nationality2==0] <- 2 df$Nationality2[df$Nationality2==1] <- 0 df$Nationality2[df$Nationality2==2] <- 1 # Dutch=0 (36), Rest=1 (44) count(df$Nationality2) df$RelationshipStatus2 <- df$RelationshipStatus df$RelationshipStatus2 <- recode(df$RelationshipStatus2, \"3\" = 2) df$RelationshipStatus2[df$RelationshipStatus2==2] <- 0 # Partnered=0 (30), Single=1 (50) count(df$RelationshipStatus2) df$Gender2 <- df$Gender # df$Gender2[df$Gender2==3] <- NA # code 1 nonbinary participant as NA for this to avoid 3 levels df$Gender2[df$Gender2==1] <- 0 df$Gender2[df$Gender2==2] <- 1 # men=0 (19), women=1 (60), 1 NA count(df$Gender2) df$WorkStatus2 <- df$WorkStatus df$WorkStatus2[df$WorkStatus2==2] <- 0 # 54no, 26yes count(df$WorkStatus2) df$MentalHealth2 <- as.numeric(df$MentalHealth) df$MentalHealth2[df$MentalHealth2==1] <- 0 df$MentalHealth2[df$MentalHealth2==2] <- 1 # healthy=0 (59), history=1 (17), 4 NA count(df$MentalHealth2) df$selfefficacy_pre2 <- df$selfefficacy_pre*-1 # reverse code ", "Label": "Data Variable", "Source": "https://osf.io/mvdpe/", "File": "4. 
Compare pre-post.R" }, { "ID": 607, "Comment": "Rating means and standard deviations of conditions: unspecified rating", "Code": "round(mean(unspecified$rating), 2) round(sd(unspecified$rating), 2) ", "Label": "Statistical Test", "Source": "https://osf.io/9tnmv/", "File": "Exp4_buddhist_post.R" }, { "ID": 608, "Comment": "Save and export image as .eps (8.18 x 4.88 inch)", "Code": "if(saveFigures) cairo_ps(file = '../R_Output/Images/AccuracyPerProject_Revision.eps', onefile = TRUE, fallback_resolution = 600, width = 8.18, height = 4.88) par(cex.main = 2.2, mar = c(5, 5.5, 2, 2) + 0.1, mgp = c(3, 1, 0), cex.lab = 2, font.lab = 1, cex.axis = 1.6, bty = 'n', las = 1) plot(x = c(.5, .8), y = c(0, 50), type = 'n', xlab = expression(paste(\"Accuracy Rate \", omega, \" (in %)\")), ylab = 'Density', main = '', axes = FALSE) axis(2, lwd.tick = 0, labels = FALSE) axis(1, at = seq(.5, .8, by = .05), labels = seq(50, 80, by = 5)) d <- lapply(split(df$omega, df$condition), density) Map(function(dens, col) polygon(dens, col = col), dens = d, col = figureColors[['cols.transparent']]) text(.64,44.5,'Laypeople\\n(Evidence)', cex = 1.3) text(.55,44.5,'Laypeople\\n(Description)', cex = 1.3) text(.621,25,'Experts\\nML2', cex = 1.3) text(.75,25, 'Experts\\nSSRP', cex = 1.3) graphics.off() ", "Label": "Visualization", "Source": "https://osf.io/x72cy/", "File": "ExploratoryAnalyses.R" }, { "ID": 609, "Comment": "calculate AUC", "Code": "groupAUC.description <- pnorm(description.mud / sqrt(2)) # signal distribution has sigma = 1 quantile(groupAUC.description, c(0.025, 0.5, 0.975)) groupAUC.evidence <- pnorm(evidence.mud / sqrt(2)) # signal distribution has sigma = 1 quantile(groupAUC.evidence, c(0.025, 0.5, 0.975)) ", "Label": "Statistical Modeling", "Source": "https://osf.io/x72cy/", "File": "ExploratoryAnalyses.R" }, { "ID": 610, "Comment": "get relevant information: compute mode of signal distribution", "Code": "estimate_mode <- function(x) { d <- density(x) d$x[which.max(d$y)] } modeD <- estimate_mode(mud) medianC <- median(muc) empiricalLambda <- (0.5*modeD) + medianC", "Label": "Statistical Modeling", "Source": "https://osf.io/x72cy/", "File": "ExploratoryAnalyses.R" }, { "ID": 611, "Comment": "compute CIs for each value of lambda", "Code": "probD <- quantile(mud, probs = probs) for (i in seq_len(nPoints)){ c <- lambdas[i] - (0.5 * probD) qph[i, ] <- pnorm((0.5 * probD) - c) } ", "Label": "Statistical Test", "Source": "https://osf.io/x72cy/", "File": "ExploratoryAnalyses.R" }, { "ID": 612, "Comment": " get the posterior distributions of the intercepts, transformed to rate scale ", "Code": "post.description <- inv_logit_scaled(posterior_samples(m.studies, 'b_intercept'))[,1] post.evidence <- inv_logit_scaled(posterior_samples(m.studies, 'b_intercept') + posterior_samples(m.studies, 'b_conditionDescriptionPlusEvidence'))[,1] ", "Label": "Statistical Modeling", "Source": "https://osf.io/x72cy/", "File": "ExploratoryAnalyses.R" }, { "ID": 613, "Comment": "add the observed average accuracy per study and per condition", "Code": "fit$obs.accuracy <- c(aggregate(dat$guessed.correctly, list(condition = dat$condition), mean)[,'x'], aggregate(dat$guessed.correctly, list(study = dat$study, condition = dat$condition), mean)[,'x']) ", "Label": "Data Variable", "Source": "https://osf.io/x72cy/", "File": "ExploratoryAnalyses.R" }, { "ID": 614, "Comment": "Create a binary trial type variable: 0 = Go, 1 = No-Go ", "Code": "sart$trial_type <- sart$code sart$trial_type[sart$code == 3] <- NA sart$trial_type <- as.factor(sart$trial_type-1)
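# --- Editor's note (hedged): the subtraction recodes the original codes 1 and 2 to
# factor levels 0 (Go) and 1 (No-Go);; code-3 trials were already set to NA above ---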
as.factor(sart$trial_type-1)", "Label": "Data Variable", "Source": "https://osf.io/7vbtr/", "File": "Gyurkovics_Stafford_Levita_analyses.R" }, { "ID": 615, "Comment": "Calculate Go RT coefficient of variation (CV) to index intra-individual variability", "Code": "sartsub$go_cv <- sartsub$gosd / sartsub$gomean sart <- merge(sart, sartsub[,c(1,5,6,9)], by = \"subid\") ", "Label": "Statistical Test", "Source": "https://osf.io/7vbtr/", "File": "Gyurkovics_Stafford_Levita_analyses.R" }, { "ID": 616, "Comment": "Add CSE variables to the MW probe data set", "Code": "probes <- merge(probes, cse_wide[,c(\"subid\",\"cse\",\"ce\",\"csez\",\"cez\")], by = \"subid\") ", "Label": "Data Variable", "Source": "https://osf.io/7vbtr/", "File": "Gyurkovics_Stafford_Levita_analyses.R" }, { "ID": 617, "Comment": " read in, display, and summarize 'csv' data ", "Code": "data_raw <- read.table(\"osylv_spaceuse_raw.csv\", header = T, sep =\",\") head(data_raw) ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "osylv_sitecomparison.R" }, { "ID": 618, "Comment": "Add columns with delta distance and delta time", "Code": "tracks <- tracks %>% group_by(id) %>% mutate(time_diff = difftime(dt, lag(dt, n = 1L), units = \"min\"), delta_x = x_utm - lag(x_utm, n = 1L), delta_y = y_utm - lag(y_utm, n = 1L), dist = sqrt(delta_x^2+delta_y^2)) ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "osylv_sitecomparison.R" }, { "ID": 619, "Comment": "boxplot of maximum dist per id", "Code": "maxdist_plot <- ggplot(daily_perid, aes(x= sex, y=max_dist_perid, fill = sex)) + theme_bw(20) + geom_boxplot(aes(fill = sex), outlier.shape = NA) + labs(y = \"Longest movement (m)\") + geom_jitter(aes(fill=sex), position=position_jitterdodge(0.2), shape = 21, size = 4, alpha=0.4) + facet_wrap(~site, labeller = labeller( site=c(can =\"Natural site\", oto = \"Enclosures\"))) + scale_x_discrete(labels= c(\"F\", \"M\")) + theme(legend.position= \"none\", axis.title.x = element_blank(), axis.ticks.x = element_blank(), axis.text.x = element_text(color = \"black\", size = 18), aspect.ratio = 3) + ylim(0, 25) # excludes one male point at >40m from Canande maxdist_plot ", "Label": "Visualization", "Source": "https://osf.io/3bpn6/", "File": "osylv_sitecomparison.R" }, { "ID": 620, "Comment": "centering a variable to a range from -0.5 to 0.5", "Code": "center <- function(x) { minimum <- min(x) maximum <- max(x) return((x - (minimum + maximum)/2)/(maximum - minimum)) }", "Label": "Data Variable", "Source": "https://osf.io/9qbwv/", "File": "helper.R" }, { "ID": 621, "Comment": "keep only data from 2003 onwards (because fid started in 2003)", "Code": "survival <- subset(survival, year >2002) ", "Label": "Data Variable", "Source": "https://osf.io/3wy58/", "File": "bivariate_model_winter_revision.R" }, { "ID": 622, "Comment": " identify influential points (e.g., cooksD > 6/n) ", "Code": "influential_obs <- as.numeric(names(cooksD)[(cooksD > (6/n))])", "Label": "Statistical Modeling", "Source": "https://osf.io/3bpn6/", "File": "all_navigation_models.R" }, { "ID": 623, "Comment": "function serves to plot all important information; what is plotted can be modified by the whatplot argument: first, predicted value line; second, confidence interval; third, points; fourth, axis; fifth, summary of the end point; sixth, corresponding line", "Code":
"nice.plot<-function(X=describe$generation,Y,maxY,minY=0,col,whatplot=c(T,T,T,T,T,T),ofset=0,name=\"\",labs=c(minY,ifelse(explarge==T,signif(maxY,nsignif),maxY)),span=0.18,end=length(X),roundrep=0,nsignif=3,percentual=F,explarge=F,labrep=ifelse(explarge==T,signif((ci[,1][newend]*(maxY-minY)+minY),nsignif),ifelse(percentual==T,paste(round((ci[,1][newend]*(maxY-minY)+minY)*100,roundrep),\"%\",sep=\"\"),round((ci[,1][newend]*(maxY-minY)+minY),roundrep))),cextext=1,lwdmain=2,mtextcex=1,firstax=-0.3,smooth=F){ col.li<-col col.ci<-paste(col,\"20\",sep=\"\") col.po<-paste(col,\"33\",sep=\"\") Y<-(Y-minY)/(maxY-minY) if(whatplot[3]==T){ points(X[1:end],Y[1:end],col=col.po,pch=16) } tryCatch({ lo <- loess(Y~X,degree=1,span=span) newX <- seq(min(X),max(X), length.out=1000) ci = cbind( predict(lo, data.frame(X=newX)), predict(lo, data.frame(X=newX))+ predict(lo, data.frame(X=newX), se=TRUE)$se.fit*qnorm(1-.05/2), predict(lo, data.frame(X=newX))- predict(lo, data.frame(X=newX), se=TRUE)$se.fit*qnorm(1-.05/2) ) }, error = function(e){ print(\"warning: loess returned error, original values instead of loess smoothed curved are visualized\") newX <- X ci = cbind(Y,Y,Y) }) newend<-which.min(abs(newX-end)) if(smooth==F){ newY <- approx(Y,n=1000)$y ci <- cbind(newY,newY,newY) } if(whatplot[1]==T){ lines(newX[1:newend], ci[,1][1:newend], lwd=lwdmain,col=col.li) } if(whatplot[2]==T){ if(smooth==T){ polygon(c(newX[1:newend],rev(newX[1:newend])), c(ci[,2][1:newend],rev(ci[,3][1:newend])), col=col.ci,border=NA) } } if(whatplot[4]==T){ par(mgp=c(3,ofset+firstax,ofset+firstax-0.2)) axis(2,at=c(0,1),lab=c(\"\",\"\"),col=col.li,col.axis=col.li,tck=-0.008) mtext(name,2,line=ofset+firstax,col=col.li,cex=mtextcex) mtext(labs,at=c(0,1),2,line=ofset+firstax,col=col.li,cex=mtextcex) } xtext<-par(\"usr\")[2]-(par(\"usr\")[2]-par(\"usr\")[1])/50 if(whatplot[5]==T){ text(xtext,ci[,1][newend],labrep,col=col.li,xpd=T,pos=4,cex=cextext) } if(whatplot[6]==T){ lines(c(0,xtext),rep(ci[,1][newend],2),col=col.li,lty=3,xpd=T) } } ", "Label": "Visualization", "Source": "https://osf.io/pvyhe/", "File": "visualize_niceplot.R" }, { "ID": 624, "Comment": "We draw a lollipop diagram to illustrate color distribution", "Code": "par(ps = PS, mar = c(5, 5, 4, 2) + 0.1) plot(x = graphData$colour, y = graphData$Beach, type = \"p\", col = col_beach, ylim = c(0, 65), xlab = \"Colour\", ylab = \"Proportion [%]\", pch = PCH, xaxt = \"n\", cex = SS) axis(1, at = seq(1, 6, by = 1), labels = c(\"White\", \"Beige\", \"Grey\", \"Yellow\", \"Red\", \"Black\")) par(ps = PS, new = TRUE, mar = c(5, 5, 4, 2) + 0.1) plot(x = graphData$colour, y = graphData$Wreck, type = \"p\", col = col_wreck, ylim = c(0, 65), axes = FALSE, xlab = \"\", ylab = \"\", pch = PCH, cex = SS) arrows(x0 = graphData$colour, x1 = graphData$colour, y0 = graphData$Beach, y1 = graphData$Wreck, angle = 90, length = 0, ) legend(\"topright\", legend = c(\"Beach\", \"Wreck\"), fill = c(col_beach, col_wreck), bty = \"n\") ", "Label": "Visualization", "Source": "https://osf.io/9jxzs/", "File": "03_analysis_color.R" }, { "ID": 625, "Comment": "Extract model statistics Extract coefficients and std errors", "Code": "model_coefs <- glmm_list %>% plyr::ldply(.fun = tidy) %>% subset(group == 'fixed') %>% droplevels", "Label": "Statistical Modeling", "Source": "https://osf.io/x8vyw/", "File": "03_glmm_analysis.R" }, { "ID": 626, "Comment": "Regression test for funnel plot asymmetry", "Code": "res <- rma(Z_value,Z_var, data=data1, measure = \"ZCOR\", method = \"ML\") regtest(res, predictor = \"vi\") ", 
"Label": "Visualization", "Source": "https://osf.io/kpe75/", "File": "analysis.R" }, { "ID": 627, "Comment": "build a new matrix that includes the formatted correlations and their significance stars", "Code": "Rnew <- matrix(Rformatted, ncol = ncol(x)) rownames(Rnew) <- colnames(x) colnames(Rnew) <- paste(colnames(x), \"\", sep =\" \") ", "Label": "Data Variable", "Source": "https://osf.io/6jmke/", "File": "custom_functions.R" }, { "ID": 628, "Comment": "CLEAR ENVIRONMENT function to clear the environment after running a script while keeping important objects use clear_environment: add store_to_keep variable with list variables;; tip: include a variable with curr_env", "Code": "clear_environment <- function(keep, # vector with variable-names to be excluded from cleaning all=ls(envir=pos.to.env(1))){ rm(list = all[! all %in% keep], envir = pos.to.env(1)) } ", "Label": "Data Variable", "Source": "https://osf.io/6jmke/", "File": "custom_functions.R" }, { "ID": 629, "Comment": "boxplot visualizing the differences between the three languages", "Code": "par(mfrow = c(1,1)) boxplot(ReadRate ~ Language, data = joint.read_rate, main = \"Reading rate, words/minute\") ", "Label": "Visualization", "Source": "https://osf.io/ex9fj/", "File": "RCode_Final_24Feb20.R" }, { "ID": 630, "Comment": "zscore RSES T1 and T2", "Code": "df2$RSES1Z <- (df2$RSES1 - mean(df2$RSES1))/sd(df2$RSES1) df2$RSES2Z <- (df2$RSES2 - mean(df2$RSES2, na.rm = TRUE))/sd(df2$RSES2, na.rm = TRUE) ", "Label": "Statistical Test", "Source": "https://osf.io/9jzfr/", "File": "20180513Study3Analysis.R" }, { "ID": 631, "Comment": " ANALYSES Does Att Valence differ across Sections, OA Practice, and Level? ", "Code": "mod_attvalence <- lmer(Att~Level*Section*OAPractice+(1|Responder_ID), df_attvalence) summary(mod_attvalence) anova(mod_attvalence) eta_sq(mod_attvalence, partial = TRUE) ", "Label": "Data Variable", "Source": "https://osf.io/qbgct/", "File": "code_analyses.R" }, { "ID": 632, "Comment": "Create variable Past_Future from Item.", "Code": "df_use$Past_Future <- recode(df_use$Item, P1Use01 = \"In the past 12 months\",P1Use02 = \"In the following 12 months\", P2Use01 = \"In the past 12 months\",P2Use02 = \"In the following 12 months\", P3Use01 = \"In the past 12 months\",P3Use02 = \"In the following 12 months\", P4Use01 = \"In the past 12 months\",P4Use02 = \"In the following 12 months\", P5Use01 = \"In the past 12 months\",P5Use02 = \"In the following 12 months\") ", "Label": "Data Variable", "Source": "https://osf.io/qbgct/", "File": "code_analyses.R" }, { "ID": 633, "Comment": "Calculate average level of Att across Perspectives.", "Code": "df_use_attvalence$Att <- rowMeans(df_use_attvalence[,c('Att_DailyLife', 'Att_ResearchField', 'Att_PublicSociety')], na.rm=TRUE) ", "Label": "Data Variable", "Source": "https://osf.io/qbgct/", "File": "code_analyses.R" }, { "ID": 634, "Comment": " Formula and family specification In our case, setting `k 4` and `bs 'cr'` in the mgcv::s() smooth term is not overly restrictive (checked by running the frequentist mgcv::gam() function for each existing activity index in turn, thereby varying arguments `k` and `bs`, and then checking the summary() of the resulting fits), but in a future (prospective) study, the mgcv::s() settings need to be checked again: ", "Code": "C_formula <- as.formula(paste(voutc, \"~\", paste0(\"s(\", vactidx_i, \", k = 4, bs = 'cr')\"))) C_family <- cumulative(link = \"logit\", link_disc = \"log\", threshold = \"flexible\") ", "Label": "Statistical Modeling", "Source": 
"https://osf.io/emwgp/", "File": "comparison_smooth.R" }, { "ID": 635, "Comment": "display relationships between WVSES and associated variables", "Code": "ggplot(df11, aes(WVSES, SE)) + geom_jitter() ggplot(df11, aes(WVSES, SISE)) + geom_jitter() ggplot(df11, aes(WVSES, ANX)) + geom_jitter() ggplot(df11, aes(WVSES, AVO)) + geom_jitter() ggplot(df11, aes(WVSES, LIFE)) + geom_jitter() ggplot(df11, aes(WVSES, EXTRA)) + geom_jitter() ggplot(df11, aes(WVSES, AGREE)) + geom_jitter() ggplot(df11, aes(WVSES, CON)) + geom_jitter() ggplot(df11, aes(WVSES, NEUR)) + geom_jitter() ggplot(df11, aes(WVSES, OPEN)) + geom_jitter() ggplot(df11, aes(WVSES, SSTA)) + geom_jitter() ggplot(df11, aes(WVSES, SINC)) + geom_jitter() ggplot(df11, aes(WVSES, MDS)) + geom_jitter() ", "Label": "Visualization", "Source": "https://osf.io/9jzfr/", "File": "20180714Study1analysisscript.R" }, { "ID": 636, "Comment": "take RSES and run confirmatory factor analysis", "Code": "RSESdata <- data.frame(df11$SE1, df11$SE2, df11$SE3, df11$SE4, df11$SE5, df11$SE6, df11$SE7, df11$SE8, df11$SE9, df11$SE10) singleF.model <- 'SE =~ df11.SE1 + df11.SE2 + df11.SE3 + df11.SE4 + df11.SE5 + df11.SE6 + df11.SE7 + df11.SE8 + df11.SE9 + df11.SE10' ", "Label": "Statistical Modeling", "Source": "https://osf.io/9jzfr/", "File": "20180714Study1analysisscript.R" }, { "ID": 637, "Comment": " pivot authorlevel data to \"longer\" authorarticlelevel dataframe ", "Code": "article_author_level <- author_level %>% dplyr::select(FirstName_MI_LastName_str, full_name.kap, Article.1.Code.kap, Article.2.Code.kap, Article.3.Code.kap, Article.4.Code.kap, Article.5.Code.kap, Article.6.Code.kap, Article.7.Code.kap, Article.8.Code.kap, Article.9.Code.kap, Article.10.Code.kap, Gender.apsa, R_E.apsa, PhD.Year, PhD.Institution) %>% pivot_longer(cols = -c(FirstName_MI_LastName_str, full_name.kap, Gender.apsa, R_E.apsa, PhD.Year, PhD.Institution), values_to = \"article_title\", names_to = \"names\") %>% subset(article_title != \"NA\") %>% relocate(article_title) ", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "mmcpsr_author_data.R" }, { "ID": 638, "Comment": "authorlevel count and proportion for all race / ethnic identity categories", "Code": "summary_race_ethnicity_author <- article_author_level %>% dplyr::select(-c(article_title, names)) %>% distinct() %>% summarize(count = c(sum(white, na.rm = TRUE), sum(black, na.rm = TRUE), sum(east_asian, na.rm = TRUE), sum(south_asian, na.rm = TRUE), sum(latino, na.rm = TRUE), sum(mena, na.rm = TRUE), sum(native, na.rm = TRUE), sum(pacific, na.rm = TRUE), sum(other, na.rm = TRUE))) summary_race_ethnicity_author <- summary_race_ethnicity_author %>% mutate(race_ethnicity = c(\"White\", \"Black\", \"East Asian\", \"South Asian\", \"Hispanic or Latino\", \"Middle Eastern / Arab\", \"Native\", \"Pacific Islander\", \"Other\")) %>% mutate(proportion = round(count / sum(count), 2)) %>% dplyr::select(race_ethnicity, count, proportion) ", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "mmcpsr_author_data.R" }, { "ID": 639, "Comment": "Convert `candidate_page` to a factor. (Facilitates data visualizations and exploration).", "Code": "elxn2015$candidate_page <- factor(elxn2015$candidate_page, levels = c(\"Harper\", \"Trudeau\", \"Mulcair\")) ", "Label": "Visualization", "Source": "https://osf.io/3fnjq/", "File": "facebook_pages.R" }, { "ID": 640, "Comment": " Timestamps are initially encoded as factors. Convert them to POSIXct format to avoid errors. 
Dates and times are separated and saved as two new variables; namely, `date_published` and `time_published`. `date_published` must be converted back to POSIXct format. `time_published` is left as a character string, as this format proves easier to work with when plotting time-related data. ", "Code": "elxn2015$created_time <- anytime(elxn2015$created_time, tz = \"America/Los_Angeles\") elxn2015$date_published <- as.POSIXct(format(elxn2015$created_time, \"%Y-%m-%d\")) elxn2015$time_published <- format(elxn2015$created_time, \"%H:%M:%S\") ", "Label": "Data Variable", "Source": "https://osf.io/3fnjq/", "File": "facebook_pages.R" }, { "ID": 641, "Comment": "Create a new variable from `date_published` to indicate the month each post was published.", "Code": "elxn2015$month_published <- as.POSIXct(elxn2015$date_published, format=\"%H:%M:%S\") elxn2015$month_published <- format(elxn2015$month_published, \"%B\") ", "Label": "Data Variable", "Source": "https://osf.io/3fnjq/", "File": "facebook_pages.R" }, { "ID": 642, "Comment": "`month_published` is converted to a factor with three levels such that values are chronologically ordered.", "Code": "elxn2015$month_published <- factor(elxn2015$month_published, levels = c(\"August\", \"September\",\"October\")) ", "Label": "Data Variable", "Source": "https://osf.io/3fnjq/", "File": "facebook_pages.R" }, { "ID": 643, "Comment": "Ensure consistent data types across data sets. IDs are appropriately treated as characters rather than numeric.", "Code": "Facebook_Pages$id <- as.character(Facebook_Pages$id) Facebook_Pages$likes_count <- as.double(Facebook_Pages$likes_count) Facebook_Pages$comments_count <- as.double(Facebook_Pages$comments_count) Facebook_Pages$shares_count <- as.double(Facebook_Pages$shares_count)", "Label": "Data Variable", "Source": "https://osf.io/3fnjq/", "File": "facebook_pages.R" }, { "ID": 644, "Comment": "run random and fixed effects analyses on the data and calculate confidence intervals for the variables corresponding to heterogeneity (I2, H2 and T2). slab contains a vector of study names to display on the plot. ", "Code": "res <- rma(measure = \"MD\", m1i = m2,m2i = m1, sd1i = sd2, sd2i = sd1, n1i = n2, n2i = n1, weights = as.numeric(weights.r), slab = c(\"Mc1\", \"Mc2\", \"Mi1\", \"Mi2\", \"Mi3\", \"Mi4\", \"Mi5\", \"Mi6\", \"Mi7\", \"Mi8\" ), method = \"DL\") res.f <- rma(measure = \"MD\", m1i = m2,m2i = m1, sd1i = sd2, sd2i = sd1, n1i = n2, n2i = n1, weights = weights.f, method = \"FE\") con <- confint(res) ", "Label": "Statistical Test", "Source": "https://osf.io/gwn4y/", "File": "ESCI_forest_plot.R" }, { "ID": 645, "Comment": "2. Transform precision into partial correlations for interpretation", "Code": "pr2pc <- function(K) { D.Prec = diag(diag(K)^(-.5)) R <- diag(2,dim(K)[1])-D.Prec%*%K%*%D.Prec colnames(R) <- colnames(K) rownames(R) <- rownames(K) return(R) } ", "Label": "Data Variable", "Source": "https://osf.io/xjm6z/", "File": "AuxiliaryFunctions.R" }, { "ID": 646, "Comment": "4.
BDgraph extract posterior distribution for estimates", "Code": "extractposterior <- function(fit, data, method = c(\"ggm\", \"gcgm\"), not.cont){ m <- length(fit$all_graphs) k <- 30000 n <- nrow(data) p <- ncol(data) j <- 1 densities <- rep(0, k) ", "Label": "Statistical Modeling", "Source": "https://osf.io/xjm6z/", "File": "AuxiliaryFunctions.R" }, { "ID": 647, "Comment": "objects to store graph centrality measures", "Code": "degree <- matrix(0, nrow = len, ncol = p) betweenness <- matrix(0, nrow = len, ncol = p) closeness <- matrix(0, nrow = len, ncol = p) ", "Label": "Data Variable", "Source": "https://osf.io/xjm6z/", "File": "AuxiliaryFunctions.R" }, { "ID": 648, "Comment": " create token named \"twitter_token\" ", "Code": "twitter_token <- create_token( app = appname, consumer_key = key, consumer_secret = secret, access_token = access_token, access_secret = access_secret) ", "Label": "Data Variable", "Source": "https://osf.io/yc8b5/", "File": "get_tweets.R" }, { "ID": 649, "Comment": "mean/sd ratings of targets and decoys by condition", "Code": "tapply(dat.long$rating, list(dat.long$condition, dat.long$target), mean) tapply(dat.long$rating, list(dat.long$condition, dat.long$target), function(x) sd(x)/sqrt(length(x))) ", "Label": "Data Variable", "Source": "https://osf.io/eg6w5/", "File": "experiment1b_analyses.R" }, { "ID": 650, "Comment": "mean difference between candidates in each condition", "Code": "round(s[s$target == 'envy.target' & s$condition == 'Baseline', \"emmean\"] - s[s$target == 'pity.target' & s$condition == 'Baseline', \"emmean\"],3) round(s[s$target == 'envy.target' & s$condition == 'PityAD', \"emmean\"] - s[s$target == 'pity.target' & s$condition == 'PityAD', \"emmean\"],3) round(s[s$target == 'envy.target' & s$condition == 'PityC', \"emmean\"] - s[s$target == 'pity.target' & s$condition == 'PityC', \"emmean\"],3) round(s[s$target == 'envy.target' & s$condition == 'EnvyAD', \"emmean\"] - s[s$target == 'pity.target' & s$condition == 'EnvyAD', \"emmean\"],3) round(s[s$target == 'envy.target' & s$condition == 'EnvyC', \"emmean\"] - s[s$target == 'pity.target' & s$condition == 'EnvyC', \"emmean\"],3) ", "Label": "Statistical Modeling", "Source": "https://osf.io/eg6w5/", "File": "experiment1b_analyses.R" }, { "ID": 651, "Comment": "regression for difference between social/nonsocial", "Code": "b = glmer(choice ~ pchoice1*social + opchoice2*social + opchoice1 + pchoice2 + (1 + pchoice1 + opchoice2 + opchoice1 + pchoice2 |code),family = 'binomial',data[data$opponent == 1,] ,control = glmerControl(optimizer=\"bobyqa\", tolPwrss = 1e-10, optCtrl = list(maxfun = 60000))) b = glmer(choice ~ pchoice1*social + opchoice2*social + opchoice1 + pchoice2 + (1 + pchoice1 + opchoice2 + opchoice1 + pchoice2 |code),family = 'binomial',data[data$opponent == 0,] ,control = glmerControl(optimizer=\"bobyqa\", tolPwrss = 1e-10, optCtrl = list(maxfun = 60000))) ", "Label": "Statistical Modeling", "Source": "https://osf.io/2yq8a/", "File": "figure2.R" }, { "ID": 652, "Comment": "Principal component analysis with correlation matrix", "Code": "pca <- prcomp(d[, -whi], scale. 
= T) ", "Label": "Statistical Modeling", "Source": "https://osf.io/8fzns/", "File": "2_Conditioning_Recode.R" }, { "ID": 653, "Comment": "Make stacked figure of costs by age", "Code": "stack <- lapply(pe.list.hadza_male, colMeans, na.rm=T) stacks <- as.data.frame(do.call(rbind, stack)) names(stacks) <- c(male.acts, \"climbing\") stacks$age <- age_seq stacks$Processing <- stacks$light_processing + stacks$other_out_of_camp + stacks$pounding_baobab stack_plot_male <- stacks %>% select(-nonsubsistence, -resting, -light_processing, -other_out_of_camp, -pounding_baobab) %>% gather(key=\"act\", value=\"cost\", carry_firewood_water:climbing, Processing) male_stack_hadza <- ggplot(stack_plot_male, aes(x=age, y=cost, fill=act)) + geom_area() + scale_fill_manual(name=\"Activity\", labels=c(\"Firewood and water collection\", \"Chopping\", \"Climbing\", \"Eating\", \"Manufacture\", \"Food processing\", \"Walking (during foraging)\"), values = brewer.pal(7, \"Set1\")) + labs(x=\"Age (years)\", y=\"Energy cost (kcal/day)\") + theme_classic(base_size=16) + ggtitle(\"Hadza: Men\") + lims(y=c(0, 750)) stack <- lapply(pe.list.hadza_female, colMeans, na.rm=T) stacks <- as.data.frame(do.call(rbind, stack)) names(stacks) <- female.acts stacks$age <- age_seq stacks$Processing <- stacks$light_processing + stacks$other_out_of_camp + stacks$pounding_baobab stack_plot_female <- stacks %>% select(-nonsubsistence, -resting, -light_processing, -other_out_of_camp, -pounding_baobab) %>% gather(key=\"act\", value=\"cost\", carry_firewood_water:digging, Processing) fhcols <- brewer.pal(8, \"Set1\")[c(1,2,4,5,6,7)] fhcols[2] <- \"#33CCFF\" female_stack_hadza <- ggplot(stack_plot_female, aes(x=age, y=cost, fill=act)) + geom_area() + scale_fill_manual(name=\"Activity\", labels=c(\"Firewood and water collection\", \"Digging\", \"Eating\", \"Manufacture\", \"Food processing\", \"Walking (during foraging)\"), values = fhcols) + labs(x=\"Age (years)\", y=\"Energy cost (kcal/day)\") + theme_classic(base_size=16) + ggtitle(\"Hadza: Women\") + lims(y=c(0, 750)) ", "Label": "Visualization", "Source": "https://osf.io/92e6c/", "File": "hadza_combine_data.R" }, { "ID": 654, "Comment": "create centered age^2 variable for quadratic LGCM", "Code": "qscale <- 100 rndhrs_subset <- rndhrs_subset %>% mutate(CAGE2_T4 = CAGE_T4^2 / qscale, CAGE2_T5 = CAGE_T5^2 / qscale, CAGE2_T6 = CAGE_T6^2 / qscale, CAGE2_T7 = CAGE_T7^2 / qscale, CAGE2_T8 = CAGE_T8^2 / qscale, CAGE2_T9 = CAGE_T9^2 / qscale, CAGE2_T10 = CAGE_T10^2 / qscale, CAGE2_T11 = CAGE_T11^2 / qscale ) ", "Label": "Statistical Modeling", "Source": "https://osf.io/3uyjt/", "File": "preprocessing_hrs.R" }, { "ID": 655, "Comment": "put.pvalue Function to put the pvalues accoridng to the journals guidelines", "Code": "put.pvalue=function(p){ sapply(p, function(x){ if (is.na(x)){pc=\"NA\"} else if (x<0.0001){pc=\"<0.0001\"} else if (0.0001<=x & x<0.001){pc <- sprintf(\"%.4f\", x)} else if (0.049<=x & x<0.05) {pc <- sprintf(\"%.3f\", floor(x*1000)/1000)} else if (0.001<=x & x<0.1){pc <- sprintf(\"%.3f\", x)} else if (0.1<=x){pc <- sprintf(\"%.2f\", x)} return(pc) }) } ", "Label": "Data Variable", "Source": "https://osf.io/dr8gy/", "File": "RCS_plot_LogisticCox.R" }, { "ID": 656, "Comment": "wald.test Function to perform Wald test in logistic or Cox model", "Code": "wald.test <- function(model, ntest){ d <- length(ntest) a <- t(coefficients(model)[ntest]) mat <- vcov(model)[ntest,ntest] value <- as.numeric(a%*%solve(mat)%*%t(a)) pvalue <- pchisq(value, df=d, lower.tail=F) return(list(value=value, 
df=d, pvalue=pvalue)) } ", "Label": "Statistical Test", "Source": "https://osf.io/dr8gy/", "File": "RCS_plot_LogisticCox.R" }, { "ID": 657, "Comment": "Function to replace the p-values in the stat table with the ones from the model", "Code": "update_table <- function(mymod) { modsum <- summary(mymod) p <- modsum$coefficients[, \"Pr(>|t|)\"] p <- as.vector(p) p <- pvalr(p, sig.limit = .001, digits = 3) p } ", "Label": "Statistical Modeling", "Source": "https://osf.io/z6nm8/", "File": "Stats_figures_JBTxECT.R" }, { "ID": 658, "Comment": "Plot KM Curve", "Code": "tiff(\"PRAD_KM Curve.tiff\", units = \"in\", width=8, height=6, res=300) ggsurvplot(fit1, D2, risk.table=TRUE, palette = \"jco\", pval=TRUE, conf.int=FALSE, surv.median.line=\"hv\", ylab= \"Survival Probability (%)\", xlab= \"Time (months)\", legend.title = \"TX GROUP\", pval.coord=c(10,0.25)) dev.off() ", "Label": "Visualization", "Source": "https://osf.io/srbcf/", "File": "R_KMCurveCode.R" }, { "ID": 659, "Comment": " now iterate through each participant: take that PP's data frame and add z-scores for RT; afterwards, add that PP's data frame to the empty data frame to recreate the main df including z-scores ", "Code": "for (aPP in unique(excl2Exp$PP)){ dat <- excl2Exp[excl2Exp$PP == aPP,] dat$zScore <- scale(dat$RespRT, scale = TRUE) excl2Exp2 <- rbind(excl2Exp2, dat) } ", "Label": "Data Variable", "Source": "https://osf.io/g8kbu/", "File": "dataAnalysisRewardAppsExperiment.R" }, { "ID": 660, "Comment": "t-test on subset 1 (n = 6/sex/group)", "Code": "p_6m6f = t.test(measure ~ group, data=simdata6each_sub1)$p.value #using 6 each sex, t-tests ", "Label": "Statistical Test", "Source": "https://osf.io/6q73b/", "File": "SubgroupStatsSimulationV5.R" }, { "ID": 661, "Comment": "box and whisker plot for refined factor scores: convert dataframe of factor scores and cluster membership to long form", "Code": "KClusterScores.long <- KClusterScores %>% pivot_longer(c(Sensory, CognitiveDemand, ThreatToSelf, CrossSettings, Safety, States), names_to = c(\"Factor\")) KClusterScores.long$Factor <- factor(KClusterScores.long$Factor, levels = c(\"Sensory\", \"CognitiveDemand\", \"ThreatToSelf\", \"CrossSettings\", \"Safety\", \"States\")) ", "Label": "Visualization", "Source": "https://osf.io/2j47e/", "File": "Figures.R" }, { "ID": 662, "Comment": "box and whisker plot for non-refined factor (scale) scores: convert dataframe of factor scores and cluster membership to long form", "Code": "scale.KClusterScores.long <- scale.KClusterScores %>% pivot_longer(c(Sensory, CognitiveDemand, ThreatToSelf, CrossSettings, Safety, States), names_to = c(\"Factor\")) scale.KClusterScores.long$Factor <- factor(scale.KClusterScores.long$Factor, levels = c(\"Sensory\", \"CognitiveDemand\", \"ThreatToSelf\", \"CrossSettings\", \"Safety\", \"States\")) ", "Label": "Visualization", "Source": "https://osf.io/2j47e/", "File": "Figures.R" }, { "ID": 663, "Comment": " calculate log-rank Z-statistics assuming a balanced risk set (in O-E, E is always 0.5) ", "Code": "zs <- sapply(1:(CureVobs1 + CureVobs0), function(n) (sum(CureVobs1s[1:n]) - n/2)/sqrt(n/4)) ", "Label": "Statistical Test", "Source": "https://osf.io/d9jny/", "File": "Ter Schure (2021) R code ALL-IN meta-analysis paper.R" }, { "ID": 664, "Comment": "Statistics: check distributions; check normal distribution of PSEs", "Code": "qqnorm(AllPSEs$Tennis.thre) shapiro.test(AllPSEs$Tennis.thre) # p < .01, qqnorm(AllPSEs$SoccerD.thre) shapiro.test(AllPSEs$SoccerD.thre) # p = .14, qqnorm(AllPSEs$SoccerI.thre) shapiro.test(AllPSEs$SoccerI.thre)
# p < .01 qqnorm(AllPSEs$Coin.thre) shapiro.test(AllPSEs$Coin.thre) # p = .04 ", "Label": "Statistical Test", "Source": "https://osf.io/8fw3b/", "File": "QPsyAnalyse.R" }, { "ID": 665, "Comment": "TEST 1: Wilcoxon's signed rank test for direct/indirect trials", "Code": "(TestID <- wilcox.test(EffectDirect, EffectIndirect, paired = TRUE)) ", "Label": "Statistical Test", "Source": "https://osf.io/8fw3b/", "File": "QPsyAnalyse.R" }, { "ID": 666, "Comment": " add a column with \"time_interval\": either 9:00, 13:00, 17:00, or 21:00. For this, check the $created column and, depending on which interval it is in, create a new column with mutate ", "Code": "df_ESM_data_analysis = df_ESM_data_analysis %>% mutate( time_interval = case_when( format(created,\"%H:%M\") >= param_timepoints[1] & format(created,\"%H:%M\") < param_timepoints[2] ~ param_timepoints[1], format(created,\"%H:%M\") >= param_timepoints[2] & format(created,\"%H:%M\") < param_timepoints[3] ~ param_timepoints[2], format(created,\"%H:%M\") >= param_timepoints[3] & format(created,\"%H:%M\") < param_timepoints[4] ~ param_timepoints[3], format(created,\"%H:%M\") >= param_timepoints[4] & format(created,\"%H:%M\") < param_t_end ~ param_timepoints[4], ) ) ", "Label": "Data Variable", "Source": "https://osf.io/dngyk/", "File": "ESM-RUM_R_code.R" }, { "ID": 667, "Comment": "first find the duplicates according to n_obs and time_interval", "Code": "df_ESM_data_analysis = df_ESM_data_analysis %>% arrange(session, created) %>% group_by(session, n_obs, time_interval) %>% mutate(num_dups = n(), dup_id = row_number()) %>% ungroup() %>% ", "Label": "Data Variable", "Source": "https://osf.io/dngyk/", "File": "ESM-RUM_R_code.R" }, { "ID": 668, "Comment": "plot perseverance as a function of n_obs for each subject", "Code": "gdat <- groupedData(perseverance ~ n_obs | session, data=df_ESM_data_analysis_noNAnoOutliers) plot(gdat) ", "Label": "Visualization", "Source": "https://osf.io/dngyk/", "File": "ESM-RUM_R_code.R" }, { "ID": 669, "Comment": "put alpha together with descriptive statistics", "Code": "questionnaire_stats <- bind_cols(questionnaires_stats, scores_alpha) %>% dplyr::select(-name, -vars) ", "Label": "Data Variable", "Source": "https://osf.io/dngyk/", "File": "ESM-RUM_R_code.R" }, { "ID": 670, "Comment": " Correlation plot (colors on top, numbers on bottom) JUST MEANS (aggregated within person) ", "Code": "corrplot(means, p.mat = res2$p, insig = \"label_sig\", sig.level = c(.001, .01, .05), pch.cex = .9, pch.col = \"gray\", method = \"color\", tl.col = \"black\", tl.cex = 1, cl.offset = 0.25, cl.cex = 0.75) corrplot(means, method=\"number\", col = \"black\", add = TRUE, cl.pos = \"n\", tl.pos = \"n\", type = \"lower\", number.cex = .60, number.digits = 2, tl.col = \"black\", cl.offset = 4) ", "Label": "Visualization", "Source": "https://osf.io/dngyk/", "File": "ESM-RUM_R_code.R" }, { "ID": 671, "Comment": "Independence test: two ways (HSIC or Nonlinear corr) HSIC", "Code": "if (independence == \"hsic\") { hsic.p <- dhsic.test(ry, rx, method = \"gamma\", kernel = \"gaussian\")$p.value pval[i] <- hsic.p } ", "Label": "Statistical Test", "Source": "https://osf.io/qwr69/", "File": "nGFS.R" }, { "ID": 672, "Comment": "Correlation table (only with complete cases so takes out data from single parent)", "Code": "means <- cor(mean_data_mat[,c(2:12)], use = \"complete.obs\") # Correlation matrix res2 <- cor.mtest(mean_data_mat[,c(2:12)], conf.level = .95, use = \"complete.obs\") # p-values ", "Label": "Data Variable", "Source": "https://osf.io/dngyk/",
"File": "ESM-RUM_R_code.R" }, { "ID": 673, "Comment": "EXCLUDE PPN WITH RT < or > 3 SD", "Code": "max_sd_rt <- print(mean(data.rt.check$rt)+3*sd(data.rt.check$rt)) ppn.exclude.rt <- print(filter(data.rt.check, rt >= max_sd_rt)$ppn) ", "Label": "Data Variable", "Source": "https://osf.io/c8vfj/", "File": "Analysis_Imitation_and_Token.R" }, { "ID": 674, "Comment": "Calculate and plot difference between the distributions", "Code": "postDiff <- forModel2plotA$b_cfaInequality_nat - forModel2plotB$b_cfaSocial_control_nat %>% as.data.frame() ggplot(postDiff, aes(.)) + geom_histogram(bins = 100, color = \"black\", fill = \"lightgray\") quantile(postDiff, c(0.025, 0.975)) #So 95% confidence interval of the difference lies above 0 ", "Label": "Visualization", "Source": "https://osf.io/2sguf/", "File": "US attitude code.R" }, { "ID": 675, "Comment": "Exclude participants with accuracy less than 0.8", "Code": "d_lowAcc = CQ_acc %>% filter(accuracy < 0.8) d = d[!d$Subject %in% d_lowAcc$Subject,] rm(list=ls()[! ls() %in% c(\"d\")]) ", "Label": "Data Variable", "Source": "https://osf.io/cd5r8/", "File": "R_Code_Fang_Wu.R" }, { "ID": 676, "Comment": "check the correlation between even and oddnumbered sets Pearson", "Code": "print(corr.test(person$ConditionOr_even, person$ConditionOr_odd), short = FALSE) KH_Correct(-.09) #.16 print(corr.test(person$ConditionOr_even, person$ConditionOr_odd), short = FALSE) KH_Correct(-.11) #.20 ", "Label": "Statistical Test", "Source": "https://osf.io/cd5r8/", "File": "R_Code_Fang_Wu.R" }, { "ID": 677, "Comment": "model criticism (remove data points with abs(standardized residuals) over 2.5)", "Code": "de_trim<- de[abs(scale(resid(m_de)))<2.5,] do_trim<- do[abs(scale(resid(m_do)))<2.5,] m_de_mc = blmer(-1/Reaction.Time ~ Condition + (1+Condition|Subject) + (1+Condition|ItemNo), data = de_trim, control=lmerControl(optimizer = \"nloptwrap\", optCtrl = list(algorithm = \"NLOPT_LN_NELDERMEAD\", maxit = 2e5))) m_do_mc = blmer(-1/Reaction.Time ~ Condition + (1+Condition|Subject) + (1+Condition|ItemNo), data = do_trim, control=lmerControl(optimizer = \"nloptwrap\", optCtrl = list(algorithm = \"NLOPT_LN_NELDERMEAD\", maxit = 2e5))) ", "Label": "Statistical Modeling", "Source": "https://osf.io/cd5r8/", "File": "R_Code_Fang_Wu.R" }, { "ID": 678, "Comment": "Add new ParentIndex column in trialSummary and fill with NA", "Code": "trialSummary[,\"ParentIndex\"] <- NA ", "Label": "Data Variable", "Source": "https://osf.io/42nyv/", "File": "Eye tracking_pre-processing_R1.R" }, { "ID": 679, "Comment": "Lookup the parent row index of the stimulus string in s.data and insert it for social", "Code": "for(k in 1:length(trialSummary$Stimulus)){ trialSummary$ParentIndex[k] <- which(s.data$Stimulus == trialSummary$Stimulus[k])[1] } trialSummary$AgeGroup <- s.info[i,3] trialSummary$Sex <- s.info[i,4] trialSummary$ID <- i ", "Label": "Data Variable", "Source": "https://osf.io/42nyv/", "File": "Eye tracking_pre-processing_R1.R" }, { "ID": 680, "Comment": "Fixation Count custom mean function: mean of fixation count left and right eye (if only ONE eye contains 0.0 > no averaging) https://stackoverflow.com/questions/45998419/aggregategroupmeanswhileignoringzerosunless0istheonlyvalue", "Code": "meanSubset.FixCount = aggregate(Fixation.Count~Stimulus+AOI.Name, data=s.data, FUN=(function(x){ifelse(sum(x==0)>0 & sum(x !=0) >0, mean(x[x>0]), mean(x))})) ", "Label": "Data Variable", "Source": "https://osf.io/42nyv/", "File": "Eye tracking_pre-processing_R1.R" }, { "ID": 681, "Comment": "3) CALCULATION OF 
ESTIMATED MARGINAL MEANS. Tests for differences in response to jargon and get data for plots", "Code": "at_jarg <- list(Jargon = c(\"Less\", \"More\"), TrvlAdv_Atten = \"Cons\", BackgrYrsOfExp = \"6-10 yrs\", BackgrDaysPerYr = \"11-20 days\", BullUseType = \"D\", cut = \"3|4\") grid_jarg <- ref_grid(clmm_underjar, mode = \"exc.prob\", at = at_jarg) (emm_jarg <- emmeans(grid_jarg, specs = pairwise ~ Jargon, by = \"BackgrAvTraining\")) plot_jarg <- summary(emm_jarg$emmeans) cont_jarg <- summary(emm_jarg$contrasts) ", "Label": "Statistical Test", "Source": "https://osf.io/aczx5/", "File": "220423_Fig06_Under_JargExpl.R" }, { "ID": 682, "Comment": " create data frame for mean accept rate (%) per participant, per effort level in 'young' group ", "Code": "d.young.effort.1 <- filter(d, Group == 'Young') %>% group_by(ID, Agent, Effort) %>% summarise(m = mean(Choice)*100) ", "Label": "Data Variable", "Source": "https://osf.io/guqrm/", "File": "Reproduce_figures_prosocial_ageing.R" }, { "ID": 683, "Comment": "create data frame reflecting mean accept rate (%) per participant at each reward level in 'old' group", "Code": "d.reward.old.1 <- filter(d, Group == 'Old') %>% group_by(ID, Agent, Reward) %>% summarise(m = mean(Choice)*100) ", "Label": "Data Variable", "Source": "https://osf.io/guqrm/", "File": "Reproduce_figures_prosocial_ageing.R" }, { "ID": 684, "Comment": "create data frame reflecting overall mean accept rate (%) and SE at each reward level in 'old' group", "Code": "d.reward.old.2 <- d.reward.old.1 %>% group_by(Agent, Reward) %>% summarise(SE = std.error(m), M = mean(m, na.rm = T)) ", "Label": "Data Variable", "Source": "https://osf.io/guqrm/", "File": "Reproduce_figures_prosocial_ageing.R" }, { "ID": 685, "Comment": "Welch's one-way ANOVA", "Code": "anova_dehyd = df_dehyd %>% welch_anova_test(dehyd ~ cond) anova_dist_ran = df_dist_ran %>% welch_anova_test(dist ~ cond) anova_run_time = df_run_time %>% welch_anova_test(time ~ cond) anova_heat_time = df_heat_time %>% welch_anova_test(time ~ cond) anova_tcmax = df_tcmax %>% welch_anova_test(tcmax ~ cond) anova_tcmin = df_tcmin %>% welch_anova_test(tcmin ~ cond) anova_duration = df_duration %>% welch_anova_test(duration ~ cond) anova_thermal_load = df_thermal_load %>% welch_anova_test(thermal_load ~ cond) anova_heat_time = df_heat_time %>% welch_anova_test(heat_time ~ cond)", "Label": "Statistical Test", "Source": "https://osf.io/n5ahf/", "File": "Table1_analysis.R" }, { "ID": 686, "Comment": "Function to generate the true value for the Diamond Ratio", "Code": "true.ratio <- function(n1, n2, tau, delta){ v <- (n1 + n2)/(n1*n2) + delta^2/2/(n1 + n2) j <- 1 - 3/(4*(n1 + n2 - 2) - 1) v.g <- (j^2)*v w <- 1/v.g w.star <- 1/(v.g + tau^2) var.fixed <- 1/sum(w) var.random <- 1/sum(w.star) sqrt(var.random/var.fixed) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/gwn4y/", "File": "Simulations_line_plots.R" }, { "ID": 687, "Comment": "lmer model for dimorphism", "Code": "Lat.ML<-lmer(Latency.cen~Sex+scale(Mass, scale = F)+ Generation+Pop+Stage+Injured+scale(Temp, scale = F)+ scale(Time, scale = F)+JDate_dev+(1|Sire)+(1|Dam),data=Data) summary(Lat.ML) drop1(Lat.ML) OF.ML<-lmer(sqrt(OF.Dist)~Sex+scale(Mass, scale = F)+ Generation+Pop+Stage+Injured+scale(Temp, scale = F)+ scale(Time, scale = F)+JDate_dev+(1|Sire)+(1|Dam),data=Data) summary(OF.ML) drop1(OF.ML) AP.ML<-lmer(sqrt(AP.Dist)~Sex+scale(Mass, scale = F)+ Generation+Pop+Stage+Injured+scale(Temp, scale = F)+ scale(Time, scale = F)+JDate_dev+(1|Sire)+(1|Dam),data=Data) summary(AP.ML)
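# drop1() below re-examines the model with each fixed-effect term removed in turn;
# for lmer fits its default output is an AIC comparison, a quick check of each term's contribution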
drop1(AP.ML) Lat.ML<-lmer(Latency.Cens~Sex+Mass+Generation+Pop+Stage+Injured+Temp+ Time+JDate_dev+(1|Sire)+(1|Dam),data=Data) summary(Lat.ML) drop1(Lat.ML) OF.ML<-lmer(OF.Dist~Sex+Mass+Generation+Pop+Stage+Injured+Temp+ Time+JDate_dev+(1|Sire)+(1|Dam),data=Data) summary(OF.ML) drop1(OF.ML) AP.ML<-lmer(AP.Dist~Sex+Mass+Generation+Pop+Stage+Injured+Temp+ Time+JDate_dev+(1|Sire)+(1|Dam),data=Data) summary(AP.ML) drop1(AP.ML) ", "Label": "Statistical Modeling", "Source": "https://osf.io/pnug5/", "File": "3.Ginteger_CrossSex_Tables&Figures.R" }, { "ID": 688, "Comment": "return the last N characters of a string", "Code": "fn$lastN<-function(string, n){ l<-nchar(string) result<-substr(string,l-n+1,l) result } ", "Label": "Data Variable", "Source": "https://osf.io/xwp2d/", "File": "generic.r" }, { "ID": 689, "Comment": "Linear mixed effects model for reproduction bias", "Code": "mod_full <- lmer(Prct_Bias ~ ContextC*(GMSI_Gen_Z + OrderC*Distort) + (1|Subject), data=dat2)", "Label": "Statistical Modeling", "Source": "https://osf.io/wxgm5/", "File": "Exp2_tapping.R" }, { "ID": 690, "Comment": "Extra function: MM-to-inch conversion for plot", "Code": "MMtoInch <- function(MM) { z <- MM / 25.4 return(z) }", "Label": "Visualization", "Source": "https://osf.io/3w8eg/", "File": "All_Figures.R" }, { "ID": 691, "Comment": "plot mean force (+/- SE) at each effort level in old group AND show individual data points", "Code": "plot.force.old.2 <- ggplot(d.force.old, aes(x = Effort, y = m)) + geom_point(aes(group = Agent, color = Agent), position = position_dodge(width = 0.3), alpha = 0.4, shape = 1, size = 0.5) + geom_errorbar(data = d.force.old.summary, aes(group = Agent, color = Agent, ymin = m - SE, ymax = m + SE), position = position_dodge(0.02), alpha = 1, width = 0.2) + geom_line(data = d.force.old.summary, aes(group = Agent, color = Agent), position = position_dodge(0.02)) + scale_x_continuous(name = 'Effort level', breaks = seq(2, 6, 1)) + scale_y_continuous(name = 'Force exerted (normalised AUC)', breaks = seq(0.0, 1.0, 0.2)) + scale_color_brewer(palette = 'Set1') + coord_cartesian(xlim = c(2, 6), ylim = c(0.30, 1.0)) + theme_classic() + theme(legend.position = c(0.9, 0.15)) + theme(axis.text=element_text(size = 11), axis.title=element_text(size = 13)) plot.force.old.2 ggsave(filename = 'plot_force_old_v2.pdf', plot.force.old.2, height = 5, width = 4) ", "Label": "Visualization", "Source": "https://osf.io/guqrm/", "File": "Reproduce_figures_prosocial_ageing.R" }, { "ID": 692, "Comment": "plot mean force (no SE) at each effort level in old group AND show individual data points", "Code": "plot.force.old.3 <- ggplot(d.force.old, aes(x = Effort, y = m, group = Agent, color = Agent, fill = Agent)) + geom_point(data = d.force.old, aes(group = Agent, color = Agent), position = position_dodge(width = 0.3), alpha = 0.4, shape = 1, size = 0.5) + geom_smooth(method = 'lm', alpha = 0.3, size = 0.6, se = F) + coord_cartesian(xlim = c(2, 6), ylim = c(0.35, 0.9)) + scale_x_continuous(name = 'Effort level', breaks = seq(2, 6, 1)) + scale_y_continuous(name = 'Force exerted (normalised AUC)', breaks = seq(0, 1.0, 0.2)) + scale_color_brewer(palette = 'Set1') + scale_fill_brewer(palette = 'Set1') + theme_classic() + theme(legend.position = c(0.9, 0.15)) + theme(axis.text=element_text(size = 11), axis.title=element_text(size = 13)) plot.force.old.3 ggsave(filename = 'plot_force_old_v3.pdf', plot.force.old.3, height = 5, width = 4) ", "Label": "Visualization", "Source": "https://osf.io/guqrm/", "File":
"Reproduce_figures_prosocial_ageing.R" }, { "ID": 693, "Comment": "bootstrap modindices of item residual covariances", "Code": "cfa.csurf.bl.full.1.boot <- bootstrapLavaan(cfa.csurf.bl.full.1, R = 5000, FUN = function(x) {modindices(x)$mi}, verbose = TRUE) cfa.csurf.bl.full.1.boot <- data.frame(cfa.csurf.bl.full.1.boot) cfa.csurf.fu1.full.1.boot <- bootstrapLavaan(cfa.csurf.fu1.full.1, R = 5000, FUN = function(x) {modindices(x)$mi}, verbose = TRUE) cfa.csurf.fu1.full.1.boot <- data.frame(cfa.csurf.fu1.full.1.boot) cfa.csurf.bl.deponly.1.boot <- bootstrapLavaan(cfa.csurf.bl.deponly.1, R = 5000, FUN = function(x) {modindices(x)$mi}, verbose = TRUE) cfa.csurf.bl.deponly.1.boot <- data.frame(cfa.csurf.bl.deponly.1.boot) cfa.csurf.fu1.deponly.1.boot <- bootstrapLavaan(cfa.csurf.fu1.deponly.1, R = 5000, FUN = function(x) {modindices(x)$mi}, verbose = TRUE) cfa.csurf.fu1.deponly.1.boot <- data.frame(cfa.csurf.fu1.deponly.1.boot) ", "Label": "Statistical Modeling", "Source": "https://osf.io/a6tuw/", "File": "OnlinesupplementaryR-scriptc-surfsamples.R" }, { "ID": 694, "Comment": " recode played hours into numeric (if multiple numbers, choose the first one) ", "Code": "mydf$Hours <- as.numeric(stri_extract_first_regex(mydf$Hours, \"[0-9]+\")) ", "Label": "Data Variable", "Source": "https://osf.io/vnbxk/", "File": "GamingStudy_PreProcessing.R" }, { "ID": 695, "Comment": " recode streamed hours into numeric (if multiple numbers, choose the first one) ", "Code": "mydf$streams <- as.numeric(stri_extract_first_regex(mydf$streams, \"[0-9]+\")) ", "Label": "Data Variable", "Source": "https://osf.io/vnbxk/", "File": "GamingStudy_PreProcessing.R" }, { "ID": 696, "Comment": " recode narcisiim scale into numeric (if multiple numbers, choose the first one) ", "Code": "mydf$Narcissism <- as.numeric(stri_extract_first_regex(mydf$Narcissism, \"[0-9]+\")) ", "Label": "Data Variable", "Source": "https://osf.io/vnbxk/", "File": "GamingStudy_PreProcessing.R" }, { "ID": 697, "Comment": "Display and rank games by frequency", "Code": "tGames <- as.data.frame(table(data_main$Game)) View(tGames[order(tGames$Freq, decreasing = TRUE), ]) ", "Label": "Visualization", "Source": "https://osf.io/vnbxk/", "File": "GamingStudy_PreProcessing.R" }, { "ID": 698, "Comment": "Display and rank residence country by frequency", "Code": "tResidence <- as.data.frame(table(data_main$Residence)) View(tResidence[order(tResidence$Freq, decreasing = TRUE), ]) ", "Label": "Data Variable", "Source": "https://osf.io/vnbxk/", "File": "GamingStudy_PreProcessing.R" }, { "ID": 699, "Comment": "Display and rank country of birth by frequency", "Code": "tBirth <- as.data.frame(table(data_main$Birthplace)) View(tBirth[order(tBirth$Freq, decreasing = TRUE), ]) ", "Label": "Data Variable", "Source": "https://osf.io/vnbxk/", "File": "GamingStudy_PreProcessing.R" }, { "ID": 700, "Comment": "Loop to downsample data based on minimum time between points Time parameter is set in \"while(diff < time in minutes)\" ", "Code": "trajectory_dsmpl <- data.frame() ids <- unique(trajectory.df$id) for(i in ids) { traj = subset(trajectory.df, trajectory.df$id == i) for(i in 1:nrow(traj)) { diff <- difftime(traj$dt[i+1], traj$dt[i], units = \"mins\") if(is.na(diff)) {break} while(diff < 15) { traj <- traj[-(i+1),] diff <- difftime(traj$dt[i+1], traj$dt[i], units = \"mins\") if(is.na(diff)) {break} } } trajectory_dsmpl <- bind_rows(trajectory_dsmpl, traj) } trajectory_dsmpl <- trajectory_dsmpl %>% group_by(id) %>% mutate(time_diff = difftime(dt, lag(dt, n = 1L), units = 
\"min\"), delta_x = x_utm - lag(x_utm, n = 1L), delta_y = y_utm - lag(y_utm, n = 1L), dist = sqrt(delta_x^2+delta_y^2)) ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "os_homing_dataproc.R" }, { "ID": 701, "Comment": "Clean up dataframe and create a grouping variable 50m or 200m based on translocation distance", "Code": "trajectory.df <- trajectory.df %>% ungroup %>% mutate(trans_group = ifelse(dist.y > 100, \"200m\", \"50m\")) %>% dplyr::select(id, sex, trans_group, trans_dist = dist.y, dt = dt.x, x_utm = x_utm.x, y_utm = y_utm.x, x_new, y_new, dist = dist.x, time_lag_min) ", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "os_homing_dataproc.R" }, { "ID": 702, "Comment": "Barplot of homing success with percents as title 50m", "Code": "bar_50m <- homing_variables %>% filter(trans_group == \"50m\") %>% ggplot(aes(x = sex, fill = sex, alpha = as.factor(homing_bin))) + geom_bar(position = \"fill\", color = \"black\", size = 2) + scale_fill_manual(values = c(\"#F8766D\", \"#00BFC4\")) + coord_flip() + theme(legend.position=\"none\", aspect.ratio = 0.4, axis.ticks.length = unit(.25, \"cm\"), axis.ticks.y = element_blank(), ", "Label": "Visualization", "Source": "https://osf.io/3bpn6/", "File": "os_homing_dataproc.R" }, { "ID": 703, "Comment": "Barplot of homing success with percents as title 200m", "Code": "bar_200m <- homing_variables %>% filter(trans_group == \"200m\") %>% ggplot(aes(x = sex, fill = sex, alpha = as.factor(homing_bin))) + geom_bar(position = \"fill\", color = \"black\", size = 2) + scale_fill_manual(values = c(\"#F8766D\", \"#00BFC4\")) + coord_flip() + theme(legend.position=\"none\", aspect.ratio = 0.4, axis.ticks.length = unit(.25, \"cm\"), axis.ticks.y = element_blank(), ", "Label": "Visualization", "Source": "https://osf.io/3bpn6/", "File": "os_homing_dataproc.R" }, { "ID": 704, "Comment": " 2 Factor Model, correlated factors ", "Code": "model_2f <- 'f_a =~ parcel_a_1+parcel_a_2+parcel_a_3 # Agency f_c =~ parcel_c_1+parcel_c_2+parcel_c_3 # Communion ' fit.model_2f <- cfa(model_2f, data=data, std.lv=T) summary(fit.model_2f, fit.measures=TRUE, standardized = TRUE) # Robust model: CFI = .99, RMSEA = .02 ", "Label": "Statistical Modeling", "Source": "https://osf.io/6579b/", "File": "02_Main_Analyses.R" }, { "ID": 705, "Comment": "Factor Anaylsis with 3 factors", "Code": "factors <- fa(items, nfactors = 3)", "Label": "Statistical Modeling", "Source": "https://osf.io/74qnu/", "File": "Script_Predicting_Social_Skill_Expression.R" }, { "ID": 706, "Comment": "binomial logistic regression testing for effect of condition", "Code": "model1 <- glm(cbind(PredictTotal, PredictTrials-PredictTotal) ~ Condition, data = study1, family=binomial) summary(model1) Anova(model1, type=\"III\", test=\"Wald\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/4kmdv/", "File": "analysis code.R" }, { "ID": 707, "Comment": "binomial logistic regression testing for effects of behavior and context", "Code": "model3 <- glm(cbind(PredictMatch, NumQs-PredictMatch) ~ Behavior + Context + Behavior*Context, data = study2, family=binomial) Anova(model3, type=\"III\", test=\"Wald\") ", "Label": "Statistical Modeling", "Source": "https://osf.io/4kmdv/", "File": "analysis code.R" }, { "ID": 708, "Comment": "Function that creates a TRUE/FALSE matrix of nonbiological father availibility from two numbers that are recorded in the data FROM and TILL. 
The function does this by evaluating the presence of the caretaker using < and > for each interval in question.", "Code": "giveage<-function(data){ FROM<-data$Grew_up_from_nonbiol TILL<-data$Grew_up_till_nonbiol ages<-data.frame(x=rep(NA,nrow(data))) for(i in 1:15){ j<-i-1 text<-paste(\"grew\",j,i,\"<-TILL>\",j,\"&FROM<\",i,sep=\"\") eval(parse(text=text)) text<-paste(\"ages<-cbind(ages,grew\",j,i,\")\",sep=\"\") eval(parse(text=text)) } ages<-ages[,-1] return(ages) } ", "Label": "Data Variable", "Source": "https://osf.io/greqt/", "File": "functions1.R" }, { "ID": 709, "Comment": "Function that calculates differences between non-biological fathers and partners in both groups (non-biological father currently present, non-biological father currently absent) for each year in the analysis. ", "Code": "deltas<-function(diff,ages){ deltaT<-NA deltaF<-NA for(i in 1:15){ deltaT[i]<-mean(diff[ages[i]==T],na.rm=T) deltaF[i]<-mean(diff[ages[i]==F],na.rm=T) } return(rbind(deltaT,deltaF)) } ", "Label": "Data Variable", "Source": "https://osf.io/greqt/", "File": "functions1.R" }, { "ID": 710, "Comment": "Function that repeats the last and first item of a vector; useful if we want to draw plots and CIs all the way to the border of the plotting region.", "Code": "ad<-function(v){ return(c(v[1],v,v[length(v)])) } ", "Label": "Visualization", "Source": "https://osf.io/greqt/", "File": "functions1.R" }, { "ID": 711, "Comment": "Function that plots a text with an outline. It is equivalent to the TeachingDemos shadowtext function described at: https://stackoverflow.com/questions/29303480/text-labels-with-outline-in-r", "Code": "shadowtext <- function(x, y=NULL, labels, col='white', bg='black', theta= seq(pi/4, 2*pi, length.out=40), r=0.1, ... ) { xy <- xy.coords(x,y) xo <- r*strwidth('A') yo <- r*strheight('A') for (i in theta) { text( xy$x + cos(i)*xo, xy$y + sin(i)*yo, labels, col=bg, ... ) } text(xy$x, xy$y, labels, col=col, ...
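# the loop above drew bg-coloured copies of the label offset around a circle of radius r;
# this final text() call draws the main label in col on top, producing the outline effect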
) } ", "Label": "Visualization", "Source": "https://osf.io/greqt/", "File": "functions1.R" }, { "ID": 712, "Comment": "Functions coonducting logit and inverse logit transformation (p to logodds and back)", "Code": "logit<-function(x){log(x/(1-x))} inv_logit<-function(x){exp(x)/(1+exp(x))}", "Label": "Statistical Modeling", "Source": "https://osf.io/greqt/", "File": "functions1.R" }, { "ID": 713, "Comment": " read in all text files at working directory: all_data read_dir( pattern \"\\\\.txt$\", stringsAsFactors FALSE, fill TRUE, header TRUE ) look at the columns and what they contain: str(all_data) look at data range, potential outliers: peek_neat(all_data, 'rt') the same per various groupings: ' peek_neat( ' dat all_data, ' values 'rt', ' group_by 'response', ' round_to 1 ' ) ' peek_neat(all_data, 'rt', c('color', 'valence'), ' round_to 1) histogram and QQ plots for the same: peek_neat(all_data, 'rt', c('color', 'valence'), f_plot plot_neat) peek_neat(all_data, 'rt', c('color', 'valence'), f_plot ggpubr::ggqqplot) ", "Code": "filenames = list.files(pattern = \"^expsim_color_valence_.*\\\\.txt$\") # get all result file names for (file_name in enum(filenames)) { ", "Label": "Data Variable", "Source": "https://osf.io/49sq5/", "File": "example_analysis.R" }, { "ID": 714, "Comment": " look at rt data range and distribution, potential outliers ", "Code": "peek_neat( data_final, values = c( 'rt_green_negative', 'rt_red_negative', 'rt_green_positive', 'rt_red_positive' ), group_by = 'condition', f_plot = plot_neat ) ", "Label": "Data Variable", "Source": "https://osf.io/49sq5/", "File": "example_analysis.R" }, { "ID": 715, "Comment": "now ANOVA on RTs for the main question: Color/Valence/Group interaction with basic factorial plot of mean rt means (95% CI for error bars by default)", "Code": "anova_neat( data_final, values = c( 'rt_green_negative', 'rt_green_positive', 'rt_red_negative', 'rt_red_positive' ), within_ids = list( color = c('green', 'red'), valence = c('positive', 'negative') ), between_vars = 'condition', plot_means = TRUE, norm_tests = 'all', norm_plots = TRUE, var_tests = TRUE ) ", "Label": "Statistical Test", "Source": "https://osf.io/49sq5/", "File": "example_analysis.R" }, { "ID": 716, "Comment": " kmeans clustering Repeating exhaustive search for the kmeans clustering, with a plot of the best clustering for each k ", "Code": "KM2 <- kmeans.ex(PCA.Z$x, 2) KM2b <- KM2$best with(PCA.Z, { plot(x[, 1:2], pch = 20, asp = 1, col = cols[KM2b$cluster]) text(x[, 1:2], rownames(x), pos = 3) addhull(x[, 1], x[, 2], factor(KM2b$cluster), col.h = cols[1:2]) }) points(KM2b$centers[, 1], KM2b$centers[, 2], pch = 15, col = cols[1:2]) KM3 <- kmeans.ex(PCA.Z$x, 3) KM3b <- KM3$best with(PCA.Z, { plot(x[, 1:2], pch = 20, asp = 1, col = cols[KM3b$cluster]) text(x[, 1:2], rownames(x), pos = 3) addhull(x[, 1], x[, 2], factor(KM3b$cluster), col.h = cols[1:3]) }) points(KM3b$centers[, 1], KM3b$centers[, 2], pch = 15, col = cols[1:3]) KM4 <- kmeans.ex(PCA.Z$x, 4) KM4b <- KM4$best with(PCA.Z, { plot(x[, 1:2], pch = 20, asp = 1, col = cols[KM4b$cluster]) text(x[, 1:2], rownames(x), pos = 3) addhull(x[, 1], x[, 2], factor(KM4b$cluster), col.h = cols[1:4]) }) points(KM4b$centers[, 1], KM4b$centers[, 2], pch = 15, col = cols[1:4]) ", "Label": "Visualization", "Source": "https://osf.io/6ukwg/", "File": "codes_reanalysis.R" }, { "ID": 717, "Comment": "write the number of observed variables to the top", "Code": "header[1] <- paste(ifelse(Flow==FALSE , length(unique(df$Point)), length(unique(df$Point)) + 1), \" : 
number of observed variables\") write(header[1:(grep(\"subbasin number\",header) - 2)], file = outfile) header[1] <- paste(n + n1,\" : number of observed variables\") write(header[1:(grep(\"subbasin number\",header) - 2)], file = outfile) ", "Label": "Data Variable", "Source": "https://osf.io/5ezfk/", "File": "SWATCUPfunctions.R" },
{ "ID": 718, "Comment": " \"Remove\" first word of each page, except on the first page where the title was the first word (but the title has already been removed) ", "Code": "PAST_M_21$gazedur[!duplicated(PAST_M_21$page) & PAST_M_21$page > 1] <- NaN PAST_M_21$fixdur[!duplicated(PAST_M_21$page) & PAST_M_21$page > 1] <- NaN PRES_O_21$gazedur[!duplicated(PRES_O_21$page) & PRES_O_21$page > 1] <- NaN PRES_O_21$fixdur[!duplicated(PRES_O_21$page) & PRES_O_21$page > 1] <- NaN PRES_M_21$gazedur[!duplicated(PRES_M_21$page) & PRES_M_21$page > 1] <- NaN PRES_M_21$fixdur[!duplicated(PRES_M_21$page) & PRES_M_21$page > 1] <- NaN PAST_O_21$gazedur[!duplicated(PAST_O_21$page) & PAST_O_21$page > 1] <- NaN PAST_O_21$fixdur[!duplicated(PAST_O_21$page) & PAST_O_21$page > 1] <- NaN", "Label": "Data Variable", "Source": "https://osf.io/qynhu/", "File": "subject21.R" },
{ "ID": 719, "Comment": "create a vector with the rounded values (names(valM) = adj_dimension)", "Code": "valM <- round(colMeans(d, na.rm = T), 2) ", "Label": "Data Variable", "Source": "https://osf.io/egpr5/", "File": "Analysisscript.R" },
{ "ID": 720, "Comment": " Divide estimates, posterior sd, lower CI, and upper CI of the within-person effects by the within-person SDs of social interactions to obtain coefficients that are standardized with respect to the DV only ", "Code": "Values_Analysis1_Model1$est[rows] <- Values_Analysis1_Model1$est[rows] / sqrt(variances) Values_Analysis1_Model1$posterior_sd[rows] <- Values_Analysis1_Model1$posterior_sd[rows] / sqrt(variances) Values_Analysis1_Model1$lower_2.5ci[rows] <- Values_Analysis1_Model1$lower_2.5ci[rows] / sqrt(variances) Values_Analysis1_Model1$upper_2.5ci[rows] <- Values_Analysis1_Model1$upper_2.5ci[rows] / sqrt(variances) Values_Analysis1_Model1_Pers$est[rows] <- Values_Analysis1_Model1_Pers$est[rows] / sqrt(variances) Values_Analysis1_Model1_Pers$posterior_sd[rows] <- Values_Analysis1_Model1_Pers$posterior_sd[rows] / sqrt(variances) Values_Analysis1_Model1_Pers$lower_2.5ci[rows] <- Values_Analysis1_Model1_Pers$lower_2.5ci[rows] / sqrt(variances) Values_Analysis1_Model1_Pers$upper_2.5ci[rows] <- Values_Analysis1_Model1_Pers$upper_2.5ci[rows] / sqrt(variances)", "Label": "Statistical Modeling", "Source": "https://osf.io/jpxts/", "File": "Main Tables.R" },
{ "ID": 721, "Comment": " Heart rate: the median is used as the mean when the mean is not \"available\" ", "Code": "DATA$HR <- ifelse(is.na(DATA$\"all_mean_HR\")==T, DATA$all_median_HR, DATA$\"all_mean_HR\") ", "Label": "Data Variable", "Source": "https://osf.io/cxv5k/", "File": "data_preparation.R" },
{ "ID": 722, "Comment": "Keep the removed people to plot separately", "Code": "nograph_removed <- nograph[nograph$belief_in_medicine >= 7,] graph_removed <- graph[graph$belief_in_medicine >= 7,] ", "Label": "Visualization", "Source": "https://osf.io/zh3f4/", "File": "regression analysis.R" },
{ "ID": 723, "Comment": "Means and standard deviations of conditions: Experiencer", "Code": "exp <- data.all[ which(data.all$condition == 'experiencer'),] round(mean(exp$rating), 2) round(sd(exp$rating), 2) ", "Label": "Data Variable", "Source": "https://osf.io/9tnmv/", "File": "Exp2_OnlineExp_POST.R" },
{ "ID": 724, "Comment": "Q3: create a new data set that contains the first 300 cases from the subset you have just created above", "Code": "working_data_2 <- slice(working_data_1, 1:300) ", "Label": "Data Variable", "Source": "https://osf.io/94jyp/", "File": "Ex1_ Data Wrangling_answers.R" },
{ "ID": 725, "Comment": "Q5: create one single subset (based on your original dataset) where you: select the variables id, sex, age, source1, discuss, flushot, vacc1 and refus;; select cases 150-450 only;; change the name of the variable source1 to 'Main_Source';; select participants older than 39 years of age who vaccinate their own children (mandatory and recommended vaccines). Hint: %>% MAGRITTR package ", "Code": "working_data_final <- ex1_data %>% select(id, sex, age, source1, discuss, flushot, vacc1, refus) %>% rename(Main_Source=source1) %>% slice(150:450) %>% filter(age > 39 & vacc1 == 'Mandatory + all recommended') ", "Label": "Data Variable", "Source": "https://osf.io/94jyp/", "File": "Ex1_ Data Wrangling_answers.R" },
{ "ID": 726, "Comment": " calculating r for each iteration;; the final r is the mean of those five r values (RAQ-R and QoL) ", "Code": "r_QoL <- (with(subsample_PG, cor(RAQ_Totalscore[.imp==1 & Sample==1], QoL[.imp==1 & Sample==1]))+ with(subsample_PG, cor(RAQ_Totalscore[.imp==2 & Sample==1], QoL[.imp==2 & Sample==1]))+ with(subsample_PG, cor(RAQ_Totalscore[.imp==3 & Sample==1], QoL[.imp==3 & Sample==1]))+ with(subsample_PG, cor(RAQ_Totalscore[.imp==4 & Sample==1], QoL[.imp==4 & Sample==1]))+ with(subsample_PG, cor(RAQ_Totalscore[.imp==5 & Sample==1], QoL[.imp==5 & Sample==1])))/5 p_QoL <- pt((r_QoL*sqrt(nrow(subsample_PG)/6-2)/sqrt(1-r_QoL^2)), nrow(subsample_PG)/6-2) # t = r*sqrt(df)/sqrt(1-r^2) ", "Label": "Data Variable", "Source": "https://osf.io/73y8p/", "File": "RAQ-R_Analyses.R" },
{ "ID": 727, "Comment": "scaling RAQ-R score", "Code": "reg_with_PG_CG_scaled <- with(data=as.mids(reg_PG_CG), exp=lm(scale(RAQ_Totalscore)~Sample+Gender+Age_groups+Level_of_education)) str(summary(pool(reg_with_PG_CG_scaled))) pooled_reg_PG_CG_scaled <- summary(pool(reg_with_PG_CG_scaled)) pooled_reg_PG_CG_scaled$p.value pooled_reg_PG_CG_scaled$estimate ", "Label": "Data Variable", "Source": "https://osf.io/73y8p/", "File": "RAQ-R_Analyses.R" },
{ "ID": 728, "Comment": "regression for each iteration in order to check diagnostic plots;; look for differences between the adjusted R-squared of the five iterations", "Code": "reg_PG_CG_1 <- reg_PG_CG[reg_PG_CG$.imp == 1, ] summary(lm(data=reg_PG_CG_1, RAQ_Totalscore~Sample+Gender+Age_groups+Level_of_education)) reg_with_PG_CG_1 <- with(data=reg_PG_CG_1, exp=lm(RAQ_Totalscore~Sample+Gender+Age_groups+Level_of_education)) plot(reg_with_PG_CG_1) reg_PG_CG_2 <- reg_PG_CG[reg_PG_CG$.imp == 2, ] summary(lm(data=reg_PG_CG_2, RAQ_Totalscore~Sample+Gender+Age_groups+Level_of_education)) reg_with_PG_CG_2 <- with(data=reg_PG_CG_2, exp=lm(RAQ_Totalscore~Sample+Gender+Age_groups+Level_of_education)) plot(reg_with_PG_CG_2) reg_PG_CG_3 <- reg_PG_CG[reg_PG_CG$.imp == 3, ] summary(lm(data=reg_PG_CG_3, RAQ_Totalscore~Sample+Gender+Age_groups+Level_of_education)) reg_with_PG_CG_3 <- with(data=reg_PG_CG_3, exp=lm(RAQ_Totalscore~Sample+Gender+Age_groups+Level_of_education)) plot(reg_with_PG_CG_3) reg_PG_CG_4 <- reg_PG_CG[reg_PG_CG$.imp == 4, ] summary(lm(data=reg_PG_CG_4, RAQ_Totalscore~Sample+Gender+Age_groups+Level_of_education)) reg_with_PG_CG_4 <- with(data=reg_PG_CG_4, exp=lm(RAQ_Totalscore~Sample+Gender+Age_groups+Level_of_education)) plot(reg_with_PG_CG_4) reg_PG_CG_5 <- reg_PG_CG[reg_PG_CG$.imp == 5, ] summary(lm(data=reg_PG_CG_5, RAQ_Totalscore~Sample+Gender+Age_groups+Level_of_education)) reg_with_PG_CG_5 <- with(data=reg_PG_CG_5, exp=lm(RAQ_Totalscore~Sample+Gender+Age_groups+Level_of_education)) plot(reg_with_PG_CG_5) ", "Label": "Statistical Modeling", "Source": "https://osf.io/73y8p/", "File": "RAQ-R_Analyses.R" },
{ "ID": 729, "Comment": " calculating the mean of the five residual standard errors, R-squared, adjusted R-squared and F statistics ", "Code": "Residual_standard_error_reg_PG <- (10.35+10.45+10.16+10.29+10.28)/5 R_squared_reg_PG <- (0.639+0.630+0.652+0.642+0.642)/5 R_squared_adjusted_reg_PG <-(0.6267+0.6175+0.6403+0.6301+0.6298)/5 F_statistic_reg_PG <- (53.04+51.05+56.18+53.81+53.73)/5", "Label": "Statistical Test", "Source": "https://osf.io/73y8p/", "File": "RAQ-R_Analyses.R" },
{ "ID": 730, "Comment": "Graph the correlation between secure attachment and visual cortex response", "Code": "ggplot(mydata, aes(SECURE.ATTACHMENT, LINGUAL.GYRUS..VISUAL.CORTEX.), scale=\"globalminmax\") + geom_smooth(method = \"lm\", fill = \"green\", alpha = 0.6)+ geom_point(size =5 ) + theme_minimal()+ theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = \"black\"))+ labs(title = \"Visual Cortex & Secure Attachment\", x = \"Secure Attachment\", y = \"Lingual Gyrus Activation\") + scale_x_continuous(limits = c(1,5), breaks = c(1,2,3,4,5)) ", "Label": "Visualization", "Source": "https://osf.io/s6zeg/", "File": "Criticism-Attachment-RCode_v2.R" },
{ "ID": 731, "Comment": "Graph the correlation between avoidant attachment and visual cortex response", "Code": "ggplot(mydata, aes(AVOIDANT.ATTACHMENT, LINGUAL.GYRUS..VISUAL.CORTEX.), scale=\"globalminmax\") + geom_smooth(method = \"lm\", fill = \"red\")+ geom_point(size = 5) + theme_minimal()+ theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = \"black\"))+ labs(title = \"Visual Cortex & Avoidant Attachment\", x = \"Avoidant Attachment\", y = \"Lingual Gyrus Activation\")+ scale_x_continuous(limits = c(1,5), breaks = c(1,2,3,4,5)) ", "Label": "Visualization", "Source": "https://osf.io/s6zeg/", "File": "Criticism-Attachment-RCode_v2.R" },
{ "ID": 732, "Comment": " Graph the interaction plot of amygdala and visual cortex activation, with AVOIDANT attachment as the moderator. ", "Code": "p1 = interact_plot(fiti, pred = AMYGDALA, modx = AVOIDANT.ATTACHMENT,robust = FALSE, x.label = \"Amygdala\", y.label = \"LG Visual Cortex\", main.title = \"Avoidant Attachment\", legend.main = \"Avoidant Levels\", colors = \"red\",interval = TRUE, int.width = 0.8)+ theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = \"black\")) p1 ", "Label": "Visualization", "Source": "https://osf.io/s6zeg/", "File": "Criticism-Attachment-RCode_v2.R" },
{ "ID": 733, "Comment": " Graph the interaction plot of amygdala and visual cortex activation, with SECURE attachment as the moderator. ", "Code": "p2 = interact_plot(fiti2, pred = AMYGDALA, modx = SECURE.ATTACHMENT,robust = FALSE, x.label = \"Amygdala\", y.label = \"LG Visual Cortex\", main.title = \"Secure Attachment\", legend.main = \"Secure Levels\", colors = \"green\",interval = TRUE, int.width = 0.8)+ theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = \"black\")) p2 ", "Label": "Visualization", "Source": "https://osf.io/s6zeg/", "File": "Criticism-Attachment-RCode_v2.R" },
{ "ID": 734, "Comment": "Returns the ratio between the geometric means of two independent samples and its 95% BCa bootstrap CI.", "Code": "ratioGeomMeanCI.bootstrap <- function(group1, group2) { group1 <- log(group1) group2 <- log(group2) samplemean <- function(x, d) {return(mean(x[d]))} pointEstimate <- samplemean(group1) - samplemean(group2) set.seed(0) # make deterministic bootstrap_samples <- two.boot(sample1 = group1, sample2 = group2, FUN = samplemean, R = 5000) bootci <- boot.ci(bootstrap_samples, type = \"bca\", conf = conf.level) exp(c(pointEstimate, bootci$bca[4], bootci$bca[5])) } ", "Label": "Statistical Test", "Source": "https://osf.io/zh3f4/", "File": "CI.helpers.R" },
{ "ID": 735, "Comment": "Returns the 95% confidence interval of a single proportion using the Wilson score interval.", "Code": "propCI <- function(numberOfSuccesses, sampleSize) { CI <- scoreci(x = numberOfSuccesses, n = sampleSize, conf.level = conf.level) c(numberOfSuccesses/sampleSize, CI$conf.int[1], CI$conf.int[2]) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/zh3f4/", "File": "CI.helpers.R" },
{ "ID": 736, "Comment": "Returns the difference between two linear regression slopes and its 95% BCa bootstrap CI.", "Code": "diff.slopes.bootstrap <- function(x1, y1, x2, y2) { groups <- c(rep(1, length(x1)), rep(2, length(y1)), rep(3, length(x2)), rep(4, length(y2))) data <- data.frame(obs = c(x1, y1, x2, y2), group = groups) diffslope <- function(d, i) { db <- d[i,] x1 <- db[db$group==1,]$obs y1 <- db[db$group==2,]$obs x2 <- db[db$group==3,]$obs y2 <- db[db$group==4,]$obs fit1 <- lm(y1 ~ x1) a1 <- fit1$coefficients[[2]] fit2 <- lm(y2 ~ x2) a2 <- fit2$coefficients[[2]] a2 - a1 } a1 <- lm(y1 ~ x1)$coefficients[[2]] a2 <- lm(y2 ~ x2)$coefficients[[2]] pointEstimate <- a2 - a1 set.seed(0) # make deterministic bootstrap_samples <- boot(data = data, statistic = diffslope, stype = \"i\", strata = data$group, R = 5000) bootci <- boot.ci(bootstrap_samples, type = \"bca\") c(pointEstimate, bootci$bca[4], bootci$bca[5]) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/zh3f4/", "File": "CI.helpers.R" },
{ "ID": 737, "Comment": "replace the current subject's \"fixdata\" by fixation_info ", "Code": "if (any(S==replace_eye)){ fixdata[[S]] <- data.frame(fixation_info) colnames(fixdata[[S]]) <- c(\"Event.Start.Raw.Time..ms.\",\"Event.End.Raw.Time..ms.\",\"Event.Duration.Trial.Time..ms.\", \"Fixation.Position.X..px.\",\"Fixation.Position.Y..px.\",\"AOI.Name\",\"intrialonset\",\"trialnr\") } } ", "Label": "Data Variable", "Source": "https://osf.io/qrv2e/", "File": "DivNorm_R_EyeTrack.R" },
{ "ID": 738, "Comment": "Mean differences: comparing signed and unsigned reviews on four aspects. For each, first a t-test, then a violin plot;; paneled plot created at end of series. Testing word count ", "Code": "t.test(revdat$wc ~ revdat$signed) wc_sign <- ggplot(revdat, aes(signed, wc, fill=signed)) + geom_violin( trim = FALSE, draw_quantiles = c(0.25, 0.5, 0.75), alpha = 0.5) + geom_jitter( width = 0.20, height = 0, alpha = 0.5, size = 1) + xlab(\"Signed Reviews\") + ylab(\"Word Count\") ", "Label": "Statistical Test", "Source": "https://osf.io/uf63k/", "File": "MsReviewsAnalysisScript.R" },
{ "ID": 739, "Comment": "Correlations: examining correlations between time and aspects of reviews. For each, first compute r, then a scatter plot;; plots also mark whether or not the review was signed. Paneled plot created at end of series ", "Code": "cor.test(revdat$order, revdat$wc) wc_time <- ggplot(revdat, aes(order, wc, color=signed)) + geom_point(size=1)+ xlab(\"Time\") + ylab(\"Word Count\") cor.test(revdat$order, revdat$posemo) pos_time <- ggplot(revdat, aes(order, posemo, color=signed)) + geom_point(size=1) + xlab(\"Time\") + ylab(\"Positive Emotion Words\") cor.test(revdat$order, revdat$negemo) neg_time <- ggplot(revdat, aes(order, negemo, color=signed)) + geom_point(size=1) + xlab(\"Time\") + ylab(\"Negative Emotion Words\") cor.test(revdat$order, revdat$cogmech) cog_time <- ggplot(revdat, aes(order, cogmech, color=signed)) + geom_point(size=1) + xlab(\"Time\") + ylab(\"Cognitive Mechanism Words\") ", "Label": "Visualization", "Source": "https://osf.io/uf63k/", "File": "MsReviewsAnalysisScript.R" },
{ "ID": 740, "Comment": "removes non-alphanumeric characters", "Code": "full.data <- multigsub(\"[^[:alnum:]]\", \" \", full.data, fixed = FALSE) # regex character class needs the closing ':' and fixed = FALSE train.data <- multigsub(\"[^[:alnum:]]\", \" \", train.data, fixed = FALSE) valid.data <- multigsub(\"[^[:alnum:]]\", \" \", valid.data, fixed = FALSE) ", "Label": "Data Variable", "Source": "https://osf.io/tnbev/", "File": "lewis-acid-base-researchers.R" },
{ "ID": 741, "Comment": "removes leading & trailing whitespaces", "Code": "full.data <- trimws(full.data) train.data <- trimws(train.data) valid.data <- trimws(valid.data)", "Label": "Data Variable", "Source": "https://osf.io/tnbev/", "File": "lewis-acid-base-researchers.R" },
{ "ID": 742, "Comment": " construct a maximal glmer() model. This model contains a fixed within-subjects effect of Ambiguity (effect-coded, with 0.5 = amb), codes for modality effects and interactions, plus random effects by participants and items. ", "Code": "Acc.max <- glmer(Correct ~ 1 + Ambiguity.code + Modality.code1 + Modality.code2 + Interaction.code1 + Interaction.code2 + (1 + Ambiguity.code + Modality.code1 + Modality.code2 + Interaction.code1 + Interaction.code2 | Participant.Private.ID) + (1 | Item), data = Data.CohOnly, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) Acc.max <- glmer(Correct ~ 1 + Ambiguity.code + Modality.code2 + Interaction.code2 + (1 + Ambiguity.code + Modality.code2 + Interaction.code2 | Participant.Private.ID) + (1 | Item), data = Data.ListandRead, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) Acc.max <- glmer(Correct ~ 1 + Ambiguity.code + Modality.code3 + Interaction.code3 + (1 + Ambiguity.code + Modality.code3 + Interaction.code3 | Participant.Private.ID) + (1 | Item), data = Data.ListandRSVP, family = \"binomial\", control = glmerControl(optimizer =\"bobyqa\")) ", "Label": "Statistical Modeling", "Source": "https://osf.io/m87vg/", "File": "Exp1_BehaviouralAnalyses_Code.R" },
", "Code": "RT.AmbOnly.max <- lmer(logRT ~ 1 + Modality.code1 + Modality.code2 + (1 + Modality.code1 + Modality.code2 | Participant.Private.ID) + (1 | Item), data = Data.AmbOnly, REML=FALSE) RT.ListandRead.max <- lmer(logRT ~ 1 + Modality.code2 + (1 + Modality.code2 | Participant.Private.ID) + (1 | Item), data = Data.ListandRead, REML=FALSE) RT.ListandRSVP.max <- lmer(logRT ~ 1 + Modality.code3 + (1 + Modality.code3 | Participant.Private.ID) + (1 | Item), data = Data.ListandRSVP, REML=FALSE) RT.ReadandRSVP.max <- lmer(logRT ~ 1 + Modality.code3 + (1 + Modality.code3 | Participant.Private.ID) + (1 | Item), data = Data.ReadandRSVP, REML=FALSE) RT.UAOnly.max <- lmer(logRT ~ 1 + Modality.code1 + Modality.code2 + (1 + Modality.code1 + Modality.code2 | Participant.Private.ID) + (1 | Item), data = Data.UAOnly, REML=FALSE) RT.ListandRead.max <- lmer(logRT ~ 1 + Modality.code2 + (1 + Modality.code2 | Participant.Private.ID) + (1 | Item), data = Data.ListandRead, REML=FALSE) RT.ListandRSVP.max <- lmer(logRT ~ 1 + Modality.code3 + (1 + Modality.code3 | Participant.Private.ID) + (1 | Item), data = Data.ListandRSVP, REML=FALSE) RT.ReadandRSVP.max <- lmer(logRT ~ 1 + Modality.code3 + (1 + Modality.code3 | Participant.Private.ID) + (1 | Item), data = Data.ReadandRSVP, REML=FALSE) ", "Label": "Statistical Modeling", "Source": "https://osf.io/m87vg/", "File": "Exp1_BehaviouralAnalyses_Code.R" }, { "ID": 744, "Comment": " construct a maximal lmer() model This model contains a fixed effect for Ambiguity, plus random effects by participants and items. ", "Code": "RT.ListeningOnly.max <- lmer(logRT ~ 1 + Ambiguity.code + (1 + Ambiguity.code | Participant.Private.ID) + (1 | Item), data = Data.ListeningOnly, REML=FALSE) RT.ReadingOnly.max <- lmer(logRT ~ 1 + Ambiguity.code + (1 + Ambiguity.code | Participant.Private.ID) + (1 | Item), data = Data.ReadingOnly, REML=FALSE) RT.RSVPOnly.max <- lmer(logRT ~ 1 + Ambiguity.code + (1 + Ambiguity.code | Participant.Private.ID) + (1 | Item), data = Data.RSVPOnly, REML=FALSE) ", "Label": "Statistical Modeling", "Source": "https://osf.io/m87vg/", "File": "Exp1_BehaviouralAnalyses_Code.R" }, { "ID": 745, "Comment": " As this may be problematic for later analyses, we transform urbanity into its log: ", "Code": "dslong %<>% mutate(urbanity_log = log(urbanity))", "Label": "Data Variable", "Source": "https://osf.io/3hgpe/", "File": "01_data-preparation-variable-setup.R" }, { "ID": 746, "Comment": "calculate Bayes factors for difference using logspline fit", "Code": "prior <- dnorm(0,1) fit.posterior <- logspline(samples$BUGSoutput$sims.list$mu_alpha) posterior <- dlogspline(0, fit.posterior) # this gives the pdf at point delta = 0 prior/posterior ", "Label": "Statistical Modeling", "Source": "https://osf.io/meh5w/", "File": "multiplicationFactor_ttest.R" }, { "ID": 747, "Comment": "use normal distribution to approximate pvalue", "Code": "FullAim1aPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim1aPNcoefs$t.value))) FullAim1aPNcoefs effectsize::standardize_parameters(FullAim1aPN) FullAim1bPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim1bPNcoefs$t.value))) FullAim1bPNcoefs effectsize::standardize_parameters(FullAim1bPN) FullAim1cPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim1cPNcoefs$t.value))) FullAim1cPNcoefs effectsize::standardize_parameters(FullAim1cPN) FullAim2aPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim2aPNcoefs$t.value))) FullAim2aPNcoefs effectsize::standardize_parameters(FullAim2aPN) FullAim2bPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim2bPNcoefs$t.value))) FullAim2bPNcoefs 
{ "ID": 747, "Comment": "use the normal distribution to approximate p-values", "Code": "FullAim1aPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim1aPNcoefs$t.value))) FullAim1aPNcoefs effectsize::standardize_parameters(FullAim1aPN) FullAim1bPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim1bPNcoefs$t.value))) FullAim1bPNcoefs effectsize::standardize_parameters(FullAim1bPN) FullAim1cPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim1cPNcoefs$t.value))) FullAim1cPNcoefs effectsize::standardize_parameters(FullAim1cPN) FullAim2aPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim2aPNcoefs$t.value))) FullAim2aPNcoefs effectsize::standardize_parameters(FullAim2aPN) FullAim2bPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim2bPNcoefs$t.value))) FullAim2bPNcoefs effectsize::standardize_parameters(FullAim2bPN) FullAim3PNPPcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim3PNPPcoefs$t.value))) FullAim3PNPPcoefs effectsize::standardize_parameters(FullAim3PNPP) FullAim4aPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim4aPNcoefs$t.value))) FullAim4aPNcoefs effectsize::standardize_parameters(FullAim4aPN) FullAim4bPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim4bPNcoefs$t.value))) FullAim4bPNcoefs effectsize::standardize_parameters(FullAim4bPN) FullAim5bPosPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim5bPosPNcoefs$t.value))) FullAim5bPosPNcoefs effectsize::standardize_parameters(FullAim5bPosPN) FullAim5bNegPNcoefs$p.z <- 2 * (1 - pnorm(abs(FullAim5bNegPNcoefs$t.value))) FullAim5bNegPNcoefs effectsize::standardize_parameters(FullAim5bNegPN)", "Label": "Statistical Test", "Source": "https://osf.io/mcy6r/", "File": "BeerGogglesorLiquidCouragePPARatingAnalyses.R" },
{ "ID": 748, "Comment": "this function gives the integral of the survival curve given by S.hat on the time grid Y.grid", "Code": "expected_survival <- function(S.hat, Y.grid) { grid.diff <- diff(c(0, Y.grid, max(Y.grid))) c(base::cbind(1, S.hat) %*% grid.diff) } threshol_list <- function(l,threshold){ l_threshold = c() for (x in (l)){ if (is.na(x)){ return(NA) } else{ if (x < -threshold){ l_threshold <- c(l_threshold, -threshold) } else{ if(x > threshold){ l_threshold <- c(l_threshold, threshold) } else{ l_threshold <- c(l_threshold, x) } } } } return(l_threshold) } ", "Label": "Statistical Modeling", "Source": "https://osf.io/dr8gy/", "File": "utils_surv.R" },
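A quick toy check of expected_survival() from ID 748, integrating one survival curve over a coarse grid; all numbers are invented for illustration.
Y.grid <- c(1, 2, 3, 4, 5)                             # time grid
S.hat <- matrix(c(0.9, 0.8, 0.6, 0.4, 0.2), nrow = 1)  # one survival curve
expected_survival(S.hat, Y.grid)                       # area under the step curve, here 3.7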
{ "ID": 749, "Comment": "change data type of PHDYEAR to numeric", "Code": "author_phd_data$PHDYEAR <-as.numeric(author_phd_data$PHDYEAR)", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAuthorAnalysis.R" },
{ "ID": 750, "Comment": "calculate seniority of each author at the time of each article", "Code": "author_phd_data$status_article1 <- ifelse(author_phd_data$PHDYEAR == 0 |author_phd_data$PHDYEAR > author_phd_data$Article.1.Year.published, \"Grad Student\", ifelse(author_phd_data$yrs_to_article1 == 0|author_phd_data$yrs_to_article1 < 7, \"Junior Scholar\", ifelse(author_phd_data$yrs_to_article1 > 6, \"Senior Scholar\", \"NA\"))) author_phd_data$status_article2 <- ifelse(author_phd_data$PHDYEAR == 0 |author_phd_data$PHDYEAR > author_phd_data$Article.2.Year.Published, \"Grad Student\", ifelse(author_phd_data$yrs_to_article2 == 0|author_phd_data$yrs_to_article2 < 7, \"Junior Scholar\", ifelse(author_phd_data$yrs_to_article2 > 6, \"Senior Scholar\", \"NA\"))) author_phd_data$status_article3 <- ifelse(author_phd_data$PHDYEAR == 0 |author_phd_data$PHDYEAR > author_phd_data$Article.3.Year.Published, \"Grad Student\", ifelse(author_phd_data$yrs_to_article3 == 0|author_phd_data$yrs_to_article3 < 7, \"Junior Scholar\", ifelse(author_phd_data$yrs_to_article3 > 6, \"Senior Scholar\", \"NA\"))) author_phd_data$status_article4 <- ifelse(author_phd_data$PHDYEAR == 0 |author_phd_data$PHDYEAR > author_phd_data$Article.4.Year.Published, \"Grad Student\", ifelse(author_phd_data$yrs_to_article4 == 0|author_phd_data$yrs_to_article4 < 7, \"Junior Scholar\", ifelse(author_phd_data$yrs_to_article4 > 6, \"Senior Scholar\", \"NA\"))) author_phd_data$status_article5 <- ifelse(author_phd_data$PHDYEAR == 0 |author_phd_data$PHDYEAR > author_phd_data$Article.5.Year.Published, \"Grad Student\", ifelse(author_phd_data$yrs_to_article5 == 0|author_phd_data$yrs_to_article5 < 7, \"Junior Scholar\", ifelse(author_phd_data$yrs_to_article5 > 6, \"Senior Scholar\", \"NA\"))) author_phd_data$status_article6 <- ifelse(author_phd_data$PHDYEAR == 0 |author_phd_data$PHDYEAR > author_phd_data$Article.6.Year.Published, \"Grad Student\", ifelse(author_phd_data$yrs_to_article6 == 0|author_phd_data$yrs_to_article6 < 7, \"Junior Scholar\", ifelse(author_phd_data$yrs_to_article6 > 6, \"Senior Scholar\", \"NA\"))) author_phd_data$status_article7 <- ifelse(author_phd_data$PHDYEAR == 0 |author_phd_data$PHDYEAR > author_phd_data$Article.7.Year.Published, \"Grad Student\", ifelse(author_phd_data$yrs_to_article7 == 0|author_phd_data$yrs_to_article7 < 7, \"Junior Scholar\", ifelse(author_phd_data$yrs_to_article7 > 6, \"Senior Scholar\", \"NA\"))) author_phd_data$status_article8 <- ifelse(author_phd_data$PHDYEAR == 0 |author_phd_data$PHDYEAR > author_phd_data$Article.8.Year.Published, \"Grad Student\", ifelse(author_phd_data$yrs_to_article8 == 0|author_phd_data$yrs_to_article8 < 7, \"Junior Scholar\", ifelse(author_phd_data$yrs_to_article8 > 6, \"Senior Scholar\", \"NA\"))) author_phd_data$status_article9 <- ifelse(author_phd_data$PHDYEAR == 0 |author_phd_data$PHDYEAR > author_phd_data$Article.9.Year.Published, \"Grad Student\", ifelse(author_phd_data$yrs_to_article9 == 0|author_phd_data$yrs_to_article9 < 7, \"Junior Scholar\", ifelse(author_phd_data$yrs_to_article9 > 6, \"Senior Scholar\", \"NA\"))) author_phd_data$status_article10 <- ifelse(author_phd_data$PHDYEAR == 0 |author_phd_data$PHDYEAR > author_phd_data$Article.10.Year.Published, \"Grad Student\", ifelse(author_phd_data$yrs_to_article10 == 0|author_phd_data$yrs_to_article10 < 7, \"Junior Scholar\", ifelse(author_phd_data$yrs_to_article10 > 6, \"Senior Scholar\", \"NA\"))) ", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAuthorAnalysis.R" },
{ "ID": 751, "Comment": "run check to see if any authors have missing data", "Code": "check <- subset(mmcpsr_authors, is.na(mmcpsr_authors$Title)) ", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAuthorAnalysis.R" },
{ "ID": 752, "Comment": "stacked graph regarding how participants came to see each advocacy type", "Code": "ggplot(detected_advocacy, aes(x = Advocacy, y = Percentage, fill = Response, label = Percentage)) + geom_bar(position =\"stack\", stat=\"identity\") + coord_flip(ylim=c(0,100)) + scale_y_continuous(labels = scales::percent_format(scale = 1)) + geom_text(aes(label = Percentage), size = 3, position = position_stack(vjust = 0.5) ) + labs(y= \"Porcentaje de Participantes\", x = \"Tipo de Defensa\") + theme(text = element_text(family = \"Arial\", size = 14), panel.background = element_rect(\"white\"), panel.border = element_rect(fill = NA), panel.grid.major.x = element_line(colour = \"grey\"), axis.title.x = element_text(color = \"#39363D\", size = 14, hjust=0.5), axis.title.y = element_text(color = \"#39363D\", size = 14), legend.position=\"bottom\", legend.text = element_text(size = 12), legend.title = element_blank()) ", "Label": "Visualization", "Source": "https://osf.io/uhma8/", "File": "8RememberingAdvocacy_Spanish.R" },
{ "ID": 753, "Comment": "Define the adjustment to calculate Hedges' g. To calculate Cohen's d instead, set J <- 1. Remember to change the true.ratio value of J as well.", "Code": "J <- j <- 1 - 3/(4*(n + m - 2) - 1) G <- J*SMD sds <- sqrt((n + m)/(n*m) + SMD^2/2/(n + m)) V <- (J^2)*(sds^2) w <- 1/V if(method == \"REML\"){r.model <- rma(yi = G, vi = V,method = method, control=list(stepadj=0.5, maxiter=10000000000000000000000000))} else{ r.model <- rma(yi = G, vi = V,method = method) } fit.model <- rma(yi = G, vi = V, method = \"FE\") ratio <- abs(r.model$ci.ub - r.model$ci.lb)/abs(fit.model$ci.ub - fit.model$ci.lb) log.ratio <- log(ratio) w.star <- 1/(V + r.model$tau2) log.var <- ((r.model$se.tau2)^2)/4*1/(sum(w.star))^2*(sum(w.star^2))^2 log.sd <- sqrt(log.var) bias <- 1/2*(r.model$se.tau2)^2*(1/2/sum(w.star)^2 - 1/sum(w.star)*sum(w.star^3)) ", "Label": "Statistical Test", "Source": "https://osf.io/gwn4y/", "File": "Reproducible_Simulations_line_plots.R" },
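A worked toy example of the small-sample correction in ID 753 (Hedges' g = J * d); every number here is made up for illustration.
n <- 20; m <- 20                                        # group sizes
SMD <- 0.5                                              # Cohen's d
J <- 1 - 3 / (4 * (n + m - 2) - 1)                      # correction factor, ~0.980
G <- J * SMD                                            # Hedges' g, ~0.490
V <- (J^2) * ((n + m) / (n * m) + SMD^2 / 2 / (n + m))  # sampling variance of g, ~0.102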
progression_part" }, { "ID": 757, "Comment": "get number of papers (and other stats) per gender", "Code": "ddply(new, \"Gender\",summarise, mean = mean(no.of.papers, na.rm = TRUE), median = median(no.of.papers, na.rm = TRUE), sd = sd(no.of.papers, na.rm = TRUE), N = sum(!is.na(no.of.papers)), se = sd / sqrt(N), min = min(no.of.papers, na.rm = TRUE), max = max(no.of.papers, na.rm = TRUE))", "Label": "Data Variable", "Source": "https://osf.io/7v4ep/", "File": "Collaboration boosts career progression_part" }, { "ID": 758, "Comment": "Create censoring VARIABLE PI status: 1 if PI, 0 if author did not make it to PI", "Code": "new.PI$status.PI = ifelse(!is.na(new.PI$Time.to.PI),1,0)", "Label": "Data Variable", "Source": "https://osf.io/7v4ep/", "File": "Collaboration boosts career progression_part" }, { "ID": 759, "Comment": "Gender effect of Gender on time to PI", "Code": "flexgender<-flexsurvreg(Surv(new.PI$time.PI,new.PI$status.PI)~Gender, dist=\"lnorm\", data=new.PI) flexgender", "Label": "Data Variable", "Source": "https://osf.io/7v4ep/", "File": "Collaboration boosts career progression_part" }, { "ID": 760, "Comment": "fit_lognormal Lowest AIC, best fit Package flexsurv provides access to additional distributions Also allows plotting options want red lines to match as close as possible to KM curve", "Code": "fit_exp<-flexsurvreg(Surv(new$n.years,new$status)~1, dist=\"exp\") fit_weibull<-flexsurvreg(Surv(new$n.years,new$status)~1, dist=\"weibull\") fit_gamma<-flexsurvreg(Surv(new$n.years,new$status)~1, dist=\"gamma\") fit_gengamma<-flexsurvreg(Surv(new$n.years,new$status)~1, dist=\"gengamma\") fit_genf<-flexsurvreg(Surv(new$n.years,new$status)~1, dist=\"genf\") fit_lognormal<-flexsurvreg(Surv(new$n.years,new$status)~1, dist=\"lnorm\") fit_gompertz<-flexsurvreg(Surv(new$n.years,new$status)~1, dist=\"gompertz\") fit_exp fit_weibull fit_gamma fit_gengamma fit_genf fit_lognormal fit_gompertz plot(fit_exp) plot(fit_weibull) plot(fit_gamma) plot(fit_gengamma) plot(fit_genf) plot(fit_lognormal) plot(fit_gompertz)", "Label": "Visualization", "Source": "https://osf.io/7v4ep/", "File": "Collaboration boosts career progression_part" }, { "ID": 761, "Comment": "Generalized gamma distribution is not sig. better fit than lognormal distribution. Justifies using lognormal. 
{ "ID": 761, "Comment": " Generalized gamma distribution is not a sig. better fit than the lognormal distribution;; this justifies using the lognormal. AFT model: uses the survreg function from the survival package with a lognormal distribution, per the earlier exploration of the best distribution for the data. Use survreg for stepwise selection, then flexsurvreg to generate graphs. GENDER: examine gender differences first, ignoring any of the social metrics. Effect of gender on career length (n.years)", "Code": "flexAFTgender<-flexsurvreg(Surv(n.years,status)~Gender,dist=\"lnorm\",data=new) flexAFTgender plot(flexAFTgender,col=c(\"blue\",\"red\"),ci=T,xlab=\"Years\",ylab=\"Survival probability\")", "Label": "Statistical Modeling", "Source": "https://osf.io/7v4ep/", "File": "Collaboration boosts career progression_part" },
{ "ID": 762, "Comment": "Reverses the factor level ordering for labels after coord_flip()", "Code": "df$labeltext<-factor(df$labeltext, levels=rev(df$labeltext)) df$colour <- c(\"dark grey\",\"dark grey\",\"black\",\"black\",\"dark grey\",\"dark grey\") df_TPICL$labeltext_TPICL <- factor(df_TPICL$labeltext_TPICL, levels=rev(df_TPICL$labeltext_TPICL)) df_TPICL$colour <- c(\"dark grey\",\"dark grey\",\"dark grey\",\"dark grey\",\"dark grey\",\"dark grey\",\"black\",\"black\",\"dark grey\",\"dark grey\",\"dark grey\",\"dark grey\") plotTPICL1<- ggplot(data=df_TPICL, aes(x=labeltext_TPICL, y=estimate_TPICL, ymin=lower_TPICL, ymax=upper_TPICL, col=Gender_TPICL, shape=Gender_TPICL))+ annotate(\"rect\", xmin = 0, xmax = 6.5, ymin = 1, ymax = 5, alpha = .2, fill = \"grey\") + annotate(\"rect\", xmin = 6.5, xmax = 12.7, ymin = 0, ymax = 1, alpha = .2, fill = \"grey\") + geom_point(aes(size=2,col = colour, fill = colour),show.legend=FALSE)+ geom_pointrange(size=0.7, col = df_TPICL$colour, fill = df_TPICL$colour,show.legend=FALSE) + geom_hline(yintercept=1, lty=2) + # Add a dotted line at x=1 after flip geom_vline(xintercept=6.5, lty=1) + # Adds a solid line at y=6.5 after flip to differentiate between top panel = career longevity and bottom panel = time to PI geom_errorbar(aes(ymin=lower_TPICL, ymax=upper_TPICL,width=0.6,cex=1,col=colour, fill = colour),show.legend=FALSE) + scale_x_discrete(breaks=c(1,3,5,7,9,11), labels=c(\"Adj. Network Size\",\"Adj. Tie Strength\",\"Adj. Clustering Coef.\", \"Adj. Network Size\",\"Adj. Tie Strength\",\"Adj. Clustering Coef.\")) + coord_flip() + # Flip coordinates (puts labels on y axis) xlab(\"Label\") + ylab(\"Deceleration factor\") + scale_fill_manual(values=c(\"dark grey\",\"dark grey\",\"black\",\"black\",\"dark grey\",\"dark grey\",\"black\",\"black\",\"dark grey\",\"dark grey\",\"dark grey\",\"dark grey\"))+ scale_colour_manual(values=c(\"black\",\"dark grey\",\"dark grey\",\"dark grey\",\"black\",\"black\")) + scale_shape_manual(values=c(17,16))+ annotate(\"text\",x=7.2, y=5, label=\"Time to become a PI\",size=5,fontface=\"bold\",hjust=1) + annotate(\"text\",x=1, y=5, label=\"Career length\", size=5,fontface=\"bold\",hjust=1) + theme(axis.title.y = element_blank(), # Remove y axis title axis.text.y = element_text(size=12, colour=\"black\"), # Change size of y axis labels axis.text.x = element_text(size=12,colour=\"black\"), # Change size of x axis numbers axis.title.x = element_text(size=14,vjust=0.5,colour=\"black\",face=\"bold\"), # Change size and font of x axis title and move it down a bit panel.grid.major = element_blank(), # Formatting to create blank plot with box around it axis.line = element_line(colour=\"black\"), panel.background = element_rect(colour = \"black\", size=1, fill=NA), legend.title =element_blank())+ guides(colour=FALSE)", "Label": "Data Variable", "Source": "https://osf.io/7v4ep/", "File": "Collaboration boosts career progression_part" },
{ "ID": 763, "Comment": "Add label to plot C", "Code": "plotTPICL2<-plotTPICL1+ labs(tag=\"C\")+ theme(plot.tag.position = c(0.19,0.5),plot.tag = element_text(size=14,face=\"bold\"))", "Label": "Visualization", "Source": "https://osf.io/7v4ep/", "File": "Collaboration boosts career progression_part" },
{ "ID": 764, "Comment": "Now lay out the plots and plot them;; labels for plots A and B are added here", "Code": "ggarrange(plotPIstatus,plotTPICL2,heights=c(1,1.7),ncol=1,nrow=2,labels=c(\"A\",\"B\"),label.x=0.18,label.y=0.95)", "Label": "Visualization", "Source": "https://osf.io/7v4ep/", "File": "Collaboration boosts career progression_part" },
{ "ID": 765, "Comment": "Display raincloud plot", "Code": "ggplot(dataset_anova, aes(session, score, fill = group)) + geom_rain(alpha = .5, cov = \"group\", rain.side = 'f2x2') + theme(text=element_text(size=20))", "Label": "Visualization", "Source": "https://osf.io/dez9b/", "File": "BaIn_ANOVAinR.R" },
{ "ID": 766, "Comment": "Create a covariance matrix of the means in the placebo group", "Code": "cov_group1 = matrix(c(vcov(cov_fit)[1,1],vcov(cov_fit)[1,3],vcov(cov_fit)[3,1], vcov(cov_fit)[3,3]),2,2)", "Label": "Statistical Modeling", "Source": "https://osf.io/dez9b/", "File": "BaIn_ANOVAinR.R" },
{ "ID": 767, "Comment": "Create a covariance matrix of the means in the treatment group", "Code": "cov_group2 = matrix(c(vcov(cov_fit)[2,2],vcov(cov_fit)[2,4],vcov(cov_fit)[4,2], vcov(cov_fit)[4,4]),2,2)", "Label": "Statistical Modeling", "Source": "https://osf.io/dez9b/", "File": "BaIn_ANOVAinR.R" },
{ "ID": 768, "Comment": "Contrasting Two- vs Four-stakes paradigms: Decision. Generates a table of beta coefficients and associated statistics", "Code": "coef.stakes_decision <- data.frame(cbind(summary(m.stakes_decision)$coefficients, m.stakes_decision.CI)) %>% tibble::rownames_to_column('Variable') %>% rename(SE = Std..Error) %>% rename(Z = z.value) %>% rename(P = Pr...z..) %>% rename(CI_L = X2.5..) %>% rename(CI_U = X97.5..) %>% mutate(Variable = factor(Variable, levels = c('fairnessunfair:recodedDecisionreject:stakes4', 'recodedDecisionreject:stakes4', 'fairnessunfair:stakes4', 'fairnessunfair:recodedDecisionreject', 'stakes4', 'recodedDecisionreject', 'fairnessunfair', 'genderfemale', 'centredAge', '(Intercept)' )))", "Label": "Statistical Modeling", "Source": "https://osf.io/uygpq/", "File": "Model figures.R" },
{ "ID": 769, "Comment": "Calculating the conditional entropy of denomination given designs, by authority, separately for each df (HIGHER/LOWER). Higher denominations", "Code": "authority <- sort(unique(higher$AUTHORITY)) CEDenomination.Designs <- c() HDenominations <- c() NormCEDenomination.Designs <- c() Ncoins <- c() AUTHORITY <- c() for(i in authority){ high_sub <- subset(higher, higher$AUTHORITY == i) denom_high <- as.data.frame(high_sub[,256:309]) motifs_high <- as.data.frame(high_sub[,318:681]) AUTHORITY[i] <- i CEDenomination.Designs[i] <- condentropy(denom_high, motifs_high) HDenominations[i] <- entropy(denom_high) NormCEDenomination.Designs[i] <- condentropy(denom_high, motifs_high) / entropy(denom_high) Ncoins[i] <- nrow(high_sub) } results_higher <- data.frame(\"AUTHORITY\" = AUTHORITY, \"CEDenomination.Designs\" = CEDenomination.Designs, \"Entropy.Denominations\" = HDenominations, \"NormCEDenomination.Designs\" = NormCEDenomination.Designs, \"Ncoins\" = Ncoins) write.csv2(results_higher,\"P3test_byauth_higher.csv\")", "Label": "Statistical Modeling", "Source": "https://osf.io/uckzx/", "File": "P3_analysis.R" },
{ "ID": 770, "Comment": "destructure string into an array using \"_\" as the delimiter", "Code": "stringArr <- unlist(strsplit(as.character(string), split = \"_\", fixed = T))", "Label": "Data Variable", "Source": "https://osf.io/4a9b6/", "File": "PrefLook_functions.r" },
{ "ID": 771, "Comment": "log-transform RTs for statistical analysis", "Code": "RTdata$logRT <- log10(RTdata$rt) hist(RTdata$logRT)", "Label": "Data Variable", "Source": "https://osf.io/4sjxz/", "File": "ScenePrimingCFS_Analysis.R" },
{ "ID": 772, "Comment": "fit a linear mixed effects model with all theoretically relevant fixed effects and random intercepts for participant and target context", "Code": "modelRT <- mixed(logRT ~ congruency*soa*mask_contrast + (1| participant) + (1|target_context), data = RTdata, method = \"S\", type=3) modelRT", "Label": "Statistical Modeling", "Source": "https://osf.io/4sjxz/", "File": "ScenePrimingCFS_Analysis.R" },
{ "ID": 773, "Comment": "Pearson correlations within each of these four conditions", "Code": "ca_subset <- CA %>% filter(soa == \"200 ms\" & mc == \"100% contrast\") cor.test(ca_subset$PC, ca_subset$CE) ca_subset <- CA %>% filter(soa == \"400 ms\" & mc == \"100% contrast\") cor.test(ca_subset$PC, ca_subset$CE) ca_subset <- CA %>% filter(soa == \"200 ms\" & mc == \"20% contrast\") cor.test(ca_subset$PC, ca_subset$CE) ca_subset <- CA %>% filter(soa == \"400 ms\" & mc == \"20% contrast\") cor.test(ca_subset$PC, ca_subset$CE)", "Label": "Statistical Test", "Source": "https://osf.io/4sjxz/", "File": "ScenePrimingCFS_Analysis.R" },
{ "ID": 774, "Comment": "descriptive statistics of accuracy / errors", "Code": "er_desc <- data_er %>% group_by(participant) %>% summarize(n_correct = sum(acc), n_trials = length(participant))", "Label": "Data Variable", "Source": "https://osf.io/4sjxz/", "File": "ScenePrimingCFS_Analysis.R" },
{ "ID": 775, "Comment": "fit a GLMM model with all theoretically relevant fixed effects", "Code": "modelER <- mixed(acc ~ congruency*soa*mask_contrast + (1| participant) + (1|target_context), family = binomial(\"logit\"), data = data_er, method = \"LRT\", type =3) summary(modelER) anova(modelER)", "Label": "Statistical Modeling", "Source": "https://osf.io/4sjxz/", "File": "ScenePrimingCFS_Analysis.R" },
{ "ID": 776, "Comment": "fit a GLMM model with all theoretically relevant effects", "Code": "modelER <- mixed(acc ~ soa*mask_contrast + (1| participant) + (1|prime_context), family = binomial(\"logit\"), data = acc_data, method = \"LRT\", type=3) modelER", "Label": "Statistical Modeling", "Source": "https://osf.io/4sjxz/", "File": "ScenePrimingCFS_Analysis.R" },
{ "ID": 777, "Comment": "calculate overall proportion", "Code": "DT[, prop_overall := N / sum(N)] # data.table update by reference;; 'DT' is a placeholder, as the original object's name is not preserved in the snippet", "Label": "Data Variable", "Source": "https://osf.io/dqc3y/", "File": "analysis_fullset.R" },
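A toy sketch of the data.table idiom in ID 777; 'DT' and its contents are hypothetical stand-ins for the original object.
library(data.table)
DT <- data.table(group = c("a", "b", "c"), N = c(10, 30, 60))
DT[, prop_overall := N / sum(N)]  # adds the proportion column by reference
DT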
{ "ID": 778, "Comment": "creating variables for lagged values and moving averages", "Code": "delay_rf <- function(v,lag) c(rep(NA,lag),v[1:(length(v)-lag)]) moving_avg <- function(x,n) c(stats::filter(x,rep(1/n,n),sides=1)) res <- lapply(res,transform,cd4Rcd8=cd4/cd8) res <- lapply(res,transform,cd4_ma12=moving_avg(cd4,12),cd8_ma12=moving_avg(cd8,12),cd4Rcd8_ma12=moving_avg(cd4Rcd8,12),cd4_ma24=moving_avg(cd4,24),cd8_ma24=moving_avg(cd8,24),cd4Rcd8_ma24=moving_avg(cd4Rcd8,24), rna_ma12=moving_avg(rna,12),rna_ma24=moving_avg(rna,24)) res <- lapply(res,transform,cd4_lag12=delay_rf(cd4,12),cd4_lag24=delay_rf(cd4,24),cd4_lag36=delay_rf(cd4,36), cd8_lag12=delay_rf(cd8,12),cd8_lag24=delay_rf(cd8,24),cd8_lag36=delay_rf(cd8,36), cd4Rcd8_lag12=delay_rf(cd4Rcd8,12),cd4Rcd8_lag24=delay_rf(cd4Rcd8,24),cd4Rcd8_lag36=delay_rf(cd4Rcd8,36), rna_lag12=delay_rf(rna,12),rna_lag24=delay_rf(rna,24),rna_lag36=delay_rf(rna,36), cd4_ma12_lag12=delay_rf(cd4_ma12,12),cd4_ma24_lag12=delay_rf(cd4_ma24,12),cd4_ma12_lag24=delay_rf(cd4_ma12,24), cd8_ma12_lag12=delay_rf(cd8_ma12,12),cd8_ma24_lag12=delay_rf(cd8_ma24,12),cd8_ma12_lag24=delay_rf(cd8_ma12,24), cd4Rcd8_ma12_lag12=delay_rf(cd4Rcd8_ma12,12),cd4Rcd8_ma24_lag12=delay_rf(cd4Rcd8_ma24,12),cd4Rcd8_ma12_lag24=delay_rf(cd4Rcd8_ma12,24), rna_ma12_lag12=delay_rf(rna_ma12,12),rna_ma24_lag12=delay_rf(rna_ma24,12),rna_ma12_lag24=delay_rf(rna_ma12,24)) res <- lapply(res,transform,haart_lag6=delay_rf(haart,6),haart_lag12=delay_rf(haart,12),hepC_aHCVpos_lag12=delay_rf(hepC_aHCVpos,12),hepC_RNApos_lag12=delay_rf(hepC_RNApos,12), hepC_RNAdet_lag12=delay_rf(hepC_RNAdet,12),hepB_pos_lag12=delay_rf(hepB_pos,12),CDC_lag12_cat=delay_rf(CDC_cat,12),bpn_lag12=delay_rf(bpn,12)) res_df <- rbind.fill(res) res_df$CDC_lag12_cat[res_df$CDC_lag12_cat==1] <- \"A\" res_df$CDC_lag12_cat[res_df$CDC_lag12_cat==2] <- \"B\" res_df$CDC_lag12_cat[res_df$CDC_lag12_cat==3] <- \"C\" res_df$CDC_lag12_cat <- factor(res_df$CDC_lag12_cat,levels=c(\"A\",\"B\",\"C\")) res_df$hepC_aHCVpos_lag12[is.na(res_df$hepC_aHCVpos_lag12)] <- FALSE res_df$hepC_RNApos_lag12[is.na(res_df$hepC_RNApos_lag12)] <- FALSE res_df$hepC_RNAdet_lag12[is.na(res_df$hepC_RNAdet_lag12)] <- FALSE res_df <- transform(res_df,hepC_RNA=(hepC_RNApos | hepC_RNAdet),hepC_RNA_lag12=(hepC_RNApos_lag12 | hepC_RNAdet_lag12)) res_df$bpn_lag12[is.na(res_df$bpn_lag12)] <- FALSE rownames(res_df) <- NULL if(write_df) save(res_df,file=paste(filepath_read_R,\"\\\\monthly_rf_df_\",max_dist,\".RData\",sep='')) print(proc.time()-ptm)", "Label": "Data Variable", "Source": "https://osf.io/gy5vm/", "File": "risk_factors_monthly.R" },
{ "ID": 779, "Comment": "create dummy variables: gender and current compliance", "Code": "Sample2$Dfemale <- recode(Sample2$gender, \"'weiblich'=1;; 'maennlich'=0;; 'divers'=0;; 'keine Angabe'=0\") Sample2$Dmale <- recode(Sample2$gender, \"'maennlich'=1;; 'weiblich'=0;; 'divers'=0;; 'keine Angabe'=0\") Sample2$Ddiverse <- recode(Sample2$gender, \"'divers'=1;; 'weiblich'=0;; 'maennlich'=0;; 'keine Angabe'=0\") Sample2$DNoGenderInd <- recode(Sample2$gender, \"'keine Angabe'=1;;'weiblich'=0;; 'maennlich'=0;; 'divers'=0\") Sample2$DComplianceFully <- recode(Sample2$complianceCurrent, \"'Ja'=1;; 'Ja, teilweise'=0;; 'Nein auch zuvor nicht'=0;; 'Nein, nicht mehr'=0\") Sample2$DCompliancePartly <- recode(Sample2$complianceCurrent, \"'Ja'=0;; 'Ja, teilweise'=1;; 'Nein auch zuvor nicht'=0;; 'Nein, nicht mehr'=0\") Sample2$DComplianceNever <- recode(Sample2$complianceCurrent, \"'Ja'=0;; 'Ja, teilweise'=0;; 'Nein auch zuvor nicht'=1;; 'Nein, nicht mehr'=0\") Sample2$DComplianceNotAnymore <- recode(Sample2$complianceCurrent, \"'Ja'=0;; 'Ja, teilweise'=0;; 'Nein auch zuvor nicht'=0;; 'Nein, nicht mehr'=1\")", "Label": "Data Variable", "Source": "https://osf.io/ezdgt/", "File": "3_Regression analyses.R" },
{ "ID": 780, "Comment": "Plots: Anxiety main effects (LISD)", "Code": "ggplot(Sample2_Reg,aes(y=Anxiety,x=z_LISD_State_F1))+ geom_point(color = \"indianred4\")+geom_smooth(method=\"lm\", color = \"black\", size = 0.5)+theme_classic() ggplot(Sample2_Reg,aes(y=Anxiety,x=z_LISD_Trait_F1))+ geom_point(color = \"indianred4\")+geom_smooth(method=\"lm\", color = \"black\", size = 0.5)+theme_classic() ggplot(Sample2_Reg,aes(y=Anxiety,x=z_LISD_Trait_F2))+ geom_point(color = \"indianred4\")+geom_smooth(method=\"lm\", color = \"black\", size = 0.5)+theme_classic()", "Label": "Visualization", "Source": "https://osf.io/ezdgt/", "File": "3_Regression analyses.R" },
{ "ID": 781, "Comment": "Plots: Depressed. Plot preparation: divide z_age into three groups (-SD, mean, +SD)", "Code": "attach(Sample2_Reg) Sample2_Reg$age_3groups <- case_when(z_age > mean(z_age)+sd(z_age) ~ \"high\", z_age < mean(z_age)+sd(z_age) & z_age > mean(z_age)-sd(z_age) ~ \"mean\", z_age < mean(z_age)-sd(z_age) ~ \"low\") detach(Sample2_Reg)", "Label": "Visualization", "Source": "https://osf.io/ezdgt/", "File": "3_Regression analyses.R" },
"3_Regression analyses.R" }, { "ID": 783, "Comment": "Create a line graph articles per year", "Code": "ggplot(ArticlesbyYear, aes(x=Year, y=n, group = 1)) + geom_line(color=\"orange\") + labs(y = \"Number of Articles\", angle = 45) + theme_minimal(base_size = 12)", "Label": "Visualization", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" }, { "ID": 784, "Comment": "Create a barplot articles per year", "Code": "ggplot(ArticlesbyYear, aes(x = Year, y = n)) + geom_bar(stat = \"identity\", color = \"steelblue3\", fill = \"steelblue3\") + theme_minimal(base_size = 12) + labs(y = \"Articles\", angle = 45) + geom_text(aes(label = n), vjust =\"center\", size=3, hjust = \"center\", nudge_y = 1)", "Label": "Visualization", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" }, { "ID": 785, "Comment": "Create a barplot articles per journal", "Code": "ggplot(Journals, aes(x = reorder(Journal, Percent), y = Percent)) + geom_bar(stat = \"identity\", color = \"steelblue3\", fill = \"steelblue3\") + coord_flip() + theme_minimal(base_size = 11) + labs(y = \"Percent\", x = \"Journal\") + geom_text(aes(label = Percent), vjust =\"center\", size=3, hjust = \"center\", nudge_y = 0.01)", "Label": "Visualization", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" }, { "ID": 786, "Comment": "Create barplot Subfields Percents counts", "Code": "ggplot(subfield_count, aes(x = reorder(Subfield, Percent) , y = Percent, fill = Subfield)) + geom_bar(stat = \"identity\") + scale_fill_brewer(palette = \"Blues\", guide = FALSE) + coord_flip() + theme_minimal(base_size = 13) + labs(y = \"Percent of Articles\", x = \"Subfield\") + geom_text(aes(label = Percent), vjust =\"center\", size=3, hjust = \"center\", nudge_y =.1) + scale_y_continuous(labels = scales::percent, limits = c(0,1))", "Label": "Visualization", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" }, { "ID": 787, "Comment": "authorlevel count and proportion for all gender identity categories", "Code": "gender_author <- author_data %>% subset(!is.na(Gender.apsa)) %>% dplyr::summarize(count = c(sum(male_apsa, na.rm=T), sum(female_apsa, na.rm=T), sum(nonbinary_apsa, na.rm=T))) gender_author <- gender_author %>% mutate(proportion = round(count / sum(count), 2)) %>% mutate(gender = c(\"male\", \"female\", \"nonbinary\")) %>% dplyr::select(gender, count, proportion)", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" }, { "ID": 788, "Comment": "calculate count and proportion of gender author structure", "Code": "gender_article_dedup <- gender_article %>% distinct(article_title, single_authored_male, single_authored_female, co_authored_male, co_authored_female, co_authored_mixed) gender_article_count <- data.frame(matrix(NA, nrow = 5, ncol = 3)) colnames(gender_article_count) <- c(\"Author_Gender\", \"Frequency\", \"Percent\") gender_article_count$Author_Gender <- c(\"Single Authored Male\", \"Single Authored Female\", \"Co-authored Male\", \"Co-authored Female\", \"Co-authored Mixed Gender\") gender_article_count$Frequency <- c(sum(gender_article_dedup$single_authored_male, na.rm=T), sum(gender_article_dedup$single_authored_female, na.rm=T), sum(gender_article_dedup$co_authored_male, na.rm=T), sum(gender_article_dedup$co_authored_female, na.rm=T), sum(gender_article_dedup$co_authored_mixed, na.rm=T)) gender_article_count$Percent <- formattable::percent(gender_article_count$Frequency/sum(gender_article_count$Frequency), digits = 1)", "Label": "Statistical Modeling", "Source": 
"https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" }, { "ID": 789, "Comment": "articlelevel authorship structure for race / ethnic identity categories", "Code": "race_ethnicity_article <- race_ethnicity_article %>% group_by(article_title) %>% mutate(white_authors = ifelse(mean(white) == 1, 1, 0), black_authors = ifelse(mean(black) == 1, 1, 0), east_asian_authors = ifelse(mean(east_asian) == 1, 1, 0), south_asian_authors = ifelse(mean(south_asian) == 1, 1, 0), latino_authors = ifelse(mean(latino) == 1, 1, 0), mena_authors = ifelse(mean(mena) == 1, 1, 0), native_authors = ifelse(mean(native) == 1, 1, 0), pacific_authors = ifelse(mean(pacific) == 1, 1, 0), other_authors = ifelse(mean(other) == 1, 1, 0), mixed_authors = ifelse(mean(white) < 1 & n() > 1, 1, 0))", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" }, { "ID": 790, "Comment": "count of articles that generated data using experimental techniques (includes articles that use both, percentage calculated using total empirical articles)", "Code": "GenerateData[2,2] <- sum(MMCPSR_emp$EHPdata) GenerateData[2,3] <- sum(MMCPSR_emp$EHPdata)/nrow(MMCPSR_emp)", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" }, { "ID": 791, "Comment": "Number (%) of articles drawing on data collected via Survey (alone + in combo. w/other techniques) Raw Count/percent alone", "Code": "SurveySoloCount <- sum(MMCPSR_emp$`Ethnography / participant observation` ==0 & MMCPSR_emp$`Interviews/focus groups` == 0 & MMCPSR_emp$Survey == 1 & MMCPSR_emp$EHPdata == 0 & MMCPSR_emp$gendataNHP == 0 & MMCPSR_emp$`Employed data/information from pre-existing primary or secondary sources`==0) SurveySoloCount formattable::percent(SurveySoloCount/nrow(MMCPSR_emp), digits = 1)", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" }, { "ID": 792, "Comment": "Number (%) of articles using Field experiments (alone + in combo. 
{ "ID": 792, "Comment": "Number (%) of articles using Field experiments (alone + in combo w/ other techniques). Count/percent alone", "Code": "FieldExpSoloCount <- sum(MMCPSR_emp$`Survey experiment` ==0 & MMCPSR_emp$Field == 1 & MMCPSR_emp$Lab == 0 & MMCPSR_emp$OHPdata == 0 & MMCPSR_emp$gendataNHP == 0 & MMCPSR_emp$`Employed data/information from pre-existing primary or secondary sources`==0) FieldExpSoloCount formattable::percent(FieldExpSoloCount/nrow(MMCPSR_emp), digits = 1)", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" },
{ "ID": 793, "Comment": "Number (%) of articles using data generated through interaction with domestic government", "Code": "sum(MMCPSR_emp$`EHP - Domestic government` == 1 | MMCPSR_emp$`OHP - Domestic government`==1) formattable::percent(sum(MMCPSR_emp$`EHP - Domestic government` == 1 | MMCPSR_emp$`OHP - Domestic government`==1) /nrow(MMCPSR_emp), digits = 1)", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" },
{ "ID": 794, "Comment": "Number (%) of articles using data generated through interaction with Media", "Code": "sum(MMCPSR_emp$`EHP - Media` == 1 | MMCPSR_emp$`OHP - Media`==1) formattable::percent(sum(MMCPSR_emp$`EHP - Media` == 1 | MMCPSR_emp$`OHP - Media`==1)/nrow(MMCPSR_emp), digits = 1)", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" },
{ "ID": 795, "Comment": "probit and marginal effects: experimental data vs time", "Code": "out.1.probit <-glm(EHPdata~Year, data = MMCPSR_emp, family = binomial(link = \"probit\")) summary(out.1.probit) probitmfx(out.1.probit, data = MMCPSR_emp)", "Label": "Statistical Modeling", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" },
{ "ID": 796, "Comment": "create correlation matrix: author gender vs methods categories", "Code": "MethodGender_cor <- rcorr(as.matrix(MethodGender_sub))", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" },
{ "ID": 797, "Comment": "DV: formal modeling only. OLS: modeling only vs time", "Code": "out.12 <- lm(MMCPSR_emp$ModelingOnly ~ MMCPSR_emp$Year) summary(out.12) out.12$coefficients[2]", "Label": "Statistical Modeling", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" },
{ "ID": 798, "Comment": "Conclusion: Policy Recommendations. Count and percent of all articles", "Code": "sum(MMCPSR_Data$`Policy Recommendation`) formattable::percent(sum(MMCPSR_Data$`Policy Recommendation`)/nrow(MMCPSR_Data), digits = 1)", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" },
{ "ID": 799, "Comment": "DV: time. OLS: policy recommendations vs time", "Code": "out.21 <- lm(MMCPSR_emp$`Policy Recommendation`~MMCPSR_emp$Year) summary(out.21) out.21$coefficients[2] out.21b <- lm(MMCPSR_emp$`Policy Recommendation`~as.factor(MMCPSR_emp$Year)) summary(out.21b) plot_coefs(out.21b) plot_coefs(lm(MMCPSR_emp$`Policy Recommendation`~ 0 + as.factor(MMCPSR_emp$Year)))", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "MMCPSRAnalysis.R" },
{ "ID": 800, "Comment": "scaling all the variables of interest (between 0 and 1) for a composite score", "Code": "data8$gincdif_s = rescale(data8$gincdif) data8$smdfslv_s = rescale(data8$smdfslv) data8$sbstrec_r = 6 - data8$sbstrec # reverse scores first data8$sbstrec_r_s = rescale(data8$sbstrec_r)", "Label": "Data Variable", "Source": "https://osf.io/k853j/", "File": "ESS_openness_2016.R" },
\"nwspol_s\", \"polit_action_s\") data8$polit_eng = rowMeans(data8[polit_eng], na.rm = TRUE) mean(data8$polit_eng, na.rm = TRUE) # 0.22 sd(data8$polit_eng, na.rm = TRUE) # 0.14 psych::alpha(data8[polit_eng]) # 0.39 data8$polit_eng_c = c(scale(data8$polit_eng, center = TRUE, scale = FALSE)) # center", "Label": "Data Variable", "Source": "https://osf.io/k853j/", "File": "ESS_openness_2016.R" }, { "ID": 802, "Comment": "Calculate values scores for PVQ recoding PVQ items (ESS uses 1 as \"like me\" and 6 \"totally not like me)", "Code": "data8$ipcrtiv_r = 7 - data8$ipcrtiv data8$imprich_r = 7 - data8$imprich data8$ipeqopt_r = 7 - data8$ipeqopt data8$ipshabt_r = 7 - data8$ipshabt data8$impsafe_r = 7 - data8$impsafe data8$impdiff_r = 7 - data8$impdiff data8$ipfrule_r = 7 - data8$ipfrule data8$ipudrst_r = 7 - data8$ipudrst data8$ipmodst_r = 7 - data8$ipmodst data8$ipgdtim_r = 7 - data8$ipgdtim data8$impfree_r = 7 - data8$impfree data8$iphlppl_r = 7 - data8$iphlppl data8$ipsuces_r = 7 - data8$ipsuces data8$ipstrgv_r = 7 - data8$ipstrgv data8$ipadvnt_r = 7 - data8$ipadvnt data8$ipbhprp_r = 7 - data8$ipbhprp data8$iprspot_r = 7 - data8$iprspot data8$iplylfr_r = 7 - data8$iplylfr data8$impenv_r = 7 - data8$impenv data8$imptrad_r = 7 - data8$imptrad data8$impfun_r = 7 - data8$impfun", "Label": "Data Variable", "Source": "https://osf.io/k853j/", "File": "ESS_openness_2016.R" }, { "ID": 803, "Comment": "evaluate goodness of fit of psychometric function using deviance test", "Code": "d_fit <-{} for(i in unique(d$Subject)){ m_1 <- glm(Response ~ JumpSize * Velocity +BlinkDuration , d[d$Subject==i & d$cond1==1,], family=binomial(probit)) m_0<- glm(Response ~ Velocity +BlinkDuration , d[d$Subject==i & d$cond1==1,], family=binomial(probit)) LRT <- anova(m_0, m_1,test=\"LRT\") d_fit <- rbind(d_fit, data.frame(id=i, D=LRT$Deviance[2], df=LRT$Df[2], p=LRT$`Pr(>Chi)`[2])) } print(d_fit,digits=2,row.names=F) round(d_fit$p,digits=10)", "Label": "Statistical Test", "Source": "https://osf.io/f6qsk/", "File": "analysis_exp2.R" }, { "ID": 804, "Comment": "use lmList to fit individual GLM models in one go", "Code": "mo <- lmList(Response ~ cond1 * JumpSize * Velocity +bdur:cond1 | Subject, d, family=binomial(probit))", "Label": "Statistical Modeling", "Source": "https://osf.io/f6qsk/", "File": "analysis_exp2.R" }, { "ID": 805, "Comment": "visualize Bayes factors and parameter estimates", "Code": "par(mfrow=c(1,2)) slope_bdur <- B[,8] / (((B[,3]+B[,5]) + (B[,3]+B[,7]+B[,5]+B[,9]))/2) slope_bayes <- outB$beta_5 / (((outB$beta_2) + (outB$beta_2+outB$beta_4))/2)", "Label": "Visualization", "Source": "https://osf.io/f6qsk/", "File": "analysis_exp2.R" }, { "ID": 806, "Comment": "sanity check to ensure that we get same estimates from frequentist or Bayesian analysis", "Code": "plot(slope_bdur, slope_bayes, ylab=\"slope (Bayesian)\",xlab=\"slope (frequentist)\", pch=19) abline(a=0,b=1,lty=2) cor.test(slope_bdur, slope_bayes)", "Label": "Statistical Test", "Source": "https://osf.io/f6qsk/", "File": "analysis_exp2.R" }, { "ID": 807, "Comment": "n: sample size of each study simtot: number of simulations seed 1 Compute d33, ncp33 and df for the original sample", "Code": "d33=pwr.t.test(n=n,sig.level=0.05,power=1/3,type=\"two.sample\")$d ncp33=sqrt(n/2)*d33 df=2*n-2", "Label": "Data Variable", "Source": "https://osf.io/ujpyn/", "File": "[2]R-AnswerstoRound3Reviews-Powertoacceptflatwhentrueeffectiszeroacross24studies.R" }, { "ID": 808, "Comment": "point estimate p hat (just mean average of coefficients)", "Code": "pe <-mean(cors)", 
"Label": "Data Variable", "Source": "https://osf.io/9jzfr/", "File": "metaBigFive.R" }, { "ID": 809, "Comment": "next, we use the bridge_sampler() function which will stabilize the calculated BFs see Schad et al 2022 https://psycnet.apa.org/doi/10.1037/met0000472", "Code": "model.intox.seq.bf <- bridge_sampler(model.intox.seq, silent = TRUE) model.intox.seq.null.bf <- bridge_sampler(model.intox.seq.null, silent = TRUE)", "Label": "Statistical Modeling", "Source": "https://osf.io/rh2sw/", "File": "bayes.ema.tutorial.analysis.R" }, { "ID": 810, "Comment": "figure out which columns contain NAs", "Code": "missing_cols <- colSums(is.na(data.conseq)) > 0 print(names(data.conseq)[missing_cols])", "Label": "Data Variable", "Source": "https://osf.io/rh2sw/", "File": "bayes.ema.tutorial.analysis.R" }, { "ID": 811, "Comment": "age recored as 1 18, to correct we added 17 to all age variables", "Code": "data$age <- data$age + 17", "Label": "Data Variable", "Source": "https://osf.io/zfqax/", "File": "Study1_manipulationeffectiveness.R" }, { "ID": 812, "Comment": "User language variable into a factor", "Code": "data$UserLanguage <- ifelse(data$UserLanguage == \"EN\", \"EN\", \"NL\") data$UserLanguage <- as.factor(data$UserLanguage) contrasts(data$UserLanguage) <- c(0, 1)", "Label": "Data Variable", "Source": "https://osf.io/zfqax/", "File": "Study1_manipulationeffectiveness.R" }, { "ID": 813, "Comment": "Test assumptions MANOVA Test whether residuals are normally distributed", "Code": "df$pc1.residuals = lm(pc1~condition.breach, data=df)$residuals df$pc2.residuals = lm(pc2~condition.breach, data=df)$residuals df$pc3.residuals = lm(pc3~condition.breach, data=df)$residuals df$pc4.residuals = lm(pc4~condition.breach, data=df)$residuals shapiro.test(df$pc1.residuals) shapiro.test(df$pc2.residuals) shapiro.test(df$pc3.residuals) shapiro.test(df$pc4.residuals)", "Label": "Statistical Test", "Source": "https://osf.io/qj86m/", "File": "7_manova_fda_breach.R" }, { "ID": 814, "Comment": "Plot effect of first and third principal component", "Code": "eigenfunctions.df = read.table(file=\"../data/final/eigenfunctions_breach.dat\", header=T) fig.df1 <- data.frame(HR = c(eigenfunctions.df$pc1*-0.0305, eigenfunctions.df$pc1*0.1355), Condition = rep(c(rep(\"Breach\", 51), rep(\"Fulfilment\", 51))), Time=rep(seq(0,1, by=1/50),2), pc=\"Principal component 1\") fig.df2 <- data.frame(HR = c(eigenfunctions.df$pc3*0.0278, eigenfunctions.df$pc3*-0.0617), Condition = rep(c(rep(\"Breach\", 51), rep(\"Fulfilment\", 51))), Time=rep(seq(0,1, by=1/50),2), pc=\"Principal component 3\") fig.df = rbind(fig.df1, fig.df2) fig <- ggplot(fig.df, aes(x=Time, y =HR, linetype=Condition))+ geom_line()+ geom_vline(xintercept=.20, linetype=\"dashed\")+ facet_grid(~pc)+ theme_bw() fig ggsave(file=\"../figures/figure3.png\", width=8, height=6)", "Label": "Visualization", "Source": "https://osf.io/qj86m/", "File": "7_manova_fda_breach.R" }, { "ID": 815, "Comment": "function pcor2beta gives you data from a partial correlation/network input pcor a partial correlation matrix / network output a matrix of betas, each column corresponds to a dependent variable so that you can get predicted values by a matrix multiplication in the form betas %*% data", "Code": "pcor2beta <- function(pcor) { require(psych) require(corpcor) diag(pcor) <- 1 p <- ncol(pcor) betas <- matrix(0, ncol = p, nrow = p) for(i in 1:p) betas[-i,i] <- matReg(y = i, x = seq(p)[-i], C = pcor2cor(pcor))$beta betas[abs(betas) < 1e-13] <- 0 betas }", "Label": "Statistical Modeling", "Source": 
"https://osf.io/ywm3r/", "File": "predictability.R" }, { "ID": 816, "Comment": "function R2 gives you two different types of R2 and of predicted values input pcor a partial correlation matrix / network output a matrix of betas, each column corresponds to a dependent variable so that you can get predicted values by a matrix multiplication in the form betas %*% data this function gives you R2 and predicted values from betas + data two types of R2 and predicted values are considered: R2_orig and predicted_orig use beta weights directly implied by the graphical lasso regularization R2_refit and predicted_refit use only the sparsity pattern of the network and then refit linear regression using the network only for prediction but not for shrinkage (this one should do better in terms of prediction) refit: logical, regulates whether R2_refit and predicted_refit are computed. set it to FALSE to speed up computations", "Code": "R2 <- function(betas, dt, refit = TRUE) { dt <- data.frame(scale(dt)) out <- list() p <- ncol(betas)", "Label": "Statistical Modeling", "Source": "https://osf.io/ywm3r/", "File": "predictability.R" }, { "ID": 817, "Comment": "refit the model considering the sparsity pattern inicated by the network first fit regression using the sparsity indciated in the matrix of betas", "Code": "betas_refit <- matrix(0, ncol = p, nrow = p) for(i in 1:p) { if(any(betas[,i] != 0)) { fit <- lm(dt[,i] ~ as.matrix(dt[,betas[,i] != 0])) betas_refit[betas[,i] != 0, i] <- fit$coefficients[-1] } else betas_refit[,i] <- 0 } predicted <- as.matrix(dt) %*% betas_refit", "Label": "Statistical Modeling", "Source": "https://osf.io/ywm3r/", "File": "predictability.R" }, { "ID": 818, "Comment": "fit the data using the lm function", "Code": "bcafit <- lm(Abs562 ~ BSA, data = bca)", "Label": "Statistical Modeling", "Source": "https://osf.io/9e3cu/", "File": "BCA_dilution_answers.R" }, { "ID": 819, "Comment": "rename the columns for display", "Code": "colnames(samptab) <- c(\"Absorbance\", \"Concentration (mg/mL)\")", "Label": "Data Variable", "Source": "https://osf.io/9e3cu/", "File": "BCA_dilution_answers.R" }, { "ID": 820, "Comment": "departure time (12 hours so we can calculate easy the mean)", "Code": "d1[, time_ := strftime(datetime_ - 60*60*12, format=\"%H:%M:%S\")] d1[, time_ := as.POSIXct(time_, format=\"%H:%M:%S\")]", "Label": "Data Variable", "Source": "https://osf.io/amd3r/", "File": "R_script_tables_and_figures.R" }, { "ID": 821, "Comment": "This creates a variable \"period\" in the dataframe myDataISO:", "Code": "myDataISO$period<-gl(2, 1202, labels = c(\"first\", \"second\")) # We created 2 sets of 1202 scores, the labels option then specifies the names to attach to these 2 sets, which correspond to the levels of \"period\" (first measurement and follow-up).", "Label": "Data Variable", "Source": "https://osf.io/wcqpa/", "File": "Rcode.R" }, { "ID": 822, "Comment": "MIXED DESIGNS AS A GLM Setting contrasts for quarantine subperiod:", "Code": "myDataISO$quar.duration<-as.factor(myDataISO$quar.duration) is.factor(myDataISO$quar.duration) basvs10days<-c(0,1,0,0) # this compares the baseline (prior to quarantine) to a quarantine sub.period of up to 10-days duration basvs50days<-c(0,0,1,0) # this compares the baseline (prior to quarantine) to a quarantine sub.period of up to 50-days duration basvs103days<-c(0,0,0,1) # this compares the baseline (prior to quarantine) to a quarantine sub.period of up to 103-days duration contrasts(myDataISO$quar.duration)<-cbind(basvs10days, basvs50days, basvs103days) 
myDataISO$quar.duration # To check we set the contrasts correctly myDataISOIMP2$quar.duration<-as.factor(myDataISOIMP2$quar.duration) is.factor(myDataISOIMP2$quar.duration) basvs10days<-c(0,1,0,0) # this compares the baseline (prior to quarantine) to a quarantine sub-period of up to 10-days duration basvs50days<-c(0,0,1,0) # this compares the baseline (prior to quarantine) to a quarantine sub-period of up to 50-days duration basvs103days<-c(0,0,0,1) # this compares the baseline (prior to quarantine) to a quarantine sub-period of up to 103-days duration contrasts(myDataISOIMP2$quar.duration)<-cbind(basvs10days, basvs50days, basvs103days) myDataISOIMP2$quar.duration # To check we set the contrasts correctly", "Label": "Statistical Modeling", "Source": "https://osf.io/wcqpa/", "File": "Rcode.R" }, { "ID": 823, "Comment": "Calculating effect sizes with library(DSUR.noof). We got effect sizes of meaningful predictors by executing rcontrast(t, df), e.g. for periodsecond", "Code": "rcontrast(-2.685100, 97) rcontrast(-5.280908, 1201)", "Label": "Statistical Test", "Source": "https://osf.io/wcqpa/", "File": "Rcode.R" }, { "ID": 824, "Comment": "Simulate time-varying variable X", "Code": "data.X = expand.grid(Obs=1:T.obs,ID=1:N.dyad) var.diag.X = c(sigma.XF,sigma.XM) Sigma.X = diag(length(var.diag.X)) Sigma.X[lower.tri(Sigma.X, diag=FALSE)] = rho.X Sigma.X = pmax(Sigma.X, t(Sigma.X), na.rm=TRUE) Sigma.X = diag(var.diag.X)%*%Sigma.X%*%diag(var.diag.X) data.X = cbind(data.X,mvrnorm(N.dyad*T.obs,c(mu.XF,mu.XM),Sigma.X)) colnames(data.X) = c('Obs','ID','X.F','X.M') X.F = data.X[,'X.F'] X.M = data.X[,'X.M'] D.dyad = rbinom(N.dyad*T.obs,1,prob.D) data.X = cbind(data.X,D.dyad)", "Label": "Data Variable", "Source": "https://osf.io/vtb9e/", "File": "Sim.Dyad.Model.16.R" }, { "ID": 825, "Comment": "left_join remaining_uncertain_matched to author_data, subset to remove non", "Code": "author_data <- left_join(author_data, remaining_uncertain_matched, by = c(\"index\" = \"index\")) author_data$match <- as.integer(author_data$match)", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "apsa_race_ethnicity.R" }, { "ID": 826, "Comment": "re-merge (left_join) author_names with author_data", "Code": "author_data <- left_join(test, author_data, by = c(\"Contact.apsa\", \"NameSort.apsa\")) %>% subset(duplicated(Contact.apsa) == FALSE)", "Label": "Data Variable", "Source": "https://osf.io/uhma8/", "File": "apsa_race_ethnicity.R" }, { "ID": 827, "Comment": "Number of subjects in each group", "Code": "n.group = c(rep(0,N.0),rep(1,N.1)) n.group = c(rep(0,N.0),rep(1,N.1)) n.group = c(rep(0,N.0),rep(1,N.1)) n.group = c(rep(0,N.0),rep(1,N.1))", "Label": "Data Variable", "Source": "https://osf.io/vguey/", "File": "Sim.Data.IL.R" }, { "ID": 828, "Comment": "Create variables days, beeps per day and Z", "Code": "data.IL = expand.grid(Time=1:T,Z=n.group) data.IL = expand.grid(Time=1:T,W=W.i) data.IL = expand.grid(Time=1:T,subjno=1:N) data.IL = expand.grid(Time=1:T,subjno=1:N) data.IL = expand.grid(Time=1:T,Z=n.group) data.IL = expand.grid(Time=1:T,Z=n.group) data.IL = expand.grid(Time=1:T,W=W.i) data.IL = expand.grid(Time=1:T,W=W.i) data.IL = expand.grid(Time=1:T,subjno=1:N) data.IL = expand.grid(Time=1:T,Z=n.group) data.IL = expand.grid(Time=1:T,W=W.i)", "Label": "Data Variable", "Source": "https://osf.io/vguey/", "File": "Sim.Data.IL.R" }, { "ID": 829, "Comment": "Create variable subjno", "Code": "subjno = expand.grid(1:T,1:N)[,2] data.IL = cbind(subjno,data.IL) Z = data.IL$Z
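# the same subjno construction is repeated below for each simulated design variant (Z- or W-based)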
subjno = expand.grid(1:T,1:N)[,2] data.IL = cbind(subjno,data.IL) W = data.IL$W subjno = expand.grid(1:T,1:N)[,2] data.IL = cbind(subjno,data.IL) Z = data.IL$Z subjno = expand.grid(1:T,1:N)[,2] data.IL = cbind(subjno,data.IL) Z = data.IL$Z subjno = expand.grid(1:T,1:N)[,2] data.IL = cbind(subjno,data.IL) W = data.IL$W subjno = expand.grid(1:T,1:N)[,2] data.IL = cbind(subjno,data.IL) W = data.IL$W subjno = expand.grid(1:T,1:N)[,2] data.IL = cbind(subjno,data.IL) Z = data.IL$Z subjno = expand.grid(1:T,1:N)[,2] data.IL = cbind(subjno,data.IL) W = data.IL$W", "Label": "Data Variable", "Source": "https://osf.io/vguey/", "File": "Sim.Data.IL.R" }, { "ID": 830, "Comment": "Create lag Y variable within days for each individual", "Code": "Ylag = lag.Y(data) data = data.frame(cbind(data,Ylag)) } Ylag = lag.Y(data) data = data.frame(cbind(data,Ylag)) } Ylag = lag.Y(data) data = data.frame(cbind(data,Ylag)) }", "Label": "Data Variable", "Source": "https://osf.io/vguey/", "File": "Sim.Data.IL.R" }, { "ID": 831, "Comment": "Parameters of random intercept and random slope are generated from a Beta distribution. Parameters of Group 0. Stationarity condition: sigma.v1 < sqrt(1-b10^2)", "Code": "if (sigma.v1 > sqrt(1-b10^2)) {stop('To ensure that the model in Group 0 is stationary check that standard deviation of the random slope is smaller than sqrt(1-b10^2) where b10 is the fixed autoregressive effect')} mu.beta.0 = (b10+1)/2 sigma.beta.0 = sigma.v1/2 alpha.beta.0 = mu.beta.0^2*(((1-mu.beta.0)/sigma.beta.0^2) - (1/mu.beta.0)) beta.beta.0 = alpha.beta.0*((1/mu.beta.0) - 1) gamma.01 = rbeta(N.0, alpha.beta.0, beta.beta.0)*2-1 gamma.00 = sigma.v0*(rho.v*scale(gamma.01)+sqrt(1-rho.v^2)*rnorm(N.0)) + rep(b00,N.0)", "Label": "Statistical Modeling", "Source": "https://osf.io/vguey/", "File": "Sim.Data.IL.R" }, { "ID": 832, "Comment": "Parameters of random intercept and random slope are generated from a Beta distribution", "Code": "mu.beta.1 = (b10+b11.W*W.i[i]+1)/2 sigma.beta.1 = sigma.v1/2 alpha.beta.1 = mu.beta.1^2*(((1-mu.beta.1)/sigma.beta.1^2) - (1/mu.beta.1)) beta.beta.1 = alpha.beta.1*((1/mu.beta.1) - 1) gamma.1 = rbeta(1, alpha.beta.1, beta.beta.1)*2-1 gamma.0 = sigma.v0*((rho.v*(gamma.1-(b10+b11.W*W.i[i]))/sigma.v1) + sqrt(1-rho.v^2)*rnorm(1)) + b00+b01.W*W.i[i] if (Ylag.center == TRUE){ AR.epsilon = list(order=c(1,0,0), ar=gamma.1, include.mean = FALSE) Y[which(data.IL$subjno==i)] = arima.sim(n=T,AR.epsilon)*sqrt(1-gamma.1^2)*sigma + gamma.0 } if (Ylag.center == FALSE){ AR.epsilon = list(order=c(1,0,0), ar=gamma.1, include.mean = FALSE) Y[which(data.IL$subjno==i)] = arima.sim(n=T,AR.epsilon)*sqrt(1-gamma.1^2)*sigma + gamma.0/(1-gamma.1) }}", "Label": "Statistical Modeling", "Source": "https://osf.io/vguey/", "File": "Sim.Data.IL.R" }, { "ID": 833, "Comment": "Get statistics for observed networks", "Code": "obs_df <- GetNetStats(group_fit$networks, group_fit$formula, \"model\") colnames(obs_df) <- stat_labels obs_df <- obs_df %>% mutate(n=1:200, Group=rep(c(\"Young\",\"Old\"), each=100)) %>% melt(measure.vars=stat_labels, variable.name=\"Stat\") obs_df <- rbind(obs_df, data.frame(n=1:200, Group=rep(c(\"Young\",\"Old\"), each=100), Stat = \"Local efficiency\", value = local_eff), data.frame(n=1:200, Group=rep(c(\"Young\",\"Old\"), each=100), Stat = \"Global efficiency\", value = global_eff)) sim_df <- obs_df[0, ] young_nets <- group_fit$networks[1:100]
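# goodness-of-fit loop: repeatedly resample one observed young network and update the model formula to it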
"Source": "https://osf.io/5nh94/", "File": "F8_plot_efficiency_goodness_of_fit.R" }, { "ID": 834, "Comment": "Source functions that allow to easily draw nice visualizations", "Code": "source(\"visual_functions_all.R\") source(\"visualize_niceplot.R\")", "Label": "Visualization", "Source": "https://osf.io/pvyhe/", "File": "exemplar_runs_r_add_xi.R" }, { "ID": 835, "Comment": "Test H1d. \"Compliance with behavioural guidelines\" by Countries random intercept (country) + the fixed effect of country for all coefficients, priors ~ Cauchy (0, 1)", "Code": "prior.coef <- brms::prior(cauchy(0,1),class='b')", "Label": "Statistical Modeling", "Source": "https://osf.io/z39us/", "File": "Bayesian_analyses_H1d.R" }, { "ID": 836, "Comment": "Calculate total number of language selected by each subj. We will use this to compute a mean weight", "Code": "sum.home.languages <- adj.home %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, language) %>% dplyr::summarise(n=any(val==1)) %>% dplyr::summarise(n_languages=sum(n)) sum.fam.languages <- adj.fam %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, language) %>% dplyr::summarise(n = any(val == 1)) %>% dplyr::summarise(n_languages=sum(n)) sum.social.languages <- adj.social %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, language) %>% dplyr::summarise(n = any(val == 1)) %>% dplyr::summarise(n_languages=sum(n)) sum.work.languages <- adj.work %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, language) %>% dplyr::summarise(n = any(val == 1)) %>% dplyr::summarise(n_languages=sum(n))", "Label": "Data Variable", "Source": "https://osf.io/6z79s/", "File": "bi.conv.networks.R" }, { "ID": 837, "Comment": "Sum up the adjacency values for each subject for each topictopic pairing, merge with the number of contexts containing responses, and compute mean", "Code": "sum.adj.home <- adj.home %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, topic,topic2) %>% dplyr::summarise(sum=sum(val)) %>% dplyr::left_join(sum.home.languages) %>% dplyr::mutate(avg=sum/n_languages) %>% ungroup() sum.adj.fam <- adj.fam %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, topic,topic2) %>% dplyr::summarise(sum=sum(val)) %>% dplyr::left_join(sum.fam.languages) %>% dplyr::mutate(avg=sum/n_languages) %>% ungroup() sum.adj.school <- adj.school %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, topic,topic2) %>% dplyr::summarise(sum=sum(val)) %>% dplyr::left_join(sum.school.languages) %>% dplyr::mutate(avg=sum/n_languages) %>% ungroup() sum.adj.social <- adj.social %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, topic,topic2) %>% dplyr::summarise(sum=sum(val)) %>% dplyr::left_join(sum.social.languages) %>% dplyr::mutate(avg=sum/n_languages) %>% ungroup() sum.adj.work <- adj.work %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, topic,topic2) %>% dplyr::summarise(sum=sum(val)) %>% dplyr::left_join(sum.work.languages) %>% dplyr::mutate(avg=sum/n_languages) %>% ungroup() sum.adj.dom <- adj.dom %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, topic,topic2) %>% dplyr::summarise(sum=sum(val)) %>% dplyr::left_join(sum.dom.context) %>% dplyr::mutate(avg=sum/n_context) %>% ungroup() sum.adj.non <- adj.non %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, topic,topic2) %>% dplyr::summarise(sum=sum(val)) %>% dplyr::left_join(sum.non.context) %>% dplyr::mutate(avg=sum/n_context) %>% 
ungroup()", "Label": "Data Variable", "Source": "https://osf.io/6z79s/", "File": "bi.conv.networks.R" }, { "ID": 838, "Comment": "Calculate total number of languages selected by each subj. We will use this to compute a mean weight", "Code": "sum.school.languages <- adj.school %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, language) %>% dplyr::summarise(n = any(val == 1)) %>% dplyr::summarise(n_languages=sum(n))", "Label": "Data Variable", "Source": "https://osf.io/6z79s/", "File": "bi.conv.networks.R" }, { "ID": 839, "Comment": "NETWORK SIZE (i.e., how many topics are used in each network?)", "Code": "c.size.long <- contexts.mean %>% select(subject, contains(\"networkSize\")) %>% gather(context, network.size, contains(\"networkSize\")) c.size.long$context = factor(c.size.long$context, levels = c(\"work.networkSize\", \"school.networkSize\", \"home.networkSize\", \"fam.networkSize\", \"social.networkSize\"), labels = c(\"Work\", \"School\", \"Home\", \"Family\", \"Social\")) c.size.long = c.size.long %>% filter(!is.na(network.size)) c.size.summary = convenience::sem(c.size.long, dv = network.size, id = subject, context)", "Label": "Data Variable", "Source": "https://osf.io/6z79s/", "File": "bi.conv.networks.R" }, { "ID": 840, "Comment": "Calculate total number of contexts selected by each subj. We will use this to compute a mean weight", "Code": "sum.dom.context <- adj.dom %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, context) %>% dplyr::summarise(n = any(val == 1)) %>% dplyr::summarise(n_context=sum(n)) sum.non.context <- adj.non %>% gather(topic2, val, chitchat:gossip) %>% dplyr::group_by(subject, context) %>% dplyr::summarise(n = any(val == 1)) %>% dplyr::summarise(n_context=sum(n))", "Label": "Data Variable", "Source": "https://osf.io/6z79s/", "File": "bi.conv.networks.R" }, { "ID": 841, "Comment": "Language network stats NETWORK WEIGHT (i.e., how many contexts is each topictopic pair used in this language?)", "Code": "language.wide <- sum.adj.dom %>% dplyr::full_join(sum.adj.non, by = c(\"subject\", \"topic\", \"topic2\")) %>% select(-contains(\"n_context\")) %>% select(-contains(\"avg\")) names(language.wide) <- c(\"subject\",\"topic\", \"topic2\", \"Dominant Language\", \"Non-dominant Language\") language.long <- gather(language.wide, language, weight, \"Dominant Language\":\"Non-dominant Language\") language.long = language.long %>% filter(!is.na(weight)) l.weight.summary = convenience::sem(language.long, dv = weight, id = subject, language) language.aov <- aov(weight ~ language, data = language.long) summary.aov(language.aov) # sig***", "Label": "Data Variable", "Source": "https://osf.io/6z79s/", "File": "bi.conv.networks.R" }, { "ID": 842, "Comment": "Mutate targets to uppercase", "Code": "SPD_all <- SPD_all %>% mutate(target = toupper(target)) dat <- dat %>% mutate(target = toupper(target))", "Label": "Data Variable", "Source": "https://osf.io/wgneh/", "File": "3 Prepare Data.R" }, { "ID": 843, "Comment": "Compute Zscores separately for each participant and each session", "Code": "group_by(Subject, Session) %>% mutate(Ztarget.RT = scale(target.RT)) %>% ungroup() %>%", "Label": "Statistical Test", "Source": "https://osf.io/wgneh/", "File": "3 Prepare Data.R" }, { "ID": 844, "Comment": "look up pchange in table based on reward (yes/no) and confidence unselected advisor", "Code": "sel.row <- subset(p_switch, Reward == reward & Conf.unsel.adv == unselect.adv.conf) sel.row pswitch <- sel.row$Changed pswitch", "Label": "Data Variable", 
"Source": "https://osf.io/9gjyc/", "File": "Simulations Evidence level.R" }, { "ID": 845, "Comment": "exploratory bifactor analysis", "Code": "fa(resp, fm=\"ml\", cor=\"tet\", nfactors = 5, rotate = \"bifactor\", correct = 0)", "Label": "Statistical Modeling", "Source": "https://osf.io/dkrhy/", "File": "BICBRaschModelsandPlots.R" }, { "ID": 846, "Comment": "estimating Rasch Tree models for gender & age", "Code": "gender.dif<-raschtree(dat~gender, data=bdat, deriv=\"numeric\", alpha=.01, bonferroni=TRUE) age.dif<-raschtree(dat~age, data=bdat, deriv=\"numeric\", minsize=400, alpha=.01, bonferroni=TRUE) summary(gender.dif) summary(age.dif) plot(gender.dif) plot(age.dif)", "Label": "Statistical Modeling", "Source": "https://osf.io/dkrhy/", "File": "BICBRaschModelsandPlots.R" }, { "ID": 847, "Comment": "set those study grades to NA which are outside the range of the grading system", "Code": "mSI$crt.gru_s_w2[mSI$crt.gru_s_w2 == 0] <- NA mSI$crt.gru_s_w2[mSI$crt.gru_s_w2 > 6] <- NA", "Label": "Data Variable", "Source": "https://osf.io/m6pb2/", "File": "Data_preparation_Sample_E.r" }, { "ID": 848, "Comment": "compute and save descriptives statistics of variables before aggregation and standardization", "Code": "descriptives <- round(select(psych::describe(mSI_descr), n, min, max, mean, sd),2) write.table(descriptives, file=\"Descriptives/descriptives_Sample_E_mSI.dat\", sep=\"\\t\")", "Label": "Data Variable", "Source": "https://osf.io/m6pb2/", "File": "Data_preparation_Sample_E.r" }, { "ID": 849, "Comment": "mean ratings of targets and decoys by condition", "Code": "tapply(dat.long$rating, list(dat.long$condition, dat.long$target), mean) tapply(dat.long$rating, list(dat.long$condition, dat.long$target), function(x) sd(x)/sqrt(length(x)))", "Label": "Data Variable", "Source": "https://osf.io/eg6w5/", "File": "experiment1d_analyses.R" }, { "ID": 850, "Comment": "paired contrasts on estimated marginal means to unpack interaction", "Code": "emm_options(lmer.df = \"satterthwaite\") fitlmer2.em <- emmeans::emmeans(fitlmer2, specs = ~ target*condition | target) fitlmer2.em # estimated marginal means contrast(fitlmer2.em, method = \"trt.vs.ctrl\", adjust = \"none\") # paired contrasts with no correction confint(contrast(fitlmer2.em, method = \"trt.vs.ctrl\", adjust = \"none\")) # 95% confidence intervals contrast(fitlmer2.em, method = \"trt.vs.ctrl\", adjust = \"holm\") # paired contrasts with holm correction s <- as.data.frame(summary(fitlmer2.em))", "Label": "Statistical Test", "Source": "https://osf.io/eg6w5/", "File": "experiment1d_analyses.R" }, { "ID": 851, "Comment": "Statistical tests: ECT Choice score vs Training criteria Did mice reduce their choice score in the test compared to 80% criteria? 
Using the JBTxECT_id.csv dataset", "Code": "wilcox.test(md_id$choice_score[md_id$rat_bedding != \"mixed\"], mu = 0.6) # 80% big = NCT score 0.6", "Label": "Statistical Test", "Source": "https://osf.io/z6nm8/", "File": "Stats_figures_ECT.R" }, { "ID": 852, "Comment": "Make figures and tables; check for correlations between individuals", "Code": "chart.Correlation(ind_res[[14]][,1:6] , histogram=TRUE, pch=19)", "Label": "Visualization", "Source": "https://osf.io/rmcuy/", "File": "Model_analysis.R" }, { "ID": 853, "Comment": "define plot styling; pp_color for predictive draw lines, set in plotting_style.R", "Code": "cat(\"\\n\\nPlotting posterior predictive checks...\\n\") cat(\"Ignore coordinate system and colour warnings;;\", \"these are expected behavior\\n\") chains <- rstan::extract(fit) n_checks = length(chains$titer_rep_censored[, 1]) sample_checks = sample(1:n_checks, n_to_plot) print(sample_checks) print(names(chains)) cat(sprintf(\"\\nPlotting pp checks...\\n\\n\")) real_titers <- dat$log10_titer rep_titers <- chains$titer_rep_censored[sample_checks, ] plot_upper <- pp_check(10^(real_titers), yrep = 10^(rep_titers), ## convert to RML titer fun = ppc_dens_overlay, alpha = 0.2) + ggtitle(\"Predictive checks\") + scale_color_manual(name=\"\", labels = c(\"real data\", \"posterior predictive draws\"), values = c(\"black\", pp_color)) + scale_x_continuous( trans = 'log10', labels = trans_format('log10', math_format(10^.x))) + coord_cartesian(xlim = c(10^0.5, 10^6.5)) + expand_limits(x = 0.5, y = 0) + theme_project(base_size = 30)", "Label": "Visualization", "Source": "https://osf.io/fb5tw/", "File": "figure_pp_check.R" }, { "ID": 854, "Comment": "extract relevant data & calculate the Brier score for each participant", "Code": "lay.data <- NULL for (i in 1:nsubjects) { study.order <- as.numeric(unlist(strsplit(prediction.data$preorder[i], split=\"|\", fixed = TRUE)))[2:28] label <- ifelse(prediction.data$Conditie[i]==1,\"des.\",\"bf.\") subject.data <- NULL for (j in 1:nstudies){ understanding <- eval(parse(text = paste0(\"prediction.data$`\",study.order[j],\"_\",label,\"0`\" )))[i] # 1 = did not understand, otherwise: NA replication.belief <- eval(parse(text = paste0(\"prediction.data$`\",study.order[j],\"_\",label,\"belief`\" )))[i] - 1 # now 0 = will not be replicated, 1 = will be replicated confidence.rating <- eval(parse(text = paste0(\"prediction.data$`\",study.order[j],\"_\",label,\"conf_1`\")))[i] # on a scale from 0 - 100 confidence.rating <- ifelse(replication.belief == 0, confidence.rating*-1, confidence.rating) # make confidence in replication failure negative confidence.rating <- confidence.rating / 200 + .5 # convert to 0-1 scale study <- j condition <- ifelse(label == \"des.\",\"DescriptionOnly\",\"DescriptionPlusStatistics\") replication.outcome <- replication.outcomes[j] replication.effectsize <- replication.effectsizes[j] ind.subject.data <- cbind(study,condition,understanding,replication.belief,confidence.rating, replication.outcome,replication.effectsize) subject.data <- rbind(subject.data,ind.subject.data) } subject <- i ind.lay.data <- cbind(subject,subject.data) lay.data <- rbind(lay.data,ind.lay.data) } rm(study.order,label,condition,subject.data,understanding,replication.belief,confidence.rating, i,j,study,replication.outcome,replication.effectsize,ind.subject.data,subject,ind.lay.data)", "Label": "Data Variable", "Source": "https://osf.io/x72cy/", "File": "PreprocessingQualtricsData.R" }, { "ID": 855, "Comment": "3. Data exclusion based on set criteria: 1. if participants failed the attention check (i.e., did not press 'NO' and 75% (range 70-80 is allowed)); 2. if a study description is not understood, exclude this study for this participant; 3. if a study is not understood by > 50% of the participants, exclude this study; 4. if a participant does not understand > 50% of the studies, exclude this participant", "Code": "bogus.study <- 27 clean.data <- as.data.frame(lay.data, stringsAsFactors = FALSE) correct.range <- clean.data[clean.data$study==bogus.study,]$confidence.rating correct.range <- rep(correct.range >= .1 & correct.range <= .15, each = nstudies) # correct range NO and 70-80% --> .1-.15 on the confidence scale clean.data.1 <- clean.data[correct.range,] # apply 1.
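# criterion 2: understanding is NA when the description was understood, so keep only those rows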
clean.data.2 <- clean.data.1[is.na(clean.data.1$understanding),] # apply 2. remove.studies <- which(table(clean.data.2$study)% filter(!is.na(windxcoordmeanbearing) & !is.na(trashxcoordbearing)) %>% #Removes data where we don't have wind or don't have receipt direction from the analysis. dplyr::select(windxcoordmeanbearing, windycoordmeanbearing, row) %>% rename(x = windxcoordmeanbearing, y = windycoordmeanbearing) %>% bind_rows(CompleteDataWithGoogle %>% filter(!is.na(windxcoordmeanbearing) & !is.na(trashxcoordbearing)) %>% dplyr::select(trashycoordbearing, trashxcoordbearing, row) %>% rename(x = trashycoordbearing, y = trashxcoordbearing))", "Label": "Data Variable", "Source": "https://osf.io/82dqk/", "File": "PublicationCode2.R" }, { "ID": 860, "Comment": "Check if the Trial column exists; if not, create it", "Code": "if (!\"Trial\" %in% colnames(df)) { df <- cbind(Trial = NA, df) } trial_counter <- 1 for (i in seq_along(index_pairs$start)) { start_pos <- index_pairs$start[i] end_pos <- index_pairs$end[i]", "Label": "Data Variable", "Source": "https://osf.io/yfegm/", "File": "allocateTrials.r" }, { "ID": 861, "Comment": "group-level Pi/Pb for each set size on the probability scale", "Code": "PiIdx <- NULL for (j in 1:nsubj) PiIdx <- c(PiIdx, which(names(as.data.frame(mcmcChain)) == paste0(\"Pi[\", j, \",\", s, \"]\"))) PbIdx <- NULL for (j in 1:nsubj) PbIdx <- c(PbIdx, which(names(as.data.frame(mcmcChain)) == paste0(\"Pb[\", j, \",\", s, \"]\"))) meanPi[s,experiment,] <- rowMeans(mcmcChain[, PiIdx]) # means across subjects of posterior means of Pi for each set size meanPb[s,experiment,] <- rowMeans(mcmcChain[, PbIdx]) } DeltaPi[,experiment] <- mcmcChain[, paste0(\"dPi\")] # posterior samples of slope of Pi over set size DeltaPb[,experiment] <- mcmcChain[, paste0(\"dPb\")] print(colMeans(DeltaPi>0)) # proportion of posterior samples > 0 save(MuPi, MuPb, meanPi, meanPb, DeltaPi, DeltaPb, file=parChainFile) }", "Label": "Statistical Modeling", "Source": "https://osf.io/qy5sd/", "File": "PairsBindingRSS_MPT.R" }, { "ID": 862, "Comment": "determine manifestations and visualize cultural value dimensions", "Code": "d$SVS_d1 <- d$SVS_harmony - d$SVS_mastery d$SVS_d2 <- d$SVS_egalit - d$SVS_hierarchy d$SVS_d3 <- (d$SVS_autona+d$SVS_autoni)/2 - d$SVS_embed cor(d[grepl(\"_d\",names(d))]) f$SVS_d1 <- f$SVS_harmony - f$SVS_mastery f$SVS_d2 <- f$SVS_egalit - f$SVS_hierarchy f$SVS_d3 <- (f$SVS_autona+f$SVS_autoni)/2 - f$SVS_embed cor(f[grepl(\"_d\",names(f))]) pcf <- princomp(f[,3:9], cor=T) summary(pcf) loadings(pcf)", "Label": "Visualization", "Source": "https://osf.io/qxf5t/", "File": "TSST_Meta.R" }, { "ID": 863, "Comment": "CTrees: analyze the trees for each agreement indicator individually. Variation: correlation of reported likelihood ~ proportion unstable:", "Code":
"png(\"output/figures/paper/ctree_rhopsi.png\", width = 600, height = 350) VS_ <- VS VS_ <- VS_[!is.na(VS_$rho_llhd) & !is.na(VS_$lagZ_20), ] VS_ <- VS_[!VS_$gtype_class %in% c(\"IFrc\", \"IFsc\", \"MFcr\", \"FCxr\") & VS_$band == \"TL\", ] VS_$gtype_class[VS_$gtype_class %in% \"SH\" & VS_$gtype_rank == \"secondary\"] <- \"SH/FC\" CT <- partykit::ctree(rho_llhd ~ nPDays , data = VS_, alpha = 0.05, maxdepth = 2) Vars <- setVars4ctreeColoring(VS_) plotCTree(CT) dev.off()", "Label": "Statistical Modeling", "Source": "https://osf.io/w7pjy/", "File": "analyze_agreementIndicators.R" }, { "ID": 864, "Comment": "Selecting and recoding items that were measured in all waves", "Code": "d <- df %>% select(id, wave, lsat = sat6, fsat = sat1i3, per1i2, per1i7, per1i13) %>% mutate(per1i2 = invert(per1i2, 5))", "Label": "Data Variable", "Source": "https://osf.io/fdp39/", "File": "functions.r" }, { "ID": 865, "Comment": "6. Extract & plot estimates Is evolution more comparable to B0 or B1?", "Code": "describe_posterior((r.delta.z[,1,])) # B0_Conc describe_posterior((r.delta.z[,2,])) # B0_Disc describe_posterior((r.delta.z[,3,])) # B1_Conc describe_posterior((r.delta.z[,4,])) # B1_Disc", "Label": "Visualization", "Source": "https://osf.io/pnug5/", "File": "2.Ginteger_CrossSex_skewers.R" }, { "ID": 866, "Comment": "for each subject, aggregate his/her selfratings across all interactions (in second and third week of eventbased assessment)", "Code": "app_SR_aggr <- aggregate(app_SR, by=list(app_SR$id_a), mean)", "Label": "Data Variable", "Source": "https://osf.io/m6pb2/", "File": "Data_preparation_Sample_C.R" }, { "ID": 867, "Comment": "recode abitur grades which were not provided but stored as 0, and grades that were provided in units of the wrong grading system", "Code": "surv2345$abitur_grade[surv2345$abitur_grade == 0] <- NA surv2345$abitur_grade[surv2345$abitur_grade == 12] <- 2.0 surv2345$abitur_grade[surv2345$abitur_grade == 13] <- 1.7", "Label": "Data Variable", "Source": "https://osf.io/m6pb2/", "File": "Data_preparation_Sample_C.R" }, { "ID": 868, "Comment": "define a function which, for each person, selects the first nonNA measurement he provided out of three time points ( out of three variables)", "Code": "firstnonNA_3timepoints <- function(df) {if(all(is.na(df))){ NA } else if (!is.na(df[1])){ df[1] } else if (!is.na(df[2])){ df[2] } else if (!is.na(df[3])){ df[3] } }", "Label": "Data Variable", "Source": "https://osf.io/m6pb2/", "File": "Data_preparation_Sample_C.R" }, { "ID": 869, "Comment": "for the exam grades 15 to 18 which have been assessed only at the last occasions (Survey 5), save the grade in new variable for consistency in notation", "Code": "surv2345$retro_grades_15 <- surv2345$retro_grades_15_t5 surv2345$retro_grades_16 <- surv2345$retro_grades_16_t5 surv2345$retro_grades_17 <- surv2345$retro_grades_17_t5 surv2345$retro_grades_18 <- surv2345$retro_grades_18_t5", "Label": "Data Variable", "Source": "https://osf.io/m6pb2/", "File": "Data_preparation_Sample_C.R" }, { "ID": 870, "Comment": "select variables (all 18 exam grades) to be aggregated for mean exam grade", "Code": "retro_grades_df <- as.matrix(surv2345[,paste0(\"retro_grades_\",1:18)])", "Label": "Data Variable", "Source": "https://osf.io/m6pb2/", "File": "Data_preparation_Sample_C.R" }, { "ID": 871, "Comment": "Select variables for the analyses and save data frame that will be uploaded in the OSF", "Code": "connect_osf <- select(connect, Z_Raven_self:Z_MWTB_obj, Z_global_selfeval:Z_achievement) write.table(connect_osf, 
file=\"Data_Sample_C_connect.txt\",sep = \"\\t\",col.names=TRUE)", "Label": "Data Variable", "Source": "https://osf.io/m6pb2/", "File": "Data_preparation_Sample_C.R" }, { "ID": 872, "Comment": "DESCRIPTIVE STATISTICS compute and save sample statistics (age distribution, number of females)", "Code": "age <- round(select(psych::describe(connect_descr$age), n, min, max, mean, sd),2) age$n <- nrow(connect_descr) sampstats <- mutate(age, female=plyr::count(connect_descr$sex)[plyr::count(connect_descr$sex)[,1]==\"1\",][\"freq\"] ) write.table(sampstats, file=\"Descriptives/age_sex_Sample_C_connect.dat\", sep=\"\\t\", row.names=FALSE)", "Label": "Data Variable", "Source": "https://osf.io/m6pb2/", "File": "Data_preparation_Sample_C.R" }, { "ID": 873, "Comment": "compute and save correlation table of variables before aggregation and standardization", "Code": "cor_raw <- corcons(connect_descr) write.table(cor_raw, file=\"Descriptives/correlations_raw_Sample_C_connect.dat\", sep=\"\\t\")", "Label": "Data Variable", "Source": "https://osf.io/m6pb2/", "File": "Data_preparation_Sample_C.R" }, { "ID": 874, "Comment": "Hypothesis 1.1 Hypothesis 1 was tested via pairedsample t.tests (alphas need to be Bonferronicorrected) Examples for conscientious goal classes (first two rows of Table 1 Conscientious Goals). Remaining tests are simialr", "Code": "t.test(x = dt1$CP_Class_01, y = dt1$CN_Class_01, paired = TRUE, alternative = \"greater\") t.test(x = dt1$CP_Class_02, y = dt1$CN_Class_02, paired = TRUE, alternative = \"greater\")", "Label": "Statistical Test", "Source": "https://osf.io/ywm3r/", "File": "Analyses.R" }, { "ID": 875, "Comment": "Hypothesis 1.2 Hypothesis 1.2 was tested via partiallyoverlapping samples ttests. We report the analyses for the first panel of Table S2 (and for pvalues reported in Table 2). The remaining analyses are identical, but were performed on other goal classes", "Code": "Partover.test(dt1$CP_Class_01, dt1$HP_Class_01, stacked = TRUE, alternative = \"greater\") Partover.test(dt1$CP_Class_01, dt1$EP_Class_01, stacked = TRUE, alternative = \"greater\") Partover.test(dt1$CP_Class_01, dt1$XP_Class_01, stacked = TRUE, alternative = \"greater\") Partover.test(dt1$CP_Class_01, dt1$AP_Class_01, stacked = TRUE, alternative = \"greater\") Partover.test(dt1$CP_Class_01, dt1$OP_Class_01, stacked = TRUE, alternative = \"greater\") Partover.test(dt1$CP_Class_01, dt1$HN_Class_01, stacked = TRUE, alternative = \"greater\") Partover.test(dt1$CP_Class_01, dt1$EN_Class_01, stacked = TRUE, alternative = \"greater\") Partover.test(dt1$CP_Class_01, dt1$XN_Class_01, stacked = TRUE, alternative = \"greater\") Partover.test(dt1$CP_Class_01, dt1$AN_Class_01, stacked = TRUE, alternative = \"greater\") Partover.test(dt1$CP_Class_01, dt1$ON_Class_01, stacked = TRUE, alternative = \"greater\")", "Label": "Statistical Test", "Source": "https://osf.io/ywm3r/", "File": "Analyses.R" }, { "ID": 876, "Comment": "Examples of Tobit regressions of the subjective importance of specific goals on HEXACO traits (first two lines of Table 3). 
fit2 <- censReg(dt2$G_C_02_EssereDegnoFiducia_Importance ~ predictors, left = 1, right = 9) summary(fit2)", "Label": "Statistical Modeling", "Source": "https://osf.io/ywm3r/", "File": "Analyses.R" }, { "ID": 877, "Comment": "Hierarchical regression predicting the willingness to change conscientiousness according to CBFI (Table 4). For ease of formatting, I used the R package AutoModel", "Code": "Data4HierarchicalReg <- select(dt2, CBFI_C, POS, HEXACO_H:HEXACO_O, BFI2_O:BFI2_N, Importance_GC, Importance_GU) %>% scale() %>% data.frame() Data4HierarchicalReg$BFTGI_C_bin <- as.numeric(dt2$BFTGI_C == 3)", "Label": "Statistical Modeling", "Source": "https://osf.io/ywm3r/", "File": "Analyses.R" }, { "ID": 878, "Comment": "Hypothesis 3.2: multiple regressions predicting each goal class from HEXACO traits (we report examples reproducing the first two rows of Table 8; the code for the others is similar, save for the goal class). P-values need to be corrected using the Bonferroni method, considering 9 multiple regressions (multiply them by 9 and transform values > 1 to 1)", "Code": "lm(G08Rules ~ ., data = select(dt3, G08Rules, HEXACO_H:HEXACO_O)) %>% lm.beta %>% summary lm(G10Control ~ ., data = select(dt3, G10Control, HEXACO_H:HEXACO_O)) %>% lm.beta %>% summary", "Label": "Statistical Modeling", "Source": "https://osf.io/ywm3r/", "File": "Analyses.R" }, { "ID": 879, "Comment": "DIFFERENTIAL ITEM FUNCTIONING: create trichotomous income variable", "Code": "data = data %>% mutate( income3 = recode(income, '1=1;; 2=1;; 3=1;; 4=1;; 5=1;; 6=1;; 7=1;; 8=1;; 9=2;; 10=2;; 11=2;; 12=2;; 13=2;; 14=3;; 15=3;; 16=3;; 17=3;; 18=3')) q1merit = data[c(\"Q1A\", \"Q1B\", \"Q1H\", \"Q1N\")] q1opportunity = data[c(\"Q1C\", \"Q1D\", \"Q1F\", \"Q1G\")] q1chance = data[c(\"Q1I\", \"Q1O\", \"Q1P\", \"Q1Q\")] q2merit = data[c(\"Q2A\", \"Q2B\", \"Q2H\", \"Q2N\")] q2opportunity = data[c(\"Q2C\", \"Q2D\", \"Q2F\", \"Q2G\")] q2chance = data[c(\"Q2I\", \"Q2O\", \"Q2Q\", \"Q2R\")] q3merit = data[c(\"Q3A\", \"Q3B\", \"Q3H\", \"Q3N\")] q3opportunity = data[c(\"Q3C\", \"Q3D\", \"Q3F\", \"Q3G\")] q3chance = data[c(\"Q3I\", \"Q3O\", \"Q3P\", \"Q3Q\")] q4merit = data[c(\"Q4B\", \"Q4C\", \"Q4I\", \"Q4N\")] q4opportunity = data[c(\"Q4D\", \"Q4E\", \"Q4G\", \"Q4H\")] q4chance = data[c(\"Q4J\", \"Q4O\", \"Q4P\", \"Q4Q\")] sesmerit = data[c(\"hs_merit\", \"ls_merit\", \"ha_merit\", \"la_merit\")] sesopportunity = data[c(\"hs_opportunity\", \"ls_opportunity\", \"ha_opportunity\", \"la_opportunity\")] seschance = data[c(\"hs_chance\", \"ls_chance\", \"ha_chance\", \"la_chance\")] merit_full = data[c(\"hs_efft\", \"hs_abil\", \"ls_efft\", \"ls_abil\", \"ha_efft\", \"la_efft\")] gender = data$gender age = data$AGE4 politic = data$D3 income = data$income3 educ = data$EDUC4", "Label": "Data Variable", "Source": "https://osf.io/25a6x/", "File": "BeliefsScale_IRT_Rscript.R" }, { "ID": 880, "Comment": "Display distribution of efficacy judgments (can be used to set a prior on the next experiment)", "Code": "eff_all <- c(eff_nograph, eff_graph) par(mfrow=c(2,3)) myhist(\"Efficacy\", eff_all, 1, 9, N/2) myhist(\"Efficacy - no graph\", eff_nograph, 1, 9, N/4) myhist(\"Efficacy - graph\", eff_graph, 1, 9, N/4)
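# report the grand mean of efficacy across both conditions alongside the histograms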
cat(\"Grand mean for efficacy: \", format_number(mean(eff_all)), \"\\n\")", "Label": "Visualization", "Source": "https://osf.io/zh3f4/", "File": "exp1 simulated analysis.R" }, { "ID": 881, "Comment": "Models by social network measure 1. Indegree", "Code": "m1 <- glmer(pup_year_surv~indegree + valley + overall.index + pup_sex + pup_littersizeborn + mother_age + network_size + pup_emerjdate + overall.index*indegree + valley*indegree + (1|mother_uid) + (1|pup_yrborn), control = glmerControl(\"bobyqa\", optCtrl=list(maxfun=2e5)), data=sur_data, family= binomial) summary(m1)", "Label": "Data Variable", "Source": "https://osf.io/wc3nq/", "File": "7) agr_yearly_model.R" }, { "ID": 882, "Comment": "remove upper triangle of correlation matrix", "Code": "if(removeTriangle[1]==\"upper\"){ Rnew <- as.matrix(Rnew) Rnew[upper.tri(Rnew, diag = TRUE)] <- \"\" Rnew <- as.data.frame(Rnew) }", "Label": "Data Variable", "Source": "https://osf.io/3b59h/", "File": "Analysis.R" }, { "ID": 883, "Comment": "put SDs in parantheses", "Code": "mutate(gender_sd = paste0(\"(\", gender_sd, \")\"), age_sd = paste0(\"(\", age_sd, \")\"), parent_sd = paste0(\"(\", parent_sd, \")\"), pol_sd = paste0(\"(\", pol_sd, \")\")) %>% mutate(gender_sd = paste0(\"(\", gender_sd, \")\"), age_sd = paste0(\"(\", age_sd, \")\"), parent_sd = paste0(\"(\", parent_sd, \")\"), pol_sd = paste0(\"(\", pol_sd, \")\")) %>% mutate(intent_apply_job_sd = paste0(\"(\", intent_apply_job_sd, \")\"), intent_apply_ps_sd = paste0(\"(\", intent_apply_ps_sd, \")\"), psm_sd = paste0(\"(\", psm_sd, \")\"), pofit_sd = paste0(\"(\", pofit_sd, \")\"), pjfit_sd = paste0(\"(\", pjfit_sd, \")\")) %>% mutate(intent_apply_job_sd = paste0(\"(\", intent_apply_job_sd, \")\"), intent_apply_ps_sd = paste0(\"(\", intent_apply_ps_sd, \")\"), psm_sd = paste0(\"(\", psm_sd, \")\"), pofit_sd = paste0(\"(\", pofit_sd, \")\"), pjfit_sd = paste0(\"(\", pjfit_sd, \")\")) %>%", "Label": "Data Variable", "Source": "https://osf.io/3b59h/", "File": "Analysis.R" }, { "ID": 884, "Comment": "5.3 Pirateplot for dependent variables", "Code": "df_pirate_dv <- tibble(dv = c(df$intent_apply_job, df$intent_apply_ps), group = c(rep.int(0, length(df$intent_apply_job)), rep.int(1, length(df$intent_apply_ps)))) %>% mutate(group = factor(group, labels = c(\"Intention to\\napply for job\", \"Intention to apply\\nfor public service\"))) pirate_dvs <- yarrr::pirateplot(dv ~ group, data = df_pirate_dv, inf.method = \"ci\", xlab = \"Dependent variable\", ylab = \"\", theme = 2, cex.lab = 1.2, cex.axis = 1.2, cex.names = 1.2) pirate_dvs <- recordPlot() # contains all plotting information png(\"./output/Appendix_pirate_dvs.png\") pirate_dvs dev.off()", "Label": "Visualization", "Source": "https://osf.io/3b59h/", "File": "Analysis.R" }, { "ID": 885, "Comment": "Load the functions for the raincloud plots", "Code": "source('funcs/R_rainclouds.R')", "Label": "Visualization", "Source": "https://osf.io/4fvwe/", "File": "load_my_functions.R" }, { "ID": 886, "Comment": "Save the result and load if you run the permutation test on a different computer than the visualizations", "Code": "save(list.res,file=\"results.Rdata\") load(\"results.Rdata\")", "Label": "Visualization", "Source": "https://osf.io/greqt/", "File": "02_analysis.R" }, { "ID": 887, "Comment": "data as a scatter plot, column s for x and column v for y, size 4 points, use the sat_curve data frame as the source", "Code": "geom_point(data = sat_curve, aes(x = s, y = v), size = 4) +", "Label": "Visualization", "Source": "https://osf.io/9e3cu/", "File": 
"sat_curve.R" }, { "ID": 888, "Comment": "fit as a line plot, use the mm_fit data frame as the data source", "Code": "geom_line(data = mm_fit, aes(x = s, y = v)) +", "Label": "Visualization", "Source": "https://osf.io/9e3cu/", "File": "sat_curve.R" }, { "ID": 889, "Comment": "change the axis label text to size 22, bold, and black color", "Code": "axis.text.x = element_text(size = 22, face = \"bold\", color = \"black\"), axis.text.y = element_text(size = 22, face = \"bold\", color = \"black\")) +", "Label": "Visualization", "Source": "https://osf.io/9e3cu/", "File": "sat_curve.R" }, { "ID": 890, "Comment": "sequence of age inputs within range of model: (15, 75) years old", "Code": "seq.length <- length(seq(15, 75, 1))", "Label": "Data Variable", "Source": "https://osf.io/92e6c/", "File": "analyze_scan_model_hadza.R" }, { "ID": 891, "Comment": "Define link function with softmax transformation The following function is taken from Koster and McElreath (2017) Modifications made to accommodate current model structure (e.g. addition of com_id, month)", "Code": "link.mn <- function( data ) { K <- dim(post$v_id)[3] + 1 ns <- dim(post$v_id)[1] if ( missing(data) ) stop( \"BOOM: Need data argument\" ) n <- seq.length softmax2 <- function(x) { x <- max(x) - x exp(-x)/sum(exp(-x)) } p <- list() for ( i in 1:n ) { p[[i]] <- sapply( 1:K , function(k) { if ( k < K ) { ptemp <- post$a[,k] + post$bA[,k] * data$age_z[i] + post$bQ[,k] * data$age_zq[i] + post$bT[,k] * data$time_z[i] + post$bTQ[,k] * data$time_zq[i] if ( data$id[i]>0 ) ptemp <- ptemp + post$v_id[,data$id[i],k] if ( data$com_id[i]>0 ) ptemp <- ptemp + post$v_com[,data$com_id[i],k] if ( data$month_id[i]>0 ) ptemp <- ptemp + post$v_month[,data$month_id[i],k] } else { ptemp <- rep(0,ns) } return(ptemp) })", "Label": "Statistical Modeling", "Source": "https://osf.io/92e6c/", "File": "analyze_scan_model_hadza.R" }, { "ID": 892, "Comment": "The values are converted to probabilities using the softmax function which ensures that the predicted values across categories sum to 100% probabilities.", "Code": "for ( s in 1:ns ) p[[i]][s,] <- softmax2( p[[i]][s,] ) } return(p) } for ( s in 1:ns ) p[[i]][s,] <- softmax2( p[[i]][s,] ) } return(p) } low_age <- (15 - mean_age_female)/sd_age_female high_age <- (75 - mean_age_female)/sd_age_female", "Label": "Statistical Modeling", "Source": "https://osf.io/92e6c/", "File": "analyze_scan_model_hadza.R" }, { "ID": 893, "Comment": "everything will be calculated over the \"adult\" interval from 15 to 75", "Code": "age_seq <- (seq(15, 75, 1)- mean_age_female)/sd_age_female", "Label": "Data Variable", "Source": "https://osf.io/92e6c/", "File": "analyze_scan_model_hadza.R" }, { "ID": 894, "Comment": "We create a data frame from age_seq and its second order polynomial, holding most of predictors at their sample mean. In the original Koster+McElreath paper, they specified 8:00 am for the time of day in order to effect a more sensible alignment with the empirical data. In this case, we hold that parameter at 0, which therefore corresponds with the mean time of day in our dataset for behavioral observations (0.532 ~12:45). 
Also, as we noted earlier in the notes about the multinomial link function, we set \"id\" to zero in order to average over the random effects.", "Code": "pred_dat <- data.frame( id = 0 , age_z = age_seq, age_zq = age_seq^2, time_z = 0, time_zq = 0, com_id = 0, month_id = 0 ) p <- link.mn ( pred_dat )", "Label": "Statistical Modeling", "Source": "https://osf.io/92e6c/", "File": "analyze_scan_model_hadza.R" }, { "ID": 895, "Comment": "Biodiversity conversion: the general form is Index = (Maxent^wM * Abundance^wL)^(1/(wM+wL)), but one would need to take care of how this scales", "Code": "pro.fun <- function(x, abuntab, zonename){ (x^wM*abuntab[,which(names(abuntab)==zonename)]^wL)^(1/(wM+wL))}", "Label": "Statistical Modeling", "Source": "https://osf.io/5ejcq/", "File": "data_preperation_functions.R" }, { "ID": 896, "Comment": "Construct variable for number of family members in country", "Code": "famcountry = data4$partnerwhere + data4$mumwhere + data4$dadwhere table(famcountry) data4$famcountry <- famcountry hist(data4$famcountry) sum(is.na(data4$famcountry)) #175", "Label": "Data Variable", "Source": "https://osf.io/qjfv4/", "File": "refugeeservice_varwork2.R" }, { "ID": 897, "Comment": "Plot network latent variable on CFA latent variable", "Code": "layout(t(c(1,2))) plot(cfaLV, netLV, main = \"Network Latent Variable\\non CFA Latent Variable\", ylab = \"Network Latent Variable\", xlab = \"CFA Latent Variable\")", "Label": "Visualization", "Source": "https://osf.io/5hpjn/", "File": "NetworkToolbox.R" }, { "ID": 898, "Comment": "Plot participant means on CFA latent variable", "Code": "plot(cfaLV, pmeans, main = \"Participant Means on\\nCFA Latent Variable\", ylab = \"Participant Means\", xlab = \"CFA Latent Variable\")", "Label": "Visualization", "Source": "https://osf.io/5hpjn/", "File": "NetworkToolbox.R" }, { "ID": 899, "Comment": "initialize matrices to store validation results", "Code": "WklValidation <- matrix(nrow = nrow_max, ncol = 13, dimnames = list(seq(nrow_max), c(\"distribution\", \"nProfiles_checked\", \"day_after_burial\", \"timewindow_lower\", \"timewindow_upper\", \"likelihood_reported\", \"likelihoodSpread_reported\", \"distribution_reported\", \"sensitivity_reported\", \"size_reported\", \"sizeSpread_reported\", \"grain_size_reported\", \"data_quality\"))) WklValidation_char <- matrix(nrow = nrow_max, ncol = 10, dimnames = list(seq(nrow_max), c(\"vf_uuid\", \"pwl_uuid\", \"pwl\", \"validation_date\", \"band\", \"timing_mode\", \"gtype_class\", \"gtype_rank\", \"comment\", \"danger_rating\"))) vf_uuid <- NA", "Label": "Data Variable", "Source": "https://osf.io/w7pjy/", "File": "PWLcapturedByModel.R" }, { "ID": 900, "Comment": "run linear models on predicted probabilities, to calculate the trend", "Code": "models <- pred %>% group_by(x, country) %>% nest() %>% mutate(models = map(data, ~ lm(predicted ~ wave, data = .))) %>% spread_coef(models, se = TRUE)", "Label": "Statistical Modeling", "Source": "https://osf.io/7wd8e/", "File": "06 - Trends.R" }, { "ID": 901, "Comment": "one participant has a typing error in the age variable (stated he was 2).
Set to NA.", "Code": "mst1[which(mst1$age == 2), \"age\"] <- NA", "Label": "Data Variable", "Source": "https://osf.io/m6pb2/", "File": "Data_preparation_Sample_D.r" }, { "ID": 902, "Comment": "association between vote and ideology", "Code": "ggplot(dat, aes(x = Ideology, y = Percent.Vote.Rep)) + geom_vline(xintercept = .50, color = \"gray50\", linetype = 2) + geom_hline(yintercept = .50, color = \"gray50\", linetype = 2) + stat_poly_line() + stat_correlation(label.x = \"left\", small.r = TRUE) + geom_point() + geom_text_repel(size = 3, aes(label = Group)) + labs(y = \"Proportion Voting Republican\", x = \"Ideology\") + coord_cartesian(xlim = c(0, 1), ylim = c(0, 1)) + theme_cowplot() ggsave(\"figures/predictor_scatter.pdf\", width = 5.5, height = 5)", "Label": "Data Variable", "Source": "https://osf.io/hfxsb/", "File": "basic_mods.R" }, { "ID": 903, "Comment": "Make a dot and a line color column depending on the significance level Check significance level and if significant add respective color to new column", "Code": "ageR = data.frame(dotCol = rep(ifelse(summary(lm(initLevel ~ age, data = id_df))$coefficients[2,4]< 0.05, \"khaki3\", \"lightgrey\"), length(unique(id_df$userCode))), lineCol = rep(ifelse(summary(lm(initLevel ~ age, data = id_df))$coefficients[2,4]< 0.05, \"cyan4\", \"darkgrey\"), length(unique(id_df$userCode)))) educR = data.frame(dotCol = rep(ifelse(summary(lm(initLevel ~ educ_y, data = id_df))$coefficients[2,4]< 0.05, \"khaki3\", \"lightgrey\"), length(unique(id_df$userCode))), lineCol = rep(ifelse(summary(lm(initLevel ~ educ_y, data = id_df))$coefficients[2,4]< 0.05, \"cyan4\", \"darkgrey\"), length(unique(id_df$userCode)))) blpR = data.frame(dotCol = rep(ifelse(summary(lm(initLevel ~ BLP, data = id_df))$coefficients[2,4]< 0.05, \"khaki3\", \"lightgrey\"), length(unique(id_df$userCode))), lineCol = rep(ifelse(summary(lm(initLevel ~ BLP, data = id_df))$coefficients[2,4]< 0.05, \"cyan4\", \"darkgrey\"), length(unique(id_df$userCode)))) actR = data.frame(dotCol = rep(ifelse(summary(lm(initLevel ~ Act_total, data = id_df))$coefficients[2,4]< 0.05, \"khaki3\", \"lightgrey\"), length(unique(id_df$userCode))), lineCol = rep(ifelse(summary(lm(initLevel ~ Act_total, data = id_df))$coefficients[2,4]< 0.05, \"cyan4\", \"darkgrey\"), length(unique(id_df$userCode)))) motivR = data.frame(dotCol = rep(ifelse(summary(lm(initLevel ~ socioAffect_init, data = id_df))$coefficients[2,4]< 0.05, \"khaki3\", \"lightgrey\"), length(unique(id_df$userCode))), lineCol = rep(ifelse(summary(lm(initLevel ~ socioAffect_init, data = id_df))$coefficients[2,4]< 0.05, \"cyan4\", \"darkgrey\"), length(unique(id_df$userCode)))) tmp = rbind.data.frame(ageR, educR, blpR, actR, motivR) corrDF = cbind(corrDF, tmp)", "Label": "Visualization", "Source": "https://osf.io/wcfj3/", "File": "2_idDiffs.R" }, { "ID": 904, "Comment": "Sample from those names 200 times, with replacement", "Code": "animal <- sample(categories, 200, replace = TRUE)", "Label": "Statistical Modeling", "Source": "https://osf.io/eks6u/", "File": "demo_script_2018_10_02.R" }, { "ID": 905, "Comment": "Create random variables Use rnorm() with varying means and SDs", "Code": "length <- rnorm(200, 30, 8) weight <- rnorm(200, 4, 2) happiness <- rnorm(200, 3.5, 1)", "Label": "Data Variable", "Source": "https://osf.io/eks6u/", "File": "demo_script_2018_10_02.R" }, { "ID": 906, "Comment": "Examine descriptive statistics Use the pipe, group_by(), and summarise() to get means, SDs, and medians for each category", "Code": "animal_data %>% group_by(animal) 
", "Label": "Data Variable", "Source": "https://osf.io/eks6u/", "File": "demo_script_2018_10_02.R" }, { "ID": 907, "Comment": "Subset data to include only the first two categories: use filter() to select only rows that pass the given logical test", "Code": "subset_1 <- animal_data %>% filter(animal == \"puppy\" | animal == \"kitten\")", "Label": "Data Variable", "Source": "https://osf.io/eks6u/", "File": "demo_script_2018_10_02.R" }, { "ID": 908, "Comment": "Use subset for t-test (Welch's t-test)", "Code": "t.test(weight ~ animal, data = subset_1)", "Label": "Statistical Test", "Source": "https://osf.io/eks6u/", "File": "demo_script_2018_10_02.R" }, { "ID": 909, "Comment": "plot spaghetti plot for minute 1 and minute 5", "Code": "df_1_5 <- rbind(df_min1, df_min5) df_1_5 <- df_1_5 %>% mutate(range=droplevels(range)) p <- ggplot(data = df_1_5, aes(x = range, y = count, group = pp)) p + geom_point() p + geom_line() p + geom_line() + facet_grid(. ~ condition) p <- ggplot(data = df_min, aes(x = range, y = count, group = pp)) p + geom_point() p + geom_line() p + geom_line() + facet_grid(. ~ condition)", "Label": "Visualization", "Source": "https://osf.io/xh36s/", "File": "behavior_analyses.R" }, { "ID": 910, "Comment": "do three-way ANOVA including the factor for which condition they are in", "Code": "bfFull = anovaBF(count ~ condition + range + experiment + condition:range:experiment, data = df_b) bfFull[4]/bfFull[3] bfFull[18]/bfFull[17] bf1 = anovaBF(count ~ condition + range + experiment, data = df_b)", "Label": "Statistical Test", "Source": "https://osf.io/xh36s/", "File": "behavior_analyses.R" }, { "ID": 911, "Comment": "pc: critical p-value. Overview: creates a vector of the same length as the number of tests submitted to p-curve, significant and not, computes the proportion of p-values expected to be smaller than {pc} given the d.f., and outputs the entire vector, with NA values where needed. F-tests (and thus t-tests)", "Code": "prop=ifelse(family==\"f\" & p<.05,1-pf(qf(1-pc,df1=df1, df2=df2),df1=df1, df2=df2, ncp=ncp33),NA)", "Label": "Statistical Test", "Source": "https://osf.io/ptfye/", "File": "Analysis.R" }, { "ID": 912, "Comment": "Create vector that numbers studies 1 to N, includes n.s.
studies", "Code": "k=seq(from=1,to=length(raw))", "Label": "Data Variable", "Source": "https://osf.io/ptfye/", "File": "Analysis.R" }, { "ID": 913, "Comment": "1.2 Parse the entered text into usable statistical results 1.3 Create test type indicator", "Code": "stat=substring(raw,1,1) #stat: t,f,z,c,r test=ifelse(stat==\"r\",\"t\",stat) #test: t,f,z,c (r-->t)", "Label": "Statistical Test", "Source": "https://osf.io/ptfye/", "File": "Analysis.R" }, { "ID": 914, "Comment": "Make red dot at the estimate", "Code": "points(hat,min(fit,na.rm=TRUE),pch=19,col=\"red\",cex=2)", "Label": "Visualization", "Source": "https://osf.io/ptfye/", "File": "Analysis.R" }, { "ID": 915, "Comment": "This loop creates a vector, blue, with 5 elements, with the proportions of p.01,p.02...p.05", "Code": "for (i in c(.01,.02,.03,.04,.05)) blue=c(blue,sum(ps==i,na.rm=TRUE)/ksig*100)", "Label": "Data Variable", "Source": "https://osf.io/ptfye/", "File": "Analysis.R" }, { "ID": 916, "Comment": "FIG 3: scree plot for parallel analysis", "Code": "rscree<-as.data.frame(cbind(spar$fa.values, spar$fa.simr)) colnames(rscree)<-c(\"Actual\",\"Resampled\") rscree$item<-seq.int(1, 12, 1) refplot<-pivot_longer(rscree, cols=1:2, names_to=\"Method\") refplot2<-refplot %>% filter(item<5) iscree<-as.data.frame(cbind(ipar$fa.values, ipar$fa.simr)) colnames(iscree)<-c(\"Actual\",\"Resampled\") iscree$item<-seq.int(1, 8, 1) insplot<-pivot_longer(iscree, cols=1:2, names_to=\"Method\") insplot2<-insplot %>% filter(item<5) refeig<-ggline(refplot2, x=\"item\", y=\"value\", group = \"Method\", color=\"Method\", size=1.1, palette = paletteer_d(\"palettetown::tangela\", direction=-1), xlab=\"Factor\", ylab=\"Eigenvalue\", title=\"Self-reflection\")+ theme_minimal_hgrid(font_size=12, font_family = \"Fira Sans Medium\") scree.a<-ggpar(refeig, ylim = c(0,6), yticks.by=1, legend = c(.7,.77), legend.title = \"\") scree.a inseig<-ggline(insplot2, x=\"item\", y=\"value\", group = \"Method\", color=\"Method\", size=1.1, palette = paletteer_d(\"palettetown::tangela\", direction=-1), xlab=\"Factor\", ylab=\"\", title=\"Insight\")+ theme_minimal_hgrid(font_size=12, font_family = \"Fira Sans Medium\") scree.b<-ggpar(inseig, ylim = c(0, 6), yticks.by=1, legend = c(.7,.77), legend.title = \"\") scree.b", "Label": "Visualization", "Source": "https://osf.io/qsa5w/", "File": "SRIS,FullScaleIRTModelsandPlots.R" }, { "ID": 917, "Comment": "make sure data are in chronological order", "Code": "pollenData <- pollenData[order(pollenData$age),] lakeData <- lakeData[order(lakeData$age),] charcoalData <- charcoalData[order(charcoalData$age),] tail(pollenData) tail(lakeData) tail(charcoalData)", "Label": "Data Variable", "Source": "https://osf.io/7h94n/", "File": "Malawi_interpolation.R" }, { "ID": 918, "Comment": "examine descriptives of newly created variables here we are combining the dplyr and summarytools packages to subset the data and then get descriptives", "Code": "eid_dat %>% dplyr::select(swl_mean, meim_ex_mean, meim_co_mean) %>% summarytools::descr()", "Label": "Data Variable", "Source": "https://osf.io/9gq4a/", "File": "Getting Started With R - Script Key.R" }, { "ID": 919, "Comment": "step 6: multiple regression with exploration and commitment predicting SWL this function is in base R so no package to call", "Code": "regression <- lm(swl_mean ~ meim_ex_mean + meim_co_mean, data=eid_dat) summary(regression) confint(regression)", "Label": "Statistical Modeling", "Source": "https://osf.io/9gq4a/", "File": "Getting Started With R - Script Key.R" }, { "ID": 920, 
"Comment": "step 7: create scattterplots using ggplot2 scatterplot of exploration with SWL there are many more options you can play with for customization. I just included some basics here", "Code": "ex_swl_scatter <- ggplot2::ggplot(eid_dat, aes(meim_ex_mean, swl_mean)) + geom_point() + ggtitle(\"My Scatterplot\") + xlab(\"Ethnic Identity Exploration (mean)\") + ylab(\"Satisfaction with Life (mean)\") + geom_smooth(method=\"lm\") ex_swl_scatter", "Label": "Visualization", "Source": "https://osf.io/9gq4a/", "File": "Getting Started With R - Script Key.R" }, { "ID": 921, "Comment": "scatterplot of commitment with SWL", "Code": "co_swl_scatter <- ggplot2::ggplot(eid_dat, aes(meim_co_mean, swl_mean)) + geom_point() + ggtitle(\"My Scatterplot\") + xlab(\"Ethnic Identity Commitment (mean)\") + ylab(\"Satisfaction with Life (mean)\") + geom_smooth(method=\"lm\") co_swl_scatter", "Label": "Visualization", "Source": "https://osf.io/9gq4a/", "File": "Getting Started With R - Script Key.R" }, { "ID": 922, "Comment": "step 8: indepedent samples ttests also in base R. note that Welch's ttest is the default this set is testing for mean differences by whether they were born in the U.S.", "Code": "t.test(eid_dat$meim_ex_mean ~ eid_dat$usborn) t.test(eid_dat$meim_co_mean ~ eid_dat$usborn) t.test(eid_dat$swl_mean ~ eid_dat$usborn)", "Label": "Statistical Test", "Source": "https://osf.io/9gq4a/", "File": "Getting Started With R - Script Key.R" }, { "ID": 923, "Comment": "step 11: create a series of boxplots to go with the previous ANOVAs note that the code here is only slightly different from the one variable case you can/should add elements to format the plots, as you did previously", "Code": "borngen_ex_boxplot <- ggplot2::ggplot(eid_dat, aes(usborn, meim_ex_mean, fill=firstgen)) + geom_boxplot(alpha = 0.5) borngen_ex_boxplot borngen_co_boxplot <- ggplot2::ggplot(eid_dat, aes(usborn, meim_co_mean, fill=firstgen)) + geom_boxplot(alpha = 0.5) borngen_co_boxplot borngen_swl_boxplot <- ggplot2::ggplot(eid_dat, aes(usborn, swl_mean, fill=firstgen)) + geom_boxplot(alpha = 0.5) borngen_swl_boxplot", "Label": "Visualization", "Source": "https://osf.io/9gq4a/", "File": "Getting Started With R - Script Key.R" }, { "ID": 924, "Comment": "Add a plotting function to get power curve", "Code": "plot_power_extend <- function(object, target = .80, ...) { nsim <- object[[1]]$n df <- purrr::map_dfr(object, powerinterval, .id = \"n\", ...) 
df$n <- as.integer(df$n) ggplot(df, aes(x = n, y = mean, group = 1)) + geom_hline(yintercept = target) + geom_smooth(method = \"glm\", formula = cbind(y * nsim, (1 - y) * nsim) ~ x, method.args = list(family = binomial(\"probit\"))) + geom_pointrange(aes(ymin = lower, ymax = upper)) + labs(y = \"power\") + ylim(0, 1) }", "Label": "Visualization", "Source": "https://osf.io/6ya5d/", "File": "Power_Analysis_OSF_22.R" }, { "ID": 925, "Comment": "check duplicate trials per participant (post)", "Code": "xtabs(~subj+trial,data=rawdat.all) table(rawdat.all$subj)", "Label": "Data Variable", "Source": "https://osf.io/c93vs/", "File": "exp02_prep.R" }, { "ID": 926, "Comment": "remove duplicate species per file, and species level data", "Code": "data2.5 <- data2[!data2$species == 'Certhia_sp.',] data2.6 <- data2.5[!data2.5$species == 'Regulus_sp.',] data3 <- data2.6 %>% group_by(Plot_no) %>% mutate(whichday = as.integer(factor(date))) data4 <- data3 %>% group_by(Plot_no, filename) %>% mutate(seq=cur_group_id())", "Label": "Data Variable", "Source": "https://osf.io/uq3cv/", "File": "4_Compute_beta_diversity_metrics.R" }, { "ID": 927, "Comment": "Show sample sizes per country", "Code": "pa12_resp_s %>% group_by(CNT) %>% summarise(n = n())", "Label": "Visualization", "Source": "https://osf.io/8fzns/", "File": "0_Data-Prep.R" }, { "ID": 928, "Comment": "Gaze Duration Descriptive stats per ID", "Code": "BFSGDesc <- BirdFSG %>% group_by(ID, Stimuli) %>% summarise (MGaze = mean(Gaze), sd=sd(Gaze), n=n(), se = sd/sqrt(n)) ;; BFSGDesc$Stimuli <- paste( BFSGDesc$Stimuli, \"S\", sep=\"\") BFSGDesc BNADDesc <- BirdNAD %>% group_by(ID, Stimuli) %>% summarise (MGaze = mean(Gaze), sd=sd(Gaze), n=n(), se = sd/sqrt(n)) ;; BNADDesc$Stimuli <- paste( BNADDesc$Stimuli, \"S\", sep=\"\") BFSGDesc <- as.data.frame(BFSGDesc);; BNADDesc <- as.data.frame(BNADDesc);; BFSGDesc$Stimuli <- as.factor(as.character(BFSGDesc$Stimuli)) BNADDesc$Stimuli <- as.factor(as.character(BNADDesc$Stimuli)) BFSGDesc", "Label": "Data Variable", "Source": "https://osf.io/mhgcx/", "File": "Figures for paper.R" }, { "ID": 929, "Comment": "p<.05 ONEWAY ANOVA let's say we're interested in investigating the relationship between three species (setosa, versicolor, virginica) outcome: sepal width run Levene's test to check assumption of homogeneity of variance by default centres variable using median library(car) leveneTest(Sepal.Width~Species, data=iris, center=mean) p>.05 therefore assumption met running oneway ANOVA", "Code": "mod.a<-aov(Sepal.Width~Species, data=iris) summary(mod.a)", "Label": "Statistical Test", "Source": "https://osf.io/6g4js/", "File": "Analyses_Section_4.R" }, { "ID": 930, "Comment": "generate a random number between 1 and 1000 that functions as the index into an array of random numbers of size 1000", "Code": "return(randNo[floor(runif(1)*1000)]) }", "Label": "Data Variable", "Source": "https://osf.io/fsbzw/", "File": "functions.R" }, { "ID": 931, "Comment": "print the simulation number after every 10% of the simulations", "Code": "if(s%%(sims/10) == 0){ cat(paste(\" \", s)) } if(verbose){ print(\"\",quote=FALSE) print(\"\",quote=FALSE) print(\"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\", quote=FALSE) print(paste(\" Simulation\", s, \" \"), quote=FALSE) print(\"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\", quote=FALSE) } for(i in c(1:stages)){ if(i==picCreStage){ add.time(par.dat) if(verbose){ print(paste(\"------ Adding picture chunks -------\"), quote=FALSE) } p.knopf[[\"ctime\"]] = currTime
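# stamp each picture chunk with the current simulation time before adding it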
add.chunk(p.knopf) p.flasche[[\"ctime\"]] = currTime add.chunk(p.flasche) if(EXPT1){ p.ballon[[\"ctime\"]] = currTime add.chunk(p.ballon) p.blume[[\"ctime\"]] = currTime add.chunk(p.blume) } martin[[\"ctime\"]] = currTime add.chunk(martin) sarah[[\"ctime\"]] = currTime add.chunk(sarah) } if(verbose){ print(\"\",quote=FALSE) print(paste(\"=============== \", \"Stage \", i, \":\", input.stages[i], \" ===============\"), quote=FALSE) }", "Label": "Data Variable", "Source": "https://osf.io/fsbzw/", "File": "functions.R" }, { "ID": 932, "Comment": "mean level of transitive respondents per wave", "Code": "aggregate(long_svo_multi$trans,list(long_svo_multi$wave_cat),FUN=mean,na.rm =T)", "Label": "Data Variable", "Source": "https://osf.io/tw8dq/", "File": "SVOSM_analyses_final.R" }, { "ID": 933, "Comment": "create variable with only NA values", "Code": "long_svo_multi$vlengths = rep(NA, times = 2970)", "Label": "Data Variable", "Source": "https://osf.io/tw8dq/", "File": "SVOSM_analyses_final.R" }, { "ID": 934, "Comment": "calculate vector length per row", "Code": "for (i in 1:2970) {long_svo_multi$vlengths[i]=calculate_vlength(mean_self[i], mean_other[i])}", "Label": "Data Variable", "Source": "https://osf.io/tw8dq/", "File": "SVOSM_analyses_final.R" }, { "ID": 935, "Comment": "criterion to select applicable cases select only rows with transitive and good vector length response profiles as well as respondents with NA values this criterion used to select applicable cases for data analyses and figures", "Code": "data_criterion <- (long_svo_multi$trans == \"TRUE\" & long_svo_multi$trans_vlenght == \"TRUE\" | is.na(long_svo_multi$trans) | is.na(long_svo_multi$trans_vlenght))", "Label": "Data Variable", "Source": "https://osf.io/tw8dq/", "File": "SVOSM_analyses_final.R" }, { "ID": 936, "Comment": "Kolmogorov-Smirnov k-samples test for SVO angles", "Code": "ks.test(dropouts$SVO_angle, stayers$SVO_angle) ks.test(dropouts$SVO_angle, stayers$SVO_angle) ks.test(dropouts$SVO_angle, stayers$SVO_angle) ks.test(dropouts$SVO_angle, stayers$SVO_angle) ks.test(dropouts$SVO_angle, stayers$SVO_angle)", "Label": "Statistical Test", "Source": "https://osf.io/tw8dq/", "File": "SVOSM_analyses_final.R" }, { "ID": 937, "Comment": "Fisher's exact test for SVO types", "Code": "fisher.test(table(dropouts$SVO_dicho_num),table( stayers$SVO_dicho_num)) fisher.test(table(dropouts$SVO_dicho_num),table( stayers$SVO_dicho_num)) fisher.test(table(dropouts$SVO_dicho_num),table( stayers$SVO_dicho_num)) fisher.test(table(dropouts$SVO_dicho_num),table( stayers$SVO_dicho_num)) fisher.test(table(dropouts$SVO_dicho_num),table( stayers$SVO_dicho_num))", "Label": "Statistical Test", "Source": "https://osf.io/tw8dq/", "File": "SVOSM_analyses_final.R" }, { "ID": 938, "Comment": "lagged variables of distance to 45 boundary in SVO", "Code": "long_svo_multi$distance_45<-round(abs(45 - long_svo_multi$prior_SVO),digits = 2)", "Label": "Data Variable", "Source": "https://osf.io/tw8dq/", "File": "SVOSM_analyses_final.R" }, { "ID": 939, "Comment": "alter a variable to label dropouts as 1 and stayers as 0 for ML analysis and use this variable as the dependent variable", "Code": "data_adjusted$stay_dropout <- data_adjusted$change_cat data_adjusted$stay_dropout <- ifelse(data_adjusted$stay_dropout == 1,0, ifelse(data_adjusted$stay_dropout == 0,0,1)) data_adjusted$stay_dropout <- replace_na(data_adjusted$stay_dropout,value = 1) data_adjusted_1 <- data_adjusted %>% filter(trans == \"TRUE\" & trans_vlenght == \"TRUE\" | is.na(trans) | is.na(trans_vlenght))", "Label":
"Data Variable", "Source": "https://osf.io/tw8dq/", "File": "SVOSM_analyses_final.R" }, { "ID": 940, "Comment": "grid arrange to create figure with distribution of angles and SVO categories", "Code": "get_legend<-function(myggplot){ tmp <- ggplot_gtable(ggplot_build(myggplot)) leg <- which(sapply(tmp$grobs, function(x) x$name) == \"guide-box\") legend <- tmp$grobs[[leg]] return(legend)} bp <- theme(legend.position=\"none\")", "Label": "Visualization", "Source": "https://osf.io/tw8dq/", "File": "SVOSM_analyses_final.R" }, { "ID": 941, "Comment": "Assessing minor longitudinal differences in SVO angles select applicable cases and variables", "Code": "adjust <-dplyr::select(long_svo_multi,id,SVO_angle,wave_cat,trans,trans_vlenght) data_input <- na.omit(adjust) data_input$SVO_angle <- round(data_input$SVO_angle, digits = 2) data_input <- data_input %>% filter(trans == \"TRUE\" & trans_vlenght == \"TRUE\") data_wide_2 <- dcast(data_input, id ~ wave_cat, value.var=\"SVO_angle\") data_wide_2 <- na.omit(data_wide_2)", "Label": "Data Variable", "Source": "https://osf.io/tw8dq/", "File": "SVOSM_analyses_final.R" }, { "ID": 942, "Comment": "cumulative change of difference in SVO angles wave per wave", "Code": "data_wide_6$change_wave_1 <- data_wide_6$diff_w1 data_wide_6$change_wave_1_2 <- data_wide_6$diff_w1_w2 data_wide_6$change_wave_1_2_3 <- data_wide_6$change_wave_1_2 + data_wide_6$diff_w2_w3 data_wide_6$change_wave_1_2_3_4 <- data_wide_6$change_wave_1_2_3 + data_wide_6$diff_w3_w4 data_wide_6$change_wave_1_2_3_4_5 <- data_wide_6$change_wave_1_2_3_4 + data_wide_6$diff_w4_w5 data_wide_6$change_wave_1_2_3_4_5_6 <- data_wide_6$change_wave_1_2_3_4_5 + data_wide_6$diff_w5_w6 data_wide_7 <- data_wide_6[, c(1,8:13)] long_SVO_4 <- melt(data = data_wide_7, id.vars = c(\"id\"), variable.name = \"wave_compare\", value.name = \"cumulative_change_in_SVO\")", "Label": "Data Variable", "Source": "https://osf.io/tw8dq/", "File": "SVOSM_analyses_final.R" }, { "ID": 943, "Comment": "Set up ERGM formula and constraints", "Code": "if (substr(thresh, 1, 4) == 'prop') { ergm_formula <- y ~ gwesp(0.75, fixed=TRUE) + gwnsp(0.75, fixed=TRUE) constraints <- ~edges } else { ergm_formula <- y ~ edges + gwesp(0.75, fixed=TRUE) + gwnsp(0.75, fixed=TRUE) constraints <- ~. 
} ergm_fit <- ergm(ergm_formula, constraints = constraints) f <- paste0(\"Output/Singles/\", thresh, \"/ergmFit_\", sprintf(\"%03d\",i), \".RDS\") dir.create(dirname(f), showWarnings = FALSE, recursive = TRUE) saveRDS(ergm_fit, f)", "Label": "Statistical Modeling", "Source": "https://osf.io/5nh94/", "File": "01b_fit_single_ergm.R" }, { "ID": 944, "Comment": "min and max age, mean and sd age, percentage of men and women", "Code": "minAge = min(df_preQ$age) maxAge = max(df_preQ$age) meanAge = mean(df_preQ$age) sdAge = sd(df_preQ$age) females = length(which(df_preQ$gender == \"female\")) males = length(which(df_preQ$gender == \"male\")) other = length(which(df_preQ$gender == \"other\"))", "Label": "Data Variable", "Source": "https://osf.io/xh36s/", "File": "questionnaires_analyses.R" }, { "ID": 945, "Comment": "Check normality with QQ plot and ShapiroWilk test Build the linear model", "Code": "model <- lm(FW ~ condition, data = df_postQ)", "Label": "Statistical Test", "Source": "https://osf.io/xh36s/", "File": "questionnaires_analyses.R" }, { "ID": 946, "Comment": "Visualize correlations Insignificant correlations are left blank", "Code": "corrplot(res_cor$r, method = 'number', type=\"upper\", order=\"hclust\", p.mat = res_cor$P, sig.level = 0.05, insig = \"blank\")", "Label": "Visualization", "Source": "https://osf.io/xh36s/", "File": "questionnaires_analyses.R" }, { "ID": 947, "Comment": "plot a correlation matrix with all the different measures Create a dataframe with all the different measures of above", "Code": "df_corM <- data.frame(df_postQ$FW, df_postQ$DU, df_postQ$arousal, df_postQ$valence, df_postQ$control, df_postQ$responsibility, df_diff$Diff) colnames(df_corM) <- c('FW', 'DU', 'arousal', 'valence', 'control', 'responsibility', 'Diff')", "Label": "Visualization", "Source": "https://osf.io/xh36s/", "File": "questionnaires_analyses.R" }, { "ID": 948, "Comment": "calculate posterior draws for regression lines", "Code": "experiment_dat_virus <- dat[dat$material == material, ] print(experiment_dat_virus) experiment_dat_virus <- experiment_dat_virus %>% summarise(detection_limit = 10^first(detection_limit_log10_titer)) print(experiment_dat_virus) mat_dat <- dat[dat$material == material, ] if(material != \"Aerosols\") { scaling <- 1 ylab_expression <- expression(\"titer (TCID\"[50] * \"/mL media)\") max_x <- mat_dat %>% group_by(trial_unique_id, replicate) %>% filter(log10_titer == detection_limit_log10_titer) %>% select(time, trial_unique_id, replicate) %>% summarise(min_time = min(time)) %>% ungroup() %>% select(min_time) %>% max() } else { scaling <- 10 / 3 ## convert to tcid50/L air ylab_expression <- expression(\"titer (TCID\"[50] * \"/L air)\") max_x <- max(mat_dat$time) } print(max_x) plot_times <- dat %>% data_grid(time = seq_range(c(0, max_x), n = fineness)) print(material)", "Label": "Statistical Modeling", "Source": "https://osf.io/fb5tw/", "File": "figure_individual_fits.R" }, { "ID": 949, "Comment": "draw n_lines random regression lines", "Code": "func_samples <- tidy_draws %>% group_by(trial_unique_id, replicate) %>% sample_n(n_lines) %>% ungroup() print(func_samples)", "Label": "Visualization", "Source": "https://osf.io/fb5tw/", "File": "figure_individual_fits.R" }, { "ID": 950, "Comment": "cross product decay_rates with x (time) values and calculate y (titer) values", "Code": "to_plot <- func_samples %>% crossing(plot_times) to_plot <- to_plot %>% mutate(predicted_titer = scaling * 10^(intercept - decay_rate * time)) dat <- dat[dat$material == material, ] max_titer <-
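# same scaling as the fitted lines above (10/3 for aerosols, 1 otherwise)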
scaling * max(dat$titer)", "Label": "Data Variable", "Source": "https://osf.io/fb5tw/", "File": "figure_individual_fits.R" }, { "ID": 951, "Comment": "this function accepts a column containing hitnames, a target hitname to look for, and a ... starting and ending fixation index", "Code": "is_hitname_in_range <- function(vec, hitname, fi_start, fi_end) { if (!\"fi_pairs\" %in% ls(envir = .GlobalEnv)) { fi_pairs <- get_fixationindex_pairs(df$FixationIndex)", "Label": "Data Variable", "Source": "https://osf.io/mp9td/", "File": "is_hitname_in_range.R" }, { "ID": 952, "Comment": "0 get sensing/location data for the specific user", "Code": "sensing = dplyr::tbl(phonestudy, \"ps_activity\") %>% dplyr::filter(user_id %in% user) %>% dplyr::filter(!activityName %in% c(\"PHONESTUDY\", \"DNAPSOUND\", \"DNAPACCELEROMETER\", \"DNAPLIGHT\", \"DNAPPROXIMITY\", \"DNAPGYROSCOPE\")) %>% dplyr::mutate(timestamp = as.character(timestamp), created_at = as.character(created_at), updated_at = as.character(updated_at)) %>% data.frame() sensing$user_id = as.character(sensing$user_id)", "Label": "Data Variable", "Source": "https://osf.io/b7krz/", "File": "Enrichment_GPS_POIs_HERE_API.R" }, { "ID": 953, "Comment": "Creates the variable 'distances', a 540 X 540 distance matrix between stimuli", "Code": "coordinates = read.xlsx(\"540_coordinates.xlsx\", colNames = TRUE) coordinates = as.matrix(unname(coordinates)) coordinates = coordinates[,2:9] distances = as.matrix(dist(coordinates))", "Label": "Data Variable", "Source": "https://osf.io/hrf5t/", "File": "runHierarchicalGCM.R" }, { "ID": 954, "Comment": "Number of subjects in each group Create a group variable according to each cluster condition", "Code": "if (size == 1){ N.size = N/K n.group = unlist(lapply(1:K, function(k) rep(k,N.size))) } if (size == 2){ if (K == 2){ N.1 = 0.10*N N.2 = N - N.1 n.group = c(rep(1,N.1),rep(2,N.2)) } if (K == 4){ N.1 = 0.10*N N.rest = N - N.1 N.size = N.rest/(K-1) n.group = c(rep(1,N.1),rep(2,N.size),rep(3,N.size),rep(4,N.size)) }} if (size == 3){ if (K == 2){ N.1 = 0.6*N N.2 = N - N.1 n.group = c(rep(1,N.1),rep(2,N.2)) } if (K == 4){ if (N == 20){ N.1 = 0.6*N N.rest = N - N.1 N.size = floor(N.rest/(K-1)) n.group = c(rep(1,N.1),rep(2,N.size),rep(3,N.size+1),rep(4,N.size+1)) } else{ N.1 = 0.6*N N.rest = N - N.1 N.size = N.rest/(K-1) n.group = c(rep(1,N.1),rep(2,N.size),rep(3,N.size),rep(4,N.size)) }}}", "Label": "Data Variable", "Source": "https://osf.io/rs6un/", "File": "Data.Cluster.VAR.Fixed.R" }, { "ID": 955, "Comment": " make a blank output table to hold the derived stats, and fill it up", "Code": "out.tbl <- data.frame(array(NA, c(800, 5)));; # just make it big for now colnames(out.tbl) <- c(\"load.type\", \"cat.type\", \"trial.type\", \"stat.name\", \"stat.value\");; ctr <- 1;;", "Label": "Data Variable", "Source": "https://osf.io/p6msu/", "File": "create_output_behavioral.R" }, { "ID": 956, "Comment": "creating healthcare / containment variable", "Code": "measures_new <- measures_new %>% mutate(measure_category = if_else(str_starts(measure,\"H\"), \"Healthcare\", \"Containment\"))", "Label": "Data Variable", "Source": "https://osf.io/scn62/", "File": "three countries plot.R" }, { "ID": 957, "Comment": "Number of subjects in Group 0", "Code": "N0.subject = 2*N0.dyad", "Label": "Data Variable", "Source": "https://osf.io/vtb9e/", "File": "Sim.Dyad.Model.3.R" }, { "ID": 958, "Comment": "Exclude data from students who changed class or school during the school year.", "Code": "quop2$exclusion[quop2$change>0] <- 1", "Label": "Data
Variable", "Source": "https://osf.io/vphyt/", "File": "Prepare_wst_data_OSF.R" }, { "ID": 959, "Comment": "create date variable for birthday", "Code": "quop2$s_birth_date <- as.Date(quop2$s_birth, format = \"%d.%m.%Y\")", "Label": "Data Variable", "Source": "https://osf.io/vphyt/", "File": "Prepare_wst_data_OSF.R" }, { "ID": 960, "Comment": "set gavariables to NA if they have negative times where there are negative times recorded in the gqcvariables, use the corresponding gavariable value instead (hoping it is not negative) (!: ga also contains instruction pages) first and last item (one per page) between the instruction pages", "Code": "js_start = c(2, 23, 37) js_end = c(21, 35, 49) for(i in 1:8){ for(j in 1:length(js_start)){ for(k in js_start[j]:js_end[j]){ eval(parse(text=paste0(\"quop_use$t\",i,\"_gqc\",k-j,\"<- ifelse(quop_use$t\",i,\"_gqc\",k-j,\"<=0,quop_use$t\",i,\"_ga\",k,\", quop_use$t\",i,\"_gqc\",k-j,\")\"))) } } }", "Label": "Data Variable", "Source": "https://osf.io/vphyt/", "File": "Prepare_wst_data_OSF.R" }, { "ID": 961, "Comment": "Creat file paths for the raw data and the csv files that you get at the end", "Code": "file_path = 'data_fullstudy1.txt' file_path2 = 'behavioral.csv' file_path3 = 'questionnaires.csv' file_path4 = 'boxes.csv'", "Label": "Data Variable", "Source": "https://osf.io/xh36s/", "File": "preprocessing.R" }, { "ID": 962, "Comment": "Create loop for every participant, put the relevant data into the dataframe", "Code": "con <- file(file_path, open = \"r\") on.exit(close(con)) lines <- readLines(con) subjects_data <- list() game_data <- list() boxes_data <- list() subj <- 0 for (line in lines) { if (grepl(\"Enter your Prolific ID\", line, fixed = TRUE)) { subj = subj + 1 metadata_beginning <- fromJSON(str_replace(line, \"\\\\}\\\\{\", \",\")) subj_data <- \"\"", "Label": "Data Variable", "Source": "https://osf.io/xh36s/", "File": "preprocessing.R" }, { "ID": 963, "Comment": "Recode NAs to 0 for response frequency tables", "Code": "recodeNAto0 <- function(var) { case_when(is.na(var) ~ 0, T ~ var) }", "Label": "Data Variable", "Source": "https://osf.io/w4gey/", "File": "03_descriptive-analysis.R" }, { "ID": 964, "Comment": "Compute response frequency table for recoded items", "Code": "proptable <- function(var) { table(ds$country_name, var) %>% addmargins() %>% data.frame() %>% rename(country = 1, response = 2, prop = 3) %>% spread(response, prop) %>% mutate(`0` = `0`/Sum, `1` = `1`/Sum, `2` = `2`/Sum, `3` = `3`/Sum, `4` = `4`/Sum, `5` = `5`/Sum) }", "Label": "Data Variable", "Source": "https://osf.io/w4gey/", "File": "03_descriptive-analysis.R" }, { "ID": 965, "Comment": "Make density ridge plot", "Code": "ggplot(data = brmsmeans_antisci_cntr_post, aes(x = antisci_mean, y = fct_reorder(country_name, antisci_mean, .fun = mean), fill = factor(continent))) + stat_density_ridges(inherit.aes = T, calc_ecdf = F, quantile_lines = T, quantiles = 2, alpha = 0.7, scale = 3.5) + stat_summary(aes(label = paste0(sprintf(\"%0.2f\", round(..x.., 2)), \" [\", sprintf(\"%0.2f\", round(brmsmeans_hdi_l, 2)), \", \", sprintf(\"%0.2f\", round(brmsmeans_hdi_h, 2)), \"]\")), geom = \"text\", size = 3, hjust = 0, color = \"black\", position = position_nudge(x = 6.7 - brmsmeans_means)) + scale_fill_viridis_d(name = \"Continent\", option = \"inferno\", begin = 0.1, end = 0.9, direction = -1) + coord_cartesian(xlim = c(3.7, 6.3), clip = \"off\") + ggtitle(label = \"Posterior probability distributions of anti-science attitudes across countries\", subtitle = \"Annotations provide 
distribution means and 89% HDIs\") + xlab(\"Anti-science attitudes\") + ylab(\"\") + theme_minimal() + theme(plot.margin = unit(c(1, 10, 1, 1), \"lines\"), legend.position = \"bottom\")", "Label": "Visualization", "Source": "https://osf.io/w4gey/", "File": "03_descriptive-analysis.R" }, { "ID": 966, "Comment": "extract the head and convert it to image", "Code": "extract_pdf_top <- function(pdf_path) { page_img <- image_read_pdf(pdf_path, page = 1, density = 72) width <- image_info(page_img)$width height <- image_info(page_img)$height left <- 0 top <- 75 height <- height / 4 image_crop(page_img, geometry = sprintf(\"%fx%f+%f+%f\", width, height, left, top)) } dontcare <- first_page_paths %>% map(extract_pdf_top) %>% map2(png_paths, image_write)", "Label": "Data Variable", "Source": "https://osf.io/csy8q/", "File": "process_pdf.R" }, { "ID": 967, "Comment": "Figures function to create data summaries in figures calculating means and cis for the plot", "Code": "ci <- function(x) (sd(x)/sqrt(length(x))*qt(0.975,df=length(x)-1) ) #function to compute size of confidence intervals data_summary <- function(x) { m <- mean(x) ymin <- m-ci(x) ymax <- m+ci(x) return(c(y=m,ymin=ymin,ymax=ymax)) }", "Label": "Visualization", "Source": "https://osf.io/mj5nh/", "File": "politicalknowledgeestimatecentralitydefaultsatleast6.R" }, { "ID": 968, "Comment": "median thickness of new snow", "Code": "medianThicknessNewSnow <- sapply(avgSP$sets, function(set) { median(sapply(set, function(sp) { sum(sp$layers$thickness[findPWL(sp, pwl_gtype = c(\"PP\", \"DF\"))]) })) }) lines(avgSP$meta$date, avgSP$meta$hs_median - medianThicknessNewSnow, lty = \"dashed\", lwd = 1) avgSP$avgs <- snowprofileSet(lapply(avgSP$avgs, function(avg) { avg$layers$percentage <- avg$layers$ppu_all avg })) plot(avgSP$avgs[avgSP$meta$date >= xdaterange[1] & avgSP$meta$date <= xdaterange[2]], ColParam = \"percentage\", add = TRUE, yaxis = FALSE, ylab = \"\") legend(as.Date(\"2018-09-23\"), 190, c(\"\", \"\", \"PP\", \"DF\", \"SH\", \"DH\", \"FC\", \"FCxr\", \"RG\", \"MF\", \"MFcr\"), lty = c(\"solid\", \"dashed\", rep(NA, 9)), lwd = 2, col = c(rep(\"black\", 2), getColoursGrainType(c(\"PP\", \"DF\", \"SH\", \"DH\", \"FC\", \"FCxr\", \"RG\", \"MF\", \"MFcr\"))), pch = c(NA, NA, rep(15, 9)), pt.cex = 2.5, density = c(rep(0, 11)), border = \"transparent\", horiz = FALSE, bty = \"o\", box.lwd = 0, cex = 1.4) dev.off()", "Label": "Data Variable", "Source": "https://osf.io/w7pjy/", "File": "figures_paper.R" }, { "ID": 969, "Comment": "Means and SDs of TOTAL LT TO THE SCREEN in the outcome phase (Okumura) Table 2 in the main manuscript: for mean.overall.data (analogously to the boxplots)", "Code": "mean.overall.data %>% group_by(Condition) %>% summarise(mean=mean(LTScreenOut), sd=sd(LTScreenOut), na.rm = TRUE) %>% as.data.frame(.) %>% dplyr::mutate_if(is.numeric, round, 3)", "Label": "Data Variable", "Source": "https://osf.io/mp9td/", "File": "Stats_Third.R" }, { "ID": 970, "Comment": "checking distribution of the dependent variable", "Code": "hist(overall.data$LTScreenOut) hist(overall.data$LTObjectOut) hist(overall.data$FirstLookDurationObjectOut)", "Label": "Data Variable", "Source": "https://osf.io/mp9td/", "File": "Stats_Third.R" }, { "ID": 971, "Comment": "reduced model revealed a convergence warning, with high SDs for TrialRun and Concontext (random effects).
model with simpler random effect structure revealed similar estimates suggesting that the warning can be ignored", "Code": "red6_2 <- glmer(FirstLookDurationObjectOut ~ ConContext + Identity_change + Location_change + z.TrialRun + z.TrialCon + ObjectPosAct + (1 + z.TrialCon + ObjectPosAct | ID) , data=overall.data, family=Gamma(link=log), control=contr) summary(red6_2) summary(red6)", "Label": "Statistical Modeling", "Source": "https://osf.io/mp9td/", "File": "Stats_Third.R" }, { "ID": 972, "Comment": "Test the difference between correlations of the same skills vs. different skills based on multiple imputation", "Code": "names(Data_Table2a)[4]<-\"Bad_news\" m <- 'Persuasion ~~ v1*Persuasion + s1*Unreasonable + d1*Crisis + d2*Bad_news + d3*Presentation + d4*Mistake Unreasonable ~~ v2*Unreasonable + d5*Crisis + d6*Bad_news + d7*Presentation + d8*Mistake Crisis ~~ v3*Crisis + s2*Bad_news + d9*Presentation + d10*Mistake Bad_news ~~ v4*Bad_news + d11*Presentation + d12*Mistake Presentation ~~ v5*Presentation + s3*Mistake Mistake ~~ v6*Mistake", "Label": "Statistical Test", "Source": "https://osf.io/jy5wd/", "File": "Code.R" }, { "ID": 973, "Comment": "compute boxplot characteristics", "Code": "x <- boxplot(rep.int(yn, wn), plot = FALSE) top_vp <- viewport(layout = grid.layout(nrow = 2, ncol = 3, widths = unit(c(ylines, 1, 1), c(\"lines\", \"null\", \"lines\")), heights = unit(c(1, 1), c(\"lines\", \"null\"))), width = unit(1, \"npc\"), height = unit(1, \"npc\") - unit(2, \"lines\"), name = paste(\"node_boxplot\", nid, sep = \"\"), gp = gp) pushViewport(top_vp) grid.rect(gp = gpar(fill = bg, col = 0)) top <- viewport(layout.pos.col = 2, layout.pos.row = 1) pushViewport(top) if (is.null(mainlab)) { mainlab <- if (id) { function(id, nobs) sprintf(\"[%s] nWKL = %s\", # Node %s id, length(unique(VS_$wkl_uuid[fitted_node(obj$node, obj$data) == nid]))) # id, nobs", "Label": "Visualization", "Source": "https://osf.io/w7pjy/", "File": "node_violinplot.R" }, { "ID": 974, "Comment": "Calculate to and from percentages", "Code": "from_id <- unique(AbbyLinks$from) for (i in 1:length(from_id)) { AbbyLinks$perc_from[AbbyLinks$from == from_id[i]] <- AbbyLinks$n[AbbyLinks$from == from_id[i]]/sum(AbbyLinks$n[AbbyLinks$from == from_id[i]]) } to_id <- unique(AbbyLinks$to) for (i in 1:length(to_id)) { AbbyLinks$perc_to[AbbyLinks$to == to_id[i]] <- AbbyLinks$n[AbbyLinks$to == to_id[i]]/sum(AbbyLinks$n[AbbyLinks$to == to_id[i]]) } rm(i, from_id, to_id) head(AbbyLinks, 10)", "Label": "Data Variable", "Source": "https://osf.io/rtmyx/", "File": "05_PerceptionAnalysis_AllClassSolutions_V230401.R" }, { "ID": 975, "Comment": "Generate Data Generate the outcome (y) and mediator (m) in the population with a correlation of b, variances of 1, and means of 0.", "Code": "sigma <- rbind(c(1, b), c(b, 1)) mu <- c(0, 0) df <- as.data.frame(mvrnorm(n = n, mu = mu, Sigma = sigma)) names(df) <- c(\"y\", \"m\")", "Label": "Data Variable", "Source": "https://osf.io/975k3/", "File": "med.validation.R" }, { "ID": 976, "Comment": "create a plot that illustrates: Accuracy, Kappa, AUROC and MCC", "Code": "plot_3 <- methods_plot %>% filter(indices %in% c(\"Accuracy\", \"Kappa\", \"AUROC\" , \"MCC\")) ggplot(plot_3,aes(x=variable,y=value,fill=reorder(indices,value))) + geom_bar(stat = \"identity\",position = \"dodge\",color=\"black\") + xlab(\"\") + ylab(\"in %\") + ggtitle(\"\") + scale_fill_brewer(palette = \"Pastel1\",name=\"\") + theme_bw() + theme(legend.position=\"bottom\") + theme(legend.title=element_blank()) + theme(plot.title =
element_text(hjust = 0.5)) + scale_y_continuous(breaks = c(0,12.5,25,37.5,50,62.5,75,87.5,100),limits = c(0,95)) + theme(axis.text.x = element_text(angle=30, hjust = 1, vjust = 1, size = 12)) + theme(axis.text.y = element_text( size = 12)) + theme(axis.title.y = element_text( size = 12)) + theme(legend.title = element_text( size = 12 ))", "Label": "Visualization", "Source": "https://osf.io/cqsr8/", "File": "plots.R" }, { "ID": 977, "Comment": "round all numeric variables x: data frame digits: number of digits to round", "Code": "numeric_columns <- sapply(x, class) == 'numeric' x[numeric_columns] <- round(x[numeric_columns], digits) x }", "Label": "Data Variable", "Source": "https://osf.io/7z3mk/", "File": "t1-stan-5.R" }, { "ID": 978, "Comment": "M, SD, range of key sociodemographic variables (across countries)", "Code": "summstats(ds, age, country_name) %>% print(n = nrow(.)) summstats(ds, sex_rec, country_name) %>% print(n = nrow(.)) summstats(ds, edu_9cat, country_name) %>% print(n = nrow(.)) summstats(ds, income_10cat, country_name) %>% print(n = nrow(.))", "Label": "Data Variable", "Source": "https://osf.io/w4gey/", "File": "01_setup.R" }, { "ID": 979, "Comment": "plot 95% credible intervals", "Code": "x<-plot(me, plot = FALSE)[[1]] + scale_color_grey() + scale_fill_grey() x+ylim(0,1)+ theme(panel.grid.major = element_line(colour=\"gray\"), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_blank(),panel.grid.major.x = element_blank())+xlab(\"\")+ylab(\"\")", "Label": "Visualization", "Source": "https://osf.io/a8htx/", "File": "SSK_Cleaned.R" }, { "ID": 980, "Comment": "Calculating Bayes Factors for effects", "Code": "bayes_factor(xfit1,xfit2) bayes_factor(xfit1,xfit3)", "Label": "Statistical Test", "Source": "https://osf.io/a8htx/", "File": "SSK_Cleaned.R" }, { "ID": 981, "Comment": "plot of abstractlevel rating averages Scatterplot with overlaid box", "Code": "eval.plot <- ggplot(abstract.plot, aes(y=eval, x=target.f, color=target.f)) + geom_jitter(aes(colour=target.f)) + geom_boxplot(outlier.shape = NA, alpha=0.5) eval.plot <- eval.plot + theme_bw() + scale_colour_manual(values=c(\"#9E2E2E\",\"royalblue4\")) + scale_x_discrete( labels = c(\"Conservatives\", \"Liberals\") ) + scale_y_continuous(limits = c(1, 7), expand = c(0,0)) + labs(y=\"Evaluative Rating\", x=\"\") + theme( text = element_text(size=16), panel.grid.major.x = element_blank(), legend.position = \"none\" ) explain.plot <- ggplot(abstract.plot, aes(y=explain, x=target.f, color=target.f)) + geom_jitter(aes(colour=target.f)) + geom_boxplot(outlier.shape = NA, alpha=0.5) explain.plot <- explain.plot + theme_bw() + scale_colour_manual(values=c(\"#9E2E2E\",\"royalblue4\")) + scale_x_discrete( labels = c(\"Conservatives\", \"Liberals\") ) + scale_y_continuous(limits = c(1, 7), expand = c(0,0)) + labs(y=\"Explanatory Rating\", x=\"\") + theme( text = element_text(size=16), panel.grid.major.x = element_blank(), legend.position = \"none\" ) grid.arrange(eval.plot,explain.plot, nrow = 1)", "Label": "Visualization", "Source": "https://osf.io/zhf98/", "File": "abstract ratings analysis.r" }, { "ID": 982, "Comment": "plot the raw data and the 95% HPDI from m.arithmetic", "Code": "d.gg.math <- d.math.agg d.gg.math$mean <- NA d.gg.math$low_ci <- NA d.gg.math$high_ci <- NA", "Label": "Visualization", "Source": "https://osf.io/7vbj9/", "File": "Analyses_Confirmatory.R" }, { "ID": 983, "Comment": "Linear mixedeffects model for reproduction bias", "Code": "mod_full <- lmer(Prct_Bias ~ 
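# fixed effects: context crossed with musicality (GMSI) and the group/order/distortion factors;; random context slopes by subject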
ContextC*(GMSI_Gen_Z + GroupC*OrderC*Distort) + (ContextC | Subject), data=dat2) summary(mod_full) Anova(mod_full, type=3, test='Chisq') # Wald tests", "Label": "Statistical Modeling", "Source": "https://osf.io/wxgm5/", "File": "Exp1_complete.R" }, { "ID": 984, "Comment": "Simple slopes: Model for testing effect of Context at high GMSI", "Code": "mod_1 <- lmer(Prct_Bias ~ ContextC*(GMSI_Gen_hi + GroupC*OrderC*Distort) + (ContextC | Subject), data=dat2) summary(mod_1) Anova(mod_1, type=3, test='Chisq')", "Label": "Statistical Modeling", "Source": "https://osf.io/wxgm5/", "File": "Exp1_complete.R" }, { "ID": 985, "Comment": "Add information on species + condition to each row of dplot", "Code": "gramXspec <- as.factor(c(rep(\"Bird AD\", 3), rep(\"Bird NAD\", 3))) grammar <- as.factor(c(rep(\"AD\", 3), rep(\"NAD\", 3))) species <- as.factor(c(rep(\"Bird\", 6))) dplot $ Condition <- as.factor(grammar) ;; dplot$Species <- as.factor(species) ;; dplot $ gramXspec <- as.factor(gramXspec) dplot", "Label": "Visualization", "Source": "https://osf.io/mhgcx/", "File": "Bird AGL - Outputs and plots.R" }, { "ID": 986, "Comment": "exclude App Spirituality features (are accidentally still created in the data set as empty variables)", "Code": "sensing = sensing %>% dplyr::select(!matches(\"Spirituality\"), -date)", "Label": "Data Variable", "Source": "https://osf.io/b7krz/", "File": "04_SOURCE_ExclusionCriteria.R" }, { "ID": 987, "Comment": "Proportion of trials missed within each block, for each subj", "Code": "miss.blk <- with(data, tapply(trialError, list(subj, data$block), function(x) sum(x!=\"FALSE\") / length(x)))", "Label": "Data Variable", "Source": "https://osf.io/tbczv/", "File": "02-exclCriteria.r" }, { "ID": 988, "Comment": "plot fit model marginal_effects( FIT ) compute marginal means and average marginal effects", "Code": "mu_happy = rowMeans(getCAT_fitted_mean(FIT , 'emotion' , 'happy' )) mu_angry = rowMeans(getCAT_fitted_mean(FIT , 'emotion' , 'angry' )) mu_neutral = rowMeans(getCAT_fitted_mean(FIT , 'emotion' , 'neutral' )) contrast_mu_angry_neutral = mu_angry-mu_neutral contrast_mu_happy_neutral = mu_happy-mu_neutral contrast_mu_angry_happy = mu_angry - mu_happy", "Label": "Visualization", "Source": "https://osf.io/dkq3f/", "File": "eda_brms.R" }, { "ID": 989, "Comment": "Add column for centered logtransformed time series", "Code": "habsos.ts$ctr <- habsos.ts$log10_CELLS - mean(habsos.ts$log10_CELLS) head( habsos.ts )", "Label": "Data Variable", "Source": "https://osf.io/ajf3h/", "File": "02generateKbrevistimeseries.R" }, { "ID": 990, "Comment": "Custom functions 1. Data management 1.1 Build mean indices from multiple variables", "Code": "mean_index <- function (df, name, vars) { M1 <- dplyr::select(df, vars) M2 <- rowMeans(M1, na.rm = TRUE) M2 <- tibble::tibble(M2) colnames(M2) <- name df <- dplyr::bind_cols(df, M2) return(df) }", "Label": "Data Variable", "Source": "https://osf.io/w97h4/", "File": "Paper_functions.R" }, { "ID": 991, "Comment": "2.
Stats 2.1 Calculate standard error (se_func)", "Code": "se_func <- function(var) { sd <- sd(var) n <- length(var) se <- sd / sqrt(n) return(se) }", "Label": "Statistical Test", "Source": "https://osf.io/w97h4/", "File": "Paper_functions.R" }, { "ID": 992, "Comment": "Calculate confidence interval (upper bound)", "Code": "upper_ci_func <- function(var) { m <- mean(var) sd <- sd(var) n <- length(var) se <- sd / sqrt(n) upper_ci <- m + qt(1 - (0.05 / 2), n - 1) * se return(upper_ci) }", "Label": "Statistical Test", "Source": "https://osf.io/w97h4/", "File": "Paper_functions.R" }, { "ID": 993, "Comment": "2.6 Bayes Factor paired ttest (func_ttest_paired_bf)", "Code": "func_ttest_paired_bf <- function(pre = df_merge1_s3$paffect_pre, post = df_merge1_s3$paffect_post, dv = \"paffect\") {", "Label": "Statistical Test", "Source": "https://osf.io/w97h4/", "File": "Paper_functions.R" }, { "ID": 994, "Comment": "calculate pvalues from ttests", "Code": "tee <- with(df, t.test(formula = dep_var ~ group_var, paired = FALSE, alternative = side)) return(tee) } tee <- with(df, t.test(x = var_pre, y = var_post, paired = TRUE, alternative = side)) } tee <- with(df, t.test(x = var_pre, y = var_post, paired = TRUE, alternative = side)) tee <- tee[[\"statistic\"]][[\"t\"]] effsize <- tee / sqrt(length(var_pre)) return(effsize) }", "Label": "Statistical Test", "Source": "https://osf.io/w97h4/", "File": "Paper_functions.R" }, { "ID": 995, "Comment": "impute missing values with the median of the respective variable", "Code": "phonedata <- impute(phonedata, target = \"Soci\", classes = list(numeric = imputeMedian(), integer = imputeMedian()))$data", "Label": "Data Variable", "Source": "https://osf.io/9mc84/", "File": "preprocessing.R" }, { "ID": 996, "Comment": "Print percents of stuff (used for displaying study demographics)", "Code": "print.percents <- function(x) { t <- as.data.frame(sort(table(as.character(x)), decreasing = T)) t$Percent <- round(t$Freq/sum(t$Freq)*100) colnames(t) <- c(\"\", \"n\", \"%\") print(t) }", "Label": "Visualization", "Source": "https://osf.io/zh3f4/", "File": "misc.helpers.R" }, { "ID": 997, "Comment": "Linear mixed effect model (RTs) 1000/RT as preregistered", "Code": "LDTword_LME = lmer(-1000/RT ~ primec + (1|item) + (1+primec|subject), data = byTrial, contrasts = list(primec = cc1), control=lmerControl(optimizer=\"bobyqa\", optCtrl=list(maxfun=2e5))) summary(LDTword_LME)", "Label": "Statistical Modeling", "Source": "https://osf.io/gztxa/", "File": "Vowel_Harmony_LDT_Naming.R" }, { "ID": 998, "Comment": "Exploratory: Linear mixed effect model (RTs) with AuthorTest_Finnish", "Code": "LDTword_LME_ARF = lmer(-1000/RT ~ primec*AR_Finnish + (1|item) + (1|subject), data = byTrial, contrasts = list(primec = cc1), control=lmerControl(optimizer=\"bobyqa\", optCtrl=list(maxfun=2e5))) summary(LDTword_LME_ARF) plot(allEffects(LDTword_LME_ARF), x.var = \"AR_Finnish\")", "Label": "Statistical Modeling", "Source": "https://osf.io/gztxa/", "File": "Vowel_Harmony_LDT_Naming.R" }, { "ID": 999, "Comment": "Linear mixed effect model (RTs) nonwords", "Code": "LDTword_LMEnw = lmer(-1000/RT ~ primec + (1|item) + (1+primec|subject), data = byTrialnw, contrasts = list(primec = cc1), control=lmerControl(optimizer=\"bobyqa\", optCtrl=list(maxfun=2e5))) summary(LDTword_LMEnw)", "Label": "Statistical Modeling", "Source": "https://osf.io/gztxa/", "File": "Vowel_Harmony_LDT_Naming.R" }, { "ID": 1000, "Comment": "Linear mixed effect model (RTs)", "Code": "Nword_LME = lmer(-1000/RT ~ primec + (1|item) +
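# by-item random intercepts plus by-subject random intercepts and slopes for the prime condition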
(1+primec|subject), data = nbyTrial, contrasts = list(primec = cc1), control=lmerControl(optimizer=\"bobyqa\", optCtrl=list(maxfun=2e5))) summary(Nword_LME)", "Label": "Statistical Modeling", "Source": "https://osf.io/gztxa/", "File": "Vowel_Harmony_LDT_Naming.R" }, { "ID": 1001, "Comment": "Linear mixed effect model (RTs) with ART_English", "Code": "Nword_LME_AR = lmer(-1000/RT ~ primec*AR_English + (1|item) + (1+primec|subject), data = nbyTrial, contrasts = list(primec = cc1), control=lmerControl(optimizer=\"bobyqa\", optCtrl=list(maxfun=2e5))) summary(Nword_LME_AR) plot(allEffects(Nword_LME_AR), x.var = \"AR_English\")", "Label": "Statistical Modeling", "Source": "https://osf.io/gztxa/", "File": "Vowel_Harmony_LDT_Naming.R" }, { "ID": 1002, "Comment": "exclude features with zero or nearzero variance", "Code": "exclude_zero.var = nearZeroVar(data[, which(!colnames(data) %in% no.features)], freqCut = 95/5, uniqueCut = 10, saveMetrics = FALSE, allowParallel = TRUE) length(exclude_zero.var) exclude_zero.var = colnames(data[, which(!colnames(data) %in% no.features)])[exclude_zero.var] data = data %>% dplyr::select(-all_of(exclude_zero.var))", "Label": "Data Variable", "Source": "https://osf.io/b7krz/", "File": "ML_target_independent_preprocessing.R" }, { "ID": 1003, "Comment": "Add the posterior_samples values as a column in the dataframe. Repeat it as many times as there are cells in the factorial design, namely 2. factor estimates", "Code": "post.data$b_GramGender2 <- rep(posterior_samples(model.study2)[['b_GramGender2']], 2)", "Label": "Data Variable", "Source": "https://osf.io/5xdbu/", "File": "study2-rscript.R" }, { "ID": 1004, "Comment": "create dataset with only correct trials and only trials above 100 ms", "Code": "data.3SD <- data[data$corr == 1, ] data.3SD <- data.3SD[data.3SD$min2000 == 1, ] data.3SD <- data.3SD[data.3SD$plus100 == 1, ]", "Label": "Data Variable", "Source": "https://osf.io/5yvnb/", "File": "analyse_final_Exp6_OSF.R" }, { "ID": 1005, "Comment": "Function for showing parameter estimates with pvalues for polr models", "Code": "showCoefWithPValue <- function(model) { coef <- coef(summary(model)) pvalue <- pnorm(abs(coef[, \"t value\"]), lower.tail = FALSE) * 2 coef <- cbind(coef, \"pvalue\" = pvalue) return(round(coef, 4)) }", "Label": "Statistical Modeling", "Source": "https://osf.io/aczx5/", "File": "220423_FinalModelEstimation.R" }, { "ID": 1006, "Comment": "Correlation between bulletin user type and avalanche awareness training", "Code": "cor.test(as.numeric(PartBkgr$BullUseType), as.numeric(PartBkgr$BackgrAvTraining), method = \"spearman\")", "Label": "Data Variable", "Source": "https://osf.io/aczx5/", "File": "220423_FinalModelEstimation.R" }, { "ID": 1007, "Comment": "This additional analysis tests the influence of trialbytrial blink duration on response error by computing individual Bayes factors the Bayes factor is calculated by examining the residuals of a null model and how they correlate with blink durations the procedure uses a 'default' prior for calculating bayes factors for correlations code here:", "Code": "source(\"BF_correlations.R\") bf_null <- rep(NA, length(unique(d$Subject))) dur_slope <- rep(NA, length(unique(d$Subject))) for(i in 1:length(bf_null)){ d_i <- d[d$Subject==unique(d$Subject)[i] & d$cond1==1,] m0 <- lm(ResponseError~vel1, d_i) m1 <- lm(ResponseError~vel1 + cond1:bdur, d_i) dur_slope[i] <- coef(m1)[3] bf_null[i] <- 1/bf10JeffreysIntegrate(n=nrow(d_i), r=cor(residuals(m0), d_i$bdur)) }", "Label": "Statistical Test", "Source":
"https://osf.io/f6qsk/", "File": "analysis_exp1.R" }, { "ID": 1008, "Comment": "median and range of BF supporting the null", "Code": "round(median(bf_null[bf_null > 10^(1/2)]),digits=2) round(range(bf_null[bf_null > 10^(1/2)]),digits=2)", "Label": "Statistical Test", "Source": "https://osf.io/f6qsk/", "File": "analysis_exp1.R" }, { "ID": 1009, "Comment": "calculate number of days between symptoms", "Code": "mutate( ill_where_n=ifelse(sym_temp==1, 1, 0), ill_where_n=cumsum(ill_where_n), ill_where_n=ifelse(sym_temp==1, ill_where_n, NA), prev_day_ill=lag(sym_temp, 1), day_lag=lag(day_date, 1),", "Label": "Data Variable", "Source": "https://osf.io/n7sep/", "File": "code.R" }, { "ID": 1010, "Comment": "recode illness vars as NA if no specific symptoms during episode", "Code": "mutate_at(vars(ill_where_id, ill_where_n, ill_where_start, ill_where_end, ill_where_new), funs(ifelse(any_specif_sym==FALSE, NA,. ))) %>%", "Label": "Data Variable", "Source": "https://osf.io/n7sep/", "File": "code.R" }, { "ID": 1011, "Comment": "remove spaces, brackets, periods in column names", "Code": "names(df) <- gsub(\"\\\\s+\", \"\", names(df)) names(df) <- gsub(\"\\\\(|\\\\)\", \"\", names(df)) names(df) <- gsub(\"\\\\.\", \"\", names(df)) cat(\" 💅 Column names fixed (no spaces, brackets, periods)\\n\")", "Label": "Data Variable", "Source": "https://osf.io/mp9td/", "File": "preflight.r" }, { "ID": 1012, "Comment": "initialization of the vectors with the names of the modueles/variables", "Code": "names.modules.df.w <- names.var.modules <- NULL", "Label": "Data Variable", "Source": "https://osf.io/kgtx6/", "File": "f_readRShare.R" }, { "ID": 1013, "Comment": "list of the directories included in the datadir directory", "Code": "my.dirlist <- list.dirs(path = datadir) if (length(my.dirlist) == 0) stop(\"There are no subdirectories in the directory that you selected.\")", "Label": "Data Variable", "Source": "https://osf.io/kgtx6/", "File": "f_readRShare.R" }, { "ID": 1014, "Comment": "creates a list with the names of the variables included for each module", "Code": "names.var.modules <- vector(\"list\", num.waves) names(names.var.modules) <- paste(\"Wave\", waves) if(verbose) cat(\"Reading data from the selected modules within the waves. \\n\")", "Label": "Data Variable", "Source": "https://osf.io/kgtx6/", "File": "f_readRShare.R" }, { "ID": 1015, "Comment": "check which of the specified variables are actually present in the downloaded data", "Code": "which.var.ok <- variables.in.modules[[i.mod]] %in% names(new.data) if (any(!which.var.ok)) warning( \"Some of the variables that you selected from module \", modules[i.mod], \" in wave \", waves[i.wave], \" are not included in the data set. \\n The variables that could not be imported were: \", variables.in.modules[[i.mod]][!which.var.ok], \". Please check these variables, the other variables were exported. 
\\n\" ) new.data <- select(new.data, variables.in.modules[[i.mod]][which.var.ok]) } names(new.data)[-1] <- paste(names(new.data)[-1], modules[i.mod], sep = \"...\")", "Label": "Data Variable", "Source": "https://osf.io/kgtx6/", "File": "f_readRShare.R" }, { "ID": 1016, "Comment": "Trim outliers Perform winsorizing (replacefor each individual seperatelyvalues below 5th percentile and values above 95th percentile with 5th and 95th percentile values respectively)", "Code": "for(i in min(all.df$person):max(all.df$person)){ all.df$zw.HR[all.df$person == i] <- Winsorize(all.df$z.HR[all.df$person==i]) }", "Label": "Data Variable", "Source": "https://osf.io/qj86m/", "File": "2_data_prep_merge.R" }, { "ID": 1017, "Comment": "Create a lag variable the data is lag within person and within days", "Code": "lag.Y = function(data){ Y_lag = rep(0,nrow(data)) subjno.i = unique(data$subjno) for (i in subjno.i){ n.i = which(data$subjno==i) Y_lag[n.i] = shift(data$Y[n.i],1) } return(Y_lag) }", "Label": "Data Variable", "Source": "https://osf.io/vguey/", "File": "lag.Y.R" }, { "ID": 1018, "Comment": "Personality models Extract standardized parameters and N from personality models", "Code": "Values_Analysis1_Model1_Pers <- Values_Personality[which(Values_Personality$analysis == \"analysis1\" & ((Values_Personality$study %in% c(\"S1\", \"S2\") & Values_Personality$DV == \"well_being\") | (Values_Personality$study == \"S3\" & Values_Personality$DV == \"affect_balance\")) & Values_Personality$output %in% c(\"standardized\", \"N\")), ] Values_Analysis1_Model2_Pers <- Values_Personality[which(Values_Personality$analysis == \"analysis2\" & ((Values_Personality$study %in% c(\"S1\", \"S2\") & Values_Personality$DV == \"well_being\") | (Values_Personality$study == \"S3\" & Values_Personality$DV == \"affect_balance\")) & Values_Personality$output %in% c(\"standardized\", \"N\")), ] Values_Analysis1_Model3_Pers <- Values_Personality[which(Values_Personality$analysis == \"analysis3\" & ((Values_Personality$study %in% c(\"S1\", \"S2\") & Values_Personality$DV == \"well_being\") | (Values_Personality$study == \"S3\" & Values_Personality$DV == \"affect_balance\")) & Values_Personality$output %in% c(\"standardized\", \"N\")), ]", "Label": "Data Variable", "Source": "https://osf.io/nxyh3/", "File": "MainTables.R" }, { "ID": 1019, "Comment": "Extract withinperson variances of interactions with close peers, family, and weak ties from unstandardized models", "Code": "variances <- Values_Base$est[which(Values_Base$analysis == \"analysis3\" & ((Values_Base$study %in% c(\"S1\", \"S2\") & Values_Base$DV == \"well_being\") | (Values_Base$study == \"S3\" & Values_Base$DV == \"affect_balance\")) & Values_Base$output == \"unstandardized\" & Values_Base$paramHeader == \"Variances\" & Values_Base$param %in% c(\"PEERS\", \"FAMILY\", \"WEAK_TIES\") & Values_Base$BetweenWithin == \"Within\")] rows <- which(Values_Analysis1_Model3$paramHeader %in% c(\"S1|WB.ON\", \"S2|WB.ON\", \"S3|WB.ON\") & Values_Analysis1_Model3$param %in% c(\"PEERS\", \"FAMILY\", \"WEAK_TIES\")) # within-person effects variances <- Values_Personality$est[which(Values_Personality$analysis == \"analysis3\" & ((Values_Personality$study %in% c(\"S1\", \"S2\") & Values_Personality$DV == \"well_being\") | (Values_Personality$study == \"S3\" & Values_Personality$DV == \"affect_balance\")) & Values_Personality$output == \"unstandardized\" & Values_Personality$paramHeader == \"Variances\" & Values_Personality$param %in% c(\"PEERS\", \"FAMILY\", \"WEAK_TIES\") & 
Values_Personality$BetweenWithin == \"Within\")] rows <- which(Values_Analysis1_Model3_Pers$paramHeader %in% c(\"S1|WB.ON\", \"S2|WB.ON\", \"S3|WB.ON\") & Values_Analysis1_Model3_Pers$param %in% c(\"PEERS\", \"FAMILY\", \"WEAK_TIES\")) # within-person effects", "Label": "Statistical Modeling", "Source": "https://osf.io/nxyh3/", "File": "MainTables.R" }, { "ID": 1020, "Comment": "Model selection for four data sets First find bestfitting random effect model, then test for fixed effect predictors Process is backward by eliminating weakest terms sequentially, starting with full model, until only significant effects remain Nested model comparisons (likelihood ratio tests using anova command) are used to select best fitting models _ Pair Experiment 1 Random effects: selecting empty model", "Code": "pair1_rand_full <- lmer(distance ~ (1 | group) + (1 + session0 | pair), data = pair_data1) # full random model;; fails to converge pair1_rand_full <- lmer(distance ~ (1 | group) + (1 + session0 | pair), data = pair_data1, control = lmerControl(optimizer = \"bobyqa\")) # full random model;; uses bobyqa optimizer pair1_rand2 <- lmer(distance ~ (1 + session0 | pair), data = pair_data1) # drops group ran.intercept;; fails to converge pair1_rand2 <- lmer(distance ~ (1 + session0 | pair), data = pair_data1, control = lmerControl(optimizer = \"bobyqa\")) # drops group ran.intercept;; uses bobyqa optimizer;; BEST pair1_rand3 <- lmer(distance ~ (1 | pair), data = pair_data1) # drops random slope", "Label": "Statistical Modeling", "Source": "https://osf.io/67ncp/", "File": "duque_etal_2020_rcode.R" }, { "ID": 1021, "Comment": "Pearson's correlation calculation", "Code": "calc_r <- function(model_vector, data_vector, S_0, D_0, M_0, E_0, filename) { sst_mean <- mean(data_vector) sst_vector <- (data_vector - sst_mean) ^ 2 sst <- sum(sst_vector) ssr_vector <- (data_vector - model_vector) ^ 2 ssr <- sum(ssr_vector) r_sq <- 1 - (ssr / sst) cat(r_sq, file = paste(format(Sys.time(),\"%Y_%m_%d_%H_%M\"), filename, \"r_sq\", \"S\", S_0, \"D\", D_0, \"M\", M_0, \"E\", E_0, \".txt\", sep = \"_\")) return(r_sq) } r_sq <- calc_r(CO2_flux_ratios_hat_median, data_vector, S_0, D_0, M_0, E_0, filename)", "Label": "Statistical Test", "Source": "https://osf.io/7mey8/", "File": "stan_AWB_adriana_pools5i_vary_mic.r" }, { "ID": 1022, "Comment": "Data wrangling recode values for missing data (-9, -1) in whole dataset as NA", "Code": "data <- data %>% mutate_all(~na_if(., -9)) data <- data %>% mutate_all(~na_if(., -1))", "Label": "Data Variable", "Source": "https://osf.io/r4wg2/", "File": "elsa_analyses.R" }, { "ID": 1023, "Comment": "age recode values of 99 as missing", "Code": "data$indager.w2[data$indager.w2 == 99] <- NA data$age <- data$indager.w2", "Label": "Data Variable", "Source": "https://osf.io/r4wg2/", "File": "elsa_analyses.R" }, { "ID": 1024, "Comment": "Fit models with trimmed weights Weighted regression model for SciPop Score (Goertz approach)", "Code": "m3.scipopgoertz.trim.svyglm.fit <- svyglm(scipopgoertz ~ age + gender + education.comp + education.uni + sciprox.score + urbanity.log + languageregion.ger + languageregion.ita + polorientation + religiosity + interestscience + sciliteracy + trustscience + trustscientists, design = bar.design.scipopgoertz.trim, family = gaussian, na.action = na.omit)", "Label": "Statistical Modeling", "Source": "https://osf.io/qj4xr/", "File": "06_sensitivity-tests.R" }, { "ID": 1025, "Comment": "for each row assign fixation probability (1 or 0) to each picture based on the activation !!
need a better decision criterion when the activations are the same;; !! right now it assigns '1' to both", "Code": "dt.act$max.act = apply(dt.act[,4:7], 1, max) dt.act = subset(dt.act, max.act != 0)", "Label": "Data Variable", "Source": "https://osf.io/fsbzw/", "File": "plot.fix.e1.R" }, { "ID": 1026, "Comment": "4.4.4) Create data where u(x) is at sample means to get residuals based on rest of models to act as yobs Recall: columns 1 & 2 have y and u(x) in obs.data", "Code": "data.xufixed =data.obs data.xufixed[,2]=mean(data.obs[,2]) #Note, the 1st predictor, 2nd columns, is always the one hypothesized to be u-shaped", "Label": "Data Variable", "Source": "https://osf.io/hu2n8/", "File": "Simonsohn_twolines_tweaked.R" }, { "ID": 1027, "Comment": "6 RUN TWO LINE REGRESSIONS 6.1 First an interrupted regression at the midpoint of the flat region", "Code": "rmid=reg2(f,xc=median(xflat),graph=0, axislabels=axislabels)", "Label": "Statistical Modeling", "Source": "https://osf.io/hu2n8/", "File": "Simonsohn_twolines_tweaked.R" }, { "ID": 1028, "Comment": "Pairwise comparison of optimism scores for each cue and table Wilcoxon signedrank test", "Code": "cue_diff_wilcox_ts = wilcox.paired.multcomp( opt_score ~ cue | id, subset(jbt_all, jbt_all$cjb_test == \"ts\"), p.method = \"holm\") cue_diff_wilcox_tunnel = wilcox.paired.multcomp( opt_score ~ cue | id, subset(jbt_all, jbt_all$cjb_test == \"tunnel\"), p.method = \"holm\")", "Label": "Statistical Test", "Source": "https://osf.io/z6nm8/", "File": "Stats_figures_JBT.R" }, { "ID": 1029, "Comment": "round the numeric values (p that is not < 0.001)", "Code": "cue_diff_wilcox_ts$p.value[numeric_values] <- round(as.numeric(cue_diff_wilcox_ts$p.value[numeric_values]), 3) names(cue_diff_wilcox_ts) <- c(\"Cues compared\",\"p-value\")", "Label": "Data Variable", "Source": "https://osf.io/z6nm8/", "File": "Stats_figures_JBT.R" }, { "ID": 1030, "Comment": "new tibble with IDs and emails", "Code": "wos_emails <- tibble( \"responseID\" = wos$response_ID, \"email\" = wos$email ) aaas_emails <- tibble( \"responseID\" = aaas$response_ID, \"email\" = aaas$email )", "Label": "Data Variable", "Source": "https://osf.io/3bn9u/", "File": "4_1_deidentification.R" }, { "ID": 1031, "Comment": "drop rows with empty email fields", "Code": "wos_emails <- drop_na( wos_emails, email ) aaas_emails <- drop_na( aaas_emails, email )", "Label": "Data Variable", "Source": "https://osf.io/3bn9u/", "File": "4_1_deidentification.R" }, { "ID": 1032, "Comment": "getmode() Get mode of a variable Input: numeric vector Output: single number", "Code": "getmode <- function(v) { uniqv <- unique(v) uniqv[which.max(tabulate(match(v, uniqv)))] }", "Label": "Data Variable", "Source": "https://osf.io/8fzns/", "File": "2H_Recode_helper.R" }, { "ID": 1033, "Comment": "For studyProgramme remove countries with only one programme", "Code": "if(varname == \"studyProgram\" & length(lev) == 1){ return(NA) }", "Label": "Data Variable", "Source": "https://osf.io/8fzns/", "File": "2H_Recode_helper.R" }, { "ID": 1034, "Comment": "Model for Frequency DV", "Code": "mod1.1 <- lmer(frequency ~ ruleType + (1|sceneType) + (1|Participant), contrasts = my.contrasts, data = PD3a.Long) icc(mod1.1) summary(mod1.1) emmeans(mod1.1,list(pairwise~ruleType),adjust=\"satterthwaite\") confint(mod1.1)", "Label": "Data Variable", "Source": "https://osf.io/dhmjx/", "File": "ExperimentS1-Analyses.R" }, { "ID": 1035, "Comment": "Define functions Means, SDs, and two sample ttests comparing Swiss census data and the panel sample", "Code":
"census.msd <- function(v) { v <- reformulate(v) cbind( as.data.frame(svyby(v, ~t, dslong_dsgn, svymean, na.rm = T))[-c(1,3)], # M (exclude SE) sqrt(as.data.frame(svyby(v, ~t, dslong_dsgn, svyvar, na.rm = T)[[2]]))) %>% # SD (with a workaround) set_colnames(c(\"M\", \"SD\")) %>% set_rownames(c(\"2019 (census)\", \"2019 (panel)\")) }", "Label": "Statistical Test", "Source": "https://osf.io/3hgpe/", "File": "02_analysis.R" }, { "ID": 1036, "Comment": "RQ1: Examine predictors of change in SciPop Scores and subscale scores .. Visual inspection with Sankey plot Visualize withinsubject change in SciPop Score in Sankey plot", "Code": "dslongpnl %>% group_by(t, scipopgoertz) %>% summarise(scipopgoertz_freq = n()) %>% merge(dslongpnl, by = c(\"scipopgoertz\", \"t\")) %>% transform(scipopgoertz_lvls = factor(scipopgoertz, levels = c(\"5\", \"4.5\", \"4\", \"3.5\", \"3\", \"2.5\", \"2\", \"1.5\", \"1\", \"NA\"))) %>% replace_na(list(scipopgoertz_lvls = \"NA\")) %>% ggplot(aes(x = t, stratum = scipopgoertz_lvls, alluvium = id, y = scipopgoertz_freq, fill = scipopgoertz_lvls, label = scipopgoertz_lvls)) + scale_x_discrete(expand = c(.1, .1)) + scale_fill_viridis_d() + geom_flow(width = .1) + geom_stratum(alpha = .5, width = .1) + geom_text(stat = \"stratum\", size = 4) + ggtitle(\"Sankey plot of within-subject change in SciPop Score\") + theme_minimal() + theme(legend.position = \"none\", panel.grid = element_blank(), axis.text.y = element_blank(), axis.ticks.y = element_blank())", "Label": "Visualization", "Source": "https://osf.io/3hgpe/", "File": "02_analysis.R" }, { "ID": 1037, "Comment": "H1 analyses .. Descriptive analyses Plot means, SDs, skewness, kurtosis of alternative SciPop Scores", "Code": "meantbl <- as.data.frame(matrix(0, nrow = 2)) groups <- quos(scipopgoertz, scipopbollenm, scipopbollencfa, scipopsartori75, scipopsartoricat) for (j in seq_along(groups)) { meantbl <- cbind(meantbl, group_by(dslongpnl, t) %>% summarise(mean = mean(!!groups[[j]], na.rm = T), sd = sd(!!groups[[j]], na.rm = T), skewness = skewness(!!groups[[j]], na.rm = T), kurtosis = kurtosis(!!groups[[j]], na.rm = T))) names(meantbl)[names(meantbl) == \"mean\"] <- paste0(quo_text(groups[[j]]), \" (M)\") names(meantbl)[names(meantbl) == \"sd\"] <- paste0(quo_text(groups[[j]]), \" (SD)\") names(meantbl)[names(meantbl) == \"skewness\"] <- paste0(quo_text(groups[[j]]), \" (Skewness)\") names(meantbl)[names(meantbl) == \"kurtosis\"] <- paste0(quo_text(groups[[j]]), \" (Kurtosis)\") meantbl <- select(meantbl, contains(\"scipop\")) %>% set_rownames(c(\"2019\", \"2020\")) }", "Label": "Visualization", "Source": "https://osf.io/3hgpe/", "File": "02_analysis.R" }, { "ID": 1038, "Comment": "save means and compute their higher order terms", "Code": "dfs$x.mean <- mean( dfs$x, na.rm = TRUE ) dfs$y.mean <- mean( dfs$y, na.rm = TRUE ) dfs$x2.mean <- dfs$x.mean^2 dfs$xy.mean <- dfs$x.mean*dfs$y.mean dfs$y2.mean <- dfs$y.mean^2 dfs }", "Label": "Statistical Modeling", "Source": "https://osf.io/jhyu9/", "File": "PrepareData.R" }, { "ID": 1039, "Comment": "add column indicating if individual gave birth during testing (this will help with age classification)", "Code": "birthsanc <- read_excel(\"motherssanctuary.xlsx\", col_types=c(\"text\", \"date\")) bigdatasanctuary$Individual<-as.factor(bigdatasanctuary$Individual) birthsanc$Mother <- as.factor(birthsanc$Mother) bigdatasanctuary$mother <- ifelse(bigdatasanctuary$Individual %in% birthsanc$Mother, \"yes\", \"no\") bigdatasanctuary$birth <- birthsanc$Date[match(bigdatasanctuary$Individual, 
bigdatasanctuary$birth <- as.Date(bigdatasanctuary$birth, format=\"%Y-%m-%d\") bigdatasanctuary$testbeforebirth <- ifelse(!is.na(bigdatasanctuary$birth) & bigdatasanctuary$birth > bigdatasanctuary$TestDate, \"yes\", \"no\") bigdatasanctuary$AgeAtTesting <- as.numeric(bigdatasanctuary$AgeAtTesting)", "Label": "Data Variable", "Source": "https://osf.io/p2xgq/", "File": "Analyses for revision" }, { "ID": 1040, "Comment": "Step 1: compare the rising ridge asymmetric congruence model (RRCA) and the full third-order model (cubic)", "Code": "rrca_comp <- compare2(rrca_myrsa, \"RRCA\", \"cubic\") rrca_comp", "Label": "Statistical Modeling", "Source": "https://osf.io/drv3a/", "File": "illustration.R" }, { "ID": 1041, "Comment": "How much variance in the outcome can be explained by the full third-order polynomial model / the rising ridge level-dependent congruence model?", "Code": "getPar(rrcl_myrsa, model=\"cubic\", type=\"R2\") getPar(rrcl_myrsa, model=\"RRCL\", type=\"R2\")", "Label": "Statistical Modeling", "Source": "https://osf.io/drv3a/", "File": "illustration.R" }, { "ID": 1042, "Comment": "get the subsequent fixation index (+1), retrieve the row number using fi_pairs, and assign", "Code": "scope_start[i] <- fi_pairs$fistart[scope_start_fis[i] + 1] } return(scope_start) }", "Label": "Data Variable", "Source": "https://osf.io/mp9td/", "File": "get_first_free_fi.R" }, { "ID": 1043, "Comment": "Exclude participants who completed fewer than 10 surveys", "Code": "dat <- dat[-which(dat$N < 10), ] dim(dat) # 36074 assessments length(unique(dat$id)) # 1177 participants (1109 participants excluded)", "Label": "Data Variable", "Source": "https://osf.io/nxyh3/", "File": "01b_DataPrep_Study2.R" }, { "ID": 1044, "Comment": "define function to compute colors and point sizes that yield a 3D effect", "Code": "points3d <- function(data, xname, yname, zname, cex.limit.close = 1.7, cex.limit.far = 1, col.limit.close = 0.1, col.limit.far = 0.8, adapted_eye = list(x=-12, y=-16, z=2) ){", "Label": "Visualization", "Source": "https://osf.io/fbshg/", "File": "ComF_helpers.R" }, { "ID": 1045, "Comment": "for each row of the data, compute the Euclidean distance between the person's 3-dimensional point and the plane", "Code": "distance <- abs( (data[,xname] - a$x) * n$x + (data[,yname] - a$y) * n$y + (data[,zname] - a$z) * n$z ) / sqrt(n$x^2 + n$y^2 + n$z^2)", "Label": "Data Variable", "Source": "https://osf.io/fbshg/", "File": "ComF_helpers.R" }, { "ID": 1046, "Comment": "Calculate delta (change) in testosterone; pivot data sets for delta values", "Code": "hormones_wide <-hormones %>% pivot_wider(id_cols = c(\"id\", \"sex\"), names_from = condition, values_from = t_conc_corr) %>% mutate(delta_t = back_home - baseline) delta_t <- left_join(hormones, hormones_wide) delta_t <- delta_t %>% filter(condition == \"baseline\")%>% filter(!is.na(delta_t))", "Label": "Data Variable", "Source": "https://osf.io/3bpn6/", "File": "af_testosterone_analysis.R" }, { "ID": 1047, "Comment": "Plot sex and time point: boxplot for sex differences", "Code": "t_condition <- hormones %>% ggplot(aes(x = condition, y = log(t_conc_corr), fill = sex)) + geom_boxplot(outlier.shape = NA, width = 0.7) +", "Label": "Visualization", "Source": "https://osf.io/3bpn6/", "File": "af_testosterone_analysis.R" }, { "ID": 1048, "Comment": "Network estimation: an Ising model (binary data) can be estimated via estimateNetwork, with variables automatically binarized at the median (define where to binarize variables). Estimates a regularized logistic nodewise-regression network via eLASSO (LASSO with EBIC model selection); listwise deletion of missing values (pairwise not possible for regressions)", "Code": "Ising_net <- estimateNetwork(data, default = \"IsingFit\", missing = \"listwise\", rule = \"OR\")
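# Added sketch (assumption, not from the source file): the weights matrix used in
# the next snippet would typically be pulled from the bootnet object, and the
# network can be plotted directly.
Wmat_Ising <- Ising_net$graph      # matrix of regularized edge weights
plot(Ising_net, layout = \"spring\") # qgraph-based network plot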
", "Label": "Statistical Modeling", "Source": "https://osf.io/b4gc7/", "File": "workshop_example.R" }, { "ID": 1049, "Comment": "compare GGM and Ising model by correlating their weights matrices", "Code": "cor.test(Wmat_GGM[upper.tri(Wmat_GGM)], Wmat_Ising[upper.tri(Wmat_Ising)]) #.80", "Label": "Statistical Modeling", "Source": "https://osf.io/b4gc7/", "File": "workshop_example.R" }, { "ID": 1050, "Comment": "create a storage container (i.e., an empty list) for all hit_names that tracks looking times over all trials (e.g., looking_times$left)", "Code": "looking_times <- setNames(vector(\"list\", length(hit_names)), hit_names)", "Label": "Data Variable", "Source": "https://osf.io/yfegm/", "File": "getLooks.r" }, { "ID": 1051, "Comment": "create a storage container for all looking frequencies (i.e., counting the number of looks within an AOI)", "Code": "looking_frequencies <- setNames(vector(\"list\", length(hit_names)), hit_names)", "Label": "Data Variable", "Source": "https://osf.io/yfegm/", "File": "getLooks.r" }, { "ID": 1052, "Comment": "init storage containers for looking frequencies", "Code": "current_trial_total_looks <- setNames(vector(\"list\", length(hit_names)), hit_names) for (hn in hit_names) {", "Label": "Data Variable", "Source": "https://osf.io/yfegm/", "File": "getLooks.r" }, { "ID": 1053, "Comment": "get the first and last FixationIndex (removing NAs), which define the boundaries of a single trial", "Code": "min_FixationIndex <- min(inter_trial_FixationIndexes, na.rm = TRUE) max_FixationIndex <- max(inter_trial_FixationIndexes, na.rm = TRUE)", "Label": "Data Variable", "Source": "https://osf.io/yfegm/", "File": "getLooks.r" }, { "ID": 1054, "Comment": "space between axis label and tick-mark labels", "Code": "my_settings$layout.widths$ylab.axis.padding <- 0.2 my_settings$layout.heights$axis.xlab.padding <- 0.2 my_settings$box.rectangle$col = 1 my_settings$box.umbrella$col = 1 my_settings$box.dot$col = 1 my_settings$plot.symbol$col = 1", "Label": "Visualization", "Source": "https://osf.io/mc26t/", "File": "my_utils.R" }, { "ID": 1055, "Comment": "Overlay the basic grid with a colourful grid corresponding to the limits of the sensitive-period rectangles. These colourful lines should start at the points indicating the change values.", "Code": "cols<-colscale[sapply(result$p1,function(x){which.min(abs(x-scaleseq))})]
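# Added note: each value is mapped to the position on scaleseq with the minimum
# absolute difference, i.e. a nearest-bin colour lookup; the next line repeats
# the lookup for one example permutation run (exrun).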
cols<-colscale[sapply(result$p1.rand[,exrun],function(x){which.min(abs(x-scaleseq))})]", "Label": "Visualization", "Source": "https://osf.io/greqt/", "File": "06_extrapermut_comaprison_with_random.R" }, { "ID": 1056, "Comment": "Skew effect", "Code": "neg <- apply(chains[,c(1,3)], 1, sum) pos <- apply(chains[,c(1,2)], 1, sum) quantile(neg-pos, prob=c(.025, .975)) quantile(pos-neg, prob=c(.025, .975))", "Label": "Visualization", "Source": "https://osf.io/8abj4/", "File": "Exp1.R" }, { "ID": 1057, "Comment": "Fit hierarchical Bayesian model", "Code": "fit.HBM <- stan(file='CAM_full_1.stan', data=c('Nobs','Nind','stim','bias','RM','AS','B1','id','mid','stim_sizes','Nstim'), chains=4, iter=2500, cores=no_cores, control=list(adapt_delta=.90, max_treedepth=15)) fit.HBM sum(summary(fit.HBM)$summary[,'Rhat'] > 1.01) pars <- extract(fit.HBM)", "Label": "Statistical Modeling", "Source": "https://osf.io/8abj4/", "File": "Exp1.R" }, { "ID": 1058, "Comment": "Compute probabilities using the MNL model", "Code": "P[['choice']] = apollo_mnl(mnl_settings1, functionality) P[[\"indic_cost_tap\"]] = apollo_mnl(mnl_settings2, functionality) P[[\"indic_na1\"]] = apollo_mnl(mnl_settings3, functionality) P[[\"indic_na2\"]] = apollo_mnl(mnl_settings4, functionality) P[[\"indic_na3\"]] = apollo_mnl(mnl_settings5, functionality) P[[\"indic_cost_bottle\"]] = apollo_ol(ol_settings5, functionality) P[[\"indic_bill\"]]= apollo_normalDensity(normalDensity_settings1,functionality) P[[\"indic_qual\"]] = apollo_ol(ol_settingsB1, functionality) P[[\"indic_qual_f\"]] = apollo_ol(ol_settingsB3, functionality) P = apollo_combineModels(P, apollo_inputs, functionality)", "Label": "Statistical Modeling", "Source": "https://osf.io/6pq9e/", "File": "Model.R" }, { "ID": 1059, "Comment": "d) F-test (Stefan Gries 2013, p. 218) [only for normally distributed variables!]; use var.test(). Output interpretation: 1) Is p > .05?;; 2) Does the CI include 1?", "Code": "F.lextale <- var.test(pp.explicit.incidental$lextale~pp.explicit.incidental$learningtype);; F.lextale
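# Added reading aid (assumption, not from the source file): the comment's two
# interpretation checks, expressed in code on the returned htest object.
F.lextale$p.value > 0.05                              # 1) is p > .05?
F.lextale$conf.int[1] < 1 & F.lextale$conf.int[2] > 1 # 2) does the CI include 1?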
", "Label": "Statistical Test", "Source": "https://osf.io/938ye/", "File": "Apriori_group_differences.R" }, { "ID": 1060, "Comment": "c) Wilcoxon rank-sum test (= Mann-Whitney U test) [nonparametric alternative if normality is violated, or if you have ordinal data]. Continuous variables: ratio-scaled lextale", "Code": "wilcox.lextale <- wilcox.test(lextale~learningtype, data=pp.explicit.incidental, paired=FALSE);; wilcox.lextale", "Label": "Statistical Test", "Source": "https://osf.io/938ye/", "File": "Apriori_group_differences.R" }, { "ID": 1061, "Comment": "Self-rated variables: ordinal proficiency_overall", "Code": "wilcox.proficiency_overall <- wilcox.test(proficiency_overall~learningtype, data=pp.explicit.incidental, paired=FALSE);; wilcox.proficiency_overall", "Label": "Data Variable", "Source": "https://osf.io/938ye/", "File": "Apriori_group_differences.R" }, { "ID": 1062, "Comment": "Effect size (approximate) for the Wilcoxon rank-sum test. Write the function (from Field, Miles & Field 2012, p. 665)", "Code": "rWilcox <- function(wilcoxModel, N){ z <- qnorm(wilcoxModel$p.value/2) r <- z/sqrt(N) cat(wilcoxModel$data.name, \"Effect size, r = \", r) }", "Label": "Statistical Modeling", "Source": "https://osf.io/938ye/", "File": "Apriori_group_differences.R" }, { "ID": 1063, "Comment": "create a new variable for the IRV (intra-individual response variability)", "Code": "data$irv <- irv( dplyr::select( data, belonging, control, meaningful_existence, self_esteem ), na.rm = TRUE, split = FALSE)", "Label": "Data Variable", "Source": "https://osf.io/bhrwx/", "File": "script_for_the_analysis_of_game_data.R" }, { "ID": 1064, "Comment": "display the correlations as a histogram and heatmap", "Code": "cor_matrix_half <- cor_matrix[upper.tri(cor_matrix)] mean(cor_matrix_half) sd(cor_matrix_half) hist(as.vector(cor_matrix_half), breaks=24, cex.axis=2) # Note: Novich et al. suppressed correlations of r<.4 in their visualisation
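# Added sketch (assumption, not from the source file): replicating the Novich et
# al. convention would mean zeroing weak correlations before calling heatmap().
cor_matrix_thresh <- cor_matrix
cor_matrix_thresh[abs(cor_matrix_thresh) < 0.4] <- 0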
heatmap(x = cor_matrix, symm = TRUE)", "Label": "Visualization", "Source": "https://osf.io/r24vb/", "File": "clustering_syn_types.R" }, { "ID": 1065, "Comment": "Create stringent dataset: exclude people with strange response patterns. These are identified as subclusters in the inclusive dendrogram that group together", "Code": "inclusive_data$weird_responses = inclusive_data$body_postures_shape + inclusive_data$punctuation_shape + inclusive_data$letter_shape + inclusive_data$number_shape + inclusive_data$people_name_shape + inclusive_data$english_word_shape + inclusive_data$foreign_word_shape + inclusive_data$tastes_taste + inclusive_data$smells_smell + inclusive_data$noises_noise + inclusive_data$music_music + inclusive_data$colour_colour + inclusive_data$shapes_shape + inclusive_data$smells_taste + inclusive_data$tastes_smell + inclusive_data$voices_noise + inclusive_data$voices_music + inclusive_data$noises_music + inclusive_data$music_noise hist(inclusive_data$weird_responses) table(inclusive_data$weird_responses)", "Label": "Data Variable", "Source": "https://osf.io/r24vb/", "File": "clustering_syn_types.R" }, { "ID": 1066, "Comment": "gives the prevalence of each cluster of data", "Code": "colMeans(short_data) N_types <- matrix(apply(short_data[,1:i], 1, sum, na.rm=TRUE)) stringent_N_clusters <- cbind(stringent_N_clusters,N_types) }", "Label": "Data Variable", "Source": "https://osf.io/r24vb/", "File": "clustering_syn_types.R" }, { "ID": 1067, "Comment": "create a continuous time metric (seconds since midnight of day 0)", "Code": "dat$sec_midnight.day0 <- dat$start.secmidnight + dat$day * 86400", "Label": "Data Variable", "Source": "https://osf.io/6krj7/", "File": "01_addlagvars.R" }, { "ID": 1068, "Comment": "create a variable indicating whether the current observation is the first observation of the day (\"morning\"); this variable is 1 if the current observation was obtained on a different day than the previous observation", "Code": "dat[!is.na(dat$lagday) & dat$day!=dat$lagday, \"morning\"] <- 1 dat[!is.na(dat$lagday) & dat$day==dat$lagday, \"morning\"] <- 0", "Label": "Data Variable", "Source": "https://osf.io/6krj7/", "File": "01_addlagvars.R" }, { "ID": 1069, "Comment": "create continuous time variables in hours and seconds", "Code": "data$time_to_hours = lubridate::hour(data$timestamp.corrected) + lubridate::minute(data$timestamp.corrected)/60 + lubridate::second(data$timestamp.corrected)/3600 data$time_to_sec = data$time_to_hours*60*60 return(data) }", "Label": "Data Variable", "Source": "https://osf.io/b7krz/", "File": "timestamp_correction.R" }, { "ID": 1070, "Comment": "set font size for facet labels", "Code": "strip.text.x = element_text(size = font_size_facets_x), strip.text.y = element_text(size = font_size_facets_y),", "Label": "Visualization", "Source": "https://osf.io/dpkyb/", "File": "my_ggplot_themes.R" } ]