NavelGazer <- function(month = NULL, year = NULL, entire = FALSE,
  list = "r-help", n = 50, plot = TRUE) {
  ## Tally the most prolific posters to an R mailing list by scraping the
  ## monthly author index pages of the ETH Zurich pipermail archives.
  ##
  ## Args:
  ##   month:  character vector of full month names (e.g. "January");
  ##           defaults to the current month.
  ##   year:   character or numeric vector of four-digit years; defaults
  ##           to the current year.
  ##   entire: if TRUE, use all twelve months (overrides 'month').
  ##   list:   mailing-list name as it appears in the archive URL.
  ##   n:      maximum number of top posters to report per month.
  ##   plot:   if TRUE, draw a lattice dotplot for each month.
  ##
  ## Returns: a data frame with columns Posts, Name, year, and month.
  ##
  ## Requires the zoo (as.yearmon), RCurl (getURL), and lattice (dotplot)
  ## packages to be loaded by the caller.
  ##
  ## Credits: Ben Bolker came up with most of the code; Henrique
  ## Dallazuanna provided an edit to the download step; Brian Diggs
  ## provided capwords() to properly count Peter Dalgaard; Joshua Wiley
  ## adapted all of it to one function.

  if (is.null(month)) {
    month <- format(Sys.Date(), format = "%B")
  }
  if (isTRUE(entire)) {
    ## All twelve month names, generated locale-aware from a reference year
    month <- unique(months(as.Date(1:365, "2000-01-01")))
  }
  if (is.null(year)) {
    year <- format(Sys.Date(), format = "%Y")
  }

  ## Cartesian product of years and months, e.g. "2011-January"
  times <- unlist(lapply(year, function(y) paste(y, month, sep = "-")))

  ## Order chronologically and drop months that have not happened yet
  times <- sort(as.yearmon(times, "%Y-%B"))
  current <- as.yearmon(Sys.Date(), "%Y-%m")
  times <- format(times[times <= current], "%Y-%B")

  ## Strip the HTML around an author's name on the archive index page
  ## (names appear inside <I>...</I> tags)
  namefun <- function(x) {
    gsub("\\n", "", gsub("^.+<I>", "", gsub("</I>.+$", "", x)))
  }

  ## Capitalize the first letter of each word so differently-cased
  ## spellings of the same poster are counted together
  capwords <- function(s, strict = FALSE) {
    cap <- function(s) paste(toupper(substring(s, 1, 1)),
                         {s <- substring(s, 2); if (strict) tolower(s) else s},
                         sep = "", collapse = " ")
    sapply(strsplit(s, split = " "), cap, USE.NAMES = !is.null(names(s)))
  }

  ## Download the author index for one month and tabulate post counts
  grabber <- function(month, list, n) {
    baseurl <- "https://stat.ethz.ch/pipermail/"
    z <- getURL(paste(baseurl, list, "/", month, "/author.html", sep = ""),
                ssl.verifypeer = FALSE)
    zz <- strsplit(z, "<LI>")[[1]]
    ## First two and last chunks are page scaffolding, not authors
    cnames <- capwords(sapply(zz[3:(length(zz) - 1)], namefun))
    rr <- rev(sort(table(cnames)))
    ## Take at most n posters; the original rr[1:n] padded the result
    ## with NA rows whenever a month had fewer than n distinct posters
    output <- rr[seq_len(min(n, length(rr)))]
    data.frame(Posts = as.vector(output),
      Name = strtrim(names(output), width = 40))
  }

  ## Draw one dotplot of posts-per-poster for each year/month combination
  plotter <- function(dat) {
    dat <- by(dat, list(dat$month, dat$year), `[`)
    if (length(dat) > 1) {
      ## Prompt between plots; restore device settings on exit
      old.par <- par(no.readonly = TRUE)
      on.exit(par(old.par))
      par("ask" = TRUE)
    }
    for (i in seq_along(dat)) {
      if (is.null(dat[[i]])) next
      x <- dat[[i]]
      x <- x[order(x$Posts), ]
      ## Fix factor levels so the dotplot is ordered by post count
      x$Name <- factor(x$Name, levels = x$Name)
      print(dotplot(Name ~ Posts, data = x, xlab = "Number of posts",
                    main = paste(x$year[1], x$month[1])))
    }
    invisible()
  }

  ## Fetch and label the counts for every requested month
  numbers <- lapply(times, function(x) {
    data.frame(grabber(month = x, list = list, n = n),
      year = gsub("-.+", "", x), month = gsub(".+-", "", x))
  })
  numbers <- do.call("rbind", numbers)

  if (plot) {
    plotter(dat = numbers)
  }

  numbers
}

## I wrote this function because I was tired of manually writing
## code every time I wanted to create a graph of a distribution
## with the alpha region coloured.  I tried to make the function
## smart, but the user still needs to use common sense, particularly
## with things like the binomial distribution.

PlotDist <- function(alpha, from = -5, to = 5, n = 1000, filename = NULL,
              alternative = c("two.tailed", "greater", "lesser"),
              distribution = c("normal", "t", "F", "chisq", "binomial"),
              colour = "black", fill = "skyblue2", ...) {
  ## Plot a probability density curve with its alpha (rejection) region
  ## shaded, on a new screen device or into a PDF file.
  ##
  ## Args:
  ##   alpha:        significance level for the shaded region(s).
  ##   from, to:     x-axis range over which to draw the curve.
  ##   n:            number of points passed to curve().
  ##   filename:     if non-NULL, write the plot to this PDF file instead
  ##                 of opening a new screen device.
  ##   alternative:  which tail(s) to shade ("two.tailed" splits alpha
  ##                 across both tails).
  ##   distribution: which distribution family to draw.
  ##   colour, fill: curve colour and shading fill colour.
  ##   ...:          extra distribution parameters forwarded to the
  ##                 density and quantile functions (e.g. df, mean, sd,
  ##                 size, prob).
  ##
  ## Returns: NULL, invisibly; called for its plotting side effect.
  ##
  ## Note: for discrete distributions (binomial) the shaded polygons are
  ## an approximation; choose sensible 'from'/'to' values.

  alternative <- match.arg(alternative)
  ## BUG FIX: 'distribution' was never validated with match.arg(), so a
  ## call relying on the default passed the whole length-5 candidate
  ## vector to switch(), which errors ("EXPR must be a length 1 vector").
  distribution <- match.arg(distribution)

  ## Per-tail alpha given the hypothesis type
  alt.alpha <- switch(alternative,
                      two.tailed = alpha/2,
                      greater = alpha,
                      lesser = alpha)
  ## Pick the density and quantile functions for the distribution
  MyDen <- switch(distribution,
                   normal = dnorm,
                   t = dt,
                   F = df,
                   chisq = dchisq,
                   binomial = dbinom)
  MyDist <- switch(distribution,
                    normal = qnorm,
                    t = qt,
                    F = qf,
                    chisq = qchisq,
                    binomial = qbinom)

  ## Critical values; additional parameters arrive via '...'
  crit.lower <- MyDist(p = alt.alpha, lower.tail = TRUE, ...)
  crit.upper <- MyDist(p = alt.alpha, lower.tail = FALSE, ...)

  ## Coordinates of the shaded lower-tail polygon (closed back to y = 0)
  cord.x1 <- c(from, seq(from = from, to = crit.lower,
                         length.out = 100), crit.lower)
  cord.y1 <- c(0, MyDen(x = seq(from = from, to = crit.lower,
                           length.out = 100), ...), 0)

  ## Coordinates of the shaded upper-tail polygon
  cord.x2 <- c(crit.upper, seq(from = crit.upper, to = to,
                               length.out = 100), to)
  cord.y2 <- c(0, MyDen(x = seq(from = crit.upper, to = to,
                           length.out = 100), ...), 0)

  ## Choose the graphics device; ensure the PDF device is closed even if
  ## a later plotting call fails
  if (is.null(filename)) {
    dev.new()
  } else {
    pdf(file = filename)
    on.exit(dev.off())
  }

  ## Draw the density curve
  curve(MyDen(x, ...), from = from, to = to,
    n = n, col = colour, lty = 1, lwd = 2,
    ylab = "Density", xlab = "Values")

  ## Shade the tail(s) implied by the alternative hypothesis
  if (!identical(alternative, "greater")) {
    polygon(x = cord.x1, y = cord.y1, col = fill)
  }
  if (!identical(alternative, "lesser")) {
    polygon(x = cord.x2, y = cord.y2, col = fill)
  }

  invisible(NULL)
}

RegSurfaceDemo <- function(formula, data, xlim = NULL, ylim = NULL,
                           zlim = NULL, resolution = 10) {
  ## Fit a two-predictor linear model and display the raw data together
  ## with the fitted regression surface in an rgl 3d scatter plot.
  ##
  ## Args:
  ##   formula:    model formula of the form response ~ x + y.
  ##   data:       data frame containing the model variables.
  ##   xlim, ylim, zlim: optional axis limits; computed from the data
  ##               (expanded 15% away from zero) when NULL.
  ##   resolution: number of grid points per axis for the surface.
  ##
  ## Returns: summary(model).  Requires the rgl package.

  ## Variable names in formula order: response first, then predictors
  vars <- attr(terms(formula), "variables")
  varnames <- sapply(vars, function(v)
    paste(deparse(v, width.cutoff = 500), collapse = " "))[-1]

  ## Evaluate the model variables inside 'data'
  mdata <- eval(vars, data)
  names(mdata) <- varnames

  ## Expand a variable's range by 15% away from zero on each side
  ranger <- function(v) {
    rv <- range(v, na.rm = TRUE)
    lo <- if (identical(sign(rv[1]), 1)) .85 else 1.15
    hi <- if (identical(sign(rv[2]), 1)) 1.15 else .85
    rv * c(lo, hi)
  }

  if (is.null(zlim)) zlim <- ranger(mdata[[1]])
  if (is.null(xlim)) xlim <- ranger(mdata[[2]])
  if (is.null(ylim)) ylim <- ranger(mdata[[3]])

  ## Fit the model (the prediction closure below captures it lexically)
  model <- lm(formula = formula, data = data)

  ## Evaluate the fitted surface at one (x, y) grid point; passed to outer()
  surface.fun <- function(x, y) {
    grid <- data.frame(x, y)
    colnames(grid) <- varnames[2:3]
    predict(model, newdata = grid)
  }

  ## Build the X/Y grid and the matrix of fitted Z values
  X <- seq(from = xlim[1], to = xlim[2], length.out = resolution)
  Y <- seq(from = ylim[1], to = ylim[2], length.out = resolution)
  Z <- outer(X, Y, surface.fun)

  ## 3d scatter plot of the data, then overlay the regression surface;
  ## ignoreExtent keeps the surface from rescaling the axes
  open3d()
  plot3d(x = mdata[[2]], y = mdata[[3]], z = mdata[[1]],
         xlab = varnames[2], ylab = varnames[3], zlab = varnames[1],
         xlim = xlim, ylim = ylim, zlim = zlim)
  par3d(ignoreExtent = TRUE)
  surface3d(X, Y, Z, col = "blue", alpha = .6)
  par3d(ignoreExtent = FALSE)
  summary(model)
}

### View Colors in R ###
## Returns the R colors whose names contain the characters passed in the
## argument, along with their RGB codes, and creates sample plots to view
## the actual colors.
ColorViewer <- function(color) {
  ## Show all built-in R colors whose names match 'color' (interpreted as
  ## a case-sensitive regular expression).  Draws sample plots in batches
  ## of up to ten colors and prints (and invisibly returns) the matching
  ## names paired with their deparsed RGB triples.

  ## Plot one batch of colors as labelled vertical bars
  ColorGraphing <- function(x) {
    old.par <- par(no.readonly = TRUE)
    on.exit(par(old.par))
    n <- length(x)
    max.chars <- max(nchar(x))
    ## Widen the bottom margin so long color names fit below the axis
    ## ('if' rather than ifelse(): the condition is scalar)
    mar.adjust <- if (max.chars > 9) (max.chars - 10) / 2 else 0
    par(ask = TRUE, mar = par("mar") + c(mar.adjust + .5, 0, 0, 0))
    plot.new()
    plot.window(xlim = c(0, n + 2), ylim = c(0, 5), asp = 1/2)
    axis(side = 1, at = seq_along(x), labels = x, las = 3)
    for (i in seq_along(x)) {
      abline(v = i, col = x[i], lty = 1, lwd = 5)
    }
    title(main = paste("R Colors with '", color, "' in their names \n",
            "Each bar has the name R recognizes printed below it", sep = ""))
  }

  col.results <- grep(color, colors(), value = TRUE, fixed = FALSE,
                      ignore.case = FALSE)
  ## Guard the no-match case: col2rgb() errors on an empty vector
  if (length(col.results) == 0) {
    message("No colors match '", color, "'")
    return(invisible(character(0)))
  }
  rgb.col.results <- col2rgb(col.results)

  ## Plot in batches of ten.  BUG FIX: the original index arithmetic
  ## ((0:floor(len/10))*10 then [index+1:10]) padded the final batch with
  ## NAs, and produced an entirely-NA extra batch whenever the number of
  ## matches was a multiple of ten.
  batches <- split(col.results, ceiling(seq_along(col.results) / 10))
  for (batch in batches) {
    ColorGraphing(batch)
  }

  ## Pair each name with its RGB triple; paste() deparses the list
  ## elements, giving strings like "aliceblue c(red = 240L, ...)"
  temp <- NULL
  for (i in seq_along(col.results)) {
    temp[[col.results[i]]] <- rgb.col.results[, i]
  }
  temp2 <- paste(col.results, temp)
  print(temp2)
}

### Plot a Correlation Matrix ###
## Plots a correlation matrix and tries to order variables sensibly using
## hierarchical clustering.
## If 'coverage' is passed, also adds a bubble plot with the area
## proportional to the proportion of data present for any given cell.
## Defaults are set, but it is possible to use a named list of quote()d
## ggplot calls to override all defaults; this is not expected for typical
## use.  'main', 'points', and 'text' in particular rely on internal
## variable names, but the labels, the gradient colour, and the area
## scaling can be adjusted more safely.
corplot <- function(x, coverage, points = TRUE, digits = 2, plot = TRUE, ..., control.grobs = list()) {
  ## Plot a correlation matrix as a ggplot2 tile plot, ordering the
  ## variables via hierarchical clustering on (1 - correlation) distances.
  ##
  ## Args:
  ##   x:             square correlation matrix with column names.
  ##   coverage:      optional matrix (same shape as x) giving the
  ##                  proportion of data present per cell; when supplied
  ##                  with points = TRUE, bubbles sized by coverage are
  ##                  overlaid instead of the correlation text.
  ##   points:        overlay coverage bubbles (needs 'coverage')?
  ##   digits:        decimals for the printed correlation labels.
  ##   plot:          print the plot before returning it?
  ##   ...:           forwarded to hclust().
  ##   control.grobs: named list of quote()d ggplot calls overriding the
  ##                  defaults below (advanced use).
  ##
  ## Returns: the ggplot object, invisibly.
  ##
  ## Requires reshape2 (melt) and ggplot2 to be loaded by the caller.

  hc <- hclust(as.dist(1 - x), ...)
  ## Variable order chosen by the clustering
  n <- colnames(x)[hc$order]
  mx <- melt(x, value.name = "r")
  mx$Var1 <- factor(mx[, "Var1"], levels = n)
  mx$Var2 <- factor(mx[, "Var2"], levels = n)
  ## Format correlations without the leading zero (".53", "-.53").
  ## BUG FIX: the original gsub(".+\\.", ".", ...) also swallowed the
  ## minus sign, so negative correlations were labelled as positive
  ## (and "1.00" became ".00").
  mx$correlation <- sub("(-?)0\\.", "\\1.", format(round(mx[, "r"],
    digits = digits), digits = digits, nsmall = digits))
  ## Blank out the uninformative diagonal labels
  mx$correlation[mx[, "Var1"] == mx[, "Var2"]] <- ""
  if (!missing(coverage)) {
    mx$coverage <- melt(coverage, value.name = "coverage")[, "coverage"]
  }

  ## Default plot components; any same-named entry in control.grobs wins.
  ## NOTE(review): scale_area() was removed from recent ggplot2 versions;
  ## override 'area' with quote(scale_size_area()) there.
  defaults <- list(
    main = quote(ggplot(mx, aes(x = Var1, y = Var2, fill = r))),
    tiles = quote(geom_tile()),
    labels = quote(labs(list(x = NULL, y = NULL))),
    gradient = quote(scale_fill_gradientn(name = "Correlation",
      guide = guide_colorbar(),
      colours = c("blue", "white", "red"), limits = c(-1, 1),
      breaks = c(-.99, -.5, 0, .5, .99), labels = c("-1", "-.5", "0", "+.5", "+1"))),
    points = quote(geom_point(aes(size = coverage))),
    area = quote(scale_area()),
    text = quote(geom_text(aes(label = correlation), size = 3, vjust = 0)))

  i <- names(defaults)[!names(defaults) %in% names(control.grobs)]
  control.grobs[i] <- defaults[i]

  ## Assemble the plot expression.  (The original computed an unused base
  ## expression here that was immediately overwritten; that dead code is
  ## removed.)  '&&' rather than '&': both operands are scalar.
  if (points && !missing(coverage)) {
    p <- substitute(main + tiles + labels + gradient + points + area, control.grobs)
  } else {
    p <- substitute(main + tiles + labels + gradient + text, control.grobs)
  }

  p <- eval(p)

  if (plot) print(p)
  invisible(p)
}

# corplot(x = s$sSigma, coverage = s$coverage)
# corplot(x = s$sSigma, coverage = s$coverage, control.grobs = list(area = quote(scale_area(limits = c(0, 1)))))
