# callingcards/scripts/quantify_regions.R
# Provenance: commit 860378b (cmatkhan) -- "adding batches 7488 and 7489,
# and casting genome_map columns to consistent datatypes"
# Calling Cards Region Quantification
# Quantifies enrichment of insertions/hops in genomic regions
#
# This script:
# 1. Counts insertions overlapping each genomic region (experiment and background)
# 2. Calculates enrichment scores
# 3. Computes Poisson and hypergeometric p-values
#
# Works with any data in BED3+ format (chr, start, end, ...)
# For calling cards: each insertion is counted once regardless of depth
#
# COORDINATE SYSTEMS:
# - Input BED files are assumed to be 0-indexed, half-open [start, end)
# - GenomicRanges uses 1-indexed, closed [start, end]
# - Conversion: GR_start = BED_start + 1, GR_end = BED_end
#
# Usage:
# Rscript quantify_regions.R
library(tidyverse)
library(GenomicRanges)
# Statistical Functions ---------------------------------------------------
#' Calculate enrichment (calling cards effect)
#'
#' @param total_background_hops Total number of hops in background (scalar or vector)
#' @param total_experiment_hops Total number of hops in experiment (scalar or vector)
#' @param background_hops Number of hops in background per region (vector)
#' @param experiment_hops Number of hops in experiment per region (vector)
#' @param pseudocount Pseudocount to avoid division by zero (default: 0.1)
#' @return Enrichment values
#' Calculate enrichment (calling cards effect)
#'
#' Enrichment is the experiment hop rate in a region divided by the
#' (pseudocounted) background hop rate, each normalized by its library total.
#'
#' @param total_background_hops Total hops in background (scalar or vector)
#' @param total_experiment_hops Total hops in experiment (scalar or vector)
#' @param background_hops Background hops per region (vector)
#' @param experiment_hops Experiment hops per region (vector)
#' @param pseudocount Added to background counts to avoid division by zero
#'   (default: 0.1)
#' @return Numeric vector of enrichment values, one per region
calculate_enrichment <- function(total_background_hops,
                                 total_experiment_hops,
                                 background_hops,
                                 experiment_hops,
                                 pseudocount = 0.1) {
  # All four inputs must be numeric (c() would coerce otherwise)
  combined <- c(total_background_hops, total_experiment_hops,
                background_hops, experiment_hops)
  if (!all(is.numeric(combined))) {
    stop("All inputs must be numeric")
  }

  n_regions <- length(background_hops)
  if (length(experiment_hops) != n_regions) {
    stop("background_hops and experiment_hops must be the same length")
  }

  # Broadcast scalar library totals to one value per region
  if (length(total_background_hops) == 1) {
    total_background_hops <- rep(total_background_hops, n_regions)
  }
  if (length(total_experiment_hops) == 1) {
    total_experiment_hops <- rep(total_experiment_hops, n_regions)
  }
  if (length(total_background_hops) != n_regions ||
      length(total_experiment_hops) != n_regions) {
    stop("All input vectors must be the same length or scalars")
  }

  # Ratio of normalized rates; pseudocount only on the background side
  experiment_rate <- experiment_hops / total_experiment_hops
  background_rate <- (background_hops + pseudocount) / total_background_hops
  enrichment <- experiment_rate / background_rate

  # Guard against pathological outputs (negative inputs, 0/0, Inf)
  if (any(enrichment < 0, na.rm = TRUE)) {
    stop("Enrichment values must be non-negative")
  }
  if (any(is.na(enrichment))) {
    stop("Enrichment values must not be NA")
  }
  if (any(is.infinite(enrichment))) {
    stop("Enrichment values must not be infinite")
  }
  enrichment
}
#' Calculate Poisson p-values
#'
#' @param total_background_hops Total number of hops in background (scalar or vector)
#' @param total_experiment_hops Total number of hops in experiment (scalar or vector)
#' @param background_hops Number of hops in background per region (vector)
#' @param experiment_hops Number of hops in experiment per region (vector)
#' @param pseudocount Pseudocount for lambda calculation (default: 0.1)
#' @param ... additional arguments to `ppois`. note that lower tail is set to FALSE
#' already
#' @return Poisson p-values
#' Calculate Poisson p-values
#'
#' Models the experiment hop count in each region as Poisson with rate equal
#' to the pseudocounted background count scaled by the experiment/background
#' library-size ratio, and reports the upper-tail probability P(X >= observed).
#'
#' @param total_background_hops Total hops in background (scalar or vector)
#' @param total_experiment_hops Total hops in experiment (scalar or vector)
#' @param background_hops Background hops per region (vector)
#' @param experiment_hops Experiment hops per region (vector)
#' @param pseudocount Added to background counts so lambda > 0 (default: 0.1)
#' @param ... additional arguments to `ppois`. note that lower tail is set to
#'   FALSE already
#' @return Numeric vector of upper-tail Poisson p-values
calculate_poisson_pval <- function(total_background_hops,
                                   total_experiment_hops,
                                   background_hops,
                                   experiment_hops,
                                   pseudocount = 0.1,
                                   ...) {
  # All four inputs must be numeric
  combined <- c(total_background_hops, total_experiment_hops,
                background_hops, experiment_hops)
  if (!all(is.numeric(combined))) {
    stop("All inputs must be numeric")
  }

  n_regions <- length(background_hops)
  if (length(experiment_hops) != n_regions) {
    stop("background_hops and experiment_hops must be the same length")
  }

  # Broadcast scalar library totals to one value per region
  if (length(total_background_hops) == 1) {
    total_background_hops <- rep(total_background_hops, n_regions)
  }
  if (length(total_experiment_hops) == 1) {
    total_experiment_hops <- rep(total_experiment_hops, n_regions)
  }
  if (length(total_background_hops) != n_regions ||
      length(total_experiment_hops) != n_regions) {
    stop("All input vectors must be the same length or scalars")
  }

  # Expected experiment hops: background count scaled by library-size ratio,
  # pseudocounted so lambda is never exactly zero
  scale_factor <- total_experiment_hops / total_background_hops
  lambda <- (background_hops + pseudocount) * scale_factor

  # P(X >= x) = P(X > x - 1), taken directly from the upper tail
  ppois(experiment_hops - 1, lambda = lambda, lower.tail = FALSE, ...)
}
#' Calculate hypergeometric p-values
#'
#' @param total_background_hops Total number of hops in background (scalar or vector)
#' @param total_experiment_hops Total number of hops in experiment (scalar or vector)
#' @param background_hops Number of hops in background per region (vector)
#' @param experiment_hops Number of hops in experiment per region (vector)
#' @param ... additional arguments to phyper. Note that lower tail is set to
#' false already
#' @return Hypergeometric p-values
#' Calculate hypergeometric p-values
#'
#' Urn model: all hops (experiment + background) are the balls, experiment
#' hops are the white balls, and the hops falling in a region are the draws.
#' The p-value is the upper-tail probability of drawing at least the observed
#' number of experiment hops in that region.
#'
#' @param total_background_hops Total hops in background (scalar or vector)
#' @param total_experiment_hops Total hops in experiment (scalar or vector)
#' @param background_hops Background hops per region (vector)
#' @param experiment_hops Experiment hops per region (vector)
#' @param ... additional arguments to phyper. Note that lower tail is set to
#'   false already; `log.p = TRUE` returns log p-values.
#' @return Vector of upper-tail hypergeometric p-values (log p-values when
#'   `log.p = TRUE` is supplied)
calculate_hypergeom_pval <- function(total_background_hops,
                                     total_experiment_hops,
                                     background_hops,
                                     experiment_hops,
                                     ...) {
  # Input validation
  if (!all(is.numeric(c(total_background_hops, total_experiment_hops,
                        background_hops, experiment_hops)))) {
    stop("All inputs must be numeric")
  }
  # Get the length of the region vectors
  n_regions <- length(background_hops)
  if (length(experiment_hops) != n_regions) {
    stop("background_hops and experiment_hops must be the same length")
  }
  # Recycle scalar totals to match region length if needed
  if (length(total_background_hops) == 1) {
    total_background_hops <- rep(total_background_hops, n_regions)
  }
  if (length(total_experiment_hops) == 1) {
    total_experiment_hops <- rep(total_experiment_hops, n_regions)
  }
  if (length(total_background_hops) != n_regions ||
      length(total_experiment_hops) != n_regions) {
    stop("All input vectors must be the same length or scalars")
  }
  # Hypergeometric parameters
  # M: total number of balls (total hops)
  M <- total_background_hops + total_experiment_hops
  # n: number of white balls (experiment hops)
  n <- total_experiment_hops
  # N: number of draws (hops in region)
  N <- background_hops + experiment_hops
  # x - 1 so that phyper(..., lower.tail = FALSE) gives P(X >= x)
  x <- experiment_hops - 1
  # Regions with no hops carry no evidence, so their p-value defaults to 1.
  # BUG FIX: when log.p = TRUE is forwarded through `...`, the default must
  # be log(1) = 0, not 1 -- the old code mixed log and raw probability
  # scales in the same output vector for zero-hop regions.
  log_p <- isTRUE(list(...)[["log.p"]])
  pval <- rep(if (log_p) 0 else 1, length(M))
  # Only score regions with at least one ball and one draw
  valid <- (M >= 1) & (N >= 1)
  if (any(valid)) {
    pval[valid] <- phyper(x[valid], n[valid], M[valid] - n[valid], N[valid],
                          lower.tail = FALSE, ...)
  }
  return(pval)
}
# GRanges Conversion Functions --------------------------------------------
#' Convert BED format data frame to GRanges
#'
#' Handles coordinate system conversion from 0-indexed half-open BED format
#' to 1-indexed closed GenomicRanges format
#'
#' @param bed_df Data frame with chr, start, end columns in BED format (0-indexed, half-open)
#' @param zero_indexed Logical, whether input is 0-indexed (default: TRUE)
#' @return GRanges object
#' Convert BED format data frame to GRanges
#'
#' BED coordinates are 0-indexed, half-open [start, end); GenomicRanges is
#' 1-indexed, closed [start, end]. Converting only requires shifting start
#' by +1 -- the end coordinate is the same in both systems.
#'
#' @param bed_df Data frame with chr, start, end columns in BED format
#'   (0-indexed, half-open)
#' @param zero_indexed Logical, whether input is 0-indexed (default: TRUE)
#' @return GRanges object (strand "*"; extra columns carried as mcols)
bed_to_granges <- function(bed_df, zero_indexed = TRUE) {
  required_cols <- c("chr", "start", "end")
  if (!all(required_cols %in% names(bed_df))) {
    stop("bed_df must have columns: chr, start, end")
  }
  # +1 shift on start converts 0-indexed half-open to 1-indexed closed
  start_offset <- if (zero_indexed) 1 else 0
  # Strand-agnostic ranges: calling cards insertions are counted regardless
  # of strand
  gr <- GRanges(
    seqnames = bed_df$chr,
    ranges = IRanges(start = bed_df$start + start_offset, end = bed_df$end),
    strand = "*"
  )
  # Carry any non-coordinate columns over as metadata
  meta_cols <- setdiff(names(bed_df), c(required_cols, "strand"))
  if (length(meta_cols) > 0) {
    mcols(gr) <- bed_df[, meta_cols, drop = FALSE]
  }
  gr
}
#' Deduplicate insertions in GRanges object
#'
#' For calling cards, if an insertion is found at the same coordinate,
#' only one record is retained
#'
#' @param gr GRanges object
#' @return Deduplicated GRanges object
#' Deduplicate insertions in GRanges object
#'
#' For calling cards, an insertion appearing at the same coordinate more than
#' once is counted a single time. Duplicate detection uses the bare ranges
#' only (granges() strips metadata), so records differing only in mcols are
#' still collapsed.
#'
#' @param gr GRanges object
#' @return GRanges object with duplicate coordinates removed
deduplicate_granges <- function(gr) {
  keep <- !duplicated(granges(gr))
  gr[keep]
}
#' Count overlaps between insertions and regions
#'
#' @param insertions_gr GRanges object with insertions
#' @param regions_gr GRanges object with regions
#' @param deduplicate Whether to deduplicate insertions (default: TRUE)
#' @return Integer vector of overlap counts per region
#' Count overlaps between insertions and regions
#'
#' Optionally collapses duplicate insertion coordinates first, then returns
#' the number of insertions overlapping each region.
#'
#' @param insertions_gr GRanges object with insertions
#' @param regions_gr GRanges object with regions
#' @param deduplicate Whether to deduplicate insertions (default: TRUE)
#' @return Integer vector of overlap counts, one element per region
count_overlaps <- function(insertions_gr, regions_gr, deduplicate = TRUE) {
  if (deduplicate) {
    n_original <- length(insertions_gr)
    insertions_gr <- deduplicate_granges(insertions_gr)
    n_kept <- length(insertions_gr)
    # Report only when something was actually removed
    if (n_original != n_kept) {
      message(" Deduplicated: ", n_original, " -> ", n_kept,
              " (removed ", n_original - n_kept, " duplicates)")
    }
  }
  # countOverlaps is indexed by its first argument: one count per region
  countOverlaps(regions_gr, insertions_gr)
}
# Main Analysis Function --------------------------------------------------
#' Call peaks/quantify regions using calling cards approach
#'
#' @param experiment_gr GRanges object with experiment insertions
#' @param background_gr GRanges object with background insertions
#' @param regions_gr GRanges object with regions to quantify
#' @param deduplicate_experiment Whether to deduplicate experiment insertions (default: TRUE)
#' @param pseudocount Pseudocount for calculations (default: 0.1)
#' @return GRanges object with regions and statistics as metadata columns
#' Call peaks/quantify regions using calling cards approach
#'
#' Counts experiment and background insertions per region, then attaches
#' enrichment, Poisson and hypergeometric p-values (raw and log scale), and
#' FDR-adjusted q-values as metadata columns on the region GRanges.
#'
#' @param experiment_gr GRanges object with experiment insertions
#' @param background_gr GRanges object with background insertions
#' @param regions_gr GRanges object with regions to quantify
#' @param deduplicate_experiment Whether to deduplicate experiment insertions
#'   (default: TRUE; background is never deduplicated)
#' @param pseudocount Pseudocount for calculations (default: 0.1)
#' @return GRanges object with regions and statistics as metadata columns
enrichment_analysis <- function(experiment_gr,
                                background_gr,
                                regions_gr,
                                deduplicate_experiment = TRUE,
                                pseudocount = 0.1) {
  message("Starting enrichment analysis...")
  # Validate inputs
  if (!inherits(experiment_gr, "GRanges")) {
    stop("experiment_gr must be a GRanges object")
  }
  if (!inherits(background_gr, "GRanges")) {
    stop("background_gr must be a GRanges object")
  }
  if (!inherits(regions_gr, "GRanges")) {
    stop("regions_gr must be a GRanges object")
  }
  # FIX: deduplicate the experiment ONCE up front. The previous version did
  # the dedup twice -- inside count_overlaps and again when computing the
  # library total -- repeating the same work on large insertion sets. Both
  # the per-region counts and the total now come from the same object.
  message("Counting experiment overlaps...")
  if (deduplicate_experiment) {
    message(" Deduplication: ON")
    n_before <- length(experiment_gr)
    experiment_gr <- deduplicate_granges(experiment_gr)
    if (n_before != length(experiment_gr)) {
      message(" Deduplicated: ", n_before, " -> ", length(experiment_gr),
              " (removed ", n_before - length(experiment_gr), " duplicates)")
    }
  } else {
    message(" Deduplication: OFF")
  }
  experiment_counts <- count_overlaps(
    experiment_gr, regions_gr,
    deduplicate = FALSE
  )
  # Count overlaps for background (never deduplicated)
  message("Counting background overlaps...")
  message(" Deduplication: OFF (background should not be deduplicated)")
  background_counts <- count_overlaps(
    background_gr, regions_gr,
    deduplicate = FALSE
  )
  # Library totals; experiment total already reflects any deduplication
  total_experiment_hops <- length(experiment_gr)
  total_background_hops <- length(background_gr)
  message("Total experiment hops: ", total_experiment_hops)
  message("Total background hops: ", total_background_hops)
  if (total_experiment_hops == 0) {
    stop("Experiment data is empty")
  }
  if (total_background_hops == 0) {
    stop("Background data is empty")
  }
  # Add counts and totals as metadata columns
  mcols(regions_gr)$experiment_hops <- as.integer(experiment_counts)
  mcols(regions_gr)$background_hops <- as.integer(background_counts)
  mcols(regions_gr)$total_experiment_hops <- as.integer(total_experiment_hops)
  mcols(regions_gr)$total_background_hops <- as.integer(total_background_hops)
  # Calculate statistics
  message("Calculating enrichment scores...")
  mcols(regions_gr)$callingcards_enrichment <- calculate_enrichment(
    total_background_hops = total_background_hops,
    total_experiment_hops = total_experiment_hops,
    background_hops = background_counts,
    experiment_hops = experiment_counts,
    pseudocount = pseudocount
  )
  message("Calculating Poisson p-values...")
  mcols(regions_gr)$poisson_pval <- calculate_poisson_pval(
    total_background_hops = total_background_hops,
    total_experiment_hops = total_experiment_hops,
    background_hops = background_counts,
    experiment_hops = experiment_counts,
    pseudocount = pseudocount
  )
  message("Calculating log Poisson p-values...")
  mcols(regions_gr)$log_poisson_pval <- calculate_poisson_pval(
    total_background_hops = total_background_hops,
    total_experiment_hops = total_experiment_hops,
    background_hops = background_counts,
    experiment_hops = experiment_counts,
    pseudocount = pseudocount,
    log.p = TRUE
  )
  message("Calculating hypergeometric p-values...")
  mcols(regions_gr)$hypergeometric_pval <- calculate_hypergeom_pval(
    total_background_hops = total_background_hops,
    total_experiment_hops = total_experiment_hops,
    background_hops = background_counts,
    experiment_hops = experiment_counts
  )
  message("Calculating log hypergeometric p-values...")
  mcols(regions_gr)$log_hypergeometric_pval <- calculate_hypergeom_pval(
    total_background_hops = total_background_hops,
    total_experiment_hops = total_experiment_hops,
    background_hops = background_counts,
    experiment_hops = experiment_counts,
    log.p = TRUE
  )
  # FDR (Benjamini-Hochberg) adjustment of the raw p-value columns
  message("Calculating adjusted p-values...")
  mcols(regions_gr)$poisson_qval <- p.adjust(mcols(regions_gr)$poisson_pval, method = "fdr")
  mcols(regions_gr)$hypergeometric_qval <- p.adjust(mcols(regions_gr)$hypergeometric_pval, method = "fdr")
  message("Analysis complete!")
  return(regions_gr)
}
# Example Usage -----------------------------------------------------------
# add another batch to the genome map
# Batch addition: append runs 7488/7489 to the genome map --------------------
# NOTE(review): every path below is machine-specific (home directory plus a
# local HTCF mount); this section is a one-off data-management step, distinct
# from the reusable functions above.
genomic_features = arrow::read_parquet("~/code/hf/yeast_genome_resources/brentlab_features.parquet")
genome_map_replicate_ds = arrow::open_dataset("~/code/hf/callingcards/genome_map")
genome_map_replicate_meta = arrow::read_parquet("~/code/hf/callingcards/genome_map_meta.parquet")
# New replicate ids will continue from the current maximum in the metadata
max_gm_id = max(genome_map_replicate_meta$id)
# Symbol -> locus_tag lookup for the four regulators present in these runs
rs_rl_map = dplyr::select(genomic_features,
regulator_locus_tag = locus_tag,
regulator_symbol = symbol) %>%
filter(regulator_symbol %in% c("MED2", "XBP1", "UME1", "RPH1"))
# Collect qbed hop files per run, excluding the undetermined-barcode output
run_7488_qbed = list.files("~/htcf_local/cc/yeast/results/run_7488/hops",
"*qbed",
full.names=TRUE)
run_7488_qbed = run_7488_qbed[str_detect(run_7488_qbed, "undetermined", negate=TRUE)]
run_7489_qbed = list.files("~/htcf_local/cc/yeast/results/run_7489/hops",
"*qbed",
full.names=TRUE)
run_7489_qbed = run_7489_qbed[str_detect(run_7489_qbed, "undetermined", negate=TRUE)]
# Build replicate-level metadata from the two barcode sheets. A "∆" in the
# regulator name marks the MSN2-deletion condition; the symbol is the name
# with the "∆..." suffix stripped.
new_metadata = read_tsv("~/htcf_local/cc/yeast/data/run_7488/JP094_barcodes.txt",
col_names = c("regulator", "bc1", "bc2")) %>%
mutate(condition = ifelse(str_detect(regulator, "∆"), "del_MSN2", "standard")) %>%
mutate(regulator_symbol = str_remove(regulator, "∆.*")) %>%
mutate(batch = "run_7488") %>%
bind_rows(
read_tsv("~/htcf_local/cc/yeast/data/run_7489/JP095_barcodes.txt",
col_names = c("regulator", "bc1", "bc2")) %>%
mutate(condition = ifelse(str_detect(regulator, "∆"), "del_MSN2", "standard")) %>%
mutate(regulator_symbol = str_remove(regulator, "∆.*")) %>%
mutate(batch = "run_7489")) %>%
# binding_id is a placeholder here; note it is the STRING "NA", not NA
mutate(binding_id = "NA") %>%
left_join(rs_rl_map) %>%
mutate(replicate = 1,
notes = "none") %>%
# Drop the run_7488 XBP1/standard sample: its qbed is missing, possibly a
# barcode issue
filter(!(batch=="run_7488" & regulator_symbol == "XBP1" & condition == "standard")) %>%
# Assign fresh ids continuing after the existing maximum
mutate(id = max_gm_id + row_number()) %>%
dplyr::select(id, binding_id, regulator_locus_tag, regulator_symbol,
batch, replicate, notes, condition) %>%
# Attach each sample's qbed path, matching on batch/condition/symbol parsed
# out of the qbed file names
left_join(
tibble(qbed = c(run_7488_qbed, run_7489_qbed)) %>%
mutate(batch = str_extract(basename(qbed), "run_\\d+")) %>%
mutate(condition = ifelse(str_detect(basename(qbed), "del"), "del_MSN2", "standard")) %>%
mutate(regulator_symbol = str_remove_all(basename(qbed), "run_\\d+_|del.*|.qbed")) %>%
filter(regulator_symbol != "undetermined")
)
# Read every qbed, normalize chromosome names to UCSC style (prefix "chr",
# map chrMT -> chrM), tag rows with the new replicate id/batch via the qbed
# path, and stack into a single table
new_data = map(c(run_7488_qbed, run_7489_qbed), ~{
in_path = .
read_tsv(in_path) %>%
mutate(chr = paste0("chr", chr)) %>%
mutate(chr = ifelse(chr=="chrMT", "chrM", chr)) %>%
mutate(qbed = in_path) %>%
left_join(dplyr::select(new_metadata, qbed, id, batch)) %>%
dplyr::select(id, chr, start, end, depth, strand, batch)
}) %>%
bind_rows()
# Persist step intentionally commented out -- uncomment to write the new
# batches into the batch-partitioned genome_map dataset
# arrow::write_dataset(
# new_data,
# path = "/home/chase/code/hf/callingcards/genome_map",
# format = "parquet",
# partitioning = c("batch"),
# existing_data_behavior = "overwrite",
# compression = "zstd",
# write_statistics = TRUE,
# use_dictionary = c(
# id = TRUE
# )
# )
# Extended metadata table (old rows plus the new replicates, minus the
# helper qbed-path column)
genome_map_replicate_meta_new = genome_map_replicate_meta %>%
bind_rows(dplyr::select(new_metadata, -qbed))
# Metadata write also intentionally commented out
# arrow::write_parquet(genome_map_replicate_meta_new,
# "~/code/hf/callingcards/genome_map_meta.parquet")
# This is a template for how to use these functions
# Uncomment and modify for your actual data
# Load analysis inputs: re-open the genome map restricted to the two new
# batches (unify_schemas guards against per-partition schema drift)
genome_map_replicate_ds = arrow::open_dataset("~/code/hf/callingcards/genome_map",
unify_schemas = TRUE) %>%
filter(batch %in% c("run_7488", "run_7489"))
genome_map_replicate_meta = arrow::read_parquet("~/code/hf/callingcards/genome_map_meta.parquet") %>%
filter(batch %in% c("run_7488", "run_7489"))
# Background insertions: depth rescaled into a [1, 1000] score column so the
# frame is BED6-shaped before conversion to GRanges
# NOTE(review): the score is carried in mcols but is not used by the
# enrichment functions -- only insertion coordinates matter downstream
background_gr <- read_tsv("~/code/hf/callingcards/adh1_background_ucsc.qbed") %>%
mutate(id = "adh1_bg",
score = scales::rescale(depth, to = c(1,1000))) %>%
dplyr::select(chr, start, end, id, score, strand) %>%
bed_to_granges()
# Promoter regions to quantify (BED6 file; locus_tag identifies the target
# gene of each promoter)
regions_gr <- read_tsv("~/code/hf/yeast_genome_resources/yiming_promoters.bed",
col_names = c('chr', 'start', 'end', 'locus_tag', 'score', 'strand')) %>%
bed_to_granges()
# # Run analysis with deduplication (default for calling cards)
# Run the enrichment analysis once per replicate id; each list element is
# the promoter GRanges annotated with counts and statistics for that sample
results_replicates = map(genome_map_replicate_meta$id, ~{
enrichment_analysis(
# Pull this replicate's insertions out of the arrow dataset, reshape to
# BED6-like column order (depth -> score), then convert to GRanges.
# NOTE(review): a rescaled "depth" column is re-added after the rename --
# presumably for downstream export; it is not used by the analysis itself
experiment_gr = genome_map_replicate_ds %>%
filter(id == .x) %>%
collect() %>%
dplyr::rename(score = depth) %>%
relocate(chr, start, end, id, score, strand) %>%
mutate(depth = scales::rescale(score, to = c(1,1000))) %>%
bed_to_granges(),
background_gr = background_gr,
regions_gr = regions_gr,
deduplicate_experiment = TRUE,
pseudocount = 0.1
)
})
# Name list elements by replicate id so bind_rows(.id = ...) recovers them
names(results_replicates) = genome_map_replicate_meta$id
# Flatten to one tibble; .id brings the replicate id in as a character
# column, hence the as.integer cast before joining metadata back on
results_replicates_df = bind_rows(map(results_replicates, as_tibble), .id = "id") %>%
mutate(id = as.integer(id)) %>%
# Attach the target gene symbol for each promoter's locus_tag
left_join(select(genomic_features, locus_tag, symbol)) %>%
dplyr::rename(target_locus_tag = locus_tag,
target_symbol = symbol) %>%
left_join(genome_map_replicate_meta) %>%
select(id, batch,
target_locus_tag, target_symbol, experiment_hops, background_hops,
total_background_hops, total_experiment_hops,
callingcards_enrichment,
poisson_pval, log_poisson_pval, poisson_qval,
hypergeometric_pval, log_hypergeometric_pval, hypergeometric_qval)
# arrow::write_dataset(
# results_replicates_df,
# path = "/home/chase/code/hf/callingcards/annotated_features_orig_reprocess",
# format = "parquet",
# partitioning = c("batch"),
# existing_data_behavior = "overwrite",
# compression = "zstd",
# write_statistics = TRUE,
# use_dictionary = c(
# id = TRUE
# )
# )
# id 9 corresponds to the binding sample -- can get from genome_map and
# annotated_feature metadata
#
# NOTE: there are some expected differences due to a change in how I am handling
# the promoter boundaries. The implementation here is correct -- please use
# this from now on. If you need to compare or doubt something, please let
# me know
#
# experiment_gr = arrow::read_parquet("~/code/hf/callingcards/genome_map/batch=run_5801/part-0.parquet") %>%
# filter(id == 707) %>%
# dplyr::rename(score = depth) %>%
# relocate(chr, start, end, id, score, strand) %>%
# mutate(depth = scales::rescale(score, to = c(1,1000))) %>%
# bed_to_granges()
#
# curr_db_annotated_feature = arrow::read_parquet("~/code/hf/callingcards/annotated_features/batch=run_5801/part-0.parquet") %>%
# filter(id == 9)
#
# comp_df = curr_db_annotated_feature %>%
# select(target_locus_tag, experiment_hops,
# background_hops, background_total_hops,
# experiment_total_hops) %>%
# left_join(results %>%
# as_tibble() %>%
# select(locus_tag, total_background_hops,
# total_experiment_hops,
# experiment_hops, background_hops) %>%
# dplyr::rename(target_locus_tag = locus_tag,
# new_exp_hops = experiment_hops,
# new_bg_hops = background_hops,
# new_bg_total = total_background_hops,
# new_expr_total = total_experiment_hops))