library(dplyr)
library(tidyr)
library(ggplot2)
library(viridis)
library(ggpubr)
library(cowplot)
library(gridExtra)
library(stringr)
library(scales)
library(logistf)
library(yardstick)

# LRmodel Performs logistic regression analysis with dichotomized signature attributions as dependent variables and epidemiological factors as independent variables.
# i.e., signature ~ parameters + confounders
# Firth penalized logistic regression implemented if parameter or confounder has perfect or near perfect separation
# The function returns a data frame with model results, metrics, and forest plots

# data = data frame with all variables included in the model
# signatures = character vector with signature names
# parameters = character vector with parameter names
# confounders = character vector with confounder names
# pval = nominal p value cutoff to plot LR and include in regression_param output
# dir = output directory. Default is the "output/" folder
# make_plot: logical. Generate plot.

LRmodel <- function(data, signatures, parameters, confounders,
                    pval = 0.05,
                    dir = "output/",
                    make_plot = TRUE) {
  regression_list <- list()        # full model results per signature/parameter pair
  regression_param <- data.frame() # parameter coefficient rows passing the pval cutoff
  metrics <- data.frame()          # classification accuracy per fitted model

  # Bonferroni factor, hoisted out of the loops: number of primary signatures
  # ("_cat2" duplicates excluded from the multiple-testing count)
  n_tests <- length(signatures[!grepl("_cat2", signatures)])

  for (s in signatures) {
    for (p in parameters) {
      # restrict to rows where both the parameter and the signature are observed
      dat <- data[!is.na(data[, p]) & !is.na(data[, s]), ]

      # skip if the tested parameter (or the signature) collapses to one level
      cross_tab <- table(as.character(dat[, p]), as.character(dat[, s]))
      if (ncol(cross_tab) < 2 || nrow(cross_tab) < 2) {
        print(paste("** Warning: Parameter", p, "in signature", s, "has only one level and will not be evaluated"))
        next
      }

      # remove confounders that overlap with / are derived from the parameter
      confounders_minus <- confounders
      if (p %in% c("tob_alc", "known_riskfactor")) {
        confounders_minus <- confounders_minus[!confounders_minus %in% c("alcohol_ever", "tobacco_ever")]
      }
      if (p %in% c("tobacco", "heavy_smoker", "heavy_smoker_dic", "years_since_stop_tob_cat")) {
        confounders_minus <- confounders_minus[confounders_minus != "tobacco_ever"]
      }
      if (p %in% c("alcohol", "heavy_drinker", "heavy_drinker_dic", "years_since_stop_alc_cat")) {
        confounders_minus <- confounders_minus[confounders_minus != "alcohol_ever"]
      }
      if (p %in% c("OC", "OPC", "Larynx", "Hypopharynx")) {
        confounders_minus <- confounders_minus[confounders_minus != "subsite"]
      }
      if (p %in% c("country", "incidence", "argentina", "greece", "italy", "brazil", "czechRepublic", "romania", "predicted_ancestry")) {
        confounders_minus <- confounders_minus[confounders_minus != "region"]
      }
      if (p %in% c("age_group", "age")) {
        confounders_minus <- confounders_minus[confounders_minus != "age_group"]
      }
      # remove confounders with only one level in the analyzed subset
      for (c in confounders_minus) {
        if (ncol(table(as.character(dat[, p]), as.character(dat[, c]))) < 2) {
          print(paste("** Warning: Confounding", c, "has only one level for parameter", p, "in signature", s, "and will be removed"))
          confounders_minus <- confounders_minus[confounders_minus != c]
        }
      }

      myformula <- paste0(s, " ~ ", paste(c(p, confounders_minus), collapse = " + "))
      print(myformula)

      # TRUE when, within some level of categorical variable v, the signature s
      # shows perfect or near-perfect separation (an outcome class that is
      # absent, or represented by at most one observation)
      has_separation <- function(v) {
        for (category in unique(dat[, v])) {
          category_counts <- dat[dat[, v] == category, ][s] %>%
            group_by(get(s)) %>%
            tally()
          if (length(category_counts$n) == 1 ||
              category_counts$n[1] <= 1 || category_counts$n[2] <= 1) {
            return(TRUE)
          }
        }
        FALSE
      }

      # check if requirements for Firth regression are met;
      # the near-separation rule does not apply to continuous variables (e.g. age)
      firth_method <- FALSE
      if (!is.numeric(data[, p]) && has_separation(p)) {
        print(paste("** Factor", p, "in signature", s, "has perfect or near-perfect separation for one or more levels. Using the penalised approach (Firth method)."))
        firth_method <- TRUE
      }

      # age is continuous, so it is excluded from the separation scan
      for (c in confounders_minus[confounders_minus != "age"]) {
        if (has_separation(c)) {
          print(paste("** Covariate", c, "in signature", s, "has perfect or near-perfect separation for one or more levels. Using the penalised approach (Firth method)."))
          firth_method <- TRUE
        }
      }

      if (!firth_method) {
        # Regular (maximum-likelihood) logistic regression
        uni_model <- glm(myformula, data = dat, family = binomial("logit"))

        # take the response from the fitted model so its length matches the
        # fitted values even when glm dropped rows with missing confounders
        # (only NAs in p and s were filtered out of dat above)
        actual_resp <- uni_model$y
        predicted_resp <- round(fitted(uni_model))
        outcomes <- table(predicted_resp, actual_resp)

        if (nrow(outcomes) == 2) {
          accuracy <- summary(conf_mat(outcomes)) %>% slice(1)
          m <- data.frame(signature = s, parameter = p, accuracy = accuracy$.estimate)
        } else {
          # model predicted a single class: accuracy is not meaningful
          m <- data.frame(signature = s, parameter = p, accuracy = NA)
        }
        metrics <- rbind(metrics, m)

        a <- summary(uni_model)$coefficients
        b <- confint.default(uni_model) # Wald CIs on the logit scale
        result <- cbind(a, b) %>%
          as.data.frame() %>%
          mutate(
            # Bonferroni adjustment across the primary signatures, capped at 1
            p_adj = pmin(`Pr(>|z|)` * n_tests, 1),
            `2.5 %` = exp(`2.5 %`),
            `97.5 %` = exp(`97.5 %`),
            OR = exp(Estimate),
            model = "logistic model"
          ) %>%
          mutate(signature = s, .before = "Estimate") %>%
          rename(p_val = "Pr(>|z|)") %>%
          select(-c("Std. Error", "z value")) %>%
          tibble::rownames_to_column("independent_vars")
      } else {
        # Firth's penalized logistic regression
        simple <- logistf(formula = myformula, data = dat)
        actual_resp <- simple$y # response stored by logistf, matches $predict length
        predicted_resp <- round(simple$predict)
        outcomes <- table(predicted_resp, actual_resp)
        if (nrow(outcomes) == 2) {
          accuracy <- summary(conf_mat(outcomes)) %>% slice(1)
          m <- data.frame(signature = s, parameter = p, accuracy = accuracy$.estimate)
        } else {
          m <- data.frame(signature = s, parameter = p, accuracy = NA)
        }
        metrics <- rbind(metrics, m)

        a <- data.frame(Estimate = simple$coefficients)
        b <- confint.default(simple)
        result <- cbind(a, b) %>%
          as.data.frame() %>%
          mutate(p_val = simple$prob, .after = "Estimate") %>%
          mutate(
            `2.5 %` = exp(`2.5 %`),
            `97.5 %` = exp(`97.5 %`),
            # Bonferroni adjustment across the primary signatures, capped at 1
            p_adj = pmin(p_val * n_tests, 1),
            OR = exp(Estimate),
            model = "firth model"
          ) %>%
          mutate(signature = s, .before = "Estimate") %>%
          tibble::rownames_to_column("independent_vars")
      }

      regression_list[[paste0(s, "_", p)]] <- result
      result.plot <- result %>% filter(independent_vars != "(Intercept)")

      # keep the coefficient rows belonging to the tested parameter; match the
      # name literally (fixed = TRUE) so it cannot be misread as a regex
      param <- result.plot %>% filter(grepl(p, independent_vars, fixed = TRUE))
      # na.rm guards against NA p-values, which would make the condition error
      if (sum(param$p_val <= pval, na.rm = TRUE) > 0) {
        regression_param <- rbind(regression_param, param)

        if (make_plot) {
          order <- result.plot$independent_vars
          # Forest plot. Assign the plot explicitly: inside a function a bare
          # ggplot expression is never auto-printed, so last_plot() would have
          # returned a stale (previously rendered) plot.
          forest <- ggplot(result.plot, aes(x = OR, y = independent_vars)) +
            geom_point(size = 2.5) +
            geom_errorbar(aes(xmin = `2.5 %`, xmax = `97.5 %`), size = 0.4, width = .1) +
            geom_vline(aes(xintercept = 1), color = "blue", size = 0.5, alpha = 0.3) +
            # log 2 scale transformation
            scale_x_continuous(trans = log2_trans()) +
            scale_y_discrete(limits = rev(order)) +
            ggtitle(paste0("OR for ", gsub("_cat", " ", s))) +
            labs(y = "", x = "") +
            theme_minimal()
          print(forest)
          ggsave(
            plot = forest,
            filename = paste0(dir, "/OR_", s, "_", p, "_", Sys.Date(), ".tiff"),
            device = "tiff", dpi = 600, width = 4.74, height = 4.85, units = "in"
          )
        }
      }
    }
  }
  return(list(
    "regression_list" = regression_list,
    "regression_param" = regression_param,
    "metrics" = metrics
  ))
}
