#!/usr/bin/env nextflow

// ---- Default pipeline parameters; override on the CLI, e.g. --input samples.csv ----
params.input = null              // CSV with columns: readgroup, bam [, basename] (required)
params.output_dir = 'rnavar_out' // directory where haploCall publishes its results
params.fasta = null              // reference genome FASTA (required)
params.dict = null               // reference sequence dictionary (.dict) (required)
params.known_site1 = null        // known-sites VCF #1 for BQSR
params.known_site2 = null        // known-sites VCF #2 for BQSR
params.tbi1 = null               // tabix index (.tbi) for known_site1
params.tbi2 = null               // tabix index (.tbi) for known_site2
params.interval_bed = null       // BED of intervals used by BaseRecalibrator/ApplyBQSR
params.bam_intervals = null      // interval list consumed by splitNcigar's xargs fan-out
params.vcf_intervals = null      // interval list consumed by haploCall's xargs fan-out
params.n_xargs = 4               // number of parallel xargs worker processes
params.dupmarker = 'gatk'        // duplicate-marking tool: 'gatk' or 'biobb'
params.skip_dedup = false        // skip the duplicate-marking step
params.skip_addrg = false        // skip the read-group-adding step
params.help = false              // print the help message and exit

// Prints CLI usage; shown for --help and when required parameters are missing.
/**
 * Log the pipeline's usage text.
 * Called when --help is given, or before exiting when required
 * parameters (--input, --fasta, --dict) are absent.
 */
def help_message() {
    log.info """
    BILL RNAVAR Pipeline
    ===================
    Required Arguments:
      --input         CSV file with sample information (2 col: "readgroup", abs path to "bam", optional unique "basename")
      --fasta         Reference genome FASTA
      --dict         Reference genome dictionary
      
    Optional Arguments:
      --output_dir   Output directory (default: 'rnavar_out')
      --n_xargs      Number of parallel processes for xargs (default: 4)
      --help         Display help message
      --dupmarker    Tools to mark duplicates. Can be 'gatk' or 'biobb' (default: 'gatk')
      --skip_dedup   Skip duplicate marking step (default: false)
      --skip_addrg   Skip readgroup adding step (default: false)
    """
}

process bulkStarAlign {
    // Align bulk RNA-seq FASTQ(.gz) files with STAR, emitting a
    // coordinate-sorted BAM tagged with a read group.
    // NOTE(review): not invoked by the workflow below — appears to be kept
    // for a bulk-FASTQ entry mode; confirm before removing.
    conda '/home/gjsx/micromamba/envs/star'
    cpus 16
    memory 36.GB
    tag "${basename}"

    input:
    tuple val(basename), path(input_fq), val(readgroup)
    path(star_index)

    output:
    val basename
    path '*.out.bam'

    script:
    """
    # STAR opens many files per thread; raise the fd limit first.
    ulimit -HSn 16384
    STAR --genomeDir ${star_index} \
    --readFilesCommand zcat \
    --outSAMattrRGline 'ID:${basename}' 'SM:${readgroup}' 'PL:illumina' \
    --runThreadN ${task.cpus} --outSAMtype BAM SortedByCoordinate \
    --readFilesIn ${input_fq} --outFileNamePrefix ${basename}.
    """

    stub:
    // Must create a file matching '*.out.bam' or stub runs fail on missing output.
    """
    touch ${basename}.Aligned.sortedByCoord.out.bam
    """
}

process addReadGroup {
    // Overwrite (-w) the @RG header line of each BAM with samtools
    // addreplacerg, using the basename as ID and the readgroup as SM.
    cpus 16
    memory 36.GB
    tag "${basename}"

    input:
    tuple val(basename), path(input_bam), val(readgroup)

    output:
    val basename
    path '*.readgroup.bam'

    script:
    // The \t sequences are expanded by Groovy to literal tabs, which is the
    // field separator samtools expects inside an @RG line.
    """
    samtools addreplacerg -@${task.cpus} -w -r "ID:${basename}\tSM:${readgroup}\tPL:illumina" \
    -o ${basename}.readgroup.bam ${input_bam}
    """

    stub:
    // Must create a file matching '*.readgroup.bam' or stub runs fail.
    """
    touch ${basename}.readgroup.bam
    """
}

process removeDuplicate {
  // Remove duplicate reads with GATK (Picard) MarkDuplicates.
  // REMOVE_DUPLICATES=true drops duplicates instead of just flagging them.
  conda '/home/gjsx/micromamba/envs/gatk'
  cpus 16
  memory 36.GB
  tag "${basename}"

  input:
    val(basename)
    path(input_bam)

  output:
    val basename
    path '*.dedup.bam'

  script:
  // -Xmx is derived from the memory directive so the two cannot drift apart.
  // ASSUME_SORT_ORDER replaces the deprecated ASSUME_SORTED=true flag; the
  // upstream BAMs are coordinate-sorted (STAR SortedByCoordinate).
  """
  gatk MarkDuplicates --java-options "-Xmx${task.memory.toGiga()}g" \
    --INPUT ${input_bam} \
    --OUTPUT ${basename}.dedup.bam \
    --METRICS_FILE dedup.metrics --TMP_DIR . --REMOVE_DUPLICATES true \
    --ASSUME_SORT_ORDER coordinate --VALIDATION_STRINGENCY LENIENT
  """
}

process removeDuplicate_biobb {
  // Alternative duplicate remover using biobambam2's bammarkduplicates
  // (selected with --dupmarker biobb). rmdup=1 drops duplicates instead of
  // just flagging them, matching the GATK branch's REMOVE_DUPLICATES=true.
  // NOTE(review): unlike removeDuplicate, this declares a single tuple input;
  // callers must pair (basename, bam) into one channel before invoking it.
  conda '/home/gjsx/micromamba/envs/biobb'
  cpus 16
  memory 36.GB
  tag "${basename}"
  
  input:
    tuple val(basename), path(input_bam)
  
  output:
    val basename
    path '*.dedup.bam'
  
  script:
  // markthreads is hard-coded to 16 to match the cpus directive above.
  """
  bammarkduplicates I=${input_bam} O=${basename}.dedup.bam \
  M=bbb.dedup.metrics rmdup=1 markthreads=16
  """
}

process splitNcigar {
  // Split reads spanning introns (N CIGAR operators) ahead of variant
  // calling, scattering work across genomic intervals and merging the shards.
  conda '/home/gjsx/micromamba/envs/gatk'
  cpus 16
  memory 144.GB
  tag "${basename}"

  input:
  val basename
  path input_bam
  path bam_intervals

  output:
  val basename
  path '*.splited.bam'

  script:
  // -P honours --n_xargs instead of a hard-coded worker count.
  // NOTE(review): myspliter.sh must be on PATH, consume two interval tokens
  // per invocation, and write *scattered.bam shards — confirm its contract.
  """
  samtools index -@${task.cpus} ${input_bam}

  xargs -P ${params.n_xargs} -a ${bam_intervals} \
  -n 2 bash myspliter.sh ${input_bam}

  samtools merge -o ${basename}.splited.bam *scattered.bam
  """
}

process recalibrateBQSR {
  // Base quality score recalibration: build a recalibration table from the
  // known-sites VCFs, then apply it to produce *.recal.bam.
  conda '/home/gjsx/micromamba/envs/gatk'
  cpus 16
  memory 12.GB
  tag "${basename}"

  input:
  val basename
  path input_bam
  path interval_bed
  path fasta
  path dict         // staged so GATK finds the .dict beside the FASTA
  path known_site1
  path known_site2
  path tbi1         // .tbi indexes staged beside the known-sites VCFs
  path tbi2

  output:
  path '*recal.bam'

  script:
  // -Xmx is derived from the memory directive so the two cannot drift apart.
  """
  samtools index -@${task.cpus} ${input_bam}

  samtools faidx ${fasta}

  gatk --java-options "-Xmx${task.memory.toGiga()}g" BaseRecalibrator \
        --input ${input_bam} \
        --output recal.table \
        --reference ${fasta} \
        --intervals ${interval_bed} \
        --known-sites ${known_site1} --known-sites ${known_site2} \
        --tmp-dir . \
        --use-original-qualities

  gatk --java-options "-Xmx${task.memory.toGiga()}g" ApplyBQSR \
        --input ${input_bam} \
        --output ${basename}.recal.bam \
        --reference ${fasta} \
        --bqsr-recal-file recal.table \
        --intervals ${interval_bed} \
        --tmp-dir . \
        --use-original-qualities --add-output-sam-program-record
  """
}

process haploCall {
  // Merge all recalibrated BAMs, scatter HaplotypeCaller across the interval
  // list via xargs, then merge and hard-filter the per-interval VCFs.
  publishDir "${params.output_dir}", mode: 'move'
  conda '/home/gjsx/micromamba/envs/gatk'
  cpus 40
  memory 144.GB

  input:
  path input_bam
  path fasta
  path dict
  path vcf_intervals

  output:
  path 'merged.bam*'
  path 'haploCall.vcf.gz*'
  path 'filtered.vcf.gz*'

  script:
  // NOTE(review): mycaller.sh must be on PATH and write *.vcf.gz shards.
  // The 144 GB allocation presumably assumes ~4 parallel callers at ~36g
  // each — revisit the sizing if --n_xargs is raised well above the default.
  """
  samtools faidx ${fasta}

  samtools merge -@${task.cpus} --write-index \
  --reference ${fasta} \
  -o merged.bam ${input_bam}

  xargs -P ${params.n_xargs} -a ${vcf_intervals} \
  mycaller.sh merged.bam

  # MergeVcfs only treats an --INPUT argument as a file-of-paths when the
  # filename ends in ".list"; a bare "vcf_list" would be parsed as a VCF.
  ls -1 *.vcf.gz > vcf.list

  gatk --java-options "-Xmx36g" MergeVcfs \
        --INPUT vcf.list \
        --OUTPUT haploCall.vcf.gz \
        --SEQUENCE_DICTIONARY ${dict} \
        --TMP_DIR .

  gatk --java-options "-Xmx36g" VariantFiltration \
        --variant haploCall.vcf.gz \
        --output filtered.vcf.gz \
        --reference ${fasta} \
        --tmp-dir . \
        --window 35 --cluster 3 --filter-name "FS" --filter "FS > 30.0" \
        --filter-name "QD" --filter "QD < 2.0"
  """
}

workflow {
  if (params.help) {
    help_message()
    exit 0
  }

  if (!params.input || !params.fasta || !params.dict) {
    log.error "Missing required parameters"
    help_message()
    exit 1
  }

  log.info """
    Starting BILL RNAVAR Pipeline
    =============================
    Input CSV    : ${params.input}
    Output dir   : ${params.output_dir}
    Reference    : ${params.fasta}
    """

    // ---- Input CSV -> one (basename, bam, readgroup) tuple per sample ----
    Channel
        .fromPath(params.input)
        .splitCsv(header: true)
        .map { row ->
            if (!row.readgroup || !row.bam) {
                error "Input CSV must have 'readgroup' and 'bam' columns"
            }

            def bam_file = file(row.bam)
            if (!bam_file.exists()) {
                error "BAM file not found: ${row.bam}"
            }

            // Use the provided basename or derive it from the BAM filename.
            def basename = row.containsKey('basename') ? row.basename : bam_file.name.replaceAll(/\.bam$/, '')

            return tuple(basename, bam_file, row.readgroup)
        }
        .groupTuple()
        .map { basename, bams, readgroups ->
            // groupTuple keys on basename; more than one BAM per key means
            // two CSV rows collapsed onto the same output prefix.
            if (bams.size() > 1) {
                error "Duplicate basename found: ${basename}"
            }
            return tuple(basename, bams[0], readgroups[0])
        }
        .set { ch_input }

    // ---- Reference files: every one is consumed downstream, so all are required ----
    def ref_files = [
        fasta: params.fasta,
        dict: params.dict,
        known_site1: params.known_site1,
        known_site2: params.known_site2,
        interval_bed: params.interval_bed,
        bam_intervals: params.bam_intervals,
        vcf_intervals: params.vcf_intervals,
        tbi1: params.tbi1,
        tbi2: params.tbi2
    ]

    ref_files.each { key, value ->
        // A null parameter would make file(null) throw an opaque exception;
        // report the missing flag by name instead.
        if (!value) {
            error "Missing required parameter: --${key}"
        }
        if (!file(value).exists()) {
            error "Reference file not found: ${value}"
        }
    }

    // Create reference channels
    fasta_ch = Channel.value(file(params.fasta))
    dict_ch = Channel.value(file(params.dict))
    site1_ch = Channel.value(file(params.known_site1))
    site2_ch = Channel.value(file(params.known_site2))
    bed_ch = Channel.value(file(params.interval_bed))
    tbi1_ch = Channel.value(file(params.tbi1))
    tbi2_ch = Channel.value(file(params.tbi2))
    bam_intervals_ch = Channel.fromPath(params.bam_intervals)
    vcf_intervals_ch = Channel.fromPath(params.vcf_intervals)

    // ---- Read-group step ----
    // Downstream processes take two parallel channels (basename, bam). When
    // the step is skipped, derive those channels directly from ch_input —
    // the previous tuple(ch_input[0], ch_input[1]) indexed a plain channel
    // and crashed under --skip_addrg.
    if (!params.skip_addrg) {
        addReadGroup(ch_input)
        name_ch = addReadGroup.out[0]
        bam_ch = addReadGroup.out[1]
    } else {
        name_ch = ch_input.map { basename, bam, readgroup -> basename }
        bam_ch = ch_input.map { basename, bam, readgroup -> bam }
    }

    // ---- Duplicate marking ----
    if (!params.skip_dedup) {
        if (params.dupmarker == 'biobb') {
            // removeDuplicate_biobb declares a single tuple input, so the
            // name/bam channels (which emit in lockstep) are re-paired here.
            removeDuplicate_biobb(name_ch.merge(bam_ch))
            dedup_name_ch = removeDuplicate_biobb.out[0]
            dedup_bam_ch = removeDuplicate_biobb.out[1]
        } else {
            removeDuplicate(name_ch, bam_ch)
            dedup_name_ch = removeDuplicate.out[0]
            dedup_bam_ch = removeDuplicate.out[1]
        }
    } else {
        // Skipping deduplication: pass the read-group channels straight through.
        dedup_name_ch = name_ch
        dedup_bam_ch = bam_ch
    }

    splitNcigar(dedup_name_ch, dedup_bam_ch, bam_intervals_ch.first())

    recalibrateBQSR(
        splitNcigar.out[0], splitNcigar.out[1],
        bed_ch,
        fasta_ch,
        dict_ch,
        site1_ch,
        site2_ch,
        tbi1_ch,
        tbi2_ch
    )

    // Final joint variant calling over every recalibrated BAM at once.
    haploCall(
        recalibrateBQSR.out.collect(),
        fasta_ch,
        dict_ch,
        vcf_intervals_ch.first()
    )
}
