#!/bin/bash
# Bisulfite-sequencing pipeline: (previously) trim/align/dedup, then coverage,
# methylation ratios and SNP/methylation calling with BisSNP.
#
# Usage: <script> [-p out_prefix] [-f suffix] [-D] r1 r2
#
# 'set -e' lives here rather than in the shebang so it survives the script
# being invoked as 'bash script.sh' (shebang options are lost in that case).
set -e

# Print usage to stderr and exit 2 (bad invocation).
info() {
  printf 'Usage: %s r1 r2\n' "$(basename "$0")" >&2
  exit 2
}

# Parse options:
#   -p PREFIX  prefix for all output files
#   -f SUFFIX  input-file suffix
#   -D         disable directory creation (makedir=F; defaults to T below)
# The leading ':' in the optstring selects silent error reporting, so we
# print our own diagnostics for a missing argument (opt = ':') and an
# unknown option (opt = '?') before showing the usage text.
while getopts ':p:f:D' opt; do
	case "$opt" in
		p) out_prefix=$OPTARG ;;
		f) suffix=$OPTARG ;;
		D) makedir=F ;;
		:) echo "option -$OPTARG requires an argument" >&2; info ;;
		*) echo "unknown option -$OPTARG" >&2; info ;;
	esac
done
shift $((OPTIND - 1))

# Two positional arguments (the paired read files r1 and r2) are required.
if [ "$#" -lt 2 ]; then info; fi

# Resolve the two read files to paths via the project helper path_abs.sh
# (presumably converts a relative path to an absolute one — TODO confirm).
r1=$(path_abs.sh $1)
r2=$(path_abs.sh $2)



# Default: makedir=T unless -D was passed above.
makedir=${makedir:=T}

# NOTE(review): $var is never assigned in this script; it is presumably
# exported by a calling wrapper and names a config file that defines
# $tools_path, $threads, $ref_genome, $sample_name, $java_run and $cmd_done
# used below — TODO confirm.
. $var

# Report the resolved inputs and record shared resource file locations.
echo;echo $r1 $r2
adapters_file=$tools_path/Trimmomatic-0.36/adapters/TruSeq3-PE.fa
primary_bed=primary.bed 
capture_bed=capture.bed
ref_genome_size=ref_genome.size.txt

# ---------------------------------------------------------------------------
# DISABLED STAGES: everything down to the matching DISABLED_PIPELINE line is
# a here-document fed to ':' (the no-op builtin), i.e. a block comment.
# These stages (trimming, bsmap alignment, read groups, strand split/merge,
# duplicate removal, overlap clipping, Picard interval lists and metrics,
# on-target accounting) were presumably already run for this sample — TODO
# confirm before re-enabling.  The delimiter is now quoted so none of this
# dead text is expanded while disabled (the original unquoted 'done'
# delimiter made the shell expand every variable in it).  Latent bugs in the
# dead text are fixed so re-enabling is safe; see FIX comments.
# ---------------------------------------------------------------------------
: <<'DISABLED_PIPELINE'

${java_run}/trimmomatic PE -threads $threads -phred33 \
$r1 $r2 \
$out_prefix.1.trim.fq \
$out_prefix.1.unpair.fq \
$out_prefix.2.trim.fq \
$out_prefix.2.unpair.fq \
ILLUMINACLIP:${adapters_file}:2:30:10 \
LEADING:20 \
TRAILING:20 \
SLIDINGWINDOW:5:20 \
MINLEN:75

echo;echo bsmap
bsmap -r 0 -s 16 -n 1 \
-p $threads \
-a $out_prefix.1.trim.fq \
-b $out_prefix.2.trim.fq \
-d $ref_genome \
-o $out_prefix.sam

echo;echo Add Read Group and Convert to BAM
# $java_run/$picard/AddOrReplaceReadGroups.jar
# VALIDATION_STRINGENCY=LENIENT INPUT=SAMPLE.sam  OUTPUT=SAMPLE.bam  INDEX=TRUE
# RGID=SAMPLE RGLB=SAMPLE RGPL=illumina RGSM=SAMPLE RGPU= platform_unit

# FIX: RGSM was '$$out_prefix_name' -- '$$' expands to the shell PID, which
# would have produced a junk sample name; use $sample_name like RGID/RGLB.
$java_run/picard/AddOrReplaceReadGroups.jar \
VALIDATION_STRINGENCY=LENIENT \
INPUT=$out_prefix.sam \
OUTPUT=$out_prefix.bam \
CREATE_INDEX=true \
RGID=$sample_name RGLB=$sample_name RGPL=illumina RGSM=$sample_name RGPU=xunit

echo;echo Split BAM
bamtools split -tag ZS -in $out_prefix.bam

echo;echo Merge strand BAM files
bamtools merge -in $out_prefix.TAG_ZS_++.bam -in $out_prefix.TAG_ZS_+-.bam -out $out_prefix.top.bam

bamtools merge -in $out_prefix.TAG_ZS_-+.bam -in $out_prefix.TAG_ZS_--.bam -out $out_prefix.bottom.bam

echo;echo Sort BAM Files
# NOTE(review): 'samtools sort IN OUT_PREFIX' is the pre-1.0 CLI; samtools
# >= 1.0 requires 'samtools sort -o OUT.bam IN' -- confirm installed version.
samtools sort $out_prefix.top.bam $out_prefix.top.sorted

samtools sort $out_prefix.bottom.bam $out_prefix.bottom.sorted

echo;echo Remove Duplicates
$java_run/picard/MarkDuplicates.jar VALIDATION_STRINGENCY=LENIENT INPUT=$out_prefix.top.sorted.bam OUTPUT=$out_prefix.top.rmdups.bam METRICS_FILE=$out_prefix.top.rmdups_metrics.txt REMOVE_DUPLICATES=true ASSUME_SORTED=true CREATE_INDEX=true

$java_run/picard/MarkDuplicates.jar VALIDATION_STRINGENCY=LENIENT INPUT=$out_prefix.bottom.sorted.bam OUTPUT=$out_prefix.bottom.rmdups.bam METRICS_FILE=$out_prefix.bottom.rmdups_metrics.txt REMOVE_DUPLICATES=true ASSUME_SORTED=true CREATE_INDEX=true

echo;echo Merge duplicate removed BAM files
bamtools merge -in $out_prefix.top.rmdups.bam -in $out_prefix.bottom.rmdups.bam -out $out_prefix.rmdups.bam

echo;echo Filter BAM
bamtools filter -isMapped true -isPaired true -isProperPair true -forceCompression -in $out_prefix.rmdups.bam -out $out_prefix.filtered.bam

echo;echo Clip Overlaps
bam clipOverlap --stats --in $out_prefix.filtered.bam --out $out_prefix.clipped.bam

samtools index $out_prefix.clipped.bam

echo;echo Create a Picard Interval List Header
samtools view -H $out_prefix.clipped.bam > $out_prefix.bam_header.txt

cat $out_prefix.bam_header.txt|cut -f2-3|sed -n s/^SN://p|sed s/LN:// > $ref_genome_size

echo;echo Create a Picard Target Interval List Body
cat $primary_bed |
awk '{print  $1 "\t" $2+1 "\t" $3 "\t+\tinterval_" NR}' > target_body.txt

echo;echo Concatenate to Create a Picard Target Interval List
# FIX: was '$out_prefix.header.txt', but the header file is written above as
# '$out_prefix.bam_header.txt'.
cat $out_prefix.bam_header.txt target_body.txt > target_intervals.txt

# echo;echo Create a Picard Interval List Header
# samtools view -H $out_prefix.bam > $out_prefix.bam_header.txt

echo;echo Create a Picard Bait Interval List Body
cat $capture_bed |
gawk '{print $1 "\t" $2+1 "\t" $3 "\t+\tinterval_" NR}' > bait_body.txt
echo;echo Concatenate to Create a Picard Bait Interval List
cat $out_prefix.bam_header.txt bait_body.txt > bait_intervals.txt

# The level(s) at which to accumulate metrics
echo;echo picard CollectAlignmentSummaryMetrics
$java_run/picard/CollectAlignmentSummaryMetrics.jar \
METRIC_ACCUMULATION_LEVEL=ALL_READS \
INPUT=$out_prefix.clipped.bam \
OUTPUT=$out_prefix.picard_alignment_metrics.txt \
REFERENCE_SEQUENCE=$ref_genome \
VALIDATION_STRINGENCY=LENIENT

echo;echo picard CalculateHsMetrics
$java_run/picard/CalculateHsMetrics.jar \
BAIT_INTERVALS=bait_intervals.txt \
TARGET_INTERVALS=target_intervals.txt \
INPUT=$out_prefix.clipped.bam \
OUTPUT=$out_prefix.picard_hs_metrics.txt \
METRIC_ACCUMULATION_LEVEL=ALL_READS \
REFERENCE_SEQUENCE=$ref_genome \
VALIDATION_STRINGENCY=LENIENT

echo;echo Estimate Insert Size Distribution
$java_run/picard/CollectInsertSizeMetrics.jar \
INPUT=$out_prefix.filtered.bam \
OUTPUT=$out_prefix.picard_insert_size_metrics.txt \
HISTOGRAM_FILE=$out_prefix.picard_insert_size_plot.pdf \
VALIDATION_STRINGENCY=LENIENT

echo;echo Pad , Sort and Merge Overlapping and Book-Ended Regions
bedtools slop -i $capture_bed -b 100 -g $ref_genome_size | bedtools sort -i - | bedtools merge -i - > padded_capture_target.bed

# bedtools genomecov  -i $primary_bed -g $ref_genome_size -max 1 | grep -P "genome\t1" | cut -f3 > $out_prefix.tes.txt
# echo;echo Alternatively, a single gawk command can be used:
cat $capture_bed | gawk -F'\t' 'BEGIN{SUM=0}{SUM+=$3-$2}END{print SUM}' > $out_prefix.sum.txt

echo;echo Count On-Target Reads
bedtools intersect -bed -abam $out_prefix.clipped.bam -b $primary_bed > $out_prefix.intersect.primary.txt
bedtools intersect -bed -abam $out_prefix.clipped.bam -b $capture_bed > $out_prefix.intersect.capture.txt

DISABLED_PIPELINE

echo;echo DepthOfCoverage 1
# GATK per-base coverage over the primary target regions; each -ct adds a
# "% of bases covered above N" summary column.  All expansions are quoted so
# paths containing spaces cannot word-split (ShellCheck SC2086).
"$java_run/gatk" -T DepthOfCoverage \
  -R "$ref_genome" \
  -I "$out_prefix.clipped.bam" \
  -o "$out_prefix.primary_coverage.txt" \
  -L "$primary_bed" \
  -ct 1 -ct 10 -ct 20


echo;echo DepthOfCoverage 2
# Same coverage report, restricted to the capture regions.
# FIX: the '-I' line was missing its trailing backslash, which split the
# command in two and silently dropped the -o/-L/-ct arguments.
"$java_run/gatk" -T DepthOfCoverage \
  -R "$ref_genome" \
  -I "$out_prefix.clipped.bam" \
  -o "$out_prefix.capture_coverage.txt" \
  -L "$capture_bed" \
  -ct 1 -ct 10 -ct 20


echo;echo Determine methylation percentage
# methratio.py (BSMAP) computes per-cytosine methylation ratios from the
# clipped alignments.  NOTE(review): flag meanings assumed from BSMAP docs
# (-m minimum depth, -z report zero-methylation sites, -i skip = discard
# overlapping mate bases) — confirm against the installed version.
# Expansions quoted against word-splitting (SC2086).
methratio.py -d "$ref_genome" \
  -s samtools -m 1 -z -i skip \
  -o "$out_prefix.methylation_results.txt" \
  "$out_prefix.clipped.bam"

echo;echo Determine methylation percentage
# Second pass adds -c and writes a per-sample-named results file.
# NOTE(review): methratio.py's -c selects chromosomes; passing the sample
# name here looks questionable (and the first run already covers all
# contigs) — confirm the intent.
methratio.py -d "$ref_genome" \
  -s samtools -m 1 -z -i skip \
  -c "$sample_name" \
  -o "$out_prefix.$sample_name.methylation_results.txt" \
  "$out_prefix.clipped.bam"


echo;echo Base Quality Recalibration
# BisSNP covariate-counting pass (bisulfite-aware BQSR).
# FIX: 'genome.snps.vcf' was left as a stray positional argument after the
# '-knownSites' flag was commented out; reattach it so known SNP sites are
# excluded from the recalibration counts.  Expansions quoted (SC2086).
"$java_run/bissnp" -R "$ref_genome" \
  -I "$out_prefix.clipped.bam" \
  -T BisulfiteCountCovariates \
  -cov ReadGroupCovariate \
  -cov QualityScoreCovariate \
  -cov CycleCovariate \
  -recalFile "$out_prefix.recalFile_before.csv" \
  -nt 4 \
  -knownSites genome.snps.vcf

# Apply the recalibration table to produce the recalibrated BAM, capping
# base qualities at 40.
"$java_run/bissnp" -R "$ref_genome" \
  -I "$out_prefix.clipped.bam" \
  -o "$out_prefix.recal.bam" \
  -T BisulfiteTableRecalibration \
  -recalFile "$out_prefix.recalFile_before.csv" \
  -maxQ 40

echo;echo Combined SNP/methylation calling
# BisulfiteGenotyper emits CpG/methylation calls (-vfn1) and SNP calls
# (-vfn2) over the capture regions, with known sites supplied via -D.
# -mmq / -mbq are the mapping- and base-quality floors (unchanged).
# Expansions quoted against word-splitting (SC2086).
"$java_run/bissnp" -R "$ref_genome" \
  -I "$out_prefix.recal.bam" \
  -T BisulfiteGenotyper \
  -D genome.snps.vcf \
  -vfn1 "$out_prefix.cpg.raw.vcf" \
  -vfn2 "$out_prefix.snp.raw.vcf" \
  -L "$capture_bed" \
  -stand_call_conf 20 \
  -stand_emit_conf 0 \
  -mmq 30 \
  -mbq 0 \
  -nt 4

echo;echo Sort VCF files
# Sort each raw VCF by reference contig order (from the .fai index, --k 1)
# and coordinate (--c 2).
# FIX: the originals ended a line with a bare '>' and put the target file on
# the next line with no continuation backslash — a bash syntax error that
# aborted the script here.
sortByRefAndCor.pl --k 1 --c 2 \
  "$out_prefix.snp.raw.vcf" "$ref_genome.fai" \
  > "$out_prefix.snp.raw.sorted.vcf"

sortByRefAndCor.pl --k 1 --c 2 \
  "$out_prefix.cpg.raw.vcf" "$ref_genome.fai" \
  > "$out_prefix.cpg.raw.sorted.vcf"

 
echo;echo Filter SNP/methylation calls
# VCFpostprocess filters the sorted raw calls; the sorted SNP VCF serves as
# the -snpVcf reference for both the SNP and the CpG pass.
# Expansions quoted against word-splitting (SC2086).
"$java_run/bissnp" -R "$ref_genome" \
  -T VCFpostprocess \
  -oldVcf "$out_prefix.snp.raw.sorted.vcf" \
  -newVcf "$out_prefix.snp.filtered.vcf" \
  -snpVcf "$out_prefix.snp.raw.sorted.vcf" \
  -o "$out_prefix.snp.filter.summary.txt"

"$java_run/bissnp" -R "$ref_genome" \
  -T VCFpostprocess \
  -oldVcf "$out_prefix.cpg.raw.sorted.vcf" \
  -newVcf "$out_prefix.cpg.filtered.vcf" \
  -snpVcf "$out_prefix.snp.raw.sorted.vcf" \
  -o "$out_prefix.cpg.filter.summary.txt"

echo;echo Convert VCF to BED file
# Project helper: convert the filtered VCFs to BED6+2 with strand info.
vcf2bed6plus2.strand.pl "$out_prefix.snp.filtered.vcf"
vcf2bed6plus2.strand.pl "$out_prefix.cpg.filtered.vcf"

# NOTE(review): $cmd_done is never set in this script — presumably exported
# by the calling wrapper and names a completion hook to source; confirm.
. "$cmd_done"