#!/bin/bash
# Exit on first error. Set via `set` rather than the shebang so it still
# applies when the script is invoked as `bash script.sh` (shebang options
# are ignored in that case).
set -e

# Print a usage message to stderr and exit with status 2.
info() {
  echo "Usage: $(basename "$0") [-st] r1 r2" >&2
  exit 2
}

# Parse command-line options; anything unrecognized prints usage and exits.
#   -p  output file prefix for all generated files
#   -f  suffix (stored; not referenced in this chunk -- TODO confirm caller uses it)
#   -s  sample name (used for read-group tags and run directory)
#   -t  existing run directory to continue a previous run in
while getopts ":p:f:s:t:" opt; do
  case "$opt" in
    p) out_prefix=$OPTARG ;;
    f) suffix=$OPTARG ;;
    s) sample_name=$OPTARG ;;
    t) run_continue_dir=$OPTARG ;;
    *) info ;;  # unknown option or missing argument
  esac
done
shift $((OPTIND - 1))

# Two positional arguments (paired read files r1 r2) are required.
if [ "$#" -lt 2 ]; then info; fi



# makedir=${makedir:=T}

# Source the shared pipeline environment.
# NOTE(review): $var is expected to be exported by the wrapper that invokes
# this script and to name a config file defining $tools_path, $data_path,
# $ref_genome, $java_mem, $threads, $cmd_done, ... -- confirm against caller.
. "$var"

# Helper scripts (resolved via PATH).
mnt=mnt.sh
path_abs=path_abs.sh
makedir=makedir.sh
sam2b=sam2b_samtools.sh
filter_bam_gatk=filter_bams_gatk.sh
filter_bam_gatk_suf=filter_gatk
merge_bam=merge_bam.sh
deldup=deldup.sh
summ=summ.sh
summ_gatk=summ_gatk.sh

samtools_path=/mnt/ilustre/app/rna/bin
java=$tools_path/jre1.6.0_45/bin/java
# $java_run1 deliberately holds multiple words ("<java> <mem opts> -jar <dir>")
# and is used unquoted so it word-splits into a command line.
java_run1="$java $java_mem -jar $tools_path/jars"
# export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar


# Absolute paths of the two input read files. Use the $path_abs helper
# defined above (previously the script name was hard-coded twice here,
# leaving the variable unused).
r1=$("$path_abs" "$1")
r2=$("$path_abs" "$2")


# Run directory: create a fresh one (named after the sample) unless the
# caller supplied -t to continue a previous run.
if test -z "$run_continue_dir"; then
  run_continue_dir=$("$makedir" -p"$sample_name") && cd "$run_continue_dir"
else
  cd "$run_continue_dir" || exit
fi

# Older variant (basename also strips any trailing "/" from the dir name):
# $mnt `basename $0`.`basename $run_continue_dir` &

# Background monitor tagged with our PID and "<script>.<run dir>".
"$mnt" $$ "$(basename "$0" .sh).$(basename "$(pwd)")" &

echo; echo "$r1" "$r2"

# Reference/annotation inputs (paths rooted at the sourced config's
# $tools_path/$data_path).
adapters_file=$tools_path/Trimmomatic-0.36/adapters/TruSeq3-PE.fa
primary_bed=$data_path/intervals/1/primary.bed
capture_bed=$data_path/intervals/1/capture.bed
ref_genome_size=ref_genome.size.txt

#--------------------------------------------------------

# Disabled pipeline stages, kept for reference as a heredoc fed to `:` (no-op).
# The delimiter is QUOTED so the body is taken literally: with an unquoted
# delimiter bash still performs parameter/command substitution and
# backslash-newline joining inside the heredoc, so any `$(...)` or backticks
# in this "disabled" code would silently execute.
:<<'run_done'

${java_run}/trimmomatic PE -threads $threads -phred33 \
$r1 $r2 \
$out_prefix.1.trim.fq \
$out_prefix.1.unpair.fq \
$out_prefix.2.trim.fq \
$out_prefix.2.unpair.fq \
ILLUMINACLIP:${adapters_file}:2:30:10 \
LEADING:20 \
TRAILING:20 \
SLIDINGWINDOW:5:20 \
MINLEN:75 

echo;echo bsmap
bsmap  -r 0 -s 16 -n 1 \
-p $threads \
-a $out_prefix.1.trim.fq \
-b $out_prefix.2.trim.fq \
-d $ref_genome \
-o $out_prefix.sam 
# -m TC

$sam2b -p$out_prefix $out_prefix.sam

echo;echo Add Read Group
$java_run/picard/AddOrReplaceReadGroups.jar \
VALIDATION_STRINGENCY=LENIENT \
INPUT=$out_prefix.sort.bam  \
OUTPUT=$out_prefix.rg.bam  \
CREATE_INDEX=true \
RGID=$sample_name RGSM=$sample_name RGLB=$sample_name RGPL=illumina RGPU=xunit


$filter_bam_gatk $out_prefix.rg.bam
ln -fs $out_prefix.$filter_bam_gatk_suf.bam $out_prefix.bam
ln -fs $out_prefix.$filter_bam_gatk_suf.bai $out_prefix.bai


echo;echo Split BAM
bamtools split -tag ZS -in $out_prefix.bam


$merge_bam -p$out_prefix.top $out_prefix.TAG_ZS_++.bam $out_prefix.TAG_ZS_+-.bam \
&& ln -fs $out_prefix.top.merge.bam $out_prefix.top.sorted.bam

$merge_bam -p$out_prefix.bottom $out_prefix.TAG_ZS_-+.bam $out_prefix.TAG_ZS_--.bam \
&& ln -fs $out_prefix.bottom.merge.bam $out_prefix.bottom.sorted.bam

$deldup -p$out_prefix.top $out_prefix.top.sorted.bam
$deldup -p$out_prefix.bottom $out_prefix.bottom.sorted.bam


$merge_bam -p$out_prefix.deldup $out_prefix.top.deldup.bam $out_prefix.bottom.deldup.bam \
&& ln -fs $out_prefix.deldup.merge.bam $out_prefix.rmdups.bam

echo;echo Filter BAM  
bamtools  filter -isMapped true -isPaired true -isProperPair true -forceCompression  \
-in $out_prefix.rmdups.bam  \
-out $out_prefix.filtered.bam  

echo;echo Filter BAM  
bam clipOverlap --stats \
--in $out_prefix.filtered.bam \
--out $out_prefix.clipped0.bam

samtools index $out_prefix.clipped0.bam

$filter_bam_gatk -p$out_prefix.2 $out_prefix.clipped0.bam # clipped0 has whole soft clipped reads
ln -fs $out_prefix.2.$filter_bam_gatk_suf.bam $out_prefix.clipped.bam
ln -fs $out_prefix.2.$filter_bam_gatk_suf.bai $out_prefix.clipped.bai


echo;echo Create a Picard Interval List Header
samtools view -H $out_prefix.clipped.bam > $out_prefix.bam_header.txt

cat $out_prefix.bam_header.txt|cut -f2-3|sed -n s/^SN://p |sed  s/LN:// > $ref_genome_size



echo;echo Pad , Sort and Merge Overlapping and Book-Ended  Regions  
bedtools slop  -i $capture_bed -b 100 -g $ref_genome_size |\
 bedtools sort -i - | bedtools merge -i - > padded_capture_target.bed 
 
cat $capture_bed | gawk -F'\t' 'BEGIN{SUM=0}{SUM+=$3-$2}END{print SUM}' > $out_prefix.sum.txt

echo;echo Create a Picard Target Interval List Body 
cat $primary_bed | 
awk '{print  $1 "\t" $2+1 "\t" $3 "\t+\tinterval_" NR}' > target_body.txt

echo;echo Concatenate to Create a Picard Target Interval List 
cat $out_prefix.bam_header.txt target_body.txt > target_intervals.txt

echo;echo Create a Picard Bait Interval List Body 
cat $capture_bed | 
gawk '{print $1 "\t" $2+1 "\t" $3 "\t+\tinterval_" NR}' > bait_body.txt

echo;echo Concatenate to Create a Picard Bait  Interval List  
cat $out_prefix.bam_header.txt bait_body.txt > bait_intervals.txt 


# The level(s) at which to accumulate metrics
echo;echo picard CollectAlignmentSummaryMetrics
$java_run/picard/CollectAlignmentSummaryMetrics.jar \
METRIC_ACCUMULATION_LEVEL=ALL_READS \
INPUT=$out_prefix.clipped.bam \
OUTPUT=$out_prefix.picard_alignment_metrics.txt \
REFERENCE_SEQUENCE=$ref_genome \
VALIDATION_STRINGENCY=LENIENT


echo;echo picard CalculateHsMetrics
$java_run/picard/CalculateHsMetrics.jar \
BAIT_INTERVALS=bait_intervals.txt \
TARGET_INTERVALS=target_intervals.txt \
INPUT=$out_prefix.clipped.bam \
OUTPUT=$out_prefix.picard_hs_metrics.txt \
METRIC_ACCUMULATION_LEVEL=ALL_READS \
REFERENCE_SEQUENCE=$ref_genome \
VALIDATION_STRINGENCY=LENIENT

echo;echo Estimate Insert Size Distribution  
$java_run/picard/CollectInsertSizeMetrics.jar \
INPUT=$out_prefix.filtered.bam \
OUTPUT=$out_prefix.picard_insert_size_metrics.txt \
HISTOGRAM_FILE=$out_prefix.picard_insert_size_plot.pdf \
VALIDATION_STRINGENCY=LENIENT


echo;echo Count On-Target Reads
bedtools intersect  -bed -abam $out_prefix.clipped.bam  -b $primary_bed > $out_prefix.intersect.primary.txt
bedtools intersect  -bed -abam $out_prefix.clipped.bam  -b $capture_bed > $out_prefix.intersect.capture.txt

#-----------------------------------
bedtools intersect -abam $out_prefix.filtered.bam -b $primary_bed | samtools view - | wc -l > sample.on-target.txt

bedtools coverage -hist -abam $out_prefix.filtered.bam -b $primary_bed > sample.hist.txt
#-----------------------------------



# echo;echo DepthOfCoverage 1
# $java_run/gatk -T DepthOfCoverage \
# -R $ref_genome \
# -I $out_prefix.clipped.bam \
# -o $out_prefix.primary_coverage.txt \
# -L $primary_bed \
# -ct 1  -ct 10 -ct 20  



# echo;echo DepthOfCoverage 2
# $java_run/gatk -T DepthOfCoverage \
# -R $ref_genome \
# -I $out_prefix.clipped.bam 
# -o $out_prefix.capture_coverage.txt \
# -L $capture_bed \
# -ct 1 -ct 10 -ct 20 



# $summ -g -p$out_prefix.primary $out_prefix.clipped.bam $primary_bed
# $summ -g -p$out_prefix.capture $out_prefix.clipped.bam $capture_bed

# time-consuming, move to then end.
# $summ_gatk -G -p$out_prefix.primary_coverage.txt $out_prefix.clipped.bam $primary_bed
# $summ_gatk -G -p$out_prefix.capture_coverage.txt $out_prefix.clipped.bam $capture_bed



# Determine methylation percentage using BSMAP
# support samtools 0.19.
# very memory eatting, min 30g
echo;echo Determine methylation percentage 
methratio.py  -d $ref_genome \
-s $samtools_path \
-m 1 -z -i skip \
-o $out_prefix.methylation_results.txt \
$out_prefix.clipped.bam 

# echo;echo Determine methylation percentage 
# methratio.py  -d $ref_genome \
# -s samtools -m 1 -z -i skip \
# -c $sample_name \
# -o $out_prefix.$sample_name.methylation_results.txt \
# $out_prefix.clipped.bam 


# Determine bisulfite conversion efficiency using BSMAP
echo;echo Base Quality Recalibration first step
$java_run1/bissnp -R $ref_genome \
-I $out_prefix.clipped.bam \
-T BisulfiteCountCovariates \
-cov ReadGroupCovariate \
-cov QualityScoreCovariate \
-cov CycleCovariate \
-recalFile $out_prefix.recalFile_before.csv \
-knownSites $data_path/ncbi/dbsnp/All_20150605.vcf
# -nt 4


echo;echo Base Quality Recalibration second step
$java_run1/bissnp -R $ref_genome \
-I $out_prefix.clipped.bam \
-o $out_prefix.recal.bam \
-T BisulfiteTableRecalibration \
-recalFile $out_prefix.recalFile_before.csv \
-maxQ 40



echo;echo Combined SNP/methylation calling 
$java_run1/bissnp -R $ref_genome \
-I $out_prefix.recal.bam \
-T BisulfiteGenotyper \
-vfn1 $out_prefix.cpg.raw.vcf  \
-vfn2 $out_prefix.snp.raw.vcf  \
-L $capture_bed \
-stand_call_conf 20 \
-stand_emit_conf 0  \
-mmq 30 \
-mbq 0  \
-nt 4
# -D genome.snps.vcf \


## maybe it's no need to sort
## sortByRefAndCor.pl makes mistake by inverse the order of header and context.
# echo;echo Sort VCF files 
# sortByRefAndCor.pl --k 1 --c 2 \
# $out_prefix.snp.raw.vcf \
# $ref_genome.fai \
# > $out_prefix.snp.raw.sorted.vcf


# sortByRefAndCor.pl --k 1 --c 2 \
# $out_prefix.cpg.raw.vcf \
# $ref_genome.fai \
# > $out_prefix.cpg.raw.sorted.vcf
# ln -fs $out_prefix.snp.raw.vcf $out_prefix.snp.raw.sorted.vcf
# ln -fs $out_prefix.cpg.raw.vcf $out_prefix.cpg.raw.sorted.vcf

run_done

# Sort the raw VCFs in chromosomal order before post-processing
# (vcf-sort -c uses natural chromosome ordering).
vcf-sort -c < "$out_prefix.snp.raw.vcf" > "$out_prefix.snp.raw.sorted.vcf"
vcf-sort -c < "$out_prefix.cpg.raw.vcf" > "$out_prefix.cpg.raw.sorted.vcf"

echo; echo Filter SNP/methylation calls
# NOTE(review): $java_run1 must stay unquoted -- it expands to
# "<java> <mem opts> -jar <tools>/jars" and relies on word splitting,
# with "/bissnp" appended to the jar directory path.
$java_run1/bissnp -R "$ref_genome" \
-T VCFpostprocess \
-oldVcf "$out_prefix.snp.raw.sorted.vcf" \
-newVcf "$out_prefix.snp.filtered.vcf" \
-snpVcf "$out_prefix.snp.raw.sorted.vcf" \
-o "$out_prefix.snp.filter.summary.txt"


echo; echo VCFpostprocess
# CpG filtering uses the SNP calls (-snpVcf) to mask SNP-confounded sites.
$java_run1/bissnp -R "$ref_genome" \
-T VCFpostprocess \
-oldVcf "$out_prefix.cpg.raw.sorted.vcf" \
-newVcf "$out_prefix.cpg.filtered.vcf" \
-snpVcf "$out_prefix.snp.raw.sorted.vcf" \
-o "$out_prefix.cpg.filter.summary.txt"


echo; echo Convert VCF to BED file
vcf2bed6plus2.pl "$out_prefix.snp.filtered.vcf"
vcf2bed6plus2.pl "$out_prefix.cpg.filtered.vcf"

# echo;echo summ_gatk
# $summ_gatk -G -p$out_prefix.primary_coverage.txt $out_prefix.clipped.bam $primary_bed
# $summ_gatk -G -p$out_prefix.capture_coverage.txt $out_prefix.clipped.bam $capture_bed

# Completion hook: $cmd_done names a script provided by the sourced config
# (presumably marks this stage as finished -- TODO confirm).
. "$cmd_done"