#!/bin/bash -e
set -e  # shebang options are lost when invoked as "bash script.sh"; set -e explicitly

# Print usage to stderr and exit 2.
# The option list mirrors the getopts spec below; note that -t (resume
# directory) and -d (backup destination) take arguments.
info() {
    echo "Usage: $(basename "$0") [-s sample_name -i panel1|panel2 -t run_dir -d destination] r1 r2" >&2
    exit 2
}

# Parse command-line flags; any unknown flag or missing argument prints usage.
while getopts ':p:f:s:t:i:d:' opt; do
	case "$opt" in
		p) out_prefix=$OPTARG ;;
		f) suffix=$OPTARG ;;
		s) sample_name=$OPTARG ;;
		t) run_continue_dir=$OPTARG ;;
		i) interval=$OPTARG ;;
		d) destination=$OPTARG ;;
		*) info ;;
	esac
done
shift $((OPTIND - 1))

# Two positional arguments are required: the r1 and r2 read files.
[ $# -ge 2 ] || info

# Helper-script names (resolved via PATH) and pre-built command lines.
mnt=mnt.sh
path_abs=path_abs.sh
makedir=makedir.sh
sam2b=sam2b_samtools.sh
filter_bam_gatk=filter_bams_gatk.sh
filter_bam_gatk_suf=filter_gatk
merge_bam=merge_bam.sh
deldup=deldup.sh
summ=summ.sh
summ_gatk=summ_gatk.sh
# BUGFIX: these were single-quoted, so the literal strings '$sample_name'
# and '$destination' were passed to the tools -- the shell does not
# re-expand variables stored inside a string (ShellCheck SC2089/SC2090).
# Double quotes expand them here, after getopts has set them.
fastqc="fastqc.sh -t8 -s$sample_name"
fqstat="fqstat.sh -t8 -s$sample_name"
csv2xls=csv2xls.pl
tab2xls=tab2xls.pl
bk="bk.sh -tproject/$destination"


# NOTE(review): $tools_path and $java_mem are not assigned anywhere in
# this script; they are presumably exported by the caller's environment
# -- TODO confirm.  They are expanded immediately here, so the later
# ". $var" sourcing cannot supply them for these two assignments.
samtools_path=/mnt/ilustre/app/rna/bin #special version needed
java=$tools_path/jre1.6.0_45/bin/java
# $java_run1 is used unquoted later as "$java_run1/bissnp" etc., relying
# on word splitting to form "java <mem-opts> -jar <path>/bissnp".
java_run1="$java $java_mem -jar $tools_path/jars" #specific version needed
# export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar


# Resolve the two read files to absolute paths (the script cd's into the
# run directory below, so relative arguments would otherwise break).
# Use the $path_abs helper declared above for consistency, and quote the
# positional arguments so paths with spaces survive.
r1=$("$path_abs" "$1")
r2=$("$path_abs" "$2")


# Work inside $run_continue_dir; when -t was not given, create a fresh
# run directory with the makedir helper.  Abort if either step fails
# rather than running the whole pipeline in the wrong directory.
if test -z "$run_continue_dir"; then
    run_continue_dir=$("$makedir" -p"$sample_name") || exit 1
fi
cd "$run_continue_dir" || exit 1

# ------------- end makedir ------------------

# NOTE(review): $var is never assigned in this script; it must come from
# the caller's environment and presumably points at a settings file
# defining $threads, $ref_genome, $data_path, $cmd_done, ... -- TODO
# confirm where it is exported.
. $var

# $mnt `basename $0`.`basename $run_continue_dir` & # `basename $run_continue_dir` for strip / if exist in dir's end.

# Start the mnt.sh watchdog in the background, tagged with this script's
# PID and a "<script>.<run-dir>" label.
# NOTE(review): mnt_pid is captured but never waited on or killed later
# in this file -- TODO: reap or remove.
$mnt $$ `basename $0 .sh`.`basename $(pwd)` &
mnt_pid=$!


# Log the input reads and pin the adapter/interval/reference files used below.
echo; echo "$r1 $r2"
adapters_file=$tools_path/Trimmomatic-0.36/adapters/TruSeq3-PE.fa
primary_bed=$data_path/intervals/1/primary.bed
capture_bed=$data_path/intervals/1/capture.bed
# Genome "chrom<TAB>size" file generated later from the BAM header.
ref_genome_size=ref_genome.size.txt

#--------------------------------------------------------

# The trimming/mapping/dedup/clipping stage below is DISABLED: it is fed
# as a here-document to ':' (a no-op).  The delimiter is quoted so the
# skipped body undergoes NO parameter/command expansion -- with the
# original unquoted <<_done, any $(...) accidentally left in the dead
# code would still have executed.
:<<'_done'

$fqstat $r1 $r2
$fastqc $r1 $r2


${java_run}/trimmomatic PE -threads $threads -phred33 \
$r1 $r2 \
$out_prefix.1.trim.fq \
$out_prefix.1.unpair.fq \
$out_prefix.2.trim.fq \
$out_prefix.2.unpair.fq \
ILLUMINACLIP:${adapters_file}:2:30:10 \
LEADING:20 \
TRAILING:20 \
SLIDINGWINDOW:5:20 \
MINLEN:75 

echo;echo bsmap
bsmap  -r 0 -s 16 -n 1 \
-p $threads \
-a $out_prefix.1.trim.fq \
-b $out_prefix.2.trim.fq \
-d $ref_genome \
-o $out_prefix.sam 
# -m TC

$sam2b -p$out_prefix $out_prefix.sam

echo;echo Add Read Group
$java_run/picard/AddOrReplaceReadGroups.jar \
VALIDATION_STRINGENCY=LENIENT \
INPUT=$out_prefix.sort.bam  \
OUTPUT=$out_prefix.rg.bam  \
CREATE_INDEX=true \
RGID=$sample_name RGSM=$sample_name RGLB=$sample_name RGPL=illumina RGPU=xunit


$filter_bam_gatk $out_prefix.rg.bam
ln -fs $out_prefix.$filter_bam_gatk_suf.bam $out_prefix.bam
ln -fs $out_prefix.$filter_bam_gatk_suf.bai $out_prefix.bai


echo;echo Split BAM
bamtools split -tag ZS -in $out_prefix.bam


$merge_bam -p$out_prefix.top $out_prefix.TAG_ZS_++.bam $out_prefix.TAG_ZS_+-.bam \
&& ln -fs $out_prefix.top.merge.bam $out_prefix.top.sorted.bam

$merge_bam -p$out_prefix.bottom $out_prefix.TAG_ZS_-+.bam $out_prefix.TAG_ZS_--.bam \
&& ln -fs $out_prefix.bottom.merge.bam $out_prefix.bottom.sorted.bam

$deldup -p$out_prefix.top $out_prefix.top.sorted.bam
$deldup -p$out_prefix.bottom $out_prefix.bottom.sorted.bam


$merge_bam -p$out_prefix.deldup $out_prefix.top.deldup.bam $out_prefix.bottom.deldup.bam \
&& ln -fs $out_prefix.deldup.merge.bam $out_prefix.rmdups.bam

echo;echo Filter BAM  
bamtools  filter -isMapped true -isPaired true -isProperPair true -forceCompression  \
-in $out_prefix.rmdups.bam  \
-out $out_prefix.filtered.bam  

echo;echo Filter BAM  
bam clipOverlap --stats --unmapped \
--in $out_prefix.filtered.bam \
--out $out_prefix.clipped0.bam

samtools index $out_prefix.clipped0.bam

$filter_bam_gatk -p$out_prefix.2 $out_prefix.clipped0.bam # clipped0 has whole soft clipped reads
ln -fs $out_prefix.2.$filter_bam_gatk_suf.bam $out_prefix.clipped.bam
ln -fs $out_prefix.2.$filter_bam_gatk_suf.bai $out_prefix.clipped.bai

#------------------ end getting a clipped bam file -------------------------

_done

# Determine methylation percentage using BSMAP's methratio.py.
# Needs the special samtools build in $samtools_path (supports samtools 0.19).
# Very memory hungry: minimum ~30 GB.
# NOTE(review): expansions below are intentionally unquoted, matching the
# rest of this script; paths must not contain whitespace.
echo;echo Determine methylation percentage 
methratio.py  -d $ref_genome \
-s $samtools_path \
-m 1 \
-i skip \
-o $out_prefix.methylation_results.txt \
$out_prefix.clipped.bam 

# methratio.py option reference:
# -u, --unique process only unique mappings/pairs
# -r, --remove-duplicate remove duplicated reads.
# -m FOLD, --min-depth=FOLD report loci with sequencing depth>=FOLD. [default: 1]
# -z, --zero-meth  report loci with zero methylation ratios. (depreciated, -z will be always enabled)
# -g, --combine-CpG     combine CpG methylaion ratios on both strands.
# -n, --no-header       don't print a header line
# -i CT_SNP, --ct-snp=CT_SNP how to handle CT SNP ("no-action", "correct", "skip"), default: "correct". The '-i' switch directs the script to ignore positions where there is a possible C->T SNP.
# -x TYPE, --context=TYPE methylation pattern type [CG|CHG|CHH], multiple types separated by ','. [default: all]

# Earlier per-sample variant (kept for reference):
# echo;echo Determine methylation percentage 
# methratio.py  -d $ref_genome \
# -s samtools -m 1 -z -i skip \
# -c $sample_name \
# -o $out_prefix.$sample_name.methylation_results.txt \
# $out_prefix.clipped.bam 

# --------------------end bsmap methylaion percentage -----------------------------




# BisSNP base-quality score recalibration (bisulfite-aware BQSR).
# NOTE(review): the original comment here said "Determine bisulfite
# conversion efficiency using BSMAP", which does not describe this code --
# these two steps are BisSNP covariate counting, then table recalibration.
# NOTE(review): $java_run1 holds "java $java_mem -jar $tools_path/jars"
# and relies on unquoted word splitting; do not quote it.
echo;echo Base Quality Recalibration first step
$java_run1/bissnp -R $ref_genome \
-I $out_prefix.clipped.bam \
-T BisulfiteCountCovariates \
-cov ReadGroupCovariate \
-cov QualityScoreCovariate \
-cov CycleCovariate \
-recalFile $out_prefix.recalFile_before.csv \
-knownSites $data_path/ncbi/dbsnp/All_20150605.vcf
# -nt 4


# Apply the recalibration table produced above; -maxQ 40 caps quality scores.
echo;echo Base Quality Recalibration second step
$java_run1/bissnp -R $ref_genome \
-I $out_prefix.clipped.bam \
-o $out_prefix.recal.bam \
-T BisulfiteTableRecalibration \
-recalFile $out_prefix.recalFile_before.csv \
-maxQ 40


# BisSNP BisulfiteGenotyper: joint SNP + methylation calling on the
# recalibrated BAM, restricted to the capture regions (-L).
# -vfn1 receives the CpG/methylation calls, -vfn2 the SNP calls.
# -stand_call_conf 20 / -stand_emit_conf 0: call at >=Q20 but emit all sites.
# -mmq 30: minimum mapping quality; -mbq 0: no base-quality floor;
# -nt 4: four compute threads.
echo;echo Combined SNP/methylation calling 
$java_run1/bissnp -R $ref_genome \
-I $out_prefix.recal.bam \
-T BisulfiteGenotyper \
-vfn1 $out_prefix.cpg.raw.vcf  \
-vfn2 $out_prefix.snp.raw.vcf  \
-L $capture_bed \
-stand_call_conf 20 \
-stand_emit_conf 0  \
-mmq 30 \
-mbq 0  \
-nt 4
# -D genome.snps.vcf \

# BisSNP output is not guaranteed to be coordinate-sorted; sort both raw
# VCFs before post-processing (-c: natural chromosome ordering).
# vcf-sort reads stdin, so redirect the files directly instead of piping
# through cat.  (A sortByRefAndCor.pl alternative was previously kept
# here commented out; vcf-sort replaced it.)
vcf-sort -c < "$out_prefix.snp.raw.vcf" > "$out_prefix.snp.raw.sorted.vcf"
vcf-sort -c < "$out_prefix.cpg.raw.vcf" > "$out_prefix.cpg.raw.sorted.vcf"

 
# VCFpostprocess: filter the raw calls.  -snpVcf supplies the SNP set
# used to flag positions near SNPs; for the SNP VCF it is filtered
# against itself (NOTE(review): appears intentional -- TODO confirm).
echo;echo Filter SNP/methylation calls  
$java_run1/bissnp -R $ref_genome \
-T VCFpostprocess \
-oldVcf $out_prefix.snp.raw.sorted.vcf \
-newVcf $out_prefix.snp.filtered.vcf  \
-snpVcf $out_prefix.snp.raw.sorted.vcf \
-o $out_prefix.snp.filter.summary.txt


# Same post-processing for the CpG/methylation calls, masked with the
# sorted SNP calls.
echo;echo VCFpostprocess
$java_run1/bissnp -R $ref_genome \
-T VCFpostprocess \
-oldVcf $out_prefix.cpg.raw.sorted.vcf \
-newVcf $out_prefix.cpg.filtered.vcf  \
-snpVcf $out_prefix.snp.raw.sorted.vcf \
-o $out_prefix.cpg.filter.summary.txt


# Convert the filtered VCFs to BED6+2 for downstream use.
echo;echo Convert VCF to BED file
vcf2bed6plus2.pl  $out_prefix.snp.filtered.vcf
vcf2bed6plus2.pl  $out_prefix.cpg.filtered.vcf

# ------------ end bissnp methylaion calling ----------------------------




# --------------- some statistics -------------------------

#-----------------summary-------------------------

# Post-pipeline QC: FastQC on the recalibrated BAM.
# NOTE(review): $fastqc and $summ_gatk hold multi-word command lines and
# rely on unquoted word splitting; do not quote them.
$fastqc $out_prefix.recal.bam

# Per-region coverage summaries over the primary and capture targets.
echo;echo summ_gatk
$summ_gatk -G -p$out_prefix.primary_coverage.txt $out_prefix.clipped.bam $primary_bed
$summ_gatk -G -p$out_prefix.capture_coverage.txt $out_prefix.clipped.bam $capture_bed


echo; echo Create a Picard Interval List Header
samtools view -H "$out_prefix.clipped.bam" > "$out_prefix.bam_header.txt"

# Build a "chrom<TAB>length" genome file for bedtools from the @SQ lines
# (fields 2-3 are "SN:<name>  LN:<length>").  Read the header file
# directly instead of piping through cat, and quote the sed scripts.
cut -f2-3 "$out_prefix.bam_header.txt" | sed -n 's/^SN://p' | sed 's/LN://' > "$ref_genome_size"



echo; echo Pad, Sort and Merge Overlapping and Book-Ended Regions
# Pad each capture region by 100 bp on both sides, then sort and merge
# overlapping/book-ended intervals.
bedtools slop -i "$capture_bed" -b 100 -g "$ref_genome_size" \
 | bedtools sort -i - | bedtools merge -i - > padded_capture_target.bed

# Total size (bp) of the capture design; awk reads the BED directly
# (no cat needed).
gawk -F'\t' 'BEGIN{SUM=0}{SUM+=$3-$2}END{print SUM}' "$capture_bed" > "$out_prefix.sum.txt"

echo; echo Create a Picard Target Interval List Body
# BED is 0-based half-open; Picard interval lists are 1-based, hence $2+1.
awk '{print  $1 "\t" $2+1 "\t" $3 "\t+\tinterval_" NR}' "$primary_bed" > target_body.txt

echo; echo Concatenate to Create a Picard Target Interval List
cat "$out_prefix.bam_header.txt" target_body.txt > target_intervals.txt

echo; echo Create a Picard Bait Interval List Body
gawk '{print $1 "\t" $2+1 "\t" $3 "\t+\tinterval_" NR}' "$capture_bed" > bait_body.txt

echo; echo Concatenate to Create a Picard Bait Interval List
cat "$out_prefix.bam_header.txt" bait_body.txt > bait_intervals.txt


# picard BedToIntervalList
# BUGFIX(review): $primary and $capture are never defined in this script;
# the BED files are stored in $primary_bed / $capture_bed (set above), so
# the original invocations had empty I=/O= arguments.  If ". $var"
# actually exported plain $primary/$capture, revert this -- TODO confirm.
$java_run/picard/BedToIntervalList.jar \
I=$primary_bed \
O=$primary_bed.interval.list \
SD=$ref_genome.dict \
SORT=true

$java_run/picard/BedToIntervalList.jar \
I=$capture_bed \
O=$capture_bed.interval.list \
SD=$ref_genome.dict \
SORT=true


# METRIC_ACCUMULATION_LEVEL: the level(s) at which to accumulate metrics.
# NOTE(review): $java_run is never assigned in this script; presumably
# defined by the file sourced via ". $var" -- TODO confirm (only
# $java_run1 is set above).
echo;echo picard CollectAlignmentSummaryMetrics
$java_run/picard/CollectAlignmentSummaryMetrics.jar \
METRIC_ACCUMULATION_LEVEL=ALL_READS \
INPUT=$out_prefix.clipped.bam \
OUTPUT=$out_prefix.picard_alignment_metrics.txt \
REFERENCE_SEQUENCE=$ref_genome \
VALIDATION_STRINGENCY=LENIENT


# Hybrid selection is a method that enables selection of specific sequences from a pool of genomic DNA for targeted sequencing analyses via pull-down assays. Typical applications include the selection of exome sequences or pathogen-specific sequences in complex biological samples. Hybrid selection involve the use baits to select desired fragments.

# Briefly, baits are RNA (or sometimes DNA) molecules synthesized with biotinylated nucleotides. The biotinylated nucleotides are ligands for streptavidin enabling enabling RNA:DNA hybrids to be captured in solution. The hybridization targets are sheared genomic DNA fragments, which have been "polished" with synthetic adapters to facilitate PCR cloning downstream. Hybridization of the baits with the denatured targets is followed by selective capture of the RNA:DNA "hybrids" using streptavidin-coated beads via pull-down assays or columns.

# Systematic errors, ultimately leading to sequence bias and incorrect variant calls, can arise at several steps. See the GATK dictionary entries bait bias and pre-adapter artifacts for more details.
# https://software.broadinstitute.org/gatk/guide/article?id=6331

# Hybrid-selection metrics over the bait/target lists built above.
echo;echo picard CollectHsMetrics # CalculateHsMetrics DEPRECATED: Use CollectHsMetrics instead.
$java_run/picard/CollectHsMetrics.jar \
BAIT_INTERVALS=bait_intervals.txt \
TARGET_INTERVALS=target_intervals.txt \
INPUT=$out_prefix.clipped.bam \
OUTPUT=$out_prefix.picard_hs_metrics.txt \
METRIC_ACCUMULATION_LEVEL=ALL_READS \
REFERENCE_SEQUENCE=$ref_genome \
VALIDATION_STRINGENCY=LENIENT

# NOTE(review): insert-size metrics run on the pre-clipping filtered.bam,
# not clipped.bam like the other metrics -- presumably intentional
# (clipping alters fragment geometry); confirm.
echo;echo Estimate Insert Size Distribution  
$java_run/picard/CollectInsertSizeMetrics.jar \
INPUT=$out_prefix.filtered.bam \
OUTPUT=$out_prefix.picard_insert_size_metrics.txt \
HISTOGRAM_FILE=$out_prefix.picard_insert_size_plot.pdf \
VALIDATION_STRINGENCY=LENIENT


echo; echo Count On-Target Reads
# BAM-vs-BED intersections, emitted as BED (-bed) for QC inspection.
bedtools intersect -bed -abam "$out_prefix.clipped.bam" -b "$primary_bed" > "$out_prefix.intersect.primary.txt"
bedtools intersect -bed -abam "$out_prefix.clipped.bam" -b "$capture_bed" > "$out_prefix.intersect.capture.txt"

#-----------------------------------
# Count on-target reads; 'samtools view -c' replaces 'view | wc -l'
# (one fewer process, same record count).
bedtools intersect -abam "$out_prefix.filtered.bam" -b "$primary_bed" | samtools view -c - > sample.on-target.txt

bedtools coverage -hist -abam "$out_prefix.filtered.bam" -b "$primary_bed" > sample.hist.txt
#-----------------------------------

# time-consuming, move to then end.
# $summ_gatk -G -p$out_prefix.primary_coverage.txt $out_prefix.clipped.bam $primary_bed
# $summ_gatk -G -p$out_prefix.capture_coverage.txt $out_prefix.clipped.bam $capture_bed






# Convert every *metrics.txt report to .xls, dropping picard's '#'
# comment/header lines.  Glob directly instead of parsing ls output
# (SC2045), and skip cleanly when no metrics files were produced.
for i in *metrics.txt; do
    [ -e "$i" ] || continue
    grep -v '^#' "$i" | $tab2xls - "$i.xls"
done
# $csv2xls *summary x.xls
# $tab2xls some.txt x.xls

# Back up the plots and spreadsheets ($bk is a multi-word command line;
# keep it unquoted).
# $bk *.pdf *.xls some.txt some.bed
$bk *.pdf *.xls

# Signal pipeline completion.
# NOTE(review): $cmd_done is expected to come from ". $var" -- TODO confirm.
. $cmd_done