#!/bin/bash

#######
# This software is Copyright 2013 Greg Gloor and is distributed under the 
#    terms of the GNU General Public License.
#
# This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#    along with this program. It should be located in gnu_license.txt
#    If not, see <http://www.gnu.org/licenses/>.
########
#DON'T EVEN THINK OF TRYING TO RUN THIS WITHOUT READING THROUGH THIS INITIAL INFORMATION
#if the program fails, throw the newly created files in the data and analysis directory away
########
##Prior to running workflow.sh you must edit the uclust= and BIN= assignments below (in the REQUIREMENTS section) to give your version of uclust/usearch and the location of the Illumina_bin folder

##I assume you will use usearch version 6 or greater; if not, uncomment the appropriate uclust/usearch command line in the clustering section near the end of this script

#this is the final pipeline
#
# to run it
#
#./workflow.sh name cluster% variable_region
#
##it will print a log of messages to STDOUT

##### MULTIPLE REQUIREMENTS ############
# 1 #
#
#Requires 2 input files:
#data_name/primers.txt -- A file that contains the sequences of the left and right end PCR primers
#               written 5' to 3'
#               Degenerate positions are indicated thusly [ACG]
#				the left primer is on line 1
#				the right primer is on line 2
#
#data_name/samples.txt --A file that contains the meta information
#
# 2 #
#tested on the following versions of uclust
#uclust1.0.50_osxi86_64 
#uclust3.0.617_i86darwin32
#when you install the uclust software modify the following to reflect your version
#
#old version
#uclust=uclust3.0.617_i86darwin32
uclust=usearch4.2.66_i86darwin32
#
#3#
#BIN folder location change this to reflect your bin folder that contains the scripts
BIN="/Volumes/disk1/illumina_bin"
#
#
###### RECOMMENDED DIRECTORY STRUCTURE
#
# IF YOU WANT A DIFFERENT STRUCTURE, YOU MUST EDIT THIS SCRIPT YOURSELF
#
#programs in BIN
#data in data
#analysis in analysis
#reads in reads
#
#### END REQUIREMENTS ########

#GOT TO HERE AND UNDERSTAND THE REQUIREMENTS? GO AHEAD AND RUN IT!

######THE SCRIPT STARTS HERE###########

#command line arguments: ./workflow.sh name cluster% variable_region
name="$1"      #name to prepend to the data_ and analysis_ directories
cluster="$2"   #identity threshold handed to uclust/usearch (e.g. 0.95)
VARREG="$3"    #variable region identifier passed to fastq_to_tab.pl
#################### FIRST GET THE PRIMER AND SEQUENCE TAG INFORMATION
#DATA STORED IN THE FOLLOWING VARIABLES
#Lp, Rp, rRp, VALIDTAGS

# Sanity-check the required directory layout before doing any work.
# Abort immediately if the scripts directory is missing: every later step
# invokes helper scripts out of $BIN and would fail confusingly otherwise.
if [ -d "$BIN" ]; then
	echo "BIN directory exists"
else 
	echo "BIN directory does not exist"
	echo "please try and re-install the analysis pipeline"
	exit 1
fi 
# The per-run data directory must hold primers.txt and samples.txt.
# If it does not exist yet, create it and stop so the user can put
# those two files in place before re-running.
if [ -d "data_$name" ]; then
	echo "data directory exists"
else 
	echo "data directory was created"
	echo "you must put the primers and samples file here before proceeding"
	mkdir "data_$name"
	exit 1
fi 

# Create the per-run analysis output directory on first use.
if [ ! -d "analysis_$name" ]; then
	echo "analysis directory was created"
	echo ""
	mkdir "analysis_$name"
else 
	echo "analysis directory exists"
	echo ""
fi 


#paths of the working data files; several are intermediates that the
#pipeline deletes again once the final mapped table exists
overlapped_startfile="data_$name/start_overlapped_tab.txt"  #raw tab-converted reads
finaltabbedfile="data_$name/overlapped_tab.txt"             #tag-validated tabbed reads

groups_file="data_$name/groups.txt"                   #ISU groups (removed at cleanup)
reads_in_groups_file="data_$name/reads_in_groups.txt" #read -> ISU membership
groups_fa_file="data_$name/groups.fa"                 #ISU groups as fasta
c95file="data_$name/results.uc"                       #uclust/usearch cluster table
mappedfile="data_$name/mapped_otu_isu_reads.txt"      #final per-read OTU/ISU table

#if temprimers file does not exist
#create it
#(temprimers is deleted again at the end of this block, so in practice
# this branch runs on every invocation of the script)
if [[ ! -e data_$name/temprimers ]]; then
	
	#first get the primers and assign them to the
	#variables Lp, Rp and rRp
	#temprimers ends up holding 4 lines:
	#  line 1: left primer (5'->3')                -> Lp
	#  line 2: right primer (5'->3')               -> Rp
	#  line 3: reverse complement of left primer   (not used below)
	#  line 4: reverse complement of right primer  -> rRp
	#reverse each primer character-by-character and save in a tempfile
	cat data_$name/primers.txt > data_$name/temprimers
	rev < data_$name/primers.txt > data_$name/revtemp
	
	#now get the complement and append to temprimers;
	#swapping ][ as well keeps degenerate positions like [ACG]
	#well-formed after the sequence has been reversed
	tr "ACGT[]" "TGCA][" < data_$name/revtemp >> data_$name/temprimers
	
	#remove the unnecessary temp file
	rm data_$name/revtemp 
		
	#Declare array 
	declare -a ARRAY
	
	#Open file for reading to array
	#exec attaches filehandle 10 to the filename
	exec 10<data_$name/temprimers
		let count=0
		
		#slurp temprimers line-by-line into ARRAY
		while read LINE <&10; do
			#echo $LINE $count
			ARRAY[$count]=$LINE
			((count++))
		done
		
		#get the number of elements in the array
		#echo Number of elements: ${#ARRAY[@]}
		Lp=${ARRAY[0]}
		Rp=${ARRAY[1]}
		rRp=${ARRAY[3]}
		# echo array's content
		#echo ${ARRAY[@]}
	# close file 
	exec 10>&-
	
	#remove the unnecessary temp file
	rm data_$name/temprimers 
	
	#get the list of valid tags
	#make_valid_tags.pl reads the sample sheet and emits the allowed
	#sequence-tag pairs, one per line (see the script in $BIN)
	$BIN/make_valid_tags.pl data_$name/samples.txt >  data_$name/valid_pairs.txt
	
	#read the valid tag pairs into the VALIDTAG array, one line per element
	declare -a VALIDTAG
	exec 10<data_$name/valid_pairs.txt
	let count=0
	while read LINE <&10; do
		VALIDTAG[$count]=$LINE
		((count++))
	done
	exec 10>&-
fi

#join the valid tag-pair lines into one space-separated string; this is
#later passed as a single argument to get_validtags.pl
VALIDTAGS=${VALIDTAG[*]}

#log the parsed primer/tag information so the run can be checked from STDOUT
echo primer 1 is............$Lp
echo primer 2 is............$Rp
echo revcom of primer 2 is..$rRp
echo valid tag pairs are....$VALIDTAGS

######################### DONE GETTING THE PRIMER AND SEQUENCE TAG INFORMATION
echo DONE GETTING THE PRIMER AND SEQUENCE TAG INFORMATION

# Convert the overlapped fastq reads to tab format and keep only reads whose
# sequence tags form a valid pair.  Skipped when the output already exists so
# the pipeline can be re-run after a partial failure.
# (The original 'elif [ ! -e ... ]' re-tested the negation of a condition
#  already known false; a plain else is equivalent and cannot fall through.)
if [[ -e $finaltabbedfile ]]
	then
	echo "final tabbed file, $finaltabbedfile, already made"
else
	echo "making starting tabbed file"
	#fastq_to_tab.pl turns reads/overlap.fastq into one read per tab-separated line
	$BIN/fastq_to_tab.pl reads/overlap.fastq $VARREG > "$overlapped_startfile"
	echo "making $finaltabbedfile"
	#get_validtags.pl filters by left primer, revcomp right primer and the
	#whitelist of valid tag pairs (passed as one space-separated argument)
	$BIN/get_validtags.pl "$overlapped_startfile" $Lp $rRp "$VALIDTAGS" > "$finaltabbedfile"
fi

#making the ISU groups
#NOTE(review): if groups.txt exists but groups.fa does not (e.g. a previous
#run died between group.pl and the awk step below) neither branch runs and
#groups.fa is never produced; deleting data_$name/groups.txt recovers —
#confirm this intermediate state is intended
if [[ -e $groups_fa_file ]] 
	then
	echo "final groups already made"
	echo "final dataset already made, data in: $c95file, $mappedfile"
elif [ ! -e data_$name/groups.txt ]
	then
	echo "making ISU groups, data in: groups.txt, reads_in_groups.txt"
	#group.pl collapses the tabbed reads into ISU groups, writing
	#groups.txt and reads_in_groups.txt into data_$name/
	$BIN/group.pl $finaltabbedfile $name 
	echo "making fasta file. data in: groups.fa"
	#emit the first two whitespace-separated fields of groups.txt on
	#alternating lines (presumably id then sequence, i.e. fasta —
	#verify against group.pl's output format)
	awk '{print$1 "\n"  $2}' $groups_file > $groups_fa_file
	echo "final groups made. data in: groups.txt, reads_in_groups.txt, groups.fa, moving on to next steps"
fi

# Cluster the ISU sequences into OTUs at the requested identity, unless a
# previous run already produced the cluster table.
if [[ ! -e $c95file ]]
	then
	echo "clustering into OTUs at $cluster % ID"
	#uclust command line
	#$BIN/$uclust --usersort --input $groups_fa_file --uc $c95file --id $cluster
	#usearch v5 or lower command line
	#$BIN/$uclust --usersort --cluster $groups_fa_file --uc $c95file --id $cluster
	#usearch v6 command line
	$BIN/$uclust --cluster_smallmem $groups_fa_file --uc $c95file --id $cluster
	echo "clustering done data in: $c95file, moving on to next steps"
else
	echo "clustered already made, data in: $c95file"
fi

# Build the final per-read table (skipped if it already exists).
if [ ! -e $mappedfile ]
	then
	echo "mapping ISU, OTU information back to reads"
	echo ""
	#map_otu_isu_read.pl combines the cluster table, the read->ISU
	#membership file and the tag-validated reads into one table
	#(see the script in $BIN for the exact output columns)
	$BIN/map_otu_isu_read.pl $c95file $reads_in_groups_file $finaltabbedfile > $mappedfile
	echo "final dataset made, data in: $mappedfile"
	echo ""
	echo "now cleaning up intermediate files"
	echo "removing:  groups.txt"
	echo "leaving: reads_in_groups.txt, groups.fa, $c95file, $mappedfile $finaltabbedfile $overlapped_startfile"
	#groups.txt is redundant once the mapped table exists; note that
	#removing it also re-enables the group-making branch above on re-runs
	rm   $groups_file
fi

# Final reporting stage: per-tag-pair read counts, OTU seed sequences,
# and the metadata-annotated count table in analysis_$name/.
if [[ -e $mappedfile ]]
then
	#the program identifies the OTUs or ISUs that are present in any of the
	#samples above the CUTOFF % abundance value set just below
	#these common OTUs are identified in the table
	CUTOFF=0.1
	echo ""
	echo "attaching read counts to sequence tag pairs with a $CUTOFF % abundance cutoff in any sample"
	$BIN/get_tag_pair_counts.pl $mappedfile $CUTOFF $name
	echo "tag pair read counts in analysis_$name/ISU_tag_mapped.txt and analysis_$name/OTU_tag_mapped.txt"
	echo ""
	echo "to use a different cutoff run the following command:"
	echo "$BIN/get_tag_pair_counts.pl $mappedfile 1"
	echo "and change the 1 to your preferred abundance cutoff" 
	
	#extract the seed (representative) sequence of every reported OTU
	echo "getting the seed OTU sequences"
	$BIN/get_seed_otus.pl $c95file $groups_fa_file analysis_$name/OTU_tag_mapped.txt > analysis_$name/OTU_seed_seqs.fa
	
	#merge the sample sheet into the OTU count table, then replace the
	#first space on each line with an underscore (awk sub() with no
	#third argument operates on $0 and substitutes only the first match)
	echo "adding the meta information" 
	$BIN/add_meta.pl data_$name/samples.txt analysis_$name/OTU_tag_mapped.txt > analysis_$name/meta1_OTU_tag_mapped.txt
	awk -F "\t" ' {sub (/ /, "_")} {print $0}' analysis_$name/meta1_OTU_tag_mapped.txt > analysis_$name/meta_OTU_tag_mapped.txt
	rm analysis_$name/meta1_OTU_tag_mapped.txt
fi

echo "end of pipeline.sh"
