#-----------------------------------------------------------#
# By Keunsoo Kang @ Dankook University (kangk1204@gmail.com)
# Version 1.0.0
# Generate a list of sra files
#-----------------------------------------------------------#


#------- Search Keyword example for mouse ChIP-seq ---------#
# (mouse[organism]) AND ("Genome binding/occupancy profiling by high throughput sequencing"[DataSet Type])
# http://www.ncbi.nlm.nih.gov/gds/?term=%28mouse[organism]%29+AND+%28%22Genome+binding%2Foccupancy+profiling+by+high+throughput+sequencing%22[DataSet+Type]%29

use strict;
use warnings;
use Path::Class;


# Genome assembly defined: maps the NCBI scientific species name
# (as reported in the runinfo "ScientificName" column) to a UCSC genome build.
my %SPE_DB = (
	"Mus musculus" => "mm10",
	"Homo sapiens" => "hg19",
	"Drosophila melanogaster" => "dm3",
);

# Directories (created by PREPARATION() when missing)
my $SRA_DIR = "00_SRA_files\/";	# All .sra files will be stored
my $INFO_DIR = "01_SRAinfo_files\/";	# Information files related to .sra files will be stored
my $LIST_DIR = "02_LIST_dir\/";	# List output will be stored
my $HOMER_DIR = "03_Subread_HOMER_dir\/";	# Final bigwig output by HOMER will be stored
my $INDEL_DIR = "04_Subread_INDEL_dir\/";	# Final INDEL output by subread will be stored

# External command fragments (concatenated with URLs/paths before use)
my $WGET_CMD_HEAD = "wget ";
my $WGET_CMD_TAIL = " -c -o ";	# -P DIRECTORY
my $CURL_CMD = "curl ";
my $ASPERA_CMD = "~/.aspera/connect/bin/./ascp -i ~/.aspera/connect/etc/asperaweb_id_dsa.openssh -k 1 -Tr -l500m anonftp\@ftp-private.ncbi.nlm.nih.gov:";

# Output list file and shared state (filled/read by the subs below)
my $LIST_OUT_FILE;	# Timestamped list output file name, set in PREPARATION()
my %COMP_DB;	#	Previously completed bigwig files (basename -> 1)
my %SRA_DB;	#	Per-study (GSE) information: desc/ftp/srp/gse/time
my %GSM_DB;	#	Per-sample (GSM) information collected from runinfo CSVs



#-------------------------------------[ Main started ]-------------------------------------#
# Pipeline: each step fills the shared state hashes that the next step reads.
PREPARATION();	# Check the directories, archive old .octo lists, pick output file name
CHECK_PROCESSED_HOMER();	# Check the processed files by HOMER (.bigWig) -> %COMP_DB
PARSING_LIST();	# Extract information about SRP (study) from *.list files -> %SRA_DB
WGET_SRAINFO();	# Extract information about SRA (sample) via curl/wget -> %GSM_DB
UPDATE_LIST();	# Write the updated list file from %GSM_DB




#---------------------------------------[ Functions ]--------------------------------------#
sub PREPARATION{
	# Create the working directories, choose a timestamped name for the new
	# output list file, and archive any previous *.octo list files as *.octo.old.
	# Reads the directory-name globals; sets the global $LIST_OUT_FILE.
	my ($sec, $min, $hour, $mday, $mon, $year) = localtime(time);

	# Use Perl's built-in mkdir instead of shelling out to "mkdir -p":
	# the directory names are single-level relative paths, and the built-in
	# avoids spawning a shell.
	for my $dir ($SRA_DIR, $INFO_DIR, $LIST_DIR, $HOMER_DIR, $INDEL_DIR){
		unless(-d $dir){
			mkdir $dir or die "Cannot create directory $dir: $!";
		}
	}

	# e.g. "2016_01_31_23_59_59_completed_list.octo"
	$LIST_OUT_FILE = sprintf("%4d_%02d_%02d_%02d_%02d_%02d_completed_list.octo",
		$year+1900, $mon+1, $mday, $hour, $min, $sec);

	# Grab all the existing .octo files and rename them to .octo.old so a
	# fresh list can be written.  Built-in rename() is used instead of a
	# shell "mv $name" command, which would break (or worse) on file names
	# containing whitespace or shell metacharacters.
	dir('.')->recurse(callback => sub{
		my $file = shift;
		if($file =~ /\.octo$/){
			my $name = $file->absolute->stringify;
			rename $name, "$name.old"
				or warn "Cannot rename $name to $name.old: $!";
		}
	});
}

sub CHECK_PROCESSED_HOMER{
	# Record every non-empty *_homer.ucsc.bigWig file found under the current
	# directory in the global %COMP_DB (keyed by basename), so samples that
	# were already processed can be skipped later.
	dir('.')->recurse(callback => sub{
		my $file = shift;

		return unless $file =~ /\_homer\.ucsc\.bigWig$/;

		my $name = $file->absolute->stringify;

		# Skip size-zero bigWig files (failed/incomplete runs).
		# BUGFIX: the original used `next` here, which is wrong inside a
		# callback sub ("Exiting subroutine via next"); `return` is correct.
		return if -z $name;

		my $basename = (split /\//, $name)[-1];

		$COMP_DB{$basename} = 1;	# GSM id is embedded in the basename

		print "# Already_processed: $basename\n";
	});
}

sub PARSING_LIST{
	# Parse every *.list file (NCBI GEO "gds_result" listing) found under the
	# current directory and fill the global %SRA_DB, keyed by the numeric part
	# of the GSE id, with description / FTP link / SRP id / GSE id / listing
	# entry number.  Entries without NGS data (no SRA FTP link) are dropped.
	my @list_files;
	my @unique_desc;
	my @ftp_link;
	my @srp_id;
	my @series_id;
	my @time_order;
	my %seen_desc;	# O(1) duplicate-description check (replaces linear scan)

	# Grab the gds_result_list files (mainly in the 0_SRA_files directory)
	dir('.')->recurse(callback => sub{
		my $file = shift;
		if($file =~ /\.list$/){
			push @list_files, $file->absolute->stringify;
		}
	});

	for my $list_file (@list_files){
		my $match = 0;	# true while inside a newly-seen record

		# 3-arg open with a lexical handle (was 2-arg bareword open).
		open(my $fh, '<', $list_file) or die "Cannot open $list_file: $!";
		while(my $line = <$fh>){
			chomp $line;

			if($line =~ /^[0-9]+\./){
				# Record header: "NN. <description>"
				$match = 0;
				my $desc = (split /^[0-9]+\./, $line)[1];
				my $entry_no = (split /\./, $line)[0];

				# Only keep the first occurrence of each description.
				unless($seen_desc{$desc}++){
					push @unique_desc, $desc;
					push @time_order, $entry_no;
					$match = 1;
				}
			}elsif( ($line =~ /^FTP download\:/) && $match ){	# @FTP_LINK
				if($line =~ /SRA/){
					my @info = split /\s+/, $line;
					push @srp_id,   $info[-2];
					push @ftp_link, $info[-1];
				}else{	# No NGS data but microarray
					push @srp_id,   "na";
					push @ftp_link, "na";
				}
			}elsif( ($line =~ /^Series/) && $match ){	# @SERIES_ID
				my @info = split /\t/, $line;
				my $gse_id = (split /\s+/, $info[2])[1];

				push @series_id, $gse_id;
			}
		}
		close($fh);
	}

	# Listings are newest-first; reverse so entries run oldest-first.
	@unique_desc = reverse @unique_desc;
	@ftp_link    = reverse @ftp_link;
	@srp_id      = reverse @srp_id;
	@series_id   = reverse @series_id;
	@time_order  = reverse @time_order;

	# Only take the entries with NGS data available.
	for my $i (0..$#unique_desc){
		next if $ftp_link[$i] eq "na";

		# Key on the numeric part of the GSE id ("12345" from "GSE12345").
		my $uid = (split /GSE/, $series_id[$i])[1];

		$SRA_DB{$uid}{desc} = $unique_desc[$i];
		$SRA_DB{$uid}{ftp}  = $ftp_link[$i];
		$SRA_DB{$uid}{srp}  = $srp_id[$i];
		$SRA_DB{$uid}{gse}  = $series_id[$i];
		$SRA_DB{$uid}{time} = $time_order[$i];
	}
}

sub WGET_SRAINFO{
	# For every study in the global %SRA_DB: list its .sra run files via curl,
	# download the per-run "runinfo" CSV from NCBI (unless already cached under
	# $INFO_DIR), and hand each runinfo file to extract_srr_info().
	while(my $u_id = each(%SRA_DB)){
		my $desc = $SRA_DB{$u_id}{desc};
		my $ftp  = $SRA_DB{$u_id}{ftp};
		my $srp  = $SRA_DB{$u_id}{srp};
		my $gse  = $SRA_DB{$u_id}{gse};
		my $time = $SRA_DB{$u_id}{time};

		my $srp_dir = $INFO_DIR.$srp."/";
		unless(-d $srp_dir){
			# Built-in mkdir instead of shelling out (single-level dir).
			mkdir $srp_dir or die "Cannot create directory $srp_dir: $!";
		}

		timestamp();
		print "# $gse $srp $time data set..\n";

		# Get the sra run file names for the SRP study (shell needed for the
		# output redirection); warn if curl reports failure.
		system($CURL_CMD.$ftp." > curl_tmp.txt") == 0
			or warn "# curl failed for $ftp (exit $?)\n";

		# 3-arg open with a lexical handle (was 2-arg bareword open).
		open(my $fh, '<', "curl_tmp.txt") or die "Cannot open curl_tmp.txt: $!";
		while(my $line = <$fh>){
			chomp $line;

			my $srr = (split /\s+/, $line)[-1];	# run file name (last field)
			my $info_file = $srp_dir.$srr.".info";

			# Download the runinfo CSV only when it is missing or empty.
			if( !(-e $info_file) or (-z $info_file) ){
				my $get_cmd = "wget -O $info_file 'http://trace.ncbi.nlm.nih.gov/Traces/sra/sra.cgi?save=efetch&db=sra&rettype=runinfo&term=$srr'";

				system $get_cmd;
			}

			# Extract information for each SRR run file.
			extract_srr_info($info_file, $srr, $ftp, $gse, $time, $desc);
		}
		close($fh);
	}
}

sub extract_srr_info{
	# Parse one NCBI runinfo CSV and register per-sample (GSM) metadata in
	# the global %GSM_DB.  Rows are dropped when the run is not a public
	# ILLUMINA run, when the sample already has a processed bigWig (%COMP_DB),
	# or when the row does not belong to the requested run.
	#
	# Arguments:
	#   $file - path to the cached runinfo CSV for one run
	#   $srr  - run file name as listed on the FTP site (e.g. "SRR123456.sra")
	#   $ftp  - FTP link of the study the run belongs to
	#   $gse  - GEO series id (e.g. "GSE12345")
	#   $time - entry number of the study in the gds_result listing
	#   $desc - free-text description of the study
	my ($file, $srr, $ftp, $gse, $time, $desc) = @_;

	# 3-arg open with a lexical handle (was 2-arg bareword open).
	open(my $fh, '<', $file) or die "Cannot open $file: $!";

	timestamp();
	print "# Opening $file\n";

	my $begin = 1;	# true until the CSV header line has been processed
	my %file_info;	# column name of interest -> column index

	while(my $line = <$fh>){
		chomp $line;

		if($begin){
			# Locate the columns of interest in the CSV header line.
			my @data = split /\,/, $line;

			for my $a (0..$#data){
				if($data[$a]  =~ /LibraryStrategy/i){
					$file_info{"LibraryStrategy"} = $a;
				}elsif($data[$a] =~ /LibraryLayout/i){
					$file_info{"LibraryLayout"} = $a;
				}elsif($data[$a] =~ /Platform/i){
					$file_info{"Platform"} = $a;
				}elsif($data[$a] =~ /SRAStudy/i){
					$file_info{"SRAStudy"} = $a;
				}elsif($data[$a] =~ /ScientificName/i){
					$file_info{"ScientificName"} = $a;
				}elsif($data[$a] =~ /Consent/i){
					$file_info{"Consent"} = $a;
				}elsif($data[$a] =~ /LibrarySource/i){
					$file_info{"LibrarySource"} = $a;
				}elsif($data[$a] =~ /SampleName/i){
					$file_info{"SampleName"} = $a;
				}elsif($data[$a] =~ /BioProject/i){
					$file_info{"BioProject"} = $a;
				}elsif($data[$a] =~ /Study_Pubmed_id/i){
					$file_info{"PMID"} = $a;
				}elsif($data[$a] =~ /^Run$/i){
					$file_info{"Srr"} = $a;
				}
			}
			$begin = 0;
			next;
		}

		my @data = split /\,/, $line;

		# Several cutoff parameters: only public ILLUMINA runs are kept
		# (a failing row aborts the rest of the file, as in the original).
		last unless(exists $file_info{"Consent"} && ($data[$file_info{"Consent"}] =~ /public/i));
		last unless(exists $file_info{"Platform"} && ($data[$file_info{"Platform"}] =~ /ILLUMINA/i));

		# Sample label (mostly a GSM id); underscores are stripped so the
		# label can be matched inside bigWig file names.
		my $u_label = join("", split(/\_/, $data[$file_info{"SampleName"}]));

		# Skip samples already downloaded and processed to bigWig.
		# BUGFIX: the original walked %COMP_DB with each() and bailed out via
		# `last`, which leaves the hash iterator mid-way so the next call
		# silently skipped entries; iterating keys() has no such state.
		my $processed = 0;
		for my $bigwig_file (keys %COMP_DB){
			# \Q..\E guards the interpolated label against regex metacharacters.
			if($bigwig_file =~ /\_\Q$u_label\E\_/i){
				$processed = 1;
				last;
			}
		}

		next if($processed);

		# Only rows belonging to this run are recorded
		# ("SRR123456" from "SRR123456.sra").
		my $srr_id_name = (split /\./, $srr)[0];

		next unless($data[$file_info{"Srr"}] =~ /\Q$srr_id_name\E/i);

		# Record each metadata field, defaulting to "na" when the runinfo
		# header did not provide the column.
		$GSM_DB{$u_label}{"whichseq"} = exists $file_info{"LibraryStrategy"}
			? $data[$file_info{"LibraryStrategy"}] : "na";	# ChIP-Seq, RNA-Seq, etc.

		$GSM_DB{$u_label}{"singlepaired"} = exists $file_info{"LibraryLayout"}
			? $data[$file_info{"LibraryLayout"}] : "na";	# Single-end or Paired-end

		$GSM_DB{$u_label}{"srp"} = exists $file_info{"SRAStudy"}
			? $data[$file_info{"SRAStudy"}] : "na";	# SRP id

		$GSM_DB{$u_label}{"species"} = exists $file_info{"ScientificName"}
			? $data[$file_info{"ScientificName"}] : "na";	# Species

		# BUGFIX: the header parser stores the Study_Pubmed_id column index
		# under the key "PMID", but the original looked up "Study_Pubmed_id"
		# here, so the PMID was always reported as "na".
		$GSM_DB{$u_label}{"pmid"} = exists $file_info{"PMID"}
			? $data[$file_info{"PMID"}] : "na";	# PMID

		push( @{$GSM_DB{$u_label}{"srr"}}, $srr );	# Some GSM contains multiple srr files
		push( @{$GSM_DB{$u_label}{"ftp"}}, $ftp );	# Some GSM contains multiple srr files
		$GSM_DB{$u_label}{"gse"} = $gse;
		$GSM_DB{$u_label}{"time"} = $time;
		$GSM_DB{$u_label}{"desc"} = $desc;
	}
	close($fh);
}

sub UPDATE_LIST{
	# Write one tab-separated line per sample in the global %GSM_DB to the
	# timestamped .octo list file chosen by PREPARATION().
	my $out_path = $LIST_DIR.$LIST_OUT_FILE;

	# 3-arg open with a lexical handle (was 2-arg bareword open).
	open(my $out, '>', $out_path) or die "Cannot open $out_path: $!";

	while(my $gsm_id = each(%GSM_DB)){
		my $sra_files = join(",", @{$GSM_DB{$gsm_id}{"srr"}});	# a GSM may span several .sra runs
		my $ftp_link  = join(",", @{$GSM_DB{$gsm_id}{"ftp"}});

		my $seq         = $GSM_DB{$gsm_id}{"whichseq"};
		my $single_pair = $GSM_DB{$gsm_id}{"singlepaired"};
		my $srp         = $GSM_DB{$gsm_id}{"srp"};
		my $spe         = $GSM_DB{$gsm_id}{"species"};
		my $pmid        = $GSM_DB{$gsm_id}{"pmid"};
		my $gse         = $GSM_DB{$gsm_id}{"gse"};
		my $time        = $GSM_DB{$gsm_id}{"time"};
		my $desc        = $GSM_DB{$gsm_id}{"desc"};

		# Map the scientific species name to a genome assembly, if known.
		my $genome = "unknown";
		$genome = $SPE_DB{$spe} if defined $SPE_DB{$spe};

		print {$out} "$time\t$gse\t$gsm_id\t$srp\t$genome\t$seq\t$single_pair\t$pmid\t$sra_files\t$ftp_link\t$desc\n";
	}

	# Check close on the write handle: buffered write errors surface here.
	close($out) or die "Cannot close $out_path: $!";
}

#-------------------------------------[ MISC functions ]-------------------------------------#
sub timestamp{
	# Print the current local time as "YYYY-MM-DD hh:mm:ss " (trailing space,
	# no newline) as a prefix for the log lines that follow it.
	my @now = localtime(time);
	my $stamp = sprintf "%4d-%02d-%02d %02d:%02d:%02d ",
		$now[5] + 1900, $now[4] + 1, $now[3], $now[2], $now[1], $now[0];
	print $stamp;
}

sub uniqueElements {
	# Return the argument list with duplicates removed, keeping the first
	# occurrence of each element and preserving the original order.
	my %seen;
	return grep { !$seen{$_}++ } @_;
}
