#!/usr/bin/perl -w
use strict;
use DBI;
use DBD::Pg;
use Getopt::Long;
use Digest::MD4;
use FindBin;
use lib ("$FindBin::Bin/..");
use parse_pfam;

#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#	Usage

#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Help/usage text, printed verbatim on --help or when --dbname is missing.
# NOTE(review): the RESULTS section only mentions the FASTA output, but the
# script also emits PubMed/OMIM/variant tables -- confirm and update the text.
my $usage = <<'USAGE';

USAGE:

    panda_omim_from_sprot.pl  --dbname STRING [--verbose] [FILE]

    Mandatory arguments:

         --dbname STRING    the postgreSQL database
    
    Optional arguments:
        
        [--verbose]         dots printed to STDERR every 2000 records
        [--user STRING]     the postgreSQL user name
        [--host STRING]     the postgreSQL database server
        [--password STRING] the postgreSQL database password

        [FILE]              The SwissProt file you wish to process e.g. swissprot.dat
                            This can be specified on the command line or via STDIN 
                            

        [--help]
                            
RESULTS:
    
    STDOUT: The resulting fasta file in NCBI nr-like format with NCBI taxonomy IDs
            

    This script converts SWISSProt data files into FASTA format, retaining
       the taxonomical NCBI ID.

USAGE


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#    Command line options

#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# options variables
# Command-line option defaults.
my $user     = 'postgres';	# postgreSQL user name
my $password = '';			# postgreSQL password
my $host     = '';			# postgreSQL server host

my $help     = '';
my $dbname   = '';			# mandatory database name
my $verbose;				# progress dots on STDERR when set


# Parse the command line.
# NOTE(review): 'password' previously had no '=s', so --password could never
# accept a value despite the usage text advertising "--password STRING".
GetOptions(	'verbose'		=> \$verbose,
			'user=s'		=> \$user,
			'password=s'	=> \$password,
			'host=s'		=> \$host,
			'dbname=s'		=> \$dbname,
			'help'	   		=> \$help);

die $usage if ($help);
die "\nERROR:\n\tMissing Arguments [dbname]:\n" .$usage
				unless ($dbname);

# Digest object used to fingerprint each sequence (see per_record_call_back).
# Direct method call replaces the ambiguous indirect-object "new Digest::MD4".
my $md4 = Digest::MD4->new;

	

	

	

	
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888


#	uniquify

#			removes duplicate entries in an array

#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

# uniquify(@array)
#	Removes duplicate entries from the array, in place, keeping the first
#	occurrence of each value so the original order is preserved.  (The old
#	implementation rebuilt the array from "keys %hash", which returned the
#	elements in a nondeterministic order.)
#	Prototype \@: callers pass a plain array; we receive a reference in $_[0].
#	Returns the deduplicated list as well.
sub uniquify(\@)
{
	my %seen;
	@{$_[0]} = grep { !$seen{$_}++ } @{$_[0]};
	return @{$_[0]};
}


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888


#	line_wrap

#			puts a newline every n letters
#			n.b. Do not use for constant parameters eg "$3"

#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

# line_wrap($text, $width)
#	Returns $text broken into chunks of at most $width characters, joined
#	with newlines.  The final (possibly shorter) chunk carries no trailing
#	newline; an empty string yields an empty string.
sub line_wrap($$)
{
	my ($text, $width) = @_;
	my @chunks;
	# Walk the string by offset instead of destructively consuming a copy.
	for (my $offset = 0; $offset < length $text; $offset += $width)
	{
		push @chunks, substr($text, $offset, $width);
	}
	return join "\n", @chunks;
}






#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#	per_record_call_back

#		parse_pfam::parse_records will call this routine once per PFAM record

#		Description
#			A) Process ID AC DE OS and OX fields corresponding to
#				the SwissProtID, gene name, description, Taxa name, and Taxa ID
#			   Print out accession and sequence in fasta format

#			B) Get Pubmed IDs associated with this sequence (via panda numbers).

#			C) Get OMIM codes associated with this sequence (via panda numbers).
#			   Some of these will not be disease codes.
#				i.e. a post-filtering SQL step is needed.

#			D) Get variants codes associated with this sequence (via panda numbers)
#				if this is an OMIM linked sequence.
#			   N.B. We are interested in disease associated sequences (see (C) above)
#				i.e. a post-filtering SQL step is also needed here.

#			


#		Paramters:
#			1) record data: a reference to a hash whose keys are field names
#							 e.g. $pfam_data->{'DE'} is the description field
#							 these are all arrays (ie. $pfam_data->{'DE'}[0..n])
#							 except for $pfam_data->{'SQ'} which contains the sequence
#			2) file position of the record start
#			3) record number (indexed from 1) of the current record
#			4) user defined data. In this case, it is an array of
#								taxid_to_name lookups
#								file handles for pubmed, omim codes, variants

#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
sub per_record_call_back($$$$)
{
	#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
	#	FASTA Sequence

	my ($pfam_data, $recstart, $reccnt, $custom_data) = @_;
	# Unpack the caller-supplied state.  NOTE(review): the caller must supply
	# exactly these eleven entries in this order; the visible call site near
	# the bottom of the file passes fewer -- confirm and fix the caller.
	my ($taxid_to_name		,	# hashref: NCBI taxid -> scientific name
		$obsolete_taxids	,	# hashref: incremented per taxid missing from lookup
		$verbose			,	# true => progress dots on STDERR
		$FH_fasta			,	# output handle: FASTA records
		$FH_pubmed			,	# output handle: digest + PubMed-ID lines
		$FH_OMIM_codes		,	# output handle: digest + OMIM-code lines
		$FH_OMIM_variants	,	# output handle: digest + variant lines
		$cnt_pubmed			,	# the remaining four are scalar refs to
		$cnt_omim			,	# running counters owned by the caller
		$cnt_variants		,
		$cnt_missense       ) = @$custom_data;

	#print progress dots
	# one dot per 2000 records, a fresh indented row every 50 dots
	if ($verbose && $reccnt % 2000 == 0)
	{
		print STDERR "\n\t" if (($reccnt / 2000) % 50 == 0);
		print STDERR '.';
	}

	# make sure all required fields are present
	for (qw (DE ID OX AC SQ OS))
	{
		die "\nFatal Error:\n".
			"The field '$_' is missing ".
			"in record # $reccnt beginning on line $recstart\n"
			unless exists $pfam_data->{$_};
	}

	
	# AC
	# accession code (name)
	# only keep the first (primary) accession
	# (collapses the arrayref field to a plain scalar; everything from the
	# first ';' onward is discarded)
	$pfam_data->{'AC'} = $pfam_data->{'AC'}[0];
	$pfam_data->{'AC'} =~ s/;.*//;
	
	
	# DE
	# join description line and remove duplicate spaces
	$pfam_data->{'DE'} = join (' ', @{$pfam_data->{'DE'}});
	$pfam_data->{'DE'} =~ s/ +/ /g;

	
	# ID
	# SP-TrEMBL ID
	die "\nFatal Error:\n".
		"More than one ID line in record #$reccnt beginning on line $recstart\n"
		unless @{$pfam_data->{'ID'}} == 1;
	# remove everything after the first whitespace
	$pfam_data->{'ID'} = $pfam_data->{'ID'}[0];
	$pfam_data->{'ID'} =~ s/[ \t]+.*//;


	# OX
	# NCBI TAX IDs
	# A record without a parseable NCBI_TaxID is warned about and skipped
	# entirely (early return: no FASTA/PubMed/OMIM output for it).
	$pfam_data->{'OX'} = join ('', @{$pfam_data->{'OX'}});
	unless ($pfam_data->{'OX'} =~ /NCBI_TaxID=\s*(\d+)/)
	{
		warn "\nWarning:\n".
			"The 'OX' line did not begin with 'NCBI_TaxID='\n".
			"in record ($pfam_data->{'AC'}) #$reccnt beginning on line $recstart\n".
			"OX   $pfam_data->{'OX'}\n";
		return;
	}
	# NOTE(review): this collects EVERY run of digits on the joined OX line
	# as a taxid, not just those following "NCBI_TaxID=" -- confirm intended.
	$pfam_data->{'OX'} = [ $pfam_data->{'OX'} =~ /(\d+)/g];
	uniquify(@{$pfam_data->{'OX'}});

	# OS
	# taxonomy names, used only as backup if out of sync NCBI Tax IDs used!!
	$pfam_data->{'OS'} = '['. join ('', @{$pfam_data->{'OS'}}).']';



	# create accession line in the NCBI nr style:
	# Duplicate the whole entry for each taxid and concatenate with the ascii \x01 symbol
	my @acc_fragments;
	foreach my $tax_id (@{$pfam_data->{'OX'}})
	{
		my $acc_fragment = 	'SP|' . $pfam_data->{'AC'}.			# accession code
							'|NCBI_TAXID|'.$tax_id.'|'.			# taxid
							$pfam_data->{'ID'}.' '.				# gene name
							$pfam_data->{'DE'};					# description

		# add tax name in brackets corresponding to the taxid
		if (exists $taxid_to_name->{$tax_id})
		{
			$acc_fragment .= '['.$taxid_to_name->{$tax_id}.']'
		}
		else
		{
			# taxid unknown to the NCBI dump: remember it and fall back to
			# the (already bracketed) OS species name
			$obsolete_taxids->{$tax_id}++;
			$acc_fragment .= $pfam_data->{'OS'};
		}
		push(@acc_fragments, $acc_fragment);
	}
	my $acc_line = '>'. join("\x01", @acc_fragments);

	print $FH_fasta $acc_line,							"\n",	# accession
					line_wrap($pfam_data->{'SQ'}, 60),	"\n";	# sequence
	

	#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
	#	Additional features

	# get pi numbers
	# MD4 digest of the raw sequence, unpacked as native 32-bit unsigned ints;
	# used as the sequence key in all the tab-separated output files below.
	# ($md4 is a file-level lexical created near the top of the script.)
	my @digest = unpack("L*", $md4->hash($pfam_data->{'SQ'}));

	#-------------------------------------------------------------------------------------
	# pubmed ID
	# RX
	my @PubMed;
	foreach (@{$pfam_data->{'RX'}})
	{
		push(@PubMed, $1) if(/PubMed=(\d+)/);
	}
	uniquify(@PubMed);
	$$cnt_pubmed += @PubMed;
	# one line per PubMed ID: digest words, then the ID, tab separated
	print $FH_pubmed join ("\t", @digest, "$_\n") foreach (@PubMed);

	#-------------------------------------------------------------------------------------
	# OMIM codes
	# DR
	my @OMIM_codes;
	foreach (@{$pfam_data->{'DR'}})
	{
		push(@OMIM_codes, $1) if(/MIM;\s*(\d+)/);
	}
	uniquify(@OMIM_codes);
	$$cnt_omim += @OMIM_codes;
	print $FH_OMIM_codes join ("\t", @digest, "$_\n") foreach (@OMIM_codes);

	#-------------------------------------------------------------------------------------
	# OMIM associated missense / missing variants
	# FT
	# Only emitted for records that carried at least one OMIM code (see the
	# header comment: disease filtering proper happens in a later SQL step).
	my @VARIANTS;
	if (@OMIM_codes)
	{
		foreach (@{$pfam_data->{'FT'}})
		{
			# insertions / missense
			# [AC-IK-NP-TVWY] is the 20-standard-amino-acid alphabet
			# (excludes B/J/O/U/X/Z); captures: from-pos, to-pos,
			# original residue(s), replacement residue(s)
			if (/^VARIANT +(\d+) +(\d+) +([AC-IK-NP-TVWY]+) *\-> *([AC-IK-NP-TVWY]+)/)
			{
				# make sure the 'from' amino acid matches that in the sequence
				my $aa_from_seq	=	substr($pfam_data->{'SQ'}, $1-1, $2 - $1 +1);
				die "\nFatal Error:\n".
					"The residues [$3] in the 'FT VARIANT' (Feature) field do ".
					"not match those at the corresponding position [$1] of the 'SQ' ".
					"(sequence) field [$aa_from_seq]\n".
					"in record ($pfam_data->{'AC'}) #$reccnt beginning on line $recstart\n".
					"[$_]\n$pfam_data->{'SQ'}\n"
					unless $aa_from_seq eq $3;
				
				$$cnt_missense++ if (length($3) == 1 and length($4) ==1);
				$$cnt_variants++;
				
				# Output: digest, start, end, from-residues, to-residues.
				# NOTE(review): the end column is written as 0 for a simple
				# single-residue substitution -- presumably a downstream
				# convention; confirm with the consuming SQL step.
				print $FH_OMIM_variants	join ("\t", @digest),
										"\t".
										"$1\t",
										($1 == $2 and length($4)==1) ? 0 : $2,"\t",
										"$3\t".
										"$4\n";
			
			}
			# deletions
			elsif (/^VARIANT +(\d+) +(\d+) +MISSING/)
			{
				# deleted residues are recovered from the sequence itself;
				# the to-residues column is left empty
				my $aa_from_seq	=	substr($pfam_data->{'SQ'}, $1-1, $2 - $1+1);

				$$cnt_variants++;
				# NOTE(review): this print ends with ',' rather than ';' --
				# legal immediately before the closing brace, but likely a typo.
				print $FH_OMIM_variants	join ("\t", @digest),
										"\t".
										"$1\t".
										"$2\t".
										"$aa_from_seq\t".
										"\n",
			}
		}
	}# end OMIM variants
}



#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#	Open files

#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

# Input/output file names.
# NOTE(review): the five *_input/*_output variables below were used but never
# declared in the original, which cannot compile under 'use strict'.
# Plausible defaults are supplied here -- confirm against the intended pipeline.
my $sprot_input     = $ARGV[0];					# optional; STDIN also accepted
my $taxid_input     = 'names.dmp';				# NCBI taxonomy names dump
my $fasta_output    = 'sprot.fasta';			# nr-style FASTA output
my $pubmed_output   = 'sprot_pubmed.tab';		# sequence digest -> PubMed ID
my $omim_output     = 'sprot_omim.tab';			# sequence digest -> OMIM code
my $variants_output = 'sprot_variants.tab';		# sequence digest -> variant data

# Three-argument open avoids mode injection via the file names; the bareword
# handles are kept because later code refers to them by name.
if (defined $sprot_input)	# no FILE argument => the data arrives on STDIN
{
	open (SPROT, '<', $sprot_input)
		or die "Could not open the file $sprot_input\n$!";
}
open (TAXID_NAMES, '<', $taxid_input)
	or die "Could not open the file $taxid_input\n$!";

open (FASTA,    '>', $fasta_output)    or die "Could not open the file $fasta_output\n$!";
open (PUBMED,   '>', $pubmed_output)   or die "Could not open the file $pubmed_output\n$!";
open (OMIM,     '>', $omim_output)     or die "Could not open the file $omim_output\n$!";
open (VARIANTS, '>', $variants_output) or die "Could not open the file $variants_output\n$!";
# if this is windows, make sure no \r\n. I.e. just \n
binmode FASTA;
binmode PUBMED;
binmode OMIM;
binmode VARIANTS;


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#	map NCBI taxonomy IDs to names

#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
my %taxid_to_names;		# NCBI taxid -> scientific name
print STDERR "\tReading taxonomy names...\n\t[" if ($verbose);

# Initialised to 0 so the summary line below never prints an uninitialised
# value when no names are read (the original left this undef).
my $cnt_tax_names = 0;
while (<TAXID_NAMES>)
{
	# names.dmp rows look like (fields separated by "\t|\t"):
	#2	|	Bacteria	|		|	scientific name	|
	# i.e. taxid, name, unique name, name class; keep only "scientific name".
	next unless /(\d+)		\t\|\t
				 ([^\t]+)	\t\|\t
				 ([^\t]*)	\t\|\t
				 scientific\sname/x;

	# progress dots: one per 2000 names kept, a new indented row every 50 dots
	if ($verbose and ++$cnt_tax_names % 2000 == 0)
	{
		print STDERR "\n\t" if (($cnt_tax_names / 2000) % 50 == 0);
		print STDERR '.';
	}
	$taxid_to_names{$1} = $2;
}
print STDERR "]\n\t$cnt_tax_names taxonomy names read.\n" if ($verbose);


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#	main

#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

print STDERR "\tParsing Swiss Prot file...\n\t[" if ($verbose);

# Read the SwissProt data from the file named on the command line, or from
# STDIN when no file is given (as documented in the usage text).
my $fh_SwissProt = *STDIN;
if (@ARGV)
{
	# NOTE(review): the original read "open SP_FILE $ARGV[0]" (missing comma,
	# a syntax error) and then assigned the mismatched handle *SPFILE; it
	# also reported $@ instead of $! for the open failure.
	open SPFILE, '<', $ARGV[0] or die "Error\n\tCould not open $ARGV[0]\n$!\n";
	$fh_SwissProt = *SPFILE;
}

# Counters updated by the callback through scalar references.
my $cnt_pubmed		= 0;	# PubMed IDs written
my $cnt_omim		= 0;	# OMIM codes written
my $cnt_variants	= 0;	# FT VARIANT records written
my $cnt_missense	= 0;	# subset of variants that are 1-residue missense
my %obsolete_taxids;		# taxids in SwissProt absent from the NCBI dump

# NOTE(review): the original custom-data array was unterminated ("[ ... );")
# and supplied only 2 of the 11 entries per_record_call_back unpacks; the
# full list, in the order the callback expects, is passed here.
my $reccnt = parse_records(	$fh_SwissProt,					# swiss prot file handle
							qw (ID OX AC DE OS FT DR RX),	# desired sprot fields
							\&per_record_call_back,			# call back function
							[	\%taxid_to_names,			# taxid -> name lookup
								\%obsolete_taxids,			# filled by callback
								$verbose,
								\*FASTA,					# output handles
								\*PUBMED,					#   opened above
								\*OMIM,
								\*VARIANTS,
								\$cnt_pubmed,				# counter refs
								\$cnt_omim,
								\$cnt_variants,
								\$cnt_missense,
							]);

# swiss prot seems to use taxids which are no longer present in NCBI!
print STDERR "]\n\t$reccnt\tpfam records were parsed.\n" if ($verbose);

# print summary
if ($verbose)
{
	print STDERR "\t", $cnt_pubmed,   "\tpubmed IDs parsed.\n";
	print STDERR "\t", $cnt_omim,     "\tOMIM codes parsed.\n";
	print STDERR "\t", $cnt_variants, "\tvariants parsed.\n";
	print STDERR "\t", $cnt_missense, "\tmissense variants parsed.\n";
	print STDERR "\t", scalar keys %obsolete_taxids,
				 "\tobsolete taxonomy IDs encountered.\n";
}
print STDERR "\n";


