// parse_FA_sequences.cpp
//

#define _MY_STL_MACROS
#include <sstream>
#include <string>
#include <iostream>
#include <algorithm>
#include <boost/regex.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/lexical_cast.hpp>
#include <assert.h>
#include "components/stlhelper.h"
#include "components/perlutil.h"
#include "parse_FA_sequences.h"
using std::stringstream;
using std::string;
using std::cerr;
using std::vector;
using std::cout;
using std::ostream;
using std::find;
using std::transform;
using std::pair;
using boost::tie;
using boost::tuple;
using boost::make_tuple;
using boost::lexical_cast;
using std::equal_range;

//	GRETA code
/*
#include "components/greta/regexpr2.h"
typedef regex::match_results::backref_type regex_bref;
*/


//*****************************************************************************
// for converting sequences to upper case and discarding everything else
//*****************************************************************************
namespace{
	char char_buf [256];
	bool bool_buf [256];

	// Fill char_buf with the identity mapping over all byte values, except
	// lowercase ASCII letters, which map to their uppercase equivalents.
	// Returns a pointer to the table.
	char* Initcharbuf()
	{
		for (int i = 0; i < 256; ++i)
		{
			const bool lower_ascii = (i >= 'a' && i <= 'z');
			char_buf[i] = lower_ascii ? (char)(i - 'a' + 'A') : (char)i;
		}
		return char_buf;
	}
	// Fill bool_buf so it is true exactly for ASCII letters (either case)
	// and false for every other byte value.  Returns a pointer to the table.
	bool* Initboolbuf()
	{
		for (int i = 0; i < 256; ++i)
			bool_buf[i] = (i >= 'a' && i <= 'z') || (i >= 'A' && i <= 'Z');
		return bool_buf;
	}
}


// Static lookup-table pointers, initialized once during static initialization.
// char_buf maps every byte to itself except 'a'..'z', which map to 'A'..'Z'.
// NOTE(review): the table UPPERcases despite the class name "do_tolower" --
// confirm the name against the class declaration in the header.
char* do_tolower::pbuf = Initcharbuf();
// bool_buf is true exactly for ASCII letters of either case.
bool* if_is_ascii::pbuf = Initboolbuf();





//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

//	parse_taxa

//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
bool parse_taxa(const std::string&	acc,
				const std::vector<t_digest_taxid>& hash_to_taxid,
				const std::vector<t_gi_taxid>& gi_to_taxid,
				std::string&		taxid)
{
	// get taxid
	// blow up if no default and no taxid found
	// get accession code
	static const boost::regex  taxid_regex("\\|NCBI_TAXID\\|(\\d+)");
	boost::match_results<std::string::const_iterator> taxid_match;
	if (regex_search(beg_to_end(acc), taxid_match, taxid_regex, boost::match_default))
	{
		taxid = taxid_match[1];
		return true;
	}


	// match gi
	static const boost::regex  gi_regex("gi\\|(\\d+)");
	boost::match_results<std::string::const_iterator> gi_match;
	if (regex_search(beg_to_end(acc), gi_match, gi_regex, boost::match_default))
	{
		string gi (gi_match[1]);
		t_gi_taxid test (lexical_cast<unsigned>(gi), 0);
		// look for taxid corresponding to gi
		vector<t_gi_taxid>::const_iterator find_beg, end;
		boost::tie(find_beg, end) = equal_range(beg_to_end(gi_to_taxid), test);
		if (find_beg != end)
		{
			taxid = lexical_cast<string>(find_beg->taxid);
			return true;
		}

	}


	// match square brackets
	static const boost::regex  taxon_regex("\\[([^\\[\\]]+)\\]");
	boost::match_results<std::string::const_iterator> taxon_match;
	if (regex_search(beg_to_end(acc), taxon_match, taxon_regex, boost::match_default))
	{
		// get md4 digest
		t_digest digest;
		md4_hash(taxon_match[1], digest);
		t_digest_taxid test(digest);

		// look for taxid corresponding to md4
		vector<t_digest_taxid>::const_iterator find_beg, end;
		boost::tie(find_beg, end) = equal_range(beg_to_end(hash_to_taxid), test);
		if (find_beg != end)
		{
			taxid = lexical_cast<string>(find_beg->id);
			return true;
		}
	}

	return false;

}

bool parse_acc_data(const std::string&	acc,
					const std::string&	acc_code_regex_str,
					std::string&		acc_code,
					std::string&		acc_name)
{
	// Extract the accession code (first capture of acc_code_regex_str) and
	// the accession name (text after the right-most '|') from an accession
	// line.  Returns false if acc_code ends up empty.
	// NOTE(review): acc_code is read back if the regex fails -- presumably a
	// caller-supplied default; confirm against callers before clearing it.

	// get accession code
	// BUG FIX: this regex was declared 'static', so it was compiled from the
	// acc_code_regex_str of the FIRST call only; any later call passing a
	// different pattern silently reused the stale regex.  Build it per call.
	const boost::regex  acc_code_regex(acc_code_regex_str);
	boost::match_results<std::string::const_iterator> acc_code_match;
	if (regex_search(beg_to_end(acc), acc_code_match,
						acc_code_regex, boost::match_default))
		acc_code = acc_code_match[1];
	if (!acc_code.length())
		return false;


	// get accession name starting from right-most |
	// (if there is no '|', rend().base() == begin(), i.e. scan whole string)
	// acc_name left untouched if the name pattern does not match
	std::string::const_iterator acc_name_beg = find(acc.rbegin(), acc.rend(), '|').base();
	static const boost::regex  acc_name_regex("^([A-Z0-9]+_[A-Z0-9]+)");
	boost::match_results<std::string::const_iterator> acc_name_match;
	if (regex_search(acc_name_beg, acc.end(), acc_name_match, acc_name_regex,
							boost::match_default))
		acc_name = acc_name_match[1];

	return true;
}



//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

//	parse_secIDs()

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
bool parse_secIDs(const string& acc_line, vector<string>& vec_secids)
{
	// match pir ids
	{
		static boost::regex pir_regex("pir\\|\\|([^ |]+) ");
		boost::match_results<std::string::const_iterator> pir_match;
		if (regex_search(beg_to_end(acc_line), pir_match, pir_regex, boost::match_default))
		{
			vec_secids.push_back(string("pir\t") + string(pir_match[1]));
		}
	}

	// match everything else
	bool conformant_sec_ids = true;
	{
		// ignore NCBI tax ids
		const string NCBI_str = "NCBI_TAXID";

		// stop at first space
		std::string::const_iterator parse_end = find(beg_to_end(acc_line), ' ');

		boost::match_flag_type match_flags = boost::match_default;
		static boost::regex sec_ids_regex("([\\-A-Z_0-9]+)\\|(\\-A-Z_0-9]+)");
		boost::match_results<std::string::const_iterator> sec_ids_match;
		std::string::const_iterator start = acc_line.begin();
		while (regex_search(start, parse_end, sec_ids_match, sec_ids_regex, match_flags))
		{
			start = sec_ids_match[0].second;
			match_flags |= boost::match_prev_avail;
			match_flags |= boost::match_not_bob;

			string db_code(sec_ids_match[1]);

			if (db_code == NCBI_str)
				continue;

			// make sure data is not too long
			if (db_code.length() > 10)
			{
				conformant_sec_ids = false;
				db_code.erase(10);
			}

			string sec_id(sec_ids_match[1]);

			// make sure data is not too long
			if (sec_id.length() > 30)
			{
				sec_id.erase(30);
				conformant_sec_ids = false;
			}

			vec_secids.push_back(db_code + "\t" + sec_id);
		}
	}
	// make sure no duplicates
	sort(beg_to_end(vec_secids));
	remove_duplicates(vec_secids);

	return conformant_sec_ids;


}




//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

//		Callback used for parsing each sequence

//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
bool t_process_sequence::operator()(	string& accession,
										string& sequence,
										unsigned acc_filepos,
										unsigned seq_filepos)
{
	// Callback invoked once per fasta record.  Hashes the sequence and each
	// '\x1'-separated accession fragment, records fragment hashes not already
	// present in acc_acchash_accid_current, marks those that are present, and
	// appends the sequence hash/file-position to seq_seqhash_pi_pos_new.
	// Always returns true (i.e. keep parsing).

	// print progress dots
	++dots;

	// get line counts for error messages
	unsigned curr_acc_beg_line = acc_beg_line;
	acc_beg_line += std::count(beg_to_end(sequence), '\n') + 1;

	// remember the raw on-file length before invalid characters are stripped
	unsigned seq_len_on_file = sequence.length();

	// remove invalid characters
	prepare_sequence(sequence);

	// bad sequence!!!  record its position and count it, but keep parsing
	if (!sequence.length() || ! accession.length())
	{
		filepos_zero_length_sequence = curr_acc_beg_line;
		++count_zero_length_sequences;
		return true;
	}

	t_digest seq_hash;
	digest_message(sequence, seq_hash);

	// break up accessions on the '\x1' separator character
	const string delimiter(1, '\x1');
	vector<pair<string::iterator, string::iterator> > accession_frags;
	perl_split (beg_to_end(accession), delimiter, accession_frags, true);

	// make sure acc_acchash_accid_current is sorted when parsing first sequence
	// (the equal_range calls below require sorted input)
	assert(dots.value() >  1 ||
			stlhelper::is_sorted(beg_to_end(acc_acchash_accid_current)));

	// for each calculate hash
	for (unsigned i = 0; i < accession_frags.size(); ++i)
	{
		t_hash_id acc_hash;
		calculate_acc_hash(accession_frags[i].first, accession_frags[i].second,
							seq_hash, db_origin, acc_hash);
		// binary-search the sorted table for this fragment's hash
		vector<t_hash_id>::iterator find_beg, find_end;
		boost::tie(find_beg, find_end) = equal_range(beg_to_end(acc_acchash_accid_current),
														acc_hash);

		// save accession hashes in the fasta file not yet in panda
		if (find_beg == find_end)
		{
			unsigned acc_offset = accession_frags[i].first - accession.begin();
			short acc_len = (short)(accession_frags[i].second - accession_frags[i].first);
			// instead of pi, save pointer into seq_seqhash_pi_pos_new
			// (the index of the entry appended at the end of this call)
			acc_seqid_pos_extra.push_back(t_id_pos(	seq_seqhash_pi_pos_new.size(),
													acc_offset + acc_filepos,
													acc_len));
		}

		// mark panda accession hashes which match those in the fasta file
		// (id == -1 is used as the "matched" sentinel)
		else
		{
			find_beg->id = (t_id)(-1);
		}
	}

	// save pi length and file position for this sequence for later retrieval
	seq_seqhash_pi_pos_new.push_back(	t_hash_id_pos(	seq_hash,
														0,		// uninitialized
                                                        seq_filepos,
														seq_len_on_file) );


	return true;
}
