// panda.cpp : Defines the entry point for the console application.
//

#define SEQ_BATCH_SIZE 10000

#include <string>
#include <set>
#include <iostream>
#include <iomanip>	// std::setprecision / std::fixed (t_program_results output)
#include <fstream>
#include <sstream>
#include <algorithm>
#include <iterator>
#include <assert.h>
#include <cstring>	// memset, strcmp
#include <vector>
#include <ostream>
#include <ios>
#include <bits/localefwd.h>	// For class locale

#include <boost/smart_ptr.hpp>
#include <boost/lambda/lambda.hpp>
#include <boost/tuple/tuple.hpp>

#define _MY_STL_MACROS
#include "components/stlhelper.h"
#include "components/set_mark_diff.h"
#include "components/bcpp_parsefasta.h"
#include "components/perlutil.h"
#include "components/portable_timer.h"
#include "components/progress_indicator.h"
#include "components/count_letters_in_file.h"
#include "components/pg_db_cursor.h"
#include "components/pg_copy_guard.h"


#include "panda.h"
#include "transfer_data_to_postgres.h"
#include "parse_FA_sequences.h"
#include "sql_commands.h"
#include "remove_dependent_orphans.h"
//#include <botan/md4.h>

using std::set;
using std::less;
using std::string;
using std::ostream;
using std::cerr;
using std::ifstream;
using std::back_inserter;
using std::ofstream;
using std::vector;
using std::deque;
using std::ostringstream;
using std::sort;
using std::find;
using std::copy;
using std::cout;
using std::adjacent_find;
using std::set;
using std::remove_if;
using std::getline;
using std::make_pair;
using std::equal_to;
using namespace boost::lambda;


#include "components/pg_db_cursor.h"
#include "components/libpq++/libpq++.h"
#include "get_arg.h"
#if _MSC_VER > 1000
// nasty hack for VC++ non-compliance with standard C++ for loop declaration scope
#define for if (0); else for
#endif

#define THRESHOLD_RECREATE_INDEX 300
//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

//	Description and enums for the statistics and durations from different elements
//		of the entire process

//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
// Human-readable labels for each timed stage; indexed by eRESULTS_TIMES.
// The final "END" entry is a sentinel checked by operator<<(..., t_program_results).
// Elements are const char*: string literals are immutable, and the old
// char* form was a deprecated (unsafe) conversion.
const char* Results_Times_Descriptions[] ={
	"Counting sequences ('>'s) from the FASTA file.",				//  0
	"Retrieving current accessions hashes from Panda.",				//  1
	"Parsing accessions and sequences from the FASTA file.",		//  2
	"Importing PIs for obsolete accessions into Panda.",            //  3
	"Archiving and removing obsolete accessions from Panda.",       //  4
	"Importing new accessions into PostgreSQL.",					//  5
	"PostgreSQL recreating database indices for the accessions.",	//  6
	"Importing new secondary IDs into PostgreSQL.",					//  7
	"PostgreSQL recreating database indices for the secondary IDs.",//  8
	"Retrieving from Panda the relationships for gi & TaxIDs.",     //  9
	"Retrieving from Panda the relationships for tax names & IDs.", // 10
	"Getting all sequences PIs currently in Panda.",                // 11
	"Comparing newly parsed and existing sequences PIs.",			// 12
	"Checking for PI hash collisions.",                             // 13
	"Importing sequences into PostgreSQL.",							// 14
	"PostgreSQL recreating indices for the 'sequences' table.",		// 15
	"Retrieve all unprocessed novel PIs from Panda.",               // 16
	"Add new PIs to the list of unprocessed sequences in Panda.",   // 17
	"Exporting PIs of all accessions from Panda.",					// 18
	"Comparing accession and sequence PIs.",						// 19
	"Removing obsolete sequences in Panda.",						// 20
	"Removing orphaned entries linked to obsolete sequences.",		// 21
	"PostgreSQL re-collecting database statistics.",				// 22
	"Total Time.",													// 23
	"END"															// 24
};

// Index constants for t_program_results::times and for
// Results_Times_Descriptions; the two must be kept in sync.
// TIME_END is a sentinel and must remain the last enumerator.
enum eRESULTS_TIMES
{
	TIME_CNT_SEQ,													//  0
	TIME_PG_IMPORT_ACC_HASH,                                        //  1
	TIME_PARSE_ACC,													//  2
	TIME_PG_IMPORT_OBSOLETE_ACC_PI,                                 //  3
	TIME_PG_REMOVE_OBSOLETE_ACC,									//  4
	TIME_PG_ADD_ACC,												//  5
	TIME_PG_RECREATE_ACC_INDICES,                                   //  6
	TIME_PG_ADD_SECIDS,                                             //  7
	TIME_PG_RECREATE_SECIDS_INDICES,                                //  8
	TIME_PG_IMPORT_GI_TAXID_LOOKUP,                                 //  9
	TIME_PG_IMPORT_TAXNAME_TAXID_LOOKUP,                            // 10
	TIME_PG_IMPORT_PANDA_SEQ_PI,                                    // 11
	TIME_COMPARE_SEQ_PI,											// 12
	TIME_CHECK_PI_COLLISIONS,                                       // 13
	TIME_PG_ADD_SEQ,												// 14
	TIME_PG_RECREATE_SEQ_INDICES,									// 15
	TIME_PG_EXPORT_NEW_SEQ_PI,                                      // 16
	TIME_PG_ADD_NEW_SEQ_PI,                                         // 17
	TIME_PG_EXPORT_ACC_PI,											// 18
	TIME_COMPARE_ACC_SEQ_PI,										// 19
	TIME_PG_REMOVE_ORPHANED_SEQ,									// 20
	TIME_PG_SYNC_PI_DEPENDENTS,										// 21
	TIME_PG_VACUUM,													// 22
	TIME_TOTAL,														// 23
	TIME_END														// 24
};


// Order of the timing columns written by t_program_results::GetCSV().
// Note: TIME_PG_IMPORT_GI_TAXID_LOOKUP and TIME_PG_IMPORT_TAXNAME_TAXID_LOOKUP
// (enum values 9 and 10) are not included in the CSV, so from entry 9 onward
// the trailing comments give the CSV column position, NOT the enum value.
unsigned SAVED_TIMES[] =
{
	TIME_CNT_SEQ,													//  0
	TIME_PG_IMPORT_ACC_HASH,                                        //  1
	TIME_PARSE_ACC,													//  2
	TIME_PG_IMPORT_OBSOLETE_ACC_PI,                                 //  3
	TIME_PG_REMOVE_OBSOLETE_ACC,									//  4
	TIME_PG_ADD_ACC,												//  5
	TIME_PG_RECREATE_ACC_INDICES,                                   //  6
	TIME_PG_ADD_SECIDS,                                             //  7
	TIME_PG_RECREATE_SECIDS_INDICES,                                //  8
	TIME_PG_IMPORT_PANDA_SEQ_PI,                                    //  9
	TIME_COMPARE_SEQ_PI,											// 10
	TIME_CHECK_PI_COLLISIONS,                                       // 11
	TIME_PG_ADD_SEQ,												// 12
	TIME_PG_RECREATE_SEQ_INDICES,									// 13
	TIME_PG_EXPORT_NEW_SEQ_PI,                                      // 14
	TIME_PG_ADD_NEW_SEQ_PI,                                         // 15
	TIME_PG_EXPORT_ACC_PI,											// 16
	TIME_COMPARE_ACC_SEQ_PI,										// 17
	TIME_PG_REMOVE_ORPHANED_SEQ,									// 18
	TIME_PG_SYNC_PI_DEPENDENTS,										// 19
	TIME_PG_VACUUM,													// 20
	TIME_TOTAL														// 21
};



// Human-readable labels for each statistic; indexed by eRESULTS_COUNTS.
// The final "END" entry is a sentinel checked by operator<<(..., t_program_results).
// Elements are const char*: string literals are immutable, and the old
// char* form was a deprecated (unsafe) conversion.
const char* Results_Counts_Descriptions[] ={
	"sequences parsed from the FASTA file.",						//  0
	"sequences without any taxa specified.",						//  1
	"sequences without a matching accession code.",					//  2
	"duplicate sequences in the FASTA file.",						//  3
	"matching accessions contained in the PostgreSQL database.",	//  4
	"obsolete accessions removed from the PostgreSQL database.",	//  5
	"new accessions added to the PostgreSQL database.",				//  6
	"new secondary IDs added to the PostgreSQL database.",			//  7
	"sequences contained in the PostgreSQL database.",				//  8
	"obsolete sequences removed from the PostgreSQL database.",		//  9
	"new sequences added to the PostgreSQL database.",				// 10
	"END"															// 11
};
// Index constants for t_program_results::counts and for
// Results_Counts_Descriptions; the two must be kept in sync.
// CNT_END is a sentinel and must remain the last enumerator.
enum eRESULTS_COUNTS
{
	CNT_FASTA_SEQ_PARSED,											//  0
	CNT_FASTA_ACC_SANS_TAXA,                                        //  1
	CNT_FASTA_ACC_SANS_CODES,                                       //  2
	CNT_FASTA_PI_DUPLICATES,	                                    //  3
	CNT_ACC_SAME_ORIGIN_IN_PANDA,                                   //  4
	CNT_ACC_OBSOLETE,                                               //  5
	CNT_ACC_ADDITIONAL,			                                    //  6
	CNT_SECIDS_ADDITIONAL,                                          //  7
	CNT_SEQ_IN_PANDA,                                               //  8
	CNT_SEQ_OBSOLETE,                                               //  9
	CNT_SEQ_ADDITIONAL,                                             // 10
	CNT_END						                                    // 11
};


//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

//	t_program_results

//		statistics from and the duration of each element of the process

//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
struct t_program_results
{
	float		times [TIME_END];	// seconds spent per stage, indexed by eRESULTS_TIMES
	unsigned	counts [CNT_END];	// item counts per statistic, indexed by eRESULTS_COUNTS

	// Zero every slot.  std::fill is type-safe; the previous memset on the
	// float array relied on 0.0f having an all-bits-zero representation.
	t_program_results()
	{
		std::fill(times, times + TIME_END, 0.0f);
		std::fill(counts, counts + CNT_END, 0u);
	}

	// One CSV record: every count (each followed by ", "), then the timings
	// listed in SAVED_TIMES, comma-separated, printed at +/- 0.1 s precision.
	string GetCSV() const
	{
		ostringstream os;

		// write out counts with descriptions
		for (unsigned i = 0; i < CNT_END; ++i)
			os << counts[i] << ", ";

		// timing info printed out at +/- 0.1 s
		os << std::setprecision(1) << std::fixed;

		// write out times: first value unprefixed, remainder comma-prefixed
		os << times[SAVED_TIMES[0]];

		for (unsigned i = 1; i < sizeof(SAVED_TIMES) / sizeof(SAVED_TIMES[0]); ++i)
			os << "," << times[SAVED_TIMES[i]];

		return(os.str());
	}
};

// Stream insertion for t_program_results: prints every counter with its
// description, then every timing that exceeded 4 seconds.  Templated on the
// stream type so any ostream-like class can be used.
template <typename ostrm>
ostrm& operator << (ostrm& os, const t_program_results& results)
{
	// the description tables must stay in sync with the enums: the entry at
	// each enum's END index must be the sentinel string "END"
	assert (!strcmp(Results_Counts_Descriptions[CNT_END], "END"));
	assert (!strcmp(Results_Times_Descriptions[TIME_END], "END"));

	// write out counts with descriptions
	for (unsigned i = 0; i < CNT_END; ++i)
		os << '\t'	<< results.counts[i] << "\t "
					<< Results_Counts_Descriptions[i] << "\n";

	// timing info printed out at +/- 0.1 s
	os << std::setprecision(1) << std::fixed;
		
	// write out times with descriptions
	// (stages of 4 seconds or less are treated as noise and skipped)
	for (unsigned i = 0; i < TIME_END; ++i)
		if (results.times[i] > 4.0)
			os	<< '\t' << results.times[i] << "s\t: "
				<< Results_Times_Descriptions[i] << "\n";
	os << "\n";

	// make sure worked
	assert(os.good());

	return os;
}





//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

//	Current_operation

//		RAII guard: logs the start of a processing stage and records its duration

//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
namespace
{
	
	class Current_operation
	{
		eRESULTS_TIMES		op_id;
		ostream&			VERBOSE;
		t_program_results&	results;
		t_portable_timer	timer;
	public:
		Current_operation (eRESULTS_TIMES op_id_, ostream&	VERBOSE_,
										t_program_results& results_):
			op_id (op_id_), VERBOSE(VERBOSE_), results(results_)
		{
			assert((unsigned)op_id < (unsigned)(sizeof Results_Times_Descriptions / sizeof(char*)));
			VERBOSE << '\t' << Results_Times_Descriptions[op_id] << ".." << std::endl;
		}
		
		~Current_operation()
		{
			results.times[op_id] = timer.reset();
		}
		
	};
}



//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

//	t_auto_log_results

//		writes programme statistics to the error log automatically on
//			premature programme exit

//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
struct t_auto_log_results
{
	t_program_results&	results;	// statistics to report on destruction
	ostream&			CLOG;		// destination log stream
	bool				do_log;		// cleared by abandon_log()
	t_auto_log_results(t_program_results& r, ostream& l):
								results(r), CLOG(l), do_log(true) {}
	// write the results on destruction unless logging was abandoned
	~t_auto_log_results(){if (do_log) CLOG << results; }
	// suppress the automatic logging (e.g. when results are reported elsewhere)
	void abandon_log(){ do_log = false;	}
};









//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

//		count_accession_lines

//			count number of '>' in file
//			assume this is the number of sequences
//			to allow better memory management (vector allocates exponentially)

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
// Scan the FASTA file once, counting occurrences of '\001', '>' and '\r'.
// Returns (estimated sequence count = '\001' + '>' counts, '>' count alone,
// true if the file contains no '\r' characters).
// NOTE(review): assumes count_letters_in_file returns one count per character
// of "\001>\r" in order — confirm against its implementation.
boost::tuple<unsigned, unsigned, bool>
count_accession_lines(ifstream& fasta_file, t_program_args& args, t_program_results& results)
{
	// times this stage and announces it on the verbose stream
	Current_operation op(TIME_CNT_SEQ, args.VERBOSE(), results);
	//                   ^^^^^^^^^^^^
	
	std::vector<unsigned> counts =
			count_letters_in_file(	fasta_file, "\001>\r",
									t_progress_indicator(args.VERBOSE(), 80));
	return boost::make_tuple(counts[0] + counts[1], counts[1], (counts[2] == 0));
}



//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

//	retrieve_accession_hashes_from_panda

//		get hashes for accessions

//aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
// Pull (acc_hash, accid) pairs for every accession of args.db_origin out of
// PostgreSQL into acc_acchash_accid_current (sorted, de-duplicated).
// acc_cnt_estimate only sizes the fetch batches.  Returns false (after
// printing the SQL error) on any cursor failure.
bool retrieve_accession_hashes_from_panda(	t_pg_cursor&		pg_db,
											vector<t_hash_id>&	acc_acchash_accid_current,
											unsigned 			acc_cnt_estimate,
											t_program_args&		args,
											t_program_results&	results)
{
	//____________________________________________________________________________________
	// 		Getting accessions from PostgreSQL.
	//____________________________________________________________________________________
	{
		Current_operation op(TIME_PG_IMPORT_ACC_HASH, args.VERBOSE(), results);

		// create binary cursor
		string SQL_cmd = "SELECT acc_hash[1], acc_hash[2], acc_hash[3], acc_hash[4], "
							"accid FROM accessions "
							"WHERE db_origin ='" +
							args.db_origin + "'";
		if (!pg_db.Declare(SQL_cmd, true))
			return pg_print_sql_err("create a binary cursor to retrieve accession PIs",
										pg_db, SQL_cmd, args.CERR().strm());
		
		
		// fetch in roughly 50 batches so the progress dots advance visibly
		unsigned acc_batch_size = acc_cnt_estimate / 50 + 1;

		// get data in batches
		deque<t_hash_id> t_acchash_accid;
		t_progress_indicator dots(args.VERBOSE(), 1);
		
		vector<unsigned> hash(4);
		while (pg_db.Fetch(acc_batch_size))
		{
			int curr_batch_size = pg_db.Tuples();
			if (!curr_batch_size)
				break;
			dots++;
			// columns 0-3 are the four 32-bit words of the accession hash,
			// column 4 is the accession id
			for (int i = 0; i < curr_batch_size; ++i)
				t_acchash_accid.push_back(
					t_hash_id(
						t_digest(	pg_db.getvalue<unsigned>(i, 0),
									pg_db.getvalue<unsigned>(i, 1),
									pg_db.getvalue<unsigned>(i, 2),
									pg_db.getvalue<unsigned>(i, 3)),
						pg_db.getvalue<t_id>(i, 4)));
		}

		// close cursor
		if (!pg_db.Close())
			return pg_print_sql_err("close a binary cursor for retrieving accession PIs",
										pg_db, "CLOSE panda_cursor", args.CERR().strm());
	
		// get rid of excess memory
		// (accumulate in a deque, then copy the exact-sized result into the vector)
		sort(beg_to_end(t_acchash_accid));
		remove_duplicates(t_acchash_accid);
		acc_acchash_accid_current.clear();
		acc_acchash_accid_current.reserve(t_acchash_accid.size());
		copy(	beg_to_end(t_acchash_accid),
				back_inserter(acc_acchash_accid_current));
	}
	
	results.counts[CNT_ACC_SAME_ORIGIN_IN_PANDA] = acc_acchash_accid_current.size();
	return true;
}





//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
//		parse_sequences_in_fasta

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
// Parse every accession / sequence pair out of the FASTA file (args.fasta_db).
//
// [in/out] acc_acchash_accid_current : (hash, accid) pairs currently in panda;
//          consumed here — presumably the parser zeroes the accid of entries
//          matched in the file (see the op_empty_accid filter below; confirm
//          in t_process_sequence), so the survivors become the obsolete list.
// [out]    acc_seqid_pos_extra    : accessions to add; seqid is the index of
//          the matching sequence in seq_seqhash_pi_pos_new
// [out]    seq_seqhash_pi_pos_new : hash / pi / file-position per sequence
// [out]    acc_accid_obsolete     : accids in panda but absent from the file
//
// Returns false if no sequences at all could be parsed.
bool parse_sequences_in_fasta(	t_pg_cursor& 			pg_db,

								//[in]
								vector<t_hash_id>&		acc_acchash_accid_current,
								unsigned				seq_cnt_estimate,

								//[out]
								deque<t_id_pos>&		acc_seqid_pos_extra,
								vector<t_hash_id_pos>&	seq_seqhash_pi_pos_new,
								vector<t_id>&			acc_accid_obsolete,
								
								t_program_args& 		args,
								t_program_results& 		results)
{
	Current_operation op(TIME_PARSE_ACC, args.VERBOSE(), results);
	//                   ^^^^^^^^^^^^^^
	
	// clear end of file condition and start from beginning
	args.fasta_db.clear();
	args.fasta_db.seekg(0);
	assert(args.fasta_db.good());
	
	
	// pass callback to BioCPP:: fasta library function
	dual_streams CERR = args.CERR();
	t_process_sequence new_db_data(	args.verbose, args.db_origin,
									acc_acchash_accid_current,
									t_progress_indicator(args.VERBOSE(),
															seq_cnt_estimate / 50 + 1),
									CERR.strm());
	// pre-allocate memory for sequences
	new_db_data.seq_seqhash_pi_pos_new.reserve(seq_cnt_estimate + 100);
	
	// parse sequences
	BioCPP::Process_FA_Sequences_tellg(args.fasta_db, new_db_data);
	new_db_data.dots.finish();

	
	//____________________________________________________________________________________
	// 		collect parsing statistics
	//____________________________________________________________________________________
	// print out diagnostic if any sequences have zero lengths
	if (new_db_data.count_zero_length_sequences)
		args.CERR().strm()	<<	"\nWARNING!!\n\t"
							<<		new_db_data.count_zero_length_sequences
							<<		" accesssions with !!!zero!!! length\t"
									"sequences including one starting on line # "
							<< 		new_db_data.filepos_zero_length_sequence << "\n\n";
	
	// make sure that some sequences were parsed
	if (!new_db_data.seq_seqhash_pi_pos_new.size())
	{
		args.CERR().strm()
					<< "\nERROR!!\n\t"
						"No sequences could be parsed from the sequence database in \n\t"
						<< args.fasta_path << "\n\n";
		return false;
	}
	
	results.counts[CNT_FASTA_SEQ_PARSED]	 = new_db_data.seq_seqhash_pi_pos_new.size();

	
	
	//____________________________________________________________________________________
	// 		Return data, using memory very conservatively
	//____________________________________________________________________________________
	
	// Obsolete accessions

	// ignore accid for accessions
	//	which are common to the current panda data and the fasta file
	acc_acchash_accid_current.erase(	remove_if(	beg_to_end(acc_acchash_accid_current),
    		                                        op_empty_accid()	
													),
										acc_acchash_accid_current.end());
	// save accid
	acc_accid_obsolete.reserve(acc_acchash_accid_current.size());
	transform(	beg_to_end(acc_acchash_accid_current),
				back_inserter(acc_accid_obsolete), op_get_id());
	vector_release_mem(acc_acchash_accid_current);
	

	// Extra accessions to be added to panda
	// seqid is the index of the associated sequence in seq_seqhash_pi_pos_new
	acc_seqid_pos_extra = new_db_data.acc_seqid_pos_extra;

	
	// Sequences PI / lengths

	seq_seqhash_pi_pos_new = new_db_data.seq_seqhash_pi_pos_new;


	
	return true;
}




//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
//		remove_obsolete_accessions_from_panda

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
// Archive and delete from panda the accessions whose accids are listed in
// acc_accid_obsolete (the list is sorted and de-duplicated in place here).
// The accids are first COPY'd into a temp table, the table is ANALYSEd
// outside any transaction, and the delete runs inside a fresh transaction.
// Returns false on any SQL failure; true immediately when the list is empty.
bool remove_obsolete_accessions_from_panda(	t_pg_cursor& 			pg_db,
											vector<t_id>&			acc_accid_obsolete,
											t_program_args& 		args,
											t_program_results& 		results)
{
	// no data
	if (!acc_accid_obsolete.size())
		return true;

	// the accessions and secondary IDs are added in order
	// so might as well delete the same way
	sort(beg_to_end(acc_accid_obsolete));
	remove_duplicates(acc_accid_obsolete);

	results.counts[CNT_ACC_OBSOLETE]	= acc_accid_obsolete.size();
	
	
	//____________________________________________________________________________________
	// 		Create temp tables for obsolete accessions
	//____________________________________________________________________________________
	args.VERBOSE() << "\t[";
	if (!ExecutePandaSQL(pg_db, eCREATE_OBSOLETE_ACC_TABLES, args.VERBOSE(),
												args.CERR().strm()))
		return false;
	args.VERBOSE() << "]\n";
	

	//____________________________________________________________________________________
	// 		Exporting accids for obsolete accessions into Panda.
	//____________________________________________________________________________________
	dual_streams CERR = args.CERR();
	{
		Current_operation op(TIME_PG_IMPORT_OBSOLETE_ACC_PI, args.VERBOSE(), results);
		//                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
		// stream the accids into the temp table via PostgreSQL COPY
		t_pg_copy_guard copy_buffer(pg_db, "obsolete_accid", CERR.strm());
		if (!copy_buffer)
			return false;

		t_progress_indicator dots(args.VERBOSE(), acc_accid_obsolete.size() / 50 + 1,
									acc_accid_obsolete.size());
		// one accid per COPY line
		for(; dots.iterating(); ++dots)
		{
			ultoa(acc_accid_obsolete[dots.value()], copy_buffer.buf) += '\n';
			copy_buffer.cached_write();
		}
		if (!copy_buffer.end_copy())
			return false;
	}
	

	//____________________________________________________________________________________
	// 		Archiving and removing obsolete accessions from Panda.
	//____________________________________________________________________________________
	{
		Current_operation op(TIME_PG_REMOVE_OBSOLETE_ACC, args.VERBOSE(), results);
		//                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^
		// sql analyze in preparation for deletion must take place outside of a
		// transaction which is fine since all we have done use temp tables etc.
		pg_db.Commit();
		args.VERBOSE() << "\t[";
		if (!ExecutePandaSQL(pg_db, eANALYSE_OBSOLETE_ACC, args.VERBOSE(),
											args.CERR().strm()))
			return false;
		// transaction begins here
		pg_db.BeginTransaction();
		if (!ExecutePandaSQL(pg_db, eREMOVE_OBSOLETE_ACC, args.VERBOSE(),
											args.CERR().strm()))
			return false;
		args.VERBOSE() << "]\n";
	}
	return true;
}



//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

//	convert_seqhash_to_pi_from_panda

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
// Functor: reduce a full digest to its 32-bit truncation.
struct digest_to_unsigned
{
	unsigned operator()(const t_digest& digest) const
	{
		return digest.trunc_to32();
	}
};
// Stream every (seq_hash, pi) pair from PostgreSQL's 'sequences' table.
// For each hash that also appears in the (sorted) seq_seqhash_pi_pos_new,
// copy the existing pi into those entries and zero their length to mark
// "already in panda".  Every pi seen is appended to all_pis, and the total
// is recorded in results.counts[CNT_SEQ_IN_PANDA].
// Returns false (after printing the SQL error) on any cursor failure.
//
// Changes: removed the unused local 'vector<unsigned> hash' and a dead
// commented-out duplicate of the matching loop.
bool convert_seqhash_to_pi_from_panda(	t_pg_cursor&		   	pg_db,
										// [in / out]
										vector<t_hash_id_pos>&	seq_seqhash_pi_pos_new,
										// [out]
										deque<t_id>&			all_pis,
										t_program_args&			args,
										t_program_results&		results)
{
	//____________________________________________________________________________________
	// 		Getting sequences PIs from PostgreSQL.
	//____________________________________________________________________________________
	{
		Current_operation op(TIME_PG_IMPORT_PANDA_SEQ_PI, args.VERBOSE(), results);

		// equal_range below requires the parsed sequences to be sorted by digest
		assert(stlhelper::is_sorted(beg_to_end(seq_seqhash_pi_pos_new)));

		// create binary cursor
		string SQL_cmd = "SELECT seq_hash[1], seq_hash[2], seq_hash[3], seq_hash[4], "
							"pi FROM sequences";
		if (!pg_db.Declare(SQL_cmd, true))
			return pg_print_sql_err("create a binary cursor to retrieve sequence PIs",
										pg_db, SQL_cmd, args.CERR().strm());
		
		
		// get data in batches
		t_progress_indicator dots(args.VERBOSE(), 1);
		const unsigned seq_batch_size = 30000;
		while (pg_db.Fetch(seq_batch_size))
		{
			int curr_batch_size = pg_db.Tuples();
			if (!curr_batch_size)
				break;
			dots++;

			for (int i = 0; i < curr_batch_size; ++i)
			{
				// columns 0-3 are the four 32-bit words of the sequence hash,
				// column 4 is the sequence's pi
				t_digest	seq_hash(	pg_db.getvalue<unsigned>(i, 0),
										pg_db.getvalue<unsigned>(i, 1),
										pg_db.getvalue<unsigned>(i, 2),
										pg_db.getvalue<unsigned>(i, 3));
				t_id		pi		( pg_db.getvalue<unsigned>(i, 4));

				all_pis.push_back(pi);

				// locate every parsed sequence sharing this hash
				vector<t_hash_id_pos>::iterator find_beg, find_end;
				boost::tie(find_beg, find_end) =
								equal_range(beg_to_end(seq_seqhash_pi_pos_new),
														seq_hash, op_cmp_digest());

				// save pis for matching sequences
				while (find_beg != find_end)
				{
					// This sequence hash is already in panda, no need to update:
					// just use for linking accessions
					assert(t_digest(*find_beg) == seq_hash);
					find_beg->len = 0;
					find_beg->id = pi;
					++find_beg;
				}
			}
	
		}
	
		// close cursor
		if (!pg_db.Close())
			return pg_print_sql_err("close a binary cursor for retrieving sequence PIs",
										pg_db, "CLOSE panda_cursor", args.CERR().strm());
	}
	results.counts[CNT_SEQ_IN_PANDA] = all_pis.size();
	return true;
}

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

//	add_pi_for_novel_sequences

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
// Assign fresh pis (continuing PostgreSQL's "s_pi" counter) to every parsed
// sequence whose pi is still 0.  Duplicated sequences (listed in the sorted
// duplicate_pis) all receive the same pi.  Each newly issued pi is appended
// to all_pis, and the advanced "s_pi" counter is written back to PostgreSQL.
// Returns false if the counter cannot be read or written.
//
// FIX: all_pis was previously taken by value, so the pis pushed into it were
// silently discarded on return (and the whole deque was copied on every
// call); it is now passed by reference, matching
// convert_seqhash_to_pi_from_panda.
bool add_pi_for_novel_sequences(	t_pg_cursor&				pg_db,
									vector<t_hash_id>&			duplicate_pis,
									vector<t_hash_id_pos>&		seq_seqhash_pi_pos_new,
									deque<t_id>&				all_pis,
									t_program_args&				args,
									t_program_results&			results)
{
	// sequences are added in order of increasing pis.
	// This should help performance indexed on pi
	assert(stlhelper::is_sorted(beg_to_end(seq_seqhash_pi_pos_new),
				op_cmp_filepos<t_hash_id_pos>()));
	assert(stlhelper::is_sorted(beg_to_end(duplicate_pis), op_cmp_digest()));

	// continue numbering from the database's current pi counter
	t_id current_pi;
	if (!get_pgsql_sequence_currval(pg_db, "s_pi", current_pi, args.CERR().strm()))
		return false;

	vector<t_hash_id_pos>::iterator seq_iter = seq_seqhash_pi_pos_new.begin();
	vector<t_hash_id_pos>::iterator seq_end	 = seq_seqhash_pi_pos_new.end();
	
	
	// go through all seqhashes looking for those with unassigned pis (i.e. ==0 )
	while (seq_iter != seq_end)
	{
		if (!seq_iter->id)
		{
			// check if this sequence is duplicated
			vector<t_hash_id>::iterator find_beg, find_end;
			boost::tie(find_beg, find_end) =
					equal_range(beg_to_end(duplicate_pis), *seq_iter, op_cmp_digest());
			if (find_beg == find_end)
			{
				// not duplicated, just assign next pi
				seq_iter->id = ++current_pi;
				all_pis.push_back(current_pi);
			}
			else
			{
				// duplicated but pi not assigned, just assign next pi
				if (!find_beg->id)
				{
					find_beg->id = seq_iter->id = ++current_pi;
					all_pis.push_back(current_pi);
				}
				
				// pi already assigned to duplicate, use the same pi
				else
					seq_iter->id = find_beg->id;
			}

		}
		++seq_iter;

	}
	
	// persist the advanced counter so later inserts continue from here
	if (!set_pgsql_sequence_currval(pg_db, "s_pi", current_pi, args.CERR().strm()))
		return false;

	return true;
}



//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

//	read_text_from_stream()

//			retrieves accession line at a particular file position
//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
// Read 'len' bytes starting at byte offset 'filepos' from the (already open)
// stream into 'str' and return str.  The stream's read position is moved.
//
// Fixes two defects of the previous static-buffer implementation:
//  - off-by-one heap overflow: when len equalled the cached buffer size the
//    'len > buf_len' test skipped reallocation and 'buf[len]' wrote one byte
//    past the end (the terminator was never needed — assign takes a length)
//  - non-reentrant static state shared across all callers
std::string& read_text_from_stream(std::ifstream& is, unsigned filepos, unsigned len, std::string& str)
{
	// Titan is only 80,000
	assert(len < 131072);

	// read directly into the caller's string; its capacity is reused
	// across calls just as the old static buffer was
	str.resize(len);
	is.seekg(filepos);
	if (len)
		is.read(&str[0], len);

	assert(is.good());
	return str;
}



//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

//		write_accession_data_for_pgcopy

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
// Format one accession row into copy_buffer for a PostgreSQL COPY into the
// accessions table.  Parses the accession code/name (via acc_code_regex_str)
// and the taxid (default_taxid_str, or the gi/name lookup tables) out of the
// raw accession line.  On a parse failure the matching results counter is
// incremented and false is returned (no row written).  'accession' is
// modified in place: backslashes are doubled for COPY escaping.
// NOTE(review): non_conforming_lines is currently never written — the code
// that filled it is commented out below.
bool write_accession_data_for_pgcopy(	t_pg_copy_guard&			copy_buffer,
										string&						accession,
										const string&				db_origin,
										const string&				acc_code_regex_str,
										const vector<t_digest_taxid>& hash_to_taxid,
										const vector<t_gi_taxid>&	gi_to_taxid,
										const string&				default_taxid_str,
										const t_hash_id_pos&		seq_hash,
										unsigned					accid,
										const string& 				current_time,
										vector<string>&				non_conforming_lines,
										t_program_results& 			results)
{
	string acc_code, acc_name;
	if (!parse_acc_data(accession, acc_code_regex_str, acc_code, acc_name))
	{
		++results.counts[CNT_FASTA_ACC_SANS_CODES];
		return false;
	}

	// a caller-supplied default taxid overrides any lookup
	string taxid;
	if (default_taxid_str.length())
		taxid = default_taxid_str;
	else
		if (!parse_taxa(accession, hash_to_taxid, gi_to_taxid, taxid))
		{
			++results.counts[CNT_FASTA_ACC_SANS_TAXA];
			return false;
		}


	// truncate and warn if accession code and names too long
	/*if (acc_code.length() > 30 ||
		acc_name.length() > 30)
	{
		if (non_conforming_lines.size() < 60)
		{
			non_conforming_lines.push_back(accession);
			non_conforming_lines.push_back(acc_code);
			non_conforming_lines.push_back(acc_name);
		}
		if (acc_code.length() > 30)
			acc_code.erase(30);
		if (acc_name.length() > 30)
			acc_name.erase(30);
	}
	*/
	// write to postgreSQL
	// (tab-separated fields, one row per line, per COPY text format)

	// pi of linked sequence
	ultoa(seq_hash.id, copy_buffer.buf)		+=	"\t0\t";

	// accid
	ultoa(accid, copy_buffer.buf)			+=	"\t0\t";

	// taxid
	copy_buffer								+= taxid		+	'\t';

	// accession digest
	t_digest acc_hash;
	calculate_acc_hash(beg_to_end(accession), seq_hash, db_origin, acc_hash);
	copy_buffer.buf							+=	acc_hash;
	copy_buffer								+=	'\t';

	// acc_code, name and db_origin
	copy_buffer								+= acc_code	+	'\t';
	copy_buffer								+= acc_name	+	'\t';
	copy_buffer								+= db_origin			+	'\t';

	// accession and current time, first replacing backslashes with double backslashes
	std::string::size_type pos = 0;
	while ((pos = accession.find('\\', pos))  != std::string::npos)
	{
		accession.replace(pos, 1, "\\\\");
		pos += 2;
	}
	copy_buffer								+= accession			+	'\t';
	copy_buffer								+= current_time			+	'\n';
	return true;

}

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

//		warn_if_errors_parsing_accessions

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
void warn_if_errors_parsing_accessions(	const std::vector<string>& 	non_conforming_lines,
										t_program_args& 			args,
										t_program_results& 			results)
{
	// print out diagnostic if any accession (fragments) failed to parse
	if (results.counts[CNT_FASTA_ACC_SANS_CODES])
		args.CERR().strm()
					<<	"\nWARNING!!\n\t"
					<< 		results.counts[CNT_FASTA_ACC_SANS_CODES]
					<< 		" accesssions without matching acc. codes.\n\t"
					<<		"The regular expression used was "
							"[" << args.regex_str << "]\n\n";
		
	// make sure that some sequences were parsed
	if (results.counts[CNT_FASTA_ACC_SANS_TAXA])
		args.CERR().strm()
					<<	"\nWARNING!!\n\t"
					<<		results.counts[CNT_FASTA_ACC_SANS_TAXA]
					<< 		" accessions without NCBI tax IDs attached\n\n";
	
	
	if (non_conforming_lines.size())
	{
		args.CERR().strm()
					<<	"\nWARNING!!\n\t"
							"Some accession names or codes on the following accession "
							"lines were too long:\n";
		for (unsigned i = 0; i < non_conforming_lines.size(); ++i)
			args.CERR().strm() << "\t[" << non_conforming_lines[i].substr(0,60) << "]\n";
		args.CERR().strm() << "\n";
	}
}


//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

//		retrieve_tax_data_from_panda

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
//	Loads Panda's two taxonomy lookup tables into memory:
//	  - gi_to_taxid   : GI number -> NCBI tax ID          (table gi_to_taxid)
//	  - hash_to_taxid : 4-word tax-name digest -> tax ID  (table taxa_names_to_taxid)
//	Rows are fetched in batches through a binary cursor, accumulated in a
//	deque, then sorted and compacted into the output vectors (so callers can
//	binary-search them).  Returns false on any database error; both tables
//	being empty is only a warning -- the taxonomy tree may not be loaded yet.
bool retrieve_tax_data_from_panda(t_pg_cursor&	pg_db,
										vector<t_digest_taxid>& hash_to_taxid,
										vector<t_gi_taxid>&	gi_to_taxid,
										t_program_args&		args,
										t_program_results&	results)
{

	//____________________________________________________________________________________
	// 		Getting gi_to_taxid from PostgreSQL.
	//____________________________________________________________________________________
	{
		Current_operation op(TIME_PG_IMPORT_GI_TAXID_LOOKUP, args.VERBOSE(), results);
		//                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
		// create binary cursor
		string SQL_cmd = "SELECT gi, tax_id FROM gi_to_taxid";
		if (!pg_db.Declare(SQL_cmd, true))
			return pg_print_sql_err("create a binary cursor to retrieve taxonomy data",
										pg_db, SQL_cmd, args.CERR().strm());
		
		// get data in batches; a deque avoids huge contiguous reallocations
		deque<t_gi_taxid> t_gi_to_taxid;
		t_progress_indicator dots(args.VERBOSE(), 1);
		
		while (pg_db.Fetch(100000))
		{
			int curr_batch_size = pg_db.Tuples();
			if (!curr_batch_size)
				break;
			dots++;
			for (int i = 0; i < curr_batch_size; ++i)
				t_gi_to_taxid.push_back(
					t_gi_taxid(
								pg_db.getvalue<unsigned>(i, 0),
								pg_db.getvalue<unsigned>(i, 1)));
		}

		// close cursor
		if (!pg_db.Close())
			return pg_print_sql_err("close a binary cursor to retrieve taxonomy data",
										pg_db, "CLOSE panda_cursor", args.CERR().strm());
	
		// sort, then compact into a right-sized vector
		// (gets rid of the deque's excess memory)
		sort(beg_to_end(t_gi_to_taxid));
		gi_to_taxid.clear();
		gi_to_taxid.reserve(t_gi_to_taxid.size());
		copy(	beg_to_end(t_gi_to_taxid),
				back_inserter(gi_to_taxid));
	}
	
	//____________________________________________________________________________________
	// 		Getting tax name hash to taxid from PostgreSQL.
	//____________________________________________________________________________________
	{
		Current_operation op(TIME_PG_IMPORT_TAXNAME_TAXID_LOOKUP, args.VERBOSE(), results);
		//                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
		// create binary cursor; the digest is stored as a 4-element array column
		string SQL_cmd = "SELECT tax_name_hash[1], tax_name_hash[2], "
								"tax_name_hash[3], tax_name_hash[4], "
								"tax_id FROM taxa_names_to_taxid";
		if (!pg_db.Declare(SQL_cmd, true))
			return pg_print_sql_err("create a binary cursor to retrieve taxonomy data",
										pg_db, SQL_cmd, args.CERR().strm());
		
		// get data in batches
		// NOTE(review): batch size here is 10000 vs 100000 above -- presumably
		// because these rows are wider; confirm this difference is intentional.
		deque<t_digest_taxid> t_hash_to_taxid;
		t_progress_indicator dots(args.VERBOSE(), 1);
		
		while (pg_db.Fetch(10000))
		{
			int curr_batch_size = pg_db.Tuples();
			if (!curr_batch_size)
				break;
			dots++;
			for (int i = 0; i < curr_batch_size; ++i)
				t_hash_to_taxid.push_back(
					t_digest_taxid(
						t_digest(	pg_db.getvalue<unsigned>(i, 0),
									pg_db.getvalue<unsigned>(i, 1),
									pg_db.getvalue<unsigned>(i, 2),
									pg_db.getvalue<unsigned>(i, 3)),
						pg_db.getvalue<unsigned>(i, 4)));
		}

		// close cursor
		if (!pg_db.Close())
			return pg_print_sql_err("close a binary cursor to retrieve taxonomy data",
										pg_db, "CLOSE panda_cursor", args.CERR().strm());
	
		// sort, then compact into a right-sized vector
		sort(beg_to_end(t_hash_to_taxid));
		hash_to_taxid.clear();
		hash_to_taxid.reserve(t_hash_to_taxid.size());
		copy(	beg_to_end(t_hash_to_taxid),
				back_inserter(hash_to_taxid));
	}

	// warn when there is nothing to look taxa up with
	if (gi_to_taxid.size() == 0  && hash_to_taxid.size() ==0)
		args.CERR().strm()
					<<	"\nWARNING!!\n\t"
					<< 		"There are no relationships stored in Panda for looking "
					<< 		"up TaxIDs from taxonomical names or GIs.\n\t"
					<<		"Have you run Panda_load_ncbi_taxonomy_tree.pl?\n\n";


	return true;
}




//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

//		add_extra_accessions_to_panda

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
//	Inserts the newly-found accession lines into Panda's 'accessions' table
//	via the COPY protocol.  Accessions are read back from the FASTA file by
//	stored (pos, len) offsets; entries that fail to parse get their len set
//	to 0 so the caller can skip them when adding secondary IDs.
//	Indices on the table are dropped/recreated around large inserts.
//	Returns false on any database error or if no accession parsed at all.
bool add_extra_accessions_to_panda(t_pg_cursor& 				pg_db,
								   deque<t_id_pos>&				acc_seqid_pos_extra,
								   const vector<t_hash_id_pos>&	seq_seqhash_pi_pos_new,
								   t_id							next_accid,
								   const string& 				current_time,
								   t_program_args& 				args,
								   t_program_results& 			results)
{
	// clear end of file condition and start from beginning
	args.fasta_db.clear();
	args.fasta_db.seekg(0);
	
	
	// no data
	if (!acc_seqid_pos_extra.size())
		return true;
	
	// sort by file position to avoid disk thrashing
	assert(stlhelper::is_sorted(beg_to_end(acc_seqid_pos_extra),
											op_cmp_filepos<t_id_pos>()));
	
	//____________________________________________________________________________________
	// 		drop indices if necessary
	//____________________________________________________________________________________
	// removing indices will speed up large inserts
	bool recreate_indices = acc_seqid_pos_extra.size() > THRESHOLD_RECREATE_INDEX &&
																	args.recreate_indices;
	vector<string> SQL_commands_recreating_indices;
	if (recreate_indices)
	{
		args.VERBOSE() << "\tRemoving indices for the 'accessions' table..."
																		<< std::endl;
		if(!DropIndices(pg_db, "accessions",
						SQL_commands_recreating_indices, args.CERR().strm()))
			return false;
	}
	

	assert(stlhelper::is_sorted(beg_to_end(seq_seqhash_pi_pos_new),
												op_cmp_filepos<t_hash_id_pos>()));
	//____________________________________________________________________________________
	// 		Importing new accessions into PostgreSQL.
	//____________________________________________________________________________________
	{
		//
		// retrieve tax name or gi to taxid lookups stored in panda
		// (skipped when a fixed default taxid is supplied on the command line)
		//
		vector<t_digest_taxid> hash_to_taxid;
		vector<t_gi_taxid>	gi_to_taxid;
		if (!args.default_taxid.length() &&
			!retrieve_tax_data_from_panda(pg_db, hash_to_taxid, gi_to_taxid, args, results))
			return false;
		
		Current_operation op(TIME_PG_ADD_ACC, args.VERBOSE(), results);
		//                   ^^^^^^^^^^^^^^^
		
		dual_streams CERR = args.CERR();
		t_pg_copy_guard copy_buffer (pg_db, "accessions", CERR.strm());
		if (!copy_buffer)
			return false;
		

		string accession;
		vector<string> non_conforming_lines;
		unsigned valid_acc_count = 0;
		// NOTE(review): 'dots' is incremented both by the outer progress loop
		// and by the inner element loop, and the inner loop indexes via
		// dots.value() while also counting i -- this looks like a merge
		// artifact; confirm t_progress_indicator's intended iteration pattern.
		for (t_progress_indicator dots(args.VERBOSE(), acc_seqid_pos_extra.size() / 50 + 1,
									acc_seqid_pos_extra.size());
			  				dots.iterating(); ++dots)
		for(unsigned i = 0; i < acc_seqid_pos_extra.size(); ++i, ++dots)
		{
			// re-read the accession text from the FASTA file by offset/length
			read_text_from_stream(	args.fasta_db,
									acc_seqid_pos_extra[dots.value()].pos,
									acc_seqid_pos_extra[dots.value()].len,
									accession);
			
			t_id seq_index = acc_seqid_pos_extra[dots.value()].id;
			
			// NOTE(review): next_accid is consumed (post-incremented) even
			// when the write fails, leaving gaps in accids -- verify that
			// downstream accid arithmetic accounts for this.
			if (!write_accession_data_for_pgcopy(copy_buffer,
												accession,
												args.db_origin,
												args.regex_str,
												hash_to_taxid,
												gi_to_taxid,
												args.default_taxid,
												seq_seqhash_pi_pos_new[seq_index],
												next_accid++,
												current_time,
												non_conforming_lines,
												results))
			{
				// don't process secondary ids for this accession either
				acc_seqid_pos_extra[dots.value()].len = 0;
				continue;
			}
			valid_acc_count++;
			copy_buffer.cached_write();
		}
		results.counts[CNT_ACC_ADDITIONAL]	= valid_acc_count;
		if (!copy_buffer.end_copy())
			return false;
		
		//________________________________________________________________________________
		// 		Issue warnings if parsing errors
		//________________________________________________________________________________
		warn_if_errors_parsing_accessions(non_conforming_lines, args, results);
		if (results.counts[CNT_ACC_ADDITIONAL] == 0)
		{
			args.CERR().strm()
					<<	"\nError!!\n\t"
					// grammar fixed: message previously read "were founded"
					<< 		"No valid accession lines were found.\n\t"
					<<		"Please check the regular expression used to match "
							"accession codes.\n\n";
			return false;

		}
	}
	
	
	//____________________________________________________________________________________
	// 		PostgreSQL recreating indices
	//____________________________________________________________________________________
	if (recreate_indices)
	{
		Current_operation op(TIME_PG_RECREATE_ACC_INDICES, args.VERBOSE(), results);
		//                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
		// error message fixed: this recreates the 'accessions' indices,
		// not the 'sequences' ones (copy-paste slip)
		if(!ExecuteSQL(pg_db, "Could not recreate indices for the accessions table",
						SQL_commands_recreating_indices, args.CERR().strm()))
			return false;
	}
	
	return true;
	
}


//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

//	add_extra_secondary_ids_to_panda

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
//	Inserts the secondary IDs parsed from each new accession line into
//	Panda's 'secondary_ids' table via the COPY protocol.  Each secondary
//	ID row links back to its sequence (pi) and accession (accid).
//	Indices on the table are dropped/recreated around large inserts.
//	Returns false on any database error.
bool add_extra_secondary_ids_to_panda(
									t_pg_cursor& 		 		 pg_db,
									const deque<t_id_pos>&		 acc_seqid_pos_extra,
									const vector<t_hash_id_pos>& seq_seqhash_pi_pos_new,
									t_id						 next_accid,
									t_program_args& 			 args,
									t_program_results& 			 results)

{
	// clear end of file condition and start from beginning
	args.fasta_db.clear();
	args.fasta_db.seekg(0);
	
	// no data
	if (!acc_seqid_pos_extra.size())
		return true;
	
	
	// both inputs must be sorted by file position (disk-read order)
	assert(stlhelper::is_sorted(beg_to_end(acc_seqid_pos_extra),
												op_cmp_filepos<t_id_pos>()));
	assert(stlhelper::is_sorted(beg_to_end(seq_seqhash_pi_pos_new),
												op_cmp_filepos<t_hash_id_pos>()));

	//____________________________________________________________________________________
	// 		drop indices if necessary
	//____________________________________________________________________________________
	// removing indices will speed up large inserts
	bool recreate_indices = acc_seqid_pos_extra.size() > THRESHOLD_RECREATE_INDEX &&
																	args.recreate_indices;
	vector<string> SQL_commands_recreating_indices;
	if (recreate_indices)
	{
		args.VERBOSE() << "\tRemoving indices for the 'secondary IDs' table..."
																		<< std::endl;
		if(!DropIndices(pg_db, "secondary_ids",
						SQL_commands_recreating_indices, args.CERR().strm()))
			return false;
	}
	

	//____________________________________________________________________________________
	// 		Importing new secondary IDs into PostgreSQL.
	//____________________________________________________________________________________
	{
		Current_operation op(TIME_PG_ADD_SECIDS, args.VERBOSE(), results);
		//                   ^^^^^^^^^^^^^^^^^^

		dual_streams CERR = args.CERR();
		t_pg_copy_guard copy_buffer (pg_db, "secondary_ids", CERR.strm());
		if (!copy_buffer)
			return false;
		
		
		vector<string> non_conforming_lines;

		unsigned cnt_secids_extra = 0;
		string accession;
		t_progress_indicator dots(args.VERBOSE(), acc_seqid_pos_extra.size() / 50 + 1);
		for(unsigned i = 0; i < acc_seqid_pos_extra.size(); ++i, ++dots)
		{
			// re-read the accession text from the FASTA file by offset/length
			read_text_from_stream(	args.fasta_db,
									acc_seqid_pos_extra[i].pos,
									acc_seqid_pos_extra[i].len,
									accession);
			
			vector<string> vec_secids;
			if (!parse_secIDs(accession, vec_secids))
			{
				// remember (at most 20) offending lines for the warning below
				if (non_conforming_lines.size() < 20)
					non_conforming_lines.push_back(accession);
			}
	
			// pi and accid of linked sequence; the "\t0\t" fields are the
			// oid placeholders expected by the COPY format
			// NOTE(review): accid is computed as next_accid + i, which
			// assumes the i-th surviving accession was assigned the i-th
			// accid during add_extra_accessions_to_panda -- verify this
			// still holds when some accessions were rejected there.
			string pi_accid;
			ultoa(seq_seqhash_pi_pos_new[acc_seqid_pos_extra[i].id].id,
															pi_accid)	+=	"\t0\t";
			ultoa(next_accid + i, pi_accid)								+=	"\t0\t";

			for (unsigned j = 0; j < vec_secids.size(); ++j, cnt_secids_extra++)
			{
				
				// pi and accid of linked sequence
				copy_buffer.buf += pi_accid;
					
				// db_code secondary_id
				copy_buffer.buf += vec_secids[j] + "\t";
	
				// secid_db_origin
				copy_buffer.buf += args.db_origin + '\n';
					
				copy_buffer.cached_write();
			}
		}
		results.counts[CNT_SECIDS_ADDITIONAL]		= cnt_secids_extra;
		if (!copy_buffer.end_copy())
			return false;

		// print out non-conforming lines
		if (non_conforming_lines.size())
		{
			args.CERR().strm()<<"\nWARNING!!\n\t"
									"Some secondary IDs on the following accession "
									"lines were too long:\n";
			for (unsigned i = 0; i < non_conforming_lines.size(); ++i)
				args.CERR().strm()	<< "\t[" << non_conforming_lines[i].substr(0,60)
									<< "]\n";
			args.CERR().strm() << "\n";
		}
	}
	
	
	//____________________________________________________________________________________
	// 		PostgreSQL recreating indices
	//____________________________________________________________________________________
	if (recreate_indices)
	{
		Current_operation op(TIME_PG_RECREATE_SECIDS_INDICES, args.VERBOSE(), results);
		//                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
		// error message fixed: this recreates the 'secondary_ids' indices,
		// not the 'sequences' ones (copy-paste slip)
		if(!ExecuteSQL(pg_db, "Could not recreate indices for the secondary IDs table",
						SQL_commands_recreating_indices, args.CERR().strm()))
			return false;
	}
	
	return true;
}



//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
//		update_panda_accessions

//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
//	Top-level driver for synchronising the 'accessions' (and 'secondary_ids')
//	tables with the FASTA file:
//	  1. count accession/sequence lines (fails on DOS line endings or empty file)
//	  2. fetch current accession hashes from Panda
//	  3. parse the FASTA file -> new/extra/obsolete accessions
//	  4. delete obsolete accessions
//	  5. detect duplicate sequence hashes, map hashes to PIs, add PIs for
//	     novel sequences
//	  6. reserve an accid range via the s_accid sequence, then COPY in the
//	     new accessions and their secondary IDs
//	Outputs: seq_seqhash_pi_pos_new (sorted by file pos on return from the
//	pi-assignment step) and all_pis.  Returns false on the first failure.
bool update_panda_accessions(	
								// [in]
								t_pg_cursor& 				pg_db,
								const string&				current_time,
								// [out]
								vector<t_hash_id_pos>&		seq_seqhash_pi_pos_new,
								deque<t_id>&				all_pis,
								t_program_args& 			args,
								t_program_results& 			results)
{

	//____________________________________________________________________________________
	// 		Counting accession lines ('>'s) from the FASTA file.
	//____________________________________________________________________________________
	unsigned seq_cnt_estimate, acc_cnt_estimate;
	bool is_file_unix;
	boost::tie(acc_cnt_estimate, seq_cnt_estimate, is_file_unix) =
								count_accession_lines(args.fasta_db, args, results);
	
	// DOS/Windows line endings would corrupt the stored (pos, len) offsets
	if (!is_file_unix)
	{
		args.CERR().strm()	<<	"WARNING!!\n\tThe file " << args.fasta_path << "] "
								"includes 'Windows/DOS' end of lines";
		return false;
	}
	if (seq_cnt_estimate == 0 || acc_cnt_estimate == 0)
	{
		args.CERR().strm() << "ERROR!!\n\tNo sequences could be read from the file ["
									 << args.fasta_path <<"].\n\n";
		return false;
	}
	args.VERBOSE() << "\t" << seq_cnt_estimate	<< " sequences in the fasta file.\n";
	args.VERBOSE() << "\t" << acc_cnt_estimate	<< " accessions in the fasta file.\n";


	//____________________________________________________________________________________
	// 		Retrieving current accession hashes from postgres
	// 					sorted by hash
	//____________________________________________________________________________________
	vector<t_hash_id> acc_acchash_accid_current;
	if (!retrieve_accession_hashes_from_panda(pg_db, acc_acchash_accid_current, acc_cnt_estimate,
																		args, results))
		return false;

	//____________________________________________________________________________________
	// 		Parsing accessions from the FASTA file.
	//____________________________________________________________________________________
	vector<t_id>				acc_accid_obsolete;		// not really sorted
	deque<t_id_pos>				acc_seqid_pos_extra;	// sorted by file pos
							//	seq_seqhash_pi_pos_new 	// sorted by file pos			
	if (!parse_sequences_in_fasta(	pg_db,
									acc_acchash_accid_current,
									seq_cnt_estimate,
									acc_seqid_pos_extra,
									seq_seqhash_pi_pos_new,
									acc_accid_obsolete,
									args,
									results))
		return false;
		

	//____________________________________________________________________________________
	//	remove obsolete accessions
	//____________________________________________________________________________________
	if (!remove_obsolete_accessions_from_panda(pg_db, acc_accid_obsolete, args, results))
		return false;
 	vector_release_mem(acc_accid_obsolete);


	//____________________________________________________________________________________
	//	count hash duplicates
	//____________________________________________________________________________________
	// sort seq_seqhash_pi_pos_new by hash
	vector<t_hash_id> duplicate_pis;
	{
		sort(beg_to_end(seq_seqhash_pi_pos_new));
		// adjacent_find on the sorted range picks out every run of equal
		// hashes; one representative per adjacent pair is recorded
		vector<t_hash_id_pos>::iterator i	= seq_seqhash_pi_pos_new.begin();
		vector<t_hash_id_pos>::iterator end = seq_seqhash_pi_pos_new.end();
		while (i != end)
		{
			i = adjacent_find(i, end);
			if (i != end)
			{
				duplicate_pis.push_back(*i);
				++i;
			}
		}
		results.counts[CNT_FASTA_PI_DUPLICATES] = duplicate_pis.size();
		remove_duplicates(duplicate_pis);
	}


	//____________________________________________________________________________________
	//	convert seqhashes to pi
	//____________________________________________________________________________________
	if (!convert_seqhash_to_pi_from_panda(	pg_db,
											seq_seqhash_pi_pos_new,
											all_pis,
											args,
											results))
		return false;
	
	
	//____________________________________________________________________________________
	//	add pis for novel sequences
	//____________________________________________________________________________________
	// sequences are added in order of increasing pis.
	// This should help performance indexed on pi
	sort(beg_to_end(seq_seqhash_pi_pos_new), op_cmp_filepos<t_hash_id_pos>());
	if (!add_pi_for_novel_sequences(	pg_db,
										duplicate_pis,
										seq_seqhash_pi_pos_new,
										all_pis,
										args,
										results))
		return false;
	vector_release_mem(duplicate_pis);


	// reserve a contiguous block of accids from the s_accid sequence so the
	// COPY below can assign accids without round-tripping to the database
	t_id current_accid;
	if (!get_pgsql_sequence_currval(pg_db, "s_accid", current_accid, args.CERR().strm()))
		return false;
	if (!set_pgsql_sequence_currval(pg_db, "s_accid", 	
									current_accid + acc_seqid_pos_extra.size(),
									args.CERR().strm()))
		return false;
	
	//____________________________________________________________________________________
	//	input accessions and secondary ID data into panda
	//____________________________________________________________________________________
	if (!add_extra_accessions_to_panda(pg_db, acc_seqid_pos_extra, seq_seqhash_pi_pos_new,
                                       current_accid + 1, current_time, args, results))
		return false;
	
	// remove invalid accessions without acc codes/taxids
	// (add_extra_accessions_to_panda marked them by setting len to 0)
	acc_seqid_pos_extra.erase(	remove_if(	beg_to_end(acc_seqid_pos_extra),
                                            op_invalid_len()	
										 ),
								acc_seqid_pos_extra.end());
	
	
	if (!add_extra_secondary_ids_to_panda(	pg_db,
											acc_seqid_pos_extra,
											seq_seqhash_pi_pos_new,
											current_accid + 1,
											args, results))
		return false;

	
	return true;

}



//SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS

//	disregard_pis_already_in_panda

//SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
void disregard_pis_already_in_panda(vector<t_hash_id_pos>&		seq_seqhash_pi_pos_new,
									vector<t_hash_id_pos>&		seq_seqhash_pi_pos_extra)
{
	// Drop every entry whose stored length is zero (flagged upstream as
	// already present in Panda), hand the survivors to the "extra" vector,
	// and then release the storage of the source vector.
	vector<t_hash_id_pos>::iterator keep_end =
			remove_if(	beg_to_end(seq_seqhash_pi_pos_new),
						op_zero_len());
	seq_seqhash_pi_pos_new.erase(keep_end, seq_seqhash_pi_pos_new.end());

	// surviving entries are exactly the sequences still to be imported
	seq_seqhash_pi_pos_extra = seq_seqhash_pi_pos_new;

	// the source vector is no longer needed -- give its memory back
	vector_release_mem(seq_seqhash_pi_pos_new);
}



//SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS

//		add_extra_sequences_to_panda

//SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
//	Inserts the novel sequences into Panda's 'sequences' table via the COPY
//	protocol.  Sequence text is re-read from the FASTA file by stored
//	(pos, len) offsets, in file order to minimise seeking.  Indices on the
//	table are dropped/recreated around large inserts.
//	Returns false on any database error; an empty input is a no-op success.
bool add_extra_sequences_to_panda(	t_pg_cursor& 				pg_db,
									vector<t_hash_id_pos>&		seq_seqhash_pi_pos_extra,
									const string&				current_time,
									t_program_args& 			args,
									t_program_results& 			results)
{
	// clear any end-of-file condition from earlier passes over the file
	args.fasta_db.clear();
	
	
	// no data
	if (!seq_seqhash_pi_pos_extra.size())
		return true;
	results.counts[CNT_SEQ_ADDITIONAL]	= seq_seqhash_pi_pos_extra.size();
	
	// sort by file position to avoid disk thrashing
	sort(beg_to_end(seq_seqhash_pi_pos_extra), 	op_cmp_filepos<t_hash_id_pos>());

	//____________________________________________________________________________________
	// 		drop indices if necessary
	//____________________________________________________________________________________
	// removing indices will speed up large inserts
	bool recreate_indices = seq_seqhash_pi_pos_extra.size() > THRESHOLD_RECREATE_INDEX &&
																	args.recreate_indices;
	vector<string> SQL_commands_recreating_indices;
	if (recreate_indices)
	{
		args.VERBOSE() << "\tRemoving indices for the 'sequences' table..."
																		<< std::endl;
		if(!DropIndices(pg_db, "sequences",
						SQL_commands_recreating_indices, args.CERR().strm()))
			return false;
	}
	
	

	//____________________________________________________________________________________
	// 		Importing new sequences into PostgreSQL.
	//____________________________________________________________________________________
	{
		Current_operation op(TIME_PG_ADD_SEQ, args.VERBOSE(), results);
		//                   ^^^^^^^^^^^^^^^
		
		dual_streams CERR = args.CERR();
		t_pg_copy_guard copy_buffer (pg_db, "sequences", CERR.strm());
		if (!copy_buffer)
			return false;
		
		t_progress_indicator dots(args.VERBOSE(), seq_seqhash_pi_pos_extra.size() / 50 + 1);
		for(unsigned i = 0; i < seq_seqhash_pi_pos_extra.size(); ++i, ++dots)
		{
			string sequence;
			read_text_from_stream(	args.fasta_db,
									seq_seqhash_pi_pos_extra[i].pos,
									seq_seqhash_pi_pos_extra[i].len,
									sequence);

		

			// remove extraneous characters including newlines
			prepare_sequence(sequence);
			
			// pi  (the "\t0\t" is the oid placeholder field of the COPY row)
			ultoa(seq_seqhash_pi_pos_extra[i].id, copy_buffer.buf)	+=	"\t0\t";
			
			// length
			ultoa(sequence.length(), copy_buffer.buf)				+=	"\t";

			// message digest
			// (presumably appends the hash via a t_hash_id_pos-to-string
			// operator -- TODO confirm in the type's definition)
			copy_buffer.buf += seq_seqhash_pi_pos_extra[i];
			copy_buffer.buf											+=	"\t";

			// sequence
			copy_buffer.buf += sequence								+	"\t";
			
			// date added
			copy_buffer.buf += current_time							+	"\n";
			copy_buffer.cached_write();
		}
		if (!copy_buffer.end_copy())
			return false;
	}
	
	//____________________________________________________________________________________
	// 		PostgreSQL recreating indices
	//____________________________________________________________________________________
	if (recreate_indices)
	{
		Current_operation op(TIME_PG_RECREATE_SEQ_INDICES, args.VERBOSE(), results);
		//                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
		if(!ExecuteSQL(pg_db, "Could not recreate indices for the sequences table",
						SQL_commands_recreating_indices, args.CERR().strm()))
			return false;
	}

	return true;
}


//SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
//		add_extra_PIs_pending_HMM_to_panda

//SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
//	Appends the PIs of newly-imported sequences to Panda's 'new_sequences'
//	table (the queue of sequences still awaiting HMM processing), skipping
//	any PI already queued.  PIs_pending_new is sorted in place.
//	Returns false on any database error; empty input is a no-op success.
bool add_extra_PIs_pending_HMM_to_panda(t_pg_cursor& 		pg_db,
										deque<t_pi>&		PIs_pending_new,
										t_program_args& 	args,
										t_program_results& 	results)
{
	if (!PIs_pending_new.size())
		return true;

	//____________________________________________________________________________________
	// 		Retrieve the current list of unprocessed novel sequences from Panda
	//____________________________________________________________________________________
	deque<t_pi> PIs_pending_current;
	{
		Current_operation op(TIME_PG_EXPORT_NEW_SEQ_PI, args.VERBOSE(), results);
		//					 ^^^^^^^^^^^^^^^^^^^^^^^^^
		if (!pg_import_PIs_from_table(pg_db, "new_sequences", PIs_pending_current,
										0, args.VERBOSE(),	
										args.CERR().strm()))
			return false;
	}
	// set_difference below requires both ranges sorted and duplicate-free
	assert(stlhelper::is_sorted(beg_to_end(PIs_pending_current), less<t_pi>()));
	assert(stlhelper::is_unique(beg_to_end(PIs_pending_current), equal_to<t_pi>()));
	

	//____________________________________________________________________________________
	// 		Add new sequences to the list of unprocessed novel sequences in Panda
	//____________________________________________________________________________________
	{
		Current_operation op(TIME_PG_ADD_NEW_SEQ_PI, args.VERBOSE(), results);
		//					 ^^^^^^^^^^^^^^^^^^^^^^
		deque<t_pi> PIs_pending_extra;
		sort(beg_to_end(PIs_pending_new));
		assert(stlhelper::is_unique(beg_to_end(PIs_pending_new)));
		// only export PIs not already queued
		set_difference(	beg_to_end(PIs_pending_new),
						beg_to_end(PIs_pending_current),
						back_inserter(PIs_pending_extra));
		if (!pg_export_PIs_to_table  (pg_db, "new_sequences", PIs_pending_extra,
										args.VERBOSE(),	args.CERR().strm()))
			return false;
	}

	return true;
}


//SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
//		remove_obsolete_sequences

//SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
//	Deletes sequences that no accession references any longer:
//	  1. export the PIs of all accessions from Panda
//	  2. set-difference against seq_pi_all (all extant sequence PIs) to
//	     find sequence PIs with no accession ('obsolete') and, conversely,
//	     accession PIs with no sequence (reported as a warning only)
//	  3. remove the obsolete PIs both from the 'sequences' table and from
//	     seq_pi_all (updated in place for the caller)
//	Returns false on any database error.
bool remove_obsolete_sequences(	t_pg_cursor& 			pg_db,
								deque<t_pi>&			seq_pi_all,
								t_program_args& 		args,
								t_program_results& 		results)
{
	deque<t_pi>			acc_pi_all;
	args.VERBOSE() << "\tRemoving obsolete sequences...\n";
	//____________________________________________________________________________________
	// 		Exporting PIs of all accessions from PostgreSQL.
	//____________________________________________________________________________________
	{
		Current_operation op(TIME_PG_EXPORT_ACC_PI, args.VERBOSE(), results);
		//					 ^^^^^^^^^^^^^^^^^^^^^
		if (!pg_import_PIs_from_table(pg_db, "accessions", acc_pi_all,
										0,
										args.VERBOSE(),	args.CERR().strm()))
			return false;
	}

	//____________________________________________________________________________________
	// 		Comparing accession and sequence PIs.
	//				Get sequence PIs that are not in accessions
	//				i.e. do a set difference
	//____________________________________________________________________________________
	deque<t_pi> seq_pi_obsolete;
	
	{
		Current_operation op(TIME_COMPARE_ACC_SEQ_PI, args.VERBOSE(), results);
		//					 ^^^^^^^^^^^^^^^^^^^^^^^
		// set_mark_diff requires sorted, unique input ranges
		assert(stlhelper::is_sorted(beg_to_end(acc_pi_all)));
		assert(stlhelper::is_unique(beg_to_end(acc_pi_all)));

		assert(stlhelper::is_sorted(beg_to_end(seq_pi_all)));
		
/*	DEBUG
		std::cerr << string (60, '*') << "\n";
		for (unsigned i = 0; i < seq_pi_all.size(); ++i)
			std::cerr<< seq_pi_all[i] << "\n";
		std::cerr << string (60, '*') << "\n";
		for (unsigned i = 0; i < acc_pi_all.size(); ++i)
			std::cerr<< acc_pi_all[i] << "\n";
		std::cerr << string (60, '*') << "\n";
*/

		// one pass produces both differences:
		//   seq-only PIs  -> seq_pi_obsolete (sequences to delete)
		//   acc-only PIs  -> acc_pi_missing  (dangling accessions, warn only)
		deque<t_pi> acc_pi_missing;
		set_mark_diff(	beg_to_end(seq_pi_all),
						beg_to_end(acc_pi_all),
						less<t_pi>(),
						op_save_in_deque<t_pi>(seq_pi_obsolete),
						op_save_in_deque<t_pi>(acc_pi_missing));


		// print out diagnostic if any accessions with invalid PIs
		if (acc_pi_missing.size())
		{
			args.CERR().strm()
						<< "\nWARNING!! "
						<< acc_pi_missing.size()
						// typo fixed: message previously read "accesssions"
						<< " accessions with PIs not from any sequence!!\n\n";
		}

		// remove obsolete pis from list of all extant pis
		deque<t_pi> seq_pi_all_sans_obsolete;

		set_difference(	beg_to_end(seq_pi_all),
						beg_to_end(seq_pi_obsolete),
						back_inserter(seq_pi_all_sans_obsolete));
		seq_pi_all_sans_obsolete.swap(seq_pi_all);


		// count orphans	
		results.counts[CNT_SEQ_OBSOLETE] = seq_pi_obsolete.size();
	}
	

	//____________________________________________________________________________________
	// 		PostgreSQL removing orphaned sequences.
	//____________________________________________________________________________________
	{
		Current_operation op(TIME_PG_REMOVE_ORPHANED_SEQ, args.VERBOSE(), results);
		//					 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
		// transfer PIs of orphaned sequences to file
		if (!pg_remove_PIs_from_table(pg_db,
									  "sequences",
									  seq_pi_obsolete,
									  args.VERBOSE(),
									  args.CERR().strm(),
									  "len,seq_hash,sequence,seq_date_added"))
			return false;
			
	}

	return true;

}


//SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
//		update_panda_sequences

//SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
//	Top-level driver for synchronising the 'sequences' table with the FASTA
//	file:
//	  1. de-duplicate seq_seqhash_pi_pos_new by id, split off the entries
//	     not yet stored in Panda ('extra')
//	  2. COPY the extra sequences into the 'sequences' table
//	  3. queue their PIs in the pending-HMM table and merge them into all_pis
//	  4. delete sequences no accession references (also pruned from all_pis)
//	  5. optionally (args.orphans) prune PI-dependent tables of orphans
//	Returns false on the first failure.
bool update_panda_sequences(t_pg_cursor&	 			pg_db,
							vector<t_hash_id_pos>&		seq_seqhash_pi_pos_new,
							deque<t_id>&				all_pis,
							const string&				current_time,
							t_program_args& 			args,
							t_program_results& 			results)
{
	// collapse entries sharing the same id before deciding what is new
	sort(beg_to_end(seq_seqhash_pi_pos_new), op_cmp_id<t_hash_id_pos>());
	
	remove_duplicates(seq_seqhash_pi_pos_new, op_eq_id<t_hash_id_pos>());
	
	vector<t_hash_id_pos>	seq_seqhash_pi_pos_extra;
	disregard_pis_already_in_panda(seq_seqhash_pi_pos_new, seq_seqhash_pi_pos_extra);
	assert(stlhelper::is_sorted(beg_to_end(seq_seqhash_pi_pos_new),
							op_cmp_filepos<t_hash_id_pos>()));


	//____________________________________________________________________________________
	// 		Get list of all current and novel PIs
	//____________________________________________________________________________________
	deque<t_id> extra_pis;
	for (unsigned i = 0; i < seq_seqhash_pi_pos_extra.size(); ++i)
		extra_pis.push_back(seq_seqhash_pi_pos_extra[i].id);

	

	//____________________________________________________________________________________
	//	input sequences data into panda
	//____________________________________________________________________________________
	if (!add_extra_sequences_to_panda(pg_db, seq_seqhash_pi_pos_extra, current_time,
											args, results))
		return false;
	
	//____________________________________________________________________________________
	//	add newly added PIs to pending HMM table in panda
	//____________________________________________________________________________________
	if (!add_extra_PIs_pending_HMM_to_panda(pg_db, extra_pis, args, results))
		return false;
	// merge the new PIs into the full PI list (kept sorted and unique
	// for the set operations in remove_obsolete_sequences)
	std::copy(beg_to_end(extra_pis), back_inserter(all_pis));
	sort(beg_to_end(all_pis));
	remove_duplicates(all_pis);
	deque_release_mem(extra_pis);
	
	//____________________________________________________________________________________
	// 		PostgreSQL removing orphaned sequences.
	//____________________________________________________________________________________
	// obsolete pis will also be removed from all_pis
	if (!remove_obsolete_sequences(pg_db, all_pis, args, results))
		return false;
	
	//____________________________________________________________________________________
	// 		Synching tables dependent on PIs
	//____________________________________________________________________________________
	{
		Current_operation op(TIME_PG_SYNC_PI_DEPENDENTS, args.IGNORE(), results);
		args.VERBOSE() << "    " << Results_Times_Descriptions[TIME_PG_SYNC_PI_DEPENDENTS]
					   << ".." << std::endl;
		// for each dependent table remove orphans with no corresponding sequences
		if (args.orphans &&
			!synchronize_sequence_dependent_tables(pg_db, all_pis, args.VERBOSE(),
														args.CERR().strm()))
			return false;
	
	}
	
	return true;
}




//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

//		Main

//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
int main(int argc, char* argv[])
{
	t_portable_timer timer;
	
	// don't use C streams
	std::ios::sync_with_stdio(false);

	// saving parameters
	string exe_parameters;
	for (int i = 0; i < argc; ++i)
		exe_parameters += string(argv[i]) + ' ';

	// we use cout to print progress so flush per call
	cout.setf(std::ios_base::unitbuf);

	// get command line arguments
	t_program_args args;
	if (!GetArguments(argc, argv, args))
		return 1;
	assert(args.db_origin.length() > 0 && args.db_origin.length() <= 5);

	// write results to the error log automatically on failure / errors
	t_program_results results;
	dual_streams CLOG = args.CLOG();
	t_auto_log_results auto_log_results(results, CLOG.strm());

	args.CLOG().strm() << "    " << string(79, '*') << "\n";
	args.CLOG().strm() << "\n    Updating the database from the FASTA file "
					   << args.fasta_name << "\n";
		
		
	//____________________________________________________________________________________
	// 		Connect to PostgreSQL
	//____________________________________________________________________________________
	t_pg_cursor pg_db(args.db_login, "panda_cursor");	//"dbname=test user=postgres"
	if ( pg_db.ConnectionBad() )
	{
		args.CERR().strm()	<< "\nERROR!! Connection to database ["
							<< args.db_login << "] failed.\n"
							<< pg_db.ErrorMessage() << "\n\n";
		return 1;
	}
	

	PrepareSQLCommands(pg_db, args.db_origin);

	//____________________________________________________________________________________
	// 		Open fasta file stream
	//____________________________________________________________________________________
	args.fasta_db.open(args.fasta_path.c_str(), std::ios::binary| std::ios::in);

	if (!args.fasta_db)
	{
		args.CERR().strm() << "ERROR!!\n\tCould not open the database file ["
									 << args.fasta_path <<"].\n\n";
		return 1;
	}


	
	//____________________________________________________________________________________
	// 		get current time stamp in binary from postgres
	//____________________________________________________________________________________
	string SQL_command = "SELECT CURRENT_TIMESTAMP";
	if (!pg_db.ExecTuplesOk(SQL_command.c_str()))
	{
		pg_print_sql_err("retrieve the current time from postgreSQL",
							pg_db, SQL_command, args.CERR().strm());
		return 1;
	}
	string current_time = pg_db.GetValue(0, 0);

	
	//____________________________________________________________________________________
	// 		process added and deleted accessions
	//____________________________________________________________________________________
	args.VERBOSE() << "    Updating accessions...\n";
	// parse sequence pi len for accessions update and use in sequences update
	vector<t_hash_id_pos>	seq_seqhash_pi_pos_new;
	deque<t_id>				all_pis;
	if(!update_panda_accessions(pg_db, current_time,
									seq_seqhash_pi_pos_new, all_pis, args, results))
		return 1;

	//____________________________________________________________________________________
	// 		process added and obsoleted sequences
	//____________________________________________________________________________________
	args.VERBOSE() << "    Updating sequences...\n";
	if (!update_panda_sequences(pg_db, seq_seqhash_pi_pos_new, all_pis,
								current_time, args, results))
		return 1;



	
	

	//____________________________________________________________________________________
	// 		PostgreSQL re-collecting database statistics.
	// 			commit transaction and reanalyse database
	//____________________________________________________________________________________
	{
		Current_operation op(TIME_PG_VACUUM, args.IGNORE(), results);
		args.VERBOSE()	<< "    " << Results_Times_Descriptions[TIME_PG_VACUUM]
						<< ".." << std::endl;
		CompleteSQL(pg_db, args.vacuum, args.CERR().strm());
	}
	

	
	//____________________________________________________________________________________
	// 		save results / timing statistics
	//____________________________________________________________________________________
	//
	{
		results.times[TIME_TOTAL] = timer.reset();
		
		string SQL_command = "INSERT INTO updates_log VALUES('"
								+ args.fasta_name	+ "',"
								+ results.GetCSV()			+ ",'"
								+ exe_parameters			+ "')";
		if( !pg_db.ExecCommandOk(SQL_command.c_str()))
			pg_print_sql_err("log update statistics and timings into PostgreSQL",
								pg_db, SQL_command, args.CERR().strm());
	}

	
	// succeeded: no need to write results to error log
	// print results / timing statistics only if verbose
	auto_log_results.abandon_log();
	args.VERBOSE() << "\n";
	args.VERBOSE() << "    " << string(60, '*') << "\n";
	args.VERBOSE() << results;
	args.VERBOSE() << "    " << string(60, '*') << "\n";
		
	args.CLOG().strm() << "Finished processing " << args.fasta_name << "\n";

	return 0;
}




