#include <vector>
	using std::vector;
#include <string>
	using std::string;
#include <stdexcept>
	using std::runtime_error;
#include <iostream>
	using std::cerr;
	using std::ostream;
	using std::istream;
#include <iterator>
	using std::back_inserter;
#include <algorithm>
	using std::max;
	using std::min;
#include <map>
	using std::map;
//
//  Boost headers
// 
#include <boost/foreach.hpp>

#include <boost/range/algorithm/copy.hpp>
	using boost::range::copy;
#include <boost/spirit/include/qi.hpp>
	namespace qi = boost::spirit::qi;
#include <boost/format.hpp>
	using boost::format;
#include <boost/range/algorithm_ext/push_back.hpp>
    using boost::push_back;

#include <boost/tr1/unordered_map.hpp>
	using std::tr1::unordered_map;


#include <boost/spirit/include/phoenix_core.hpp>
#include <boost/spirit/home/phoenix/core/argument.hpp>
#include <boost/spirit/include/phoenix_operator.hpp>
#include <boost/spirit/home/phoenix/object/static_cast.hpp>
	using boost::phoenix::static_cast_;
#include <boost/spirit/home/phoenix/algorithm.hpp>
namespace phoenix = boost::phoenix;
#include "qi_parse_error_handler.h"

//
//  TBB headers
// 
#include "tbb/concurrent_queue.h"
    // tbb::concurrent_queue
#include "tbb/cache_aligned_allocator.h"
    // tbb::cache_aligned_allocator
#include "tbb/pipeline.h"
    // tbb::flow_control
#include "tbb/task_scheduler_init.h"
    // tbb::task_scheduler_init


//
//  Project headers
// 
#include "lines_buffer.h"
#include "recyclable_buffers.h"

//
//  My common headers
// 
#include <progress_indicator.h>

//
//  tr1 headers
// 
#include <boost/tr1/tuple.hpp>
	using std::tr1::tuple;
	using std::tr1::make_tuple;
	using std::tr1::get;
	

#include "intervals_per_contig.h"
	using intervals_per_contig::t_interval;
	using intervals_per_contig::t_intervals;
	using intervals_per_contig::t_intervals_per_contig;
	using intervals_per_contig::t_vec_intervals;
	using intervals_per_contig::t_map_str_intervals;
	using intervals_per_contig::t_interval_search_cache;

#include "tbb_processor.h"


//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
// 
//      grammar
// 
//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

// Field indices into t_parsed_snp_data (use with tr1::get<>)
enum ePARSED_SNP_DATA {eCONTIG, ePOS, eCATEGORY};
// One parsed input line: contig name, position, SNP category name
// (the category field is left empty when the input has no category column)
typedef tuple<string, unsigned, string> t_parsed_snp_data;

// Shared parse-error reporter; "SNP positions" labels the input kind in messages
phoenix::function<errorHandlerT> const errorHandler_body = errorHandlerT("SNP positions");


//!_______________________________________________________________________________________
//!
//!		snp_pos_grammar
//!			Spirit.Qi grammar for one SNP-position line:
//!				<chromosome> TAB <position> [ TAB <category> ]
//!			The category column is parsed only when has_category is true
//!			(chosen once at construction time, not per line).
//!			curr_line_number is held by reference so the error handler can
//!			report the caller's current line on failure.
//!_______________________________________________________________________________________
template <typename Iterator>
struct snp_pos_grammar
: qi::grammar<Iterator, t_parsed_snp_data()>
{
    snp_pos_grammar(unsigned& curr_line_number_, bool has_category)
    : snp_pos_grammar::base_type(parsed_snp_data), curr_line_number(curr_line_number_)
    {
		using namespace qi::labels;
        using qi::char_;
		using qi::lit;
        using qi::int_;

		// field rules: chromosome and category are any run of non-tab chars
		tab				=  lit('\t');
		chromosome  	%= +(char_ - '\t');
        position    	%= int_;  
		snp_category	%= +(char_ - '\t');

		// wire the start rule once, with or without the trailing category column;
		// operator> makes everything after the first field an expectation, so a
		// malformed line triggers on_error below instead of a silent backtrack
		if (has_category)
			parsed_snp_data     %=    	  		chromosome
										> tab > position
										> tab > snp_category;
		else
			parsed_snp_data     %=    	  		chromosome
										> tab > position;

        // give names for debug
        tab                 .name("TAB");
        chromosome          .name("Chromosome");
        position            .name("position");
        snp_category	   	.name("SNP Category Name");

        // on expectation failure, report location plus the caller's line number
        qi::on_error<qi::fail>                               
        (parsed_snp_data, ::errorHandler_body(qi::_1, qi::_2, qi::_3, qi::_4, phoenix::ref(curr_line_number)));
    }
    qi::rule<Iterator, std::string()>     	chromosome    	;   
    qi::rule<Iterator, int()>             	position      	;   
    qi::rule<Iterator, void()>            	tab           	;   
	qi::rule<Iterator, std::string()>     	snp_category   	;   
    qi::rule<Iterator, t_parsed_snp_data()> parsed_snp_data ; 
	unsigned&								curr_line_number;	// owned by the caller
};










//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
// 
//      Step 2
// 
//  		Lines  -> tab-separated fields
//  		fields -> contig / position / category, tallied per interval type
// 
//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
//________________________________________________________________________________________
// 
//		t_parallel_data 
//________________________________________________________________________________________
struct t_parallel_data
{
	typedef std::tr1::unordered_map<std::string, int> t_category_names_to_indices;

	unsigned						cnt_sites;
    unsigned 						cnt_line;
	t_counts						counts_per_category_per_interval_type;
	t_category_names_to_indices		category_names_to_indices;
	unsigned						cnt_interval_types;
    t_parallel_data (unsigned cnt_lines_per_chunk) : 
        cnt_sites(0), cnt_line(0), cnt_interval_types(0)
	{
	}

	//!_______________________________________________________________________________________
	//!     
	//!		get_category_name_index
	//!			look up index for category name and create new entry if necessary
	//!     
	//!		\author lg (3/3/2011)
	//!     
	//!     \param category_name 
	//!     
	//!     \return int 
	//!_______________________________________________________________________________________
	int get_category_name_index(const string& category_name)
	{
		if (category_names_to_indices.count(category_name))
			return category_names_to_indices[category_name];
		int new_index = category_names_to_indices.size();
		category_names_to_indices[category_name] = new_index;
		// also create new set of counts initialised to zero
		counts_per_category_per_interval_type.push_back(vector<unsigned>(cnt_interval_types, 0U));
		return new_index;
	}

	//!_______________________________________________________________________________________
	//!     
	//!		clear
	//!			clear memory for each iteration, possibly in another thread
	//!     
	//!		\author lg (3/3/2011)
	//!     
	//!     \param cnt_line_ 
	//!     \param cnt_interval_types_ 
	//!_______________________________________________________________________________________
	void clear(unsigned cnt_line_, unsigned cnt_interval_types_)
	{
		// 
		// Initialise cnt_interval_types here makes the code simpler
		// 
		cnt_interval_types = cnt_interval_types_;
        cnt_sites = 0;
        cnt_line = cnt_line_;
		counts_per_category_per_interval_type.clear();
		category_names_to_indices.clear();
	}
};




// recyclable buffer of t_parallel_data
// (pool of per-chunk result objects, recycled across pipeline iterations)
typedef t_recyclable_buffers<t_parallel_data, int> t_parallel_data_buffers;
//________________________________________________________________________________________
// 
//		t_parallel_calculations_processor 
//________________________________________________________________________________________
class t_parallel_calculations_processor
{
	// cumulative genome offset of each contig's start, keyed by contig name
	unordered_map<string, long long>						contig_genome_offsets;
	// 1 when input positions are 1-based, 0 when 0-based.
	// NOTE(review): this member is never applied to genome_pos in operator()
	// below -- confirm whether 1-based positions should be shifted before lookup.
	int		                   								offset_for_one_based_snps;           
	bool													intervals_only;
	const vector<intervals_per_contig::t_intervals>&		intervals;	// one interval set per type

public:
	// copy constructor: TBB copies the filter body into the pipeline
	t_parallel_calculations_processor(const t_parallel_calculations_processor& other):
				contig_genome_offsets(other.contig_genome_offsets),
				offset_for_one_based_snps(other.offset_for_one_based_snps),
				intervals_only(other.intervals_only),
				intervals(other.intervals)
		{}
	t_parallel_calculations_processor(const unordered_map<string, long long>& 				contig_genome_offsets_,
							bool												zero_based_snps_,
							bool												intervals_only_,
							const vector<intervals_per_contig::t_intervals>&	intervals_)
		:contig_genome_offsets(contig_genome_offsets_),
		offset_for_one_based_snps(zero_based_snps_ ? 0 : 1),
		intervals_only(intervals_only_),
		intervals(intervals_)
	{
	}

	//!_______________________________________________________________________________________
	//!
	//!		operator ()
	//!			Parallel pipeline stage: parse one chunk of input lines and tally
	//!			SNP counts per category per interval type into a recycled
	//!			t_parallel_data buffer.  Recycles the input buffer when done.
	//!
	//!		\param input	chunk of raw lines (ownership taken; deallocated here)
	//!
	//!		\return per-chunk results (caller/next stage deallocates)
	//!_______________________________________________________________________________________
	t_parallel_data* operator()( t_lines_buffer* input ) const
    {
        t_parallel_data& parsed_lines = *t_parallel_data_buffers::allocate();
        parsed_lines.clear(input->line_count_offset, intervals.size());

		// build grammar
		// NOTE(review): intervals_only is passed as the grammar's has_category
		// flag -- looks like a flag mix-up; confirm against the caller's intent.
		snp_pos_grammar<t_storage::const_iterator> grammar(parsed_lines.cnt_line, intervals_only);
		t_parsed_snp_data parsed_snp_data;

		// search cache for each interval type
		vector<t_interval_search_cache> search_cache(intervals.size());


        t_storage::const_iterator beg = input->storage.begin();
        BOOST_FOREACH(t_storage::const_iterator end, input->line_ends) 
        {
            parsed_lines.cnt_line ++;

			// skip empty or comment lines
            if (beg == end || *beg == '#') 
			{
				beg = end;
				continue;
			}

			// reset the string fields; qi appends into existing attributes
			get<eCONTIG>(parsed_snp_data).clear();
			get<eCATEGORY>(parsed_snp_data).clear();
			// FIX: "%1$" was a malformed positional directive (no conversion
			// letter) -- boost::format would throw bad_format_string instead of
			// reporting the failed line; "%1$d" is the intended directive
			if (!qi::parse(beg, end, grammar, parsed_snp_data))
				throw runtime_error((boost::format("Failed on line#%1$d") % parsed_lines.cnt_line).str());

			// map contig name to its genome-wide offset
			unordered_map<string, long long>::const_iterator ii = contig_genome_offsets.find(get<eCONTIG>(parsed_snp_data));
			if (ii == contig_genome_offsets.end())
				throw runtime_error((boost::format("No contig %2$s on line#%1$d") % parsed_lines.cnt_line
														% get<eCONTIG>(parsed_snp_data)).str());
			unsigned genome_pos = static_cast<unsigned>(static_cast<long long> (get<ePOS>(parsed_snp_data)) + ii->second);


			// TODO 
			//  if get category name
			//  add category if necessary
			//  otherwise increment count after interval lookup
			// 
			// count the site under the FIRST interval type that contains it
			int category_index = parsed_lines.get_category_name_index(get<eCATEGORY>(parsed_snp_data));
			for (unsigned interval_type = 0; interval_type < intervals.size(); ++interval_type)
				if (intervals[interval_type].contains(genome_pos, search_cache[interval_type]))
				{
					++parsed_lines.counts_per_category_per_interval_type[category_index][interval_type];
					break;
				}


			parsed_lines.cnt_sites += 1;
            beg = end;
        }
        // recycle the input chunk for the reader stage
        t_recyclable_buffers<t_lines_buffer, t_lines_buffer_init>::deallocate(input);

        return &parsed_lines;
    }
};


//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
// 
//  	Step 3
//  		Output
// 
//  		Merge the per-chunk category/interval tallies into the aggregate
//  		totals, reconciling each chunk's local category indices
// 
//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
//________________________________________________________________________________________
// 
//      t_gather_results_processor
// 
//________________________________________________________________________________________
// Maps a SNP category name to its (dense) row index in the count vectors
typedef std::tr1::unordered_map<std::string, int> t_category_names_to_indices;
class t_gather_results_processor
{

	t_counts&						counts_per_category_per_interval_type;
	unsigned						cnt_interval_types;
	t_category_names_to_indices&	category_names_to_indices;
    t_progress_indicator& 			progress;
    unsigned&       				cnt_all_sites;

public:

	t_gather_results_processor(	t_counts&							counts_per_category_per_interval_type_,
								unsigned							cnt_interval_types_,
								t_category_names_to_indices&		category_names_to_indices_,
								t_progress_indicator&				progress_, 
								unsigned& 							cnt_all_sites_):

				counts_per_category_per_interval_type(counts_per_category_per_interval_type_),
				cnt_interval_types(cnt_interval_types_),
				category_names_to_indices(category_names_to_indices_),
				progress(progress_), 
				cnt_all_sites(cnt_all_sites_)
		{}
	t_gather_results_processor(const t_gather_results_processor& other):
		counts_per_category_per_interval_type(other.counts_per_category_per_interval_type),
		cnt_interval_types(other.cnt_interval_types),
		progress(other.progress), 
		cnt_all_sites(other.cnt_all_sites),
		category_names_to_indices(other.category_names_to_indices)
	{}

	//!_______________________________________________________________________________________
	//!     
	//!		get_category_name_index
	//!			look up index for category name and create new entry if necessary
	//!     
	//!		\author lg (3/3/2011)
	//!     
	//!     \param category_name 
	//!     
	//!     \return int 
	//!_______________________________________________________________________________________
	int get_category_name_index(const string& category_name) const
	{
		if (category_names_to_indices.count(category_name))
			return category_names_to_indices[category_name];
		int new_index = category_names_to_indices.size();
		category_names_to_indices[category_name] = new_index;
		// also create new set of counts initialised to zero
		counts_per_category_per_interval_type.push_back(vector<unsigned>(cnt_interval_types, 0U));
		return new_index;
	}

	//!_______________________________________________________________________________________
	//!     
	//!		operator ()
	//! 		Add up counts for each parellel iteration
	//! 		The counts have to be mapped to the right category names whose order and
	//!     		number are likely to be different in different parallel iterations.
	//!     	So we need to map the category (index) of each fragmentary result to the
	//!     		final category (index).
	//!  
	//!     
	//!		\author lg (3/3/2011)
	//!     
	//!     \param output 
	//!_______________________________________________________________________________________
	void operator() ( t_parallel_data* fragment ) const 
	{
		progress += fragment->cnt_sites;

		// 
		//  Turn category names to indices into a vector of category names
		// 
		vector<string> category_names(fragment->category_names_to_indices.size());
		BOOST_FOREACH(t_category_names_to_indices::value_type& Pair, fragment->category_names_to_indices) 
			category_names[Pair.second] = Pair.first;

		//
		//	Make look up table of fragment category indices to aggregate category indices
		// 
		vector<unsigned> category_indices;
		BOOST_FOREACH(const string& category_name, category_names) 
			category_indices.push_back(get_category_name_index(category_name));


		for (unsigned ii = 0; ii < fragment->counts_per_category_per_interval_type.size(); ++ii)
		{
			unsigned category_index = category_indices[ii];
			for (unsigned jj = 0; jj < cnt_interval_types; ++jj)
				counts_per_category_per_interval_type[category_index][jj] += 
									fragment->counts_per_category_per_interval_type[ii][jj];
		}

		cnt_all_sites += fragment->cnt_sites;
		t_parallel_data_buffers::deallocate(fragment);
	}
};



//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
// 
//	Three step pipeline 
// 
//8888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

//________________________________________________________________________________________
// 
//  parse_lines_pipeline
// 
//________________________________________________________________________________________
unsigned parse_lines_pipeline(	unsigned 							cnt_threads_parallel_tasks, 
								istream& 							input_stream,
								ostream& 							verbose_strm,	
								t_contig_genome_offsets&			contig_genome_offsets,
								bool								zero_based_snps,
								bool								intervals_only,
								t_counts&							counts_per_category_per_interval_type,
								vector<string>&						category_names,
								const std::vector<t_intervals>&		intervals)
{
	typedef t_recyclable_buffers< t_lines_buffer, t_lines_buffer_init> t_line_chunk_buffers;
	typedef t_recyclable_buffers< t_parallel_data, int>					t_parallel_data_buffers;
	using namespace tbb;
	using tbb::filter_t;

    // 
    // initialise recyclable buffer of cnt_lines_per_chunk per thread to store chunks of lines
    // which can be processed in parallel
	// 
    t_lines_buffer_init 						lines_buffer_init(CNT_LINES_PER_CHUNK);
    t_line_chunk_buffers						line_chunk_buffers(lines_buffer_init,  cnt_threads_parallel_tasks);
    filter_t<void, t_lines_buffer *> 			f1(filter::serial_in_order,	t_lines_buffer_processor(1, input_stream) );


    // 
    // parse lines
	// 
	t_parallel_data_buffers 						parsed_lines_buffers(CNT_LINES_PER_CHUNK,  cnt_threads_parallel_tasks);
	t_parallel_calculations_processor			processor(contig_genome_offsets, zero_based_snps, intervals_only,
														   intervals);
	filter_t<t_lines_buffer *, t_parallel_data*> f2(filter::parallel, processor );




	// 
	// combine
	// 
    t_progress_indicator progress(verbose_strm, 200000U, 29000000U);
    progress.use_timer();
    unsigned 									cnt_all_sites = 0;
	t_category_names_to_indices					category_names_to_indices;
    filter_t<t_parallel_data*,void> 			f3(filter::serial_in_order, t_gather_results_processor(counts_per_category_per_interval_type,
																										static_cast<unsigned>(intervals.size()),
																										category_names_to_indices,
																										progress, cnt_all_sites) );
    filter_t<void,void> f = f1 & f2 & f3;




	tbb::parallel_pipeline(cnt_threads_parallel_tasks, f);


	// 
	//  Turn category names to indices into a vector of category names
	// 
	category_names.assign(category_names_to_indices.size(), string());
	BOOST_FOREACH(t_category_names_to_indices::value_type& Pair, category_names_to_indices) 
		category_names[Pair.second] = Pair.first;
	return cnt_all_sites;

}



