/* Copyright 2012 Tobias Marschall
 *
 * This file is part of CLEVER.
 *
 * CLEVER is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CLEVER is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CLEVER.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <iostream>
#include <ctime>

#include <boost/tokenizer.hpp>
#include <boost/program_options.hpp>
#include <boost/unordered_map.hpp>

#include <bamtools/api/BamWriter.h>
#include <bamtools/api/SamReadGroup.h>

#include "GroupWiseBamReader.h"
#include "BamHelper.h"
#include "PositionSet.h"
#include "VariationListParser.h"
#include "VariationIndex.h"
#include "DefaultGapCostDistributions.h"
#include "VersionInfo.h"

using namespace std;
namespace po = boost::program_options;

/** Prints usage information (followed by the full option description) to
  * stderr and terminates the program with exit code 1.
  * @param name program name as invoked, i.e. argv[0].
  * @param options_desc the option table to append to the usage text.
  */
void usage(const char* name, const po::options_description& options_desc) {
	cerr << "Usage: " << name << " [options] <insert-length-dist>" << endl
	     << endl
	     << "Reads a BAM input generated by laser-core from stdin and" << endl
	     << "recalibrates alignment scores based on distributions for indel" << endl
	     << "frequencies and internal segment size." << endl
	     << endl
	     << options_desc << endl;
	exit(1);
}

typedef boost::unordered_map<std::string, HistogramBasedDistribution*> insert_size_dist_map_t;

/** Reads a file given filenames of insert size distributions for every read group and 
  * then reads all these per-readgroup files. */
void read_insert_size_distributions(const std::string& filename, insert_size_dist_map_t& map) {
	typedef boost::tokenizer<boost::char_separator<char> > tokenizer_t;
	boost::char_separator<char> separator(" \t");
	ifstream f(filename.c_str());
	if (f.fail()) {
		ostringstream oss;
		oss << "Could not open file \"" << filename << "\"";
		throw std::runtime_error(oss.str());
	}
	string line;
	size_t line_nr = 0;
	while (getline(f, line)) {
		line_nr += 1;
		tokenizer_t tokenizer(line,separator);
		vector<string> tokens(tokenizer.begin(), tokenizer.end());
		if (tokens.size() != 2) {
			ostringstream oss;
			oss << "Error parsing file \"" << filename << "\", offending line: " << line_nr;
			throw std::runtime_error(oss.str());
		}
		string readgroup = tokens[0];
		string insert_size_filename = tokens[1];
		if (map.find(readgroup) != map.end()) {
			ostringstream oss;
			oss << "Error parsing file \"" << filename << "\", duplicate read group: " << readgroup;
			throw std::runtime_error(oss.str());
		}
		map[readgroup] = new HistogramBasedDistribution(insert_size_filename);
	}
}

/** Determines the read group of a read, based on its first alignment.
  * If readgroups_from_names is set, the part of the read name before the first
  * underscore is used; otherwise the RG tag of the alignment is consulted.
  * @throws std::runtime_error if the expected source of the read group
  *         (underscore-prefixed name or RG tag) is missing.
  */
string get_readgroup(const BamHelper::read_t& read, bool readgroups_from_names) {
	assert(read.alignments1.size() > 0);
	const BamTools::BamAlignment& aln = *(read.alignments1[0]);
	if (!readgroups_from_names) {
		string readgroup;
		if (aln.GetTag("RG", readgroup)) {
			return readgroup;
		}
		ostringstream oss;
		oss << "Error: No readgroup present in read \"" << aln.Name << "\"." << endl;
		throw std::runtime_error(oss.str());
	}
	const size_t underscore_pos = aln.Name.find('_');
	if (underscore_pos == string::npos) {
		ostringstream oss;
		oss << "Error: No underscore present in read name \"" << aln.Name << "\"." << endl;
		throw std::runtime_error(oss.str());
	}
	return aln.Name.substr(0, underscore_pos);
}

/** Entry point: reads a BAM stream produced by laser-core from stdin,
  * recalibrates alignment scores using indel length and insert size
  * distributions, pairs up reads, and writes the result as BAM to stdout.
  * Returns 0 on success and 1 on any error. */
int main(int argc, char* argv[]) {
	VersionInfo::checkAndPrintVersion("laser-recalibrate", cerr);
	string commandline = VersionInfo::commandline(argc, argv);

	// PARAMETERS
	bool omit_alt_cigars = false;
	bool omit_secondary_aln = false;
	string snp_filename = "";
	int phred_offset;
	// Must be initialized here: zero_tokens options without a default_value
	// leave the variable untouched when the flag is absent, so it would
	// otherwise be read uninitialized.
	bool simple_cigar = false;
	string variations_filename = "";
	bool readgroups_from_names = false;
	string readgroup_header_filename = "";
	bool readgroup_wise_insert_sizes = false;
	string insertion_length_filename = "";
	string deletion_length_filename = "";
	int max_pair_distance;
	bool distant_pairs = false;
	int strict_mapq_filter = 0;
	bool default_readgroup = false;
	int softclip_open_costs;
	int softclip_extend_costs;

	po::options_description options_desc("Allowed options");
	options_desc.add_options()
		("insertion_length_dist,I", po::value<string>(&insertion_length_filename)->default_value(""), "File name of empiric insertion size distribution.")
		("deletion_length_dist,D", po::value<string>(&deletion_length_filename)->default_value(""), "File name of empiric deletion size distribution.")
		("snp,S", po::value<string>(&snp_filename)->default_value(""), "File to read SNP positions from. Mismatches at these positions will not incur alignment costs.")
		("variations,V", po::value<string>(&variations_filename)->default_value(""), "File to read known indels from. These indels will not incur alignment costs. Will also be taken into account for internal segment size probability.")
		("phred_offset,p", po::value<int>(&phred_offset)->default_value(33), "Value to subtract from ASCII code to get the PHRED quality.")
		("max_pair_distance,m", po::value<int>(&max_pair_distance)->default_value(50000), "Maximum distance of reads in a \"regular\" pair. If distance is larger/interchromosomal, reads can still be paired, but only if option --distant-pairs is given and no pair with smaller distance is present.")
		("distant-pairs", po::value<bool>(&distant_pairs)->zero_tokens(), "Allow long distance and interchromosomal alignments to be paired.")
		("omit_alt_cigars,c", po::value<bool>(&omit_alt_cigars)->zero_tokens(), "Omit alternative cigar strings stored in YA tags.")
		("omit_secondary_aln,s", po::value<bool>(&omit_secondary_aln)->zero_tokens(), "Omit secondary alignments.")
		("m_in_cigar,M", po::value<bool>(&simple_cigar)->zero_tokens(), "Use M for matches and mismatches in CIGAR strings (instead of '=' and 'X').")
		("mapq_cutoff", po::value<int>(&strict_mapq_filter)->default_value(0), "Only report properly paired reads for which each read has a MAPQ above the given level. Other alignments will be omitted. Requires option -s.")
		("readgroup_from_name,R", po::value<bool>(&readgroups_from_names)->zero_tokens(), "Convert readnames of the format <readgroup>_<name> back to <name> and set respective read group tag.")
		("readgroup_header,H", po::value<string>(&readgroup_header_filename), "BAM file from which the @RG lines in the header are to be copied.")
		("readgroup_wise_stats,r", po::value<bool>(&readgroup_wise_insert_sizes)->zero_tokens(), "Use separate insert size distributions for every read group. If set, <insert-length-dist> must be a two column text file contain read group names and filenames of corresponding insert size distributions.")
		("default_readgroup,d", po::value<bool>(&default_readgroup)->zero_tokens(), "Put all alignments into readgroup \"default\".")
		("soft_clip_open_cost", po::value<int>(&softclip_open_costs)->default_value(35), "Cost for soft clipping a read.")
		("soft_clip_extend_cost", po::value<int>(&softclip_extend_costs)->default_value(3), "Cost for \"extending\" a soft clip; i.e., softclipping k characters from a read will cost soft_clip_open_cost+k*soft_clip_extend_cost.")
	;
	
	// Show usage when no BAM is piped in or the positional argument is missing.
	if (isatty(fileno(stdin)) || (argc<2)) {
		usage(argv[0], options_desc);
	}

	// The last commandline argument is the positional <insert-length-dist>;
	// strip it before handing the remaining arguments to program_options.
	string insert_length_filename(argv[argc-1]);
	argc -= 1;

	po::variables_map options;
	try {
		po::store(po::parse_command_line(argc, argv, options_desc), options);
		po::notify(options);
	} catch(exception& e) {
		cerr << "error: " << e.what() << "\n";
		return 1;
	}
	if ((strict_mapq_filter > 0) && (!omit_secondary_aln)) {
		// Note: the check concerns --mapq_cutoff (which has no short form);
		// -M belongs to the unrelated m_in_cigar option.
		cerr << "Option --mapq_cutoff can only be used jointly with -s." << endl;
		return 1;
	}
	// -d assigns all alignments to read group "default", which conflicts with
	// every option that derives read groups from another source.
	if (default_readgroup) {
		if (readgroup_header_filename.size() > 0) {
			cerr << "Options -H and -d cannot be combined" << endl;
			return 1;
		}
		if (readgroups_from_names) {
			cerr << "Options -d and -R cannot be combined" << endl;
			return 1;
		}
		if (readgroup_wise_insert_sizes) {
			cerr << "Options -d and -r cannot be combined" << endl;
			return 1;
		}
	}
	cerr << "Commandline: " << commandline << endl;
	clock_t clock_start = clock();
	PositionSet* snp_set = 0;
	auto_ptr<vector<Variation> > variations(0);
	VariationIndex* variation_index = 0;
	// insert size distribution (in case option -r is NOT used)
	HistogramBasedDistribution* insert_length_distribution = 0;
	// read group specific insert size distributions (if requested by option -r)
	insert_size_dist_map_t insert_size_dist_map;
	try {
		// open input BAM file
		GroupWiseBamReader bam_reader("/dev/stdin", true, false);
		const BamTools::RefVector& bam_ref_data = bam_reader.getReferenceData();
		const BamTools::SamHeader& sam_header = bam_reader.getHeader();
		bam_reader.enableProgressMessages(cerr, 200000);
		// read SNPs, if requested
		if (snp_filename.size() > 0) {
			snp_set = new PositionSet();
			snp_set->loadFromFile(snp_filename, bam_reader.getReferenceData());
		}
		// read variations, if requested
		if (variations_filename.size() > 0) {
			ifstream variants_stream(variations_filename.c_str());
			if (variants_stream.fail()) {
				cerr << "Error: could not open \"" << variations_filename << "\"." << endl;
				return 1;
			}
			variations = VariationListParser::parse(variants_stream, false);
			cerr << "Read " << variations->size() << " variations." << endl;
			variation_index = new VariationIndex(*variations, true, &bam_ref_data);
			cerr << "Created variation index" << endl;
		}
		// read insert size distribution(s): either one per read group (-r) or
		// a single global one
		if (readgroup_wise_insert_sizes) {
			read_insert_size_distributions(insert_length_filename, insert_size_dist_map);
		} else {
			insert_length_distribution = new HistogramBasedDistribution(insert_length_filename);
		}
		// read insertion and deletion size distributions; fall back to
		// compiled-in defaults when no file was given
		auto_ptr<IndelLengthDistribution> insertion_costs(0);
		auto_ptr<IndelLengthDistribution> deletion_costs(0);
		if (insertion_length_filename.size() > 0) {
			HistogramBasedDistribution insertion_length_distribution(insertion_length_filename);
			insertion_costs = auto_ptr<IndelLengthDistribution>(new IndelLengthDistribution(insertion_length_distribution));
		} else {
			cerr << "Using default insertion cost distribution" << endl;
			insertion_costs = DefaultGapCostDistributions::insertionCosts();
		}
		if (deletion_length_filename.size() > 0) {
			HistogramBasedDistribution deletion_length_distribution(deletion_length_filename);
			deletion_costs = auto_ptr<IndelLengthDistribution>(new IndelLengthDistribution(deletion_length_distribution));
		} else {
			cerr << "Using default deletion cost distribution" << endl;
			deletion_costs = DefaultGapCostDistributions::deletionCosts();
		}
		// create output BAM file; the header records this program run
		BamTools::BamWriter bam_writer;
		BamTools::SamHeader new_header = sam_header;
		BamTools::SamProgram laser_recal_program("laser-recalibrate");
		laser_recal_program.CommandLine = commandline;
		laser_recal_program.Version = VersionInfo::version();
		laser_recal_program.Name = "laser-recalibrate";
		new_header.Programs.Add(laser_recal_program);
		// copy @RG lines from another BAM, if requested (-H)
		if (readgroup_header_filename.size() > 0) {
			BamTools::BamReader br;
			if (!br.Open(readgroup_header_filename)) {
				cerr << "Error: could not open \"" << readgroup_header_filename << "\"" << endl;
				return 1;
			}
			new_header.ReadGroups = br.GetHeader().ReadGroups;
			cerr << "Read read group header information from \"" << readgroup_header_filename << "\"" << endl;
		}
		string* default_readgroup_string = 0;
		if (default_readgroup) {
			default_readgroup_string = new string("default");
			BamTools::SamReadGroup rg(*default_readgroup_string);
			rg.Sample = *default_readgroup_string;
			new_header.ReadGroups.Add(rg);
		}
		if (!bam_writer.Open("/dev/stdout", new_header, bam_ref_data)) {
			cerr << "Error writing BAM to stdout." << endl;
			return 1;
		}
		// main loop: process one read (group of alignments) at a time
		while ( bam_reader.hasNext() ) {
			bam_reader.advance();
			const vector<BamTools::BamAlignment*>& alignments1 = bam_reader.getAlignmentsFirst();
			const vector<BamTools::BamAlignment*>& alignments2 = bam_reader.getAlignmentsSecond();
			auto_ptr<BamHelper::read_t> read = BamHelper::createReadRecord(alignments1, alignments2);
			assert(read->subalignments1.size() == alignments1.size());
			assert(read->subalignments2.size() == alignments2.size());
			// recalibrate the score of every candidate alignment of both ends
			for (size_t i=0; i<alignments1.size(); ++i) {
				BamHelper::recalibratePhredScores(&(read->subalignments1[i]), *(alignments1[i]), *insertion_costs, *deletion_costs, softclip_open_costs, softclip_extend_costs, snp_set, phred_offset, variation_index);
			}
			for (size_t i=0; i<alignments2.size(); ++i) {
				BamHelper::recalibratePhredScores(&(read->subalignments2[i]), *(alignments2[i]), *insertion_costs, *deletion_costs, softclip_open_costs, softclip_extend_costs, snp_set, phred_offset, variation_index);
			}
			// Use read group specific insert size distribution?
			HistogramBasedDistribution* ild = 0;
			if (readgroup_wise_insert_sizes) {
				string readgroup = get_readgroup(*read, readgroups_from_names);
				insert_size_dist_map_t::const_iterator it = insert_size_dist_map.find(readgroup);
				if (it == insert_size_dist_map.end()) {
					cerr << "Error: no insert size distribution given for read group " << readgroup << endl;
					return 1;
				}
				ild = it->second;
			} else {
				ild = insert_length_distribution;
			}
			assert(ild != 0);
			auto_ptr<BamHelper::pairing_information_t> pairing_info = BamHelper::pairUpReads(*read, *ild, variation_index, max_pair_distance, distant_pairs);
			BamHelper::writeAlignments(bam_writer, *read, *pairing_info, !omit_secondary_aln, !omit_alt_cigars, simple_cigar, readgroups_from_names, strict_mapq_filter, default_readgroup_string);
		}
		bam_writer.Close();
	} catch(exception& e) {
		cerr << "Error: " << e.what() << "\n";
		return 1;
	}
	if (snp_set != 0) delete snp_set;
	if (variation_index != 0) delete variation_index;
	if (insert_length_distribution != 0) delete insert_length_distribution;
	// free the per-read-group distributions allocated by
	// read_insert_size_distributions (previously leaked)
	for (insert_size_dist_map_t::const_iterator it = insert_size_dist_map.begin(); it != insert_size_dist_map.end(); ++it) {
		delete it->second;
	}
	double cpu_time = (double)(clock() - clock_start) / CLOCKS_PER_SEC;
	cerr << "Total CPU time: " << cpu_time << endl;
	return 0;
}
