#include "CBT.hh"
//#include <strstream.h>
//#include <hash_set.h>
//#include <hash_map.h>

#include "MerHashMap.hh"
#include "MerOverlapper.hh"
#include "SimpleMerHash.hh"


//#include "zipiostream.hh"

#include <sys/stat.h>


// ---- Global option values, populated by SetupOptions() from the
// ---- command line.  The flag letter each one belongs to is noted.

string OptSourceFileName="";   // -s  source FASTA; the mer hash is built from this
string OptQueryFileName="";    // -q  single query FASTA (kept for backward compatibility)
string OptOutFileName="";      // -o  output overlap-graph file
string OptQueryFOFName="";     // -Q  file listing query file names (file-of-files)


float  OptRepeatThreshold = 1;     // -r  repeat cutoff, in std deviations above the mean mer frequency
int    OptMaxMismatchPercent = 2;  // -m  max mismatch percent accepted in an aligned region
int    OptHashMerLimit = 1000;     // -H  hard cap on locations stored per mer (bounds hash size)
int    OptRepeatHardLimit = -1;    // -R  repeat cutoff as an absolute mer count; -1 = unset (use -r)

int    OptBeginningSource = -1;    // -b  index of first source sequence to hash (0-based)
int    OptEndSource = -1;          // -e  index of last source sequence to hash
int    OptBandSize = 2;            // -B  banded-alignment band width (+/- from diagonal)

bool   OptMultipleEdgeValues = false; // report multiple values per edge (no command-line flag sets this)
bool   OptZipOutput = false;          // -Z  gzip the output graph
bool   OptOriginRelative = false;     // -F  overlap extensions relative to edge origin (default: sink)
bool   OptFlipDirection = false;      // -f  edges go source->query (default: query->source)

int    OptBucketSize = 100000;     // -x  initial hash bucket size (experimental)

int    OptMaxSeed = 200;           // -Y  largest mer count that may seed an overlap
int    OptOligoSize = 32;          // -O  k-mer (sample) size, valid range 4-32


int OptMinReadSize = 100;          // -M  minimum read size to consider
bool OptInteriorSampling = false;  // -I  sample across the whole read (default: ends only)
int OptSamplesPerRead = 10;        // -S  number of samples taken per read
int OptSampleGap = 0;              // -G  step between samples; 0 means "use default" (set to 1 later)

int  OptRepeatListMinCount = 0;    // -k  minimum count for mers kept from the -K kill list
string OptRepeatListName = "";     // -K  file of known repeat mers ("kill list")


int OptKillHashSelection = 5;      // -P  index into HashSizes[] below
long OptKillHashSize = 0;          // -p  explicit kill-hash size; overrides -P when nonzero
string OptKillHashSizeStr="";      //     raw -p argument text, converted with atol()
// Prime kill-hash sizes selectable with -P (memory estimates in PrintHelp).
long HashSizes[]={357913931,101000777,79999987,59999999,49999991,39999943,29999947};
int OptHashThinningPrime = 1;      // -t  keep only 1-in-N kill-hash entries / read k-mers

vector<string> OptQueryList;       // final query list: -Q contents, -q file, plus free arguments


const string gVersion="1.70";


//---------------------------------------------------
// * PrintVersion
//---------------------------------------------------
//
// Prints the program version string to stdout.
void PrintVersion()
{
  // FIX: the original wrote "\nVersion %s" through an iostream
  // insertion -- the "%s" is printf syntax and was emitted literally,
  // producing "Version %s1.70".
  cout<<"\nVersion "<<gVersion<<endl;
}

//---------------------------------------------------
// * PrintHelp
//---------------------------------------------------
//
// Prints the full usage text (one large printf), then the version.
// Called for -h, for missing required options, and for any
// unrecognized option.
// FIX: corrected several typos in the user-facing text
// ("heurisitc", "occuring", "occurences", "analgous", "algined"),
// a mismatched brace in "-R {cutoff mers)", and the wrong
// placeholder name "MinRepeatCont" for -k.
void PrintHelp()
{
printf("\n"
"\n"
"\n atlas-overlapper  -s {SourceFile} -q {QueryFile}  -o {OutFile} [{heuristic options}]  {query2  query3  query4  ....}"
"\n"
"\n"
"\n    IMPORTANT LIMITATIONS:"
"\n"
"\n    Query read sequences can not be longer than 4095bp. (2^12)"
"\n    There can not be more than 524,000 source reads. (2^19) "
"\n"
"\n    UNPREDICTABLE behavior will result if these limits are exceeded. "
"\n"
"\n    Uses a filtration technique to compare all of the sequences "
"\n    in a FASTA file to each other, identifying all of the overlaps "
"\n    between the sequences.  Small samples are taken from each read "
"\n    and compared with the full sequence of all other reads using a fast"
"\n    exact match algorithm (hash table in current version; suffix trees "
"\n    have been used in other versions).  Repeats are filtered out, de novo,"
"\n    in order to produce physical overlaps only.   A banded alignment of all pairs "
"\n    that contain a common low-copy n-mer is performed to determine true overlap "
"\n    amounts to be reported in a graph file.  The output will be in the form: "
"\n"
"\n    SourceName    QueryName1{QuerySpan,QueryScore,LeftExtension,RightExtension,SeedCopyCount} QueryName2..."
"\n"
"\n"
"\n    A single query file can be specified with the -q option (for backward compatibility), "
"\n    but multiple query files can also be added onto the line, with no arbitrary limit. "
"\n"
"\n"
"\n    -s {SourceFile}        Name of source FASTA file. Hash will be built from "
"\n                           source. "
"\n"
"\n    -q {QueryFile}         Name of query FASTA file. Query reads will be sampled"
"\n                           and samples compared to source hash. "
"\n"
"\n    -Q {QueryFOF}          A file which lists the query files to use. Can be used in "
"\n                           conjunction with -q or with a list of queries as free arguments."
"\n                           (The purpose of this option is to allow very long query lists).  "
"\n"
"\n    -o {OutFile}           Name of output file.  This will be an adjacency list graph"
"\n                           of the overlaps for each sequence in the source file. "
"\n"
"\n    -v                     Print version and version history. "
"\n"
"\n"
"\n------ Misc       -----------------------------"
"\n"
"\n    -b {Begin with}        Number of Sequence in source to start with. (0 based). "
"\n    -e {End with}          Number of Sequence in source to end with.  "
"\n                           The b and e options are for building the hash from a large FASTA file"
"\n                           (e.g. > 50,000 reads), where the file has to be compared in chunks.  "
"\n                           Since it scans through the source until it reaches read b, this is not "
"\n                           as efficient as creating several separate source files and running the "
"\n                           program several separate times,  but it is often more convenient. "
"\n"
"\n    -Z                     gzip the output graph.  Default is not-gzipped.   Output graphs can get "
"\n                           quite large, especially now that multiple values are being reported for each "
"\n                           edge.  This option will save the graph output directly to a gzip file. "
"\n                           NOTE: Does NOT automatically tack on .gz to file name.  "
"\n"
"\n    -F                     Output format:  Flip sense of overlap extensions so they are "
"\n                           relative to the read that is origin of the edge (default relative"
"\n                           to sink of edge)."
"\n"
"\n    -f                     Output format:  Flip direction of edges to go from source read to "
"\n                           query read (default is from query to source)."
"\n"
"\n------ Heuristics ------------------------------ "
"\n"
"\n"
"\n   -P                      Prime numbered kill hash size.  A good value will be at least 1.2 times "
"\n                           the number of kill mers you have. Integer value selects one of the "
"\n                           following (5 is default):"
"\n                           "
"\n                           0  357913931   Roughly 3 GB of space. "
"\n                           1  101000777   Roughly 900MB of space. "
"\n                           2   79999987   Roughly 800 MB of space. "
"\n                           3   59999999           687 MB"
"\n                           4   49999991           429 MB"
"\n                           5   39999943           340 MB"
"\n                           6   29999947            258MB"
"\n"
"\n    -p                     Directly specify prime numbered kill hash size (overrides -P). "
"\n    -t {HashThinningPrime} This is used to thin the kill hash for efficiency. \n                           If specified, only one in that number of entries   \n                           in the imported kill hash will be stored in        \n                           memory, and only one in that number of k-mers in   \n                           reads will be considered.  The selection of k-mers \n                           is based on their sequence, not their location, so \n                           any k-mer will either be retained (if found) on    \n                           both sides or omitted on both sides.  Hash         \n                           thinning should not be combined with sampling      \n                           (-G > 1)."
"\n  "
"\n    -O {oligo size}        Size of k-mer to use for seeding banded alignments (4-32). "
"\n                           Allow the sample length to be specified.  Default 32bp."
"\n"
"\n    -B {band size}         Band size for banded alignments (Default=2, which is +/- 2 from diagonal). "
"\n                           Valid range: 1-25.  "
"\n"
"\n    -R {cutoff mers}       This is the repeat cutoff specified as a number of mers, rather than "
"\n                           as a number of standard deviations above the mean.  Overrides -r option. "
"\n                           Default is to use -r.   "
"\n"
"\n    -r {RepeatThresh}      Cutoff for repeats.  Samples occurring more than mean+r*sigma"
"\n                           are ignored as repeat samples.  This cutoff attempts to be "
"\n                           a function of the actual coverage, since the mean number of "
"\n                           occurrences of a random sample will be close to the mean coverage, "
"\n                           skewed high a bit by the presence of repeats.   For low-coverage"
"\n                           data (C < 2) r may need to be 2 or 3 since sigma is likely to be "
"\n                           small.   For high-coverage data, r closer to 0 is probably "
"\n                           appropriate.   I plan to make this more sophisticated when I get"
"\n                           a chance so that the user doesn't have to think about it.   Default = 1. "
"\n"
"\n    -K {RepeatFileName}    When specified a file of known repeat mers is read in and used to filter"
"\n                           repeats.  File lists mer, a tab, and mer count on each line. K for kill list.  "
"\n"
"\n    -k {MinRepeatCount}    When reading in -K repeat file, save in RAM only those mers with count "
"\n                           >= MinRepeatCount. "
"\n"
"\n    -Y {MaxOverlapSeed}    Max seed.  Largest mer count that will be used to seed an overlap.  This is "
"\n                           analogous to the -R option, except that it is based on the mer frequencies "
"\n                           reported in RepeatFile (-K), which are globally derived.   This option works"
"\n                           before the -R option, so the -R stats will be computed on the mers that remain. "
"\n"
"\n"
"\n    -H {HashMerLimit}      This is another form of repeat cutoff.  The purpose of this limit "
"\n                           is to constrain the size of the hash.  Basically, HashMerLimit is "
"\n                           a hard limit on the number of locations that will be saved for any "
"\n                           mer.   It should be set to something just slightly larger than "
"\n                           what you expect mean+r*sigma above to be.  100 or 1000 are pretty "
"\n                           safe values in most cases.   The smaller this number, the more "
"\n                           reads can be put into each hash, and so quicker a set of jobs will go. "
"\n                           Default value is 1000. "
"\n"
"\n    -m {MaxMismatch%}      Maximum mismatch percent to accept in the aligned region.  Edges which "
"\n                           reflect more than this fraction of mismatches will not be reported.  "
"\n                           This is translated into a score cutoff as follows.  Since each match is "
"\n                           +1 and each mismatch is -1 (or -2 for indel, which we ignore here), there "
"\n                           is a double penalty to a mismatch so far as the score is concerned (no "
"\n                           +1 for the match, and a -1 penalty).   So, for a particular span, the "
"\n                           score that is reflected by a x% fraction of mismatches will be "
"\n                           score=(1-(x/100))*span - (x/100)*span = (1-(2x/100))* span.   So the cutoff"
"\n                           is simply cutoff = (1-(2x/100))*span.  If score < cutoff, the edge is "
"\n                           omitted from the graph.  Default value is 2 (i.e. 2%).  "
"\n"
"\n     -M (Min Read Size)    Minimum size of reads to consider.   Default is 100bp. "
"\n     -S (SamplesPerRead)   Samples per read.  Code tries to take this many samples, but read may be too "
"\n                           short in which case it takes as much as it can. "
"\n"
"\n     -I (SampleInterior)   Samples across entire read.  Default is end sampling only, where SamplesPerRead/2 "
"\n                           samples are taken from each end of the read. End sampling typically gives you more"
"\n                           sensitivity per sample since we are looking for end-to-end matches, but there arise"
"\n                           situations where it is necessary to look at the interior of reads also. "
"\n"
"\n     -G (SampleGap)        Step between samples.  Default is now 1."
"\n"
"\n    --- Experimental options -----"
"\n "
"\n    -x  BucketSize         Set the initial hash bucket size. "
"\n"
"\n"
"\n"
"\n    Written by: James Durbin (kdurbin@bcm.tmc.edu)"
"\n");

 PrintVersion();
}


void PrintHistory(){
  /*
  PrintVersion();

 printf(" 
    Version History:  
    ----------------
    1.70    Tweaking of boundary conditions in MerOverlapper:Search*SamplesAgainstHash.

    1.69    Support for flipping direction of edges (default is query as origin, source as sink),
            for making overlap extensions relative to origin (default is relative to sink).
            SampleGap is now skip distance from start of one sample to next (default
            equal to sample size, for perfect tiling).

    1.68    Added support for small mer sizes. 

    1.66    Fixed switched order of Source/Query in output graph.  Now is correct: Source->Query{...} Query{....} 
    1.65    Fixed NX bug in Sequence3Bit.  Added user selectable kill hash size. 
    1.60    Added more efficient mer kill hash.  Added support for tracking seed frequencies.  
    1.51    1.50 is apparently wrong version.  1.51 is installation of what 1.50 was supposed to be. 
    1.50    Added support to read in a mer kill list. 
    1.42    Added additional options for sampling (SampleInterior, MinReadSize,SampleGap,SamplesPerRead)
    1.40    Added support for queries to be given as a file of query file names. 
    1.36    Fixed support for left and right extension.   
    1.34    Added support for multiple query files on a single line. 
    1.32    Added left extent, right extent, and sense to edges for graphs.  Added option 
            to save output to a gzip file.  Removed option to save graph with single value
            edges. BUG FIX:  Pervious version banded alignments for reverse-complement samples were 
            not being performed with reverse complement version of sequence (ugh).  Effectively 
            this prevented any reverse edges from being reported.   Fixed in 1.32.  

    1.30a   Improved handling of quality trim.  Only minimaly tested, so this is an alpha 
            release.   This version should handle trimmed sequence lengths correctly in 
            alignments.   

    1.21    Added option to specify repeat cutoff in Mers.   Added option to output 
            both span and score with edges in form {span,score}

    1.20    Added banded alignment of probable matches (heruistic from 1.1 removed). 

    1.10    Added some crude heuristics to ensure that overlaps are in proper orientation. 
        
    1.00    All new version of overlapper, with 2-bit mer encoding
            and good speed performance. 	 
");
*/
}

//---------------------------------------------------
// * SetupOptions
//---------------------------------------------------
//
// Parses the command line into the Opt* globals above, assembles the
// final query-file list (-Q file-of-files, -q single file, plus any
// free arguments), and validates required options.  Exits (after
// printing help) on any problem.
void SetupOptions(int argc, char**argv)
{

  // Echo the full command line, for the record.
  for(int i = 0;i< argc;i++) cout<<argv[i]<<" ";
  cout<<endl;


  // Create parser object and define valid options.
  // Options followed by a ':' require a parameter.
  // FIX: 'v' was previously declared as "v:" (argument required) even
  // though the 'v' handler consumes no argument, so a bare "-v" would
  // misparse the rest of the command line.
  GetOption Parser(argc,argv,"p:P:t:Y:Zx:b:e:s:q:r:o:m:H:R:B:Q:hM:S:IG:K:k:vO:Ff");

  // Handle the options...
  char theOption;
  while((theOption = Parser.NextOption()) != OPTION_EOF){
    switch(theOption){
    case 'P': OptKillHashSelection = Parser.GetIntArg();break;
    case 'p': OptKillHashSizeStr = Parser.GetStringArg();
      OptKillHashSize = atol(OptKillHashSizeStr.c_str());
      break;
    case 'Q': OptQueryFOFName = Parser.GetStringArg();break;
    case 't': OptHashThinningPrime = Parser.GetIntArg();break;
    case 's': OptSourceFileName  = Parser.GetStringArg();break;
    case 'q': OptQueryFileName  = Parser.GetStringArg();break;
    case 'o': OptOutFileName = Parser.GetStringArg();break;
    case 'r': OptRepeatThreshold = Parser.GetFloatArg();break;
    case 'm': OptMaxMismatchPercent = Parser.GetIntArg();break;
    case 'Y': OptMaxSeed = Parser.GetIntArg();break;
    case 'H': OptHashMerLimit = Parser.GetIntArg();break;

    case 'b': OptBeginningSource  = Parser.GetIntArg();break;
    case 'e': OptEndSource  = Parser.GetIntArg();break;

    case 'B': OptBandSize  = Parser.GetIntArg();break;
    case 'R': OptRepeatHardLimit  = Parser.GetIntArg();break;

    case 'Z': OptZipOutput = true;break;

    case 'x': OptBucketSize = Parser.GetIntArg();break;

      // Sampling options
    case 'M': OptMinReadSize = Parser.GetIntArg();break;
    case 'I': OptInteriorSampling = true; break;
    case 'S': OptSamplesPerRead = Parser.GetIntArg();break;
    case 'G': OptSampleGap = Parser.GetIntArg();break;

    case 'K': OptRepeatListName = Parser.GetStringArg();break;
    case 'k': OptRepeatListMinCount = Parser.GetIntArg();break;

    case 'F': OptOriginRelative = true; break;
    case 'f': OptFlipDirection = true; break;
    case 'O': OptOligoSize = Parser.GetIntArg();break;

    case 'v': PrintHistory(); exit(1);break;
      // FIX: 'h' was accepted by the option string but had no case
      // label, so "-h" fell through to "Invalid option: h" before the
      // help text.  An explicit help request exits successfully.
    case 'h': PrintHelp(); exit(0); break;
    default: cout <<"Invalid option: " << theOption << endl;
      PrintHelp(); exit(1); break;
    }
  }

  // A SampleGap of 0 means "use the default", which is now 1.
  // (It used to default to the sample size, for perfect tiling;
  // that behavior is disabled.)
  if (OptSampleGap == 0) {
    OptSampleGap = 1;
  }
  // Hash thinning (-t) and sample gaps (-G) are mutually exclusive.
  if ((OptSampleGap > 1) && (OptHashThinningPrime > 1)){
    cout << "You cannot specify both -G and -t." << endl;
    PrintHelp();
    exit(1);
  }
  // A source file is required.
  if (OptSourceFileName == ""){
    PrintHelp();exit(1);
  }


  // Set up the Query List.
  // Read from file if there is one...
  if (OptQueryFOFName != ""){
    ReadFOF(OptQueryFOFName,OptQueryList);
    cout<<OptQueryList.size()<<" queries found in query list. "<<endl;
  }
  // Add -q option query name.
  if (OptQueryFileName != ""){
    OptQueryList.push_back(OptQueryFileName);
  }

  // Add query files on command line to list of query files.
  string temp = Parser.GetFreeArg();
  while(temp != ""){
    OptQueryList.push_back(temp);
    temp = Parser.GetFreeArg();
  }

  // Build hash from only a subset of sequence, if called for.
  // FIX: corrected "temporarialy" typo in the message below.
  if ((OptBeginningSource == -1) || (OptEndSource == -1)){
    cout<<"Automatic specification of begin and end sequence is temporarily disabled."<<endl;
    cout<<"Please specify beginning and ending sequence with -b and -e."<<endl;
    exit(1);
  }

}

//-------------------------------------------
// * FileBad
//-------------------------------------------
//
// Returns true when fname is unusable: stat() fails (file missing or
// unreadable) or the file is smaller than 100 bytes.  Tiny (~40 byte)
// files show up occasionally and are not valid zip files.
bool FileBad(string &fname)
{
  struct stat statInfo;

  if (stat(fname.c_str(), &statInfo) != 0) {
	 cout<<fname<<" has some problem.  Unable to get status of file. *****"<<endl;
	 return(true);
  }

  // Reject suspiciously small files (see note above).
  if (statInfo.st_size >= 100) return(false);

  cout<<fname<<" size is too small to be valid.  Size="<<statInfo.st_size<<" *****"<<endl;
  return(true);
}

// Open a gzip output stream on fName; on failure report and abort.
void OpenOrQuit(string &fName,ozipstream &f)
{
  f.open(fName.c_str());
  if (!f.fail()) return;
  cout<<fName<<" file open failed"<<endl;
  exit(1);
}


// Open a plain output file on fName; on failure report and abort.
void OpenOrQuit(string &fName,ofstream &f)
{
  f.open(fName.c_str());
  if (!f.fail()) return;
  cout<<fName<<" file open failed"<<endl;
  exit(1);
}

// Open an input file on fName; on failure report and abort.
void OpenOrQuit(string &fName,ifstream &f)
{
  f.open(fName.c_str());
  if (!f.fail()) return;
  cout<<fName<<" file open failed"<<endl;
  exit(1);
}


//---------------------------------------------------
// * main
//---------------------------------------------------
//
int main(int argc,char **argv){

  SetupOptions(argc,argv);

	// Since this is a global value that lots of other code refers to, it 
	// should be defined as early as possible. 
	Mer::mMerSize = OptOligoSize;

  MerOverlapper Overlapper;
  Overlapper.mMaxSeed = OptMaxSeed;

  // Create the kill hash.
  if (OptKillHashSize != 0){ 
	 cout<<"Explicit hash size: "<<OptKillHashSize<<endl;
	 Overlapper.mRepeatMers.Create(OptKillHashSize,
				       OptHashThinningPrime);
  }else{ 
	 cout<<"Selected hash size: "<<HashSizes[OptKillHashSelection]<<endl;
	 Overlapper.mRepeatMers.Create(HashSizes[OptKillHashSelection],
				       OptHashThinningPrime); 
  }

  // Read the repeat list if one is given. 
  if (OptRepeatListName != ""){
	 izipstream rin;
	 if (!FileBad(OptRepeatListName)){
		rin.open(OptRepeatListName.c_str());
	 }
	 cout<<"Reading repeat mer kill list from "<<OptRepeatListName<<"..."<<endl;
	 Overlapper.ReadRepeatMers(rin,OptRepeatListMinCount);
	 cout<<Overlapper.mRepeatMers.size()<<" mers in kill set."<<endl;

	 //Overlapper.ReadRepeatMers(rin,OptRepeatListMinCount);
	 //cout<<Overlapper.mRepeatMers.size()<<" mers in kill set."<<endl;
  }

  Overlapper.mSourceHash.mMaxMerLocations = OptHashMerLimit; // Limit Mer locations to record 
  Overlapper.mSourceHash.mMaxSeed = OptMaxSeed;      // Maximum count value for mers to be used as overlap seeds. 

  // Create the source hash.  Source sequence isn't saved, but the names of 
  // the sources are placed in SourceNames. 
	izipstream fin;
  //ifstream fin;
  if (!FileBad(OptSourceFileName)){
	 fin.open(OptSourceFileName.c_str());
  }
  long StartTime;
  cout<<endl;
  cout<<"Adding "<<OptSourceFileName<<" to hash map..."<<endl;
  StartTiming(StartTime);

  // KJD I know, this is getting silly.  I'll rearrange this later...
  Overlapper.mSourceHash.AddToHashMap(fin,OptBeginningSource,OptEndSource,Overlapper.mRepeatMers);

  float elapsed = EndTiming(StartTime);
  fin.close();
  cout<<"done."<<endl;
  cout<<"NumReads       = "<<Overlapper.mSourceHash.mSourceNames.size()<<endl;
  cout<<"Distinct mers  = "<<Overlapper.mSourceHash.size()<<endl;
  cout<<"Killed mers = "<<Overlapper.mSourceHash.mKillFilteredCount<<endl;
  
  cout<<"Hash create time   = "<<elapsed<<endl;
  cout<<endl;

  StartTiming(StartTime);
  cout<<"Compute Source mer stats..."<<flush;
  // Compute the repeat cutoff threshold.  KJD Kind of crude. 
  Overlapper.mSourceHash.ComputeMerStats();
  cout<<"done.";
  elapsed = EndTiming(StartTime);
  cout<<"  Mer Stat Time = "<<elapsed<<endl;
  // Assumes compute mer stats has happened.
  double  meanMerFreq = Overlapper.mSourceHash.mMeanMerFrequency;
  double  stDevMerFreq =Overlapper.mSourceHash.mStDevMerFrequency;

  cout<<"Mean mer occurrence = "<<meanMerFreq<<endl;
  cout<<"St.Dev mer occurrence = "<<stDevMerFreq<<endl;   

  int RepeatCutoff;
  if(OptRepeatHardLimit > 0){
	 RepeatCutoff = OptRepeatHardLimit;
  }else{
	 RepeatCutoff = (int)((OptRepeatThreshold*stDevMerFreq)+meanMerFreq); 
  }

  cout<<endl;
  cout<<"Repeat Cutoff = "<<RepeatCutoff<<endl;

  // KJD Experimental
  cout<<"Remove source repeat mers from hash..."<<flush;
  StartTiming(StartTime);
  Overlapper.mSourceHash.RemoveRepeats(RepeatCutoff);
  elapsed = EndTiming(StartTime);
  cout<<"done.";
  cout<<"  Repeat Remove Time = "<<elapsed<<endl;
  cout<<"RepeatFilteredCount="<<Overlapper.mSourceHash.mRepeatFilteredCount<<endl;
  cout<<"Mean # copies of filtered repeats = "<<Overlapper.mSourceHash.mMeanFilteredRepeatOccurence<<endl;
  cout<<"Max # copies of filtered repeat = "<<Overlapper.mSourceHash.mMaxFilteredRepeatOccurrence<<endl;

  Overlapper.mStDevRepeatCutoff = OptRepeatThreshold;
  Overlapper.mMaxMismatchPercent = OptMaxMismatchPercent;
  Overlapper.mRepeatCutoff = RepeatCutoff;
  Overlapper.mMultipleEdgeValues = OptMultipleEdgeValues;
  Overlapper.SetBandSize(OptBandSize);

  Overlapper.mSamplesPerRead = OptSamplesPerRead;
  Overlapper.mMinReadSize = OptMinReadSize;
  Overlapper.mInteriorSampling = OptInteriorSampling;
  Overlapper.mSampleGap = OptSampleGap;

  Overlapper.mFlipDirection = OptFlipDirection;
  Overlapper.mOriginRelative = OptOriginRelative;

  cout<<endl;
  cout<<"Computing overlaps..."<<endl;

  ozipstream zout;
  ofstream fout;

  if (OptZipOutput){
	 OpenOrQuit(OptOutFileName,zout);
  }else{
	 OpenOrQuit(OptOutFileName,fout);
  }

  cout<<OptQueryList.size()<<" Query files to search. "<<endl;
  for(int qIdx = 0;qIdx < OptQueryList.size();qIdx++){	 

		// ifstream qlin;
		//do{
		//string line;
		//GetLine(qlin,line);
		//cout<<"Line="<<line<<endl;
		//}while(!qlin.eof());
		//qlin.close();

	 izipstream qin;
	 if (!FileBad(OptQueryList[qIdx])){
		qin.open(OptQueryList[qIdx].c_str());
		cout<<OptQueryList[qIdx]<<endl;
	 }

	 if (OptZipOutput){
		StartTiming(StartTime);
		Overlapper.ComputeAllOverlaps(qin,zout);
	 }else{
		StartTiming(StartTime);
		Overlapper.ComputeAllOverlaps(qin,fout);
	 }
	 	 
	 qin.close();
  }	
  elapsed = EndTiming(StartTime);
  cout<<"done."<<endl;
  cout<<"Query Time = "<<elapsed<<endl;

  exit(0); // Maybe this will skip all of the class destructors?? 

  if (OptZipOutput) zout.close();
  else fout.close();


}	

