#include "stdafx.h" //inside project path

//#include "filemanage.h"
#include "sequence.h"
#include "AlignSequence.h" 
#include "strutils.h"



// Reads one integer menu choice from standard input.
// Returns the number entered, or -1 when the input is not a valid integer
// (e.g. the user typed letters, or the stream hit EOF). On failure the
// stream's error flags are cleared and the rest of the offending line is
// discarded, so the next prompt starts from a clean stream instead of
// re-reading the same bad characters forever.
int getnumberfromkeyboard ()
{
	int returnkeyvalue = -1; 
	if (!(std::cin >> returnkeyvalue))
	{
		// Extraction failed: recover the stream and report the sentinel.
		std::cin.clear();
		std::cin.ignore(4096, '\n');
		returnkeyvalue = -1;
	}
	return returnkeyvalue;
}

// Prints the interactive menu and returns the number the user typed
// (via getnumberfromkeyboard; -1 / unrecognized values make the caller quit).
int GetUserChoice ()
{
	// Menu text, emitted verbatim in order; the last entry is the prompt.
	static const char* const menuLines[] = {
		"Please enter choice for drosophila.nt \n",
		"  1 = Create Library \n",
		"  2 = N/A \n",
		"  3 for perfect alignment test \n",
		"  4 for not perfect alignment text \n",
		"  5 for not perfect alignment text with deletions \n",
		"  6 for C++ AMP Metagenomics REAL DATA \n",
		"  7 for C++ AMP Metagenomics THEORETICAL DATA \n",
		"  8 for C++ AMP Performance Comparison \n",
		"  9 for C++ AMP Performance Comparison (trial1) \n",
		"  10 for C++ AMP Performance Comparison (OverClock) \n",
		" \n\nany other key to quit \n",
		"--->"
	};

	for (size_t i = 0; i < sizeof(menuLines) / sizeof(menuLines[0]); ++i)
		std::cout << menuLines[i];

	return getnumberfromkeyboard ();
}
// Expected number of hash collisions when storing n random items into a
// table with m slots (generalized birthday problem):
//   collisions = n - m + m * ((m - 1) / m)^n
// http://en.wikipedia.org/wiki/Birthday_problem#Generalizations
// The result is truncated to an integer count.
int GetHashCollisionTheoretical(unsigned int numberOfItemsToStore, unsigned int hashTableSize)
{
    const double slots = (double) hashTableSize;
    // Probability a given slot is missed by a single insert.
    const double missProbability = (slots - 1.0) / slots;
    // Expected count of slots still empty after all inserts.
    const double expectedEmpty = std::pow(missProbability, (double) numberOfItemsToStore) * slots;
    const double expectedCollisions = expectedEmpty - slots + (double) numberOfItemsToStore;
    return (int) expectedCollisions;
}
int _tmain(int argc, _TCHAR* argv[])
{
	Sequence Library ("d:\\drosoph.nt");
	Sequence PerfectProbe ("D:\\TestSequenceMod1.txt");
	Sequence NotPerfectProbe ("D:\\TestsequenceMod3.txt"); 
	Sequence NotPerfectProbe2 ("D:\\TestsequenceMod4.txt"); 
	AlignSequence Aligner; 


	while (true)
	{

		switch (GetUserChoice ())
		{
		case 1:
			//Create Hash Table 
			/*Aligner.CreateLibrary (Library); */
			Library.createHashTable (); 
			cout <<"Nucleoties in Hash = " << Library.NucleotiesInHash << endl; 
			cout <<"Sequence Length = " << Library.sequenceLength << endl; 
			cout <<"Expected collisions =" << Library.expectedHashCollisions << endl; 
			cout <<"Hash Table Size = " << Library.getHashTableSize () << endl; 
			cout <<"Actual Collisions = " << Library.hashCollisions << endl; 
			cout <<"\n Library = " << Library.header << endl; 
			break; 
		case 2:
			//find probe in library 
			Library.createHashTable (); 
			break;
		case 3:
			cout << "\n\n\n Perfect Alignment Test, Expected 1600 got " << Aligner.Align (PerfectProbe, Library)<< "\n\n\n";
			cout << "\n Time to Align " << Aligner.getTimetoAlign () << "\n";
			break;
		case 4:
			cout << "\n\n\n NotPerfectProbe Alignment Test, Expected 1600 got " << Aligner.Align (NotPerfectProbe, Library)<< "\n\n\n";
			cout << "\n Time to Align " << Aligner.getTimetoAlign () << "\n";
			break;
		case 5: 
			cout << "\n\n\n NotPerfectProbe2 Alignment Test, Expected 1600 got " << Aligner.Align (NotPerfectProbe2, Library,9)<< "\n\n\n";
			cout << "\n Time to Align " << Aligner.getTimetoAlign () << "\n";
			break;
		case 6: 
			{
				Sequence libraryRef("D:\\ame_ref_Amel_4.5_chrLG1.nt");
				if(libraryRef.sequenceLength > 1200000)
					libraryRef.NucleotiesInHash = 11;
				if(libraryRef.sequenceLength > 2000000)
					libraryRef.NucleotiesInHash = 12;

				libraryRef.SetTitle("D:\\ame_ref_Amel_4.5_chrLG1.nt");
				libraryRef.createHashTableWithAMP();
				Sequence probeRef1, probeRef2, probeRef3;
				int beginRegion = libraryRef.sequenceLength /10;
				int middleRegion = libraryRef.sequenceLength /2;
				int EndRegion = libraryRef.sequenceLength - beginRegion;
				libraryRef.RetrieveSubseqence(beginRegion,75,probeRef1);
				libraryRef.RetrieveSubseqence(middleRegion,75,probeRef2);
				libraryRef.RetrieveSubseqence(EndRegion,75,probeRef3);
				int location = 0;
				location = probeRef1.ResolveAlignment(libraryRef);
				location = probeRef2.ResolveAlignment(libraryRef);
				location = probeRef3.ResolveAlignment(libraryRef);
			}
				break;
		case 7:
			{
			Sequence libraryRef(4096000,12, 1);
				libraryRef.SetTitle("4Million Library (1)");
				libraryRef.createHashTableWithAMP();
				Sequence probeRef1, probeRef2, probeRef3;
				probeRef1.SetTitle("Perfect 75 subsequence probe from Location 5000");
				libraryRef.RetrieveSubseqence(5000,75,probeRef1);
				probeRef2.SetTitle("Perfect 75 subsequence probe from Location 2M");
				libraryRef.RetrieveSubseqence(2000000,75,probeRef2);
				probeRef3.SetTitle("Perfect 75 subsequence probe from Location 4M");
				libraryRef.RetrieveSubseqence(4000000,75,probeRef3);
				int location = 0;
				location = probeRef1.ResolveAlignment(libraryRef);
				location = probeRef2.ResolveAlignment(libraryRef);
				location = probeRef3.ResolveAlignment(libraryRef);
			}
			break;
		case 8:
			//hash table performance testing
			{
				ofstream out("D:\\bioresults6.csv");
				out << "Library Size, NucleotidesInHash, HastTableSize,TestRun, Collisions Expected, Collisions Actual,HashTimeNormal, HashTimeAMP, ImprovementFactor\n" << endl;

				for(int libsize = 500; libsize < 6000000; libsize*=2)
				{
					for(int nucleotideHashLen = 4; nucleotideHashLen < 13; nucleotideHashLen++)
					{
						//Skip any trials where we loose too much to collisions
						unsigned int hastTableSize =  pow (4,nucleotideHashLen);
						unsigned int collisions = GetHashCollisionTheoretical(libsize,hastTableSize);

						//don't allow more than 20% of the library to be lost due to collisions

						double collisionLossPercentage = (collisions* 100 / libsize);
						if(collisionLossPercentage > 20)
							continue;

						//don't let the random likelihood of a random 'hash hit' exceed 25%
						double hashTableFillPercentage = (libsize - collisions)*100/hastTableSize;
						if(hashTableFillPercentage > 25)
							continue;


						for (int testnumber = 0; testnumber < 2; testnumber++)
						{
							
							Sequence library(libsize,nucleotideHashLen, testnumber+1);
							library.createHashTable();
							// create normal hash and store times
							double normalHashTime = library.timeToCreateHash;
							// create AMP hash and store times
							library.createHashTableWithAMP();
							double ampHashTime = library.timeToCreateHash;

							//compute hash speed up
							double percentImproved = normalHashTime/ampHashTime;
							if(testnumber == 0)
								continue;

							 

							out << libsize << ",";
							out << nucleotideHashLen << ",";
							out << library.getHashTableSize() << ",";
							out << testnumber << ",";
							out << library.GetHashCollisionTheoretical(libsize,library.getHashTableSize()) << ",";
							out << library.GetActualHashCollsions() << ",";
							out << normalHashTime << ",";
							out << ampHashTime << ",";
							out << percentImproved << "\n";

							cout << "Test " << testnumber << " libsize=" << libsize << " nucleotideHashLen=" 
								<< nucleotideHashLen << " % improve=" << percentImproved << endl;
						}


					}


				}
				out.close();
			}

			break;
		case 9:
			//hash table performance testing
			{
				
							
				ofstream out("D:\\biosearch7.csv");
				out << "Library Size, NucleotidesInHash, HastTableSize,TestRun, Collisions Expected, Collisions Actual,HashTimeNormal, HashTimeAMP, ImprovementFactor\n" << endl;

				for(int libsize = 500; libsize < 6000000; libsize*=2)
				{
					for(int nucleotideHashLen = 4; nucleotideHashLen < 13; nucleotideHashLen++)
					{
						//Skip any trials where we loose too much to collisions
						unsigned int hastTableSize =  pow (4,nucleotideHashLen);
						unsigned int collisions = GetHashCollisionTheoretical(libsize,hastTableSize);

						//don't allow more than 20% of the library to be lost due to collisions

						double collisionLossPercentage = (collisions* 100 / libsize);
						if(collisionLossPercentage > 20)
							continue;

						//don't let the random likelihood of a random 'hash hit' exceed 25%
						double hashTableFillPercentage = (libsize - collisions)*100/hastTableSize;
						if(hashTableFillPercentage > 25)
							continue;


						for (int testnumber = 0; testnumber < 2; testnumber++)
						{
							
							Sequence library(libsize,nucleotideHashLen, testnumber+1);
							library.createHashTable();
							// create normal hash and store times
							double normalHashTime = library.timeToCreateHash;
							// create AMP hash and store times
							library.createHashTableWithAMP();
							double ampHashTime = library.timeToCreateHash;

							//compute hash speed up
							double percentImproved = normalHashTime/ampHashTime;
							if(testnumber == 0)
								continue;

							 

							out << libsize << ",";
							out << nucleotideHashLen << ",";
							out << library.getHashTableSize() << ",";
							out << testnumber << ",";
							out << library.GetHashCollisionTheoretical(libsize,library.getHashTableSize()) << ",";
							out << library.GetActualHashCollsions() << ",";
							out << normalHashTime << ",";
							out << ampHashTime << ",";
							out << percentImproved << "\n";

							cout << "Test " << testnumber << " libsize=" << libsize << " nucleotideHashLen=" 
								<< nucleotideHashLen << " % improve=" << percentImproved << endl;
						}


					}


				}
				out.close();
			}

			case 10:
			//hash table performance testing
			{
				
							
				ofstream out("D:\\biosearch8.csv");
				out << "Library Size, NucleotidesInHash, HastTableSize,TestRun, Collisions Expected, Collisions Actual,HashTimeNormal, HashTimeAMP, ImprovementFactor\n" << endl;

				for(int libsize = 500; libsize < 6000000; libsize*=2)
				{
					for(int nucleotideHashLen = 4; nucleotideHashLen < 13; nucleotideHashLen++)
					{
						//Skip any trials where we loose too much to collisions
						unsigned int hastTableSize =  pow (4,nucleotideHashLen);
						unsigned int collisions = GetHashCollisionTheoretical(libsize,hastTableSize);

						//don't allow more than 20% of the library to be lost due to collisions

						double collisionLossPercentage = (collisions* 100 / libsize);
						if(collisionLossPercentage > 20)
							continue;

						//don't let the random likelihood of a random 'hash hit' exceed 25%
						double hashTableFillPercentage = (libsize - collisions)*100/hastTableSize;
						if(hashTableFillPercentage > 25)
							continue;


						for (int testnumber = 0; testnumber < 2; testnumber++)
						{
							
							Sequence library(libsize,nucleotideHashLen, testnumber+1);
							library.createHashTable();
							// create normal hash and store times
							double normalHashTime = library.timeToCreateHash;
							// create AMP hash and store times
							library.createHashTableWithAMP();
							double ampHashTime = library.timeToCreateHash;

							//compute hash speed up
							double percentImproved = normalHashTime/ampHashTime;
							if(testnumber == 0)
								continue;

							 

							out << libsize << ",";
							out << nucleotideHashLen << ",";
							out << library.getHashTableSize() << ",";
							out << testnumber << ",";
							out << library.GetHashCollisionTheoretical(libsize,library.getHashTableSize()) << ",";
							out << library.GetActualHashCollsions() << ",";
							out << normalHashTime << ",";
							out << ampHashTime << ",";
							out << percentImproved << "\n";

							cout << "Test " << testnumber << " libsize=" << libsize << " nucleotideHashLen=" 
								<< nucleotideHashLen << " % improve=" << percentImproved << endl;
						}


					}


				}
				out.close();
			}

			break;
		default: 
			exit (0);
		}

	}



	return 0;
}