/*
You have a file with millions of lines of data. 
Only two lines are identical;
the rest are all unique. 
Each line is so long that it may not even fit in memory. 
What is the most efficient solution for finding the identical lines? 
*/

#include "junix.h"

#include <errno.h>

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include <unordered_map>
#include <vector>

#include <boost/functional/hash.hpp>
using namespace std;

typedef std::vector<off_t> CHAIN; 
typedef std::vector<CHAIN> HASH_TABLE;

void jfind(const char* file) {
	const uint64_t prime = 16715999;
	HASH_TABLE hashtable(prime);

	FILE *fp=fopen(file,"r");
	if(!fp) {
		perror("fopen");
		exit(EXIT_FAILURE);
	}

	const int sz = 8196;
	char buf[sz];
	boost::hash<const char*> hasher;
	off_t offset = 0;
	while (fgets(buf,sz,fp)!=NULL) {
		buf[sz-1]='\0';
		uint64_t code = hasher(buf);
		code %= prime;
		hashtable[code].push_back(offset);
		offset = ftell(fp);
	}

	for (int i=0;i<prime;i++) 
		if (hashtable[i].size() >= 2) 
			for (int j=0;j<hashtable[i].size();j++) {
				typedef std::vector<std::string> VS;
				VS vs;
				off_t off = hashtable[i][j];
				fseek(fp,off,SEEK_SET);
				if (fgets(buf,sz,fp)!=NULL) 
					vs.push_back(std::string(buf));
				sort(vs.begin(),vs.end());
				VS::iterator pos;
				pos = std::adjacent_find(vs.begin(),vs.end());
				if (pos != vs.end()) {
					printf("%s\n",pos->c_str());
				}
			}

}

int main(int argc, char **argv)
{
	jfind("BIGTXT");
}
