#!/usr/bin/env ruby

	NUM_OF_PAGES = 1   # NOTE(review): not referenced anywhere in this file — confirm external use
	NUM_OF_SEARCHS = 2 # NOTE(review): not referenced in this file (name looks like a typo for SEARCHES)
	P = 1211           # base (multiplier) of the polynomial rolling hash, see init_expTable/blockhash

	require 'readers'
	
	# Entry point: index the master document in blocks of @bsize words, then
	# scan the second document for overlapping blocks via search_overlaps().
	# Reads gpl.txt (master) and small_gpl.txt (suspected copy) through the
	# project's Readers module; on a ReaderError the error is printed and the
	# corresponding content stays empty.
	def init()
		textfile = "gpl.txt"          # master document
		textfile2 = "small_gpl.txt"   # document compared against the master
		@bsize = 5        # number of words per hashed block
		@table_size = 357 # number of buckets in @indexTable (hash modulus)
		@content = Array.new(0) # Array of elements [w, pos]
		@content_2ndfile = Array.new(0)
		@indexTable = Array.new(@table_size) {Array.new(0)} # bucket -> list of block start indices in @content
		@expTable = Array.new(0) # P**i % @table_size for i in 0...@bsize, filled by init_expTable
		@last_fbvalue = 0 # value of first word hash of the block
		@last_hashvalue = 0 # value of the last block hash
		## Original file ##
		i = 0
		init_expTable()
		puts "== using module Readers =="
		begin
			text = Readers::get_text(textfile)
			@content = mysplit(text) 
		rescue ReaderError => e
			puts e # NOTE(review): after a read failure we continue with an empty @content
		end
		
		puts "-- first --"
		# index the master in consecutive, NON-overlapping blocks of @bsize words
		# NOTE(review): the `length-1` bound skips a possible final 1-word block — confirm intended
		while i < @content.length-1 
			wlist = get_words_master(i, @bsize)
			hashValue = blockhash(wlist)  # i is the index of the first word in @content
			@indexTable[hashValue] << i 
			puts "#{wlist} ---> index #{@content[i][1]} hashValue -> #{hashValue}"
			i = i+@bsize
		end # while
		
		## Second file ##
		i = 0
		puts "-- second --"
		begin
			text = Readers::get_text(textfile2)
			@content_2ndfile = mysplit(text) 
		rescue ReaderError => e
			puts e
		end
		search_overlaps()
		puts "== end =="
	end #init
	
	# Precompute the per-position multipliers of the polynomial hash:
	# @expTable[i] = P**i % @table_size for i in 0...@bsize.
	def init_expTable()
		@bsize.times do |exp|
			@expTable << (P**exp) % @table_size
		end
	end
	
	# Polynomial hash of a block of words: sum of word.hash * P**position,
	# reduced mod @table_size. The first word gets the highest power
	# (P**(@bsize-1)), the last word gets P**0. Also caches the state the
	# rolling hash needs: @last_hashvalue (the hash just computed) and
	# @last_fbvalue (the first word's contribution).
	def blockhash(a)
		contributions = []
		power = @bsize-1
		a.each do |word|
			contributions << (word.hash * @expTable[power]) % @table_size
			power -= 1
		end
		total = contributions.inject(0) {|acc, c| acc + c}
		@last_hashvalue = total % @table_size
		@last_fbvalue = contributions[0] # only needed when hashing the copy, not the master document
		@last_hashvalue # hash of the block
	end #blockhash
	
	# blockhash using the Bentley Ilroy algorithm
	# a: word list
	# Rolling (Bentley-McIlroy) variant of blockhash for a window shifted one
	# word to the right: remove the previous first word's contribution,
	# multiply the remainder by P and add the new last word's hash.
	# a: word list of the NEW window (a[0] .. a[@bsize-1])
	def blockhash_Bentley(a)
		rolled = ((@last_hashvalue - @last_fbvalue) * P) + a[@bsize-1].hash
		@last_hashvalue = rolled % @table_size
		@last_fbvalue = (a[0].hash * @expTable[@bsize-1]) % @table_size
		rolled % @table_size # hash of the new block
	end # blockhash_Bentley

	# search for overlapping blocks between the master document and the second file (both arrays of [w, pos])
	# Scan the second document and report every block that also occurs in the
	# master document.
	#
	# For each window of @bsize words from @content_2ndfile, compute its hash
	# (full recompute after a jump, rolling Bentley-McIlroy hash when sliding
	# by one word), look it up in @indexTable and verify candidates with
	# block_control(), which filters hash collisions and, on success, fills
	# @extended_index with the maximally extended match.
	#
	# Fixes over the previous revision:
	#  * the minimum-window guard used a hard-coded 5 instead of @bsize;
	#  * on a hash hit the old code advanced `i` (and printed a message)
	#    inside the loop over ALL candidate start indices, so a bucket with
	#    several entries moved the cursor more than once per window while
	#    reusing a stale wlist; candidates are now only probed, and `i`
	#    advances exactly once per window.
	def search_overlaps()
		flag = 0 # 0 -> full blockhash, 1 -> rolling Bentley-McIlroy hash
		i = 0
		
		while i < @content_2ndfile.length
			wlist = get_words(i, @bsize) # current window of words
			return if (wlist.size < @bsize) # not enough words left for a full block
			if (i == 0) || (flag == 0)
				index = blockhash(wlist)
			else
				index = blockhash_Bentley(wlist)
			end #if
			match = nil
			if (search_hashValue(index) == true)
				# probe every candidate start index stored in this bucket;
				# block_control rejects collisions and records @extended_index
				match = @indexTable[index].find {|start| block_control(wlist, start, i)}
			end #if
			if match
				size = @extended_index["end_copy"] - @extended_index["start_copy"]
				puts "!!== block of #{size} words found ==!!" 
				ext_list = get_words(@extended_index["start_copy"], size)
				puts ext_list
				i = i+size # skip past the whole extended match
				flag = 0   # window jumped: next hash must be a full recompute
			else
				puts "== block not found =="
				puts wlist
				i = i+1
				flag = 1 # window slides by one: rolling hash is valid
			end #if
		end #while
	end #search
	
	# check whether the block found via its hash really is the same block (guards against hash collisions)
	# returns true on a confirmed match, false otherwise
	# index_first: index of the first word of the block in @content
	# index_second: index of the first word of the block in @content_2ndfile
	# Verify that a hash-table hit is a real match and not a collision:
	# compare the candidate master block word-by-word against wlist and, when
	# they are equal, extend the match via extend_block.
	# wlist: word block taken from @content_2ndfile
	# index_first: index of the block's first word in @content
	# index_second: index of the block's first word in @content_2ndfile
	# Returns true on a confirmed match, false on a collision.
	def block_control(wlist, index_first, index_second)
		candidate = get_words_master(index_first, @bsize)
		return false unless candidate == wlist # collision: same hash, different words
		extend_block(candidate, index_first, index_second)
		true
	end
	
	# i: index in @content
	# j: index in @content_2ndfile
	# use only the indices
	# Extend a verified block match to the left and to the right, one word at
	# a time, while the two documents keep agreeing.
	# i: index in @content of the first word of the matched block
	# j: index in @content_2ndfile of the first word of the matched block
	# Fills @extended_index with "start_master"/"start_copy" (inclusive) and
	# "end_master"/"end_copy" (exclusive) word indices of the maximal match.
	#
	# Fixes over the previous revision: bounds are checked BEFORE each array
	# access. The old loops dereferenced first and broke afterwards, so a
	# match starting at index 0 read @content[-1] (Ruby wraps negative
	# indices to the end of the array) and a match reaching the end of either
	# word list crashed with NoMethodError on nil[0].
	def extend_block(wlist_master, i, j)
		@extended_index = {} # keys: "start_master", "start_copy", "end_master", "end_copy"
		# left extension
		ext_i = i-1
		ext_j = j-1
		while ext_i >= 0 && ext_j >= 0 && @content[ext_i][0] == @content_2ndfile[ext_j][0]
			ext_i -= 1
			ext_j -= 1
		end #while
		@extended_index["start_master"] = ext_i+1
		@extended_index["start_copy"] = ext_j+1
		# right extension
		ext_i = i+@bsize
		ext_j = j+@bsize
		while ext_i < @content.size && ext_j < @content_2ndfile.size && @content[ext_i][0] == @content_2ndfile[ext_j][0]
			ext_i += 1
			ext_j += 1
		end #while
		@extended_index["end_master"] = ext_i
		@extended_index["end_copy"] = ext_j
		if ext_i > i+@bsize || @extended_index["start_master"] < i
			puts "< extension done >"
		end
	end # extend_block
	
	# convert accented vowels in place
	# Replace accented vowels with their plain ASCII equivalent, in place
	# (à->a, ì->i, ò->o, ù->u, è/é->e). Returns s in all cases.
	#
	# Fix: the old `when /à/: "a"` colon syntax is Ruby 1.8-only and a syntax
	# error on Ruby >= 1.9; rewritten with the portable `then` form.
	def remove_accent!(s)
		s.gsub!(/[àéèìòù]/) do |c|
			case c
				when "à" then "a"
				when "ì" then "i"
				when "ò" then "o"
				when "ù" then "u"
				else          "e" # covers both è and é
			end
		end
		return s
	end

	# parse s into a sequence of words returning the list of words 
	# together with their starting position in the input
	# Parse s into a sequence of words, returning the list of
	# [downcased_word, start_position] pairs; words shorter than wlimit
	# characters are discarded and accented vowels are normalized.
	#
	# Fix: the old character class [[:alpha:]|àèéìòù] contained a stray
	# literal '|' (alternation has no meaning inside a class), so '|' counted
	# as a word character and "foo|bar" tokenized as one word.
	def mysplit(s, wlimit = 3)
		wlist = []
		word_def = /[[:alpha:]àèéìòù]+/ # one or more letters (incl. accented vowels)
		s.scan(word_def) do |w| 
			if w.size >= wlimit
				wpos = $`.size          # starting position in s of w
				remove_accent!(w)       # convert accented chars in place
				w.downcase!             # convert case
				wlist << [w, wpos]      # save in list
			end
		end
		wlist
	end

	# Return up to k consecutive words from @content_2ndfile starting at
	# index n; returns a shorter list when the word list runs out.
	def get_words(n, k)
		collected = []
		(n...n+k).each do |idx|
			entry = @content_2ndfile[idx]
			return collected if entry.nil? # past the end of the list
			collected << entry[0]
		end
		collected
	end #get_words
	
	# Return up to k consecutive words from the master document (@content)
	# starting at index n; returns a shorter list when the list runs out.
	def get_words_master(n, k)
		words = []
		idx = n
		limit = n + k
		while idx < limit
			entry = @content[idx]
			break if entry.nil? # past the end of the list
			words << entry[0]
			idx += 1
		end
		words
	end #get_words

	# True when the bucket for this hash value holds at least one master
	# block start index.
	def search_hashValue(value)
		bucket = @indexTable[value]
		!bucket[0].nil?
	end #search hash value
# Script entry point: index the master document and scan the second one.
init()

=begin
	Still to be implemented:
=end
