#!/usr/bin/env python
# 
# patchchecker.py
# 
#  This file contains functions for recognizing and checking for the match of a patch
from patch import *
import _pg, pg
import datetime
import re
import time
import os
import popen2
import sys
#import threading, copy
import pickle, pickler
import hotshot, hotshot.stats # for profiling

def extractPatchContext(body):
	"""takes in an email and returns a list of (context) patch objects contained therein

	Implemented as a line-by-line state machine over the email body.  Since
	patches arrive by email, lines may have been wrapped by a mail client;
	when a line does not match what the current state expects, it is usually
	glued back onto the previous line before the patch is given up on.
	"""
	patches = [] # instantiate return list
	
	patchHasContextDiff = False # we set this true if body has a context diff, otherwise won't waste time parsing it

	# hunk separator: a line of 15 stars
	contextHunkFirst = re.compile("\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*$")
	
	# old-file hunk header, e.g. "*** 23,34 ****"; A = two-number form, B = single-number form
	contextHunkSecond = re.compile("\*\*\* [0-9,]+ \*\*\*\*$")
	contextHunkSecondA = re.compile("\*\*\* [0-9]+,[0-9]+ \*\*\*\*$")
	contextHunkSecondB = re.compile("\*\*\* [0-9]+ \*\*\*\*$")
	
	# new-file hunk header, e.g. "--- 23,34 ----"
	contextHunkMiddle = re.compile("--- [0-9,]+ ----$")
	contextHunkMiddleA = re.compile("--- [0-9]+,[0-9]+ ----$")
	contextHunkMiddleB = re.compile("--- [0-9]+ ----$")
	
	contextHeaderStart = re.compile("\*\*\*\s\S+\s+[^-*]")# *** whitespace filename whitespace [anything except - and *] (prevents catching headers and some hunk headers)
	contextHeaderEnd = re.compile("---\s\S+\s+[^-]")
	contextHunkLine = re.compile("  |! |\+ |- |\t") # catch any normal hunk line
	# logic: valid patch lines start with: '  ' or '! ' or '+ ' or '- ' or [tab]

	emailLines = body.split('\n')
	# cheap pre-scan: only run the full state machine if a context hunk header exists
	for line in emailLines:
		if contextHunkSecond.match(line):
			patchHasContextDiff = True
			break
		
	inEmail, headerStart, headerStartWrap, headerEnd, headerEndWrap, hunkFirst, hunkSecond, hunkMiddle, inFirstHalf, inSecondHalf = range(10)
	state = inEmail	
	line = -1
	
	# working instance of patch
	patch = Patch()
	
	# remaining expected lines in the current hunk's old/new halves.
	# BUGFIX: initialized up front (mirrors extractPatchUnified); previously the
	# inFirstHalf/inSecondHalf states relied on hunkSecond/hunkMiddle having set these.
	newLines = oldLines = 0
	
	if patchHasContextDiff:
		while(line + 1 < len(emailLines)):
			# print state, emailLines[line+1]
			line += 1
			if state == inEmail:
				if contextHeaderStart.match(emailLines[line]):
					# found start of patch
					state = headerStart
					continue
			elif state == inFirstHalf:
				
				if contextHeaderStart.match(emailLines[line]):
					# found start of new patch
					state = headerStart
					patches.append(patch)
					patch = Patch()
					continue
				elif contextHunkMiddle.match(emailLines[line]):
					state = hunkMiddle
					patch.body = patch.body + '\n' + emailLines[line]
					continue
				# this can happen if there are no lines subtracted or changed, only added, so the second half of hunk is skipped
				elif contextHunkFirst.match(emailLines[line]):
					state = hunkFirst
					patch.body = patch.body + '\n' +emailLines[line]
					continue
				elif contextHunkLine.match(emailLines[line]) or emailLines[line] == '':
					patch.body = patch.body + '\n' + emailLines[line]
					oldLines -= 1
					continue
				elif oldLines > 0:
					# we can assume unrecognized line here is a wrap case
					patch.body = patch.body + ' ' + emailLines[line]
					continue
				elif oldLines == 0:
					# this is the case of an unrecognized line AFTER all the expected lines. It's not a hunkMiddle line
					# so unless the patch got cut off, lets assume its a wrap case. However, only do this once to avoid
					# taking in a whole bunch of email into the patch
					oldLines -= 1
					patch.body = patch.body + ' ' + emailLines[line]
					continue
				else:
					# unexpected case (oldLines went negative); bail out of this patch
					state = inEmail
					# BUGFIX: was 'p = Patch()', a dead assignment. The stale half-parsed
					# patch survived and its targetFileName could trigger a bogus append
					# at end of file.
					patch = Patch()
					continue
				
			elif state == inSecondHalf:
				if contextHeaderStart.match(emailLines[line]):
					# found start of patch
					state = headerStart
					# package this one up
					patches.append(patch)
					patch = Patch()
					continue
				elif contextHunkFirst.match(emailLines[line]):
					state = hunkFirst
					patch.body = patch.body + '\n' + emailLines[line]
					continue
				elif contextHunkLine.match(emailLines[line]) or emailLines[line] == '':
					patch.body = patch.body + '\n' + emailLines[line]
					newLines -= 1
					continue
				elif newLines > 1:
					# bad line assume wrap
					patch.body = patch.body + ' ' + emailLines[line]
					continue
				else:
					# fell into email. Note we can't wrap the last line, since often patches are followed by non patch lines, but they should not be unwrapped. 
					patches.append(patch)
					patch = Patch()
					state = inEmail
					continue
				
			elif state == headerStart:
				patch.format = "context"

				# info we want is one line back by now
				path = re.search("\s\S+\s", emailLines[line-1])
				path = re.sub('\s', '', path.group())
				patch.sourceDirectory = re.sub('[^/]*$','', path) # just get dir, erase all after last '/'
				patch.sourceFileName = path.split('/')[-1] # parse out filename, independent of path
				
				patch.body = emailLines[line-1]
				
				if contextHeaderEnd.match(emailLines[line]):
					state = headerEnd
					continue
				else:
					# start line must have wrapped
					state = headerStartWrap
					continue
			elif state == headerStartWrap:
				# glue the wrapped tail of the "***" header line back on
				patch.body = patch.body + ' ' + emailLines[line-1]
				if contextHeaderEnd.match(emailLines[line]):
					# continue to next part of header
					state = headerEnd
					continue
			elif state == headerEnd:
				# info we want is one line back by now
				path = re.search("\s\S+\s", emailLines[line-1])
				path = re.sub('\s', '', path.group())
				patch.targetDirectory = re.sub('[^/]*$','', path) # just get dir, erase all after last '/'
				patch.targetFileName = path.split('/')[-1] # parse out filename, independent of path
				
				patch.body = patch.body + '\n' + emailLines[line-1]
				
				if contextHunkFirst.match(emailLines[line]):
					patch.body = patch.body + '\n' + emailLines[line]
					state = hunkFirst
					continue
				else:
					# second line of header wrapped, record
					state = headerEndWrap
					continue
					
			elif state == headerEndWrap:
				# record wrapped tail of the "---" header line here
				patch.body = patch.body + ' ' + emailLines[line-1]
				if contextHunkFirst.match(emailLines[line]):
					state = hunkFirst
					patch.body = patch.body + '\n' + emailLines[line]
					continue
			elif state == hunkFirst:
				if contextHunkSecond.match(emailLines[line]):
					state = hunkSecond
					patch.body = patch.body + '\n' + emailLines[line]
					continue
				else:
					print "context parser error: didn't find second line in hunk header", line
					print "There was probably code after the hunk header bit, one the same line"
					print emailLines[line]
					patch = Patch()
					state = inEmail
					continue
			elif state == hunkSecond:
				# want info about size of hunk in old file
				
				if contextHunkSecondA.match(emailLines[line-1]):
					data = re.split('[\s,]', emailLines[line-1]) # re.split('[\s,]', '*** 23,34 ****) = ['***', '23', '34', '****']
					oldLines = int(data[2]) - int(data[1]) + 1
				elif contextHunkSecondB.match(emailLines[line-1]):
					# single-number header; NOTE(review): the unified parser treats this
					# case as 1 line, not 0 -- confirm intended semantics
					oldLines = 0
				else:
					print "error in hunk format! line:", line
					state=inEmail
					patch = Patch()
					continue
				if contextHeaderStart.match(emailLines[line]):
					# found start of patch
					state = headerStart
					
					patches.append(patch)
					patch = Patch()
					continue
				elif contextHunkMiddle.match(emailLines[line]):
					state = hunkMiddle
					patch.body = patch.body + '\n' + emailLines[line]
					continue
				elif contextHunkLine.match(emailLines[line]) or emailLines[line] == '':
					state = inFirstHalf
					patch.body = patch.body + '\n' + emailLines[line]
					continue
				else:
					print "context parser warning: bad line after 2nd line of hunk header", line
					print emailLines[line]
					# print body
					state = inEmail
					patch = Patch()
					
			elif state == hunkMiddle:
				# want info about size of hunk in new file
				if contextHunkMiddleA.match(emailLines[line-1]):
					data = re.split('[\s,]', emailLines[line-1]) # re.split('[\s,]', '--- 23,34 ----) = ['---', '23', '34', '----']
					newLines = int(data[2]) - int(data[1]) + 1
				elif contextHunkMiddleB.match(emailLines[line-1]):
					newLines = 0
				else:
					print "error in hunk format! line:", line
					state=inEmail
					patch = Patch()
					continue
				if contextHeaderStart.match(emailLines[line]):
					# found start of patch
					state = headerStart
					
					patches.append(patch)
					patch = Patch()
					continue
				elif contextHunkFirst.match(emailLines[line]):
					state = hunkFirst
					patch.body = patch.body + '\n' + emailLines[line]
					continue
				elif contextHunkLine.match(emailLines[line]) or emailLines[line] == '':
					state = inSecondHalf
					patch.body = patch.body + '\n' + emailLines[line]
					continue
				else:
					patches.append(patch)
					patch = Patch()
					state = inEmail
					continue
		# if we reach end of file and we have a targetfile name, just throw the patch in and see if it works
		if patch.targetFileName != '':
			patches.append(patch)	
	return patches
	

def extractPatchUnified(body):
	"""takes in an email and returns a list of (unified) patch objects contained therein

	Line-by-line state machine; unrecognized lines inside a hunk are treated
	as mailer-wrapped continuations while the hunk still expects more lines.
	"""
	patches = [] # instantiate return list
	
	patchHasUnifiedDiff = False # we set this true if body has unified hunks, otherwise won't waste time parsing it

	unifiedHunkStart = re.compile("@{2,2} -[0-9,]+ \+[0-9,]+ @{2,2}")
	
	unifiedHeaderStart = re.compile("---\s\S+\s+[^-]")# --- filename (date or any string which isn't lead by -, this stops from parsing some condensed hunk headers) 
	unifiedHeaderEnd = re.compile("\+\+\+\s\S+\s+[^-]")
	
	unifiedHunkLine = re.compile(" |\+|-|\t") # catch any patch line
	# logic: valid patch lines start with: ' ' or + or - or [tab]
	

	emailLines = body.split('\n')
	# cheap pre-scan: only run the full state machine if a unified hunk header exists
	for line in emailLines:
		if unifiedHunkStart.match(line):
			patchHasUnifiedDiff = True
			break
	
	inEmail, headerStart, headerStartWrap, headerEnd, headerEndWrap, hunkHeader, hunkBody  = range(7)
	state = inEmail
	line = 0
	
	# working instance of patch, plus remaining expected lines in the current hunk
	patch = Patch()
	newLines = oldLines = 0
	
	if patchHasUnifiedDiff:
		while(line + 1 < len(emailLines)):
			line += 1
			# print state, emailLines[line-1]
			if state == inEmail:
				if unifiedHeaderStart.match(emailLines[line]):
					# we're on a unified diff header
					state = headerStart
					continue
			elif state == headerStart:
				patch.format = "unified"
				# info we want is one line back
				path = re.search("\s\S+\s", emailLines[line-1])
				path = re.sub('\s', '', path.group())
				patch.sourceDirectory = re.sub('[^/]*$','', path) # just get dir, erase all after last '/'
				patch.sourceFileName = path.split('/')[-1] # parse out filename, independent of path
				
				patch.body = emailLines[line-1]
					
				if unifiedHeaderEnd.match(emailLines[line]):
					state = headerEnd
					continue
				else:
					# start line must have wrapped
					state = headerStartWrap
					continue
			elif state == headerStartWrap:
				# glue the wrapped tail of the "---" header line back on
				patch.body = patch.body + ' ' + emailLines[line-1]
				if unifiedHeaderEnd.match(emailLines[line]):
					# continue to next part of header
					state = headerEnd
					patch.body = patch.body + '\n' + emailLines[line]
					continue
			elif state == headerEnd:
				# info we want is one line back by now
				path = re.search("\s\S+\s", emailLines[line-1])
				path = re.sub('\s', '', path.group())
				patch.targetDirectory = re.sub('[^/]*$','', path) # just get dir, erase all after last '/'
				patch.targetFileName = path.split('/')[-1] # parse out filename, independent of path
				
				patch.body = patch.body + '\n' + emailLines[line-1]
				
				if unifiedHunkStart.match(emailLines[line]):
					patch.body = patch.body + '\n' + emailLines[line]
					state = hunkHeader
					continue
				else:
					# second line of header wrapped, record
					state = headerEndWrap
					continue
					
			elif state == headerEndWrap:
				# unwrap the line, add implied space
				patch.body = patch.body + ' ' + emailLines[line-1]
				if unifiedHunkStart.match(emailLines[line]):
					state = hunkHeader
					patch.body = patch.body + '\n' + emailLines[line]
					continue
				# else loop in this state, unwrapping each time, until we hit the first patch hunk header line. This might be too naive. 
				
			elif state == hunkHeader:
				# data is one line behind where we are
				hunkData = re.split('[, ]', emailLines[line-1])
				# turns @@ -16,3 +16,4 @@ into ['@@', '-16', '3', '+16', '4', '@@']
				# the problem is that -16,3 will just be -16 sometimes if the range is just one line
				if re.match('\+', hunkData[2]):
					oldLines = 1
					if(re.match('@@', hunkData[3])):
						newLines = 1
					else:
						newLines = int(hunkData[3])
				else:
					oldLines = int(hunkData[2])
					if(re.match('@@', hunkData[4])):
						newLines = 1
					else:
						newLines = int(hunkData[4])
				
				if unifiedHunkLine.match(emailLines[line]) or emailLines[line] == '':
					# normal diff line
					patch.body = patch.body + '\n' + emailLines[line] # record line
					state = hunkBody
				elif unifiedHeaderStart.match(emailLines[line]):
					patch.body = patch.body + '\n' + emailLines[line] # record line
					# we're on a unified diff header
					state = headerStart
				else:
					# guess we're on an invalid line; discard the partial patch.
					# BUGFIX: was 'p = Patch()', a dead assignment that never reset
					# the working patch.
					patch = Patch()
					state = inEmail
			elif state == hunkBody:
				# count lines ... always check the line we came from
				
				if(emailLines[line-1] == ''): # this prevents index error below if line is ''
					# normal line
					newLines -= 1
					oldLines -= 1
				elif emailLines[line-1][0] == '+':
					# this counts as a line in the new file
					newLines -= 1
				elif emailLines[line-1][0] == '-':
					# line in old version
					oldLines -= 1
				else:
					# normal line
					newLines -= 1
					oldLines -= 1
				
				if unifiedHeaderStart.match(emailLines[line]):
					# we're on a unified diff header
					state = headerStart
					# package what we got
					patches.append(patch)
					patch = Patch()
					continue
				elif unifiedHunkStart.match(emailLines[line]):
					# got into hunk header
					patch.body = patch.body + '\n' + emailLines[line] # record line
					state = hunkHeader
					
				elif unifiedHunkLine.match(emailLines[line]) or emailLines[line] == '':

					patch.body = patch.body + '\n' + emailLines[line] # record line
					
				else:
					# invalid line!
					if(newLines > 0 or oldLines > 0):
						# Since we still have lines to go in this hunk, lets try and wrap it
						patch.body = patch.body + ' ' + emailLines[line]
						# since this was not a patch line, we need to NOT count this as a line. New/old line counters will have been both decremented wrongly
						newLines += 1
						oldLines += 1
					else:
						# invalid line but we have accounted for all the hunk lines, so we must be in the email now
						state = inEmail
						# package what we got..
						patches.append(patch)
						patch = Patch()
						continue
		# at end of input, keep a patch that at least got a target filename
		if patch.targetFileName != '':
			patches.append(patch)
	return patches
		


def findPatches(db):
	"""extract patches from database and put them into a list of patches"""
	s = """select messageid, datetime, body from messages order by datetime""" # datetime ordering is important. If table is
	# constrained so that we don't have duplicate fileid's referenced, we want the first patch for that filename to be stored
	
	patches = []
	count = 0
	
	for (id, date, body) in db.query(s).getresult():
		count += 1
		if (count % 1000) == 0:
			print "scanned %i messages, there have been %i patches found" % (count, len(patches))
						
			
		parsedPatches = extractPatchContext(body)
		parsedPatches.extend(extractPatchUnified(body))
		
		for p in parsedPatches:
			p.date = datetime.date(int(date[:4]), int(date[5:7]), int(date[8:10]))
			p.emailbody = body
			p.messageID = id
			patches.append(p)
	return patches
	
	

def _findPossibleSourceFiles(db,patch):
	"""Return the set of full pathnames in the files table whose basename
	matches either filename mentioned in the patch header.

	Diff headers don't always carry directory information, so a bare name
	like main.py can correspond to several paths (hello/main.py,
	goodbye/main.py, ...).  Each patch names two files -- one is usually the
	.old/.orig copy -- so both are looked up and the results unioned; the
	caller later checks which candidates the patch actually applies to.

	THIS FUNCTION MAY BE DEPRECATED PENDING THE LINES OF CODE NOW PLACED
	ABOVE WHERE IT'S CALLED (see _buildFilenameHash/_getPossibleFilenameSet).
	"""
	candidates = set([])
	for name in (patch.sourceFileName, patch.targetFileName):
		s = "select distinct filename from files where '%s' = substring(filename from '[^/]*$')" % name.encode("string_escape")
		# union each query's rows into the result set
		candidates.update(db.query(s).getresult())
	return candidates
		
def _findRevDateRange(db, patch, timeDeltaBack, timeDeltaForward):
	# DEPENDANCY on the threads table - a table linking all the messages in the same thread. In this case we used it to check for the min/max date in the thread to 
	# avoid being tricked by a false date on the submission email. Of course we can still be tricked, but we error on the side of caution. 
	# find the minimum and maximum datetimes in the thread continaining the message we are examining. If you don't have the threads table, hack past this and just use
	#  the date of the patch. 
	minDate = db.query("select min(date) from ((select date(datetime) from threads, messages where threads.messageid = '%s' and threads.responseid = messages.messageid) UNION (select date(datetime) from threads, messages where threads.responseid = '%s' and threads.messageid = messages.messageid) UNION (select date(datetime) from messages where messageid = '%s')) as A" %(patch.messageID, patch.messageID, patch.messageID) ).getresult()[0][0]
	maxDate = db.query("select max(date) from ((select date(datetime) from threads, messages where threads.messageid = '%s' and threads.responseid = messages.messageid) UNION (select date(datetime) from threads, messages where threads.responseid = '%s' and threads.messageid = messages.messageid) UNION (select date(datetime) from messages where messageid = '%s')) as A" %(patch.messageID, patch.messageID, patch.messageID) ).getresult()[0][0]
	
	earlyDate = datetime.date(int(minDate[:4]), int(minDate[5:7]), int(minDate[8:10]))
	lateDate = datetime.date(int(maxDate[:4]), int(maxDate[5:7]), int(maxDate[8:10]))

	earlyDate -= datetime.timedelta(timeDeltaBack)
	lateDate +=  datetime.timedelta(timeDeltaForward) # gogo magic numbers (that number is in days)
	
	return (earlyDate, lateDate)
		
def _setupPatchTables(db):
		# --- Sets up Patch tables for clean run (db) ---
	# XXX might want to parameterize location of sql files
	
	try:
		print "cleaning Patch table..."
		dropTables = open("../sql/dropPatchTables.sql")
		db.query(dropTables.read())
		print "...OK!"
	except _pg.ProgrammingError:
			print "could not drop tables"
	try:
		print "creating patch table..."
		createTables = open("../sql/createPatchTables.sql")
		db.query(createTables.read())
		print "...OK!"
	except _pg.ProgrammingError:
			print "could not create patch tables"
	# ------

	
def _findPossibleTargets(db, results, patch, earlyDate, lateDate):
	# This function all the possible revisions of a file that this patch
	# could have been applied to.
	# All of the possible targets are stored in patch.possibleTargets
	# (and thus not returned)
	 
	
	# loop over each possible filename
	for filename in results:
#		print "filename in results is", filename
		if len(filename)  < 1:
			continue
		
		# print "Checking for changes to file:", filename, "in temporal proximity to the patch posting..."
		s = "select fileid, filename, revision from files where filename = '" + filename +"' and datetime > '" + earlyDate.isoformat() + "' and datetime < '" + lateDate.isoformat() + "' order by datetime"
		#print s
		pgo = db.query(s)
		if len(pgo.getresult())>0:
#			print "hits from sql for:", filename[0], "--",pgo.getresult()
			patch.possibleTargets.extend(pgo.getresult())

def _showstatus(percent, patches, patchCount, ltime):
	newpercent=float(patchCount)/float(len(patches))
	ntime=time.localtime()
	if(int (newpercent)-percent):
		timePerPercent=ntime-ltime
		print "%d\% done, about %d:%d left" % newpercent, timePerPercent/60, timePerPercent%60
		ltime=ntime
		
	return (int(newpercent), ltime)
	
def _buildFilenameHash(db):
	#build a list of filenames
	dbfilenames = db.query("select distinct filename from files").getresult()
	#index a list of full filenames, keyed by the filename sans dir structure
	filenamesHash={}
	for dirPlusName in dbfilenames:
		dirPlusName=dirPlusName[0] #it's in a tuple with nothing else.
		justName = re.sub(".*/","",dirPlusName)
		if filenamesHash.has_key(justName):
			filenamesHash[justName].append(dirPlusName)
		else:
			filenamesHash[justName] = [dirPlusName]
			
	return filenamesHash
		
def _getPossibleFilenameSet(patch, filenamesHash):
	results=set([])
	if(filenamesHash.has_key(patch.targetFileName)):
		results.update(filenamesHash[patch.targetFileName])
	if(filenamesHash.has_key(patch.sourceFileName)):
		results.update(filenamesHash[patch.sourceFileName])
	return results
	
def _findPossiblePatches(db, patches, timeDeltaBack, timeDeltaForward):
	
	# This loop is going to go through each patch and find all the possible
	# files a patch could apply to.
	# 	First we'll limit it to all the files that have the same name
	# 		Note: files with the same name can exist all across a directory structure
	#			and be different files
	#			_findPossibleSourceFiles looks for these possibilities
	#		Then from these possible matches, we filter the ones that were not updated
	#			Within the time frame the patch was sent.  
	#		_findRevDateRange gets the possible date range
	#		and _findPossibleTargets then filters the possible source files with the
	#		date range.
	#		If, in the end, there are no possible targets, then we can throw away 
	# 	this patch, as it does not need to be analyzed
	#	At the end of this loop patches has all the patches that were possibly
	# Accepted and may have been applyed.
	
	# variables reported to the screen/utility	
	patchCount = 0
	deleted=0
	lesspatches=[]
	percent=0
	lasttime=time.localtime()
	
	filenamesHash=_buildFilenameHash(db)
	
	for patch in patches:
		(percent, lasttime)=_showstatus(percent, patches, patchCount, lasttime)
		patchCount += 1
		if(patchCount%10==0):
			print "%d patches checked, %d accepted" % (patchCount, len(lesspatches))
		

		results = _getPossibleFilenameSet(patch, filenamesHash)
		if(len(results) == 0):
			continue
		
		#results=_findPossibleSourceFiles(db, patch)
#		print results

		(earlyDate, lateDate)=_findRevDateRange(db, patch, timeDeltaBack, timeDeltaForward)
#		print earlyDate, lateDate

		_findPossibleTargets(db, results, patch, earlyDate, lateDate)
						
#		print "possible targets for patch:", patch.possibleTargets
		
		# if a patch has no possible targets, we have no need to analyze it
		if patch.possibleTargets==[]:
			continue
		else:
			#save the patch
			lesspatches.append(patch)
			deleted+=1
			
	print "Deleted", deleted, " patches because they had no possible targets"
	return lesspatches
	
	
def findAcceptedPatches(db, repository, timeDeltaBack, timeDeltaForward, log):
	"""Main driver: find every emailed patch that appears to have been
	accepted into the repository, and record the outcome in the Patches table.

	For each patch mined from the message archive, candidate target files are
	found by basename and by revision activity near the posting date; each
	candidate revision is checked out and the patch is applied in *reverse*
	with --dry-run -- if the reverse patch applies cleanly, the change was
	already committed, i.e. the patch was accepted.

	db         -- pg database connection (messages/files/threads/Patches tables)
	repository -- object providing prepUpdate()/updateFileToRev() and a .path
	timeDeltaBack, timeDeltaForward -- days of slack around the thread dates
	log        -- NOTE(review): accepted but unused in this body; confirm intent
	"""
	# This is the main function called.
	# table stuff
	
	#_setupPatchTables(db)

	# first get a list of patchs
	patches = findPatches(db)
	print "found %d patches" % len(patches)

	# store cur dir so we can be polite later and return to it
	scriptPath = os.getcwd()
	# output some information to a file for debuging
	# out = file("patchresults", 'w')

	# running statistics for the summary printed at the end
	patchCount = 0
	totalFilesExamined = 0
	goodUnified = 0
	goodContext = 0

	
	repository.prepUpdate()
	
	#build a list of filenames
	filenamesHash=_buildFilenameHash(db)
			
		
	for patch in patches:
		patchCount += 1
		if patchCount % 10 == 0:
			print "examined %d patches" % patchCount
			print "good unified patches:", goodUnified
			print "good context patches:", goodContext
		foundPatchTarget = False # this variable goes true when we get a good patch hit
		
		#lookup possible full filenames+dir structures a patch could be meant for

		results = _getPossibleFilenameSet(patch, filenamesHash)
		#print patch.sourceFileName, patch.targetFileName, "results:", results
		if(len(results) == 0):
			continue
		
		#results=_findPossibleSourceFiles(db, patch)
#		print results

		# date window around the submission thread, then the candidate
		# revisions inside it (appended to patch.possibleTargets)
		(earlyDate, lateDate)=_findRevDateRange(db, patch, timeDeltaBack, timeDeltaForward)
#		print earlyDate, lateDate
		
		_findPossibleTargets(db, results, patch, earlyDate, lateDate)
						
		print "possible targets for patch:", patch.possibleTargets
		
		# loop until we get a good hit, then stop since we only get one hit per a patch	

		for fileid, filename, revision in patch.possibleTargets:
			totalFilesExamined += 1 # keep track of files examined for statistical purposes
			# print "** possible candidate is filename: %s and revision %s" %(filename, revision) 
			
			# now the fun part, check out the file in repository and see if the patch can be applied backwards to it.
			repository.updateFileToRev(filename, revision)
			# make a file of the patch
			tempfile = 'patch.tmp' # XXX might need to append a number to it, if residing on same file system
			# reverse patch it... (-R reverse, -l loose whitespace, -N ignore
			# already-applied, -F2 allow 2 lines of fuzz; c/u picks diff format)
			patchCmd = ''
			if(patch.format == "context"):
				patchCmd = "patch --dry-run --verbose -lRcN -F2 %s %s" % (filename, tempfile)
			elif(patch.format == "unified"):
				patchCmd = "patch --dry-run --verbose -lRuN -F2 %s %s" % (filename, tempfile)
			else:
				continue
			
			# reset stats
			fail = 0
			suc = 0
			fuzzSuc = 0
			offsetSuc = 0
			
			unreversed = stderr = False
					
			# write the patch body into a temp file inside the repository
			os.chdir(repository.path)
			patchfile = open(tempfile, 'w')
			patchfile.write(patch.body)
			patchfile.write('\n')# see if this fixes the error in middle of line problems with the patching process
			patchfile.close()
			
			
			# reverse patch it... (popen4 merges stdout+stderr into one stream)
			patchOut = popen2.popen4(patchCmd)[0].read()
			#print patchCmd
			#print patchOut
			
			#go back to script dir, since we're done running patch comparisons
			os.chdir(scriptPath)
			
			# scrape per-hunk results out of patch's verbose output
			for diffLine in patchOut.split('\n'):
				if "hunk" in diffLine:
					if "failed" in diffLine:
						fail += 1
					if "succeeded" in diffLine:
						suc += 1
						if(diffLine.find("offset") >= 0):
							offsetSuc += 1
						elif(diffLine.find("fuzz") >= 0):
							fuzzSuc += 1
				elif(diffLine.find("nrevers") >= 0):
					# matches "Unreversed"/"unreversed" warnings from patch
					unreversed = True
				elif(diffLine.find("patch: ****") >= 0):
					stderr = True
			error = "no error"
			if unreversed:
				error = "unreversed"
			if stderr:
				error = "stderr"
			if stderr and unreversed:
				error = "stderr and unreversed"
				
			patch.patchResults.append((suc, fail, error, patchOut))	# XXX see where this is going
			
			# this is TOTALLY arbitrary. Set the value patchScore is compared against to 0 to only allow perfect matches, but the 'elif' below catches these anything.
			if suc >= 1 and not unreversed and not stderr:
				# ex: 1/2 -> .25, 1/3 -> 1/9, 3/10-> 9/100
				patchScore = float(fail*fail) / float(suc*suc)
				# print "Patch score is ", patchScore
				# another magic number, see above math. Basically you need better than 1/3 or 2/7 for .1
				if patchScore <= .10:
					foundPatchTarget = True
					targetFileID = fileid 
					break # leave for loop, as we have this patch wrapped up
								
			# if the patch *just applies* nothing is said in the patch output as far as suc's and fail's go
			elif fail == 0 and suc == 0 and not unreversed and not stderr:
				# print "******* PATCH ACCEPTED BY LACK OF REASON TO REJECT IT ********"
				foundPatchTarget = True
				targetFileID = fileid
				break # leave for loop
			#  case where fail > 0 and suc = 0 can fall through
			
		#XXX This section enters to the database (out of the for loop should it be?)
		if foundPatchTarget:
			if(patch.format == "unified"):
				goodUnified += 1
			elif(patch.format == "context"):
				goodContext += 1
			# save results (patchOut/suc/fail etc. are from the winning
			# iteration of the loop above)
			s = "insert into Patches (TargetFileID, body, MessageID, patchOutput, targetFileName, targetDir, sourceFileName, sourceDir, suc, fail, fuzzSuc, offsetSuc) VALUES(%d, '%s', '%s', '%s', '%s', '%s', '%s', '%s', %d,%d,%d,%d)" % (targetFileID, patch.body.encode("string_escape"), patch.messageID, patchOut.encode("string_escape"), patch.targetFileName, patch.targetDirectory, patch.sourceFileName, patch.sourceDirectory, suc, fail, fuzzSuc, offsetSuc)

			try:
				db.query(s);
			except pg.ProgrammingError:
				print "duplicated key added, ignored"
		else:
			# save results anyway, leave fileid NULL
			s = "insert into Patches (body, MessageID, targetFileName, targetDir, sourceFileName, sourceDir) VALUES('%s', '%s', '%s', '%s', '%s', '%s')" % (patch.body.encode("string_escape"), patch.messageID, patch.targetFileName, patch.targetDirectory, patch.sourceFileName, patch.sourceDirectory)

			try:
				db.query(s);
			except pg.ProgrammingError:
				print "duplicated key added, ignored"
				
	print "total patches:", len(patches)
	print "good unified patches:", goodUnified
	print "good context patches:", goodContext
	
	print "average hits per a patch is:", float(totalFilesExamined) / float(len(patches))
	print "with a total of", totalFilesExamined, "possible hits discovered"
		
	# clean up
	os.system("rm -f *.rej")
	os.system("rm -f *.tmp")
	# return to dir we started in
	os.chdir(scriptPath)

	
def findAcceptedPatchesCluster(db, repository, timeDeltaBack, timeDeltaForward,cp, usingPickles=1):
	# This is the main function called.
	
	# Because there are steps in this function that take SUCH a long time to run
	# I've added a way to store the results of critical datamining steps so
	# that if you change underlying algorithms you don't have to remine data
	# to get your result.
	
	if(usingPickles):
		picklePath=pickler.initPickleDir(".pickles", "patchchecker")
	
	
	# store cur dir so we can be polite later and return to it
	scriptPath = os.getcwd()
	repository.prepUpdate()
	#if the pickle doesn't exist or we're not saving between runs, we need to
	# do this data collection step
	if(usingPickles and not pickler.pickleExists("patches.pickle", picklePath)): 
		_setupPatchTables(db)
	
		# first get a list of patchs
		patches = findPatches(db)
	#	print "found %d patches" % len(patches)
		
		#now filter the patches (see function for criteria)
		patches=_findPossiblePatches(db, patches, timeDeltaBack, timeDeltaForward)

		#save this object, just incase we need to rerun the script
		pickler.addPickle(patches, 'patches.pickle', picklePath)
		
	elif(usingPickles): 
		#It's an elif so you can see which if matches (I know it can be an else) 
		patches=pickler.getPickle('patches.pickle',picklePath)
		
	print "There are %d patches loaded." % len(patches)
	print "All patches found, getting ready to materialize"
	
	# Now this is where we prepare everything for the cluster nodes
	
	# Names of the directories that will be made in the repositories root.
	patchDir="tempPatches"
	objDir="tempPickles"
	# name of the object file the CP will be pickled in to
	CPObjFile="CP.pickle"
	
	
	# prepare the shell script
	shellscript = open("launcher.sh", 'w')
	header=\
"""#!bin/bash
#
#$ -cwd
#$ -j y
#$ -S /bin/bash
#
"""
	shellscript.write(header)
	
	#Now we need to make some directories
	os.chdir(repository.path)
	#save the aboslute path to be output
	RepoPath=os.getcwd()
	
	#make directories for patches and objects
	if(os.path.isdir(patchDir)): #if it's already here, delete it
		os.system("rm -rf %s" % patchDir)
	os.mkdir(patchDir)
	if(os.path.isdir(objDir)):
		os.system("rm -rf %s" % objDir)
	os.mkdir(objDir)
	
	#create CP objectfile
	CPObjFile=pickler.addPickle(cp, CPObjFile, os.path.join(os.getcwd(), objDir))
	print "created CP file in %s" % CPObjFile

	for patch in patches:
	# materialize each patch and patch object
	# I'm using the time as a way to make sure files don't have the same name
		t=time.time()
		patchF="patch.%f" %t
		patchObjF="patch.obj.%f" %t
		
		#write out the patch
		os.chdir(os.path.join(RepoPath, patchDir))
		fh = open(patchF, 'w')
		fh.write(patch.body)
		fh.write('\n')# see if this fixes the error in middle of line problems with the patching process
		fh.close()
		patchF=os.path.join(os.getcwd(),patchF)

		#write out the patch object
		pickler.addPickle(patch, patchObjF, os.path.join(RepoPath, objDir))
		patchobjectfilename=os.path.join(os.getcwd(),patchObjF)
		
		#now output the command that needs to be run by a node
		s="python clustermain.py %s %s %s\n" % (patchF, patchObjF, CPObjFile)
#		print s
		shellscript.write(s)

	os.chdir(scriptPath)
	shellscript.close()

	
	
			
			


