
import datetime
import os
import popen2
import Queue
import re
import sys
import threading
import time

import _pg

import repository

class FileError(Exception):
	"""Error intended for file-open failures.

	NOTE(review): open() actually raises IOError, so handlers catching
	this class never fire; kept for backward compatibility.
	"""
	pass

class CVSRepository(repository.Repository):
	
	def buildRepUsers(self, db, repLogFileName):
		try:
			print "cleaning dev table..."
			dropTables = open("../sql/dropDevTables.sql")
			db.query(dropTables.read())
			print "...OK!"
		except _pg.ProgrammingError, e:
			print "could not drop tables:", str(e)
		try:
			print "creatig dev table..."
			createTables = open("../sql/createDevTables.sql")
			db.query(createTables.read())
			print "...OK!"
		except _pg.ProgrammingError, e:
			print "could not create tables:", str(e)
		devnames = self.getDevNames(repLogFileName)
		
		for devname in devnames:
			sql = "INSERT INTO repusers (repname) values ('%s')" % devname
			db.query(sql)

	def createLog(self, cvsLogFileName):
		#change to cvs dir, saving cwd
		print "creating cvs log...", cvsLogFileName
		scriptPath = os.getcwd()
		os.chdir(self.path)
		s = 'cvs log > ' + cvsLogFileName
		os.system(s)
		#go back to where we ran script from			
		os.chdir(scriptPath)

	def getDevNames(self, cvsLogFileName):
		"takes in a cvslog and returns a list of people who commited to it"
		#make a set, add to set with dev names
		#open file
		try:
			print "opening file..."
			cvsLogFile = open(cvsLogFileName)
			print "...OK!"
		except FileError:
			print "Could not open file", cvsLogFileName
			sys.exit()
	
		names = set([])
		for line in cvsLogFile:
			mo = re.search("author: \w*;", line)
			if mo:
				line = re.sub("author: |;", '', mo.group(0))
				names.add(line)
		return names
			
	def parseLog(self, db, cvsLogFileName):
		#open file
		try:
			print "opening file..."
			cvsLogFile = open(cvsLogFileName)
			print "...OK!"
		except FileError:
			print "Could not open file", cvsLogFileName
			sys.exit()
		
		try:
			print "cleaning commit table..."
			dropTables = open("../sql/dropRepLogTables.sql")
			db.query(dropTables.read())
			print "...OK!"
		except _pg.ProgrammingError, e:
				print "could not drop tables:", str(e)
		try:
			print "creatig commit table..."
			createTables = open("../sql/createRepLogTables.sql")
			db.query(createTables.read())
			print "...OK!"
		except _pg.ProgrammingError, e:
				print "could not create tables:", str(e)
			
		#hopefully save some time in memory allocation
		filename = ''
		author = ''
		stime = ''
		linesAdded = '0'
		linesRemoved = '0'
		revision = ''
		nextLineIsRev = False
		nextLineIsData = False
		
		#files to be deleted
		delFiles = set()
		
	
		for line in cvsLogFile:
			if nextLineIsRev == True:
				#print line
				#previous line was a dashed line, so lets get the revision number
				if line.find("revision") < 0:
					#this means we hit a line with dashes INSIDE a commit comment
					continue 
				line = re.sub('^revision ', '', line)

				#print line
				#sometimes there is something after the revision, ex: revision 1.155  locked by: alex;
				line = re.sub('[^0-9]{2,}', '', line) #kill anything not numbers or a single non number (periods)
				line = re.sub('\n', '', line)
				#print line
				if line.find("locked") > 0:
					sys.exit()
				#if revision == "1.1.1.1":
				#	revision = '1.1'
				revision = line
				#print revision
				nextLineIsRev = False
				nextLineIsData = True
			elif nextLineIsData == True:
				#clip off date tag
				line = re.sub('^date: ', '', line)
				#find the chars leading up to the ;
				mo = re.match('[^;]*', line)
				stime = mo.group(0)
				#clip off the author tag
				line = re.sub('.*author: ', '', line)
				#find chars leading up to ;
				mo = re.match('[^;]*', line)
				author = mo.group(0)
				mo = re.search('lines', line)
				if mo:
					#clip off lines tag
					line = re.sub('.*lines: \+', '', line)
					#match next number
					mo = re.match('[0-9]*', line)
					linesAdded = mo.group(0)
					#clip off linesadded and newline
					line = re.sub('[0-9]* -|\n', '', line)
					linesRemoved = line
				else:
					linesadded = linesremoved = 0
				#flow control
				nextLineIsData = False
				
				#okay, we actually WANT attic files. If they have been deleted, but used in the past, 
				#they will have the attic in their name. We should strip 'Attic/' out to access them as normal. 
				filename = re.sub('Attic/', '', filename)
				
				#make statement to add commit data to sql
				#commitq = "INSERT INTO files (Datetime, AuthorPersonID, FileName, Revision) VALUES('" 
				#commitq = commitq + stime + "',(select PersonID from Cvsnames where CvsName = '"+ author +"'),'"
				if self.isInteresting(filename):
					isSource = True
				else:
					isSource = False
				#commitq = commitq + filename +"','"+ revision +"')"
				commitq = "INSERT INTO files (Datetime, RepID, FileName, Revision, linesAdded, linesRemoved, sourcefile) VALUES('%s',(select RepID from RepUsers where RepName = '%s'), '%s', '%s', %s, %s, %s)" % (stime, author, filename, revision, linesAdded, linesRemoved, isSource)
				print commitq
				db.query(commitq)
				#only parse source
				#if self.isInteresting(filename):
				#	try:
				#		#print "commiting file!"
				#		#print commitq
				#		db.query(commitq)
				#	except _pg.ProgrammingError, e:
				#		print "could not commit: " + filename
				#		print "because:", str(e)
				#		sys.exit()
				#	#else:
				#	#	pass
				#else:
				#	#we will delete this to a set of files so lxr doesn't waste time on it
				#	delFiles.add(filename)
				

				
			#if we come to a new RCS file section
			mo = re.match('^Working file: ', line)
			if mo:
				#print line
				filename = re.sub('Working file: ', '', line)#kill first directory, this is the module name...the rest will be the filepath in the repository
				filename = re.sub('\s', '', filename)
				#filename = re.sub(',v\n', '', filename)
				print 'file: ', filename
				continue
			#if we get a dashed page break
			mo = re.match('^----------------------------\n', line)
			if mo:
				nextLineIsRev = True
				continue
			mo = re.match('^revision [0-9]', line)
			
		#del files that are uninteresting
		for e in delFiles:	
			s = 'rm -f ' + e
			print s
			os.system(s)
		#go back to where we ran script from	
		
	def prepUpdate(self):
		"""updates cvs such that every file is present, this means we can individually update every file w/o error"""
		s = 'cvs update -CARd'
		scriptPath = os.getcwd()
		os.chdir(self.path)
		print s
		popen2.popen4(s)
		os.chdir(scriptPath)
		
	def updateFileToRev(self, filename, revision):
		s = 'cvs update -CAdr %s %s' % (revision, filename)
		print s
		scriptPath = os.getcwd()
		os.chdir(self.path)
		#try to update
		result = popen2.popen4(s)[0].read()
		#if result != '':
		print result
		while(result.find("[update aborted]") >= 0):#connec'tion refu'sed, shortened to maybe save time
			print s
			result = popen2.popen4(s)[0].read()
			print result
		os.chdir(scriptPath)

	def updateToDate(self, date):
		"takes in numerical date and year and updates the cvs working copy"
		#change to cvs dir, saving cwd
		scriptPath = os.getcwd()
		os.chdir(self.path)
		s = 'cvs up -C -A -d -P -D ' + str(date.year) + '-' + str(date.month) + '-' + str(date.day)
		print s
		os.system(s)
		os.chdir(scriptPath) #return to the directory we started in
		#else: 
		#	print "updated successful"


					
		
	def _tmaterialize(self,root, q, log=False):
		#This code is intended to be used by threads of the materialize function
		# here's the command to checkout one file.  In order to avoid collisions, 
		# each thread has a directory, it checks out a file to that directory and 
		# then moves the file into the main tree with the revision attached to the 
		# filename.  We rm -rf the thread's directory because CVS leaves droppings 
		# on each checkout
		if log==True:
			cmd = """
rm -rf %(threadname)s
cvs -d /home2/cvsroot co -d %(threadname)s -r %(revision)s %(root)s/%(filepath)s >> MATERIALZATIONlog.txt
mkdir -p %(root)s/%(directory)s
mv %(threadname)s/%(filename)s %(root)s/%(filepath)s-%(revision)s"""
		else:
			cmd = """
rm -rf %(threadname)s
cvs -d /home2/cvsroot co -d %(threadname)s -r %(revision)s %(root)s/%(filepath)s
mkdir -p %(root)s/%(directory)s
mv %(threadname)s/%(filename)s %(root)s/%(filepath)s-%(revision)s"""
		threadname = threading.currentThread().getName()
		#this is a lame way to kill the script, if you create a file
		#named "quit" in the directory where this script was started
		#then the threads will all stop.  Otherwise you have to wait
		#until the whole program ends
		while not os.path.exists("quit"):
			#print "this thread is", threading.currentThread().getName()
			try:
				(filepath, revision) = q.get(0)
			except Queue.Empty, e:
				#print e
				return
			#check to see if the file has already been materialized.  This
			#way we can stop the script and pick up where we left off
			if os.path.exists("%(root)s/%(filepath)s-%(revision)s" % locals()):
				#print filepath + "-" + revision + " already exists, skipping"
				continue
			#set up some local variables and invoke the command
			filename = os.path.basename(filepath)
			directory = os.path.dirname(filepath)
			#print filepath, revision
			#print cmd % locals()
			os.system(cmd % locals())
		
	def materialize(self,reposDir,db,destination=os.getcwd(), numthreads=100, log=False):
		mythreads=[]
		# See if the repository we want to materialize already exists
		#print "Materializing %(reposDir)s using data in %(databname)s to %(destination)s" % locals()
		if os.path.exists(os.path.join(destination, reposDir)):
			#if it does, give the user the option to over write it
			choice=raw_input(os.path.join(destination, reposDir)+" already exists.  Do you want to over write it? [Y/N]")
			if choice=='y' or choice=='Y':
				os.system("rm -rf "+os.path.join(destination, reposDir))
			else:
				# if they don't want to over write it, then we're done, and we can 
				# return the path
				return os.path.join(destination, reposDir)
		# Make a directory to materialize everything in pieces in: "tmpRep"		
		try:
			os.mkdir("tmpRep")
			os.chdir("tmpRep")
		except OSError:
			#if the directory already exists (from a previous, incomplete materialization)
			os.system("rm -rf tmpRep")
			os.mkdir("tmpRep")
			os.chdir("tmpRep")
			

		# I purposely make the queue bigger than I'll need so that 
		# a put never blocks.  There are better ways	
		# Queue is being used because its operations are thread safe
		q = Queue.Queue(100000)
		#fill up the queue with filename, revision pairs
		sql = "select filename, revision from files"
		for filename, revision in db.query(sql).getresult():
			q.put( (filename, revision) )
		#launch a bunch of threads with some names we give them
		for i in range(numthreads):
			t = threading.Thread(None, self._tmaterialize, "thread%i" % i, (reposDir, q, log))
			mythreads.append(t)
			t.start()
			
		
		#wait until all the threads are done
		for t in mythreads:
			print t.getName(), " joined"
			t.join()

		
		os.rename(os.path.join(os.getcwd(), reposDir), os.path.join(destination, reposDir))
		os.chdir("..")
		os.system("rm -rf tmpRep")
		
		return os.path.join(destination, reposDir)
				
				
		
		
		
