#    This file is part of SRIS.
#
#    SRIS is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    SRIS is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with SRIS.  If not, see <http://www.gnu.org/licenses/>.
#
#    Copyright 2011 David Irvine
#

import logging
import pprint
import os
import uuid
import gzip
import json
import time
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError

from sris.models import *

logger = logging.getLogger("sris.Processor")

## Depth-first walk over the nested bucket tree built by process_scan().
#  A node carrying the '__END__' marker is a leaf holding aggregate data; for
#  each leaf, one flat row is appended to the output list: the path of keys
#  leading to the leaf, followed by totalFiles, totalSize and the files list.
#
#  \param items  the path of bucket keys accumulated so far (not modified)
#  \param bucket the current node of the nested bucket dictionary
#  \param lines  output list of flat rows, extended in place
def iterBuckets(items, bucket, lines):
	if '__END__' in bucket:
		# Leaf node: emit path + aggregates as one flat row.
		tail = [bucket['totalFiles'], bucket['totalSize'], bucket['files']]
		lines.append(list(items) + tail)
	else:
		# Interior node: recurse into each child with the key added to the path.
		for key in bucket:
			iterBuckets(items + [key], bucket[key], lines)


## Processes newly uploaded file system scans.  
#  Selects all scans in the DB that have the state SCANNED, these scans are not yet processed.
#  Loops through each scan, processing it in sequence. During processing, the 
#  state of the scan is set to "PROCESSING" after processing, it is set to "PROCESSED" indicating that processing is complete.
#
# \todo Currently this is not a parallel process, the easiest way is to use a 
# thread pool, but it would probably be sensible to enable multiple processes to
# run concurrently. 
class Command(BaseCommand):
	help = 'Process any scans that have been uploaded but not yet processed.'

	## handle is called by the Django management interface when the end user
	#  issues the process command.
	#
	#  It loops forever.  Each iteration fetches every scan in the "SCANNED"
	#  state (uploaded but not yet processed) and processes them in sequence.
	#  When there is nothing to do it sleeps for 60 seconds before polling
	#  the database again.
	#
	#  \param args    unused positional arguments from the management framework
	#  \param options unused keyword options from the management framework
	#
	#  \returns None - it only returns if the user interrupts the process.
	#
	def handle(self, *args, **options):
		while True:
			# Unique id for this processing pass; recorded on each scan so
			# the worker that claimed it can be identified later.
			run_id = uuid.uuid4()
			query = Scan.objects.filter(state__exact="SCANNED")
			processed = 0
			for scan in query:
				processed += 1
				logger.info("Processing scan: %s" % scan)
				# Claim the scan: mark it in progress and tag it with this
				# pass's uuid before doing any work.
				scan.state = "PROCESSING"
				scan.processedBy = run_id
				scan.save()
				try:
					self.process_scan(scan)
				except KeyboardInterrupt:
					# The user killed the command: roll the current scan
					# back to SCANNED so a later run picks it up, then exit.
					logger.warning("Processing cancelled by user, cleaning up")
					scan.state = "SCANNED"
					scan.save()
					return None
				except Exception as e:
					# Bug fix: the original bare "except:" logged an
					# undefined name "e" (NameError) and then returned,
					# although its own comment said the next scan would be
					# attempted.  Log the error, roll the scan back, and
					# continue with the remaining scans.
					logger.error(e)
					logger.error("Unable to process scan: %s" % scan)
					scan.state = "SCANNED"
					scan.save()
			if processed == 0:
				# Bug fix: the original "for...else" always ran (the loop
				# never breaks), so the command slept for 60 seconds even
				# after successfully processing scans.
				logger.info("No unprocessed scans, sleeping for 60 seconds\n")
				time.sleep(60)

	## Map a file timestamp onto its settings.TIMEOFFSETS bucket label.
	#  TIMEOFFSETS maps an age offset in seconds to a label; the largest
	#  offset the timestamp is at least as old as wins.  Returns None when no
	#  offset matches (bug fix: the original silently reused the previous
	#  file's bucket variable in that case).
	def _time_bucket(self, timestamp, scanEpoch):
		for offset in sorted(settings.TIMEOFFSETS, reverse=True):
			if timestamp <= scanEpoch - offset:
				return settings.TIMEOFFSETS[offset]
		# NOTE(review): presumably TIMEOFFSETS contains an offset of 0 so
		# every timestamp matches - confirm against the settings module.
		return None

	## Map a file size onto its settings.DATASIZES bucket label: the largest
	#  threshold the size meets wins.  Returns None when no threshold matches.
	def _size_bucket(self, size):
		for threshold in sorted(settings.DATASIZES, reverse=True):
			if size >= threshold:
				return settings.DATASIZES[threshold]
		return None

	## Map a file path onto its settings.DOCTYPES label by filename suffix,
	#  defaulting to "Unknown" when no suffix matches.
	def _doc_type(self, localPath):
		for suffix, label in settings.DOCTYPES.items():
			if localPath.endswith(suffix):
				return label
		return "Unknown"

	## Processes the scanned data into buckets, each bucket aggregates the
	#  files by a series of attributes.  This reduces the overall number of
	#  entries significantly, yet still provides enough granularity in order
	#  to quickly generate reports.
	#
	#  \param scan the Scan model instance to process
	#  \returns None - results are written to the database.
	def process_scan(self, scan):
		# Delete any child objects before starting; this avoids duplicate
		# entries if a previous run has failed.
		scan.Buckets.all().delete()

		## Each scan may have multiple data files; each data file contains a
		#  compressed json array, and each json object in the array
		#  represents a file found on disk.  Each file is put into a bucket:
		#  the bucket's total size is incremented by the file's size, its
		#  file count by one, and the json object is appended to its files
		#  array.
		buckets = {}
		# Epoch time of the scan: the reference point for the age buckets.
		scanEpoch = time.mktime(scan.ts.timetuple())
		for sf in scan.dataFiles.all():
			# Data files are gzip-compressed json arrays.  Bug fix: the
			# handle is now closed (the original leaked it, and its name
			# "f" was later clobbered by an inner loop variable).
			stream = gzip.open(os.path.join(settings.SCANPATH, sf.fileName))
			try:
				entries = json.load(stream)
			finally:
				stream.close()
			for entry in entries:
				logger.info("Processing file: %s" % sf.fileName)
				# The ordered attribute values identifying this file's
				# bucket in the nested dictionary.
				fields = [
						entry['globalRoot'],
						entry['uname'],
						entry['uid'],
						entry['gid'],
						entry['gname'],
						self._time_bucket(entry['atime'], scanEpoch),
						self._time_bucket(entry['mtime'], scanEpoch),
						self._time_bucket(entry['ctime'], scanEpoch),
						self._size_bucket(entry['size']),
						self._doc_type(entry['localPath']),
						]
				# Walk down the bucket tree, creating nodes as needed.
				bucket = buckets
				for value in fields:
					bucket = bucket.setdefault(value, {})

				# Initialise the leaf on first visit; '__END__' marks it as
				# a leaf for iterBuckets.
				if 'totalFiles' not in bucket:
					bucket['totalFiles'] = 0
					bucket['totalSize'] = 0
					bucket['files'] = []
					bucket['__END__'] = True

				# The aggregate data is then updated.
				bucket['totalFiles'] += 1
				bucket['totalSize'] += entry['size']
				bucket['files'].append(entry)

		## Once each data file has been processed, the buckets dictionary is
		#  fully populated.  iterBuckets flattens it into rows; each row
		#  holds the fields used to populate one Bucket() in the database.
		lines = []
		iterBuckets([], buckets, lines)
		for line in lines:
			bk = scan.Buckets.create()
			bk.globalRoot = line[0]
			bk.userName = line[1]
			bk.userId = line[2]
			bk.groupId = line[3]
			bk.groupName = line[4]
			bk.aTime = line[5]
			bk.mTime = line[6]
			bk.cTime = line[7]
			bk.size = line[8]
			bk.docType = line[9]
			bk.totalFiles = line[10]
			bk.totalSize = line[11]
			bk.save()
			logger.debug("Added Bucket: %s" % bk)
			# line[12] is the list of raw file records in this bucket; each
			# becomes an individual File row under the bucket.
			for entry in line[12]:
				record = bk.Files.create()
				record.globalPath = entry['globalPath']
				record.localPath = entry['localPath']
				record.uid = entry['uid']
				record.gid = entry['gid']
				record.groupName = entry['gname']
				record.userName = entry['uname']
				record.size = entry['size']
				record.atime = entry['atime']
				record.mtime = entry['mtime']
				record.ctime = entry['ctime']
				record.save()
		scan.state = "PROCESSED"
		scan.save()
