'''
Created on 04.11.2011

@author: alex
'''

import base64, exceptions, hashlib, urlparse
import abstract, sacvoyage, filesys, util

class SacvoyageFSError(Exception):
	"""Raised when a sacvoyage filesystem operation fails.

	Derives from the builtin Exception directly; in Python 2 this is the
	very same class as exceptions.Exception, so existing callers that
	catch either spelling are unaffected.
	"""
	pass

class Reader(abstract.Reader):
	"""Reads a file stored in a sacvoyage collection.

	Small files keep their payload base64-encoded in the file document's
	'content' field; large files reference a list of chunk documents via
	'chunks'.  Chunks are fetched and decoded lazily, one at a time.

	Raises SacvoyageFSError when the file document does not exist.
	"""
	def __init__(self, fS, fileName):
		self._fs=fS
		self._fileName=util.joinPath(fS._prefix, fileName)
		self._data=''
		self._chunkIds=[]

		# Writer stores documents under the prefixed name, so look the
		# file up by the same key (the original passed the unprefixed
		# fileName here, making prefixed files unreadable).
		document=self._fs._fileCollection.read(self._fileName)
		if not document:
			raise SacvoyageFSError('no such file: %s'%fileName)
		if 'content_type' in document:
			self._contentType=document['content_type']
		else:
			self._contentType='application/octet-stream'
		if 'content' in document:
			# inline payload, base64-encoded in the document itself
			self._data=base64.b64decode(document['content'])
		elif 'chunks' in document:
			# large file: ids of chunk documents, fetched on demand
			self._chunkIds=document['chunks']
	def getContentType(self):
		"""Return the MIME type recorded for the file."""
		return self._contentType
	def read(self, n=-1):
		"""Return up to n bytes of content; n < 0 reads the remainder."""
		if n>-1:
			if not self._data and self._chunkIds:
				# buffer is empty: refill from the next chunk document
				chunk=self._fs._chunkCollection.read(self._chunkIds[0])
				self._data=base64.b64decode(chunk['data'])
				self._chunkIds=self._chunkIds[1:]

			m=min(n, len(self._data))
			result=self._data[:m]
			self._data=self._data[m:]

			return result
		else:
			# drain in blocks; collect pieces and join once instead of
			# repeated string concatenation
			pieces=[]
			block=self.read(8190)
			while block:
				pieces.append(block)
				block=self.read(8190)
			return ''.join(pieces)

	def close(self):
		"""Nothing to release; present to satisfy the Reader interface."""
		pass

class TextReaderWrapper(abstract.Reader):
	"""Decorates a binary Reader so that reads yield unicode text."""
	def __init__(self, reader):
		self._reader=reader
	def getContentType(self):
		return self._reader.getContentType()
	def setContentType(self, content_type):
		# The original delegated to self._writer, which this class never
		# defines (copy/paste from TextWriterWrapper) -- it wraps a reader.
		self._reader.setContentType(content_type)
	def read(self, n=-1):
		return unicode(self._reader.read(n))
	def readall(self):
		return unicode(self._reader.readall())
	def close(self):
		return self._reader.close()

class Writer(abstract.Writer):
	"""Writes a file into a sacvoyage collection.

	Data is buffered locally; every full 256 KiB is flushed out as a
	base64-encoded chunk document.  close() then writes the file
	document, either with the inline content (small files) or with the
	list of chunk ids (large files), replacing any previous version
	stored under the same name.
	"""

	# flush threshold: bytes of buffered data per chunk document
	_CHUNK_SIZE=256*1024

	def __init__(self, fS, fileName):
		self._fs=fS
		# use util.joinPath for consistency with Reader/stamp/hash/delete
		# (the original used filesys.joinPath only here)
		self._fileName=util.joinPath(fS._prefix, fileName)

		self._sha1=hashlib.sha1()
		self._data=''
		self._chunkIds=[]
		self._content_size=0
		self._content_type='application/octet-stream'

	def setContentType(self, content_type):
		self._content_type=content_type

	def _flush(self, flushAll=False):
		"""Write buffered data out as chunk documents.

		With flushAll False only complete chunks are written; with
		flushAll True the remaining tail is written as well.
		Raises IOError when the server rejects a chunk.
		"""
		while len(self._data)>=self._CHUNK_SIZE or (flushAll and self._data):
			m=min(len(self._data), self._CHUNK_SIZE)
			chunk={'data': base64.b64encode(self._data[:m]), 'size': m}
			self._data=self._data[m:]
			response=self._fs._chunkCollection.write(chunk)
			if response and response['status']=='ok':
				for doc in response['written']:
					self._chunkIds.append(doc['_id'])
			else:
				raise IOError('error writing chunk of file')

	def write(self, data):
		"""Append data to the file, flushing any complete chunks."""
		self._data+=data
		self._content_size+=len(data)
		self._sha1.update(data)
		self._flush(False)

	def close(self):
		"""Finish the upload and return the new revision (or None).

		Deletes the chunks of any previous file stored under the same
		name, then writes the new file document.
		Raises SacvoyageFSError when the server reports a failure.
		"""
		# delete previous file's chunks so they are not orphaned
		document=self._fs._fileCollection.read(self._fileName)
		if document and 'chunks' in document:
			for chunkId in document['chunks']:
				response=self._fs._chunkCollection.delete({'filter': {'_id': chunkId}})
				if response['status']!='ok':
					raise SacvoyageFSError('Unable to delete old chunk. Sacvoyage responded:%s'%response['message'])

		document={
			'_id': self._fileName,
			'content_encoding': 'base64',
			'content_size': self._content_size,
			'content_sha1': self._sha1.hexdigest(),
			'content_type': self._content_type
		}
		# flush complete chunks first; only if the file already spilled
		# into chunks must the tail become a chunk too -- otherwise the
		# remainder is stored inline in the file document itself.
		self._flush(False)
		if self._chunkIds:
			self._flush(True)
			document['chunks']=self._chunkIds
		else:
			document['content']=base64.b64encode(self._data)
		response=self._fs._fileCollection.write(document)
		if response['status']!='ok':
			raise SacvoyageFSError('Unable to write data. Sacvoyage responded:%s'%response['message'])

		return response['written'][0]['_rev'] if response['written'] else None
		
	
class TextWriterWrapper(abstract.Writer):
	"""Decorates a binary Writer so callers may hand it text data;
	everything written is coerced to a byte string before delegating."""

	def __init__(self, writer):
		self._target=writer

	def setContentType(self, content_type):
		return self._target.setContentType(content_type)

	def write(self, data):
		return self._target.write(str(data))

	def close(self):
		return self._target.close()


class SacvoyageFS(abstract.FS):
	"""Filesystem backed by a sacvoyage document store.

	The realm url is a comma separated list: the server url, optionally
	followed by 'collection=<name>' and 'prefix=<path>' options.  All
	file names are stored under the configured prefix.
	"""
	def __init__(self, url):
		parts=url.split(',')
		self._sacvoyage=sacvoyage.Sacvoyage(parts[0])
		self._fileCollectionName='db.files'
		self._chunkCollectionName='db.chunks'
		self._prefix=''

		for part in parts[1:]:
			# str.startswith -- the original called a non-existent
			# startsWith method, raising AttributeError for any option
			if part.startswith('collection='):
				self._fileCollectionName=part[11:]
				# derive the chunk collection from the *name*; the
				# collection object itself is only created further down
				self._chunkCollectionName=self._fileCollectionName+'.chunks'
			elif part.startswith('prefix='):
				# the original assigned self._collection here, so the
				# requested prefix was silently ignored
				self._prefix=part[7:]

		self._fileCollection=self._sacvoyage.getCollection(self._fileCollectionName)
		self._chunkCollection=self._sacvoyage.getCollection(self._chunkCollectionName)

	def getRealmUrl(self):
		"""Reconstruct the realm url this FS was created from."""
		return self._sacvoyage.getUrl() + ',collection=' + self._fileCollectionName + ',prefix=' + self._prefix

	def open(self, fileName, mode='rt'):
		"""Open fileName; supported modes: r/rt, w/wt, rb, wb.

		Text modes wrap the binary reader/writer in the text adapters.
		Raises AttributeError for any other mode.
		"""
		if   mode=='r' or mode=='rt':
			return TextReaderWrapper(Reader(self, fileName))
		elif mode=='w' or mode=='wt':
			return TextWriterWrapper(Writer(self, fileName))
		elif mode=='rb':
			return Reader(self, fileName)
		elif mode=='wb':
			return Writer(self, fileName)
		else:
			# builtin AttributeError is the same class as
			# exceptions.AttributeError in Python 2
			raise AttributeError('mode %s not supported' % mode)

	def stamp(self, fileName):
		"""Return the revision of fileName, or None when absent."""
		query={
			'filter': {'_id': util.joinPath(self._prefix, fileName)},
			'select': {'_id':True, '_rev':True, 'content_sha1':True}
		}
		documents=self._fileCollection.find(query)
		if documents:
			return documents[0]['_rev']

	def hash(self, fileName):
		"""Return the stored sha1 of fileName's content, or None."""
		fullFileName=util.joinPath(self._prefix, fileName)
		# TODO: read not whole document
		document=self._fileCollection.read(fullFileName)
		if document and 'content_sha1' in document:
			return document['content_sha1']
		# TODO: eval hash manually if absent

	def delete(self, fileName):
		"""Delete the file document and any chunks it references."""
		_id=util.joinPath(self._prefix, fileName)
		document=self._fileCollection.read(_id)
		if document and 'chunks' in document:
			self._chunkCollection.delete({'filter': {'_id': {'_or': document['chunks']}}})
		# filter on the prefixed id; the original filtered on the bare
		# fileName and so never matched when a prefix was configured
		self._fileCollection.delete({'filter': {'_id': _id}})

	def files(self):
		"""Map relative file name -> stamp dict ('_rev' and, when the
		document carries one, 'sha1') for every file under the prefix."""
		query={'select': {'_id':True, '_rev':True, 'content_sha1': True}}
		if self._prefix:
			query['filter']={'_id': {'_regex': '^'+self._prefix+'.*'}}
		found=self._fileCollection.find(query)
		result={}
		for doc in found:
			stamp={'_rev': doc['_rev']}
			if 'content_sha1' in doc:
				stamp['sha1']=doc['content_sha1']
			fileName=doc['_id']
			assert fileName.startswith(self._prefix)
			fileName=fileName[len(self._prefix):].strip('/')
			result[fileName]=stamp
		return result
	
	