#!/usr/bin/env python
# Usage: s3fs.py bucket /mnt/point
# Author: Chris Moyer

import os, stat, errno, sys
import fuse
from fuse import Fuse

import logging, logging.handlers

import time
from datetime import datetime

import boto
from boto.sdb.persist import *
from boto.exception import SDBResponseError

from s3fs import *
from s3fs.s3object import *


# Module-level logger writing to a rotating file, so a mounted (daemonized)
# filesystem can still be debugged without a foreground console.
log = logging.getLogger("s3fs")
logfile = "/var/log/s3fs.log"
log.setLevel(logging.DEBUG)
# Rotate at roughly 5MB per file, keeping 3 old copies.
handler = logging.handlers.RotatingFileHandler(logfile, "a", 5232880, 3)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)-5s %(message)s", "%x %X"))
log.addHandler(handler)


# The 0.2 fuse-python API is required below; bindings old enough to lack
# fuse.__version__ predate that API, so bail out early with a clear error.
if not hasattr(fuse, '__version__'):
	raise RuntimeError, \
		"your fuse-py doesn't know of fuse.__version__, probably it's too old."

fuse.fuse_python_api = (0, 2)

class S3Stat(fuse.Stat):
	"""
	fuse.Stat adapter built from an S3File/S3Dir record stored in the
	boto persistence layer.
	"""
	def __init__(self, attrs):
		# Fields we don't track per-object.
		self.st_ino = 0
		self.st_dev = 0
		self.st_nlink = 1
		# Numeric fields copied straight off the metadata record.
		self.st_mode = int(attrs.mode)
		self.st_uid = int(attrs.uid)
		self.st_gid = int(attrs.gid)
		self.st_size = int(attrs.size)
		# Timestamps are stored as datetime objects; stat() wants
		# seconds since the epoch.
		for field in ('atime', 'mtime', 'ctime'):
			stamp = getattr(attrs, field)
			setattr(self, 'st_' + field, time.mktime(stamp.timetuple()))

class SimpleStat(fuse.Stat):
	"""
	Zeroed-out fuse.Stat whose three timestamps are all "now".
	"""
	def __init__(self):
		now = time.time()
		self.st_atime = now
		self.st_mtime = now
		self.st_ctime = now
		self.st_mode = 0
		self.st_ino = 0
		self.st_dev = 0
		self.st_nlink = 1
		self.st_uid = 0
		self.st_gid = 0
		self.st_size = 0

class S3FS(Fuse):
	"""
	S3 File system. Metadata is stored in an SDB Domain with the
	same name as this bucket. This SDB Domain will be created if it
	doesn't exist and is updated automatically on startup and readdir.

	File contents live in the S3 bucket under the path minus its
	leading '/'; stat-style attributes live in S3File/S3Dir records
	in the SDB domain.
	"""

	def __init__(self, bucket, *args, **kw):
		"""
		Connect to S3, fetch the backing bucket, and point the SDB
		persistence layer at the domain of the same name.

		:param bucket: name of the S3 bucket backing this filesystem
		:raises Exception: if the bucket cannot be found
		"""
		log.info("Starting S3fs")
		log.info("S3FS: %s" % bucket)
		Fuse.__init__(self, *args, **kw)
		log.debug("Connecting to S3")
		conn = boto.connect_s3()
		log.debug("Grabbing bucket")
		self.bucket = conn.get_bucket(bucket)
		if self.bucket is None:
			raise Exception("Bucket not found")
		log.debug("Setting up Metadata")
		set_domain(bucket)
		log.debug("Finished setting up S3FS")

	def getattr(self, path):
		"""Return an S3Stat for path, or -ENOENT if no metadata exists."""
		log.debug("getattr '%s'" % path)
		try:
			d = S3Dir.get_by_path(path)
		except Exception:
			# BUGFIX: was a bare "except:", which also swallowed
			# SystemExit/KeyboardInterrupt and made real lookup failures
			# indistinguishable from a missing path. Narrow and log.
			log.exception("getattr failed for '%s'" % path)
			return -errno.ENOENT
		if d is None:
			return -errno.ENOENT
		return S3Stat(d)

	def readdir(self, path, offset):
		"""Yield a Direntry for '.', '..', and each child of path."""
		log.debug("readdir %s %s" % (path, offset))
		dir = S3Dir.get_by_path(path)
		yield fuse.Direntry(".")
		yield fuse.Direntry("..")

		#TODO make this use the Virtual Property when implemented
		for r in dir.get_related_objects('parent'):
			log.debug("\t%s" % r.name)
			yield fuse.Direntry(str(r.name))

	def mkdir(self, path, mode):
		"""Create and save a new S3Dir metadata record for path."""
		log.debug("mkdir %s %s" % (path, oct(mode)))
		d = S3Dir()
		d.path = path
		d.mode = int(mode)
		d.uid = os.getuid()
		d.gid = os.getgid()
		d.atime = d.ctime = d.mtime = datetime.now()
		# TODO: Base this off the path -- currently every new directory
		# is parented at the root, regardless of where it was created.
		p = S3Dir.find(path="/").next()
		d.parent = p
		d.save()
		log.debug("\tDone")

	# TODO
	def open(self, path, flags):
		"""Check the file's metadata exists; -ENOENT otherwise."""
		log.debug("open %s" % path)
		attrs = S3File.find(path[1:])
		if attrs is None:
			log.debug("File not Found")
			return -errno.ENOENT
		#attrs.atime = datetime.now()

	# TODO
	def read(self, path, size, offset):
		"""Read up to size bytes at offset from the S3 key backing path.

		NOTE: fetches the entire object on every call -- fine for small
		files, wasteful for large ones.
		"""
		log.debug("read %s" % (path))
		key = self.bucket.get_key(path[1:])
		if key is None:
			return -errno.ENOENT
		data = key.get_contents_as_string()
		slen = len(data)
		if offset < slen:
			# Clamp the request to the bytes actually available.
			if offset + size > slen:
				size = slen - offset
			buf = data[offset:offset + size]
		else:
			buf = ''
		return buf

	# TODO
	def write(self, path, buff, offset):
		"""Write buff to the S3 key backing path; returns bytes written.

		NOTE: offset is ignored -- every write replaces the whole object.
		"""
		log.debug("write %s %s" % (path, offset))
		key = self.bucket.new_key(path[1:])
		key.set_contents_from_string(buff)
		attrs = S3File.find(path[1:])
		if attrs is None:
			return -errno.ENOENT
		attrs.ctime = datetime.now()
		log.debug("wrote: %s bytes" % (len(buff)))
		return len(buff)

	# TODO
	def utime(self, path, times):
		"""Touch path's mtime (the requested times tuple is ignored)."""
		log.debug("utime %s" % path)
		attrs = S3File.find(path[1:])
		attrs.mtime = datetime.now()

	def mknod(self, path, mode, dev):
		"""Create an S3File metadata record for a new (empty) file."""
		log.debug("mknod %s %s %s" % (path, oct(mode), dev))
		# Split off the leaf name; whatever remains is the parent dir.
		parts = path.split("/")
		log.debug("path: %s", parts)
		name = parts.pop()
		log.debug("Node: %s" % name)
		parent_path = "/".join(parts)
		if parent_path == "":
			parent_path = "/"
		log.debug("Parent: %s" % parent_path)
		parent = S3Dir.get_by_path(parent_path)
		f = S3File(name=name, parent=parent)
		f.mode = int(mode)
		f.uid = os.getuid()
		f.gid = os.getgid()
		f.save()

	# TODO
	def unlink(self, path):
		"""Delete path's metadata record (the S3 key itself is untouched)."""
		log.debug("unlink %s" % path)
		obj = S3Object.get_by_path(path)
		obj.delete()

	# TODO: hard links are not supported yet.
	def link(self, target, source):
		# BUGFIX: the parameter was spelled "soruce" while the body read
		# "source", so every link() call raised NameError instead of
		# returning the intended error code.
		log.debug("link %s > %s" % (target, source))
		return -errno.ENOENT

	# TODO: actually truncate the backing S3 object.
	def truncate(self, target, size):
		log.debug("truncate: %s %s" % (target, size))

	# TODO: flush pending state; currently a no-op.
	def fsync(self, path, isFsyncFile):
		log.debug("fsync %s" % path)

#	def statfs(self):
#		"""
#		- f_bsize - preferred size of file blocks, in bytes
#		- f_frsize - fundamental size of file blocks, in bytes
#			 [if you have no idea, use the same as blocksize]
#		- f_blocks - total number of blocks in the filesystem
#		- f_bfree - number of free blocks
#		- f_files - total number of file inodes
#		- f_ffree - number of free file inodes
#		"""
#		log.debug("statfs")
#		st = fuse.StatVfs()
#		st.f_bsize = 512
#
#		files = 0
#		for i in self.bucket:
#			files += 1
#		st.f_files = files
#
#		#st.f_frsize = st.f_bsize
#		#st.f_blocks = 0
#		#st.f_bfree = 2**1024
#		#st.f_ffree = 2**1024
#
#		return st

	def chmod(self, path, mode):
		"""Store a new mode on path's metadata (always marked regular file)."""
		log.debug("chmod %s %s" % (path, oct(mode)))
		attrs = S3File.find(path[1:])
		attrs.mode = stat.S_IFREG | mode

	def chown(self, path, uid, gid):
		"""Store new ownership on path's metadata record."""
		log.debug("chown %s %s %s" % (path, uid, gid))
		attrs = S3File.find(path[1:])
		attrs.uid = int(uid)
		attrs.gid = int(gid)


def main():
	"""Parse command-line arguments and start the FUSE server."""
	usage = "Usage: s3fs.py bucket /mount/path"
	# BUGFIX: the script needs BOTH a bucket name (sys.argv[1]) and a
	# mount point (sys.argv[2]); the old "< 2" check let a one-argument
	# invocation fall through to an IndexError instead of showing usage.
	if len(sys.argv) < 3:
		raise Exception(usage)
	bucket = sys.argv[1]
	mount_point = sys.argv[2]
	# Rewrite argv into the "-o option" form the fuse-python parser expects.
	sys.argv = ['s3fs.py', "-o", "ping_diskarb,volname='%s'" % (bucket), mount_point]

	server = S3FS(bucket, version="PythonS3",
					 usage=usage,
					 dash_s_do='setsingle')

	server.parse(errex=1)
	server.main()

if __name__ == '__main__':
	main()
