#!/usr/bin/env python
# Usage: s3fs.py bucket /mnt/point
# Author: Chris Moyer

import os, stat, errno, sys
import fuse
from fuse import Fuse

import logging, logging.handlers

import time
from datetime import datetime

import boto

from s3fs import *


# Module-level logger; all filesystem operations log here.
log = logging.getLogger("s3fs")
logfile = "/var/log/s3fs.log"
log.setLevel(logging.DEBUG)
# Rotate at ~5 MB, keeping 3 old log files.
handler = logging.handlers.RotatingFileHandler(logfile, "a", 5232880, 3)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)-5s %(message)s", "%x %X"))
log.addHandler(handler)


# Require a fuse-py build new enough to expose __version__ (and thus
# the versioned python API selected below).
if not hasattr(fuse, '__version__'):
	raise RuntimeError, \
		"your fuse-py doesn't know of fuse.__version__, probably it's too old."

# Declare which fuse-python API generation this filesystem implements.
fuse.fuse_python_api = (0, 2)

class SimpleStat(fuse.Stat):
	"""
	Minimal stat record with sane defaults: every numeric field is
	zeroed, the link count is 1, and all three timestamps are set to
	the moment of construction.
	"""
	def __init__(self):
		now = time.time()
		self.st_mode = 0
		self.st_ino = 0
		self.st_dev = 0
		self.st_nlink = 1
		self.st_uid = 0
		self.st_gid = 0
		self.st_size = 0
		self.st_atime = now
		self.st_mtime = now
		self.st_ctime = now

class S3FS(Fuse):
	"""
	S3 File system. Metadata is stored in an SDB Domain with the
	same name as this bucket. This SDB Domain will be created if it
	doesn't exist and is updated automatically on startup and readdir.
	"""

	def __init__(self, bucket, *args, **kw):
		log.info("Starting S3fs")
		log.info("S3FS: %s" % bucket)
		Fuse.__init__(self, *args, **kw)
		log.debug("Connecting to S3")
		conn = boto.connect_s3()
		log.debug("Grabbing bucket")
		self.bucket = conn.get_bucket(bucket)
		if(self.bucket == None):
			raise Exception("Bucket not found")
		log.debug("Finished setting up S3FS")

	def getattr(self, path):
		path = path[1:]
		log.debug("getattr '%s'" % path)
		if path == "":
			st = SimpleStat()
			st.st_mode = stat.S_IFDIR | 0777
		else:
			try:
				try:
					k = self.bucket.get_key(path)
				except:
					pass

				if not k:
					path = path + "/"
					k = self.bucket.get_key(path)
					log.debug("getattr '%s'" % path)

				if not k:
					raise
				st = SimpleStat()
				if path[-1] == "/":
					st.st_mode = stat.S_IFDIR | 0777
				else:
					st.st_mode = stat.S_IFREG | 0777
				os.environ['TZ'] = 'GMT'
				log.debug("Time: %s" % k.last_modified)
				st.st_ctime = st.st_mtime = time.mktime(time.strptime(k.last_modified, "%a, %d %b %Y %H:%M:%S %Z"))
				st.st_size = int(k.size)
				log.debug("Found")
			except:
				log.debug("Not found")
				return -errno.ENOENT
		return st

	def readdir(self, path, offset):
		path = path[1:]
		log.debug("readdir %s %s" % (path, offset) )
		yield fuse.Direntry(".")
		yield fuse.Direntry("..")
		for k in self.bucket:
			name = str(k.name)
			if name[-1] == "/":
				name = name[:-1]
			if name.startswith(path) and name != path:
				name = name.split('/')[-1]
				log.debug("\t%s" % name)
				yield fuse.Direntry(name)


	def mkdir(self, path, mode):
		path = str(path) + "/"
		log.debug("mkdir %s %s" % (path, oct(mode)) )
		self.mknod(path, mode, None)

	# TODO
	def open(self, path, flags):
		log.debug("open %s" % path)
		self.getattr(path)

	# TODO
	def read(self, path, size, offset):
		path = path[1:]
		log.debug("read %s" % (path) )
		key = self.bucket.get_key()
		if (key == None):
			return -errno.ENOENT
		data = key.get_contents_as_string()
		slen = len(data)
		if offset < slen:
			if offset + size > slen:
				size = slen - offset
			buf = data[offset:offset+size]
		else:
			buf = ''
		return buf

	# TODO
	def write(self, path, buff, offset):
		path = path[1:]
		log.debug("write '%s' %s" % (path, offset))
		key = self.bucket.get_key(path)
		if not key:
			key = self.bucket.create_key(path)
		log.debug("Got file")
		key.set_contents_from_string(buff)
		log.debug("File written")

		log.debug("wrote: %s bytes" % (len(buff)) )
		return len(buff)
	
	# TODO
	def utime(self, path, times):
		log.debug("utime %s" % path)
	
	def mknod(self, path, mode, dev):
		path = path[1:]
		log.debug("mknod %s %s %s" % (path, oct(mode), dev))
		self.bucket.new_key(path).set_contents_from_string("")

	
	def unlink(self, path):
		path = path[1:]
		log.debug("unlink '%s'" % path)
		self.bucket.delete_key(path)

	def rmdir(self, path):
		self.unlink(path + "/")
	
	# TODO
	def link(self, target, soruce):
		log.debug("link %s > %s" % (target, source) )
		return -errno.ENOENT
	
	# TODO
	def truncate(self, target, size):
		log.debug("truncate: %s %s" % (target, size) )
	
	# TODO
	def fsync(self, path, isFsyncFile):
		log.debug("fsync %s" % path)

#	def statfs(self):
#		"""
#		- f_bsize - preferred size of file blocks, in bytes
#		- f_frsize - fundamental size of file blcoks, in bytes
#			 [if you have no idea, use the same as blocksize]
#		- f_blocks - total number of blocks in the filesystem
#		- f_bfree - number of free blocks
#		- f_files - total number of file inodes
#		- f_ffree - nunber of free file inodes
#		"""
#		log.debug("statfs")
#		st = fuse.StatVfs()
#		st.f_bsize = 512
#
#		files = 0
#		for i in self.bucket:
#			files += 1
#		st.f_files = files
#
#		#st.f_frsize = st.f_bsize
#		#st.f_blocks = 0
#		#st.f_bfree = 2**1024
#		#st.f_ffree = 2**1024
#
#		return st

	def chmod(self, path, mode):
		log.debug("chmod %s %s" % (path, oct(mode)))

	def chown(self, path, uid, gid):
		log.debug("chown %s %s %s" % (path, uid, gid) )


def main():
	"""
	Entry point: validate "s3fs.py bucket /mount/path", rebuild argv
	with the FUSE mount options, then construct and run the S3FS server.

	:raises Exception: with the usage string if arguments are missing
	"""
	usage = "Usage: s3fs.py bucket /mount/path"
	# BUG FIX: both a bucket AND a mount point are required (argv length
	# 3); the old "< 2" check let sys.argv[2] raise an IndexError.
	if len(sys.argv) < 3:
		raise Exception(usage)
	bucket = sys.argv[1]
	# Renamed from "dir" to avoid shadowing the builtin.
	mount_point = sys.argv[2]
	# Rewrite argv so Fuse.parse() sees only the mount options and path.
	sys.argv = ['s3fs.py', "-o", "ping_diskarb,volname='%s'" % (bucket), mount_point]

	server = S3FS(bucket, version="PythonS3",
					 usage=usage,
					 dash_s_do='setsingle')

	server.parse(errex=1)
	server.main()

if __name__ == '__main__':
	main()
