import os, stat, time, calendar, platform, hashlib, simplejson, uuid, random, threading
import urlparse

import fuse, logging
from webdav.Connection import Connection, WebdavError
from webdav.WebdavClient import ResourceStorer

# NOTE(review): presumably set low so a dead node fails fast and the
# replica layer moves on -- confirm against Connection's retry semantics.
Connection.MaxRetries = 1

# local host name, used as the WebDAV lock owner
HOSTNAME = platform.node()
# namespace + property names for stat(2)-like metadata stored as
# WebDAV dead properties
STATNS = 'STAT:'
STATPROPS = (
    'mode',
    'uid',
    'gid',
    'ctime',
    'mtime',
    'atime',
)
# standard DAV live properties, used as fallbacks for size/ctime/mtime
DAVNS = 'DAV:'
DAVPROPS = (
    'creationdate',
    'getlastmodified',
    'getcontentlength',
)
# timestamp formats of the DAV creationdate / getlastmodified properties
DAVCTIMEFMT = '%Y-%m-%dT%H:%M:%SZ'
DAVMTIMEFMT = '%a, %d %b %Y %H:%M:%S GMT'

def split_hash(hash):
    """Split *hash* into path components: its first four characters
    followed by the full hash.

    Intended to fan objects out across directories for efficiency;
    the fan-out is currently unused (see make_url).
    """
    prefix = tuple(hash[i] for i in range(4))
    return prefix + (hash,)


def make_url(node, disk, hash):
    """Return the HTTP URL addressing object *hash* on *node*/*disk*."""
    # TODO: optionally fan the hash out into subdirectories:
    #   'http://%s/%s/%s' % (node, disk, '/'.join(split_hash(hash)))
    return 'http://{0}/{1}/{2}'.format(node, disk, hash)


def make_connection(url):
    """Open a short-timeout webdav Connection to the host serving *url*."""
    parsed = urlparse.urlparse(url)
    return Connection(parsed.netloc, parsed.port or 80,
                      protocol=parsed.scheme, timeout=3)


class CHash(object):
    """
    A consistent hash implementation. This class represents any list of objects.
    You can then locate a key into any one of them. It uses the ring topology
    common with consistent hash/distributed key stores.

    Adapted from:
    http://amix.dk/blog/post/19367

    Some major differences are that replica distribution is not random.
    Rather, replicas are stored in the nodes following the primary node. This
    is done because it works better for a hardware topology that will be used
    in the WebDAV cluster. Hashlib is used rather than the deprecated md5 module,
    also the hash function is exposed so that it can be used as part of an
    object's URL."""
    def __init__(self, nodes=None, replicas=3):
        # *nodes* defaults to None (not []) to avoid the shared
        # mutable-default-argument pitfall.
        self.replicas = replicas
        self.nodes = {}   # ring position -> stored node value
        self.keys = []    # sorted ring positions
        for node in (nodes or ()):
            self.add_node(node)

    @classmethod
    def get_hash(cls, key):
        """Hex md5 digest of *key*; also used as part of object URLs."""
        # encode so text keys hash identically on Python 2 and 3
        return hashlib.md5(key.encode('utf-8')).hexdigest()

    @classmethod
    def get_key(cls, key, hash=None):
        """Integer ring position for *key* (reuses *hash* if supplied)."""
        if hash is None:
            hash = cls.get_hash(key)
        # int() auto-promotes on Python 2, so long() is unnecessary
        return int(hash, 16)

    def add_node(self, key, data=None):
        """Place a node on the ring; stored value is *key* or (key, data)."""
        if data is None:
            value = key
        else:
            value = (key, data)
        kkey = self.get_key(key)
        self.nodes[kkey] = value
        self.keys.append(kkey)
        self.keys.sort()

    def get_node_pos(self, key, hash=None):
        """Return (node value, index into self.keys) owning *key*,
        or (None, None) when the ring is empty."""
        if not self.nodes:
            return None, None
        kkey = self.get_key(key, hash=hash)
        for i, ring_pos in enumerate(self.keys):
            if kkey <= ring_pos:
                return self.nodes[ring_pos], i
        # key hashes beyond the last position: wrap to the first node
        return self.nodes[self.keys[0]], 0

    def get_primary_node(self, key, hash=None):
        """The node that owns *key* on the ring."""
        return self.get_node_pos(key, hash=hash)[0]

    def get_replica_nodes(self, key, hash=None):
        """Yield the self.replicas nodes immediately following the
        primary node (wrapping around the ring).

        BUG FIX: the original accumulated the offset (pos += i + 1),
        yielding the nodes at +1, +3, +6, ... instead of the consecutive
        successors +1, +2, +3 promised by the class docstring.
        """
        node, pos = self.get_node_pos(key, hash=hash)
        if pos is None:
            return  # empty ring: nothing to replicate to
        for i in range(self.replicas):
            yield self.nodes[self.keys[(pos + i + 1) % len(self.keys)]]


class NestedRingLocator(object):
    """Represents a nested ring of consistent hashes. Conceptually
    it is like a ring of rings. Where the first consistent hash lands
    you on a node, which is another ring. The second consistent hash
    then lands you on the final location of an object."""

    def __init__(self, nodes):
        # outer ring of hosts; each host carries an inner ring of disks
        self.nodes = CHash(replicas=2)
        for host, disk_list in nodes.items():
            self.nodes.add_node(host, data=CHash(disk_list, replicas=2))

    def get_primary_url(self, key):
        """URL of the primary copy: primary node, primary disk."""
        hash = CHash.get_hash(key)
        node, disks = self.nodes.get_primary_node(key, hash=hash)
        primary_disk = disks.get_primary_node(key, hash=hash)
        return make_url(node, primary_disk, hash)

    def get_replica_urls(self, key):
        """Yield two replica URLs: the next disk on the primary node,
        then the primary disk on the next node."""
        hash = CHash.get_hash(key)
        node, disks = self.nodes.get_primary_node(key, hash=hash)
        sibling_disk = list(disks.get_replica_nodes(key, hash=hash))[0]
        yield make_url(node, sibling_disk, hash)
        node, disks = list(self.nodes.get_replica_nodes(key, hash=hash))[0]
        yield make_url(node, disks.get_primary_node(key, hash=hash), hash)


class ExistsThread(threading.Thread):
    """Probe *url* in the background; self.exists is valid after join()."""

    def __init__(self, url):
        self.url = url
        self.exists = False
        super(ExistsThread, self).__init__()
        self.start()

    def run(self):
        resource = ResourceStorer(self.url, connection=make_connection(self.url))
        try:
            resource.readAllProperties()
        except WebdavError as e:
            # a missing resource is the expected negative answer
            if e.code != 404:
                raise
        else:
            self.exists = True

class WriteThread(threading.Thread):
    """Upload *buffer* to *url* in the background; join() to wait.

    extra_hdrs: optional extra HTTP headers (e.g. Content-Range).
    lockToken: optional WebDAV lock token to write under.
    """

    def __init__(self, url, buffer, extra_hdrs=None, lockToken=None):
        self.url = url
        self.buffer = buffer
        # BUG FIX: extra_hdrs defaulted to a shared mutable {} argument
        self.extra_hdrs = {} if extra_hdrs is None else extra_hdrs
        self.lockToken = lockToken
        threading.Thread.__init__(self)
        self.start()

    def run(self):
        c = ResourceStorer(self.url, connection=make_connection(self.url))
        c.uploadContent(self.buffer, extra_hdrs=self.extra_hdrs,
                        lockToken=self.lockToken)


class LockThread(threading.Thread):
    """Acquire a WebDAV lock on *url*; self.lockToken is set after join()."""

    def __init__(self, url):
        self.url = url
        self.lockToken = None
        super(LockThread, self).__init__()
        self.start()

    def run(self):
        resource = ResourceStorer(self.url, connection=make_connection(self.url))
        self.lockToken = resource.lock(HOSTNAME)


class UnlockThread(threading.Thread):
    """Release the WebDAV lock *lockToken* on *url* in the background."""

    def __init__(self, url, lockToken):
        self.url = url
        self.lockToken = lockToken
        super(UnlockThread, self).__init__()
        self.start()

    def run(self):
        resource = ResourceStorer(self.url, connection=make_connection(self.url))
        resource.unlock(self.lockToken)


class DeleteThread(threading.Thread):
    """Delete *url* in the background; a 404 counts as already deleted."""

    def __init__(self, url, lockToken=None):
        self.url = url
        self.lockToken = lockToken
        super(DeleteThread, self).__init__()
        self.start()

    def run(self):
        resource = ResourceStorer(self.url, connection=make_connection(self.url))
        try:
            resource.delete(lockToken=self.lockToken)
        except WebdavError as e:
            # already gone -- treat as success
            if e.code != 404:
                raise


class WritePropertiesThread(threading.Thread):
    """Write WebDAV dead properties *props* to *url* in the background."""

    def __init__(self, url, props, lockToken=None):
        self.url = url
        self.props = props
        self.lockToken = lockToken
        super(WritePropertiesThread, self).__init__()
        self.start()

    def run(self):
        resource = ResourceStorer(self.url, connection=make_connection(self.url))
        resource.writeProperties(self.props, lockToken=self.lockToken)


class Replica(object):
    """One object stored at several WebDAV URLs: the locator's primary
    URL followed by its replica URLs.  Writes fan out to every copy
    concurrently; reads try copies in random order until one succeeds.
    """

    def __init__(self, locator, path):
        # primary first, then the replica locations for *path*
        self.urls = [locator.get_primary_url(path)]
        self.urls.extend(locator.get_replica_urls(path))

    def exists(self):
        """True only if every copy exists (probed concurrently)."""
        threads = [ExistsThread(dest) for dest in self.urls]
        for t in threads:
            t.join()
        return min(t.exists for t in threads)

    def lock(self):
        """Lock every copy concurrently; returns {url: lockToken}."""
        threads = {}
        for dest in self.urls:
            threads[dest] = LockThread(dest)
        for t in threads.values():
            t.join()
        return dict((url, t.lockToken) for url, t in threads.items())

    def unlock(self, lockTokens):
        """Release the locks returned by lock() ({url: lockToken})."""
        threads = [UnlockThread(dest, lockTokens.get(dest))
                   for dest in self.urls]
        for t in threads:
            t.join()

    def write(self, buffer, offset=0, lockTokens=None):
        """Write *buffer* at *offset* to every copy; returns len(buffer)."""
        lockTokens = lockTokens or {}
        extra_hdrs = {}
        if offset != 0:
            extra_hdrs['Content-Range'] = 'bytes %s-%s/*' % (
                offset, offset + len(buffer) - 1)
        # BUG FIX: lock tokens were accepted but never forwarded to the
        # writer threads, so locked writes failed or bypassed the lock.
        threads = [WriteThread(dest, buffer, extra_hdrs,
                               lockToken=lockTokens.get(dest))
                   for dest in self.urls]
        for t in threads:
            t.join()
        return len(buffer)

    def read(self, length=None, offset=0):
        """Read from any one copy; *length* bytes at *offset* when given,
        otherwise the whole content."""
        extra_hdrs = {}
        # BUG FIX: a Range header was only sent when BOTH length and a
        # nonzero offset were given, so a partial read at offset 0
        # returned the entire file.
        if length:
            extra_hdrs['Range'] = 'bytes=%s-%s' % (offset, offset + length - 1)
        elif offset:
            extra_hdrs['Range'] = 'bytes=%s-' % offset
        # shuffle a copy -- the original shuffled self.urls in place,
        # destroying the primary-first ordering of the instance
        sources = list(self.urls)
        random.shuffle(sources)
        for source in sources:
            try:
                c = ResourceStorer(source, connection=make_connection(source))
                return c.downloadContent(extra_hdrs=extra_hdrs).read()
            except Exception:
                continue  # best effort: try the next copy
        raise Exception('Could not read from any replicas.')

    def delete(self, lockTokens=None):
        """Delete every copy (404s are tolerated by DeleteThread)."""
        lockTokens = lockTokens or {}
        threads = [DeleteThread(dest, lockToken=lockTokens.get(dest))
                   for dest in self.urls]
        for t in threads:
            t.join()

    def writeProperties(self, props, lockTokens=None):
        """Write dead properties *props* to every copy."""
        lockTokens = lockTokens or {}
        threads = [WritePropertiesThread(dest, props, lockTokens.get(dest))
                   for dest in self.urls]
        for t in threads:
            t.join()

    def readProperties(self, prop_names):
        """Read properties from any one copy (tried in random order)."""
        sources = list(self.urls)
        random.shuffle(sources)
        for source in sources:
            try:
                c = ResourceStorer(source, connection=make_connection(source))
                return c.readProperties(*prop_names, ignore404=True)
            except Exception:
                continue  # best effort: try the next copy
        raise Exception('Could not read properties from any replicas.')


class ReplicatedFile(Replica):
    """A replicated file addressed by *path*.

    The path maps to a small metadata Replica (self.meta) whose content
    is the file's guid; the guid in turn addresses the data replicas,
    which this object IS (via the Replica base class).
    """

    def __init__(self, locator, path):
        self.meta = Replica(locator, path)
        try:
            # existing file: the metadata object holds its guid
            self.guid = self.meta.read()
            self._exists = True
        except Exception:
            # no metadata yet: prepare a fresh guid for create()
            self.guid = uuid.uuid1().hex
            self._exists = False
        super(ReplicatedFile, self).__init__(locator, self.guid)

    @property
    def exists(self):
        # NOTE: deliberately shadows Replica.exists() (the network
        # probe); callers treat this as a cheap cached flag.
        return self._exists

    def create(self, mode):
        """Create the metadata object and an empty data object."""
        if self._exists:
            return
        self.meta.write(self.guid)
        self.set_props(mode=stat.S_IFREG | mode)
        self.write('')
        self._exists = True

    def delete(self):
        """Delete both the metadata and the data replicas."""
        if not self._exists:
            return
        self.meta.delete()
        # BUG FIX: the original called self.delete() here, recursing
        # forever; the data copies must be deleted via the base class.
        super(ReplicatedFile, self).delete()
        self._exists = False

    def set_props(self, **kwargs):
        """Persist stat-like properties (mode, uid, ...) on the metadata
        object; unknown keyword names are silently ignored."""
        props = {}
        for prop, v in kwargs.items():
            if prop not in STATPROPS:
                continue
            props[(STATNS, prop)] = str(v)
        self.meta.writeProperties(props)

    def get_props(self):
        """Return a stat-like dict (mode, uid, times, size, ...), or
        None when the resource does not exist (404)."""
        props = {}
        try:
            prop_names = []
            prop_names.extend([(DAVNS, p) for p in DAVPROPS])
            prop_names.extend([(STATNS, p) for p in STATPROPS])
            r = self.meta.readProperties(prop_names)
            for prop in STATPROPS:
                v = r.get((STATNS, prop))
                if not v:
                    continue
                v = v.textof()
                try:
                    props[prop] = int(v)
                except ValueError:
                    continue  # malformed stored value: fall back below
            prop_names = [(DAVNS, p) for p in DAVPROPS]
            r = self.readProperties(prop_names)
            # we can take advantage of some of the default DAV properties...
            props['size'] = int(r.get((DAVNS, DAVPROPS[2])).textof())
            if 'ctime' not in props:
                props['ctime'] = calendar.timegm(time.strptime(
                    r.get((DAVNS, DAVPROPS[0])).textof(), DAVCTIMEFMT))
            if 'mtime' not in props or 'atime' not in props:
                mtime = calendar.timegm(time.strptime(
                    r.get((DAVNS, DAVPROPS[1])).textof(), DAVMTIMEFMT))
                props.setdefault('mtime', mtime)
                props.setdefault('atime', mtime)
        except WebdavError as e:
            if e.code == 404:
                return
            raise
        return props


class ReplicatedDir(ReplicatedFile):
    """A replicated directory: its data content is a JSON mapping of
    child name -> stat type bits."""

    @property
    def children(self):
        """Lazily-loaded {name: type} mapping of directory entries."""
        if hasattr(self, '_child_dirs'):
            return self._child_dirs
        child_dirs = {}
        if self._exists:
            try:
                child_dirs.update(simplejson.loads(self.read()))
            except Exception:
                pass  # unreadable/corrupt listing: treat as empty
        self._child_dirs = child_dirs
        return child_dirs

    def add(self, name, type):
        """Record child *name* with stat *type* bits and persist."""
        self.children[name] = type
        self.write(simplejson.dumps(self.children))

    def remove(self, name):
        """Forget child *name* (no-op if absent) and persist."""
        self.children.pop(name, None)
        self.write(simplejson.dumps(self.children))

    def create(self, mode):
        """Create the metadata object and an empty JSON listing.

        BUG FIX: create() was defined twice; the dead first definition
        wrongly used S_IFREG but was the only one setting _exists.
        This merged version keeps S_IFDIR and sets _exists = True,
        matching ReplicatedFile.create().
        """
        if self.exists:
            return
        self.meta.write(self.guid)
        self.set_props(mode=stat.S_IFDIR | mode)
        self.write(simplejson.dumps(self.children))
        self._exists = True


class DavManager(object):
    """High-level filesystem operations (stat, create, read, write,
    move, readdir) over replicated WebDAV objects addressed through
    *locator* (a NestedRingLocator)."""

    def __init__(self, locator):
        self.locator = locator
        # block cache dict; not used anywhere in this file -- presumably
        # reserved for a caching layer. TODO confirm.
        self.DATA_BLOCK_CACHE = {}

    def initialize(self):
        """Create the root directory if it does not exist yet."""
        root = ReplicatedDir(self.locator, '/')
        if not root.exists:
            root.create(0755)

    def get_stat(self, path):
        """Return the stat-like property dict for *path*, or None on
        any failure (missing resource, network error, ...)."""
        try:
            c = ReplicatedFile(self.locator, path)
            return c.get_props()
        except:
            return None

    def set_stat(self, path, **kwargs):
        """Set stat-like properties (mode, uid, gid, times) on *path*."""
        c = ReplicatedFile(self.locator, path)
        c.set_props(**kwargs)

    def create(self, path, type, mode=0755, dev=0):
        """Create a file or directory (chosen by the S_IF* bits in
        *type*) and register it in its parent's listing.  *dev* is
        accepted for FUSE mknod compatibility but unused."""
        dirname, basename = os.path.split(path)
        if stat.S_ISREG(type):
            klass = ReplicatedFile
        else:
            klass = ReplicatedDir
        c1 = ReplicatedDir(self.locator, dirname)
        c1.add(basename, type)
        c2 = klass(self.locator, path)
        c2.create(mode)

    def delete(self, path):
        """Remove *path* from its parent listing and delete its data."""
        dirname, basename = os.path.split(path)
        c1 = ReplicatedDir(self.locator, dirname)
        c1.remove(basename)
        c2 = ReplicatedFile(self.locator, path)
        c2.delete()

    def move(self, src, dst):
        """Rename *src* to *dst*.

        Locks the source parent, updates both parent listings, then
        copies the content and STAT properties to the destination and
        deletes the source.  Directories are moved by recursing into
        their children before the final content swap.

        NOTE(review): this operates on the PRIMARY copies only
        (locator.get_primary_url); replica copies are not touched here
        -- confirm whether replica repair happens elsewhere.
        """
        sdirname, sbasename = os.path.split(src)
        ddirname, dbasename = os.path.split(dst)
        # primary resources: source parent, source, destination
        sc1 = ResourceStorer(self.locator.get_primary_url(sdirname))
        sc2 = ResourceStorer(self.locator.get_primary_url(src))
        dc2 = ResourceStorer(self.locator.get_primary_url(dst))
        sl1 = sc1.lock(HOSTNAME)
        dl1 = None
        try:
            # move metadata: drop the entry from the source parent...
            siblings = simplejson.loads(sc1.downloadContent().read())
            type = siblings.pop(sbasename)
            if type is None:
                # source entry missing: nothing to move
                return
            if sdirname == ddirname:
                # rename within the same directory: one listing update
                siblings[dbasename] = type
            sc1.uploadContent(simplejson.dumps(siblings), lockToken=sl1)
            if sdirname != ddirname:
                # cross-directory move: update the destination parent too
                dc1 = ResourceStorer(self.locator.get_primary_url(ddirname))
                dl1 = dc1.lock(HOSTNAME)
                try:
                    # destination may not exist...
                    siblings = simplejson.loads(dc1.downloadContent().read())
                except WebdavError, e:
                    if e.code != 404:
                        raise
                    siblings = {}
                siblings[dbasename] = type
                dc1.uploadContent(simplejson.dumps(siblings), lockToken=dl1)
            # get content...
            content = sc2.downloadContent().read()
            if stat.S_ISDIR(type):
                children = simplejson.loads(content)
                # recursively move children...
                for child in children.keys():
                    self.move(os.path.join(src, child), os.path.join(dst, child))
            # lock both endpoints for the content + property swap
            sl2 = sc2.lock(HOSTNAME)
            dl2 = dc2.lock(HOSTNAME)
            try:
                # get properties...
                prop_names = map(lambda x: (STATNS, x), STATPROPS)
                r = sc2.readProperties(*prop_names, ignore404=True)
                props = {}
                for prop in STATPROPS:
                    v = r.get((STATNS, prop))
                    if not v:
                        continue
                    props[(STATNS, prop)] = v.textof()
                # remove old resource...
                sc2.delete(lockToken=sl2) # releases lock...
                # set contents...
                dc2.uploadContent(content, lockToken=dl2)
                # set properties...
                dc2.writeProperties(props, lockToken=dl2)
            finally:
                dc2.unlock(dl2)
        finally:
            sc1.unlock(sl1)
            if dl1:
                dc1.unlock(dl1)

    def readdir(self, path, offset=None):
        """Yield (name, type) pairs for the entries of directory *path*.
        *offset* is accepted for the FUSE readdir signature but unused."""
        c = ReplicatedDir(self.locator, path)
        for child, type in c.children.items():
            yield child, type

    def read(self, path, length, offset):
        """Read *length* bytes at *offset* from *path*."""
        f = ReplicatedFile(self.locator, path)
        return f.read(length, offset)

    def write(self, path, buffer, offset):
        """Write *buffer* at *offset* to every replica of *path*;
        returns the number of bytes written."""
        f = ReplicatedFile(self.locator, path)
        return f.write(buffer, offset)
