# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name:        master_main.py
# Purpose:     GAEDrive: Master Server
#
# Author:      Crane Chu <cranechu (at) gmail.org>
#
# Created:     Nov. 20, 2009
# Copyright:   Copyright 2009,2010 Crane Chu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
#!/usr/bin/env python

__author__ = 'Crane Chu'


# MODULES
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.ext import webapp
from stat import S_IFDIR, S_IFLNK, S_IFREG
from time import time
import wsgiref.handlers
import urllib
import random
import logging
import errno
import os
from common import *


# root entity of the group: every File below is created with this entity as
# its datastore parent. NOTE(review): presumably this keeps all file nodes in
# one entity group so multi-entity transactions (e.g. rename) are valid —
# confirm against the datastore transaction rules.
_root_entity = File(key_name = 'rootentity')


class initMasterServer(webapp.RequestHandler):
    """ init the master server through visiting following URL in browser:
    https://master-server-id.appspot.com/format
    
    Args:
        NA
        
    Returns:
        string of initilization progress
    """
    def get(self):
        ''' create the root directory node. 
        - Only the root node is delete manually, the master server can be
        again initialized, and clear all data in datastore
        '''
        root = File.all().filter("name =", "/").get()
        if root:
            # can not format the master server
            self.response.out.write("DO NOT to init Master Server multiple times! <br>")
            self.response.out.write("Otherwise, manually delete the root node in GAE dashboard first... <br>")
            self.response.out.write('<a href="http://appengine.google.com/datastore/explorer?app_id=gaestorage0&viewby=kind&kind=File">Go...</a><br>')
        else:
            self._clear_master()
            
    """ format server for automation test:
    also remove the root
    Args:
        NA
        
    Returns:
        string of initilization progress
    """
    def post(self):
        ''' create the root directory node. 
        - Only the root node is delete manually, the master server can be
        again initialized, and clear all data in datastore
        '''
        passwd = self.request.get('passwd')
        if passwd == 'automation':
            self._clear_master()
        else:
            self.error(404)

    def _clear_master(self):
        self.response.out.write("Master Server Initialized...<br>")
        
        # clear all file and chunk record first.
        files = File.all().fetch(MAX_FETCH_COUNT)
        for file in files:
            for chunk in file.chunks:
                chunk.delete()
            file.delete()
            
        # clear all opened file entities
        opens = OpenedFile.all().fetch(MAX_FETCH_COUNT)
        for open in opens:
            open.delete()
            
        """
        # clear all probability recode
        arcs = ProbabilityArc.all().fetch(MAX_FETCH_COUNT)
        for arc in arcs:
            arc.delete()
        """ 
        self.response.out.write("Cleared all files and chunks! <br>")
        
        # clear all chunk server
        chunkservers = ChunkServer.all().fetch(MAX_FETCH_COUNT)
        for chunkserver in chunkservers:
            chunkserver.delete()
            
        self.response.out.write("Cleared all chunk servers! <br>")
        
        # create the root node.    
        root = File(parent = _root_entity)
        root.name = ROOT_DIR
        st = dict(st_mode = (S_IFDIR | 0755),
                    st_nlink = 2,
                    st_size = 0,
                    st_ctime = time(),
                    st_mtime = time(),
                    st_atime = time(),
                    st_uid = 0,
                    st_gid = 0
                )
        root.stat = simplejson.dumps(st)
        root.xattr = simplejson.dumps({})
        root.writing = 0
        root.owner = users.get_current_user()
        root.put()

        self.response.out.write("DONE. To init chunk servers...<br>")
        
        
class initChunkServer(webapp.RequestHandler):
    """ handle the registration request from a chunk server.

    Args:
        url: the url of the chunk server
        space: its free space in bytes

    Returns:
        OK when the chunk server is registered
        404 when a server with this url is already known
    """
    def post(self):
        """ register a new chunk server, tracking its url and space usage.
        """
        url = self.request.get('url')
        known = ChunkServer.all().filter("url =", url).get()

        if known:
            # duplicate registration is rejected
            self.error(404)
        else:
            server = ChunkServer()
            server.url = url
            server.free = int(self.request.get('space'))
            server.used = 0
            server.put()

            self.response.out.write("OK")


class AbstractStub(webapp.RequestHandler):
    """ base handler: receives a remote call over POST and dispatches it
    to the subclass method named by the 'func' form field.
    """
    def post(self):
        """ receive the command and dispatch

        Args:
            func: the func name of the command from client
            args: the arguments of the function, url-quoted JSON

        Returns:
            ret_str: the return value of the function in JSON
        """
        funcname = self.request.get('func').lower()
        args = urllib.unquote(self.request.get('args'))

        ret = False
        # never dispatch to private helpers or the dispatcher itself.
        # NOTE(review): inherited public RequestHandler methods are still
        # reachable by name — confirm whether that is acceptable.
        if not funcname.startswith('_') and funcname != 'post':
            # BUG FIX: getattr without a default raised AttributeError
            # (an unhandled 500) for unknown func names; answer False
            func = getattr(self, funcname, None)
            if callable(func):
                args = simplejson.loads(args)
                try:
                    ret = func(args)
                except:
                    # tell the client the call failed, then re-raise so
                    # GAE logs the original error
                    self.response.out.write(simplejson.dumps(False))
                    raise

        self.response.out.write(simplejson.dumps(ret))
        
        
class stubChunk(AbstractStub):
    """ remote stub called by chunk servers to verify, for security, that
    an operation was really authorized by the master.
    """
    def create(self, args):
        """ check that the chunk has been created by the master server

        Args:
            args: (chunk_key,) key of the chunk

        Returns:
            OK if the chunk is known to the master
            404 on any error
        """
        (chunk_key,) = args
        if Chunk.all().filter('chunk_key =', chunk_key).get():
            self.response.out.write("OK")
        else:
            # someone is try to hack and create the chunks!!!
            self.error(404)

    def delete(self, args):
        """ check chunks if they are really deleted. for security!!

        Args:
            args: (chunk_keys,) JSON-encoded list of chunk keys

        Returns:
            OK if all chunks are deleted
            404 is any error
        """
        (chunk_keys,) = args
        for key in simplejson.loads(chunk_keys):
            if Chunk.all().filter('chunk_key =', key).get():
                # someone is try to hack and delete the chunks!!!
                self.error(404)

        self.response.out.write("OK")

    def truncate(self, args):
        """ check the chunk if it is truncated to length. for security!!

        Args:
            args: (chunk_key, length) key of the chunk and its new length

        Returns:
            OK if the master's chunk record matches the length
            404 on any error
        """
        (chunk_key, length) = args
        chunk = Chunk.all().filter('chunk_key =', chunk_key).get()
        if chunk and chunk.length == length:
            self.response.out.write("OK")
        else:
            # someone is try to hack and truncate the chunks!!!
            self.error(404)

    def read(self, args):
        """ check the file handle if it has the permission to read.
        for security!!

        Args:
            args: (chunk_key, rand) chunk key and file handle's rand number

        Returns:
            OK if an open handle with this rand exists
            404 on any error
        """
        (chunk_key, rand) = args
        chunk = Chunk.all().filter('chunk_key =', chunk_key).get()
        # guard: an unknown chunk_key must answer 404, not crash
        opened = chunk and chunk.file.opens.filter('rand =', rand).get()
        # BUG FIX: any open handle may read (consistent with
        # stubClient._canread); the old code demanded a writable handle,
        # a copy-paste from write()
        if opened:
            self.response.out.write("OK")
        else:
            # someone is try to hack and read the chunks!!!
            self.error(404)

    def write(self, args):
        """ check the file handle if it has the permission to write.
        for security!!

        Args:
            args: (chunk_key, rand) chunk key and file handle's rand num

        Returns:
            OK if a writable open handle with this rand exists
            404 on any error
        """
        (chunk_key, rand) = args
        chunk = Chunk.all().filter('chunk_key =', chunk_key).get()
        # guard: an unknown chunk_key must answer 404, not crash
        opened = chunk and chunk.file.opens.filter('rand =', rand).get()
        if opened and not opened.readonly:
            self.response.out.write("OK")
        else:
            # someone is try to hack and write the chunks!!!
            self.error(404)
            

class stubClient(AbstractStub):
    def _parent(self, path):
        """ parent dir of the file.
        path must not end with '/'; the path format is defined by
        GAEDrive, so os.path is deliberately not used.

        Args:
            path: path name of the file

        Returns:
            the reference to the file node of the parent dir
        """
        # dropping the last component of '/x' yields '', i.e. the root
        pdir = '/'.join(path.split('/')[:-1]) or ROOT_DIR
        return self._searchFile(pdir)

    def _filename(self, path):
        """ base name of the file.
        path must not end with '/'; the path format is defined by
        GAEDrive, so os.path is deliberately not used.

        Args:
            path: path name of the file

        Returns:
            the base file name
        """
        return path.rsplit('/', 1)[-1]

    def _searchFile(self, path):
        """ search the entity of the file in the datastore.

        Args:
            path: path name of the file

        Returns:
            the File entity acting as the i-node, or None when any path
            component does not exist
        """
        # start from the root node and walk down one component at a time
        node = File.all().filter("name =", ROOT_DIR).get()
        if path == ROOT_DIR:
            return node
        for name in path.split('/')[1:]:
            node = node.subfiles.filter("name =", name).get()
            if node is None:
                break
        return node

    def _getattr(self, path):
        """ return the stat dict of the file at path.
        The node's stat JSON must exist. """
        node = self._searchFile(path)
        return simplejson.loads(node.stat)

    def _getxattr(self, path):
        """ return the extended-attribute dict of the file at path.
        The node's xattr JSON must exist. """
        node = self._searchFile(path)
        return simplejson.loads(node.xattr)
            
    def _allocateChunks(self, file, tn, offset):
        """ allocate tn chunks ahead of time for better performance.

        Fires one asynchronous 'create' RPC per chunk against up to two
        least-used chunk servers, then records a Chunk entity for each
        success and moves the space accounting in a transaction.

        Args:
            file: the File entity the new chunks belong to
            tn: number of chunks to pre-allocate (must be even when two
                servers share the work — asserted below)
            offset: byte offset of the write triggering the allocation

        Returns:
            the first chunk which will be currently used;
            None when no chunk server is registered;
            False when any create RPC fails
        """
        ret = None
        chunks = []
        # align down to the CHUNK_SIZE boundary containing offset
        chunk_offset = offset - offset % CHUNK_SIZE

        # pick the (up to 2) least-used chunk servers and spread the tn
        # chunks evenly across them by repeating the server list, which
        # alternates the targets
        servers = ChunkServer.all().order('used').fetch(2)
        if len(servers) == 1:
            servers *= tn
        elif len(servers) == 2:
            assert tn % 2 == 0
            servers *= tn / 2

        # fire one asynchronous create RPC per chunk
        for server in servers:
            rpc = urlfetch.create_rpc()
            chunkserver_url = URLHEAD + server.url + \
                                    "/chunk_op?func=create"
            args = None
            payload = urllib.urlencode({'args' : simplejson.dumps(args)})
            urlfetch.make_fetch_call(rpc,
                                     chunkserver_url, payload, urlfetch.POST)
            chunks.append((rpc, file, chunk_offset, server))
            chunk_offset += CHUNK_SIZE

        # finish all RPCs and record the chunks that were created
        for (rpc, file, chunk_offset, server) in chunks:
            result = rpc.get_result()
            if result.status_code == HTTP_OK:
                # the chunk server answers with the chunk's key in JSON
                chunk = Chunk()
                chunk.file = file
                chunk.offset = chunk_offset
                chunk.length = CHUNK_SIZE
                chunk.server = server
                chunk.chunk_key = simplejson.loads(result.content)
                chunk.put()
                # move the allocated space from free to used atomically
                def _transaction(chunkserver, grow_size):
                    chunkserver.used += grow_size
                    chunkserver.free -= grow_size
                    chunkserver.put()
                db.run_in_transaction(_transaction, server, CHUNK_SIZE)
                if not ret:
                    ret = chunk
            else:
                ret = False
                break

        return ret

    def _freeChunks(self, chunks):
        """ delete the given chunks from the master and their chunk servers.

        Per chunk server: delete the master-side Chunk records first,
        return the freed space in a transaction, then send one
        asynchronous 'delete' RPC carrying all of that server's chunk keys.

        Args:
            chunks: iterable of Chunk entities to free

        Returns:
            True when every contacted chunk server confirmed,
            False when any RPC answered 404
        """
        ret = True
        servers = []    # pending delete RPCs, one per contacted server
        chunkservers = ChunkServer.all().fetch(MAX_FETCH_COUNT)
        for chunkserver in chunkservers:
            delete_chunks = []
            for chunk in chunks:
                if chunk.server.url == chunkserver.url:
                    delete_chunks.append((chunk.length, chunk.chunk_key))
                    # delete in master before chunk server
                    chunk.delete()

            if delete_chunks:
                # return the freed space to the server's accounting
                shrink_size = sum([chunk[0] for chunk in delete_chunks])
                def _transaction(chunkserver, shrink_size):
                    chunkserver.used -= shrink_size
                    chunkserver.free += shrink_size
                    chunkserver.put()
                db.run_in_transaction(_transaction, chunkserver, shrink_size)
                # ask chunk server to delete them in async call
                rpc = urlfetch.create_rpc()
                chunkserver_url = URLHEAD + chunkserver.url + \
                                                "/chunk_op?func=delete"
                args = [chunk[1] for chunk in delete_chunks]
                payload = urllib.urlencode({'args' : simplejson.dumps(args)})
                urlfetch.make_fetch_call(rpc,
                                         chunkserver_url,
                                         payload,
                                         urlfetch.POST)
                servers.append(rpc)

        # wait for every urlfetch result
        for rpc in servers:
            result = rpc.get_result()
            if result.status_code == 404:
                ret = False

        return ret
        
    def _canwrite(self, file, rand):
        """ True when rand identifies a writable open handle on file. """
        handle = file.opens.filter("rand =", rand).\
                            filter("readonly =", False).get()
        return handle is not None

    def _canread(self, file, rand):
        """ True when rand identifies any open handle on file. """
        return file.opens.filter("rand =", rand).get() is not None
                                
    def _closefile(self, file, rand):
        """ delete the OpenedFile record matching rand, if any. """
        handle = file.opens.filter("rand =", rand).get()
        if handle is not None:
            handle.delete()

    
    """ file operations handlers 
    
    Args:
        path ...
    
    Returns:
        False: error, like file exists or no such entry
        True: success
        int: read/write length
        stat: dict of file stat
        ...
    """
    def getchunks(self, args):
        """ list the chunk locations of a file.

        Args:
            args: (path, rand) the file path and an open handle's rand

        Returns:
            list of (chunk server url, chunk key) tuples; empty when the
            file is missing or the caller may not read it
        """
        (path, rand) = args
        node = self._searchFile(path)
        if not node:
            return []
        # the owner may always read; anyone else needs an open handle
        if node.owner != users.get_current_user() and \
                not self._canread(node, rand):
            return []
        return [(c.server.url, c.chunk_key) for c in node.chunks]
    
    def chmod(self, (path, mode)):
        file = self._searchFile(path)

        if file and file.owner == users.get_current_user():
            def _transaction(file, mode):
                file.st_mode &= 0770000
                file.st_mode |= mode
                file.put()
            db.run_in_transaction(_transaction, file, mode)
            return True
        else:
            logging.error("No such file or permission: %d" % path)
            return False

    def chown(self, args):
        """ change the recorded uid/gid of a file.

        Args:
            args: (path, uid, gid)

        Returns:
            True on success, False when the file is missing or the
            caller is not its owner
        """
        (path, uid, gid) = args
        file = self._searchFile(path)

        if file and file.owner == users.get_current_user():
            def _transaction(file, uid, gid):
                file.st_uid = uid
                file.st_gid = gid
                file.put()
            db.run_in_transaction(_transaction, file, uid, gid)
            return True
        else:
            # BUG FIX: '%d' on a string path raised TypeError; use %s
            logging.error("No such file or permission: %s" % path)
            return False

    def create(self, (path, mode)):
        """ create a regular file.

        Args:
            path: path of the new file
            mode: permission bits for the new file

        Returns:
            the creation timestamp on success, False when the file
            already exists or the parent dir belongs to another user
        """
        file = self._searchFile(path)
        parentdir = self._parent(path)

        # anyone may create directly under '/', otherwise only the
        # parent dir's owner may
        if file or (parentdir.name != '/' and \
                    parentdir.owner != users.get_current_user()):
            return False
        else:
            file = File(parent = _root_entity)
            file.name = self._filename(path)
            file.parentdir = parentdir
            now = time()
            st = dict(st_mode = (S_IFREG | int(mode)),
                        st_nlink = 1,
                        st_size = 0,
                        st_ctime = now,
                        st_mtime = now,
                        st_atime = now,
                        st_uid = 0,
                        st_gid = 0)
            file.stat = simplejson.dumps(st)
            file.xattr = simplejson.dumps({})
            file.owner = users.get_current_user()
            file.writing = 0
            file.put()

            # increment st_nlink of parent dir
            def _transaction(parentdir):
                parentdir.st_nlink += 1
                parentdir.put()
            db.run_in_transaction(_transaction, file.parentdir)
            return now
        
    def mkdir(self, (path, mode)):
        """ create a directory.

        Args:
            path: path of the new dir
            mode: permission bits for the new dir

        Returns:
            the creation timestamp on success, False when the dir
            already exists or the parent dir belongs to another user
        """
        dir = self._searchFile(path)
        parentdir = self._parent(path)

        # anyone may create directly under '/', otherwise only the
        # parent dir's owner may
        if dir or (parentdir.name != '/' and \
                   parentdir.owner != users.get_current_user()):
            return False
        else:
            dir = File(parent = _root_entity)
            dir.name = self._filename(path)
            dir.parentdir = parentdir
            now = time()
            st = dict(st_mode = (S_IFDIR | int(mode)),
                        st_nlink = 2,
                        st_size = 0,
                        st_ctime = now,
                        st_mtime = now,
                        st_atime = now,
                        st_uid = 0,
                        st_gid = 0)
            dir.stat = simplejson.dumps(st)
            dir.xattr = simplejson.dumps({})
            dir.owner = users.get_current_user()
            dir.writing = 0
            dir.put()

            # increment st_nlink of parent dir
            def _transaction(parentdir):
                parentdir.st_nlink += 1
                parentdir.put()
            db.run_in_transaction(_transaction, dir.parentdir)
            return now

    def open(self, (path, flags)):
        file = self._searchFile(path)
        ret = 0
        if file:
            # create the open file record
            openfile = OpenedFile()
            openfile.file = file
            # assign a random num to the opened file
            openfile.rand = random.randint(1, 0xfffffffe)
            ret = openfile.rand

            if flags & os.O_RDONLY:
                # read only
                if file.owner == users.get_current_user() or file.st_mode & 004:
                    openfile.readonly = True
                else:
                    del openfile
                    ret = -errno.EROFS
            else:
                # want to open file for write or append
                # TODO: check the permission
                if True or file.owner == users.get_current_user():
                    openfile.readonly = False
                    # inc writing
                    node = file.parentdir
                    while node.name != ROOT_DIR:    
                        def _transaction(file):    
                            file.writing += 1
                            file.put()
                        db.run_in_transaction(_transaction, node)
                        node = node.parentdir
                else:
                    # can not open for writing
                    del openfile
                    ret = -errno.EROFS
            if ret > 0:
                openfile.put()
        # only return the random number to authorized client and user
        return ret

    def readdir(self, (path,)):
        """ list a directory.

        Args:
            path: path of the dir; '' means the root dir itself

        Returns:
            list of (name, stat_dict, xattr_dict, chunk_hint, symlink)
            tuples; chunk_hint is [] for entries without chunks and None
            otherwise; empty list when the dir does not exist
        """
        files = []

        if path == '':
            # return the attr of the root dir itself
            files.append(('',
                          self._getattr(ROOT_DIR),
                          self._getxattr(ROOT_DIR),
                          [],
                          None))
        else:
            # find the dir
            dir = self._searchFile(path)
            if dir:
                # get every file's meta data in this dir
                for f in dir.subfiles:
                    # decode the stat JSON stored on the entity
                    stat = simplejson.loads(f.stat)
                    ''' set mode for other users
                    if f.owner != users.get_current_user():
                        mode = f.st_mode & 007
                        mode = f.st_mode & 0770000 + \
                                    mode << 6 + mode << 3 + mode
                        stat['st_mode'] = mode
                    '''
                    files.append((f.name, stat,
                                  simplejson.loads(f.xattr),
                                  [] if not f.chunks.get() else None,
                                  f.symlink))

        return files

    def readlink(self, args):
        """ resolve a symbolic link.

        The link target is stored on the master node itself, instead of
        a data chunk server.

        Args:
            args: (path,) path of the symlink

        Returns:
            the link target, or False when the path does not exist
        """
        (path,) = args
        node = self._searchFile(path)
        return node.symlink if node else False
        
    def rename(self, (old, new)):
        """ rename (and possibly move) a file.

        Note that the parent dir may change.

        Args:
            old: current path of the file
            new: target path (must not exist yet)

        Returns:
            True on success, False when old is missing, new already
            exists, or the caller does not own the file
        """
        file = self._searchFile(old)
        newfile = self._searchFile(new)
        ret = False

        if file and not newfile and file.owner == users.get_current_user():
            def _transaction(file, parentdir, name):
                file.name = name
                oldpdir = file.parentdir
                file.parentdir = parentdir
                file.put()

                # when the file moved, fix st_nlink on both parent dirs
                if oldpdir != file.parentdir:
                    file.parentdir.st_nlink += 1
                    file.parentdir.put()
                    oldpdir.st_nlink -= 1
                    oldpdir.put()
            db.run_in_transaction(_transaction, file,
                                                self._parent(new),
                                                self._filename(new))
            ret = True

        return ret

    def rmdir(self, (path,)):
        """ recursively remove a directory.

        Walks the subtree with a work list, deletes every descendant,
        frees all collected chunks, and decrements the parent dir's
        st_nlink.

        Args:
            path: path of the dir to remove

        Returns:
            True on success, False when the dir is missing or being
            written, or the caller is not its owner

        NOTE(review): sub-files currently being written are skipped, yet
        their ancestor dirs are still deleted — confirm this is intended.
        """
        dir = self._searchFile(path)
        ret = False

        if dir and not dir.writing:
            if dir.owner == users.get_current_user():
                # delete all its subfiles
                chunks = []
                subfiles = dir.subfiles.fetch(MAX_FETCH_COUNT)
                file = subfiles.pop(0) if subfiles else None
                while file:
                    if file.st_mode & S_IFDIR:
                        # a dir: queue its children, then delete it
                        subfiles += file.subfiles.fetch(MAX_FETCH_COUNT)
                        file.delete()
                    elif not file.writing:
                        # a plain file: collect its chunks and delete it
                        chunks += file.chunks
                        file.delete()

                    # pop the next file to handle
                    file = subfiles.pop(0) if subfiles else None

                # remove all collected chunks
                self._freeChunks(chunks)
                # update st_nlink of the parent dir
                def _transaction(parentdir):
                    parentdir.st_nlink -= 1
                    parentdir.put()
                db.run_in_transaction(_transaction, dir.parentdir)
                dir.delete()

                ret = True

        return ret

    def statfs(self, args):
        """ report file-system statistics (statvfs-style fields).

        Args:
            args: (path,) the path being queried

        Returns:
            for the root dir: dict with f_bsize, f_blocks and f_bavail
            aggregated over all chunk servers; for any other dir: dict
            with f_files, the number of entries in that dir

        More on the fields:
        http://publib.boulder.ibm.com/infocenter/iseries/v5r3/index.jsp?topic=/apis/statvfs.htm
        """
        (path,) = args
        BLOCK_SIZE = CHUNK_SIZE
        if path != ROOT_DIR:
            node = self._searchFile(path)
            return dict(f_files = len(node.subfiles.fetch(MAX_FETCH_COUNT)))
        # root: aggregate the space statistics of every chunk server
        servers = ChunkServer.all().fetch(MAX_FETCH_COUNT)
        free = sum(cs.free for cs in servers)
        used = sum(cs.used for cs in servers)
        return dict(f_bsize = BLOCK_SIZE,
                    f_blocks = (free + used) // BLOCK_SIZE,
                    f_bavail = free // BLOCK_SIZE)

    def symlink(self, (target, source)):
        """ create a symbolic link at target pointing to source.

        The link target string is stored on the master node, not on a
        data chunk server.

        Args:
            target: path of the new symlink (must not exist)
            source: path the link points to (must exist)

        Returns:
            the creation timestamp on success, False otherwise
        """
        file = self._searchFile(target)
        sfile = self._searchFile(source)

        if file or not sfile:
            return False
        else:
            file = File(parent = _root_entity)
            file.name = self._filename(target)
            file.parentdir = self._parent(target)

            now = time()
            st = dict(st_mode = (S_IFLNK | 0777),
                        st_nlink = 1,
                        st_size = len(source),
                        st_ctime = now,
                        st_mtime = now,
                        st_atime = now,
                        st_uid = 0,
                        st_gid = 0)
            file.stat = simplejson.dumps(st)
            file.xattr = simplejson.dumps({})
            file.symlink = source
            file.owner = users.get_current_user()
            file.writing = 0
            file.put()

            # increment st_nlink of parent dir
            def _transaction(parentdir):
                parentdir.st_nlink += 1
                parentdir.put()
            db.run_in_transaction(_transaction, file.parentdir)
            return now

    def truncate(self, args):
        """ truncate a file to length bytes.

        Chunks lying wholly beyond the new length are freed; the chunk
        containing the cut point is shrunk in the master record, in the
        chunk server's space accounting, and on the chunk server itself.

        Args:
            args: (path, length, rand) path, new length, and the handle's
                  rand number (currently unused here)

        Returns:
            the statfs dict of '/' on success, False when the file is
            missing or the caller is not its owner
        """
        (path, length, rand) = args
        file = self._searchFile(path)
        ret = False

        if file and file.owner == users.get_current_user():
            delete_chunk = []
            for chunk in file.chunks:
                if chunk.offset >= length:
                    # BUG FIX: was 'offset > length', which kept a chunk
                    # starting exactly at the cut point alive (zero bytes
                    # of it remain, so it must be freed)
                    delete_chunk.append(chunk)
                elif chunk.offset < length < chunk.offset + CHUNK_SIZE:
                    # BUG FIX: the old condition
                    # 'offset - CHUNK_SIZE < length < offset' matched the
                    # chunk BEFORE the cut point, so the chunk containing
                    # it was never shrunk
                    # return the cut-off space in the server accounting
                    def _transcation(chunkserver, shrink_size):
                        chunkserver.used -= shrink_size
                        chunkserver.free += shrink_size
                        chunkserver.put()
                    shrink_size = chunk.length - length % CHUNK_SIZE
                    db.run_in_transaction(_transcation, chunk.server, \
                                                        shrink_size)
                    # update the chunk record, then call the chunk server
                    def _transcation2(chunk, length):
                        chunk.length = length
                        chunk.put()
                    chunk_length = length % CHUNK_SIZE
                    db.run_in_transaction(_transcation2, chunk,
                                                         chunk_length)
                    # truncate the chunk on its chunk server
                    chunkserver_url = URLHEAD + chunk.server.url + \
                                        "/chunk_op?func=truncate"
                    # BUG FIX: send chunk.chunk_key — the id the chunk
                    # server issued at create time (see _allocateChunks /
                    # _freeChunks) — not the master's datastore key
                    call_args = (chunk.chunk_key, chunk_length)
                    payload = urllib.urlencode({'args' :
                                                simplejson.dumps(call_args)})
                    # the result is deliberately ignored (best effort)
                    urlfetch.fetch(chunkserver_url, payload, urlfetch.POST)
            # free all wholly-truncated chunks
            self._freeChunks(delete_chunk)
            # update file size
            def _transcation3(file, length):
                file.st_size = length
                file.put()
            db.run_in_transaction(_transcation3, file, length)

            ret = self.statfs(('/',))

        return ret

    def unlink(self, args):
        """ remove a file entry and free its chunks.

        Args:
            args: (path,) path of the file

        Returns:
            the statfs dict of '/' on success, False when the file is
            missing, currently being written, or not owned by the caller
        """
        (path,) = args
        node = self._searchFile(path)

        if not node or node.writing:
            return False
        if node.owner != users.get_current_user():
            return False

        self._freeChunks(node.chunks)
        # the parent dir loses one link
        def _transaction(parentdir):
            parentdir.st_nlink -= 1
            parentdir.put()
        db.run_in_transaction(_transaction, node.parentdir)
        # remove this entry
        node.delete()
        return self.statfs('/')

    def utimens(self, args):
        """ set the access/modification times of a file.

        Args:
            args: (path, times) path and an (atime, mtime) pair, or a
                  falsy times to use the current time for both

        Returns:
            the (atime, mtime) actually stored, or False when the file
            does not exist
        """
        (path, times) = args
        file = self._searchFile(path)

        if file:
            now = time()
            atime, mtime = times if times else (now, now)
            def _transaction(file, atime, mtime):
                file.st_atime = atime
                file.st_mtime = mtime
                file.put()
            db.run_in_transaction(_transaction, file, atime, mtime)
            return (atime, mtime)
        else:
            # BUG FIX: '%d' on a string path raised TypeError; use %s
            logging.error("No such file: %s" % path)
            return False
    
    def write(self, args):
        """ prepare a one-chunk write at offset.

        Updates the file's timestamps and recorded size, allocates chunks
        on demand, and returns the chunk map so the client can talk to
        the chunk servers directly.

        Args:
            args: (path, offset, length, rand) path, write position,
                  write length and the writable handle's rand number

        Returns:
            ([(server_url, chunk_key), ...], now, statfs_dict) on success;
            False when the file is missing, the handle cannot write, or
            chunk allocation fails
        """
        (path, offset, length, rand) = args
        file = self._searchFile(path)
        now = time()
        if file and self._canwrite(file, rand):
            # update time stamps and grow the recorded size
            def _transaction(file, now, offset, length):
                file.st_atime = now
                file.st_mtime = now
                file.st_size = max(file.st_size, offset + length)
                file.put()
            db.run_in_transaction(_transaction, file, now, offset, length)
            # figure out the chunk covering this offset
            chunk = Chunk.all().filter("file =", file).\
                                filter("offset =", offset).get()

            if not chunk:
                chunk = self._allocateChunks(file, PRE_ALLOC_CHUNKS, offset)
                if not chunk:
                    # BUG FIX: the old code fell through after error(404)
                    # and still returned a (stale) chunk list; fail fast
                    self.error(404)
                    return False

            # return all chunk locations, and the write timestamp
            chunks = file.chunks.order("offset").fetch(MAX_FETCH_COUNT)
            return [(c.server.url, c.chunk_key) for c in chunks], \
                    now, self.statfs('/')
        else:
            logging.error("No such file: %s" % path)
            return False

    def read(self, args):
        """Look up chunk locations for a read and bump st_atime.

        Args:
            args: tuple of (path, offset, size).

        Returns:
            ([(chunk_server_url, chunk_key), ...], atime) on success, or
            (None, None) when the path is missing or access is denied.
        """
        path, offset, size = args
        timestamp = time()
        target = self._searchFile(path)

        if target is None:
            return (None, None)

        # Readable when the caller owns the file or the world-read
        # permission bit (octal 004) is set in st_mode.
        world_readable = bool(target.st_mode & 4)
        if target.owner != users.get_current_user() and not world_readable:
            return (None, None)

        # Record the access time atomically.
        def _touch(entity, when):
            entity.st_atime = when
            entity.put()
        db.run_in_transaction(_touch, target, timestamp)

        # Collect every chunk's (server URL, key) pair in offset order.
        locations = []
        for piece in target.chunks.order("offset").fetch(MAX_FETCH_COUNT):
            locations.append((piece.server.url, piece.chunk_key))
        return (locations, timestamp)
        
    def flush(self, args):
        """Flush an open file (FUSE flush).

        The master keeps no per-handle dirty state, so there is nothing
        to write back; always reports success.
        """
        (path,) = args  # one-element tuple holding the file's path
        return True
            
    def release(self, (path, size, now, rand)):
        """Close an open file handle (FUSE release).

        Args:
            path: path string of the file being closed.
            size: final byte size of the file.
            now:  timestamp to store as atime (and mtime for writers).
            rand: open-handle token; _canwrite uses it to tell a writing
                  open apart from a read-only one.

        Returns:
            True when the file exists, False otherwise.
        """
        # update the atime
        file = self._searchFile(path)
        
        if file:
            if self._canwrite(file, rand):
                # for writing opens
                # Walk every ancestor directory up to (but not including)
                # the root, decrementing its in-progress "writing" counter.
                # NOTE(review): presumably the open path incremented these
                # same counters -- confirm against the open/create handler.
                node = file.parentdir
                while node.name != ROOT_DIR:
                    def _transaction(node):
                        node.writing -= 1
                        node.put()
                    db.run_in_transaction(_transaction, node)
                    node = node.parentdir
                    
                # change last chunk length
                # The final chunk is generally partial; its length is the
                # remainder of size modulo CHUNK_SIZE.  (When size is an
                # exact multiple of CHUNK_SIZE this stores length 0 for a
                # chunk at offset == size -- NOTE(review): verify intended.)
                last_chunk_offset = size - size % CHUNK_SIZE
                last_chunk = file.chunks.\
                                    filter("offset =", last_chunk_offset).get()
                if last_chunk:
                    def _transaction2(last_chunk, size):
                        last_chunk.length = size % CHUNK_SIZE
                        last_chunk.put()
                    db.run_in_transaction(_transaction2, last_chunk, size)
                
                # update mtime
                # Writers also get the final size recorded here.
                def _transaction3(file, now):
                    file.st_mtime = now
                    file.st_atime = now
                    file.st_size = size
                    file.put()
                db.run_in_transaction(_transaction3, file, now)
            else:
                # update atime
                # Read-only opener: only the access time changes.
                def _transaction4(file, now):
                    file.st_atime = now
                    file.put()
                db.run_in_transaction(_transaction4, file, now)
            # free the openfile
            self._closefile(file, rand)
            return True
        else:
            return False
    
    def setxattr(self, args):
        """Set one extended attribute on a file.

        Only the file's owner may modify xattrs.  Returns True on
        success, False when the path is missing or the caller does not
        own the file.
        """
        path, name, value = args
        target = self._searchFile(path)
        if not target or target.owner != users.get_current_user():
            return False

        # xattrs live on the entity as one JSON-encoded dict; rewrite it
        # atomically.
        def _store(entity):
            attrs = simplejson.loads(entity.xattr)
            attrs[name] = value
            entity.xattr = simplejson.dumps(attrs)
            entity.put()
        db.run_in_transaction(_store, target)
        return True
        
    def getxattr(self, args):
        """Fetch one extended attribute from a file.

        Returns the stored value, '' when the attribute is unset, or
        False when the path does not name an existing file.
        """
        path, name = args
        target = self._searchFile(path)
        if not target:
            return False
        attrs = simplejson.loads(target.xattr)
        return attrs.get(name, '')
        
    def listxattr(self, args):
        """List the names of all extended attributes on a file.

        Returns a list of attribute names, or False when the path does
        not name an existing file.
        """
        (path,) = args
        target = self._searchFile(path)
        if not target:
            return False
        return simplejson.loads(target.xattr).keys()
        
    def removexattr(self, args):
        """Remove one extended attribute from a file (best effort).

        Only the file's owner may remove attributes.  Returns True
        whenever the file exists and belongs to the caller -- even when
        the attribute was not present -- and False otherwise.
        """
        path, name = args
        target = self._searchFile(path)
        if not target or target.owner != users.get_current_user():
            return False

        # Drop the key and rewrite the JSON blob atomically; a missing
        # key leaves the entity untouched (no put).
        def _drop(entity):
            attrs = simplejson.loads(entity.xattr)
            if name in attrs:
                del attrs[name]
                entity.xattr = simplejson.dumps(attrs)
                entity.put()
        db.run_in_transaction(_drop, target)
        return True
        

########==========main entry=============
# URL routing table: each path maps to a webapp.RequestHandler subclass
# defined earlier in this file.
application = webapp.WSGIApplication([
    ('/format', initMasterServer),          # one-time master initialization
    ('/init_chunkserver', initChunkServer), # presumably chunk-server registration -- handler not shown here
    ('/file_op', stubClient),               # presumably the client file-operation RPC endpoint
    ('/doublecheck', stubChunk),            # presumably chunk verification callbacks

], debug = True)

def main():
    """Serve the WSGI application via CGI (the classic GAE entry point)."""
    handler = wsgiref.handlers.CGIHandler()
    handler.run(application)

if __name__ == '__main__':
    main()

