##@package S3
#This is the module that wraps all Amazon S3 functionality.
#Check the functions for more details.

from RestAPI.core.ResponseData import DataNode
from RestAPI.core.APIwrapper import RestRequest
from RestAPI.amazon.service import AWSError
from RestAPI.amazon.service import AWSservice
from RestAPI.amazon.service import AWSrequest
from RestAPI.amazon.service import AWSConfiguration
from RestAPI.amazon.security import User
import re,time,datetime
import base64
import md5,random,string
import hmac
import sys
import json
if sys.version_info[:2] > (2, 4): # pragma: no cover
    from urllib2 import quote
    from hashlib import sha1 # pylint: disable-msg=E0611
else:
    from urllib import quote
    from Crypto.Hash import SHA1 as sha1
    
## @class S3Request
#   The basic class that provides the interface for S3, wrapping the additional features S3 adds on top of plain HTTP requests.
#   Use it the same way as RestRequest, but authorize the request with your AWS key ID and secret access key
#   before you send it to Amazon.
class S3Request (AWSrequest):
    #Sub-resources that take part in the signature (CanonicalizedResource).
    reserved_subresources=("acl", "lifecycle", "location", "logging", "notification", "partNumber", "policy", "requestPayment", "torrent", "uploadId", "uploads", "versionId", "versioning", "versions", "website")
    #Header names that are copied into the browser upload form as hidden fields.
    UPLOADINFO=('content-type','content-disposition','expires','x-amz-security-token')
    def __init__(self,host="s3.amazonaws.com",method='GET',resource="/",subresources=None,headers=None,body=""):
        """
        Create an S3 style request and initiate the request context.
        The request context can also be set up later.
        """
        super(S3Request,self).__init__(host,method,resource,subresources,headers,body)
    def authorize(self,AuthKey=None,SecretAccessKey=None,securityToken=None):
        """
        Format the request to follow the S3 requirements and sign it.

        AuthKey can be either the AWS access key id string or an S3session
        object; when a session is given, its stored credentials are used and
        the other two parameters are ignored.
        """
        if type(AuthKey) is S3session:
            #take the credentials from the session object
            #(bug fix: the access key id is session.AuthKey, not the session object itself)
            session=AuthKey
            self.AuthKey=session.AuthKey
            self.SecretAccessKey=session.SecretAccessKey
            self.securityToken=session.securityToken
            #bug fix: test the token taken from the session, not the unused parameter
            if self.securityToken:
                self.setHeader('x-amz-security-token',self.securityToken)
            self.setHeader('Authorization',self.getSignature())
            return self
        if SecretAccessKey is not None:
            self.SecretAccessKey=SecretAccessKey
        if AuthKey is not None:
            self.AuthKey=AuthKey
        if securityToken is not None:
            self.securityToken=securityToken
        #bug fix: do not overwrite an already-stored token with None here
        if getattr(self,'securityToken',None) is not None:
            self.setHeader('x-amz-security-token',self.securityToken)
        #sign the canonicalized request
        self.setHeader('Authorization',self.getSignature())
        return self
    # Output
    def getForm(self,key,policy=None,acl='public',success_action_status=200,success_action_redirect=None,redirect=None):
        """
        Get the HTML form for POSTing to S3.
        Set the x-amz-security-token/Content-Type/Content-Disposition/Expires
        headers on this request beforehand; they are forwarded as hidden fields.
        """
        url=self.getUrl({})
        form_body=''
        #map the '*' wildcard in the key onto the S3 form variable syntax
        key=key.replace('*','$(unknown)')
        #copy the REST-specific headers into hidden form fields
        for name in self.headers:
            if name in S3Request.UPLOADINFO:
                form_body+='<input type="hidden" name="%s" value="%s"/>\n' % (name,self.headers[name])
        if policy:
            #bug fix: the AWSAccessKeyId input tag was missing its closing '>'
            form_body+='<input type="hidden" name="AWSAccessKeyId" value="%s"/>\n' % self.AuthKey
            form_body+='<input type="hidden" name="Policy" value="%s"/>\n' % (policy.encoded())
            form_body+='<input type="hidden" name="Signature" value="%s"/>\n' % self.getSignatureFromPolicy(policy.encoded())
        form_body+='<input type="hidden" name="success_action_status" value="%s"/>\n' % success_action_status
        if success_action_redirect:
            form_body+='<input type="hidden" name="success_action_redirect" value="%s"/>\n' % success_action_redirect
        #TODO redirect is the same as success_action_redirect
        #TODO : more complicated form?
        if redirect:
            form_body+='<input type="hidden" name="redirect" value="%s"/>\n' % redirect
        form_body+='Upload to: <input type="input" name="key" value="%s" />\n' % key
        form_body+='File: <input type="file" name="file" />\n'
        form_body+='<!-- The elements after this will be ignored -->\n'
        form='<form action="%s" method="post" enctype="multipart/form-data">\n%s\n</form>\n' % (url,form_body)
        return form 
    def temporaryUrl(self):
        """
        Get the URL for temporary access to the resource.
        self.expire must have been set (see expires()) before calling this.
        """
        data={}
        data['Signature']=self.getSignatureForUrl(self.getStringToSignForUrl())
        data['Expires']="%s" % self.expire
        data['AWSAccessKeyId']=self.AuthKey
        return self.getUrl(data)
    def expires(self,expire_time):
        """
        Set up the expiration time of a temporary URL used to fetch protected
        resources inside the buckets. expire_time can be:
          * an absolute GMT time string, e.g. '2013-01-01 00:00:00'
          * an epoch timestamp (int)
          * a relative period, e.g. '1 hour 30 minutes'
        """
        try:
            #parse the absolute time using GMT format
            dt=int(time.mktime(time.strptime(expire_time,'%Y-%m-%d %H:%M:%S')))-time.timezone
            self.expire=dt
            return self
        except (ValueError,TypeError):
            #bug fix: narrowed the bare except so unrelated errors are not swallowed
            if type(expire_time) is int:
                #the expire_time is already in epoch time format
                self.expire=expire_time
                return self
        #parse the relative time format
        def amount(unit):
            #extract the float value preceding the given unit name, 0 if absent
            found=re.compile(r'(\d+\.?\d?)\s*'+unit+'s?').findall(expire_time)
            if found:
                return float(found[0])
            return 0
        millisecond=amount('millisecond')
        microsecond=amount('microsecond')
        second=amount('second')
        minute=amount('minute')
        hour=amount('hour')
        #TODO necessary to provide support for day/week? because temporary URL can only exist between 1 to 3 hours.
        day=amount('day')
        week=amount('week')
        delta=datetime.timedelta(days=day,seconds=second,hours=hour,minutes=minute,milliseconds=millisecond,microseconds=microsecond,weeks=week)
        self.expire="%i" % (time.time()+delta.total_seconds())
        return self
    # set up the request context.
    def setHost(self,host):
        """
        Overridden setHost for S3. It recognizes the virtual-host style host
        names S3 uses and extracts the bucket name from them.
        """
        host=host.lower()
        host=host.replace('http://','')
        #if we can find s3*.amazonaws.com, the part preceding it is the virtual host
        s3=re.search(r's3[^\.]*\.amazonaws\.com',host)
        self.host=host
        if s3 is None:
            #this is a CNAME type host name
            self.bucket=host.split('/')[0].split(':')[0]
        else:
            idx=s3.start()
            if idx > 0:
                #virtual hosting: the bucket name precedes the s3 endpoint
                self.bucket=host[0:idx-1]
            else:
                #no virtual hosting
                self.bucket=''
        return self
    def addHeader(self,key,value):
        """
        Overridden addHeader for S3. Instead of replacing the old header
        value it accumulates the values into a list.
        """
        key=key.lower()
        if key in self.headers:
            if type(self.headers[key]) is list:
                #bug fix: the third value used to create a nested list
                self.headers[key].append(value)
            else:
                self.headers[key]=[self.headers[key],value]
        else: 
            self.headers[key]=value
        return self
    # Helpers 
    def CanonicalizeHeader(self):
        """Build the CanonicalizedAmzHeaders string (sorted x-amz-* headers)."""
        keys=self.headers.keys()
        keys.sort(key=str.lower)
        if not keys:    
            return ""
        newheader=''
        for key in keys:
            v=self.headers[key]
            key=key.lower()
            if key.startswith('x-amz-'):
                if v.__class__ is list:
                    #multi-valued headers are folded into a comma separated list
                    v=','.join([ x.strip() for x in v]) 
                newheader+='%s:%s\n' % (key,v)
        return newheader 
    def timestamp(self):
        """Return the current GMT time in the RFC 1123 style format S3 expects."""
        return time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
    def CanonicalizeResource(self):
        """Build the CanonicalizedResource string: /bucket/resource?subresources."""
        if self.bucket :
            bucket="/"+self.bucket
        else:
            bucket=""
        subresources=[]
        for sub in sorted(self.subresources.keys()):
            #only the reserved sub-resources take part in the signature
            if not (sub in S3Request.reserved_subresources):
                continue
            if self.subresources[sub] is None:
                subresources.append(sub)
            else :
                subresources.append(sub+'='+str(self.subresources[sub]))
        if len(subresources) > 0:
            subresources='?'+'&'.join(subresources)
        else:
            subresources=''
        #TODO notice: resource empty(/ or "") easy to mix 
        resource=bucket+self.resource+subresources
        return resource
    def getStringToSign(self):
        """
        Get the string to sign for general S3 requests.
        Also sets the Date header used in the signature.
        """
        sstr=self.method.upper()+"\n"
        sstr+=self.content_md5()+"\n"
        sstr+=self.content_type()+"\n"
        #bug fix: the local variable used to shadow the 'time' module
        now=self.timestamp()
        self.setHeader("Date", now)
        sstr+=now+"\n"
        sstr+=self.CanonicalizeHeader()
        sstr+=self.CanonicalizeResource()
        return sstr 
    def getStringToSignForUrl(self):
        """
        Get the string to sign for Query String Request Authentication. Used
        to build a temporary browser-accessible URL that fetches protected
        resources in the buckets (the Expires value replaces the Date line).
        """
        sstr=self.method.upper()+"\n"
        sstr+=self.content_md5()+"\n"
        sstr+=self.content_type()+"\n"
        now=self.timestamp()
        self.setHeader("Date", now)
        sstr+="%s\n" % self.expire
        sstr+=self.CanonicalizeHeader()
        sstr+=self.CanonicalizeResource()
        return sstr 
    def getSignature(self,stringTosign=None):
        """Return the Authorization header value 'AWS <keyid>:<signature>'."""
        if stringTosign is None:
            stringTosign=self.getStringToSign()
        if not hasattr(self,'SecretAccessKey'):
            raise NoSecretAccessKey()
        if not hasattr(self,'AuthKey'):
            raise NoAuthKey()
        #HMAC-SHA1 over the string to sign, base64 encoded
        signature=base64.b64encode(hmac.new(self.SecretAccessKey,stringTosign,sha1).digest())
        return "AWS %s:%s" % (self.AuthKey,signature)
    def getSignatureForUrl(self,stringTosign=None):
        """Return the bare signature used in query-string authentication."""
        if stringTosign is None:
            stringTosign=self.getStringToSignForUrl()
        if not hasattr(self,'SecretAccessKey'):
            raise NoSecretAccessKey()
        if not hasattr(self,'AuthKey'):
            raise NoAuthKey()
        signature=base64.b64encode(hmac.new(self.SecretAccessKey,stringTosign,sha1).digest())
        return signature
    def getSignatureFromPolicy(self,stringTosign=None):
        """Sign an already base64-encoded browser upload policy document."""
        signature=base64.b64encode(hmac.new(self.SecretAccessKey,stringTosign,sha1).digest())
        return signature
    def content_md5(self):
        """
        Return the content-md5 header value, computing and caching it from
        the request body on demand. Empty string when there is no body.
        """
        if 'content-md5' in self.headers:
            return self.headers['content-md5']
        elif self.body:
            #bug fix: always store the header (the old code only stored it
            #when the base64 text happened to end with a newline)
            base64md5 = base64.encodestring(md5.md5(self.body).digest()).rstrip('\n')
            self.headers['content-md5'] = base64md5
            return base64md5
        else:
            return ""
##
# An S3 session that can be used to interact with the S3 online services.
class S3session (AWSservice):
    #valid bucket location constraints
    LOCATION=("EU" , "eu-west-1" , "us-west-1" , "us-west-2", "ap-southeast-1" , "ap-northeast-1" , "sa-east-1" , "")
    #valid canned ACLs
    #bug fix: "public-read" was listed twice and "public-read-write" was missing
    ACL_OPTIONS=("private" , "public-read" , "public-read-write" , "authenticated-read" , "bucket-owner-read" , "bucket-owner-full-control")
    def __init__(self,locale='us'):
        """
        Start an S3 session in the given location. Possible locations are:
        oregon, california, eu, singapore, tokyo, sao paulo
        (anything else falls back to the US standard endpoint).
        """
        self.location=locale
        locale=locale.lower()
        if locale == 'oregon':
            self.host='s3-us-west-2.amazonaws.com'
        elif locale == 'california':
            self.host='s3-us-west-1.amazonaws.com'
        elif locale == 'eu':
            self.host='s3-eu-west-1.amazonaws.com'
        elif locale == 'singapore':
            self.host='s3-ap-southeast-1.amazonaws.com'
        elif locale == 'tokyo':
            self.host='s3-ap-northeast-1.amazonaws.com'
        elif locale == 'sao paulo':
            self.host='s3-sa-east-1.amazonaws.com'
        else :
            self.host='s3.amazonaws.com'
    def setCredential(self,AuthKey=None,SecretAccessKey=None,securityToken=None):
        """Store the AWS credentials used to authorize every request of this session."""
        self.AuthKey=AuthKey
        self.SecretAccessKey=SecretAccessKey
        self.securityToken=securityToken
        #TODO caching the bucket information
        #self.__buckets__=self.buckets()
        return self
    def buckets(self):
        """
        List the buckets owned by the owner of the current S3session.
        Returns a list of Bucket objects. Raises AWSError on failure.
        """
        mybuckets=[]
        req=S3Request(self.host).GET('/').authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        if req.send():
            result=DataNode(req.result)
            for bucket in result['Bucket']:
                mybuckets.append(Bucket(bucket.Name.str(),owner=User(result.Owner),created=bucket.CreationDate.str()).setCredential(self.AuthKey,self.SecretAccessKey,self.securityToken))
            return mybuckets
        else:
            raise AWSError(req.response)
    def createBucket(self,bucketName,loc="",acl=None):
        """
        Create a new bucket with an optional location constraint and canned
        ACL. Returns a Bucket object on success; raises AWSError on failure
        (e.g. HTTP 409 when the name conflicts with an existing bucket).
        """
        #set up the location constraint
        if loc not in S3session.LOCATION:
            raise Exception('Unable to understand the specified bucket location constraint "%s"! Possible selections are:\n%s' % (loc,S3session.LOCATION))
        if loc:
            configuration='<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">\n \
  <LocationConstraint>%s</LocationConstraint>\n \
  </CreateBucketConfiguration>' % (loc)
        else:
            configuration=""
        req=S3Request(bucketName+"."+self.host).PUT('/',body=configuration)
        #set up the access control
        if acl:
            if acl not in S3session.ACL_OPTIONS:
                raise Exception('Unable to understand the specified acl constraint "%s"! Possible selections are:\n%s' % (acl,S3session.ACL_OPTIONS))
            self.acl=acl
            req.setHeader('x-amz-acl',acl)
        req.authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        if req.send():
            mybucket=self.createBucketObj(bucketName)
            return mybucket
        else:
            # 409 : conflict with an existing bucket
            raise AWSError(req.response)
    def deleteBucket(self,bucketName):
        """
        Delete the bucket of the given name. The content of the bucket must
        be removed first. Raises AWSError on failure (409 : Conflict when the
        bucket is not empty).
        """
        req=S3Request(bucketName+"."+self.host).DELETE('/')
        req.authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        if req.send():
            return True
        else:
            #bug fix: a bare 'raise Exception(status)' used to make this
            #AWSError raise unreachable
            raise AWSError(req.response)
    def __getitem__(self,bucketName):
        """Get a bucket by the given name."""
        #bug fix: createBucketObj is a method of this class
        return self.createBucketObj(bucketName)
    #Helpers 
    def createBucketObj(self,name,owner=None,created=None,host='s3.amazonaws.com'):
        """
        Initialize a Bucket object in memory based on the given information.
        NOTE(review): the host parameter is currently ignored in favour of
        the session's host — confirm whether callers rely on it.
        """
        bucket=Bucket(name,host=self.host,created=created,owner=owner)
        bucket.setCredential(self.AuthKey,self.SecretAccessKey,self.securityToken)
        return bucket
## 
#Data Holder for S3 bucket.
#
class Bucket (AWSservice):
    # intialization
    def __init__(self,name,owner=None,created="",host='s3.amazonaws.com'):
        """
        Initialize an in-memory representation of the bucket <name> hosted
        on <host>. owner/created carry the metadata S3 returned, when known.
        """
        self.name=name
        #S3 virtual-host style endpoint of the bucket
        self.host=self.name+'.'+host
        self.objects={}
        self.owner=owner
        #bug fix: the creation date parameter was silently dropped before
        self.created=created
    #Basic operations
    ## Add a bucket to remote server. (not implemented yet)
    def addRemote(self):
        pass
    ##The generator to list the content inside the bucket.
    #  You can create the generator by:
    #     listAction=self.bucket.ls()
    #  Then iterate over it:
    #     for page in listAction:
    #         for item in page:
    #             print item
    # You can limit the pageSize, or path 
    # You can also prevent getting version information(getVersionInfo=False) if the bucket is not version controlled.
    def ls(self,path='/',pageSize=1000,getVersionInfo=True):
        path=path.lstrip('/')
        req=S3Request(self.host).GET('/').setHeader('content-type','text/plain').addSubResource('delimiter','/').addSubResource("max-keys",pageSize)
        if path:
            req.addSubResource('prefix',path)
        if getVersionInfo:
            req.addSubResource('versions')
        #sign the request before the first send
        req.authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        while req.send():
            listedObjects={}
            result=DataNode(req.result)
            if getVersionInfo:
                for entry in result['Version']:
                    #Basic information
                    fname=entry.Key.str()
                    if fname in listedObjects:
                        fObject=listedObjects[fname]
                    else:
                        fObject=self.createFileObject(fname)
                    #Version Information
                    fObject.addVersion(entry.VersionId.str(),
                                      entry.LastModified.str(),
                                      etag=entry.ETag.value(),
                                      size=entry.Size.value())
                    fObject.owner=User(entry.Owner)
                    fObject.storageType=entry.StorageClass.str()
                    #add file 
                    fObject.existedRemote=True
                    listedObjects[fname]=fObject
                #delete markers
                for entry in result['DeleteMarker']:
                    fname=entry.Key.str()
                    if fname in listedObjects:
                        fObject=listedObjects[fname]
                    else:
                        fObject=self.createFileObject(fname)
                    #Version Information
                    fObject.addVersion(entry.VersionId.str(),entry.LastModified.str(),DeleteMarker=True)
                    #add mark
                    listedObjects[fname]=fObject
                    fObject.owner=entry.Owner.DisplayName.str()
            else:
                for entry in result['Contents']:
                    fname=entry.Key.str()
                    fObject=self.createFileObject(fname)
                    listedObjects[fname]=fObject
                    fObject.lastModified=entry.LastModified.str()
                    fObject.etag=entry.ETag.str()
                    fObject.size=entry.Size.str()
                    fObject.owner=entry.Owner.DisplayName.str()
                    fObject.storageType=entry.StorageClass.str()
                    fObject.existedRemote=True
            if len(listedObjects) > 0:
                lastKey=max(listedObjects.iterkeys())
            else:
                #if no result is given, end the list generator.
                return
            #The last key in the page may have some version information lost.
            if getVersionInfo:
                listedObjects[lastKey].getVersionInfo()
            yield listedObjects
            #continue the listing after the last key of this page
            req.addSubResource('key-marker',lastKey)
        raise AWSError(req.response)
    def delete(self,filelist):
        """
        Delete the files in the file list. (not implemented yet)
        """
        for f in filelist:
            pass
    ##Create an upload form to upload into a bucket. (incomplete)
    # Upload path and policy must be provided.
    def createUploadForm(self,policy,destPath='/'):
        if destPath != '/':
            #TODO: the condition restricting the destination path is incomplete
            policy.condition('')
    ##Check if the file obj or file exists remotely on S3.
    # The parameter can be a FileObject or the file name string.
    def has(self,fileobj):
        if type(fileobj) == FileObject:
            if fileobj.parentBucket.name == self.name:
                fileobj.updateInfo()
                #bug fix: 'fileboj' typo used to raise NameError here
                return fileobj.existedRemote
            else:
                return False
        elif type(fileobj) == str:
            #bug fix: createFileObject is a method of this bucket
            fobject=self.createFileObject(fileobj).updateInfo()
            return fobject.existedRemote
        else:
            return False
    ## Check if bucket1 == bucket2 (buckets compare equal by name)
    def __eq__(self,other):
        return self.name == other.name
    ## Create a File object and download its content from the specified path of the bucket. 
    def get(self,destPath):
        obj=FileObject(destPath,self)
        obj.download()
        return obj
    ## Create a File object and upload its content(from source) to the specified path of the bucket. 
    def add(self,destPath,source):
        obj=FileObject(destPath,self)
        obj.upload(source)
        return obj
    def __getitem__(self,key):
        """Get a FileObject for <key> inside this bucket."""
        #bug fix: createFileObject is a method and takes only the name
        obj=self.createFileObject(key)
        #TODO check if the object exists remotely
        return obj
    # Configurations
    ##Get the bucket access policy. (result parsing still TODO)
    def getPolicy(self):
        req=S3Request(self.host).GET('/').addSubResource('policy')
        req.authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        if req.send():
            #TODO parse and return the policy document instead of True
            return True
        else:
            raise AWSError(req.response)
    def updatePolicy(self,policy):
        """Upload a new bucket policy. (sending not implemented yet)"""
        if type(policy) == BucketPolicy:
            policy=policy.__json__()
        #TODO send out the policy
    # Life Cycle Configuration
    ## Add a folder that has a life cycle. (not implemented yet)
    def addTemporaryFolder(self):
        pass
    def listTemporaryFolders(self):
        pass
    def deleteTemporaryFolders(self):
        pass
    ##Payment policy
    def payby(self,payer=None):
        if payer is None:
            self.getRequestPayment()
        else:
            #NOTE(review): payer is not forwarded — confirm the signature of
            #updateRequestPayment on the base service class
            self.updateRequestPayment()
    #Helpers
    def createFileObject(self,name):
        """
        Create a File object in memory.
        """
        obj=FileObject(name,self)
        return obj
    #ACL (not implemented yet)
    def getACL(self):
        pass
    def createACL(self):
        pass
    def updateACL(self):
        pass
    def getLifeCycleRules(self):
        """
        Fetch the life cycle configuration of the bucket. Returns an empty
        LifeCycleRules object when none is configured (HTTP 404).
        """
        req=S3Request(self.host).GET('/').addSubResource('lifecycle')
        req.authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        if req.send():
            return LifeCycleRules(req.result)
        else:
            if req.response.status == 404:
                # HTTP 404 : there are no life cycle rules
                return LifeCycleRules()
            else:
                raise AWSError(req.response)
    def updateLifeCycleRules(self,LifeCycleRulesObj):
        """Upload a new life cycle configuration. Not allowed on versioned buckets."""
        if self.getVersionControl().enabled():
            raise Exception("Life cycle rules are not supported for versioned bucket.")
        text=LifeCycleRulesObj.content()
        req=S3Request(self.host).PUT('/').plainText(text)
        #bug fix: this request targets the 'lifecycle' sub-resource, not 'versioning'
        req.addSubResource('lifecycle').addHeaders(LifeCycleRulesObj.headers())
        req.authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        if req.send():
            return True
        else:
            raise AWSError(req.response)
    def getVersionControl(self):
        """
        Fetch the versioning configuration of the bucket. Returns an empty
        VersionControl object when none is configured (HTTP 404).
        """
        req=S3Request(self.host).GET('/').addSubResource('versioning')
        req.authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        if req.send():
            return VersionControl(req.result)
        else:
            if req.response.status == 404:
                # HTTP 404 : there is no versioning configuration
                return VersionControl()
            else:
                raise AWSError(req.response)
    def updateVersionControl(self,versionControlObj):
        """Upload a new versioning configuration for the bucket."""
        text=versionControlObj.content()
        req=S3Request(self.host).PUT('/').plainText(text)
        req.addSubResource('versioning').addHeaders(versionControlObj.headers())
        req.authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        if req.send():
            return True
        else:
            raise AWSError(req.response)
##
# A file object stored inside an S3 bucket.
class FileObject (AWSservice):
    ##
    # Init a remote File Object inside a bucket. You can later upload/download/sync to/from a local file.
    def __init__(self,name,parentBucket=None,content=None):
        self.target=name
        #NOTE(review): this attribute shadows the content() method below;
        #after construction obj.content is the cached data, not the method.
        self.content=content
        self.parentBucket=parentBucket
        #inherit the credentials and host from the owning bucket
        self.AuthKey=parentBucket.AuthKey
        self.SecretAccessKey=parentBucket.SecretAccessKey
        self.securityToken=parentBucket.securityToken
        self.host=parentBucket.host
        #version table: {lastModified(epoch): {versionID, etag, size, DeleteMarker}}
        self.__versions__={}
    def addVersion(self,versionID,lastModified,etag=None,size=None,isLatest=True,DeleteMarker=False):
        """Add a version to the file object and make it the current one."""
        lastModified=time.mktime(time.strptime(lastModified,"%Y-%m-%dT%H:%M:%S.000Z"))
        if isLatest and DeleteMarker:
            #the newest version is a delete marker: the file counts as deleted
            self.deleted=True
            self.deletedAt=lastModified
        if isLatest:
            self.latest=lastModified
        self.__versions__[lastModified]={'versionID':versionID,
                                  'etag':etag,
                                  'size':size,
                                  'DeleteMarker':DeleteMarker
                                }
        self.currentVersion=lastModified
        #or self.currentVersion=max(self.__versions__.iterkeys())
        self.etag=self.__versions__[self.currentVersion]['etag']
        self.size=self.__versions__[self.currentVersion]['size']
        self.versionID=self.__versions__[self.currentVersion]['versionID']
    def existed(self):
        """
        Check if this file object exists inside an S3 bucket.
        """
        #bug fix: the attribute name must be a string for hasattr
        if not hasattr(self,'existedRemote') :
            self.updateInfo()
        return self.existedRemote
    def chmod(self,acl):
        """Change the access control list of the current file object. (not implemented yet)"""
        pass
    def delete(self):
        """Delete the file object from the remote server. (not implemented yet)"""
        pass
    def url(self,expires='1hour'):
        """
        Get the temporary URL to access the protected resource. The expiration time can be 1-3 hours.
        """
        req=S3Request(self.host).GET(self.target).authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        req.expires(expires)
        return req.temporaryUrl()
        
    def uploader(self,sourcefname,chuckSize=1024):
        """
        Upload large files one chunk at a time using multipart upload,
        skipping the already uploaded parts. (not implemented yet)
        """
        yield
    def finishUpload(self):
        """
        Continue an unfinished multipart upload. (not implemented yet)
        """
        return 
    def uploadForm(self):
        """
        Get the upload form for browser-based upload.
        """
        req=S3Request(self.host).POST(self.target).authorize(self.AuthKey,self.SecretAccessKey,self.securityToken).expires("1 hour")
        #bug fix: getForm requires the destination key as its first argument
        return req.getForm(self.target)
    def save(self,path):
        """
        Save the content to a local file.
        NOTE(review): currently returns the cached content (or downloads it)
        instead of writing to <path> — confirm intended behaviour.
        """
        if self.content:
            return self.content
        else:
            return self.download(path)
    def content(self):
        #NOTE(review): unreachable as a method — __init__ shadows it with the
        #'content' attribute; kept for interface compatibility.
        if self.content:
            return self.content
        else:
            return self.download()
    def upload(self,source):
        """
        Upload the content of the local file <source> to the File object on the remote server.
        """
        req=S3Request(self.host).PUT(self.target).file(source)
        req.authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        if req.send():
            return self
        else:
            raise AWSError(req.response)
    def sync(self,source):
        """
        Check if the file in remote(target) and local(source) is the same. If they are different, upload/download the file to make them consistent. (not implemented yet)
        """
        return 
    def download(self):
        """
        Download the file and store the content in memory.
        """
        #TODO : problem -> what if the file is too big? Do not keep it in the memory!
        req=S3Request(self.host).GET(self.target)
        req.authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        if req.send():
            self.size=req.response.getheader('content-length',0)
            self.lastModified=req.response.getheader('last-modified',0)
            self.etag=req.response.getheader('etag',0)
            self.content=req.result
            return self
        else:
            raise AWSError(req.response)
    def torrent(self,torrentFile=None):
        """
        Get the torrent for downloading the file. Returns the torrent content;
        saving to torrentFile is still TODO.
        """
        req=S3Request(self.host).GET('/'+self.target).addSubResource('torrent')
        req.authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        if req.send():
            #bug fix: return the fetched torrent content instead of None
            return req.result
        else:
            raise AWSError(req.response)
    def updateInfo(self):
        """
        Get the information about the current File Object from the remote
        server via a HEAD request. Returns self; sets existedRemote.
        """
        #bug fix: the old code referenced an undefined 'path' variable
        req=S3Request(self.host).HEAD(self.target)
        req.authorize(self.AuthKey,self.SecretAccessKey,self.securityToken)
        if req.send():
            self.size=req.response.getheader('content-length',0)
            self.lastModified=req.response.getheader('last-modified',0)
            self.etag=req.response.getheader('etag',0)
            self.content=req.result
            self.existedRemote=True
            return self
        else:
            if req.response.status == 404:
                #file does not exist remotely
                self.size=None
                self.lastModified=None
                self.etag=None
                self.content=None
                self.existedRemote=False
                #bug fix: return self on this path too, so callers can chain
                return self
            else:
                raise AWSError(req.response)
    def getVersionInfo(self):
        pass
    def versions(self):
        """Return the raw version table of this file object."""
        return self.__versions__
    def revision(self):
        """Return the revision number of the current version."""
        #bug fix: the old body returned an undefined name and always raised
        #NameError; make the missing implementation explicit.
        raise NotImplementedError("revision is not implemented yet")
    def useVersion(self,versionID):
        """
        Change the file object to a specific version, identified either by
        its lastModified epoch time (float) or its S3 version id (str).
        """
        if type(versionID) is float:
            #epoch time 
            version=versionID
        elif type(versionID) is str:
            #S3 version id: find the matching lastModified key
            for version in self.__versions__:
                if self.__versions__[version]['versionID'] == versionID :
                    break
        else:
            raise Exception("Not supported type!")
        self.currentVersion=version
        self.etag=self.__versions__[self.currentVersion]['etag']
        self.size=self.__versions__[self.currentVersion]['size']
        self.versionID=self.__versions__[self.currentVersion]['versionID']
    def useRevision(self,revision):
        """
        Change the file object to a specific revision. 
        Example:
        useRevision(19)
        useRevision('r19')
        useRevision('first')
        useRevision('last')
        """
        #bug fix: the old body referenced an undefined 'version' variable and
        #always raised NameError; make the missing implementation explicit.
        raise NotImplementedError("useRevision is not implemented yet")
    def roll_back(self,offset):
        """
        Use the currently selected version as the remote file object's latest
        version. (not implemented yet)
        """
        pass
    def __repr__(self):
        return str(self.target)
    def __str__(self):
        return str(self.target)
        
# Helper Data Structures
# The configurations 
##
#Access Control List(ACL) configuration of the bucket and the objects.
class ACL (AWSConfiguration):
    """Access Control List(ACL) configuration of a bucket or an object."""
    PERMISSION_OPTOINS=('FULL_CONTROL','WRITE','WRITE_ACP','READ','READ_ACP')
    def __init__(self,data=None,id="",name=""):
        """Create an ACL configuration.

        data -- an existing AccessControlPolicy XML element to extend (optional)
        id   -- the owner's canonical user ID
        name -- the owner's display name
        """
        # fix: the original __init__ was empty, so allow()/__xml__ crashed on
        # missing attributes
        self.data=data
        self.id=id
        self.name=name
        self.rules={}   # grantee -> permission code
    def allow(self,grantee,access):
        """Grant *access* (one of PERMISSION_OPTOINS) to *grantee*."""
        # fix: PERMISSION_OPTOINS is a class attribute; the bare name raised NameError
        if access not in self.PERMISSION_OPTOINS:
            raise Exception("Access code %s not recognized! Possible values are:\n%s" % (access,self.PERMISSION_OPTOINS))
        self.rules[grantee]=access
    def __xml__(self):
        """Build the AccessControlPolicy XML element from the staged rules."""
        # NOTE(review): no `etree` import is visible at the top of this file;
        # use the stdlib ElementTree locally so this method is self-contained.
        from xml.etree import ElementTree as etree
        if self.data:
            acl=self.data
            # fix: AccessControlList was unbound on this branch
            AccessControlList=acl.find("AccessControlList")
        else:
            acl=etree.Element("AccessControlPolicy")
            owner = etree.SubElement(acl, "Owner")
            AccessControlList=etree.SubElement(acl,"AccessControlList")
            id=etree.SubElement(owner, "ID")
            id.text=self.id
            name=etree.SubElement(owner,"DisplayName")
            name.text=self.name
        for grant in self.rules:
            #TODO: merge with an existing Grant for the same grantee instead of
            #always appending (the original `if True: pass/else` never ran the
            #append branch at all).
            acl_rule=etree.SubElement(AccessControlList, "Grant")
            grantee=etree.SubElement(acl_rule, "Grantee")
            #TODO grantee user information (ID/DisplayName or EmailAddress)
            permission=etree.SubElement(acl_rule, "Permission")
            permission.text=self.rules[grant]
        return acl


class IAMPolicy(AWSConfiguration):
    """AWS IAM policy configuration.

    NOTE(review): placeholder -- no behavior is implemented yet.
    """
    pass
##
#The configuration of a bucket's read & write access-control policy.
#
class BucketPolicy (AWSConfiguration):
    """The access policy of a bucket, built fluently:

    policy.applyto('mybucket/*').when('SourceIp','==','10.0.0.0/24').allow(['GetObject'])
    """
    ACTIONS=['GetObject','GetObjectVersion','PutObject','GetObjectAcl','GetObjectVersionAcl']
    S3CONDITIONS=[]
    CONDITIONS=['CurrentTime','MultiFactorAuthAge','SecureTransport','SourceIp','UserAgent','EpochTime','Referer']
    def __init__(self,data=None):
        self.data=data              # an existing policy document (JSON string) or None
        self.newpolicies=[]         # finished statements awaiting serialization
        self.newpolicy=dict()       # the statement currently being built
    ##
    #List the rules in the configuration
    def list(self):
        #TODO not implemented yet
        pass
    ##Restrict the resource to be accessed.
    def applyto(self,resource):
        """Set the ARN of the resource the statement applies to; returns self."""
        # fix: the original referenced undefined names `bucket` and `path`
        self.newpolicy["Resource"]="arn:aws:s3:::%s" % resource
        return self
    ##Restrict the request conditions
    def when(self,key,condition,value):
        """Add a request condition (key from CONDITIONS/S3CONDITIONS); returns self."""
        key=key.lower()
        # fix: the Condition sub-dict was read before it ever existed (KeyError),
        # and the key was spelled "condition" in one branch and "Condition" in others
        conditions=self.newpolicy.setdefault("Condition",{})
        if key == "sourceip":
            # fix: create the nested operator dicts before indexing into them
            if condition == "==":
                conditions.setdefault("IpAddress",{})['aws:SourceIp']=value
            elif condition == "!=":
                conditions.setdefault("NotIpAddress",{})['aws:SourceIp']=value
            else:
                raise Exception("Condition not understood!")
        # fix: key was lowercased above, so the mixed-case comparisons below
        # were dead branches in the original
        elif key == "useragent":
            self.__AddStringCondition__('aws:UserAgent',condition,value)
        elif key == "referer":
            self.__AddStringCondition__('aws:Referer',condition,value)
        elif key == "currenttime":
            #TODO date conditions are not implemented yet
            pass
        elif key == "multifactorauthage":
            conditions.setdefault("Null",{})['aws:MultiFactorAuthAge']=(value == True)
        elif key in [s.lower() for s in self.S3CONDITIONS]:
            #S3-specific conditions; fix: S3CONDITIONS is a class attribute
            self.__AddStringCondition__('s3:'+key,condition,value)
        else:
            raise Exception("Constraint not understood!")
        return self
    def __AddStringCondition__(self,key,condition,value):
        """Register a string comparison condition for *key* under the pending statement."""
        conditions=self.newpolicy.setdefault("Condition",{})
        # fix: `find(...) > 0` missed a wildcard in the first position
        wildcard=("*" in value) or ("?" in value)
        if condition == "==":
            if wildcard:
                conditions.setdefault("StringLike",{})[key]=value
            else:
                conditions.setdefault("StringEquals",{})[key]=value
        elif condition == "!=":
            if wildcard:
                conditions.setdefault("StringNotLike",{})[key]=value
            else:
                conditions.setdefault("StringNotEquals",{})[key]=value
        elif condition == "^=":
            #case insensitive equal
            conditions.setdefault("StringEqualsIgnoreCase",{})[key]=value
        elif condition == "^!=":
            conditions.setdefault("StringNotEqualsIgnoreCase",{})[key]=value
        else:
            raise Exception("Condition not understood!")
    ##Update the specified restraint and create a new policy.
    def updatePolicy(self,allow,actions,users=None):
        """Finish the pending statement with the given effect and actions; returns self."""
        # fix: the original wrote into an undefined local `newpolicy`
        newpolicy=self.newpolicy
        newpolicy["Action"]=[]
        for action in actions:
            # fix: the action list lives on BucketPolicy, not on IAMPolicy
            if action not in self.ACTIONS:
                raise Exception("S3 action %s not recognized! Possible values are:\n%s" % (action,self.ACTIONS))
            newpolicy["Action"].append("s3:%s" % action)
        for user in (users or {}):
            #TODO principal (user) restrictions are not implemented yet
            pass
        if allow:
            newpolicy["Effect"]="Allow"
        else:
            newpolicy["Effect"]="Deny"
        self.newpolicies.append(newpolicy)
        #clear the newpolicy buffer
        self.newpolicy=dict()
        return self
    def allow(self,actions,users=None):
        """Finish the pending statement as an Allow statement."""
        return self.updatePolicy(True,actions,users)
    def deny(self,actions,users=None):
        """Finish the pending statement as a Deny statement."""
        return self.updatePolicy(False,actions,users)
    def __json__(self):
        """Serialize the policy (existing document plus new statements) to a JSON string."""
        if self.data is None:
            # fix: the key was misspelled "Statemenet"
            mypolicy={
            "Version" : "2008-10-17",
            "Id":"aaaa-bbbb-cccc-dddd",
            "Statement":[]
            }
        else:
            mypolicy=json.loads(self.data)
        #TODO create a real unique policy ID
        mypolicy["Id"]="xxxxxx"
        #TODO check if there is a need to modify the old rules
        #add the new rules
        statements=mypolicy.setdefault("Statement",[])
        for rule in self.newpolicies:
            statements.append(rule)
        # fix: the serialized policy was never returned
        return json.dumps(mypolicy)

##
#The configuration of an upload form.
#
class UploadPolicy (AWSConfiguration):
    """The expiration and constraint conditions of a browser-based upload form."""
    def __init__(self):
        self.policy={}
        #default lifetime; callers may change it with expires()
        self.expires('1 hour')
        self.policy['conditions']=[]
    def __parse_amount__(self,text,unit):
        """Return the float amount of *unit* found in *text*, or 0 when absent.

        Accepts singular and plural unit names ('1 hour', '2 hours').
        """
        # fix: the old pattern (\d+\.?\d?) kept only one decimal digit
        found=re.compile(r'(\d+\.?\d*)\s*%ss?' % unit).findall(text)
        if found:
            return float(found[0])
        return 0
    ##
    #Set up the expiration time of the upload policy.
    def expires(self,offset='1 hour'):
        """Set the policy expiration to now (UTC) plus the relative time in *offset*.

        offset -- a human-readable duration such as '1 hour', '90 minutes' or
                  '2 days 3 hours'; units that are absent contribute 0.
        """
        # parse the relative-time description one unit at a time
        # (the original repeated the same findall/float block seven times)
        millisecond=self.__parse_amount__(offset,'millisecond')
        microsecond=self.__parse_amount__(offset,'microsecond')
        second=self.__parse_amount__(offset,'second')
        minute=self.__parse_amount__(offset,'minute')
        hour=self.__parse_amount__(offset,'hour')
        #TODO necessary to provide support for day/week? because temporary URL can only exist between 1 to 3 hours.
        day=self.__parse_amount__(offset,'day')
        week=self.__parse_amount__(offset,'week')
        offset=datetime.timedelta(days=day,seconds=second,hours=hour,minutes=minute,milliseconds=millisecond,microseconds=microsecond,weeks=week)
        self.policy['expiration']=(datetime.datetime.utcnow()+offset).strftime("%Y-%m-%dT%H:%M:%SZ")

    ##
    #Set up the constraint condition for uploading contents.
    #Format:
    #key ? value
    #The relationship could be:
    #       == or eq or exact :  exact match the value of key and the given value.
    #       ~= or starts-with :  match whether the value starts with the given value. The given value must be a string.
    #       between :  check if the value of the key is between the given range. The given value must be a two element tuple.
    def condition(self,key,relationship,value):
        """Append one upload condition; raises on an unknown *relationship*."""
        if relationship == '==' or relationship == 'eq' or relationship == 'exact':
            #exact matching condition
            newcondition={key:value}
        elif relationship == '~=' or relationship == 'starts-with':
            #string starts-with condition
            newcondition=['starts-with',("$%s" %key),value]
        elif relationship == 'between':
            #range condition: value must be a (lower,upper) pair
            lower,upper=value
            newcondition=[key,lower,upper]
        else:
            raise Exception("Not supported condition %s" % relationship)
        self.policy['conditions'].append(newcondition)
    def json(self):
        """Return the policy serialized as a JSON string."""
        return json.dumps(self.policy)
    def encoded(self):
        """Return the policy JSON, UTF-8 encoded then base64 encoded."""
        # fix: str.encode('base64') is a Python2-only codec; use the already
        # imported base64 module (b64encode emits no newlines)
        encoded=base64.b64encode(self.json().encode('utf-8'))
        if not isinstance(encoded,str):
            #Python 3: b64encode returns bytes
            encoded=encoded.decode('ascii')
        return encoded
##
#The configuration of whether, and after how long, objects with certain prefixes expire and get deleted.
#TODO xml error reported by amazon.
class LifeCycleRules (AWSConfiguration):
    """Lifecycle rules of a bucket: objects under a prefix are deleted after N days."""
    def __init__(self,conf=None):
        if conf:
            self.data=DataNode(conf)
        else:
            self.data=DataNode('LifecycleConfiguration',namespace='http://s3.amazonaws.com/doc/2006-03-01/')
    def enable(self,id):
        """Enable a rule with given id."""
        for rule in self.data['Rule']:
            if rule.ID.value() == id:
                rule.Status.set('Enabled')
    def disable(self,id):
        """Disable a rule with given id."""
        for rule in self.data['Rule']:
            if rule.ID.value() == id:
                rule.Status.set('Disabled')
    def __parse_days__(self,text,unit):
        """Return the integer amount of *unit* found in *text*, or 0 when absent."""
        found=re.compile(r'(\d+\.?\d*)\s*%ss?' % unit).findall(text)
        if found:
            # fix: int('1.5') raises ValueError; go through float first
            return int(float(found[0]))
        return 0
    def addRule(self,loc,expires='365 days',id=None):
        """Set a location whose files are automatically removed after a certain time.

        loc     -- the key prefix the rule applies to
        expires -- a human-readable duration ('365 days', '2 weeks', '1 year', ...)
        id      -- rule id; generated randomly when omitted. An existing rule
                   with the same id is updated in place instead of duplicated.
        """
        #If the id is None, generate it automatically.
        if not id:
            id=''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(10))
        #NOTE(review): months/years are approximated as 30/365 days
        day=self.__parse_days__(expires,'day')
        week=self.__parse_days__(expires,'week')
        month=self.__parse_days__(expires,'month')
        year=self.__parse_days__(expires,'year')
        expires=str(day+7*week+30*month+365*year)
        modified=False
        for rule in self.data['Rule']:
            #disable the old rules that define the same location(prefix)
            if rule.Prefix.value() == loc:
                if rule.hasChild('Status'):
                    rule.Status.set('Disabled')
                else:
                    rule.append('Status','Disabled')
            #modify the existing rule with this id in place
            if rule.ID.value() == id:
                rule.Prefix.set(loc)
                rule.Expiration.Days.set(expires)
                rule.Status.set('Enabled')
                modified=True
        # fix: the original appended a duplicate rule even after an in-place update
        if modified:
            return
        #Add new rule.
        rule=self.data.append("Rule")
        rule.append("ID",id)
        rule.append("Prefix",loc)
        rule.append("Status","Enabled")
        rule.append("Expiration").append("Days",expires)
    def content(self):
        """Return the configuration serialized as an XML document."""
        return self.data.__xml__(pretty_print=True,xml_declaration=True,encoding="UTF-8")
##
#The configuration of how to store the logging information of a bucket.
class BucketLogging (AWSConfiguration):
    """Where and with which grants the access logs of a bucket are stored."""
    PERMISSION_OPTOINS=('FULL_CONTROL','READ','WRITE')
    def __init__(self,conf=None):
        self.grants={}      # grantee email -> permission, staged until enabled(True)
        self.bucket=""      # target bucket that receives the log objects
        self.prefix=""      # key prefix for the log objects
        if conf:
            self.data=DataNode(conf)
        else:
            self.data=DataNode('BucketLoggingStatus',namespace="http://doc.s3.amazonaws.com/2006-03-01")
    def save(self,path):
        """Save the logging in target path(bucket&prefix). Give the path in a form of Bucket:prefix
        For example:
        >logging.save('www.bucket.com:testlocation')
        """
        # fix: `split(path,":\\")` called an undefined free function with a
        # bogus separator; split the path on the first ':'
        self.bucket,self.prefix=path.split(":",1)
    def allow(self,grantee,permission):
        """Give READ/WRITE/FULL_CONTROL permission to a user(identified by his email address)."""
        # fix: `self` was missing from the signature, PERMISSION_OPTOINS was an
        # unqualified name, and "%s" % tuple expanded the tuple into % arguments
        if permission in self.PERMISSION_OPTOINS:
            self.grants[grantee]=permission
        else:
            raise Exception("Only options available to permission are: %s" % (self.PERMISSION_OPTOINS,))
    def enabled(self,yes=None):
        """Query (yes=None), enable (yes truthy) or disable (yes falsy) the logging."""
        if yes is None:
            #check the status of the logging.
            return self.data.hasChild('LoggingEnabled')
        elif yes:
        #enable the logging.
            if not self.data.hasChild('LoggingEnabled'):
                self.data.append('LoggingEnabled')
            if not self.data.hasChild('TargetBucket'):
                self.data.append('TargetBucket')
            if self.bucket:
                self.data.TargetBucket.set(self.bucket)
            if not self.data.hasChild('TargetPrefix'):
                self.data.append('TargetPrefix')
            if self.prefix:
                self.data.TargetPrefix.set(self.prefix)
            if not self.data.hasChild('TargetGrants'):
                self.data.append('TargetGrants')
            #update the grants that already exist in the document
            for existedgrant in self.data.TargetGrants['Grant']:
                if existedgrant.Grantee.EmailAddress.value() in self.grants:
                    existedgrant.permission=self.grants[existedgrant.Grantee.EmailAddress.value()]
                    del self.grants[existedgrant.Grantee.EmailAddress.value()]
            for newgrantee in self.grants:
                # fix: new grants belong under TargetGrants (not the document
                # root), and `self.newgrantee` was a typo for the loop variable
                grant=self.data.TargetGrants.append('Grant')
                grant.append('Grantee',namespace="http://www.w3.org/2001/XMLSchema-instance").append('EmailAddress',newgrantee)
                grant.append('Permission',self.grants[newgrantee])
                #TODO xsi:type
            return self
        else:
        #remove the logging.
            self.data.delete('LoggingEnabled')
            return self
    def content(self):
        """Return the configuration serialized as an XML document."""
        return self.data.__xml__()
##
#Notification configuration of a bucket.
class Notification (AWSConfiguration):
    """Notification configuration of a bucket.

    NOTE(review): placeholder -- not implemented yet.
    """
    def __init__(self,):
        pass
##
#@class VersionControl
#Version Control configurations.
class VersionControl (AWSConfiguration):
    """Versioning configuration of a bucket, including MFA-Delete protection."""
    def __init__(self,content=None):
        self._mfa_enabled=False     # whether requests must carry the MFA header
        if content:
            self.data=DataNode(content)
        else:
            self.data=DataNode('VersioningConfiguration',namespace='http://s3.amazonaws.com/doc/2006-03-01/')
            self.data.append('Status','Disabled')
            self.data.append('MfaDelete','Disabled')
    ## Check if versioning is enabled. If a bool parameter is given, this will enable(True) or disable(False) the versioning.
    def enabled(self,ToEnable=None):
        """Query the versioning status, or set it when *ToEnable* is a bool.

        Returns True/False when querying (None for an unrecognized status),
        or self when setting (for chaining).
        """
        if ToEnable is not None:
            if not self.data.hasChild('Status'):
                self.data.append('Status','Disabled')
            if ToEnable:
                # use .set() like the rest of the class instead of poking .data.text
                self.data.Status.set('Enabled')
            else:
                #S3 only supports suspending versioning, not fully disabling it
                self.data.Status.set('Suspended')
            return self
        if self.data.hasChild('Status'):
            status=self.data.Status.value()
            if status == 'Enabled':
                return True
            elif status == 'Suspended':
                return False
            else:
                return None
        return False
    ## Check if MfaDelete is enabled. If a bool parameter is given, this will enable(True) or disable(False) the MfaDelete.
    def MfaDeleteEnabled(self,ToEnable=None):
        """Query the MfaDelete status, or set it when *ToEnable* is a bool."""
        if ToEnable is not None:
            # fix: the guard was inverted -- create the node only when missing
            if not self.data.hasChild('MfaDelete'):
                self.data.append('MfaDelete','Disabled')
            if ToEnable:
                self._mfa_enabled=True
                self.data.MfaDelete.set('Enabled')
            else:
                self._mfa_enabled=False
                self.data.MfaDelete.set('Disabled')
            return ToEnable
        if not self.data.hasChild('MfaDelete'):
            return False
        status=self.data.MfaDelete.value()
        if status == 'Enabled':
            self._mfa_enabled=True
            return True
        elif status == 'Disabled':
            self._mfa_enabled=False
            return False
        else:
            return None
    ## set the current Mfa code for MfaDelete protection. If this is set, then all operations that modify a version will need the mfa code to be given.
    def setMfaCode(self,mfa,token):
        """Remember the MFA device serial and token and turn MfaDelete on."""
        self.mfa=mfa
        self.token=token
        self.MfaDeleteEnabled(True)
        return self
    ## returns the headers that need to be added in the S3 requests.
    def headers(self):
        """Return the extra request headers required while MfaDelete is active."""
        if self._mfa_enabled:
            # fix: S3 spells this header 'x-amz-mfa'; the original 'amz-x-mfa'
            # would be ignored by the service
            return { 'x-amz-mfa' : "%s %s" % (self.mfa,self.token)}
        else:
            return {}
    ## returns the request body that needs to be added in the S3 requests.
    def content(self):
        """Return the configuration serialized as an XML document."""
        return self.data.__xml__()
##
#The request payment configuration of a bucket.
#This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download.
class RequestPayment (AWSConfiguration):
    """Request payment configuration of a bucket.

    NOTE(review): placeholder -- not implemented yet.
    """
    def __init__(self,):
        pass
##
# The configuration of the bucket if it is used as a website.
class websiteConfiguration (AWSConfiguration):
    """Configuration of a bucket that is served as a static website.

    NOTE(review): placeholder -- not implemented yet.
    """
    def __init__(self,):
        pass

