#!/usr/bin/env python

################################################################################
#   Copyright 2010 Jason Hoover
#
#
#   This file is part of AutoCache.
#
#   AutoCache is free software: you can redistribute it and/or modify
#   it under the terms of the GNU Lesser General Public License as published by
#   the Free Software Foundation, either version 3 of the License, or
#   (at your option) any later version.
#
#   AutoCache is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.

#   You should have received a copy of the GNU Lesser General Public License
#   along with AutoCache.  If not, see <http://www.gnu.org/licenses/>.
################################################################################



################################################################################
# ac-cache - Cache management functions. Depends on ac-common.
#
# Function Name   Description                           Usage                  Returns
#
# ac_checkup      Checks status of a given URL          ac_checkup(url)        Local URL if cached. False if invalid/stale. 
# ac_validate     Checks integrity of a cached object   ac_validate(hash)      True or an error code.
# ac_update       Updates a URL into the cache          ac_update(url)         Nothing
# ac_queue        Adds/Retrieves downloads from a queue ac_queue([url])        Nothing if URL set, next URL from queue if unset.
# ac_dlcleanup    Cleans up the downloads folder.       ac_dlcleanup()         Nothing
# ac_remove       Removes a cache object directory.     ac_remove(path)        Nothing
# ac_cachesize    Fast handler of cache size queries.   ac_cachesize([adjust]) Nothing
# ac_fileage      Returns age of a file in seconds.     ac_fileage(filename)   INT of seconds.
# ac_rchmod       Recursively sets permissions.         ac_rchmod(path)        Nothing
################################################################################

import time
import cPickle
import urllib2
import fcntl
import socket

from ac_common import *
from ac_download import ac_download

# Set the process-wide socket timeout at import time. Python 2.5's
# urllib2.urlopen() has no per-call timeout argument, so this module-wide
# default is the only way to bound a stalled download.

socket.setdefaulttimeout(ac_settings["dl_timeout"])

# Build a urlopener object. No need to get too fancy: we only need an opener
# that takes proxy information of the kind normally passed through ENV.
# Only an http proxy is configured here.

if ac_settings["dl_proxy"]:
    ac_log.debug("Setting proxy to: "+ac_settings["dl_proxy"])
    ac_urlopen = urllib2.build_opener(
                     urllib2.ProxyHandler({'http': ac_settings["dl_proxy"]})
                     )
else:
    ac_urlopen = urllib2.build_opener()

# Static download-state directory paths, defined once here so the layout can
# be changed in a single place. They are used constantly throughout this
# module.

ac_cachestatic = {
    "dl_locks"     : ac_settings["state_dir"] + "download/locks/",
    "dl_active"    : ac_settings["state_dir"] + "download/active/",
    }

################################################################################
# ac_checkup - Check the freshness of a cache object. Returns the cached URL if
# stored in the cache and fresh, False if not.
################################################################################

def ac_checkup(url):
    """Check the freshness of the cache object for *url*.

    Returns the local (cache_web) URL string when the object is cached
    and fresh, or a small integer status code otherwise:

      1 - not in cache              6 - host unreachable
      2 - last-modified mismatch    7 - HTTP error (404 etc.)
      3 - content-length mismatch   8 - local metadata unreadable
      4 - content-type mismatch     9 - access_log unreadable
      5 - metadata key missing
    """

    # url_hash - md5 of the URL, computed once instead of repeatedly.
    # filename - name the payload is stored under inside the object dir.
    # fullpath - full path of the cache object directory; saves a lot of
    #            ac_settings["cache_dir"] concatenation.
    url_hash = ac_urltohash(url)
    filename = ac_urltofile(url)
    fullpath = ac_settings["cache_dir"] + url_hash + "/"

    # Guard clause: nothing cached at all.
    if not os.path.isdir(fullpath):
        ac_log.debug("Object does not exist in cache: " + url_hash)
        return 1

    ac_log.debug("Cache file exists: " + url_hash)

    try:

        # If the object was served within the last trust_short seconds,
        # trust it and serve the cached copy without hitting the network.
        if ac_fileage(fullpath + "access_log") < ac_settings["trust_short"]:

            ac_log.debug("Local version checked less than trust_short: "
                         + url_hash)

            # Refresh the access timestamp.
            os.utime(fullpath + "access_log", None)

            # urllib2.quote escapes the filename: "foo bar" is stored on
            # disk as "foo%20bar", so the URL handed to squid must use the
            # escaped form or the webserver will look for a file that
            # does not exist.
            return (ac_settings["cache_web"] + url_hash + "/"
                    + urllib2.quote(filename))

    except OSError:
        ac_log.warning("Could not read access_log for: " + url_hash)
        return 9

    # Compare the stored metadata against the live remote headers.
    try:

        # Read the object's local and remote metadata into memory.
        cache_metadata = cPickle.load(open(fullpath + "metadata"))
        url_metadata = dict(ac_urlopen.open(url).info())

        # If the URL's mtime differs from ours, ours is probably the
        # wrong file.
        if cache_metadata["last-modified"] != url_metadata["last-modified"]:
            ac_log.debug("Remote date: " + url_metadata["last-modified"] +
                         " but cached file: " + cache_metadata["last-modified"])
            return 2

        # Different size: also the wrong file.
        elif cache_metadata["content-length"] != url_metadata["content-length"]:
            ac_log.debug("Remote size: " + url_metadata["content-length"] +
                         " but cached file: " + cache_metadata["content-length"])
            return 3

        # Server now serves a different content type: something is wrong.
        elif cache_metadata["content-type"] != url_metadata["content-type"]:
            ac_log.debug("Remote content: " + url_metadata["content-type"] +
                         " but cached file: " + cache_metadata["content-type"])
            return 4

        # Otherwise, our cache is good!
        else:
            ac_log.debug("Cached copy current for: " + url_hash)
            # Bug fix: quote here too -- this path previously returned the
            # raw filename, which 404s for names containing spaces etc.
            return (ac_settings["cache_web"] + url_hash + "/"
                    + urllib2.quote(filename))

    # A KeyError means a key was missing from one of the metadata sets.
    except KeyError:
        # Bug fix: the dicts must go through str() -- concatenating them
        # directly raised TypeError inside this handler.
        ac_log.warning("Value missing from metadata in " + url_hash +
                       ".\n Local: " + str(cache_metadata) +
                       "\n Remote: " + str(url_metadata))
        return 5

    # Bug fix: HTTPError is a subclass of URLError and must be caught
    # first; the original order made this branch unreachable.
    except urllib2.HTTPError:
        ac_log.debug("Recieved HTTP error code for: " + url)
        return 7

    # urllib2 couldn't reach the host at all.
    except urllib2.URLError:
        ac_log.debug("Could not reach host for: " + url)
        return 6

    # An IOError means the local metadata could not be read.
    except IOError:
        ac_log.error("Could not read local metadata for: " + url_hash)
        return 8

################################################################################
# ac_validate - Validates the metadata for a given url hash.
#
# Note that this does not validate the file against the URL, but instead does a
# local sanity check. Use ac_checkup for remote tests. This is purely an I/O
# intensive cache object validation subroutine, to be used once after a
# completed download and periodically after that.
################################################################################

def ac_validate(url_hash):
    """Locally sanity-check the cache object *url_hash*.

    Compares the on-disk file's size and mtime against the pickled
    metadata, without touching the network (use ac_checkup for remote
    validation). Intended to run once after a completed download and
    periodically thereafter.

    Returns True on success, or an integer error code:
      2 - size mismatch             5 - corrupt metadata values
      3 - mtime mismatch            6 - metadata/file unreadable
      4 - metadata keys missing     7 - object is not a directory
    """

    ac_log.debug("Going to validate " + url_hash)

    fullpath = ac_settings["cache_dir"] + url_hash + "/"

    try:
        # Read the object's metadata into memory.
        cache_metadata = cPickle.load(file(fullpath + "metadata"))

        # Rebuild the on-disk filename from the original URL recorded in
        # the metadata.
        filename = fullpath + ac_urltofile(cache_metadata["original-url"])

        # Validate size.
        if int(os.stat(filename).st_size) != int(cache_metadata["content-length"]):
            ac_log.warning("Size error on object: " + url_hash
                           + " Metadata: " + cache_metadata["content-length"]
                           + " File: " + str(os.stat(filename).st_size)
                           )
            return 2

        # Validate the local mtime. This is a little tricky: filesystem
        # mtimes are local time and must be converted to GMT/UTC, while
        # web-provided mtimes are always GMT/UTC, in the format
        # "%a, %d %b %Y %H:%M:%S %Z" e.g. "Sun, 18 Apr 2010 06:37:55 GMT".
        if (time.strptime(cache_metadata["last-modified"],
                          "%a, %d %b %Y %H:%M:%S %Z")
            != time.gmtime(os.stat(filename).st_mtime)):

            ac_log.warning("Date error on object: " + url_hash
                           + " Metadata: "
                           + str(time.strptime(cache_metadata["last-modified"],
                                               "%a, %d %b %Y %H:%M:%S %Z"))
                           + " File: "
                           + str(time.gmtime(os.stat(filename).st_mtime))
                           )
            return 3

    except KeyError:
        ac_log.error("Metadata values missing for " + url_hash
                     + " Metadata contents: " + str(cache_metadata))
        return 4

    except ValueError:
        # Bug fix: the original had a stray leading "+" on the second
        # operand (unary plus on a string), so this handler itself raised
        # TypeError instead of logging and returning 5.
        ac_log.warning("Corrupt metadata values for " + url_hash
                       + " Metadata contents: " + str(cache_metadata))
        return 5

    # OSError: the file could not be stat'd / accessed.
    except OSError:
        ac_log.exception("Unable to read metadata or access file for: " +
                         url_hash)
        return 6

    # IOError typically means a directory was found where a file was
    # expected, or vice-versa.
    except IOError:
        ac_log.warning("Object in cache not a directory: " + url_hash)
        return 7

    ac_log.debug("Validation passed for " + url_hash)
    return True

################################################################################
# ac_update - Removes the old destination cache dir and runs a loop of urls
# from the queue to download. Blocks until finished and should be called as a
# threaded object. Its primary function is to update files in the cache. Takes
# URLs as arguments.
################################################################################

def ac_update(url):
    """Refresh *url* in the cache, then drain the shared download queue.

    Purges any stale cached copy of *url*, enqueues it, and loops pulling
    URLs off the queue and downloading them into the cache until the
    queue is empty or all download slots are taken. Blocks until
    finished, so it should be run as a threaded object.

    Returns 1 when *url* is unreachable, 2 for an invalid URL or when no
    download slot is free mid-loop, otherwise None.
    """

    # Coerce to a string up front so we never have to later -- callers
    # could conceivably hand us None, a list, or something else stupid.
    url = str(url)
    ac_log.debug("Starting ac_update for " + url)

    # Make sure the URL is still reachable before touching the cache;
    # otherwise we might wind up removing an object that is only
    # temporarily 404.
    try:
        ac_urlopen.open(url).close()

    except (urllib2.URLError,urllib2.HTTPError):
        ac_log.info("Not updating URL because it was unreachable or not found: " 
                    + url)
        return 1

    except ValueError:
        ac_log.error("Attempted to update an invalid URL: " + url)
        return 2

    # Quick reference variables.
    url_hash = ac_urltohash(url)
    object_dir = ac_settings["cache_dir"] + url_hash
    ac_log.debug(url + " is now " + url_hash)

    # Purge the old object before enqueueing its replacement.
    if os.path.isdir(object_dir):

        ac_log.info("Purging outdated object: " + url_hash)

        # Subtract the object's size from the running cache total before
        # destroying the directory.
        ac_cachesize( - ac_du(object_dir))
        ac_remove(object_dir)

    # Now that we know the download is still worth doing, queue it up!
    ac_queue(url)

    # Don't even enter the download loop if there are no free slots.
    if len(os.listdir(ac_cachestatic["dl_locks"])) > ac_settings["dl_slots"]:
                ac_log.debug("No free download slots, not starting another.")
                ac_dlcleanup()
                return

    # Load the first object from the queue before starting the loop.
    url = ac_queue()

    # Until we've run out of URLs...
    try:
        while url:

            # Hash the URL; the metadata is fetched further below.
            download_fn = ac_urltofile(url)
            download_hash = ac_urltohash(url)

            ac_log.info("Downloading " + url + " as " + download_hash)

            # Other processes can blindly add requests to the queue, so
            # duplicates are possible. Skip anything already cached or
            # currently being downloaded.
            if (os.path.isdir(ac_settings["cache_dir"] + download_hash) or
               os.path.isfile(ac_cachestatic["dl_locks"] + download_hash)):

                ac_log.debug("Object already downloading or cached, skipping: " 
                             + download_hash)
                url = ac_queue()
                continue

            # The file may have gone missing in the time it spent queued,
            # so the header fetch itself can fail.
            try:
                download_metadata = dict(ac_urlopen.open(url).info())

            except (urllib2.URLError,urllib2.HTTPError):
                ac_log.info("Unable to retrieve headers for: " + download_hash)
                url = ac_queue()
                continue

            # All three headers are required later by validation; skip
            # the object if any is missing.
            if not download_metadata.has_key("last-modified") or not  \
               download_metadata.has_key("content-length") or not     \
               download_metadata.has_key("content-type"):

                    ac_log.warning("Missing length, type or last-modified for: "
                                    + download_hash + " Remote headers were: "
                                    + str(download_metadata)
                                    )

                    url = ac_queue()
                    continue

            # Make sure this download doesn't put us over budget.
            try:

                # Absolute limit check (cache_mblimit).
                if (int(download_metadata["content-length"]) + 
                     ac_cachesize() ) > ac_settings["cache_mblimit"]:

                    ac_log.warning("Cache would go over cache_mblimit on: " 
                                   + download_hash + " Object Size: "
                                   + download_metadata["content-length"]
                                   + " Cache Size: " + str(ac_cachesize())
                                   )

                    url = ac_queue()
                    continue

                # Filesystem percentage check (cache_pctlimit). This only
                # tests the current state, because percentages are harder
                # to predict ahead of a download than byte counts.
                elif ac_df(ac_settings["cache_dir"]) \
                    > ac_settings["cache_pctlimit"]:

                    ac_log.warning("Cache size is over cache_pctlimit: " + 
                                   str(ac_df(ac_settings["cache_dir"])) +
                                   "/" + 
                                   str(ac_settings["cache_pctlimit"])
                                   )

                    url=ac_queue()
                    continue

            # Should never happen, really.
            except:
                ac_log.exception("Critcal failure trying to determine availible space.")
                raise

            # Confirm a slot is still free. If not, requeue this URL and
            # return so a later run can pick it up.
            if len(os.listdir(ac_cachestatic["dl_locks"])) > ac_settings["dl_slots"]:

                ac_log.debug("No free download slots, returning and requeuing URL.")
                ac_dlcleanup()
                ac_queue(url)
                return 2

            else:    

                # Try to take the download lock:
                # <statedir>/download/locks/<urlhash>
                try:
                    download_lock = file(ac_cachestatic["dl_locks"] + 
                                         download_hash, 'w')
                    fcntl.flock(download_lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
            
                # Another thread/process may have beaten us to the lock;
                # clean up the download directory and move on to the next
                # URL in that case.
                except IOError:
                    ac_log.debug("Unable to create download lock for: " 
                                   + download_hash)
                    ac_dlcleanup()
                    url = ac_queue()
                    continue

            # Make the staging directory in ac_cachestatic["dl_active"].
            download_dir = ac_cachestatic["dl_active"] + download_hash + "/"
            ac_log.debug("Making download dir: " + download_dir)
            os.mkdir(download_dir)

            # Record the source URL in the metadata and pickle it to disk.
            download_metadata["original-url"] = url
            cPickle.dump(download_metadata,
                         file(download_dir + "metadata", 'wb'))

            # Fire up the downloader! A non-zero return code means the
            # download failed and must be cleaned up. Do not move the
            # object into the cache, do not pass go, do not collect $200.
            if ac_download(url, download_dir, download_fn) != 0:

                ac_log.error("Cleaning up failed download of " + download_hash)
                ac_remove(download_dir)
                download_lock.close()
                os.remove(ac_cachestatic["dl_locks"] + download_hash)
                url = ac_queue()
                continue

            # Create the (empty) access_log file in the object's dir.
            file(download_dir + "access_log", 'a').close()

            # For now, assume the download went okay.
            try:
                ac_log.debug("Adding new download to cache: " + download_hash)
                os.rename(download_dir, ac_settings["cache_dir"] + download_hash)
                ac_cachesize(ac_du(ac_settings["cache_dir"] + download_hash))
                ac_rchmod(ac_settings["cache_dir"] + download_hash)

            except:

                # If the object could not be moved, remove the download
                # dir but NOT the cached object; the validation step
                # below handles that instead.
                ac_log.exception("Failed to move object into cache: " + download_hash)
                ac_remove(download_dir)    

            # Make sure everything is okay, clean up if not.
            if not ac_validate(download_hash) == True:
                ac_log.error("Removing invalid download: " + download_hash )

                # Decrease the recorded cache size to match the removal.
                ac_cachesize( - ac_du(ac_settings["cache_dir"] + download_hash))
                ac_remove(ac_settings["cache_dir"] + download_hash) 

            # Close and delete our lock, then grab the next URL.
            download_lock.close()
            os.remove(ac_cachestatic["dl_locks"] + download_hash)
            ac_log.info("Sucessfully cached " + download_hash)
            url = ac_queue()

    except:
        ac_log.exception("Horrible error while downloading!! Send log to developer.")    


################################################################################
# ac_queue - Reads and writes the queue.
#
# I made this into a function because having it all in ac_download was too 
# fucking much to be readable. I might re-add it later, but that'd take time and
# effort.
################################################################################

def ac_queue(url=""):
    """Append to, or pop from, the persistent download queue.

    With a non-empty *url*, appends it to the queue and returns it.
    With no argument, pops and returns the next queued URL, or False if
    the queue is empty.

    The queue is a pickled list in <state_dir>/queue, held under an
    exclusive flock for the duration of the call. If the queue file is
    lost or corrupt, it is recreated via ac_stateinit().
    """

    try:

        # Lock the queue file for the whole call -- we may both read and
        # write it. Remember to seek to 0 before every operation.
        dl_queuefile = file(ac_settings["state_dir"] + "queue", 'r+w+b')
        fcntl.flock(dl_queuefile, fcntl.LOCK_EX)
        dl_queuefile.seek(0)
        
        if url != "":

            # Append mode. Read the queue as a list object, just in case
            # it unpickles as something else.
            ac_log.debug("Adding url to queue:" + url )

            try:    
                dl_queuefile.seek(0)        
                dl_queue = list(cPickle.load(dl_queuefile))

            # cPickle.load raises EOFError if the file is empty.
            except EOFError:
                dl_queue = []

            # Add this URL to the queue and write it back.
            dl_queue.append(url)
            dl_queuefile.seek(0)
            cPickle.dump(dl_queue, dl_queuefile, protocol=cPickle.HIGHEST_PROTOCOL)

        else:

            ac_log.debug("Retrieving next object from queue.")
        
            try:
                dl_queuefile.seek(0)            
                dl_queue = list(cPickle.load(dl_queuefile))

            # cPickle.load raises EOFError if the file is empty.
            except EOFError:

                dl_queue = False
                ac_log.debug("Queue empty, no more downloads remain.")
            
            # Pop the first URL off the list; an empty queue yields
            # False instead.
            if dl_queue:
                url = dl_queue.pop(0)

            else:
                url = False
                
            # Write the shortened queue back and close the file (the
            # close below is then a harmless no-op on this branch).
            dl_queuefile.seek(0)
            cPickle.dump(dl_queue, dl_queuefile, protocol=cPickle.HIGHEST_PROTOCOL)
            dl_queuefile.close()

        # Closing the file releases the flock. If something was added to
        # the queue the return value is irrelevant; if the queue was
        # queried we return the popped URL (or False).
        dl_queuefile.close()
        return(url)

    # This handles the loss or reset of the queue file.
    except (OSError, IOError):

        ac_log.exception("Queue file vanished/corrupt, recreating.")
        
        try:
            ac_stateinit()

        except:
            ac_log.exception("Queue file lost and unable to recreate, dying.")
            raise IOError("Queue file lost and unable to recreate, dying.")

################################################################################
# ac_dlcleanup - This function is responsible for cleaning up the download 
# directory and is run once upon import. In order for us to make sure the 
# download directory is in a sane state, we have to actually lock it.
################################################################################

def ac_dlcleanup():
    """Sweep dead locks and orphaned directories from the download area.

    Rate-limited through the <state_dir>/lastcleanup timestamp: when a
    sweep ran within the last trust_short seconds, only the timestamp is
    refreshed. A lock file whose flock can still be taken exclusively
    belongs to no live download and is deleted; any active-download
    directory left without a matching lock is then removed as well.
    """

    cleanup_log = ac_settings["state_dir"] + "lastcleanup"

    # Recreate the state tree if the timestamp file has gone missing.
    if not os.path.isfile(cleanup_log):
        ac_stateinit()

    # Rate limit: skip the sweep entirely when one happened recently.
    if ac_fileage(cleanup_log) < ac_settings["trust_short"]:
        ac_log.debug("Download directory recently cleaned.")
        os.utime(cleanup_log, None)
        return

    ac_log.debug("Cleaning up download directory.")

    # Stamp the log BEFORE sweeping so two processes don't both start.
    os.utime(cleanup_log, None)

    # Nothing to do when both the active dir and the lock dir are empty.
    if not (os.listdir(ac_cachestatic["dl_active"])
            or os.listdir(ac_cachestatic["dl_locks"])):
        ac_log.debug("No old downloads to clean up, good!")
        return

    # Walk the lock dir: a lock we can flock exclusively is dead, since
    # a live downloader would already hold it (flock raises IOError in
    # that case).
    for lock_name in os.listdir(ac_cachestatic["dl_locks"]):

        try:
            lock_handle = file(ac_cachestatic["dl_locks"] + lock_name)
            fcntl.flock(lock_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)

            # No IOError was raised, so nobody holds this lock.
            lock_handle.close()
            ac_log.info("Removing dead download lock " + lock_name)
            os.remove(ac_cachestatic["dl_locks"] + lock_name)

        except IOError:
            ac_log.debug("Download still in progress for " + lock_name)

        except OSError:
            ac_log.debug("Download lock vanished before removal for " + lock_name)

    # Every live download owns a lock. With the dead locks gone, any
    # directory without one is an orphan and can be removed.
    for dl_name in os.listdir(ac_cachestatic["dl_active"]):

        if os.path.isfile(ac_cachestatic["dl_locks"] + dl_name):
            continue

        ac_log.info("Removing the lockless download directory: " + dl_name)
        try:
            ac_remove(ac_cachestatic["dl_active"] + dl_name)

        except OSError:
            ac_log.debug("Lockless directory already removed: " + dl_name)

################################################################################
# ac_remove - This function removes a target cache directory. It does not return
# anything, there's many different scenarios under which this needs to be done, 
# so a subroutine was made for it.
################################################################################

def ac_remove(cache_dir):
    """Destroy a cache object directory and its contents.

    A legitimate cache object only holds a handful of files (the
    payload, access_log, metadata and download log), so a directory
    containing more than six entries is refused -- better to raise than
    to recursively destroy unexpected data.

    Raises OSError when the directory is oversized, vanished, or could
    not be removed.
    """

    ac_log.debug("Going to destroy: " + cache_dir)

    try:
        # Safety valve: refuse anything suspiciously large.
        if len(os.listdir(cache_dir)) > 6:
            ac_log.error("Refusing to recursively remove large directory: "
                         + cache_dir)
            raise OSError

        for entry in os.listdir(cache_dir):

            # Per-file try block: one failed removal shouldn't stop us
            # from attempting the rest, or the rmdir afterwards.
            try:
                os.remove(cache_dir + "/" + entry)

            except OSError:
                ac_log.warning("The file: " + entry +
                               " in " + cache_dir +
                               " vanished or could not be removed!")

        os.rmdir(cache_dir)

    except OSError:
        ac_log.warning("The directory vanished or could not be removed: "
                       + cache_dir)
        raise

################################################################################
# ac_cachesize - Updates the size of the cache or returns its current size.
# Looks a lot like ac_queue, doesn't it?
################################################################################

def ac_cachesize(size=None):
    """Read or adjust the persistent cache size counter.

    With no argument, returns the current cache size. With an integer
    *size* (positive or negative), adjusts the stored counter by that
    amount and returns the new total.

    The counter lives in <state_dir>/cachesize as a pickle, guarded by
    an exclusive flock, and is recomputed from disk with ac_du() when it
    is older than trust_long or unreadable.
    """

    # Track the handle so the recovery path below knows whether the
    # initial open ever succeeded.
    cache_sizefile = None

    try:

        # Open and lock the cache size file; we hold the lock for the
        # whole read/modify/write cycle.
        cache_sizefile = file(ac_settings["state_dir"] + "cachesize", 'r+w+b')
        fcntl.flock(cache_sizefile, fcntl.LOCK_EX)
        cache_sizefile.seek(0)

        # If the stored value is older than trust_long, recount from disk.
        if ac_fileage(ac_settings["state_dir"] + "cachesize") > \
           ac_settings["trust_long"]:

            cPickle.dump(ac_du(ac_settings["cache_dir"]),
                         cache_sizefile,
                         protocol=cPickle.HIGHEST_PROTOCOL)
            cache_sizefile.seek(0)
            ac_log.debug("Cache size file too old, updated.")

        # If not told to adjust the cache size, return the current value.
        if size is None:
            size = int(cPickle.load(cache_sizefile))
            ac_log.debug("Cache size is now: " + str(size))

        # Otherwise, adjust the stored size by the amount given.
        else:
            ac_log.debug("Adjusting cache size file by: " + str(size))

            # Make sure we read this as an INT before adding.
            size = int(cPickle.load(cache_sizefile)) + size
            cache_sizefile.seek(0)
            cPickle.dump(size,
                         cache_sizefile,
                         protocol=cPickle.HIGHEST_PROTOCOL)

        # Bug fix: release the lock/handle explicitly instead of leaking
        # it until garbage collection.
        cache_sizefile.close()
        return size

    except (TypeError, EOFError, IOError):

        # The file was missing, empty, or held invalid data; rebuild it
        # with a fresh count and try again.
        ac_log.warning("Serious problem reading cachesize file. Resetting.")

        # Do a stateinit, just in case.
        ac_stateinit()

        # Bug fix: if the initial open itself raised IOError, the
        # original code hit a NameError here because cache_sizefile was
        # never bound. Re-open the (now recreated) file before writing.
        if cache_sizefile is None or cache_sizefile.closed:
            cache_sizefile = file(ac_settings["state_dir"] + "cachesize",
                                  'r+w+b')
            fcntl.flock(cache_sizefile, fcntl.LOCK_EX)

        # Seek to 0 in case we're at some weird position in the file.
        size = ac_du(ac_settings["cache_dir"])
        cache_sizefile.seek(0)
        cPickle.dump(size, cache_sizefile, protocol=cPickle.HIGHEST_PROTOCOL)
        cache_sizefile.close()
        ac_log.debug("Cache size is now: " + str(size))
        return size

################################################################################
# ac_fileage - Return age of a file in seconds.
# Subtract time.mktime(time.localtime()) from the stat.st_mtime to get the 
# difference. Used in a number of places.
################################################################################

def ac_fileage(filename):
    """Return the age of *filename* in whole seconds.

    Computed as the current time (seconds since the epoch, via
    mktime(localtime())) minus the file's st_mtime. Raises OSError when
    the file cannot be stat'd. Used in a number of places.
    """
    now = time.mktime(time.localtime())
    return int(now - os.stat(filename).st_mtime)

################################################################################
# ac_rchmod - Recursively set the permissions to world readable. Mostly copied 
# from rmdir.
################################################################################

def ac_rchmod(path):
    """Recursively set world-readable permissions on a cache object dir.

    Applies cache_dmode to the directory itself and cache_fmode to each
    entry directly inside it. Mostly copied from ac_remove's structure.

    Raises OSError when the directory itself cannot be chmodded;
    per-file failures are logged but not raised.
    """

    # Add trailing slash...
    path = path + "/"
    ac_log.debug("Going to chmod: " + path)

    try:

        os.chmod(path, ac_settings["cache_dmode"])

        for each_file in os.listdir(path):

            # Per-file try block so we can pinpoint exactly which files
            # failed in the log, and keep going.
            try:
                os.chmod(path + each_file, ac_settings["cache_fmode"])

            except OSError:
                ac_log.error("Unable to chmod: " + each_file + " in " + path )

    except OSError:
        # Bug fix: the original raised before logging, which made this
        # log line unreachable dead code. Log first, then re-raise.
        ac_log.error("Unable to chmod the directory: " + path )
        raise
