# cache.py
# Caches for incoming and outgoing data.
import threading
import os
import shutil
import time

import chunk
from util import config

incoming = None
outgoing = None

import logging
logger = logging.getLogger('async')

def get_incoming():
    """Return the process-wide IncomingCache singleton, creating it lazily."""
    global incoming
    if not incoming:
        incoming = IncomingCache()
    return incoming

def get_outgoing():
    """Return the process-wide OutgoingCache singleton, creating it lazily."""
    global outgoing
    if not outgoing:
        outgoing = OutgoingCache()
    return outgoing

def reset():
    """Drop both cache singletons so the next get_* call rebuilds them."""
    global incoming, outgoing
    incoming = outgoing = None

def get_cache_filename(header):
    """Path of the on-disk cache file for the message identified by header.mid."""
    root = config.get('async.folders', 'cache-root')
    return '%s/%s' % (root, header.mid)

class BaseCache:
    """Shared machinery for the incoming/outgoing chunk caches: a bounded
    dict of chunks plus insertion-order eviction bookkeeping.

    NOTE(review): entries in self._lru are never re-ordered on access, so
    eviction order is FIFO rather than true LRU despite the name.
    """

    def __init__(self):
        # key (see _make_key) -> cached chunk entry
        self._chunks = {}
        # Keys in insertion order; _evict drops the oldest.
        self._lru = []
        # Maximum number of entries held in memory.
        self._max_size = 100
        self._lock = threading.Lock()
        # Make the caching folders
        cache_root = config.get('async.folders', 'cache-root')
        if not os.path.exists(cache_root):
            os.makedirs(cache_root)

    def _evict(self):
        """Remove the oldest cached entry from the cache."""
        key = self._lru.pop(0)
        del self._chunks[key]

    def _is_full(self):
        """Return True once the cache holds _max_size entries.

        Was `>` in the original, which let the cache grow to _max_size + 1
        entries before callers (which check before inserting) would evict
        or flush anything.
        """
        return len(self._chunks) >= self._max_size

    def _make_key(self, header, seq):
        """Unique cache key for chunk #seq of the message described by header."""
        return header.get_file_path() + '::' + str(seq)
    
class OutgoingCache(BaseCache):
    """Cache of outgoing message chunks, read lazily from the message file
    on disk in batches."""

    def __init__(self):
        BaseCache.__init__(self)

    def get_chunk(self, header, seq):
        """
            Gets the chunk with the sequence number seq from the message with the given header.
        """
        key = self._make_key(header, seq)
        if key not in self._chunks:
            # Cache miss: batch-read a run of chunks starting at seq.
            self.__load_chunks(header, seq)
        return self._chunks[key]

    def __load_chunks(self, header, seq, n=20):
        """
            Loads n chunks of message msg starting at sequence number seq
        """
        # Read in the data. `with` guarantees the handle is closed even if
        # seek/read raises (the original leaked it on error).
        offset = header.csize * seq
        with open(header.get_file_path(), mode='rb') as f:
            f.seek(offset)
            data = f.read(header.csize * n)
        # Chunk it up
        while data:
            key = self._make_key(header, seq)
            if self._is_full():
                self._evict()
            if key not in self._chunks:
                self._chunks[key] = chunk.DataChunk(header.mid, seq, data[:header.csize])
                self._lru.append(key)
            data = data[header.csize:]
            # BUGFIX: the original never advanced seq, so every iteration
            # rebuilt the same key and only the first chunk of the batch
            # was ever cached; the rest of the n-chunk read was discarded.
            seq += 1

class IncomingCache(BaseCache):
    """Buffers chunks of incoming messages in memory and flushes them to
    per-message cache files on disk."""

    def __init__(self):
        BaseCache.__init__(self)
        # id -> file descriptor (message id -> open cache-file handle)
        self.__files = {}

    def put_chunk(self, header, chunk):
        """Buffer one received chunk; if the in-memory cache is full, flush
        everything to disk first."""
        logger.debug('Putting chunk #%s of mid %s', chunk.seq, chunk.mid)
        key = self._make_key(header, chunk.seq)
        if key not in self._chunks:
            if self._is_full():
                self.flush()
            self._chunks[key] = (chunk, header)

    def flush(self):
        """Write every buffered chunk to its message's cache file at the
        correct byte offset, then empty the in-memory buffer."""
        # BUGFIX: the original used bare acquire()/release(); any I/O error
        # in the loop left the lock held forever, deadlocking later calls.
        # `with` releases the lock on every exit path.
        with self._lock:
            logger.debug('Flushing the cache')
            for (chunk, header) in self._chunks.values():
                f = self.__file_for(chunk, header)
                logger.debug('Files: %s', self.__files)
                if f.closed:
                    # Handle was closed (e.g. by msg_complete); skip the write.
                    continue
                offset = header.csize * chunk.seq
                f.seek(offset)
                f.write(chunk.data)
                f.flush()
            self._chunks = {}

    def __file_for(self, chunk, header):
        """Return the open cache-file handle for chunk's message, opening
        and registering it in self.__files first if necessary."""
        if chunk.mid in self.__files:
            return self.__files[chunk.mid]
        file_path = get_cache_filename(header)
        if os.path.exists(file_path):
            # The file exists, so we have to read old file, write it into new one.
            # This is nonsense, but is necessary; you can't just start writing arbitrary data
            # at byte positions without truncating the file.
            with open(file_path, mode='rb') as existing:
                data = existing.read()
            os.remove(file_path)
            # Dump the contents back; keep the file handle.
            f = open(file_path, mode='wb+')
            self.__files[chunk.mid] = f
            f.write(data)
            f.flush()
        else:
            logger.debug('File didnt exist; opening and adding to self.__files')
            f = open(file_path, mode='wb+')
            self.__files[chunk.mid] = f
        return f

    def msg_complete(self, header):
        """Finalize a message: flush pending chunks, close its cache file,
        and move the file to its final destination path."""
        logger.debug('Message complete. Header mid %s', header.mid)
        self.flush()
        # BUGFIX: lock held via `with` so it is released even if the close,
        # makedirs, or move raises (original leaked the lock on error).
        with self._lock:
            self.__files[header.mid].close()
            del self.__files[header.mid]
            cache_file = get_cache_filename(header)
            dest_file = header.get_file_path()
            dest_dir = os.path.dirname(dest_file)
            if not os.path.exists(dest_dir):
                os.makedirs(dest_dir)
            shutil.move(cache_file, dest_file)