import sys
import threading
from stockpyle._base import BaseStore
from stockpyle._stacker import Stacker


class BaseOperation(object):
    """Abstract marker for a unit of work handled by a deferred store's queue."""


class StorageOperation(BaseOperation):
    """A deferred call to a storage method, captured as a name plus kwargs."""

    def __init__(self, method_name, kwargs):
        super(StorageOperation, self).__init__()
        self.__method_name = method_name
        self.__kwargs = kwargs

    @property
    def method_name(self):
        # name of the store method this operation will invoke
        return self.__method_name

    @property
    def kwargs(self):
        # keyword arguments that will be passed to the store method
        return self.__kwargs

    def perform(self, store):
        """Invoke the captured method on *store* and return its result."""
        bound_method = getattr(store, self.__method_name)
        return bound_method(**self.__kwargs)


class ShutdownOperation(BaseOperation):
    """Sentinel operation telling the queue's worker to stop processing."""


class BaseResponse(object):
    """Result of a processed operation; exposes the operation it answers."""

    def __init__(self, operation):
        self.__operation = operation

    @property
    def operation(self):
        # the operation this response corresponds to (may be None)
        return self.__operation


class SuccessResponse(BaseResponse):
    """Response indicating the associated operation completed without error."""


class FailureResponse(BaseResponse):
    """Response indicating the associated operation raised an exception.

    Carries the failed operation (via the inherited ``operation`` property)
    together with the exception that aborted it.
    """

    exception = property(lambda self: self.__exception)

    def __init__(self, exception, operation):
        # BUG FIX: previously called super(ExceptionResponse, ...) — no class
        # of that name exists anywhere, so instantiating a FailureResponse
        # raised NameError instead of recording the failure
        super(FailureResponse, self).__init__(operation)
        self.__exception = exception


class ShutdownResponse(BaseResponse):
    """Response acknowledging a shutdown; it answers no storage operation."""

    def __init__(self):
        # no originating storage operation for a shutdown acknowledgement
        super(ShutdownResponse, self).__init__(None)


class BaseDeferredStore(BaseStore):
    """Wrapper for a given store, with a series of 'optimistic' stores
    that were optimistically populated with data that must be invalidated
    in the case of an asynchronous storage failure in the wrapped store"""

    def __init__(self, blueprint, store_name, optimistic_stores):
        # the real store that deferred writes are ultimately applied to
        self.__store = Stacker(blueprint=blueprint).get_store(store_name)
        # stores that may already hold optimistic copies of the written data
        # and must be rolled back when a deferred write fails
        self.__optimistic_stores = optimistic_stores
        # background consumer thread; started lazily on first deferred call
        self.__deferral_checker_thread = None

    def __ensure_deferral_checker_started(self):
        """Lazily start the background thread that consumes responses and
        rolls back the optimistic stores whenever a deferred write fails."""
        if not self.__deferral_checker_thread:
            def loop():
                keepgoing = True
                while keepgoing:
                    latest_result = self._dequeue_response()
                    if isinstance(latest_result, ShutdownResponse):
                        # we got a response that says we need to shut down
                        keepgoing = False

                    elif isinstance(latest_result, FailureResponse):
                        # FIXME: use logging
                        sys.stderr.write("deferral-failed: %s\n" % (latest_result.exception,))
                        # BUG FIX: the kwargs live on the failed operation
                        # (a StorageOperation), not on the response itself —
                        # FailureResponse has no 'kwargs' attribute
                        failed_kwargs = latest_result.operation.kwargs
                        # invalidate all the optimistic stores for this object
                        # TODO: this is a little janky, there should be a more consistent approach
                        if "obj" in failed_kwargs:
                            for s in self.__optimistic_stores:
                                # BUG FIX: delete() takes 'obj', not 'objs'
                                s.delete(obj=failed_kwargs["obj"])
                        elif "objs" in failed_kwargs:
                            for s in self.__optimistic_stores:
                                s.batch_delete(objs=failed_kwargs["objs"])
                        else:
                            # FIXME: use logging
                            sys.stderr.write("rollback-failed: kwargs=%s\n" % (failed_kwargs,))
            self.__deferral_checker_thread = threading.Thread(target=loop)
            self.__deferral_checker_thread.start()

    def __defer_call(self, method_name, kwargs):
        """Schedule a storage method call to run asynchronously; this is the
        single entry point through which all deferred writes are enqueued."""
        self.__ensure_deferral_checker_started()
        self._enqueue_operation(operation=StorageOperation(
            method_name=method_name,
            kwargs=kwargs,
            ))

    def _enqueue_operation(self, operation):
        """schedules an operation to happen asynchronously"""
        raise NotImplementedError()

    def _dequeue_response(self):
        """returns the result of the least-recently completed Operation"""
        raise NotImplementedError()

    def _shutdown_queue(self):
        """tells the queue to close any resources"""
        raise NotImplementedError()

    def put(self, obj):
        # write-through is deferred; the wrapped store decides what to enqueue
        self.__store._deferred_put(obj=obj, defer_cb=self.__defer_call)

    def batch_put(self, objs):
        self.__store._deferred_batch_put(objs=objs, defer_cb=self.__defer_call)

    def delete(self, obj):
        self.__store._deferred_delete(obj=obj, defer_cb=self.__defer_call)

    def batch_delete(self, objs):
        self.__store._deferred_batch_delete(objs=objs, defer_cb=self.__defer_call)

    def get(self, klass, key):
        # reads are synchronous and go straight to the wrapped store
        return self.__store.get(klass, key)

    def batch_get(self, klass, keys):
        return self.__store.batch_get(klass, keys)

    def release(self):
        """Shut down the queue and the checker thread, then release the
        wrapped store's resources."""

        # schedule shutdown
        self._enqueue_operation(ShutdownOperation())

        # wait for our checker thread to finish
        # BUG FIX: the thread is started lazily, so it is None when no
        # operation was ever deferred — joining unconditionally raised
        # AttributeError
        if self.__deferral_checker_thread:
            self.__deferral_checker_thread.join()
            self.__deferral_checker_thread = None

        # clean up any queue resources
        self._shutdown_queue()

        # release any other resources in our wrapped store
        self.__store.release()

    def _deferred_put(self, obj, defer_cb):
        raise StandardError("you cannot defer an operation on a %s" % self.__class__.__name__)

    def _deferred_batch_put(self, objs, defer_cb):
        raise StandardError("you cannot defer an operation on a %s" % self.__class__.__name__)

    def _deferred_delete(self, obj, defer_cb):
        raise StandardError("you cannot defer an operation on a %s" % self.__class__.__name__)

    def _deferred_batch_delete(self, objs, defer_cb):
        raise StandardError("you cannot defer an operation on a %s" % self.__class__.__name__)
