import sys
from time import time
from random import random
from logging import getLogger
from threading import Thread, Condition, Event, currentThread
from cloudcontrol._logging import log_event
from cloudcontrol._support.excwrap import create_wrapped_exception
# http://docs.python.org/lib/itertools-functions.html

# TODO: figure out why Condition.wait needs an explicit timeout here.  Passing a
# timeout appears to be necessary so the interpreter can still deliver signals
# (e.g. KeyboardInterrupt) to the main thread while workers are blocked waiting.
_OBJECT_CV_WAIT_IN_SECONDS = 3600
_USE_TRACER_MESSAGES = False
_LOGGER = getLogger("cloudcontrol.async")


class _AsyncThreadExceptionPlaceholder(object):
    
    exception = property(lambda self: self.__exception)
    exc_info = property(lambda self: self.__exc_info)
    
    def __init__(self, exception):
        # TODO: ensure exception should ALWAYS be the same as what sys.exc_info refers to
        self.__exception = exception
        self.__exc_info = sys.exc_info()


class AsyncIterableBase(object):
    """Takes *any* iterable and iterates over it on another
    thread, capturing each element and storing it for iteration
    on an arbitrary number of other threads.

    Subclasses provide a ``consume_with`` callable (see ``__init__``) that
    produces objects and pushes each one through ``_consume_object``.  Every
    thread (in fact, every ``__iter__`` call) gets its own private
    ``_AsyncIterableDataBuffer`` so each consumer independently sees the
    full stream."""
    
    def __iter__(self):
        """Yield every captured object in order.

        Exceptions raised on the producer thread arrive as
        ``_AsyncThreadExceptionPlaceholder`` objects and are re-raised
        (wrapped) on the consuming thread.  Safe to call concurrently from
        multiple threads; each call gets an independent buffer."""
        
        # start up if needed
        self._ensure_started()
        
        # NOTE: a random string is used (rather than currentThread()) so the
        # same thread may run several independent iterations concurrently
        data_buffer = None
        unique_buffer_identifier = "%s%s%s" % (random(), random(), time())
        self.__cv.acquire()
        try:
            # lazy-load an _AsyncIterableDataBuffer for this iteration,
            # seeded with a copy of everything captured so far
            if unique_buffer_identifier not in self.__data_buffer_lookup:
                self.__data_buffer_lookup[unique_buffer_identifier] = _AsyncIterableDataBuffer(objects = self.__objects[0:])
            data_buffer = self.__data_buffer_lookup[unique_buffer_identifier]

        finally:
            self.__cv.release()
        
        keep_going = True
        while keep_going:
            
            # drain the buffer while holding its lock, but yield AFTER
            # releasing it -- otherwise producers block on objects_cv for the
            # whole time consumer code runs between yields
            copied_object_list = []
            data_buffer.objects_cv.acquire()
            try:
                # wait for more object events if we haven't been stopped
                # (use ==, never "is", for integer comparison)
                while len(data_buffer.objects) == 0:
                    log_event(_LOGGER.info, "waiting_for_object")
                    if self._is_stopped():
                        log_event(_LOGGER.info, "stop_looping")
                        keep_going = False
                        break
                    else:
                        log_event(_LOGGER.info, "waiting_because_not_stopped")
                        data_buffer.objects_cv.wait(_OBJECT_CV_WAIT_IN_SECONDS)
                
                # consume the list
                while len(data_buffer.objects) > 0:
                    copied_object_list.append( data_buffer.objects.pop(0) )
                data_buffer.objects_cv.notifyAll()

            finally:
                data_buffer.objects_cv.release()
            
            # iterate over the entire copied list (lock released above)
            log_event(_LOGGER.info, "begin_iteration_over_copied_list", {"copied_object_list": copied_object_list})
            for obj in copied_object_list:
                if isinstance(obj, _AsyncThreadExceptionPlaceholder):
                    # re-raise exceptions on this thread
                    raise create_wrapped_exception(obj.exception, obj.exc_info)

                else:
                    # yield objects
                    yield obj
            log_event(_LOGGER.info, "finished_iteration")
        
        log_event(_LOGGER.info, "completely_finished")
        
        # delete the data buffer (skipped if an exception propagated above,
        # matching the previous behavior)
        self.__cv.acquire()
        try:
            del self.__data_buffer_lookup[unique_buffer_identifier]
        finally:
            self.__cv.release()
    
    def _is_stopped(self):
        """Return True once the producer finished or kill() was called."""
        self.__cv.acquire()
        try:
            return self.__stopped
        finally:
            self.__cv.release()
    
    def _consume_object(self, obj):
        """Capture one produced object and fan it out to every live
        consumer buffer.  Runs on the producer (iteration) thread."""
        
        # FIXME: needs to have a 'buffer size', so that we don't just keep consuming memory
        
        # push the object onto all lists
        data_buffer_delayed_lookup = {}
        self.__cv.acquire()
        try:

            # add to the global buffer (seeds buffers created later)
            self.__objects.append(obj)
            self.__cv.notifyAll()
            
            # snapshot the live buffers; the actual appends happen after
            # releasing self.__cv so we never hold two locks at once
            for unique_buffer_identifier, data_buffer in self.__data_buffer_lookup.iteritems():
                data_buffer_delayed_lookup[unique_buffer_identifier] = data_buffer
            
        finally:
            self.__cv.release()
        
        # perform the actual adds
        for unique_buffer_identifier, data_buffer in data_buffer_delayed_lookup.iteritems():
            log_event(_LOGGER.info, "appending_to_data_buffer", {"obj": repr(obj), "unique_buffer_identifier": unique_buffer_identifier})
            data_buffer.objects_cv.acquire()
            try:
                data_buffer.objects.append(obj)
                data_buffer.objects_cv.notifyAll()
            finally:
                data_buffer.objects_cv.release()
            log_event(_LOGGER.info, "appended_to_data_buffer", {"obj": repr(obj), "unique_buffer_identifier": unique_buffer_identifier})
    
    def _ensure_started(self):
        """Start the background iteration thread exactly once."""
        self.__cv.acquire()
        try:
            if not self.__started:
                log_event(_LOGGER.info, "starting_iteration_thread")
                self.__iteration_thread.start()
                self.__started = True
                log_event(_LOGGER.info, "started_iteration_thread")
        finally:
            self.__cv.release()
        # unblock the iteration thread (it waits on this event before consuming)
        self.__started_event.set()
    
    def kill(self):
        """Stop capturing and wake every waiting consumer."""
        self._stop_and_notify_all()
        
    def _stop_and_notify_all(self):
        """Mark the stream finished and wake all consumer threads."""

        # we're done; snapshot the buffers under the lock so a concurrent
        # __iter__ cannot mutate the dict while we walk it
        self.__cv.acquire()
        try:
            self.__stopped = True
            self.__cv.notifyAll()
            data_buffers = self.__data_buffer_lookup.values()
        finally:
            self.__cv.release()
        log_event(_LOGGER.info, "finished_stopping")

        # notify all subthreads we are done
        for data_buffer in data_buffers:
            data_buffer.objects_cv.acquire()
            try:
                data_buffer.objects_cv.notifyAll()
            finally:
                data_buffer.objects_cv.release()
    
    def __init__(self, consume_with):
        """consume_with: callable taking one argument (a consume-object
        callback); it should push every produced object through that
        callback and return when the source is exhausted."""
        
        self.__cv = Condition()                  # guards all state below
        self.__objects = []                      # global capture buffer
        self.__started = False
        self.__started_event = Event()
        self.__stopped = False
        self.__data_buffer_lookup = {}           # identifier -> _AsyncIterableDataBuffer
        
        # create the iteration thread and start it
        def iterate():
            
            # don't consume before the first __iter__ call arrives
            self.__started_event.wait()
            
            # delegate to the consume method
            consume_with( self._consume_object )
            log_event(_LOGGER.info, "done_consuming")
            
            # stop acquiring data and wake all listening threads
            self._stop_and_notify_all()
        
        self.__iteration_thread = Thread(name = "Thread-IterationThread", target = iterate)


class AsyncIterable(AsyncIterableBase):
    
    def __init__(self, iterable):
        
        def single_consumer(consume_object):
            
            try:
                # iterate over every element
                if callable(iterable):
                    for obj in iterable():
                        consume_object(obj)
                else:
                    for obj in iterable:
                        consume_object(obj)
            
            except Exception, e:
                # inject exceptions as objects into the stream
                # (AsyncIterableBase knows how to handle them properly)
                consume_object( _AsyncThreadExceptionPlaceholder(e) )
                
        AsyncIterableBase.__init__(self, consume_with = single_consumer)


class AsyncIterableGroup(AsyncIterableBase):
    """Merges several iterables into one asynchronous stream.

    Each source is consumed on its own thread.  Every produced element is
    emitted as a tuple with one slot per source: the producing source's
    slot holds the object and every other slot is None, so consumers can
    tell which source each object came from."""
    
    def __init__(self, *iterables):
        # iterables: any mix of plain iterables and zero-argument callables
        # that return an iterable (callables are invoked on their worker thread)
        
        def multi_consumer(consume_object):
            
            # template for the per-object tuple: one None slot per source
            base_tuple_list = [None for iterable in iterables]
            consumer_threads = []

            for idx in range(len(iterables)):

                def single_consumer(iterable, idx):
                    # NOTE: iterable and idx arrive as thread args (not via
                    # closure) so each worker gets its own bindings
                    
                    try:
                        # iterate over every element
                        if callable(iterable):
                            log_event(_LOGGER.info, "found_callable_iterable")
                            for obj in iterable():
                                log_event(_LOGGER.info, "got_object", {"obj": repr(obj)})
                                # copy the template, fill in only our slot
                                tupled_obj_list = base_tuple_list[0:]
                                tupled_obj_list[idx] = obj
                                tupled_obj = tuple(tupled_obj_list)
                                consume_object(tupled_obj)
                        else:
                            log_event(_LOGGER.info, "found_normal_iterable")
                            for obj in iterable:
                                log_event(_LOGGER.info, "got_object", {"obj": repr(obj)})
                                # copy the template, fill in only our slot
                                tupled_obj_list = base_tuple_list[0:]
                                tupled_obj_list[idx] = obj
                                tupled_obj = tuple(tupled_obj_list)
                                consume_object(tupled_obj)
                        
                    except Exception, e:
                        # inject exceptions as objects into the stream
                        # (AsyncIterableBase knows how to handle them properly)
                        consume_object( _AsyncThreadExceptionPlaceholder(e) )
                
                thread_name = "Thread-AsyncIterableGroup(%s)[iterable #%s]" % (currentThread().getName(), idx)
                consume_thread = Thread(name = thread_name, target = single_consumer, args = [iterables[idx], idx])
                consumer_threads.append( consume_thread )
                consume_thread.start()
            
            # join all threads: multi_consumer must not return until every
            # source is exhausted, since returning marks the stream stopped
            for thread in consumer_threads:
                log_event(_LOGGER.info, "joining_thread", {"thread": repr(thread)})
                thread.join()
                log_event(_LOGGER.info, "joined_thread", {"thread": repr(thread)})
            log_event(_LOGGER.info, "joined_all_multiconsumer_threads")
                
        AsyncIterableBase.__init__(self, consume_with = multi_consumer)
        

class _AsyncIterableDataBuffer(object):
    """Helper class for AsyncIterable"""
    
    def __init__(self, objects):
        self.objects = objects
        self.objects_cv = Condition()
