Dataset schema:
  query     string, lengths 9 to 60
  language  string, 1 distinct value ("python")
  code      string, lengths 105 to 25.7k
  url       string, lengths 91 to 217
priority queue
python
def priority_enqueue(self, function, name=None, force_start=False,
                     times=1, data=None):
    """
    Like :class:`enqueue()`, but adds the given function at the top of the
    queue.

    If force_start is True, the function is immediately started even when
    the maximum number of concurrent threads is already reached.

    :type  function: callable
    :param function: The function that is executed.
    :type  name: str
    :param name: Stored in Job.name.
    :type  force_start: bool
    :param force_start: Whether to start execution immediately.
    :type  times: int
    :param times: The maximum number of attempts.
    :type  data: object
    :param data: Optional data to store in Job.data.
    :rtype:  int
    :return: The id of the new job.
    """
    self._check_if_ready()
    return self.main_loop.priority_enqueue(function, name, force_start,
                                           times, data)
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/workqueue/workqueue.py#L161-L191
priority queue
python
def get_all_by_priority(cls, names):
    """
    Return all the queues with the given names, sorted by priorities
    (higher priority first), then by name
    """
    names = cls._get_iterable_for_names(names)

    queues = cls.get_all(names)

    # sort all queues by priority
    queues.sort(key=lambda q: int(q.priority.hget() or 0), reverse=True)

    return queues
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L125-L137
priority queue
python
def queue(self, name, url=None, method=None, reservation_sid=None,
          post_work_activity_sid=None, **kwargs):
    """
    Create a <Queue> element

    :param name: Queue name
    :param url: Action URL
    :param method: Action URL method
    :param reservation_sid: TaskRouter Reservation SID
    :param post_work_activity_sid: TaskRouter Activity SID
    :param kwargs: additional attributes

    :returns: <Queue> element
    """
    return self.nest(Queue(
        name,
        url=url,
        method=method,
        reservation_sid=reservation_sid,
        post_work_activity_sid=post_work_activity_sid,
        **kwargs
    ))
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/twiml/voice_response.py#L207-L228
priority queue
python
def enqueue(self, priority: int, item: TItem) -> bool:
    """Adds an entry to the priority queue.

    If drop_duplicate_entries is set and there is already a
    (priority, item) entry in the queue, then the enqueue is ignored.
    Check the return value to determine if an enqueue was kept or dropped.

    Args:
        priority: The priority of the item. Lower priorities dequeue
            before higher priorities.
        item: The item associated with the given priority.

    Returns:
        True if the item was enqueued. False if drop_duplicate_entries is
        set and the item is already in the queue.
    """
    if self._drop_set is not None:
        if (priority, item) in self._drop_set:
            return False
        self._drop_set.add((priority, item))

    # First enqueue initializes self._offset.
    if not self._buckets:
        self._buckets.append([item])
        self._offset = priority
        self._len = 1
        return True

    # Where is the bucket this item is supposed to go into?
    i = priority - self._offset

    # Extend bucket list backwards if needed.
    if i < 0:
        self._buckets[:0] = [[] for _ in range(-i)]
        self._offset = priority
        i = 0

    # Extend bucket list forwards if needed.
    while i >= len(self._buckets):
        self._buckets.append([])

    # Finish by adding item to the intended bucket's list.
    self._buckets[i].append(item)
    self._len += 1
    return True
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/_bucket_priority_queue.py#L86-L130
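The snippet above keeps one list ("bucket") per integer priority, making enqueue O(1) when priorities are dense. For comparison, here is a minimal stdlib sketch with the same "lower priority dequeues first" contract; this is not Cirq's API, and the names are illustrative only.

import heapq

heap = []
for priority, item in [(5, 'e'), (1, 'a'), (3, 'c'), (1, 'b')]:
    heapq.heappush(heap, (priority, item))

while heap:
    priority, item = heapq.heappop(heap)
    print(priority, item)  # 1 a / 1 b / 3 c / 5 e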
priority queue
python
def setPriority(self, queue, priority):
    '''
    Set priority of a sub-queue
    '''
    q = self.queueindex[queue]
    self.queues[q[0]].removeSubQueue(q[1])
    newPriority = self.queues.setdefault(priority, CBQueue.MultiQueue(self, priority))
    q[0] = priority
    newPriority.addSubQueue(q[1])
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/pqueue.py#L1032-L1040
priority queue
python
def get_queue(self):
    """Get the queue of news-title candidates.

    Return:
        queue -- the queue of news-title candidates, as a list
    """
    queue = []
    for i in range(0, self.index):
        unit = self.unit_raw[i]
        c = CDM(unit)
        # Filter out units that do not look like titles.
        if c.get_alpha() > 0 and c.PTN in range(self.title_min, self.title_max):
            queue.append(unit)
    if queue:
        log('debug', '\nTitle candidate queue built successfully: [{}]\n'.format(queue))
    return queue
https://github.com/pzs741/TEDT/blob/6b6663227b755005fe1a1e3e807a05bdb521e066/TEDT/candidate_corpus.py#L252-L271
priority queue
python
def p_queue(p):
    """
    queue : QUEUE COLON LIFO
          | QUEUE COLON FIFO
    """
    if p[3] == "LIFO":
        p[0] = {"queue": LIFO()}
    elif p[3] == "FIFO":
        p[0] = {"queue": FIFO()}
    else:
        # Report the unsupported discipline token (p[3]), not the keyword.
        raise RuntimeError("Queue discipline '%s' is not supported!" % p[3])
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L184-L196
priority queue
python
def create_queue(
        self, queue_name,
        lock_duration=30,
        max_size_in_megabytes=None,
        requires_duplicate_detection=False,
        requires_session=False,
        default_message_time_to_live=None,
        dead_lettering_on_message_expiration=False,
        duplicate_detection_history_time_window=None,
        max_delivery_count=None,
        enable_batched_operations=None):
    """Create a queue entity.

    :param queue_name: The name of the new queue.
    :type queue_name: str
    :param lock_duration: The lock duration in seconds for each message in the queue.
    :type lock_duration: int
    :param max_size_in_megabytes: The max size to allow the queue to grow to.
    :type max_size_in_megabytes: int
    :param requires_duplicate_detection: Whether the queue will require every message with a specified
     time frame to have a unique ID. Non-unique messages will be discarded. Default value is False.
    :type requires_duplicate_detection: bool
    :param requires_session: Whether the queue will be sessionful, and therefore require all message to have a
     Session ID and be received by a sessionful receiver. Default value is False.
    :type requires_session: bool
    :param default_message_time_to_live: The length of time a message will remain in the queue
     before it is either discarded or moved to the dead letter queue.
    :type default_message_time_to_live: ~datetime.timedelta
    :param dead_lettering_on_message_expiration: Whether to move expired messages to the dead letter
     queue. Default value is False.
    :type dead_lettering_on_message_expiration: bool
    :param duplicate_detection_history_time_window: The period within which all incoming messages must
     have a unique message ID.
    :type duplicate_detection_history_time_window: ~datetime.timedelta
    :param max_delivery_count: The maximum number of times a message will attempt to be delivered
     before it is moved to the dead letter queue.
    :type max_delivery_count: int
    :param enable_batched_operations:
    :type enable_batched_operations: bool
    :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found.
    :raises: ~azure.common.AzureConflictHttpError if a queue of the same name already exists.
    """
    queue_properties = Queue(
        lock_duration="PT{}S".format(int(lock_duration)),
        max_size_in_megabytes=max_size_in_megabytes,
        requires_duplicate_detection=requires_duplicate_detection,
        requires_session=requires_session,
        default_message_time_to_live=default_message_time_to_live,
        dead_lettering_on_message_expiration=dead_lettering_on_message_expiration,
        duplicate_detection_history_time_window=duplicate_detection_history_time_window,
        max_delivery_count=max_delivery_count,
        enable_batched_operations=enable_batched_operations)
    try:
        return self.mgmt_client.create_queue(queue_name, queue=queue_properties, fail_on_exist=True)
    except requests.exceptions.ConnectionError as e:
        raise ServiceBusConnectionError("Namespace: {} not found".format(self.service_namespace), e)
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/common/mixins.py#L39-L94
priority queue
python
def queue(self):
    """The name of the queue that this command was assigned to."""
    entry = self._proto.commandQueueEntry
    if entry.HasField('queueName'):
        return entry.queueName
    return None
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L193-L198
priority queue
python
def queue_exists(self, name):
    """
    Returns True or False, depending on the existence of the named queue.
    """
    try:
        self._manager.head(name)
        return True
    except exc.NotFound:
        return False
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/queueing.py#L591-L599
priority queue
python
def queue(self, name, value, quality=None, timestamp=None, attributes=None):
    """
    To reduce network traffic, you can buffer datapoints and
    then flush() anything in the queue.

    :param name: the name / label / tag for sensor data
    :param value: the sensor reading or value to record
    :param quality: the quality value, use the constants BAD, GOOD,
        etc. (optional and defaults to UNCERTAIN)
    :param timestamp: the time the reading was recorded in epoch
        milliseconds (optional and defaults to now)
    :param attributes: dictionary for any key-value pairs to store
        with the reading (optional)
    """
    # Get timestamp first in case delay opening websocket connection
    # and it must have millisecond accuracy
    if not timestamp:
        timestamp = int(round(time.time() * 1000))
    else:
        # Coerce datetime objects to epoch
        if isinstance(timestamp, datetime.datetime):
            timestamp = int(round(int(timestamp.strftime('%s')) * 1000))

    # Only specific quality values supported
    if quality not in [self.BAD, self.GOOD, self.NA, self.UNCERTAIN]:
        quality = self.UNCERTAIN

    # Check if adding to queue of an existing tag and add second datapoint
    for point in self._queue:
        if point['name'] == name:
            point['datapoints'].append([timestamp, value, quality])
            return

    # If adding new tag, initialize and set any attributes
    datapoint = {
        "name": name,
        "datapoints": [[timestamp, value, quality]]
    }

    # Attributes are extra details for a datapoint
    if attributes is not None:
        if not isinstance(attributes, dict):
            raise ValueError("Attributes are expected to be a dictionary.")

        # Validate rules for attribute keys to provide guidance.
        invalid_value = ':;= '
        has_invalid_value = re.compile(r'[%s]' % (invalid_value)).search
        has_valid_key = re.compile(r'^[\w\.\/\-]+$').search

        for (key, val) in list(attributes.items()):
            # Values cannot be empty
            if (val == '') or (val is None):
                raise ValueError("Attribute (%s) must have a non-empty value." % (key))

            # Values should be treated as a string for regex validation
            val = str(val)

            # Values cannot contain certain arbitrary characters
            if bool(has_invalid_value(val)):
                raise ValueError("Attribute (%s) cannot contain (%s)." % (key, invalid_value))

            # Attributes have to be alphanumeric-ish; validate the key itself
            if not bool(has_valid_key(key)):
                raise ValueError("Key (%s) not alphanumeric-ish." % (key))

        datapoint['attributes'] = attributes

    self._queue.append(datapoint)
    logging.debug("QUEUE: " + str(len(self._queue)))
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/data/timeseries.py#L422-L498
priority queue
python
def queue(self):
    """
    Get a queue of notifications

    Use it with a Python ``with`` statement
    """
    queue = NotificationQueue()
    self._listeners.add(queue)
    yield queue
    self._listeners.remove(queue)
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/notification_manager.py#L33-L42
priority queue
python
def enqueue_or_delay(self, queue_name=None, priority=None,
                     delayed_until=None, prepend=False, queue_model=None):
    """
    Will enqueue or delay the job depending on delayed_until.
    """
    queue_name = self._get_queue_name(queue_name)

    fields = {'queued': '1'}

    if priority is not None:
        fields['priority'] = priority
    else:
        priority = self.priority.hget()

    in_the_future = delayed_until and delayed_until > datetime.utcnow()

    if in_the_future:
        fields['delayed_until'] = str(delayed_until)
        fields['status'] = STATUSES.DELAYED
    else:
        self.delayed_until.delete()
        fields['status'] = STATUSES.WAITING

    self.hmset(**fields)

    if queue_model is None:
        queue_model = self.queue_model

    queue = queue_model.get_queue(queue_name, priority)

    if in_the_future:
        queue.delay_job(self, delayed_until)
    else:
        queue.enqueue_job(self, prepend)
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L423-L455
priority queue
python
def queue_declare(self, queue, durable, exclusive, auto_delete,
                  warn_if_exists=False, arguments=None):
    """Declare a named queue."""
    return self.channel.queue_declare(queue=queue,
                                      durable=durable,
                                      exclusive=exclusive,
                                      auto_delete=auto_delete,
                                      arguments=arguments)
https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/backends/pikachu.py#L87-L95
priority queue
python
def queue_declare(self, queue, durable, exclusive, auto_delete,
                  warn_if_exists=False, arguments=None):
    """Declare a named queue."""
    if warn_if_exists and self.queue_exists(queue):
        warnings.warn(QueueAlreadyExistsWarning(
            QueueAlreadyExistsWarning.__doc__))

    return self.channel.queue_declare(queue=queue,
                                      durable=durable,
                                      exclusive=exclusive,
                                      auto_delete=auto_delete,
                                      arguments=arguments)
https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/backends/pyamqplib.py#L245-L256
priority queue
python
def queue(self, project):
    """
    Get a queue of notifications

    Use it with a Python ``with`` statement
    """
    queue = NotificationQueue()
    self._listeners.setdefault(project.id, set())
    self._listeners[project.id].add(queue)
    yield queue
    self._listeners[project.id].remove(queue)
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/notification.py#L36-L46
priority queue
python
def get_queue(cls, name, priority=0, **fields_if_new):
    """
    Get, or create, and return the wanted queue.
    If the queue is created, fields in fields_if_new will be set for the
    new queue.
    """
    queue_kwargs = {'name': name, 'priority': priority}
    retries = 0
    while retries < 10:
        retries += 1
        try:
            queue, created = cls.get_or_connect(**queue_kwargs)
        except IndexError:
            # Failure during the retrieval
            # https://friendpaste.com/5U63a8aFuV44SEgQckgMP
            # => retry
            continue
        except ValueError:
            # more than one (race condition
            # https://github.com/yohanboniface/redis-limpyd/issues/82 ?)
            try:
                queue = cls.collection(**queue_kwargs).instances()[0]
            except IndexError:
                # but no more now ?!
                # => retry
                continue
            else:
                created = False

        # ok we have our queue, stop now
        break

    if created and fields_if_new:
        queue.set_fields(**fields_if_new)

    return queue
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L51-L84
priority queue
python
def queue(self, queue_, value):
    """Puts a value into a queue but aborts if this thread is closed."""
    while not self.closed:
        try:
            queue_.put(value, block=True, timeout=1)
            return
        except queue.Full:
            continue
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/stream/segmented.py#L124-L131
priority queue
python
def next(self):
    """Get the next processable item of the queue.

    A processable item is supposed to have the status `queued`.

    Returns:
        None: If no key is found.
        Int: If a valid entry is found.
    """
    smallest = None
    for key in self.queue.keys():
        if self.queue[key]['status'] == 'queued':
            if smallest is None or key < smallest:
                smallest = key
    return smallest
https://github.com/Nukesor/pueue/blob/f1d276360454d4dd2738658a13df1e20caa4b926/pueue/daemon/queue.py#L76-L91
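The linear scan above can also be written as a single min() over a generator; this is an illustrative equivalent only, where `queue` stands in for self.queue (a dict of integer keys to entry dicts):

smallest = min(
    (key for key, entry in queue.items() if entry['status'] == 'queued'),
    default=None,
)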
priority queue
python
def prioritize():
    """
    Yield the messages in the queue in the order they should be sent.
    """
    while True:
        hp_qs = Message.objects.high_priority().using('default')
        mp_qs = Message.objects.medium_priority().using('default')
        lp_qs = Message.objects.low_priority().using('default')
        while hp_qs.count() or mp_qs.count():
            while hp_qs.count():
                for message in hp_qs.order_by("when_added"):
                    yield message
            while hp_qs.count() == 0 and mp_qs.count():
                yield mp_qs.order_by("when_added")[0]
        while hp_qs.count() == 0 and mp_qs.count() == 0 and lp_qs.count():
            yield lp_qs.order_by("when_added")[0]
        if Message.objects.non_deferred().using('default').count() == 0:
            break
https://github.com/pinax/django-mailer/blob/129a848090d5de8a3e25067048ba6d3091c3b187/mailer/engine.py#L31-L49
priority queue
python
def _flush_queue(self, q, ignore_priority=False):
    """
    :param q: PriorityQueue instance holding GarbageCollector entries
    :param ignore_priority: If True - all GarbageCollector entries should
        be resubmitted. If False - only those entries whose waiting time
        has expired will be resubmitted.
    """
    assert isinstance(q, PriorityQueue)

    current_timestamp = compute_release_time(lag_in_minutes=0)
    for _ in range(len(q)):
        entry = q.pop()
        assert isinstance(entry, PriorityEntry)

        if ignore_priority or entry.release_time < current_timestamp:
            self._resubmit_uow(entry.entry)
        else:
            q.put(entry)
            break
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/garbage_collector.py#L81-L98
priority queue
python
def queue_status(self, targets='all', verbose=False):
    """Fetch the status of engine queues.

    Parameters
    ----------

    targets : int/str/list of ints/strs
        the engines whose states are to be queried.
        default : all
    verbose : bool
        Whether to return lengths only, or lists of ids for each element
    """
    if targets == 'all':
        # allow 'all' to be evaluated on the engine
        engine_ids = None
    else:
        engine_ids = self._build_targets(targets)[1]
    content = dict(targets=engine_ids, verbose=verbose)
    self.session.send(self._query_socket, "queue_request", content=content)
    idents, msg = self.session.recv(self._query_socket, 0)
    if self.debug:
        pprint(msg)
    content = msg['content']
    status = content.pop('status')
    if status != 'ok':
        raise self._unwrap_exception(content)
    content = rekey(content)
    if isinstance(targets, int):
        return content[targets]
    else:
        return content
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/client/client.py#L1568-L1598
priority queue
python
def get(self):
    """Get the highest priority Processing Block from the queue."""
    with self._mutex:
        entry = self._queue.pop()
        del self._block_map[entry[2]]
        return entry[2]
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/processing_controller/scheduler/pb_queue.py#L66-L71
priority queue
python
def dequeue(self, destination):
    """
    Removes and returns an item from the queue (or C{None} if no items in queue).

    @param destination: The queue name (destination).
    @type destination: C{str}

    @return: The first frame in the specified queue, or C{None} if there are none.
    @rtype: C{stompclient.frame.Frame}
    """
    session = meta.Session()
    try:
        selstmt = select([model.frames_table.c.message_id,
                          model.frames_table.c.frame])
        selstmt = selstmt.where(
            model.frames_table.c.destination == destination)
        selstmt = selstmt.order_by(
            model.frames_table.c.queued, model.frames_table.c.sequence)

        result = session.execute(selstmt)

        first = result.fetchone()
        if not first:
            return None

        delstmt = model.frames_table.delete().where(
            model.frames_table.c.message_id == first[model.frames_table.c.message_id])
        session.execute(delstmt)

        frame = first[model.frames_table.c.frame]
    except:
        session.rollback()
        raise
    else:
        session.commit()
        return frame
https://github.com/hozn/coilmq/blob/76b7fcf347144b3a5746423a228bed121dc564b5/coilmq/store/sa/__init__.py#L111-L149
priority queue
python
def get_queue(self, queue_name):
    '''
    Retrieves an existing queue.

    queue_name:
        Name of the queue.
    '''
    _validate_not_none('queue_name', queue_name)
    request = HTTPRequest()
    request.method = 'GET'
    request.host = self._get_host()
    request.path = '/' + _str(queue_name) + ''
    request.path, request.query = self._httpclient._update_request_uri_query(request)  # pylint: disable=protected-access
    request.headers = self._update_service_bus_header(request)
    response = self._perform_request(request)

    return _convert_response_to_queue(response)
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/control_client/servicebusservice.py#L285-L301
priority queue
python
def _prime_queue(self, init_points):
    """Make sure there's something in the queue at the very beginning."""
    if self._queue.empty and self._space.empty:
        init_points = max(init_points, 1)

    for _ in range(init_points):
        self._queue.add(self._space.random_sample())
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/bayesian_optimization.py#L137-L143
priority queue
python
def queue(self, *args, **kwargs):
    """ Schedule a task for execution. The task call (and its arguments)
    will be placed on the queue and processed asynchronously. """
    self.scraper.task_manager.put(self, args, kwargs)
    return self
https://github.com/pudo-attic/scrapekit/blob/cfd258120922fcd571430cdf00ba50f3cf18dc15/scrapekit/tasks.py#L138-L143
priority queue
python
def queue(self, *args, **kwargs):
    """
    A function to queue a RQ job, e.g.::

        @rq.job(timeout=60)
        def add(x, y):
            return x + y

        add.queue(1, 2, timeout=30)

    :param \\*args: The positional arguments to pass to the queued job.
    :param \\*\\*kwargs: The keyword arguments to pass to the queued job.
    :param queue: Name of the queue to queue in, defaults to the queue of
        the job or :attr:`~flask_rq2.RQ.default_queue`.
    :type queue: str
    :param timeout: The job timeout in seconds. If not provided uses the
        job's timeout or :attr:`~flask_rq2.RQ.default_timeout`.
    :type timeout: int
    :param description: Description of the job.
    :type description: str
    :param result_ttl: The result TTL in seconds. If not provided uses the
        job's result TTL or :attr:`~flask_rq2.RQ.default_result_ttl`.
    :type result_ttl: int
    :param ttl: The job TTL in seconds. If not provided uses the job's TTL
        or no TTL at all.
    :type ttl: int
    :param depends_on: A job instance or id that the new job depends on.
    :type depends_on: ~flask_rq2.job.FlaskJob or str
    :param job_id: A custom ID for the new job. Defaults to an
        :mod:`UUID <uuid>`.
    :type job_id: str
    :param at_front: Whether or not the job is queued in front of all other
        enqueued jobs.
    :type at_front: bool
    :param meta: Additional meta data about the job.
    :type meta: dict
    :return: An RQ job instance.
    :rtype: ~flask_rq2.job.FlaskJob
    """
    queue_name = kwargs.pop('queue', self.queue_name)
    timeout = kwargs.pop('timeout', self.timeout)
    result_ttl = kwargs.pop('result_ttl', self.result_ttl)
    ttl = kwargs.pop('ttl', self.ttl)
    depends_on = kwargs.pop('depends_on', self._depends_on)
    job_id = kwargs.pop('job_id', None)
    at_front = kwargs.pop('at_front', self._at_front)
    meta = kwargs.pop('meta', self._meta)
    description = kwargs.pop('description', self._description)
    return self.rq.get_queue(queue_name).enqueue_call(
        self.wrapped,
        args=args,
        kwargs=kwargs,
        timeout=timeout,
        result_ttl=result_ttl,
        ttl=ttl,
        depends_on=depends_on,
        job_id=job_id,
        at_front=at_front,
        meta=meta,
        description=description,
    )
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/functions.py#L65-L139
priority queue
python
def put(self, taskid, priority=0, exetime=0):
    """
    Put a task into the task queue.

    When using heap sort, if we put tasks (with the same priority and
    exetime=0) into the queue, the queue is not a strict FIFO queue, but
    more like a LIFO stack. It is very possible that when there is a
    continuous large flow, the speed of select is slower than request,
    resulting in priority-queue accumulation in a short time. In this
    scenario, the tasks that entered the priority queue earlier will not
    get processed until the request flow becomes small.

    Thus, we store a globally, atomically self-increasing value into
    task.sequence, which represents the task enqueue sequence. When the
    comparison of exetime and priority makes no difference, we compare
    task.sequence to ensure that the entire queue is ordered.
    """
    now = time.time()
    task = InQueueTask(taskid, priority, exetime)

    self.mutex.acquire()
    if taskid in self.priority_queue:
        self.priority_queue.put(task)
    elif taskid in self.time_queue:
        self.time_queue.put(task)
    elif taskid in self.processing and self.processing[taskid].taskid:
        # force-updating a processing task is not allowed, as too many
        # problems may happen
        pass
    else:
        if exetime and exetime > now:
            self.time_queue.put(task)
        else:
            task.exetime = 0
            self.priority_queue.put(task)
    self.mutex.release()
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/task_queue.py#L190-L225
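The stability trick described in that docstring can be shown with the stdlib alone: an increasing sequence number breaks ties so that equal-priority tasks dequeue in insertion (FIFO) order. This is a minimal illustrative sketch, not pyspider's actual InQueueTask machinery.

import heapq
import itertools

counter = itertools.count()
heap = []

def put(priority, taskid):
    # heapq is a min-heap; the sequence number never compares equal,
    # so taskid itself is never compared.
    heapq.heappush(heap, (priority, next(counter), taskid))

for tid in ('t1', 't2', 't3'):
    put(0, tid)

print([heapq.heappop(heap)[2] for _ in range(len(heap))])  # ['t1', 't2', 't3']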
priority queue
python
def priority_argsort(list_, priority):
    r"""
    Args:
        list_ (list):
        priority (list): desired order of items

    Returns:
        list: reordered_index_list

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> import utool as ut
        >>> list_ = [2, 4, 6, 8, 10]
        >>> priority = [8, 2, 6, 9]
        >>> sortx = priority_argsort(list_, priority)
        >>> reordered_list = priority_sort(list_, priority)
        >>> assert ut.take(list_, sortx) == reordered_list
        >>> result = str(sortx)
        >>> print(result)
        [3, 0, 2, 1, 4]
    """
    reordered_list = priority_sort(list_, priority)
    # FIXME: inefficient
    sortx = [list_.index(item) for item in reordered_list]
    return sortx
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1228-L1253
priority queue
python
def get_queue(self, name):
    """Get information about a queue.

    Parameters
    ----------
    name : str
        The queue name.

    Returns
    -------
    queue : Queue

    Examples
    --------
    >>> client.get_queue('myqueue')
    Queue<name='myqueue', percent_used=5.00>
    """
    req = proto.QueueRequest(name=name)
    resp = self._call('getQueue', req)
    return Queue.from_protobuf(resp)
https://github.com/jcrist/skein/blob/16f8b1d3b3d9f79f36e2f152e45893339a1793e8/skein/core.py#L720-L739
priority queue
python
def queue_call(self, delay, callback, *args, **kwds):
    """Schedule a function call at a specific time in the future."""
    if delay is None:
        self.current.append((callback, args, kwds))
        return
    if delay < 1e9:
        when = delay + self.clock.now()
    else:
        # Times over a billion seconds are assumed to be absolute.
        when = delay
    self.insort_event_right((when, callback, args, kwds))
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/eventloop.py#L138-L148
priority queue
python
def queue(self):
    """An ordered list of upcoming events.

    Events are named tuples with fields for:
        time, priority, action, arguments
    """
    # Use heapq to sort the queue rather than using 'sorted(self._queue)'.
    # With heapq, two events scheduled at the same time will show in
    # the actual order they would be retrieved.
    events = self._queue[:]
    return map(heapq.heappop, [events]*len(events))
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/sched.py#L133-L142
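The copy-then-heappop idiom above yields events in exact retrieval order while leaving the live heap untouched. A small self-contained illustration, using hypothetical (time, priority, action) tuples rather than sched's internals:

import heapq

events = [(15, 1, 'b'), (10, 2, 'c'), (10, 1, 'a')]
heapq.heapify(events)

# Popping a copy returns items in retrieval order; the original heap
# is left intact.
snapshot = events[:]
ordered = [heapq.heappop(snapshot) for _ in range(len(snapshot))]
print(ordered)    # [(10, 1, 'a'), (10, 2, 'c'), (15, 1, 'b')]
print(events[0])  # heap root still present: (10, 1, 'a')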
priority queue
python
def qos_queue_scheduler_strict_priority_priority_number(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
    queue = ET.SubElement(qos, "queue")
    scheduler = ET.SubElement(queue, "scheduler")
    strict_priority = ET.SubElement(scheduler, "strict-priority")
    priority_number = ET.SubElement(strict_priority, "priority-number")
    priority_number.text = kwargs.pop('priority_number')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py#L593-L605
priority queue
python
def _process_waiting_queue(self):
    ''' thread that processes the waiting queue:
        fetches the transfer spec, then calls start transfer,
        ensuring that the max ascp count is not exceeded '''
    logger.info("Queue processing thread started")
    while not self.is_stop():
        self._processing_event.wait(3)
        self._processing_event.clear()
        if self.is_stop():
            break
        while self.waiting_coordinator_count() > 0:
            if self.is_stop():
                break
            _used_slots = self.tracked_coordinator_count(True)
            _free_slots = self._config.ascp_max_concurrent - _used_slots
            if _free_slots <= 0:
                break
            with self._lockw:
                # check are there enough free slots
                _req_slots = self._waiting_transfer_coordinators[0].session_count
                if _req_slots > _free_slots:
                    break
                _coordinator = self._waiting_transfer_coordinators.popleft()

            self.add_transfer_coordinator(_coordinator)
            if not _coordinator.set_transfer_spec():
                self.remove_aspera_coordinator(_coordinator)
            else:
                logger.info("ASCP process queue - Max(%d) InUse(%d) Free(%d) New(%d)" %
                            (self._config.ascp_max_concurrent, _used_slots, _free_slots, _req_slots))
                _coordinator.start_transfer()
    logger.info("Queue processing thread stopped")
    self._processing_stopped_event.set()
https://github.com/IBM/ibm-cos-sdk-python-s3transfer/blob/24ba53137213e26e6b8fc2c3ec1e8198d507d22b/ibm_s3transfer/aspera/manager.py#L581-L620
priority queue
python
def needs_high_priority(self, priority):
    """
    :return: None
    """
    assert isinstance(priority, int)
    if priority != velbus.HIGH_PRIORITY:
        self.parser_error("needs high priority set")
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/message.py#L209-L215
priority queue
python
def _queue_task(self, args):
    ''' add transfer to waiting queue if possible
        then notify the background thread to process it '''
    if self._cancel_called:
        raise AsperaTransferQueueError("Cancel already called")
    elif self._wait_called:
        raise AsperaTransferQueueError("Cant queue items during wait")
    elif self.waiting_coordinator_count() >= self._config.max_submission_queue_size:
        raise AsperaTransferQueueError("Max queued items reached")
    else:
        _coordinator = AsperaTransferCoordinator(args)
        _components = {'meta': TransferMeta(args, transfer_id=args.transfer_id),
                       'coordinator': _coordinator}
        _transfer_future = AsperaTransferFuture(**_components)
        _coordinator.add_subscribers(args.subscribers, future=_transfer_future)
        _coordinator.add_done_callback(self.remove_aspera_coordinator,
                                       transfer_coordinator=_coordinator)
        self.append_waiting_queue(_coordinator)
        if not self._processing_thread:
            self._processing_thread = threading.Thread(target=self._process_waiting_queue)
            self._processing_thread.daemon = True
            self._processing_thread.start()
        self._wakeup_processing_thread()
        return _transfer_future
https://github.com/IBM/ibm-cos-sdk-python-s3transfer/blob/24ba53137213e26e6b8fc2c3ec1e8198d507d22b/ibm_s3transfer/aspera/manager.py#L503-L530
priority queue
python
def queues(self):
    """
    Access the queues

    :returns: twilio.rest.api.v2010.account.queue.QueueList
    :rtype: twilio.rest.api.v2010.account.queue.QueueList
    """
    if self._queues is None:
        self._queues = QueueList(self._version, account_sid=self._solution['sid'], )
    return self._queues
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/__init__.py#L522-L531
priority queue
python
def _enqueue(self, msg):
    """Push a new `msg` onto the queue, return `(success, msg)`"""
    self.log.debug('queueing: %s', msg)

    if self.queue.full():
        self.log.warn('librato_bg queue is full')
        return False, msg

    self.queue.put(msg)
    self.log.debug('enqueued %s.', msg)
    return True, msg
https://github.com/nyaruka/python-librato-bg/blob/e541092838694de31d256becea8391a9cfe086c7/librato_bg/client.py#L38-L48
priority queue
python
def flush(self, ignore_priority=False):
    """ method iterates over each reprocessing queue and re-submits
        UOWs whose waiting time has expired """
    for process_name, q in self.reprocess_uows.items():
        self._flush_queue(q, ignore_priority)
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/garbage_collector.py#L101-L104
priority queue
python
def get_all(cls, names):
    """
    Return all queues for the given names (for all available priorities)
    """
    names = cls._get_iterable_for_names(names)

    queues = []
    for queue_name in names:
        queues.extend(cls.collection(name=queue_name).instances())

    return queues
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L112-L122
priority queue
python
def send_to_queue(
        self,
        args: Tuple = (),
        kwargs: Dict[str, Any] = {},
        host: str = None,
        wait_result: Union[int, float] = None,
        message_ttl: Union[int, float] = None,
) -> Any:
    """
    Sends a message to the queue.
    A worker will run the task's function when it receives the message.

    :param args: Arguments that will be passed to task on execution.
    :param kwargs: Keyword arguments that will be passed to task
        on execution.
    :param host: Send this task to specific host. ``host`` will be
        appended to the queue name. If ``host`` is "localhost", hostname
        of the server will be appended to the queue name.
    :param wait_result: Wait for result from worker for ``wait_result``
        seconds. If timeout occurs,
        :class:`~kuyruk.exceptions.ResultTimeout` is raised.
        If exception occurs in worker,
        :class:`~kuyruk.exceptions.RemoteException` is raised.
    :param message_ttl: If set, message will be destroyed in queue after
        ``message_ttl`` seconds.
    :return: Result from worker if ``wait_result`` is set,
        else :const:`None`.
    """
    if self.kuyruk.config.EAGER:
        # Run the task in current process
        result = self.apply(*args, **kwargs)
        return result if wait_result else None

    logger.debug("Task.send_to_queue args=%r, kwargs=%r", args, kwargs)
    queue = self._queue_for_host(host)
    description = self._get_description(args, kwargs)
    self._send_signal(signals.task_presend, args=args, kwargs=kwargs,
                      description=description)

    body = json.dumps(description)
    msg = amqp.Message(body=body)
    if wait_result:
        # Use direct reply-to feature from RabbitMQ:
        # https://www.rabbitmq.com/direct-reply-to.html
        msg.properties['reply_to'] = 'amq.rabbitmq.reply-to'

    if message_ttl:
        msg.properties['expiration'] = str(int(message_ttl * 1000))

    with self.kuyruk.channel() as ch:
        if wait_result:
            result = Result(ch.connection)
            ch.basic_consume(queue='amq.rabbitmq.reply-to', no_ack=True,
                             callback=result.process_message)

        ch.queue_declare(queue=queue, durable=True, auto_delete=False)
        ch.basic_publish(msg, exchange="", routing_key=queue)
        self._send_signal(signals.task_postsend, args=args, kwargs=kwargs,
                          description=description)

        if wait_result:
            return result.wait(wait_result)
https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/task.py#L69-L130
priority queue
python
def __command_queue_listener(self):
    """Function to continuously retrieve data from the frontend.
    Commands are sent to the central priority queue. If the pipe from
    the frontend is closed the service shutdown is initiated. Check
    every second if service has shut down, then terminate.
    This function is run by a separate daemon thread, which is started
    by the __start_command_queue_listener function.
    """
    self.log.debug("Queue listener thread started")
    counter = itertools.count()  # insertion sequence to keep messages in order
    while not self.__shutdown:
        if self.__pipe_commands.poll(1):
            try:
                message = self.__pipe_commands.recv()
            except EOFError:
                # Pipe was closed by frontend. Shut down service.
                self.__shutdown = True
                self.log.error(
                    "Pipe closed by frontend, shutting down service",
                    exc_info=True
                )
                break
            queue_item = (Priority.COMMAND, next(counter), message)
            try:
                self.__queue.put(queue_item, True, 60)
            except queue.Full:
                # If the message can't be stored within 60 seconds then the
                # service is operating outside normal parameters. Try to
                # shut it down.
                self.__shutdown = True
                self.log.error(
                    "Write to service priority queue failed, shutting down service",
                    exc_info=True,
                )
                break
    self.log.debug("Queue listener thread terminating")
https://github.com/DiamondLightSource/python-workflows/blob/7ef47b457655b96f4d2ef7ee9863cf1b6d20e023/workflows/services/common_service.py#L228-L261
priority queue
python
def fix_queue_critical(self):
    """
    This function tries to fix critical events originating from the queue
    submission system.

    General strategy: first try to increase resources in order to fix the
    problem; if this is not possible, call a task-specific method to
    attempt to decrease the demands.

    Returns:
        1 if task has been fixed else 0.
    """
    from pymatgen.io.abinit.scheduler_error_parsers import NodeFailureError, MemoryCancelError, TimeCancelError
    #assert isinstance(self.manager, TaskManager)

    self.history.info('fixing queue critical')
    ret = "task.fix_queue_critical: "

    if not self.queue_errors:
        # TODO
        # paral_kgb = 1 leads to nasty sigsegv that are seen as Qcritical errors!
        # Try to fallback to the conjugate gradient.
        #if self.uses_paral_kgb(1):
        #    logger.critical("QCRITICAL with PARAL_KGB==1. Will try CG!")
        #    self.set_vars(paral_kgb=0)
        #    self.reset_from_scratch()
        #    return
        # queue error but no errors detected, try to solve by increasing ncpus if the task scales
        # if resources are at maximum the task is definitively turned to errored
        if self.mem_scales or self.load_scales:
            try:
                self.manager.increase_resources()  # acts either on the policy or on the qadapter
                self.reset_from_scratch()
                ret += "increased resources"
                return ret
            except ManagerIncreaseError:
                self.set_status(self.S_ERROR, msg='unknown queue error, could not increase resources any further')
                raise FixQueueCriticalError
        else:
            self.set_status(self.S_ERROR, msg='unknown queue error, no options left')
            raise FixQueueCriticalError

    else:
        print("Fix_qcritical: received %d queue_errors" % len(self.queue_errors))
        print("type_list: %s" % list(type(qe) for qe in self.queue_errors))

        for error in self.queue_errors:
            self.history.info('fixing: %s' % str(error))
            ret += str(error)
            if isinstance(error, NodeFailureError):
                # if the problematic node is known, exclude it
                if error.nodes is not None:
                    try:
                        self.manager.exclude_nodes(error.nodes)
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='excluding nodes')
                    except:
                        raise FixQueueCriticalError
                else:
                    self.set_status(self.S_ERROR, msg='Node error but no node identified.')
                    raise FixQueueCriticalError

            elif isinstance(error, MemoryCancelError):
                # ask the qadapter to provide more resources, i.e. more cpus so more total
                # memory; if the code scales this should fix the memory problem.
                # increase both max and min ncpu of the autoparal and rerun autoparal
                if self.mem_scales:
                    try:
                        self.manager.increase_ncpus()
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='increased ncpus to solve memory problem')
                        return
                    except ManagerIncreaseError:
                        self.history.warning('increasing ncpus failed')

                # if the max is reached, try to increase the memory per cpu:
                try:
                    self.manager.increase_mem()
                    self.reset_from_scratch()
                    self.set_status(self.S_READY, msg='increased mem')
                    return
                except ManagerIncreaseError:
                    self.history.warning('increasing mem failed')

                # if this failed ask the task to provide a method to reduce the memory demand
                try:
                    self.reduce_memory_demand()
                    self.reset_from_scratch()
                    self.set_status(self.S_READY, msg='decreased mem demand')
                    return
                except DecreaseDemandsError:
                    self.history.warning('decreasing demands failed')

                msg = ('Memory error detected but the memory could not be increased neither could the\n'
                       'memory demand be decreased. Unrecoverable error.')
                self.set_status(self.S_ERROR, msg)
                raise FixQueueCriticalError

            elif isinstance(error, TimeCancelError):
                # ask the qadapter to provide more time
                print('trying to increase time')
                try:
                    self.manager.increase_time()
                    self.reset_from_scratch()
                    self.set_status(self.S_READY, msg='increased wall time')
                    return
                except ManagerIncreaseError:
                    self.history.warning('increasing the walltime failed')

                # if this fails ask the qadapter to increase the number of cpus
                if self.load_scales:
                    try:
                        self.manager.increase_ncpus()
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='increased number of cpus')
                        return
                    except ManagerIncreaseError:
                        self.history.warning('increasing ncpus to speed up the calculation to stay within the walltime failed')

                # if this failed ask the task to provide a method to speed up the task
                try:
                    self.speed_up()
                    self.reset_from_scratch()
                    self.set_status(self.S_READY, msg='task speedup')
                    return
                except DecreaseDemandsError:
                    self.history.warning('decreasing demands failed')

                msg = ('Time cancel error detected but the time could not be increased neither could\n'
                       'the time demand be decreased by speedup or increasing the number of cpus.\n'
                       'Unrecoverable error.')
                self.set_status(self.S_ERROR, msg)

            else:
                msg = 'No solution provided for error %s. Unrecoverable error.' % error.name
                self.set_status(self.S_ERROR, msg)

    return 0
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L3016-L3151
priority queue
python
def queue(p_queue, host=None):
    '''Construct a path to the queue dir for a queue'''
    if host is not None:
        return _path(_c.FSQ_QUEUE, root=_path(host, root=hosts(p_queue)))
    return _path(p_queue, _c.FSQ_QUEUE)
https://github.com/axialmarket/fsq/blob/43b84c292cb8a187599d86753b947cf73248f989/fsq/path.py#L46-L50
priority queue
python
def LifoQueue(self, name, initial=None, maxsize=None):
    """The LIFO queue datatype.

    :param name: The name of the queue.
    :keyword initial: Initial items in the queue.

    See :class:`redish.types.LifoQueue`.
    """
    return types.LifoQueue(name, self.api, initial=initial, maxsize=maxsize)
https://github.com/ask/redish/blob/4845f8d5e12fd953ecad624b4e1e89f79a082a3e/redish/client.py#L106-L116
priority queue
python
def Queue(self, name, initial=None, maxsize=None):
    """The queue datatype.

    :param name: The name of the queue.
    :keyword initial: Initial items in the queue.

    See :class:`redish.types.Queue`.
    """
    return types.Queue(name, self.api, initial=initial, maxsize=maxsize)
https://github.com/ask/redish/blob/4845f8d5e12fd953ecad624b4e1e89f79a082a3e/redish/client.py#L95-L104
priority queue
python
def append_waiting_queue(self, transfer_coordinator):
    ''' append item to waiting queue '''
    logger.debug("Add to waiting queue count=%d" % self.waiting_coordinator_count())
    with self._lockw:
        self._waiting_transfer_coordinators.append(transfer_coordinator)
https://github.com/IBM/ibm-cos-sdk-python-s3transfer/blob/24ba53137213e26e6b8fc2c3ec1e8198d507d22b/ibm_s3transfer/aspera/manager.py#L552-L556
priority queue
python
def process_bulk_queue(self, es_bulk_kwargs=None):
    """Process bulk indexing queue.

    :param dict es_bulk_kwargs: Passed to
        :func:`elasticsearch:elasticsearch.helpers.bulk`.
    """
    with current_celery_app.pool.acquire(block=True) as conn:
        consumer = Consumer(
            connection=conn,
            queue=self.mq_queue.name,
            exchange=self.mq_exchange.name,
            routing_key=self.mq_routing_key,
        )

        req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']

        es_bulk_kwargs = es_bulk_kwargs or {}
        count = bulk(
            self.client,
            self._actionsiter(consumer.iterqueue()),
            stats_only=True,
            request_timeout=req_timeout,
            **es_bulk_kwargs
        )

        consumer.close()

    return count
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L166-L193
priority queue
python
def requeue_job(self, job, queue, priority, delayed_for=None):
    """
    Requeue a job in a queue with the given priority, possibly delayed
    """
    job.requeue(queue_name=queue._cached_name,
                priority=priority,
                delayed_for=delayed_for,
                queue_model=self.queue_model)

    if hasattr(job, 'on_requeued'):
        job.on_requeued(queue)

    self.log(self.job_requeue_message(job, queue))
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L492-L504
priority queue
python
def declare_queue(self, queue_name='', passive=False, durable=False,
                  exclusive=False, auto_delete=False, arguments=None):
    """
    Declare a queue.

    :param queue_name: the queue name
    :param passive:
    :param durable:
    :param exclusive:
    :param auto_delete:
    :param arguments:
    :return: the (possibly random, server-generated) callback queue name
        returned by the pika framework
    """
    result = self._channel.queue_declare(
        queue=queue_name,
        passive=passive,
        durable=durable,
        exclusive=exclusive,
        auto_delete=auto_delete,
        arguments=arguments
    )
    return result.method.queue
https://github.com/pushyzheng/flask-rabbitmq/blob/beecefdf7bb6ff0892388e2bc303aa96931588bd/example/producer/flask_rabbitmq/RabbitMQ.py#L85-L105
priority queue
python
def queue_status(self, client_id, msg):
    """Return the Queue status of one or more targets.

    if verbose: return the msg_ids
    else: return len of each type.

    keys:
        queue (pending MUX jobs)
        tasks (pending Task jobs)
        completed (finished jobs from both queues)"""
    content = msg['content']
    targets = content['targets']
    try:
        targets = self._validate_targets(targets)
    except:
        content = error.wrap_exception()
        self.session.send(self.query, "hub_error",
                          content=content, ident=client_id)
        return
    verbose = content.get('verbose', False)
    content = dict(status='ok')
    for t in targets:
        queue = self.queues[t]
        completed = self.completed[t]
        tasks = self.tasks[t]
        if not verbose:
            queue = len(queue)
            completed = len(completed)
            tasks = len(tasks)
        content[str(t)] = {'queue': queue, 'completed': completed, 'tasks': tasks}
    content['unassigned'] = list(self.unassigned) if verbose else len(self.unassigned)
    # print (content)
    self.session.send(self.query, "queue_reply", content=content, ident=client_id)
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/hub.py#L1069-L1098
priority queue
python
def dequeue(self) -> Tuple[int, TItem]:
    """Removes and returns an item from the priority queue.

    Returns:
        A tuple whose first element is the priority of the dequeued item
        and whose second element is the dequeued item.

    Raises:
        ValueError:
            The queue is empty.
    """
    if self._len == 0:
        raise ValueError('BucketPriorityQueue is empty.')

    # Drop empty buckets at the front of the queue.
    while self._buckets and not self._buckets[0]:
        self._buckets.pop(0)
        self._offset += 1

    # Pull item out of the front bucket.
    item = self._buckets[0].pop(0)
    priority = self._offset
    self._len -= 1

    if self._drop_set is not None:
        self._drop_set.remove((priority, item))

    # Note: do not eagerly clear out empty buckets after pulling the item!
    # Doing so increases the worst case complexity of "monotonic" use from
    # O(N+P) to O(N*P).

    return priority, item
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/_bucket_priority_queue.py#L132-L162
priority queue
python
def put(self, value, priority=100):
    """
    Put a task into the queue.

    Args:
        value (str): Task data.
        priority (int): An optional priority as an integer with at most
            3 digits. Lower values signify higher priority.
    """
    task_name = '{}{:03d}_{}'.format(self.TASK_PREFIX, priority, self._counter)
    path = posixpath.join(self._queue_path, task_name)
    self._client.kv[path] = value
https://github.com/billyshambrook/taskman/blob/7e293ce9ea89ec6fc7e8b5a687f02ec9d4ad235e/taskman/queue.py#L45-L56
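The '{:03d}' zero-padding above is what makes this work as a priority queue: ordered key-value stores return keys in lexicographic order, and padding makes that order match numeric priority. A quick illustration with plain string keys (the names here are made up):

names = ['task_100_7', 'task_005_2', 'task_050_1']
print(sorted(names))  # ['task_005_2', 'task_050_1', 'task_100_7']

# Without padding, '10' would sort before '5':
print(sorted(['task_10', 'task_5']))  # ['task_10', 'task_5']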
priority queue
python
def prioritize(self, item, force=False):
    """
    Moves the item to the very left of the queue.
    """
    with self.condition:
        # If the job is already running (or about to be forced),
        # there is nothing to be done.
        if item in self.working or item in self.force:
            return
        self.queue.remove(item)
        if force:
            self.force.append(item)
        else:
            self.queue.appendleft(item)
        self.condition.notify_all()
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/workqueue/pipeline.py#L132-L146
priority queue
python
def _wait_for_queue_space(self, timeout=DEFAULT_WAIT_TIME):
    """
    EXPECT THE self.lock TO BE HAD, WAITS FOR self.queue TO HAVE A LITTLE SPACE
    """
    wait_time = 5

    (DEBUG and len(self.queue) > 1 * 1000 * 1000) and Log.warning("Queue {{name}} has over a million items")

    now = time()
    if timeout != None:
        time_to_stop_waiting = now + timeout
    else:
        time_to_stop_waiting = now + DEFAULT_WAIT_TIME

    if self.next_warning < now:
        self.next_warning = now + wait_time

    while not self.closed and len(self.queue) >= self.max:
        if now > time_to_stop_waiting:
            Log.error(THREAD_TIMEOUT)

        if self.silent:
            self.lock.wait(Till(till=time_to_stop_waiting))
        else:
            self.lock.wait(Till(seconds=wait_time))

            if len(self.queue) >= self.max:
                now = time()
                if self.next_warning < now:
                    self.next_warning = now + wait_time
                    Log.alert(
                        "Queue by name of {{name|quote}} is full with ({{num}} items), thread(s) have been waiting {{wait_time}} sec",
                        name=self.name,
                        num=len(self.queue),
                        wait_time=wait_time
                    )
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_threads/queues.py#L144-L178
priority queue
python
def get_queue(self, vhost, name):
    """
    Get a single queue, which requires both vhost and name.

    :param string vhost: The virtual host for the queue being requested.
        If the vhost is '/', note that it will be translated to '%2F' to
        conform to URL encoding requirements.
    :param string name: The name of the queue being requested.
    :returns: A dictionary of queue properties.
    :rtype: dict
    """
    vhost = quote(vhost, '')
    name = quote(name, '')
    path = Client.urls['queues_by_name'] % (vhost, name)
    queue = self._call(path, 'GET')
    return queue
https://github.com/bkjones/pyrabbit/blob/e8a9f74ed5c6bba958994fb9a72c396e6a99ea0f/pyrabbit/api.py#L481-L497
priority queue
python
def declare_queue(self, name='', *, durable=True, exclusive=False,
                  auto_delete=False, passive=False, nowait=False,
                  arguments=None):
    """
    Declare a queue on the broker. If the queue does not exist, it will
    be created.

    This method is a :ref:`coroutine <coroutine>`.

    :param str name: the name of the queue.
        Supplying a name of '' will create a queue with a unique name of
        the server's choosing.
    :keyword bool durable: If true, the queue will be re-created when the
        server restarts.
    :keyword bool exclusive: If true, the queue can only be accessed by
        the current connection, and will be deleted when the connection
        is closed.
    :keyword bool auto_delete: If true, the queue will be deleted when the
        last consumer is cancelled. If there were never any consumers, the
        queue won't be deleted.
    :keyword bool passive: If true and a queue with such a name does not
        exist it will raise a :class:`exceptions.NotFound` instead of
        creating it. Arguments ``durable``, ``auto_delete`` and
        ``exclusive`` are ignored if ``passive=True``.
    :keyword bool nowait: If true, will not wait for a declare-ok to
        arrive.
    :keyword dict arguments: Table of optional parameters for extensions
        to the AMQP protocol. See :ref:`extensions`.

    :return: The new :class:`Queue` object.
    """
    q = yield from self.queue_factory.declare(
        name, durable, exclusive, auto_delete, passive, nowait,
        arguments if arguments is not None else {})
    return q
https://github.com/benjamin-hodgson/asynqp/blob/ea8630d1803d10d4fd64b1a0e50f3097710b34d1/src/asynqp/channel.py#L96-L123
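A usage sketch in the generator-coroutine style asynqp itself uses; the connection parameters and queue name here are assumptions for illustration:

import asyncio
import asynqp

@asyncio.coroutine
def setup():
    # Connect to a broker assumed to run on localhost:5672.
    connection = yield from asynqp.connect('localhost', 5672)
    channel = yield from connection.open_channel()
    # Declared durable, so the queue survives broker restarts.
    queue = yield from channel.declare_queue('example.queue', durable=True)
    return connection, channel, queue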
priority queue
python
def dequeue(self, destination):
    """
    Removes and returns an item from the queue (or C{None} if no items in queue).

    @param destination: The queue name (destination).
    @type destination: C{str}

    @return: The first frame in the specified queue, or C{None} if there are none.
    @rtype: C{stompclient.frame.Frame}
    """
    if not self.has_frames(destination):
        return None

    message_id = self.queue_metadata[destination]['frames'].pop()
    self.queue_metadata[destination]['dequeued'] += 1
    frame = self.frame_store[message_id]
    del self.frame_store[message_id]

    self._opcount += 1
    self._sync()

    return frame
https://github.com/hozn/coilmq/blob/76b7fcf347144b3a5746423a228bed121dc564b5/coilmq/store/dbm.py#L175-L197
priority queue
python
def queue_push(self, key, value, create=False, **kwargs):
    """
    Add an item to the end of a queue.

    :param key: The document ID of the queue
    :param value: The item to add to the queue
    :param create: Whether the queue should be created if it does not exist
    :param kwargs: Arguments to pass to :meth:`mutate_in`
    :return: :class:`OperationResult`
    :raise: :cb_exc:`NotFoundError` if the queue does not exist and
        `create` was not specified.

    example::

        # Ensure it's removed first
        cb.remove('a_queue')
        cb.queue_push('a_queue', 'job9999', create=True)
        cb.queue_pop('a_queue').value  # => job9999
    """
    return self.list_prepend(key, value, **kwargs)
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/bucket.py#L2383-L2403
priority queue
python
def enqueue(self, function, name=None, times=1, data=None):
    """
    Appends a function to the queue for execution. The times argument
    specifies the number of attempts if the function raises an exception.
    If the name argument is None it defaults to whatever id(function)
    returns.

    :type  function: callable
    :param function: The function that is executed.
    :type  name: str
    :param name: Stored in Job.name.
    :type  times: int
    :param times: The maximum number of attempts.
    :type  data: object
    :param data: Optional data to store in Job.data.
    :rtype:  int
    :return: The id of the new job.
    """
    self._check_if_ready()
    return self.main_loop.enqueue(function, name, times, data)
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/workqueue/workqueue.py#L120-L139
priority queue
python
def priority(self, item):
    """
    The priority of the item depends on the number of entries published
    in the cache divided by the maximum number of entries.
    """
    return '%.1f' % max(self.cache[item.pk][0] / self.max_entries, 0.1)
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/sitemaps.py#L96-L101
priority queue
python
def do_list_queue(self, line):
    """list_queue <peer>
    """

    def f(p, args):
        o = p.get()
        if o.resources.queue:
            for q in o.resources.queue:
                print('%s %s' % (q.resource_id, q.port))

    self._request(line, f)
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/cmd/of_config_cli.py#L302-L312
priority queue
python
def enqueue(self, s):
    """
    Append `s` to the queue.

    Equivalent to::

        queue += s

    if `queue` were a regular string.
    """
    self._parts.append(s)
    self._len += len(s)
https://github.com/pydron/anycall/blob/43add96660258a14b24aa8e8413dffb1741b72d7/anycall/bytequeue.py#L19-L30
priority queue
python
def _get_queue(config):
    '''
    Check the context for the notifier and construct it if not present
    '''
    if 'watchdog.observer' not in __context__:
        queue = collections.deque()
        observer = Observer()
        for path in config.get('directories', {}):
            path_params = config.get('directories').get(path)
            masks = path_params.get('mask', DEFAULT_MASK)
            event_handler = Handler(queue, masks)
            observer.schedule(event_handler, path)

        observer.start()

        __context__['watchdog.observer'] = observer
        __context__['watchdog.queue'] = queue

    return __context__['watchdog.queue']
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/watchdog.py#L80-L99
priority queue
python
def queues(self, page=None, per_page=None, previous=None, prefix=None):
    """Execute an HTTP request to get a list of queues and return it.

    Keyword arguments:
    page -- The 0-based page to get queues from. Defaults to None, which
            omits the parameter.
    """
    options = {}
    if page is not None:
        raise Exception('page param is deprecated!')
    if per_page is not None:
        options['per_page'] = per_page
    if previous is not None:
        options['previous'] = previous
    if prefix is not None:
        options['prefix'] = prefix

    query = urlencode(options)
    url = 'queues'
    if query != '':
        url = "%s?%s" % (url, query)
    result = self.client.get(url)
    return [queue['name'] for queue in result['body']['queues']]
https://github.com/iron-io/iron_mq_python/blob/d6a293f0d54b4ca2dca1c335f9867cd2310f6fc7/iron_mq.py#L318-L341
priority queue
python
def basic_get(self, queue='', no_ack=False, ticket=None):
    """
    direct access to a queue

    This method provides a direct access to the messages in a queue
    using a synchronous dialogue that is designed for specific types
    of application where synchronous functionality is more important
    than performance.

    PARAMETERS:
        queue: shortstr

            Specifies the name of the queue to consume from. If the
            queue name is null, refers to the current queue for the
            channel, which is the last declared queue.

            RULE:

                If the client did not previously declare a queue, and
                the queue name in this method is empty, the server
                MUST raise a connection exception with reply code 530
                (not allowed).

        no_ack: boolean

            no acknowledgement needed

            If this field is set the server does not expect
            acknowledgments for messages. That is, when a message is
            delivered to the client the server automatically and
            silently acknowledges it on behalf of the client. This
            functionality increases performance but at the cost of
            reliability. Messages can get lost if a client dies
            before it can deliver them to the application.

        ticket: short

            RULE:

                The client MUST provide a valid access ticket giving
                "read" access rights to the realm for the queue.

    Non-blocking, returns a message object, or None.
    """
    args = AMQPWriter()
    if ticket is not None:
        args.write_short(ticket)
    else:
        args.write_short(self.default_ticket)
    args.write_shortstr(queue)
    args.write_bit(no_ack)
    self._send_method((60, 70), args)
    return self.wait(allowed_methods=[
        (60, 71),  # Channel.basic_get_ok
        (60, 72),  # Channel.basic_get_empty
    ])
https://github.com/barryp/py-amqplib/blob/2b3a47de34b4712c111d0a55d7ff109dffc2a7b2/amqplib/client_0_8/channel.py#L2063-L2120
priority queue
python
async def queue_declare(self):
    """ Override this method to change how a queue is declared """
    await self.channel.queue_declare(
        self.queue,
        durable=self.durable,
        exclusive=self.exclusive,
        no_wait=self.no_wait
    )
https://github.com/wasp/waspy/blob/31cc352f300a089f9607d7f13d93591d4c69d5ec/waspy/listeners/rabbitmq_listener.py#L87-L94
priority queue
python
def declare_queue(self, queue_name): """Declare a queue. Has no effect if a queue with the given name already exists. Parameters: queue_name(str): The name of the new queue. Raises: ConnectionClosed: If the underlying channel or connection has been closed. """ attempts = 1 while True: try: if queue_name not in self.queues: self.emit_before("declare_queue", queue_name) self._declare_queue(queue_name) self.queues.add(queue_name) self.emit_after("declare_queue", queue_name) delayed_name = dq_name(queue_name) self._declare_dq_queue(queue_name) self.delay_queues.add(delayed_name) self.emit_after("declare_delay_queue", delayed_name) self._declare_xq_queue(queue_name) break except (pika.exceptions.AMQPConnectionError, pika.exceptions.AMQPChannelError) as e: # pragma: no cover # Delete the channel and the connection so that the next # caller may initiate new ones of each. del self.channel del self.connection attempts += 1 if attempts > MAX_DECLARE_ATTEMPTS: raise ConnectionClosed(e) from None self.logger.debug( "Retrying declare due to closed connection. [%d/%d]", attempts, MAX_DECLARE_ATTEMPTS, )
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/brokers/rabbitmq.py#L183-L224
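The retry loop above is an instance of a general declare-with-retry pattern; here is a self-contained sketch of that pattern, with the exception types, the attempt limit, and the callback names as stand-ins rather than dramatiq's real internals.

MAX_DECLARE_ATTEMPTS = 2  # stand-in for the broker's real limit

class ConnectionClosed(Exception):
    pass

def declare_with_retry(declare, reconnect):
    # Retry the declare, rebuilding the connection between attempts.
    attempts = 1
    while True:
        try:
            declare()
            return
        except ConnectionError as e:
            reconnect()  # drop and rebuild the channel/connection
            attempts += 1
            if attempts > MAX_DECLARE_ATTEMPTS:
                raise ConnectionClosed(e) from None

declare_with_retry(lambda: print('declared'), lambda: None)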
priority queue
python
def list_priority_class(self, **kwargs): """ list or watch objects of kind PriorityClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_priority_class(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1PriorityClassList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_priority_class_with_http_info(**kwargs) else: (data) = self.list_priority_class_with_http_info(**kwargs) return data
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/scheduling_v1_api.py#L475-L501
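A hedged usage sketch with the official kubernetes Python client; it assumes a reachable cluster and a local kubeconfig.

from kubernetes import client, config

config.load_kube_config()  # assumes ~/.kube/config points at a live cluster
api = client.SchedulingV1Api()
for pc in api.list_priority_class(limit=10).items:
    print(pc.metadata.name, pc.value)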
priority queue
python
def queue(users, label, extra_context=None, sender=None):
    """
    Queue the notification in NoticeQueueBatch. This allows for large
    amounts of user notifications to be deferred to a separate process
    running outside the webserver.
    """
    if extra_context is None:
        extra_context = {}
    if isinstance(users, QuerySet):
        users = [row["pk"] for row in users.values("pk")]
    else:
        users = [user.pk for user in users]
    notices = []
    for user in users:
        notices.append((user, label, extra_context, sender))
    NoticeQueueBatch(pickled_data=base64.b64encode(pickle.dumps(notices))).save()
https://github.com/GeoNode/geonode-notification/blob/c60bc28f16f5d0e62536e76c17d6944a79449ef1/notification/models.py#L198-L213
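The batch is just a base64-encoded pickle of (user, label, context, sender) tuples; a self-contained round-trip sketch, with plain integers standing in for Django model primary keys:

import base64
import pickle

# Encode a batch the same way the snippet does.
notices = [(1, 'new_comment', {}, None), (2, 'new_comment', {}, None)]
blob = base64.b64encode(pickle.dumps(notices))

# ...later, in the worker that drains NoticeQueueBatch, decode it back.
for user_pk, label, extra_context, sender in pickle.loads(base64.b64decode(blob)):
    print(user_pk, label)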
priority queue
python
def dispatch_queue(self): """ Dispatch any queued requests. Called by the debugger when it stops. """ self.queue_lock.acquire() q = list(self.queue) self.queue = [] self.queue_lock.release() log.debug("Dispatching requests: {}".format(q)) for req in q: req.response = self.dispatch_request(req) for req in q: req.signal()
https://github.com/snare/voltron/blob/4ee3cbe6f7c1e38303f5dc6114c48b60217253c3/voltron/core.py#L263-L277
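The lock-guarded snapshot-and-clear at the top of dispatch_queue is a standard way to drain a shared list; a minimal sketch of just that move:

import threading

queue_lock = threading.Lock()
queue = ['req1', 'req2']

with queue_lock:  # hold the lock only while swapping the list out
    pending, queue = list(queue), []

for req in pending:  # process outside the lock
    print('dispatching', req)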
priority queue
python
def list_(prefix='', region=None, key=None, keyid=None, profile=None): ''' Return a list of the names of all visible queues. .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt myminion boto_sqs.list region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) def extract_name(queue_url): # Note: this logic taken from boto, so should be safe return _urlparse(queue_url).path.split('/')[2] try: r = conn.list_queues(QueueNamePrefix=prefix) # The 'QueueUrls' attribute is missing if there are no queues urls = r.get('QueueUrls', []) return {'result': [extract_name(url) for url in urls]} except botocore.exceptions.ClientError as e: return {'error': __utils__['boto3.get_error'](e)}
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sqs.py#L179-L203
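With plain boto3 the same listing looks roughly like this; region and credentials are assumed to come from the environment, and the URL parsing mirrors the extract_name helper above.

import boto3
from urllib.parse import urlparse

sqs = boto3.client('sqs', region_name='us-east-1')
resp = sqs.list_queues()
# The 'QueueUrls' key is absent when there are no queues.
for url in resp.get('QueueUrls', []):
    print(urlparse(url).path.split('/')[2])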
priority queue
python
def packet_queue(self, pkt): """Enqueue packet to out_packet queue.""" pkt.pos = 0 pkt.to_process = pkt.packet_length self.out_packet.append(pkt) return NC.ERR_SUCCESS
https://github.com/iwanbk/nyamuk/blob/ac4c6028de288a4c8e0b332ae16eae889deb643d/nyamuk/base_nyamuk.py#L87-L94
priority queue
python
def get_queue(self, start=0, max_items=100, full_album_art_uri=False):
    """Get information about the queue.

    :param start: Starting number of returned matches
    :param max_items: Maximum number of returned matches
    :param full_album_art_uri: If the album art URI should include the
        IP address
    :returns: A :py:class:`~.soco.data_structures.Queue` object

    This method is heavily based on Sam Soffes (aka soffes) ruby
    implementation
    """
    queue = []
    response = self.contentDirectory.Browse([
        ('ObjectID', 'Q:0'),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    result = response['Result']

    metadata = {}
    for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
        metadata[camel_to_underscore(tag)] = int(response[tag])

    # I'm not sure this is necessary (any more). Even with an empty
    # queue, there is still a result object. This should be investigated.
    if not result:
        # pylint: disable=star-args
        return Queue(queue, **metadata)

    items = from_didl_string(result)
    for item in items:
        # Check if the album art URI should be fully qualified
        if full_album_art_uri:
            self.music_library._update_album_art_to_full_uri(item)
        queue.append(item)

    # pylint: disable=star-args
    return Queue(queue, **metadata)
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/core.py#L1317-L1358
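Typical use of this method through the public SoCo API might look as follows; the speaker IP is a placeholder for a real Sonos device on your LAN, and the attribute names follow the metadata keys built in the snippet above.

from soco import SoCo

speaker = SoCo('192.168.1.42')  # placeholder IP
queue = speaker.get_queue(start=0, max_items=10)
for item in queue:
    print(item.title)
print('total matches:', queue.total_matches)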
priority queue
python
def priority_sort(list_, priority):
    r"""
    Args:
        list_ (list):
        priority (list): desired order of items

    Returns:
        list: reordered_list

    CommandLine:
        python -m utool.util_list --test-priority_sort

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> list_ = [2, 4, 6, 8, 10]
        >>> priority = [8, 2, 6, 9]
        >>> reordered_list = priority_sort(list_, priority)
        >>> result = str(reordered_list)
        >>> print(result)
        [8, 2, 6, 4, 10]
    """
    # remove requested priority items not in the list
    priority_ = setintersect_ordered(priority, list_)
    reordered_list = unique_ordered(priority_ + list_)
    return reordered_list
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1200-L1225
priority queue
python
def fix_queue_critical(self): """ This function tries to fix critical events originating from the queue submission system. Returns the number of tasks that have been fixed. """ count = 0 for task in self.iflat_tasks(status=self.S_QCRITICAL): logger.info("Will try to fix task %s" % str(task)) try: print(task.fix_queue_critical()) count += 1 except FixQueueCriticalError: logger.info("Not able to fix task %s" % task) return count
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/flows.py#L815-L830
priority queue
python
from queue import Empty  # get_nowait() raises queue.Empty when drained


def dump_queue(queue):
    """
    Empties all pending items in a queue and returns them in a list.
    """
    result = []
    try:
        while True:
            item = queue.get_nowait()
            result.append(item)
    except Empty:
        pass
    return result
https://github.com/egineering-llc/egat/blob/63a172276b554ae1c7d0f13ba305881201c49d55/egat/loggers/html_logger.py#L267-L279
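Usage with the standard-library queue module, assuming the fixed dump_queue above is in scope:

from queue import Queue

q = Queue()
for i in range(3):
    q.put(i)
assert dump_queue(q) == [0, 1, 2]  # drained without blocking
assert dump_queue(q) == []         # a second call finds nothing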
priority queue
python
def enqueue(self, destination): """Enqueues given destination for processing. Given instance should be a valid destination. """ if not destination: raise BgpProcessorError('Invalid destination %s.' % destination) dest_queue = self._dest_queue # RtDest are queued in a separate queue if destination.route_family == RF_RTC_UC: dest_queue = self._rtdest_queue # We do not add given destination to the queue for processing if # it is already on the queue. if not dest_queue.is_on_list(destination): dest_queue.append(destination) # Wake-up processing thread if sleeping. self.dest_que_evt.set()
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/processor.py#L136-L155
priority queue
python
def consume_queue(queue, cascade_stop):
    """Consume the queue by reading lines off of it and yielding them."""
    while True:
        try:
            item = queue.get(timeout=0.1)
        except Empty:
            yield None
            continue
        # See https://github.com/docker/compose/issues/189
        except thread.error:
            raise ShutdownException()

        if item.exc:
            raise item.exc

        if item.is_stop:
            if cascade_stop:
                # `return` ends the generator cleanly; raising
                # StopIteration inside a generator is a RuntimeError
                # under PEP 479 (Python 3.7+).
                return
            else:
                continue

        yield item.item
https://github.com/openai/universe/blob/cc9ce6ec241821bfb0f3b85dd455bd36e4ee7a8c/universe/remotes/compose/log_printer.py#L217-L238
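The `yield None` on timeout acts as a heartbeat, letting the caller do periodic work while waiting. A self-contained version of that pattern, with a None sentinel standing in for the stop item:

import queue
import threading

def consume(q):
    # Heartbeat-style consumption, as in consume_queue above.
    while True:
        try:
            item = q.get(timeout=0.1)
        except queue.Empty:
            yield None  # heartbeat tick
            continue
        if item is None:  # sentinel: producer is done
            return
        yield item

q = queue.Queue()
threading.Thread(target=lambda: [q.put(x) for x in ('a', 'b', None)]).start()
for item in consume(q):
    if item is None:
        continue  # ignore heartbeat ticks
    print(item)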
priority queue
python
def _get_pausable_id(self): """ Get the queue id (either id or root_id) that should be used to pause/unpause the current queue TODO: handle subqueues with more than one level, e.g. "queue/subqueue/" """ queue = self.id if self.id.endswith("/"): queue = self.root_id return queue
https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue.py#L116-L124
priority queue
python
def queue(self): """The queue property. Return rq.Queue instance.""" if not self.include_rq: return None ctx = stack.top if ctx is not None: if not hasattr(ctx, 'redislite_queue'): ctx.redislite_queue = {} for queue_name in self.queues: ctx.redislite_queue[queue_name] = \ Queue(queue_name, connection=self.connection) return ctx.redislite_queue
https://github.com/ViiSiX/FlaskRedislite/blob/01bc9fbbeb415aac621c7a9cc091a666e728e651/flask_redislite.py#L161-L174
priority queue
python
def queue(celery_arguments):
    """Start the queue service. [Work in progress]"""
    if not app.celery:
        return click.echo(
            click.style('No celery config found, skip start...', fg='yellow'))

    celery = app.celery
    celery.autodiscover_tasks()

    argv = celery_arguments.split()
    argv.insert(0, 'worker')
    argv.insert(0, 'Queue')
    celery.worker_main(argv)
https://github.com/wangwenpei/fantasy/blob/0fe92059bd868f14da84235beb05b217b1d46e4a/fantasy/cli.py#L321-L335
priority queue
python
def _all_queue_names(self): """ Return a list of all unique queue names in our config. :return: list of all queue names (str) :rtype: :std:term:`list` """ queues = set() endpoints = self.config.get('endpoints') for e in endpoints: for q in endpoints[e]['queues']: queues.add(q) return sorted(queues)
https://github.com/jantman/webhook2lambda2sqs/blob/c80c18d5a908ba8b8ee624dc3a977c633fba2b7c/webhook2lambda2sqs/aws.py#L231-L243
priority queue
python
def _convert_priority(p_priority): """ Converts todo.txt priority to an iCalendar priority (RFC 2445). Priority A gets priority 1, priority B gets priority 5 and priority C-F get priorities 6-9. This scheme makes sure that clients that use "high", "medium" and "low" show the correct priority. """ result = 0 prio_map = { 'A': 1, 'B': 5, 'C': 6, 'D': 7, 'E': 8, 'F': 9, } try: result = prio_map[p_priority] except KeyError: if p_priority: # todos with no priority have priority None, and result of this # function will be 0. For all other letters, return 9 (lowest # priority in RFC 2445). result = 9 return result
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/printers/Ical.py#L29-L57
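A few spot-checks of the mapping, assuming the function above is in scope: A maps to 1, B to 5, C through F to 6 through 9, any other letter to 9, and a missing priority to 0.

assert _convert_priority('A') == 1
assert _convert_priority('B') == 5
assert _convert_priority('F') == 9
assert _convert_priority('Z') == 9   # unknown letter: lowest RFC 2445 priority
assert _convert_priority(None) == 0  # no priority at all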
priority queue
python
def get_queue_sizes(self, queue): """ Get the queue's number of tasks in each state. Returns dict with queue size for the QUEUED, SCHEDULED, and ACTIVE states. Does not include size of error queue. """ states = [QUEUED, SCHEDULED, ACTIVE] pipeline = self.connection.pipeline() for state in states: pipeline.zcard(self._key(state, queue)) results = pipeline.execute() return dict(zip(states, results))
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/__init__.py#L350-L363
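The same pipelined ZCARD pattern with plain redis-py; the key layout here is illustrative, not tasktiger's real key scheme. Batching the three ZCARDs into one pipeline costs a single network round-trip.

import redis

r = redis.Redis()
states = ['queued', 'scheduled', 'active']

pipe = r.pipeline()
for state in states:
    pipe.zcard('t:%s:email' % state)  # illustrative key layout
print(dict(zip(states, pipe.execute())))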
priority queue
python
def command_queue_worker(self, command_queue): """Process commands in command queues. """ while True: try: # set timeout to ensure self.stopping is checked periodically command, data = command_queue.get(timeout=3) try: self.process_command(command, data) except Exception as e: _logger.exception(e) self.worker_exceptions.append(e) break except Empty: pass if self.stopping and (_worker_fast_exit_on_terminate or command_queue.empty()): break
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/msg_dispatcher_base.py#L94-L110
priority queue
python
def queue_put_stoppable(self, q, obj): """ Put obj to queue, but will give up when the thread is stopped""" while not self.stopped(): try: q.put(obj, timeout=5) break except queue.Full: pass
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/concurrency.py#L59-L66
priority queue
python
def _process_queue(self):
    '''
    If there are any messages in the queue, process one of them.
    '''
    if self._queue:
        args, kwargs = self._queue.popleft()
        self.publish(*args, **kwargs)
https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel_pool.py#L74-L80
priority queue
python
def enqueue(self, payload, interval, job_id,
            queue_id, queue_type='default', requeue_limit=None):
    """Enqueues the job into the specified queue_id
    of a particular queue_type
    """
    # validate all the input
    if not is_valid_interval(interval):
        raise BadArgumentException('`interval` has an invalid value.')

    if not is_valid_identifier(job_id):
        raise BadArgumentException('`job_id` has an invalid value.')

    if not is_valid_identifier(queue_id):
        raise BadArgumentException('`queue_id` has an invalid value.')

    if not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')

    if requeue_limit is None:
        requeue_limit = self._default_job_requeue_limit

    if not is_valid_requeue_limit(requeue_limit):
        raise BadArgumentException('`requeue_limit` has an invalid value.')

    try:
        serialized_payload = serialize_payload(payload)
    except TypeError as e:
        # str(e) works on both Python 2 and 3; e.message is Python 2 only.
        raise BadArgumentException(str(e))

    timestamp = str(generate_epoch())

    keys = [
        self._key_prefix,
        queue_type
    ]
    args = [
        timestamp,
        queue_id,
        job_id,
        '"%s"' % serialized_payload,
        interval,
        requeue_limit
    ]

    self._lua_enqueue(keys=keys, args=args)

    response = {
        'status': 'queued'
    }
    return response
https://github.com/plivo/sharq/blob/32bbfbdcbbaa8e154271ffd125ac4500382f3d19/sharq/queue.py#L128-L178
priority queue
python
def _queue_declare_ok(self, args): """ confirms a queue definition This method confirms a Declare method and confirms the name of the queue, essential for automatically-named queues. PARAMETERS: queue: shortstr Reports the name of the queue. If the server generated a queue name, this field contains that name. message_count: long number of messages in queue Reports the number of messages in the queue, which will be zero for newly-created queues. consumer_count: long number of consumers Reports the number of active consumers for the queue. Note that consumers can suspend activity (Channel.Flow) in which case they do not appear in this count. """ queue = args.read_shortstr() message_count = args.read_long() consumer_count = args.read_long() return queue, message_count, consumer_count
https://github.com/barryp/py-amqplib/blob/2b3a47de34b4712c111d0a55d7ff109dffc2a7b2/amqplib/client_0_8/channel.py#L1385-L1419
priority queue
python
def needs_low_priority(self, priority): """ :return: None """ assert isinstance(priority, int) if priority != velbus.LOW_PRIORITY: self.parser_error("needs low priority set")
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/message.py#L195-L201
priority queue
python
def _queue_declare_ok(self, args): """Confirms a queue definition This method confirms a Declare method and confirms the name of the queue, essential for automatically-named queues. PARAMETERS: queue: shortstr Reports the name of the queue. If the server generated a queue name, this field contains that name. message_count: long number of messages in queue Reports the number of messages in the queue, which will be zero for newly-created queues. consumer_count: long number of consumers Reports the number of active consumers for the queue. Note that consumers can suspend activity (Channel.Flow) in which case they do not appear in this count. """ return queue_declare_ok_t( args.read_shortstr(), args.read_long(), args.read_long(), )
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L1262-L1295
priority queue
python
def put_nowait(self, item: _T) -> None: """Put an item into the queue without blocking. If no free slot is immediately available, raise `QueueFull`. """ self._consume_expired() if self._getters: assert self.empty(), "queue non-empty, why are getters waiting?" getter = self._getters.popleft() self.__put_internal(item) future_set_result_unless_cancelled(getter, self._get()) elif self.full(): raise QueueFull else: self.__put_internal(item)
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/queues.py#L209-L223
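Typical behavior of put_nowait on a bounded tornado queue: with no waiting getters and no free slot, it raises QueueFull immediately rather than blocking.

from tornado.queues import Queue, QueueFull

q = Queue(maxsize=2)
q.put_nowait('a')
q.put_nowait('b')
try:
    q.put_nowait('c')  # no free slot and no waiting getters
except QueueFull:
    print('queue is full')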
priority queue
python
def queue_exists(self, queue):
    """Check if a queue has been declared.

    :rtype bool:

    """
    try:
        self.channel.queue_declare(queue=queue, passive=True)
    except AMQPChannelException as e:
        if e.amqp_reply_code == 404:
            return False
        raise  # re-raise with the original traceback
    else:
        return True
https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/backends/pyamqplib.py#L221-L234
priority queue
python
async def _queue(self, ctx, page: int = 1): """ Shows the player's queue. """ player = self.bot.lavalink.players.get(ctx.guild.id) if not player.queue: return await ctx.send('There\'s nothing in the queue! Why not queue something?') items_per_page = 10 pages = math.ceil(len(player.queue) / items_per_page) start = (page - 1) * items_per_page end = start + items_per_page queue_list = '' for index, track in enumerate(player.queue[start:end], start=start): queue_list += f'`{index + 1}.` [**{track.title}**]({track.uri})\n' embed = discord.Embed(colour=discord.Color.blurple(), description=f'**{len(player.queue)} tracks**\n\n{queue_list}') embed.set_footer(text=f'Viewing page {page}/{pages}') await ctx.send(embed=embed)
https://github.com/Devoxin/Lavalink.py/blob/63f55c3d726d24c4cfd3674d3cd6aab6f5be110d/examples/music-v2.py#L156-L176
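The paging arithmetic used above, shown standalone so it can be checked in isolation:

import math

def page_window(total, page, items_per_page=10):
    # Same math as the command above: page count plus slice bounds.
    pages = math.ceil(total / items_per_page)
    start = (page - 1) * items_per_page
    end = start + items_per_page
    return pages, start, end

assert page_window(25, 1) == (3, 0, 10)
assert page_window(25, 3) == (3, 20, 30)  # the slice simply runs past the end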
priority queue
python
def process_priority(self, process_priority): """ Sets the process priority. :param process_priority: string """ log.info('QEMU VM "{name}" [{id}] has set the process priority to {priority}'.format(name=self._name, id=self._id, priority=process_priority)) self._process_priority = process_priority
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/qemu/qemu_vm.py#L629-L639
priority queue
python
def list_queue(self, embed_last_unused_offers=False): """List all the tasks queued up or waiting to be scheduled. :returns: list of queue items :rtype: list[:class:`marathon.models.queue.MarathonQueueItem`] """ if embed_last_unused_offers: params = {'embed': 'lastUnusedOffers'} else: params = {} response = self._do_request('GET', '/v2/queue', params=params) return self._parse_response(response, MarathonQueueItem, is_list=True, resource_name='queue')
https://github.com/thefactory/marathon-python/blob/592b253aa8edf2475c97ca438ad7b6936652caf2/marathon/client.py#L687-L698
priority queue
python
def get_top_priority(self): """Pops the element that has the top (smallest) priority. :returns: element with the top (smallest) priority. :raises: IndexError -- Priority queue is empty. """ if self.is_empty(): raise IndexError("Priority queue is empty.") _, _, element = heapq.heappop(self.pq) if element in self.element_finder: del self.element_finder[element] return element
https://github.com/Murali-group/halp/blob/6eb27466ba84e2281e18f93b62aae5efb21ef8b3/halp/utilities/priority_queue.py#L54-L66
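The heap layout this relies on is easy to show in miniature: a minimal (priority, counter, element) queue, where the monotonically increasing counter breaks ties between equal priorities so elements never need to be comparable themselves.

import heapq
import itertools

pq, counter = [], itertools.count()

def push(priority, element):
    heapq.heappush(pq, (priority, next(counter), element))

def pop_top_priority():
    if not pq:
        raise IndexError("Priority queue is empty.")
    _, _, element = heapq.heappop(pq)
    return element

push(5, 'low')
push(1, 'urgent')
push(3, 'normal')
assert pop_top_priority() == 'urgent'
assert pop_top_priority() == 'normal'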
priority queue
python
def get_queue(self, name=None): """ Returns an RQ queue instance with the given name, e.g.:: default_queue = rq.get_queue() low_queue = rq.get_queue('low') :param name: Name of the queue to return, defaults to :attr:`~flask_rq2.RQ.default_queue`. :type name: str :return: An RQ queue instance. :rtype: ``rq.queue.Queue`` """ if not name: name = self.default_queue queue = self._queue_instances.get(name) if queue is None: queue_cls = import_attribute(self.queue_class) queue = queue_cls( name=name, default_timeout=self.default_timeout, is_async=self._is_async, connection=self.connection, job_class=self.job_class ) self._queue_instances[name] = queue return queue
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/app.py#L339-L365
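Outside Flask, the equivalent raw RQ usage looks roughly like this; a local Redis is assumed, and 'low' mirrors the named-queue example in the docstring above.

from redis import Redis
from rq import Queue

low_queue = Queue('low', connection=Redis())
# Any importable callable works; a dotted-path string avoids pickling issues.
job = low_queue.enqueue('time.sleep', 1)
print(job.id, job.get_status())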