sentence1 · stringlengths · 52 – 3.87M
sentence2 · stringlengths · 1 – 47.2k
label · stringclasses · 1 value
async def stoplisten(self, vhost=None):
    '''
    Stop listening on current servers

    :param vhost: stop only servers of vhost if specified. '' to stop only default servers.
                  None for all servers.
    '''
    servers = self.getservers(vhost)
    for s in servers:
        await s.stoplisten()
    return len(servers)
Stop listening on current servers :param vhost: stop only servers of vhost if specified. '' to stop only default servers. None for all servers.
entailment
async def startlisten(self, vhost=None):
    '''
    Start listening on current servers

    :param vhost: start only servers of vhost if specified. '' to start only default servers.
                  None for all servers.
    '''
    servers = self.getservers(vhost)
    for s in servers:
        await s.startlisten()
    return len(servers)
Start listening on current servers :param vhost: start only servers of vhost if specified. '' to start only default servers. None for all servers.
entailment
async def updateconfig(self):
    "Reload configurations: remove non-existent servers, add new servers, and leave the others unchanged"
    exists = {}
    for s in self.connections:
        exists[(s.protocol.vhost, s.rawurl)] = s
    self._createServers(self, '', exists=exists)
    for _, v in exists.items():
        await v.shutdown()
        self.connections.remove(v)
Reload configurations: remove non-existent servers, add new servers, and leave the others unchanged
entailment
def getconnections(self, vhost=None):
    "Return accepted connections, optionally filtered by vhost"
    if vhost is None:
        return list(self.managed_connections)
    else:
        return [c for c in self.managed_connections if c.protocol.vhost == vhost]
Return accepted connections, optionally filtered by vhost
entailment
async def _dhcp_handler(self):
    """
    Mini DHCP server: respond to DHCP packets received through OpenFlow
    """
    conn = self._connection
    ofdef = self._connection.openflowdef
    l3 = self._parent._gettableindex('l3input', self._connection.protocol.vhost)
    dhcp_packet_matcher = OpenflowAsyncMessageEvent.createMatcher(ofdef.OFPT_PACKET_IN, None, None,
                                                                  l3, 1, self._connection,
                                                                  self._connection.connmark)
    # These tags are important options. They are sent first to make sure the client
    # correctly receives these options.
    required_tags = [d.OPTION_MESSAGE_TYPE, d.OPTION_SERVER_IDENTIFIER, d.OPTION_NETMASK,
                     d.OPTION_ROUTER, d.OPTION_DNSSERVER, d.OPTION_BROADCAST, d.OPTION_MTU,
                     d.OPTION_LEASE_TIME, d.OPTION_T1, d.OPTION_T2]
    server_mac = mac_addr(self._parent.servermac)
    # IP fragment identifier
    trans_id = uint16.create(os.urandom(2))

    def set_options(payload, option_dict, provide_options, message_type, remove_lease=False):
        """
        Set DHCP options on the output payload regarding the incoming request

        :param payload: output DHCP payload
        :param option_dict: incoming DHCP options in the request
        :param provide_options: all DHCP options that are ready to be sent to the client
        :param message_type: output DHCP message type
        :param remove_lease: remove all lease options. A DHCPINFORM reply cannot contain
                             lease options. See https://tools.ietf.org/html/rfc2131#section-3.4
        """
        message_type_opt = d.dhcp_option_message_type(value=message_type)
        if d.OPTION_REQUESTED_OPTIONS in option_dict:
            # First requested options, then required options, then others
            reqs = set(option_dict[d.OPTION_REQUESTED_OPTIONS].value)
            send_tags = [t for t in option_dict[d.OPTION_REQUESTED_OPTIONS].value
                         if t == d.OPTION_MESSAGE_TYPE or t in provide_options] \
                        + [t for t in required_tags
                           if (t in provide_options or t == d.OPTION_MESSAGE_TYPE) and t not in reqs] \
                        + [t for t in provide_options if t not in reqs and t not in required_tags]
        else:
            # Required options, then others
            send_tags = [t for t in required_tags if t in provide_options or t == d.OPTION_MESSAGE_TYPE] \
                        + [t for t in set(provide_options.keys()).difference(required_tags)]
        # If the client has sent an option for max message size, use it; otherwise use
        # the RFC-required 576 bytes
        not_finished = d.build_options(
            payload,
            [message_type_opt if t == d.OPTION_MESSAGE_TYPE else provide_options[t]
             for t in send_tags
             if not remove_lease or (t != d.OPTION_LEASE_TIME and t != d.OPTION_T1 and t != d.OPTION_T2)],
            max(min(option_dict[d.OPTION_MAX_MESSAGE_SIZE].value, 1400), 576)
            if d.OPTION_MAX_MESSAGE_SIZE in option_dict else 576)

    async def send_packet(pid, packet):
        """
        Send DHCP packet to the specified port
        """
        await self.execute_commands(conn, [ofdef.ofp_packet_out(
            buffer_id=ofdef.OFP_NO_BUFFER,
            in_port=ofdef.OFPP_CONTROLLER,
            actions=[ofdef.ofp_action_output(port=pid, max_len=ofdef.OFPCML_NO_BUFFER)],
            data=packet._tobytes()
        )])

    while True:
        ev = await dhcp_packet_matcher
        msg = ev.message
        try:
            in_port = ofdef.ofp_port_no.create(ofdef.get_oxm(msg.match.oxm_fields, ofdef.OXM_OF_IN_PORT))
            if in_port not in self._dhcpentries:
                # Not a DHCP-enabled port
                continue
            port_mac, port_ip, server_ip, provide_options = self._dhcpentries[in_port]
            # Fragmented DHCP packets are not supported - this is allowed according
            # to the RFC: servers and clients are only required to support at least
            # 576-byte DHCP messages.
            l7_packet = ethernet_l7.create(msg.data)
            dhcp_packet = d.dhcp_payload.create(l7_packet.data)
            # We only process a DHCP request directly sent from the logical port
            if (dhcp_packet.op != d.BOOTREQUEST                          # A DHCP server packet
                    or dhcp_packet.hlen != 6 or dhcp_packet.htype != 1   # Hardware address not ethernet (48-bit)
                    or dhcp_packet.magic_cookie != d.BOOTP_MAGIC_COOKIE  # Magic cookie does not match
                    or dhcp_packet.giaddr != 0):                         # A relayed DHCP message
                raise ValueError('Unsupported DHCP packet')
            # Reassemble DHCP options
            options = d.reassemble_options(dhcp_packet)
            option_dict = dict((o.tag, o) for o in options)
            if d.OPTION_MESSAGE_TYPE not in option_dict:
                raise ValueError('Message type not found')
            message_type = option_dict[d.OPTION_MESSAGE_TYPE].value
            is_nak = False
            if message_type == d.DHCPDISCOVER:
                # A DHCPDISCOVER should get a DHCPOFFER response
                if dhcp_packet.chaddr[:6].ljust(6, b'\x00') != mac_addr.tobytes(port_mac):
                    # MAC address does not match, ignore this packet
                    continue
                dhcp_reply = d.dhcp_payload(op=d.BOOTREPLY, htype=1, hlen=6, hops=0,
                                            xid=dhcp_packet.xid, secs=0, flags=dhcp_packet.flags,
                                            ciaddr=0, yiaddr=port_ip, siaddr=0,
                                            giaddr=dhcp_packet.giaddr, chaddr=dhcp_packet.chaddr,
                                            magic_cookie=d.BOOTP_MAGIC_COOKIE)
                set_options(dhcp_reply, option_dict, provide_options, d.DHCPOFFER)
            elif message_type == d.DHCPREQUEST:
                # A DHCPREQUEST should get a DHCPACK reply
                if d.OPTION_SERVER_IDENTIFIER in option_dict \
                        and option_dict[d.OPTION_SERVER_IDENTIFIER].value != server_ip:
                    # Ignore packets to a wrong address
                    continue
                if dhcp_packet.chaddr[:6].ljust(6, b'\x00') != mac_addr.tobytes(port_mac) \
                        or (d.OPTION_REQUESTED_IP in option_dict
                            and option_dict[d.OPTION_REQUESTED_IP].value != port_ip) \
                        or (dhcp_packet.ciaddr != 0 and dhcp_packet.ciaddr != port_ip):
                    # Requested MAC or IP does not match, send a NAK
                    dhcp_reply = d.dhcp_payload(op=d.BOOTREPLY, htype=1, hlen=6, hops=0,
                                                xid=dhcp_packet.xid, secs=0, flags=dhcp_packet.flags,
                                                ciaddr=0, yiaddr=0, siaddr=0,
                                                giaddr=dhcp_packet.giaddr, chaddr=dhcp_packet.chaddr,
                                                magic_cookie=d.BOOTP_MAGIC_COOKIE)
                    # Do not send more options in a NAK
                    d.build_options(dhcp_reply,
                                    [d.dhcp_option_message_type(value=d.DHCPNAK),
                                     d.dhcp_option_address(tag=d.OPTION_SERVER_IDENTIFIER,
                                                           value=server_ip)],
                                    576, 0)
                    is_nak = True
                else:
                    dhcp_reply = d.dhcp_payload(op=d.BOOTREPLY, htype=1, hlen=6, hops=0,
                                                xid=dhcp_packet.xid, secs=0, flags=dhcp_packet.flags,
                                                ciaddr=dhcp_packet.ciaddr, yiaddr=port_ip, siaddr=0,
                                                giaddr=dhcp_packet.giaddr, chaddr=dhcp_packet.chaddr,
                                                magic_cookie=d.BOOTP_MAGIC_COOKIE)
                    set_options(dhcp_reply, option_dict, provide_options, d.DHCPACK)
            elif message_type == d.DHCPDECLINE:
                # Address already in use?
                self._logger.warning('DHCP client reports DHCPDECLINE, there may be a problem.'
                                     ' Connection = %r(%016x), port = %d.',
                                     self._connection, self._connection.openflow_datapathid, in_port)
                # No reply is needed
                continue
            elif message_type == d.DHCPRELEASE:
                # Safe to ignore, we do not use a dynamic IP address pool
                continue
            elif message_type == d.DHCPINFORM:
                # Client sets up IP addresses itself, but requests more information.
                # A DHCPINFORM reply cannot have lease options, and yiaddr = 0
                dhcp_reply = d.dhcp_payload(op=d.BOOTREPLY, htype=1, hlen=6, hops=0,
                                            xid=dhcp_packet.xid, secs=0, flags=dhcp_packet.flags,
                                            ciaddr=dhcp_packet.ciaddr, yiaddr=0, siaddr=0,
                                            giaddr=dhcp_packet.giaddr, chaddr=dhcp_packet.chaddr,
                                            magic_cookie=d.BOOTP_MAGIC_COOKIE)
                set_options(dhcp_reply, option_dict, provide_options, d.DHCPACK, True)
            else:
                # Unknown message type; ignore
                continue
            trans_id = (trans_id + 1) & 0xffff
            if (dhcp_packet.flags & d.DHCPFLAG_BROADCAST) or is_nak:
                # Client requested broadcast, or DHCPNAK.
                # The RFC requires that a DHCPNAK uses broadcast
                dl_dst = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff]
                ip_dst = 0xffffffff
            else:
                dl_dst = l7_packet.dl_src
                ip_dst = port_ip
            reply_packet = ip4_packet_l7((ip4_payload, ip4_udp_payload),
                                         dl_src=server_mac, dl_dst=dl_dst,
                                         identifier=trans_id, ttl=128,
                                         ip_src=server_ip, ip_dst=ip_dst,
                                         sport=67, dport=68,
                                         data=dhcp_reply._tobytes())
            # Send packet to the incoming port
            self.subroutine(send_packet(in_port, reply_packet), True)
        except Exception:
            self._logger.info('Invalid DHCP packet received: %r', msg.data, exc_info=True)
Mini DHCP server: respond to DHCP packets received through OpenFlow
entailment
def return_self_updater(func):
    '''
    Run func, but still return v. Useful for using knowledge.update with operations
    like append, extend, etc.

    e.g. return_self_updater(lambda k,v: v.append('newobj'))
    '''
    @functools.wraps(func)
    def decorator(k, v):
        func(k, v)
        return v
    return decorator
Run func, but still return v. Useful for using knowledge.update with operations like append, extend, etc. e.g. return_self_updater(lambda k,v: v.append('newobj'))
entailment
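A minimal usage sketch, assuming a knowledge.update-style API that calls updater(key, value) and stores whatever it returns; the store and update helper here are illustrative stand-ins, not part of vlcp:

import functools

def update(store, key, updater_func):
    # Illustrative stand-in for knowledge.update: store the updater's return value
    store[key] = updater_func(key, store[key])

store = {'peers': ['a']}
# list.append returns None; without the wrapper the stored value would become None
update(store, 'peers', return_self_updater(lambda k, v: v.append('b')))
assert store['peers'] == ['a', 'b']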
def date_time_string(timestamp=None):
    """Return the current date and time formatted for a message header."""
    global _last_date_time_string
    _last_timestamp, _last_str = _last_date_time_string
    if timestamp is None:
        timestamp = time.time()
    _curr_timestamp = int(timestamp)
    if _curr_timestamp == _last_timestamp:
        return _last_str
    else:
        year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
        s = b"%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
            weekdayname[wd], day, monthname[month], year, hh, mm, ss)
        _last_date_time_string = (_curr_timestamp, s)
        return s
Return the current date and time formatted for a message header.
entailment
def escape_b(s, quote=True):
    '''Replace special characters "&", "<" and ">" with HTML-safe sequences.
    If the optional flag quote is true, the quotation mark character (") is
    also translated.'''
    s = s.replace(b"&", b"&amp;")  # Must be done first!
    s = s.replace(b"<", b"&lt;")
    s = s.replace(b">", b"&gt;")
    if quote:
        s = s.replace(b'"', b"&quot;")
    return s
Replace special characters "&", "<" and ">" with HTML-safe sequences. If the optional flag quote is true, the quotation mark character (") is also translated.
entailment
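A quick check of the escaping order (the ampersand is replaced first so that the inserted entities are not themselves re-escaped):

assert escape_b(b'<a href="x">&') == b'&lt;a href=&quot;x&quot;&gt;&amp;'
assert escape_b(b'a & b', quote=False) == b'a &amp; b'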
def watch_context(keys, result, reqid, container, module='objectdb'):
    """
    DEPRECATED - use request_context for most use cases
    """
    try:
        keys = [k for k, r in zip(keys, result) if r is not None]
        yield result
    finally:
        if keys:
            async def clearup():
                try:
                    await send_api(container, module, 'munwatch',
                                   {'keys': keys, 'requestid': reqid})
                except QuitException:
                    pass
            container.subroutine(clearup(), False)
DEPRECATED - use request_context for most use cases
entailment
def updater(f):
    "Decorate a function with named arguments into an updater for transact"
    @functools.wraps(f)
    def wrapped_updater(keys, values):
        result = f(*values)
        return (keys[:len(result)], result)
    return wrapped_updater
Decorate a function with named arguments into an updater for transact
entailment
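A small sketch of the calling convention assumed by transact-style code: the wrapped function receives the current values positionally and returns the new values, and the wrapper pairs them back with the keys:

import functools

@updater
def swap(a, b):
    # Receives current values, returns the list of new values
    return [b, a]

keys, values = swap(('key.a', 'key.b'), (1, 2))
assert keys == ('key.a', 'key.b') and values == [2, 1]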
def list_updater(*args):
    """
    Decorate a function with named lists into an updater for transact.

    :param \*args: parameter list sizes. -1 means all other items; None means a single item
                   instead of a list. Only one -1 is allowed.
    """
    neg_index = [i for v, i in izip(args, itertools.count()) if v is not None and v < 0]
    if len(neg_index) > 1:
        raise ValueError("Cannot use negative values more than once")
    if not neg_index:
        slice_list = []
        size = 0
        for arg in args:
            if arg is None:
                slice_list.append(size)
                size += 1
            else:
                slice_list.append(slice(size, size + arg))
                size += arg
    else:
        sep = neg_index[0]
        slice_list = []
        size = 0
        for arg in args[:sep]:
            if arg is None:
                slice_list.append(size)
                size += 1
            else:
                slice_list.append(slice(size, size + arg))
                size += arg
        rslice_list = []
        rsize = 0
        for arg in args[:sep:-1]:
            if arg is None:
                rslice_list.append(-1 - rsize)
                rsize += 1
            else:
                rslice_list.append(slice(None if not rsize else -rsize, -(rsize + arg)))
                rsize += arg
        slice_list.append(slice(size, rsize))
        slice_list.extend(reversed(rslice_list))

    def inner_wrapper(f):
        @functools.wraps(f)
        def wrapped_updater(keys, values):
            result = f(*[values[s] for s in slice_list])
            return (keys[:len(result)], result)
        return wrapped_updater
    return inner_wrapper
Decorate a function with named lists into an updater for transact. :param \*args: parameter list sizes. -1 means all other items; None means a single item instead of a list. Only one -1 is allowed.
entailment
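A sketch of the simple (no -1) path, assuming the izip name resolves to zip on Python 3 as in the module's compatibility shim: the first parameter receives a two-item slice, the second a single item:

@list_updater(2, None)
def merge(pair, single):
    # pair == values[0:2], single == values[2]
    return pair + [single]

keys, values = merge(('k0', 'k1', 'k2'), ['a', 'b', 'c'])
assert keys == ('k0', 'k1', 'k2') and values == ['a', 'b', 'c']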
def create_new(cls, oldvalue, *args):
    "Raise if the old value already exists"
    if oldvalue is not None:
        raise AlreadyExistsException('%r already exists' % (oldvalue,))
    return cls.create_instance(*args)
Raise if the old value already exists
entailment
def create_from_key(cls, oldvalue, key):
    "Raise if the old value already exists"
    if oldvalue is not None:
        raise AlreadyExistsException('%r already exists' % (oldvalue,))
    return cls.create_from_key(key)
Raise if the old value already exists
entailment
def dump(obj, attributes=True, _refset=None):
    "Show full value of a data object"
    if _refset is None:
        _refset = set()
    if obj is None:
        return None
    elif isinstance(obj, DataObject):
        if id(obj) in _refset:
            attributes = False
        else:
            _refset.add(id(obj))
        cls = type(obj)
        clsname = getattr(cls, '__module__', '<unknown>') + '.' + getattr(cls, '__name__', '<unknown>')
        baseresult = {'_type': clsname, '_key': obj.getkey()}
        if not attributes:
            return baseresult
        else:
            baseresult.update((k, dump(v, attributes, _refset))
                              for k, v in vars(obj).items() if k[:1] != '_')
            _refset.remove(id(obj))
            return baseresult
    elif isinstance(obj, ReferenceObject):
        if obj._ref is not None:
            return dump(obj._ref, attributes, _refset)
        else:
            return {'_ref': obj.getkey()}
    elif isinstance(obj, WeakReferenceObject):
        return {'_weakref': obj.getkey()}
    elif isinstance(obj, DataObjectSet):
        return dump(list(obj.dataset()))
    elif isinstance(obj, dict):
        return dict((k, dump(v, attributes, _refset)) for k, v in obj.items())
    elif isinstance(obj, list) or isinstance(obj, tuple) or isinstance(obj, set):
        return [dump(v, attributes, _refset) for v in obj]
    else:
        return obj
Show full value of a data object
entailment
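Plain containers pass through recursively, which is easy to check without any vlcp data objects:

assert dump({'a': [1, (2, 3)], 'b': None}) == {'a': [1, [2, 3]], 'b': None}
# A DataObject instance would instead be rendered as
# {'_type': 'module.Class', '_key': ..., <public attributes>...}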
async def action_handler(self):
    """
    Call the vtep controller in sequence, merging multiple calls when possible

    When a bind relationship is updated, we always send all logical ports to a
    logical switch, to make sure it recovers from failed updates (so-called
    idempotency). When multiple calls are pending, we only need to send the last
    of them.
    """
    bind_event = VtepControllerCall.createMatcher(self._conn)
    event_queue = []
    timeout_flag = [False]

    async def handle_action():
        while event_queue or timeout_flag[0]:
            events = event_queue[:]
            del event_queue[:]
            for e in events:
                # Every event must have physname, phyiname
                # physname: physical switch name - must match the OVSDB-VTEP switch
                # phyiname: physical port name - must match the corresponding port
                physname = e.physname
                phyiname = e.phyiname
                if e.type == VtepControllerCall.UNBINDALL:
                    # Clear all other event info
                    self._store_event[(physname, phyiname)] = {"all": e}
                elif e.type == VtepControllerCall.BIND:
                    # A bind combines with earlier bind events
                    vlanid = e.vlanid
                    if (physname, phyiname) in self._store_event:
                        v = self._store_event[(physname, phyiname)]
                        if vlanid in v:
                            logicalports = e.logicalports
                            v.update({vlanid: (e.type, e.logicalnetworkid, e.vni, logicalports)})
                            self._store_event[(physname, phyiname)] = v
                        else:
                            # New bind info, no combined event
                            v.update({vlanid: (e.type, e.logicalnetworkid, e.vni, e.logicalports)})
                            self._store_event[(physname, phyiname)] = v
                    else:
                        self._store_event[(physname, phyiname)] = \
                            {vlanid: (e.type, e.logicalnetworkid, e.vni, e.logicalports)}
                elif e.type == VtepControllerCall.UNBIND:
                    vlanid = e.vlanid
                    if (physname, phyiname) in self._store_event:
                        v = self._store_event[(physname, phyiname)]
                        v.update({vlanid: (e.type, e.logicalnetworkid)})
                        self._store_event[(physname, phyiname)] = v
                    else:
                        self._store_event[(physname, phyiname)] = \
                            {vlanid: (e.type, e.logicalnetworkid)}
                else:
                    self._parent._logger.warning("caught event with unknown type %r, ignoring it", e)
                    continue
            call = []
            target_name = "vtepcontroller"
            for k, v in self._store_event.items():
                if "all" in v:
                    # Send unbindall
                    call.append(self.api(self, target_name, "unbindphysicalport",
                                         {"physicalswitch": k[0],
                                          "physicalport": k[1]}, timeout=10))
                    # unbindall: delete it regardless of the outcome
                    del v["all"]
            try:
                await self.execute_all(call)
            except Exception:
                self._parent._logger.warning("unbindall remove call failed", exc_info=True)
            for k, v in self._store_event.items():
                for vlanid, e in dict(v).items():
                    if vlanid != "all":
                        if e[0] == VtepControllerCall.BIND:
                            params = {"physicalswitch": k[0],
                                      "physicalport": k[1],
                                      "vlanid": vlanid,
                                      "logicalnetwork": e[1],
                                      "vni": e[2],
                                      "logicalports": e[3]}
                            try:
                                await self.api(self, target_name, "updatelogicalswitch",
                                               params, timeout=10)
                            except Exception:
                                self._parent._logger.warning("update logical switch error, try next %r",
                                                             params, exc_info=True)
                            else:
                                del self._store_event[k][vlanid]
                        elif e[0] == VtepControllerCall.UNBIND:
                            params = {"logicalnetwork": e[1],
                                      "physicalswitch": k[0],
                                      "physicalport": k[1],
                                      "vlanid": vlanid}
                            try:
                                await self.api(self, target_name, "unbindlogicalswitch",
                                               params, timeout=10)
                            except Exception:
                                self._parent._logger.warning("unbind logical switch error, try next %r",
                                                             params, exc_info=True)
                            else:
                                del self._store_event[k][vlanid]
            self._store_event = dict((k, v) for k, v in self._store_event.items() if v)
            if timeout_flag[0]:
                timeout_flag[0] = False

    def append_event(event, matcher):
        event_queue.append(event)

    while True:
        timeout, ev, m = await self.wait_with_timeout(10, bind_event)
        if not timeout:
            event_queue.append(ev)
        else:
            timeout_flag[0] = True
        await self.with_callback(handle_action(), append_event, bind_event)
Call the vtep controller in sequence, merging multiple calls when possible. When a bind relationship is updated, we always send all logical ports to a logical switch, to make sure it recovers from failed updates (so-called idempotency). When multiple calls are pending, we only need to send the last of them.
entailment
async def update_ports(self, ports, ovsdb_ports):
    """
    Called from the main module to update port information
    """
    new_port_names = dict((p['name'], _to32bitport(p['ofport'])) for p in ovsdb_ports)
    new_port_ids = dict((p['id'], _to32bitport(p['ofport'])) for p in ovsdb_ports if p['id'])
    if new_port_names == self._portnames and new_port_ids == self._portids:
        return
    self._portnames.clear()
    self._portnames.update(new_port_names)
    self._portids.clear()
    self._portids.update(new_port_ids)
    logicalportkeys = [LogicalPort.default_key(id) for id in self._portids]
    self._original_initialkeys = logicalportkeys + [PhysicalPortSet.default_key()]
    self._initialkeys = tuple(itertools.chain(self._original_initialkeys,
                                              self._append_initialkeys))
    phy_walker = partial(self._physicalport_walker, _portnames=new_port_names)
    log_walker = partial(self._logicalport_walker, _portids=new_port_ids)
    self._walkerdict = dict(itertools.chain(
        ((PhysicalPortSet.default_key(), phy_walker),),
        ((lgportkey, log_walker) for lgportkey in logicalportkeys)
    ))
    self._portnames = new_port_names
    self._portids = new_port_ids
    await self.restart_walk()
Called from the main module to update port information
entailment
def list_proxy(root_package='vlcp'):
    '''
    Walk through all the sub-modules, find subclasses of vlcp.server.module._ProxyModule,
    and list their default values
    '''
    proxy_dict = OrderedDict()
    pkg = __import__(root_package, fromlist=['_'])
    for imp, module, _ in walk_packages(pkg.__path__, root_package + '.'):
        m = __import__(module, fromlist=['_'])
        for _, v in vars(m).items():
            if v is not None and isinstance(v, type) and issubclass(v, _ProxyModule) \
                    and v is not _ProxyModule \
                    and v.__module__ == module \
                    and hasattr(v, '_default'):
                name = v.__name__.lower()
                if name not in proxy_dict:
                    proxy_dict[name] = {'defaultmodule': v._default.__name__.lower(),
                                        'class': repr(v._default.__module__ + '.'
                                                      + v._default.__name__)}
    return proxy_dict
Walk through all the sub-modules, find subclasses of vlcp.server.module._ProxyModule, and list their default values
entailment
def list_modules(root_package='vlcp'):
    '''
    Walk through all the sub-modules, find subclasses of vlcp.server.module.Module,
    and list their APIs through apidefs
    '''
    pkg = __import__(root_package, fromlist=['_'])
    module_dict = OrderedDict()
    _server = Server()
    for imp, module, _ in walk_packages(pkg.__path__, root_package + '.'):
        m = __import__(module, fromlist=['_'])
        for name, v in vars(m).items():
            if v is not None and isinstance(v, type) and issubclass(v, Module) \
                    and v is not Module \
                    and not issubclass(v, _ProxyModule) \
                    and hasattr(v, '__dict__') and 'configkey' in v.__dict__ \
                    and v.__module__ == module:
                module_name = v.__name__.lower()
                if module_name not in module_dict:
                    _inst = v(_server)
                    module_info = OrderedDict((('class', v.__module__ + '.' + v.__name__),
                                               ('dependencies', [d.__name__.lower()
                                                                 for d in v.depends]),
                                               ('classdescription', getdoc(v)),
                                               ('apis', [])))
                    if hasattr(_inst, 'apiHandler'):
                        apidefs = _inst.apiHandler.apidefs
                        module_info['apis'] = [(d[0], d[3]) for d in apidefs
                                               if len(d) > 3 and
                                               not d[0].startswith('public/')]
                    module_dict[module_name] = module_info
    return module_dict
Walk through all the sub-modules, find subclasses of vlcp.server.module.Module, and list their APIs through apidefs
entailment
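A hedged usage sketch; this assumes the vlcp package is importable, and simply prints each discovered module with its API names:

for name, info in list_modules('vlcp').items():
    print(name, '->', [api_name for api_name, _ in info['apis']])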
def append(self, event, force=False):
    '''
    Append an event to the queue. Events are classified and appended to sub-queues.

    :param event: input event
    :param force: if True, the event is appended even if the queue is full
    :returns: None if appended successfully, or a matcher to match a QueueCanWriteEvent otherwise
    '''
    if self.tree is None:
        if self.parent is None:
            raise IndexError('The queue is removed')
        else:
            return self.parent.parent.append(event, force)
    q = self.tree.matchfirst(event)
    return q.append(event, force)
Append an event to the queue. Events are classified and appended to sub-queues. :param event: input event :param force: if True, the event is appended even if the queue is full :returns: None if appended successfully, or a matcher to match a QueueCanWriteEvent otherwise
entailment
def block(self, event, emptyEvents=()):
    '''
    Return a recently popped event to the queue, and block all later events until unblock.

    Only the sub-queue directly containing the event is blocked, so events in other
    queues may still be processed. It is illegal to call block and unblock in
    different queues with the same event.

    :param event: the returned event. When the queue is unblocked later, this event
                  will be popped again.
    :param emptyEvents: reactivate the QueueIsEmptyEvents
    '''
    q = self.tree.matchfirst(event)
    q.block(event)
    self.blockEvents[event] = q
    for ee in emptyEvents:
        ee.queue.waitForEmpty()
Return a recently popped event to the queue, and block all later events until unblock. Only the sub-queue directly containing the event is blocked, so events in other queues may still be processed. It is illegal to call block and unblock in different queues with the same event. :param event: the returned event. When the queue is unblocked later, this event will be popped again. :param emptyEvents: reactivate the QueueIsEmptyEvents
entailment
def unblock(self, event):
    '''
    Remove a block
    '''
    if event not in self.blockEvents:
        return
    self.blockEvents[event].unblock(event)
    del self.blockEvents[event]
Remove a block
entailment
def unblockqueue(self, queue):
    '''
    Remove blocked events from the queue and all sub-queues. Usually used after
    queue clear/unblockall to prevent leaks.

    :returns: the cleared events
    '''
    subqueues = set()
    def allSubqueues(q):
        subqueues.add(q)
        subqueues.add(q.defaultQueue)
        for v in q.queueindex.values():
            if len(v) == 3:
                allSubqueues(v[1])
    allSubqueues(queue)
    events = [k for k, v in self.blockEvents.items() if v in subqueues]
    for e in events:
        del self.blockEvents[e]
    return events
Remove blocked events from the queue and all sub-queues. Usually used after queue clear/unblockall to prevent leaks. :returns: the cleared events
entailment
def unblockall(self):
    '''
    Remove all blocks from the queue and all sub-queues
    '''
    for q in self.queues.values():
        q.unblockall()
    self.blockEvents.clear()
Remove all blocks from the queue and all sub-queues
entailment
def notifyAppend(self, queue, force):
    '''
    Internal notify for sub-queues

    :returns: if the append is blocked by the parent, an EventMatcher is returned;
              None otherwise.
    '''
    if not force and not self.canAppend():
        self.isWaited = True
        return self._matcher
    if self.parent is not None:
        m = self.parent.notifyAppend(self, force)
        if m is not None:
            return m
    self.totalSize = self.totalSize + 1
    return None
Internal notify for sub-queues :returns: if the append is blocked by the parent, an EventMatcher is returned; None otherwise.
entailment
def notifyBlock(self, queue, blocked):
    '''
    Internal notify for sub-queues being blocked
    '''
    if blocked:
        if self.prioritySet[-1] == queue.priority:
            self.prioritySet.pop()
        else:
            pindex = bisect_left(self.prioritySet, queue.priority)
            if pindex < len(self.prioritySet) and self.prioritySet[pindex] == queue.priority:
                del self.prioritySet[pindex]
    else:
        if queue.canPop():
            pindex = bisect_left(self.prioritySet, queue.priority)
            if pindex >= len(self.prioritySet) or self.prioritySet[pindex] != queue.priority:
                self.prioritySet.insert(pindex, queue.priority)
    newblocked = not self.canPop()
    if newblocked != self.blocked:
        self.blocked = newblocked
        if self.parent is not None:
            self.parent.notifyBlock(self, newblocked)
Internal notify for sub-queues being blocked
entailment
def notifyPop(self, queue, length=1):
    '''
    Internal notify for sub-queues being popped

    :returns: list of any events generated by this pop
    '''
    self.totalSize = self.totalSize - length
    ret1 = []
    ret2 = []
    if self.isWaited and self.canAppend():
        self.isWaited = False
        ret1.append(QueueCanWriteEvent(self))
    if self.isWaitEmpty and not self:
        self.isWaitEmpty = False
        ret2.append(QueueIsEmptyEvent(self))
    if self.parent is not None:
        pr = self.parent.notifyPop(self, length)
        ret1 += pr[0]
        ret2 += pr[1]
    newblocked = not self.canPop()
    if newblocked != self.blocked:
        self.blocked = newblocked
        if self.parent is not None:
            self.parent.notifyBlock(self, newblocked)
    return (ret1, ret2)
Internal notify for sub-queues being popped :returns: list of any events generated by this pop
entailment
def pop(self):
    '''
    Pop an event from the queue. Events in higher-priority queues are popped before
    those in lower-priority queues. If there are multiple queues with the same
    priority, events are taken in turn from each queue. May return some queue events
    indicating that some of the queues can be written into.

    :returns: `(obj, (queueEvents,...), (queueEmptyEvents,...))` where obj is the
              popped event, queueEvents are QueueCanWriteEvents generated by this
              pop and queueEmptyEvents are QueueIsEmptyEvents generated by this pop
    '''
    ret = self._pop()
    if self.parent is not None:
        pr = self.parent.notifyPop(self)
        ret[1].extend(pr[0])
        ret[2].extend(pr[1])
    return ret
Pop an event from the queue. Events in higher-priority queues are popped before those in lower-priority queues. If there are multiple queues with the same priority, events are taken in turn from each queue. May return some queue events indicating that some of the queues can be written into. :returns: `(obj, (queueEvents,...), (queueEmptyEvents,...))` where obj is the popped event, queueEvents are QueueCanWriteEvents generated by this pop and queueEmptyEvents are QueueIsEmptyEvents generated by this pop
entailment
def _pop(self):
    '''
    Actual pop
    '''
    if not self.canPop():
        raise IndexError('pop from an empty or blocked queue')
    priority = self.prioritySet[-1]
    ret = self.queues[priority]._pop()
    self.outputStat = self.outputStat + 1
    self.totalSize = self.totalSize - 1
    if self.isWaited and self.canAppend():
        self.isWaited = False
        ret[1].append(QueueCanWriteEvent(self))
    if self.isWaitEmpty and not self:
        self.isWaitEmpty = False
        ret[2].append(QueueIsEmptyEvent(self))
    return ret
Actual pop
entailment
def clear(self):
    '''
    Clear all the events in this queue, including any sub-queues.

    :returns: ((queueEvents,...), (queueEmptyEvents,...)) where queueEvents are
              QueueCanWriteEvents generated by clearing.
    '''
    l = len(self)
    ret = self._clear()
    if self.parent is not None:
        pr = self.parent.notifyPop(self, l)
        ret[0].extend(pr[0])
        ret[1].extend(pr[1])
    return ret
Clear all the events in this queue, including any sub-queues. :returns: ((queueEvents,...), (queueEmptyEvents,...)) where queueEvents are QueueCanWriteEvents generated by clearing.
entailment
def _clear(self):
    '''
    Actual clear
    '''
    ret = ([], [])
    for q in self.queues.values():
        pr = q._clear()
        ret[0].extend(pr[0])
        ret[1].extend(pr[1])
    self.totalSize = 0
    del self.prioritySet[:]
    if self.isWaited and self.canAppend():
        self.isWaited = False
        ret[0].append(QueueCanWriteEvent(self))
    if self.isWaitEmpty and not self:
        self.isWaitEmpty = False
        ret[1].append(QueueIsEmptyEvent(self))
    self.blockEvents.clear()
    return ret
Actual clear
entailment
def setPriority(self, queue, priority):
    '''
    Set priority of a sub-queue
    '''
    q = self.queueindex[queue]
    self.queues[q[0]].removeSubQueue(q[1])
    newPriority = self.queues.setdefault(priority, CBQueue.MultiQueue(self, priority))
    q[0] = priority
    newPriority.addSubQueue(q[1])
Set priority of a sub-queue
entailment
def addSubQueue(self, priority, matcher, name=None, maxdefault=None, maxtotal=None,
                defaultQueueClass=FifoQueue):
    '''
    Add a sub-queue to the current queue, with a priority and a matcher

    :param priority: priority of this queue. Larger is higher; 0 is the lowest.
    :param matcher: an event matcher to catch events. Every event matching the
                    criteria will be stored in this queue.
    :param name: a unique name to identify the sub-queue. If None, the queue is
                 anonymous. It can be any hashable value.
    :param maxdefault: max length for the default queue.
    :param maxtotal: max length for the sub-queue total, including sub-queues of sub-queues
    '''
    if name is not None and name in self.queueindex:
        raise IndexError("Duplicated sub-queue name '" + str(name) + "'")
    subtree = self.tree.subtree(matcher, True)
    newPriority = self.queues.setdefault(priority, CBQueue.MultiQueue(self, priority))
    newQueue = CBQueue(subtree, newPriority, maxdefault, maxtotal, defaultQueueClass)
    newPriority.addSubQueue(newQueue)
    qi = [priority, newQueue, name]
    if name is not None:
        self.queueindex[name] = qi
    self.queueindex[newQueue] = qi
    return newQueue
Add a sub-queue to the current queue, with a priority and a matcher :param priority: priority of this queue. Larger is higher; 0 is the lowest. :param matcher: an event matcher to catch events. Every event matching the criteria will be stored in this queue. :param name: a unique name to identify the sub-queue. If None, the queue is anonymous. It can be any hashable value. :param maxdefault: max length for the default queue. :param maxtotal: max length for the sub-queue total, including sub-queues of sub-queues
entailment
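A sketch of carving out a high-priority lane, assuming a CBQueue instance and an Event subclass with a createMatcher classmethod in the usual vlcp.event style; the queue and event names here are illustrative:

# Events with level='high' jump the line; low-priority events are capped at 128 pending
high_queue = queue.addSubQueue(10, WorkEvent.createMatcher(level='high'), name='high')
low_queue = queue.addSubQueue(1, WorkEvent.createMatcher(level='low'), name='low', maxtotal=128)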
def removeSubQueue(self, queue):
    '''
    Remove a sub-queue from the current queue.

    This unblocks the sub-queue, retrieves all events from the queue and puts them
    back to the parent. Call clear on the sub-queue first if the events are not
    needed any more.

    :param queue: the name or queue object to remove
    :returns: ((queueevents,...), (queueEmptyEvents,...)) possible queue events
              from removing sub-queues
    '''
    q = self.queueindex[queue]
    q[1].unblockall()
    q[1]._removeFromTree()
    ret = ([], [])
    while q[1].canPop():
        r = q[1].pop()
        self.append(r[0], True)
        ret[0].extend(r[1])
        ret[1].extend(r[2])
    self.queues[q[0]].removeSubQueue(q[1])
    # Remove from index
    if q[2] is not None:
        del self.queueindex[q[2]]
    del self.queueindex[q[1]]
    newblocked = not self.canPop()
    if newblocked != self.blocked:
        self.blocked = newblocked
        if self.parent is not None:
            self.parent.notifyBlock(self, newblocked)
    return ret
Remove a sub-queue from the current queue. This unblocks the sub-queue, retrieves all events from the queue and puts them back to the parent. Call clear on the sub-queue first if the events are not needed any more. :param queue: the name or queue object to remove :returns: ((queueevents,...), (queueEmptyEvents,...)) possible queue events from removing sub-queues
entailment
def ensure_keys(walk, *keys):
    """
    Use walk to try to retrieve all keys
    """
    all_retrieved = True
    for k in keys:
        try:
            walk(k)
        except WalkKeyNotRetrieved:
            all_retrieved = False
    return all_retrieved
Use walk to try to retrieve all keys
entailment
def list_config(root_package='vlcp'):
    '''
    Walk through all the sub-modules, find subclasses of vlcp.config.Configurable,
    and list their available configurations through the _default_ prefix
    '''
    pkg = __import__(root_package, fromlist=['_'])
    return_dict = OrderedDict()
    for imp, module, _ in walk_packages(pkg.__path__, root_package + '.'):
        m = __import__(module, fromlist=['_'])
        for name, v in vars(m).items():
            if v is not None and isinstance(v, type) and issubclass(v, Configurable) \
                    and v is not Configurable \
                    and hasattr(v, '__dict__') and 'configkey' in v.__dict__ \
                    and v.__module__ == module:
                configkey = v.__dict__['configkey']
                if configkey not in return_dict:
                    configs = OrderedDict()
                    v2 = v
                    parents = [v2]
                    while True:
                        parent = None
                        for c in v2.__bases__:
                            if issubclass(c, Configurable):
                                parent = c
                        if parent is None or parent is Configurable:
                            break
                        if hasattr(parent, '__dict__') and 'configkey' not in parent.__dict__:
                            parents.append(parent)
                            v2 = parent
                        else:
                            break
                    for v2 in reversed(parents):
                        tmp_configs = {}
                        for k, default_value in v2.__dict__.items():
                            if k.startswith('_default_'):
                                config_attr = k[len('_default_'):]
                                if config_attr in v.__dict__:
                                    continue
                                configname = configkey + '.' + config_attr
                                tmp_configs.setdefault(configname, OrderedDict())['default'] = \
                                    pformat(default_value, width=10)
                        # Inspect the source lines to find remarks for these configurations
                        lines, _ = getsourcelines(v2)
                        last_remark = []
                        for l in lines:
                            l = l.strip()
                            if not l:
                                continue
                            if l.startswith('#'):
                                last_remark.append(l[1:])
                            else:
                                if l.startswith('_default_'):
                                    key, sep, _ = l.partition('=')
                                    if sep and key.startswith('_default_'):
                                        configname = configkey + '.' + key[len('_default_'):].strip()
                                        if configname in tmp_configs and configname not in configs:
                                            configs[configname] = tmp_configs.pop(configname)
                                        if configname in configs and last_remark:
                                            configs[configname]['description'] = \
                                                cleandoc('\n' + '\n'.join(last_remark))
                                del last_remark[:]
                        for key in tmp_configs:
                            if key not in configs:
                                configs[key] = tmp_configs[key]
                    if configs:
                        return_dict[configkey] = OrderedDict((('class', v.__module__ + '.' + name),
                                                              ('classdescription', getdoc(v)),
                                                              ('configs', configs)))
    return return_dict
Walk through all the sub-modules, find subclasses of vlcp.config.Configurable, and list their available configurations through the _default_ prefix
entailment
def http(container=None):
    "Wrap a WSGI-style class method into an HTTPRequest event handler"
    def decorator(func):
        @functools.wraps(func)
        def handler(self, event):
            return _handler(self if container is None else container, event,
                            lambda env: func(self, env))
        return handler
    return decorator
Wrap a WSGI-style class method into an HTTPRequest event handler
entailment
def statichttp(container=None):
    "Wrap a WSGI-style function into an HTTPRequest event handler"
    def decorator(func):
        @functools.wraps(func)
        def handler(event):
            return _handler(container, event, func)
        if hasattr(func, '__self__'):
            handler.__self__ = func.__self__
        return handler
    return decorator
Wrap a WSGI-style function into an HTTPRequest event handler
entailment
def start_response(self, status=200, headers=[], clearheaders=True,
                   disabletransferencoding=False):
    "Start to send response"
    if self._sendHeaders:
        raise HttpProtocolException('Cannot modify response, headers already sent')
    self.status = status
    self.disabledeflate = disabletransferencoding
    if clearheaders:
        self.sent_headers = headers[:]
    else:
        self.sent_headers.extend(headers)
Start to send response
entailment
def header(self, key, value, replace=True):
    "Send a new header"
    if hasattr(key, 'encode'):
        key = key.encode('ascii')
    if hasattr(value, 'encode'):
        value = value.encode(self.encoding)
    if replace:
        self.sent_headers = [(k, v) for k, v in self.sent_headers
                             if k.lower() != key.lower()]
    self.sent_headers.append((key, value))
Send a new header
entailment
def rawheader(self, kv, replace=True):
    """
    Add a header with a "<Header>: Value" string
    """
    if hasattr(kv, 'encode'):
        kv = kv.encode(self.encoding)
    k, v = kv.split(b':', 1)
    self.header(k, v.strip(), replace)
Add a header with a "<Header>: Value" string
entailment
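For instance, the following two calls should be equivalent (env is the request Environment):

env.rawheader('X-Request-Id: 12345')
env.header(b'X-Request-Id', b'12345')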
def setcookie(self, key, value, max_age=None, expires=None, path='/', domain=None,
              secure=None, httponly=False):
    """
    Add a new cookie
    """
    newcookie = Morsel()
    newcookie.key = key
    newcookie.value = value
    newcookie.coded_value = value
    if max_age is not None:
        newcookie['max-age'] = max_age
    if expires is not None:
        newcookie['expires'] = expires
    if path is not None:
        newcookie['path'] = path
    if domain is not None:
        newcookie['domain'] = domain
    if secure:
        newcookie['secure'] = secure
    if httponly:
        newcookie['httponly'] = httponly
    self.sent_cookies = [c for c in self.sent_cookies if c.key != key]
    self.sent_cookies.append(newcookie)
Add a new cookie
entailment
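A typical call from a handler, sketched; the cookie name and value are illustrative:

env.setcookie('sessionid', 'abc123', max_age=3600, path='/', httponly=True)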
def bufferoutput(self):
    """
    Buffer the whole output until write EOF or flushed.
    """
    new_stream = Stream(writebufferlimit=None)
    if self._sendHeaders:
        # An extra copy
        self.container.subroutine(new_stream.copy_to(self.outputstream, self.container,
                                                     buffering=False))
    self.outputstream = new_stream
Buffer the whole output until write EOF or flushed.
entailment
async def rewrite(self, path, method=None, keepresponse=True):
    "Rewrite this request to another processor. Must be called before headers are sent"
    if self._sendHeaders:
        raise HttpProtocolException('Cannot modify response, headers already sent')
    if getattr(self.event, 'rewritedepth', 0) >= getattr(self.protocol, 'rewritedepthlimit', 32):
        raise HttpRewriteLoopException
    newpath = urljoin(quote_from_bytes(self.path).encode('ascii'), path)
    if newpath == self.fullpath or newpath == self.originalpath:
        raise HttpRewriteLoopException
    extraparams = {}
    if keepresponse:
        if hasattr(self, 'status'):
            extraparams['status'] = self.status
        extraparams['sent_headers'] = self.sent_headers
        extraparams['sent_cookies'] = self.sent_cookies
    r = HttpRequestEvent(self.host, newpath,
                         self.method if method is None else method,
                         self.connection, self.connmark, self.xid, self.protocol,
                         headers=self.headers, headerdict=self.headerdict,
                         setcookies=self.setcookies, stream=self.inputstream,
                         rewritefrom=self.fullpath, originalpath=self.originalpath,
                         rewritedepth=getattr(self.event, 'rewritedepth', 0) + 1,
                         **extraparams)
    await self.connection.wait_for_send(r)
    self._sendHeaders = True
    self.outputstream = None
Rewrite this request to another processor. Must be called before headers are sent
entailment
async def redirect(self, path, status=302):
    """
    Redirect this request with a 3xx status
    """
    location = urljoin(urlunsplit((b'https' if self.https else b'http',
                                   self.host,
                                   quote_from_bytes(self.path).encode('ascii'),
                                   '',
                                   '')),
                       path)
    self.start_response(status, [(b'Location', location)])
    await self.write(b'<a href="' + self.escape(location, True) + b'">'
                     + self.escape(location) + b'</a>')
    await self.flush(True)
Redirect this request with a 3xx status
entailment
def nl2br(self, text):
    """
    Replace '\\n' with '<br/>\\n'
    """
    if isinstance(text, bytes):
        return text.replace(b'\n', b'<br/>\n')
    else:
        return text.replace('\n', '<br/>\n')
Replace '\n' with '<br/>\n'
entailment
def escape(self, text, quote=True):
    """
    Escape special characters in HTML
    """
    if isinstance(text, bytes):
        return escape_b(text, quote)
    else:
        return escape(text, quote)
Escape special characters in HTML
entailment
async def error(self, status=500, allowredirect=True, close=True, showerror=None,
                headers=[]):
    """
    Show default error response
    """
    if showerror is None:
        showerror = self.showerrorinfo
    if self._sendHeaders:
        if showerror:
            typ, exc, tb = sys.exc_info()
            if exc:
                await self.write('<span style="white-space:pre-wrap">\n', buffering=False)
                await self.writelines((self.nl2br(self.escape(v))
                                       for v in traceback.format_exception(typ, exc, tb)),
                                      buffering=False)
                await self.write('</span>\n', close, False)
    elif allowredirect and status in self.protocol.errorrewrite:
        await self.rewrite(self.protocol.errorrewrite[status], b'GET')
    elif allowredirect and status in self.protocol.errorredirect:
        await self.redirect(self.protocol.errorredirect[status])
    else:
        self.start_response(status, headers)
        typ, exc, tb = sys.exc_info()
        if showerror and exc:
            await self.write('<span style="white-space:pre-wrap">\n', buffering=False)
            await self.writelines((self.nl2br(self.escape(v))
                                   for v in traceback.format_exception(typ, exc, tb)),
                                  buffering=False)
            await self.write('</span>\n', close, False)
        else:
            await self.write(b'<h1>' + _createstatus(status) + b'</h1>', close, False)
Show default error response
entailment
async def write(self, data, eof=False, buffering=True):
    """
    Write output to current output stream
    """
    if not self.outputstream:
        self.outputstream = Stream()
        self._startResponse()
    elif (not buffering or eof) and not self._sendHeaders:
        self._startResponse()
    if not isinstance(data, bytes):
        data = data.encode(self.encoding)
    await self.outputstream.write(data, self.connection, eof, False, buffering)
Write output to current output stream
entailment
async def writelines(self, lines, eof=False, buffering=True):
    """
    Write lines to current output stream
    """
    for l in lines:
        await self.write(l, False, buffering)
    if eof:
        await self.write(b'', eof, buffering)
Write lines to current output stream
entailment
def output(self, stream, disabletransferencoding=None):
    """
    Set output stream and send response immediately
    """
    if self._sendHeaders:
        raise HttpProtocolException('Cannot modify response, headers already sent')
    self.outputstream = stream
    try:
        content_length = len(stream)
    except Exception:
        pass
    else:
        self.header(b'Content-Length', str(content_length).encode('ascii'))
    if disabletransferencoding is not None:
        self.disabledeflate = disabletransferencoding
    self._startResponse()
Set output stream and send response immediately
entailment
def outputdata(self, data):
    """
    Send output with fixed length data
    """
    if not isinstance(data, bytes):
        data = str(data).encode(self.encoding)
    self.output(MemoryStream(data))
Send output with fixed length data
entailment
async def close(self):
    """
    Close this request, send all data. You can still run other operations in
    the handler.
    """
    if not self._sendHeaders:
        self._startResponse()
    if self.inputstream is not None:
        self.inputstream.close(self.connection.scheduler)
    if self.outputstream is not None:
        await self.flush(True)
    if hasattr(self, 'session') and self.session:
        self.session.unlock()
Close this request, send all data. You can still run other operations in the handler.
entailment
async def parseform(self, limit=67108864, tostr=True, safename=True):
    '''
    Parse form data with multipart/form-data or application/x-www-form-urlencoded.

    In Python 3, the keys of form and files are unicode, but values are bytes.
    If a key ends with '[]', it is considered to be a list:

        a=1&b=2&b=3 => {'a':'1','b':'3'}
        a[]=1&b[]=2&b[]=3 => {'a':['1'],'b':['2','3']}

    :param limit: limit total input size, default to 64MB. None means no limit.
                  Note that all the form data is stored in memory (including
                  uploaded files), so it is dangerous to accept a very large input.
    :param tostr: convert values to str in Python 3. Only applies to form; files
                  data are always bytes.
    :param safename: if True, extra security checks are performed on filenames to
                     reduce known security risks.
    '''
    if tostr:
        def _str(s):
            try:
                if not isinstance(s, str):
                    return s.decode(self.encoding)
                else:
                    return s
            except Exception:
                raise HttpInputException('Invalid encoding in post data: ' + repr(s))
    else:
        def _str(s):
            return s
    try:
        form = {}
        files = {}
        # If there is no content-type header, maybe there is no content.
        if b'content-type' in self.headerdict and self.inputstream is not None:
            contenttype = self.headerdict[b'content-type']
            m = Message()
            # The email library expects string, which is unicode in Python 3
            try:
                m.add_header('Content-Type', str(contenttype.decode('ascii')))
            except UnicodeDecodeError:
                raise HttpInputException('Content-Type has non-ascii characters')
            if m.get_content_type() == 'multipart/form-data':
                fp = BytesFeedParser()
                fp.feed(b'Content-Type: ' + contenttype + b'\r\n\r\n')
                total_length = 0
                while True:
                    try:
                        await self.inputstream.prepareRead(self.container)
                        data = self.inputstream.readonce()
                        total_length += len(data)
                        if limit is not None and total_length > limit:
                            raise HttpInputException('Data is too large')
                        fp.feed(data)
                    except EOFError:
                        break
                msg = fp.close()
                if not msg.is_multipart() or msg.defects:
                    # Reject the data
                    raise HttpInputException('Not valid multipart/form-data format')
                for part in msg.get_payload():
                    if part.is_multipart() or part.defects:
                        raise HttpInputException('Not valid multipart/form-data format')
                    disposition = part.get_params(header='content-disposition')
                    if not disposition:
                        raise HttpInputException('Not valid multipart/form-data format')
                    disposition = dict(disposition)
                    if 'form-data' not in disposition or 'name' not in disposition:
                        raise HttpInputException('Not valid multipart/form-data format')
                    if 'filename' in disposition:
                        name = disposition['name']
                        filename = disposition['filename']
                        if safename:
                            filename = _safename(filename)
                        if name.endswith('[]'):
                            files.setdefault(name[:-2], []).append(
                                {'filename': filename,
                                 'content': part.get_payload(decode=True)})
                        else:
                            files[name] = {'filename': filename,
                                           'content': part.get_payload(decode=True)}
                    else:
                        name = disposition['name']
                        if name.endswith('[]'):
                            form.setdefault(name[:-2], []).append(
                                _str(part.get_payload(decode=True)))
                        else:
                            form[name] = _str(part.get_payload(decode=True))
            elif m.get_content_type() == 'application/x-www-form-urlencoded' or \
                    m.get_content_type() == 'application/x-url-encoded':
                if limit is not None:
                    data = await self.inputstream.read(self.container, limit + 1)
                    if len(data) > limit:
                        raise HttpInputException('Data is too large')
                else:
                    data = await self.inputstream.read(self.container)
                result = parse_qs(data, True)
                def convert(k, v):
                    try:
                        k = str(k.decode('ascii'))
                    except Exception:
                        raise HttpInputException('Form-data key must be ASCII')
                    if not k.endswith('[]'):
                        v = _str(v[-1])
                    else:
                        k = k[:-2]
                        v = [_str(i) for i in v]
                    return (k, v)
                form = dict(convert(k, v) for k, v in result.items())
            else:
                # Other formats, treat like no data
                pass
        self.form = form
        self.files = files
    except Exception as exc:
        raise HttpInputException('Failed to parse form-data: ' + str(exc))
Parse form data with multipart/form-data or application/x-www-form-urlencoded. In Python 3, the keys of form and files are unicode, but values are bytes. If a key ends with '[]', it is considered to be a list: a=1&b=2&b=3 => {'a':'1','b':'3'} a[]=1&b[]=2&b[]=3 => {'a':['1'],'b':['2','3']} :param limit: limit total input size, default to 64MB. None means no limit. Note that all the form data is stored in memory (including uploaded files), so it is dangerous to accept a very large input. :param tostr: convert values to str in Python 3. Only applies to form; files data are always bytes :param safename: if True, extra security checks are performed on filenames to reduce known security risks.
entailment
async def sessionstart(self):
    "Start session. Must start service.utils.session.Session to use this method"
    if not hasattr(self, 'session') or not self.session:
        self.session, setcookies = await call_api(self.container, 'session', 'start',
                                                  {'cookies': self.rawcookie})
        for nc in setcookies:
            self.sent_cookies = [c for c in self.sent_cookies if c.key != nc.key]
            self.sent_cookies.append(nc)
Start session. Must start service.utils.session.Session to use this method
entailment
async def sessiondestroy(self):
    """
    Destroy current session. The session object is discarded and can no longer
    be used in other requests.
    """
    if hasattr(self, 'session') and self.session:
        setcookies = await call_api(self.container, 'session', 'destroy',
                                    {'sessionid': self.session.id})
        self.session.unlock()
        del self.session
        for nc in setcookies:
            self.sent_cookies = [c for c in self.sent_cookies if c.key != nc.key]
            self.sent_cookies.append(nc)
Destroy current session. The session object is discarded and can no longer be used in other requests.
entailment
def basicauth(self, realm=b'all', nofail=False):
    "Try to get the basic authorization info; return (username, password) if successful, send 401 otherwise"
    if b'authorization' in self.headerdict:
        auth = self.headerdict[b'authorization']
        auth_pair = auth.split(b' ', 1)
        if len(auth_pair) < 2:
            raise HttpInputException('Authorization header is malformed')
        if auth_pair[0].lower() == b'basic':
            try:
                userpass = base64.b64decode(auth_pair[1])
            except Exception:
                raise HttpInputException('Invalid base-64 string')
            userpass_pair = userpass.split(b':', 1)
            if len(userpass_pair) != 2:
                raise HttpInputException('Authorization header is malformed')
            return userpass_pair
    if nofail:
        return (None, None)
    else:
        self.basicauthfail(realm)
Try to get the basic authorization info; return (username, password) if successful, send 401 otherwise
entailment
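A sketch of guarding a handler with it; the credentials and handler are illustrative, and basicauth itself sends the 401 when no Authorization header is present:

async def protected(env):
    user, password = env.basicauth(realm=b'admin')
    if (user, password) != (b'admin', b'secret'):
        env.basicauthfail(b'admin')
    env.outputdata(b'Welcome, ' + user)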
def basicauthfail(self, realm=b'all'):
    """
    Return 401 for authentication failure. This will end the handler.
    """
    if not isinstance(realm, bytes):
        realm = realm.encode('ascii')
    self.start_response(401, [(b'WWW-Authenticate', b'Basic realm="' + realm + b'"')])
    self.exit(b'<h1>' + _createstatus(401) + b'</h1>')
Return 401 for authentication failure. This will end the handler.
entailment
def getrealpath(self, root, path):
    '''
    Return the real path on disk from the query path, under a root path. The input
    path from the URL might be absolute ('/abc'), point to a parent ('../test'), or
    even contain UNC or drive parts ('\\test\abc', 'c:\test.abc'), which creates
    security issues when accessing file contents with the path. With getrealpath,
    these paths cannot point to files beyond the root path.

    :param root: root path of disk files; any query is limited to the root directory.
    :param path: query path from the URL.
    '''
    if not isinstance(path, str):
        path = path.decode(self.encoding)
    # On Windows, if the path starts with multiple / or \, the os.path library may
    # consider it an UNC path; remove them, and replace \ with /
    path = pathrep.subn('/', path)[0]
    # The relative root is considered ROOT; eliminate any relative path like ../abc,
    # which creates security issues.
    # We could use os.path.relpath(..., '/') but os.path.relpath is buggy in Python 2.6
    path = os.path.normpath(os.path.join('/', path))
    # The normalized path can still be an UNC path, or even a path with a drive letter.
    # Send bad request for these types
    if os.path.splitdrive(path)[0]:
        raise HttpInputException('Bad path')
    return os.path.join(root, path[1:])
Return the real path on disk from the query path, from a root path. The input path from URL might be absolute '/abc', or point to parent '../test', or even with UNC or drive '\\test\abc', 'c:\test.abc', which creates security issues when accessing file contents with the path. With getrealpath, these paths cannot point to files beyond the root path. :param root: root path of disk files, any query is limited in root directory. :param path: query path from URL.
entailment
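Two traversal attempts and how the normalization neutralizes them (POSIX paths shown):

env.getrealpath('/var/www', b'../../etc/passwd')  # -> '/var/www/etc/passwd'
env.getrealpath('/var/www', b'/a/../b.txt')       # -> '/var/www/b.txt'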
def argstostr(self):
    "Query string arguments are bytes in Python 3. This function converts bytes to str with env.encoding (default utf-8)."
    self.args = dict((k, self._tostr(v)) for k, v in self.args.items())
    return self.args
Query string arguments are bytes in Python 3. This function converts bytes to str with env.encoding (default utf-8).
entailment
def cookietostr(self):
    "Cookie values are bytes in Python 3. This function converts bytes to str with env.encoding (default utf-8)."
    self.cookies = dict((k, (v.decode(self.encoding) if not isinstance(v, str) else v))
                        for k, v in self.cookies.items())
    return self.cookies
Cookie values are bytes in Python 3. This function converts bytes to str with env.encoding (default utf-8).
entailment
async def createcsrf(self, csrfarg='_csrf'):
    """
    Create an anti-CSRF token in the session
    """
    await self.sessionstart()
    if csrfarg not in self.session.vars:
        self.session.vars[csrfarg] = uuid.uuid4().hex
Create an anti-CSRF token in the session
entailment
def outputjson(self, obj):
    """
    Serialize `obj` with JSON and output to the client
    """
    self.header('Content-Type', 'application/json')
    self.outputdata(json.dumps(obj).encode('ascii'))
Serialize `obj` with JSON and output to the client
entailment
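A minimal JSON endpoint sketch; the handler name and payload are illustrative:

async def status_handler(env):
    env.outputjson({'status': 'ok', 'uptime': 42})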
def routeevent(self, path, routinemethod, container=None, host=None, vhost=None,
               method=[b'GET', b'HEAD']):
    '''
    Route the specified path to a routine factory

    :param path: path to match, can be a regular expression
    :param routinemethod: factory function routinemethod(event), where event is the
                          HttpRequestEvent
    :param container: routine container. If None, default to self for a bound method,
                      or event.connection if not
    :param host: if specified, only respond to requests to the specified host
    :param vhost: if specified, only respond to requests to the specified vhost.
                  If not specified, respond to the dispatcher's default vhost.
    :param method: if specified, respond to the specified methods
    '''
    regm = re.compile(path + b'$')
    if vhost is None:
        vhost = self.vhost
    if container is None:
        container = getattr(routinemethod, '__self__', None)
    def ismatch(event):
        # Check vhost
        if vhost is not None and getattr(event.createby, 'vhost', '') != vhost:
            return False
        # First parse the path. The RFC says we should accept an absolute path.
        psplit = urlsplit(event.path)
        if psplit.path[:1] != b'/':
            # For security reasons, ignore unrecognized paths
            return False
        if psplit.netloc and host is not None and host != psplit.netloc:
            # Maybe a proxy request; ignore it
            return False
        if getattr(event.createby, 'unquoteplus', True):
            realpath = unquote_plus_to_bytes(psplit.path)
        else:
            realpath = unquote_to_bytes(psplit.path)
        m = regm.match(realpath)
        if m is None:
            return False
        event.realpath = realpath
        event.querystring = psplit.query
        event.path_match = m
        return True
    def func(event, scheduler):
        try:
            if event.canignore:
                # Already processed
                return
            event.canignore = True
            c = event.connection if container is None else container
            c.subroutine(routinemethod(event), False)
        except Exception:
            pass
    for m in method:
        self.registerHandler(HttpRequestEvent.createMatcher(host, None, m,
                                                            _ismatch=ismatch),
                             func)
Route the specified path to a routine factory :param path: path to match, can be a regular expression :param routinemethod: factory function routinemethod(event), where event is the HttpRequestEvent :param container: routine container. If None, default to self for a bound method, or event.connection if not :param host: if specified, only respond to requests to the specified host :param vhost: if specified, only respond to requests to the specified vhost. If not specified, respond to the dispatcher's default vhost. :param method: if specified, respond to the specified methods
entailment
def route(self, path, routinemethod, container=None, host=None, vhost=None,
          method=[b'GET', b'HEAD']):
    '''
    Route the specified path to a WSGI-styled routine factory

    :param path: path to match, can be a regular expression
    :param routinemethod: factory function routinemethod(env), where env is an
                          Environment object; see also utils.http.Environment
    :param container: routine container
    :param host: if specified, only respond to requests to the specified host
    :param vhost: if specified, only respond to requests to the specified vhost.
                  If not specified, respond to the dispatcher's default vhost.
    :param method: if specified, respond to the specified methods
    '''
    self.routeevent(path, statichttp(container)(routinemethod), container, host,
                    vhost, method)
Route the specified path to a WSGI-styled routine factory :param path: path to match, can be a regular expression :param routinemethod: factory function routinemethod(env), where env is an Environment object; see also utils.http.Environment :param container: routine container :param host: if specified, only respond to requests to the specified host :param vhost: if specified, only respond to requests to the specified vhost. If not specified, respond to the dispatcher's default vhost. :param method: if specified, respond to the specified methods
entailment
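Wiring the sketch above into a dispatcher; 'dispatcher' stands for an HttpDispatcher-like object, static_handler is hypothetical, and the path is anchored with a trailing $ by routeevent itself:

dispatcher.route(b'/status', status_handler)
dispatcher.route(b'/static/.*', static_handler, method=[b'GET', b'HEAD'])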
def routeargs(self, path, routinemethod, container=None, host=None, vhost=None,
              method=[b'POST'], tostr=True, matchargs=(), fileargs=(), queryargs=(),
              cookieargs=(), sessionargs=(), csrfcheck=False, csrfarg='_csrf',
              formlimit=67108864):
    '''
    Convenient way to route a processor with arguments. Automatically parse
    arguments and pass them to the corresponding handler arguments. If required
    arguments are missing, HttpInputException is raised, which creates a
    400 Bad Request response. If optional arguments are missing, they are replaced
    with default values just as a normal Python call does. If the handler accepts
    keyword arguments, extra arguments are sent via kwargs. If not, they are
    safely ignored.

    :param path: path to match, can be a regular expression
    :param routinemethod: factory function routinemethod(env, arga, argb, argc...).
                          env is an Environment object. Form or querystring arguments
                          'arga', 'argb', 'argc' are passed to arga, argb, argc.
    :param container: routine container
    :param host: if specified, only respond to requests to the specified host
    :param vhost: if specified, only respond to requests to the specified vhost.
                  If not specified, respond to the dispatcher's default vhost.
    :param method: methods allowed. With the POST method, arguments are extracted
                   from the form by default; with GET or HEAD, arguments are
                   extracted from the querystring (args).
    :param tostr: in Python 3, convert bytes to str before sending arguments to
                  the handler.
    :param matchargs: instead of using form or args, extract arguments from the path
                      match. matchargs is a sequence of matcher group names. If a
                      group is specified by number, the argument is used as a
                      positional argument; if specified by name (str), it is used
                      as a keyword argument.
    :param fileargs: instead of using form or args, extract the specified arguments
                     from files.
    :param queryargs: instead of using form, extract the specified arguments from
                      args. Notice that when GET is allowed, the arguments are
                      always extracted from args by default.
    :param cookieargs: instead of using form or args, extract the specified
                       arguments from cookies.
    :param sessionargs: instead of using form or args, extract the specified
                        arguments from the session. Notice that if sessionargs is
                        not empty, env.sessionstart() is called, so the
                        vlcp.service.utils.session.Session module must be loaded.
    :param csrfcheck: if True, check <csrfarg> in input arguments against <csrfarg>
                      in the session. Notice that csrfcheck=True causes
                      env.sessionstart() to be called, so the
                      vlcp.service.utils.session.Session module must be loaded.
    :param csrfarg: argument name to check, default to "_csrf"
    :param formlimit: limit on parseform, default to 64MB. None means no limit.

    For example, if using::

        async def handler(env, target, arga, argb, argc):
            ...

        dispatcher.routeargs(b'/do/(.*)', handler, matchargs=(1,), queryargs=('argc',))

    and there is an HTTP POST::

        POST /do/mytarget?argc=1 HTTP/1.1
        Host: ...
        ...

        arga=test&argb=test2

    then the handler accepts arguments: target="mytarget", arga="test",
    argb="test2", argc="1"
    '''
    code = routinemethod.__code__
    if code.co_flags & 0x08:
        haskwargs = True
    else:
        haskwargs = False
    # Remove the 'env' argument
    arguments = code.co_varnames[1:code.co_argcount]
    if hasattr(routinemethod, '__self__') and routinemethod.__self__:
        # First argument is self; remove an extra argument
        arguments = arguments[1:]
    # Optional arguments
    if hasattr(routinemethod, '__defaults__') and routinemethod.__defaults__:
        requires = arguments[:-len(routinemethod.__defaults__)]
    else:
        requires = arguments[:]
    async def handler(env):
        if tostr:
            def _str(s):
                if not isinstance(s, str):
                    return s.decode(env.encoding)
                else:
                    return s
        else:
            def _str(s):
                return s
        if tostr:
            env.argstostr()
            env.cookietostr()
        if env.method == b'POST':
            await env.parseform(formlimit, tostr)
            argfrom = env.form
        else:
            # Ignore input
            env.form = {}
            env.files = {}
            argfrom = env.args
        args = []
        kwargs = dict(argfrom)
        def discard(k):
            if k in kwargs:
                del kwargs[k]
        def extract(k, source):
            if k in source:
                kwargs[k] = source[k]
            else:
                discard(k)
        try:
            ps = 0
            for ma in matchargs:
                v = _str(env.path_match.group(ma))
                if v is not None:
                    if isinstance(ma, str):
                        kwargs[ma] = v
                    else:
                        args.append(v)
                        ps += 1
                else:
                    if isinstance(ma, str):
                        discard(ma)
                    else:
                        if ps < len(arguments):
                            discard(arguments[ps])
                        ps += 1
            for fa in fileargs:
                extract(fa, env.files)
            if env.method == b'POST':
                for qa in queryargs:
                    extract(qa, env.args)
            for ca in cookieargs:
                extract(ca, env.cookies)
            # CSRF check is done before session arguments to prevent checking
            # against the session itself
            if csrfcheck:
                if csrfarg not in kwargs:
                    raise HttpInputException('CSRF check failed')
                await env.sessionstart()
                if env.session.vars[csrfarg] != kwargs[csrfarg]:
                    raise HttpInputException('CSRF check failed')
            if sessionargs:
                await env.sessionstart()
                for sa in sessionargs:
                    extract(sa, env.session.vars)
            # Check required arguments
            for k in requires[ps:]:
                if k not in kwargs:
                    raise HttpInputException('Argument "' + k + '" is required')
            # Remove positional arguments
            for k in requires[:ps]:
                if k in kwargs:
                    del kwargs[k]
            if not haskwargs:
                # Remove extra parameters
                validargs = arguments[ps:]
                kwargs = dict((k, v) for (k, v) in kwargs.items() if k in validargs)
            r = routinemethod(env, *args, **kwargs)
        except KeyError as exc:
            raise HttpInputException('Missing argument: ' + str(exc))
        except Exception as exc:
            raise HttpInputException(str(exc))
        if r:
            return await r
    self.route(path, handler, container, host, vhost, method)
Convenient way to route a processor with arguments. Automatically parses arguments and passes them to the corresponding handler arguments. If a required argument is missing, an HttpInputException is raised, which creates a 400 Bad Request response. If optional arguments are missing, they are replaced with default values just as a normal Python call would do. If the handler accepts keyword arguments, extra arguments are passed through kwargs; if not, they are safely ignored.

:param path: path to match, can be a regular expression

:param routinemethod: factory function routinemethod(env, arga, argb, argc...). env is an Environment object. Form or querystring arguments 'arga', 'argb', 'argc' are passed to arga, argb, argc.

:param container: routine container

:param host: if specified, only respond to requests for the specified host

:param vhost: if specified, only respond to requests on the specified vhost. If not specified, respond on the dispatcher's default vhost.

:param method: methods allowed. With the POST method, arguments are extracted from the form by default; with the GET or HEAD method, arguments are extracted from the querystring (args).

:param tostr: in Python 3, convert bytes to str before sending arguments to the handler.

:param matchargs: instead of using form or args, extract arguments from the path match. matchargs is a sequence of matcher group names. If a group is specified by number, the argument is used as a positional argument; if specified by name (str), the argument is used as a keyword argument.

:param fileargs: instead of using form or args, extract the specified arguments from files.

:param queryargs: instead of using form, extract the specified arguments from args. Notice that when GET is allowed, arguments are always extracted from args by default.

:param cookieargs: instead of using form or args, extract the specified arguments from cookies.

:param sessionargs: instead of using form or args, extract the specified arguments from the session. Notice that if sessionargs is not empty, env.sessionstart() is called, so the vlcp.service.utils.session.Session module must be loaded.

:param csrfcheck: if True, check <csrfarg> in the input arguments against <csrfarg> in the session. Notice that csrfcheck=True causes env.sessionstart() to be called, so the vlcp.service.utils.session.Session module must be loaded.

:param csrfarg: argument name to check, defaults to "_csrf"

:param formlimit: limit on parseform, defaults to 64MB. None means no limit.

For example, if using::

    async def handler(env, target, arga, argb, argc):
        ...

    dispatcher.routeargs(b'/do/(.*)', handler, matchargs=(1,), queryargs=('argc',))

and there is an HTTP POST::

    POST /do/mytarget?argc=1 HTTP/1.1
    Host: ...
    ...

    arga=test&argb=test2

then the handler accepts arguments: target="mytarget", arga="test", argb="test2", argc="1"
entailment
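A hedged usage sketch for `routeargs`, restating the docstring example with explicit argument sources. Note that single-element sequences such as `queryargs` must be written as tuples: `('argc',)`, not `('argc')`, which is just a string and would be iterated character by character. The handler body and names are illustrative only:

# target comes from path match group 1 (positional), argc from the query
# string, arga/argb from the POST form; argc has a default, so it is optional.
async def handler(env, target, arga, argb, argc='0'):
    ...  # process the extracted arguments

dispatcher.routeargs(b'/do/(.*)', handler, container,
                     matchargs=(1,), queryargs=('argc',))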
def expand(cls, match, expand):
    """
    If `expand` is used directly, the url-decoded content will be decoded again,
    which creates a security issue. Hack `expand` to quote the text before expanding.
    """
    return re._expand(match.re, cls._EncodedMatch(match), expand)
If `expand` is used directly, the url-decoded content will be decoded again, which creates a security issue. Hack `expand` to quote the text before expanding.
entailment
def rewrite(self, path, expand, newmethod = None, host = None, vhost = None, method = [b'GET', b'HEAD'], keepquery = True):
    "Automatically rewrite a request to another location"
    async def func(env):
        newpath = self.expand(env.path_match, expand)
        if keepquery and getattr(env, 'querystring', None):
            if b'?' in newpath:
                newpath += b'&' + env.querystring
            else:
                newpath += b'?' + env.querystring
        await env.rewrite(newpath, newmethod)
    # pass host/vhost/method through so the rewrite rule honors them
    self.route(path, func, None, host, vhost, method)
Automatically rewrite a request to another location
entailment
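A hedged sketch of `rewrite`: since `expand` uses `re._expand` semantics, group references like `\1` carry matched path segments into the new location. The paths below are hypothetical:

# Internally forward /old/<rest> to /new/<rest>; the query string is kept
# because keepquery defaults to True. Note the escaped backslash in bytes.
dispatcher.rewrite(b'/old/(.*)', b'/new/\\1')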
def routeargs(path, host = None, vhost = None, method = [b'POST'], **kwargs): "For extra arguments, see Dispatcher.routeargs. They must be specified by keyword arguments" def decorator(func): func.routemode = 'routeargs' func.route_path = path func.route_host = host func.route_vhost = vhost func.route_method = method func.route_kwargs = kwargs return func return decorator
For extra arguments, see Dispatcher.routeargs. They must be specified by keyword arguments
entailment
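A hedged sketch of the decorator form: it only attaches routing metadata (`routemode`, `route_path`, ...) to the function; some collection step, assumed and not shown here, must later read these attributes and call `Dispatcher.routeargs`:

# Metadata-only decoration; extra keyword arguments are forwarded to
# Dispatcher.routeargs when the function is eventually registered.
@routeargs(b'/api/update', csrfcheck=True, queryargs=('verbose',))
async def update(env, name, value, verbose='0'):
    ...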
def _close_generator(g):
    """
    PyPy 3 generators have a bug where calling `close` causes a memory leak.
    Until it is fixed, use `throw` instead.
    """
    if isinstance(g, generatorwrapper):
        g.close()
    elif _get_frame(g) is not None:
        try:
            g.throw(GeneratorExit_)
        except (StopIteration, GeneratorExit_):
            return
        else:
            raise RuntimeError("coroutine ignored GeneratorExit")
PyPy 3 generators have a bug where calling `close` causes a memory leak. Until it is fixed, use `throw` instead.
entailment
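A runnable pure-Python illustration of why `throw(GeneratorExit)` is an acceptable stand-in for `close()` here: a well-behaved generator reacts identically to both, so the workaround does not change observable behavior:

def gen():
    try:
        yield 1
    finally:
        print('cleanup runs either way')

g = gen()
next(g)
try:
    # equivalent to g.close() for a generator that does not swallow the exception
    g.throw(GeneratorExit)
except (StopIteration, GeneratorExit):
    pass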
def Routine(coroutine, scheduler, asyncStart = True, container = None, manualStart = False, daemon = False):
    """
    This wraps a normal coroutine to become a VLCP routine. Usually you do not need
    to call this yourself; `container.start` and `container.subroutine` call this
    automatically.
    """
    def run():
        iterator = _await(coroutine)
        iterself = yield
        if manualStart:
            yield
        try:
            if asyncStart:
                scheduler.yield_(iterself)
                yield
            if container is not None:
                container.currentroutine = iterself
            if daemon:
                scheduler.setDaemon(iterself, True)
            try:
                matchers = next(iterator)
            except StopIteration:
                return
            while matchers is None:
                scheduler.yield_(iterself)
                yield
                try:
                    matchers = next(iterator)
                except StopIteration:
                    return
            try:
                scheduler.register(matchers, iterself)
            except Exception:
                try:
                    iterator.throw(IllegalMatchersException(matchers))
                except StopIteration:
                    pass
                raise
            while True:
                try:
                    etup = yield
                except GeneratorExit_:
                    raise
                except:
                    #scheduler.unregister(matchers, iterself)
                    lmatchers = matchers
                    t, v, tr = sys.exc_info()  # @UnusedVariable
                    if container is not None:
                        container.currentroutine = iterself
                    try:
                        matchers = iterator.throw(t, v)
                    except StopIteration:
                        return
                else:
                    #scheduler.unregister(matchers, iterself)
                    lmatchers = matchers
                    if container is not None:
                        container.currentroutine = iterself
                    try:
                        matchers = iterator.send(etup)
                    except StopIteration:
                        return
                while matchers is None:
                    scheduler.yield_(iterself)
                    yield
                    try:
                        matchers = next(iterator)
                    except StopIteration:
                        return
                try:
                    if hasattr(matchers, 'two_way_difference'):
                        reg, unreg = matchers.two_way_difference(lmatchers)
                    else:
                        reg = set(matchers).difference(lmatchers)
                        unreg = set(lmatchers).difference(matchers)
                    scheduler.register(reg, iterself)
                    scheduler.unregister(unreg, iterself)
                except Exception:
                    try:
                        iterator.throw(IllegalMatchersException(matchers))
                    except StopIteration:
                        pass
                    raise
        finally:
            # iterator.close() can be called from other routines; restore the currentroutine variable
            if container is not None:
                lastcurrentroutine = getattr(container, 'currentroutine', None)
                container.currentroutine = iterself
            else:
                lastcurrentroutine = None
            _close_generator(coroutine)
            if container is not None:
                container.currentroutine = lastcurrentroutine
            scheduler.unregisterall(iterself)
    r = generatorwrapper(run())
    next(r)
    r.send(r)
    return r
This wraps a normal coroutine to become a VLCP routine. Usually you do not need to call this yourself; `container.start` and `container.subroutine` call this automatically.
entailment
def registerHandler(self, matcher, handler):
    '''
    Bind `handler` to `matcher` and register self to the scheduler
    '''
    self.handlers[matcher] = handler
    self.scheduler.register((matcher,), self)
    self._setDaemon()
Bind `handler` to `matcher` and register self to the scheduler
entailment
def registerAllHandlers(self, handlerDict):
    '''
    Bind all (matcher, handler) pairs in `handlerDict` and register self to the scheduler
    '''
    self.handlers.update(handlerDict)
    if hasattr(handlerDict, 'keys'):
        self.scheduler.register(handlerDict.keys(), self)
    else:
        self.scheduler.register(tuple(h[0] for h in handlerDict), self)
    self._setDaemon()
Bind all (matcher, handler) pairs in `handlerDict` and register self to the scheduler
entailment
def start(self, asyncStart = False):
    """
    Start `container.main` as the main routine.

    :param asyncStart: if True, start the routine in the background. By default, the routine
                       starts in the foreground, meaning it is executed up to the first `yield`
                       statement before `start` returns. If the started routine raises an
                       exception, the exception is re-raised to the caller of `start`.
    """
    r = Routine(self.main(), self.scheduler, asyncStart, self, True, self.daemon)
    self.mainroutine = r
    try:
        next(r)
    except StopIteration:
        pass
    return r
Start `container.main` as the main routine.

:param asyncStart: if True, start the routine in the background. By default, the routine starts in the foreground, meaning it is executed up to the first `yield` statement before `start` returns. If the started routine raises an exception, the exception is re-raised to the caller of `start`.
entailment
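A hedged end-to-end sketch. The import paths and the ability to construct a bare `Scheduler` without a polling object are assumptions based on typical VLCP usage, not shown in the code above:

from vlcp.event.core import Scheduler
from vlcp.event.runnable import RoutineContainer

class MyContainer(RoutineContainer):
    async def main(self):
        print('first step runs before start() returns')

scheduler = Scheduler()
c = MyContainer(scheduler)
c.start()          # foreground start: executes main() to its first yield
scheduler.main()   # then enter the event loop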
def subroutine(self, iterator, asyncStart = True, name = None, daemon = False):
    """
    Start an extra routine in this container.

    :param iterator: a coroutine object, i.e. the return value of an async method `my_routine()`

    :param asyncStart: if False, start the routine in the foreground. By default, the routine
                       starts in the background, meaning it is not executed until the current
                       caller reaches the next `yield` statement or quits.

    :param name: if not None, `container.<name>` is set to the routine object. This is useful
                 when you want to terminate the routine from outside.

    :param daemon: if True, this routine is set to be a daemon routine. A daemon routine does
                   not stop the scheduler from quitting; when all non-daemon routines have quit,
                   the scheduler stops.
    """
    r = Routine(iterator, self.scheduler, asyncStart, self, True, daemon)
    if name is not None:
        setattr(self, name, r)
    # Calling a subroutine may change currentroutine; restore it afterwards
    currentroutine = getattr(self, 'currentroutine', None)
    try:
        next(r)
    except StopIteration:
        pass
    self.currentroutine = currentroutine
    return r
Start an extra routine in this container.

:param iterator: a coroutine object, i.e. the return value of an async method `my_routine()`

:param asyncStart: if False, start the routine in the foreground. By default, the routine starts in the background, meaning it is not executed until the current caller reaches the next `yield` statement or quits.

:param name: if not None, `container.<name>` is set to the routine object. This is useful when you want to terminate the routine from outside.

:param daemon: if True, this routine is set to be a daemon routine. A daemon routine does not stop the scheduler from quitting; when all non-daemon routines have quit, the scheduler stops.
entailment
async def wait_for_send(self, event, *, until=None):
    '''
    Send an event to the main event queue. Can be called without delegate.

    :param until: a callable; if it returns a true value, stop sending and return that value

    :return: the true value returned by `until`, or None if the event was sent
    '''
    while True:
        if until:
            r = until()
            if r:
                return r
        waiter = self.scheduler.send(event)
        if waiter is None:
            break
        await waiter
Send an event to the main event queue. Can be called without delegate.

:param until: a callable; if it returns a true value, stop sending and return that value

:return: the true value returned by `until`, or None if the event was sent
entailment
async def wait_with_timeout(self, timeout, *matchers): """ Wait for multiple event matchers, or until timeout. :param timeout: a timeout value :param \*matchers: event matchers :return: (is_timeout, event, matcher). When is_timeout = True, event = matcher = None. """ if timeout is None: ev, m = await M_(*matchers) return False, ev, m else: th = self.scheduler.setTimer(timeout) try: tm = TimerEvent.createMatcher(th) ev, m = await M_(*(tuple(matchers) + (tm,))) if m is tm: return True, None, None else: return False, ev, m finally: self.scheduler.cancelTimer(th)
Wait for multiple event matchers, or until timeout. :param timeout: a timeout value :param \*matchers: event matchers :return: (is_timeout, event, matcher). When is_timeout = True, event = matcher = None.
entailment
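A hedged sketch inside a routine method; the matcher construction is assumed (any event matcher works):

async def main(self):
    # wait at most 5 seconds for my_matcher; on timeout ev and m are None
    is_timeout, ev, m = await self.wait_with_timeout(5, my_matcher)
    if is_timeout:
        print('no event within 5 seconds')
    else:
        print('got', ev)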
async def execute_with_timeout(self, timeout, subprocess):
    """
    Execute a subprocess with a time limit. If the limit is exceeded, the subprocess is
    terminated and `is_timeout` is set to True; otherwise `is_timeout` is set to False.

    You can combine `execute_with_timeout` with other helper functions to set a time limit
    for them::

        timeout, result = await container.execute_with_timeout(10, container.execute_all([routine1(), routine2()]))

    :return: (is_timeout, result). When is_timeout = True, result = None.
    """
    if timeout is None:
        return (False, await subprocess)
    else:
        th = self.scheduler.setTimer(timeout)
        try:
            tm = TimerEvent.createMatcher(th)
            try:
                r = await self.with_exception(subprocess, tm)
            except RoutineException as exc:
                if exc.matcher is tm:
                    return True, None
                else:
                    raise
            else:
                return False, r
        finally:
            self.scheduler.cancelTimer(th)
Execute a subprocess with a time limit. If the limit is exceeded, the subprocess is terminated and `is_timeout` is set to True; otherwise `is_timeout` is set to False.

You can combine `execute_with_timeout` with other helper functions to set a time limit for them::

    timeout, result = await container.execute_with_timeout(10, container.execute_all([routine1(), routine2()]))

:return: (is_timeout, result). When is_timeout = True, result = None.
entailment
async def with_exception(self, subprocess, *matchers):
    """
    Monitor event matchers while executing a subprocess. If an event is matched before
    the subprocess ends, the subprocess is terminated and a RoutineException is raised.
    """
    def _callback(event, matcher):
        raise RoutineException(matcher, event)
    return await self.with_callback(subprocess, _callback, *matchers)
Monitor event matchers while executing a subprocess. If an event is matched before the subprocess ends, the subprocess is terminated and a RoutineException is raised.
entailment
def with_callback(self, subprocess, callback, *matchers, intercept_callback = None):
    """
    Monitor event matchers while executing a subprocess. `callback(event, matcher)` is
    called each time an event is matched by any of the event matchers. If the callback
    raises an exception, the subprocess is terminated.

    :param intercept_callback: a callback called before an event is delegated to the inner subprocess
    """
    it_ = _await(subprocess)
    if not matchers and not intercept_callback:
        return (yield from it_)
    try:
        try:
            m = next(it_)
        except StopIteration as e:
            return e.value
        while True:
            if m is None:
                try:
                    yield
                except GeneratorExit_:
                    raise
                except:
                    t, v, tr = sys.exc_info()  # @UnusedVariable
                    try:
                        m = it_.throw(t, v)
                    except StopIteration as e:
                        return e.value
                else:
                    try:
                        m = next(it_)
                    except StopIteration as e:
                        return e.value
            else:
                while True:
                    try:
                        ev, matcher = yield m + tuple(matchers)
                    except GeneratorExit_:
                        # subprocess is closed in the `finally` clause
                        raise
                    except:
                        # delegate this exception inside
                        t, v, tr = sys.exc_info()  # @UnusedVariable
                        try:
                            m = it_.throw(t, v)
                        except StopIteration as e:
                            return e.value
                    else:
                        if matcher in matchers:
                            callback(ev, matcher)
                        else:
                            if intercept_callback:
                                intercept_callback(ev, matcher)
                            break
                try:
                    m = it_.send((ev, matcher))
                except StopIteration as e:
                    return e.value
    finally:
        _close_generator(subprocess)
Monitor event matchers while executing a subprocess. `callback(event, matcher)` is called each time an event is matched by any of the event matchers. If the callback raises an exception, the subprocess is terminated.

:param intercept_callback: a callback called before an event is delegated to the inner subprocess
entailment
async def wait_for_all(self, *matchers, eventlist = None, eventdict = None, callback = None):
    """
    Wait until each matcher matches an event. When this coroutine method returns,
    `eventlist` is set to the list of events in arrival order (which may differ from
    the order of the matchers); `eventdict` is set to a dictionary
    `{matcher1: event1, matcher2: event2, ...}`

    :param eventlist: use an external event list, so when an exception occurs
                      (e.g. routine close), you can retrieve the result from the passed-in list

    :param eventdict: use an external event dict

    :param callback: if not None, the callback should be a callable callback(event, matcher)
                     which is called each time an event is received

    :return: (eventlist, eventdict)
    """
    if eventdict is None:
        eventdict = {}
    if eventlist is None:
        eventlist = []
    ms = len(matchers)
    last_matchers = Diff_(matchers)
    while ms:
        ev, m = await last_matchers
        ms -= 1
        if callback:
            callback(ev, m)
        eventlist.append(ev)
        eventdict[m] = ev
        last_matchers = Diff_(last_matchers, remove=(m,))
    return eventlist, eventdict
Wait until each matcher matches an event. When this coroutine method returns, `eventlist` is set to the list of events in arrival order (which may differ from the order of the matchers); `eventdict` is set to a dictionary `{matcher1: event1, matcher2: event2, ...}`

:param eventlist: use an external event list, so when an exception occurs (e.g. routine close), you can retrieve the result from the passed-in list

:param eventdict: use an external event dict

:param callback: if not None, the callback should be a callable callback(event, matcher) which is called each time an event is received

:return: (eventlist, eventdict)
entailment
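A hedged sketch: collect one event per matcher, then read the results per matcher from `eventdict` (matcher names are hypothetical):

async def main(self):
    eventlist, eventdict = await self.wait_for_all(matcher_a, matcher_b)
    # eventlist is in arrival order; eventdict maps each matcher to its event
    ev_a = eventdict[matcher_a]
    ev_b = eventdict[matcher_b]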
async def wait_for_all_to_process(self, *matchers, eventlist = None, eventdict = None, callback = None):
    """
    Similar to `wait_for_all`, but set `canignore=True` on the received events.
    This ensures blocking events are processed correctly.
    """
    def _callback(event, matcher):
        event.canignore = True
        if callback:
            callback(event, matcher)
    return await self.wait_for_all(*matchers, eventlist=eventlist, eventdict=eventdict, callback=_callback)
Similar to `wait_for_all`, but set `canignore=True` on the received events. This ensures blocking events are processed correctly.
entailment
async def wait_for_all_empty(self, *queues):
    """
    Wait for multiple queues to be empty at the same time.

    Requires delegate when called from coroutines running in other containers
    """
    matchers = [m for m in (q.waitForEmpty() for q in queues) if m is not None]
    while matchers:
        await self.wait_for_all(*matchers)
        matchers = [m for m in (q.waitForEmpty() for q in queues) if m is not None]
Wait for multiple queues to be empty at the same time.

Requires delegate when called from coroutines running in other containers
entailment
def syscall_noreturn(self, func):
    '''
    Call a syscall method. A syscall method is executed outside of any routine, directly
    in the scheduler loop, which gives it the chance to operate the event loop directly.
    See :py:meth:`vlcp.event.core.Scheduler.syscall`.
    '''
    matcher = self.scheduler.syscall(func)
    while not matcher:
        yield
        matcher = self.scheduler.syscall(func)
    ev, _ = yield (matcher,)
    return ev
Call a syscall method. A syscall method is executed outside of any routine, directly in the scheduler loop, which gives it the chance to operate the event loop directly. See :py:meth:`vlcp.event.core.Scheduler.syscall`.
entailment
async def syscall(self, func, ignoreException = False): """ Call a syscall method and retrieve its return value """ ev = await self.syscall_noreturn(func) if hasattr(ev, 'exception'): if ignoreException: return else: raise ev.exception[1] else: return ev.retvalue
Call a syscall method and retrieve its return value
entailment
async def delegate(self, subprocess, forceclose = False):
    '''
    Run a subprocess without container support.

    Many subprocesses assume they are running in a specific container and use container
    references like `self.events`; calling such a subprocess in another container will fail.
    With delegate, you can call a subprocess from any container (or without a container)::

        r = await c.delegate(c.someprocess())

    :return: original return value
    '''
    finish, r = self.begin_delegate(subprocess)
    return await self.end_delegate(finish, r, forceclose)
Run a subprocess without container support.

Many subprocesses assume they are running in a specific container and use container references like `self.events`; calling such a subprocess in another container will fail. With delegate, you can call a subprocess from any container (or without a container)::

    r = await c.delegate(c.someprocess())

:return: original return value
entailment
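A hedged sketch: run a coroutine that belongs to another container from the current routine (`other` and `someprocess` are illustrative names):

async def main(self):
    # someprocess() uses references bound to `other` internally, so it
    # must be delegated instead of awaited directly from this container
    result = await self.delegate(other.someprocess())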
async def end_delegate(self, delegate_matcher, routine = None, forceclose = False): """ Retrieve a begin_delegate result. Must be called immediately after begin_delegate before any other `await`, or the result might be lost. Do not use this method without thinking. Always use `RoutineFuture` when possible. """ try: ev = await delegate_matcher if hasattr(ev, 'exception'): raise ev.exception else: return ev.result finally: if forceclose and routine: routine.close()
Retrieve a begin_delegate result. Must be called immediately after begin_delegate before any other `await`, or the result might be lost. Do not use this method without thinking. Always use `RoutineFuture` when possible.
entailment
def begin_delegate(self, subprocess):
    '''
    Start the delegate routine, but do not wait for the result; instead, return a
    (matcher, routine) tuple. Useful for advanced delegates (e.g. delegating multiple
    subprocesses at the same time). This is NOT a coroutine method.

    WARNING: this is not a safe way of asynchronously executing a subprocess and
    retrieving its result. Use `RoutineFuture` instead.

    :param subprocess: a coroutine

    :returns: (matcher, routine) where matcher is an event matcher to get the delegate
              result, and routine is the created routine
    '''
    async def delegateroutine():
        try:
            r = await subprocess
        except:
            _, val, _ = sys.exc_info()
            e = RoutineControlEvent(RoutineControlEvent.DELEGATE_FINISHED,
                                    self.currentroutine, exception=val)
            self.scheduler.emergesend(e)
            raise
        else:
            e = RoutineControlEvent(RoutineControlEvent.DELEGATE_FINISHED,
                                    self.currentroutine, result = r)
            await self.wait_for_send(e)
    r = self.subroutine(generatorwrapper(delegateroutine(), 'subprocess', 'delegate'), True)
    finish = RoutineControlEvent.createMatcher(RoutineControlEvent.DELEGATE_FINISHED, r)
    return finish, r
Start the delegate routine, but do not wait for the result; instead, return a (matcher, routine) tuple. Useful for advanced delegates (e.g. delegating multiple subprocesses at the same time). This is NOT a coroutine method.

WARNING: this is not a safe way of asynchronously executing a subprocess and retrieving its result. Use `RoutineFuture` instead.

:param subprocess: a coroutine

:returns: (matcher, routine) where matcher is an event matcher to get the delegate result, and routine is the created routine
entailment
def begin_delegate_other(self, subprocess, container, retnames = ('',)):
    '''
    DEPRECATED Start the delegate routine, but do not wait for the result; instead, return
    a (matcher, routine) tuple. Useful for advanced delegates (e.g. delegating multiple
    subprocesses at the same time). This is NOT a coroutine method.

    :param subprocess: a coroutine

    :param container: container in which to start the routine

    :param retnames: get return values from these keys. '' for the return value
                     (for compatibility with earlier versions)

    :returns: (matcher, routine) where matcher is an event matcher to get the delegate
              result, and routine is the created routine
    '''
    async def delegateroutine():
        try:
            r = await subprocess
        except:
            _, val, _ = sys.exc_info()
            e = RoutineControlEvent(RoutineControlEvent.DELEGATE_FINISHED,
                                    container.currentroutine, exception = val)
            container.scheduler.emergesend(e)
            raise
        else:
            e = RoutineControlEvent(RoutineControlEvent.DELEGATE_FINISHED,
                                    container.currentroutine,
                                    result = tuple(r if n == '' else getattr(container, n, None)
                                                   for n in retnames))
            await container.wait_for_send(e)
    r = container.subroutine(generatorwrapper(delegateroutine(), 'subprocess', 'delegate'), True)
    return (RoutineControlEvent.createMatcher(RoutineControlEvent.DELEGATE_FINISHED, r), r)
DEPRECATED Start the delegate routine, but do not wait for the result; instead, return a (matcher, routine) tuple. Useful for advanced delegates (e.g. delegating multiple subprocesses at the same time). This is NOT a coroutine method.

:param subprocess: a coroutine

:param container: container in which to start the routine

:param retnames: get return values from these keys. '' for the return value (for compatibility with earlier versions)

:returns: (matcher, routine) where matcher is an event matcher to get the delegate result, and routine is the created routine
entailment
async def delegate_other(self, subprocess, container, retnames = ('',), forceclose = False):
    '''
    DEPRECATED Another form of delegate that allows delegating a subprocess to another
    container and retrieving some return values; the subprocess actually runs in `container`.

    ::

        ret = await self.delegate_other(c.method(), c)

    :return: a tuple of the retnames values
    '''
    finish, r = self.begin_delegate_other(subprocess, container, retnames)
    return await self.end_delegate(finish, r, forceclose)
DEPRECATED Another form of delegate that allows delegating a subprocess to another container and retrieving some return values; the subprocess actually runs in `container`.

::

    ret = await self.delegate_other(c.method(), c)

:return: a tuple of the retnames values
entailment
async def execute_all_with_names(self, subprocesses, container = None, retnames = ('',), forceclose = True):
    '''
    DEPRECATED Execute all subprocesses and get the return values.

    :param subprocesses: sequence of subroutines (coroutines)

    :param container: if specified, run the subprocesses in another container.

    :param retnames: DEPRECATED get the return value from container.(name) for each name
                     in retnames. '' for the return value (to be compatible with earlier versions)

    :param forceclose: force-close the routines on exit, so all the subprocesses are
                       terminated on timeout when used with `execute_with_timeout`

    :returns: a list of tuples, one for each subprocess, with the values of retnames inside:
              `[('retvalue1',),('retvalue2',),...]`
    '''
    if not subprocesses:
        return []
    subprocesses = list(subprocesses)
    if len(subprocesses) == 1 and (container is None or container is self) and forceclose:
        # Directly run the process to improve performance
        return [await subprocesses[0]]
    if container is None:
        container = self
    delegates = [self.begin_delegate_other(p, container, retnames) for p in subprocesses]
    matchers = [d[0] for d in delegates]
    try:
        _, eventdict = await self.wait_for_all(*matchers)
        events = [eventdict[m] for m in matchers]
        exceptions = [e.exception for e in events if hasattr(e, 'exception')]
        if exceptions:
            if len(exceptions) == 1:
                raise exceptions[0]
            else:
                raise MultipleException(exceptions)
        return [e.result for e in events]
    finally:
        if forceclose:
            for d in delegates:
                try:
                    container.terminate(d[1])
                except Exception:
                    pass
DEPRECATED Execute all subprocesses and get the return values.

:param subprocesses: sequence of subroutines (coroutines)

:param container: if specified, run the subprocesses in another container.

:param retnames: DEPRECATED get the return value from container.(name) for each name in retnames. '' for the return value (to be compatible with earlier versions)

:param forceclose: force-close the routines on exit, so all the subprocesses are terminated on timeout when used with `execute_with_timeout`

:returns: a list of tuples, one for each subprocess, with the values of retnames inside: `[('retvalue1',),('retvalue2',),...]`
entailment
async def execute_all(self, subprocesses, forceclose=True):
    '''
    Execute all subprocesses and get the return values.

    :param subprocesses: sequence of subroutines (coroutines)

    :param forceclose: force-close the routines on exit, so all the subprocesses are
                       terminated on timeout when used with `execute_with_timeout`

    :returns: a list of return values, one for each subprocess
    '''
    if not subprocesses:
        return []
    subprocesses = list(subprocesses)
    if len(subprocesses) == 1 and forceclose:
        return [await subprocesses[0]]
    delegates = [self.begin_delegate(p) for p in subprocesses]
    matchers = [d[0] for d in delegates]
    try:
        _, eventdict = await self.wait_for_all(*matchers)
        events = [eventdict[m] for m in matchers]
        exceptions = [e.exception for e in events if hasattr(e, 'exception')]
        if exceptions:
            if len(exceptions) == 1:
                raise exceptions[0]
            else:
                raise MultipleException(exceptions)
        return [e.result for e in events]
    finally:
        if forceclose:
            for d in delegates:
                try:
                    d[1].close()
                except Exception:
                    pass
Execute all subprocesses and get the return values.

:param subprocesses: sequence of subroutines (coroutines)

:param forceclose: force-close the routines on exit, so all the subprocesses are terminated on timeout when used with `execute_with_timeout`

:returns: a list of return values, one for each subprocess
entailment
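A hedged sketch combining `execute_all` with `execute_with_timeout`, as the docstrings above suggest (`routine1`/`routine2` are illustrative coroutine factories):

async def main(self):
    # forceclose=True (the default) ensures both subprocesses are
    # terminated if the 10-second limit is hit
    is_timeout, result = await self.execute_with_timeout(
        10, self.execute_all([routine1(), routine2()]))
    if not is_timeout:
        r1, r2 = result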
def get_container(cls, scheduler): """ Create temporary instance for helper functions """ if scheduler in cls._container_cache: return cls._container_cache[scheduler] else: c = cls(scheduler) cls._container_cache[scheduler] = c return c
Create temporary instance for helper functions
entailment
def depend(*args):
    """
    Decorator to declare dependencies to other modules. Recommended usage is::

        import other_module

        @depend(other_module.ModuleClass)
        class MyModule(Module):
            ...

    :param \*args: depended module classes.
    """
    def decfunc(cls):
        if 'depends' not in cls.__dict__:
            cls.depends = []
        cls.depends.extend(list(args))
        for a in args:
            if not hasattr(a, 'referencedBy'):
                a.referencedBy = []
            a.referencedBy.append(cls)
        return cls
    return decfunc
Decorator to declare dependencies to other modules. Recommended usage is:: import other_module @depend(other_module.ModuleClass) class MyModule(Module): ... :param \*args: depended module classes.
entailment
def api(func, container = None, criteria = None):
    '''
    Return an API definition for a generic function

    :param func: a function or bound method

    :param container: if None, the function is used as a synchronous method: its return value
                      is used as the API return value. If not None, the function is used as an
                      asynchronous method: the return value should be a generator, and it is
                      executed in `container` as a routine. The result should be set to
                      `container.retvalue`.

    :param criteria: an extra function used to test whether this function should process the
                     API. This allows multiple API definitions to share the same API method name.
    '''
    return (func.__name__.lower(), functools.update_wrapper(lambda n,p: func(**p), func),
            container, create_discover_info(func), criteria)
Return an API definition for a generic function

:param func: a function or bound method

:param container: if None, the function is used as a synchronous method: its return value is used as the API return value. If not None, the function is used as an asynchronous method: the return value should be a generator, and it is executed in `container` as a routine. The result should be set to `container.retvalue`.

:param criteria: an extra function used to test whether this function should process the API. This allows multiple API definitions to share the same API method name.
entailment
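A hedged sketch of building API definitions with `api()`. The `self.createAPI` call and the `self.apiroutine` container attribute are assumptions based on common VLCP module layout, not shown in the code above:

# method1 is synchronous: its return value is the API result.
# method2 is asynchronous: it runs as a routine in self.apiroutine.
self.createAPI(
    api(self.method1),
    api(self.method2, self.apiroutine),
)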
def proxy(name, default = None): """ Create a proxy module. A proxy module has a default implementation, but can be redirected to other implementations with configurations. Other modules can depend on proxy modules. """ proxymodule = _ProxyMetaClass(name, (_ProxyModule,), {'_default': default}) proxymodule.__module__ = sys._getframe(1).f_globals.get('__name__') return proxymodule
Create a proxy module. A proxy module has a default implementation, but can be redirected to other implementations with configurations. Other modules can depend on proxy modules.
entailment
async def send_api(container, targetname, name, params = {}): """ Send API and discard the result """ handle = object() apiEvent = ModuleAPICall(handle, targetname, name, params = params) await container.wait_for_send(apiEvent)
Send API and discard the result
entailment
async def call_api(container, targetname, name, params = {}, timeout = 120.0):
    """
    Call the module API `targetname/name` with parameters.

    :param targetname: module target name. Usually the lower-cased name of the module class,
                       or 'public' for public APIs.

    :param name: method name

    :param params: module API parameters, should be a dictionary of `{parameter: value}`

    :param timeout: raise an exception if the API call does not return within this time

    :return: API return value
    """
    handle = object()
    apiEvent = ModuleAPICall(handle, targetname, name, params = params)
    await container.wait_for_send(apiEvent)
    replyMatcher = ModuleAPIReply.createMatcher(handle)
    timeout_, ev, m = await container.wait_with_timeout(timeout, replyMatcher)
    if timeout_:
        # Ignore the event if it arrives later
        apiEvent.canignore = True
        container.scheduler.ignore(ModuleAPICall.createMatcher(handle))
        raise ModuleAPICallTimeoutException('API call timeout')
    else:
        return get_api_result(ev)
Call the module API `targetname/name` with parameters.

:param targetname: module target name. Usually the lower-cased name of the module class, or 'public' for public APIs.

:param name: method name

:param params: module API parameters, should be a dictionary of `{parameter: value}`

:param timeout: raise an exception if the API call does not return within this time

:return: API return value
entailment
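A hedged sketch of calling a module API from a routine (the target and method names are hypothetical):

async def main(self):
    # invokes targetname/name with the given parameters; raises
    # ModuleAPICallTimeoutException after 120 seconds by default
    result = await call_api(self, 'mymodule', 'getstatus', {'verbose': True})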
async def batch_call_api(container, apis, timeout = 120.0): """ DEPRECATED - use execute_all instead """ apiHandles = [(object(), api) for api in apis] apiEvents = [ModuleAPICall(handle, targetname, name, params = params) for handle, (targetname, name, params) in apiHandles] apiMatchers = tuple(ModuleAPIReply.createMatcher(handle) for handle, _ in apiHandles) async def process(): for e in apiEvents: await container.wait_for_send(e) container.subroutine(process(), False) eventdict = {} async def process2(): ms = len(apiMatchers) matchers = Diff_(apiMatchers) while ms: ev, m = await matchers matchers = Diff_(matchers, remove=(m,)) eventdict[ev.handle] = ev await container.execute_with_timeout(timeout, process2()) for e in apiEvents: if e.handle not in eventdict: e.canignore = True container.scheduler.ignore(ModuleAPICall.createMatcher(e.handle)) return [eventdict.get(handle, None) for handle, _ in apiHandles]
DEPRECATED - use execute_all instead
entailment
def registerAPIs(self, apidefs):
    '''
    An API definition is in the format: `(name, handler, container, discoverinfo)`

    If the handler is a generator, a container should be specified.
    The handler should accept two arguments::

        def handler(name, params):
            ...

    `name` is the method name, `params` is a dictionary containing the parameters.
    The handler can either return the result directly, or be a generator (async-api)
    and write the result to container.retvalue on exit.

    e.g.::

        ('method1', self.method1),       # method1 directly returns the result
        ('method2', self.method2, self)  # method2 is an async-api

    Use api() to automatically generate API definitions.
    '''
    handlers = [self._createHandler(*apidef) for apidef in apidefs]
    self.handler.registerAllHandlers(handlers)
    self.discoverinfo.update((apidef[0], apidef[3] if len(apidef) > 3 else {'description': cleandoc(apidef[1].__doc__)})
                             for apidef in apidefs)
An API definition is in the format: `(name, handler, container, discoverinfo)`

If the handler is a generator, a container should be specified. The handler should accept two arguments::

    def handler(name, params):
        ...

`name` is the method name, `params` is a dictionary containing the parameters. The handler can either return the result directly, or be a generator (async-api) and write the result to container.retvalue on exit.

e.g.::

    ('method1', self.method1),       # method1 directly returns the result
    ('method2', self.method2, self)  # method2 is an async-api

Use api() to automatically generate API definitions.
entailment
def registerAPI(self, name, handler, container = None, discoverinfo = None, criteria = None): """ Append new API to this handler """ self.handler.registerHandler(*self._createHandler(name, handler, container, criteria)) if discoverinfo is None: self.discoverinfo[name] = {'description': cleandoc(handler.__doc__)} else: self.discoverinfo[name] = discoverinfo
Append new API to this handler
entailment