Columns: text (string, lengths 78 to 104k) and score (float64, range 0 to 0.18).
def ensembl_request(self, ext, headers):
    """ obtain sequence via the ensembl REST API
    """
    self.attempt += 1
    if self.attempt > 5:
        raise ValueError("too many attempts, figure out why it's failing")

    response, status, requested_headers = self.open_url(self.server + ext, headers=headers)

    # we might end up passing too many simultaneous requests, or too many
    # requests per hour, just wait until the period is finished before
    # retrying
    if status == 429:
        if "retry-after" in requested_headers:
            time.sleep(float(requested_headers["retry-after"]))
        elif "x-ratelimit-reset" in requested_headers:
            time.sleep(int(requested_headers["x-ratelimit-reset"]))
        return self.ensembl_request(ext, headers)
    # retry after 30 seconds if we get service unavailable error
    elif status in [500, 503, 504]:
        time.sleep(30)
        return self.ensembl_request(ext, headers)
    elif status != 200:
        raise ValueError("Invalid Ensembl response for {}\nheaders: {}\nresponse: {}".format(
            self.server + ext, requested_headers, response))

    # sometimes ensembl returns odd data. I don't know what it is, but the
    # json interpreter can't handle it. Rather than trying to catch it,
    # simply re-request the data
    if requested_headers["content-type"] == "application/json":
        try:
            json.loads(response)
        except ValueError:
            now = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
            logging.warning("{}\t{}\t{}\t{}".format(now, status,
                self.server + ext, "cannot obtain json output"))
            return self.ensembl_request(ext, requested_headers)

    self.cache.cache_url_data(self.server + ext, response)

    return response
0.00692
def update_metric_by_name(self, metric_name, metric_type, description=None,
                          custom_properties=None, tags=None, **kwargs):
    """
    Create or update a metric object

    Args:
        metric_name (string): name of metric
        metric_type (string): metric type, must be one of 'gauge', 'counter',
            'cumulative_counter'
        description (optional[string]): a description
        custom_properties (optional[dict]): dictionary of custom properties
        tags (optional[list of strings]): list of tags associated with metric
    """
    data = {'type': metric_type.upper(),
            'description': description or '',
            'customProperties': custom_properties or {},
            'tags': tags or []}
    resp = self._put(self._u(self._METRIC_ENDPOINT_SUFFIX, str(metric_name)),
                     data=data, **kwargs)
    resp.raise_for_status()
    return resp.json()
0.002852
def _process_args_as_rows_or_columns(self, arg, unpack=False):
    """
    We must be able to interpret the args as either a column name or
    row number, or sequences thereof.  Numpy arrays and slices are
    also fine.

    Examples:
        'field'
        35
        [35, 55, 86]
        ['f1', 'f2', ...]
    Can also be tuples or arrays.
    """
    flags = set()

    if isinstance(arg, (tuple, list, numpy.ndarray)):
        # a sequence was entered
        if isstring(arg[0]):
            result = arg
        else:
            result = arg
            flags.add('isrows')
    elif isstring(arg):
        # a single string was entered
        result = arg
    elif isinstance(arg, slice):
        if unpack:
            flags.add('isrows')
            result = self._slice2rows(arg.start, arg.stop, arg.step)
        else:
            flags.add('isrows')
            flags.add('isslice')
            result = self._process_slice(arg)
    else:
        # a single object was entered.
        # Probably should apply some more checking on this
        result = arg
        flags.add('isrows')
        if numpy.ndim(arg) == 0:
            flags.add('isscalar')

    return result, flags
0.001477
def print_page_cb(self, print_op, print_context, page_nb, keep_refs={}):
    """
    Called for printing operation by Gtk
    """
    page = ImgPage(self, page_nb)
    page.print_page_cb(print_op, print_context, keep_refs=keep_refs)
0.007937
def run_mrbayes(self, ipyclient, force=False, quiet=False):
    """
    calls the mrbayes block in each nexus file.
    """
    ## get all the nexus files for this object
    minidir = os.path.realpath(os.path.join(self.workdir, self.name))
    nexus_files = glob.glob(os.path.join(minidir, "*.nex"))

    ## clear existing files
    #existing = glob.glob(os.path.join(self.workdir, self.name, "*.nex"))
    existing = glob.glob(os.path.join(minidir, "*.nex.*"))
    if any(existing):
        if force:
            for rfile in existing:
                os.remove(rfile)
        else:
            raise IPyradWarningExit(EXISTING_NEXdot_FILES.format(minidir))

    ## write new nexus files, or should users do that before this?
    #self.write_nexus_files(force=True)

    ## load balancer
    lbview = ipyclient.load_balanced_view()

    ## submit each to be processed ('async' is a reserved word in Python 3.7+,
    ## so the result handles use a different name)
    asyncs = []
    for nex in nexus_files:
        rasync = lbview.apply(_call_mb, nex)
        asyncs.append(rasync)

    ## track progress
    start = time.time()
    printstr = "[mb] infer gene-tree posteriors | {} | "
    while 1:
        ready = [i.ready() for i in asyncs]
        elapsed = datetime.timedelta(seconds=int(time.time() - start))
        if not quiet:
            progressbar(len(ready), sum(ready), printstr.format(elapsed), spacer="")
        if len(ready) == sum(ready):
            if not quiet:
                print("")
            break
        else:
            time.sleep(0.1)

    ## check success
    for rasync in asyncs:
        if not rasync.successful():
            raise IPyradWarningExit(rasync.result())
0.011136
def _get_ownership(self, data): """Determine on which rank each subject currently resides Parameters ---------- data: list of 4D arrays with subject data Returns ------- list of ranks indicating the owner of each subject """ rank = self.comm.rank B = [(rank, idx) for (idx, c) in enumerate(data) if c is not None] C = self.comm.allreduce(B) ownership = [None] * len(data) for c in C: ownership[c[1]] = c[0] return ownership
0.003623
def add_header(self, name: str, value: _HeaderTypes) -> None:
    """Adds the given response header and value.

    Unlike `set_header`, `add_header` may be called multiple times
    to return multiple values for the same header.
    """
    self._headers.add(name, self._convert_header_value(value))
0.00625
def get_media_formats(self, media_id): """CR doesn't seem to provide the video_format and video_quality params through any of the APIs so we have to scrape the video page """ url = (SCRAPER.API_URL + 'media-' + media_id).format( protocol=SCRAPER.PROTOCOL_INSECURE) format_pattern = re.compile(SCRAPER.VIDEO.FORMAT_PATTERN) formats = {} for format, param in iteritems(SCRAPER.VIDEO.FORMAT_PARAMS): resp = self._connector.get(url, params={param: '1'}) if not resp.ok: continue try: match = format_pattern.search(resp.content) except TypeError: match = format_pattern.search(resp.text) if match: formats[format] = (int(match.group(1)), int(match.group(2))) return formats
0.002301
def _make_crl_distribution_points(self, name, value): """ Constructs an asn1crypto.x509.CRLDistributionPoints object :param name: A unicode string of the attribute name to use in exceptions :param value: Either a unicode string of a URL, or a 2-element tuple of a unicode string of a URL, plus an asn1crypto.x509.Certificate object that will be signing the CRL (for indirect CRLs). :return: None or an asn1crypto.x509.CRLDistributionPoints object """ if value is None: return None is_tuple = isinstance(value, tuple) if not is_tuple and not isinstance(value, str_cls): raise TypeError(_pretty_message( ''' %s must be a unicode string or tuple of (unicode string, asn1crypto.x509.Certificate), not %s ''', name, _type_name(value) )) issuer = None if is_tuple: if len(value) != 2: raise ValueError(_pretty_message( ''' %s must be a unicode string or 2-element tuple, not a %s-element tuple ''', name, len(value) )) if not isinstance(value[0], str_cls) or not isinstance(value[1], x509.Certificate): raise TypeError(_pretty_message( ''' %s must be a tuple of (unicode string, ans1crypto.x509.Certificate), not (%s, %s) ''', name, _type_name(value[0]), _type_name(value[1]) )) url = value[0] issuer = value[1].subject else: url = value general_names = x509.GeneralNames([ x509.GeneralName( name='uniform_resource_identifier', value=url ) ]) distribution_point_name = x509.DistributionPointName( name='full_name', value=general_names ) distribution_point = x509.DistributionPoint({ 'distribution_point': distribution_point_name }) if issuer: distribution_point['crl_issuer'] = x509.GeneralNames([ x509.GeneralName(name='directory_name', value=issuer) ]) return x509.CRLDistributionPoints([distribution_point])
0.001162
def portrait_image(model, request):
    """XXX: needs polishing. Return configured default portrait if not set
    on user.
    """
    response = Response()
    cfg = ugm_general(model)
    response.body = model.attrs[cfg.attrs['users_portrait_attr']]
    response.headers['Content-Type'] = 'image/jpeg'
    response.headers['Cache-Control'] = 'max-age=0'
    return response
0.00266
async def connect(self, loop, port, baudrate=9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, connection_timeout=10, inactivity_timeout=5): """ Connect to Opentherm Gateway at @port. Initialize the parameters obtained from the PS= and PR= commands and returns the status dict with the obtained values. If called while connected, reconnect to the gateway. This method is a coroutine """ if self._connected: # We are actually reconnecting, cleanup first. _LOGGER.debug("Reconnecting to serial device on %s", port) if self._gpio_task: self._gpio_task.cancel() self._connected = False self._transport.close() await asyncio.sleep(3) self.loop = loop transport = None while transport is None: try: transport, protocol = ( await serial_asyncio.create_serial_connection( loop, otgw.protocol, port, baudrate, bytesize, parity, stopbits, connection_timeout)) except serial.serialutil.SerialException as e: if not self._conn_error: _LOGGER.error( "Could not connect to serial device on %s. " "Will keep trying. Reported error was: %s", port, e) self._conn_error = True transport = None await asyncio.sleep(5) self._conn_error = False _LOGGER.debug("Connected to serial device on %s", port) self._transport = transport self._protocol = protocol self.loop.create_task(self._protocol.set_update_cb(self._send_report)) if 0 < inactivity_timeout < 3: _LOGGER.error("Inactivity timeout too low. Should be at least 3 " "seconds, got %d", inactivity_timeout) if inactivity_timeout >= 3: async def reconnect(): """Reconnect to the OpenTherm Gateway.""" _LOGGER.debug("Scheduling reconnect...") await self.connect( loop, port, baudrate, bytesize, parity, stopbits, connection_timeout, inactivity_timeout) self.loop.create_task( self._protocol.setup_watchdog(reconnect, inactivity_timeout)) self._gpio_task = None self._connected = True await self.get_reports() await self.get_status() if (self._protocol.status.get(OTGW_GPIO_A) or self._protocol.status.get(OTGW_GPIO_B)): await self._poll_gpio(True) return dict(self._protocol.status)
0.001761
def create_from_boosted_machine(self, boosted_machine, classifiers_per_round, classification_thresholds=-5.): """Creates this cascade from the given boosted machine, by simply splitting off strong classifiers that have classifiers_per_round weak classifiers. **Parameters:** ``boosted_machine`` : :py:class:`bob.learn.boosting.BoostedMachine` The strong classifier to split into a regular cascade. ``classifiers_per_round`` : int The number of classifiers that each cascade step should contain. ``classification_threshold`` : float A single threshold that will be applied in all rounds of the cascade. """ indices = list(range(0, len(boosted_machine.weak_machines), classifiers_per_round)) if indices[-1] != len(boosted_machine.weak_machines): indices.append(len(boosted_machine.weak_machines)) self.cascade = [] self.indices = [] for i in range(len(indices)-1): machine = bob.learn.boosting.BoostedMachine() for index in range(indices[i], indices[i+1]): machine.add_weak_machine(boosted_machine.weak_machines[index], boosted_machine.weights[index, 0]) self.cascade.append(machine) if isinstance(classification_thresholds, (int, float)): self.thresholds = [classification_thresholds] * len(self.cascade) else: self.thresholds = classification_thresholds
0.008791
def all_other_enabled_satchels(self): """ Returns a dictionary of satchels used in the current configuration, excluding ourselves. """ return dict( (name, satchel) for name, satchel in self.all_satchels.items() if name != self.name.upper() and name.lower() in map(str.lower, self.genv.services) )
0.010753
def receive(self, transport, myname=None):
    """Receive an XMPP connection over the `transport`.

    :Parameters:
        - `transport`: an XMPP transport instance
        - `myname`: local stream endpoint name (defaults to own jid domain
          part).
    """
    if myname is None:
        myname = JID(self.me.domain)
    return StreamBase.receive(self, transport, myname)
0.009547
def cdf(self, x):
    """
    Computes the cdf of a specific value, ie. computes F(x) where F denotes
    the CDF of the distribution.
    """
    t = 0
    N = float(self.n)

    if len(self) == 1:  # only one centroid
        return int(x >= self.C.min_key())

    for i, key in enumerate(self.C.keys()):
        c_i = self.C[key]
        if i == len(self) - 1:
            delta = (c_i.mean - self.C.prev_item(key)[1].mean) / 2.
        else:
            delta = (self.C.succ_item(key)[1].mean - c_i.mean) / 2.
        z = max(-1, (x - c_i.mean) / delta)

        if z < 1:
            return t / N + c_i.count / N * (z + 1) / 2

        t += c_i.count
    return 1
0.002688
def lists(self, value, key=None):
    """
    Get a list with the values of a given key

    :rtype: list
    """
    results = map(lambda x: x[value], self._items)
    return list(results)
0.009346
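A minimal, self-contained illustration of the map-then-list pattern used by this helper; the dict records below are hypothetical stand-ins for the collection's self._items.

# Hypothetical records standing in for self._items.
items = [{'name': 'alice', 'age': 30}, {'name': 'bob', 'age': 25}]

# Same extraction that lists('name') performs above.
names = list(map(lambda x: x['name'], items))
print(names)  # ['alice', 'bob']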
def _block_width(self):
    """
    Return a |Length| object specifying the width of available "writing"
    space between the margins of the last section of this document.
    """
    section = self.sections[-1]
    return Emu(
        section.page_width - section.left_margin - section.right_margin
    )
0.005917
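A rough worked example of the same subtraction, assuming python-docx-style EMU units (914400 EMU per inch): a US-Letter page with one-inch margins leaves 6.5 inches of writing space.

EMU_PER_INCH = 914400

page_width = int(8.5 * EMU_PER_INCH)   # 7772400 EMU
left_margin = right_margin = EMU_PER_INCH

block_width = page_width - left_margin - right_margin
print(block_width)                  # 5943600 EMU
print(block_width / EMU_PER_INCH)   # 6.5 inches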
def delete(self, alias_name, timeout=-1): """ Revokes a certificate signed by the internal CA. If client certificate to be revoked is RabbitMQ_readonly, then the internal CA root certificate, RabbitMQ client certificate and RabbitMQ server certificate will be regenerated. This will invalidate the previous version of RabbitMQ client certificate and the RabbitMQ server will be restarted to read the latest certificates. Args: alias_name (str): Alias name. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion. """ uri = self.URI + "/" + alias_name return self._client.delete(uri, timeout=timeout)
0.007238
def get_caller_name(N=0, strict=True): """ Standalone version of get_caller_name """ if isinstance(N, (list, tuple)): name_list = [] for N_ in N: try: name_list.append(get_caller_name(N_)) except AssertionError: name_list.append('X') return '[' + ']['.join(name_list) + ']' # <get_parent_frame> parent_frame = get_stack_frame(N=N + 2, strict=strict) # </get_parent_frame> caller_name = parent_frame.f_code.co_name if caller_name == '<module>': co_filename = parent_frame.f_code.co_filename caller_name = splitext(split(co_filename)[1])[0] if caller_name == '__init__': co_filename = parent_frame.f_code.co_filename caller_name = basename(dirname(co_filename)) + '.' + caller_name return caller_name
0.001182
def create_entity(self): """Create entity if `flow_collection` is defined in process. Following rules applies for adding `Data` object to `Entity`: * Only add `Data object` to `Entity` if process has defined `flow_collection` field * Add object to existing `Entity`, if all parents that are part of it (but not necessary all parents), are part of the same `Entity` * If parents belong to different `Entities` or do not belong to any `Entity`, create new `Entity` """ entity_type = self.process.entity_type # pylint: disable=no-member entity_descriptor_schema = self.process.entity_descriptor_schema # pylint: disable=no-member entity_input = self.process.entity_input # pylint: disable=no-member if entity_type: data_filter = {} if entity_input: input_id = dict_dot(self.input, entity_input, default=lambda: None) if input_id is None: logger.warning("Skipping creation of entity due to missing input.") return if isinstance(input_id, int): data_filter['data__pk'] = input_id elif isinstance(input_id, list): data_filter['data__pk__in'] = input_id else: raise ValueError( "Cannot create entity due to invalid value of field {}.".format(entity_input) ) else: data_filter['data__in'] = self.parents.all() # pylint: disable=no-member entity_query = Entity.objects.filter(type=entity_type, **data_filter).distinct() entity_count = entity_query.count() if entity_count == 0: descriptor_schema = DescriptorSchema.objects.filter( slug=entity_descriptor_schema ).latest() entity = Entity.objects.create( contributor=self.contributor, descriptor_schema=descriptor_schema, type=entity_type, name=self.name, tags=self.tags, ) assign_contributor_permissions(entity) elif entity_count == 1: entity = entity_query.first() copy_permissions(entity, self) else: logger.info("Skipping creation of entity due to multiple entities found.") entity = None if entity: entity.data.add(self) # Inherit collections from entity. for collection in entity.collections.all(): collection.data.add(self)
0.003244
def _setChoiceDict(self):
    """Create dictionary for choice list"""
    # value is name of choice parameter (same as key)
    self.choiceDict = {}
    for c in self.choice:
        self.choiceDict[c] = c
0.014085
def point(self, t):
    """Evaluate the cubic Bezier curve at t using Horner's rule."""
    # algebraically equivalent to
    # P0*(1-t)**3 + 3*P1*t*(1-t)**2 + 3*P2*(1-t)*t**2 + P3*t**3
    # for (P0, P1, P2, P3) = self.bpoints()
    return self.start + t*(
        3*(self.control1 - self.start) + t*(
            3*(self.start + self.control2) - 6*self.control1 + t*(
                -self.start + 3*(self.control1 - self.control2) + self.end
            )))
0.004032
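To sanity-check the Horner form used above, this sketch evaluates a cubic Bezier with plain floats (hypothetical control points) and compares the result with the expanded Bernstein polynomial from the comment.

# Hypothetical scalar control points P0..P3; the same algebra works for
# complex numbers or 2-D points.
P0, P1, P2, P3 = 0.0, 1.0, 3.0, 4.0
t = 0.3

# Horner form, mirroring the expression in point() above.
horner = P0 + t*(3*(P1 - P0) + t*(3*(P0 + P2) - 6*P1 + t*(-P0 + 3*(P1 - P2) + P3)))

# Expanded Bernstein form.
bernstein = P0*(1 - t)**3 + 3*P1*t*(1 - t)**2 + 3*P2*(1 - t)*t**2 + P3*t**3

assert abs(horner - bernstein) < 1e-12
print(horner)  # 1.116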
def load_items(self, items): """Loads any number of items in chunks, handling continuation tokens. :param items: Unpacked in chunks into "RequestItems" for :func:`boto3.DynamoDB.Client.batch_get_item`. """ loaded_items = {} requests = collections.deque(create_batch_get_chunks(items)) while requests: request = requests.pop() try: response = self.dynamodb_client.batch_get_item(RequestItems=request) except botocore.exceptions.ClientError as error: raise BloopException("Unexpected error while loading items.") from error # Accumulate results for table_name, table_items in response.get("Responses", {}).items(): loaded_items.setdefault(table_name, []).extend(table_items) # Push additional request onto the deque. # "UnprocessedKeys" is {} if this request is done if response["UnprocessedKeys"]: requests.append(response["UnprocessedKeys"]) return loaded_items
0.005556
def initialize_users(self) -> None:
    """Load device user data and initialize user management."""
    users = self.request('get', pwdgrp_url)
    self.users = Users(users, self.request)
0.01005
def get_session_cookie(self):
    """ Create a session cookie object for use by aiohttp """
    if self._login is not None and self._password is not None:
        session_key = self.encode_user(self._login, self._password)
        return {'sessionkey': session_key}
    else:
        return None
0.005882
def setPWMFrequency(self, pwm, device=DEFAULT_DEVICE_ID, message=True): """ Set the PWM frequency. :Parameters: pwm : `int` The PWN frequency to set in hertz. :Keywords: device : `int` The device is the integer number of the hardware devices ID and is only used with the Pololu Protocol. Defaults to the hardware's default value. message : `bool` If set to `True` a text message will be returned, if set to `False` the integer stored in the Qik will be returned. :Returns: A text message or an int. See the `message` parameter above. :Exceptions: * `SerialException` IO error indicating there was a problem reading from the serial connection. """ return self._setPWMFrequency(pwm, device, message)
0.002195
def sg_pool(tensor, opt): r"""Performs the 2-D pooling on the `tensor`. Mostly used with sg_conv(). Args: tensor: A 4-D `Tensor` (automatically given by chain). opt: size: A tuple or list of integers of length 2 representing `[kernel height, kernel width]`. Can be an int if both values are the same. If not specified, (2, 2) is set implicitly. stride: A tuple or list of integers of length 2 or 4 representing stride dimensions. If the length is 2, i.e., (a, b), the stride is `[1, a, b, 1]`. If the length is 4, i.e., (a, b, c, d), the stride is `[a, b, c, d]`. Can be an int. If the length is an int, i.e., a, the stride is `[1, a, a, 1]`. The default value is [1, 1, 1, 1]. avg: Boolean. If True, average pooling is applied. Otherwise, max pooling. name: If provided, replace current tensor's name. Returns: A `Tensor`. The max pooled output tensor. """ # default stride and pad opt += tf.sg_opt(stride=(1, 2, 2, 1), pad='VALID') # shape stride opt.stride = opt.stride if isinstance(opt.stride, (list, tuple)) else [1, opt.stride, opt.stride, 1] opt.stride = [1, opt.stride[0], opt.stride[1], 1] if len(opt.stride) == 2 else opt.stride # shape size opt += tf.sg_opt(size=opt.stride) opt.size = opt.size if isinstance(opt.size, (list, tuple)) else [1, opt.size, opt.size, 1] opt.size = [1, opt.size[0], opt.size[1], 1] if len(opt.size) == 2 else opt.size if opt.avg: out = tf.nn.avg_pool(tensor, opt.size, opt.stride, opt.pad) else: out = tf.nn.max_pool(tensor, opt.size, opt.stride, opt.pad) return tf.identity(out, name=opt.name)
0.005187
def run_job(args):
    """Starts the wrapper script to execute a job, interpreting the JOB_ID
    and SGE_TASK_ID keywords that are set by the grid or by us."""
    jm = setup(args)
    job_id = int(os.environ['JOB_ID'])
    array_id = int(os.environ['SGE_TASK_ID']) if os.environ['SGE_TASK_ID'] != 'undefined' else None
    jm.run_job(job_id, array_id)
0.023599
def getInput(): """Read the input buffer without blocking the system.""" input = '' if sys.platform == 'win32': import msvcrt if msvcrt.kbhit(): # Check for a keyboard hit. input += msvcrt.getch() print_(input) else: time.sleep(.1) else: # Other platforms # Posix will work with sys.stdin or sys.stdin.fileno() # Mac needs the file descriptor. # This solution does not work for windows since select # expects a socket, and I have no idea how to create a # socket from standard input. sock = sys.stdin.fileno() # select(rlist, wlist, xlist, timeout) while len(select.select([sock], [], [], 0.1)[0]) > 0: input += decode(os.read(sock, 4096)) return input
0.001229
def get_access_token(self):
    """Method to return the current requests' access_token.

    :returns: Access token or None
    :rtype: str

    .. versionadded:: 1.2
    """
    try:
        credentials = OAuth2Credentials.from_json(
            self.credentials_store[g.oidc_id_token['sub']])
        return credentials.access_token
    except KeyError:
        logger.debug("Expired ID token, credentials missing", exc_info=True)
        return None
0.00381
def render_columns(columns, write_borders=True, column_colors=None): """ Renders a list of columns. :param columns: A list of columns, where each column is a list of strings. :type columns: [[``str``]] :param write_borders: Whether to write the top and bottom borders. :type write_borders: ``bool`` :param column_colors: A list of coloring functions, one for each column. Optional. :type column_colors: [``str`` -> ``str``] or ``NoneType`` :return: The rendered columns. :rtype: ``str`` """ if column_colors is not None and len(column_colors) != len(columns): raise ValueError('Wrong number of column colors') widths = [max(len(cell) for cell in column) for column in columns] max_column_length = max(len(column) for column in columns) result = '\n'.join(render_row(i, columns, widths, column_colors) for i in range(max_column_length)) if write_borders: border = '+%s+' % '|'.join('-' * (w + 2) for w in widths) return '%s\n%s\n%s' % (border, result, border) else: return result
0.000886
def _valid_other_type(x, types):
    """
    Do all elements of x have a type from types?
    """
    return all(any(isinstance(el, t) for t in types) for el in np.ravel(x))
0.00578
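A quick self-contained illustration of the nested all/any check; datetime objects are used because np.ravel keeps them as Python objects, so the isinstance test behaves as expected.

import datetime
import numpy as np

def _valid_other_type(x, types):
    """Do all elements of x have a type from types?"""
    return all(any(isinstance(el, t) for t in types) for el in np.ravel(x))

dates = [[datetime.datetime(2020, 1, 1), datetime.datetime(2020, 6, 1)]]
print(_valid_other_type(dates, (datetime.datetime,)))       # True
print(_valid_other_type(['a', 'b'], (datetime.datetime,)))  # False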
def PlugIn(self):
    """Take next available controller id and plug in to Virtual USB Bus"""
    ids = self.available_ids()
    if len(ids) == 0:
        raise MaxInputsReachedError('Max Inputs Reached')
    self.id = ids[0]
    _xinput.PlugIn(self.id)
    while self.id in self.available_ids():
        pass
0.005848
def load_plugin(self, p): """Load the specified plugin :param p: The plugin to load :type p: Subclass of JB_Plugin :returns: None :rtype: None :raises: errors.PluginInitError """ if p.is_loaded(): return # load required plugins first reqnames = p.required reqplugins = [] for name in reqnames: try: reqplugins.append(self.__plugins[name]) except KeyError as e: log.error("Required Plugin %s not found. Cannot load %s." % (name, p)) raise errors.PluginInitError('Required Plugin %s not found. Cannot load %s. Reason: %s' % (name, p, e)) for plug in reqplugins: try: self.load_plugin(plug) except errors.PluginInitError as e: log.error("Required Plugin %s could not be loaded. Cannot load %s" % (plug, p)) raise errors.PluginInitError('Required Plugin %s could not be loaded. Cannot load %s. Reason: %s' % (plug,p, e)) # load the actual plugin p._load() log.info('Initialized the plugin: %s' % p)
0.005937
def set_context(self, expr, ctx):
    """Set the context of an expression to Store or Del if possible."""
    t = type(expr)
    try:
        # TODO: check if Starred is ok
        if t in (ast.Attribute, ast.Name):
            if type(ctx) == ast.Store:
                mis.check_forbidden_name(getattr(expr, expression_name_map[t]), expr)
        elif t in (ast.Subscript, ast.Starred):
            pass
        elif t in (ast.List, ast.Tuple):
            for elt in expr.elts:
                self.set_context(elt, ctx)
        expr.ctx = ctx
    except misc.ForbiddenNameAssignment as e:
        self.error_ast("cannot assign to %s" % (e.name,), e.node)
0.005548
def start_server(self, event=None, server=None): """ Negotiate a new SSH2 session as a server. This is the first step after creating a new L{Transport} and setting up your server host key(s). A separate thread is created for protocol negotiation. If an event is passed in, this method returns immediately. When negotiation is done (successful or not), the given C{Event} will be triggered. On failure, L{is_active} will return C{False}. (Since 1.4) If C{event} is C{None}, this method will not return until negotation is done. On success, the method returns normally. Otherwise an SSHException is raised. After a successful negotiation, the client will need to authenticate. Override the methods L{get_allowed_auths <ServerInterface.get_allowed_auths>}, L{check_auth_none <ServerInterface.check_auth_none>}, L{check_auth_password <ServerInterface.check_auth_password>}, and L{check_auth_publickey <ServerInterface.check_auth_publickey>} in the given C{server} object to control the authentication process. After a successful authentication, the client should request to open a channel. Override L{check_channel_request <ServerInterface.check_channel_request>} in the given C{server} object to allow channels to be opened. @note: After calling this method (or L{start_client} or L{connect}), you should no longer directly read from or write to the original socket object. @param event: an event to trigger when negotiation is complete. @type event: threading.Event @param server: an object used to perform authentication and create L{Channel}s. @type server: L{server.ServerInterface} @raise SSHException: if negotiation fails (and no C{event} was passed in) """ if server is None: server = ServerInterface() self.server_mode = True self.server_object = server self.active = True if event is not None: # async, return immediately and let the app poll for completion self.completion_event = event self.start() return # synchronous, wait for a result self.completion_event = event = threading.Event() self.start() while True: event.wait(0.1) if not self.active: e = self.get_exception() if e is not None: raise e raise SSHException('Negotiation failed.') if event.isSet(): break
0.000737
def to_kaf(self):
    """
    Converts the object to KAF (if it is NAF)
    """
    if self.type == 'NAF':
        self.type = 'KAF'
        for node in self.__get_node_terms():
            node.set('cid', node.get('id'))
            del node.attrib['id']
0.006969
def Start(self): """Start HTTPServer.""" try: self._http_server = http_server.HTTPServer(("", self.port), StatsServerHandler) except socket.error as e: if e.errno == errno.EADDRINUSE: raise base_stats_server.PortInUseError(self.port) else: raise self._server_thread = threading.Thread( target=self._http_server.serve_forever) self._server_thread.daemon = True self._server_thread.start()
0.007952
def status(self):
    """ The HTTP status line as a string (e.g. ``404 Not Found``)."""
    status = _HTTP_STATUS_LINES.get(self._status_code)
    return str(status or ('{} Unknown'.format(self._status_code)))
0.00905
def is_parent_of_objective_bank(self, id_, objective_bank_id): """Tests if an ``Id`` is a direct parent of an objective bank. arg: id (osid.id.Id): an ``Id`` arg: objective_bank_id (osid.id.Id): the ``Id`` of an objective bank return: (boolean) - ``true`` if this ``id`` is a parent of ``objective_bank_id,`` ``false`` otherwise raise: NotFound - ``objective_bank_id`` is not found raise: NullArgument - ``id`` or ``objective_bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_parent_of_bin if self._catalog_session is not None: return self._catalog_session.is_parent_of_catalog(id_=id_, catalog_id=objective_bank_id) return self._hierarchy_session.is_parent(id_=objective_bank_id, parent_id=id_)
0.003457
def loglike(self, endog, mu, freq_weights=1, scale=1.): r""" The log-likelihood function in terms of the fitted mean response. Parameters ---------- endog : array-like Endogenous response variable mu : array-like Fitted mean response variable freq_weights : array-like 1d array of frequency weights. The default is 1. scale : float, optional Not used for the Binomial GLM. Returns ------- llf : float The value of the loglikelihood function evaluated at (endog,mu,freq_weights,scale) as defined below. """ if np.shape(self.n) == () and self.n == 1: return scale * np.sum((endog * np.log(mu/(1 - mu) + 1e-200) + np.log(1 - mu)) * freq_weights) else: y = endog * self.n # convert back to successes return scale * np.sum((special.gammaln(self.n + 1) - special.gammaln(y + 1) - special.gammaln(self.n - y + 1) + y * np.log(mu/(1 - mu)) + self.n * np.log(1 - mu)) * freq_weights)
0.00157
def CalculateWaitForRetry(retry_attempt, max_wait=60): """Calculates amount of time to wait before a retry attempt. Wait time grows exponentially with the number of attempts. A random amount of jitter is added to spread out retry attempts from different clients. Args: retry_attempt: Retry attempt counter. max_wait: Upper bound for wait time [seconds]. Returns: Number of seconds to wait before retrying request. """ wait_time = 2 ** retry_attempt max_jitter = wait_time / 4.0 wait_time += random.uniform(-max_jitter, max_jitter) return max(1, min(wait_time, max_wait))
0.00157
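The growth of the backoff is easy to see with a small standalone re-implementation of the same formula, with the random jitter removed here so the numbers are deterministic.

def wait_without_jitter(retry_attempt, max_wait=60):
    # Same exponential core as CalculateWaitForRetry, minus the jitter term.
    wait_time = 2 ** retry_attempt
    return max(1, min(wait_time, max_wait))

for attempt in range(1, 8):
    print(attempt, wait_without_jitter(attempt))
# 1 -> 2, 2 -> 4, 3 -> 8, 4 -> 16, 5 -> 32, 6 -> 60, 7 -> 60 (capped at max_wait)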
def mates(args): """ %prog mates bedfile Generate the mates file by inferring from the names. """ p = OptionParser(mates.__doc__) p.add_option("--lib", default=False, action="store_true", help="Output library information along with pairs [default: %default]") p.add_option("--nointra", default=False, action="store_true", help="Remove mates that are intra-scaffold [default: %default]") p.add_option("--prefix", default=False, action="store_true", help="Only keep links between IDs with same prefix [default: %default]") p.set_mates() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bedfile, = args rclip = opts.rclip key = (lambda x: x.accn[:-rclip]) if rclip else (lambda x: x.accn) bed = Bed(bedfile, key=key) pf = bedfile.rsplit(".", 1)[0] matesfile = pf + ".mates" lib = pf if opts.lib else None fw = open(matesfile, "w") if lib: bedfile, stats = pairs([bedfile, \ "--rclip={0}".format(rclip), "--cutoff={0}".format(opts.cutoff)]) sv = int(2 * stats.sd) mindist = max(stats.mean - sv, 1) maxdist = stats.mean + sv print("\t".join(str(x) for x in \ ("library", pf, mindist, maxdist)), file=fw) num_fragments = num_pairs = 0 matesbedfile = matesfile + ".bed" fwm = open(matesbedfile, "w") for pe, lines in groupby(bed, key=key): lines = list(lines) if len(lines) != 2: num_fragments += len(lines) continue a, b = lines if opts.nointra and a.seqid == b.seqid: continue # Use --prefix to limit the links between seqids with the same prefix # For example, contigs of the same BAC, mth2-23j10_001, mth-23j10_002 if opts.prefix: aprefix = a.seqid.split("_")[0] bprefix = b.seqid.split("_")[0] if aprefix != bprefix: continue num_pairs += 1 pair = [a.accn, b.accn] if lib: pair.append(lib) print("\t".join(pair), file=fw) print(a, file=fwm) print(b, file=fwm) logging.debug("Discard {0} frags and write {1} pairs to `{2}` and `{3}`.".\ format(num_fragments, num_pairs, matesfile, matesbedfile)) fw.close() fwm.close() return matesfile, matesbedfile
0.005267
def _cursor_up(self, value):
    """
    Moves the cursor up by ``value``.
    """
    value = int(value)
    if value == 0:
        value = 1
    self._cursor.clearSelection()
    self._cursor.movePosition(self._cursor.Up, self._cursor.MoveAnchor, value)
    self._last_cursor_pos = self._cursor.position()
0.008746
def get_children(self):
    """Get the child nodes below this node.

    :returns: The children.
    :rtype: iterable(NodeNG)
    """
    for expr, var in self.items:
        yield expr
        if var:
            yield var
    yield from self.body
0.007042
def update_parent(self, fut):
    """Add a callback to the parent to update the state.

    This handles the case where the user has called result on the AppFuture
    before the parent exists.
    """
    self.parent = fut
    try:
        fut.add_done_callback(self.parent_callback)
    except Exception as e:
        logger.error("add_done_callback got an exception {} which will be ignored".format(e))
0.006787
def _get_dependencies_of(name, location=None): ''' Returns list of first level dependencies of the given installed dap or dap from Dapi if not installed If a location is specified, this only checks for dap installed in that path and return [] if the dap is not located there ''' if not location: detailed_dap_list = get_installed_daps_detailed() if name not in detailed_dap_list: return _get_api_dependencies_of(name) location = detailed_dap_list[name][0]['location'] meta = '{d}/meta/{dap}.yaml'.format(d=location, dap=name) try: data = yaml.load(open(meta), Loader=Loader) except IOError: return [] return data.get('dependencies', [])
0.001362
def _set_error_handler_callbacks(self, app): """ Sets the error handler callbacks used by this extension """ @app.errorhandler(NoAuthorizationError) def handle_auth_error(e): return self._unauthorized_callback(str(e)) @app.errorhandler(CSRFError) def handle_csrf_error(e): return self._unauthorized_callback(str(e)) @app.errorhandler(ExpiredSignatureError) def handle_expired_error(e): try: token = ctx_stack.top.expired_jwt return self._expired_token_callback(token) except TypeError: msg = ( "jwt.expired_token_loader callback now takes the expired token " "as an additional paramter. Example: expired_callback(token)" ) warn(msg, DeprecationWarning) return self._expired_token_callback() @app.errorhandler(InvalidHeaderError) def handle_invalid_header_error(e): return self._invalid_token_callback(str(e)) @app.errorhandler(InvalidTokenError) def handle_invalid_token_error(e): return self._invalid_token_callback(str(e)) @app.errorhandler(JWTDecodeError) def handle_jwt_decode_error(e): return self._invalid_token_callback(str(e)) @app.errorhandler(WrongTokenError) def handle_wrong_token_error(e): return self._invalid_token_callback(str(e)) @app.errorhandler(InvalidAudienceError) def handle_invalid_audience_error(e): return self._invalid_token_callback(str(e)) @app.errorhandler(RevokedTokenError) def handle_revoked_token_error(e): return self._revoked_token_callback() @app.errorhandler(FreshTokenRequired) def handle_fresh_token_required(e): return self._needs_fresh_token_callback() @app.errorhandler(UserLoadError) def handler_user_load_error(e): # The identity is already saved before this exception was raised, # otherwise a different exception would be raised, which is why we # can safely call get_jwt_identity() here identity = get_jwt_identity() return self._user_loader_error_callback(identity) @app.errorhandler(UserClaimsVerificationError) def handle_failed_user_claims_verification(e): return self._verify_claims_failed_callback()
0.001587
def close_connection(self, connection, force=False): """overriding the baseclass function, this routine will decline to close a connection at the end of a transaction context. This allows for reuse of connections.""" if force: try: connection.close() except self.operational_exceptions: self.config.logger.error('ConnectionFactory - failed closing') for name, conn in self.pool.iteritems(): if conn is connection: break del self.pool[name] else: pass
0.003231
def get_current_branch(self, location): """ Return the current branch, or None if HEAD isn't at a branch (e.g. detached HEAD). """ # git-symbolic-ref exits with empty stdout if "HEAD" is a detached # HEAD rather than a symbolic ref. In addition, the -q causes the # command to exit with status code 1 instead of 128 in this case # and to suppress the message to stderr. args = ['symbolic-ref', '-q', 'HEAD'] output = self.run_command( args, extra_ok_returncodes=(1, ), show_stdout=False, cwd=location, ) ref = output.strip() if ref.startswith('refs/heads/'): return ref[len('refs/heads/'):] return None
0.002699
def reduced_dependencies(self, exported_target):
    """Calculates the reduced transitive dependencies for an exported target.

    The reduced set of dependencies will be just those transitive dependencies "owned" by
    the `exported_target`.

    A target is considered "owned" if:
    1. It's "3rdparty" and "directly reachable" from `exported_target` by at least 1 path.
    2. It's not "3rdparty" and not "directly reachable" by any of `exported_target`'s "3rdparty"
       dependencies.

    Here "3rdparty" refers to targets identified as either `is_third_party` or `is_exported`.

    And in this context "directly reachable" means the target can be reached by following a series
    of dependency links from the `exported_target`, never crossing another exported target and
    staying within the `exported_target` address space. It's the latter restriction that allows
    for unambiguous ownership of exportable targets and mirrors the BUILD file convention of
    targets only being able to own sources in their filesystem subtree. The single ambiguous case
    that can arise is when there is more than one exported target in the same BUILD file family
    that can "directly reach" a target in its address space.

    :raises: `UnExportedError` if the given `exported_target` is not, in-fact, exported.
    :raises: `NoOwnerError` if a transitive dependency is found with no proper owning exported
             target.
    :raises: `AmbiguousOwnerError` if there is more than one viable exported owner target for a
             given transitive dependency.
    """
    # The strategy adopted requires 3 passes:
    # 1.) Walk the exported target to collect provisional owned exportable targets, but _not_
    #     3rdparty since these may be introduced by exported subgraphs we discover in later steps!
    # 2.) Determine the owner of each target collected in 1 by walking the ancestor chain to find
    #     the closest exported target. The ancestor chain is just all targets whose spec path is
    #     a prefix of the descendant. In other words, all targets in descendant's BUILD file
    #     family (its siblings), all targets in its parent directory BUILD file family, and so on.
    # 3.) Finally walk the exported target once more, replacing each visited dependency with its
    #     owner.

    if not self.is_exported(exported_target):
        raise self.UnExportedError('Cannot calculate reduced dependencies for a non-exported '
                                   'target, given: {}'.format(exported_target))

    owner_by_owned_python_target = OrderedDict()

    # Only check ownership on the original target graph.
    original_exported_target = exported_target.derived_from

    def collect_potentially_owned_python_targets(current):
        if current.is_original:
            owner_by_owned_python_target[current] = None  # We can't know the owner in the 1st pass.
        return (current == exported_target) or not self.is_exported(current)

    self._walk(original_exported_target, collect_potentially_owned_python_targets)

    for owned in owner_by_owned_python_target:
        if self.requires_export(owned) and not self.is_exported(owned):
            potential_owners = set()
            for potential_owner in self._ancestor_iterator.iter_target_siblings_and_ancestors(owned):
                if self.is_exported(potential_owner) and owned in self._closure(potential_owner):
                    potential_owners.add(potential_owner)
            if not potential_owners:
                raise self.NoOwnerError('No exported target owner found for {}'.format(owned))
            owner = potential_owners.pop()
            if potential_owners:
                ambiguous_owners = [o for o in potential_owners
                                    if o.address.spec_path == owner.address.spec_path]
                if ambiguous_owners:
                    raise self.AmbiguousOwnerError('Owners for {} are ambiguous. Found {} and '
                                                   '{} others: {}'.format(owned, owner,
                                                                          len(ambiguous_owners),
                                                                          ambiguous_owners))
            owner_by_owned_python_target[owned] = owner

    reduced_dependencies = OrderedSet()

    def collect_reduced_dependencies(current):
        if current == exported_target:
            return True
        else:
            # The provider will be one of:
            # 1. `None`, ie: a 3rdparty requirement we should collect.
            # 2. `exported_target`, ie: a local exportable target owned by `exported_target` that
            #    we should collect
            # 3. Or else a local exportable target owned by some other exported target in which
            #    case we should collect the exported owner.
            owner = owner_by_owned_python_target.get(current)
            if owner is None or owner == exported_target:
                reduced_dependencies.add(current)
            else:
                reduced_dependencies.add(owner)
            return owner == exported_target or not self.requires_export(current)

    self._walk(exported_target, collect_reduced_dependencies)
    return OrderedSet(d for d in reduced_dependencies if d.is_original)
0.008607
def optsChanged(self, param, opts): """Called when any options are changed that are not name, value, default, or limits""" # print "opts changed:", opts ParameterItem.optsChanged(self, param, opts) w = self.widget if 'readonly' in opts: self.updateDefaultBtn() if isinstance(w, (QtWidgets.QCheckBox, ColorButton)): w.setEnabled(not opts['readonly']) # If widget is a SpinBox, pass options straight through if isinstance(self.widget, SpinBox): if 'units' in opts and 'suffix' not in opts: opts['suffix'] = opts['units'] w.setOpts(**opts) self.updateDisplayLabel()
0.002782
def _after_flush_handler(session, _flush_context): """Archive all new/updated/deleted data""" dialect = get_dialect(session) handlers = [ (_versioned_delete, session.deleted), (_versioned_insert, session.new), (_versioned_update, session.dirty), ] for handler, rows in handlers: # TODO: Bulk archive insert statements for row in rows: if not isinstance(row, SavageModelMixin): continue if not hasattr(row, 'ArchiveTable'): raise LogTableCreationError('Need to register Savage tables!!') user_id = getattr(row, '_updated_by', None) handler(row, session, user_id, dialect)
0.001408
def missing_input_files(self):
    """Make and return a dictionary of the missing input files.

    This returns a dictionary mapping filepath to list of `Link`
    that use the file as input.
    """
    missing = self.check_input_files(return_found=False)
    ret_dict = {}
    for miss_file in missing:
        ret_dict[miss_file] = [self.linkname]
    return ret_dict
0.004914
def scrub(zpool, stop=False, pause=False): ''' Scrub a storage pool zpool : string Name of storage pool stop : boolean If ``True``, cancel ongoing scrub pause : boolean If ``True``, pause ongoing scrub .. versionadded:: 2018.3.0 .. note:: Pause is only available on recent versions of ZFS. If both ``pause`` and ``stop`` are ``True``, then ``stop`` will win. CLI Example: .. code-block:: bash salt '*' zpool.scrub myzpool ''' ## select correct action if stop: action = ['-s'] elif pause: action = ['-p'] else: action = None ## Scrub storage pool res = __salt__['cmd.run_all']( __utils__['zfs.zpool_command']( command='scrub', flags=action, target=zpool, ), python_shell=False, ) if res['retcode'] != 0: return __utils__['zfs.parse_command_result'](res, 'scrubbing') ret = OrderedDict() if stop or pause: ret['scrubbing'] = False else: ret['scrubbing'] = True return ret
0.002597
def main_nonexecutable_region_limbos_contain(self, addr, tolerance_before=64, tolerance_after=64): """ Sometimes there exists a pointer that points to a few bytes before the beginning of a section, or a few bytes after the beginning of the section. We take care of that here. :param int addr: The address to check. :return: A 2-tuple of (bool, the closest base address) :rtype: tuple """ closest_region = None least_limbo = None for start, end in self.main_nonexecutable_regions: if start - tolerance_before <= addr < start: if least_limbo is None or start - addr < least_limbo: closest_region = (True, start) least_limbo = start - addr if end <= addr < end + tolerance_after: if least_limbo is None or addr - end < least_limbo: closest_region = (True, end) least_limbo = addr - end if closest_region is not None: return closest_region return False, None
0.00363
def import_file(self, file_obj, folder): """ Create a File or an Image into the given folder """ created = False for cls in MEDIA_MODELS: if cls.matches_file_type(file_obj.name): obj, created = cls.objects.get_or_create( original_filename=file_obj.name, file=file_obj, folder=folder, is_public=FILER_IS_PUBLIC_DEFAULT) if created: self.image_created += 1 if not created: obj, created = File.objects.get_or_create( original_filename=file_obj.name, file=file_obj, folder=folder, is_public=FILER_IS_PUBLIC_DEFAULT) if created: self.file_created += 1 if self.verbosity >= 2: print("file_created #%s / image_created #%s -- file : %s -- created : %s" % (self.file_created, self.image_created, obj, created)) return obj
0.004115
def packet_handle(self):
    """Incoming packet handler dispatcher."""
    cmd = self.in_packet.command & 0xF0

    if cmd == NC.CMD_CONNACK:
        return self.handle_connack()
    elif cmd == NC.CMD_PINGRESP:
        return self.handle_pingresp()
    elif cmd == NC.CMD_PUBLISH:
        return self.handle_publish()
    elif cmd == NC.CMD_PUBACK:
        return self.handle_puback()
    elif cmd == NC.CMD_PUBREC:
        return self.handle_pubrec()
    elif cmd == NC.CMD_PUBREL:
        return self.handle_pubrel()
    elif cmd == NC.CMD_PUBCOMP:
        return self.handle_pubcomp()
    elif cmd == NC.CMD_SUBSCRIBE:
        sys.exit(-1)
    elif cmd == NC.CMD_SUBACK:
        return self.handle_suback()
    elif cmd == NC.CMD_UNSUBSCRIBE:
        print("Received UNSUBSCRIBE")
        sys.exit(-1)
    elif cmd == NC.CMD_UNSUBACK:
        return self.handle_unsuback()
    else:
        self.logger.warning("Unknown protocol. Cmd = %d", cmd)
        return NC.ERR_PROTOCOL
0.00273
def items(self, query=None, **kwargs): """ Return the items to be sent to the client """ # Cut this, we don't need no empty query if not query: self.__final_queryset = self.get_model().objects.none() return self.serialize(self.__final_queryset) # Query is too short, no item if len(query) < self.get_query_size_min(): self.__final_queryset = self.get_model().objects.none() return self.serialize(self.__final_queryset) if self.requires_authentication: if not self.user: raise AuthenticationRequiredAgnocompleteException( "Authentication is required to use this autocomplete" ) if not self.user.is_authenticated: raise AuthenticationRequiredAgnocompleteException( "Authentication is required to use this autocomplete" ) qs = self.build_filtered_queryset(query, **kwargs) # The final queryset is the paginated queryset self.__final_queryset = qs return self.serialize(qs)
0.00175
def create_job(self, project_id, job, use_existing_job_fn=None): """ Launches a MLEngine job and wait for it to reach a terminal state. :param project_id: The Google Cloud project id within which MLEngine job will be launched. :type project_id: str :param job: MLEngine Job object that should be provided to the MLEngine API, such as: :: { 'jobId': 'my_job_id', 'trainingInput': { 'scaleTier': 'STANDARD_1', ... } } :type job: dict :param use_existing_job_fn: In case that a MLEngine job with the same job_id already exist, this method (if provided) will decide whether we should use this existing job, continue waiting for it to finish and returning the job object. It should accepts a MLEngine job object, and returns a boolean value indicating whether it is OK to reuse the existing job. If 'use_existing_job_fn' is not provided, we by default reuse the existing MLEngine job. :type use_existing_job_fn: function :return: The MLEngine job object if the job successfully reach a terminal state (which might be FAILED or CANCELLED state). :rtype: dict """ request = self._mlengine.projects().jobs().create( parent='projects/{}'.format(project_id), body=job) job_id = job['jobId'] try: request.execute() except HttpError as e: # 409 means there is an existing job with the same job ID. if e.resp.status == 409: if use_existing_job_fn is not None: existing_job = self._get_job(project_id, job_id) if not use_existing_job_fn(existing_job): self.log.error( 'Job with job_id %s already exist, but it does ' 'not match our expectation: %s', job_id, existing_job ) raise self.log.info( 'Job with job_id %s already exist. Will waiting for it to finish', job_id ) else: self.log.error('Failed to create MLEngine job: {}'.format(e)) raise return self._wait_for_job_done(project_id, job_id)
0.001182
def get_next_scheduled_time(cron_string):
    """Calculate the next scheduled time by creating a crontab object
    with a cron string"""
    itr = croniter.croniter(cron_string, datetime.utcnow())
    return itr.get_next(datetime)
0.004329
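Assuming the croniter package is installed, a usage sketch of the same pattern with a fixed start time (a "*/15 * * * *" schedule fires every 15 minutes):

from datetime import datetime
import croniter

itr = croniter.croniter('*/15 * * * *', datetime(2024, 1, 1, 10, 7))
print(itr.get_next(datetime))  # 2024-01-01 10:15:00
print(itr.get_next(datetime))  # 2024-01-01 10:30:00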
def process_sparser_output(output_fname, output_fmt='json'): """Return a processor with Statements extracted from Sparser XML or JSON Parameters ---------- output_fname : str The path to the Sparser output file to be processed. The file can either be JSON or XML output from Sparser, with the output_fmt parameter defining what format is assumed to be processed. output_fmt : Optional[str] The format of the Sparser output to be processed, can either be 'json' or 'xml'. Default: 'json' Returns ------- sp : SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen. """ if output_fmt not in ['json', 'xml']: logger.error("Unrecognized output format '%s'." % output_fmt) return None sp = None with open(output_fname, 'rt') as fh: if output_fmt == 'json': json_dict = json.load(fh) sp = process_json_dict(json_dict) else: xml_str = fh.read() sp = process_xml(xml_str) return sp
0.000923
def regex(self, protocols, localhost=True): """ URL Validation regex Based on regular expression by Diego Perini (@dperini) and provided under MIT License: https://gist.github.com/dperini/729294 :return: """ p = r"^" # protocol p += r"(?:(?:(?:{}):)?//)".format('|'.join(protocols)) # basic auth (optional) p += r"(?:\S+(?::\S*)?@)?" p += r"(?:" # ip exclusion: private and local networks p += r"(?!(?:10|127)(?:\.\d{1,3}){3})" p += r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" p += r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" # ip excluding loopback (0.0.0.0), reserved space (244.0.0.0) # and network/broadcast addresses p += r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" p += r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" p += r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" p += r"|" # hostname p += r"(?:" p += r"(?:" p += r"[a-z0-9\u00a1-\uffff][a-z0-9\u00a1-\uffff_-]{0,62})?" p += r"[a-z0-9\u00a1-\uffff]" p += r"\." if not localhost else r"[\.]?|localhost" p += r")+" # tld p += r"(?:[a-z\u00a1-\uffff]{2,}\.?)" p += r")" # port (optional) p += r"(?::\d{2,5})?" # path (optional) p += r"(?:[/?#]\S*)?" p += r"$" return p
0.002088
def expand_folder(notebook_or_folder, recursive=False): """ If notebook_or_folder is a folder, returns a list containing all notebooks in the folder. Otherwise, returns a list containing the notebook name. If recursive is True, recurses into subdirectories. """ is_file = os.path.isfile(notebook_or_folder) is_dir = os.path.isdir(notebook_or_folder) if not (is_file or is_dir): raise ValueError( '{} is neither an existing file nor a folder.' .format(notebook_or_folder) ) if is_file: return [notebook_or_folder] # Now we know the input is a directory if not recursive: return glob('{}/*.ipynb'.format(notebook_or_folder)) # Recursive case return [ os.path.join(folder, filename) for folder, _, filenames in os.walk(notebook_or_folder) # Skip folders that start with . if not os.path.basename(folder).startswith('.') for filename in fnmatch.filter(filenames, '*.ipynb') ]
0.000974
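The recursive branch of expand_folder relies on fnmatch.filter to pick the notebooks out of each directory listing; a minimal standalone illustration with hypothetical filenames:

import fnmatch

filenames = ['analysis.ipynb', 'notes.txt', 'scratch.ipynb', 'data.csv']
print(fnmatch.filter(filenames, '*.ipynb'))  # ['analysis.ipynb', 'scratch.ipynb']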
def process_mav(self, mlog, flightmode_selections): '''process one file''' self.vars = {} idx = 0 all_false = True for s in flightmode_selections: if s: all_false = False # pre-calc right/left axes self.num_fields = len(self.fields) for i in range(0, self.num_fields): f = self.fields[i] if f.endswith(":2"): self.axes[i] = 2 f = f[:-2] if f.endswith(":1"): self.first_only[i] = True f = f[:-2] self.fields[i] = f # see which fields are simple self.simple_field = [] for i in range(0, self.num_fields): f = self.fields[i] m = re.match('^([A-Z][A-Z0-9_]*)[.]([A-Za-z_][A-Za-z0-9_]*)$', f) if m is None: self.simple_field.append(None) else: self.simple_field.append((m.group(1),m.group(2))) if len(self.flightmode_list) > 0: # prime the timestamp conversion self.timestamp_to_days(self.flightmode_list[0][1]) while True: msg = mlog.recv_match(type=self.msg_types) if msg is None: break if msg.get_type() not in self.msg_types: continue if self.condition: if not mavutil.evaluate_condition(self.condition, mlog.messages): continue tdays = self.timestamp_to_days(msg._timestamp) if all_false or len(flightmode_selections) == 0: self.add_data(tdays, msg, mlog.messages) else: if idx < len(self.flightmode_list) and msg._timestamp >= self.flightmode_list[idx][2]: idx += 1 elif (idx < len(flightmode_selections) and flightmode_selections[idx]): self.add_data(tdays, msg, mlog.messages)
0.003043
def idle_task(self): '''called on idle''' if self.module('console') is not None and not self.menu_added_console: self.menu_added_console = True self.module('console').add_menu(self.menu)
0.00885
def subscribe(ws):
    """WebSocket endpoint, used for liveupdates"""
    while ws is not None:
        gevent.sleep(0.1)
        try:
            message = ws.receive()  # expect function name to subscribe to
            if message:
                stream.register(ws, message)
        except WebSocketError:
            ws = None
0.003021
def _initial_run(): """ Check things during the initial setting of sprinter's global config """ if not system.is_officially_supported(): logger.warn(warning_template + "===========================================================\n" + "Sprinter is not officially supported on {0}! Please use at your own risk.\n\n".format(system.operating_system()) + "You can find the supported platforms here:\n" + "(http://sprinter.readthedocs.org/en/latest/index.html#compatible-systems)\n\n" + "Conversely, please help us support your system by reporting on issues\n" + "(http://sprinter.readthedocs.org/en/latest/faq.html#i-need-help-who-do-i-talk-to)\n" + "===========================================================") else: logger.info( "\nThanks for using \n" + "=" * 60 + sprinter_template + "=" * 60 )
0.006876
def initdb(self): '''initdb will check for writability of the data folder, meaning that it is bound to the local machine. If the folder isn't bound, expfactory runs in demo mode (not saving data) ''' self.database = EXPFACTORY_DATABASE bot.info("DATABASE: %s" %self.database) # Supported database options valid = ('sqlite', 'postgres', 'mysql', 'filesystem') if not self.database.startswith(valid): bot.warning('%s is not yet a supported type, saving to filesystem.' % self.database) self.database = 'filesystem' # Add functions specific to database type self.init_db() # uses url in self.database bot.log("Data base: %s" % self.database)
0.00651
def rmatrixquaternion(q): """Create a rotation matrix from q quaternion rotation. Quaternions are typed as Numeric Python numpy.arrays of length 4. """ assert np.allclose(math.sqrt(np.dot(q,q)), 1.0) x, y, z, w = q xx = x*x xy = x*y xz = x*z xw = x*w yy = y*y yz = y*z yw = y*w zz = z*z zw = z*w r00 = 1.0 - 2.0 * (yy + zz) r01 = 2.0 * (xy - zw) r02 = 2.0 * (xz + yw) r10 = 2.0 * (xy + zw) r11 = 1.0 - 2.0 * (xx + zz) r12 = 2.0 * (yz - xw) r20 = 2.0 * (xz - yw) r21 = 2.0 * (yz + xw) r22 = 1.0 - 2.0 * (xx + yy) R = np.array([[r00, r01, r02], [r10, r11, r12], [r20, r21, r22]], float) assert np.allclose(np.linalg.det(R), 1.0) return R
0.011084
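A small numerical check of the same algebra, assuming the (x, y, z, w) component order used above: a 90 degree rotation about the z-axis should map the x-axis onto the y-axis.

import math
import numpy as np

theta = math.pi / 2  # 90 degrees about z
q = np.array([0.0, 0.0, math.sin(theta / 2), math.cos(theta / 2)])  # (x, y, z, w)

x, y, z, w = q
# Same matrix entries as rmatrixquaternion() above.
R = np.array([
    [1 - 2*(y*y + z*z), 2*(x*y - z*w),     2*(x*z + y*w)],
    [2*(x*y + z*w),     1 - 2*(x*x + z*z), 2*(y*z - x*w)],
    [2*(x*z - y*w),     2*(y*z + x*w),     1 - 2*(x*x + y*y)],
])

print(R @ np.array([1.0, 0.0, 0.0]))  # approximately [0, 1, 0]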
def gplot(self, analytes=None, win=25, figsize=[10, 4], ranges=False, focus_stage=None, ax=None, recalc=True): """ Plot analytes gradients as a function of Time. Parameters ---------- analytes : array_like list of strings containing names of analytes to plot. None = all analytes. win : int The window over which to calculate the rolling gradient. figsize : tuple size of final figure. ranges : bool show signal/background regions. Returns ------- figure, axis """ if type(analytes) is str: analytes = [analytes] if analytes is None: analytes = self.analytes if focus_stage is None: focus_stage = self.focus_stage if ax is None: fig = plt.figure(figsize=figsize) ax = fig.add_axes([.1, .12, .77, .8]) ret = True else: fig = ax.figure ret = False x = self.Time if recalc or not self.grads_calced: self.grads = calc_grads(x, self.data[focus_stage], analytes, win) self.grads_calce = True for a in analytes: ax.plot(x, self.grads[a], color=self.cmap[a], label=a) if ranges: for lims in self.bkgrng: ax.axvspan(*lims, color='k', alpha=0.1, zorder=-1) for lims in self.sigrng: ax.axvspan(*lims, color='r', alpha=0.1, zorder=-1) ax.text(0.01, 0.99, self.sample + ' : ' + self.focus_stage + ' : gradient', transform=ax.transAxes, ha='left', va='top') ax.set_xlabel('Time (s)') ax.set_xlim(np.nanmin(x), np.nanmax(x)) # y label ud = {'rawdata': 'counts/s', 'despiked': 'counts/s', 'bkgsub': 'background corrected counts/s', 'ratios': 'counts/{:s} count/s', 'calibrated': 'mol/mol {:s}/s'} if focus_stage in ['ratios', 'calibrated']: ud[focus_stage] = ud[focus_stage].format(self.internal_standard) ax.set_ylabel(ud[focus_stage]) # y tick format def yfmt(x, p): return '{:.0e}'.format(x) ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(yfmt)) ax.legend(bbox_to_anchor=(1.15, 1)) ax.axhline(0, color='k', lw=1, ls='dashed', alpha=0.5) if ret: return fig, ax
0.001584
def get(self, direction=NOMINAL, names=ALL, diff=False, factor=False): """ get(direction=NOMINAL, names=ALL, diff=False, factor=False) Returns different representations of the contained value(s). *direction* should be any of *NOMINAL*, *UP* or *DOWN*. When not *NOMINAL*, *names* decides which uncertainties to take into account for the combination. When *diff* is *True*, only the unsigned, combined uncertainty is returned. When *False*, the nominal value plus or minus the uncertainty is returned. When *factor* is *True*, the ratio w.r.t. the nominal value is returned. """ if direction == self.NOMINAL: value = self.nominal elif direction in (self.UP, self.DOWN): # find uncertainties to take into account if names == self.ALL: names = self.uncertainties.keys() else: names = make_list(names) if any(name not in self.uncertainties for name in names): unknown = list(set(names) - set(self.uncertainties.keys())) raise ValueError("unknown uncertainty name(s): {}".format(unknown)) # calculate the combined uncertainty without correlation idx = int(direction == self.DOWN) uncs = [self.uncertainties[name][idx] for name in names] unc = sum(u**2. for u in uncs)**0.5 # determine the output value if diff: value = unc elif direction == self.UP: value = self.nominal + unc else: value = self.nominal - unc else: raise ValueError("unknown direction: {}".format(direction)) return value if not factor else value / self.nominal
0.004425
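The quadrature combination inside get() is easy to verify by hand; this standalone sketch repeats the same arithmetic outside the class, with invented numbers:

# Standalone illustration of the combination used in get(): up/down shifts are
# added in quadrature, with no correlations between the named uncertainties.
nominal = 100.0
uncertainties = {'stat': (5.0, 4.0), 'syst': (3.0, 3.0)}   # (up, down) pairs, invented

up = sum(u**2. for u, _ in uncertainties.values())**0.5     # sqrt(5^2 + 3^2) ~= 5.83
down = sum(d**2. for _, d in uncertainties.values())**0.5   # sqrt(4^2 + 3^2) = 5.0
print(nominal + up, nominal - down)    # value shifted up / down
print((nominal + up) / nominal)        # what factor=True would return for UP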
def print_dictionary(self, d, h, n, nl=False):
    """Print a dictionary using the specified indent (n) and newline (nl)."""
    if d in h:
        return "{}..."
    h.append(d)
    s = []
    if nl:
        s.append("\n")
        s.append(self.indent(n))
    s.append("{")
    for item in d.items():
        s.append("\n")
        s.append(self.indent(n+1))
        if isinstance(item[1], (list, tuple)):
            s.append(tostr(item[0]))
            s.append("[]")
        else:
            s.append(tostr(item[0]))
        # every item's value is printed; "[]" above only marks list/tuple-valued keys
        s.append(" = ")
        s.append(self.process(item[1], h, n, True))
    s.append("\n")
    s.append(self.indent(n))
    s.append("}")
    h.pop()
    return "".join(s)
0.003812
def stop(): """Stop the server, invalidating any viewer URLs. This allows any previously-referenced data arrays to be garbage collected if there are no other references to them. """ global global_server if global_server is not None: ioloop = global_server.ioloop def stop_ioloop(): ioloop.stop() ioloop.close() global_server.ioloop.add_callback(stop_ioloop) global_server = None
0.006536
def receive_message(self): """Read the next message from the connection. @rtype: OmapiMessage @raises OmapiError: @raises socket.error: """ while not self.recv_message_queue: self.transport.fill_inbuffer() message = self.recv_message_queue.pop(0) assert message is not None if not message.verify(self.protocol.authenticators): self.close() raise OmapiError("bad omapi message signature") return message
0.034803
def create_ec2_role(self, role, bound_ami_id=None, bound_account_id=None, bound_iam_role_arn=None,
                    bound_iam_instance_profile_arn=None, bound_ec2_instance_id=None, bound_region=None,
                    bound_vpc_id=None, bound_subnet_id=None, role_tag=None, ttl=None, max_ttl=None, period=None,
                    policies=None, allow_instance_migration=False, disallow_reauthentication=False,
                    resolve_aws_unique_ids=None, mount_point='aws-ec2'):
    """POST /auth/<mount_point>/role/<role>

    :param role:
    :type role:
    :param bound_ami_id:
    :type bound_ami_id:
    :param bound_account_id:
    :type bound_account_id:
    :param bound_iam_role_arn:
    :type bound_iam_role_arn:
    :param bound_iam_instance_profile_arn:
    :type bound_iam_instance_profile_arn:
    :param bound_ec2_instance_id:
    :type bound_ec2_instance_id:
    :param bound_region:
    :type bound_region:
    :param bound_vpc_id:
    :type bound_vpc_id:
    :param bound_subnet_id:
    :type bound_subnet_id:
    :param role_tag:
    :type role_tag:
    :param ttl:
    :type ttl:
    :param max_ttl:
    :type max_ttl:
    :param period:
    :type period:
    :param policies:
    :type policies:
    :param allow_instance_migration:
    :type allow_instance_migration:
    :param disallow_reauthentication:
    :type disallow_reauthentication:
    :param resolve_aws_unique_ids:
    :type resolve_aws_unique_ids:
    :param mount_point:
    :type mount_point:
    :return:
    :rtype:
    """
    params = {
        'role': role,
        'auth_type': 'ec2',
        'disallow_reauthentication': disallow_reauthentication,
        'allow_instance_migration': allow_instance_migration
    }
    if bound_ami_id is not None:
        params['bound_ami_id'] = bound_ami_id
    if bound_account_id is not None:
        params['bound_account_id'] = bound_account_id
    if bound_iam_role_arn is not None:
        params['bound_iam_role_arn'] = bound_iam_role_arn
    if bound_ec2_instance_id is not None:
        # the EC2 instance ID binding goes under its own key
        params['bound_ec2_instance_id'] = bound_ec2_instance_id
    if bound_iam_instance_profile_arn is not None:
        params['bound_iam_instance_profile_arn'] = bound_iam_instance_profile_arn
    if bound_region is not None:
        params['bound_region'] = bound_region
    if bound_vpc_id is not None:
        params['bound_vpc_id'] = bound_vpc_id
    if bound_subnet_id is not None:
        params['bound_subnet_id'] = bound_subnet_id
    if role_tag is not None:
        params['role_tag'] = role_tag
    if ttl is not None:
        params['ttl'] = ttl
    else:
        params['ttl'] = 0
    if max_ttl is not None:
        params['max_ttl'] = max_ttl
    else:
        params['max_ttl'] = 0
    if period is not None:
        params['period'] = period
    else:
        params['period'] = 0
    if policies is not None:
        params['policies'] = policies
    if resolve_aws_unique_ids is not None:
        params['resolve_aws_unique_ids'] = resolve_aws_unique_ids
    return self._adapter.post('/v1/auth/{0}/role/{1}'.format(mount_point, role), json=params)
0.003475
def f_load(self, recursive=True, load_data=pypetconstants.LOAD_DATA, max_depth=None):
    """Loads a group from disk.

    :param recursive:

        Default is ``True``.
        Whether all nodes below the current node should be loaded recursively, too.
        Note that links are never evaluated recursively. Only the linked node
        will be loaded if it does not exist in the tree, yet. Any nodes or links
        of this linked node are not loaded.

    :param load_data:

        Flag how to load the data.
        For how to choose 'load_data' see :ref:`more-on-loading`.

    :param max_depth:

        In case `recursive` is `True`, you can specify the maximum depth to load
        data, relative to the current node.

    :returns: The node itself.

    """
    traj = self._nn_interface._root_instance
    storage_service = traj.v_storage_service

    storage_service.load(pypetconstants.GROUP, self,
                         trajectory_name=traj.v_name,
                         load_data=load_data,
                         recursive=recursive,
                         max_depth=max_depth)

    return self
0.005613
def serialize_operations(self, operations): """Serialize a list of operations into JSON.""" serialized_ops = [] for operation in operations: serializer = self.get_serializer_class(operation.__class__) serialized_ops.append(serializer(operation).data) return serialized_ops
0.006079
def transpose(attrs, inputs, proto_obj): """Transpose the input array.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'perm' : 'axes'}) return 'transpose', new_attrs, inputs
0.007843
def p_stringValueList(p): """stringValueList : stringValue | stringValueList stringValue """ if len(p) == 2: p[0] = _fixStringValue(p[1], p) else: p[0] = p[1] + _fixStringValue(p[2], p)
0.003861
def next_frame_basic_stochastic_discrete(): """Basic 2-frame conv model with stochastic discrete latent.""" hparams = basic_deterministic_params.next_frame_sampling() hparams.batch_size = 4 hparams.video_num_target_frames = 6 hparams.scheduled_sampling_mode = "prob_inverse_lin" hparams.scheduled_sampling_decay_steps = 40000 hparams.scheduled_sampling_max_prob = 1.0 hparams.dropout = 0.15 hparams.filter_double_steps = 3 hparams.hidden_size = 96 hparams.learning_rate_constant = 0.002 hparams.learning_rate_warmup_steps = 2000 hparams.learning_rate_schedule = "linear_warmup * constant" hparams.concat_internal_states = True hparams.video_modality_loss_cutoff = 0.03 hparams.add_hparam("bottleneck_bits", 128) hparams.add_hparam("bottleneck_noise", 0.1) hparams.add_hparam("discretize_warmup_steps", 40000) hparams.add_hparam("latent_rnn_warmup_steps", 40000) hparams.add_hparam("latent_rnn_max_sampling", 0.5) hparams.add_hparam("latent_use_max_probability", 0.8) hparams.add_hparam("full_latent_tower", False) hparams.add_hparam("latent_predictor_state_size", 128) hparams.add_hparam("latent_predictor_temperature", 1.0) hparams.add_hparam("complex_addn", True) hparams.add_hparam("recurrent_state_size", 64) return hparams
0.021858
def propertyContainer(self, ulBuffer):
    """Retrieves the property container of a buffer."""

    fn = self.function_table.propertyContainer
    result = fn(ulBuffer)
    return result
0.009852
def subnet_delete(auth=None, **kwargs):
    '''
    Delete a subnet

    name
        Name or ID of the subnet to delete

    CLI Example:

    .. code-block:: bash

        salt '*' neutronng.subnet_delete name=subnet1
        salt '*' neutronng.subnet_delete \
          name=1dcac318a83b4610b7a7f7ba01465548

    '''
    cloud = get_operator_cloud(auth)
    kwargs = _clean_kwargs(**kwargs)
    return cloud.delete_subnet(**kwargs)
0.002304
def read_json(self): """Calls the overridden method. :returns: The read metadata. :rtype: dict """ with reading_ancillary_files(self): metadata = super(GenericLayerMetadata, self).read_json() return metadata
0.007435
def triangle(times: np.ndarray, amp: complex, period: float, phase: float = 0) -> np.ndarray: """Continuous triangle wave. Args: times: Times to output wave for. amp: Pulse amplitude. Wave range is [-amp, amp]. period: Pulse period, units of dt. phase: Pulse phase. """ return amp*(-2*np.abs(sawtooth(times, 1, period, (phase-np.pi/2)/2)) + 1).astype(np.complex_)
0.007282
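The snippet depends on a library-specific sawtooth helper that is not shown here. As an independent, zero-phase reference (not the snippet's implementation, and its peaks may be aligned differently), a triangle wave of the same amplitude and period can be written directly:

import numpy as np

def triangle_ref(times, amp, period):
    # Generic triangle wave in [-amp, amp] with the given period (zero phase).
    # Independent reference formula only; not the implementation used above.
    return (2 * amp / np.pi) * np.arcsin(np.sin(2 * np.pi * times / period))

t = np.linspace(0, 2.0, 9)
print(triangle_ref(t, amp=1.0, period=1.0))   # rises to +1 at t=0.25, back to 0 at t=0.5, ...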
def get_kwargs(self, **kwargs):
    """
    Creates a full URL to request based on arguments.

    :Parameters:
       - `kwargs`: All keyword arguments to build a kubernetes API endpoint
    """
    version = kwargs.pop("version", "v1")
    if version == "v1":
        base = kwargs.pop("base", "/api")
    elif "/" in version:
        base = kwargs.pop("base", "/apis")
    else:
        if "base" not in kwargs:
            raise TypeError("unknown API version; base kwarg must be specified.")
        base = kwargs.pop("base")
    bits = [base, version]
    # Overwrite (default) namespace from context if it was set
    if "namespace" in kwargs:
        n = kwargs.pop("namespace")
        if n is not None:
            if n:
                namespace = n
            else:
                namespace = self.config.namespace
            if namespace:
                bits.extend([
                    "namespaces",
                    namespace,
                ])
    url = kwargs.get("url", "")
    if url.startswith("/"):
        url = url[1:]
    bits.append(url)
    kwargs["url"] = self.url + posixpath.join(*bits)
    return kwargs
0.002349
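The path assembly above reduces to posixpath.join over the collected bits; with hypothetical values for a v1 request against pods in the default namespace:

import posixpath

# Hypothetical inputs mirroring the bits collected in get_kwargs above.
bits = ["/api", "v1", "namespaces", "default", "pods"]
print(posixpath.join(*bits))   # /api/v1/namespaces/default/pods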
def get_items(self, names):
    """
    Subclass get_items to add support for all public methods of a given object
    """
    env = self.state.document.settings.env
    prefixes = get_import_prefixes_from_env(env)

    methodNames = []
    for name in names:
        methodNames.append(name)
        _, obj, _, _ = import_by_name(name, prefixes=prefixes)
        methodNames.extend(["%s.%s" % (name, method) for method in dir(obj) if not method.startswith("_")])

    return super(AutosummaryMethodList, self).get_items(methodNames)
0.005199
def _detach_children(self): """Remove all children and give them independent parent copies.""" children = [val[0] for val in self._children.values()] for child in children: child()._parent = list(self) self._children.clear()
0.007463
def set_load_from(self, load_from): """Update load_from in Cache and backend.""" assert load_from is None or isinstance(load_from, Cache), \ "load_from needs to be None or a Cache object." assert load_from is None or load_from.cl_size <= self.cl_size, \ "cl_size may only increase towards main memory." self.load_from = load_from self.backend.load_from = load_from.backend
0.004587
def _default_request_kwargs(self): """The default request keyword arguments to be passed to the requests library.""" defaults = copy.deepcopy(super(Acls, self)._default_request_kwargs) defaults.setdefault('headers', {}).update({ 'X-Auth-Token': self._client.auth._token }) return defaults
0.008824
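The setdefault(...).update(...) chain above adds the auth header whether or not a headers dict already exists; a standalone sketch with placeholder values:

# Standalone illustration of the setdefault(...).update(...) pattern:
# the 'headers' dict is created if missing, then updated in place either way.
defaults = {'timeout': 30}                     # stand-in for the parent class defaults
defaults.setdefault('headers', {}).update({'X-Auth-Token': 'example-token'})  # placeholder token
print(defaults)  # {'timeout': 30, 'headers': {'X-Auth-Token': 'example-token'}}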
def delete_binding(self, vhost, exchange, queue, rt_key): """ Deletes a binding between an exchange and a queue on a given vhost. :param string vhost: vhost housing the exchange/queue to bind :param string exchange: the target exchange of the binding :param string queue: the queue to bind to the exchange :param string rt_key: the routing key to use for the binding """ vhost = quote(vhost, '') exchange = quote(exchange, '') queue = quote(queue, '') body = '' path = Client.urls['rt_bindings_between_exch_queue'] % (vhost, exchange, queue, rt_key) return self._call(path, 'DELETE', headers=Client.json_headers)
0.002198
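The quote() calls above matter because vhost and exchange names may contain URL-unsafe characters; with an empty safe string even '/' (RabbitMQ's default vhost) is encoded. Using the standard-library quote for illustration:

from urllib.parse import quote

# The default vhost in RabbitMQ is '/', which must be percent-encoded in the path.
print(quote('/', ''))            # %2F
print(quote('my exchange', ''))  # my%20exchange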
def insert(cls, index, interceptor): """ Add interceptor to the given index in the internal list. Note: Raises ``ValueError`` if interceptor does not extend ``OpenTracingInterceptor`` """ cls._check(interceptor) cls._interceptors.insert(index, interceptor)
0.00627
def delete_firewall_rule(self, server_name, name): ''' Deletes an Azure SQL Database server firewall rule. server_name: Name of the server with the firewall rule you want to delete. name: Name of the firewall rule you want to delete. ''' _validate_not_none('server_name', server_name) _validate_not_none('name', name) return self._perform_delete( self._get_firewall_rules_path(server_name, name))
0.004024
def stream(self, limit=None, page_size=None): """ Streams KeyInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.api.v2010.account.key.KeyInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page(page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit'])
0.009083
def DeregisterFormatter(cls, formatter_class): """Deregisters a formatter class. The formatter classes are identified based on their lower case data type. Args: formatter_class (type): class of the formatter. Raises: KeyError: if formatter class is not set for the corresponding data type. """ formatter_data_type = formatter_class.DATA_TYPE.lower() if formatter_data_type not in cls._formatter_classes: raise KeyError( 'Formatter class not set for data type: {0:s}.'.format( formatter_class.DATA_TYPE)) del cls._formatter_classes[formatter_data_type]
0.00318
def mapper_from_prior_arguments(self, arguments):
    """
    Creates a new model mapper from a dictionary mapping existing priors to new priors.

    Parameters
    ----------
    arguments: {Prior: Prior}
        A dictionary mapping priors to priors

    Returns
    -------
    model_mapper: ModelMapper
        A new model mapper with updated priors.
    """
    mapper = copy.deepcopy(self)

    for prior_model_tuple in self.prior_model_tuples:
        setattr(mapper, prior_model_tuple.name,
                prior_model_tuple.prior_model.gaussian_prior_model_for_arguments(arguments))

    return mapper
0.005806
def interact(self, client, location, interaction_required_err):
    '''Implement Interactor.interact by obtaining a macaroon from the
    discharger, discharging it with the local private key, and using the
    discharged macaroon as a discharge token.'''
    p = interaction_required_err.interaction_method('agent', InteractionInfo)
    if p.login_url is None or p.login_url == '':
        raise httpbakery.InteractionError(
            'no login-url field found in agent interaction method')
    agent = self._find_agent(location)
    if not location.endswith('/'):
        location += '/'
    login_url = urljoin(location, p.login_url)
    resp = requests.get(
        login_url, params={
            'username': agent.username,
            'public-key': str(self._auth_info.key.public_key)},
        auth=client.auth())
    if resp.status_code != 200:
        raise httpbakery.InteractionError(
            'cannot acquire agent macaroon: {} {}'.format(
                resp.status_code, resp.text)
        )
    m = resp.json().get('macaroon')
    if m is None:
        raise httpbakery.InteractionError('no macaroon in response')
    m = bakery.Macaroon.from_dict(m)
    ms = bakery.discharge_all(m, None, self._auth_info.key)
    b = bytearray()
    for m in ms:
        b.extend(utils.b64decode(m.serialize()))
    return httpbakery.DischargeToken(kind='agent', value=bytes(b))
0.001271
def _level_coords(self): """Return a mapping of all MultiIndex levels and their corresponding coordinate name. """ level_coords = OrderedDict() for name, index in self.indexes.items(): if isinstance(index, pd.MultiIndex): level_names = index.names (dim,) = self.variables[name].dims level_coords.update({lname: dim for lname in level_names}) return level_coords
0.004264
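A minimal pandas example of the index metadata this method inspects; each level name of a MultiIndex would become a key in level_coords, mapped to the dimension holding the index (the dimension name here is assumed):

import pandas as pd

# A small MultiIndex standing in for an index coordinate on a dimension named 'x'.
idx = pd.MultiIndex.from_product([[2019, 2020], ['a', 'b']], names=['year', 'label'])
print(isinstance(idx, pd.MultiIndex))  # True
print(list(idx.names))                 # ['year', 'label'] -> both would map to dimension 'x'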
def get_unspents(self): """Fetches all available unspent transaction outputs. :rtype: ``list`` of :class:`~bitcash.network.meta.Unspent` """ self.unspents[:] = NetworkAPI.get_unspent(self.address) self.balance = sum(unspent.amount for unspent in self.unspents) return self.unspents
0.006061
def solve_filter(expr, vars): """Filter values on the LHS by evaluating RHS with each value. Returns any LHS values for which RHS evaluates to a true value. """ lhs_values, _ = __solve_for_repeated(expr.lhs, vars) def lazy_filter(): for lhs_value in repeated.getvalues(lhs_values): if solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)).value: yield lhs_value return Result(repeated.lazy(lazy_filter), ())
0.002114
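Stripped of the EFILTER machinery, lazy_filter is an ordinary generator over the left-hand-side values; the names in this sketch are illustrative, not the library's API:

# Stripped-down sketch of the lazy filtering pattern used in solve_filter:
# nothing on the left-hand side is evaluated until the result is iterated.
lhs_values = [1, 2, 3, 4, 5]
predicate = lambda value: value % 2 == 1   # stands in for evaluating expr.rhs

def lazy_filter():
    for value in lhs_values:
        if predicate(value):
            yield value

result = lazy_filter()        # no work done yet
print(list(result))           # [1, 3, 5]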