Dataset schema (each record below repeats these fields, in this order):

text (string, lengths 75 to 104k): the source code snippet.
code_tokens (sequence): the snippet split into lexical tokens.
avg_line_len (float64, range 7.91 to 980): average line length of the snippet.
score (float64, range 0 to 0.18).
def to_html(self, **kwargs):
    """Render as html

    Args:
        None

    Returns:
        Str the html representation

    Raises:
        Errors are propagated

    We pass the kwargs on to the base class so an exception is raised
    if invalid keywords were passed. See:
    http://stackoverflow.com/questions/13124961/
    how-to-pass-arguments-efficiently-kwargs-in-python
    """
    super(LineBreak, self).__init__(**kwargs)
    return '<br%s/>\n' % self.html_attributes()
[ "def", "to_html", "(", "self", ",", "*", "*", "kwargs", ")", ":", "super", "(", "LineBreak", ",", "self", ")", ".", "__init__", "(", "*", "*", "kwargs", ")", "return", "'<br%s/>\\n'", "%", "self", ".", "html_attributes", "(", ")" ]
26.2
0.003683
def fetch(
    self, request: Union["HTTPRequest", str], **kwargs: Any
) -> "HTTPResponse":
    """Executes a request, returning an `HTTPResponse`.

    The request may be either a string URL or an `HTTPRequest` object.
    If it is a string, we construct an `HTTPRequest` using any additional
    kwargs: ``HTTPRequest(request, **kwargs)``

    If an error occurs during the fetch, we raise an `HTTPError` unless
    the ``raise_error`` keyword argument is set to False.
    """
    response = self._io_loop.run_sync(
        functools.partial(self._async_client.fetch, request, **kwargs)
    )
    return response
[ "def", "fetch", "(", "self", ",", "request", ":", "Union", "[", "\"HTTPRequest\"", ",", "str", "]", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "\"HTTPResponse\"", ":", "response", "=", "self", ".", "_io_loop", ".", "run_sync", "(", "functools", ".", "partial", "(", "self", ".", "_async_client", ".", "fetch", ",", "request", ",", "*", "*", "kwargs", ")", ")", "return", "response" ]
40.75
0.004498
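The fetch wrapper above appears to be Tornado's synchronous HTTPClient.fetch; a minimal usage sketch under that assumption (URLs and kwargs are illustrative):

    from tornado.httpclient import HTTPClient

    client = HTTPClient()
    # kwargs are forwarded into HTTPRequest when a string URL is passed
    response = client.fetch("https://example.com", connect_timeout=5)
    print(response.code)
    # raise_error=False suppresses HTTPError for non-2xx responses
    response = client.fetch("https://example.com/missing", raise_error=False)
    print(response.code)
    client.close()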
def upload_feature_value_file(self, mapobject_type_name, plate_name,
        well_name, well_pos_y, well_pos_x, tpoint, filename, index_col):
    '''Uploads feature values for the given
    :class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
    at the specified :class:`Site <tmlib.models.site.Site>`.

    Parameters
    ----------
    mapobject_type_name: str
        type of the segmented objects
    plate_name: str
        name of the plate
    well_name: str
        name of the well
    well_pos_y: int
        y-position of the site relative to the well grid
    well_pos_x: int
        x-position of the site relative to the well grid
    tpoint: int
        zero-based time point index
    filename: str
        path to the file on disk
    index_col: str
        column name containing the object labels

    See also
    --------
    :func:`tmserver.api.feature.add_feature_values`
    :class:`tmlib.models.feature.FeatureValues`
    '''
    logger.info('upload feature value file "%s"', filename)
    if not filename.lower().endswith('csv'):
        raise IOError('Filename must have "csv" extension.')
    filename = os.path.expanduser(os.path.expandvars(filename))
    data = pd.read_csv(filename, index_col=index_col)
    self._upload_feature_values(
        mapobject_type_name, plate_name, well_name, well_pos_y,
        well_pos_x, tpoint, data
    )
[ "def", "upload_feature_value_file", "(", "self", ",", "mapobject_type_name", ",", "plate_name", ",", "well_name", ",", "well_pos_y", ",", "well_pos_x", ",", "tpoint", ",", "filename", ",", "index_col", ")", ":", "logger", ".", "info", "(", "'upload feature value file \"%s\"'", ",", "filename", ")", "if", "not", "filename", ".", "lower", "(", ")", ".", "endswith", "(", "'csv'", ")", ":", "raise", "IOError", "(", "'Filename must have \"csv\" extension.'", ")", "filename", "=", "os", ".", "path", ".", "expanduser", "(", "os", ".", "path", ".", "expandvars", "(", "filename", ")", ")", "data", "=", "pd", ".", "read_csv", "(", "filename", ",", "index_col", "=", "index_col", ")", "self", ".", "_upload_feature_values", "(", "mapobject_type_name", ",", "plate_name", ",", "well_name", ",", "well_pos_y", ",", "well_pos_x", ",", "tpoint", ",", "data", ")" ]
38.102564
0.001969
def parse_http_scheme(uri):
    """
    match on http scheme if no match is found will assume http
    """
    regex = re.compile(
        r'^(?:http)s?://',
        flags=re.IGNORECASE
    )
    match = regex.match(uri)
    return match.group(0) if match else 'http://'
[ "def", "parse_http_scheme", "(", "uri", ")", ":", "regex", "=", "re", ".", "compile", "(", "r'^(?:http)s?://'", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "match", "=", "regex", ".", "match", "(", "uri", ")", "return", "match", ".", "group", "(", "0", ")", "if", "match", "else", "'http://'" ]
23.727273
0.00369
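A quick sketch of how the parse_http_scheme helper above behaves (outputs shown as comments):

    parse_http_scheme('https://example.com')  # -> 'https://'
    parse_http_scheme('HTTP://example.com')   # -> 'HTTP://' (match is case-insensitive, matched text preserved)
    parse_http_scheme('example.com')          # -> 'http://' (default when no scheme is present)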
def set_default_unit(self, twig=None, unit=None, **kwargs):
    """
    TODO: add documentation
    """
    if twig is not None and unit is None:
        # then try to support value as the first argument if no matches with twigs
        if isinstance(unit, u.Unit) or not isinstance(twig, str):
            unit = twig
            twig = None
        elif not len(self.filter(twig=twig, check_default=check_default, **kwargs)):
            unit = twig
            twig = None

    return self.get_parameter(twig=twig, **kwargs).set_default_unit(unit)
[ "def", "set_default_unit", "(", "self", ",", "twig", "=", "None", ",", "unit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "twig", "is", "not", "None", "and", "unit", "is", "None", ":", "# then try to support value as the first argument if no matches with twigs", "if", "isinstance", "(", "unit", ",", "u", ".", "Unit", ")", "or", "not", "isinstance", "(", "twig", ",", "str", ")", ":", "unit", "=", "twig", "twig", "=", "None", "elif", "not", "len", "(", "self", ".", "filter", "(", "twig", "=", "twig", ",", "check_default", "=", "check_default", ",", "*", "*", "kwargs", ")", ")", ":", "unit", "=", "twig", "twig", "=", "None", "return", "self", ".", "get_parameter", "(", "twig", "=", "twig", ",", "*", "*", "kwargs", ")", ".", "set_default_unit", "(", "unit", ")" ]
39
0.006678
def to_tex(self, text_size='large', table_width=5, clear_pages=False):
    """
    Write the program information to a .tex file, which can be
    rendered to .pdf running pdflatex. The program can then be
    printed and brought to the gym.

    Parameters
    ----------
    text_size
        The tex text size, e.g. '\small', 'normalsize', 'large',
        'Large' or 'LARGE'.
    table_width
        The table width of the .tex code.

    Returns
    -------
    string
        Program as tex.
    """
    # If rendered, find the length of the longest '6 x 75kg'-type string
    max_ex_scheme = 0
    if self._rendered:
        for (week, day, dynamic_ex) in self._yield_week_day_dynamic():
            lengths = [len(s) for s in
                       self._rendered[week][day][dynamic_ex]['strings']]
            max_ex_scheme = max(max_ex_scheme, max(lengths))

    env = self.jinja2_environment
    template = env.get_template(self.TEMPLATE_NAMES['tex'])

    return template.render(program=self, text_size=text_size,
                           table_width=table_width, clear_pages=clear_pages)
[ "def", "to_tex", "(", "self", ",", "text_size", "=", "'large'", ",", "table_width", "=", "5", ",", "clear_pages", "=", "False", ")", ":", "# If rendered, find the length of the longest '6 x 75kg'-type string", "max_ex_scheme", "=", "0", "if", "self", ".", "_rendered", ":", "for", "(", "week", ",", "day", ",", "dynamic_ex", ")", "in", "self", ".", "_yield_week_day_dynamic", "(", ")", ":", "lengths", "=", "[", "len", "(", "s", ")", "for", "s", "in", "self", ".", "_rendered", "[", "week", "]", "[", "day", "]", "[", "dynamic_ex", "]", "[", "'strings'", "]", "]", "max_ex_scheme", "=", "max", "(", "max_ex_scheme", ",", "max", "(", "lengths", ")", ")", "env", "=", "self", ".", "jinja2_environment", "template", "=", "env", ".", "get_template", "(", "self", ".", "TEMPLATE_NAMES", "[", "'tex'", "]", ")", "return", "template", ".", "render", "(", "program", "=", "self", ",", "text_size", "=", "text_size", ",", "table_width", "=", "table_width", ",", "clear_pages", "=", "clear_pages", ")" ]
33.742857
0.007407
def addDataModels(self, mods):
    '''
    Adds a model definition (same format as input to Model.addDataModels
    and output of Model.getModelDef).
    '''
    # Load all the universal properties
    for _, mdef in mods:
        for univname, _, _ in mdef.get('univs', ()):
            self.addUnivName(univname)

    # Load all the forms
    for _, mdef in mods:
        for formname, formopts, propdefs in mdef.get('forms', ()):
            self.formnames.add(formname)
            self.propnames.add(formname)

            for univname in self.univnames:
                full = f'{formname}{univname}'
                self.propnames.add(full)

            for propname, _, _ in propdefs:
                full = f'{formname}:{propname}'
                self.propnames.add(full)
[ "def", "addDataModels", "(", "self", ",", "mods", ")", ":", "# Load all the universal properties", "for", "_", ",", "mdef", "in", "mods", ":", "for", "univname", ",", "_", ",", "_", "in", "mdef", ".", "get", "(", "'univs'", ",", "(", ")", ")", ":", "self", ".", "addUnivName", "(", "univname", ")", "# Load all the forms", "for", "_", ",", "mdef", "in", "mods", ":", "for", "formname", ",", "formopts", ",", "propdefs", "in", "mdef", ".", "get", "(", "'forms'", ",", "(", ")", ")", ":", "self", ".", "formnames", ".", "add", "(", "formname", ")", "self", ".", "propnames", ".", "add", "(", "formname", ")", "for", "univname", "in", "self", ".", "univnames", ":", "full", "=", "f'{formname}{univname}'", "self", ".", "propnames", ".", "add", "(", "full", ")", "for", "propname", ",", "_", ",", "_", "in", "propdefs", ":", "full", "=", "f'{formname}:{propname}'", "self", ".", "propnames", ".", "add", "(", "full", ")" ]
36
0.003529
def find_ask():
    """
    Find our instance of Ask, navigating Local's and possible blueprints.

    Note: This only supports returning a reference to the first instance
    of Ask found.
    """
    if hasattr(current_app, 'ask'):
        return getattr(current_app, 'ask')
    else:
        if hasattr(current_app, 'blueprints'):
            blueprints = getattr(current_app, 'blueprints')
            for blueprint_name in blueprints:
                if hasattr(blueprints[blueprint_name], 'ask'):
                    return getattr(blueprints[blueprint_name], 'ask')
[ "def", "find_ask", "(", ")", ":", "if", "hasattr", "(", "current_app", ",", "'ask'", ")", ":", "return", "getattr", "(", "current_app", ",", "'ask'", ")", "else", ":", "if", "hasattr", "(", "current_app", ",", "'blueprints'", ")", ":", "blueprints", "=", "getattr", "(", "current_app", ",", "'blueprints'", ")", "for", "blueprint_name", "in", "blueprints", ":", "if", "hasattr", "(", "blueprints", "[", "blueprint_name", "]", ",", "'ask'", ")", ":", "return", "getattr", "(", "blueprints", "[", "blueprint_name", "]", ",", "'ask'", ")" ]
37.2
0.001748
def _classify_load_constant(self, regs_init, regs_fini, mem_fini,
                            written_regs, read_regs):
    """Classify load-constant gadgets.
    """
    matches = []

    # Check for "dst_reg <- constant" pattern.
    for dst_reg, dst_val in regs_fini.items():
        # Make sure the *dst* register was written.
        if dst_reg not in written_regs:
            continue

        # Check restrictions...
        if dst_val == regs_init[dst_reg]:
            continue

        dst_val_ir = ReilImmediateOperand(dst_val, self._arch_regs_size[dst_reg])
        dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg])

        matches.append({
            "src": [dst_val_ir],
            "dst": [dst_reg_ir]
        })

    return matches
[ "def", "_classify_load_constant", "(", "self", ",", "regs_init", ",", "regs_fini", ",", "mem_fini", ",", "written_regs", ",", "read_regs", ")", ":", "matches", "=", "[", "]", "# Check for \"dst_reg <- constant\" pattern.", "for", "dst_reg", ",", "dst_val", "in", "regs_fini", ".", "items", "(", ")", ":", "# Make sure the *dst* register was written.", "if", "dst_reg", "not", "in", "written_regs", ":", "continue", "# Check restrictions...", "if", "dst_val", "==", "regs_init", "[", "dst_reg", "]", ":", "continue", "dst_val_ir", "=", "ReilImmediateOperand", "(", "dst_val", ",", "self", ".", "_arch_regs_size", "[", "dst_reg", "]", ")", "dst_reg_ir", "=", "ReilRegisterOperand", "(", "dst_reg", ",", "self", ".", "_arch_regs_size", "[", "dst_reg", "]", ")", "matches", ".", "append", "(", "{", "\"src\"", ":", "[", "dst_val_ir", "]", ",", "\"dst\"", ":", "[", "dst_reg_ir", "]", "}", ")", "return", "matches" ]
33.083333
0.00612
def _read_json(self, path, name):
    """
    Load a json into a dictionary from a file.

    :param path: path to file
    :param name: name of file
    :return: dict
    """
    with open(os.path.join(path, name), 'r') as fil:
        output = json.load(fil)
    self.logger.info("Read contents of {}".format(name))
    return output
[ "def", "_read_json", "(", "self", ",", "path", ",", "name", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "path", ",", "name", ")", ",", "'r'", ")", "as", "fil", ":", "output", "=", "json", ".", "load", "(", "fil", ")", "self", ".", "logger", ".", "info", "(", "\"Read contents of {}\"", ".", "format", "(", "name", ")", ")", "return", "output" ]
31
0.005222
def spawn_containers(addrs, env_cls=Environment,
                     env_params=None,
                     mgr_cls=EnvManager,
                     *args, **kwargs):
    """Spawn environments in a multiprocessing :class:`multiprocessing.Pool`.

    Arguments and keyword arguments are passed down to the created
    environments at initialization time if *env_params* is None. If
    *env_params* is not None, then it is assumed to contain individual
    initialization parameters for each environment in *addrs*.

    :param addrs:
        List of (HOST, PORT) addresses for the environments.

    :param env_cls:
        Callable for the environments. Must be a subclass of
        :py:class:`~creamas.core.environment.Environment`.

    :param env_params:
        Initialization parameters for the environments.

    :type env_params:
        Iterable of same length as *addrs* or None.

    :param mgr_cls:
        Callable for the managers. Must be a subclass of
        :py:class:`~creamas.mp.EnvManager`.

    :returns:
        The created process pool and the *ApplyAsync* results for the
        spawned environments.
    """
    pool = multiprocessing.Pool(len(addrs))
    kwargs['env_cls'] = env_cls
    kwargs['mgr_cls'] = mgr_cls
    r = []
    for i, addr in enumerate(addrs):
        if env_params is not None:
            k = env_params[i]
            k['env_cls'] = env_cls
            k['mgr_cls'] = mgr_cls
        # Copy kwargs so that we can apply different address to different
        # containers.
        else:
            k = kwargs.copy()
        k['addr'] = addr
        ret = pool.apply_async(spawn_container,
                               args=args,
                               kwds=k,
                               error_callback=logger.warning)
        r.append(ret)
    return pool, r
[ "def", "spawn_containers", "(", "addrs", ",", "env_cls", "=", "Environment", ",", "env_params", "=", "None", ",", "mgr_cls", "=", "EnvManager", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pool", "=", "multiprocessing", ".", "Pool", "(", "len", "(", "addrs", ")", ")", "kwargs", "[", "'env_cls'", "]", "=", "env_cls", "kwargs", "[", "'mgr_cls'", "]", "=", "mgr_cls", "r", "=", "[", "]", "for", "i", ",", "addr", "in", "enumerate", "(", "addrs", ")", ":", "if", "env_params", "is", "not", "None", ":", "k", "=", "env_params", "[", "i", "]", "k", "[", "'env_cls'", "]", "=", "env_cls", "k", "[", "'mgr_cls'", "]", "=", "mgr_cls", "# Copy kwargs so that we can apply different address to different", "# containers.", "else", ":", "k", "=", "kwargs", ".", "copy", "(", ")", "k", "[", "'addr'", "]", "=", "addr", "ret", "=", "pool", ".", "apply_async", "(", "spawn_container", ",", "args", "=", "args", ",", "kwds", "=", "k", ",", "error_callback", "=", "logger", ".", "warning", ")", "r", ".", "append", "(", "ret", ")", "return", "pool", ",", "r" ]
36.229167
0.00056
def process_text(text, save_xml_name='trips_output.xml',
                 save_xml_pretty=True, offline=False,
                 service_endpoint='drum'):
    """Return a TripsProcessor by processing text.

    Parameters
    ----------
    text : str
        The text to be processed.
    save_xml_name : Optional[str]
        The name of the file to save the returned TRIPS extraction
        knowledge base XML. Default: trips_output.xml
    save_xml_pretty : Optional[bool]
        If True, the saved XML is pretty-printed. Some third-party tools
        require non-pretty-printed XMLs which can be obtained by setting
        this to False. Default: True
    offline : Optional[bool]
        If True, offline reading is used with a local instance of DRUM,
        if available. Default: False
    service_endpoint : Optional[str]
        Selects the TRIPS/DRUM web service endpoint to use. Is a choice
        between "drum" (default) and "drum-dev", a nightly build.

    Returns
    -------
    tp : TripsProcessor
        A TripsProcessor containing the extracted INDRA Statements in
        tp.statements.
    """
    if not offline:
        html = client.send_query(text, service_endpoint)
        xml = client.get_xml(html)
    else:
        if offline_reading:
            try:
                dr = DrumReader()
                if dr is None:
                    raise Exception('DrumReader could not be instantiated.')
            except BaseException as e:
                logger.error(e)
                logger.error('Make sure drum/bin/trips-drum is running in'
                             ' a separate process')
                return None
            try:
                dr.read_text(text)
                dr.start()
            except SystemExit:
                pass
            xml = dr.extractions[0]
        else:
            logger.error('Offline reading with TRIPS/DRUM not available.')
            logger.error('Error message was: %s' % offline_err)
            msg = """
                To install DRUM locally, follow instructions at
                https://github.com/wdebeaum/drum.
                Next, install the pykqml package either from pip or from
                https://github.com/bgyori/pykqml.
                Once installed, run drum/bin/trips-drum in a separate process.
                """
            logger.error(msg)
            return None
    if save_xml_name:
        client.save_xml(xml, save_xml_name, save_xml_pretty)
    return process_xml(xml)
[ "def", "process_text", "(", "text", ",", "save_xml_name", "=", "'trips_output.xml'", ",", "save_xml_pretty", "=", "True", ",", "offline", "=", "False", ",", "service_endpoint", "=", "'drum'", ")", ":", "if", "not", "offline", ":", "html", "=", "client", ".", "send_query", "(", "text", ",", "service_endpoint", ")", "xml", "=", "client", ".", "get_xml", "(", "html", ")", "else", ":", "if", "offline_reading", ":", "try", ":", "dr", "=", "DrumReader", "(", ")", "if", "dr", "is", "None", ":", "raise", "Exception", "(", "'DrumReader could not be instantiated.'", ")", "except", "BaseException", "as", "e", ":", "logger", ".", "error", "(", "e", ")", "logger", ".", "error", "(", "'Make sure drum/bin/trips-drum is running in'", "' a separate process'", ")", "return", "None", "try", ":", "dr", ".", "read_text", "(", "text", ")", "dr", ".", "start", "(", ")", "except", "SystemExit", ":", "pass", "xml", "=", "dr", ".", "extractions", "[", "0", "]", "else", ":", "logger", ".", "error", "(", "'Offline reading with TRIPS/DRUM not available.'", ")", "logger", ".", "error", "(", "'Error message was: %s'", "%", "offline_err", ")", "msg", "=", "\"\"\"\n To install DRUM locally, follow instructions at\n https://github.com/wdebeaum/drum.\n Next, install the pykqml package either from pip or from\n https://github.com/bgyori/pykqml.\n Once installed, run drum/bin/trips-drum in a separate process.\n \"\"\"", "logger", ".", "error", "(", "msg", ")", "return", "None", "if", "save_xml_name", ":", "client", ".", "save_xml", "(", "xml", ",", "save_xml_name", ",", "save_xml_pretty", ")", "return", "process_xml", "(", "xml", ")" ]
38.444444
0.000805
def set_file(path, saltenv='base', **kwargs):
    '''
    Set answers to debconf questions from a file.

    CLI Example:

    .. code-block:: bash

        salt '*' debconf.set_file salt://pathto/pkg.selections
    '''
    if '__env__' in kwargs:
        # "env" is not supported; Use "saltenv".
        kwargs.pop('__env__')

    path = __salt__['cp.cache_file'](path, saltenv)
    if path:
        _set_file(path)
        return True

    return False
[ "def", "set_file", "(", "path", ",", "saltenv", "=", "'base'", ",", "*", "*", "kwargs", ")", ":", "if", "'__env__'", "in", "kwargs", ":", "# \"env\" is not supported; Use \"saltenv\".", "kwargs", ".", "pop", "(", "'__env__'", ")", "path", "=", "__salt__", "[", "'cp.cache_file'", "]", "(", "path", ",", "saltenv", ")", "if", "path", ":", "_set_file", "(", "path", ")", "return", "True", "return", "False" ]
21.75
0.002203
def savepysyn(self, wave, flux, fname, units=None):
    """ Cannot ever use the .writefits() method, because the array is
    frequently just sampled at the synphot waveset; plus, writefits
    is smart and does things like tapering."""
    if units is None:
        ytype = 'throughput'
        units = ' '
    else:
        ytype = 'flux'
    col1 = pyfits.Column(name='wavelength', format='D', array=wave)
    col2 = pyfits.Column(name=ytype, format='D', array=flux)
    tbhdu = pyfits.BinTableHDU.from_columns(pyfits.ColDefs([col1, col2]))
    tbhdu.header.update('tunit1', 'angstrom')
    tbhdu.header.update('tunit2', units)
    tbhdu.writeto(fname.replace('.fits', '_pysyn.fits'))
[ "def", "savepysyn", "(", "self", ",", "wave", ",", "flux", ",", "fname", ",", "units", "=", "None", ")", ":", "if", "units", "is", "None", ":", "ytype", "=", "'throughput'", "units", "=", "' '", "else", ":", "ytype", "=", "'flux'", "col1", "=", "pyfits", ".", "Column", "(", "name", "=", "'wavelength'", ",", "format", "=", "'D'", ",", "array", "=", "wave", ")", "col2", "=", "pyfits", ".", "Column", "(", "name", "=", "ytype", ",", "format", "=", "'D'", ",", "array", "=", "flux", ")", "tbhdu", "=", "pyfits", ".", "BinTableHDU", ".", "from_columns", "(", "pyfits", ".", "ColDefs", "(", "[", "col1", ",", "col2", "]", ")", ")", "tbhdu", ".", "header", ".", "update", "(", "'tunit1'", ",", "'angstrom'", ")", "tbhdu", ".", "header", ".", "update", "(", "'tunit2'", ",", "units", ")", "tbhdu", ".", "writeto", "(", "fname", ".", "replace", "(", "'.fits'", ",", "'_pysyn.fits'", ")", ")" ]
47
0.027816
def __global_logging_exception_handler(exc_type, exc_value, exc_traceback):
    '''
    This function will log all un-handled python exceptions.
    '''
    if exc_type.__name__ == "KeyboardInterrupt":
        # Do not log the exception or display the traceback on Keyboard Interrupt
        # Stop the logging queue listener thread
        if is_mp_logging_listener_configured():
            shutdown_multiprocessing_logging_listener()
    else:
        # Log the exception
        logging.getLogger(__name__).error(
            'An un-handled exception was caught by salt\'s global exception '
            'handler:\n%s: %s\n%s',
            exc_type.__name__,
            exc_value,
            ''.join(traceback.format_exception(
                exc_type, exc_value, exc_traceback
            )).strip()
        )
        # Call the original sys.excepthook
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
[ "def", "__global_logging_exception_handler", "(", "exc_type", ",", "exc_value", ",", "exc_traceback", ")", ":", "if", "exc_type", ".", "__name__", "==", "\"KeyboardInterrupt\"", ":", "# Do not log the exception or display the traceback on Keyboard Interrupt", "# Stop the logging queue listener thread", "if", "is_mp_logging_listener_configured", "(", ")", ":", "shutdown_multiprocessing_logging_listener", "(", ")", "else", ":", "# Log the exception", "logging", ".", "getLogger", "(", "__name__", ")", ".", "error", "(", "'An un-handled exception was caught by salt\\'s global exception '", "'handler:\\n%s: %s\\n%s'", ",", "exc_type", ".", "__name__", ",", "exc_value", ",", "''", ".", "join", "(", "traceback", ".", "format_exception", "(", "exc_type", ",", "exc_value", ",", "exc_traceback", ")", ")", ".", "strip", "(", ")", ")", "# Call the original sys.excepthook", "sys", ".", "__excepthook__", "(", "exc_type", ",", "exc_value", ",", "exc_traceback", ")" ]
41
0.002167
def array_to_hdf5(a, parent, name, **kwargs):
    """Write a Numpy array to an HDF5 dataset.

    Parameters
    ----------
    a : ndarray
        Data to write.
    parent : string or h5py group
        Parent HDF5 file or group. If a string, will be treated as
        HDF5 file name.
    name : string
        Name or path of dataset to write data into.
    kwargs : keyword arguments
        Passed through to h5py require_dataset() function.

    Returns
    -------
    h5d : h5py dataset

    """
    import h5py

    h5f = None

    if isinstance(parent, str):
        h5f = h5py.File(parent, mode='a')
        parent = h5f

    try:
        kwargs.setdefault('chunks', True)  # auto-chunking
        kwargs.setdefault('dtype', a.dtype)
        kwargs.setdefault('compression', 'gzip')
        h5d = parent.require_dataset(name, shape=a.shape, **kwargs)
        h5d[...] = a
        return h5d

    finally:
        if h5f is not None:
            h5f.close()
[ "def", "array_to_hdf5", "(", "a", ",", "parent", ",", "name", ",", "*", "*", "kwargs", ")", ":", "import", "h5py", "h5f", "=", "None", "if", "isinstance", "(", "parent", ",", "str", ")", ":", "h5f", "=", "h5py", ".", "File", "(", "parent", ",", "mode", "=", "'a'", ")", "parent", "=", "h5f", "try", ":", "kwargs", ".", "setdefault", "(", "'chunks'", ",", "True", ")", "# auto-chunking", "kwargs", ".", "setdefault", "(", "'dtype'", ",", "a", ".", "dtype", ")", "kwargs", ".", "setdefault", "(", "'compression'", ",", "'gzip'", ")", "h5d", "=", "parent", ".", "require_dataset", "(", "name", ",", "shape", "=", "a", ".", "shape", ",", "*", "*", "kwargs", ")", "h5d", "[", "...", "]", "=", "a", "return", "h5d", "finally", ":", "if", "h5f", "is", "not", "None", ":", "h5f", ".", "close", "(", ")" ]
22.682927
0.001031
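A usage sketch for the array_to_hdf5 snippet above; the file path and dataset name are illustrative, and h5py plus numpy are assumed to be installed:

    import numpy as np
    import h5py

    a = np.arange(10)
    array_to_hdf5(a, '/tmp/example.h5', 'data')  # opens, writes, and closes the file

    with h5py.File('/tmp/example.h5', 'r') as f:
        print(f['data'][:5])  # [0 1 2 3 4]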
def _get_metadata_for_region(region_code):
    """The metadata needed by this class is the same for all regions
    sharing the same country calling code. Therefore, we return the
    metadata for "main" region for this country calling code."""
    country_calling_code = country_code_for_region(region_code)
    main_country = region_code_for_country_code(country_calling_code)
    # Set to a default instance of the metadata. This allows us to
    # function with an incorrect region code, even if formatting only
    # works for numbers specified with "+".
    return PhoneMetadata.metadata_for_region(main_country, _EMPTY_METADATA)
[ "def", "_get_metadata_for_region", "(", "region_code", ")", ":", "country_calling_code", "=", "country_code_for_region", "(", "region_code", ")", "main_country", "=", "region_code_for_country_code", "(", "country_calling_code", ")", "# Set to a default instance of the metadata. This allows us to", "# function with an incorrect region code, even if formatting only", "# works for numbers specified with \"+\".", "return", "PhoneMetadata", ".", "metadata_for_region", "(", "main_country", ",", "_EMPTY_METADATA", ")" ]
62.6
0.001575
def _fix_pooling(self, op_name, inputs, new_attr):
    """onnx pooling operator supports asymmetrical padding
    Adding pad operator before pooling in mxnet to work with onnx"""
    pool_type = 'avg' if op_name == 'AveragePool' else 'max'
    stride = new_attr.get('strides')
    kernel = new_attr.get('kernel_shape')
    padding = new_attr.get('pads')
    pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding, len(kernel))
    new_pad_op = mx.sym.pad(inputs[0], mode='constant', pad_width=pad_width)
    new_pooling_op = mx.sym.Pooling(new_pad_op, pool_type=pool_type,
                                    stride=stride, kernel=kernel)
    return new_pooling_op
[ "def", "_fix_pooling", "(", "self", ",", "op_name", ",", "inputs", ",", "new_attr", ")", ":", "pool_type", "=", "'avg'", "if", "op_name", "==", "'AveragePool'", "else", "'max'", "stride", "=", "new_attr", ".", "get", "(", "'strides'", ")", "kernel", "=", "new_attr", ".", "get", "(", "'kernel_shape'", ")", "padding", "=", "new_attr", ".", "get", "(", "'pads'", ")", "pad_width", "=", "(", "0", ",", "0", ",", "0", ",", "0", ")", "+", "_pad_sequence_fix", "(", "padding", ",", "len", "(", "kernel", ")", ")", "new_pad_op", "=", "mx", ".", "sym", ".", "pad", "(", "inputs", "[", "0", "]", ",", "mode", "=", "'constant'", ",", "pad_width", "=", "pad_width", ")", "new_pooling_op", "=", "mx", ".", "sym", ".", "Pooling", "(", "new_pad_op", ",", "pool_type", "=", "pool_type", ",", "stride", "=", "stride", ",", "kernel", "=", "kernel", ")", "return", "new_pooling_op" ]
57.916667
0.004249
def get_archive_part_value(self, part):
    """Return archive part for today"""
    parts_dict = {'year': '%Y',
                  'month': self.month_format,
                  'week': self.week_format,
                  'day': '%d'}
    if self.today is None:
        today = timezone.now()
        if timezone.is_aware(today):
            today = timezone.localtime(today)
        self.today = today
    return self.today.strftime(parts_dict[part])
[ "def", "get_archive_part_value", "(", "self", ",", "part", ")", ":", "parts_dict", "=", "{", "'year'", ":", "'%Y'", ",", "'month'", ":", "self", ".", "month_format", ",", "'week'", ":", "self", ".", "week_format", ",", "'day'", ":", "'%d'", "}", "if", "self", ".", "today", "is", "None", ":", "today", "=", "timezone", ".", "now", "(", ")", "if", "timezone", ".", "is_aware", "(", "today", ")", ":", "today", "=", "timezone", ".", "localtime", "(", "today", ")", "self", ".", "today", "=", "today", "return", "self", ".", "today", ".", "strftime", "(", "parts_dict", "[", "part", "]", ")" ]
40.166667
0.004057
def _compute_hamming_matrix(N):
    """Compute and store a Hamming matrix for |N| nodes.

    Hamming matrices have the following sizes::

        N   MBs
        ==  ===
        9   2
        10  8
        11  32
        12  128
        13  512

    Given these sizes and the fact that large matrices are needed
    infrequently, we store computed matrices using the Joblib filesystem
    cache instead of adding computed matrices to the ``_hamming_matrices``
    global and clogging up memory.

    This function is only called when |N| >
    ``_NUM_PRECOMPUTED_HAMMING_MATRICES``. Don't call this function
    directly; use |_hamming_matrix| instead.
    """
    possible_states = np.array(list(utils.all_states((N))))
    return cdist(possible_states, possible_states, 'hamming') * N
[ "def", "_compute_hamming_matrix", "(", "N", ")", ":", "possible_states", "=", "np", ".", "array", "(", "list", "(", "utils", ".", "all_states", "(", "(", "N", ")", ")", ")", ")", "return", "cdist", "(", "possible_states", ",", "possible_states", ",", "'hamming'", ")", "*", "N" ]
31.833333
0.001271
def _set_link_fault_signaling(self, v, load=False):
    """
    Setter method for link_fault_signaling, mapped from YANG variable /interface/ethernet/link_fault_signaling (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_link_fault_signaling is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_link_fault_signaling() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=link_fault_signaling.link_fault_signaling, is_container='container', presence=False, yang_name="link-fault-signaling", rest_name="link-fault-signaling", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure a link-fault-signaling', u'callpoint': u'Lfs', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-lfs', defining_module='brocade-lfs', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """link_fault_signaling must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=link_fault_signaling.link_fault_signaling, is_container='container', presence=False, yang_name="link-fault-signaling", rest_name="link-fault-signaling", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure a link-fault-signaling', u'callpoint': u'Lfs', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-lfs', defining_module='brocade-lfs', yang_type='container', is_config=True)""",
        })

    self.__link_fault_signaling = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_link_fault_signaling", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "link_fault_signaling", ".", "link_fault_signaling", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"link-fault-signaling\"", ",", "rest_name", "=", "\"link-fault-signaling\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure a link-fault-signaling'", ",", "u'callpoint'", ":", "u'Lfs'", ",", "u'cli-compact-syntax'", ":", "None", ",", "u'cli-sequence-commands'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'cli-full-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-lfs'", ",", "defining_module", "=", "'brocade-lfs'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"link_fault_signaling must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=link_fault_signaling.link_fault_signaling, is_container='container', presence=False, yang_name=\"link-fault-signaling\", rest_name=\"link-fault-signaling\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure a link-fault-signaling', u'callpoint': u'Lfs', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-lfs', defining_module='brocade-lfs', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__link_fault_signaling", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
92.318182
0.004873
def modelAdoptNextOrphan(self, jobId, maxUpdateInterval):
    """ Look through the models table for an orphaned model, which is a
    model that is not completed yet, whose _eng_last_update_time is more
    than maxUpdateInterval seconds ago.

    If one is found, change its _eng_worker_conn_id to the current worker's
    and return the model id.

    Parameters:
    ----------------------------------------------------------------
    retval:    modelId of the model we adopted, or None if none found
    """
    @g_retrySQL
    def findCandidateModelWithRetries():
        modelID = None
        with ConnectionFactory.get() as conn:
            # TODO: may need a table index on job_id/status for speed
            query = 'SELECT model_id FROM %s ' \
                    ' WHERE status=%%s ' \
                    '   AND job_id=%%s ' \
                    '   AND TIMESTAMPDIFF(SECOND, ' \
                    '                     _eng_last_update_time, ' \
                    '                     UTC_TIMESTAMP()) > %%s ' \
                    ' LIMIT 1 ' \
                    % (self.modelsTableName,)
            sqlParams = [self.STATUS_RUNNING, jobId, maxUpdateInterval]
            numRows = conn.cursor.execute(query, sqlParams)
            rows = conn.cursor.fetchall()

        assert numRows <= 1, "Unexpected numRows: %r" % numRows
        if numRows == 1:
            (modelID,) = rows[0]

        return modelID

    @g_retrySQL
    def adoptModelWithRetries(modelID):
        adopted = False
        with ConnectionFactory.get() as conn:
            query = 'UPDATE %s SET _eng_worker_conn_id=%%s, ' \
                    '              _eng_last_update_time=UTC_TIMESTAMP() ' \
                    ' WHERE model_id=%%s ' \
                    '   AND status=%%s' \
                    '   AND TIMESTAMPDIFF(SECOND, ' \
                    '                     _eng_last_update_time, ' \
                    '                     UTC_TIMESTAMP()) > %%s ' \
                    ' LIMIT 1 ' \
                    % (self.modelsTableName,)
            sqlParams = [self._connectionID, modelID, self.STATUS_RUNNING,
                         maxUpdateInterval]
            numRowsAffected = conn.cursor.execute(query, sqlParams)

            assert numRowsAffected <= 1, 'Unexpected numRowsAffected=%r' % (
                numRowsAffected,)

            if numRowsAffected == 1:
                adopted = True
            else:
                # Discern between transient failure during update and someone else
                # claiming this model
                (status, connectionID) = self._getOneMatchingRowNoRetries(
                    self._models, conn, {'model_id': modelID},
                    ['status', '_eng_worker_conn_id'])
                adopted = (status == self.STATUS_RUNNING and
                           connectionID == self._connectionID)
        return adopted

    adoptedModelID = None
    while True:
        modelID = findCandidateModelWithRetries()
        if modelID is None:
            break
        if adoptModelWithRetries(modelID):
            adoptedModelID = modelID
            break

    return adoptedModelID
[ "def", "modelAdoptNextOrphan", "(", "self", ",", "jobId", ",", "maxUpdateInterval", ")", ":", "@", "g_retrySQL", "def", "findCandidateModelWithRetries", "(", ")", ":", "modelID", "=", "None", "with", "ConnectionFactory", ".", "get", "(", ")", "as", "conn", ":", "# TODO: may need a table index on job_id/status for speed", "query", "=", "'SELECT model_id FROM %s '", "' WHERE status=%%s '", "' AND job_id=%%s '", "' AND TIMESTAMPDIFF(SECOND, '", "' _eng_last_update_time, '", "' UTC_TIMESTAMP()) > %%s '", "' LIMIT 1 '", "%", "(", "self", ".", "modelsTableName", ",", ")", "sqlParams", "=", "[", "self", ".", "STATUS_RUNNING", ",", "jobId", ",", "maxUpdateInterval", "]", "numRows", "=", "conn", ".", "cursor", ".", "execute", "(", "query", ",", "sqlParams", ")", "rows", "=", "conn", ".", "cursor", ".", "fetchall", "(", ")", "assert", "numRows", "<=", "1", ",", "\"Unexpected numRows: %r\"", "%", "numRows", "if", "numRows", "==", "1", ":", "(", "modelID", ",", ")", "=", "rows", "[", "0", "]", "return", "modelID", "@", "g_retrySQL", "def", "adoptModelWithRetries", "(", "modelID", ")", ":", "adopted", "=", "False", "with", "ConnectionFactory", ".", "get", "(", ")", "as", "conn", ":", "query", "=", "'UPDATE %s SET _eng_worker_conn_id=%%s, '", "' _eng_last_update_time=UTC_TIMESTAMP() '", "' WHERE model_id=%%s '", "' AND status=%%s'", "' AND TIMESTAMPDIFF(SECOND, '", "' _eng_last_update_time, '", "' UTC_TIMESTAMP()) > %%s '", "' LIMIT 1 '", "%", "(", "self", ".", "modelsTableName", ",", ")", "sqlParams", "=", "[", "self", ".", "_connectionID", ",", "modelID", ",", "self", ".", "STATUS_RUNNING", ",", "maxUpdateInterval", "]", "numRowsAffected", "=", "conn", ".", "cursor", ".", "execute", "(", "query", ",", "sqlParams", ")", "assert", "numRowsAffected", "<=", "1", ",", "'Unexpected numRowsAffected=%r'", "%", "(", "numRowsAffected", ",", ")", "if", "numRowsAffected", "==", "1", ":", "adopted", "=", "True", "else", ":", "# Discern between transient failure during update and someone else", "# claiming this model", "(", "status", ",", "connectionID", ")", "=", "self", ".", "_getOneMatchingRowNoRetries", "(", "self", ".", "_models", ",", "conn", ",", "{", "'model_id'", ":", "modelID", "}", ",", "[", "'status'", ",", "'_eng_worker_conn_id'", "]", ")", "adopted", "=", "(", "status", "==", "self", ".", "STATUS_RUNNING", "and", "connectionID", "==", "self", ".", "_connectionID", ")", "return", "adopted", "adoptedModelID", "=", "None", "while", "True", ":", "modelID", "=", "findCandidateModelWithRetries", "(", ")", "if", "modelID", "is", "None", ":", "break", "if", "adoptModelWithRetries", "(", "modelID", ")", ":", "adoptedModelID", "=", "modelID", "break", "return", "adoptedModelID" ]
37.949367
0.006177
def update(self, client=None):
    """API call: update the project via a ``PUT`` request.

    See
    https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/update

    :type client: :class:`google.cloud.resource_manager.client.Client` or
                  :data:`NoneType <types.NoneType>`
    :param client: the client to use. If not passed, falls back to
                   the client stored on the current project.
    """
    client = self._require_client(client)

    data = {"name": self.name, "labels": self.labels, "parent": self.parent}
    resp = client._connection.api_request(method="PUT", path=self.path, data=data)
    self.set_properties_from_api_repr(resp)
[ "def", "update", "(", "self", ",", "client", "=", "None", ")", ":", "client", "=", "self", ".", "_require_client", "(", "client", ")", "data", "=", "{", "\"name\"", ":", "self", ".", "name", ",", "\"labels\"", ":", "self", ".", "labels", ",", "\"parent\"", ":", "self", ".", "parent", "}", "resp", "=", "client", ".", "_connection", ".", "api_request", "(", "method", "=", "\"PUT\"", ",", "path", "=", "self", ".", "path", ",", "data", "=", "data", ")", "self", ".", "set_properties_from_api_repr", "(", "resp", ")" ]
42.823529
0.005376
def unlock(self, time=3):
    """
    unlock the door\n
    thanks to https://github.com/SoftwareHouseMerida/pyzk/

    :param time: define delay in seconds
    :return: bool
    """
    command = const.CMD_UNLOCK
    command_string = pack("I", int(time) * 10)
    cmd_response = self.__send_command(command, command_string)
    if cmd_response.get('status'):
        return True
    else:
        raise ZKErrorResponse("Can't open door")
[ "def", "unlock", "(", "self", ",", "time", "=", "3", ")", ":", "command", "=", "const", ".", "CMD_UNLOCK", "command_string", "=", "pack", "(", "\"I\"", ",", "int", "(", "time", ")", "*", "10", ")", "cmd_response", "=", "self", ".", "__send_command", "(", "command", ",", "command_string", ")", "if", "cmd_response", ".", "get", "(", "'status'", ")", ":", "return", "True", "else", ":", "raise", "ZKErrorResponse", "(", "\"Can't open door\"", ")" ]
31.533333
0.00616
def rename_key(d: Dict[str, Any], old: str, new: str) -> None:
    """
    Rename a key in dictionary ``d`` from ``old`` to ``new``, in place.
    """
    d[new] = d.pop(old)
[ "def", "rename_key", "(", "d", ":", "Dict", "[", "str", ",", "Any", "]", ",", "old", ":", "str", ",", "new", ":", "str", ")", "->", "None", ":", "d", "[", "new", "]", "=", "d", ".", "pop", "(", "old", ")" ]
34
0.005747
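A quick sketch of the rename_key helper above, which mutates the dict in place and returns None:

    d = {'old': 1, 'other': 2}
    rename_key(d, 'old', 'new')
    assert d == {'new': 1, 'other': 2}
    # A missing key propagates the KeyError from d.pop:
    # rename_key(d, 'absent', 'x')  -> KeyError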
def config(host, seq, option, value):
    """Set configuration parameters of the drone."""
    at(host, 'CONFIG', seq, [str(option), str(value)])
[ "def", "config", "(", "host", ",", "seq", ",", "option", ",", "value", ")", ":", "at", "(", "host", ",", "'CONFIG'", ",", "seq", ",", "[", "str", "(", "option", ")", ",", "str", "(", "value", ")", "]", ")" ]
47.666667
0.006897
def _trj_backup_trajectory(self, traj, backup_filename=None):
    """Backs up a trajectory.

    :param traj: Trajectory that should be backed up

    :param backup_filename:

        Path and filename of backup file. If None is specified the storage
        service defaults to
        `path_to_trajectory_hdf5_file/backup_trajectory_name.hdf`.

    """
    self._logger.info('Storing backup of %s.' % traj.v_name)

    mypath, _ = os.path.split(self._filename)

    if backup_filename is None:
        backup_filename = os.path.join('%s' % mypath,
                                       'backup_%s.hdf5' % traj.v_name)

    backup_hdf5file = pt.open_file(filename=backup_filename, mode='a',
                                   title=backup_filename)

    if '/' + self._trajectory_name in backup_hdf5file:
        raise ValueError('I cannot backup `%s` into file `%s`, there is already a '
                         'trajectory with that name.' % (traj.v_name, backup_filename))

    backup_root = backup_hdf5file.root

    self._trajectory_group._f_copy(newparent=backup_root, recursive=True)

    backup_hdf5file.flush()
    backup_hdf5file.close()

    self._logger.info('Finished backup of %s.' % traj.v_name)
[ "def", "_trj_backup_trajectory", "(", "self", ",", "traj", ",", "backup_filename", "=", "None", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Storing backup of %s.'", "%", "traj", ".", "v_name", ")", "mypath", ",", "_", "=", "os", ".", "path", ".", "split", "(", "self", ".", "_filename", ")", "if", "backup_filename", "is", "None", ":", "backup_filename", "=", "os", ".", "path", ".", "join", "(", "'%s'", "%", "mypath", ",", "'backup_%s.hdf5'", "%", "traj", ".", "v_name", ")", "backup_hdf5file", "=", "pt", ".", "open_file", "(", "filename", "=", "backup_filename", ",", "mode", "=", "'a'", ",", "title", "=", "backup_filename", ")", "if", "'/'", "+", "self", ".", "_trajectory_name", "in", "backup_hdf5file", ":", "raise", "ValueError", "(", "'I cannot backup `%s` into file `%s`, there is already a '", "'trajectory with that name.'", "%", "(", "traj", ".", "v_name", ",", "backup_filename", ")", ")", "backup_root", "=", "backup_hdf5file", ".", "root", "self", ".", "_trajectory_group", ".", "_f_copy", "(", "newparent", "=", "backup_root", ",", "recursive", "=", "True", ")", "backup_hdf5file", ".", "flush", "(", ")", "backup_hdf5file", ".", "close", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Finished backup of %s.'", "%", "traj", ".", "v_name", ")" ]
37
0.006385
def _lerp(x, x0, x1, y0, y1):
    """Affinely map from [x0, x1] onto [y0, y1]."""
    return y0 + (x - x0) * float(y1 - y0) / (x1 - x0)
[ "def", "_lerp", "(", "x", ",", "x0", ",", "x1", ",", "y0", ",", "y1", ")", ":", "return", "y0", "+", "(", "x", "-", "x0", ")", "*", "float", "(", "y1", "-", "y0", ")", "/", "(", "x1", "-", "x0", ")" ]
43
0.022901
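A quick sketch of the _lerp helper above; the Celsius-to-Fahrenheit mapping is an illustrative choice of intervals:

    _lerp(5, 0, 10, 0, 100)     # -> 50.0, midpoint maps to midpoint
    _lerp(25, 0, 100, 32, 212)  # -> 77.0, i.e. 25 degrees C as F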
def _validate_compose_list(destination_file, file_list,
                           files_metadata=None, number_of_files=32):
    """Validates the file_list and merges the file_list, files_metadata.

    Args:
      destination: Path to the file (ie. /destination_bucket/destination_file).
      file_list: List of files to compose, see compose for details.
      files_metadata: Meta details for each file in the file_list.
      number_of_files: Maximum number of files allowed in the list.

    Returns:
      A tuple (list_of_files, bucket):
        list_of_files: Ready to use dict version of the list.
        bucket: bucket name extracted from the file paths.
    """
    common.validate_file_path(destination_file)
    bucket = destination_file[0:(destination_file.index('/', 1) + 1)]
    try:
        if isinstance(file_list, types.StringTypes):
            raise TypeError
        list_len = len(file_list)
    except TypeError:
        raise TypeError('file_list must be a list')

    if list_len > number_of_files:
        raise ValueError(
            'Compose attempted to create composite with too many'
            '(%i) components; limit is (%i).' % (list_len, number_of_files))
    if list_len <= 0:
        raise ValueError('Compose operation requires at'
                         ' least one component; 0 provided.')

    if files_metadata is None:
        files_metadata = []
    elif len(files_metadata) > list_len:
        raise ValueError('files_metadata contains more entries(%i)'
                         ' than file_list(%i)'
                         % (len(files_metadata), list_len))
    list_of_files = []
    for source_file, meta_data in itertools.izip_longest(file_list,
                                                         files_metadata):
        if not isinstance(source_file, str):
            raise TypeError('Each item of file_list must be a string')
        if source_file.startswith('/'):
            logging.warn('Detected a "/" at the start of the file, '
                         'Unless the file name contains a "/" it '
                         ' may cause files to be misread')
        if source_file.startswith(bucket):
            logging.warn('Detected bucket name at the start of the file, '
                         'must not specify the bucket when listing file_names.'
                         ' May cause files to be misread')
        common.validate_file_path(bucket + source_file)

        list_entry = {}

        if meta_data is not None:
            list_entry.update(meta_data)
        list_entry['Name'] = source_file
        list_of_files.append(list_entry)

    return list_of_files, bucket
[ "def", "_validate_compose_list", "(", "destination_file", ",", "file_list", ",", "files_metadata", "=", "None", ",", "number_of_files", "=", "32", ")", ":", "common", ".", "validate_file_path", "(", "destination_file", ")", "bucket", "=", "destination_file", "[", "0", ":", "(", "destination_file", ".", "index", "(", "'/'", ",", "1", ")", "+", "1", ")", "]", "try", ":", "if", "isinstance", "(", "file_list", ",", "types", ".", "StringTypes", ")", ":", "raise", "TypeError", "list_len", "=", "len", "(", "file_list", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "'file_list must be a list'", ")", "if", "list_len", ">", "number_of_files", ":", "raise", "ValueError", "(", "'Compose attempted to create composite with too many'", "'(%i) components; limit is (%i).'", "%", "(", "list_len", ",", "number_of_files", ")", ")", "if", "list_len", "<=", "0", ":", "raise", "ValueError", "(", "'Compose operation requires at'", "' least one component; 0 provided.'", ")", "if", "files_metadata", "is", "None", ":", "files_metadata", "=", "[", "]", "elif", "len", "(", "files_metadata", ")", ">", "list_len", ":", "raise", "ValueError", "(", "'files_metadata contains more entries(%i)'", "' than file_list(%i)'", "%", "(", "len", "(", "files_metadata", ")", ",", "list_len", ")", ")", "list_of_files", "=", "[", "]", "for", "source_file", ",", "meta_data", "in", "itertools", ".", "izip_longest", "(", "file_list", ",", "files_metadata", ")", ":", "if", "not", "isinstance", "(", "source_file", ",", "str", ")", ":", "raise", "TypeError", "(", "'Each item of file_list must be a string'", ")", "if", "source_file", ".", "startswith", "(", "'/'", ")", ":", "logging", ".", "warn", "(", "'Detected a \"/\" at the start of the file, '", "'Unless the file name contains a \"/\" it '", "' may cause files to be misread'", ")", "if", "source_file", ".", "startswith", "(", "bucket", ")", ":", "logging", ".", "warn", "(", "'Detected bucket name at the start of the file, '", "'must not specify the bucket when listing file_names.'", "' May cause files to be misread'", ")", "common", ".", "validate_file_path", "(", "bucket", "+", "source_file", ")", "list_entry", "=", "{", "}", "if", "meta_data", "is", "not", "None", ":", "list_entry", ".", "update", "(", "meta_data", ")", "list_entry", "[", "'Name'", "]", "=", "source_file", "list_of_files", ".", "append", "(", "list_entry", ")", "return", "list_of_files", ",", "bucket" ]
39.704918
0.007655
def pretty_print(source, dest):
    """ Pretty print the XML file """
    parser = etree.XMLParser(remove_blank_text=True)
    if not isinstance(source, str):
        source = str(source)
    tree = etree.parse(source, parser)
    docinfo = tree.docinfo
    with open(dest, 'wb') as fp:
        fp.write(etree.tostring(tree,
                                pretty_print=True,
                                encoding=docinfo.encoding,
                                standalone=docinfo.standalone))
[ "def", "pretty_print", "(", "source", ",", "dest", ")", ":", "parser", "=", "etree", ".", "XMLParser", "(", "remove_blank_text", "=", "True", ")", "if", "not", "isinstance", "(", "source", ",", "str", ")", ":", "source", "=", "str", "(", "source", ")", "tree", "=", "etree", ".", "parse", "(", "source", ",", "parser", ")", "docinfo", "=", "tree", ".", "docinfo", "with", "open", "(", "dest", ",", "'wb'", ")", "as", "fp", ":", "fp", ".", "write", "(", "etree", ".", "tostring", "(", "tree", ",", "pretty_print", "=", "True", ",", "encoding", "=", "docinfo", ".", "encoding", ",", "standalone", "=", "docinfo", ".", "standalone", ")", ")" ]
36.833333
0.004415
def delete(self, uuid):
    # type: (UUID) -> None
    """Delete file with given uuid.

    :param:uuid: :class:`UUID` instance

    :raises:KeyError if file does not exists
    """
    dest = self.abs_path(uuid)
    if not dest.exists():
        raise KeyError("No file can be found for this uuid", uuid)

    dest.unlink()
[ "def", "delete", "(", "self", ",", "uuid", ")", ":", "# type: (UUID) -> None", "dest", "=", "self", ".", "abs_path", "(", "uuid", ")", "if", "not", "dest", ".", "exists", "(", ")", ":", "raise", "KeyError", "(", "\"No file can be found for this uuid\"", ",", "uuid", ")", "dest", ".", "unlink", "(", ")" ]
29
0.008357
def delete_repository_from_recycle_bin(self, project, repository_id):
    """DeleteRepositoryFromRecycleBin.

    [Preview API] Destroy (hard delete) a soft-deleted Git repository.
    :param str project: Project ID or project name
    :param str repository_id: The ID of the repository.
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if repository_id is not None:
        route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
    self._send(http_method='DELETE',
               location_id='a663da97-81db-4eb3-8b83-287670f63073',
               version='5.1-preview.1',
               route_values=route_values)
[ "def", "delete_repository_from_recycle_bin", "(", "self", ",", "project", ",", "repository_id", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "repository_id", "is", "not", "None", ":", "route_values", "[", "'repositoryId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'repository_id'", ",", "repository_id", ",", "'str'", ")", "self", ".", "_send", "(", "http_method", "=", "'DELETE'", ",", "location_id", "=", "'a663da97-81db-4eb3-8b83-287670f63073'", ",", "version", "=", "'5.1-preview.1'", ",", "route_values", "=", "route_values", ")" ]
52.333333
0.005006
def _check_connectivity(self, err):
    ''' a method to check connectivity as source of error '''
    try:
        import requests
        requests.get(self.uptime_ssl)
    except:
        from requests import Request
        request_object = Request(method='GET', url=self.uptime_ssl)
        request_details = self.handle_requests(request_object)
        self.printer('ERROR.')
        raise ConnectionError(request_details['error'])
    self.printer('ERROR.')
    raise err
[ "def", "_check_connectivity", "(", "self", ",", "err", ")", ":", "try", ":", "import", "requests", "requests", ".", "get", "(", "self", ".", "uptime_ssl", ")", "except", ":", "from", "requests", "import", "Request", "request_object", "=", "Request", "(", "method", "=", "'GET'", ",", "url", "=", "self", ".", "uptime_ssl", ")", "request_details", "=", "self", ".", "handle_requests", "(", "request_object", ")", "self", ".", "printer", "(", "'ERROR.'", ")", "raise", "ConnectionError", "(", "request_details", "[", "'error'", "]", ")", "self", ".", "printer", "(", "'ERROR.'", ")", "raise", "err" ]
35.6
0.007299
def jstimestamp(dte):
    '''Convert a date or datetime object into a javascript timestamp.'''
    days = date(dte.year, dte.month, 1).toordinal() - _EPOCH_ORD + dte.day - 1
    hours = days * 24
    if isinstance(dte, datetime):
        hours += dte.hour
        minutes = hours * 60 + dte.minute
        seconds = minutes * 60 + dte.second
        return 1000 * seconds + int(0.001 * dte.microsecond)
    else:
        return 3600000 * hours
[ "def", "jstimestamp", "(", "dte", ")", ":", "days", "=", "date", "(", "dte", ".", "year", ",", "dte", ".", "month", ",", "1", ")", ".", "toordinal", "(", ")", "-", "_EPOCH_ORD", "+", "dte", ".", "day", "-", "1", "hours", "=", "days", "*", "24", "if", "isinstance", "(", "dte", ",", "datetime", ")", ":", "hours", "+=", "dte", ".", "hour", "minutes", "=", "hours", "*", "60", "+", "dte", ".", "minute", "seconds", "=", "minutes", "*", "60", "+", "dte", ".", "second", "return", "1000", "*", "seconds", "+", "int", "(", "0.001", "*", "dte", ".", "microsecond", ")", "else", ":", "return", "3600000", "*", "hours" ]
36.25
0.006726
def create_group(self, group):
    """
    Creates a new group and set group privileges.

    :param group: The group object to be created.
    :type group: ``dict``

    """
    data = json.dumps(self._create_group_dict(group))

    response = self._perform_request(
        url='/um/groups',
        method='POST',
        data=data)

    return response
[ "def", "create_group", "(", "self", ",", "group", ")", ":", "data", "=", "json", ".", "dumps", "(", "self", ".", "_create_group_dict", "(", "group", ")", ")", "response", "=", "self", ".", "_perform_request", "(", "url", "=", "'/um/groups'", ",", "method", "=", "'POST'", ",", "data", "=", "data", ")", "return", "response" ]
24.75
0.004866
def get_link(href, value=None, **kwargs):
    """
    Returns a well-formed link. If href is None/empty, returns an empty string

    :param href: value to be set for attribute href
    :param value: the text to be displayed. If None, the href itself is used
    :param kwargs: additional attributes and values
    :return: a well-formed html anchor
    """
    if not href:
        return ""
    anchor_value = value and value or href
    attr = render_html_attributes(**kwargs)
    return '<a href="{}" {}>{}</a>'.format(href, attr, anchor_value)
[ "def", "get_link", "(", "href", ",", "value", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "href", ":", "return", "\"\"", "anchor_value", "=", "value", "and", "value", "or", "href", "attr", "=", "render_html_attributes", "(", "*", "*", "kwargs", ")", "return", "'<a href=\"{}\" {}>{}</a>'", ".", "format", "(", "href", ",", "attr", ",", "anchor_value", ")" ]
41.153846
0.001828
def _get_tokens_for_line_func(self, cli, document):
    """
    Create a function that returns the tokens for a given line.
    """
    # Cache using `document.text`.
    def get_tokens_for_line():
        return self.lexer.lex_document(cli, document)

    return self._token_cache.get(document.text, get_tokens_for_line)
[ "def", "_get_tokens_for_line_func", "(", "self", ",", "cli", ",", "document", ")", ":", "# Cache using `document.text`.", "def", "get_tokens_for_line", "(", ")", ":", "return", "self", ".", "lexer", ".", "lex_document", "(", "cli", ",", "document", ")", "return", "self", ".", "_token_cache", ".", "get", "(", "document", ".", "text", ",", "get_tokens_for_line", ")" ]
37.888889
0.005731
def latex(self):
    """Return LaTeX representation of the abstract."""
    s = ('{authors}, \\textit{{{title}}}, {journal}, {volissue}, '
         '{pages}, ({date}). {doi}, {scopus_url}.')
    if len(self.authors) > 1:
        authors = ', '.join([str(a.given_name) + ' ' + str(a.surname)
                             for a in self.authors[0:-1]])
        authors += (' and ' + str(self.authors[-1].given_name) + ' ' +
                    str(self.authors[-1].surname))
    else:
        a = self.authors[0]
        authors = str(a.given_name) + ' ' + str(a.surname)
    title = self.title
    journal = self.publicationName
    volume = self.volume
    issue = self.issueIdentifier
    if volume and issue:
        volissue = '\\textbf{{{0}({1})}}'.format(volume, issue)
    elif volume:
        volissue = '\\textbf{{0}}'.format(volume)
    else:
        volissue = 'no volume'
    date = self.coverDate
    if self.pageRange:
        pages = 'p. {0}'.format(self.pageRange)
    elif self.startingPage:
        pages = 'p. {self.startingPage}'.format(self)
    elif self.article_number:
        pages = 'Art. No. {self.article_number}, '.format(self)
    else:
        pages = '(no pages found)'
    doi = '\\href{{https://doi.org/{0}}}{{doi:{0}}}'.format(self.doi)
    scopus_url = '\\href{{{0}}}{{scopus:{1}}}'.format(self.scopus_url,
                                                      self.eid)

    return s.format(**locals())
[ "def", "latex", "(", "self", ")", ":", "s", "=", "(", "'{authors}, \\\\textit{{{title}}}, {journal}, {volissue}, '", "'{pages}, ({date}). {doi}, {scopus_url}.'", ")", "if", "len", "(", "self", ".", "authors", ")", ">", "1", ":", "authors", "=", "', '", ".", "join", "(", "[", "str", "(", "a", ".", "given_name", ")", "+", "' '", "+", "str", "(", "a", ".", "surname", ")", "for", "a", "in", "self", ".", "authors", "[", "0", ":", "-", "1", "]", "]", ")", "authors", "+=", "(", "' and '", "+", "str", "(", "self", ".", "authors", "[", "-", "1", "]", ".", "given_name", ")", "+", "' '", "+", "str", "(", "self", ".", "authors", "[", "-", "1", "]", ".", "surname", ")", ")", "else", ":", "a", "=", "self", ".", "authors", "[", "0", "]", "authors", "=", "str", "(", "a", ".", "given_name", ")", "+", "' '", "+", "str", "(", "a", ".", "surname", ")", "title", "=", "self", ".", "title", "journal", "=", "self", ".", "publicationName", "volume", "=", "self", ".", "volume", "issue", "=", "self", ".", "issueIdentifier", "if", "volume", "and", "issue", ":", "volissue", "=", "'\\\\textbf{{{0}({1})}}'", ".", "format", "(", "volume", ",", "issue", ")", "elif", "volume", ":", "volissue", "=", "'\\\\textbf{{0}}'", ".", "format", "(", "volume", ")", "else", ":", "volissue", "=", "'no volume'", "date", "=", "self", ".", "coverDate", "if", "self", ".", "pageRange", ":", "pages", "=", "'p. {0}'", ".", "format", "(", "self", ".", "pageRange", ")", "elif", "self", ".", "startingPage", ":", "pages", "=", "'p. {self.startingPage}'", ".", "format", "(", "self", ")", "elif", "self", ".", "article_number", ":", "pages", "=", "'Art. No. {self.article_number}, '", ".", "format", "(", "self", ")", "else", ":", "pages", "=", "'(no pages found)'", "doi", "=", "'\\\\href{{https://doi.org/{0}}}{{doi:{0}}}'", ".", "format", "(", "self", ".", "doi", ")", "scopus_url", "=", "'\\\\href{{{0}}}{{scopus:{1}}}'", ".", "format", "(", "self", ".", "scopus_url", ",", "self", ".", "eid", ")", "return", "s", ".", "format", "(", "*", "*", "locals", "(", ")", ")" ]
41.921053
0.001227
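The `latex` property above (as fixed) leans on two `str.format` subtleties that are easy to get wrong. A minimal standalone sketch, using made-up values rather than a real abstract object, showing why triple braces are needed to both substitute and emit literal braces, and why attribute-style fields such as `{self.startingPage}` require the keyword form `format(self=...)`:

volume = 42
# '{{0}}' only escapes the braces, so nothing is substituted:
assert '\\textbf{{0}}'.format(volume) == '\\textbf{0}'
# Triple braces substitute AND keep literal braces around the value:
assert '\\textbf{{{0}}}'.format(volume) == '\\textbf{42}'

class Doc(object):
    startingPage = 7

# Attribute lookups inside a field name need a keyword argument:
assert 'p. {self.startingPage}'.format(self=Doc()) == 'p. 7'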
def total_seconds(offset): """Backport of offset.total_seconds() from python 2.7+.""" seconds = offset.days * 24 * 60 * 60 + offset.seconds microseconds = seconds * 10**6 + offset.microseconds return microseconds / (10**6 * 1.0)
[ "def", "total_seconds", "(", "offset", ")", ":", "seconds", "=", "offset", ".", "days", "*", "24", "*", "60", "*", "60", "+", "offset", ".", "seconds", "microseconds", "=", "seconds", "*", "10", "**", "6", "+", "offset", ".", "microseconds", "return", "microseconds", "/", "(", "10", "**", "6", "*", "1.0", ")" ]
48
0.004098
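A quick standalone check, assuming only the standard library, that the backport above agrees with the built-in `timedelta.total_seconds()` available since Python 2.7:

from datetime import timedelta

def total_seconds(offset):
    seconds = offset.days * 24 * 60 * 60 + offset.seconds
    microseconds = seconds * 10**6 + offset.microseconds
    return microseconds / (10**6 * 1.0)

delta = timedelta(days=1, seconds=30, microseconds=500000)
# 86400 s + 30 s + 0.5 s
assert total_seconds(delta) == delta.total_seconds() == 86430.5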
def set_xlabels(self, label=None, **kwargs): """Label the x axis on the bottom row of the grid.""" if label is None: label = label_from_attrs(self.data[self._x_var]) for ax in self._bottom_axes: ax.set_xlabel(label, **kwargs) return self
[ "def", "set_xlabels", "(", "self", ",", "label", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "label", "is", "None", ":", "label", "=", "label_from_attrs", "(", "self", ".", "data", "[", "self", ".", "_x_var", "]", ")", "for", "ax", "in", "self", ".", "_bottom_axes", ":", "ax", ".", "set_xlabel", "(", "label", ",", "*", "*", "kwargs", ")", "return", "self" ]
41
0.006826
def get(self, request, bot_id, id, format=None):
    """
    Get the list of source states of a handler
    ---
    serializer: StateSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
    """
    return super(SourceStateList, self).get(request, bot_id, id, format)
[ "def", "get", "(", "self", ",", "request", ",", "bot_id", ",", "id", ",", "format", "=", "None", ")", ":", "return", "super", "(", "SourceStateList", ",", "self", ")", ".", "get", "(", "request", ",", "bot_id", ",", "id", ",", "format", ")" ]
32.5
0.005988
def handleException(exception): """ Handles an exception that occurs somewhere in the process of handling a request. """ serverException = exception if not isinstance(exception, exceptions.BaseServerException): with app.test_request_context(): app.log_exception(exception) serverException = exceptions.getServerError(exception) error = serverException.toProtocolElement() # If the exception is being viewed by a web browser, we can render a nicer # view. if flask.request and 'Accept' in flask.request.headers and \ flask.request.headers['Accept'].find('text/html') != -1: message = "<h1>Error {}</h1><pre>{}</pre>".format( serverException.httpStatus, protocol.toJson(error)) if serverException.httpStatus == 401 \ or serverException.httpStatus == 403: message += "Please try <a href=\"/login\">logging in</a>." return message else: responseStr = protocol.toJson(error) return getFlaskResponse(responseStr, serverException.httpStatus)
[ "def", "handleException", "(", "exception", ")", ":", "serverException", "=", "exception", "if", "not", "isinstance", "(", "exception", ",", "exceptions", ".", "BaseServerException", ")", ":", "with", "app", ".", "test_request_context", "(", ")", ":", "app", ".", "log_exception", "(", "exception", ")", "serverException", "=", "exceptions", ".", "getServerError", "(", "exception", ")", "error", "=", "serverException", ".", "toProtocolElement", "(", ")", "# If the exception is being viewed by a web browser, we can render a nicer", "# view.", "if", "flask", ".", "request", "and", "'Accept'", "in", "flask", ".", "request", ".", "headers", "and", "flask", ".", "request", ".", "headers", "[", "'Accept'", "]", ".", "find", "(", "'text/html'", ")", "!=", "-", "1", ":", "message", "=", "\"<h1>Error {}</h1><pre>{}</pre>\"", ".", "format", "(", "serverException", ".", "httpStatus", ",", "protocol", ".", "toJson", "(", "error", ")", ")", "if", "serverException", ".", "httpStatus", "==", "401", "or", "serverException", ".", "httpStatus", "==", "403", ":", "message", "+=", "\"Please try <a href=\\\"/login\\\">logging in</a>.\"", "return", "message", "else", ":", "responseStr", "=", "protocol", ".", "toJson", "(", "error", ")", "return", "getFlaskResponse", "(", "responseStr", ",", "serverException", ".", "httpStatus", ")" ]
44.08
0.000888
def simxReadProximitySensor(clientID, sensorHandle, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' detectionState = ct.c_ubyte() detectedObjectHandle = ct.c_int() detectedPoint = (ct.c_float*3)() detectedSurfaceNormalVector = (ct.c_float*3)() ret = c_ReadProximitySensor(clientID, sensorHandle, ct.byref(detectionState), detectedPoint, ct.byref(detectedObjectHandle), detectedSurfaceNormalVector, operationMode) arr1 = [] for i in range(3): arr1.append(detectedPoint[i]) arr2 = [] for i in range(3): arr2.append(detectedSurfaceNormalVector[i]) return ret, bool(detectionState.value!=0), arr1, detectedObjectHandle.value, arr2
[ "def", "simxReadProximitySensor", "(", "clientID", ",", "sensorHandle", ",", "operationMode", ")", ":", "detectionState", "=", "ct", ".", "c_ubyte", "(", ")", "detectedObjectHandle", "=", "ct", ".", "c_int", "(", ")", "detectedPoint", "=", "(", "ct", ".", "c_float", "*", "3", ")", "(", ")", "detectedSurfaceNormalVector", "=", "(", "ct", ".", "c_float", "*", "3", ")", "(", ")", "ret", "=", "c_ReadProximitySensor", "(", "clientID", ",", "sensorHandle", ",", "ct", ".", "byref", "(", "detectionState", ")", ",", "detectedPoint", ",", "ct", ".", "byref", "(", "detectedObjectHandle", ")", ",", "detectedSurfaceNormalVector", ",", "operationMode", ")", "arr1", "=", "[", "]", "for", "i", "in", "range", "(", "3", ")", ":", "arr1", ".", "append", "(", "detectedPoint", "[", "i", "]", ")", "arr2", "=", "[", "]", "for", "i", "in", "range", "(", "3", ")", ":", "arr2", ".", "append", "(", "detectedSurfaceNormalVector", "[", "i", "]", ")", "return", "ret", ",", "bool", "(", "detectionState", ".", "value", "!=", "0", ")", ",", "arr1", ",", "detectedObjectHandle", ".", "value", ",", "arr2" ]
43.647059
0.007916
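The wrapper above depends on ctypes array types such as `(ct.c_float*3)()`; here is that pattern in isolation, a minimal sketch that needs no V-REP installation:

import ctypes as ct

detected_point = (ct.c_float * 3)()   # zero-initialised float[3]
detected_point[0] = 1.5
# Copy into a plain Python list, as the wrapper's append loop does:
arr = [detected_point[i] for i in range(3)]
assert arr == [1.5, 0.0, 0.0]

state = ct.c_ubyte(0)                 # out-parameter for the detection flag
assert bool(state.value != 0) is False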
def replace_grist (features, new_grist): """ Replaces the grist of a string by a new one. Returns the string with the new grist. """ assert is_iterable_typed(features, basestring) or isinstance(features, basestring) assert isinstance(new_grist, basestring) # this function is used a lot in the build phase and the original implementation # was extremely slow; thus some of the weird-looking optimizations for this function. single_item = False if isinstance(features, str): features = [features] single_item = True result = [] for feature in features: # '<feature>value' -> ('<feature', '>', 'value') # 'something' -> ('something', '', '') # '<toolset>msvc/<feature>value' -> ('<toolset', '>', 'msvc/<feature>value') grist, split, value = feature.partition('>') # if a partition didn't occur, then grist is just 'something' # set the value to be the grist if not value and not split: value = grist result.append(new_grist + value) if single_item: return result[0] return result
[ "def", "replace_grist", "(", "features", ",", "new_grist", ")", ":", "assert", "is_iterable_typed", "(", "features", ",", "basestring", ")", "or", "isinstance", "(", "features", ",", "basestring", ")", "assert", "isinstance", "(", "new_grist", ",", "basestring", ")", "# this function is used a lot in the build phase and the original implementation", "# was extremely slow; thus some of the weird-looking optimizations for this function.", "single_item", "=", "False", "if", "isinstance", "(", "features", ",", "str", ")", ":", "features", "=", "[", "features", "]", "single_item", "=", "True", "result", "=", "[", "]", "for", "feature", "in", "features", ":", "# '<feature>value' -> ('<feature', '>', 'value')", "# 'something' -> ('something', '', '')", "# '<toolset>msvc/<feature>value' -> ('<toolset', '>', 'msvc/<feature>value')", "grist", ",", "split", ",", "value", "=", "feature", ".", "partition", "(", "'>'", ")", "# if a partition didn't occur, then grist is just 'something'", "# set the value to be the grist", "if", "not", "value", "and", "not", "split", ":", "value", "=", "grist", "result", ".", "append", "(", "new_grist", "+", "value", ")", "if", "single_item", ":", "return", "result", "[", "0", "]", "return", "result" ]
39.571429
0.005286
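The optimisation above hinges on `str.partition`, which always returns a 3-tuple; a short sketch of the three cases called out in the function's comments:

# '<feature>value' -> ('<feature', '>', 'value')
assert '<feature>value'.partition('>') == ('<feature', '>', 'value')
# No separator present: everything lands in the first slot.
assert 'something'.partition('>') == ('something', '', '')
# Only the first '>' splits; the remainder stays in the tail.
assert ('<toolset>msvc/<feature>value'.partition('>')
        == ('<toolset', '>', 'msvc/<feature>value'))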
def setRoute(self, vehID, edgeList): """ setRoute(string, list) -> None changes the vehicle route to given edges list. The first edge in the list has to be the one that the vehicle is at at the moment. example usage: setRoute('1', ['1', '2', '4', '6', '7']) this changes route for vehicle id 1 to edges 1-2-4-6-7 """ if isinstance(edgeList, str): edgeList = [edgeList] self._connection._beginMessage(tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_ROUTE, vehID, 1 + 4 + sum(map(len, edgeList)) + 4 * len(edgeList)) self._connection._packStringList(edgeList) self._connection._sendExact()
[ "def", "setRoute", "(", "self", ",", "vehID", ",", "edgeList", ")", ":", "if", "isinstance", "(", "edgeList", ",", "str", ")", ":", "edgeList", "=", "[", "edgeList", "]", "self", ".", "_connection", ".", "_beginMessage", "(", "tc", ".", "CMD_SET_VEHICLE_VARIABLE", ",", "tc", ".", "VAR_ROUTE", ",", "vehID", ",", "1", "+", "4", "+", "sum", "(", "map", "(", "len", ",", "edgeList", ")", ")", "+", "4", "*", "len", "(", "edgeList", ")", ")", "self", ".", "_connection", ".", "_packStringList", "(", "edgeList", ")", "self", ".", "_connection", ".", "_sendExact", "(", ")" ]
39.388889
0.006887
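The length argument handed to `_beginMessage` above appears to pack the string list as one type byte, a 4-byte string count, and, per string, a 4-byte length plus the raw characters; that layout is an assumption inferred from the expression itself, not from TraCI documentation. The arithmetic worked through for the docstring's example route:

edge_list = ['1', '2', '4', '6', '7']
length = 1 + 4 + sum(map(len, edge_list)) + 4 * len(edge_list)
# 1 (type byte) + 4 (count) + 5 chars + 5 * 4-byte lengths = 30
assert length == 30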
def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'):
    '''
    .. versionadded:: 0.17.0

    Create a block device, character device, or fifo pipe.
    Identical to the GNU mknod.

    CLI Examples:

    .. code-block:: bash

        salt '*' file.mknod /dev/chr c 180 31
        salt '*' file.mknod /dev/blk b 8 999
        salt '*' file.mknod /dev/fifo p
    '''
    ret = False
    makedirs_(name, user, group)
    if ntype == 'c':
        ret = mknod_chrdev(name, major, minor, user, group, mode)
    elif ntype == 'b':
        ret = mknod_blkdev(name, major, minor, user, group, mode)
    elif ntype == 'p':
        ret = mknod_fifo(name, user, group, mode)
    else:
        raise SaltInvocationError(
            'Node type unavailable: \'{0}\'. Available node types are '
            'character (\'c\'), block (\'b\'), and pipe (\'p\').'.format(ntype)
        )
    return ret
[ "def", "mknod", "(", "name", ",", "ntype", ",", "major", "=", "0", ",", "minor", "=", "0", ",", "user", "=", "None", ",", "group", "=", "None", ",", "mode", "=", "'0600'", ")", ":", "ret", "=", "False", "makedirs_", "(", "name", ",", "user", ",", "group", ")", "if", "ntype", "==", "'c'", ":", "ret", "=", "mknod_chrdev", "(", "name", ",", "major", ",", "minor", ",", "user", ",", "group", ",", "mode", ")", "elif", "ntype", "==", "'b'", ":", "ret", "=", "mknod_blkdev", "(", "name", ",", "major", ",", "minor", ",", "user", ",", "group", ",", "mode", ")", "elif", "ntype", "==", "'p'", ":", "ret", "=", "mknod_fifo", "(", "name", ",", "user", ",", "group", ",", "mode", ")", "else", ":", "raise", "SaltInvocationError", "(", "'Node type unavailable: \\'{0}\\'. Available node types are '", "'character (\\'c\\'), block (\\'b\\'), and pipe (\\'p\\').'", ".", "format", "(", "ntype", ")", ")", "return", "ret" ]
26.771429
0.00103
def copy_snapshot(kwargs=None, call=None): ''' Copy a snapshot ''' if call != 'function': log.error( 'The copy_snapshot function must be called with -f or --function.' ) return False if 'source_region' not in kwargs: log.error('A source_region must be specified to copy a snapshot.') return False if 'source_snapshot_id' not in kwargs: log.error('A source_snapshot_id must be specified to copy a snapshot.') return False if 'description' not in kwargs: kwargs['description'] = '' params = {'Action': 'CopySnapshot'} if 'source_region' in kwargs: params['SourceRegion'] = kwargs['source_region'] if 'source_snapshot_id' in kwargs: params['SourceSnapshotId'] = kwargs['source_snapshot_id'] if 'description' in kwargs: params['Description'] = kwargs['description'] log.debug(params) data = aws.query(params, return_url=True, location=get_location(), provider=get_provider(), opts=__opts__, sigver='4') return data
[ "def", "copy_snapshot", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "log", ".", "error", "(", "'The copy_snapshot function must be called with -f or --function.'", ")", "return", "False", "if", "'source_region'", "not", "in", "kwargs", ":", "log", ".", "error", "(", "'A source_region must be specified to copy a snapshot.'", ")", "return", "False", "if", "'source_snapshot_id'", "not", "in", "kwargs", ":", "log", ".", "error", "(", "'A source_snapshot_id must be specified to copy a snapshot.'", ")", "return", "False", "if", "'description'", "not", "in", "kwargs", ":", "kwargs", "[", "'description'", "]", "=", "''", "params", "=", "{", "'Action'", ":", "'CopySnapshot'", "}", "if", "'source_region'", "in", "kwargs", ":", "params", "[", "'SourceRegion'", "]", "=", "kwargs", "[", "'source_region'", "]", "if", "'source_snapshot_id'", "in", "kwargs", ":", "params", "[", "'SourceSnapshotId'", "]", "=", "kwargs", "[", "'source_snapshot_id'", "]", "if", "'description'", "in", "kwargs", ":", "params", "[", "'Description'", "]", "=", "kwargs", "[", "'description'", "]", "log", ".", "debug", "(", "params", ")", "data", "=", "aws", ".", "query", "(", "params", ",", "return_url", "=", "True", ",", "location", "=", "get_location", "(", ")", ",", "provider", "=", "get_provider", "(", ")", ",", "opts", "=", "__opts__", ",", "sigver", "=", "'4'", ")", "return", "data" ]
27.804878
0.000847
def tensor_components_to_use(mrr, mtt, mpp, mrt, mrp, mtp):
    '''
    Converts components to Up, South, East definition::

        USE = [[mrr, mrt, mrp],
               [mrt, mtt, mtp],
               [mrp, mtp, mpp]]
    '''
    return np.array([[mrr, mrt, mrp],
                     [mrt, mtt, mtp],
                     [mrp, mtp, mpp]])
[ "def", "tensor_components_to_use", "(", "mrr", ",", "mtt", ",", "mpp", ",", "mrt", ",", "mrp", ",", "mtp", ")", ":", "return", "np", ".", "array", "(", "[", "[", "mrr", ",", "mrt", ",", "mrp", "]", ",", "[", "mrt", ",", "mtt", ",", "mtp", "]", ",", "[", "mrp", ",", "mtp", ",", "mpp", "]", "]", ")" ]
31.555556
0.003425
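A standalone check of the ordering, confirming the returned Up-South-East matrix is symmetric with the normal-component terms on the diagonal (values are arbitrary):

import numpy as np

def tensor_components_to_use(mrr, mtt, mpp, mrt, mrp, mtp):
    return np.array([[mrr, mrt, mrp],
                     [mrt, mtt, mtp],
                     [mrp, mtp, mpp]])

m = tensor_components_to_use(1, 2, 3, 4, 5, 6)
assert np.array_equal(m, m.T)           # symmetric by construction
assert m[0, 0] == 1 and m[2, 2] == 3    # mrr and mpp on the diagonal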
def primers(self, tm=60): '''Design primers for amplifying the assembled sequence. :param tm: melting temperature (lower than overlaps is best). :type tm: float :returns: Primer list (the output of coral.design.primers). :rtype: list ''' self.primers = coral.design.primers(self.template, tm=tm) return self.primers
[ "def", "primers", "(", "self", ",", "tm", "=", "60", ")", ":", "self", ".", "primers", "=", "coral", ".", "design", ".", "primers", "(", "self", ".", "template", ",", "tm", "=", "tm", ")", "return", "self", ".", "primers" ]
33.727273
0.005249
def _doCascadeFetch(obj): ''' _doCascadeFetch - Takes an object and performs a cascading fetch on all foreign links, and all theirs, and so on. @param obj <IndexedRedisModel> - A fetched model ''' obj.validateModel() if not obj.foreignFields: return # NOTE: Currently this fetches using one transaction per object. Implementation for actual resolution is in # IndexedRedisModel.__getattribute__ for foreignField in obj.foreignFields: subObjsData = object.__getattribute__(obj, foreignField) if not subObjsData: setattr(obj, str(foreignField), irNull) continue subObjs = subObjsData.getObjs() for subObj in subObjs: if isIndexedRedisModel(subObj): IndexedRedisQuery._doCascadeFetch(subObj)
[ "def", "_doCascadeFetch", "(", "obj", ")", ":", "obj", ".", "validateModel", "(", ")", "if", "not", "obj", ".", "foreignFields", ":", "return", "# NOTE: Currently this fetches using one transaction per object. Implementation for actual resolution is in", "# IndexedRedisModel.__getattribute__ ", "for", "foreignField", "in", "obj", ".", "foreignFields", ":", "subObjsData", "=", "object", ".", "__getattribute__", "(", "obj", ",", "foreignField", ")", "if", "not", "subObjsData", ":", "setattr", "(", "obj", ",", "str", "(", "foreignField", ")", ",", "irNull", ")", "continue", "subObjs", "=", "subObjsData", ".", "getObjs", "(", ")", "for", "subObj", "in", "subObjs", ":", "if", "isIndexedRedisModel", "(", "subObj", ")", ":", "IndexedRedisQuery", ".", "_doCascadeFetch", "(", "subObj", ")" ]
30.5
0.038411
def _check_log_scale(base, sides, scales, coord):
    """
    Check the log transforms

    Parameters
    ----------
    base : float or None
        Base of the logarithm in which the ticks will be
        calculated. If ``None``, the base of the log transform
        of the scale will be used.
    sides : str (default: bl)
        Sides onto which to draw the marks. Any combination
        chosen from the characters ``btlr``, for *bottom*, *top*,
        *left* or *right* side marks. If ``coord_flip()`` is used,
        these are the sides *after* the flip.
    scales : SimpleNamespace
        ``x`` and ``y`` scales.
    coord : coord
        Coordinate (e.g. coord_cartesian) system of the geom.

    Returns
    -------
    out : tuple
        The bases (base_x, base_y) to use when generating the ticks.
    """
    def is_log(trans):
        return (trans.__class__.__name__.startswith('log') and
                hasattr(trans, 'base'))

    base_x, base_y = base, base
    x_is_log = is_log(scales.x.trans)
    y_is_log = is_log(scales.y.trans)
    if isinstance(coord, coord_flip):
        x_is_log, y_is_log = y_is_log, x_is_log

    if 't' in sides or 'b' in sides:
        if base_x is None:
            base_x = scales.x.trans.base
        if not x_is_log:
            warnings.warn(
                "annotation_logticks for x-axis which does not have "
                "a log scale. The logticks may not make sense.",
                PlotnineWarning)
        elif x_is_log and base_x != scales.x.trans.base:
            warnings.warn(
                "The x-axis is log transformed in base {}, "
                "but the annotation_logticks are computed in base {}"
                "".format(base_x, scales.x.trans.base),
                PlotnineWarning)

    if 'l' in sides or 'r' in sides:
        if base_y is None:
            base_y = scales.y.trans.base
        if not y_is_log:
            warnings.warn(
                "annotation_logticks for y-axis which does not have "
                "a log scale. The logticks may not make sense.",
                PlotnineWarning)
        elif y_is_log and base_y != scales.y.trans.base:
            warnings.warn(
                "The y-axis is log transformed in base {}, "
                "but the annotation_logticks are computed in base {}"
                "".format(base_y, scales.y.trans.base),
                PlotnineWarning)

    return base_x, base_y
[ "def", "_check_log_scale", "(", "base", ",", "sides", ",", "scales", ",", "coord", ")", ":", "def", "is_log", "(", "trans", ")", ":", "return", "(", "trans", ".", "__class__", ".", "__name__", ".", "startswith", "(", "'log'", ")", "and", "hasattr", "(", "trans", ",", "'base'", ")", ")", "base_x", ",", "base_y", "=", "base", ",", "base", "x_is_log", "=", "is_log", "(", "scales", ".", "x", ".", "trans", ")", "y_is_log", "=", "is_log", "(", "scales", ".", "y", ".", "trans", ")", "if", "isinstance", "(", "coord", ",", "coord_flip", ")", ":", "x_is_log", ",", "y_is_log", "=", "y_is_log", ",", "x_is_log", "if", "'t'", "in", "sides", "or", "'b'", "in", "sides", ":", "if", "base_x", "is", "None", ":", "base_x", "=", "scales", ".", "x", ".", "trans", ".", "base", "if", "not", "x_is_log", ":", "warnings", ".", "warn", "(", "\"annotation_logticks for x-axis which does not have \"", "\"a log scale. The logticks may not make sense.\"", ",", "PlotnineWarning", ")", "elif", "x_is_log", "and", "base_x", "!=", "scales", ".", "x", ".", "trans", ".", "base", ":", "warnings", ".", "warn", "(", "\"The x-axis is log transformed in base {} ,\"", "\"but the annotation_logticks are computed in base {}\"", "\"\"", ".", "format", "(", "base_x", ",", "scales", ".", "x", ".", "trans", ".", "base", ")", ",", "PlotnineWarning", ")", "if", "'l'", "in", "sides", "or", "'r'", "in", "sides", ":", "if", "base_y", "is", "None", ":", "base_y", "=", "scales", ".", "y", ".", "trans", ".", "base", "if", "not", "y_is_log", ":", "warnings", ".", "warn", "(", "\"annotation_logticks for y-axis which does not have \"", "\"a log scale. The logticks may not make sense.\"", ",", "PlotnineWarning", ")", "elif", "y_is_log", "and", "base_y", "!=", "scales", ".", "x", ".", "trans", ".", "base", ":", "warnings", ".", "warn", "(", "\"The y-axis is log transformed in base {} ,\"", "\"but the annotation_logticks are computed in base {}\"", "\"\"", ".", "format", "(", "base_y", ",", "scales", ".", "x", ".", "trans", ".", "base", ")", ",", "PlotnineWarning", ")", "return", "base_x", ",", "base_y" ]
38.820896
0.00075
def load_from_tar_or_patch(tar, image_filename, patch_images): """Do everything necessary to process an image inside a TAR. Parameters ---------- tar : `TarFile` instance The tar from which to read `image_filename`. image_filename : str Fully-qualified path inside of `tar` from which to read an image file. patch_images : dict A dictionary containing filenames (without path) of replacements to be substituted in place of the version of the same file found in `tar`. Returns ------- image_data : bytes The JPEG bytes representing either the image from the TAR archive or its replacement from the patch dictionary. patched : bool True if the image was retrieved from the patch dictionary. False if it was retrieved from the TAR file. """ patched = True image_bytes = patch_images.get(os.path.basename(image_filename), None) if image_bytes is None: patched = False try: image_bytes = tar.extractfile(image_filename).read() numpy.array(Image.open(io.BytesIO(image_bytes))) except (IOError, OSError): with gzip.GzipFile(fileobj=tar.extractfile(image_filename)) as gz: image_bytes = gz.read() numpy.array(Image.open(io.BytesIO(image_bytes))) return image_bytes, patched
[ "def", "load_from_tar_or_patch", "(", "tar", ",", "image_filename", ",", "patch_images", ")", ":", "patched", "=", "True", "image_bytes", "=", "patch_images", ".", "get", "(", "os", ".", "path", ".", "basename", "(", "image_filename", ")", ",", "None", ")", "if", "image_bytes", "is", "None", ":", "patched", "=", "False", "try", ":", "image_bytes", "=", "tar", ".", "extractfile", "(", "image_filename", ")", ".", "read", "(", ")", "numpy", ".", "array", "(", "Image", ".", "open", "(", "io", ".", "BytesIO", "(", "image_bytes", ")", ")", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "with", "gzip", ".", "GzipFile", "(", "fileobj", "=", "tar", ".", "extractfile", "(", "image_filename", ")", ")", "as", "gz", ":", "image_bytes", "=", "gz", ".", "read", "(", ")", "numpy", ".", "array", "(", "Image", ".", "open", "(", "io", ".", "BytesIO", "(", "image_bytes", ")", ")", ")", "return", "image_bytes", ",", "patched" ]
36.864865
0.000714
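The try/except above copes with JPEGs that were stored gzip-compressed inside the TAR. A minimal sketch of the same idea; note it sniffs the gzip magic bytes instead of round-tripping through PIL as the original does, and the helper name is made up:

import gzip
import io

def read_maybe_gzipped(tar, name):
    """Return raw bytes from a TAR member, unwrapping gzip if present."""
    data = tar.extractfile(name).read()
    if data[:2] == b'\x1f\x8b':        # gzip magic number
        data = gzip.GzipFile(fileobj=io.BytesIO(data)).read()
    return data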
def visit(self, node): """ Replace the placeholder if it is one or continue. """ if isinstance(node, Placeholder): return self.placeholders[node.id] else: return super(PlaceholderReplace, self).visit(node)
[ "def", "visit", "(", "self", ",", "node", ")", ":", "if", "isinstance", "(", "node", ",", "Placeholder", ")", ":", "return", "self", ".", "placeholders", "[", "node", ".", "id", "]", "else", ":", "return", "super", "(", "PlaceholderReplace", ",", "self", ")", ".", "visit", "(", "node", ")" ]
41.333333
0.007905
def _resolve_children(self, ldap_user, groups): """ Generates the query result for each child. """ for child in self.children: if isinstance(child, LDAPGroupQuery): yield child.resolve(ldap_user, groups) else: yield groups.is_member_of(child)
[ "def", "_resolve_children", "(", "self", ",", "ldap_user", ",", "groups", ")", ":", "for", "child", "in", "self", ".", "children", ":", "if", "isinstance", "(", "child", ",", "LDAPGroupQuery", ")", ":", "yield", "child", ".", "resolve", "(", "ldap_user", ",", "groups", ")", "else", ":", "yield", "groups", ".", "is_member_of", "(", "child", ")" ]
35.777778
0.006061
def get_dataset(self, key, info):
    """Get the dataset referred to by `key`."""
    angles = self._get_coarse_dataset(key, info)
    if angles is None:
        return

    # Fill gaps at edges of swath
    darr = DataArray(angles, dims=['y', 'x'])
    darr = darr.bfill('x')
    darr = darr.ffill('x')
    angles = darr.data

    res = self.interpolate_angles(angles, key.resolution)

    proj = DataArray(res, dims=['y', 'x'])
    proj.attrs = info.copy()
    proj.attrs['units'] = 'degrees'
    proj.attrs['platform_name'] = self.platform_name

    return proj
[ "def", "get_dataset", "(", "self", ",", "key", ",", "info", ")", ":", "angles", "=", "self", ".", "_get_coarse_dataset", "(", "key", ",", "info", ")", "if", "angles", "is", "None", ":", "return", "# Fill gaps at edges of swath", "darr", "=", "DataArray", "(", "angles", ",", "dims", "=", "[", "'y'", ",", "'x'", "]", ")", "darr", "=", "darr", ".", "bfill", "(", "'x'", ")", "darr", "=", "darr", ".", "ffill", "(", "'x'", ")", "angles", "=", "darr", ".", "data", "res", "=", "self", ".", "interpolate_angles", "(", "angles", ",", "key", ".", "resolution", ")", "proj", "=", "DataArray", "(", "res", ",", "dims", "=", "[", "'y'", ",", "'x'", "]", ")", "proj", ".", "attrs", "=", "info", ".", "copy", "(", ")", "proj", ".", "attrs", "[", "'units'", "]", "=", "'degrees'", "proj", ".", "attrs", "[", "'platform_name'", "]", "=", "self", ".", "platform_name", "return", "proj" ]
30.2
0.00321
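The edge-gap filling uses xarray's `bfill`/`ffill` along the 'x' dimension (these may require the optional bottleneck package, depending on the xarray version). A tiny sketch on synthetic angles:

import numpy as np
from xarray import DataArray

angles = np.array([[np.nan, 1.0, np.nan],
                   [np.nan, 2.0, 3.0]])
darr = DataArray(angles, dims=['y', 'x']).bfill('x').ffill('x')
# Backfill pulls 1.0 to the left edge, forward fill pushes it right.
assert darr.values.tolist() == [[1.0, 1.0, 1.0], [2.0, 2.0, 3.0]]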
def _get_credentials_from_settings(self): """Get the stored credentials if any.""" remember_me = CONF.get('main', 'report_error/remember_me') remember_token = CONF.get('main', 'report_error/remember_token') username = CONF.get('main', 'report_error/username', '') if not remember_me: username = '' return username, remember_me, remember_token
[ "def", "_get_credentials_from_settings", "(", "self", ")", ":", "remember_me", "=", "CONF", ".", "get", "(", "'main'", ",", "'report_error/remember_me'", ")", "remember_token", "=", "CONF", ".", "get", "(", "'main'", ",", "'report_error/remember_token'", ")", "username", "=", "CONF", ".", "get", "(", "'main'", ",", "'report_error/username'", ",", "''", ")", "if", "not", "remember_me", ":", "username", "=", "''", "return", "username", ",", "remember_me", ",", "remember_token" ]
43.888889
0.004963
def get_grading_standards_for_course(self, course_id): """ List the grading standards available to a course https://canvas.instructure.com/doc/api/grading_standards.html#method.grading_standards_api.context_index """ url = COURSES_API.format(course_id) + "/grading_standards" standards = [] for data in self._get_resource(url): standards.append(GradingStandard(data=data)) return standards
[ "def", "get_grading_standards_for_course", "(", "self", ",", "course_id", ")", ":", "url", "=", "COURSES_API", ".", "format", "(", "course_id", ")", "+", "\"/grading_standards\"", "standards", "=", "[", "]", "for", "data", "in", "self", ".", "_get_resource", "(", "url", ")", ":", "standards", ".", "append", "(", "GradingStandard", "(", "data", "=", "data", ")", ")", "return", "standards" ]
41.454545
0.004292
def update(self): """Update the IRQ stats.""" # Init new stats stats = self.get_init_value() # IRQ plugin only available on GNU/Linux if not LINUX: return self.stats if self.input_method == 'local': # Grab the stats stats = self.irq.get() elif self.input_method == 'snmp': # not available pass # Get the TOP 5 (by rate/s) stats = sorted(stats, key=operator.itemgetter('irq_rate'), reverse=True)[:5] # Update the stats self.stats = stats return self.stats
[ "def", "update", "(", "self", ")", ":", "# Init new stats", "stats", "=", "self", ".", "get_init_value", "(", ")", "# IRQ plugin only available on GNU/Linux", "if", "not", "LINUX", ":", "return", "self", ".", "stats", "if", "self", ".", "input_method", "==", "'local'", ":", "# Grab the stats", "stats", "=", "self", ".", "irq", ".", "get", "(", ")", "elif", "self", ".", "input_method", "==", "'snmp'", ":", "# not available", "pass", "# Get the TOP 5 (by rate/s)", "stats", "=", "sorted", "(", "stats", ",", "key", "=", "operator", ".", "itemgetter", "(", "'irq_rate'", ")", ",", "reverse", "=", "True", ")", "[", ":", "5", "]", "# Update the stats", "self", ".", "stats", "=", "stats", "return", "self", ".", "stats" ]
24.5
0.003021
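The TOP-5 selection is an ordinary `sorted` call with `operator.itemgetter`; the same line in isolation, on made-up IRQ rows:

import operator

stats = [{'irq': 'eth0', 'irq_rate': 120},
         {'irq': 'timer', 'irq_rate': 300},
         {'irq': 'nvme', 'irq_rate': 80}]
top = sorted(stats, key=operator.itemgetter('irq_rate'), reverse=True)[:5]
assert [s['irq'] for s in top] == ['timer', 'eth0', 'nvme']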
def estimate_achievable_tmid_precision(snr, t_ingress_min=10,
                                       t_duration_hr=2.14):
    '''Using Carter et al. 2009's estimate, calculate the theoretical optimal
    precision on mid-transit time measurement possible given a transit of a
    particular SNR.

    The relation used is::

        sigma_tc = Q^{-1} * T * sqrt(θ/2)

        Q = SNR of the transit.
        T = transit duration, which is 2.14 hours from the discovery paper.
        θ = τ/T = ratio of ingress to total duration
                ~= (few minutes [guess]) / 2.14 hours

    Parameters
    ----------

    snr : float
        The measured signal-to-noise of the transit, e.g. from
        :py:func:`astrobase.periodbase.kbls.bls_stats_singleperiod` or from
        running the `.compute_stats()` method on an Astropy BoxLeastSquares
        object.

    t_ingress_min : float
        The ingress duration in minutes. This is t_I to t_II in Winn (2010)
        nomenclature.

    t_duration_hr : float
        The transit duration in hours. This is t_I to t_IV in Winn (2010)
        nomenclature.

    Returns
    -------

    float
        Returns the precision achievable for transit-center time as calculated
        from the relation above. This is in days.

    '''

    t_ingress = t_ingress_min*u.minute
    t_duration = t_duration_hr*u.hour

    theta = t_ingress/t_duration

    sigma_tc = (1/snr * t_duration * np.sqrt(theta/2))

    LOGINFO('assuming t_ingress = {:.1f}'.format(t_ingress))
    LOGINFO('assuming t_duration = {:.1f}'.format(t_duration))
    LOGINFO('measured SNR={:.2f}\n\t'.format(snr) +
            '-->theoretical sigma_tc = {:.2e} = {:.2e} = {:.2e}'.format(
                sigma_tc.to(u.minute), sigma_tc.to(u.hour),
                sigma_tc.to(u.day)))

    return sigma_tc.to(u.day).value
[ "def", "estimate_achievable_tmid_precision", "(", "snr", ",", "t_ingress_min", "=", "10", ",", "t_duration_hr", "=", "2.14", ")", ":", "t_ingress", "=", "t_ingress_min", "*", "u", ".", "minute", "t_duration", "=", "t_duration_hr", "*", "u", ".", "hour", "theta", "=", "t_ingress", "/", "t_duration", "sigma_tc", "=", "(", "1", "/", "snr", "*", "t_duration", "*", "np", ".", "sqrt", "(", "theta", "/", "2", ")", ")", "LOGINFO", "(", "'assuming t_ingress = {:.1f}'", ".", "format", "(", "t_ingress", ")", ")", "LOGINFO", "(", "'assuming t_duration = {:.1f}'", ".", "format", "(", "t_duration", ")", ")", "LOGINFO", "(", "'measured SNR={:.2f}\\n\\t'", ".", "format", "(", "snr", ")", "+", "'-->theoretical sigma_tc = {:.2e} = {:.2e} = {:.2e}'", ".", "format", "(", "sigma_tc", ".", "to", "(", "u", ".", "minute", ")", ",", "sigma_tc", ".", "to", "(", "u", ".", "hour", ")", ",", "sigma_tc", ".", "to", "(", "u", ".", "day", ")", ")", ")", "return", "sigma_tc", ".", "to", "(", "u", ".", "day", ")", ".", "value" ]
32.018182
0.001102
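The Carter et al. (2009) relation evaluates cleanly with astropy units on its own. Using the defaults above (10-minute ingress, 2.14-hour duration) and an assumed transit SNR of 50:

import numpy as np
from astropy import units as u

snr = 50.0
t_ingress = 10 * u.minute
t_duration = 2.14 * u.hour
theta = (t_ingress / t_duration).decompose()   # dimensionless tau/T
sigma_tc = (1 / snr) * t_duration * np.sqrt(theta / 2)
# ~0.0084 hours, i.e. roughly 30 seconds of mid-transit precision
print(sigma_tc.to(u.second))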
def step(self, action): """Repeat action, sum reward, and max over last observations.""" total_reward = 0.0 done = None for i in range(self._skip): obs, reward, done, info = self.env.step(action) if i == self._skip - 2: self._obs_buffer[0] = obs if i == self._skip - 1: self._obs_buffer[1] = obs total_reward += reward if done: break # Note that the observation on the done=True frame doesn't matter. max_frame = self._obs_buffer.max(axis=0) return max_frame, total_reward, done, info
[ "def", "step", "(", "self", ",", "action", ")", ":", "total_reward", "=", "0.0", "done", "=", "None", "for", "i", "in", "range", "(", "self", ".", "_skip", ")", ":", "obs", ",", "reward", ",", "done", ",", "info", "=", "self", ".", "env", ".", "step", "(", "action", ")", "if", "i", "==", "self", ".", "_skip", "-", "2", ":", "self", ".", "_obs_buffer", "[", "0", "]", "=", "obs", "if", "i", "==", "self", ".", "_skip", "-", "1", ":", "self", ".", "_obs_buffer", "[", "1", "]", "=", "obs", "total_reward", "+=", "reward", "if", "done", ":", "break", "# Note that the observation on the done=True frame doesn't matter.", "max_frame", "=", "self", ".", "_obs_buffer", ".", "max", "(", "axis", "=", "0", ")", "return", "max_frame", ",", "total_reward", ",", "done", ",", "info" ]
34.4375
0.010601
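This is the standard "max and skip" trick from DQN-style Atari preprocessing: repeat the action, keep only the last two frames, and return their pixelwise maximum to cancel sprite flicker. The buffering logic alone, on synthetic numpy frames:

import numpy as np

skip = 4
obs_buffer = np.zeros((2, 84, 84), dtype=np.uint8)
for i in range(skip):
    frame = np.random.randint(0, 255, (84, 84), dtype=np.uint8)  # stand-in for env.step
    if i == skip - 2:
        obs_buffer[0] = frame
    if i == skip - 1:
        obs_buffer[1] = frame
max_frame = obs_buffer.max(axis=0)   # pixelwise max of the last two frames
assert max_frame.shape == (84, 84)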
def context(root, project=""): """Produce the be environment The environment is an exact replica of the active environment of the current process, with a few additional variables, all of which are listed below. """ environment = os.environ.copy() environment.update({ "BE_PROJECT": project, "BE_PROJECTROOT": ( os.path.join(root, project).replace("\\", "/") if project else ""), "BE_PROJECTSROOT": root, "BE_ALIASDIR": "", "BE_CWD": root, "BE_CD": "", "BE_ROOT": "", "BE_TOPICS": "", "BE_DEVELOPMENTDIR": "", "BE_ACTIVE": "1", "BE_USER": "", "BE_SCRIPT": "", "BE_PYTHON": "", "BE_ENTER": "", "BE_TEMPDIR": "", "BE_PRESETSDIR": "", "BE_GITHUB_API_TOKEN": "", "BE_ENVIRONMENT": "", "BE_BINDING": "", "BE_TABCOMPLETION": "" }) return environment
[ "def", "context", "(", "root", ",", "project", "=", "\"\"", ")", ":", "environment", "=", "os", ".", "environ", ".", "copy", "(", ")", "environment", ".", "update", "(", "{", "\"BE_PROJECT\"", ":", "project", ",", "\"BE_PROJECTROOT\"", ":", "(", "os", ".", "path", ".", "join", "(", "root", ",", "project", ")", ".", "replace", "(", "\"\\\\\"", ",", "\"/\"", ")", "if", "project", "else", "\"\"", ")", ",", "\"BE_PROJECTSROOT\"", ":", "root", ",", "\"BE_ALIASDIR\"", ":", "\"\"", ",", "\"BE_CWD\"", ":", "root", ",", "\"BE_CD\"", ":", "\"\"", ",", "\"BE_ROOT\"", ":", "\"\"", ",", "\"BE_TOPICS\"", ":", "\"\"", ",", "\"BE_DEVELOPMENTDIR\"", ":", "\"\"", ",", "\"BE_ACTIVE\"", ":", "\"1\"", ",", "\"BE_USER\"", ":", "\"\"", ",", "\"BE_SCRIPT\"", ":", "\"\"", ",", "\"BE_PYTHON\"", ":", "\"\"", ",", "\"BE_ENTER\"", ":", "\"\"", ",", "\"BE_TEMPDIR\"", ":", "\"\"", ",", "\"BE_PRESETSDIR\"", ":", "\"\"", ",", "\"BE_GITHUB_API_TOKEN\"", ":", "\"\"", ",", "\"BE_ENVIRONMENT\"", ":", "\"\"", ",", "\"BE_BINDING\"", ":", "\"\"", ",", "\"BE_TABCOMPLETION\"", ":", "\"\"", "}", ")", "return", "environment" ]
25.861111
0.001035
def render(self, doc, context=None, math_option=False, img_path='', css_path=CSS_PATH): """Start thread to render a given documentation""" # If the thread is already running wait for it to finish before # starting it again. if self.wait(): self.doc = doc self.context = context self.math_option = math_option self.img_path = img_path self.css_path = css_path # This causes run() to be executed in separate thread self.start()
[ "def", "render", "(", "self", ",", "doc", ",", "context", "=", "None", ",", "math_option", "=", "False", ",", "img_path", "=", "''", ",", "css_path", "=", "CSS_PATH", ")", ":", "# If the thread is already running wait for it to finish before", "# starting it again.", "if", "self", ".", "wait", "(", ")", ":", "self", ".", "doc", "=", "doc", "self", ".", "context", "=", "context", "self", ".", "math_option", "=", "math_option", "self", ".", "img_path", "=", "img_path", "self", ".", "css_path", "=", "css_path", "# This causes run() to be executed in separate thread", "self", ".", "start", "(", ")" ]
41.846154
0.005396
def list_security_groups(self, retrieve_all=True, **_params): """Fetches a list of all security groups for a project.""" return self.list('security_groups', self.security_groups_path, retrieve_all, **_params)
[ "def", "list_security_groups", "(", "self", ",", "retrieve_all", "=", "True", ",", "*", "*", "_params", ")", ":", "return", "self", ".", "list", "(", "'security_groups'", ",", "self", ".", "security_groups_path", ",", "retrieve_all", ",", "*", "*", "_params", ")" ]
61.5
0.008032
def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects
[ "def", "_to_dict", "(", "objects", ")", ":", "try", ":", "if", "isinstance", "(", "objects", ",", "six", ".", "string_types", ")", ":", "objects", "=", "salt", ".", "utils", ".", "json", ".", "loads", "(", "objects", ")", "except", "ValueError", "as", "err", ":", "log", ".", "error", "(", "\"Could not parse objects: %s\"", ",", "err", ")", "raise", "err", "return", "objects" ]
27.166667
0.002967
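The same string-or-object coercion with the plain standard-library json module (the original routes through salt.utils.json and six for Python 2/3 compatibility):

import json

def to_dict(objects):
    """Interpret a string as JSON; pass anything else through unchanged."""
    if isinstance(objects, str):
        objects = json.loads(objects)
    return objects

assert to_dict('{"a": 1}') == {'a': 1}
assert to_dict({'a': 1}) == {'a': 1}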
def insert(self, song):
    """Insert a song right after the current song."""
    if song in self._songs:
        return

    if self._current_song is None:
        self._songs.append(song)
    else:
        index = self._songs.index(self._current_song)
        self._songs.insert(index + 1, song)
[ "def", "insert", "(", "self", ",", "song", ")", ":", "if", "song", "in", "self", ".", "_songs", ":", "return", "if", "self", ".", "_current_song", "is", "None", ":", "self", ".", "_songs", ".", "append", "(", "song", ")", "else", ":", "index", "=", "self", ".", "_songs", ".", "index", "(", "self", ".", "_current_song", ")", "self", ".", "_songs", ".", "insert", "(", "index", "+", "1", ",", "song", ")" ]
32.111111
0.006734
def stop(self): """ Stop this WriterProcessBase, and reset the cursor. """ self.stop_flag.value = True with self.lock: ( Control().text(C(' ', style='reset_all')) .pos_restore().move_column(1).erase_line() .write(self.file) )
[ "def", "stop", "(", "self", ")", ":", "self", ".", "stop_flag", ".", "value", "=", "True", "with", "self", ".", "lock", ":", "(", "Control", "(", ")", ".", "text", "(", "C", "(", "' '", ",", "style", "=", "'reset_all'", ")", ")", ".", "pos_restore", "(", ")", ".", "move_column", "(", "1", ")", ".", "erase_line", "(", ")", ".", "write", "(", "self", ".", "file", ")", ")" ]
34.777778
0.006231
def getReadGroupSet(self, id_): """ Returns the readgroup set with the specified ID. """ compoundId = datamodel.ReadGroupSetCompoundId.parse(id_) dataset = self.getDataset(compoundId.dataset_id) return dataset.getReadGroupSet(id_)
[ "def", "getReadGroupSet", "(", "self", ",", "id_", ")", ":", "compoundId", "=", "datamodel", ".", "ReadGroupSetCompoundId", ".", "parse", "(", "id_", ")", "dataset", "=", "self", ".", "getDataset", "(", "compoundId", ".", "dataset_id", ")", "return", "dataset", ".", "getReadGroupSet", "(", "id_", ")" ]
38.857143
0.007194
def __main(draft, directory, project_name, project_version, project_date, answer_yes): """ The main entry point. """ directory = os.path.abspath(directory) config = load_config(directory) to_err = draft click.echo("Loading template...", err=to_err) if config["template"] is None: template = pkg_resources.resource_string( __name__, "templates/template.rst" ).decode("utf8") else: with open(config["template"], "rb") as tmpl: template = tmpl.read().decode("utf8") click.echo("Finding news fragments...", err=to_err) definitions = config["types"] if config.get("directory"): base_directory = os.path.abspath(config["directory"]) fragment_directory = None else: base_directory = os.path.abspath( os.path.join(directory, config["package_dir"], config["package"]) ) fragment_directory = "newsfragments" fragments, fragment_filenames = find_fragments( base_directory, config["sections"], fragment_directory, definitions ) click.echo("Rendering news fragments...", err=to_err) fragments = split_fragments(fragments, definitions) rendered = render_fragments( # The 0th underline is used for the top line template, config["issue_format"], fragments, definitions, config["underlines"][1:], config["wrap"], ) if project_version is None: project_version = get_version( os.path.join(directory, config["package_dir"]), config["package"] ) if project_name is None: package = config.get("package") if package: project_name = get_project_name( os.path.abspath(os.path.join(directory, config["package_dir"])), package ) else: # Can't determine a project_name, but maybe it is not needed. project_name = "" if project_date is None: project_date = _get_date() top_line = config["title_format"].format( name=project_name, version=project_version, project_date=project_date ) top_line += u"\n" + (config["underlines"][0] * len(top_line)) + u"\n" if draft: click.echo( "Draft only -- nothing has been written.\n" "What is seen below is what would be written.\n", err=to_err, ) click.echo("%s\n%s" % (top_line, rendered)) else: click.echo("Writing to newsfile...", err=to_err) start_line = config["start_line"] append_to_newsfile( directory, config["filename"], start_line, top_line, rendered ) click.echo("Staging newsfile...", err=to_err) stage_newsfile(directory, config["filename"]) click.echo("Removing news fragments...", err=to_err) remove_files(fragment_filenames, answer_yes) click.echo("Done!", err=to_err)
[ "def", "__main", "(", "draft", ",", "directory", ",", "project_name", ",", "project_version", ",", "project_date", ",", "answer_yes", ")", ":", "directory", "=", "os", ".", "path", ".", "abspath", "(", "directory", ")", "config", "=", "load_config", "(", "directory", ")", "to_err", "=", "draft", "click", ".", "echo", "(", "\"Loading template...\"", ",", "err", "=", "to_err", ")", "if", "config", "[", "\"template\"", "]", "is", "None", ":", "template", "=", "pkg_resources", ".", "resource_string", "(", "__name__", ",", "\"templates/template.rst\"", ")", ".", "decode", "(", "\"utf8\"", ")", "else", ":", "with", "open", "(", "config", "[", "\"template\"", "]", ",", "\"rb\"", ")", "as", "tmpl", ":", "template", "=", "tmpl", ".", "read", "(", ")", ".", "decode", "(", "\"utf8\"", ")", "click", ".", "echo", "(", "\"Finding news fragments...\"", ",", "err", "=", "to_err", ")", "definitions", "=", "config", "[", "\"types\"", "]", "if", "config", ".", "get", "(", "\"directory\"", ")", ":", "base_directory", "=", "os", ".", "path", ".", "abspath", "(", "config", "[", "\"directory\"", "]", ")", "fragment_directory", "=", "None", "else", ":", "base_directory", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "config", "[", "\"package_dir\"", "]", ",", "config", "[", "\"package\"", "]", ")", ")", "fragment_directory", "=", "\"newsfragments\"", "fragments", ",", "fragment_filenames", "=", "find_fragments", "(", "base_directory", ",", "config", "[", "\"sections\"", "]", ",", "fragment_directory", ",", "definitions", ")", "click", ".", "echo", "(", "\"Rendering news fragments...\"", ",", "err", "=", "to_err", ")", "fragments", "=", "split_fragments", "(", "fragments", ",", "definitions", ")", "rendered", "=", "render_fragments", "(", "# The 0th underline is used for the top line", "template", ",", "config", "[", "\"issue_format\"", "]", ",", "fragments", ",", "definitions", ",", "config", "[", "\"underlines\"", "]", "[", "1", ":", "]", ",", "config", "[", "\"wrap\"", "]", ",", ")", "if", "project_version", "is", "None", ":", "project_version", "=", "get_version", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "config", "[", "\"package_dir\"", "]", ")", ",", "config", "[", "\"package\"", "]", ")", "if", "project_name", "is", "None", ":", "package", "=", "config", ".", "get", "(", "\"package\"", ")", "if", "package", ":", "project_name", "=", "get_project_name", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "config", "[", "\"package_dir\"", "]", ")", ")", ",", "package", ")", "else", ":", "# Can't determine a project_name, but maybe it is not needed.", "project_name", "=", "\"\"", "if", "project_date", "is", "None", ":", "project_date", "=", "_get_date", "(", ")", "top_line", "=", "config", "[", "\"title_format\"", "]", ".", "format", "(", "name", "=", "project_name", ",", "version", "=", "project_version", ",", "project_date", "=", "project_date", ")", "top_line", "+=", "u\"\\n\"", "+", "(", "config", "[", "\"underlines\"", "]", "[", "0", "]", "*", "len", "(", "top_line", ")", ")", "+", "u\"\\n\"", "if", "draft", ":", "click", ".", "echo", "(", "\"Draft only -- nothing has been written.\\n\"", "\"What is seen below is what would be written.\\n\"", ",", "err", "=", "to_err", ",", ")", "click", ".", "echo", "(", "\"%s\\n%s\"", "%", "(", "top_line", ",", "rendered", ")", ")", "else", ":", "click", ".", "echo", "(", "\"Writing to newsfile...\"", ",", "err", "=", "to_err", ")", "start_line", 
"=", "config", "[", "\"start_line\"", "]", "append_to_newsfile", "(", "directory", ",", "config", "[", "\"filename\"", "]", ",", "start_line", ",", "top_line", ",", "rendered", ")", "click", ".", "echo", "(", "\"Staging newsfile...\"", ",", "err", "=", "to_err", ")", "stage_newsfile", "(", "directory", ",", "config", "[", "\"filename\"", "]", ")", "click", ".", "echo", "(", "\"Removing news fragments...\"", ",", "err", "=", "to_err", ")", "remove_files", "(", "fragment_filenames", ",", "answer_yes", ")", "click", ".", "echo", "(", "\"Done!\"", ",", "err", "=", "to_err", ")" ]
31.833333
0.001016
def _make_info(self, name, stat_result, namespaces): """Create an `Info` object from a stat result. """ info = { 'basic': { 'name': name, 'is_dir': stat.S_ISDIR(stat_result.st_mode) } } if 'details' in namespaces: info['details'] = self._make_details_from_stat(stat_result) if 'stat' in namespaces: info['stat'] = { k: getattr(stat_result, k) for k in dir(stat_result) if k.startswith('st_') } if 'access' in namespaces: info['access'] = self._make_access_from_stat(stat_result) return Info(info)
[ "def", "_make_info", "(", "self", ",", "name", ",", "stat_result", ",", "namespaces", ")", ":", "info", "=", "{", "'basic'", ":", "{", "'name'", ":", "name", ",", "'is_dir'", ":", "stat", ".", "S_ISDIR", "(", "stat_result", ".", "st_mode", ")", "}", "}", "if", "'details'", "in", "namespaces", ":", "info", "[", "'details'", "]", "=", "self", ".", "_make_details_from_stat", "(", "stat_result", ")", "if", "'stat'", "in", "namespaces", ":", "info", "[", "'stat'", "]", "=", "{", "k", ":", "getattr", "(", "stat_result", ",", "k", ")", "for", "k", "in", "dir", "(", "stat_result", ")", "if", "k", ".", "startswith", "(", "'st_'", ")", "}", "if", "'access'", "in", "namespaces", ":", "info", "[", "'access'", "]", "=", "self", ".", "_make_access_from_stat", "(", "stat_result", ")", "return", "Info", "(", "info", ")" ]
35.684211
0.002874
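The 'stat' namespace above is simply every `st_`-prefixed attribute of the stat result, collected by introspection; the same comprehension run against a real `os.stat` call:

import os
import stat

result = os.stat('.')
info_stat = {k: getattr(result, k) for k in dir(result) if k.startswith('st_')}
assert 'st_mode' in info_stat
assert stat.S_ISDIR(result.st_mode)   # '.' is a directory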
def render_source(self): """Render the sourcecode.""" return SOURCE_TABLE_HTML % u'\n'.join(line.render() for line in self.get_annotated_lines())
[ "def", "render_source", "(", "self", ")", ":", "return", "SOURCE_TABLE_HTML", "%", "u'\\n'", ".", "join", "(", "line", ".", "render", "(", ")", "for", "line", "in", "self", ".", "get_annotated_lines", "(", ")", ")" ]
51
0.009662
def avail_locations(call=None): ''' List all available locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) ret = {} conn = get_conn() response = conn.getCreateObjectOptions() #return response for datacenter in response['datacenters']: #return data center ret[datacenter['template']['datacenter']['name']] = { 'name': datacenter['template']['datacenter']['name'], } return ret
[ "def", "avail_locations", "(", "call", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The avail_locations function must be called with '", "'-f or --function, or with the --list-locations option'", ")", "ret", "=", "{", "}", "conn", "=", "get_conn", "(", ")", "response", "=", "conn", ".", "getCreateObjectOptions", "(", ")", "#return response", "for", "datacenter", "in", "response", "[", "'datacenters'", "]", ":", "#return data center", "ret", "[", "datacenter", "[", "'template'", "]", "[", "'datacenter'", "]", "[", "'name'", "]", "]", "=", "{", "'name'", ":", "datacenter", "[", "'template'", "]", "[", "'datacenter'", "]", "[", "'name'", "]", ",", "}", "return", "ret" ]
29.65
0.004902
def stop_timer(self, timer_id): """ Stop a timer. If the timer is not active, nothing happens. """ self._logger.debug('Stop timer {} in stm {}'.format(timer_id, self.id)) self._driver._stop_timer(timer_id, self)
[ "def", "stop_timer", "(", "self", ",", "timer_id", ")", ":", "self", ".", "_logger", ".", "debug", "(", "'Stop timer {} in stm {}'.", "f", "ormat(", "t", "imer_id,", " ", "elf.", "i", "d)", ")", "", "self", ".", "_driver", ".", "_stop_timer", "(", "timer_id", ",", "self", ")" ]
31.625
0.007692
def send_media_group(self, chat_id, media, disable_notification=None, reply_to_message_id=None): """ Use this method to send a group of photos or videos as an album. On success, an array of the sent Messages is returned. https://core.telegram.org/bots/api#sendmediagroup Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param media: A array describing photos and videos to be sent, must include 2–10 items :type media: list of (pytgbot.api_types.sendable.input_media.InputMediaPhoto|pytgbot.api_types.sendable.input_media.InputMediaVideo) Optional keyword parameters: :param disable_notification: Sends the messages silently. Users will receive a notification with no sound. :type disable_notification: bool :param reply_to_message_id: If the messages are a reply, ID of the original message :type reply_to_message_id: int Returns: :return: On success, an array of the sent Messages is returned :rtype: Messages """ assert_type_or_raise(chat_id, (int, unicode_type), parameter_name="chat_id") from .api_types.sendable.input_media import InputMediaPhoto, InputMediaVideo files = {} new_media = [] assert_type_or_raise(media, list, parameter_name="media") for i, medium in enumerate(media): assert_type_or_raise(medium, InputMediaPhoto, InputMediaVideo, parameter_name="media[{i}]".format(i=i)) assert isinstance(medium, (InputMediaPhoto, InputMediaVideo)) new_medium, file = medium.get_request_data('pytgbot{i}'.format(i=i), full_data=True) logger.debug('InputMedia {} found.'.format(new_medium)) new_media.append(new_medium) if file: files.update(file) # end if # end for new_media = json.dumps(new_media) assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification") assert_type_or_raise(reply_to_message_id, None, int, parameter_name="reply_to_message_id") result = self.do( "sendMediaGroup", chat_id=chat_id, media=new_media, files=files, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, ) if self.return_python_objects: logger.debug("Trying to parse {data}".format(data=repr(result))) # no valid parsing so far if not isinstance(result, list): raise TgApiParseException("Could not parse result als list.") # See debug log for details! # end if from .api_types.receivable.updates import Message return [Message.from_array(msg) for msg in result] # parse them all as Message. raise TgApiParseException("Could not parse result.") # See debug log for details! # end if return_python_objects return result
[ "def", "send_media_group", "(", "self", ",", "chat_id", ",", "media", ",", "disable_notification", "=", "None", ",", "reply_to_message_id", "=", "None", ")", ":", "assert_type_or_raise", "(", "chat_id", ",", "(", "int", ",", "unicode_type", ")", ",", "parameter_name", "=", "\"chat_id\"", ")", "from", ".", "api_types", ".", "sendable", ".", "input_media", "import", "InputMediaPhoto", ",", "InputMediaVideo", "files", "=", "{", "}", "new_media", "=", "[", "]", "assert_type_or_raise", "(", "media", ",", "list", ",", "parameter_name", "=", "\"media\"", ")", "for", "i", ",", "medium", "in", "enumerate", "(", "media", ")", ":", "assert_type_or_raise", "(", "medium", ",", "InputMediaPhoto", ",", "InputMediaVideo", ",", "parameter_name", "=", "\"media[{i}]\"", ".", "format", "(", "i", "=", "i", ")", ")", "assert", "isinstance", "(", "medium", ",", "(", "InputMediaPhoto", ",", "InputMediaVideo", ")", ")", "new_medium", ",", "file", "=", "medium", ".", "get_request_data", "(", "'pytgbot{i}'", ".", "format", "(", "i", "=", "i", ")", ",", "full_data", "=", "True", ")", "logger", ".", "debug", "(", "'InputMedia {} found.'", ".", "format", "(", "new_medium", ")", ")", "new_media", ".", "append", "(", "new_medium", ")", "if", "file", ":", "files", ".", "update", "(", "file", ")", "# end if", "# end for", "new_media", "=", "json", ".", "dumps", "(", "new_media", ")", "assert_type_or_raise", "(", "disable_notification", ",", "None", ",", "bool", ",", "parameter_name", "=", "\"disable_notification\"", ")", "assert_type_or_raise", "(", "reply_to_message_id", ",", "None", ",", "int", ",", "parameter_name", "=", "\"reply_to_message_id\"", ")", "result", "=", "self", ".", "do", "(", "\"sendMediaGroup\"", ",", "chat_id", "=", "chat_id", ",", "media", "=", "new_media", ",", "files", "=", "files", ",", "disable_notification", "=", "disable_notification", ",", "reply_to_message_id", "=", "reply_to_message_id", ",", ")", "if", "self", ".", "return_python_objects", ":", "logger", ".", "debug", "(", "\"Trying to parse {data}\"", ".", "format", "(", "data", "=", "repr", "(", "result", ")", ")", ")", "# no valid parsing so far", "if", "not", "isinstance", "(", "result", ",", "list", ")", ":", "raise", "TgApiParseException", "(", "\"Could not parse result als list.\"", ")", "# See debug log for details!", "# end if", "from", ".", "api_types", ".", "receivable", ".", "updates", "import", "Message", "return", "[", "Message", ".", "from_array", "(", "msg", ")", "for", "msg", "in", "result", "]", "# parse them all as Message.", "raise", "TgApiParseException", "(", "\"Could not parse result.\"", ")", "# See debug log for details!", "# end if return_python_objects", "return", "result" ]
46.446154
0.006487
def register(linter): """ Registering additional checkers. """ # add all of the checkers register_checkers(linter) # register any checking fiddlers try: from pylint_django.augmentations import apply_augmentations apply_augmentations(linter) except ImportError: # probably trying to execute pylint_django when Django isn't installed # in this case the django-not-installed checker will kick-in pass if not compat.LOAD_CONFIGURATION_SUPPORTED: load_configuration(linter)
[ "def", "register", "(", "linter", ")", ":", "# add all of the checkers", "register_checkers", "(", "linter", ")", "# register any checking fiddlers", "try", ":", "from", "pylint_django", ".", "augmentations", "import", "apply_augmentations", "apply_augmentations", "(", "linter", ")", "except", "ImportError", ":", "# probably trying to execute pylint_django when Django isn't installed", "# in this case the django-not-installed checker will kick-in", "pass", "if", "not", "compat", ".", "LOAD_CONFIGURATION_SUPPORTED", ":", "load_configuration", "(", "linter", ")" ]
29.833333
0.001805
def resolve_blobs(self, iter_blobs): """Resolve the blobs given in blob iterator. This will effectively remove the index entries of the respective path at all non-null stages and add the given blob as new stage null blob. For each path there may only be one blob, otherwise a ValueError will be raised claiming the path is already at stage 0. :raise ValueError: if one of the blobs already existed at stage 0 :return: self :note: You will have to write the index manually once you are done, i.e. index.resolve_blobs(blobs).write() """ for blob in iter_blobs: stage_null_key = (blob.path, 0) if stage_null_key in self.entries: raise ValueError("Path %r already exists at stage 0" % blob.path) # END assert blob is not stage 0 already # delete all possible stages for stage in (1, 2, 3): try: del(self.entries[(blob.path, stage)]) except KeyError: pass # END ignore key errors # END for each possible stage self.entries[stage_null_key] = IndexEntry.from_blob(blob) # END for each blob return self
[ "def", "resolve_blobs", "(", "self", ",", "iter_blobs", ")", ":", "for", "blob", "in", "iter_blobs", ":", "stage_null_key", "=", "(", "blob", ".", "path", ",", "0", ")", "if", "stage_null_key", "in", "self", ".", "entries", ":", "raise", "ValueError", "(", "\"Path %r already exists at stage 0\"", "%", "blob", ".", "path", ")", "# END assert blob is not stage 0 already", "# delete all possible stages", "for", "stage", "in", "(", "1", ",", "2", ",", "3", ")", ":", "try", ":", "del", "(", "self", ".", "entries", "[", "(", "blob", ".", "path", ",", "stage", ")", "]", ")", "except", "KeyError", ":", "pass", "# END ignore key errors", "# END for each possible stage", "self", ".", "entries", "[", "stage_null_key", "]", "=", "IndexEntry", ".", "from_blob", "(", "blob", ")", "# END for each blob", "return", "self" ]
37.5
0.004587
def read_mm_uic2(fd, byte_order, dtype, count): """Read MM_UIC2 tag from file and return as dictionary.""" result = {'number_planes': count} values = numpy.fromfile(fd, byte_order+'I', 6*count) result['z_distance'] = values[0::6] // values[1::6] #result['date_created'] = tuple(values[2::6]) #result['time_created'] = tuple(values[3::6]) #result['date_modified'] = tuple(values[4::6]) #result['time_modified'] = tuple(values[5::6]) return result
[ "def", "read_mm_uic2", "(", "fd", ",", "byte_order", ",", "dtype", ",", "count", ")", ":", "result", "=", "{", "'number_planes'", ":", "count", "}", "values", "=", "numpy", ".", "fromfile", "(", "fd", ",", "byte_order", "+", "'I'", ",", "6", "*", "count", ")", "result", "[", "'z_distance'", "]", "=", "values", "[", "0", ":", ":", "6", "]", "//", "values", "[", "1", ":", ":", "6", "]", "#result['date_created'] = tuple(values[2::6])", "#result['time_created'] = tuple(values[3::6])", "#result['date_modified'] = tuple(values[4::6])", "#result['time_modified'] = tuple(values[5::6])", "return", "result" ]
47.2
0.010395
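The parser reads six interleaved uint32 values per plane and pulls fields apart with strided slices; the slicing on its own, using synthetic data in place of `numpy.fromfile`:

import numpy as np

count = 3
values = np.arange(6 * count, dtype=np.uint32)  # stand-in for numpy.fromfile
z_numerators = values[0::6]    # elements 0, 6, 12, ...
z_denominators = values[1::6]  # elements 1, 7, 13, ...
assert list(z_numerators) == [0, 6, 12]
assert list(z_denominators) == [1, 7, 13]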
def mission_start_date_as_string(self) -> str: """ Returns: mission start date as string """ return self._start_date_as_string(self.day, self.month, self.year)
[ "def", "mission_start_date_as_string", "(", "self", ")", "->", "str", ":", "return", "self", ".", "_start_date_as_string", "(", "self", ".", "day", ",", "self", ".", "month", ",", "self", ".", "year", ")" ]
37.4
0.010471
def infer_typing_namedtuple_class(class_node, context=None): """Infer a subclass of typing.NamedTuple""" # Check if it has the corresponding bases annassigns_fields = [ annassign.target.name for annassign in class_node.body if isinstance(annassign, nodes.AnnAssign) ] code = dedent( """ from collections import namedtuple namedtuple({typename!r}, {fields!r}) """ ).format(typename=class_node.name, fields=",".join(annassigns_fields)) node = extract_node(code) generated_class_node = next(infer_named_tuple(node, context)) for method in class_node.mymethods(): generated_class_node.locals[method.name] = [method] return iter((generated_class_node,))
[ "def", "infer_typing_namedtuple_class", "(", "class_node", ",", "context", "=", "None", ")", ":", "# Check if it has the corresponding bases", "annassigns_fields", "=", "[", "annassign", ".", "target", ".", "name", "for", "annassign", "in", "class_node", ".", "body", "if", "isinstance", "(", "annassign", ",", "nodes", ".", "AnnAssign", ")", "]", "code", "=", "dedent", "(", "\"\"\"\n from collections import namedtuple\n namedtuple({typename!r}, {fields!r})\n \"\"\"", ")", ".", "format", "(", "typename", "=", "class_node", ".", "name", ",", "fields", "=", "\",\"", ".", "join", "(", "annassigns_fields", ")", ")", "node", "=", "extract_node", "(", "code", ")", "generated_class_node", "=", "next", "(", "infer_named_tuple", "(", "node", ",", "context", ")", ")", "for", "method", "in", "class_node", ".", "mymethods", "(", ")", ":", "generated_class_node", ".", "locals", "[", "method", ".", "name", "]", "=", "[", "method", "]", "return", "iter", "(", "(", "generated_class_node", ",", ")", ")" ]
38
0.001351
def find_atoms_near_atom(self, source_atom, search_radius, atom_hit_cache = set(), atom_names_to_include = set(), atom_names_to_exclude = set(), restrict_to_CA = False): '''It is advisable to set up and use an atom hit cache object. This reduces the number of distance calculations and gives better performance. See find_sidechain_atoms_within_radius_of_residue_objects for an example of how to set this up e.g. atom_hit_cache = set() for x in some_loop: this_object.find_atoms_near_atom(source_atom, search_radius, atom_hit_cache = atom_hit_cache) ''' if len(atom_names_to_include) > 0 and len(atom_names_to_exclude) > 0: raise Exception('Error: either one of the set of atoms types to include or the set of atom types to exclude can be set but not both.') atom_names_to_exclude = set(atom_names_to_exclude) if atom_names_to_include: atom_names_to_exclude = set(self.atom_name_to_group.keys()).difference(atom_names_to_include) radius = float(search_radius) + self.buffer # add buffer to account for edge cases in searching bin_size = self.bin_size atom_bins = self.atom_bins if source_atom: bin_radius = int(math.ceil(radius / bin_size)) # search this many bins in all directions xrange = range(max(0, source_atom.bin.x - bin_radius), min(self.atom_bin_dimensions[0], source_atom.bin.x + bin_radius) + 1) yrange = range(max(0, source_atom.bin.y - bin_radius), min(self.atom_bin_dimensions[1], source_atom.bin.y + bin_radius) + 1) zrange = range(max(0, source_atom.bin.z - bin_radius), min(self.atom_bin_dimensions[2], source_atom.bin.z + bin_radius) + 1) for x in xrange: for y in yrange: for z in zrange: for atom in atom_bins[x][y][z]: if atom not in atom_hit_cache: if restrict_to_CA: if atom.name == 'CA' and (source_atom - atom <= search_radius): atom_hit_cache.add(atom) else: if (source_atom - atom <= search_radius) and (atom.name not in atom_names_to_exclude): atom_hit_cache.add(atom) return atom_hit_cache
[ "def", "find_atoms_near_atom", "(", "self", ",", "source_atom", ",", "search_radius", ",", "atom_hit_cache", "=", "set", "(", ")", ",", "atom_names_to_include", "=", "set", "(", ")", ",", "atom_names_to_exclude", "=", "set", "(", ")", ",", "restrict_to_CA", "=", "False", ")", ":", "if", "len", "(", "atom_names_to_include", ")", ">", "0", "and", "len", "(", "atom_names_to_exclude", ")", ">", "0", ":", "raise", "Exception", "(", "'Error: either one of the set of atoms types to include or the set of atom types to exclude can be set but not both.'", ")", "atom_names_to_exclude", "=", "set", "(", "atom_names_to_exclude", ")", "if", "atom_names_to_include", ":", "atom_names_to_exclude", "=", "set", "(", "self", ".", "atom_name_to_group", ".", "keys", "(", ")", ")", ".", "difference", "(", "atom_names_to_include", ")", "radius", "=", "float", "(", "search_radius", ")", "+", "self", ".", "buffer", "# add buffer to account for edge cases in searching", "bin_size", "=", "self", ".", "bin_size", "atom_bins", "=", "self", ".", "atom_bins", "if", "source_atom", ":", "bin_radius", "=", "int", "(", "math", ".", "ceil", "(", "radius", "/", "bin_size", ")", ")", "# search this many bins in all directions", "xrange", "=", "range", "(", "max", "(", "0", ",", "source_atom", ".", "bin", ".", "x", "-", "bin_radius", ")", ",", "min", "(", "self", ".", "atom_bin_dimensions", "[", "0", "]", ",", "source_atom", ".", "bin", ".", "x", "+", "bin_radius", ")", "+", "1", ")", "yrange", "=", "range", "(", "max", "(", "0", ",", "source_atom", ".", "bin", ".", "y", "-", "bin_radius", ")", ",", "min", "(", "self", ".", "atom_bin_dimensions", "[", "1", "]", ",", "source_atom", ".", "bin", ".", "y", "+", "bin_radius", ")", "+", "1", ")", "zrange", "=", "range", "(", "max", "(", "0", ",", "source_atom", ".", "bin", ".", "z", "-", "bin_radius", ")", ",", "min", "(", "self", ".", "atom_bin_dimensions", "[", "2", "]", ",", "source_atom", ".", "bin", ".", "z", "+", "bin_radius", ")", "+", "1", ")", "for", "x", "in", "xrange", ":", "for", "y", "in", "yrange", ":", "for", "z", "in", "zrange", ":", "for", "atom", "in", "atom_bins", "[", "x", "]", "[", "y", "]", "[", "z", "]", ":", "if", "atom", "not", "in", "atom_hit_cache", ":", "if", "restrict_to_CA", ":", "if", "atom", ".", "name", "==", "'CA'", "and", "(", "source_atom", "-", "atom", "<=", "search_radius", ")", ":", "atom_hit_cache", ".", "add", "(", "atom", ")", "else", ":", "if", "(", "source_atom", "-", "atom", "<=", "search_radius", ")", "and", "(", "atom", ".", "name", "not", "in", "atom_names_to_exclude", ")", ":", "atom_hit_cache", ".", "add", "(", "atom", ")", "return", "atom_hit_cache" ]
69.114286
0.010192
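A short sketch of the hit-cache pattern the docstring above recommends; `grid` (an instance of this spatial-binning class) and `atoms` (the source atoms) are assumed to exist:

atom_hit_cache = set()
for source_atom in atoms:
    grid.find_atoms_near_atom(source_atom, 8.0, atom_hit_cache=atom_hit_cache)
# The shared cache skips distance checks for atoms already matched by an
# earlier source atom.
ca_hits = [a for a in atom_hit_cache if a.name == 'CA']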
def add(event, reactors, saltenv='base', test=None): ''' Add a new reactor CLI Example: .. code-block:: bash salt-run reactor.add 'salt/cloud/*/destroyed' reactors='/srv/reactor/destroy/*.sls' ''' if isinstance(reactors, string_types): reactors = [reactors] sevent = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=True) master_key = salt.utils.master.get_master_key('root', __opts__) __jid_event__.fire_event({'event': event, 'reactors': reactors, 'key': master_key}, 'salt/reactors/manage/add') res = sevent.get_event(wait=30, tag='salt/reactors/manage/add-complete') return res['result']
[ "def", "add", "(", "event", ",", "reactors", ",", "saltenv", "=", "'base'", ",", "test", "=", "None", ")", ":", "if", "isinstance", "(", "reactors", ",", "string_types", ")", ":", "reactors", "=", "[", "reactors", "]", "sevent", "=", "salt", ".", "utils", ".", "event", ".", "get_event", "(", "'master'", ",", "__opts__", "[", "'sock_dir'", "]", ",", "__opts__", "[", "'transport'", "]", ",", "opts", "=", "__opts__", ",", "listen", "=", "True", ")", "master_key", "=", "salt", ".", "utils", ".", "master", ".", "get_master_key", "(", "'root'", ",", "__opts__", ")", "__jid_event__", ".", "fire_event", "(", "{", "'event'", ":", "event", ",", "'reactors'", ":", "reactors", ",", "'key'", ":", "master_key", "}", ",", "'salt/reactors/manage/add'", ")", "res", "=", "sevent", ".", "get_event", "(", "wait", "=", "30", ",", "tag", "=", "'salt/reactors/manage/add-complete'", ")", "return", "res", "[", "'result'", "]" ]
28.793103
0.002317
def _run(self, thread_n): """The thread function.""" try: logger.debug("{0!r}: entering thread #{1}" .format(self, thread_n)) resolver = self._make_resolver() while True: request = self.queue.get() if request is None: break method, args = request logger.debug(" calling {0!r}.{1}{2!r}" .format(resolver, method, args)) getattr(resolver, method)(*args) # pylint: disable=W0142 self.queue.task_done() logger.debug("{0!r}: leaving thread #{1}" .format(self, thread_n)) finally: self.threads.remove(threading.currentThread())
[ "def", "_run", "(", "self", ",", "thread_n", ")", ":", "try", ":", "logger", ".", "debug", "(", "\"{0!r}: entering thread #{1}\"", ".", "format", "(", "self", ",", "thread_n", ")", ")", "resolver", "=", "self", ".", "_make_resolver", "(", ")", "while", "True", ":", "request", "=", "self", ".", "queue", ".", "get", "(", ")", "if", "request", "is", "None", ":", "break", "method", ",", "args", "=", "request", "logger", ".", "debug", "(", "\" calling {0!r}.{1}{2!r}\"", ".", "format", "(", "resolver", ",", "method", ",", "args", ")", ")", "getattr", "(", "resolver", ",", "method", ")", "(", "*", "args", ")", "# pylint: disable=W0142", "self", ".", "queue", ".", "task_done", "(", ")", "logger", ".", "debug", "(", "\"{0!r}: leaving thread #{1}\"", ".", "format", "(", "self", ",", "thread_n", ")", ")", "finally", ":", "self", ".", "threads", ".", "remove", "(", "threading", ".", "currentThread", "(", ")", ")" ]
44.368421
0.006969
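A hedged sketch of the shutdown convention the loop above implies (a None request is the stop sentinel); `pool` is a hypothetical instance of the owning class:

for _ in range(len(pool.threads)):
    pool.queue.put(None)     # one sentinel per worker; each _run() breaks on it
for t in list(pool.threads):
    t.join()                 # workers remove themselves from pool.threads on exit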
def get_primitive_structure(self, tolerance=0.25, use_site_props=False,
                                constrain_latt=None):
        """
        This finds a smaller unit cell than the input. Sometimes it doesn't
        find the smallest possible one, so this method is recursively called
        until it is unable to find a smaller cell.

        NOTE: if the tolerance is greater than 1/2 the minimum inter-site
        distance in the primitive cell, the algorithm will reject this
        lattice.

        Args:
            tolerance (float), Angstroms: Tolerance for each coordinate of a
                particular site. For example, [0.1, 0, 0.1] in cartesian
                coordinates will be considered to be on the same coordinates
                as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25.
            use_site_props (bool): Whether to account for site properties in
                differentiating sites.
            constrain_latt (list/dict): List of lattice parameters we want to
                preserve, e.g. ["alpha", "c"] or dict with the lattice
                parameter names as keys and values we want the parameters to
                be, e.g. {"alpha": 90, "c": 2.5}.

        Returns:
            The most primitive structure found.
        """
        if constrain_latt is None:
            constrain_latt = []

        def site_label(site):
            if not use_site_props:
                return site.species_string
            else:
                d = [site.species_string]
                for k in sorted(site.properties.keys()):
                    d.append(k + "=" + str(site.properties[k]))
                return ", ".join(d)

        # group sites by species string
        sites = sorted(self._sites, key=site_label)

        grouped_sites = [
            list(a[1])
            for a in itertools.groupby(sites, key=site_label)]
        grouped_fcoords = [np.array([s.frac_coords for s in g])
                           for g in grouped_sites]

        # min_vecs are approximate periodicities of the cell. The exact
        # periodicities from the supercell matrices are checked against these
        # first
        min_fcoords = min(grouped_fcoords, key=lambda x: len(x))
        min_vecs = min_fcoords - min_fcoords[0]

        # fractional tolerance in the supercell
        super_ftol = np.divide(tolerance, self.lattice.abc)
        super_ftol_2 = super_ftol * 2

        def pbc_coord_intersection(fc1, fc2, tol):
            """
            Returns the fractional coords in fc1 that have coordinates
            within tolerance to some coordinate in fc2
            """
            d = fc1[:, None, :] - fc2[None, :, :]
            d -= np.round(d)
            np.abs(d, d)
            return fc1[np.any(np.all(d < tol, axis=-1), axis=-1)]

        # here we reduce the number of min_vecs by enforcing that every
        # vector in min_vecs approximately maps each site onto a similar site.
        # The subsequent processing is O(fu^3 * min_vecs) = O(n^4) if we do no
        # reduction.
        # This reduction is O(n^3) so usually is an improvement. Using double
        # the tolerance because both vectors are approximate
        for g in sorted(grouped_fcoords, key=lambda x: len(x)):
            for f in g:
                min_vecs = pbc_coord_intersection(min_vecs, g - f,
                                                  super_ftol_2)

        def get_hnf(fu):
            """
            Returns all possible distinct supercell matrices given a
            number of formula units in the supercell. Batches the matrices
            by the values in the diagonal (for less numpy overhead).
            Computational complexity is O(n^3), and difficult to improve.
            Might be able to do something smart with checking combinations of a
            and b first, though unlikely to reduce to O(n^2).
""" def factors(n): for i in range(1, n + 1): if n % i == 0: yield i for det in factors(fu): if det == 1: continue for a in factors(det): for e in factors(det // a): g = det // a // e yield det, np.array( [[[a, b, c], [0, e, f], [0, 0, g]] for b, c, f in itertools.product(range(a), range(a), range(e))]) # we cant let sites match to their neighbors in the supercell grouped_non_nbrs = [] for gfcoords in grouped_fcoords: fdist = gfcoords[None, :, :] - gfcoords[:, None, :] fdist -= np.round(fdist) np.abs(fdist, fdist) non_nbrs = np.any(fdist > 2 * super_ftol[None, None, :], axis=-1) # since we want sites to match to themselves np.fill_diagonal(non_nbrs, True) grouped_non_nbrs.append(non_nbrs) num_fu = functools.reduce(gcd, map(len, grouped_sites)) for size, ms in get_hnf(num_fu): inv_ms = np.linalg.inv(ms) # find sets of lattice vectors that are are present in min_vecs dist = inv_ms[:, :, None, :] - min_vecs[None, None, :, :] dist -= np.round(dist) np.abs(dist, dist) is_close = np.all(dist < super_ftol, axis=-1) any_close = np.any(is_close, axis=-1) inds = np.all(any_close, axis=-1) for inv_m, m in zip(inv_ms[inds], ms[inds]): new_m = np.dot(inv_m, self.lattice.matrix) ftol = np.divide(tolerance, np.sqrt(np.sum(new_m ** 2, axis=1))) valid = True new_coords = [] new_sp = [] new_props = collections.defaultdict(list) for gsites, gfcoords, non_nbrs in zip(grouped_sites, grouped_fcoords, grouped_non_nbrs): all_frac = np.dot(gfcoords, m) # calculate grouping of equivalent sites, represented by # adjacency matrix fdist = all_frac[None, :, :] - all_frac[:, None, :] fdist = np.abs(fdist - np.round(fdist)) close_in_prim = np.all(fdist < ftol[None, None, :], axis=-1) groups = np.logical_and(close_in_prim, non_nbrs) # check that groups are correct if not np.all(np.sum(groups, axis=0) == size): valid = False break # check that groups are all cliques for g in groups: if not np.all(groups[g][:, g]): valid = False break if not valid: break # add the new sites, averaging positions added = np.zeros(len(gsites)) new_fcoords = all_frac % 1 for i, group in enumerate(groups): if not added[i]: added[group] = True inds = np.where(group)[0] coords = new_fcoords[inds[0]] for n, j in enumerate(inds[1:]): offset = new_fcoords[j] - coords coords += (offset - np.round(offset)) / (n + 2) new_sp.append(gsites[inds[0]].species) for k in gsites[inds[0]].properties: new_props[k].append(gsites[inds[0]].properties[k]) new_coords.append(coords) if valid: inv_m = np.linalg.inv(m) new_l = Lattice(np.dot(inv_m, self.lattice.matrix)) s = Structure(new_l, new_sp, new_coords, site_properties=new_props, coords_are_cartesian=False) # Default behavior p = s.get_primitive_structure( tolerance=tolerance, use_site_props=use_site_props, constrain_latt=constrain_latt ).get_reduced_structure() if not constrain_latt: return p # Only return primitive structures that # satisfy the restriction condition p_latt, s_latt = p.lattice, self.lattice if type(constrain_latt).__name__ == "list": if all([getattr(p_latt, p) == getattr(s_latt, p) for p in constrain_latt]): return p elif type(constrain_latt).__name__ == "dict": if all([getattr(p_latt, p) == constrain_latt[p] for p in constrain_latt.keys()]): return p return self.copy()
[ "def", "get_primitive_structure", "(", "self", ",", "tolerance", "=", "0.25", ",", "use_site_props", "=", "False", ",", "constrain_latt", "=", "None", ")", ":", "if", "constrain_latt", "is", "None", ":", "constrain_latt", "=", "[", "]", "def", "site_label", "(", "site", ")", ":", "if", "not", "use_site_props", ":", "return", "site", ".", "species_string", "else", ":", "d", "=", "[", "site", ".", "species_string", "]", "for", "k", "in", "sorted", "(", "site", ".", "properties", ".", "keys", "(", ")", ")", ":", "d", ".", "append", "(", "k", "+", "\"=\"", "+", "str", "(", "site", ".", "properties", "[", "k", "]", ")", ")", "return", "\", \"", ".", "join", "(", "d", ")", "# group sites by species string", "sites", "=", "sorted", "(", "self", ".", "_sites", ",", "key", "=", "site_label", ")", "grouped_sites", "=", "[", "list", "(", "a", "[", "1", "]", ")", "for", "a", "in", "itertools", ".", "groupby", "(", "sites", ",", "key", "=", "site_label", ")", "]", "grouped_fcoords", "=", "[", "np", ".", "array", "(", "[", "s", ".", "frac_coords", "for", "s", "in", "g", "]", ")", "for", "g", "in", "grouped_sites", "]", "# min_vecs are approximate periodicities of the cell. The exact", "# periodicities from the supercell matrices are checked against these", "# first", "min_fcoords", "=", "min", "(", "grouped_fcoords", ",", "key", "=", "lambda", "x", ":", "len", "(", "x", ")", ")", "min_vecs", "=", "min_fcoords", "-", "min_fcoords", "[", "0", "]", "# fractional tolerance in the supercell", "super_ftol", "=", "np", ".", "divide", "(", "tolerance", ",", "self", ".", "lattice", ".", "abc", ")", "super_ftol_2", "=", "super_ftol", "*", "2", "def", "pbc_coord_intersection", "(", "fc1", ",", "fc2", ",", "tol", ")", ":", "\"\"\"\n Returns the fractional coords in fc1 that have coordinates\n within tolerance to some coordinate in fc2\n \"\"\"", "d", "=", "fc1", "[", ":", ",", "None", ",", ":", "]", "-", "fc2", "[", "None", ",", ":", ",", ":", "]", "d", "-=", "np", ".", "round", "(", "d", ")", "np", ".", "abs", "(", "d", ",", "d", ")", "return", "fc1", "[", "np", ".", "any", "(", "np", ".", "all", "(", "d", "<", "tol", ",", "axis", "=", "-", "1", ")", ",", "axis", "=", "-", "1", ")", "]", "# here we reduce the number of min_vecs by enforcing that every", "# vector in min_vecs approximately maps each site onto a similar site.", "# The subsequent processing is O(fu^3 * min_vecs) = O(n^4) if we do no", "# reduction.", "# This reduction is O(n^3) so usually is an improvement. Using double", "# the tolerance because both vectors are approximate", "for", "g", "in", "sorted", "(", "grouped_fcoords", ",", "key", "=", "lambda", "x", ":", "len", "(", "x", ")", ")", ":", "for", "f", "in", "g", ":", "min_vecs", "=", "pbc_coord_intersection", "(", "min_vecs", ",", "g", "-", "f", ",", "super_ftol_2", ")", "def", "get_hnf", "(", "fu", ")", ":", "\"\"\"\n Returns all possible distinct supercell matrices given a\n number of formula units in the supercell. 
Batches the matrices\n by the values in the diagonal (for less numpy overhead).\n Computational complexity is O(n^3), and difficult to improve.\n Might be able to do something smart with checking combinations of a\n and b first, though unlikely to reduce to O(n^2).\n \"\"\"", "def", "factors", "(", "n", ")", ":", "for", "i", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "if", "n", "%", "i", "==", "0", ":", "yield", "i", "for", "det", "in", "factors", "(", "fu", ")", ":", "if", "det", "==", "1", ":", "continue", "for", "a", "in", "factors", "(", "det", ")", ":", "for", "e", "in", "factors", "(", "det", "//", "a", ")", ":", "g", "=", "det", "//", "a", "//", "e", "yield", "det", ",", "np", ".", "array", "(", "[", "[", "[", "a", ",", "b", ",", "c", "]", ",", "[", "0", ",", "e", ",", "f", "]", ",", "[", "0", ",", "0", ",", "g", "]", "]", "for", "b", ",", "c", ",", "f", "in", "itertools", ".", "product", "(", "range", "(", "a", ")", ",", "range", "(", "a", ")", ",", "range", "(", "e", ")", ")", "]", ")", "# we cant let sites match to their neighbors in the supercell", "grouped_non_nbrs", "=", "[", "]", "for", "gfcoords", "in", "grouped_fcoords", ":", "fdist", "=", "gfcoords", "[", "None", ",", ":", ",", ":", "]", "-", "gfcoords", "[", ":", ",", "None", ",", ":", "]", "fdist", "-=", "np", ".", "round", "(", "fdist", ")", "np", ".", "abs", "(", "fdist", ",", "fdist", ")", "non_nbrs", "=", "np", ".", "any", "(", "fdist", ">", "2", "*", "super_ftol", "[", "None", ",", "None", ",", ":", "]", ",", "axis", "=", "-", "1", ")", "# since we want sites to match to themselves", "np", ".", "fill_diagonal", "(", "non_nbrs", ",", "True", ")", "grouped_non_nbrs", ".", "append", "(", "non_nbrs", ")", "num_fu", "=", "functools", ".", "reduce", "(", "gcd", ",", "map", "(", "len", ",", "grouped_sites", ")", ")", "for", "size", ",", "ms", "in", "get_hnf", "(", "num_fu", ")", ":", "inv_ms", "=", "np", ".", "linalg", ".", "inv", "(", "ms", ")", "# find sets of lattice vectors that are are present in min_vecs", "dist", "=", "inv_ms", "[", ":", ",", ":", ",", "None", ",", ":", "]", "-", "min_vecs", "[", "None", ",", "None", ",", ":", ",", ":", "]", "dist", "-=", "np", ".", "round", "(", "dist", ")", "np", ".", "abs", "(", "dist", ",", "dist", ")", "is_close", "=", "np", ".", "all", "(", "dist", "<", "super_ftol", ",", "axis", "=", "-", "1", ")", "any_close", "=", "np", ".", "any", "(", "is_close", ",", "axis", "=", "-", "1", ")", "inds", "=", "np", ".", "all", "(", "any_close", ",", "axis", "=", "-", "1", ")", "for", "inv_m", ",", "m", "in", "zip", "(", "inv_ms", "[", "inds", "]", ",", "ms", "[", "inds", "]", ")", ":", "new_m", "=", "np", ".", "dot", "(", "inv_m", ",", "self", ".", "lattice", ".", "matrix", ")", "ftol", "=", "np", ".", "divide", "(", "tolerance", ",", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "new_m", "**", "2", ",", "axis", "=", "1", ")", ")", ")", "valid", "=", "True", "new_coords", "=", "[", "]", "new_sp", "=", "[", "]", "new_props", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "gsites", ",", "gfcoords", ",", "non_nbrs", "in", "zip", "(", "grouped_sites", ",", "grouped_fcoords", ",", "grouped_non_nbrs", ")", ":", "all_frac", "=", "np", ".", "dot", "(", "gfcoords", ",", "m", ")", "# calculate grouping of equivalent sites, represented by", "# adjacency matrix", "fdist", "=", "all_frac", "[", "None", ",", ":", ",", ":", "]", "-", "all_frac", "[", ":", ",", "None", ",", ":", "]", "fdist", "=", "np", ".", "abs", "(", "fdist", "-", "np", ".", "round", "(", "fdist", ")", 
")", "close_in_prim", "=", "np", ".", "all", "(", "fdist", "<", "ftol", "[", "None", ",", "None", ",", ":", "]", ",", "axis", "=", "-", "1", ")", "groups", "=", "np", ".", "logical_and", "(", "close_in_prim", ",", "non_nbrs", ")", "# check that groups are correct", "if", "not", "np", ".", "all", "(", "np", ".", "sum", "(", "groups", ",", "axis", "=", "0", ")", "==", "size", ")", ":", "valid", "=", "False", "break", "# check that groups are all cliques", "for", "g", "in", "groups", ":", "if", "not", "np", ".", "all", "(", "groups", "[", "g", "]", "[", ":", ",", "g", "]", ")", ":", "valid", "=", "False", "break", "if", "not", "valid", ":", "break", "# add the new sites, averaging positions", "added", "=", "np", ".", "zeros", "(", "len", "(", "gsites", ")", ")", "new_fcoords", "=", "all_frac", "%", "1", "for", "i", ",", "group", "in", "enumerate", "(", "groups", ")", ":", "if", "not", "added", "[", "i", "]", ":", "added", "[", "group", "]", "=", "True", "inds", "=", "np", ".", "where", "(", "group", ")", "[", "0", "]", "coords", "=", "new_fcoords", "[", "inds", "[", "0", "]", "]", "for", "n", ",", "j", "in", "enumerate", "(", "inds", "[", "1", ":", "]", ")", ":", "offset", "=", "new_fcoords", "[", "j", "]", "-", "coords", "coords", "+=", "(", "offset", "-", "np", ".", "round", "(", "offset", ")", ")", "/", "(", "n", "+", "2", ")", "new_sp", ".", "append", "(", "gsites", "[", "inds", "[", "0", "]", "]", ".", "species", ")", "for", "k", "in", "gsites", "[", "inds", "[", "0", "]", "]", ".", "properties", ":", "new_props", "[", "k", "]", ".", "append", "(", "gsites", "[", "inds", "[", "0", "]", "]", ".", "properties", "[", "k", "]", ")", "new_coords", ".", "append", "(", "coords", ")", "if", "valid", ":", "inv_m", "=", "np", ".", "linalg", ".", "inv", "(", "m", ")", "new_l", "=", "Lattice", "(", "np", ".", "dot", "(", "inv_m", ",", "self", ".", "lattice", ".", "matrix", ")", ")", "s", "=", "Structure", "(", "new_l", ",", "new_sp", ",", "new_coords", ",", "site_properties", "=", "new_props", ",", "coords_are_cartesian", "=", "False", ")", "# Default behavior", "p", "=", "s", ".", "get_primitive_structure", "(", "tolerance", "=", "tolerance", ",", "use_site_props", "=", "use_site_props", ",", "constrain_latt", "=", "constrain_latt", ")", ".", "get_reduced_structure", "(", ")", "if", "not", "constrain_latt", ":", "return", "p", "# Only return primitive structures that", "# satisfy the restriction condition", "p_latt", ",", "s_latt", "=", "p", ".", "lattice", ",", "self", ".", "lattice", "if", "type", "(", "constrain_latt", ")", ".", "__name__", "==", "\"list\"", ":", "if", "all", "(", "[", "getattr", "(", "p_latt", ",", "p", ")", "==", "getattr", "(", "s_latt", ",", "p", ")", "for", "p", "in", "constrain_latt", "]", ")", ":", "return", "p", "elif", "type", "(", "constrain_latt", ")", ".", "__name__", "==", "\"dict\"", ":", "if", "all", "(", "[", "getattr", "(", "p_latt", ",", "p", ")", "==", "constrain_latt", "[", "p", "]", "for", "p", "in", "constrain_latt", ".", "keys", "(", ")", "]", ")", ":", "return", "p", "return", "self", ".", "copy", "(", ")" ]
44.597015
0.000982
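A hedged usage sketch for get_primitive_structure above; the import path and the POSCAR filename are assumptions (recent pymatgen releases expose Structure under pymatgen.core, older ones under the top-level package):

from pymatgen.core import Structure

s = Structure.from_file("POSCAR")                 # hypothetical input structure
prim = s.get_primitive_structure(tolerance=0.25)  # default tolerance, as above
# Only accept a primitive cell whose c lattice parameter matches the input:
prim_c = s.get_primitive_structure(constrain_latt={"c": s.lattice.c})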
def run(self):
        """ Run the server. Returns a system error code. """
        normalized = os.path.normpath(self.path) + ("/" if self.path.endswith("/") else "")
        if self.path != normalized:
            sys.stderr.write("Please use full path '%s'" % (normalized,))
            return -1

        self.butterStore = ButterStore.ButterStore(None, self.path, self.mode, dryrun=False)
        # self.butterStore.ignoreExtraVolumes = True

        self.toObj = _Arg2Obj(self.butterStore)
        self.toDict = _Obj2Dict()

        self.running = True

        with self.butterStore:
            with self:
                while self.running:
                    self._processCommand()

        return 0
[ "def", "run", "(", "self", ")", ":", "normalized", "=", "os", ".", "path", ".", "normpath", "(", "self", ".", "path", ")", "+", "(", "\"/\"", "if", "self", ".", "path", ".", "endswith", "(", "\"/\"", ")", "else", "\"\"", ")", "if", "self", ".", "path", "!=", "normalized", ":", "sys", ".", "stderr", ".", "write", "(", "\"Please use full path '%s'\"", "%", "(", "normalized", ",", ")", ")", "return", "-", "1", "self", ".", "butterStore", "=", "ButterStore", ".", "ButterStore", "(", "None", ",", "self", ".", "path", ",", "self", ".", "mode", ",", "dryrun", "=", "False", ")", "# self.butterStore.ignoreExtraVolumes = True", "self", ".", "toObj", "=", "_Arg2Obj", "(", "self", ".", "butterStore", ")", "self", ".", "toDict", "=", "_Obj2Dict", "(", ")", "self", ".", "running", "=", "True", "with", "self", ".", "butterStore", ":", "with", "self", ":", "while", "self", ".", "running", ":", "self", ".", "_processCommand", "(", ")", "return", "0" ]
33.047619
0.005602
def _uninstall(action='remove', name=None, pkgs=None, **kwargs):
    '''
    remove and purge do identical things but with different pacman commands;
    this function performs the common logic.
    '''
    try:
        pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
    except MinionError as exc:
        raise CommandExecutionError(exc)

    old = list_pkgs()
    targets = [x for x in pkg_params if x in old]
    if not targets:
        return {}

    remove_arg = '-Rsc' if action == 'purge' else '-R'

    cmd = []
    if salt.utils.systemd.has_scope(__context__) \
            and __salt__['config.get']('systemd.scope', True):
        cmd.extend(['systemd-run', '--scope'])
    cmd.extend(['pacman', remove_arg, '--noprogressbar', '--noconfirm'])
    cmd.extend(targets)

    if 'root' in kwargs:
        cmd.extend(('-r', kwargs['root']))

    out = __salt__['cmd.run_all'](
        cmd,
        output_loglevel='trace',
        python_shell=False
    )

    if out['retcode'] != 0 and out['stderr']:
        errors = [out['stderr']]
    else:
        errors = []

    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)

    if errors:
        raise CommandExecutionError(
            'Problem encountered removing package(s)',
            info={'errors': errors, 'changes': ret}
        )

    return ret
[ "def", "_uninstall", "(", "action", "=", "'remove'", ",", "name", "=", "None", ",", "pkgs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "try", ":", "pkg_params", "=", "__salt__", "[", "'pkg_resource.parse_targets'", "]", "(", "name", ",", "pkgs", ")", "[", "0", "]", "except", "MinionError", "as", "exc", ":", "raise", "CommandExecutionError", "(", "exc", ")", "old", "=", "list_pkgs", "(", ")", "targets", "=", "[", "x", "for", "x", "in", "pkg_params", "if", "x", "in", "old", "]", "if", "not", "targets", ":", "return", "{", "}", "remove_arg", "=", "'-Rsc'", "if", "action", "==", "'purge'", "else", "'-R'", "cmd", "=", "[", "]", "if", "salt", ".", "utils", ".", "systemd", ".", "has_scope", "(", "__context__", ")", "and", "__salt__", "[", "'config.get'", "]", "(", "'systemd.scope'", ",", "True", ")", ":", "cmd", ".", "extend", "(", "[", "'systemd-run'", ",", "'--scope'", "]", ")", "cmd", ".", "extend", "(", "[", "'pacman'", ",", "remove_arg", ",", "'--noprogressbar'", ",", "'--noconfirm'", "]", ")", "cmd", ".", "extend", "(", "targets", ")", "if", "'root'", "in", "kwargs", ":", "cmd", ".", "extend", "(", "(", "'-r'", ",", "kwargs", "[", "'root'", "]", ")", ")", "out", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "output_loglevel", "=", "'trace'", ",", "python_shell", "=", "False", ")", "if", "out", "[", "'retcode'", "]", "!=", "0", "and", "out", "[", "'stderr'", "]", ":", "errors", "=", "[", "out", "[", "'stderr'", "]", "]", "else", ":", "errors", "=", "[", "]", "__context__", ".", "pop", "(", "'pkg.list_pkgs'", ",", "None", ")", "new", "=", "list_pkgs", "(", ")", "ret", "=", "salt", ".", "utils", ".", "data", ".", "compare_dicts", "(", "old", ",", "new", ")", "if", "errors", ":", "raise", "CommandExecutionError", "(", "'Problem encountered removing package(s)'", ",", "info", "=", "{", "'errors'", ":", "errors", ",", "'changes'", ":", "ret", "}", ")", "return", "ret" ]
27.44898
0.000718
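A hedged sketch of how this private helper is reached in practice; the assumption, based on salt's usual pkg-module layout, is that the public remove and purge functions delegate to _uninstall:

salt '*' pkg.remove vim    # -> _uninstall(action='remove', name='vim')
salt '*' pkg.purge vim     # -> _uninstall(action='purge', name='vim')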
def getFullPathToSnapshot(self, n): """Get the full path to snapshot n.""" return os.path.join(self.snapDir, str(n))
[ "def", "getFullPathToSnapshot", "(", "self", ",", "n", ")", ":", "return", "os", ".", "path", ".", "join", "(", "self", ".", "snapDir", ",", "str", "(", "n", ")", ")" ]
39.333333
0.033333
def ufo_create_background_layer_for_all_glyphs(ufo_font): # type: (defcon.Font) -> None """Create a background layer for all glyphs in ufo_font if not present to reduce roundtrip differences.""" if "public.background" in ufo_font.layers: background = ufo_font.layers["public.background"] else: background = ufo_font.newLayer("public.background") for glyph in ufo_font: if glyph.name not in background: background.newGlyph(glyph.name)
[ "def", "ufo_create_background_layer_for_all_glyphs", "(", "ufo_font", ")", ":", "# type: (defcon.Font) -> None", "if", "\"public.background\"", "in", "ufo_font", ".", "layers", ":", "background", "=", "ufo_font", ".", "layers", "[", "\"public.background\"", "]", "else", ":", "background", "=", "ufo_font", ".", "newLayer", "(", "\"public.background\"", ")", "for", "glyph", "in", "ufo_font", ":", "if", "glyph", ".", "name", "not", "in", "background", ":", "background", ".", "newGlyph", "(", "glyph", ".", "name", ")" ]
37.153846
0.00202
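A hedged usage sketch for the record above, assuming a defcon Font loaded from a UFO on disk (the path is hypothetical):

import defcon

font = defcon.Font("MyFont.ufo")
ufo_create_background_layer_for_all_glyphs(font)
font.save()   # every glyph now has an entry in public.background, possibly empty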
def _setup_tls_files(self, files):
        """Initializes TLSFile objects with the paths given to this bundle"""
        for file_type in TLSFileType:
            if file_type.value in files:
                file_path = files[file_type.value]
                setattr(self, file_type.value,
                        TLSFile(file_path, file_type=file_type))
[ "def", "_setup_tls_files", "(", "self", ",", "files", ")", ":", "for", "file_type", "in", "TLSFileType", ":", "if", "file_type", ".", "value", "in", "files", ":", "file_path", "=", "files", "[", "file_type", ".", "value", "]", "setattr", "(", "self", ",", "file_type", ".", "value", ",", "TLSFile", "(", "file_path", ",", "file_type", "=", "file_type", ")", ")" ]
43.25
0.005666
def _finish_transaction_with_retry(self, command_name, explict_retry):
        """Run commit or abort with one retry after any retryable error.

        :Parameters:
          - `command_name`: Either "commitTransaction" or "abortTransaction".
          - `explict_retry`: True when this is an explicit commit retry attempt,
            i.e. the application called session.commit_transaction() twice.
        """
        # This can be refactored with MongoClient._retry_with_session.
        try:
            return self._finish_transaction(command_name, explict_retry)
        except ServerSelectionTimeoutError:
            raise
        except ConnectionFailure as exc:
            try:
                return self._finish_transaction(command_name, True)
            except ServerSelectionTimeoutError:
                # Raise the original error so the application can infer that
                # an attempt was made.
                raise exc
        except OperationFailure as exc:
            if exc.code not in _RETRYABLE_ERROR_CODES:
                raise
            try:
                return self._finish_transaction(command_name, True)
            except ServerSelectionTimeoutError:
                # Raise the original error so the application can infer that
                # an attempt was made.
                raise exc
[ "def", "_finish_transaction_with_retry", "(", "self", ",", "command_name", ",", "explict_retry", ")", ":", "# This can be refactored with MongoClient._retry_with_session.", "try", ":", "return", "self", ".", "_finish_transaction", "(", "command_name", ",", "explict_retry", ")", "except", "ServerSelectionTimeoutError", ":", "raise", "except", "ConnectionFailure", "as", "exc", ":", "try", ":", "return", "self", ".", "_finish_transaction", "(", "command_name", ",", "True", ")", "except", "ServerSelectionTimeoutError", ":", "# Raise the original error so the application can infer that", "# an attempt was made.", "raise", "exc", "except", "OperationFailure", "as", "exc", ":", "if", "exc", ".", "code", "not", "in", "_RETRYABLE_ERROR_CODES", ":", "raise", "try", ":", "return", "self", ".", "_finish_transaction", "(", "command_name", ",", "True", ")", "except", "ServerSelectionTimeoutError", ":", "# Raise the original error so the application can infer that", "# an attempt was made.", "raise", "exc" ]
45.103448
0.001497
def _find_next(server): """Finds the name of the next repository to run based on the *current* state of the database. """ from datetime import datetime #Re-load the database in case we have multiple instances of the script #running in memory. _load_db() result = None visited = [] if "status" in db: for reponame, status in db["status"].items(): vms("Checking cron status for {}: {}".format(reponame, status)) start = None if "started" not in status else status["started"] end = None if "end" not in status else status["end"] running = start is not None and end is not None and start > end add = False if not running and end is not None: #Check the last time it was run and see if enough time has #elapsed. elapsed = (datetime.now() - end).seconds/60 add = elapsed > server.cron.settings[reponame].frequency if not add: vms("'{}' skipped because the interval hasn't ".format(reponame) + "elapsed ({} vs. {})".format(elapsed, server.cron.settings[reponame].frequency)) elif end is None: add = True if add: result = reponame break visited.append(reponame) else: db["status"] = {} if result is None: #We still need to check the newly installed repos. for reponame, repo in server.repositories.items(): if reponame not in visited: #These are newly installed repos that have never run before. vms("Added '{}' as new repo for cron execution.".format(reponame)) result = reponame break return result
[ "def", "_find_next", "(", "server", ")", ":", "from", "datetime", "import", "datetime", "#Re-load the database in case we have multiple instances of the script", "#running in memory.", "_load_db", "(", ")", "result", "=", "None", "visited", "=", "[", "]", "if", "\"status\"", "in", "db", ":", "for", "reponame", ",", "status", "in", "db", "[", "\"status\"", "]", ".", "items", "(", ")", ":", "vms", "(", "\"Checking cron status for {}: {}\"", ".", "format", "(", "reponame", ",", "status", ")", ")", "start", "=", "None", "if", "\"started\"", "not", "in", "status", "else", "status", "[", "\"started\"", "]", "end", "=", "None", "if", "\"end\"", "not", "in", "status", "else", "status", "[", "\"end\"", "]", "running", "=", "start", "is", "not", "None", "and", "end", "is", "not", "None", "and", "start", ">", "end", "add", "=", "False", "if", "not", "running", "and", "end", "is", "not", "None", ":", "#Check the last time it was run and see if enough time has", "#elapsed.", "elapsed", "=", "(", "datetime", ".", "now", "(", ")", "-", "end", ")", ".", "seconds", "/", "60", "add", "=", "elapsed", ">", "server", ".", "cron", ".", "settings", "[", "reponame", "]", ".", "frequency", "if", "not", "add", ":", "vms", "(", "\"'{}' skipped because the interval hasn't \"", ".", "format", "(", "reponame", ")", "+", "\"elapsed ({} vs. {})\"", ".", "format", "(", "elapsed", ",", "server", ".", "cron", ".", "settings", "[", "reponame", "]", ".", "frequency", ")", ")", "elif", "end", "is", "None", ":", "add", "=", "True", "if", "add", ":", "result", "=", "reponame", "break", "visited", ".", "append", "(", "reponame", ")", "else", ":", "db", "[", "\"status\"", "]", "=", "{", "}", "if", "result", "is", "None", ":", "#We still need to check the newly installed repos. ", "for", "reponame", ",", "repo", "in", "server", ".", "repositories", ".", "items", "(", ")", ":", "if", "reponame", "not", "in", "visited", ":", "#These are newly installed repos that have never run before.", "vms", "(", "\"Added '{}' as new repo for cron execution.\"", ".", "format", "(", "reponame", ")", ")", "result", "=", "reponame", "break", "return", "result" ]
38.0625
0.008004
def clean(args): """ %prog clean fastafile Remove irregular chars in FASTA seqs. """ p = OptionParser(clean.__doc__) p.add_option("--fancy", default=False, action="store_true", help="Pretty print the sequence [default: %default]") p.add_option("--canonical", default=False, action="store_true", help="Use only acgtnACGTN [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args fw = must_open(opts.outfile, "w") if opts.fancy: for header, seq in iter_clean_fasta(fastafile): print(">" + header, file=fw) fancyprint(fw, seq) return 0 iterator = iter_canonical_fasta if opts.canonical else iter_clean_fasta for header, seq in iterator(fastafile): seq = Seq(seq) s = SeqRecord(seq, id=header, description="") SeqIO.write([s], fw, "fasta")
[ "def", "clean", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "clean", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--fancy\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Pretty print the sequence [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--canonical\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Use only acgtnACGTN [default: %default]\"", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "fastafile", ",", "=", "args", "fw", "=", "must_open", "(", "opts", ".", "outfile", ",", "\"w\"", ")", "if", "opts", ".", "fancy", ":", "for", "header", ",", "seq", "in", "iter_clean_fasta", "(", "fastafile", ")", ":", "print", "(", "\">\"", "+", "header", ",", "file", "=", "fw", ")", "fancyprint", "(", "fw", ",", "seq", ")", "return", "0", "iterator", "=", "iter_canonical_fasta", "if", "opts", ".", "canonical", "else", "iter_clean_fasta", "for", "header", ",", "seq", "in", "iterator", "(", "fastafile", ")", ":", "seq", "=", "Seq", "(", "seq", ")", "s", "=", "SeqRecord", "(", "seq", ",", "id", "=", "header", ",", "description", "=", "\"\"", ")", "SeqIO", ".", "write", "(", "[", "s", "]", ",", "fw", ",", "\"fasta\"", ")" ]
28.939394
0.001013
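A hedged command-line sketch for the clean action above; the module path follows jcvi's usual layout and input.fasta is hypothetical (set_outfile() is assumed to provide the -o flag):

python -m jcvi.formats.fasta clean input.fasta --canonical -o cleaned.fasta
python -m jcvi.formats.fasta clean input.fasta --fancy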
async def spawn_n(self, agent_cls, n, *args, **kwargs):
        '''Spawn *n* agents into the managed environment. This is a
        convenience function so that one does not have to repeatedly make
        connections to the environment to spawn multiple agents with the same
        parameters.

        See :py:meth:`~creamas.mp.EnvManager.spawn` for details.
        '''
        rets = []
        for _ in range(n):
            ret = await self.spawn(agent_cls, *args, **kwargs)
            rets.append(ret)
        return rets
[ "async", "def", "spawn_n", "(", "self", ",", "agent_cls", ",", "n", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "rets", "=", "[", "]", "for", "_", "in", "range", "(", "n", ")", ":", "ret", "=", "await", "self", ".", "spawn", "(", "agent_cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", "rets", ".", "append", "(", "ret", ")", "return", "rets" ]
42.416667
0.003846
def from_dict(d): """Transform the dict to a record object and return the record.""" query_params_match = d.get('@query_params_match') query_person_match = d.get('@query_person_match') valid_since = d.get('@valid_since') if valid_since: valid_since = str_to_datetime(valid_since) source = Source.from_dict(d.get('source', {})) fields = Record.fields_from_dict(d) return Record(source=source, fields=fields, query_params_match=query_params_match, query_person_match=query_person_match, valid_since=valid_since)
[ "def", "from_dict", "(", "d", ")", ":", "query_params_match", "=", "d", ".", "get", "(", "'@query_params_match'", ")", "query_person_match", "=", "d", ".", "get", "(", "'@query_person_match'", ")", "valid_since", "=", "d", ".", "get", "(", "'@valid_since'", ")", "if", "valid_since", ":", "valid_since", "=", "str_to_datetime", "(", "valid_since", ")", "source", "=", "Source", ".", "from_dict", "(", "d", ".", "get", "(", "'source'", ",", "{", "}", ")", ")", "fields", "=", "Record", ".", "fields_from_dict", "(", "d", ")", "return", "Record", "(", "source", "=", "source", ",", "fields", "=", "fields", ",", "query_params_match", "=", "query_params_match", ",", "query_person_match", "=", "query_person_match", ",", "valid_since", "=", "valid_since", ")" ]
49.384615
0.007645
def run(path, code=None, params=None, **meta): """Check code with mypy. :return list: List of errors. """ args = [path, '--follow-imports=skip', '--show-column-numbers'] stdout, stderr, status = api.run(args) messages = [] for line in stdout.split('\n'): line.strip() if not line: continue message = _MyPyMessage(line) if message.valid: if message.message_type == 'note': if messages[-1].line_num == message.line_num: messages[-1].add_note(message.text) else: messages.append(message) return [m.to_result() for m in messages]
[ "def", "run", "(", "path", ",", "code", "=", "None", ",", "params", "=", "None", ",", "*", "*", "meta", ")", ":", "args", "=", "[", "path", ",", "'--follow-imports=skip'", ",", "'--show-column-numbers'", "]", "stdout", ",", "stderr", ",", "status", "=", "api", ".", "run", "(", "args", ")", "messages", "=", "[", "]", "for", "line", "in", "stdout", ".", "split", "(", "'\\n'", ")", ":", "line", ".", "strip", "(", ")", "if", "not", "line", ":", "continue", "message", "=", "_MyPyMessage", "(", "line", ")", "if", "message", ".", "valid", ":", "if", "message", ".", "message_type", "==", "'note'", ":", "if", "messages", "[", "-", "1", "]", ".", "line_num", "==", "message", ".", "line_num", ":", "messages", "[", "-", "1", "]", ".", "add_note", "(", "message", ".", "text", ")", "else", ":", "messages", ".", "append", "(", "message", ")", "return", "[", "m", ".", "to_result", "(", ")", "for", "m", "in", "messages", "]" ]
34.809524
0.002663
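A hedged usage sketch for the mypy runner above; example.py is hypothetical, and each returned entry comes from _MyPyMessage.to_result():

results = run("example.py")
for result in results:
    print(result)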
async def shutdown(self, force = False, connmark = -1):
        '''
        Can be called without a delegate
        '''
        if connmark is None:
            connmark = self.connmark
        self.scheduler.emergesend(ConnectionControlEvent(self, ConnectionControlEvent.SHUTDOWN, force, connmark))
[ "async", "def", "shutdown", "(", "self", ",", "force", "=", "False", ",", "connmark", "=", "-", "1", ")", ":", "if", "connmark", "is", "None", ":", "connmark", "=", "self", ".", "connmark", "self", ".", "scheduler", ".", "emergesend", "(", "ConnectionControlEvent", "(", "self", ",", "ConnectionControlEvent", ".", "SHUTDOWN", ",", "force", ",", "connmark", ")", ")" ]
41
0.023891
def batch(args):
    """
    %prog batch all.cds *.anchors

    Compute Ks values for a set of anchors files. This will generate a bunch
    of work directories for each comparison. The anchorsfile should be in the
    form of specie1.species2.anchors.
    """
    from jcvi.apps.grid import MakeManager

    p = OptionParser(batch.__doc__)
    opts, args = p.parse_args(args)

    if len(args) < 2:
        sys.exit(not p.print_help())

    cdsfile = args[0]
    anchors = args[1:]
    workdirs = [".".join(op.basename(x).split(".")[:2]) for x in anchors]
    for wd in workdirs:
        mkdir(wd)

    mm = MakeManager()
    for wd, ac in zip(workdirs, anchors):
        pairscdsfile = wd + ".cds.fasta"
        cmd = "python -m jcvi.apps.ks prepare {} {} -o {}".\
                format(ac, cdsfile, pairscdsfile)
        mm.add((ac, cdsfile), pairscdsfile, cmd)
        ksfile = wd + ".ks"
        cmd = "python -m jcvi.apps.ks calc {} -o {} --workdir {}".\
                format(pairscdsfile, ksfile, wd)
        mm.add(pairscdsfile, ksfile, cmd)
    mm.write()
[ "def", "batch", "(", "args", ")", ":", "from", "jcvi", ".", "apps", ".", "grid", "import", "MakeManager", "p", "=", "OptionParser", "(", "batch", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "cdsfile", "=", "args", "[", "0", "]", "anchors", "=", "args", "[", "1", ":", "]", "workdirs", "=", "[", "\".\"", ".", "join", "(", "op", ".", "basename", "(", "x", ")", ".", "split", "(", "\".\"", ")", "[", ":", "2", "]", ")", "for", "x", "in", "anchors", "]", "for", "wd", "in", "workdirs", ":", "mkdir", "(", "wd", ")", "mm", "=", "MakeManager", "(", ")", "for", "wd", ",", "ac", "in", "zip", "(", "workdirs", ",", "anchors", ")", ":", "pairscdsfile", "=", "wd", "+", "\".cds.fasta\"", "cmd", "=", "\"python -m jcvi.apps.ks prepare {} {} -o {}\"", ".", "format", "(", "ac", ",", "cdsfile", ",", "pairscdsfile", ")", "mm", ".", "add", "(", "(", "ac", ",", "cdsfile", ")", ",", "pairscdsfile", ",", "cmd", ")", "ksfile", "=", "wd", "+", "\".ks\"", "cmd", "=", "\"python -m jcvi.apps.ks calc {} -o {} --workdir {}\"", ".", "format", "(", "pairscdsfile", ",", "ksfile", ",", "wd", ")", "mm", ".", "add", "(", "pairscdsfile", ",", "ksfile", ",", "cmd", ")", "mm", ".", "write", "(", ")" ]
31.363636
0.003749
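A hedged end-to-end sketch for the batch action above; the anchors filename is hypothetical, and the assumption is that MakeManager.write() emits a makefile of the queued prepare/calc jobs:

python -m jcvi.apps.ks batch all.cds grape.peach.anchors
make -j 8    # run the generated prepare/calc jobs in parallel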
def _set_igmps_static_group(self, v, load=False): """ Setter method for igmps_static_group, mapped from YANG variable /bridge_domain/ip/bd_ip_igmp/snooping/igmps_static_group (list) If this variable is read-only (config: false) in the source YANG file, then _set_igmps_static_group is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_igmps_static_group() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("igmps_mcast_address igmps_interface igmps_if_type igmps_value",igmps_static_group.igmps_static_group, yang_name="igmps_static-group", rest_name="static-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='igmps-mcast-address igmps-interface igmps-if-type igmps-value', extensions={u'tailf-common': {u'callpoint': u'BDIgmpsSg', u'cli-suppress-mode': None, u'alt-name': u'static-group', u'info': u'Static Group to be Joined', u'cli-suppress-list-no': None}}), is_container='list', yang_name="igmps_static-group", rest_name="static-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'BDIgmpsSg', u'cli-suppress-mode': None, u'alt-name': u'static-group', u'info': u'Static Group to be Joined', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """igmps_static_group must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("igmps_mcast_address igmps_interface igmps_if_type igmps_value",igmps_static_group.igmps_static_group, yang_name="igmps_static-group", rest_name="static-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='igmps-mcast-address igmps-interface igmps-if-type igmps-value', extensions={u'tailf-common': {u'callpoint': u'BDIgmpsSg', u'cli-suppress-mode': None, u'alt-name': u'static-group', u'info': u'Static Group to be Joined', u'cli-suppress-list-no': None}}), is_container='list', yang_name="igmps_static-group", rest_name="static-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'BDIgmpsSg', u'cli-suppress-mode': None, u'alt-name': u'static-group', u'info': u'Static Group to be Joined', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='list', is_config=True)""", }) self.__igmps_static_group = t if hasattr(self, '_set'): self._set()
[ "def", "_set_igmps_static_group", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"igmps_mcast_address igmps_interface igmps_if_type igmps_value\"", ",", "igmps_static_group", ".", "igmps_static_group", ",", "yang_name", "=", "\"igmps_static-group\"", ",", "rest_name", "=", "\"static-group\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'igmps-mcast-address igmps-interface igmps-if-type igmps-value'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'BDIgmpsSg'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'alt-name'", ":", "u'static-group'", ",", "u'info'", ":", "u'Static Group to be Joined'", ",", "u'cli-suppress-list-no'", ":", "None", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"igmps_static-group\"", ",", "rest_name", "=", "\"static-group\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'BDIgmpsSg'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'alt-name'", ":", "u'static-group'", ",", "u'info'", ":", "u'Static Group to be Joined'", ",", "u'cli-suppress-list-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-igmp-snooping'", ",", "defining_module", "=", "'brocade-igmp-snooping'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"igmps_static_group must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"igmps_mcast_address igmps_interface igmps_if_type igmps_value\",igmps_static_group.igmps_static_group, yang_name=\"igmps_static-group\", rest_name=\"static-group\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='igmps-mcast-address igmps-interface igmps-if-type igmps-value', extensions={u'tailf-common': {u'callpoint': u'BDIgmpsSg', u'cli-suppress-mode': None, u'alt-name': u'static-group', u'info': u'Static Group to be Joined', u'cli-suppress-list-no': None}}), is_container='list', yang_name=\"igmps_static-group\", rest_name=\"static-group\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'BDIgmpsSg', u'cli-suppress-mode': None, u'alt-name': u'static-group', u'info': u'Static Group to be Joined', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__igmps_static_group", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
131.454545
0.003776
def update(self, value=None): """ Update progress bar via the console or notebook accordingly. """ # Update self.value if value is None: value = self._current_value + 1 self._current_value = value # Choose the appropriate environment if self._ipython_widget: try: self._update_ipython_widget(value) except RuntimeError: pass else: self._update_console(value)
[ "def", "update", "(", "self", ",", "value", "=", "None", ")", ":", "# Update self.value", "if", "value", "is", "None", ":", "value", "=", "self", ".", "_current_value", "+", "1", "self", ".", "_current_value", "=", "value", "# Choose the appropriate environment", "if", "self", ".", "_ipython_widget", ":", "try", ":", "self", ".", "_update_ipython_widget", "(", "value", ")", "except", "RuntimeError", ":", "pass", "else", ":", "self", ".", "_update_console", "(", "value", ")" ]
27.5
0.003906
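A hedged usage sketch for update() above, assuming an astropy-style ProgressBar constructed with a total count; process() is a hypothetical work function:

bar = ProgressBar(1000)
for item in range(1000):
    process(item)
    bar.update()   # value defaults to the current value plus one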
def expand_curlys(s):
    """Takes a string and returns a list of options:

    Example
    -------
    >>> expand_curlys("py{26, 27}")
    ['py26', 'py27']
    """
    from functools import reduce
    curleys = list(re.finditer(r"{[^{}]*}", s))
    return reduce(_replace_curly, reversed(curleys), [s])
[ "def", "expand_curlys", "(", "s", ")", ":", "from", "functools", "import", "reduce", "curleys", "=", "list", "(", "re", ".", "finditer", "(", "r\"{[^{}]*}\"", ",", "s", ")", ")", "return", "reduce", "(", "_replace_curly", ",", "reversed", "(", "curleys", ")", ",", "[", "s", "]", ")" ]
24.083333
0.003333
def id(self, opts_id): """Handles tracking and cleanup of custom ids.""" old_id = self._id self._id = opts_id if old_id is not None: cleanup_custom_options(old_id) if opts_id is not None and opts_id != old_id: if opts_id not in Store._weakrefs: Store._weakrefs[opts_id] = [] ref = weakref.ref(self, partial(cleanup_custom_options, opts_id)) Store._weakrefs[opts_id].append(ref)
[ "def", "id", "(", "self", ",", "opts_id", ")", ":", "old_id", "=", "self", ".", "_id", "self", ".", "_id", "=", "opts_id", "if", "old_id", "is", "not", "None", ":", "cleanup_custom_options", "(", "old_id", ")", "if", "opts_id", "is", "not", "None", "and", "opts_id", "!=", "old_id", ":", "if", "opts_id", "not", "in", "Store", ".", "_weakrefs", ":", "Store", ".", "_weakrefs", "[", "opts_id", "]", "=", "[", "]", "ref", "=", "weakref", ".", "ref", "(", "self", ",", "partial", "(", "cleanup_custom_options", ",", "opts_id", ")", ")", "Store", ".", "_weakrefs", "[", "opts_id", "]", ".", "append", "(", "ref", ")" ]
42.818182
0.004158
def create_multi_weather(df, rename_dc): """Create a list of oemof weather objects if the given geometry is a polygon """ weather_list = [] # Create a pandas.DataFrame with the time series of the weather data set # for each data set and append them to a list. for gid in df.gid.unique(): gid_df = df[df.gid == gid] obj = create_single_weather(gid_df, rename_dc) weather_list.append(obj) return weather_list
[ "def", "create_multi_weather", "(", "df", ",", "rename_dc", ")", ":", "weather_list", "=", "[", "]", "# Create a pandas.DataFrame with the time series of the weather data set", "# for each data set and append them to a list.", "for", "gid", "in", "df", ".", "gid", ".", "unique", "(", ")", ":", "gid_df", "=", "df", "[", "df", ".", "gid", "==", "gid", "]", "obj", "=", "create_single_weather", "(", "gid_df", ",", "rename_dc", ")", "weather_list", ".", "append", "(", "obj", ")", "return", "weather_list" ]
40.727273
0.004367
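A hedged note on the record above: filtering df once per gid is equivalent to a single pandas groupby, which avoids rescanning the frame for every group. A minimal sketch using the same df and rename_dc:

weather_list = [create_single_weather(gid_df, rename_dc)
                for _, gid_df in df.groupby("gid")]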