text: string (lengths 75 – 104k)
code_tokens: sequence
avg_line_len: float64 (7.91 – 980)
score: float64 (0 – 0.18)
def _uncompress_file(self, path):
    '''
    Writes the current file into a file in the temporary directory.
    If that doesn't work, create a new file in the CDFs directory.
    '''
    with self.file.open('rb') as f:
        if (self.cdfversion == 3):
            data_start, data_size, cType, _ = self._read_ccr(8)
        else:
            data_start, data_size, cType, _ = self._read_ccr2(8)
        if cType != 5:
            return
        f.seek(data_start)
        decompressed_data = gzip.decompress(f.read(data_size))

    newpath = pathlib.Path(tempfile.NamedTemporaryFile(suffix='.cdf').name)
    with newpath.open('wb') as g:
        g.write(bytearray.fromhex('cdf30001'))
        g.write(bytearray.fromhex('0000ffff'))
        g.write(decompressed_data)
    return newpath
[ "def", "_uncompress_file", "(", "self", ",", "path", ")", ":", "with", "self", ".", "file", ".", "open", "(", "'rb'", ")", "as", "f", ":", "if", "(", "self", ".", "cdfversion", "==", "3", ")", ":", "data_start", ",", "data_size", ",", "cType", ",", "_", "=", "self", ".", "_read_ccr", "(", "8", ")", "else", ":", "data_start", ",", "data_size", ",", "cType", ",", "_", "=", "self", ".", "_read_ccr2", "(", "8", ")", "if", "cType", "!=", "5", ":", "return", "f", ".", "seek", "(", "data_start", ")", "decompressed_data", "=", "gzip", ".", "decompress", "(", "f", ".", "read", "(", "data_size", ")", ")", "newpath", "=", "pathlib", ".", "Path", "(", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'.cdf'", ")", ".", "name", ")", "with", "newpath", ".", "open", "(", "'wb'", ")", "as", "g", ":", "g", ".", "write", "(", "bytearray", ".", "fromhex", "(", "'cdf30001'", ")", ")", "g", ".", "write", "(", "bytearray", ".", "fromhex", "(", "'0000ffff'", ")", ")", "g", ".", "write", "(", "decompressed_data", ")", "return", "newpath" ]
33.8
0.002301
def publishApp(self, app_info, map_info=None, fsInfo=None):
    """Publishes apps to AGOL/Portal

    Args:
        app_info (list): A list of JSON configuration apps to publish.
        map_info (list): Defaults to ``None``.
        fsInfo (list): Defaults to ``None``.
    Returns:
        dict: A dictionary of results objects.
    """
    if self.securityhandler is None:
        print("Security handler required")
        return
    appDet = None
    try:
        app_results = []
        if isinstance(app_info, list):
            for appDet in app_info:
                app_results.append(self._publishAppLogic(appDet=appDet, map_info=map_info, fsInfo=fsInfo))
        else:
            app_results.append(self._publishAppLogic(appDet=app_info, map_info=map_info, fsInfo=fsInfo))
        return app_results
    except (common.ArcRestHelperError) as e:
        raise e
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "publishApp",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        appDet = None
        del appDet
        gc.collect()
[ "def", "publishApp", "(", "self", ",", "app_info", ",", "map_info", "=", "None", ",", "fsInfo", "=", "None", ")", ":", "if", "self", ".", "securityhandler", "is", "None", ":", "print", "(", "\"Security handler required\"", ")", "return", "appDet", "=", "None", "try", ":", "app_results", "=", "[", "]", "if", "isinstance", "(", "app_info", ",", "list", ")", ":", "for", "appDet", "in", "app_info", ":", "app_results", ".", "append", "(", "self", ".", "_publishAppLogic", "(", "appDet", "=", "appDet", ",", "map_info", "=", "map_info", ",", "fsInfo", "=", "fsInfo", ")", ")", "else", ":", "app_results", ".", "append", "(", "self", ".", "_publishAppLogic", "(", "appDet", "=", "app_info", ",", "map_info", "=", "map_info", ",", "fsInfo", "=", "fsInfo", ")", ")", "return", "app_results", "except", "(", "common", ".", "ArcRestHelperError", ")", "as", "e", ":", "raise", "e", "except", "Exception", "as", "e", ":", "line", ",", "filename", ",", "synerror", "=", "trace", "(", ")", "raise", "common", ".", "ArcRestHelperError", "(", "{", "\"function\"", ":", "\"publishApp\"", ",", "\"line\"", ":", "line", ",", "\"filename\"", ":", "filename", ",", "\"synerror\"", ":", "synerror", ",", "}", ")", "finally", ":", "appDet", "=", "None", "del", "appDet", "gc", ".", "collect", "(", ")" ]
32.268293
0.00807
def put(self, thing_id='0', property_name=None):
    """
    Handle a PUT request.

    thing_id -- ID of the thing this request is for
    property_name -- the name of the property from the URL path
    """
    thing = self.get_thing(thing_id)
    if thing is None:
        self.set_status(404)
        return

    try:
        args = json.loads(self.request.body.decode())
    except ValueError:
        self.set_status(400)
        return

    if property_name not in args:
        self.set_status(400)
        return

    if thing.has_property(property_name):
        try:
            thing.set_property(property_name, args[property_name])
        except PropertyError:
            self.set_status(400)
            return

        self.set_header('Content-Type', 'application/json')
        self.write(json.dumps({
            property_name: thing.get_property(property_name),
        }))
    else:
        self.set_status(404)
[ "def", "put", "(", "self", ",", "thing_id", "=", "'0'", ",", "property_name", "=", "None", ")", ":", "thing", "=", "self", ".", "get_thing", "(", "thing_id", ")", "if", "thing", "is", "None", ":", "self", ".", "set_status", "(", "404", ")", "return", "try", ":", "args", "=", "json", ".", "loads", "(", "self", ".", "request", ".", "body", ".", "decode", "(", ")", ")", "except", "ValueError", ":", "self", ".", "set_status", "(", "400", ")", "return", "if", "property_name", "not", "in", "args", ":", "self", ".", "set_status", "(", "400", ")", "return", "if", "thing", ".", "has_property", "(", "property_name", ")", ":", "try", ":", "thing", ".", "set_property", "(", "property_name", ",", "args", "[", "property_name", "]", ")", "except", "PropertyError", ":", "self", ".", "set_status", "(", "400", ")", "return", "self", ".", "set_header", "(", "'Content-Type'", ",", "'application/json'", ")", "self", ".", "write", "(", "json", ".", "dumps", "(", "{", "property_name", ":", "thing", ".", "get_property", "(", "property_name", ")", ",", "}", ")", ")", "else", ":", "self", ".", "set_status", "(", "404", ")" ]
28.942857
0.00191
def poll(self):
    """Return pairs of run ids and results of finished event loops."""
    ret = self.communicationChannel.receive_finished()
    self.nruns -= len(ret)
    return ret
[ "def", "poll", "(", "self", ")", ":", "ret", "=", "self", ".", "communicationChannel", ".", "receive_finished", "(", ")", "self", ".", "nruns", "-=", "len", "(", "ret", ")", "return", "ret" ]
33.5
0.009709
def dirint(ghi, solar_zenith, times, pressure=101325., use_delta_kt_prime=True,
           temp_dew=None, min_cos_zenith=0.065, max_zenith=87):
    """
    Determine DNI from GHI using the DIRINT modification of the DISC model.

    Implements the modified DISC model known as "DIRINT" introduced in [1].
    DIRINT predicts direct normal irradiance (DNI) from measured global
    horizontal irradiance (GHI). DIRINT improves upon the DISC model by
    using time-series GHI data and dew point temperature information. The
    effectiveness of the DIRINT model improves with each piece of
    information provided.

    The pvlib implementation limits the clearness index to 1.

    Parameters
    ----------
    ghi : array-like
        Global horizontal irradiance in W/m^2.
    solar_zenith : array-like
        True (not refraction-corrected) solar_zenith angles in decimal
        degrees.
    times : DatetimeIndex
    pressure : float or array-like, default 101325.0
        The site pressure in Pascal. Pressure may be measured or an
        average pressure may be calculated from site altitude.
    use_delta_kt_prime : bool, default True
        If True, indicates that the stability index delta_kt_prime is
        included in the model. The stability index adjusts the estimated
        DNI in response to dynamics in the time series of GHI. It is
        recommended that delta_kt_prime is not used if the time between
        GHI points is 1.5 hours or greater. If use_delta_kt_prime=True,
        input data must be Series.
    temp_dew : None, float, or array-like, default None
        Surface dew point temperatures, in degrees C. Values of temp_dew
        may be numeric or NaN. Any single time period point with a
        temp_dew=NaN does not have dew point improvements applied. If
        temp_dew is not provided, then dew point improvements are not
        applied.
    min_cos_zenith : numeric, default 0.065
        Minimum value of cos(zenith) to allow when calculating global
        clearness index `kt`. Equivalent to zenith = 86.273 degrees.
    max_zenith : numeric, default 87
        Maximum value of zenith to allow in DNI calculation. DNI will be
        set to 0 for times with zenith values greater than `max_zenith`.

    Returns
    -------
    dni : array-like
        The modeled direct normal irradiance in W/m^2 provided by the
        DIRINT model.

    Notes
    -----
    DIRINT model requires time series data (ie. one of the inputs must
    be a vector of length > 2).

    References
    ----------
    [1] Perez, R., P. Ineichen, E. Maxwell, R. Seals and A. Zelenka,
    (1992). "Dynamic Global-to-Direct Irradiance Conversion Models".
    ASHRAE Transactions-Research Series, pp. 354-369

    [2] Maxwell, E. L., "A Quasi-Physical Model for Converting Hourly
    Global Horizontal to Direct Normal Insolation", Technical Report No.
    SERI/TR-215-3087, Golden, CO: Solar Energy Research Institute, 1987.
    """
    disc_out = disc(ghi, solar_zenith, times, pressure=pressure,
                    min_cos_zenith=min_cos_zenith, max_zenith=max_zenith)
    airmass = disc_out['airmass']
    kt = disc_out['kt']

    kt_prime = clearness_index_zenith_independent(
        kt, airmass, max_clearness_index=1)
    delta_kt_prime = _delta_kt_prime_dirint(kt_prime, use_delta_kt_prime,
                                            times)
    w = _temp_dew_dirint(temp_dew, times)

    dirint_coeffs = _dirint_coeffs(times, kt_prime, solar_zenith, w,
                                   delta_kt_prime)

    # Perez eqn 5
    dni = disc_out['dni'] * dirint_coeffs

    return dni
[ "def", "dirint", "(", "ghi", ",", "solar_zenith", ",", "times", ",", "pressure", "=", "101325.", ",", "use_delta_kt_prime", "=", "True", ",", "temp_dew", "=", "None", ",", "min_cos_zenith", "=", "0.065", ",", "max_zenith", "=", "87", ")", ":", "disc_out", "=", "disc", "(", "ghi", ",", "solar_zenith", ",", "times", ",", "pressure", "=", "pressure", ",", "min_cos_zenith", "=", "min_cos_zenith", ",", "max_zenith", "=", "max_zenith", ")", "airmass", "=", "disc_out", "[", "'airmass'", "]", "kt", "=", "disc_out", "[", "'kt'", "]", "kt_prime", "=", "clearness_index_zenith_independent", "(", "kt", ",", "airmass", ",", "max_clearness_index", "=", "1", ")", "delta_kt_prime", "=", "_delta_kt_prime_dirint", "(", "kt_prime", ",", "use_delta_kt_prime", ",", "times", ")", "w", "=", "_temp_dew_dirint", "(", "temp_dew", ",", "times", ")", "dirint_coeffs", "=", "_dirint_coeffs", "(", "times", ",", "kt_prime", ",", "solar_zenith", ",", "w", ",", "delta_kt_prime", ")", "# Perez eqn 5", "dni", "=", "disc_out", "[", "'dni'", "]", "*", "dirint_coeffs", "return", "dni" ]
38.11828
0.000275
def set_single_attribute(self, other, trigger_klass, property_name):
    """Used to guard the setting of an attribute which is singular
    and can't be set twice"""
    if isinstance(other, trigger_klass):
        # Check property exists
        if not hasattr(self, property_name):
            raise AttributeError("%s has no property %s" %
                                 (self.__class__.__name__, property_name))
        if getattr(self, property_name) is None:
            setattr(self, property_name, other)
        else:
            raise ValueError(
                '%s already has a %s element set.' %
                (self.__class__.__name__, other.__class__.__name__,))
[ "def", "set_single_attribute", "(", "self", ",", "other", ",", "trigger_klass", ",", "property_name", ")", ":", "if", "isinstance", "(", "other", ",", "trigger_klass", ")", ":", "# Check property exists", "if", "not", "hasattr", "(", "self", ",", "property_name", ")", ":", "raise", "AttributeError", "(", "\"%s has no property %s\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "property_name", ")", ")", "if", "getattr", "(", "self", ",", "property_name", ")", "is", "None", ":", "setattr", "(", "self", ",", "property_name", ",", "other", ")", "else", ":", "raise", "ValueError", "(", "'%s already has a %s element set.'", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "other", ".", "__class__", ".", "__name__", ",", ")", ")" ]
47.285714
0.007407
def model_data(self):
    """str: The model location in S3. Only set if Estimator has been ``fit()``."""
    if self.latest_training_job is not None:
        model_uri = self.sagemaker_session.sagemaker_client.describe_training_job(
            TrainingJobName=self.latest_training_job.name)['ModelArtifacts']['S3ModelArtifacts']
    else:
        logging.warning('No finished training job found associated with this estimator. Please make sure '
                        'this estimator is only used for building workflow config')
        model_uri = os.path.join(self.output_path, self._current_job_name, 'output', 'model.tar.gz')

    return model_uri
[ "def", "model_data", "(", "self", ")", ":", "if", "self", ".", "latest_training_job", "is", "not", "None", ":", "model_uri", "=", "self", ".", "sagemaker_session", ".", "sagemaker_client", ".", "describe_training_job", "(", "TrainingJobName", "=", "self", ".", "latest_training_job", ".", "name", ")", "[", "'ModelArtifacts'", "]", "[", "'S3ModelArtifacts'", "]", "else", ":", "logging", ".", "warning", "(", "'No finished training job found associated with this estimator. Please make sure'", "'this estimator is only used for building workflow config'", ")", "model_uri", "=", "os", ".", "path", ".", "join", "(", "self", ".", "output_path", ",", "self", ".", "_current_job_name", ",", "'output'", ",", "'model.tar.gz'", ")", "return", "model_uri" ]
61.636364
0.011628
def _get_handler(self, handler_class):
    """Return an existing handler of the given class."""
    element = None
    for handler in self.handlers:
        if isinstance(handler, handler_class):
            element = handler
            break
    return element
[ "def", "_get_handler", "(", "self", ",", "handler_class", ")", ":", "element", "=", "None", "for", "handler", "in", "self", ".", "handlers", ":", "if", "isinstance", "(", "handler", ",", "handler_class", ")", ":", "element", "=", "handler", "break", "return", "element" ]
34.125
0.007143
def sync(self, expectedThreads=0):
    'Wait for all but expectedThreads async threads to finish.'
    while len(self.unfinishedThreads) > expectedThreads:
        time.sleep(.3)
        self.checkForFinishedThreads()
[ "def", "sync", "(", "self", ",", "expectedThreads", "=", "0", ")", ":", "while", "len", "(", "self", ".", "unfinishedThreads", ")", ">", "expectedThreads", ":", "time", ".", "sleep", "(", ".3", ")", "self", ".", "checkForFinishedThreads", "(", ")" ]
45.8
0.008584
def free(self, ptr):  # pylint:disable=unused-argument
    """
    A somewhat faithful implementation of libc `free`.

    :param ptr: the location in memory to be freed
    """
    raise NotImplementedError("%s not implemented for %s" %
                              (self.free.__func__.__name__,
                               self.__class__.__name__))
[ "def", "free", "(", "self", ",", "ptr", ")", ":", "#pylint:disable=unused-argument", "raise", "NotImplementedError", "(", "\"%s not implemented for %s\"", "%", "(", "self", ".", "free", ".", "__func__", ".", "__name__", ",", "self", ".", "__class__", ".", "__name__", ")", ")" ]
46.25
0.013263
def is_task_running(self, task, connection_failure_control=None):
    """
    Check if a task is running according to:
    TASK_PENDING_STATES ['New', 'Starting', 'Pending', 'Running', 'Suspended', 'Stopping']

    Args:
        task (dict): OneView Task resource.
        connection_failure_control (dict): A dictionary instance that
            contains last_success for error tolerance control.

    Examples:
        >>> connection_failure_control = dict(last_success=int(time.time()))
        >>> while self.is_task_running(task, connection_failure_control):
        >>>     pass

    Returns:
        True when in TASK_PENDING_STATES; False when not.
    """
    if 'uri' in task:
        try:
            task = self.get(task)
            if connection_failure_control:
                # Updates last success
                connection_failure_control['last_success'] = self.get_current_seconds()
            if 'taskState' in task and task['taskState'] in TASK_PENDING_STATES:
                return True
        except Exception as error:
            logger.error('; '.join(str(e) for e in error.args) +
                         ' when waiting for the task: ' + str(task))
            if not connection_failure_control:
                raise error

            if hasattr(error, 'errno') and error.errno in self.CONNECTION_FAILURE_ERROR_NUMBERS:
                last_success = connection_failure_control['last_success']
                if last_success + self.CONNECTION_FAILURE_TIMEOUT < self.get_current_seconds():
                    # Timeout reached
                    raise error
                else:
                    # Return task is running when network instability occurs
                    return True
            else:
                raise error

    return False
[ "def", "is_task_running", "(", "self", ",", "task", ",", "connection_failure_control", "=", "None", ")", ":", "if", "'uri'", "in", "task", ":", "try", ":", "task", "=", "self", ".", "get", "(", "task", ")", "if", "connection_failure_control", ":", "# Updates last success", "connection_failure_control", "[", "'last_success'", "]", "=", "self", ".", "get_current_seconds", "(", ")", "if", "'taskState'", "in", "task", "and", "task", "[", "'taskState'", "]", "in", "TASK_PENDING_STATES", ":", "return", "True", "except", "Exception", "as", "error", ":", "logger", ".", "error", "(", "'; '", ".", "join", "(", "str", "(", "e", ")", "for", "e", "in", "error", ".", "args", ")", "+", "' when waiting for the task: '", "+", "str", "(", "task", ")", ")", "if", "not", "connection_failure_control", ":", "raise", "error", "if", "hasattr", "(", "error", ",", "'errno'", ")", "and", "error", ".", "errno", "in", "self", ".", "CONNECTION_FAILURE_ERROR_NUMBERS", ":", "last_success", "=", "connection_failure_control", "[", "'last_success'", "]", "if", "last_success", "+", "self", ".", "CONNECTION_FAILURE_TIMEOUT", "<", "self", ".", "get_current_seconds", "(", ")", ":", "# Timeout reached", "raise", "error", "else", ":", "# Return task is running when network instability occurs", "return", "True", "else", ":", "raise", "error", "return", "False" ]
40.652174
0.005744
def setdocument(self, doc):
    """Associate a document with this element.

    Arguments:
        doc (:class:`Document`): A document

    Each element must be associated with a FoLiA document.
    """
    assert isinstance(doc, Document)
    if not self.doc:
        self.doc = doc
        if self.id:
            if self.id in doc:
                raise DuplicateIDError(self.id)
            else:
                self.doc.index[id] = self
    for e in self:  # recursive for all children
        if isinstance(e, AbstractElement):
            e.setdocument(doc)
[ "def", "setdocument", "(", "self", ",", "doc", ")", ":", "assert", "isinstance", "(", "doc", ",", "Document", ")", "if", "not", "self", ".", "doc", ":", "self", ".", "doc", "=", "doc", "if", "self", ".", "id", ":", "if", "self", ".", "id", "in", "doc", ":", "raise", "DuplicateIDError", "(", "self", ".", "id", ")", "else", ":", "self", ".", "doc", ".", "index", "[", "id", "]", "=", "self", "for", "e", "in", "self", ":", "#recursive for all children", "if", "isinstance", "(", "e", ",", "AbstractElement", ")", ":", "e", ".", "setdocument", "(", "doc", ")" ]
29.65
0.009804
def _parse_css_color(color):
    '''_parse_css_color(css_color) -> gtk.gdk.Color'''
    if color.startswith("rgb(") and color.endswith(')'):
        r, g, b = [int(c)*257 for c in color[4:-1].split(',')]
        return gtk.gdk.Color(r, g, b)
    else:
        return gtk.gdk.color_parse(color)
[ "def", "_parse_css_color", "(", "color", ")", ":", "if", "color", ".", "startswith", "(", "\"rgb(\"", ")", "and", "color", ".", "endswith", "(", "')'", ")", ":", "r", ",", "g", ",", "b", "=", "[", "int", "(", "c", ")", "*", "257", "for", "c", "in", "color", "[", "4", ":", "-", "1", "]", ".", "split", "(", "','", ")", "]", "return", "gtk", ".", "gdk", ".", "Color", "(", "r", ",", "g", ",", "b", ")", "else", ":", "return", "gtk", ".", "gdk", ".", "color_parse", "(", "color", ")" ]
41
0.003413
def _iterable_as_config_list(s):
    """Format an iterable as a sequence of comma-separated strings.

    To match what ConfigObj expects, a single item list has a
    trailing comma.
    """
    items = sorted(s)
    if len(items) == 1:
        return "%s," % (items[0],)
    else:
        return ", ".join(items)
[ "def", "_iterable_as_config_list", "(", "s", ")", ":", "items", "=", "sorted", "(", "s", ")", "if", "len", "(", "items", ")", "==", "1", ":", "return", "\"%s,\"", "%", "(", "items", "[", "0", "]", ",", ")", "else", ":", "return", "\", \"", ".", "join", "(", "items", ")" ]
30.5
0.006369
def add_screen(self, ref):
    """ Add Screen """
    if ref not in self.screens:
        screen = Screen(self, ref)
        screen.clear()  # TODO Check this is needed, new screens should be clear.
        self.screens[ref] = screen
    return self.screens[ref]
[ "def", "add_screen", "(", "self", ",", "ref", ")", ":", "if", "ref", "not", "in", "self", ".", "screens", ":", "screen", "=", "Screen", "(", "self", ",", "ref", ")", "screen", ".", "clear", "(", ")", "# TODO Check this is needed, new screens should be clear.", "self", ".", "screens", "[", "ref", "]", "=", "screen", "return", "self", ".", "screens", "[", "ref", "]" ]
32.888889
0.009868
def x_position(self, filter_order=None, window_size=None, tol=0.05, Lx=None):
    '''
    Calculate $x$-position according to:

        x = sqrt(a) * (C/a - c_f) / (c_d - c_f)

    where:

     - $C$ is the measured capacitance.
     - $c_f$ is the capacitance of the filler medium per unit area
       _(e.g., air)_.
     - $c_d$ is the capacitance of an electrode completely covered in
       liquid per unit area.
     - $a$ is the area of the actuated electrode(s).

    Note that this equation for $x$ assumes a single drop moving across
    an electrode with a length along the x-axis of Lx. If no value is
    provided for Lx, the electrode is assumed to be square, i.e.,
    Lx=Ly=sqrt(area)
    '''
    if self.calibration._c_drop:
        c_drop = self.calibration.c_drop(self.frequency)
    else:
        c_drop = self.capacitance()[-1] / self.area
    if self.calibration._c_filler:
        c_filler = self.calibration.c_filler(self.frequency)
    else:
        c_filler = 0
    if Lx is None:
        Lx = np.sqrt(self.area)
    return (self.capacitance(filter_order=filter_order,
                             window_size=window_size, tol=tol) / self.area
            - c_filler) / (c_drop - c_filler) * Lx
[ "def", "x_position", "(", "self", ",", "filter_order", "=", "None", ",", "window_size", "=", "None", ",", "tol", "=", "0.05", ",", "Lx", "=", "None", ")", ":", "if", "self", ".", "calibration", ".", "_c_drop", ":", "c_drop", "=", "self", ".", "calibration", ".", "c_drop", "(", "self", ".", "frequency", ")", "else", ":", "c_drop", "=", "self", ".", "capacitance", "(", ")", "[", "-", "1", "]", "/", "self", ".", "area", "if", "self", ".", "calibration", ".", "_c_filler", ":", "c_filler", "=", "self", ".", "calibration", ".", "c_filler", "(", "self", ".", "frequency", ")", "else", ":", "c_filler", "=", "0", "if", "Lx", "is", "None", ":", "Lx", "=", "np", ".", "sqrt", "(", "self", ".", "area", ")", "return", "(", "self", ".", "capacitance", "(", "filter_order", "=", "filter_order", ",", "window_size", "=", "window_size", ",", "tol", "=", "tol", ")", "/", "self", ".", "area", "-", "c_filler", ")", "/", "(", "c_drop", "-", "c_filler", ")", "*", "Lx" ]
37.526316
0.003418
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'limit') and self.limit is not None:
        _dict['limit'] = self.limit
    return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'limit'", ")", "and", "self", ".", "limit", "is", "not", "None", ":", "_dict", "[", "'limit'", "]", "=", "self", ".", "limit", "return", "_dict" ]
36.666667
0.008889
def start_session(self, causal_consistency=True, default_transaction_options=None):
    """Start a logical session.

    This method takes the same parameters as
    :class:`~pymongo.client_session.SessionOptions`. See the
    :mod:`~pymongo.client_session` module for details and examples.

    Requires MongoDB 3.6. It is an error to call :meth:`start_session`
    if this client has been authenticated to multiple databases using
    the deprecated method :meth:`~pymongo.database.Database.authenticate`.

    A :class:`~pymongo.client_session.ClientSession` may only be used
    with the MongoClient that started it.

    :Returns:
      An instance of :class:`~pymongo.client_session.ClientSession`.

    .. versionadded:: 3.6
    """
    return self.__start_session(
        False,
        causal_consistency=causal_consistency,
        default_transaction_options=default_transaction_options)
[ "def", "start_session", "(", "self", ",", "causal_consistency", "=", "True", ",", "default_transaction_options", "=", "None", ")", ":", "return", "self", ".", "__start_session", "(", "False", ",", "causal_consistency", "=", "causal_consistency", ",", "default_transaction_options", "=", "default_transaction_options", ")" ]
39.48
0.003956
def Open(self):
    """Connects to the database and creates the required tables.

    Raises:
        IOError: if the specified output file already exists.
        OSError: if the specified output file already exists.
        ValueError: if the filename is not set.
    """
    if not self._filename:
        raise ValueError('Missing filename.')

    if not self._append and os.path.isfile(self._filename):
        raise IOError((
            'Unable to use an already existing file for output '
            '[{0:s}]').format(self._filename))

    self._connection = sqlite3.connect(self._filename)
    self._cursor = self._connection.cursor()

    # Create table in database.
    if not self._append:
        self._cursor.execute(self._CREATE_TABLE_QUERY)

        for field in self._META_FIELDS:
            query = 'CREATE TABLE l2t_{0:s}s ({0:s}s TEXT, frequency INT)'.format(
                field)
            self._cursor.execute(query)
            if self._set_status:
                self._set_status('Created table: l2t_{0:s}'.format(field))

        self._cursor.execute('CREATE TABLE l2t_tags (tag TEXT)')
        if self._set_status:
            self._set_status('Created table: l2t_tags')

        query = 'CREATE TABLE l2t_saved_query (name TEXT, query TEXT)'
        self._cursor.execute(query)
        if self._set_status:
            self._set_status('Created table: l2t_saved_query')

        query = (
            'CREATE TABLE l2t_disk (disk_type INT, mount_path TEXT, '
            'dd_path TEXT, dd_offset TEXT, storage_file TEXT, export_path TEXT)')
        self._cursor.execute(query)
        query = (
            'INSERT INTO l2t_disk (disk_type, mount_path, dd_path, dd_offset, '
            'storage_file, export_path) VALUES (0, "", "", "", "", "")')
        self._cursor.execute(query)
        if self._set_status:
            self._set_status('Created table: l2t_disk')

    self._count = 0
[ "def", "Open", "(", "self", ")", ":", "if", "not", "self", ".", "_filename", ":", "raise", "ValueError", "(", "'Missing filename.'", ")", "if", "not", "self", ".", "_append", "and", "os", ".", "path", ".", "isfile", "(", "self", ".", "_filename", ")", ":", "raise", "IOError", "(", "(", "'Unable to use an already existing file for output '", "'[{0:s}]'", ")", ".", "format", "(", "self", ".", "_filename", ")", ")", "self", ".", "_connection", "=", "sqlite3", ".", "connect", "(", "self", ".", "_filename", ")", "self", ".", "_cursor", "=", "self", ".", "_connection", ".", "cursor", "(", ")", "# Create table in database.", "if", "not", "self", ".", "_append", ":", "self", ".", "_cursor", ".", "execute", "(", "self", ".", "_CREATE_TABLE_QUERY", ")", "for", "field", "in", "self", ".", "_META_FIELDS", ":", "query", "=", "'CREATE TABLE l2t_{0:s}s ({0:s}s TEXT, frequency INT)'", ".", "format", "(", "field", ")", "self", ".", "_cursor", ".", "execute", "(", "query", ")", "if", "self", ".", "_set_status", ":", "self", ".", "_set_status", "(", "'Created table: l2t_{0:s}'", ".", "format", "(", "field", ")", ")", "self", ".", "_cursor", ".", "execute", "(", "'CREATE TABLE l2t_tags (tag TEXT)'", ")", "if", "self", ".", "_set_status", ":", "self", ".", "_set_status", "(", "'Created table: l2t_tags'", ")", "query", "=", "'CREATE TABLE l2t_saved_query (name TEXT, query TEXT)'", "self", ".", "_cursor", ".", "execute", "(", "query", ")", "if", "self", ".", "_set_status", ":", "self", ".", "_set_status", "(", "'Created table: l2t_saved_query'", ")", "query", "=", "(", "'CREATE TABLE l2t_disk (disk_type INT, mount_path TEXT, '", "'dd_path TEXT, dd_offset TEXT, storage_file TEXT, export_path TEXT)'", ")", "self", ".", "_cursor", ".", "execute", "(", "query", ")", "query", "=", "(", "'INSERT INTO l2t_disk (disk_type, mount_path, dd_path, dd_offset, '", "'storage_file, export_path) VALUES (0, \"\", \"\", \"\", \"\", \"\")'", ")", "self", ".", "_cursor", ".", "execute", "(", "query", ")", "if", "self", ".", "_set_status", ":", "self", ".", "_set_status", "(", "'Created table: l2t_disk'", ")", "self", ".", "_count", "=", "0" ]
34.480769
0.008677
def print_source_location_info(print_fn, filename, lineno, fn_name=None,
                               f_lasti=None, remapped_file=None):
    """Print out a source location, e.g. the first line in:
        (/tmp.py:2 @21): <module>
        L -- 2 import sys,os
        (trepan3k)
    """
    if remapped_file:
        mess = '(%s:%s remapped %s' % (remapped_file, lineno, filename)
    else:
        mess = '(%s:%s' % (filename, lineno)
    if f_lasti and f_lasti != -1:
        mess += ' @%d' % f_lasti
        pass
    mess += '):'
    if fn_name and fn_name != '?':
        mess += " %s" % fn_name
        pass
    print_fn(mess)
    return
[ "def", "print_source_location_info", "(", "print_fn", ",", "filename", ",", "lineno", ",", "fn_name", "=", "None", ",", "f_lasti", "=", "None", ",", "remapped_file", "=", "None", ")", ":", "if", "remapped_file", ":", "mess", "=", "'(%s:%s remapped %s'", "%", "(", "remapped_file", ",", "lineno", ",", "filename", ")", "else", ":", "mess", "=", "'(%s:%s'", "%", "(", "filename", ",", "lineno", ")", "if", "f_lasti", "and", "f_lasti", "!=", "-", "1", ":", "mess", "+=", "' @%d'", "%", "f_lasti", "pass", "mess", "+=", "'):'", "if", "fn_name", "and", "fn_name", "!=", "'?'", ":", "mess", "+=", "\" %s\"", "%", "fn_name", "pass", "print_fn", "(", "mess", ")", "return" ]
30.380952
0.00152
def grid_search(fn, grd, fmin=True, nproc=None):
    """Perform a grid search for optimal parameters of a specified function.

    In the simplest case the function returns a float value, and a single
    optimum value and corresponding parameter values are identified. If the
    function returns a tuple of values, each of these is taken to define a
    separate function on the search grid, with optimum function values and
    corresponding parameter values being identified for each of them. On
    all platforms except Windows (where ``mp.Pool`` usage has some
    limitations), the computation of the function at the grid points is
    computed in parallel.

    **Warning:** This function will hang if `fn` makes use of :mod:`pyfftw`
    with multi-threading enabled (the `bug
    <https://github.com/pyFFTW/pyFFTW/issues/135>`_ has been reported).
    When using the FFT functions in :mod:`sporco.linalg`, multi-threading
    can be disabled by including the following code::

        import sporco.linalg
        sporco.linalg.pyfftw_threads = 1

    Parameters
    ----------
    fn : function
        Function to be evaluated. It should take a tuple of parameter
        values as an argument, and return a float value or a tuple of
        float values.
    grd : tuple of array_like
        A tuple providing an array of sample points for each axis of the
        grid on which the search is to be performed.
    fmin : bool, optional (default True)
        Determine whether optimal function values are selected as minima
        or maxima. If `fmin` is True then minima are selected.
    nproc : int or None, optional (default None)
        Number of processes to run in parallel. If None, the number of
        CPUs of the system is used.

    Returns
    -------
    sprm : ndarray
        Optimal parameter values on each axis. If `fn` is multi-valued,
        `sprm` is a matrix with rows corresponding to parameter values
        and columns corresponding to function values.
    sfvl : float or ndarray
        Optimum function value or values
    fvmx : ndarray
        Function value(s) on search grid
    sidx : tuple of int or tuple of ndarray
        Indices of optimal values on parameter grid
    """
    if fmin:
        slct = np.argmin
    else:
        slct = np.argmax
    fprm = itertools.product(*grd)
    if platform.system() == 'Windows':
        fval = list(map(fn, fprm))
    else:
        if nproc is None:
            nproc = mp.cpu_count()
        pool = mp.Pool(processes=nproc)
        fval = pool.map(fn, fprm)
        pool.close()
        pool.join()
    if isinstance(fval[0], (tuple, list, np.ndarray)):
        nfnv = len(fval[0])
        fvmx = np.reshape(fval, [a.size for a in grd] + [nfnv,])
        sidx = np.unravel_index(slct(fvmx.reshape((-1, nfnv)), axis=0),
                                fvmx.shape[0:-1]) + (np.array((range(nfnv))),)
        sprm = np.array([grd[k][sidx[k]] for k in range(len(grd))])
        sfvl = tuple(fvmx[sidx])
    else:
        fvmx = np.reshape(fval, [a.size for a in grd])
        sidx = np.unravel_index(slct(fvmx), fvmx.shape)
        sprm = np.array([grd[k][sidx[k]] for k in range(len(grd))])
        sfvl = fvmx[sidx]

    return sprm, sfvl, fvmx, sidx
[ "def", "grid_search", "(", "fn", ",", "grd", ",", "fmin", "=", "True", ",", "nproc", "=", "None", ")", ":", "if", "fmin", ":", "slct", "=", "np", ".", "argmin", "else", ":", "slct", "=", "np", ".", "argmax", "fprm", "=", "itertools", ".", "product", "(", "*", "grd", ")", "if", "platform", ".", "system", "(", ")", "==", "'Windows'", ":", "fval", "=", "list", "(", "map", "(", "fn", ",", "fprm", ")", ")", "else", ":", "if", "nproc", "is", "None", ":", "nproc", "=", "mp", ".", "cpu_count", "(", ")", "pool", "=", "mp", ".", "Pool", "(", "processes", "=", "nproc", ")", "fval", "=", "pool", ".", "map", "(", "fn", ",", "fprm", ")", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")", "if", "isinstance", "(", "fval", "[", "0", "]", ",", "(", "tuple", ",", "list", ",", "np", ".", "ndarray", ")", ")", ":", "nfnv", "=", "len", "(", "fval", "[", "0", "]", ")", "fvmx", "=", "np", ".", "reshape", "(", "fval", ",", "[", "a", ".", "size", "for", "a", "in", "grd", "]", "+", "[", "nfnv", ",", "]", ")", "sidx", "=", "np", ".", "unravel_index", "(", "slct", "(", "fvmx", ".", "reshape", "(", "(", "-", "1", ",", "nfnv", ")", ")", ",", "axis", "=", "0", ")", ",", "fvmx", ".", "shape", "[", "0", ":", "-", "1", "]", ")", "+", "(", "np", ".", "array", "(", "(", "range", "(", "nfnv", ")", ")", ")", ",", ")", "sprm", "=", "np", ".", "array", "(", "[", "grd", "[", "k", "]", "[", "sidx", "[", "k", "]", "]", "for", "k", "in", "range", "(", "len", "(", "grd", ")", ")", "]", ")", "sfvl", "=", "tuple", "(", "fvmx", "[", "sidx", "]", ")", "else", ":", "fvmx", "=", "np", ".", "reshape", "(", "fval", ",", "[", "a", ".", "size", "for", "a", "in", "grd", "]", ")", "sidx", "=", "np", ".", "unravel_index", "(", "slct", "(", "fvmx", ")", ",", "fvmx", ".", "shape", ")", "sprm", "=", "np", ".", "array", "(", "[", "grd", "[", "k", "]", "[", "sidx", "[", "k", "]", "]", "for", "k", "in", "range", "(", "len", "(", "grd", ")", ")", "]", ")", "sfvl", "=", "fvmx", "[", "sidx", "]", "return", "sprm", ",", "sfvl", ",", "fvmx", ",", "sidx" ]
40.153846
0.000623
def reset_weights(self):
    """ Properly initialize model weights """
    self.input_block.reset_weights()
    self.policy_backbone.reset_weights()
    self.value_backbone.reset_weights()
    self.action_head.reset_weights()
    self.value_head.reset_weights()
[ "def", "reset_weights", "(", "self", ")", ":", "self", ".", "input_block", ".", "reset_weights", "(", ")", "self", ".", "policy_backbone", ".", "reset_weights", "(", ")", "self", ".", "value_backbone", ".", "reset_weights", "(", ")", "self", ".", "action_head", ".", "reset_weights", "(", ")", "self", ".", "value_head", ".", "reset_weights", "(", ")" ]
31
0.006969
def install(self, force=False, overrides={}):
    """
    Install the wheel into site-packages.
    """

    # Utility to get the target directory for a particular key
    def get_path(key):
        return overrides.get(key) or self.install_paths[key]

    # The base target location is either purelib or platlib
    if self.parsed_wheel_info['Root-Is-Purelib'] == 'true':
        root = get_path('purelib')
    else:
        root = get_path('platlib')

    # Parse all the names in the archive
    name_trans = {}
    for info in self.zipfile.infolist():
        name = info.filename
        # Zip files can contain entries representing directories.
        # These end in a '/'.
        # We ignore these, as we create directories on demand.
        if name.endswith('/'):
            continue

        # Pathnames in a zipfile namelist are always /-separated.
        # In theory, paths could start with ./ or have other oddities
        # but this won't happen in practical cases of well-formed wheels.
        # We'll cover the simple case of an initial './' as it's both easy
        # to do and more common than most other oddities.
        if name.startswith('./'):
            name = name[2:]

        # Split off the base directory to identify files that are to be
        # installed in non-root locations
        basedir, sep, filename = name.partition('/')
        if sep and basedir == self.datadir_name:
            # Data file. Target destination is elsewhere
            key, sep, filename = filename.partition('/')
            if not sep:
                raise ValueError("Invalid filename in wheel: {0}".format(name))
            target = get_path(key)
        else:
            # Normal file. Target destination is root
            key = ''
            target = root
            filename = name

        # Map the actual filename from the zipfile to its intended target
        # directory and the pathname relative to that directory.
        dest = os.path.normpath(os.path.join(target, filename))
        name_trans[info] = (key, target, filename, dest)

    # We're now ready to start processing the actual install. The process
    # is as follows:
    # 1. Prechecks - is the wheel valid, is its declared architecture
    #    OK, etc. [[Responsibility of the caller]]
    # 2. Overwrite check - do any of the files to be installed already
    #    exist?
    # 3. Actual install - put the files in their target locations.
    # 4. Update RECORD - write a suitably modified RECORD file to
    #    reflect the actual installed paths.

    if not force:
        for info, v in name_trans.items():
            k = info.filename
            key, target, filename, dest = v
            if os.path.exists(dest):
                raise ValueError(
                    "Wheel file {0} would overwrite {1}. Use force if this is intended".format(k, dest))

    # Get the name of our executable, for use when replacing script
    # wrapper hashbang lines.
    # We encode it using getfilesystemencoding, as that is "the name of
    # the encoding used to convert Unicode filenames into system file
    # names".
    exename = sys.executable.encode(sys.getfilesystemencoding())
    record_data = []
    record_name = self.distinfo_name + '/RECORD'
    for info, (key, target, filename, dest) in name_trans.items():
        name = info.filename
        source = self.zipfile.open(info)
        # Skip the RECORD file
        if name == record_name:
            continue
        ddir = os.path.dirname(dest)
        if not os.path.isdir(ddir):
            os.makedirs(ddir)
        destination = HashingFile(open(dest, 'wb'))
        if key == 'scripts':
            hashbang = source.readline()
            if hashbang.startswith(b'#!python'):
                hashbang = b'#!' + exename + binary(os.linesep)
            destination.write(hashbang)
        shutil.copyfileobj(source, destination)
        reldest = os.path.relpath(dest, root)
        reldest.replace(os.sep, '/')
        record_data.append((reldest, destination.digest(), destination.length))
        destination.close()
        source.close()
        # preserve attributes (especially +x bit for scripts)
        attrs = info.external_attr >> 16
        if attrs:  # tends to be 0 if Windows.
            os.chmod(dest, info.external_attr >> 16)

    record_name = os.path.join(root, self.record_name)
    writer = csv.writer(open_for_csv(record_name, 'w+'))
    for reldest, digest, length in sorted(record_data):
        writer.writerow((reldest, digest, length))
    writer.writerow((self.record_name, '', ''))
[ "def", "install", "(", "self", ",", "force", "=", "False", ",", "overrides", "=", "{", "}", ")", ":", "# Utility to get the target directory for a particular key", "def", "get_path", "(", "key", ")", ":", "return", "overrides", ".", "get", "(", "key", ")", "or", "self", ".", "install_paths", "[", "key", "]", "# The base target location is either purelib or platlib", "if", "self", ".", "parsed_wheel_info", "[", "'Root-Is-Purelib'", "]", "==", "'true'", ":", "root", "=", "get_path", "(", "'purelib'", ")", "else", ":", "root", "=", "get_path", "(", "'platlib'", ")", "# Parse all the names in the archive", "name_trans", "=", "{", "}", "for", "info", "in", "self", ".", "zipfile", ".", "infolist", "(", ")", ":", "name", "=", "info", ".", "filename", "# Zip files can contain entries representing directories.", "# These end in a '/'.", "# We ignore these, as we create directories on demand.", "if", "name", ".", "endswith", "(", "'/'", ")", ":", "continue", "# Pathnames in a zipfile namelist are always /-separated.", "# In theory, paths could start with ./ or have other oddities", "# but this won't happen in practical cases of well-formed wheels.", "# We'll cover the simple case of an initial './' as it's both easy", "# to do and more common than most other oddities.", "if", "name", ".", "startswith", "(", "'./'", ")", ":", "name", "=", "name", "[", "2", ":", "]", "# Split off the base directory to identify files that are to be", "# installed in non-root locations", "basedir", ",", "sep", ",", "filename", "=", "name", ".", "partition", "(", "'/'", ")", "if", "sep", "and", "basedir", "==", "self", ".", "datadir_name", ":", "# Data file. Target destination is elsewhere", "key", ",", "sep", ",", "filename", "=", "filename", ".", "partition", "(", "'/'", ")", "if", "not", "sep", ":", "raise", "ValueError", "(", "\"Invalid filename in wheel: {0}\"", ".", "format", "(", "name", ")", ")", "target", "=", "get_path", "(", "key", ")", "else", ":", "# Normal file. Target destination is root", "key", "=", "''", "target", "=", "root", "filename", "=", "name", "# Map the actual filename from the zipfile to its intended target", "# directory and the pathname relative to that directory.", "dest", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "target", ",", "filename", ")", ")", "name_trans", "[", "info", "]", "=", "(", "key", ",", "target", ",", "filename", ",", "dest", ")", "# We're now ready to start processing the actual install. The process", "# is as follows:", "# 1. Prechecks - is the wheel valid, is its declared architecture", "# OK, etc. [[Responsibility of the caller]]", "# 2. Overwrite check - do any of the files to be installed already", "# exist?", "# 3. Actual install - put the files in their target locations.", "# 4. Update RECORD - write a suitably modified RECORD file to", "# reflect the actual installed paths.", "if", "not", "force", ":", "for", "info", ",", "v", "in", "name_trans", ".", "items", "(", ")", ":", "k", "=", "info", ".", "filename", "key", ",", "target", ",", "filename", ",", "dest", "=", "v", "if", "os", ".", "path", ".", "exists", "(", "dest", ")", ":", "raise", "ValueError", "(", "\"Wheel file {0} would overwrite {1}. 
Use force if this is intended\"", ".", "format", "(", "k", ",", "dest", ")", ")", "# Get the name of our executable, for use when replacing script", "# wrapper hashbang lines.", "# We encode it using getfilesystemencoding, as that is \"the name of", "# the encoding used to convert Unicode filenames into system file", "# names\".", "exename", "=", "sys", ".", "executable", ".", "encode", "(", "sys", ".", "getfilesystemencoding", "(", ")", ")", "record_data", "=", "[", "]", "record_name", "=", "self", ".", "distinfo_name", "+", "'/RECORD'", "for", "info", ",", "(", "key", ",", "target", ",", "filename", ",", "dest", ")", "in", "name_trans", ".", "items", "(", ")", ":", "name", "=", "info", ".", "filename", "source", "=", "self", ".", "zipfile", ".", "open", "(", "info", ")", "# Skip the RECORD file", "if", "name", "==", "record_name", ":", "continue", "ddir", "=", "os", ".", "path", ".", "dirname", "(", "dest", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "ddir", ")", ":", "os", ".", "makedirs", "(", "ddir", ")", "destination", "=", "HashingFile", "(", "open", "(", "dest", ",", "'wb'", ")", ")", "if", "key", "==", "'scripts'", ":", "hashbang", "=", "source", ".", "readline", "(", ")", "if", "hashbang", ".", "startswith", "(", "b'#!python'", ")", ":", "hashbang", "=", "b'#!'", "+", "exename", "+", "binary", "(", "os", ".", "linesep", ")", "destination", ".", "write", "(", "hashbang", ")", "shutil", ".", "copyfileobj", "(", "source", ",", "destination", ")", "reldest", "=", "os", ".", "path", ".", "relpath", "(", "dest", ",", "root", ")", "reldest", ".", "replace", "(", "os", ".", "sep", ",", "'/'", ")", "record_data", ".", "append", "(", "(", "reldest", ",", "destination", ".", "digest", "(", ")", ",", "destination", ".", "length", ")", ")", "destination", ".", "close", "(", ")", "source", ".", "close", "(", ")", "# preserve attributes (especially +x bit for scripts)", "attrs", "=", "info", ".", "external_attr", ">>", "16", "if", "attrs", ":", "# tends to be 0 if Windows.", "os", ".", "chmod", "(", "dest", ",", "info", ".", "external_attr", ">>", "16", ")", "record_name", "=", "os", ".", "path", ".", "join", "(", "root", ",", "self", ".", "record_name", ")", "writer", "=", "csv", ".", "writer", "(", "open_for_csv", "(", "record_name", ",", "'w+'", ")", ")", "for", "reldest", ",", "digest", ",", "length", "in", "sorted", "(", "record_data", ")", ":", "writer", ".", "writerow", "(", "(", "reldest", ",", "digest", ",", "length", ")", ")", "writer", ".", "writerow", "(", "(", "self", ".", "record_name", ",", "''", ",", "''", ")", ")" ]
44.66055
0.001005
def validate_accept(form, field):
    """Validate that accept has not been set."""
    if field.data and form.reject.data:
        raise validators.ValidationError(
            _("Both reject and accept cannot be set at the same time.")
        )
[ "def", "validate_accept", "(", "form", ",", "field", ")", ":", "if", "field", ".", "data", "and", "form", ".", "reject", ".", "data", ":", "raise", "validators", ".", "ValidationError", "(", "_", "(", "\"Both reject and accept cannot be set at the same time.\"", ")", ")" ]
43.666667
0.007491
def resolve(self, value):
    """
    Resolve contextual value.

    :param value: Contextual value.
    :return: If value is a function with a single parameter, which is a
        read-only dictionary, the return value of the function called with
        context properties as its parameter. If not, the value itself.
    """
    if isinstance(value, collections.Callable):
        return value({
            "base_dir": self.__base_dir,
            "profile_dir": self.__prof_dir,
            "profile_name": self.__prof_name
        })
    return value
[ "def", "resolve", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "collections", ".", "Callable", ")", ":", "return", "value", "(", "{", "\"base_dir\"", ":", "self", ".", "__base_dir", ",", "\"profile_dir\"", ":", "self", ".", "__prof_dir", ",", "\"profile_name\"", ":", "self", ".", "__prof_name", "}", ")", "return", "value" ]
24.85
0.050388
def upload_attachment(self, location, data):
    """Upload attachment as required by CommentThread class.

    See CommentThread.upload_attachment for details.
    """
    self.validate_attachment_location(location)
    content = data.read() if hasattr(data, 'read') else data
    orig_content = content
    if isinstance(content, bytes):
        content = base64.b64encode(orig_content).decode('ascii')
    else:
        pass  # Should be base64 encoded already
    apath = '%s/%s' % (self.attachment_location, location)
    url = '%s/contents/%s' % (self.base_url, apath)
    result = requests.put(
        url, auth=(self.user, self.token), data=json.dumps({
            'message': 'file attachment %s' % location,
            'content': content}))
    if result.status_code != 201:
        raise ValueError(
            "Can't upload attachment %s due to error %s." % (
                location, result.reason))
    return '[%s](https://github.com/%s/%s/blob/master/%s)' % (
        location, self.owner, self.realm, apath)
[ "def", "upload_attachment", "(", "self", ",", "location", ",", "data", ")", ":", "self", ".", "validate_attachment_location", "(", "location", ")", "content", "=", "data", ".", "read", "(", ")", "if", "hasattr", "(", "data", ",", "'read'", ")", "else", "data", "orig_content", "=", "content", "if", "isinstance", "(", "content", ",", "bytes", ")", ":", "content", "=", "base64", ".", "b64encode", "(", "orig_content", ")", ".", "decode", "(", "'ascii'", ")", "else", ":", "pass", "# Should be base64 encoded already", "apath", "=", "'%s/%s'", "%", "(", "self", ".", "attachment_location", ",", "location", ")", "url", "=", "'%s/contents/%s'", "%", "(", "self", ".", "base_url", ",", "apath", ")", "result", "=", "requests", ".", "put", "(", "url", ",", "auth", "=", "(", "self", ".", "user", ",", "self", ".", "token", ")", ",", "data", "=", "json", ".", "dumps", "(", "{", "'message'", ":", "'file attachment %s'", "%", "location", ",", "'content'", ":", "content", "}", ")", ")", "if", "result", ".", "status_code", "!=", "201", ":", "raise", "ValueError", "(", "\"Can't upload attachment %s due to error %s.\"", "%", "(", "location", ",", "result", ".", "reason", ")", ")", "return", "'[%s](https://github.com/%s/%s/blob/master/%s)'", "%", "(", "location", ",", "self", ".", "owner", ",", "self", ".", "realm", ",", "apath", ")" ]
45.5
0.001794
def rpm(state, host, source, present=True):
    '''
    Add/remove ``.rpm`` file packages.

    + source: filename or URL of the ``.rpm`` package
    + present: whether or not the package should exist on the system

    URL sources with ``present=False``:
        If the ``.rpm`` file isn't downloaded, pyinfra can't remove any
        existing package as the file won't exist until mid-deploy.
    '''

    # If source is a url
    if urlparse(source).scheme:
        # Generate a temp filename (with .rpm extension to please yum)
        temp_filename = '{0}.rpm'.format(state.get_temp_filename(source))

        # Ensure it's downloaded
        yield files.download(state, host, source, temp_filename)

        # Override the source with the downloaded file
        source = temp_filename

    # Check for file .rpm information
    info = host.fact.rpm_package(source)
    exists = False

    # We have info!
    if info:
        current_packages = host.fact.rpm_packages

        if (
            info['name'] in current_packages
            and info['version'] in current_packages[info['name']]
        ):
            exists = True

    # Package does not exist and we want?
    if present and not exists:
        # If we had info, always install
        if info:
            yield 'rpm -U {0}'.format(source)

        # This happens if we download the package mid-deploy, so we have no info
        # but also don't know if it's installed. So check at runtime, otherwise
        # the install will fail.
        else:
            yield 'rpm -qa | grep `rpm -qp {0}` || rpm -U {0}'.format(source)

    # Package exists but we don't want?
    if exists and not present:
        yield 'yum remove -y {0}'.format(info['name'])
[ "def", "rpm", "(", "state", ",", "host", ",", "source", ",", "present", "=", "True", ")", ":", "# If source is a url", "if", "urlparse", "(", "source", ")", ".", "scheme", ":", "# Generate a temp filename (with .rpm extension to please yum)", "temp_filename", "=", "'{0}.rpm'", ".", "format", "(", "state", ".", "get_temp_filename", "(", "source", ")", ")", "# Ensure it's downloaded", "yield", "files", ".", "download", "(", "state", ",", "host", ",", "source", ",", "temp_filename", ")", "# Override the source with the downloaded file", "source", "=", "temp_filename", "# Check for file .rpm information", "info", "=", "host", ".", "fact", ".", "rpm_package", "(", "source", ")", "exists", "=", "False", "# We have info!", "if", "info", ":", "current_packages", "=", "host", ".", "fact", ".", "rpm_packages", "if", "(", "info", "[", "'name'", "]", "in", "current_packages", "and", "info", "[", "'version'", "]", "in", "current_packages", "[", "info", "[", "'name'", "]", "]", ")", ":", "exists", "=", "True", "# Package does not exist and we want?", "if", "present", "and", "not", "exists", ":", "# If we had info, always install", "if", "info", ":", "yield", "'rpm -U {0}'", ".", "format", "(", "source", ")", "# This happens if we download the package mid-deploy, so we have no info", "# but also don't know if it's installed. So check at runtime, otherwise", "# the install will fail.", "else", ":", "yield", "'rpm -qa | grep `rpm -qp {0}` || rpm -U {0}'", ".", "format", "(", "source", ")", "# Package exists but we don't want?", "if", "exists", "and", "not", "present", ":", "yield", "'yum remove -y {0}'", ".", "format", "(", "info", "[", "'name'", "]", ")" ]
32.269231
0.001735
def has_mixture_channel(val: Any) -> bool:
    """Returns whether the value has a mixture channel representation.

    In contrast to `has_mixture` this method falls back to checking whether
    the value has a unitary representation via `has_channel`.

    Returns:
        If `val` has a `_has_mixture_` method and its result is not
        NotImplemented, that result is returned. Otherwise, if `val` has a
        `_has_unitary_` method and its result is not NotImplemented, that
        result is returned. Otherwise, if the value has a `_mixture_` method
        that is not a non-default value, True is returned. Returns False if
        none of these functions exists.
    """
    mixture_getter = getattr(val, '_has_mixture_', None)
    result = NotImplemented if mixture_getter is None else mixture_getter()
    if result is not NotImplemented:
        return result

    result = has_unitary(val)
    if result is not NotImplemented and result:
        return result

    # No _has_mixture_ or _has_unitary_ function, use _mixture_ instead.
    return mixture_channel(val, None) is not None
[ "def", "has_mixture_channel", "(", "val", ":", "Any", ")", "->", "bool", ":", "mixture_getter", "=", "getattr", "(", "val", ",", "'_has_mixture_'", ",", "None", ")", "result", "=", "NotImplemented", "if", "mixture_getter", "is", "None", "else", "mixture_getter", "(", ")", "if", "result", "is", "not", "NotImplemented", ":", "return", "result", "result", "=", "has_unitary", "(", "val", ")", "if", "result", "is", "not", "NotImplemented", "and", "result", ":", "return", "result", "# No _has_mixture_ or _has_unitary_ function, use _mixture_ instead.", "return", "mixture_channel", "(", "val", ",", "None", ")", "is", "not", "None" ]
42.84
0.001826
def set_service(self, name, service_config, project=False):
    """ Store a ServiceConfig in the keychain """
    if not self.project_config.services or name not in self.project_config.services:
        self._raise_service_not_valid(name)
    self._validate_service(name, service_config)
    self._set_service(name, service_config, project)
    self._load_services()
[ "def", "set_service", "(", "self", ",", "name", ",", "service_config", ",", "project", "=", "False", ")", ":", "if", "not", "self", ".", "project_config", ".", "services", "or", "name", "not", "in", "self", ".", "project_config", ".", "services", ":", "self", ".", "_raise_service_not_valid", "(", "name", ")", "self", ".", "_validate_service", "(", "name", ",", "service_config", ")", "self", ".", "_set_service", "(", "name", ",", "service_config", ",", "project", ")", "self", ".", "_load_services", "(", ")" ]
54.857143
0.007692
def startswith(string, prefix):
    """
    Like str.startswith, but also checks that the string starts with the
    given prefix's sequence of graphemes.

    str.startswith may return true for a prefix that is not visually
    represented as a prefix if a grapheme cluster is continued after the
    prefix ends.

    >>> grapheme.startswith("✊🏾", "✊")
    False
    >>> "✊🏾".startswith("✊")
    True
    """
    return string.startswith(prefix) and safe_split_index(string, len(prefix)) == len(prefix)
[ "def", "startswith", "(", "string", ",", "prefix", ")", ":", "return", "string", ".", "startswith", "(", "prefix", ")", "and", "safe_split_index", "(", "string", ",", "len", "(", "prefix", ")", ")", "==", "len", "(", "prefix", ")" ]
37.230769
0.008065
def FlipAllowed(self):
    """Raise an error if the not keyword is used where it is not allowed."""
    if not hasattr(self, 'flipped'):
        raise errors.ParseError('Not defined.')

    if not self.flipped:
        return

    if self.current_expression.operator:
        if not self.current_expression.operator.lower() in (
                'is', 'contains', 'inset', 'equals'):
            raise errors.ParseError(
                'Keyword \'not\' does not work against operator: {0:s}'.format(
                    self.current_expression.operator))
[ "def", "FlipAllowed", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'flipped'", ")", ":", "raise", "errors", ".", "ParseError", "(", "'Not defined.'", ")", "if", "not", "self", ".", "flipped", ":", "return", "if", "self", ".", "current_expression", ".", "operator", ":", "if", "not", "self", ".", "current_expression", ".", "operator", ".", "lower", "(", ")", "in", "(", "'is'", ",", "'contains'", ",", "'inset'", ",", "'equals'", ")", ":", "raise", "errors", ".", "ParseError", "(", "'Keyword \\'not\\' does not work against operator: {0:s}'", ".", "format", "(", "self", ".", "current_expression", ".", "operator", ")", ")" ]
36.928571
0.009434
def p_edgesigs(self, p):
    'edgesigs : edgesigs SENS_OR edgesig'
    p[0] = p[1] + (p[3],)
    p.set_lineno(0, p.lineno(1))
[ "def", "p_edgesigs", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "(", "p", "[", "3", "]", ",", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
33.5
0.014599
def load_module(self, name):
    """
    If we get this far, then there are hooks waiting to be called on
    import of this module. We manually load the module and then run
    the hooks.

    @param name: The name of the module to import.
    """
    self.loaded_modules.append(name)

    try:
        __import__(name, {}, {}, [])
        mod = sys.modules[name]
        self._run_hooks(name, mod)
    except:
        self.loaded_modules.pop()
        raise

    return mod
[ "def", "load_module", "(", "self", ",", "name", ")", ":", "self", ".", "loaded_modules", ".", "append", "(", "name", ")", "try", ":", "__import__", "(", "name", ",", "{", "}", ",", "{", "}", ",", "[", "]", ")", "mod", "=", "sys", ".", "modules", "[", "name", "]", "self", ".", "_run_hooks", "(", "name", ",", "mod", ")", "except", ":", "self", ".", "loaded_modules", ".", "pop", "(", ")", "raise", "return", "mod" ]
24.619048
0.005587
def write(self, chunk):
    """Write unbuffered data to the client."""
    if self.chunked_write and chunk:
        chunk_size_hex = hex(len(chunk))[2:].encode('ascii')
        buf = [chunk_size_hex, CRLF, chunk, CRLF]
        self.conn.wfile.write(EMPTY.join(buf))
    else:
        self.conn.wfile.write(chunk)
[ "def", "write", "(", "self", ",", "chunk", ")", ":", "if", "self", ".", "chunked_write", "and", "chunk", ":", "chunk_size_hex", "=", "hex", "(", "len", "(", "chunk", ")", ")", "[", "2", ":", "]", ".", "encode", "(", "'ascii'", ")", "buf", "=", "[", "chunk_size_hex", ",", "CRLF", ",", "chunk", ",", "CRLF", "]", "self", ".", "conn", ".", "wfile", ".", "write", "(", "EMPTY", ".", "join", "(", "buf", ")", ")", "else", ":", "self", ".", "conn", ".", "wfile", ".", "write", "(", "chunk", ")" ]
41.625
0.005882
def xmlrpc_run(self, port=24444, bind='127.0.0.1', logRequests=False): '''Run xmlrpc server''' import umsgpack from pyspider.libs.wsgi_xmlrpc import WSGIXMLRPCApplication try: from xmlrpc.client import Binary except ImportError: from xmlrpclib import Binary application = WSGIXMLRPCApplication() application.register_function(self.quit, '_quit') application.register_function(self.size) def sync_fetch(task): result = self.sync_fetch(task) result = Binary(umsgpack.packb(result)) return result application.register_function(sync_fetch, 'fetch') def dump_counter(_time, _type): return self._cnt[_time].to_dict(_type) application.register_function(dump_counter, 'counter') import tornado.wsgi import tornado.ioloop import tornado.httpserver container = tornado.wsgi.WSGIContainer(application) self.xmlrpc_ioloop = tornado.ioloop.IOLoop() self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop) self.xmlrpc_server.listen(port=port, address=bind) logger.info('fetcher.xmlrpc listening on %s:%s', bind, port) self.xmlrpc_ioloop.start()
[ "def", "xmlrpc_run", "(", "self", ",", "port", "=", "24444", ",", "bind", "=", "'127.0.0.1'", ",", "logRequests", "=", "False", ")", ":", "import", "umsgpack", "from", "pyspider", ".", "libs", ".", "wsgi_xmlrpc", "import", "WSGIXMLRPCApplication", "try", ":", "from", "xmlrpc", ".", "client", "import", "Binary", "except", "ImportError", ":", "from", "xmlrpclib", "import", "Binary", "application", "=", "WSGIXMLRPCApplication", "(", ")", "application", ".", "register_function", "(", "self", ".", "quit", ",", "'_quit'", ")", "application", ".", "register_function", "(", "self", ".", "size", ")", "def", "sync_fetch", "(", "task", ")", ":", "result", "=", "self", ".", "sync_fetch", "(", "task", ")", "result", "=", "Binary", "(", "umsgpack", ".", "packb", "(", "result", ")", ")", "return", "result", "application", ".", "register_function", "(", "sync_fetch", ",", "'fetch'", ")", "def", "dump_counter", "(", "_time", ",", "_type", ")", ":", "return", "self", ".", "_cnt", "[", "_time", "]", ".", "to_dict", "(", "_type", ")", "application", ".", "register_function", "(", "dump_counter", ",", "'counter'", ")", "import", "tornado", ".", "wsgi", "import", "tornado", ".", "ioloop", "import", "tornado", ".", "httpserver", "container", "=", "tornado", ".", "wsgi", ".", "WSGIContainer", "(", "application", ")", "self", ".", "xmlrpc_ioloop", "=", "tornado", ".", "ioloop", ".", "IOLoop", "(", ")", "self", ".", "xmlrpc_server", "=", "tornado", ".", "httpserver", ".", "HTTPServer", "(", "container", ",", "io_loop", "=", "self", ".", "xmlrpc_ioloop", ")", "self", ".", "xmlrpc_server", ".", "listen", "(", "port", "=", "port", ",", "address", "=", "bind", ")", "logger", ".", "info", "(", "'fetcher.xmlrpc listening on %s:%s'", ",", "bind", ",", "port", ")", "self", ".", "xmlrpc_ioloop", ".", "start", "(", ")" ]
37.558824
0.00229
def parse_table(table_string, header, remove_rows=1):
    '''parse a table to json from a string, where a header is expected by
       default. Return a jsonified table.

       Parameters
       ==========
       table_string: the string table, ideally with a header
       header: header of expected table, must match dimension (number of columns)
       remove_rows: an integer indicating the number of rows to remove from the
                    top; the default is 1, assuming we don't want the header
    '''
    rows = [x for x in table_string.split('\n') if x]
    rows = rows[0+remove_rows:]

    # Parse into json dictionary
    parsed = []
    for row in rows:
        item = {}
        # This assumes no white spaces in each entry, which should be the case
        row = [x for x in row.split(' ') if x]
        for e in range(len(row)):
            item[header[e]] = row[e]
        parsed.append(item)

    return parsed
[ "def", "parse_table", "(", "table_string", ",", "header", ",", "remove_rows", "=", "1", ")", ":", "rows", "=", "[", "x", "for", "x", "in", "table_string", ".", "split", "(", "'\\n'", ")", "if", "x", "]", "rows", "=", "rows", "[", "0", "+", "remove_rows", ":", "]", "# Parse into json dictionary", "parsed", "=", "[", "]", "for", "row", "in", "rows", ":", "item", "=", "{", "}", "# This assumes no white spaces in each entry, which should be the case", "row", "=", "[", "x", "for", "x", "in", "row", ".", "split", "(", "' '", ")", "if", "x", "]", "for", "e", "in", "range", "(", "len", "(", "row", ")", ")", ":", "item", "[", "header", "[", "e", "]", "]", "=", "row", "[", "e", "]", "parsed", ".", "append", "(", "item", ")", "return", "parsed" ]
35.96
0.002167
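A minimal, self-contained sketch of calling parse_table on a whitespace-separated table; the sample table and header names below are made up for illustration:

raw = """NAME    STATE     RESTARTS
web     Running   0
worker  Pending   2
"""

rows = parse_table(raw, header=['name', 'state', 'restarts'])
print(rows)
# [{'name': 'web', 'state': 'Running', 'restarts': '0'},
#  {'name': 'worker', 'state': 'Pending', 'restarts': '2'}]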
def get_vm_list(self):
    """Get the list of guests that are created by SDK.
    Return the userid list."""
    action = "list all guests in database"
    with zvmutils.log_and_reraise_sdkbase_error(action):
        guests_in_db = self._GuestDbOperator.get_guest_list()
        guests_migrated = self._GuestDbOperator.get_migrated_guest_list()

    # db query return value in tuple (uuid, userid, metadata, comments)
    userids_in_db = [g[1].upper() for g in guests_in_db]
    userids_migrated = [g[1].upper() for g in guests_migrated]
    userid_list = list(set(userids_in_db) - set(userids_migrated))

    return userid_list
[ "def", "get_vm_list", "(", "self", ")", ":", "action", "=", "\"list all guests in database\"", "with", "zvmutils", ".", "log_and_reraise_sdkbase_error", "(", "action", ")", ":", "guests_in_db", "=", "self", ".", "_GuestDbOperator", ".", "get_guest_list", "(", ")", "guests_migrated", "=", "self", ".", "_GuestDbOperator", ".", "get_migrated_guest_list", "(", ")", "# db query return value in tuple (uuid, userid, metadata, comments)", "userids_in_db", "=", "[", "g", "[", "1", "]", ".", "upper", "(", ")", "for", "g", "in", "guests_in_db", "]", "userids_migrated", "=", "[", "g", "[", "1", "]", ".", "upper", "(", ")", "for", "g", "in", "guests_migrated", "]", "userid_list", "=", "list", "(", "set", "(", "userids_in_db", ")", "-", "set", "(", "userids_migrated", ")", ")", "return", "userid_list" ]
46.642857
0.003003
def IIR_filter_design(CentralFreq, bandwidth, transitionWidth, SampleFreq, GainStop=40, GainPass=0.01):
    """
    Function to calculate the coefficients of an IIR filter.

    IMPORTANT NOTE: make_butterworth_bandpass_b_a and make_butterworth_b_a
    can produce IIR filters with higher sample rates and are preferable
    due to this.

    Parameters
    ----------
    CentralFreq : float
        Central frequency of the IIR filter to be designed
    bandwidth : float
        The width of the passband to be created about the central frequency
    transitionWidth : float
        The width of the transition band between the pass-band and stop-band
    SampleFreq : float
        The sample frequency (rate) of the data to be filtered
    GainStop : float, optional
        The dB of attenuation within the stopband (i.e. outside the passband)
    GainPass : float, optional
        The dB attenuation inside the passband (ideally close to 0 for a bandpass filter)

    Returns
    -------
    b : ndarray
        coefficients multiplying the current and past inputs (feedforward coefficients)
    a : ndarray
        coefficients multiplying the past outputs (feedback coefficients)
    """
    NyquistFreq = SampleFreq / 2
    if (CentralFreq + bandwidth / 2 + transitionWidth > NyquistFreq):
        raise ValueError(
            "Need a higher Sample Frequency for this Central Freq, Bandwidth and transition Width")

    CentralFreqNormed = CentralFreq / NyquistFreq
    bandwidthNormed = bandwidth / NyquistFreq
    transitionWidthNormed = transitionWidth / NyquistFreq
    bandpass = [CentralFreqNormed - bandwidthNormed / 2,
                CentralFreqNormed + bandwidthNormed / 2]
    bandstop = [CentralFreqNormed - bandwidthNormed / 2 - transitionWidthNormed,
                CentralFreqNormed + bandwidthNormed / 2 + transitionWidthNormed]
    print(bandpass, bandstop)
    b, a = scipy.signal.iirdesign(bandpass, bandstop, GainPass, GainStop)
    return b, a
[ "def", "IIR_filter_design", "(", "CentralFreq", ",", "bandwidth", ",", "transitionWidth", ",", "SampleFreq", ",", "GainStop", "=", "40", ",", "GainPass", "=", "0.01", ")", ":", "NyquistFreq", "=", "SampleFreq", "/", "2", "if", "(", "CentralFreq", "+", "bandwidth", "/", "2", "+", "transitionWidth", ">", "NyquistFreq", ")", ":", "raise", "ValueError", "(", "\"Need a higher Sample Frequency for this Central Freq, Bandwidth and transition Width\"", ")", "CentralFreqNormed", "=", "CentralFreq", "/", "NyquistFreq", "bandwidthNormed", "=", "bandwidth", "/", "NyquistFreq", "transitionWidthNormed", "=", "transitionWidth", "/", "NyquistFreq", "bandpass", "=", "[", "CentralFreqNormed", "-", "bandwidthNormed", "/", "2", ",", "CentralFreqNormed", "+", "bandwidthNormed", "/", "2", "]", "bandstop", "=", "[", "CentralFreqNormed", "-", "bandwidthNormed", "/", "2", "-", "transitionWidthNormed", ",", "CentralFreqNormed", "+", "bandwidthNormed", "/", "2", "+", "transitionWidthNormed", "]", "print", "(", "bandpass", ",", "bandstop", ")", "b", ",", "a", "=", "scipy", ".", "signal", ".", "iirdesign", "(", "bandpass", ",", "bandstop", ",", "GainPass", ",", "GainStop", ")", "return", "b", ",", "a" ]
43.977273
0.004044
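A usage sketch, assuming scipy is importable as in the module above; the frequencies are arbitrary example values chosen to satisfy the Nyquist check:

import numpy as np
import scipy.signal

# Band-pass around 75 kHz for data sampled at 1 MHz (all values are example inputs).
b, a = IIR_filter_design(CentralFreq=75e3, bandwidth=10e3,
                         transitionWidth=10e3, SampleFreq=1e6)

# Apply the resulting filter to a noisy test signal.
t = np.arange(0, 1e-3, 1e-6)
signal = np.sin(2 * np.pi * 75e3 * t) + np.random.normal(size=t.size)
filtered = scipy.signal.lfilter(b, a, signal)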
def get_uploadflags(self, location): """Return uploadflags for the given server. """ uploadflags = [] server = self.defaults.servers[location] if self.sign: uploadflags.append('--sign') elif server.sign is not None: if server.sign: uploadflags.append('--sign') elif self.defaults.sign: uploadflags.append('--sign') if self.identity: if '--sign' not in uploadflags: uploadflags.append('--sign') uploadflags.append('--identity="%s"' % self.identity) elif '--sign' in uploadflags: if server.identity is not None: if server.identity: uploadflags.append('--identity="%s"' % server.identity) elif self.defaults.identity: uploadflags.append('--identity="%s"' % self.defaults.identity) return uploadflags
[ "def", "get_uploadflags", "(", "self", ",", "location", ")", ":", "uploadflags", "=", "[", "]", "server", "=", "self", ".", "defaults", ".", "servers", "[", "location", "]", "if", "self", ".", "sign", ":", "uploadflags", ".", "append", "(", "'--sign'", ")", "elif", "server", ".", "sign", "is", "not", "None", ":", "if", "server", ".", "sign", ":", "uploadflags", ".", "append", "(", "'--sign'", ")", "elif", "self", ".", "defaults", ".", "sign", ":", "uploadflags", ".", "append", "(", "'--sign'", ")", "if", "self", ".", "identity", ":", "if", "'--sign'", "not", "in", "uploadflags", ":", "uploadflags", ".", "append", "(", "'--sign'", ")", "uploadflags", ".", "append", "(", "'--identity=\"%s\"'", "%", "self", ".", "identity", ")", "elif", "'--sign'", "in", "uploadflags", ":", "if", "server", ".", "identity", "is", "not", "None", ":", "if", "server", ".", "identity", ":", "uploadflags", ".", "append", "(", "'--identity=\"%s\"'", "%", "server", ".", "identity", ")", "elif", "self", ".", "defaults", ".", "identity", ":", "uploadflags", ".", "append", "(", "'--identity=\"%s\"'", "%", "self", ".", "defaults", ".", "identity", ")", "return", "uploadflags" ]
35.461538
0.002112
def __recognize_scalar(self, node: yaml.Node, expected_type: Type) -> RecResult: """Recognize a node that we expect to be a scalar. Args: node: The node to recognize. expected_type: The type it is expected to be. Returns: A list of recognized types and an error message """ logger.debug('Recognizing as a scalar') if (isinstance(node, yaml.ScalarNode) and node.tag == scalar_type_to_tag[expected_type]): return [expected_type], '' message = 'Failed to recognize a {}\n{}\n'.format( type_to_desc(expected_type), node.start_mark) return [], message
[ "def", "__recognize_scalar", "(", "self", ",", "node", ":", "yaml", ".", "Node", ",", "expected_type", ":", "Type", ")", "->", "RecResult", ":", "logger", ".", "debug", "(", "'Recognizing as a scalar'", ")", "if", "(", "isinstance", "(", "node", ",", "yaml", ".", "ScalarNode", ")", "and", "node", ".", "tag", "==", "scalar_type_to_tag", "[", "expected_type", "]", ")", ":", "return", "[", "expected_type", "]", ",", "''", "message", "=", "'Failed to recognize a {}\\n{}\\n'", ".", "format", "(", "type_to_desc", "(", "expected_type", ")", ",", "node", ".", "start_mark", ")", "return", "[", "]", ",", "message" ]
38.777778
0.004196
def dimensions(self, *dimensions): """ Add a list of Dimension ingredients to the query. These can either be Dimension objects or strings representing dimensions on the shelf. The Dimension expression will be added to the query's select statement and to the group_by. :param dimensions: Dimensions to add to the recipe. Dimensions can either be keys on the ``shelf`` or Dimension objects :type dimensions: list """ for d in dimensions: self._cauldron.use(self._shelf.find(d, Dimension)) self.dirty = True return self
[ "def", "dimensions", "(", "self", ",", "*", "dimensions", ")", ":", "for", "d", "in", "dimensions", ":", "self", ".", "_cauldron", ".", "use", "(", "self", ".", "_shelf", ".", "find", "(", "d", ",", "Dimension", ")", ")", "self", ".", "dirty", "=", "True", "return", "self" ]
37.941176
0.004539
def query_params(self, value=None): """ Return or set a dictionary of query params :param dict value: new dictionary of values """ if value is not None: return URL._mutate(self, query=unicode_urlencode(value, doseq=True)) query = '' if self._tuple.query is None else self._tuple.query # In Python 2.6, urlparse needs a bytestring so we encode and then # decode the result. if not six.PY3: result = parse_qs(to_utf8(query), True) return dict_to_unicode(result) return parse_qs(query, True)
[ "def", "query_params", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "return", "URL", ".", "_mutate", "(", "self", ",", "query", "=", "unicode_urlencode", "(", "value", ",", "doseq", "=", "True", ")", ")", "query", "=", "''", "if", "self", ".", "_tuple", ".", "query", "is", "None", "else", "self", ".", "_tuple", ".", "query", "# In Python 2.6, urlparse needs a bytestring so we encode and then", "# decode the result.", "if", "not", "six", ".", "PY3", ":", "result", "=", "parse_qs", "(", "to_utf8", "(", "query", ")", ",", "True", ")", "return", "dict_to_unicode", "(", "result", ")", "return", "parse_qs", "(", "query", ",", "True", ")" ]
34.764706
0.004942
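A hypothetical usage sketch, assuming this method lives on a purl-style URL class; note that, as with urllib.parse.parse_qs, every value comes back as a list:

url = URL('https://example.com/search?q=python&page=2')

print(url.query_params())
# {'q': ['python'], 'page': ['2']}

# Passing a dict returns a *new* URL via URL._mutate; the original is unchanged.
new_url = url.query_params({'q': ['rust']})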
def add(name, function_name, cron): """ Create an event """ lambder.add_event(name=name, function_name=function_name, cron=cron)
[ "def", "add", "(", "name", ",", "function_name", ",", "cron", ")", ":", "lambder", ".", "add_event", "(", "name", "=", "name", ",", "function_name", "=", "function_name", ",", "cron", "=", "cron", ")" ]
44.666667
0.007353
def polypeptide_vector(p, start_index=0, end_index=-1, unit=True):
    """Vector along the Chain primitive (default is from N-terminus to C-terminus).

    Notes
    -----
    `start_index` and `end_index` can be changed to examine smaller
    sections of the Chain, or reversed to change the direction of the
    vector.

    Parameters
    ----------
    p : ampal.Polymer
        Reference `Polymer`.
    start_index : int, optional
        Default is 0 (start at the N-terminus of the Chain)
    end_index : int, optional
        Default is -1 (end at the C-terminus of the Chain)
    unit : bool
        If True, the vector returned has a magnitude of 1.

    Returns
    -------
    vector : a numpy.array
        vector has shape (3,)
    """
    if len(p) <= 1:
        raise ValueError(
            "Polymer should have length greater than 1. Polymer length = {0}".format(len(p)))
    try:
        prim_cas = p.primitive.coordinates
        direction_vector = prim_cas[end_index] - prim_cas[start_index]
    except ValueError:
        direction_vector = p[end_index]['CA'].array - \
            p[start_index]['CA'].array
    if unit:
        direction_vector = unit_vector(direction_vector)
    return direction_vector
[ "def", "polypeptide_vector", "(", "p", ",", "start_index", "=", "0", ",", "end_index", "=", "-", "1", ",", "unit", "=", "True", ")", ":", "if", "len", "(", "p", ")", "<=", "1", ":", "raise", "ValueError", "(", "\"Polymer should have length greater than 1. Polymer length = {0}\"", ".", "format", "(", "len", "(", "p", ")", ")", ")", "try", ":", "prim_cas", "=", "p", ".", "primitive", ".", "coordinates", "direction_vector", "=", "prim_cas", "[", "end_index", "]", "-", "prim_cas", "[", "start_index", "]", "except", "ValueError", ":", "direction_vector", "=", "p", "[", "end_index", "]", "[", "'CA'", "]", ".", "array", "-", "p", "[", "start_index", "]", "[", "'CA'", "]", ".", "array", "if", "unit", ":", "direction_vector", "=", "unit_vector", "(", "direction_vector", ")", "return", "direction_vector" ]
32.324324
0.002435
def del_svc_comment(self, comment_id): """Delete a service comment Format of the line that triggers function call:: DEL_SVC_COMMENT;<comment_id> :param comment_id: comment id to delete :type comment_id: int :return: None """ for svc in self.daemon.services: if comment_id in svc.comments: svc.del_comment(comment_id) self.send_an_element(svc.get_update_status_brok()) break else: self.send_an_element(make_monitoring_log( 'warning', 'DEL_SVC_COMMENT: comment id: %s does not exist ' 'and cannot be deleted.' % comment_id))
[ "def", "del_svc_comment", "(", "self", ",", "comment_id", ")", ":", "for", "svc", "in", "self", ".", "daemon", ".", "services", ":", "if", "comment_id", "in", "svc", ".", "comments", ":", "svc", ".", "del_comment", "(", "comment_id", ")", "self", ".", "send_an_element", "(", "svc", ".", "get_update_status_brok", "(", ")", ")", "break", "else", ":", "self", ".", "send_an_element", "(", "make_monitoring_log", "(", "'warning'", ",", "'DEL_SVC_COMMENT: comment id: %s does not exist '", "'and cannot be deleted.'", "%", "comment_id", ")", ")" ]
36.473684
0.002813
def onTagDel(self, name, func): ''' Register a callback for tag deletion. Args: name (str): The name of the tag or tag glob. func (function): The callback func(node, tagname, tagval). ''' if '*' in name: self.ontagdelglobs.add(name, func) else: self.ontagdels[name].append(func)
[ "def", "onTagDel", "(", "self", ",", "name", ",", "func", ")", ":", "if", "'*'", "in", "name", ":", "self", ".", "ontagdelglobs", ".", "add", "(", "name", ",", "func", ")", "else", ":", "self", ".", "ontagdels", "[", "name", "]", ".", "append", "(", "func", ")" ]
28
0.005319
def setProgressColor( self, color ):
    """
    Sets the color for the progress bar of this item.

    :param      color | <QColor>
    """
    self._progressColor = QColor(color)
    self.setAlternateProgressColor(self._progressColor.darker(110))
[ "def", "setProgressColor", "(", "self", ",", "color", ")", ":", "self", ".", "_progressColor", "=", "QColor", "(", "color", ")", "self", ".", "setAlternateProgressColor", "(", "self", ".", "_progressColor", ".", "darker", "(", "110", ")", ")" ]
35.75
0.017065
def validate_ok_for_update(update): """Validate an update document.""" validate_is_mapping("update", update) # Update can not be {} if not update: raise ValueError('update only works with $ operators') first = next(iter(update)) if not first.startswith('$'): raise ValueError('update only works with $ operators')
[ "def", "validate_ok_for_update", "(", "update", ")", ":", "validate_is_mapping", "(", "\"update\"", ",", "update", ")", "# Update can not be {}", "if", "not", "update", ":", "raise", "ValueError", "(", "'update only works with $ operators'", ")", "first", "=", "next", "(", "iter", "(", "update", ")", ")", "if", "not", "first", ".", "startswith", "(", "'$'", ")", ":", "raise", "ValueError", "(", "'update only works with $ operators'", ")" ]
38.333333
0.002833
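The two failure modes are easy to see in a sketch (assuming validate_is_mapping accepts any dict):

validate_ok_for_update({'$set': {'age': 30}})    # OK: first key is a $ operator

try:
    validate_ok_for_update({})                   # empty update document
except ValueError as exc:
    print(exc)                                   # update only works with $ operators

try:
    validate_ok_for_update({'age': 30})          # replacement-style document
except ValueError as exc:
    print(exc)                                   # update only works with $ operators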
def _get_nodes_from_symbol(sym):
    """Given a symbol, return a list of `NodeDef`s for visualizing
    the graph in TensorBoard."""
    if not isinstance(sym, Symbol):
        raise TypeError('sym must be an `mxnet.symbol.Symbol`,'
                        ' received type {}'.format(str(type(sym))))
    conf = json.loads(sym.tojson())
    nodes = conf['nodes']
    data2op = {}  # key: data id, value: list of ops to whom data is an input
    for i, node in enumerate(nodes):
        if node['op'] != 'null':  # node is an operator
            input_list = node['inputs']
            for idx in input_list:
                if idx[0] == 0:  # do not include 'data' node in the op scope
                    continue
                if idx[0] in data2op:  # nodes[idx[0]] is a data as an input to op nodes[i]
                    data2op[idx[0]].append(i)
                else:
                    data2op[idx[0]] = [i]

    # In the following, we group data with operators they belong to
    # by attaching them with operator names as scope names.
    # The parameters with the operator name as the prefix will be
    # assigned with the scope name of that operator. For example,
    # a convolution op has name 'conv', while its weight and bias
    # have name 'conv_weight' and 'conv_bias'. In the end, the operator
    # has scope name 'conv' prepended to its name, i.e. 'conv/conv'.
    # The parameters are named 'conv/conv_weight' and 'conv/conv_bias'.
    node_defs = []
    for i, node in enumerate(nodes):
        node_name = node['name']
        op_name = node['op']
        kwargs = {'op': op_name, 'name': node_name}
        if op_name != 'null':  # node is an operator
            inputs = []
            input_list = node['inputs']
            for idx in input_list:
                input_node = nodes[idx[0]]
                input_node_name = input_node['name']
                if input_node['op'] != 'null':
                    inputs.append(_scoped_name(input_node_name, input_node_name))
                elif idx[0] in data2op and len(data2op[idx[0]]) == 1 and data2op[idx[0]][0] == i:
                    # the data is only as an input to nodes[i], no else
                    inputs.append(_scoped_name(node_name, input_node_name))
                else:  # the data node has no scope name, e.g. 'data' as the input node
                    inputs.append(input_node_name)
            kwargs['input'] = inputs
            kwargs['name'] = _scoped_name(node_name, node_name)
        elif i in data2op and len(data2op[i]) == 1:
            # node is a data node belonging to one op, find out which operator this node belongs to
            op_node_name = nodes[data2op[i][0]]['name']
            kwargs['name'] = _scoped_name(op_node_name, node_name)

        if 'attrs' in node:
            # TensorBoard would escape quotation marks, replace it with space
            attr = json.dumps(node['attrs'], sort_keys=True).replace("\"", ' ')
            attr = {'param': AttrValue(s=attr.encode(encoding='utf-8'))}
            kwargs['attr'] = attr
        node_def = NodeDef(**kwargs)
        node_defs.append(node_def)
    return node_defs
[ "def", "_get_nodes_from_symbol", "(", "sym", ")", ":", "if", "not", "isinstance", "(", "sym", ",", "Symbol", ")", ":", "raise", "TypeError", "(", "'sym must be an `mxnet.symbol.Symbol`,'", "' received type {}'", ".", "format", "(", "str", "(", "type", "(", "sym", ")", ")", ")", ")", "conf", "=", "json", ".", "loads", "(", "sym", ".", "tojson", "(", ")", ")", "nodes", "=", "conf", "[", "'nodes'", "]", "data2op", "=", "{", "}", "# key: data id, value: list of ops to whom data is an input", "for", "i", ",", "node", "in", "enumerate", "(", "nodes", ")", ":", "if", "node", "[", "'op'", "]", "!=", "'null'", ":", "# node is an operator", "input_list", "=", "node", "[", "'inputs'", "]", "for", "idx", "in", "input_list", ":", "if", "idx", "[", "0", "]", "==", "0", ":", "# do not include 'data' node in the op scope", "continue", "if", "idx", "[", "0", "]", "in", "data2op", ":", "# nodes[idx[0]] is a data as an input to op nodes[i]", "data2op", "[", "idx", "[", "0", "]", "]", ".", "append", "(", "i", ")", "else", ":", "data2op", "[", "idx", "[", "0", "]", "]", "=", "[", "i", "]", "# In the following, we group data with operators they belong to", "# by attaching them with operator names as scope names.", "# The parameters with the operator name as the prefix will be", "# assigned with the scope name of that operator. For example,", "# a convolution op has name 'conv', while its weight and bias", "# have name 'conv_weight' and 'conv_bias'. In the end, the operator", "# has scope name 'conv' prepended to its name, i.e. 'conv/conv'.", "# The parameters are named 'conv/conv_weight' and 'conv/conv_bias'.", "node_defs", "=", "[", "]", "for", "i", ",", "node", "in", "enumerate", "(", "nodes", ")", ":", "node_name", "=", "node", "[", "'name'", "]", "op_name", "=", "node", "[", "'op'", "]", "kwargs", "=", "{", "'op'", ":", "op_name", ",", "'name'", ":", "node_name", "}", "if", "op_name", "!=", "'null'", ":", "# node is an operator", "inputs", "=", "[", "]", "input_list", "=", "node", "[", "'inputs'", "]", "for", "idx", "in", "input_list", ":", "input_node", "=", "nodes", "[", "idx", "[", "0", "]", "]", "input_node_name", "=", "input_node", "[", "'name'", "]", "if", "input_node", "[", "'op'", "]", "!=", "'null'", ":", "inputs", ".", "append", "(", "_scoped_name", "(", "input_node_name", ",", "input_node_name", ")", ")", "elif", "idx", "[", "0", "]", "in", "data2op", "and", "len", "(", "data2op", "[", "idx", "[", "0", "]", "]", ")", "==", "1", "and", "data2op", "[", "idx", "[", "0", "]", "]", "[", "0", "]", "==", "i", ":", "# the data is only as an input to nodes[i], no else", "inputs", ".", "append", "(", "_scoped_name", "(", "node_name", ",", "input_node_name", ")", ")", "else", ":", "# the data node has no scope name, e.g. 
'data' as the input node", "inputs", ".", "append", "(", "input_node_name", ")", "kwargs", "[", "'input'", "]", "=", "inputs", "kwargs", "[", "'name'", "]", "=", "_scoped_name", "(", "node_name", ",", "node_name", ")", "elif", "i", "in", "data2op", "and", "len", "(", "data2op", "[", "i", "]", ")", "==", "1", ":", "# node is a data node belonging to one op, find out which operator this node belongs to", "op_node_name", "=", "nodes", "[", "data2op", "[", "i", "]", "[", "0", "]", "]", "[", "'name'", "]", "kwargs", "[", "'name'", "]", "=", "_scoped_name", "(", "op_node_name", ",", "node_name", ")", "if", "'attrs'", "in", "node", ":", "# TensorBoard would escape quotation marks, replace it with space", "attr", "=", "json", ".", "dumps", "(", "node", "[", "'attrs'", "]", ",", "sort_keys", "=", "True", ")", ".", "replace", "(", "\"\\\"\"", ",", "' '", ")", "attr", "=", "{", "'param'", ":", "AttrValue", "(", "s", "=", "attr", ".", "encode", "(", "encoding", "=", "'utf-8'", ")", ")", "}", "kwargs", "[", "'attr'", "]", "=", "attr", "node_def", "=", "NodeDef", "(", "*", "*", "kwargs", ")", "node_defs", ".", "append", "(", "node_def", ")", "return", "node_defs" ]
50.403226
0.001883
def construct_schema_validators(schema, context):
    """
    Given a schema object, construct a dictionary of validators needed to
    validate a response matching the given schema.

    Special Cases:
        - $ref:
            These validators need to be lazily evaluated so that circular
            validation dependencies do not result in an infinitely deep
            validation chain.
        - properties:
            These validators are meant to apply to properties of the object
            being validated rather than the object itself.  In this case, we
            need to recurse back into this function to generate a dictionary
            of validators for the property.
    """
    validators = ValidationDict()
    if '$ref' in schema:
        validators.add_validator(
            '$ref', SchemaReferenceValidator(schema['$ref'], context),
        )
    if 'properties' in schema:
        for property_, property_schema in schema['properties'].items():
            property_validator = generate_object_validator(
                schema=property_schema,
                context=context,
            )
            validators.add_property_validator(property_, property_validator)
    if schema.get('additionalProperties') is False:
        validators.add_validator(
            'additionalProperties',
            generate_additional_properties_validator(context=context, **schema),
        )
    assert 'context' not in schema
    for key in schema:
        if key in validator_mapping:
            validators.add_validator(key, validator_mapping[key](context=context, **schema))
    return validators
[ "def", "construct_schema_validators", "(", "schema", ",", "context", ")", ":", "validators", "=", "ValidationDict", "(", ")", "if", "'$ref'", "in", "schema", ":", "validators", ".", "add_validator", "(", "'$ref'", ",", "SchemaReferenceValidator", "(", "schema", "[", "'$ref'", "]", ",", "context", ")", ",", ")", "if", "'properties'", "in", "schema", ":", "for", "property_", ",", "property_schema", "in", "schema", "[", "'properties'", "]", ".", "items", "(", ")", ":", "property_validator", "=", "generate_object_validator", "(", "schema", "=", "property_schema", ",", "context", "=", "context", ",", ")", "validators", ".", "add_property_validator", "(", "property_", ",", "property_validator", ")", "if", "schema", ".", "get", "(", "'additionalProperties'", ")", "is", "False", ":", "validators", ".", "add_validator", "(", "'additionalProperties'", ",", "generate_additional_properties_validator", "(", "context", "=", "context", ",", "*", "*", "schema", ")", ",", ")", "assert", "'context'", "not", "in", "schema", "for", "key", "in", "schema", ":", "if", "key", "in", "validator_mapping", ":", "validators", ".", "add_validator", "(", "key", ",", "validator_mapping", "[", "key", "]", "(", "context", "=", "context", ",", "*", "*", "schema", ")", ")", "return", "validators" ]
41.657895
0.001852
def set_plugin_filepaths(self, filepaths, except_blacklisted=True): """ Sets `filepaths` to the `self.plugin_filepaths`. Recommend passing in absolute filepaths. Method will attempt to convert to absolute paths if they are not already. `filepaths` can be a single object or an iterable. If `except_blacklisted` is `True`, all `filepaths` that have been blacklisted will not be set. """ filepaths = util.to_absolute_paths(filepaths) if except_blacklisted: filepaths = util.remove_from_set(filepaths, self.blacklisted_filepaths) self.plugin_filepaths = filepaths
[ "def", "set_plugin_filepaths", "(", "self", ",", "filepaths", ",", "except_blacklisted", "=", "True", ")", ":", "filepaths", "=", "util", ".", "to_absolute_paths", "(", "filepaths", ")", "if", "except_blacklisted", ":", "filepaths", "=", "util", ".", "remove_from_set", "(", "filepaths", ",", "self", ".", "blacklisted_filepaths", ")", "self", ".", "plugin_filepaths", "=", "filepaths" ]
40.705882
0.002825
def Write(self, output_writer): """Writes the table to output writer. Args: output_writer (CLIOutputWriter): output writer. """ # Round up the column sizes to the nearest tab. for column_index, column_size in enumerate(self._column_sizes): column_size, _ = divmod(column_size, self._NUMBER_OF_SPACES_IN_TAB) column_size = (column_size + 1) * self._NUMBER_OF_SPACES_IN_TAB self._column_sizes[column_index] = column_size if self._columns: self._WriteRow(output_writer, self._columns, in_bold=True) for values in self._rows: self._WriteRow(output_writer, values)
[ "def", "Write", "(", "self", ",", "output_writer", ")", ":", "# Round up the column sizes to the nearest tab.", "for", "column_index", ",", "column_size", "in", "enumerate", "(", "self", ".", "_column_sizes", ")", ":", "column_size", ",", "_", "=", "divmod", "(", "column_size", ",", "self", ".", "_NUMBER_OF_SPACES_IN_TAB", ")", "column_size", "=", "(", "column_size", "+", "1", ")", "*", "self", ".", "_NUMBER_OF_SPACES_IN_TAB", "self", ".", "_column_sizes", "[", "column_index", "]", "=", "column_size", "if", "self", ".", "_columns", ":", "self", ".", "_WriteRow", "(", "output_writer", ",", "self", ".", "_columns", ",", "in_bold", "=", "True", ")", "for", "values", "in", "self", ".", "_rows", ":", "self", ".", "_WriteRow", "(", "output_writer", ",", "values", ")" ]
35.882353
0.009585
def RFC3156_micalg_from_algo(hash_algo):
    """
    Converts a GPGME hash algorithm name to one conforming to RFC3156.

    GPGME returns hash algorithm names such as "SHA256", but RFC3156 says that
    programs need to use names such as "pgp-sha256" instead.

    :param str hash_algo: GPGME hash_algo
    :returns: the lowercase name of the algorithm with "pgp-" prepended
    :rtype: str
    """
    # hash_algo will be something like SHA256, but we need pgp-sha256.
    algo = gpg.core.hash_algo_name(hash_algo)
    if algo is None:
        raise GPGProblem('Unknown hash algorithm {}'.format(hash_algo),
                         code=GPGCode.INVALID_HASH_ALGORITHM)
    return 'pgp-' + algo.lower()
[ "def", "RFC3156_micalg_from_algo", "(", "hash_algo", ")", ":", "# hash_algo will be something like SHA256, but we need pgp-sha256.", "algo", "=", "gpg", ".", "core", ".", "hash_algo_name", "(", "hash_algo", ")", "if", "algo", "is", "None", ":", "raise", "GPGProblem", "(", "'Unknown hash algorithm {}'", ".", "format", "(", "hash_algo", ")", ",", "code", "=", "GPGCode", ".", "INVALID_HASH_ALGORITHM", ")", "return", "'pgp-'", "+", "algo", ".", "lower", "(", ")" ]
40.352941
0.001425
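A usage sketch; GPGME identifies hash algorithms numerically, and SHA-256 carries algorithm id 8 in GPGME (treat that id and the example below as assumptions to verify against your gpg bindings):

import gpg

print(gpg.core.hash_algo_name(8))    # 'SHA256' -- assuming 8 is GPGME's id for SHA-256
print(RFC3156_micalg_from_algo(8))   # 'pgp-sha256'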
def _versioned_lib_suffix(env, suffix, version):
    """Generate versioned shared library suffix from an unversioned one.
    If suffix='.dll', and version='0.1.2', then it returns '-0-1-2.dll'"""
    Verbose = False
    if Verbose:
        print("_versioned_lib_suffix: suffix= ", suffix)
        print("_versioned_lib_suffix: version= ", version)
    cygversion = re.sub('\.', '-', version)
    if not suffix.startswith('-' + cygversion):
        suffix = '-' + cygversion + suffix
    if Verbose:
        print("_versioned_lib_suffix: return suffix= ", suffix)
    return suffix
[ "def", "_versioned_lib_suffix", "(", "env", ",", "suffix", ",", "version", ")", ":", "Verbose", "=", "False", "if", "Verbose", ":", "print", "(", "\"_versioned_lib_suffix: suffix= \"", ",", "suffix", ")", "print", "(", "\"_versioned_lib_suffix: version= \"", ",", "version", ")", "cygversion", "=", "re", ".", "sub", "(", "'\\.'", ",", "'-'", ",", "version", ")", "if", "not", "suffix", ".", "startswith", "(", "'-'", "+", "cygversion", ")", ":", "suffix", "=", "'-'", "+", "cygversion", "+", "suffix", "if", "Verbose", ":", "print", "(", "\"_versioned_lib_suffix: return suffix= \"", ",", "suffix", ")", "return", "suffix" ]
43.923077
0.003431
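The docstring's own example, run as a sketch (the env argument is unused by the function body, so None works):

import re  # required by _versioned_lib_suffix

print(_versioned_lib_suffix(None, '.dll', '0.1.2'))        # '-0-1-2.dll'
# Already-versioned suffixes pass through unchanged:
print(_versioned_lib_suffix(None, '-0-1-2.dll', '0.1.2'))  # '-0-1-2.dll'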
def is_equal(self, another, limit=0.8):
    """
    Determine whether two simhashes are similar or not.

    :param another: another simhash.
    :param limit: a limit of the similarity.
    :return: True if the similarity is greater than the limit, False otherwise.
    """
    if another is None:
        raise Exception("Parameter another is null")

    if isinstance(another, int):
        distance = self.hamming_distance(another)
    elif isinstance(another, Simhash):
        assert self.hash_bit_number == another.hash_bit_number
        distance = self.hamming_distance(another.hash)
    else:
        raise Exception("Unsupported parameter type %s" % type(another))

    similarity = float(self.hash_bit_number - distance) / self.hash_bit_number
    if similarity > limit:
        return True
    return False
[ "def", "is_equal", "(", "self", ",", "another", ",", "limit", "=", "0.8", ")", ":", "if", "another", "is", "None", ":", "raise", "Exception", "(", "\"Parameter another is null\"", ")", "if", "isinstance", "(", "another", ",", "int", ")", ":", "distance", "=", "self", ".", "hamming_distance", "(", "another", ")", "elif", "isinstance", "(", "another", ",", "Simhash", ")", ":", "assert", "self", ".", "hash_bit_number", "==", "another", ".", "hash_bit_number", "distance", "=", "self", ".", "hamming_distance", "(", "another", ".", "hash", ")", "else", ":", "raise", "Exception", "(", "\"Unsupported parameter type %s\"", "%", "type", "(", "another", ")", ")", "similarity", "=", "float", "(", "self", ".", "hash_bit_number", "-", "distance", ")", "/", "self", ".", "hash_bit_number", "if", "similarity", ">", "limit", ":", "return", "True", "return", "False" ]
37.913043
0.004474
def find_first_wt_parent(self, with_ip=False):
    """
    Recursively looks at the part_of parent ancestry line (ignoring pooled_from parents) and
    returns a parent Biosample ID if its wild_type attribute is True.

    Args:
        with_ip: `bool`. True means to restrict the search to the first parental Wild Type
            that also has an Immunoblot linked to it, which may serve as a control for
            comparison with another Immunoblot. For example, it could be useful to compare
            the target protein bands in Immunoblots between a Wild Type sample and a CRISPR
            eGFP-tagged gene in a descendent sample.

    Returns:
        `False`: There isn't a WT parent, or there is but not one with an Immunoblot linked
            to it (if the `with_ip` parameter is set to True).
        `int`: The ID of the WT parent.
    """
    parent_id = self.part_of_id
    if not parent_id:
        return False
    parent = Biosample(parent_id)
    if parent.wild_type:
        if with_ip and parent.immunoblot_ids:
            return parent.id
        elif not with_ip:
            return parent.id
    return parent.find_first_wt_parent(with_ip=with_ip)
[ "def", "find_first_wt_parent", "(", "self", ",", "with_ip", "=", "False", ")", ":", "parent_id", "=", "self", ".", "part_of_id", "if", "not", "parent_id", ":", "return", "False", "parent", "=", "Biosample", "(", "parent_id", ")", "if", "parent", ".", "wild_type", ":", "if", "with_ip", "and", "parent", ".", "immunoblot_ids", ":", "return", "parent", ".", "id", "elif", "not", "with_ip", ":", "return", "parent", ".", "id", "return", "parent", ".", "find_first_wt_parent", "(", "with_ip", "=", "with_ip", ")" ]
46.074074
0.011811
def numYields(self): """Extract numYields counter if available (lazy).""" if not self._counters_calculated: self._counters_calculated = True self._extract_counters() return self._numYields
[ "def", "numYields", "(", "self", ")", ":", "if", "not", "self", ".", "_counters_calculated", ":", "self", ".", "_counters_calculated", "=", "True", "self", ".", "_extract_counters", "(", ")", "return", "self", ".", "_numYields" ]
33
0.008439
def count_items(self):
    """Counts items in full_soup and soup. For debugging."""
    soup_items = self.soup.findAll('item')
    full_soup_items = self.full_soup.findAll('item')
    return len(soup_items), len(full_soup_items)
[ "def", "count_items", "(", "self", ")", ":", "soup_items", "=", "self", ".", "soup", ".", "findAll", "(", "'item'", ")", "full_soup_items", "=", "self", ".", "full_soup", ".", "findAll", "(", "'item'", ")", "return", "len", "(", "soup_items", ")", ",", "len", "(", "full_soup_items", ")" ]
47.8
0.00823
def remove_duplicates(self, ws): """When this analysis is unassigned from a worksheet, this function is responsible for deleting DuplicateAnalysis objects from the ws. """ for analysis in ws.objectValues(): if IDuplicateAnalysis.providedBy(analysis) \ and analysis.getAnalysis().UID() == self.UID(): ws.removeAnalysis(analysis)
[ "def", "remove_duplicates", "(", "self", ",", "ws", ")", ":", "for", "analysis", "in", "ws", ".", "objectValues", "(", ")", ":", "if", "IDuplicateAnalysis", ".", "providedBy", "(", "analysis", ")", "and", "analysis", ".", "getAnalysis", "(", ")", ".", "UID", "(", ")", "==", "self", ".", "UID", "(", ")", ":", "ws", ".", "removeAnalysis", "(", "analysis", ")" ]
50
0.004914
def format_back( number: FormatArg, light: Optional[bool] = False, extended: Optional[bool] = False) -> str: """ Return an escape code for a back color, by number. This is a convenience method for handling the different code types all in one shot. It also handles some validation. """ return _format_code( number, backcolor=True, light=light, extended=extended )
[ "def", "format_back", "(", "number", ":", "FormatArg", ",", "light", ":", "Optional", "[", "bool", "]", "=", "False", ",", "extended", ":", "Optional", "[", "bool", "]", "=", "False", ")", "->", "str", ":", "return", "_format_code", "(", "number", ",", "backcolor", "=", "True", ",", "light", "=", "light", ",", "extended", "=", "extended", ")" ]
29.6
0.002183
def set_index(self, index): """Display the data of the given index :param index: the index to paint :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None """ item = index.internalPointer() note = item.internal_data() self.content_lb.setText(note.content) self.created_dte.setDateTime(dt_to_qdatetime(note.date_created)) self.updated_dte.setDateTime(dt_to_qdatetime(note.date_updated)) self.username_lb.setText(note.user.username)
[ "def", "set_index", "(", "self", ",", "index", ")", ":", "item", "=", "index", ".", "internalPointer", "(", ")", "note", "=", "item", ".", "internal_data", "(", ")", "self", ".", "content_lb", ".", "setText", "(", "note", ".", "content", ")", "self", ".", "created_dte", ".", "setDateTime", "(", "dt_to_qdatetime", "(", "note", ".", "date_created", ")", ")", "self", ".", "updated_dte", ".", "setDateTime", "(", "dt_to_qdatetime", "(", "note", ".", "date_updated", ")", ")", "self", ".", "username_lb", ".", "setText", "(", "note", ".", "user", ".", "username", ")" ]
36
0.00361
def complete(self, text, state): """Return the next possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'. """ if self.use_main_ns: self.namespace = __main__.__dict__ if not text.strip(): if state == 0: if _readline_available: readline.insert_text('\t') readline.redisplay() return '' else: return '\t' else: return None if state == 0: if "." in text: self.matches = self.attr_matches(text) else: self.matches = self.global_matches(text) try: return self.matches[state] except IndexError: return None
[ "def", "complete", "(", "self", ",", "text", ",", "state", ")", ":", "if", "self", ".", "use_main_ns", ":", "self", ".", "namespace", "=", "__main__", ".", "__dict__", "if", "not", "text", ".", "strip", "(", ")", ":", "if", "state", "==", "0", ":", "if", "_readline_available", ":", "readline", ".", "insert_text", "(", "'\\t'", ")", "readline", ".", "redisplay", "(", ")", "return", "''", "else", ":", "return", "'\\t'", "else", ":", "return", "None", "if", "state", "==", "0", ":", "if", "\".\"", "in", "text", ":", "self", ".", "matches", "=", "self", ".", "attr_matches", "(", "text", ")", "else", ":", "self", ".", "matches", "=", "self", ".", "global_matches", "(", "text", ")", "try", ":", "return", "self", ".", "matches", "[", "state", "]", "except", "IndexError", ":", "return", "None" ]
29.6
0.002181
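This is the classic rlcompleter-style completer; a sketch of wiring it into GNU readline, assuming the enclosing class is named Completer and its constructor defaults to completing against __main__'s namespace:

import readline

completer = Completer()                    # assumed: falls back to __main__.__dict__
readline.set_completer(completer.complete)
readline.parse_and_bind('tab: complete')   # bind Tab to completion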
def _stringlist(*args):
    """
    Take lists of strings, or individual strings, and flatten these into a
    single list of strings.

    Arguments:
    - `*args`: "" or [""...]

    Return: [""...]

    Exceptions: None
    """
    return list(itertools.chain.from_iterable(
        itertools.repeat(x, 1) if stringy(x) else x for x in args if x))
[ "def", "_stringlist", "(", "*", "args", ")", ":", "return", "list", "(", "itertools", ".", "chain", ".", "from_iterable", "(", "itertools", ".", "repeat", "(", "x", ",", "1", ")", "if", "stringy", "(", "x", ")", "else", "x", "for", "x", "in", "args", "if", "x", ")", ")" ]
25.833333
0.009346
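A sketch of the flattening behavior, assuming stringy(x) is a module helper that returns True for string types; note that falsy items (empty strings, empty lists) are dropped by the `if x` filter:

import itertools

stringy = lambda x: isinstance(x, str)  # stand-in for the module's helper

print(_stringlist('a', ['b', 'c'], '', 'd'))
# ['a', 'b', 'c', 'd']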
def ValidateTimezone(timezone, column_name=None, problems=None): """ Validates a non-required timezone string value using IsValidTimezone(): - if invalid adds InvalidValue error (if problems accumulator is provided) - an empty timezone string is regarded as valid! Otherwise we might end up with many duplicate errors because of the required field checks. """ if IsEmpty(timezone) or IsValidTimezone(timezone): return True else: if problems: # if we get here pytz has already been imported successfully in # IsValidTimezone(). So a try-except block is not needed here. import pytz problems.InvalidValue( column_name, timezone, '"%s" is not a common timezone name according to pytz version %s' % (timezone, pytz.VERSION)) return False
[ "def", "ValidateTimezone", "(", "timezone", ",", "column_name", "=", "None", ",", "problems", "=", "None", ")", ":", "if", "IsEmpty", "(", "timezone", ")", "or", "IsValidTimezone", "(", "timezone", ")", ":", "return", "True", "else", ":", "if", "problems", ":", "# if we get here pytz has already been imported successfully in", "# IsValidTimezone(). So a try-except block is not needed here.", "import", "pytz", "problems", ".", "InvalidValue", "(", "column_name", ",", "timezone", ",", "'\"%s\" is not a common timezone name according to pytz version %s'", "%", "(", "timezone", ",", "pytz", ".", "VERSION", ")", ")", "return", "False" ]
42.368421
0.009721
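A sketch of the three outcomes, assuming IsEmpty treats '' as empty and pytz is installed; without a problems accumulator the function simply returns False for a bad zone:

ValidateTimezone('')                      # True  -- empty values are deliberately accepted
ValidateTimezone('America/Los_Angeles')   # True  -- a common pytz timezone name
ValidateTimezone('Mars/Olympus_Mons')     # False -- invalid; no error reported without `problems`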
def set_file_path(self, path): """Update the file_path Entry widget""" self.file_path.delete(0, END) self.file_path.insert(0, path)
[ "def", "set_file_path", "(", "self", ",", "path", ")", ":", "self", ".", "file_path", ".", "delete", "(", "0", ",", "END", ")", "self", ".", "file_path", ".", "insert", "(", "0", ",", "path", ")" ]
38
0.012903
def clear_response(self, assessment_section_id, item_id): """Clears the response to an item The item appears as unanswered. If no response exists, the method simply returns. arg: assessment_section_id (osid.id.Id): ``Id`` of the ``AssessmentSection`` arg: item_id (osid.id.Id): ``Id`` of the ``Item`` raise: IllegalState - ``has_assessment_section_begun() is false or is_assessment_section_over() is true`` raise: NotFound - ``assessment_section_id or item_id is not found, or item_id not part of assessment_section_id`` raise: NullArgument - ``assessment_section_id or item_id is null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ if (not self.has_assessment_section_begun(assessment_section_id) or self.is_assessment_section_over(assessment_section_id)): raise errors.IllegalState() # Should probably check to see if responses can be cleared, but how? self.get_assessment_section(assessment_section_id).submit_response(item_id, None)
[ "def", "clear_response", "(", "self", ",", "assessment_section_id", ",", "item_id", ")", ":", "if", "(", "not", "self", ".", "has_assessment_section_begun", "(", "assessment_section_id", ")", "or", "self", ".", "is_assessment_section_over", "(", "assessment_section_id", ")", ")", ":", "raise", "errors", ".", "IllegalState", "(", ")", "# Should probably check to see if responses can be cleared, but how?", "self", ".", "get_assessment_section", "(", "assessment_section_id", ")", ".", "submit_response", "(", "item_id", ",", "None", ")" ]
52.083333
0.002357
def parse_buckets(self, bucket, params):
    """
    Parse a single S3 bucket

    TODO:
    - CORS
    - Lifecycle
    - Notification ?
    - Get bucket's policy

    :param bucket: Bucket dictionary as returned by the S3 API (must contain 'Name')
    :param params: Global run parameters, including the per-region API clients
    :return: None
    """
    bucket['name'] = bucket.pop('Name')
    api_client = params['api_clients'][get_s3_list_region(list(params['api_clients'].keys())[0])]

    bucket['CreationDate'] = str(bucket['CreationDate'])
    bucket['region'] = get_s3_bucket_location(api_client, bucket['name'])
    # h4ck :: fix issue #59, location constraint can be EU or eu-west-1 for Ireland...
    if bucket['region'] == 'EU':
        bucket['region'] = 'eu-west-1'
    # h4ck :: S3 is global but region-aware...
    if bucket['region'] not in params['api_clients']:
        printInfo('Skipping bucket %s (region %s outside of scope)' % (bucket['name'], bucket['region']))
        self.buckets_count -= 1
        return

    api_client = params['api_clients'][bucket['region']]
    get_s3_bucket_logging(api_client, bucket['name'], bucket)
    get_s3_bucket_versioning(api_client, bucket['name'], bucket)
    get_s3_bucket_webhosting(api_client, bucket['name'], bucket)
    get_s3_bucket_default_encryption(api_client, bucket['name'], bucket)
    bucket['grantees'] = get_s3_acls(api_client, bucket['name'], bucket)
    get_s3_bucket_policy(api_client, bucket['name'], bucket)
    get_s3_bucket_secure_transport(api_client, bucket['name'], bucket)
    # If requested, get key properties
    #if params['check_encryption'] or params['check_acls']:
    #    get_s3_bucket_keys(api_client, bucket['name'], bucket, params['check_encryption'],
    #                       params['check_acls'])
    bucket['id'] = self.get_non_aws_id(bucket['name'])
    self.buckets[bucket['id']] = bucket
[ "def", "parse_buckets", "(", "self", ",", "bucket", ",", "params", ")", ":", "bucket", "[", "'name'", "]", "=", "bucket", ".", "pop", "(", "'Name'", ")", "api_client", "=", "params", "[", "'api_clients'", "]", "[", "get_s3_list_region", "(", "list", "(", "params", "[", "'api_clients'", "]", ".", "keys", "(", ")", ")", "[", "0", "]", ")", "]", "bucket", "[", "'CreationDate'", "]", "=", "str", "(", "bucket", "[", "'CreationDate'", "]", ")", "bucket", "[", "'region'", "]", "=", "get_s3_bucket_location", "(", "api_client", ",", "bucket", "[", "'name'", "]", ")", "# h4ck :: fix issue #59, location constraint can be EU or eu-west-1 for Ireland...", "if", "bucket", "[", "'region'", "]", "==", "'EU'", ":", "bucket", "[", "'region'", "]", "=", "'eu-west-1'", "# h4ck :: S3 is global but region-aware...", "if", "bucket", "[", "'region'", "]", "not", "in", "params", "[", "'api_clients'", "]", ":", "printInfo", "(", "'Skipping bucket %s (region %s outside of scope)'", "%", "(", "bucket", "[", "'name'", "]", ",", "bucket", "[", "'region'", "]", ")", ")", "self", ".", "buckets_count", "-=", "1", "return", "api_client", "=", "params", "[", "'api_clients'", "]", "[", "bucket", "[", "'region'", "]", "]", "get_s3_bucket_logging", "(", "api_client", ",", "bucket", "[", "'name'", "]", ",", "bucket", ")", "get_s3_bucket_versioning", "(", "api_client", ",", "bucket", "[", "'name'", "]", ",", "bucket", ")", "get_s3_bucket_webhosting", "(", "api_client", ",", "bucket", "[", "'name'", "]", ",", "bucket", ")", "get_s3_bucket_default_encryption", "(", "api_client", ",", "bucket", "[", "'name'", "]", ",", "bucket", ")", "bucket", "[", "'grantees'", "]", "=", "get_s3_acls", "(", "api_client", ",", "bucket", "[", "'name'", "]", ",", "bucket", ")", "get_s3_bucket_policy", "(", "api_client", ",", "bucket", "[", "'name'", "]", ",", "bucket", ")", "get_s3_bucket_secure_transport", "(", "api_client", ",", "bucket", "[", "'name'", "]", ",", "bucket", ")", "# If requested, get key properties", "#if params['check_encryption'] or params['check_acls']:", "# get_s3_bucket_keys(api_client, bucket['name'], bucket, params['check_encryption'],", "# params['check_acls'])", "bucket", "[", "'id'", "]", "=", "self", ".", "get_non_aws_id", "(", "bucket", "[", "'name'", "]", ")", "self", ".", "buckets", "[", "bucket", "[", "'id'", "]", "]", "=", "bucket" ]
44.666667
0.003652
def map(self, **kwargs): ''' Change a name on the fly. Compat with kr/env. ''' return { key: str(self._envars[kwargs[key]]) # str strips Entry for key in kwargs }
[ "def", "map", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "{", "key", ":", "str", "(", "self", ".", "_envars", "[", "kwargs", "[", "key", "]", "]", ")", "# str strips Entry", "for", "key", "in", "kwargs", "}" ]
48.5
0.020305
def setposition(self, position):
    """
    The move format is in long algebraic notation.
    Takes a list of strings, e.g. ['e2e4', 'd7d5']
    OR a FEN string, e.g. 'rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1'
    """
    try:
        if isinstance(position, list):
            self.send('position startpos moves {}'.format(
                self.__listtostring(position)))
            self.isready()
        elif re.match('\s*^(((?:[rnbqkpRNBQKP1-8]+\/){7})[rnbqkpRNBQKP1-8]+)\s([b|w])\s([K|Q|k|q|-]{1,4})\s(-|[a-h][1-8])\s(\d+\s\d+)$', position):
            regexList = re.match('\s*^(((?:[rnbqkpRNBQKP1-8]+\/){7})[rnbqkpRNBQKP1-8]+)\s([b|w])\s([K|Q|k|q|-]{1,4})\s(-|[a-h][1-8])\s(\d+\s\d+)$', position).groups()
            fen = regexList[0].split("/")
            if len(fen) != 8:
                raise ValueError("expected 8 rows in position part of fen: {0}".format(repr(fen)))

            for fenPart in fen:
                field_sum = 0
                previous_was_digit, previous_was_piece = False, False

                for c in fenPart:
                    if c in ["1", "2", "3", "4", "5", "6", "7", "8"]:
                        if previous_was_digit:
                            raise ValueError("two subsequent digits in position part of fen: {0}".format(repr(fen)))
                        field_sum += int(c)
                        previous_was_digit = True
                        previous_was_piece = False
                    elif c == "~":
                        if not previous_was_piece:
                            raise ValueError("~ not after piece in position part of fen: {0}".format(repr(fen)))
                        previous_was_digit, previous_was_piece = False, False
                    elif c.lower() in ["p", "n", "b", "r", "q", "k"]:
                        field_sum += 1
                        previous_was_digit = False
                        previous_was_piece = True
                    else:
                        raise ValueError("invalid character in position part of fen: {0}".format(repr(fen)))

                if field_sum != 8:
                    raise ValueError("expected 8 columns per row in position part of fen: {0}".format(repr(fen)))

            self.send('position fen {}'.format(position))
            self.isready()
        else:
            raise ValueError("fen doesn`t match follow this example: rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1 ")
    except ValueError as e:
        print('\nCheck position correctness\n')
        sys.exit(e.message)
[ "def", "setposition", "(", "self", ",", "position", ")", ":", "try", ":", "if", "isinstance", "(", "position", ",", "list", ")", ":", "self", ".", "send", "(", "'position startpos moves {}'", ".", "format", "(", "self", ".", "__listtostring", "(", "position", ")", ")", ")", "self", ".", "isready", "(", ")", "elif", "re", ".", "match", "(", "'\\s*^(((?:[rnbqkpRNBQKP1-8]+\\/){7})[rnbqkpRNBQKP1-8]+)\\s([b|w])\\s([K|Q|k|q|-]{1,4})\\s(-|[a-h][1-8])\\s(\\d+\\s\\d+)$'", ",", "position", ")", ":", "regexList", "=", "re", ".", "match", "(", "'\\s*^(((?:[rnbqkpRNBQKP1-8]+\\/){7})[rnbqkpRNBQKP1-8]+)\\s([b|w])\\s([K|Q|k|q|-]{1,4})\\s(-|[a-h][1-8])\\s(\\d+\\s\\d+)$'", ",", "position", ")", ".", "groups", "(", ")", "fen", "=", "regexList", "[", "0", "]", ".", "split", "(", "\"/\"", ")", "if", "len", "(", "fen", ")", "!=", "8", ":", "raise", "ValueError", "(", "\"expected 8 rows in position part of fen: {0}\"", ".", "format", "(", "repr", "(", "fen", ")", ")", ")", "for", "fenPart", "in", "fen", ":", "field_sum", "=", "0", "previous_was_digit", ",", "previous_was_piece", "=", "False", ",", "False", "for", "c", "in", "fenPart", ":", "if", "c", "in", "[", "\"1\"", ",", "\"2\"", ",", "\"3\"", ",", "\"4\"", ",", "\"5\"", ",", "\"6\"", ",", "\"7\"", ",", "\"8\"", "]", ":", "if", "previous_was_digit", ":", "raise", "ValueError", "(", "\"two subsequent digits in position part of fen: {0}\"", ".", "format", "(", "repr", "(", "fen", ")", ")", ")", "field_sum", "+=", "int", "(", "c", ")", "previous_was_digit", "=", "True", "previous_was_piece", "=", "False", "elif", "c", "==", "\"~\"", ":", "if", "not", "previous_was_piece", ":", "raise", "ValueError", "(", "\"~ not after piece in position part of fen: {0}\"", ".", "format", "(", "repr", "(", "fen", ")", ")", ")", "previous_was_digit", ",", "previous_was_piece", "=", "False", ",", "False", "elif", "c", ".", "lower", "(", ")", "in", "[", "\"p\"", ",", "\"n\"", ",", "\"b\"", ",", "\"r\"", ",", "\"q\"", ",", "\"k\"", "]", ":", "field_sum", "+=", "1", "previous_was_digit", "=", "False", "previous_was_piece", "=", "True", "else", ":", "raise", "ValueError", "(", "\"invalid character in position part of fen: {0}\"", ".", "format", "(", "repr", "(", "fen", ")", ")", ")", "if", "field_sum", "!=", "8", ":", "raise", "ValueError", "(", "\"expected 8 columns per row in position part of fen: {0}\"", ".", "format", "(", "repr", "(", "fen", ")", ")", ")", "self", ".", "send", "(", "'position fen {}'", ".", "format", "(", "position", ")", ")", "self", ".", "isready", "(", ")", "else", ":", "raise", "ValueError", "(", "\"fen doesn`t match follow this example: rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1 \"", ")", "except", "ValueError", "as", "e", ":", "print", "(", "'\\nCheck position correctness\\n'", ")", "sys", ".", "exit", "(", "e", ".", "message", ")" ]
53.26
0.011799
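Both input forms from the docstring, sketched against a hypothetical engine wrapper instance (the class name Stockfish is an assumption for illustration):

engine = Stockfish()   # hypothetical wrapper instance exposing setposition()

# 1) As a move list in long algebraic notation:
engine.setposition(['e2e4', 'd7d5'])

# 2) As a FEN string:
engine.setposition('rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1')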
def colormesh(X, Y): """ Generates line paths for a quadmesh given 2D arrays of X and Y coordinates. """ X1 = X[0:-1, 0:-1].ravel() Y1 = Y[0:-1, 0:-1].ravel() X2 = X[1:, 0:-1].ravel() Y2 = Y[1:, 0:-1].ravel() X3 = X[1:, 1:].ravel() Y3 = Y[1:, 1:].ravel() X4 = X[0:-1, 1:].ravel() Y4 = Y[0:-1, 1:].ravel() X = np.column_stack([X1, X2, X3, X4, X1]) Y = np.column_stack([Y1, Y2, Y3, Y4, Y1]) return X, Y
[ "def", "colormesh", "(", "X", ",", "Y", ")", ":", "X1", "=", "X", "[", "0", ":", "-", "1", ",", "0", ":", "-", "1", "]", ".", "ravel", "(", ")", "Y1", "=", "Y", "[", "0", ":", "-", "1", ",", "0", ":", "-", "1", "]", ".", "ravel", "(", ")", "X2", "=", "X", "[", "1", ":", ",", "0", ":", "-", "1", "]", ".", "ravel", "(", ")", "Y2", "=", "Y", "[", "1", ":", ",", "0", ":", "-", "1", "]", ".", "ravel", "(", ")", "X3", "=", "X", "[", "1", ":", ",", "1", ":", "]", ".", "ravel", "(", ")", "Y3", "=", "Y", "[", "1", ":", ",", "1", ":", "]", ".", "ravel", "(", ")", "X4", "=", "X", "[", "0", ":", "-", "1", ",", "1", ":", "]", ".", "ravel", "(", ")", "Y4", "=", "Y", "[", "0", ":", "-", "1", ",", "1", ":", "]", ".", "ravel", "(", ")", "X", "=", "np", ".", "column_stack", "(", "[", "X1", ",", "X2", ",", "X3", ",", "X4", ",", "X1", "]", ")", "Y", "=", "np", ".", "column_stack", "(", "[", "Y1", ",", "Y2", ",", "Y3", ",", "Y4", ",", "Y1", "]", ")", "return", "X", ",", "Y" ]
26.176471
0.002169
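A sketch showing the shapes involved: an (M, N) grid yields (M-1)*(N-1) closed quadrilateral paths, each with 5 points (the first vertex is repeated to close the loop):

import numpy as np

X, Y = np.meshgrid(np.arange(4), np.arange(3))  # X, Y have shape (3, 4)
Xp, Yp = colormesh(X, Y)

print(Xp.shape, Yp.shape)  # (6, 5) (6, 5) -- 2*3 = 6 quads, 5 points each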
def wait(self, token, timeout=DEFAULT_POLL_TIMEOUT): """ Wait for the job until it has been processed. This method will block for up to `timeout` seconds. This method will wait for 4 seconds after the initial request and then will call :py:meth:`cloudsight.API.image_response` method every second until the status changes. :param token: Job token as returned from :py:meth:`cloudsight.API.image_request` or :py:meth:`cloudsight.API.remote_image_request` """ delta = datetime.timedelta(seconds=timeout) timeout_at = datetime.datetime.now() + delta time.sleep(min(timeout, INITIAL_POLL_WAIT)) response = self.image_response(token) while response['status'] == STATUS_NOT_COMPLETED \ and datetime.datetime.now() < timeout_at: time.sleep(1) response = self.image_response(token) return response
[ "def", "wait", "(", "self", ",", "token", ",", "timeout", "=", "DEFAULT_POLL_TIMEOUT", ")", ":", "delta", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "timeout", ")", "timeout_at", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "+", "delta", "time", ".", "sleep", "(", "min", "(", "timeout", ",", "INITIAL_POLL_WAIT", ")", ")", "response", "=", "self", ".", "image_response", "(", "token", ")", "while", "response", "[", "'status'", "]", "==", "STATUS_NOT_COMPLETED", "and", "datetime", ".", "datetime", ".", "now", "(", ")", "<", "timeout_at", ":", "time", ".", "sleep", "(", "1", ")", "response", "=", "self", ".", "image_response", "(", "token", ")", "return", "response" ]
40.666667
0.005005
def load_from(self, other, **kwargs): '''Create a :class:`Message` by merging `other` with `self`. Values from `other` will be copied to `self` if the value was not set on `self` and is set on `other`. :param other: The :class:`Message` to copy defaults from. :type other: :class:`Message` :param \*\*kwargs: Additional keyword arguments to construct :class:`Message` with. :rtype: :class:`Message` ''' data = self.data() other_data = other.data() for k, v in iteritems(other_data): if data.get(k) is None: data[k] = v return self.load_message(data, **kwargs)
[ "def", "load_from", "(", "self", ",", "other", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "data", "(", ")", "other_data", "=", "other", ".", "data", "(", ")", "for", "k", ",", "v", "in", "iteritems", "(", "other_data", ")", ":", "if", "data", ".", "get", "(", "k", ")", "is", "None", ":", "data", "[", "k", "]", "=", "v", "return", "self", ".", "load_message", "(", "data", ",", "*", "*", "kwargs", ")" ]
40
0.005747
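The merge rule in load_from, shown on plain dicts to make the precedence concrete: keys that are unset (None) on self are filled from other, and existing values win. The field names here are illustrative.

data = {'to': 'a@example.com', 'subject': None}
other_data = {'subject': 'Hello', 'body': 'Hi there'}

# Same loop as load_from: only fill in values that are missing on self.
for k, v in other_data.items():
    if data.get(k) is None:
        data[k] = v

assert data == {'to': 'a@example.com', 'subject': 'Hello', 'body': 'Hi there'}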
def text_to_pango(self): """ Replaces all ampersands in `full_text` and `short_text` attributes of `self.output` with `&amp;`. It is called internally when pango markup is used. Can be called multiple times (`&amp;` won't change to `&amp;amp;`). """ def replace(s): s = s.split("&") out = s[0] for i in range(len(s) - 1): if s[i + 1].startswith("amp;"): out += "&" + s[i + 1] else: out += "&amp;" + s[i + 1] return out if "full_text" in self.output.keys(): self.output["full_text"] = replace(self.output["full_text"]) if "short_text" in self.output.keys(): self.output["short_text"] = replace(self.output["short_text"])
[ "def", "text_to_pango", "(", "self", ")", ":", "def", "replace", "(", "s", ")", ":", "s", "=", "s", ".", "split", "(", "\"&\"", ")", "out", "=", "s", "[", "0", "]", "for", "i", "in", "range", "(", "len", "(", "s", ")", "-", "1", ")", ":", "if", "s", "[", "i", "+", "1", "]", ".", "startswith", "(", "\"amp;\"", ")", ":", "out", "+=", "\"&\"", "+", "s", "[", "i", "+", "1", "]", "else", ":", "out", "+=", "\"&amp;\"", "+", "s", "[", "i", "+", "1", "]", "return", "out", "if", "\"full_text\"", "in", "self", ".", "output", ".", "keys", "(", ")", ":", "self", ".", "output", "[", "\"full_text\"", "]", "=", "replace", "(", "self", ".", "output", "[", "\"full_text\"", "]", ")", "if", "\"short_text\"", "in", "self", ".", "output", ".", "keys", "(", ")", ":", "self", ".", "output", "[", "\"short_text\"", "]", "=", "replace", "(", "self", ".", "output", "[", "\"short_text\"", "]", ")" ]
35.478261
0.002387
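A standalone sketch of the ampersand-escaping rule used above: a plain "&" becomes "&amp;", while an already-escaped "&amp;" is left untouched, which is why the method is safe to apply repeatedly. The helper name is illustrative.

def escape_ampersands(s):
    parts = s.split("&")
    out = parts[0]
    for nxt in parts[1:]:
        out += ("&" if nxt.startswith("amp;") else "&amp;") + nxt
    return out

assert escape_ampersands("fish & chips") == "fish &amp; chips"
# Idempotent: a second pass changes nothing.
assert escape_ampersands("fish &amp; chips") == "fish &amp; chips"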
def element_data_from_sym(sym): '''Obtain elemental data given an elemental symbol The given symbol is not case sensitive An exception is thrown if the symbol is not found ''' sym_lower = sym.lower() if sym_lower not in _element_sym_map: raise KeyError('No element data for symbol \'{}\''.format(sym)) return _element_sym_map[sym_lower]
[ "def", "element_data_from_sym", "(", "sym", ")", ":", "sym_lower", "=", "sym", ".", "lower", "(", ")", "if", "sym_lower", "not", "in", "_element_sym_map", ":", "raise", "KeyError", "(", "'No element data for symbol \\'{}\\''", ".", "format", "(", "sym", ")", ")", "return", "_element_sym_map", "[", "sym_lower", "]" ]
30.333333
0.002667
def _ParseLogFileOptions(self, options): """Parses the log file options. Args: options (argparse.Namespace): command line arguments. """ self._log_file = self.ParseStringOption(options, 'log_file') if not self._log_file: local_date_time = datetime.datetime.now() self._log_file = ( '{0:s}-{1:04d}{2:02d}{3:02d}T{4:02d}{5:02d}{6:02d}.log.gz').format( self.NAME, local_date_time.year, local_date_time.month, local_date_time.day, local_date_time.hour, local_date_time.minute, local_date_time.second)
[ "def", "_ParseLogFileOptions", "(", "self", ",", "options", ")", ":", "self", ".", "_log_file", "=", "self", ".", "ParseStringOption", "(", "options", ",", "'log_file'", ")", "if", "not", "self", ".", "_log_file", ":", "local_date_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "self", ".", "_log_file", "=", "(", "'{0:s}-{1:04d}{2:02d}{3:02d}T{4:02d}{5:02d}{6:02d}.log.gz'", ")", ".", "format", "(", "self", ".", "NAME", ",", "local_date_time", ".", "year", ",", "local_date_time", ".", "month", ",", "local_date_time", ".", "day", ",", "local_date_time", ".", "hour", ",", "local_date_time", ".", "minute", ",", "local_date_time", ".", "second", ")" ]
41
0.006814
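A quick check of the default log-file name format built above; 'mytool' stands in for self.NAME and the timestamp is fixed so the result is reproducible.

import datetime

now = datetime.datetime(2023, 4, 5, 6, 7, 8)
log_file = '{0:s}-{1:04d}{2:02d}{3:02d}T{4:02d}{5:02d}{6:02d}.log.gz'.format(
    'mytool', now.year, now.month, now.day, now.hour, now.minute, now.second)
assert log_file == 'mytool-20230405T060708.log.gz'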
async def async_change_special_device( self, device_id: int, group_number: int, unit_number: int, enable_status: ESFlags, switches: SwitchFlags, special_status: SSFlags, high_limit: Optional[Union[int, float]], low_limit: Optional[Union[int, float]], control_high_limit: Optional[Union[int, float]], control_low_limit: Optional[Union[int, float]]) -> None: """ Change settings for a 'Special' device on the base unit. :param device_id: unique identifier for the device to be changed :param group_number: group number the device is to be assigned to :param unit_number: unit number the device is to be assigned to :param enable_status: flags indicating settings to enable :param switches: indicates switches that will be activated when device is triggered :param special_status: flags indicating 'Special' settings to enable :param high_limit: triggers on readings higher than value :param low_limit: triggers on readings lower than value :param control_high_limit: trigger switch for readings higher than value :param control_low_limit: trigger switch for readings lower than value """ # Lookup device using zone to obtain an accurate index and current # values, which will be needed to perform the change command device = self._devices[device_id] # Verify it is a Special device if not isinstance(device, SpecialDevice): raise ValueError("Device to be changed is not a Special device") response = await self._protocol.async_execute( GetDeviceCommand(device.category, device.group_number, device.unit_number)) if isinstance(response, DeviceInfoResponse): # Control limits only specified when they are supported if response.control_limit_fields_exist: command = ChangeSpecial2DeviceCommand( device.category, response.index, group_number, unit_number, enable_status, switches, response.current_status, response.down_count, response.message_attribute, response.current_reading, special_status, high_limit, low_limit, control_high_limit, control_low_limit) else: command = ChangeSpecialDeviceCommand( device.category, response.index, group_number, unit_number, enable_status, switches, response.current_status, response.down_count, response.message_attribute, response.current_reading, special_status, high_limit, low_limit) response = await self._protocol.async_execute(command) if isinstance(response, DeviceSettingsResponse): device._handle_response(response) # pylint: disable=protected-access if isinstance(response, DeviceNotFoundResponse): raise ValueError("Device to be changed was not found")
[ "async", "def", "async_change_special_device", "(", "self", ",", "device_id", ":", "int", ",", "group_number", ":", "int", ",", "unit_number", ":", "int", ",", "enable_status", ":", "ESFlags", ",", "switches", ":", "SwitchFlags", ",", "special_status", ":", "SSFlags", ",", "high_limit", ":", "Optional", "[", "Union", "[", "int", ",", "float", "]", "]", ",", "low_limit", ":", "Optional", "[", "Union", "[", "int", ",", "float", "]", "]", ",", "control_high_limit", ":", "Optional", "[", "Union", "[", "int", ",", "float", "]", "]", ",", "control_low_limit", ":", "Optional", "[", "Union", "[", "int", ",", "float", "]", "]", ")", "->", "None", ":", "# Lookup device using zone to obtain an accurate index and current", "# values, which will be needed to perform the change command", "device", "=", "self", ".", "_devices", "[", "device_id", "]", "# Verify it is a Special device", "if", "not", "isinstance", "(", "device", ",", "SpecialDevice", ")", ":", "raise", "ValueError", "(", "\"Device to be changed is not a Special device\"", ")", "response", "=", "await", "self", ".", "_protocol", ".", "async_execute", "(", "GetDeviceCommand", "(", "device", ".", "category", ",", "device", ".", "group_number", ",", "device", ".", "unit_number", ")", ")", "if", "isinstance", "(", "response", ",", "DeviceInfoResponse", ")", ":", "# Control limits only specified when they are supported", "if", "response", ".", "control_limit_fields_exist", ":", "command", "=", "ChangeSpecial2DeviceCommand", "(", "device", ".", "category", ",", "response", ".", "index", ",", "group_number", ",", "unit_number", ",", "enable_status", ",", "switches", ",", "response", ".", "current_status", ",", "response", ".", "down_count", ",", "response", ".", "message_attribute", ",", "response", ".", "current_reading", ",", "special_status", ",", "high_limit", ",", "low_limit", ",", "control_high_limit", ",", "control_low_limit", ")", "else", ":", "command", "=", "ChangeSpecialDeviceCommand", "(", "device", ".", "category", ",", "response", ".", "index", ",", "group_number", ",", "unit_number", ",", "enable_status", ",", "switches", ",", "response", ".", "current_status", ",", "response", ".", "down_count", ",", "response", ".", "message_attribute", ",", "response", ".", "current_reading", ",", "special_status", ",", "high_limit", ",", "low_limit", ")", "response", "=", "await", "self", ".", "_protocol", ".", "async_execute", "(", "command", ")", "if", "isinstance", "(", "response", ",", "DeviceSettingsResponse", ")", ":", "device", ".", "_handle_response", "(", "response", ")", "# pylint: disable=protected-access", "if", "isinstance", "(", "response", ",", "DeviceNotFoundResponse", ")", ":", "raise", "ValueError", "(", "\"Device to be changed was not found\"", ")" ]
58.72549
0.003941
def create_refresh_token(self, access_token_value):
        # type: (str) -> str
        """
        Creates a refresh token bound to the specified access token.
        """
        if access_token_value not in self.access_tokens:
            raise InvalidAccessToken('{} unknown'.format(access_token_value))

        if not self.refresh_token_lifetime:
            logger.debug('no refresh token issued for access_token=%s', access_token_value)
            return None

        refresh_token = rand_str()
        authz_info = {'access_token': access_token_value, 'exp': int(time.time()) + self.refresh_token_lifetime}
        self.refresh_tokens[refresh_token] = authz_info

        logger.debug('issued refresh_token=%s expiring=%d for access_token=%s', refresh_token,
                     authz_info['exp'], access_token_value)
        return refresh_token
[ "def", "create_refresh_token", "(", "self", ",", "access_token_value", ")", ":", "# type: (str) -> str", "if", "access_token_value", "not", "in", "self", ".", "access_tokens", ":", "raise", "InvalidAccessToken", "(", "'{} unknown'", ".", "format", "(", "access_token_value", ")", ")", "if", "not", "self", ".", "refresh_token_lifetime", ":", "logger", ".", "debug", "(", "'no refresh token issued for for access_token=%s'", ",", "access_token_value", ")", "return", "None", "refresh_token", "=", "rand_str", "(", ")", "authz_info", "=", "{", "'access_token'", ":", "access_token_value", ",", "'exp'", ":", "int", "(", "time", ".", "time", "(", ")", ")", "+", "self", ".", "refresh_token_lifetime", "}", "self", ".", "refresh_tokens", "[", "refresh_token", "]", "=", "authz_info", "logger", ".", "debug", "(", "'issued refresh_token=%s expiring=%d for access_token=%s'", ",", "refresh_token", ",", "authz_info", "[", "'exp'", "]", ",", "access_token_value", ")", "return", "refresh_token" ]
44.526316
0.006944
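A minimal sketch of the bookkeeping create_refresh_token performs: an opaque random token mapped to the access token it refreshes plus an absolute expiry. The stdlib secrets module stands in for the original rand_str helper, and the store is a plain dict.

import time
import secrets

refresh_tokens = {}

def issue_refresh_token(access_token, lifetime_seconds=3600):
    refresh_token = secrets.token_urlsafe(32)
    refresh_tokens[refresh_token] = {
        'access_token': access_token,
        'exp': int(time.time()) + lifetime_seconds,   # absolute expiry
    }
    return refresh_token

rt = issue_refresh_token('abc123')
assert refresh_tokens[rt]['access_token'] == 'abc123'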
def _findregex(self, reg_ex, start, end, bytealigned): """Find first occurrence of a compiled regular expression. Note that this doesn't support arbitrary regexes, in particular they must match a known length. """ p = start length = len(reg_ex.pattern) # We grab overlapping chunks of the binary representation and # do an ordinary string search within that. increment = max(4096, length * 10) buffersize = increment + length while p < end: buf = self._readbin(min(buffersize, end - p), p) # Test using regular expressions... m = reg_ex.search(buf) if m: pos = m.start() # pos = buf.find(targetbin) # if pos != -1: # if bytealigned then we only accept byte aligned positions. if not bytealigned or (p + pos) % 8 == 0: return (p + pos,) if bytealigned: # Advance to just beyond the non-byte-aligned match and try again... p += pos + 1 continue p += increment # Not found, return empty tuple return ()
[ "def", "_findregex", "(", "self", ",", "reg_ex", ",", "start", ",", "end", ",", "bytealigned", ")", ":", "p", "=", "start", "length", "=", "len", "(", "reg_ex", ".", "pattern", ")", "# We grab overlapping chunks of the binary representation and", "# do an ordinary string search within that.", "increment", "=", "max", "(", "4096", ",", "length", "*", "10", ")", "buffersize", "=", "increment", "+", "length", "while", "p", "<", "end", ":", "buf", "=", "self", ".", "_readbin", "(", "min", "(", "buffersize", ",", "end", "-", "p", ")", ",", "p", ")", "# Test using regular expressions...", "m", "=", "reg_ex", ".", "search", "(", "buf", ")", "if", "m", ":", "pos", "=", "m", ".", "start", "(", ")", "# pos = buf.find(targetbin)", "# if pos != -1:", "# if bytealigned then we only accept byte aligned positions.", "if", "not", "bytealigned", "or", "(", "p", "+", "pos", ")", "%", "8", "==", "0", ":", "return", "(", "p", "+", "pos", ",", ")", "if", "bytealigned", ":", "# Advance to just beyond the non-byte-aligned match and try again...", "p", "+=", "pos", "+", "1", "continue", "p", "+=", "increment", "# Not found, return empty tuple", "return", "(", ")" ]
38.935484
0.002425
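The chunking idea in _findregex, reduced to plain strings: scan in windows that overlap by the pattern length, so a fixed-length match can never be lost on a window boundary. The helper name, window size, and test data are illustrative.

import re

def chunked_search(pattern, text, chunk=4096):
    rx = re.compile(pattern)
    length = len(pattern)          # assumes a fixed-length pattern, as above
    p = 0
    while p < len(text):
        # Extend each window by the pattern length to cover the boundary.
        buf = text[p:p + chunk + length]
        m = rx.search(buf)
        if m:
            return p + m.start()
        p += chunk
    return -1

text = 'x' * 10000 + 'needle' + 'x' * 100
assert chunked_search('needle', text) == 10000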
def sync_repo_hook(self, repo_id): """Sync a GitHub repo's hook with the locally stored repo.""" # Get the hook that we may have set in the past gh_repo = self.api.repository_with_id(repo_id) hooks = (hook.id for hook in gh_repo.hooks() if hook.config.get('url', '') == self.webhook_url) hook_id = next(hooks, None) # If hook on GitHub exists, get or create corresponding db object and # enable the hook. Otherwise remove the old hook information. if hook_id: Repository.enable(user_id=self.user_id, github_id=gh_repo.id, name=gh_repo.full_name, hook=hook_id) else: Repository.disable(user_id=self.user_id, github_id=gh_repo.id, name=gh_repo.full_name)
[ "def", "sync_repo_hook", "(", "self", ",", "repo_id", ")", ":", "# Get the hook that we may have set in the past", "gh_repo", "=", "self", ".", "api", ".", "repository_with_id", "(", "repo_id", ")", "hooks", "=", "(", "hook", ".", "id", "for", "hook", "in", "gh_repo", ".", "hooks", "(", ")", "if", "hook", ".", "config", ".", "get", "(", "'url'", ",", "''", ")", "==", "self", ".", "webhook_url", ")", "hook_id", "=", "next", "(", "hooks", ",", "None", ")", "# If hook on GitHub exists, get or create corresponding db object and", "# enable the hook. Otherwise remove the old hook information.", "if", "hook_id", ":", "Repository", ".", "enable", "(", "user_id", "=", "self", ".", "user_id", ",", "github_id", "=", "gh_repo", ".", "id", ",", "name", "=", "gh_repo", ".", "full_name", ",", "hook", "=", "hook_id", ")", "else", ":", "Repository", ".", "disable", "(", "user_id", "=", "self", ".", "user_id", ",", "github_id", "=", "gh_repo", ".", "id", ",", "name", "=", "gh_repo", ".", "full_name", ")" ]
47.368421
0.002179
def trace_filter(mode): ''' Set the trace filter mode. mode: Whether to enable the trace hook. True: Trace filtering on (skipping methods tagged @DontTrace) False: Trace filtering off (trace methods tagged @DontTrace) None/default: Toggle trace filtering. ''' global should_trace_hook if mode is None: mode = should_trace_hook is None if mode: should_trace_hook = default_should_trace_hook else: should_trace_hook = None return mode
[ "def", "trace_filter", "(", "mode", ")", ":", "global", "should_trace_hook", "if", "mode", "is", "None", ":", "mode", "=", "should_trace_hook", "is", "None", "if", "mode", ":", "should_trace_hook", "=", "default_should_trace_hook", "else", ":", "should_trace_hook", "=", "None", "return", "mode" ]
26.052632
0.001949
def cli(ctx, env): """Enters a shell for slcli.""" # Set up the environment env = copy.deepcopy(env) env.load_modules_from_python(routes.ALL_ROUTES) env.aliases.update(routes.ALL_ALIASES) env.vars['global_args'] = ctx.parent.params env.vars['is_shell'] = True env.vars['last_exit_code'] = 0 # Set up prompt_toolkit settings app_path = click.get_app_dir('softlayer_shell') if not os.path.exists(app_path): os.makedirs(app_path) complete = completer.ShellCompleter(core.cli) while True: try: line = p_shortcuts.prompt( completer=complete, complete_while_typing=True, auto_suggest=p_auto_suggest.AutoSuggestFromHistory(), ) # Parse arguments try: args = shlex.split(line) except ValueError as ex: print("Invalid Command: %s" % ex) continue if not args: continue # Run Command try: # Reset client so that the client gets refreshed env.client = None core.main(args=list(get_env_args(env)) + args, obj=env, prog_name="", reraise_exceptions=True) except SystemExit as ex: env.vars['last_exit_code'] = ex.code except EOFError: return except ShellExit: return except Exception as ex: env.vars['last_exit_code'] = 1 traceback.print_exc(file=sys.stderr) except KeyboardInterrupt: env.vars['last_exit_code'] = 130
[ "def", "cli", "(", "ctx", ",", "env", ")", ":", "# Set up the environment", "env", "=", "copy", ".", "deepcopy", "(", "env", ")", "env", ".", "load_modules_from_python", "(", "routes", ".", "ALL_ROUTES", ")", "env", ".", "aliases", ".", "update", "(", "routes", ".", "ALL_ALIASES", ")", "env", ".", "vars", "[", "'global_args'", "]", "=", "ctx", ".", "parent", ".", "params", "env", ".", "vars", "[", "'is_shell'", "]", "=", "True", "env", ".", "vars", "[", "'last_exit_code'", "]", "=", "0", "# Set up prompt_toolkit settings", "app_path", "=", "click", ".", "get_app_dir", "(", "'softlayer_shell'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "app_path", ")", ":", "os", ".", "makedirs", "(", "app_path", ")", "complete", "=", "completer", ".", "ShellCompleter", "(", "core", ".", "cli", ")", "while", "True", ":", "try", ":", "line", "=", "p_shortcuts", ".", "prompt", "(", "completer", "=", "complete", ",", "complete_while_typing", "=", "True", ",", "auto_suggest", "=", "p_auto_suggest", ".", "AutoSuggestFromHistory", "(", ")", ",", ")", "# Parse arguments", "try", ":", "args", "=", "shlex", ".", "split", "(", "line", ")", "except", "ValueError", "as", "ex", ":", "print", "(", "\"Invalid Command: %s\"", "%", "ex", ")", "continue", "if", "not", "args", ":", "continue", "# Run Command", "try", ":", "# Reset client so that the client gets refreshed", "env", ".", "client", "=", "None", "core", ".", "main", "(", "args", "=", "list", "(", "get_env_args", "(", "env", ")", ")", "+", "args", ",", "obj", "=", "env", ",", "prog_name", "=", "\"\"", ",", "reraise_exceptions", "=", "True", ")", "except", "SystemExit", "as", "ex", ":", "env", ".", "vars", "[", "'last_exit_code'", "]", "=", "ex", ".", "code", "except", "EOFError", ":", "return", "except", "ShellExit", ":", "return", "except", "Exception", "as", "ex", ":", "env", ".", "vars", "[", "'last_exit_code'", "]", "=", "1", "traceback", ".", "print_exc", "(", "file", "=", "sys", ".", "stderr", ")", "except", "KeyboardInterrupt", ":", "env", ".", "vars", "[", "'last_exit_code'", "]", "=", "130" ]
30.981818
0.000569
def take_home_pay(gross_pay, employer_match, taxes_and_fees, numtype='float'): """ Calculate net take-home pay including employer retirement savings match using the formula laid out by Mr. Money Mustache: http://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/ Args: gross_pay: float or int, gross monthly pay. employer_match: float or int, the 401(k) match from your employer. taxes_and_fees: list, taxes and fees that are deducted from your paycheck. numtype: string, 'decimal' or 'float'; the type of number to return. Returns: your monthly take-home pay. """ if numtype == 'decimal': return (Decimal(gross_pay) + Decimal(employer_match)) - Decimal( sum(taxes_and_fees) ) else: return (float(gross_pay) + float(employer_match)) - sum(taxes_and_fees)
[ "def", "take_home_pay", "(", "gross_pay", ",", "employer_match", ",", "taxes_and_fees", ",", "numtype", "=", "'float'", ")", ":", "if", "numtype", "==", "'decimal'", ":", "return", "(", "Decimal", "(", "gross_pay", ")", "+", "Decimal", "(", "employer_match", ")", ")", "-", "Decimal", "(", "sum", "(", "taxes_and_fees", ")", ")", "else", ":", "return", "(", "float", "(", "gross_pay", ")", "+", "float", "(", "employer_match", ")", ")", "-", "sum", "(", "taxes_and_fees", ")" ]
35.625
0.002278
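A worked example for the default float branch of take_home_pay (assumes the function above is in scope): gross pay plus the employer match, minus the itemized deductions. The figures are illustrative.

taxes_and_fees = [800.0, 310.0, 72.5]   # e.g. federal, state, insurance
net = take_home_pay(5000.0, 250.0, taxes_and_fees)
# (5000 + 250) - 1182.5
assert net == 4067.5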
def histogram_distance(arr1, arr2, bins=None): """ This function returns the sum of the squared error Parameters: two arrays constrained to 0..1 Returns: sum of the squared error between the histograms """ eps = 1e-6 assert arr1.min() > 0 - eps assert arr1.max() < 1 + eps assert arr2.min() > 0 - eps assert arr2.max() < 1 + eps if not bins: bins = [x / 10 for x in range(11)] hist1 = np.histogram(arr1, bins=bins)[0] / arr1.size hist2 = np.histogram(arr2, bins=bins)[0] / arr2.size assert abs(hist1.sum() - 1.0) < eps assert abs(hist2.sum() - 1.0) < eps sqerr = (hist1 - hist2) ** 2 return sqerr.sum()
[ "def", "histogram_distance", "(", "arr1", ",", "arr2", ",", "bins", "=", "None", ")", ":", "eps", "=", "1e-6", "assert", "arr1", ".", "min", "(", ")", ">", "0", "-", "eps", "assert", "arr1", ".", "max", "(", ")", "<", "1", "+", "eps", "assert", "arr2", ".", "min", "(", ")", ">", "0", "-", "eps", "assert", "arr2", ".", "max", "(", ")", "<", "1", "+", "eps", "if", "not", "bins", ":", "bins", "=", "[", "x", "/", "10", "for", "x", "in", "range", "(", "11", ")", "]", "hist1", "=", "np", ".", "histogram", "(", "arr1", ",", "bins", "=", "bins", ")", "[", "0", "]", "/", "arr1", ".", "size", "hist2", "=", "np", ".", "histogram", "(", "arr2", ",", "bins", "=", "bins", ")", "[", "0", "]", "/", "arr2", ".", "size", "assert", "abs", "(", "hist1", ".", "sum", "(", ")", "-", "1.0", ")", "<", "eps", "assert", "abs", "(", "hist2", ".", "sum", "(", ")", "-", "1.0", ")", "<", "eps", "sqerr", "=", "(", "hist1", "-", "hist2", ")", "**", "2", "return", "sqerr", ".", "sum", "(", ")" ]
27.958333
0.001441
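A small worked example for histogram_distance (assumes the function above is in scope): identical samples give exactly zero, while samples occupying disjoint bins contribute 0.25**2 per occupied bin with the default ten bins.

import numpy as np

a = np.array([0.05, 0.15, 0.25, 0.35])
assert histogram_distance(a, a.copy()) == 0.0

b = np.array([0.65, 0.75, 0.85, 0.95])
# All mass moves bins: each of the 8 occupied bins contributes 0.25**2.
assert abs(histogram_distance(a, b) - 8 * 0.25 ** 2) < 1e-9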
def roles(self): """List[:class:`Role`]: A :class:`list` of roles that is allowed to use this emoji. If roles is empty, the emoji is unrestricted. """ guild = self.guild if guild is None: return [] return [role for role in guild.roles if self._roles.has(role.id)]
[ "def", "roles", "(", "self", ")", ":", "guild", "=", "self", ".", "guild", "if", "guild", "is", "None", ":", "return", "[", "]", "return", "[", "role", "for", "role", "in", "guild", ".", "roles", "if", "self", ".", "_roles", ".", "has", "(", "role", ".", "id", ")", "]" ]
31.6
0.009231
def mp_check_impl(self, process_count): """ a multiprocessing-enabled check implementation. Will create up to process_count helper processes and use them to perform the DistJarReport and DistClassReport actions. """ from multiprocessing import Process, Queue options = self.reporter.options # this is the function that will be run in a separate process, # which will handle the tasks queue and feed into the results # queue func = _mp_run_check # normally this would happen lazily, but since we'll have # multiple processes all running reports at the same time, we # need to make sure the setup is done before-hand. This is # hackish, but in particular this keeps the HTML reports from # trying to perform the default data copy over and over. self.reporter.setup() # enqueue the sub-reports for multi-processing. Other types of # changes can happen sync. changes = list(self.collect_impl()) task_count = 0 tasks = Queue() results = Queue() try: # as soon as we start using the tasks queue, we need to be # catching the KeyboardInterrupt event so that we can # drain the queue and lets its underlying thread terminate # happily. # TODO: is there a better way to handle this shutdown # gracefully? # feed any sub-reports to the tasks queue for index in range(0, len(changes)): change = changes[index] if isinstance(change, (DistJarReport, DistClassReport)): changes[index] = None tasks.put((index, change)) task_count += 1 # infrequent edge case, but don't bother starting more # helpers than we'll ever use process_count = min(process_count, task_count) # start the number of helper processes, and make sure # there are that many stop sentinels at the end of the # tasks queue for _i in range(0, process_count): tasks.put(None) process = Process(target=func, args=(tasks, results, options)) process.daemon = False process.start() # while the helpers are running, perform our checks for change in changes: if change: change.check() # get all of the results and feed them back into our change for _i in range(0, task_count): index, change = results.get() changes[index] = change except KeyboardInterrupt: # drain the tasks queue so it will exit gracefully for _change in iter(tasks.get, None): pass raise # complete the check by setting our internal collection of # child changes and returning our overall status c = False for change in changes: c = c or change.is_change() self.changes = changes return c, None
[ "def", "mp_check_impl", "(", "self", ",", "process_count", ")", ":", "from", "multiprocessing", "import", "Process", ",", "Queue", "options", "=", "self", ".", "reporter", ".", "options", "# this is the function that will be run in a separate process,", "# which will handle the tasks queue and feed into the results", "# queue", "func", "=", "_mp_run_check", "# normally this would happen lazily, but since we'll have", "# multiple processes all running reports at the same time, we", "# need to make sure the setup is done before-hand. This is", "# hackish, but in particular this keeps the HTML reports from", "# trying to perform the default data copy over and over.", "self", ".", "reporter", ".", "setup", "(", ")", "# enqueue the sub-reports for multi-processing. Other types of", "# changes can happen sync.", "changes", "=", "list", "(", "self", ".", "collect_impl", "(", ")", ")", "task_count", "=", "0", "tasks", "=", "Queue", "(", ")", "results", "=", "Queue", "(", ")", "try", ":", "# as soon as we start using the tasks queue, we need to be", "# catching the KeyboardInterrupt event so that we can", "# drain the queue and lets its underlying thread terminate", "# happily.", "# TODO: is there a better way to handle this shutdown", "# gracefully?", "# feed any sub-reports to the tasks queue", "for", "index", "in", "range", "(", "0", ",", "len", "(", "changes", ")", ")", ":", "change", "=", "changes", "[", "index", "]", "if", "isinstance", "(", "change", ",", "(", "DistJarReport", ",", "DistClassReport", ")", ")", ":", "changes", "[", "index", "]", "=", "None", "tasks", ".", "put", "(", "(", "index", ",", "change", ")", ")", "task_count", "+=", "1", "# infrequent edge case, but don't bother starting more", "# helpers than we'll ever use", "process_count", "=", "min", "(", "process_count", ",", "task_count", ")", "# start the number of helper processes, and make sure", "# there are that many stop sentinels at the end of the", "# tasks queue", "for", "_i", "in", "range", "(", "0", ",", "process_count", ")", ":", "tasks", ".", "put", "(", "None", ")", "process", "=", "Process", "(", "target", "=", "func", ",", "args", "=", "(", "tasks", ",", "results", ",", "options", ")", ")", "process", ".", "daemon", "=", "False", "process", ".", "start", "(", ")", "# while the helpers are running, perform our checks", "for", "change", "in", "changes", ":", "if", "change", ":", "change", ".", "check", "(", ")", "# get all of the results and feed them back into our change", "for", "_i", "in", "range", "(", "0", ",", "task_count", ")", ":", "index", ",", "change", "=", "results", ".", "get", "(", ")", "changes", "[", "index", "]", "=", "change", "except", "KeyboardInterrupt", ":", "# drain the tasks queue so it will exit gracefully", "for", "_change", "in", "iter", "(", "tasks", ".", "get", ",", "None", ")", ":", "pass", "raise", "# complete the check by setting our internal collection of", "# child changes and returning our overall status", "c", "=", "False", "for", "change", "in", "changes", ":", "c", "=", "c", "or", "change", ".", "is_change", "(", ")", "self", ".", "changes", "=", "changes", "return", "c", ",", "None" ]
36.952381
0.000628
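The queue-and-sentinel pattern used by mp_check_impl, in miniature: one task queue with a None sentinel per worker, and one results queue fed back to the parent, which reassembles results by index. The work function here is a stand-in for the report checks.

from multiprocessing import Process, Queue

def worker(tasks, results):
    for index, item in iter(tasks.get, None):   # stop on the None sentinel
        results.put((index, item * item))

if __name__ == '__main__':
    tasks, results = Queue(), Queue()
    items = [3, 5, 7]
    for i, item in enumerate(items):
        tasks.put((i, item))
    n_workers = 2
    for _ in range(n_workers):
        tasks.put(None)                          # one sentinel per worker
        Process(target=worker, args=(tasks, results)).start()
    out = [None] * len(items)
    for _ in items:
        index, value = results.get()             # arrives in any order
        out[index] = value
    assert out == [9, 25, 49]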
def persistant_success(request, message, extra_tags='', fail_silently=False, *args, **kwargs):
    """
    Adds a persistent message with the ``SUCCESS`` level.
    """
    add_message(request, SUCCESS_PERSISTENT, message, extra_tags=extra_tags,
                fail_silently=fail_silently, *args, **kwargs)
[ "def", "persistant_success", "(", "request", ",", "message", ",", "extra_tags", "=", "''", ",", "fail_silently", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "add_message", "(", "request", ",", "SUCCESS_PERSISTENT", ",", "message", ",", "extra_tags", "=", "extra_tags", ",", "fail_silently", "=", "fail_silently", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
50.333333
0.006515
def next_open(self, dt): """ Given a dt, returns the next open. If the given dt happens to be a session open, the next session's open will be returned. Parameters ---------- dt: pd.Timestamp The dt for which to get the next open. Returns ------- pd.Timestamp The UTC timestamp of the next open. """ idx = next_divider_idx(self.market_opens_nanos, dt.value) return pd.Timestamp(self.market_opens_nanos[idx], tz=UTC)
[ "def", "next_open", "(", "self", ",", "dt", ")", ":", "idx", "=", "next_divider_idx", "(", "self", ".", "market_opens_nanos", ",", "dt", ".", "value", ")", "return", "pd", ".", "Timestamp", "(", "self", ".", "market_opens_nanos", "[", "idx", "]", ",", "tz", "=", "UTC", ")" ]
27.736842
0.00367
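The "next divider" lookup behind next_open, sketched with bisect; next_divider_idx from the original is assumed to behave this way, consistent with the docstring: the result is the first timestamp strictly greater than the query, so a dt that lands exactly on an open still advances to the following session.

from bisect import bisect_right

market_opens = [100, 200, 300, 400]   # stand-in nanosecond timestamps

def next_divider(values, v):
    # First element strictly greater than v (no bounds check, as a sketch).
    return values[bisect_right(values, v)]

assert next_divider(market_opens, 150) == 200
assert next_divider(market_opens, 200) == 300   # exact hit still advances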
def pipeline_getter(self): "For duck-typing with *Spec types" if not self.derivable: raise ArcanaUsageError( "There is no pipeline getter for {} because it doesn't " "fallback to a derived spec".format(self)) return self._fallback.pipeline_getter
[ "def", "pipeline_getter", "(", "self", ")", ":", "if", "not", "self", ".", "derivable", ":", "raise", "ArcanaUsageError", "(", "\"There is no pipeline getter for {} because it doesn't \"", "\"fallback to a derived spec\"", ".", "format", "(", "self", ")", ")", "return", "self", ".", "_fallback", ".", "pipeline_getter" ]
44
0.006369
def write_byte(self, address, value):
        """Writes the byte to an unaddressed register in a device.
        """
        LOGGER.debug("Writing byte %s to device %s!", bin(value), hex(address))
        return self.driver.write_byte(address, value)
[ "def", "write_byte", "(", "self", ",", "address", ",", "value", ")", ":", "LOGGER", ".", "debug", "(", "\"Writing byte %s to device %s!\"", ",", "bin", "(", "value", ")", ",", "hex", "(", "address", ")", ")", "return", "self", ".", "driver", ".", "write_byte", "(", "address", ",", "value", ")" ]
59
0.008368
def add_method(self, m, **kwargs): """Add an instance method or function Args: m: The instance method or function to store """ if isinstance(m, types.FunctionType): self['function', id(m)] = m else: f, obj = get_method_vars(m) wrkey = (f, id(obj)) self[wrkey] = obj
[ "def", "add_method", "(", "self", ",", "m", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "m", ",", "types", ".", "FunctionType", ")", ":", "self", "[", "'function'", ",", "id", "(", "m", ")", "]", "=", "m", "else", ":", "f", ",", "obj", "=", "get_method_vars", "(", "m", ")", "wrkey", "=", "(", "f", ",", "id", "(", "obj", ")", ")", "self", "[", "wrkey", "]", "=", "obj" ]
29.583333
0.005464
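What get_method_vars presumably extracts for the bound-method branch above: the underlying function and the instance it is bound to, which together form a stable key even though a fresh bound-method object is created on every attribute access.

class Thing:
    def go(self):
        return 'went'

t = Thing()
m = t.go
# A bound method carries its function and instance explicitly.
assert m.__func__ is Thing.go and m.__self__ is t
# The (f, id(obj)) pair used as the mapping key above.
wrkey = (m.__func__, id(m.__self__))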
def transform(self,x,inds=None,labels = None): """return a transformation of x using population outputs""" if inds: # return np.asarray(Parallel(n_jobs=10)(delayed(self.out)(I,x,labels,self.otype) # for I in inds)).transpose() return np.asarray( [self.out(I,x,labels,self.otype) for I in inds]).transpose() elif self._best_inds: # return np.asarray(Parallel(n_jobs=10)(delayed(self.out)(I,x,labels,self.otype) # for I in self._best_inds)).transpose() return np.asarray( [self.out(I,x,labels,self.otype) for I in self._best_inds]).transpose() else: return x
[ "def", "transform", "(", "self", ",", "x", ",", "inds", "=", "None", ",", "labels", "=", "None", ")", ":", "if", "inds", ":", "# return np.asarray(Parallel(n_jobs=10)(delayed(self.out)(I,x,labels,self.otype) ", "# for I in inds)).transpose()", "return", "np", ".", "asarray", "(", "[", "self", ".", "out", "(", "I", ",", "x", ",", "labels", ",", "self", ".", "otype", ")", "for", "I", "in", "inds", "]", ")", ".", "transpose", "(", ")", "elif", "self", ".", "_best_inds", ":", "# return np.asarray(Parallel(n_jobs=10)(delayed(self.out)(I,x,labels,self.otype) ", "# for I in self._best_inds)).transpose()", "return", "np", ".", "asarray", "(", "[", "self", ".", "out", "(", "I", ",", "x", ",", "labels", ",", "self", ".", "otype", ")", "for", "I", "in", "self", ".", "_best_inds", "]", ")", ".", "transpose", "(", ")", "else", ":", "return", "x" ]
53.785714
0.027415
def p2pkh_input(outpoint, sig, pubkey, sequence=0xFFFFFFFE): ''' OutPoint, hex_string, hex_string, int -> TxIn Create a signed legacy TxIn from a p2pkh prevout ''' stack_script = '{sig} {pk}'.format(sig=sig, pk=pubkey) stack_script = script_ser.serialize(stack_script) return tb.make_legacy_input(outpoint, stack_script, b'', sequence)
[ "def", "p2pkh_input", "(", "outpoint", ",", "sig", ",", "pubkey", ",", "sequence", "=", "0xFFFFFFFE", ")", ":", "stack_script", "=", "'{sig} {pk}'", ".", "format", "(", "sig", "=", "sig", ",", "pk", "=", "pubkey", ")", "stack_script", "=", "script_ser", ".", "serialize", "(", "stack_script", ")", "return", "tb", ".", "make_legacy_input", "(", "outpoint", ",", "stack_script", ",", "b''", ",", "sequence", ")" ]
44.5
0.002755
def changes(ctx, check, dry_run): """Show all the pending PRs for a given check.""" if not dry_run and check not in get_valid_checks(): abort('Check `{}` is not an Agent-based Integration'.format(check)) # get the name of the current release tag cur_version = get_version_string(check) target_tag = get_release_tag_string(check, cur_version) # get the diff from HEAD diff_lines = get_commits_since(check, target_tag) # for each PR get the title, we'll use it to populate the changelog pr_numbers = parse_pr_numbers(diff_lines) if not dry_run: echo_info('Found {} PRs merged since tag: {}'.format(len(pr_numbers), target_tag)) user_config = ctx.obj if dry_run: changelog_types = [] for pr_num in pr_numbers: try: payload = get_pr(pr_num, user_config) except Exception as e: echo_failure('Unable to fetch info for PR #{}: {}'.format(pr_num, e)) continue current_changelog_types = get_changelog_types(payload) if not current_changelog_types: abort('No valid changelog labels found attached to PR #{}, please add one!'.format(pr_num)) elif len(current_changelog_types) > 1: abort('Multiple changelog labels found attached to PR #{}, please only use one!'.format(pr_num)) current_changelog_type = current_changelog_types[0] if current_changelog_type != 'no-changelog': changelog_types.append(current_changelog_type) return cur_version, changelog_types else: for pr_num in pr_numbers: try: payload = get_pr(pr_num, user_config) except Exception as e: echo_failure('Unable to fetch info for PR #{}: {}'.format(pr_num, e)) continue changelog_types = get_changelog_types(payload) echo_success(payload.get('title')) echo_info(' * Url: {}'.format(payload.get('html_url'))) echo_info(' * Changelog status: ', nl=False) if not changelog_types: echo_warning('WARNING! No changelog labels attached.\n') elif len(changelog_types) > 1: echo_warning('WARNING! Too many changelog labels attached: {}\n'.format(', '.join(changelog_types))) else: echo_success('{}\n'.format(changelog_types[0]))
[ "def", "changes", "(", "ctx", ",", "check", ",", "dry_run", ")", ":", "if", "not", "dry_run", "and", "check", "not", "in", "get_valid_checks", "(", ")", ":", "abort", "(", "'Check `{}` is not an Agent-based Integration'", ".", "format", "(", "check", ")", ")", "# get the name of the current release tag", "cur_version", "=", "get_version_string", "(", "check", ")", "target_tag", "=", "get_release_tag_string", "(", "check", ",", "cur_version", ")", "# get the diff from HEAD", "diff_lines", "=", "get_commits_since", "(", "check", ",", "target_tag", ")", "# for each PR get the title, we'll use it to populate the changelog", "pr_numbers", "=", "parse_pr_numbers", "(", "diff_lines", ")", "if", "not", "dry_run", ":", "echo_info", "(", "'Found {} PRs merged since tag: {}'", ".", "format", "(", "len", "(", "pr_numbers", ")", ",", "target_tag", ")", ")", "user_config", "=", "ctx", ".", "obj", "if", "dry_run", ":", "changelog_types", "=", "[", "]", "for", "pr_num", "in", "pr_numbers", ":", "try", ":", "payload", "=", "get_pr", "(", "pr_num", ",", "user_config", ")", "except", "Exception", "as", "e", ":", "echo_failure", "(", "'Unable to fetch info for PR #{}: {}'", ".", "format", "(", "pr_num", ",", "e", ")", ")", "continue", "current_changelog_types", "=", "get_changelog_types", "(", "payload", ")", "if", "not", "current_changelog_types", ":", "abort", "(", "'No valid changelog labels found attached to PR #{}, please add one!'", ".", "format", "(", "pr_num", ")", ")", "elif", "len", "(", "current_changelog_types", ")", ">", "1", ":", "abort", "(", "'Multiple changelog labels found attached to PR #{}, please only use one!'", ".", "format", "(", "pr_num", ")", ")", "current_changelog_type", "=", "current_changelog_types", "[", "0", "]", "if", "current_changelog_type", "!=", "'no-changelog'", ":", "changelog_types", ".", "append", "(", "current_changelog_type", ")", "return", "cur_version", ",", "changelog_types", "else", ":", "for", "pr_num", "in", "pr_numbers", ":", "try", ":", "payload", "=", "get_pr", "(", "pr_num", ",", "user_config", ")", "except", "Exception", "as", "e", ":", "echo_failure", "(", "'Unable to fetch info for PR #{}: {}'", ".", "format", "(", "pr_num", ",", "e", ")", ")", "continue", "changelog_types", "=", "get_changelog_types", "(", "payload", ")", "echo_success", "(", "payload", ".", "get", "(", "'title'", ")", ")", "echo_info", "(", "' * Url: {}'", ".", "format", "(", "payload", ".", "get", "(", "'html_url'", ")", ")", ")", "echo_info", "(", "' * Changelog status: '", ",", "nl", "=", "False", ")", "if", "not", "changelog_types", ":", "echo_warning", "(", "'WARNING! No changelog labels attached.\\n'", ")", "elif", "len", "(", "changelog_types", ")", ">", "1", ":", "echo_warning", "(", "'WARNING! Too many changelog labels attached: {}\\n'", ".", "format", "(", "', '", ".", "join", "(", "changelog_types", ")", ")", ")", "else", ":", "echo_success", "(", "'{}\\n'", ".", "format", "(", "changelog_types", "[", "0", "]", ")", ")" ]
40.847458
0.002836
def reset(self): """Reset all OneTimeProperty attributes that may have fired already.""" instdict = self.__dict__ classdict = self.__class__.__dict__ # To reset them, we simply remove them from the instance dict. At that # point, it's as if they had never been computed. On the next access, # the accessor function from the parent class will be called, simply # because that's how the python descriptor protocol works. for mname, mval in classdict.items(): if mname in instdict and isinstance(mval, OneTimeProperty): delattr(self, mname)
[ "def", "reset", "(", "self", ")", ":", "instdict", "=", "self", ".", "__dict__", "classdict", "=", "self", ".", "__class__", ".", "__dict__", "# To reset them, we simply remove them from the instance dict. At that", "# point, it's as if they had never been computed. On the next access,", "# the accessor function from the parent class will be called, simply", "# because that's how the python descriptor protocol works.", "for", "mname", ",", "mval", "in", "classdict", ".", "items", "(", ")", ":", "if", "mname", "in", "instdict", "and", "isinstance", "(", "mval", ",", "OneTimeProperty", ")", ":", "delattr", "(", "self", ",", "mname", ")" ]
54.363636
0.018092
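A minimal OneTimeProperty-style descriptor (a sketch, not the original class) to make reset() concrete: the first access computes the value and caches it in the instance dict, and deleting that instance attribute, which is all reset() does per attribute, re-arms the descriptor.

class OneTimeProp:
    """Non-data descriptor: fires once, then the instance dict shadows it."""
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        value = obj.__dict__[self.name] = self.func(obj)   # cache on instance
        return value

class Data:
    @OneTimeProp
    def expensive(self):
        return object()   # a fresh object each time it actually runs

d = Data()
first = d.expensive
assert d.expensive is first            # cached, not recomputed
delattr(d, 'expensive')                # what reset() does per attribute
assert d.expensive is not first        # descriptor fired again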
def writeObject(self, obj, output, setReferencePosition=False): """Serializes the given object to the output. Returns output. If setReferencePosition is True, will set the position the object was written. """ def proc_variable_length(format, length): result = b'' if length > 0b1110: result += pack('!B', (format << 4) | 0b1111) result = self.writeObject(length, result) else: result += pack('!B', (format << 4) | length) return result def timedelta_total_seconds(td): # Shim for Python 2.6 compatibility, which doesn't have total_seconds. # Make one argument a float to ensure the right calculation. return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10.0**6) / 10.0**6 if setReferencePosition: self.referencePositions[obj] = len(output) if obj is None: output += pack('!B', 0b00000000) elif isinstance(obj, BoolWrapper): if obj.value is False: output += pack('!B', 0b00001000) else: output += pack('!B', 0b00001001) elif isinstance(obj, Uid): size = self.intSize(obj.integer) output += pack('!B', (0b1000 << 4) | size - 1) output += self.binaryInt(obj.integer) elif isinstance(obj, (int, long)): byteSize = self.intSize(obj) root = math.log(byteSize, 2) output += pack('!B', (0b0001 << 4) | int(root)) output += self.binaryInt(obj, as_number=True) elif isinstance(obj, FloatWrapper): # just use doubles output += pack('!B', (0b0010 << 4) | 3) output += self.binaryReal(obj) elif isinstance(obj, datetime.datetime): try: timestamp = (obj - apple_reference_date).total_seconds() except AttributeError: timestamp = timedelta_total_seconds(obj - apple_reference_date) output += pack('!B', 0b00110011) output += pack('!d', float(timestamp)) elif isinstance(obj, Data): output += proc_variable_length(0b0100, len(obj)) output += obj elif isinstance(obj, StringWrapper): output += proc_variable_length(obj.encodingMarker, len(obj)) output += obj.encodedValue elif isinstance(obj, bytes): output += proc_variable_length(0b0101, len(obj)) output += obj elif isinstance(obj, HashableWrapper): obj = obj.value if isinstance(obj, (set, list, tuple)): if isinstance(obj, set): output += proc_variable_length(0b1100, len(obj)) else: output += proc_variable_length(0b1010, len(obj)) objectsToWrite = [] for objRef in sorted(obj) if isinstance(obj, set) else obj: (isNew, output) = self.writeObjectReference(objRef, output) if isNew: objectsToWrite.append(objRef) for objRef in objectsToWrite: output = self.writeObject(objRef, output, setReferencePosition=True) elif isinstance(obj, dict): output += proc_variable_length(0b1101, len(obj)) keys = [] values = [] objectsToWrite = [] for key, value in sorted(iteritems(obj)): keys.append(key) values.append(value) for key in keys: (isNew, output) = self.writeObjectReference(key, output) if isNew: objectsToWrite.append(key) for value in values: (isNew, output) = self.writeObjectReference(value, output) if isNew: objectsToWrite.append(value) for objRef in objectsToWrite: output = self.writeObject(objRef, output, setReferencePosition=True) return output
[ "def", "writeObject", "(", "self", ",", "obj", ",", "output", ",", "setReferencePosition", "=", "False", ")", ":", "def", "proc_variable_length", "(", "format", ",", "length", ")", ":", "result", "=", "b''", "if", "length", ">", "0b1110", ":", "result", "+=", "pack", "(", "'!B'", ",", "(", "format", "<<", "4", ")", "|", "0b1111", ")", "result", "=", "self", ".", "writeObject", "(", "length", ",", "result", ")", "else", ":", "result", "+=", "pack", "(", "'!B'", ",", "(", "format", "<<", "4", ")", "|", "length", ")", "return", "result", "def", "timedelta_total_seconds", "(", "td", ")", ":", "# Shim for Python 2.6 compatibility, which doesn't have total_seconds.", "# Make one argument a float to ensure the right calculation.", "return", "(", "td", ".", "microseconds", "+", "(", "td", ".", "seconds", "+", "td", ".", "days", "*", "24", "*", "3600", ")", "*", "10.0", "**", "6", ")", "/", "10.0", "**", "6", "if", "setReferencePosition", ":", "self", ".", "referencePositions", "[", "obj", "]", "=", "len", "(", "output", ")", "if", "obj", "is", "None", ":", "output", "+=", "pack", "(", "'!B'", ",", "0b00000000", ")", "elif", "isinstance", "(", "obj", ",", "BoolWrapper", ")", ":", "if", "obj", ".", "value", "is", "False", ":", "output", "+=", "pack", "(", "'!B'", ",", "0b00001000", ")", "else", ":", "output", "+=", "pack", "(", "'!B'", ",", "0b00001001", ")", "elif", "isinstance", "(", "obj", ",", "Uid", ")", ":", "size", "=", "self", ".", "intSize", "(", "obj", ".", "integer", ")", "output", "+=", "pack", "(", "'!B'", ",", "(", "0b1000", "<<", "4", ")", "|", "size", "-", "1", ")", "output", "+=", "self", ".", "binaryInt", "(", "obj", ".", "integer", ")", "elif", "isinstance", "(", "obj", ",", "(", "int", ",", "long", ")", ")", ":", "byteSize", "=", "self", ".", "intSize", "(", "obj", ")", "root", "=", "math", ".", "log", "(", "byteSize", ",", "2", ")", "output", "+=", "pack", "(", "'!B'", ",", "(", "0b0001", "<<", "4", ")", "|", "int", "(", "root", ")", ")", "output", "+=", "self", ".", "binaryInt", "(", "obj", ",", "as_number", "=", "True", ")", "elif", "isinstance", "(", "obj", ",", "FloatWrapper", ")", ":", "# just use doubles", "output", "+=", "pack", "(", "'!B'", ",", "(", "0b0010", "<<", "4", ")", "|", "3", ")", "output", "+=", "self", ".", "binaryReal", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "datetime", ".", "datetime", ")", ":", "try", ":", "timestamp", "=", "(", "obj", "-", "apple_reference_date", ")", ".", "total_seconds", "(", ")", "except", "AttributeError", ":", "timestamp", "=", "timedelta_total_seconds", "(", "obj", "-", "apple_reference_date", ")", "output", "+=", "pack", "(", "'!B'", ",", "0b00110011", ")", "output", "+=", "pack", "(", "'!d'", ",", "float", "(", "timestamp", ")", ")", "elif", "isinstance", "(", "obj", ",", "Data", ")", ":", "output", "+=", "proc_variable_length", "(", "0b0100", ",", "len", "(", "obj", ")", ")", "output", "+=", "obj", "elif", "isinstance", "(", "obj", ",", "StringWrapper", ")", ":", "output", "+=", "proc_variable_length", "(", "obj", ".", "encodingMarker", ",", "len", "(", "obj", ")", ")", "output", "+=", "obj", ".", "encodedValue", "elif", "isinstance", "(", "obj", ",", "bytes", ")", ":", "output", "+=", "proc_variable_length", "(", "0b0101", ",", "len", "(", "obj", ")", ")", "output", "+=", "obj", "elif", "isinstance", "(", "obj", ",", "HashableWrapper", ")", ":", "obj", "=", "obj", ".", "value", "if", "isinstance", "(", "obj", ",", "(", "set", ",", "list", ",", "tuple", ")", ")", ":", "if", "isinstance", "(", "obj", ",", "set", 
")", ":", "output", "+=", "proc_variable_length", "(", "0b1100", ",", "len", "(", "obj", ")", ")", "else", ":", "output", "+=", "proc_variable_length", "(", "0b1010", ",", "len", "(", "obj", ")", ")", "objectsToWrite", "=", "[", "]", "for", "objRef", "in", "sorted", "(", "obj", ")", "if", "isinstance", "(", "obj", ",", "set", ")", "else", "obj", ":", "(", "isNew", ",", "output", ")", "=", "self", ".", "writeObjectReference", "(", "objRef", ",", "output", ")", "if", "isNew", ":", "objectsToWrite", ".", "append", "(", "objRef", ")", "for", "objRef", "in", "objectsToWrite", ":", "output", "=", "self", ".", "writeObject", "(", "objRef", ",", "output", ",", "setReferencePosition", "=", "True", ")", "elif", "isinstance", "(", "obj", ",", "dict", ")", ":", "output", "+=", "proc_variable_length", "(", "0b1101", ",", "len", "(", "obj", ")", ")", "keys", "=", "[", "]", "values", "=", "[", "]", "objectsToWrite", "=", "[", "]", "for", "key", ",", "value", "in", "sorted", "(", "iteritems", "(", "obj", ")", ")", ":", "keys", ".", "append", "(", "key", ")", "values", ".", "append", "(", "value", ")", "for", "key", "in", "keys", ":", "(", "isNew", ",", "output", ")", "=", "self", ".", "writeObjectReference", "(", "key", ",", "output", ")", "if", "isNew", ":", "objectsToWrite", ".", "append", "(", "key", ")", "for", "value", "in", "values", ":", "(", "isNew", ",", "output", ")", "=", "self", ".", "writeObjectReference", "(", "value", ",", "output", ")", "if", "isNew", ":", "objectsToWrite", ".", "append", "(", "value", ")", "for", "objRef", "in", "objectsToWrite", ":", "output", "=", "self", ".", "writeObject", "(", "objRef", ",", "output", ",", "setReferencePosition", "=", "True", ")", "return", "output" ]
45
0.002364
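The marker-byte convention writeObject leans on, shown in isolation: a 4-bit type nibble packed with a 4-bit length, where 0b1111 is an escape meaning the real length follows as a full integer object. The helper name is illustrative.

from struct import pack

def marker(type_nibble, length):
    if length > 0b1110:
        # Escape: the real length must be written separately after this byte.
        return pack('!B', (type_nibble << 4) | 0b1111)
    return pack('!B', (type_nibble << 4) | length)

assert marker(0b0101, 4) == b'\x54'     # short binary data, length inline
assert marker(0b0101, 200) == b'\x5f'   # long data: a length object follows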
def node_theta(self, node): """ Convenience function to find the node's theta angle. """ group = self.find_node_group_membership(node) return self.group_theta(group)
[ "def", "node_theta", "(", "self", ",", "node", ")", ":", "group", "=", "self", ".", "find_node_group_membership", "(", "node", ")", "return", "self", ".", "group_theta", "(", "group", ")" ]
33.333333
0.009756
def list_of_dicts_to_dict_of_lists(list_of_dictionaries):
    """
    Takes a list of dictionaries and creates a dictionary with the combined
    values for each key in each dictionary.

    Missing values are set to `None` for each dictionary that does not
    contain a key that is present in at least one other dictionary.

        >>> litus.list_of_dicts_to_dict_of_lists([{'a':1,'b':2,'c':3},{'a':3,'b':4,'c':5},{'a':1,'b':2,'c':3}])
        {'a': [1, 3, 1], 'b': [2, 4, 2], 'c': [3, 5, 3]}

    Shorthand: `litus.ld2dl(..)`
    """
    result = {}
    all_keys = set([k for d in list_of_dictionaries for k in d.keys()])
    for d in list_of_dictionaries:
        for k in all_keys:
            result.setdefault(k, []).append(d.get(k, None))
    return result
[ "def", "list_of_dicts_to_dict_of_lists", "(", "list_of_dictionaries", ")", ":", "result", "=", "{", "}", "all_keys", "=", "set", "(", "[", "k", "for", "d", "in", "list_of_dictionaries", "for", "k", "in", "d", ".", "keys", "(", ")", "]", ")", "for", "d", "in", "list_of_dictionaries", ":", "for", "k", "in", "all_keys", ":", "result", ".", "setdefault", "(", "k", ",", "[", "]", ")", ".", "append", "(", "d", ".", "get", "(", "k", ",", "None", ")", ")", "return", "result" ]
40.842105
0.011335
def add_type_struct_or_union(self, name, interp, node): """Store the node with the name. When it is instantiated, the node itself will be handled. :name: name of the typedefd struct/union :node: the union/struct node :interp: the 010 interpreter """ self.add_type_class(name, StructUnionDef(name, interp, node))
[ "def", "add_type_struct_or_union", "(", "self", ",", "name", ",", "interp", ",", "node", ")", ":", "self", ".", "add_type_class", "(", "name", ",", "StructUnionDef", "(", "name", ",", "interp", ",", "node", ")", ")" ]
40
0.005435
def _get_type(points, soma_class):
    '''get the type of the soma

    Args:
        points: Soma points
        soma_class(str): one of 'contour' or 'cylinder' to specify the type
    '''
    assert soma_class in (SOMA_CONTOUR, SOMA_CYLINDER)

    npoints = len(points)
    if soma_class == SOMA_CONTOUR:
        return {0: None,
                1: SomaSinglePoint,
                2: None}.get(npoints, SomaSimpleContour)

    if (npoints == 3 and
            points[0][COLS.P] == -1 and
            points[1][COLS.P] == 1 and
            points[2][COLS.P] == 1):
        L.warning('Using neuromorpho 3-Point soma')
        # NeuroMorpho is the main provider of morphologies, but they
        # use SWC as their default file format: they convert all
        # uploads to SWC. In the process of conversion, they turn all
        # somas into their custom 'Three-point soma representation':
        # http://neuromorpho.org/SomaFormat.html
        return SomaNeuromorphoThreePointCylinders

    return {0: None,
            1: SomaSinglePoint}.get(npoints, SomaCylinders)
[ "def", "_get_type", "(", "points", ",", "soma_class", ")", ":", "assert", "soma_class", "in", "(", "SOMA_CONTOUR", ",", "SOMA_CYLINDER", ")", "npoints", "=", "len", "(", "points", ")", "if", "soma_class", "==", "SOMA_CONTOUR", ":", "return", "{", "0", ":", "None", ",", "1", ":", "SomaSinglePoint", ",", "2", ":", "None", "}", ".", "get", "(", "npoints", ",", "SomaSimpleContour", ")", "if", "(", "npoints", "==", "3", "and", "points", "[", "0", "]", "[", "COLS", ".", "P", "]", "==", "-", "1", "and", "points", "[", "1", "]", "[", "COLS", ".", "P", "]", "==", "1", "and", "points", "[", "2", "]", "[", "COLS", ".", "P", "]", "==", "1", ")", ":", "L", ".", "warning", "(", "'Using neuromorpho 3-Point soma'", ")", "# NeuroMorpho is the main provider of morphologies, but they", "# with SWC as their default file format: they convert all", "# uploads to SWC. In the process of conversion, they turn all", "# somas into their custom 'Three-point soma representation':", "# http://neuromorpho.org/SomaFormat.html", "return", "SomaNeuromorphoThreePointCylinders", "return", "{", "0", ":", "None", ",", "1", ":", "SomaSinglePoint", "}", ".", "get", "(", "npoints", ",", "SomaCylinders", ")" ]
34.366667
0.000943
def merge(self, range2):
    """Merge this bed with another bed to make a longer bed.
    Returns None if on different chromosomes.

    Keeps the options of this class (not range2)

    :param range2:
    :type range2: GenomicRange
    :return: bigger range with both
    :rtype: GenomicRange
    """
    if self.chr != range2.chr:
        return None
    o = type(self)(self.chr, min(self.start, range2.start) + self._start_offset,
                   max(self.end, range2.end), self.payload, self.dir)
    return o
[ "def", "merge", "(", "self", ",", "range2", ")", ":", "if", "self", ".", "chr", "!=", "range2", ".", "chr", ":", "return", "None", "o", "=", "type", "(", "self", ")", "(", "self", ".", "chr", ",", "min", "(", "self", ".", "start", ",", "range2", ".", "start", ")", "+", "self", ".", "_start_offset", ",", "max", "(", "self", ".", "end", ",", "range2", ".", "end", ")", ",", "self", ".", "payload", ",", "self", ".", "dir", ")", "return", "o" ]
29.4375
0.024691