repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
LISE-B26/pylabcontrol
build/lib/pylabcontrol/src/tools/export_default.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/tools/export_default.py#L170-L212
def export(target_folder, source_folders = None, class_type ='all', raise_errors = False): """ exports the existing scripts/instruments (future: probes) into folder as .b26 files Args: target_folder: target location of created .b26 script files source_folder: singel path or list of paths that contains the location of python script files can also be just the name of a module class_type: string, one of the 4 following options -probes (exports probes) --not implemented yet-- -scripts (exports scripts) -instruments (exports instruments) -all (exports instruments, scripts and probes) target_folder: target folder whereb .b26 files are created Returns: """ if class_type not in ('all', 'scripts', 'instruments', 'probes'): print('unknown type to export') return if not os.path.isdir(target_folder): try: os.mkdir(target_folder) except: print((target_folder, ' is invalid target folder')) target_folder = None if target_folder is not None: if source_folders is None: module_list = [os.path.dirname(os.path.dirname(inspect.getfile(inspect.currentframe())))] elif isinstance(source_folders, str): module_list = [source_folders] elif isinstance(source_folders, list): module_list = source_folders else: raise TypeError('unknown type for source_folders') for path_to_module in module_list: if class_type in ('all', 'scripts'): export_default_scripts(target_folder, source_folder=path_to_module, raise_errors=raise_errors) if class_type in ('all', 'instruments'): export_default_instruments(target_folder, path_to_module, raise_errors=raise_errors) if class_type in ('all', 'probes'): print('WARNING: probes currently not supported')
[ "def", "export", "(", "target_folder", ",", "source_folders", "=", "None", ",", "class_type", "=", "'all'", ",", "raise_errors", "=", "False", ")", ":", "if", "class_type", "not", "in", "(", "'all'", ",", "'scripts'", ",", "'instruments'", ",", "'probes'", ...
exports the existing scripts/instruments (future: probes) into folder as .b26 files Args: target_folder: target location of created .b26 script files source_folder: singel path or list of paths that contains the location of python script files can also be just the name of a module class_type: string, one of the 4 following options -probes (exports probes) --not implemented yet-- -scripts (exports scripts) -instruments (exports instruments) -all (exports instruments, scripts and probes) target_folder: target folder whereb .b26 files are created Returns:
[ "exports", "the", "existing", "scripts", "/", "instruments", "(", "future", ":", "probes", ")", "into", "folder", "as", ".", "b26", "files", "Args", ":", "target_folder", ":", "target", "location", "of", "created", ".", "b26", "script", "files", "source_fold...
python
train
ChristianTremblay/BAC0
BAC0/core/devices/Device.py
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Device.py#L584-L594
def analog_units(self): """ Shortcut to retrieve all analog points units [Used by Bokeh trending feature] """ au = [] us = [] for each in self.points: if isinstance(each, NumericPoint): au.append(each.properties.name) us.append(each.properties.units_state) return dict(zip(au, us))
[ "def", "analog_units", "(", "self", ")", ":", "au", "=", "[", "]", "us", "=", "[", "]", "for", "each", "in", "self", ".", "points", ":", "if", "isinstance", "(", "each", ",", "NumericPoint", ")", ":", "au", ".", "append", "(", "each", ".", "prope...
Shortcut to retrieve all analog points units [Used by Bokeh trending feature]
[ "Shortcut", "to", "retrieve", "all", "analog", "points", "units", "[", "Used", "by", "Bokeh", "trending", "feature", "]" ]
python
train
stevearc/dql
dql/engine.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L162-L167
def cloudwatch_connection(self): """ Lazy create a connection to cloudwatch """ if self._cloudwatch_connection is None: conn = self._session.create_client("cloudwatch", self.connection.region) self._cloudwatch_connection = conn return self._cloudwatch_connection
[ "def", "cloudwatch_connection", "(", "self", ")", ":", "if", "self", ".", "_cloudwatch_connection", "is", "None", ":", "conn", "=", "self", ".", "_session", ".", "create_client", "(", "\"cloudwatch\"", ",", "self", ".", "connection", ".", "region", ")", "sel...
Lazy create a connection to cloudwatch
[ "Lazy", "create", "a", "connection", "to", "cloudwatch" ]
python
train
napalm-automation/napalm-ios
napalm_ios/ios.py
https://github.com/napalm-automation/napalm-ios/blob/7bbbc6a4d9f70a5b8cf32b7c7072a7ab437ddb81/napalm_ios/ios.py#L201-L208
def _create_tmp_file(config): """Write temp file and for use with inline config and SCP.""" tmp_dir = tempfile.gettempdir() rand_fname = py23_compat.text_type(uuid.uuid4()) filename = os.path.join(tmp_dir, rand_fname) with open(filename, 'wt') as fobj: fobj.write(config) return filename
[ "def", "_create_tmp_file", "(", "config", ")", ":", "tmp_dir", "=", "tempfile", ".", "gettempdir", "(", ")", "rand_fname", "=", "py23_compat", ".", "text_type", "(", "uuid", ".", "uuid4", "(", ")", ")", "filename", "=", "os", ".", "path", ".", "join", ...
Write temp file and for use with inline config and SCP.
[ "Write", "temp", "file", "and", "for", "use", "with", "inline", "config", "and", "SCP", "." ]
python
train
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2974-L3007
def stk_metadata(self): """Return STK metadata from UIC tags as dict.""" if not self.is_stk: return None page = self.pages[0] tags = page.tags result = {} result['NumberPlanes'] = tags['UIC2tag'].count if page.description: result['PlaneDescriptions'] = page.description.split('\0') # result['plane_descriptions'] = stk_description_metadata( # page.image_description) if 'UIC1tag' in tags: result.update(tags['UIC1tag'].value) if 'UIC3tag' in tags: result.update(tags['UIC3tag'].value) # wavelengths if 'UIC4tag' in tags: result.update(tags['UIC4tag'].value) # override uic1 tags uic2tag = tags['UIC2tag'].value result['ZDistance'] = uic2tag['ZDistance'] result['TimeCreated'] = uic2tag['TimeCreated'] result['TimeModified'] = uic2tag['TimeModified'] try: result['DatetimeCreated'] = numpy.array( [julian_datetime(*dt) for dt in zip(uic2tag['DateCreated'], uic2tag['TimeCreated'])], dtype='datetime64[ns]') result['DatetimeModified'] = numpy.array( [julian_datetime(*dt) for dt in zip(uic2tag['DateModified'], uic2tag['TimeModified'])], dtype='datetime64[ns]') except ValueError as exc: log.warning('STK metadata: %s: %s', exc.__class__.__name__, exc) return result
[ "def", "stk_metadata", "(", "self", ")", ":", "if", "not", "self", ".", "is_stk", ":", "return", "None", "page", "=", "self", ".", "pages", "[", "0", "]", "tags", "=", "page", ".", "tags", "result", "=", "{", "}", "result", "[", "'NumberPlanes'", "...
Return STK metadata from UIC tags as dict.
[ "Return", "STK", "metadata", "from", "UIC", "tags", "as", "dict", "." ]
python
train
DarkEnergySurvey/ugali
ugali/analysis/results.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/results.py#L317-L332
def surfaceBrightness(abs_mag, r_physical, distance): """ Compute the average surface brightness [mag arcsec^-2] within the half-light radius abs_mag = absolute magnitude [mag] r_physical = half-light radius [kpc] distance = [kpc] The factor 2 in the c_v equation below account for half the luminosity within the half-light radius. The 3600.**2 is conversion from deg^2 to arcsec^2 c_v = 2.5 * np.log10(2.) + 2.5 * np.log10(np.pi * 3600.**2) = 19.78 """ r_angle = np.degrees(np.arctan(r_physical / distance)) c_v = 19.78 # mag/arcsec^2 return abs_mag + dist2mod(distance) + c_v + 2.5 * np.log10(r_angle**2)
[ "def", "surfaceBrightness", "(", "abs_mag", ",", "r_physical", ",", "distance", ")", ":", "r_angle", "=", "np", ".", "degrees", "(", "np", ".", "arctan", "(", "r_physical", "/", "distance", ")", ")", "c_v", "=", "19.78", "# mag/arcsec^2", "return", "abs_ma...
Compute the average surface brightness [mag arcsec^-2] within the half-light radius abs_mag = absolute magnitude [mag] r_physical = half-light radius [kpc] distance = [kpc] The factor 2 in the c_v equation below account for half the luminosity within the half-light radius. The 3600.**2 is conversion from deg^2 to arcsec^2 c_v = 2.5 * np.log10(2.) + 2.5 * np.log10(np.pi * 3600.**2) = 19.78
[ "Compute", "the", "average", "surface", "brightness", "[", "mag", "arcsec^", "-", "2", "]", "within", "the", "half", "-", "light", "radius" ]
python
train
vsergeev/python-periphery
periphery/mmio.py
https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/mmio.py#L245-L268
def write(self, offset, data): """Write a string of bytes to the specified `offset` in bytes, relative to the base physical address of the MMIO region. Args: offset (int, long): offset from base physical address, in bytes. data (bytes, bytearray, list): a byte array or list of 8-bit integers to write. Raises: TypeError: if `offset` or `data` type are invalid. ValueError: if `offset` is out of bounds, or if data is not valid bytes. """ if not isinstance(offset, (int, long)): raise TypeError("Invalid offset type, should be integer.") if not isinstance(data, (bytes, bytearray, list)): raise TypeError("Invalid data type, expected bytes, bytearray, or list.") offset = self._adjust_offset(offset) self._validate_offset(offset, len(data)) data = bytes(bytearray(data)) self.mapping[offset:offset + len(data)] = data
[ "def", "write", "(", "self", ",", "offset", ",", "data", ")", ":", "if", "not", "isinstance", "(", "offset", ",", "(", "int", ",", "long", ")", ")", ":", "raise", "TypeError", "(", "\"Invalid offset type, should be integer.\"", ")", "if", "not", "isinstanc...
Write a string of bytes to the specified `offset` in bytes, relative to the base physical address of the MMIO region. Args: offset (int, long): offset from base physical address, in bytes. data (bytes, bytearray, list): a byte array or list of 8-bit integers to write. Raises: TypeError: if `offset` or `data` type are invalid. ValueError: if `offset` is out of bounds, or if data is not valid bytes.
[ "Write", "a", "string", "of", "bytes", "to", "the", "specified", "offset", "in", "bytes", "relative", "to", "the", "base", "physical", "address", "of", "the", "MMIO", "region", "." ]
python
train
wrboyce/telegrambot
telegrambot/api/__init__.py
https://github.com/wrboyce/telegrambot/blob/c35ce19886df4c306a2a19851cc1f63e3066d70d/telegrambot/api/__init__.py#L178-L185
def send_chat_action(self, action, to): """ Use this method when you need to tell the user that something is happening on the bot's side. The status is set for 5 seconds or less (when a message arrives from your bot, Telegram clients clear its typing status). """ payload = dict(chat_id=to, action=action) return self._get('sendChatAction', payload)
[ "def", "send_chat_action", "(", "self", ",", "action", ",", "to", ")", ":", "payload", "=", "dict", "(", "chat_id", "=", "to", ",", "action", "=", "action", ")", "return", "self", ".", "_get", "(", "'sendChatAction'", ",", "payload", ")" ]
Use this method when you need to tell the user that something is happening on the bot's side. The status is set for 5 seconds or less (when a message arrives from your bot, Telegram clients clear its typing status).
[ "Use", "this", "method", "when", "you", "need", "to", "tell", "the", "user", "that", "something", "is", "happening", "on", "the", "bot", "s", "side", ".", "The", "status", "is", "set", "for", "5", "seconds", "or", "less", "(", "when", "a", "message", ...
python
train
wolfhong/formic
formic/formic.py
https://github.com/wolfhong/formic/blob/0d81eb88dcbb6fa705194fc6ccf2993f4abbaa76/formic/formic.py#L237-L272
def _match_iter_generic(self, path_elements, start_at): """Implementation of match_iter for >1 self.elements""" length = len(path_elements) # If bound to start, we stop searching at the first element if self.bound_start: end = 1 else: end = length - self.length + 1 # If bound to end, we start searching as late as possible if self.bound_end: start = length - self.length else: start = start_at if start > end or start < start_at or end > length - self.length + 1: # It's impossible to match. Either # 1) the search has a fixed start and end, and path_elements # does not have enough elements for a match, or # 2) To match the bound_end, we have to start before the start_at, # which means the search is impossible # 3) The end is after the last possible end point in path_elements return for index in range(start, end): matched = True i = index for matcher in self.elements: element = path_elements[i] i += 1 if not matcher.match(element): matched = False break if matched: yield index + self.length
[ "def", "_match_iter_generic", "(", "self", ",", "path_elements", ",", "start_at", ")", ":", "length", "=", "len", "(", "path_elements", ")", "# If bound to start, we stop searching at the first element", "if", "self", ".", "bound_start", ":", "end", "=", "1", "else"...
Implementation of match_iter for >1 self.elements
[ "Implementation", "of", "match_iter", "for", ">", "1", "self", ".", "elements" ]
python
train
aiven/pghoard
pghoard/pghoard.py
https://github.com/aiven/pghoard/blob/2994165d4ef3ff7a5669a2527346bcbfb5b3bd8a/pghoard/pghoard.py#L346-L392
def startup_walk_for_missed_files(self): """Check xlog and xlog_incoming directories for files that receivexlog has received but not yet compressed as well as the files we have compressed but not yet uploaded and process them.""" for site in self.config["backup_sites"]: compressed_xlog_path, _ = self.create_backup_site_paths(site) uncompressed_xlog_path = compressed_xlog_path + "_incoming" # Process uncompressed files (ie WAL pg_receivexlog received) for filename in os.listdir(uncompressed_xlog_path): full_path = os.path.join(uncompressed_xlog_path, filename) if not wal.WAL_RE.match(filename) and not wal.TIMELINE_RE.match(filename): self.log.warning("Found invalid file %r from incoming xlog directory", full_path) continue compression_event = { "delete_file_after_compression": True, "full_path": full_path, "site": site, "src_path": "{}.partial", "type": "MOVE", } self.log.debug("Found: %r when starting up, adding to compression queue", compression_event) self.compression_queue.put(compression_event) # Process compressed files (ie things we've processed but not yet uploaded) for filename in os.listdir(compressed_xlog_path): if filename.endswith(".metadata"): continue # silently ignore .metadata files, they're expected and processed below full_path = os.path.join(compressed_xlog_path, filename) metadata_path = full_path + ".metadata" is_xlog = wal.WAL_RE.match(filename) is_timeline = wal.TIMELINE_RE.match(filename) if not ((is_xlog or is_timeline) and os.path.exists(metadata_path)): self.log.warning("Found invalid file %r from compressed xlog directory", full_path) continue with open(metadata_path, "r") as fp: metadata = json.load(fp) transfer_event = { "file_size": os.path.getsize(full_path), "filetype": "xlog" if is_xlog else "timeline", "local_path": full_path, "metadata": metadata, "site": site, "type": "UPLOAD", } self.log.debug("Found: %r when starting up, adding to transfer queue", transfer_event) self.transfer_queue.put(transfer_event)
[ "def", "startup_walk_for_missed_files", "(", "self", ")", ":", "for", "site", "in", "self", ".", "config", "[", "\"backup_sites\"", "]", ":", "compressed_xlog_path", ",", "_", "=", "self", ".", "create_backup_site_paths", "(", "site", ")", "uncompressed_xlog_path"...
Check xlog and xlog_incoming directories for files that receivexlog has received but not yet compressed as well as the files we have compressed but not yet uploaded and process them.
[ "Check", "xlog", "and", "xlog_incoming", "directories", "for", "files", "that", "receivexlog", "has", "received", "but", "not", "yet", "compressed", "as", "well", "as", "the", "files", "we", "have", "compressed", "but", "not", "yet", "uploaded", "and", "proces...
python
train
ARMmbed/icetea
icetea_lib/tools/asserts.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/asserts.py#L191-L215
def assertJsonContains(jsonStr=None, key=None, message=None): """ Assert that jsonStr contains key. :param jsonStr: Json as string :param key: Key to look for :param message: Failure message :raises: TestStepFail if key is not in jsonStr or if loading jsonStr to a dictionary fails or if jsonStr is None. """ if jsonStr is not None: try: data = json.loads(jsonStr) if key not in data: raise TestStepFail( format_message(message) if message is not None else "Assert: " "Key : %s is not " "in : %s" % (str(key), str(jsonStr))) except (TypeError, ValueError) as e: raise TestStepFail( format_message(message) if message is not None else "Unable to parse json "+str(e)) else: raise TestStepFail( format_message(message) if message is not None else "Json string is empty")
[ "def", "assertJsonContains", "(", "jsonStr", "=", "None", ",", "key", "=", "None", ",", "message", "=", "None", ")", ":", "if", "jsonStr", "is", "not", "None", ":", "try", ":", "data", "=", "json", ".", "loads", "(", "jsonStr", ")", "if", "key", "n...
Assert that jsonStr contains key. :param jsonStr: Json as string :param key: Key to look for :param message: Failure message :raises: TestStepFail if key is not in jsonStr or if loading jsonStr to a dictionary fails or if jsonStr is None.
[ "Assert", "that", "jsonStr", "contains", "key", "." ]
python
train
MolSSI-BSE/basis_set_exchange
basis_set_exchange/curate/compare.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/curate/compare.py#L188-L218
def compare_ecp_pots(potential1, potential2, compare_meta=False, rel_tol=0.0): ''' Compare two ecp potentials for approximate equality (exponents/coefficients are within a tolerance) If compare_meta is True, the metadata is also compared for exact equality. ''' if potential1['angular_momentum'] != potential2['angular_momentum']: return False rexponents1 = potential1['r_exponents'] rexponents2 = potential2['r_exponents'] gexponents1 = potential1['gaussian_exponents'] gexponents2 = potential2['gaussian_exponents'] coefficients1 = potential1['coefficients'] coefficients2 = potential2['coefficients'] # integer comparison if rexponents1 != rexponents2: return False if not _compare_vector(gexponents1, gexponents2, rel_tol): return False if not _compare_matrix(coefficients1, coefficients2, rel_tol): return False if compare_meta: if potential1['ecp_type'] != potential2['ecp_type']: return False return True else: return True
[ "def", "compare_ecp_pots", "(", "potential1", ",", "potential2", ",", "compare_meta", "=", "False", ",", "rel_tol", "=", "0.0", ")", ":", "if", "potential1", "[", "'angular_momentum'", "]", "!=", "potential2", "[", "'angular_momentum'", "]", ":", "return", "Fa...
Compare two ecp potentials for approximate equality (exponents/coefficients are within a tolerance) If compare_meta is True, the metadata is also compared for exact equality.
[ "Compare", "two", "ecp", "potentials", "for", "approximate", "equality", "(", "exponents", "/", "coefficients", "are", "within", "a", "tolerance", ")" ]
python
train
saltstack/salt
salt/minion.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2928-L2945
def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop()
[ "def", "destroy", "(", "self", ")", ":", "if", "self", ".", "_running", "is", "False", ":", "return", "self", ".", "_running", "=", "False", "if", "hasattr", "(", "self", ",", "'schedule'", ")", ":", "del", "self", ".", "schedule", "if", "hasattr", "...
Tear down the minion
[ "Tear", "down", "the", "minion" ]
python
train
quantopian/pgcontents
pgcontents/query.py
https://github.com/quantopian/pgcontents/blob/ed36268b7917332d16868208e1e565742a8753e1/pgcontents/query.py#L256-L286
def get_directory(db, user_id, api_dirname, content): """ Return the names of all files/directories that are direct children of api_dirname. If content is False, return a bare model containing just a database-style name. """ db_dirname = from_api_dirname(api_dirname) if not _dir_exists(db, user_id, db_dirname): raise NoSuchDirectory(api_dirname) if content: files = files_in_directory( db, user_id, db_dirname, ) subdirectories = directories_in_directory( db, user_id, db_dirname, ) else: files, subdirectories = None, None # TODO: Consider using namedtuples for these return values. return { 'name': db_dirname, 'files': files, 'subdirs': subdirectories, }
[ "def", "get_directory", "(", "db", ",", "user_id", ",", "api_dirname", ",", "content", ")", ":", "db_dirname", "=", "from_api_dirname", "(", "api_dirname", ")", "if", "not", "_dir_exists", "(", "db", ",", "user_id", ",", "db_dirname", ")", ":", "raise", "N...
Return the names of all files/directories that are direct children of api_dirname. If content is False, return a bare model containing just a database-style name.
[ "Return", "the", "names", "of", "all", "files", "/", "directories", "that", "are", "direct", "children", "of", "api_dirname", "." ]
python
test
tbobm/devscripts
devscripts/logs.py
https://github.com/tbobm/devscripts/blob/beb23371ba80739afb5474766e8049ead3837925/devscripts/logs.py#L12-L49
def simple_logger(**kwargs): """ Creates a simple logger :param str name: The logger's name ('api', 'back'...) :param int base_level: Lowest level allowed to log (Default: DEBUG) :param str log_format: Logging format used for STDOUT (Default: logs.FORMAT) :param bool should_stdout: Allows to log to stdout (Default: True) :param int stdout_level: Lowest level allowed to log to STDOUT (Default: DEBUG) :param bool should_http: Allows to log to HTTP server :param int http_level: Lowest level allowed to log to the HTTP server (Has to be superior or equals to base_level) :param str http_host: Address of the HTTP Server :param str http_url: Url of the HTTP Server """ # Args logger_name = kwargs.get('name') base_level = kwargs.get('base_level', logging.DEBUG) should_stdout = kwargs.get('should_stdout', True) should_http = kwargs.get('should_http', False) # Generate base logger logger = logging.getLogger(logger_name) logger.setLevel(base_level) # Define stdout handler if should_stdout: logger.addHandler(_add_stream_handler(**kwargs)) if should_http: logger.addHandler(_add_http_handler(**kwargs)) return logger
[ "def", "simple_logger", "(", "*", "*", "kwargs", ")", ":", "# Args", "logger_name", "=", "kwargs", ".", "get", "(", "'name'", ")", "base_level", "=", "kwargs", ".", "get", "(", "'base_level'", ",", "logging", ".", "DEBUG", ")", "should_stdout", "=", "kwa...
Creates a simple logger :param str name: The logger's name ('api', 'back'...) :param int base_level: Lowest level allowed to log (Default: DEBUG) :param str log_format: Logging format used for STDOUT (Default: logs.FORMAT) :param bool should_stdout: Allows to log to stdout (Default: True) :param int stdout_level: Lowest level allowed to log to STDOUT (Default: DEBUG) :param bool should_http: Allows to log to HTTP server :param int http_level: Lowest level allowed to log to the HTTP server (Has to be superior or equals to base_level) :param str http_host: Address of the HTTP Server :param str http_url: Url of the HTTP Server
[ "Creates", "a", "simple", "logger" ]
python
train
telefonicaid/fiware-sdc
python-sdcclient/sdcclient/client.py
https://github.com/telefonicaid/fiware-sdc/blob/d2d5f87fc574caf6bcc49594bbcb31f620ba8c51/python-sdcclient/sdcclient/client.py#L165-L173
def set_headers(self, headers): """ Set header. :param headers: Headers to be used by next request (dict) :return: None """ logger.debug("Setting headers: " + str(headers)) self.headers = headers
[ "def", "set_headers", "(", "self", ",", "headers", ")", ":", "logger", ".", "debug", "(", "\"Setting headers: \"", "+", "str", "(", "headers", ")", ")", "self", ".", "headers", "=", "headers" ]
Set header. :param headers: Headers to be used by next request (dict) :return: None
[ "Set", "header", ".", ":", "param", "headers", ":", "Headers", "to", "be", "used", "by", "next", "request", "(", "dict", ")", ":", "return", ":", "None" ]
python
train
bjoernricks/python-quilt
quilt/db.py
https://github.com/bjoernricks/python-quilt/blob/fae88237f601848cc34d073584d9dcb409f01777/quilt/db.py#L296-L300
def create(self): """ Creates the dirname and inserts a .version file """ if not os.path.exists(self.dirname): os.makedirs(self.dirname) self._create_version(self.version_file)
[ "def", "create", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "dirname", ")", ":", "os", ".", "makedirs", "(", "self", ".", "dirname", ")", "self", ".", "_create_version", "(", "self", ".", "version_file", ...
Creates the dirname and inserts a .version file
[ "Creates", "the", "dirname", "and", "inserts", "a", ".", "version", "file" ]
python
test
toidi/hadoop-yarn-api-python-client
yarn_api_client/resource_manager.py
https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/resource_manager.py#L161-L172
def cluster_application(self, application_id): """ An application resource contains information about a particular application that was submitted to a cluster. :param str application_id: The application id :returns: API response object with JSON data :rtype: :py:class:`yarn_api_client.base.Response` """ path = '/ws/v1/cluster/apps/{appid}'.format(appid=application_id) return self.request(path)
[ "def", "cluster_application", "(", "self", ",", "application_id", ")", ":", "path", "=", "'/ws/v1/cluster/apps/{appid}'", ".", "format", "(", "appid", "=", "application_id", ")", "return", "self", ".", "request", "(", "path", ")" ]
An application resource contains information about a particular application that was submitted to a cluster. :param str application_id: The application id :returns: API response object with JSON data :rtype: :py:class:`yarn_api_client.base.Response`
[ "An", "application", "resource", "contains", "information", "about", "a", "particular", "application", "that", "was", "submitted", "to", "a", "cluster", "." ]
python
train
arista-eosplus/pyeapi
pyeapi/eapilib.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/eapilib.py#L308-L436
def send(self, data): """Sends the eAPI request to the destination node This method is responsible for sending an eAPI request to the destination node and returning a response based on the eAPI response object. eAPI responds to request messages with either a success message or failure message. eAPI Response - success .. code-block:: json { "jsonrpc": "2.0", "result": [ {}, {} { "warnings": [ <message> ] }, ], "id": <reqid> } eAPI Response - failure .. code-block:: json { "jsonrpc": "2.0", "error": { "code": <int>, "message": <string> "data": [ {}, {}, { "errors": [ <message> ] } ] } "id": <reqid> } Args: data (string): The data to be included in the body of the eAPI request object Returns: A decoded response. The response object is deserialized from JSON and returned as a standard Python dictionary object Raises: CommandError if an eAPI failure response object is returned from the node. The CommandError exception includes the error code and error message from the eAPI response. """ try: _LOGGER.debug('Request content: {}'.format(data)) # debug('eapi_request: %s' % data) self.transport.putrequest('POST', '/command-api') self.transport.putheader('Content-type', 'application/json-rpc') self.transport.putheader('Content-length', '%d' % len(data)) if self._auth: self.transport.putheader('Authorization', 'Basic %s' % self._auth) if int(sys.version[0]) > 2: # For Python 3.x compatibility data = data.encode() self.transport.endheaders(message_body=data) try: # Python 2.7: use buffering of HTTP responses response = self.transport.getresponse(buffering=True) except TypeError: # Python 2.6: older, and 3.x on response = self.transport.getresponse() response_content = response.read() _LOGGER.debug('Response: status:{status}, reason:{reason}'.format( status=response.status, reason=response.reason)) _LOGGER.debug('Response content: {}'.format(response_content)) if response.status == 401: raise ConnectionError(str(self), '%s. %s' % (response.reason, response_content)) # Work around for Python 2.7/3.x compatibility if not type(response_content) == str: # For Python 3.x - decode bytes into string response_content = response_content.decode() decoded = json.loads(response_content) _LOGGER.debug('eapi_response: %s' % decoded) if 'error' in decoded: (code, msg, err, out) = self._parse_error_message(decoded) pattern = "unexpected keyword argument '(.*)'" match = re.search(pattern, msg) if match: auto_msg = ('%s parameter is not supported in this' ' version of EOS.' % match.group(1)) _LOGGER.error(auto_msg) msg = msg + '. ' + auto_msg raise CommandError(code, msg, command_error=err, output=out) return decoded # socket.error is deprecated in python 3 and replaced with OSError. except (socket.error, OSError) as exc: _LOGGER.exception(exc) self.socket_error = exc self.error = exc error_msg = 'Socket error during eAPI connection: %s' % str(exc) raise ConnectionError(str(self), error_msg) except ValueError as exc: _LOGGER.exception(exc) self.socket_error = None self.error = exc raise ConnectionError(str(self), 'unable to connect to eAPI') finally: self.transport.close()
[ "def", "send", "(", "self", ",", "data", ")", ":", "try", ":", "_LOGGER", ".", "debug", "(", "'Request content: {}'", ".", "format", "(", "data", ")", ")", "# debug('eapi_request: %s' % data)", "self", ".", "transport", ".", "putrequest", "(", "'POST'", ",",...
Sends the eAPI request to the destination node This method is responsible for sending an eAPI request to the destination node and returning a response based on the eAPI response object. eAPI responds to request messages with either a success message or failure message. eAPI Response - success .. code-block:: json { "jsonrpc": "2.0", "result": [ {}, {} { "warnings": [ <message> ] }, ], "id": <reqid> } eAPI Response - failure .. code-block:: json { "jsonrpc": "2.0", "error": { "code": <int>, "message": <string> "data": [ {}, {}, { "errors": [ <message> ] } ] } "id": <reqid> } Args: data (string): The data to be included in the body of the eAPI request object Returns: A decoded response. The response object is deserialized from JSON and returned as a standard Python dictionary object Raises: CommandError if an eAPI failure response object is returned from the node. The CommandError exception includes the error code and error message from the eAPI response.
[ "Sends", "the", "eAPI", "request", "to", "the", "destination", "node" ]
python
train
ARMmbed/icetea
icetea_lib/Result.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/Result.py#L159-L168
def build_date(self): """ get build date. :return: build date. None if not found """ # pylint: disable=len-as-condition if len(self.dutinformation) > 0 and (self.dutinformation.get(0).build is not None): return self.dutinformation.get(0).build.date return None
[ "def", "build_date", "(", "self", ")", ":", "# pylint: disable=len-as-condition", "if", "len", "(", "self", ".", "dutinformation", ")", ">", "0", "and", "(", "self", ".", "dutinformation", ".", "get", "(", "0", ")", ".", "build", "is", "not", "None", ")"...
get build date. :return: build date. None if not found
[ "get", "build", "date", "." ]
python
train
andreikop/qutepart
qutepart/bookmarks.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/bookmarks.py#L25-L36
def _createAction(self, widget, iconFileName, text, shortcut, slot): """Create QAction with given parameters and add to the widget """ icon = qutepart.getIcon(iconFileName) action = QAction(icon, text, widget) action.setShortcut(QKeySequence(shortcut)) action.setShortcutContext(Qt.WidgetShortcut) action.triggered.connect(slot) widget.addAction(action) return action
[ "def", "_createAction", "(", "self", ",", "widget", ",", "iconFileName", ",", "text", ",", "shortcut", ",", "slot", ")", ":", "icon", "=", "qutepart", ".", "getIcon", "(", "iconFileName", ")", "action", "=", "QAction", "(", "icon", ",", "text", ",", "w...
Create QAction with given parameters and add to the widget
[ "Create", "QAction", "with", "given", "parameters", "and", "add", "to", "the", "widget" ]
python
train
google/textfsm
textfsm/parser.py
https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L622-L626
def _GetValue(self, name): """Returns the TextFSMValue object natching the requested name.""" for value in self.values: if value.name == name: return value
[ "def", "_GetValue", "(", "self", ",", "name", ")", ":", "for", "value", "in", "self", ".", "values", ":", "if", "value", ".", "name", "==", "name", ":", "return", "value" ]
Returns the TextFSMValue object natching the requested name.
[ "Returns", "the", "TextFSMValue", "object", "natching", "the", "requested", "name", "." ]
python
train
aarongarrett/inspyred
inspyred/ec/variators/crossovers.py
https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/crossovers.py#L444-L493
def laplace_crossover(random, mom, dad, args): """Return the offspring of Laplace crossover on the candidates. This function performs Laplace crosssover (LX), following the implementation specified in (Deep and Thakur, "A new crossover operator for real coded genetic algorithms," Applied Mathematics and Computation, Volume 188, Issue 1, May 2007, pp. 895--911). This function also makes use of the bounder function as specified in the EC's ``evolve`` method. .. Arguments: random -- the random number generator object mom -- the first parent candidate dad -- the second parent candidate args -- a dictionary of keyword arguments Optional keyword arguments in args: - *crossover_rate* -- the rate at which crossover is performed (default 1.0) - *lx_location* -- the location parameter (default 0) - *lx_scale* -- the scale parameter (default 0.5) In some sense, the *lx_location* and *lx_scale* parameters can be thought of as analogs in a Laplace distribution to the mean and standard deviation of a Gaussian distribution. If *lx_scale* is near zero, offspring will be produced near the parents. If *lx_scale* is farther from zero, offspring will be produced far from the parents. """ crossover_rate = args.setdefault('crossover_rate', 1.0) if random.random() < crossover_rate: bounder = args['_ec'].bounder a = args.setdefault('lx_location', 0) b = args.setdefault('lx_scale', 0.5) bro = copy.copy(dad) sis = copy.copy(mom) for i, (m, d) in enumerate(zip(mom, dad)): u = random.random() if random.random() <= 0.5: beta = a - b * math.log(u) else: beta = a + b * math.log(u) bro[i] = m + beta * abs(m - d) sis[i] = d + beta * abs(m - d) bro = bounder(bro, args) sis = bounder(sis, args) return [bro, sis] else: return [mom, dad]
[ "def", "laplace_crossover", "(", "random", ",", "mom", ",", "dad", ",", "args", ")", ":", "crossover_rate", "=", "args", ".", "setdefault", "(", "'crossover_rate'", ",", "1.0", ")", "if", "random", ".", "random", "(", ")", "<", "crossover_rate", ":", "bo...
Return the offspring of Laplace crossover on the candidates. This function performs Laplace crosssover (LX), following the implementation specified in (Deep and Thakur, "A new crossover operator for real coded genetic algorithms," Applied Mathematics and Computation, Volume 188, Issue 1, May 2007, pp. 895--911). This function also makes use of the bounder function as specified in the EC's ``evolve`` method. .. Arguments: random -- the random number generator object mom -- the first parent candidate dad -- the second parent candidate args -- a dictionary of keyword arguments Optional keyword arguments in args: - *crossover_rate* -- the rate at which crossover is performed (default 1.0) - *lx_location* -- the location parameter (default 0) - *lx_scale* -- the scale parameter (default 0.5) In some sense, the *lx_location* and *lx_scale* parameters can be thought of as analogs in a Laplace distribution to the mean and standard deviation of a Gaussian distribution. If *lx_scale* is near zero, offspring will be produced near the parents. If *lx_scale* is farther from zero, offspring will be produced far from the parents.
[ "Return", "the", "offspring", "of", "Laplace", "crossover", "on", "the", "candidates", ".", "This", "function", "performs", "Laplace", "crosssover", "(", "LX", ")", "following", "the", "implementation", "specified", "in", "(", "Deep", "and", "Thakur", "A", "ne...
python
train
Opentrons/opentrons
api/src/opentrons/system/smoothie_update.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/system/smoothie_update.py#L6-L16
def _ensure_programmer_executable(): """ Find the lpc21isp executable and ensure it is executable """ # Find the lpc21isp executable, explicitly allowing the case where it # is not executable (since that’s exactly what we’re trying to fix) updater_executable = shutil.which('lpc21isp', mode=os.F_OK) # updater_executable might be None; we’re passing it here unchecked # because if it is None, we’re about to fail when we try to program # the smoothie, and we want the exception to bubble up. os.chmod(updater_executable, 0o777)
[ "def", "_ensure_programmer_executable", "(", ")", ":", "# Find the lpc21isp executable, explicitly allowing the case where it", "# is not executable (since that’s exactly what we’re trying to fix)", "updater_executable", "=", "shutil", ".", "which", "(", "'lpc21isp'", ",", "mode", "=...
Find the lpc21isp executable and ensure it is executable
[ "Find", "the", "lpc21isp", "executable", "and", "ensure", "it", "is", "executable" ]
python
train
doconix/django-mako-plus
django_mako_plus/template/util.py
https://github.com/doconix/django-mako-plus/blob/a90f9b4af19e5fa9f83452989cdcaed21569a181/django_mako_plus/template/util.py#L40-L170
def get_template_debug(template_name, error): ''' This structure is what Django wants when errors occur in templates. It gives the user a nice stack trace in the error page during debug. ''' # This is taken from mako.exceptions.html_error_template(), which has an issue # in Py3 where files get loaded as bytes but `lines = src.split('\n')` below # splits with a string. Not sure if this is a bug or if I'm missing something, # but doing a custom debugging template allows a workaround as well as a custom # DMP look. # I used to have a file in the templates directory for this, but too many users # reported TemplateNotFound errors. This function is a bit of a hack, but it only # happens during development (and mako.exceptions does this same thing). # /justification stacktrace_template = MakoTemplate(r""" <%! from mako.exceptions import syntax_highlight, pygments_html_formatter %> <style> .stacktrace { margin:5px 5px 5px 5px; } .highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; } .nonhighlight { padding:0px; background-color:#DFDFDF; } .sample { padding:10px; margin:10px 10px 10px 10px; font-family:monospace; } .sampleline { padding:0px 10px 0px 10px; } .sourceline { margin:5px 5px 10px 5px; font-family:monospace;} .location { font-size:80%; } .highlight { white-space:pre; } .sampleline { white-space:pre; } % if pygments_html_formatter: ${pygments_html_formatter.get_style_defs() | n} .linenos { min-width: 2.5em; text-align: right; } pre { margin: 0; } .syntax-highlighted { padding: 0 10px; } .syntax-highlightedtable { border-spacing: 1px; } .nonhighlight { border-top: 1px solid #DFDFDF; border-bottom: 1px solid #DFDFDF; } .stacktrace .nonhighlight { margin: 5px 15px 10px; } .sourceline { margin: 0 0; font-family:monospace; } .code { background-color: #F8F8F8; width: 100%; } .error .code { background-color: #FFBDBD; } .error .syntax-highlighted { background-color: #FFBDBD; } % endif ## adjustments to Django css table.source { background-color: #fdfdfd; } table.source > tbody > tr > th { width: auto; } table.source > tbody > tr > td { font-family: inherit; white-space: normal; padding: 15px; } #template { background-color: #b3daff; } </style> <% src = tback.source line = tback.lineno if isinstance(src, bytes): src = src.decode() if src: lines = src.split('\n') else: lines = None %> <h3>${tback.errorname}: ${tback.message}</h3> % if lines: <div class="sample"> <div class="nonhighlight"> % for index in range(max(0, line-4),min(len(lines), line+5)): <% if pygments_html_formatter: pygments_html_formatter.linenostart = index + 1 %> % if index + 1 == line: <% if pygments_html_formatter: old_cssclass = pygments_html_formatter.cssclass pygments_html_formatter.cssclass = 'error ' + old_cssclass %> ${lines[index] | n,syntax_highlight(language='mako')} <% if pygments_html_formatter: pygments_html_formatter.cssclass = old_cssclass %> % else: ${lines[index] | n,syntax_highlight(language='mako')} % endif % endfor </div> </div> % endif <div class="stacktrace"> % for (filename, lineno, function, line) in tback.reverse_traceback: <div class="location">${filename}, line ${lineno}:</div> <div class="nonhighlight"> <% if pygments_html_formatter: pygments_html_formatter.linenostart = lineno %> <div class="sourceline">${line | n,syntax_highlight(filename)}</div> </div> % endfor </div> """) tback = RichTraceback(error, error.__traceback__) lines = stacktrace_template.render_unicode(tback=tback) return { 'message': '', 'source_lines': [ ( '', mark_safe(lines) ), ], 'before': '', 'during': '', 'after': '', 'top': 0, 'bottom': 0, 'total': 0, 'line': tback.lineno or 0, 'name': template_name, 'start': 0, 'end': 0, }
[ "def", "get_template_debug", "(", "template_name", ",", "error", ")", ":", "# This is taken from mako.exceptions.html_error_template(), which has an issue", "# in Py3 where files get loaded as bytes but `lines = src.split('\\n')` below", "# splits with a string. Not sure if this is a bug or if I...
This structure is what Django wants when errors occur in templates. It gives the user a nice stack trace in the error page during debug.
[ "This", "structure", "is", "what", "Django", "wants", "when", "errors", "occur", "in", "templates", ".", "It", "gives", "the", "user", "a", "nice", "stack", "trace", "in", "the", "error", "page", "during", "debug", "." ]
python
train
sorgerlab/indra
indra/sources/isi/api.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/api.py#L240-L264
def process_json_file(file_path, pmid=None, extra_annotations=None, add_grounding=True): """Extracts statements from the given ISI output file. Parameters ---------- file_path : str The ISI output file from which to extract statements pmid : int The PMID of the document being preprocessed, or None if not specified extra_annotations : dict Extra annotations to be added to each statement from this document (can be the empty dictionary) add_grounding : Optional[bool] If True the extracted Statements' grounding is mapped """ logger.info('Extracting from %s' % file_path) with open(file_path, 'rb') as fh: jd = json.load(fh) ip = IsiProcessor(jd, pmid, extra_annotations) ip.get_statements() if add_grounding: ip.add_grounding() return ip
[ "def", "process_json_file", "(", "file_path", ",", "pmid", "=", "None", ",", "extra_annotations", "=", "None", ",", "add_grounding", "=", "True", ")", ":", "logger", ".", "info", "(", "'Extracting from %s'", "%", "file_path", ")", "with", "open", "(", "file_...
Extracts statements from the given ISI output file. Parameters ---------- file_path : str The ISI output file from which to extract statements pmid : int The PMID of the document being preprocessed, or None if not specified extra_annotations : dict Extra annotations to be added to each statement from this document (can be the empty dictionary) add_grounding : Optional[bool] If True the extracted Statements' grounding is mapped
[ "Extracts", "statements", "from", "the", "given", "ISI", "output", "file", "." ]
python
train
multiformats/py-multibase
multibase/converters.py
https://github.com/multiformats/py-multibase/blob/8f435762b50a17f921c13b59eb0c7b9c52afc879/multibase/converters.py#L47-L51
def _chunk_with_padding(self, iterable, n, fillvalue=None): "Collect data into fixed-length chunks or blocks" # _chunk_with_padding('ABCDEFG', 3, 'x') --> ABC DEF Gxx" args = [iter(iterable)] * n return zip_longest(*args, fillvalue=fillvalue)
[ "def", "_chunk_with_padding", "(", "self", ",", "iterable", ",", "n", ",", "fillvalue", "=", "None", ")", ":", "# _chunk_with_padding('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"", "args", "=", "[", "iter", "(", "iterable", ")", "]", "*", "n", "return", "zip_longest", "...
Collect data into fixed-length chunks or blocks
[ "Collect", "data", "into", "fixed", "-", "length", "chunks", "or", "blocks" ]
python
train
QualiSystems/vCenterShell
package/cloudshell/cp/vcenter/common/vcenter/vmomi_service.py
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/common/vcenter/vmomi_service.py#L306-L313
def get_network_by_full_name(self, si, default_network_full_name): """ Find network by a Full Name :param default_network_full_name: <str> Full Network Name - likes 'Root/Folder/Network' :return: """ path, name = get_path_and_name(default_network_full_name) return self.find_network_by_name(si, path, name) if name else None
[ "def", "get_network_by_full_name", "(", "self", ",", "si", ",", "default_network_full_name", ")", ":", "path", ",", "name", "=", "get_path_and_name", "(", "default_network_full_name", ")", "return", "self", ".", "find_network_by_name", "(", "si", ",", "path", ",",...
Find network by a Full Name :param default_network_full_name: <str> Full Network Name - likes 'Root/Folder/Network' :return:
[ "Find", "network", "by", "a", "Full", "Name", ":", "param", "default_network_full_name", ":", "<str", ">", "Full", "Network", "Name", "-", "likes", "Root", "/", "Folder", "/", "Network", ":", "return", ":" ]
python
train
Rapptz/discord.py
discord/guild.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/guild.py#L1375-L1399
async def unban(self, user, *, reason=None): """|coro| Unbans a user from the guild. The user must meet the :class:`abc.Snowflake` abc. You must have the :attr:`~Permissions.ban_members` permission to do this. Parameters ----------- user: :class:`abc.Snowflake` The user to unban. reason: Optional[:class:`str`] The reason for doing this action. Shows up on the audit log. Raises ------- Forbidden You do not have the proper permissions to unban. HTTPException Unbanning failed. """ await self._state.http.unban(user.id, self.id, reason=reason)
[ "async", "def", "unban", "(", "self", ",", "user", ",", "*", ",", "reason", "=", "None", ")", ":", "await", "self", ".", "_state", ".", "http", ".", "unban", "(", "user", ".", "id", ",", "self", ".", "id", ",", "reason", "=", "reason", ")" ]
|coro| Unbans a user from the guild. The user must meet the :class:`abc.Snowflake` abc. You must have the :attr:`~Permissions.ban_members` permission to do this. Parameters ----------- user: :class:`abc.Snowflake` The user to unban. reason: Optional[:class:`str`] The reason for doing this action. Shows up on the audit log. Raises ------- Forbidden You do not have the proper permissions to unban. HTTPException Unbanning failed.
[ "|coro|" ]
python
train
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/injector.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/injector.py#L1103-L1112
def remove(self): """ remove the gear from the cache and stop this actor :return: """ LOGGER.debug("InjectorGearSkeleton.remove") ret = self.cached_gear_actor.remove().get() if self.actor_ref: self.stop() return ret
[ "def", "remove", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"InjectorGearSkeleton.remove\"", ")", "ret", "=", "self", ".", "cached_gear_actor", ".", "remove", "(", ")", ".", "get", "(", ")", "if", "self", ".", "actor_ref", ":", "self", ".", ...
remove the gear from the cache and stop this actor :return:
[ "remove", "the", "gear", "from", "the", "cache", "and", "stop", "this", "actor", ":", "return", ":" ]
python
train
salu133445/pypianoroll
pypianoroll/multitrack.py
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L210-L253
def check_validity(self): """ Raise an error if any invalid attribute found. Raises ------ TypeError If an attribute has an invalid type. ValueError If an attribute has an invalid value (of the correct type). """ # tracks for track in self.tracks: if not isinstance(track, Track): raise TypeError("`tracks` must be a list of " "`pypianoroll.Track` instances.") track.check_validity() # tempo if not isinstance(self.tempo, np.ndarray): raise TypeError("`tempo` must be int or a numpy array.") elif not np.issubdtype(self.tempo.dtype, np.number): raise TypeError("Data type of `tempo` must be a subdtype of " "np.number.") elif self.tempo.ndim != 1: raise ValueError("`tempo` must be a 1D numpy array.") if np.any(self.tempo <= 0.0): raise ValueError("`tempo` should contain only positive numbers.") # downbeat if self.downbeat is not None: if not isinstance(self.downbeat, np.ndarray): raise TypeError("`downbeat` must be a numpy array.") if not np.issubdtype(self.downbeat.dtype, np.bool_): raise TypeError("Data type of `downbeat` must be bool.") if self.downbeat.ndim != 1: raise ValueError("`downbeat` must be a 1D numpy array.") # beat_resolution if not isinstance(self.beat_resolution, int): raise TypeError("`beat_resolution` must be int.") if self.beat_resolution < 1: raise ValueError("`beat_resolution` must be a positive integer.") # name if not isinstance(self.name, string_types): raise TypeError("`name` must be a string.")
[ "def", "check_validity", "(", "self", ")", ":", "# tracks", "for", "track", "in", "self", ".", "tracks", ":", "if", "not", "isinstance", "(", "track", ",", "Track", ")", ":", "raise", "TypeError", "(", "\"`tracks` must be a list of \"", "\"`pypianoroll.Track` in...
Raise an error if any invalid attribute found. Raises ------ TypeError If an attribute has an invalid type. ValueError If an attribute has an invalid value (of the correct type).
[ "Raise", "an", "error", "if", "any", "invalid", "attribute", "found", "." ]
python
train
noxdafox/clipspy
clips/classes.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/classes.py#L336-L338
def superclass(self, klass): """True if the Class is a superclass of the given one.""" return bool(lib.EnvSuperclassP(self._env, self._cls, klass._cls))
[ "def", "superclass", "(", "self", ",", "klass", ")", ":", "return", "bool", "(", "lib", ".", "EnvSuperclassP", "(", "self", ".", "_env", ",", "self", ".", "_cls", ",", "klass", ".", "_cls", ")", ")" ]
True if the Class is a superclass of the given one.
[ "True", "if", "the", "Class", "is", "a", "superclass", "of", "the", "given", "one", "." ]
python
train
fossasia/knittingpattern
knittingpattern/convert/image_to_knittingpattern.py
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/convert/image_to_knittingpattern.py#L12-L64
def convert_image_to_knitting_pattern(path, colors=("white", "black")): """Load a image file such as a png bitmap of jpeg file and convert it to a :ref:`knitting pattern file <FileFormatSpecification>`. :param list colors: a list of strings that should be used as :ref:`colors <png-color>`. :param str path: ignore this. It is fulfilled by the loeder. Example: .. code:: python convert_image_to_knitting_pattern().path("image.png").path("image.json") """ image = PIL.Image.open(path) pattern_id = os.path.splitext(os.path.basename(path))[0] rows = [] connections = [] pattern_set = { "version": "0.1", "type": "knitting pattern", "comment": { "source": path }, "patterns": [ { "name": pattern_id, "id": pattern_id, "rows": rows, "connections": connections } ]} bbox = image.getbbox() if not bbox: return pattern_set white = image.getpixel((0, 0)) min_x, min_y, max_x, max_y = bbox last_row_y = None for y in reversed(range(min_y, max_y)): instructions = [] row = {"id": y, "instructions": instructions} rows.append(row) for x in range(min_x, max_x): if image.getpixel((x, y)) == white: color = colors[0] else: color = colors[1] instruction = {"color": color} instructions.append(instruction) if last_row_y is not None: connections.append({"from": {"id": last_row_y}, "to": {"id": y}}) last_row_y = y return pattern_set
[ "def", "convert_image_to_knitting_pattern", "(", "path", ",", "colors", "=", "(", "\"white\"", ",", "\"black\"", ")", ")", ":", "image", "=", "PIL", ".", "Image", ".", "open", "(", "path", ")", "pattern_id", "=", "os", ".", "path", ".", "splitext", "(", ...
Load a image file such as a png bitmap of jpeg file and convert it to a :ref:`knitting pattern file <FileFormatSpecification>`. :param list colors: a list of strings that should be used as :ref:`colors <png-color>`. :param str path: ignore this. It is fulfilled by the loeder. Example: .. code:: python convert_image_to_knitting_pattern().path("image.png").path("image.json")
[ "Load", "a", "image", "file", "such", "as", "a", "png", "bitmap", "of", "jpeg", "file", "and", "convert", "it", "to", "a", ":", "ref", ":", "knitting", "pattern", "file", "<FileFormatSpecification", ">", "." ]
python
valid
nugget/python-anthemav
anthemav/protocol.py
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L157-L167
def connection_made(self, transport): """Called when asyncio.Protocol establishes the network connection.""" self.log.info('Connection established to AVR') self.transport = transport #self.transport.set_write_buffer_limits(0) limit_low, limit_high = self.transport.get_write_buffer_limits() self.log.debug('Write buffer limits %d to %d', limit_low, limit_high) self.command('ECH1') self.refresh_core()
[ "def", "connection_made", "(", "self", ",", "transport", ")", ":", "self", ".", "log", ".", "info", "(", "'Connection established to AVR'", ")", "self", ".", "transport", "=", "transport", "#self.transport.set_write_buffer_limits(0)", "limit_low", ",", "limit_high", ...
Called when asyncio.Protocol establishes the network connection.
[ "Called", "when", "asyncio", ".", "Protocol", "establishes", "the", "network", "connection", "." ]
python
train
blockstack/blockstack-core
blockstack/blockstackd.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L623-L650
def get_name_record(self, name, include_expired=False, include_history=False): """ Get the whois-related info for a name (not a subdomain). Optionally include the history. Return {'status': True, 'record': rec} on success Return {'error': ...} on error """ if not check_name(name): return {'error': 'invalid name', 'http_status': 400} name = str(name) db = get_db_state(self.working_dir) name_record = db.get_name(str(name), include_expired=include_expired, include_history=include_history) if name_record is None: db.close() return {"error": "Not found.", 'http_status': 404} else: assert 'opcode' in name_record, 'BUG: missing opcode in {}'.format(json.dumps(name_record, sort_keys=True)) name_record = self.load_name_info(db, name_record) db.close() # also get the subdomain resolver resolver = get_subdomain_resolver(name) name_record['resolver'] = resolver return {'status': True, 'record': name_record}
[ "def", "get_name_record", "(", "self", ",", "name", ",", "include_expired", "=", "False", ",", "include_history", "=", "False", ")", ":", "if", "not", "check_name", "(", "name", ")", ":", "return", "{", "'error'", ":", "'invalid name'", ",", "'http_status'",...
Get the whois-related info for a name (not a subdomain). Optionally include the history. Return {'status': True, 'record': rec} on success Return {'error': ...} on error
[ "Get", "the", "whois", "-", "related", "info", "for", "a", "name", "(", "not", "a", "subdomain", ")", ".", "Optionally", "include", "the", "history", ".", "Return", "{", "status", ":", "True", "record", ":", "rec", "}", "on", "success", "Return", "{", ...
python
train
PyThaiNLP/pythainlp
pythainlp/tokenize/tcc.py
https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/tokenize/tcc.py#L52-L69
def tcc(text: str) -> str: """ TCC generator, generates Thai Character Clusters :param str text: text to be tokenized to character clusters :return: subword (character cluster) """ if not text or not isinstance(text, str): return "" p = 0 while p < len(text): m = PAT_TCC.match(text[p:]) if m: n = m.span()[1] else: n = 1 yield text[p : p + n] p += n
[ "def", "tcc", "(", "text", ":", "str", ")", "->", "str", ":", "if", "not", "text", "or", "not", "isinstance", "(", "text", ",", "str", ")", ":", "return", "\"\"", "p", "=", "0", "while", "p", "<", "len", "(", "text", ")", ":", "m", "=", "PAT_...
TCC generator, generates Thai Character Clusters :param str text: text to be tokenized to character clusters :return: subword (character cluster)
[ "TCC", "generator", "generates", "Thai", "Character", "Clusters", ":", "param", "str", "text", ":", "text", "to", "be", "tokenized", "to", "character", "clusters", ":", "return", ":", "subword", "(", "character", "cluster", ")" ]
python
train
inveniosoftware/invenio-collections
invenio_collections/receivers.py
https://github.com/inveniosoftware/invenio-collections/blob/f3adca45c6d00a4dbf1f48fd501e8a68fe347f2f/invenio_collections/receivers.py#L74-L91
def get_record_collections(record, matcher): """Return list of collections to which record belongs to. :param record: Record instance. :param matcher: Function used to check if a record belongs to a collection. :return: list of collection names. """ collections = current_collections.collections if collections is None: # build collections cache collections = current_collections.collections = dict(_build_cache()) output = set() for collections in matcher(collections, record): output |= collections return list(output)
[ "def", "get_record_collections", "(", "record", ",", "matcher", ")", ":", "collections", "=", "current_collections", ".", "collections", "if", "collections", "is", "None", ":", "# build collections cache", "collections", "=", "current_collections", ".", "collections", ...
Return list of collections to which record belongs to. :param record: Record instance. :param matcher: Function used to check if a record belongs to a collection. :return: list of collection names.
[ "Return", "list", "of", "collections", "to", "which", "record", "belongs", "to", "." ]
python
train
erikdejonge/arguments
examples/classbased.py
https://github.com/erikdejonge/arguments/blob/fc222d3989d459343a81944cabb56854014335ed/examples/classbased.py#L98-L111
def main(): """ main """ args = MainArguments() if args.tool.lower() == "tool1": args = Tool1Arguments() elif args.tool.lower() == "tool2": args = Tool2Arguments() else: print("Unknown tool", args.tool) print(args)
[ "def", "main", "(", ")", ":", "args", "=", "MainArguments", "(", ")", "if", "args", ".", "tool", ".", "lower", "(", ")", "==", "\"tool1\"", ":", "args", "=", "Tool1Arguments", "(", ")", "elif", "args", ".", "tool", ".", "lower", "(", ")", "==", "...
main
[ "main" ]
python
train
refenv/cijoe
modules/cij/reporter.py
https://github.com/refenv/cijoe/blob/21d7b2ed4ff68e0a1457e7df2db27f6334f1a379/modules/cij/reporter.py#L165-L174
def process_tcase(tcase): """Goes through the trun and processes "run.log" """ tcase["src_content"] = src_to_html(tcase["fpath"]) tcase["log_content"] = runlogs_to_html(tcase["res_root"]) tcase["aux_list"] = aux_listing(tcase["aux_root"]) tcase["descr_short"], tcase["descr_long"] = tcase_parse_descr(tcase) tcase["hnames"] = extract_hook_names(tcase) return True
[ "def", "process_tcase", "(", "tcase", ")", ":", "tcase", "[", "\"src_content\"", "]", "=", "src_to_html", "(", "tcase", "[", "\"fpath\"", "]", ")", "tcase", "[", "\"log_content\"", "]", "=", "runlogs_to_html", "(", "tcase", "[", "\"res_root\"", "]", ")", "...
Goes through the trun and processes "run.log"
[ "Goes", "through", "the", "trun", "and", "processes", "run", ".", "log" ]
python
valid
Richienb/quilt
src/quilt_lang/__init__.py
https://github.com/Richienb/quilt/blob/4a659cac66f5286ad046d54a12fd850be5606643/src/quilt_lang/__init__.py#L1135-L1197
def circleconvert(amount, currentformat, newformat): """ Convert a circle measurement. :type amount: number :param amount: The number to convert. :type currentformat: string :param currentformat: The format of the provided value. :type newformat: string :param newformat: The intended format of the value. >>> circleconvert(45, "radius", "diameter") 90 """ # If the same format was provided if currentformat.lower() == newformat.lower(): # Return the provided value return amount # If the lowercase version of the current format is 'radius' if currentformat.lower() == 'radius': # If the lowercase version of the new format is 'diameter' if newformat.lower() == 'diameter': # Return the converted value return amount * 2 # If the lowercase version of the new format is 'circumference' elif newformat.lower() == 'circumference': # Return the converted value return amount * 2 * math.pi # Raise a warning raise ValueError("Invalid new format provided.") # If the lowercase version of the current format is 'diameter' elif currentformat.lower() == 'diameter': # If the lowercase version of the new format is 'radius' if newformat.lower() == 'radius': # Return the converted value return amount / 2 # If the lowercase version of the new format is 'circumference' elif newformat.lower() == 'circumference': # Return the converted value return amount * math.pi # Raise a warning raise ValueError("Invalid new format provided.") # If the lowercase version of the current format is 'circumference' elif currentformat.lower() == 'circumference': # If the lowercase version of the new format is 'radius' if newformat.lower() == 'radius': # Return the converted value return amount / math.pi / 2 # If the lowercase version of the new format is 'diameter' elif newformat.lower() == 'diameter': # Return the converted value return amount / math.pi
[ "def", "circleconvert", "(", "amount", ",", "currentformat", ",", "newformat", ")", ":", "# If the same format was provided", "if", "currentformat", ".", "lower", "(", ")", "==", "newformat", ".", "lower", "(", ")", ":", "# Return the provided value", "return", "a...
Convert a circle measurement. :type amount: number :param amount: The number to convert. :type currentformat: string :param currentformat: The format of the provided value. :type newformat: string :param newformat: The intended format of the value. >>> circleconvert(45, "radius", "diameter") 90
[ "Convert", "a", "circle", "measurement", "." ]
python
train
etal/biofrills
biofrills/pairutils.py
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/pairutils.py#L83-L116
def score_pairwise(aseq, bseq): """Compute pairwise distances between two sequences (raw strings).""" assert len(aseq) == len(bseq) # Affine gap penalties -- default values from EMBOSS needle/water GAP_OPEN = -10.0 GAP_EXTEND = -0.5 GAP_CHARS = frozenset('-.') score = 0.0 in_gap = True # Don't apply the opening penalty to the N-terminal gap for ares, bres in zip(aseq.upper(), bseq.upper()): if ares in GAP_CHARS and bres in GAP_CHARS: # Both are gaps -- this happens in multiple sequence alignments continue match = blosum62.get((ares, bres), None) if match is None: assert GAP_CHARS.intersection((ares, bres)), \ "Expected one gap in: " + str((ares, bres)) # Gap if not in_gap: score += GAP_OPEN in_gap = True score += GAP_EXTEND else: in_gap = False score += match if in_gap: # Correct for a penalty on the C-terminal gap score -= GAP_OPEN return score
[ "def", "score_pairwise", "(", "aseq", ",", "bseq", ")", ":", "assert", "len", "(", "aseq", ")", "==", "len", "(", "bseq", ")", "# Affine gap penalties -- default values from EMBOSS needle/water", "GAP_OPEN", "=", "-", "10.0", "GAP_EXTEND", "=", "-", "0.5", "GAP_...
Compute pairwise distances between two sequences (raw strings).
[ "Compute", "pairwise", "distances", "between", "two", "sequences", "(", "raw", "strings", ")", "." ]
python
train
hydpy-dev/hydpy
hydpy/exe/commandtools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/exe/commandtools.py#L223-L284
def execute_scriptfunction() -> None: """Execute a HydPy script function. Function |execute_scriptfunction| is indirectly applied and explained in the documentation on module |hyd|. """ try: args_given = [] kwargs_given = {} for arg in sys.argv[1:]: if len(arg) < 3: args_given.append(arg) else: try: key, value = parse_argument(arg) kwargs_given[key] = value except ValueError: args_given.append(arg) logfilepath = prepare_logfile(kwargs_given.pop('logfile', 'stdout')) logstyle = kwargs_given.pop('logstyle', 'plain') try: funcname = str(args_given.pop(0)) except IndexError: raise ValueError( 'The first positional argument defining the function ' 'to be called is missing.') try: func = hydpy.pub.scriptfunctions[funcname] except KeyError: available_funcs = objecttools.enumeration( sorted(hydpy.pub.scriptfunctions.keys())) raise ValueError( f'There is no `{funcname}` function callable by `hyd.py`. ' f'Choose one of the following instead: {available_funcs}.') args_required = inspect.getfullargspec(func).args nmb_args_required = len(args_required) nmb_args_given = len(args_given) if nmb_args_given != nmb_args_required: enum_args_given = '' if nmb_args_given: enum_args_given = ( f' ({objecttools.enumeration(args_given)})') enum_args_required = '' if nmb_args_required: enum_args_required = ( f' ({objecttools.enumeration(args_required)})') raise ValueError( f'Function `{funcname}` requires `{nmb_args_required:d}` ' f'positional arguments{enum_args_required}, but ' f'`{nmb_args_given:d}` are given{enum_args_given}.') with _activate_logfile(logfilepath, logstyle, 'info', 'warning'): func(*args_given, **kwargs_given) except BaseException as exc: if logstyle not in LogFileInterface.style2infotype2string: logstyle = 'plain' with _activate_logfile(logfilepath, logstyle, 'exception', 'exception'): arguments = ', '.join(sys.argv) print(f'Invoking hyd.py with arguments `{arguments}` ' f'resulted in the following error:\n{str(exc)}\n\n' f'See the following stack traceback for debugging:\n', file=sys.stderr) traceback.print_tb(sys.exc_info()[2])
[ "def", "execute_scriptfunction", "(", ")", "->", "None", ":", "try", ":", "args_given", "=", "[", "]", "kwargs_given", "=", "{", "}", "for", "arg", "in", "sys", ".", "argv", "[", "1", ":", "]", ":", "if", "len", "(", "arg", ")", "<", "3", ":", ...
Execute a HydPy script function. Function |execute_scriptfunction| is indirectly applied and explained in the documentation on module |hyd|.
[ "Execute", "a", "HydPy", "script", "function", "." ]
python
train
tariqdaouda/pyGeno
pyGeno/tools/SegmentTree.py
https://github.com/tariqdaouda/pyGeno/blob/474b1250bf78ce5c7e7c3bbbfdbad9635d5a7d14/pyGeno/tools/SegmentTree.py#L224-L229
def removeGaps(self) : """Remove all gaps between regions""" for i in range(1, len(self.children)) : if self.children[i].x1 > self.children[i-1].x2: aux_moveTree(self.children[i-1].x2-self.children[i].x1, self.children[i])
[ "def", "removeGaps", "(", "self", ")", ":", "for", "i", "in", "range", "(", "1", ",", "len", "(", "self", ".", "children", ")", ")", ":", "if", "self", ".", "children", "[", "i", "]", ".", "x1", ">", "self", ".", "children", "[", "i", "-", "1...
Remove all gaps between regions
[ "Remove", "all", "gaps", "between", "regions" ]
python
train
ericpruitt/cronex
cronex/__init__.py
https://github.com/ericpruitt/cronex/blob/ff48a3a71bbcdf01cff46c0bf9376e69492c9224/cronex/__init__.py#L156-L261
def check_trigger(self, date_tuple, utc_offset=0): """ Returns boolean indicating if the trigger is active at the given time. The date tuple should be in the local time. Unless periodicities are used, utc_offset does not need to be specified. If periodicities are used, specifically in the hour and minutes fields, it is crucial that the utc_offset is specified. """ year, month, day, hour, mins = date_tuple given_date = datetime.date(year, month, day) zeroday = datetime.date(*self.epoch[:3]) last_dom = calendar.monthrange(year, month)[-1] dom_matched = True # In calendar and datetime.date.weekday, Monday = 0 given_dow = (datetime.date.weekday(given_date) + 1) % 7 first_dow = (given_dow + 1 - day) % 7 # Figure out how much time has passed from the epoch to the given date utc_diff = utc_offset - self.epoch[5] mod_delta_yrs = year - self.epoch[0] mod_delta_mon = month - self.epoch[1] + mod_delta_yrs * 12 mod_delta_day = (given_date - zeroday).days mod_delta_hrs = hour - self.epoch[3] + mod_delta_day * 24 + utc_diff mod_delta_min = mins - self.epoch[4] + mod_delta_hrs * 60 # Makes iterating through like components easier. quintuple = zip( (mins, hour, day, month, given_dow), self.numerical_tab, self.string_tab, (mod_delta_min, mod_delta_hrs, mod_delta_day, mod_delta_mon, mod_delta_day), FIELD_RANGES) for value, valid_values, field_str, delta_t, field_type in quintuple: # All valid, static values for the fields are stored in sets if value in valid_values: continue # The following for loop implements the logic for context # sensitive and epoch sensitive constraints. break statements, # which are executed when a match is found, lead to a continue # in the outer loop. If there are no matches found, the given date # does not match expression constraints, so the function returns # False as seen at the end of this for...else... construct. for cron_atom in field_str.split(','): if cron_atom[0] == '%': if not(delta_t % int(cron_atom[1:])): break elif '#' in cron_atom: D, N = int(cron_atom[0]), int(cron_atom[2]) # Computes Nth occurence of D day of the week if (((D - first_dow) % 7) + 1 + 7 * (N - 1)) == day: break elif cron_atom[-1] == 'W': target = min(int(cron_atom[:-1]), last_dom) lands_on = (first_dow + target - 1) % 7 if lands_on == 0: # Shift from Sun. to Mon. unless Mon. is next month if target < last_dom: target += 1 else: target -= 2 elif lands_on == 6: # Shift from Sat. to Fri. unless Fri. in prior month if target > 1: target -= 1 else: target += 2 # Break if the day is correct, and target is a weekday if target == day and (first_dow + target) % 7 > 1: break elif cron_atom[-1] == 'L': # In dom field, L means the last day of the month target = last_dom if field_type == DAYS_OF_WEEK: # Calculates the last occurence of given day of week desired_dow = int(cron_atom[:-1]) target = (((desired_dow - first_dow) % 7) + 29) if target > last_dom: target -= 7 if target == day: break else: # See 2010.11.15 of CHANGELOG if field_type == DAYS_OF_MONTH and self.string_tab[4] != '*': dom_matched = False continue elif field_type == DAYS_OF_WEEK and self.string_tab[2] != '*': # If we got here, then days of months validated so it does # not matter that days of the week failed. return dom_matched # None of the expressions matched which means this field fails return False # Arriving at this point means the date landed within the constraints # of all fields; the associated trigger should be fired. return True
[ "def", "check_trigger", "(", "self", ",", "date_tuple", ",", "utc_offset", "=", "0", ")", ":", "year", ",", "month", ",", "day", ",", "hour", ",", "mins", "=", "date_tuple", "given_date", "=", "datetime", ".", "date", "(", "year", ",", "month", ",", ...
Returns boolean indicating if the trigger is active at the given time. The date tuple should be in the local time. Unless periodicities are used, utc_offset does not need to be specified. If periodicities are used, specifically in the hour and minutes fields, it is crucial that the utc_offset is specified.
[ "Returns", "boolean", "indicating", "if", "the", "trigger", "is", "active", "at", "the", "given", "time", ".", "The", "date", "tuple", "should", "be", "in", "the", "local", "time", ".", "Unless", "periodicities", "are", "used", "utc_offset", "does", "not", ...
python
train
StorjOld/pyp2p
pyp2p/rendezvous_server.py
https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_server.py#L108-L122
def cleanup_candidates(self, node_ip): """ Removes old TCP hole punching candidates for a designated node if a certain amount of time has passed since they last connected. """ if node_ip in self.factory.candidates: old_candidates = [] for candidate in self.factory.candidates[node_ip]: elapsed = int(time.time() - candidate["time"]) if elapsed > self.challege_timeout: old_candidates.append(candidate) for candidate in old_candidates: self.factory.candidates[node_ip].remove(candidate)
[ "def", "cleanup_candidates", "(", "self", ",", "node_ip", ")", ":", "if", "node_ip", "in", "self", ".", "factory", ".", "candidates", ":", "old_candidates", "=", "[", "]", "for", "candidate", "in", "self", ".", "factory", ".", "candidates", "[", "node_ip",...
Removes old TCP hole punching candidates for a designated node if a certain amount of time has passed since they last connected.
[ "Removes", "old", "TCP", "hole", "punching", "candidates", "for", "a", "designated", "node", "if", "a", "certain", "amount", "of", "time", "has", "passed", "since", "they", "last", "connected", "." ]
python
train
numenta/htmresearch
htmresearch/algorithms/apical_tiebreak_temporal_memory.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/apical_tiebreak_temporal_memory.py#L410-L475
def _calculateApicalLearning(self, learningCells, activeColumns, activeApicalSegments, matchingApicalSegments, apicalPotentialOverlaps): """ Calculate apical learning for each learning cell. The set of learning cells was determined completely from basal segments. Do all apical learning on the same cells. Learn on any active segments on learning cells. For cells without active segments, learn on the best matching segment. For cells without a matching segment, grow a new segment. @param learningCells (numpy array) @param correctPredictedCells (numpy array) @param activeApicalSegments (numpy array) @param matchingApicalSegments (numpy array) @param apicalPotentialOverlaps (numpy array) @return (tuple) - learningActiveApicalSegments (numpy array) Active apical segments on correct predicted cells - learningMatchingApicalSegments (numpy array) Matching apical segments selected for learning in bursting columns - apicalSegmentsToPunish (numpy array) Apical segments that should be punished for predicting an inactive column - newApicalSegmentCells (numpy array) Cells in bursting columns that were selected to grow new apical segments """ # Cells with active apical segments learningActiveApicalSegments = self.apicalConnections.filterSegmentsByCell( activeApicalSegments, learningCells) # Cells with matching apical segments learningCellsWithoutActiveApical = np.setdiff1d( learningCells, self.apicalConnections.mapSegmentsToCells(learningActiveApicalSegments)) cellsForMatchingApical = self.apicalConnections.mapSegmentsToCells( matchingApicalSegments) learningCellsWithMatchingApical = np.intersect1d( learningCellsWithoutActiveApical, cellsForMatchingApical) learningMatchingApicalSegments = self._chooseBestSegmentPerCell( self.apicalConnections, learningCellsWithMatchingApical, matchingApicalSegments, apicalPotentialOverlaps) # Cells that need to grow an apical segment newApicalSegmentCells = np.setdiff1d(learningCellsWithoutActiveApical, learningCellsWithMatchingApical) # Incorrectly predicted columns correctMatchingApicalMask = np.in1d( cellsForMatchingApical / self.cellsPerColumn, activeColumns) apicalSegmentsToPunish = matchingApicalSegments[~correctMatchingApicalMask] return (learningActiveApicalSegments, learningMatchingApicalSegments, apicalSegmentsToPunish, newApicalSegmentCells)
[ "def", "_calculateApicalLearning", "(", "self", ",", "learningCells", ",", "activeColumns", ",", "activeApicalSegments", ",", "matchingApicalSegments", ",", "apicalPotentialOverlaps", ")", ":", "# Cells with active apical segments", "learningActiveApicalSegments", "=", "self", ...
Calculate apical learning for each learning cell. The set of learning cells was determined completely from basal segments. Do all apical learning on the same cells. Learn on any active segments on learning cells. For cells without active segments, learn on the best matching segment. For cells without a matching segment, grow a new segment. @param learningCells (numpy array) @param correctPredictedCells (numpy array) @param activeApicalSegments (numpy array) @param matchingApicalSegments (numpy array) @param apicalPotentialOverlaps (numpy array) @return (tuple) - learningActiveApicalSegments (numpy array) Active apical segments on correct predicted cells - learningMatchingApicalSegments (numpy array) Matching apical segments selected for learning in bursting columns - apicalSegmentsToPunish (numpy array) Apical segments that should be punished for predicting an inactive column - newApicalSegmentCells (numpy array) Cells in bursting columns that were selected to grow new apical segments
[ "Calculate", "apical", "learning", "for", "each", "learning", "cell", "." ]
python
train
dwavesystems/dwave-system
dwave/system/composites/embedding.py
https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/system/composites/embedding.py#L432-L434
def _embed_state(embedding, state): """Embed a single state/sample by spreading it's values over the chains in the embedding""" return {u: state[v] for v, chain in embedding.items() for u in chain}
[ "def", "_embed_state", "(", "embedding", ",", "state", ")", ":", "return", "{", "u", ":", "state", "[", "v", "]", "for", "v", ",", "chain", "in", "embedding", ".", "items", "(", ")", "for", "u", "in", "chain", "}" ]
Embed a single state/sample by spreading it's values over the chains in the embedding
[ "Embed", "a", "single", "state", "/", "sample", "by", "spreading", "it", "s", "values", "over", "the", "chains", "in", "the", "embedding" ]
python
train
launchdarkly/relayCommander
relay_commander/generators.py
https://github.com/launchdarkly/relayCommander/blob/eee7fa22f04edc3854dd53c3ec2db8c599ad1e89/relay_commander/generators.py#L20-L38
def generate_relay_config(self, environments: list) -> None: """Generate ld-relay.conf file. Given a list of environments of a project, this will generate a ``ld-relay.conf`` file in the current working directory. The conf file follows the specification that is documented in the main `ld-relay`_ documentation. .. _ld-relay: https://github.com/launchdarkly/ld-relay#configuration-file-format :param environments: list of LaunchDarkly environments. """ template = self.env.get_template('ld-relay.conf.jinja') with open('ld-relay.conf', 'w') as ld_relay_config: template = template.render( envs=environments ) ld_relay_config.write(template)
[ "def", "generate_relay_config", "(", "self", ",", "environments", ":", "list", ")", "->", "None", ":", "template", "=", "self", ".", "env", ".", "get_template", "(", "'ld-relay.conf.jinja'", ")", "with", "open", "(", "'ld-relay.conf'", ",", "'w'", ")", "as",...
Generate ld-relay.conf file. Given a list of environments of a project, this will generate a ``ld-relay.conf`` file in the current working directory. The conf file follows the specification that is documented in the main `ld-relay`_ documentation. .. _ld-relay: https://github.com/launchdarkly/ld-relay#configuration-file-format :param environments: list of LaunchDarkly environments.
[ "Generate", "ld", "-", "relay", ".", "conf", "file", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/work_item_tracking/work_item_tracking_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/work_item_tracking/work_item_tracking_client.py#L1180-L1204
def get_updates(self, id, project=None, top=None, skip=None): """GetUpdates. [Preview API] Returns a the deltas between work item revisions :param int id: :param str project: Project ID or project name :param int top: :param int skip: :rtype: [WorkItemUpdate] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if id is not None: route_values['id'] = self._serialize.url('id', id, 'int') query_parameters = {} if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') response = self._send(http_method='GET', location_id='6570bf97-d02c-4a91-8d93-3abe9895b1a9', version='5.1-preview.3', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[WorkItemUpdate]', self._unwrap_collection(response))
[ "def", "get_updates", "(", "self", ",", "id", ",", "project", "=", "None", ",", "top", "=", "None", ",", "skip", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", ...
GetUpdates. [Preview API] Returns a the deltas between work item revisions :param int id: :param str project: Project ID or project name :param int top: :param int skip: :rtype: [WorkItemUpdate]
[ "GetUpdates", ".", "[", "Preview", "API", "]", "Returns", "a", "the", "deltas", "between", "work", "item", "revisions", ":", "param", "int", "id", ":", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "int", ...
python
train
wavefrontHQ/python-client
wavefront_api_client/api/webhook_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/webhook_api.py#L36-L56
def create_webhook(self, **kwargs): # noqa: E501 """Create a specific webhook # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_webhook(async_req=True) >>> result = thread.get() :param async_req bool :param Notificant body: Example Body: <pre>{ \"description\": \"WebHook Description\", \"template\": \"POST Body -- Mustache syntax\", \"title\": \"WebHook Title\", \"triggers\": [ \"ALERT_OPENED\" ], \"recipient\": \"http://example.com\", \"customHttpHeaders\": {}, \"contentType\": \"text/plain\" }</pre> :return: ResponseContainerNotificant If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_webhook_with_http_info(**kwargs) # noqa: E501 else: (data) = self.create_webhook_with_http_info(**kwargs) # noqa: E501 return data
[ "def", "create_webhook", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "create_webhook_with_http_inf...
Create a specific webhook # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_webhook(async_req=True) >>> result = thread.get() :param async_req bool :param Notificant body: Example Body: <pre>{ \"description\": \"WebHook Description\", \"template\": \"POST Body -- Mustache syntax\", \"title\": \"WebHook Title\", \"triggers\": [ \"ALERT_OPENED\" ], \"recipient\": \"http://example.com\", \"customHttpHeaders\": {}, \"contentType\": \"text/plain\" }</pre> :return: ResponseContainerNotificant If the method is called asynchronously, returns the request thread.
[ "Create", "a", "specific", "webhook", "#", "noqa", ":", "E501" ]
python
train
phac-nml/sistr_cmd
sistr/src/writers.py
https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/writers.py#L5-L22
def listattrs(x): """Get all instance and class attributes for an object Get all instance and class attributes for an object except those that start with "__" (double underscore). __dict__ of an object only reports the instance attributes while dir() reports all of the attributes of an object including private ones. Callable attrs are filtered out. Args: x (object): Some object Returns: list str: List of non-callable non-private attributes of object x """ return [attr for attr in dir(x) if not attr.startswith("__") and not callable(getattr(x, attr))]
[ "def", "listattrs", "(", "x", ")", ":", "return", "[", "attr", "for", "attr", "in", "dir", "(", "x", ")", "if", "not", "attr", ".", "startswith", "(", "\"__\"", ")", "and", "not", "callable", "(", "getattr", "(", "x", ",", "attr", ")", ")", "]" ]
Get all instance and class attributes for an object Get all instance and class attributes for an object except those that start with "__" (double underscore). __dict__ of an object only reports the instance attributes while dir() reports all of the attributes of an object including private ones. Callable attrs are filtered out. Args: x (object): Some object Returns: list str: List of non-callable non-private attributes of object x
[ "Get", "all", "instance", "and", "class", "attributes", "for", "an", "object", "Get", "all", "instance", "and", "class", "attributes", "for", "an", "object", "except", "those", "that", "start", "with", "__", "(", "double", "underscore", ")", ".", "__dict__",...
python
train
xtuml/pyxtuml
bridgepoint/oal.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/oal.py#L975-L978
def t_END_WHILE(self, t): r"(?i)end[\s]+while" t.endlexpos = t.lexpos + len(t.value) return t
[ "def", "t_END_WHILE", "(", "self", ",", "t", ")", ":", "t", ".", "endlexpos", "=", "t", ".", "lexpos", "+", "len", "(", "t", ".", "value", ")", "return", "t" ]
r"(?i)end[\s]+while
[ "r", "(", "?i", ")", "end", "[", "\\", "s", "]", "+", "while" ]
python
test
diging/tethne
tethne/utilities.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L184-L194
def subdict(super_dict, keys): """ Returns a subset of the super_dict with the specified keys. """ sub_dict = {} valid_keys = super_dict.keys() for key in keys: if key in valid_keys: sub_dict[key] = super_dict[key] return sub_dict
[ "def", "subdict", "(", "super_dict", ",", "keys", ")", ":", "sub_dict", "=", "{", "}", "valid_keys", "=", "super_dict", ".", "keys", "(", ")", "for", "key", "in", "keys", ":", "if", "key", "in", "valid_keys", ":", "sub_dict", "[", "key", "]", "=", ...
Returns a subset of the super_dict with the specified keys.
[ "Returns", "a", "subset", "of", "the", "super_dict", "with", "the", "specified", "keys", "." ]
python
train
seequent/properties
properties/base/instance.py
https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/base/instance.py#L161-L172
def to_json(value, **kwargs): """Convert instance to JSON""" if isinstance(value, HasProperties): return value.serialize(**kwargs) try: return json.loads(json.dumps(value)) except TypeError: raise TypeError( "Cannot convert type {} to JSON without calling 'serialize' " "on an instance of Instance Property and registering a custom " "serializer".format(value.__class__.__name__) )
[ "def", "to_json", "(", "value", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "value", ",", "HasProperties", ")", ":", "return", "value", ".", "serialize", "(", "*", "*", "kwargs", ")", "try", ":", "return", "json", ".", "loads", "(", ...
Convert instance to JSON
[ "Convert", "instance", "to", "JSON" ]
python
train
RedFantom/ttkthemes
ttkthemes/_utils.py
https://github.com/RedFantom/ttkthemes/blob/e7fc354c02faf0e3eb4842d7f44131a1c43dd299/ttkthemes/_utils.py#L18-L49
def temporary_chdir(new_dir): """ Like os.chdir(), but always restores the old working directory For example, code like this... old_curdir = os.getcwd() os.chdir('stuff') do_some_stuff() os.chdir(old_curdir) ...leaves the current working directory unchanged if do_some_stuff() raises an error, so it should be rewritten like this: old_curdir = os.getcwd() os.chdir('stuff') try: do_some_stuff() finally: os.chdir(old_curdir) Or equivalently, like this: with utils.temporary_chdir('stuff'): do_some_stuff() """ old_dir = os.getcwd() os.chdir(new_dir) try: yield finally: os.chdir(old_dir)
[ "def", "temporary_chdir", "(", "new_dir", ")", ":", "old_dir", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "new_dir", ")", "try", ":", "yield", "finally", ":", "os", ".", "chdir", "(", "old_dir", ")" ]
Like os.chdir(), but always restores the old working directory For example, code like this... old_curdir = os.getcwd() os.chdir('stuff') do_some_stuff() os.chdir(old_curdir) ...leaves the current working directory unchanged if do_some_stuff() raises an error, so it should be rewritten like this: old_curdir = os.getcwd() os.chdir('stuff') try: do_some_stuff() finally: os.chdir(old_curdir) Or equivalently, like this: with utils.temporary_chdir('stuff'): do_some_stuff()
[ "Like", "os", ".", "chdir", "()", "but", "always", "restores", "the", "old", "working", "directory" ]
python
train
nion-software/nionswift-instrumentation-kit
nionswift_plugin/nion_instrumentation_ui/ScanControlPanel.py
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nionswift_plugin/nion_instrumentation_ui/ScanControlPanel.py#L581-L590
def size_to_content(self, horizontal_padding=None, vertical_padding=None): """ Size the canvas item to the text content. """ if horizontal_padding is None: horizontal_padding = 0 if vertical_padding is None: vertical_padding = 0 self.sizing.set_fixed_size(Geometry.IntSize(18 + 2 * horizontal_padding, 18 + 2 * vertical_padding))
[ "def", "size_to_content", "(", "self", ",", "horizontal_padding", "=", "None", ",", "vertical_padding", "=", "None", ")", ":", "if", "horizontal_padding", "is", "None", ":", "horizontal_padding", "=", "0", "if", "vertical_padding", "is", "None", ":", "vertical_p...
Size the canvas item to the text content.
[ "Size", "the", "canvas", "item", "to", "the", "text", "content", "." ]
python
train
offu/WeRoBot
werobot/session/mysqlstorage.py
https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/session/mysqlstorage.py#L72-L88
def set(self, id, value): """ 根据 id 写入数据。 :param id: 要写入的 id :param value: 要写入的数据,可以是一个 ``dict`` 对象 """ value = json_dumps(value) self.conn.cursor().execute( "INSERT INTO WeRoBot (id, value) VALUES (%s,%s) \ ON DUPLICATE KEY UPDATE value=%s", ( id, value, value, ) ) self.conn.commit()
[ "def", "set", "(", "self", ",", "id", ",", "value", ")", ":", "value", "=", "json_dumps", "(", "value", ")", "self", ".", "conn", ".", "cursor", "(", ")", ".", "execute", "(", "\"INSERT INTO WeRoBot (id, value) VALUES (%s,%s) \\\n ON DUPLICATE KEY U...
根据 id 写入数据。 :param id: 要写入的 id :param value: 要写入的数据,可以是一个 ``dict`` 对象
[ "根据", "id", "写入数据。" ]
python
train
boriel/zxbasic
arch/zx48k/backend/__16bit.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__16bit.py#L620-L634
def _ne16(ins): ''' Compares & pops top 2 operands out of the stack, and checks if the 1st operand != 2nd operand (top of the stack). Pushes 0 if False, 1 if True. 16 bit un/signed version ''' output = _16bit_oper(ins.quad[2], ins.quad[3]) output.append('or a') # Resets carry flag output.append('sbc hl, de') output.append('ld a, h') output.append('or l') output.append('push af') return output
[ "def", "_ne16", "(", "ins", ")", ":", "output", "=", "_16bit_oper", "(", "ins", ".", "quad", "[", "2", "]", ",", "ins", ".", "quad", "[", "3", "]", ")", "output", ".", "append", "(", "'or a'", ")", "# Resets carry flag", "output", ".", "append", "(...
Compares & pops top 2 operands out of the stack, and checks if the 1st operand != 2nd operand (top of the stack). Pushes 0 if False, 1 if True. 16 bit un/signed version
[ "Compares", "&", "pops", "top", "2", "operands", "out", "of", "the", "stack", "and", "checks", "if", "the", "1st", "operand", "!", "=", "2nd", "operand", "(", "top", "of", "the", "stack", ")", ".", "Pushes", "0", "if", "False", "1", "if", "True", "...
python
train
3DLIRIOUS/MeshLabXML
examples/shield.py
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/examples/shield.py#L38-L130
def main(): """Run main script""" # segments = number of segments to use for circles segments = 50 # star_points = number of points (or sides) of the star star_points = 5 # star_radius = radius of circle circumscribing the star star_radius = 2 # ring_thickness = thickness of the colored rings ring_thickness = 1 # sphere_radius = radius of sphere the shield will be deformed to sphere_radius = 2 * (star_radius + 3 * ring_thickness) # Star calculations: # Visually approximate a star by using multiple diamonds (i.e. scaled # squares) which overlap in the center. For the star calculations, # consider a central polygon with triangles attached to the edges, all # circumscribed by a circle. # polygon_radius = distance from center of circle to polygon edge midpoint polygon_radius = star_radius / \ (1 + math.tan(math.radians(180 / star_points)) / math.tan(math.radians(90 / star_points))) # width = 1/2 width of polygon edge/outer triangle bottom width = polygon_radius * math.tan(math.radians(180 / star_points)) # height = height of outer triangle height = width / math.tan(math.radians(90 / star_points)) shield = mlx.FilterScript(file_out="shield.ply") # Create the colored front of the shield using several concentric # annuluses; combine them together and subdivide so we have more vertices # to give a smoother deformation later. mlx.create.annulus(shield, radius=star_radius, cir_segments=segments, color='blue') mlx.create.annulus(shield, radius1=star_radius + ring_thickness, radius2=star_radius, cir_segments=segments, color='red') mlx.create.annulus(shield, radius1=star_radius + 2 * ring_thickness, radius2=star_radius + ring_thickness, cir_segments=segments, color='white') mlx.create.annulus(shield, radius1=star_radius + 3 * ring_thickness, radius2=star_radius + 2 * ring_thickness, cir_segments=segments, color='red') mlx.layers.join(shield) mlx.subdivide.midpoint(shield, iterations=2) # Create the inside surface of the shield & translate down slightly so it # doesn't overlap the front. mlx.create.annulus(shield, radius1=star_radius + 3 * ring_thickness, cir_segments=segments, color='silver') mlx.transform.rotate(shield, axis='y', angle=180) mlx.transform.translate(shield, value=[0, 0, -0.005]) mlx.subdivide.midpoint(shield, iterations=4) # Create a diamond for the center star. First create a plane, specifying # extra vertices to support the final deformation. The length from the # center of the plane to the corners should be 1 for ease of scaling, so # we use a side length of sqrt(2) (thanks Pythagoras!). Rotate the plane # by 45 degrees and scale it to stretch it out per the calculations above, # then translate it into place (including moving it up in z slightly so # that it doesn't overlap the shield front). mlx.create.grid(shield, size=math.sqrt(2), x_segments=10, y_segments=10, center=True, color='white') mlx.transform.rotate(shield, axis='z', angle=45) mlx.transform.scale(shield, value=[width, height, 1]) mlx.transform.translate(shield, value=[0, polygon_radius, 0.001]) # Duplicate the diamond and rotate the duplicates around, generating the # star. for _ in range(1, star_points): mlx.layers.duplicate(shield) mlx.transform.rotate(shield, axis='z', angle=360 / star_points) # Combine everything together and deform using a spherical function. mlx.layers.join(shield) mlx.transform.vert_function(shield, z_func='sqrt(%s-x^2-y^2)-%s+z' % (sphere_radius**2, sphere_radius)) # Run the script using meshlabserver and generate the model shield.run_script() return None
[ "def", "main", "(", ")", ":", "# segments = number of segments to use for circles", "segments", "=", "50", "# star_points = number of points (or sides) of the star", "star_points", "=", "5", "# star_radius = radius of circle circumscribing the star", "star_radius", "=", "2", "# rin...
Run main script
[ "Run", "main", "script" ]
python
test
zkbt/the-friendly-stars
thefriendlystars/constellations/lspm.py
https://github.com/zkbt/the-friendly-stars/blob/50d3f979e79e63c66629065c75595696dc79802e/thefriendlystars/constellations/lspm.py#L73-L105
def from_sky(cls, magnitudelimit=None): ''' Create a Constellation from a criteria search of the whole sky. Parameters ---------- magnitudelimit : float Maximum magnitude (for Ve = "estimated V"). ''' # define a query for cone search surrounding this center criteria = {} if magnitudelimit is not None: criteria[cls.defaultfilter + 'mag'] = '<{}'.format(magnitudelimit) v = Vizier(columns=cls.columns, column_filters=criteria) v.ROW_LIMIT = -1 # run the query print('querying Vizier for {}, for {}<{}'.format(cls.name, cls.defaultfilter, magnitudelimit)) table = v.query_constraints(catalog=cls.catalog, **criteria)[0] # store the search parameters in this object c = cls(cls.standardize_table(table)) c.standardized.meta['catalog'] = cls.catalog c.standardized.meta['criteria'] = criteria c.standardized.meta['magnitudelimit'] = magnitudelimit or c.magnitudelimit #c.magnitudelimit = magnitudelimit or c.magnitudelimit return c
[ "def", "from_sky", "(", "cls", ",", "magnitudelimit", "=", "None", ")", ":", "# define a query for cone search surrounding this center", "criteria", "=", "{", "}", "if", "magnitudelimit", "is", "not", "None", ":", "criteria", "[", "cls", ".", "defaultfilter", "+",...
Create a Constellation from a criteria search of the whole sky. Parameters ---------- magnitudelimit : float Maximum magnitude (for Ve = "estimated V").
[ "Create", "a", "Constellation", "from", "a", "criteria", "search", "of", "the", "whole", "sky", "." ]
python
train
apache/spark
python/pyspark/rdd.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L329-L342
def flatMap(self, f, preservesPartitioning=False): """ Return a new RDD by first applying a function to all elements of this RDD, and then flattening the results. >>> rdd = sc.parallelize([2, 3, 4]) >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect()) [1, 1, 1, 2, 2, 3] >>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect()) [(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)] """ def func(s, iterator): return chain.from_iterable(map(fail_on_stopiteration(f), iterator)) return self.mapPartitionsWithIndex(func, preservesPartitioning)
[ "def", "flatMap", "(", "self", ",", "f", ",", "preservesPartitioning", "=", "False", ")", ":", "def", "func", "(", "s", ",", "iterator", ")", ":", "return", "chain", ".", "from_iterable", "(", "map", "(", "fail_on_stopiteration", "(", "f", ")", ",", "i...
Return a new RDD by first applying a function to all elements of this RDD, and then flattening the results. >>> rdd = sc.parallelize([2, 3, 4]) >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect()) [1, 1, 1, 2, 2, 3] >>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect()) [(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
[ "Return", "a", "new", "RDD", "by", "first", "applying", "a", "function", "to", "all", "elements", "of", "this", "RDD", "and", "then", "flattening", "the", "results", "." ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L799-L809
def update_dcnm_net_info(self, tenant_id, direc, vlan_id, segmentation_id): """Update the DCNM net info with allocated values of seg/vlan. """ net_dict = self.retrieve_dcnm_net_info(tenant_id, direc) if not net_dict: return None net_dict['vlan_id'] = vlan_id if vlan_id != 0: net_dict['mob_domain'] = True net_dict['segmentation_id'] = segmentation_id return net_dict
[ "def", "update_dcnm_net_info", "(", "self", ",", "tenant_id", ",", "direc", ",", "vlan_id", ",", "segmentation_id", ")", ":", "net_dict", "=", "self", ".", "retrieve_dcnm_net_info", "(", "tenant_id", ",", "direc", ")", "if", "not", "net_dict", ":", "return", ...
Update the DCNM net info with allocated values of seg/vlan.
[ "Update", "the", "DCNM", "net", "info", "with", "allocated", "values", "of", "seg", "/", "vlan", "." ]
python
train
saltstack/salt
salt/modules/pcs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pcs.py#L306-L323
def prop_show(prop, extra_args=None, cibfile=None): ''' Show the value of a cluster property prop name of the property extra_args additional options for the pcs property command cibfile use cibfile instead of the live CIB CLI Example: .. code-block:: bash salt '*' pcs.prop_show cibfile='/tmp/2_node_cluster.cib' prop='no-quorum-policy' cibfile='/tmp/2_node_cluster.cib' ''' return item_show(item='property', item_id=prop, extra_args=extra_args, cibfile=cibfile)
[ "def", "prop_show", "(", "prop", ",", "extra_args", "=", "None", ",", "cibfile", "=", "None", ")", ":", "return", "item_show", "(", "item", "=", "'property'", ",", "item_id", "=", "prop", ",", "extra_args", "=", "extra_args", ",", "cibfile", "=", "cibfil...
Show the value of a cluster property prop name of the property extra_args additional options for the pcs property command cibfile use cibfile instead of the live CIB CLI Example: .. code-block:: bash salt '*' pcs.prop_show cibfile='/tmp/2_node_cluster.cib' prop='no-quorum-policy' cibfile='/tmp/2_node_cluster.cib'
[ "Show", "the", "value", "of", "a", "cluster", "property" ]
python
train
mottosso/be
be/cli.py
https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/cli.py#L77-L274
def in_(ctx, topics, yes, as_, enter): """Set the current topics to `topics` Environment: BE_PROJECT: First topic BE_CWD: Current `be` working directory BE_TOPICS: Arguments to `in` BE_DEVELOPMENTDIR: Absolute path to current development directory BE_PROJECTROOT: Absolute path to current project BE_PROJECTSROOT: Absolute path to where projects are located BE_ACTIVE: 0 or 1, indicates an active be environment BE_USER: Current user, overridden with `--as` BE_SCRIPT: User-supplied shell script BE_PYTHON: User-supplied python script BE_ENTER: 0 or 1 depending on whether the topic was entered BE_GITHUB_API_TOKEN: Optional GitHub API token BE_ENVIRONMENT: Space-separated list of user-added environment variables BE_TEMPDIR: Directory in which temporary files are stored BE_PRESETSDIR: Directory in which presets are searched BE_ALIASDIR: Directory in which aliases are written BE_BINDING: Binding between template and item in inventory \b Usage: $ be in project topics """ topics = map(str, topics) # They enter as unicode if self.isactive(): lib.echo("ERROR: Exit current project first") sys.exit(lib.USER_ERROR) # Determine topic syntax if len(topics[0].split("/")) == 3: topic_syntax = lib.FIXED project = topics[0].split("/")[0] else: topic_syntax = lib.POSITIONAL project = topics[0] project_dir = lib.project_dir(_extern.cwd(), project) if not os.path.exists(project_dir): lib.echo("Project \"%s\" not found. " % project) lib.echo("\nAvailable:") ctx.invoke(ls) sys.exit(lib.USER_ERROR) # Boot up context = lib.context(root=_extern.cwd(), project=project) be = _extern.load_be(project) templates = _extern.load_templates(project) inventory = _extern.load_inventory(project) context.update({ "BE_PROJECT": project, "BE_USER": str(as_), "BE_ENTER": "1" if enter else "", "BE_TOPICS": " ".join(topics) }) # Remap topic syntax, for backwards compatibility # In cases where the topic is entered in a way that # differs from the template, remap topic to template. if any(re.findall("{\d+}", pattern) for pattern in templates.values()): template_syntax = lib.POSITIONAL else: template_syntax = lib.FIXED if topic_syntax & lib.POSITIONAL and not template_syntax & lib.POSITIONAL: topics = ["/".join(topics)] if topic_syntax & lib.FIXED and not template_syntax & lib.FIXED: topics[:] = topics[0].split("/") try: key = be.get("templates", {}).get("key") or "{1}" item = lib.item_from_topics(key, topics) binding = lib.binding_from_item(inventory, item) context["BE_BINDING"] = binding except IndexError as exc: lib.echo("At least %s topics are required" % str(exc)) sys.exit(lib.USER_ERROR) except KeyError as exc: lib.echo("\"%s\" not found" % item) if exc.bindings: lib.echo("\nAvailable:") for item_ in sorted(exc.bindings, key=lambda a: (exc.bindings[a], a)): lib.echo("- %s (%s)" % (item_, exc.bindings[item_])) sys.exit(lib.USER_ERROR) # Finally, determine a development directory # based on the template-, not topic-syntax. if template_syntax & lib.POSITIONAL: try: development_dir = lib.pos_development_directory( templates=templates, inventory=inventory, context=context, topics=topics, user=as_, item=item) except KeyError as exc: lib.echo("\"%s\" not found" % item) if exc.bindings: lib.echo("\nAvailable:") for item_ in sorted(exc.bindings, key=lambda a: (exc.bindings[a], a)): lib.echo("- %s (%s)" % (item_, exc.bindings[item_])) sys.exit(lib.USER_ERROR) else: # FIXED topic_syntax development_dir = lib.fixed_development_directory( templates, inventory, topics, as_) context["BE_DEVELOPMENTDIR"] = development_dir tempdir = (tempfile.mkdtemp() if not os.environ.get("BE_TEMPDIR") else os.environ["BE_TEMPDIR"]) context["BE_TEMPDIR"] = tempdir # Should it be entered? if enter and not os.path.exists(development_dir): create = False if yes: create = True else: sys.stdout.write("No development directory found. Create? [Y/n]: ") sys.stdout.flush() if raw_input().lower() in ("", "y", "yes"): create = True if create: ctx.invoke(mkdir, dir=development_dir) else: sys.stdout.write("Cancelled") sys.exit(lib.NORMAL) # Parse be.yaml if "script" in be: context["BE_SCRIPT"] = _extern.write_script( be["script"], tempdir).replace("\\", "/") if "python" in be: script = "\n".join(be["python"]) context["BE_PYTHON"] = script try: exec script in {"__name__": __name__} except Exception as e: lib.echo("ERROR: %s" % e) invalids = [v for v in context.values() if not isinstance(v, str)] assert all(isinstance(v, str) for v in context.values()), invalids # Create aliases aliases_dir = _extern.write_aliases( be.get("alias", {}), tempdir) context["PATH"] = (aliases_dir + os.pathsep + context.get("PATH", "")) context["BE_ALIASDIR"] = aliases_dir # Parse redirects lib.parse_redirect( be.get("redirect", {}), topics, context) # Override inherited context # with that coming from be.yaml. if "environment" in be: parsed = lib.parse_environment( fields=be["environment"], context=context, topics=topics) context["BE_ENVIRONMENT"] = " ".join(parsed.keys()) context.update(parsed) if "BE_TESTING" in context: os.chdir(development_dir) os.environ.update(context) else: parent = lib.parent() cmd = lib.cmd(parent) # Store reference to calling shell context["BE_SHELL"] = parent try: sys.exit(subprocess.call(cmd, env=context)) finally: import shutil shutil.rmtree(tempdir)
[ "def", "in_", "(", "ctx", ",", "topics", ",", "yes", ",", "as_", ",", "enter", ")", ":", "topics", "=", "map", "(", "str", ",", "topics", ")", "# They enter as unicode", "if", "self", ".", "isactive", "(", ")", ":", "lib", ".", "echo", "(", "\"ERRO...
Set the current topics to `topics` Environment: BE_PROJECT: First topic BE_CWD: Current `be` working directory BE_TOPICS: Arguments to `in` BE_DEVELOPMENTDIR: Absolute path to current development directory BE_PROJECTROOT: Absolute path to current project BE_PROJECTSROOT: Absolute path to where projects are located BE_ACTIVE: 0 or 1, indicates an active be environment BE_USER: Current user, overridden with `--as` BE_SCRIPT: User-supplied shell script BE_PYTHON: User-supplied python script BE_ENTER: 0 or 1 depending on whether the topic was entered BE_GITHUB_API_TOKEN: Optional GitHub API token BE_ENVIRONMENT: Space-separated list of user-added environment variables BE_TEMPDIR: Directory in which temporary files are stored BE_PRESETSDIR: Directory in which presets are searched BE_ALIASDIR: Directory in which aliases are written BE_BINDING: Binding between template and item in inventory \b Usage: $ be in project topics
[ "Set", "the", "current", "topics", "to", "topics" ]
python
train
marcocamma/datastorage
datastorage/datastorage.py
https://github.com/marcocamma/datastorage/blob/d88cdc08414c1c99d34d62e65fcbf807c3088a37/datastorage/datastorage.py#L158-L163
def h5ToDict(h5, readH5pyDataset=True): """ Read a hdf5 file into a dictionary """ h = h5py.File(h5, "r") ret = unwrapArray(h, recursive=True, readH5pyDataset=readH5pyDataset) if readH5pyDataset: h.close() return ret
[ "def", "h5ToDict", "(", "h5", ",", "readH5pyDataset", "=", "True", ")", ":", "h", "=", "h5py", ".", "File", "(", "h5", ",", "\"r\"", ")", "ret", "=", "unwrapArray", "(", "h", ",", "recursive", "=", "True", ",", "readH5pyDataset", "=", "readH5pyDataset"...
Read a hdf5 file into a dictionary
[ "Read", "a", "hdf5", "file", "into", "a", "dictionary" ]
python
train
ManiacalLabs/BiblioPixel
bibliopixel/project/recurse.py
https://github.com/ManiacalLabs/BiblioPixel/blob/fd97e6c651a4bbcade64733847f4eec8f7704b7c/bibliopixel/project/recurse.py#L20-L74
def recurse(desc, pre='pre_recursion', post=None, python_path=None): """ Depth first recursion through a dictionary containing type constructors The arguments pre, post and children are independently either: * None, which means to do nothing * a string, which means to use the static class method of that name on the class being constructed, or * a callable, to be called at each recursion Arguments: dictionary -- a project dictionary or one of its subdictionaries pre -- called before children are visited node in the recursion post -- called after children are visited in the recursion python_path -- relative path to start resolving typenames """ def call(f, desc): if isinstance(f, str): # f is the name of a static class method on the datatype. f = getattr(datatype, f, None) return f and f(desc) # Automatically load strings that look like JSON or Yaml filenames. desc = load.load_if_filename(desc) or desc desc = construct.to_type_constructor(desc, python_path) datatype = desc.get('datatype') desc = call(pre, desc) or desc for child_name in getattr(datatype, 'CHILDREN', []): child = desc.get(child_name) if child: is_plural = child_name.endswith('s') remove_s = is_plural and child_name != 'drivers' # This is because it's the "drivers" directory, whereas # the others are animation, control, layout, project # without the s. TODO: rename drivers/ to driver/ in v4 cname = child_name[:-1] if remove_s else child_name new_path = python_path or ('bibliopixel.' + cname) if is_plural: if isinstance(child, (dict, str)): child = [child] for i, c in enumerate(child): child[i] = recurse(c, pre, post, new_path) desc[child_name] = child else: desc[child_name] = recurse(child, pre, post, new_path) d = call(post, desc) return desc if d is None else d
[ "def", "recurse", "(", "desc", ",", "pre", "=", "'pre_recursion'", ",", "post", "=", "None", ",", "python_path", "=", "None", ")", ":", "def", "call", "(", "f", ",", "desc", ")", ":", "if", "isinstance", "(", "f", ",", "str", ")", ":", "# f is the ...
Depth first recursion through a dictionary containing type constructors The arguments pre, post and children are independently either: * None, which means to do nothing * a string, which means to use the static class method of that name on the class being constructed, or * a callable, to be called at each recursion Arguments: dictionary -- a project dictionary or one of its subdictionaries pre -- called before children are visited node in the recursion post -- called after children are visited in the recursion python_path -- relative path to start resolving typenames
[ "Depth", "first", "recursion", "through", "a", "dictionary", "containing", "type", "constructors" ]
python
valid
klavinslab/coral
coral/database/_rebase.py
https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/database/_rebase.py#L15-L51
def update(self): '''Update definitions.''' # Download http://rebase.neb.com/rebase/link_withref to tmp self._tmpdir = tempfile.mkdtemp() try: self._rebase_file = self._tmpdir + '/rebase_file' print 'Downloading latest enzyme definitions' url = 'http://rebase.neb.com/rebase/link_withref' header = {'User-Agent': 'Mozilla/5.0'} req = urllib2.Request(url, headers=header) con = urllib2.urlopen(req) with open(self._rebase_file, 'wb') as rebase_file: rebase_file.write(con.read()) # Process into self._enzyme_dict self._process_file() except urllib2.HTTPError, e: print 'HTTP Error: {} {}'.format(e.code, url) print 'Falling back on default enzyme list' self._enzyme_dict = coral.constants.fallback_enzymes except urllib2.URLError, e: print 'URL Error: {} {}'.format(e.reason, url) print 'Falling back on default enzyme list' self._enzyme_dict = coral.constants.fallback_enzymes # Process into RestrictionSite objects? (depends on speed) print 'Processing into RestrictionSite instances.' self.restriction_sites = {} # TODO: make sure all names are unique for key, (site, cuts) in self._enzyme_dict.iteritems(): # Make a site try: r = coral.RestrictionSite(coral.DNA(site), cuts, name=key) # Add it to dict with name as key self.restriction_sites[key] = r except ValueError: # Encountered ambiguous sequence, have to ignore it until # coral.DNA can handle ambiguous DNA pass
[ "def", "update", "(", "self", ")", ":", "# Download http://rebase.neb.com/rebase/link_withref to tmp", "self", ".", "_tmpdir", "=", "tempfile", ".", "mkdtemp", "(", ")", "try", ":", "self", ".", "_rebase_file", "=", "self", ".", "_tmpdir", "+", "'/rebase_file'", ...
Update definitions.
[ "Update", "definitions", "." ]
python
train
algolia/algoliasearch-django
algoliasearch_django/models.py
https://github.com/algolia/algoliasearch-django/blob/ca219db41eb56bdd1c0389cdc1508a41698958d7/algoliasearch_django/models.py#L330-L361
def update_records(self, qs, batch_size=1000, **kwargs): """ Updates multiple records. This method is optimized for speed. It takes a QuerySet and the same arguments as QuerySet.update(). Optionnaly, you can specify the size of the batch send to Algolia with batch_size (default to 1000). >>> from algoliasearch_django import update_records >>> qs = MyModel.objects.filter(myField=False) >>> update_records(MyModel, qs, myField=True) >>> qs.update(myField=True) """ tmp = {} for key, value in kwargs.items(): name = self.__translate_fields.get(key, None) if name: tmp[name] = value batch = [] objectsIDs = qs.only(self.custom_objectID).values_list( self.custom_objectID, flat=True) for elt in objectsIDs: tmp['objectID'] = elt batch.append(dict(tmp)) if len(batch) >= batch_size: self.__index.partial_update_objects(batch) batch = [] if len(batch) > 0: self.__index.partial_update_objects(batch)
[ "def", "update_records", "(", "self", ",", "qs", ",", "batch_size", "=", "1000", ",", "*", "*", "kwargs", ")", ":", "tmp", "=", "{", "}", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "name", "=", "self", ".", "__transl...
Updates multiple records. This method is optimized for speed. It takes a QuerySet and the same arguments as QuerySet.update(). Optionnaly, you can specify the size of the batch send to Algolia with batch_size (default to 1000). >>> from algoliasearch_django import update_records >>> qs = MyModel.objects.filter(myField=False) >>> update_records(MyModel, qs, myField=True) >>> qs.update(myField=True)
[ "Updates", "multiple", "records", "." ]
python
valid
CxAalto/gtfspy
gtfspy/exports.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/exports.py#L30-L44
def write_nodes(gtfs, output, fields=None): """ Parameters ---------- gtfs: gtfspy.GTFS output: str Path to the output file fields: list, optional which pieces of information to provide """ nodes = gtfs.get_table("stops") if fields is not None: nodes = nodes[fields] with util.create_file(output, tmpdir=True, keepext=True) as tmpfile: nodes.to_csv(tmpfile, encoding='utf-8', index=False, sep=";")
[ "def", "write_nodes", "(", "gtfs", ",", "output", ",", "fields", "=", "None", ")", ":", "nodes", "=", "gtfs", ".", "get_table", "(", "\"stops\"", ")", "if", "fields", "is", "not", "None", ":", "nodes", "=", "nodes", "[", "fields", "]", "with", "util"...
Parameters ---------- gtfs: gtfspy.GTFS output: str Path to the output file fields: list, optional which pieces of information to provide
[ "Parameters", "----------", "gtfs", ":", "gtfspy", ".", "GTFS", "output", ":", "str", "Path", "to", "the", "output", "file", "fields", ":", "list", "optional", "which", "pieces", "of", "information", "to", "provide" ]
python
valid
hydpy-dev/hydpy
hydpy/auxs/anntools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/anntools.py#L788-L803
def verify(self) -> None: """Raise a |RuntimeError| if the network's shape is not defined completely. >>> from hydpy import ANN >>> ANN(None).verify() Traceback (most recent call last): ... RuntimeError: The shape of the the artificial neural network \ parameter `ann` of element `?` has not been defined so far. """ if not self.__protectedproperties.allready(self): raise RuntimeError( 'The shape of the the artificial neural network ' 'parameter %s has not been defined so far.' % objecttools.elementphrase(self))
[ "def", "verify", "(", "self", ")", "->", "None", ":", "if", "not", "self", ".", "__protectedproperties", ".", "allready", "(", "self", ")", ":", "raise", "RuntimeError", "(", "'The shape of the the artificial neural network '", "'parameter %s has not been defined so far...
Raise a |RuntimeError| if the network's shape is not defined completely. >>> from hydpy import ANN >>> ANN(None).verify() Traceback (most recent call last): ... RuntimeError: The shape of the the artificial neural network \ parameter `ann` of element `?` has not been defined so far.
[ "Raise", "a", "|RuntimeError|", "if", "the", "network", "s", "shape", "is", "not", "defined", "completely", "." ]
python
train
hsolbrig/sparql_slurper
sparql_slurper/_slurpygraph.py
https://github.com/hsolbrig/sparql_slurper/blob/9e338549337a6268d6f9c52e7fbf5b493b80cf59/sparql_slurper/_slurpygraph.py#L81-L122
def triples(self, pattern: QueryTriple): """ Return the triples that match pattern :param pattern: `(s, p, o)` tuple, with `None` as wild cards :return: Generator for resulting triples """ self.total_calls += 1 if self.graph_name is not None: gn = "?g" if not self.graph_name else self.graph_name gquery = f"graph {gn} {{" gqueryend = '}' else: gquery = gqueryend = '' if not self.already_resolved(pattern): subj = self._repr_element(pattern[0]) if pattern[0] is not None else '?s' pred = self._repr_element(pattern[1]) if pattern[1] is not None else '?p' obj = self._repr_element(pattern[2]) if pattern[2] is not None else '?o' query = f"SELECT ?s ?p ?o {{{gquery}{subj} {pred} {obj}{gqueryend}}}" start = time.time() if self.debug_slurps: print(f"SPARQL: ({query})", end="") self.sparql.setQuery(query) resp = self.sparql.query().convert() elapsed = time.time() - start ntriples = len(resp['results']['bindings']) self.total_slurptime += elapsed self.total_triples += ntriples self.total_queries += 1 if self.debug_slurps: print(f" ({round(elapsed, 2)} secs) - {ntriples} triples") query_result = self._query_result_hook(self) if self._query_result_hook is not None else None for row in resp['results']['bindings']: triple = RDFTriple(pattern[0] if pattern[0] is not None else self._map_type(row['s']), pattern[1] if pattern[1] is not None else self._map_type(row['p']), pattern[2] if pattern[2] is not None else self._map_type(row['o'])) self.add(triple) if query_result: query_result.add(triple) if query_result: query_result.done() self.resolved_nodes.append(pattern) return super().triples(pattern)
[ "def", "triples", "(", "self", ",", "pattern", ":", "QueryTriple", ")", ":", "self", ".", "total_calls", "+=", "1", "if", "self", ".", "graph_name", "is", "not", "None", ":", "gn", "=", "\"?g\"", "if", "not", "self", ".", "graph_name", "else", "self", ...
Return the triples that match pattern :param pattern: `(s, p, o)` tuple, with `None` as wild cards :return: Generator for resulting triples
[ "Return", "the", "triples", "that", "match", "pattern" ]
python
train
widdowquinn/pyADHoRe
pyadhore/iadhore.py
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L244-L259
def is_redundant_multiplicon(self, value): """ Returns True if the passed multiplicon ID is redundant, False otherwise. - value, (int) multiplicon ID """ if not hasattr(self, '_redundant_multiplicon_cache'): sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"''' cur = self._dbconn.cursor() cur.execute(sql, {'id': str(value)}) result = [int(r[0]) for r in cur.fetchall()] self._redundant_multiplicon_cache = set(result) if value in self._redundant_multiplicon_cache: return True else: return False
[ "def", "is_redundant_multiplicon", "(", "self", ",", "value", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_redundant_multiplicon_cache'", ")", ":", "sql", "=", "'''SELECT id FROM multiplicons WHERE is_redundant=\"-1\"'''", "cur", "=", "self", ".", "_dbconn",...
Returns True if the passed multiplicon ID is redundant, False otherwise. - value, (int) multiplicon ID
[ "Returns", "True", "if", "the", "passed", "multiplicon", "ID", "is", "redundant", "False", "otherwise", "." ]
python
train
LISE-B26/pylabcontrol
build/lib/pylabcontrol/src/gui/qt_b26_widgets.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/gui/qt_b26_widgets.py#L183-L230
def setData(self, column, role, value): """ if value is valid sets the data to value Args: column: column of item role: role of item (see Qt doc) value: value to be set """ assert isinstance(column, int) assert isinstance(role, int) # make sure that the right row is selected, this is not always the case for checkboxes and # combo boxes because they are items on top of the tree structure if isinstance(value, (QtWidgets.QComboBox, QtWidgets.QCheckBox)): self.treeWidget().setCurrentItem(self) # if row 2 (editrole, value has been entered) if role == 2 and column == 1: if isinstance(value, str): value = self.cast_type(value) # cast into same type as valid values if isinstance(value, QtCore.QVariant): value = self.cast_type(value.toString()) # cast into same type as valid values if isinstance(value, QtWidgets.QComboBox): value = self.cast_type(value.currentText()) if isinstance(value, QtWidgets.QCheckBox): value = bool(int(value.checkState())) # checkState() gives 2 (True) and 0 (False) # save value in internal variable self.value = value elif column == 0: # labels should not be changed so we set it back value = self.name if value is None: value = self.value # 180327(asafira) --- why do we need to do the following lines? Why not just always call super or always # emitDataChanged()? if not isinstance(value, bool): super(B26QTreeItem, self).setData(column, role, value) else: self.emitDataChanged()
[ "def", "setData", "(", "self", ",", "column", ",", "role", ",", "value", ")", ":", "assert", "isinstance", "(", "column", ",", "int", ")", "assert", "isinstance", "(", "role", ",", "int", ")", "# make sure that the right row is selected, this is not always the cas...
if value is valid sets the data to value Args: column: column of item role: role of item (see Qt doc) value: value to be set
[ "if", "value", "is", "valid", "sets", "the", "data", "to", "value", "Args", ":", "column", ":", "column", "of", "item", "role", ":", "role", "of", "item", "(", "see", "Qt", "doc", ")", "value", ":", "value", "to", "be", "set" ]
python
train
phenomecentre/isaExplorer
isaExplorer/isaExplorer.py
https://github.com/phenomecentre/isaExplorer/blob/2fc817d53b6acba46918a0db8dd2f72e5e1d785e/isaExplorer/isaExplorer.py#L158-L183
def dropStudyFromISA(studyNum, pathToISATABFile): """ This function removes a study from an ISA file Typically, you should use the exploreISA function to check the contents of the ISA file and retrieve the study number you are interested in! Warning: this function deletes the given study and all its associated assays :param studyNum: The Study number (notice it's 1-based index). :type studyNum: int :param pathToISATABFile: The path to the ISATAB file :type pathToISATABFile: string :raise FileNotFoundError: If pathToISATABFile does not contain file 'i_Investigation.txt'. """ from isatools import isatab import os try: isa = isatab.load(pathToISATABFile, skip_load_tables=True) studies = isa.studies for assay in studies[studyNum - 1].assays: if os.path.isfile(os.path.join(pathToISATABFile,assay.filename)): os.remove(os.path.join(pathToISATABFile,assay.filename)) if os.path.isfile(os.path.join(pathToISATABFile,studies[studyNum - 1].filename)): os.remove(os.path.join(pathToISATABFile,studies[studyNum - 1].filename)) del studies[studyNum - 1] isatab.dump(isa_obj=isa, output_path=pathToISATABFile) except FileNotFoundError as err: raise err
[ "def", "dropStudyFromISA", "(", "studyNum", ",", "pathToISATABFile", ")", ":", "from", "isatools", "import", "isatab", "import", "os", "try", ":", "isa", "=", "isatab", ".", "load", "(", "pathToISATABFile", ",", "skip_load_tables", "=", "True", ")", "studies",...
This function removes a study from an ISA file Typically, you should use the exploreISA function to check the contents of the ISA file and retrieve the study number you are interested in! Warning: this function deletes the given study and all its associated assays :param studyNum: The Study number (notice it's 1-based index). :type studyNum: int :param pathToISATABFile: The path to the ISATAB file :type pathToISATABFile: string :raise FileNotFoundError: If pathToISATABFile does not contain file 'i_Investigation.txt'.
[ "This", "function", "removes", "a", "study", "from", "an", "ISA", "file", "Typically", "you", "should", "use", "the", "exploreISA", "function", "to", "check", "the", "contents", "of", "the", "ISA", "file", "and", "retrieve", "the", "study", "number", "you", ...
python
train
acutesoftware/AIKIF
aikif/comms.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/comms.py#L43-L54
def delete_channel(self, channel, pwd_hash): """ adds a channel, but must have authenication """ if channel.pwd_hash == pwd_hash: self.channels.remove(channel) lg.record_process('comms.py', 'Removed channel ' + channel.name) return True else: lg.record_process('comms.py', 'ERROR - Cant delete : wrong hash for ' + channel.name) return False
[ "def", "delete_channel", "(", "self", ",", "channel", ",", "pwd_hash", ")", ":", "if", "channel", ".", "pwd_hash", "==", "pwd_hash", ":", "self", ".", "channels", ".", "remove", "(", "channel", ")", "lg", ".", "record_process", "(", "'comms.py'", ",", "'...
adds a channel, but must have authenication
[ "adds", "a", "channel", "but", "must", "have", "authenication" ]
python
train
Pajinek/vhm
server/apps/xmlrpc/utils/vhlib_server.py
https://github.com/Pajinek/vhm/blob/e323e99855fd5c40fd61fba87c2646a1165505ed/server/apps/xmlrpc/utils/vhlib_server.py#L45-L53
def check_size_all(self): """ Get size of homedir and update data on the server """ result = self.rpc_srv.get_all_account(self.token) print "debug: %s" % result for it in result: size = getFolderSize(it["path"]) result = self.rpc_srv.set_account_size(self.token, it["id"], size)
[ "def", "check_size_all", "(", "self", ")", ":", "result", "=", "self", ".", "rpc_srv", ".", "get_all_account", "(", "self", ".", "token", ")", "print", "\"debug: %s\"", "%", "result", "for", "it", "in", "result", ":", "size", "=", "getFolderSize", "(", "...
Get size of homedir and update data on the server
[ "Get", "size", "of", "homedir", "and", "update", "data", "on", "the", "server" ]
python
train
ScottDuckworth/python-anyvcs
anyvcs/hg.py
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/hg.py#L79-L83
def create(cls, path): """Create a new repository""" cmd = [HG, 'init', path] subprocess.check_call(cmd) return cls(path)
[ "def", "create", "(", "cls", ",", "path", ")", ":", "cmd", "=", "[", "HG", ",", "'init'", ",", "path", "]", "subprocess", ".", "check_call", "(", "cmd", ")", "return", "cls", "(", "path", ")" ]
Create a new repository
[ "Create", "a", "new", "repository" ]
python
train
skorch-dev/skorch
skorch/net.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/net.py#L387-L431
def initialize_callbacks(self): """Initializes all callbacks and save the result in the ``callbacks_`` attribute. Both ``default_callbacks`` and ``callbacks`` are used (in that order). Callbacks may either be initialized or not, and if they don't have a name, the name is inferred from the class name. The ``initialize`` method is called on all callbacks. The final result will be a list of tuples, where each tuple consists of a name and an initialized callback. If names are not unique, a ValueError is raised. """ callbacks_ = [] class Dummy: # We cannot use None as dummy value since None is a # legitimate value to be set. pass for name, cb in self._uniquely_named_callbacks(): # check if callback itself is changed param_callback = getattr(self, 'callbacks__' + name, Dummy) if param_callback is not Dummy: # callback itself was set cb = param_callback # below: check for callback params # don't set a parameter for non-existing callback params = self._get_params_for('callbacks__{}'.format(name)) if (cb is None) and params: raise ValueError("Trying to set a parameter for callback {} " "which does not exist.".format(name)) if cb is None: continue if isinstance(cb, type): # uninitialized: cb = cb(**params) else: cb.set_params(**params) cb.initialize() callbacks_.append((name, cb)) self.callbacks_ = callbacks_ return self
[ "def", "initialize_callbacks", "(", "self", ")", ":", "callbacks_", "=", "[", "]", "class", "Dummy", ":", "# We cannot use None as dummy value since None is a", "# legitimate value to be set.", "pass", "for", "name", ",", "cb", "in", "self", ".", "_uniquely_named_callba...
Initializes all callbacks and save the result in the ``callbacks_`` attribute. Both ``default_callbacks`` and ``callbacks`` are used (in that order). Callbacks may either be initialized or not, and if they don't have a name, the name is inferred from the class name. The ``initialize`` method is called on all callbacks. The final result will be a list of tuples, where each tuple consists of a name and an initialized callback. If names are not unique, a ValueError is raised.
[ "Initializes", "all", "callbacks", "and", "save", "the", "result", "in", "the", "callbacks_", "attribute", "." ]
python
train
apple/turicreate
src/unity/python/turicreate/toolkits/topic_model/topic_model.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L396-L428
def _training_stats(self): """ Return a dictionary of statistics collected during creation of the model. These statistics are also available with the ``get`` method and are described in more detail in that method's documentation. Returns ------- out : dict Dictionary of statistics compiled during creation of the TopicModel. See Also -------- summary Examples -------- >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> m = turicreate.topic_model.create(docs) >>> m._training_stats() {'training_iterations': 20, 'training_time': 20.5034} """ fields = self._list_fields() stat_fields = ['training_time', 'training_iterations'] if 'validation_perplexity' in fields: stat_fields.append('validation_perplexity') ret = {k : self._get(k) for k in stat_fields} return ret
[ "def", "_training_stats", "(", "self", ")", ":", "fields", "=", "self", ".", "_list_fields", "(", ")", "stat_fields", "=", "[", "'training_time'", ",", "'training_iterations'", "]", "if", "'validation_perplexity'", "in", "fields", ":", "stat_fields", ".", "appen...
Return a dictionary of statistics collected during creation of the model. These statistics are also available with the ``get`` method and are described in more detail in that method's documentation. Returns ------- out : dict Dictionary of statistics compiled during creation of the TopicModel. See Also -------- summary Examples -------- >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> m = turicreate.topic_model.create(docs) >>> m._training_stats() {'training_iterations': 20, 'training_time': 20.5034}
[ "Return", "a", "dictionary", "of", "statistics", "collected", "during", "creation", "of", "the", "model", ".", "These", "statistics", "are", "also", "available", "with", "the", "get", "method", "and", "are", "described", "in", "more", "detail", "in", "that", ...
python
train
google/grr
grr/client/grr_response_client/comms.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/comms.py#L1358-L1363
def SavePrivateKey(self, private_key): """Store the new private key on disk.""" self.private_key = private_key config.CONFIG.Set("Client.private_key", self.private_key.SerializeToString()) config.CONFIG.Write()
[ "def", "SavePrivateKey", "(", "self", ",", "private_key", ")", ":", "self", ".", "private_key", "=", "private_key", "config", ".", "CONFIG", ".", "Set", "(", "\"Client.private_key\"", ",", "self", ".", "private_key", ".", "SerializeToString", "(", ")", ")", ...
Store the new private key on disk.
[ "Store", "the", "new", "private", "key", "on", "disk", "." ]
python
train
mjirik/io3d
io3d/dcmreaddata.py
https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dcmreaddata.py#L645-L656
def print_series_info(self, series_info, minimal_series_number=1): """ Print series_info from dcmdirstats """ strinfo = '' if len(series_info) > minimal_series_number: for serie_number in series_info.keys(): strl = get_one_serie_info(series_info, serie_number) strinfo = strinfo + strl + '\n' # rint strl return strinfo
[ "def", "print_series_info", "(", "self", ",", "series_info", ",", "minimal_series_number", "=", "1", ")", ":", "strinfo", "=", "''", "if", "len", "(", "series_info", ")", ">", "minimal_series_number", ":", "for", "serie_number", "in", "series_info", ".", "keys...
Print series_info from dcmdirstats
[ "Print", "series_info", "from", "dcmdirstats" ]
python
train
PRIArobotics/HedgehogUtils
hedgehog/utils/__init__.py
https://github.com/PRIArobotics/HedgehogUtils/blob/cc368df270288c870cc66d707696ccb62823ca9c/hedgehog/utils/__init__.py#L21-L103
def coroutine(func): """ A decorator to wrap a generator function into a callable interface. >>> @coroutine ... def sum(count): ... sum = 0 ... for _ in range(0, count): ... # note that generator arguments are passed as a tuple, hence `num, = ...` instead of `num = ...` ... num, = yield sum ... sum += num ... yield sum ... >>> add = sum(2) >>> add(2) 2 >>> add(3) 5 >>> add(4) Traceback (most recent call last): ... StopIteration As you can see, this lets you keep state between calls easily, as expected from a generator, while calling the function looks like a function. The same without `@coroutine` would look like this: >>> def sum(count): ... sum = 0 ... for _ in range(0, count): ... num = yield sum ... sum += num ... yield sum ... >>> add = sum(2) >>> next(add) # initial next call is necessary 0 >>> add.send(2) # to call the function, next or send must be used 2 >>> add.send(3) 5 >>> add.send(4) Traceback (most recent call last): ... StopIteration Here is an example that shows how to translate traditional functions to use this decorator: >>> def foo(a, b): ... # do some foo ... return a + b ... >>> def bar(c): ... # do some bar ... return 2*c ... >>> foo(1, 2) 3 >>> bar(3) 6 >>> @coroutine ... def func_maker(): ... a, b = yield ... # do some foo ... c, = yield foo(a, b) ... # do some bar ... yield bar(c) ... >>> func_once = func_maker() >>> func_once(1, 2) 3 >>> func_once(3) 6 The two differences are that a) using traditional functions, func1 and func2 don't share any context and b) using the decorator, both calls use the same function name, and calling the function is limited to wice (in this case). """ def decorator(*args, **kwargs): generator = func(*args, **kwargs) next(generator) return lambda *args: generator.send(args) return decorator
[ "def", "coroutine", "(", "func", ")", ":", "def", "decorator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "generator", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "next", "(", "generator", ")", "return", "lambda", "*", ...
A decorator to wrap a generator function into a callable interface. >>> @coroutine ... def sum(count): ... sum = 0 ... for _ in range(0, count): ... # note that generator arguments are passed as a tuple, hence `num, = ...` instead of `num = ...` ... num, = yield sum ... sum += num ... yield sum ... >>> add = sum(2) >>> add(2) 2 >>> add(3) 5 >>> add(4) Traceback (most recent call last): ... StopIteration As you can see, this lets you keep state between calls easily, as expected from a generator, while calling the function looks like a function. The same without `@coroutine` would look like this: >>> def sum(count): ... sum = 0 ... for _ in range(0, count): ... num = yield sum ... sum += num ... yield sum ... >>> add = sum(2) >>> next(add) # initial next call is necessary 0 >>> add.send(2) # to call the function, next or send must be used 2 >>> add.send(3) 5 >>> add.send(4) Traceback (most recent call last): ... StopIteration Here is an example that shows how to translate traditional functions to use this decorator: >>> def foo(a, b): ... # do some foo ... return a + b ... >>> def bar(c): ... # do some bar ... return 2*c ... >>> foo(1, 2) 3 >>> bar(3) 6 >>> @coroutine ... def func_maker(): ... a, b = yield ... # do some foo ... c, = yield foo(a, b) ... # do some bar ... yield bar(c) ... >>> func_once = func_maker() >>> func_once(1, 2) 3 >>> func_once(3) 6 The two differences are that a) using traditional functions, func1 and func2 don't share any context and b) using the decorator, both calls use the same function name, and calling the function is limited to wice (in this case).
[ "A", "decorator", "to", "wrap", "a", "generator", "function", "into", "a", "callable", "interface", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/ngsalign/minimap2.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/minimap2.py#L10-L42
def align(fastq_file, pair_file, index_dir, names, align_dir, data): """Perform piped alignment of fastq input files, generating sorted, deduplicated BAM. """ umi_ext = "-cumi" if "umi_bam" in data else "" out_file = os.path.join(align_dir, "{0}-sort{1}.bam".format(dd.get_sample_name(data), umi_ext)) num_cores = data["config"]["algorithm"].get("num_cores", 1) rg_info = novoalign.get_rg_info(names) preset = "sr" pair_file = pair_file if pair_file else "" if data.get("align_split"): final_file = out_file out_file, data = alignprep.setup_combine(final_file, data) fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data) else: final_file = None if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)): with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file): index_file = None # Skip trying to use indices now as they provide only slight speed-ups # and give inconsitent outputs in BAM headers # If a single index present, index_dir points to that # if index_dir and os.path.isfile(index_dir): # index_dir = os.path.dirname(index_dir) # index_file = os.path.join(index_dir, "%s-%s.mmi" % (dd.get_genome_build(data), preset)) if not index_file or not os.path.exists(index_file): index_file = dd.get_ref_file(data) cmd = ("minimap2 -a -x {preset} -R '{rg_info}' -t {num_cores} {index_file} " "{fastq_file} {pair_file} | ") do.run(cmd.format(**locals()) + tobam_cl, "minimap2 alignment: %s" % dd.get_sample_name(data)) data["work_bam"] = out_file return data
[ "def", "align", "(", "fastq_file", ",", "pair_file", ",", "index_dir", ",", "names", ",", "align_dir", ",", "data", ")", ":", "umi_ext", "=", "\"-cumi\"", "if", "\"umi_bam\"", "in", "data", "else", "\"\"", "out_file", "=", "os", ".", "path", ".", "join",...
Perform piped alignment of fastq input files, generating sorted, deduplicated BAM.
[ "Perform", "piped", "alignment", "of", "fastq", "input", "files", "generating", "sorted", "deduplicated", "BAM", "." ]
python
train
KeplerGO/K2fov
K2fov/K2findCampaigns.py
https://github.com/KeplerGO/K2fov/blob/fb122b35687340e0357cba9e0dd47b3be0760693/K2fov/K2findCampaigns.py#L49-L86
def findCampaignsByName(target): """Returns a list of the campaigns that cover a given target. Parameters ---------- target : str Name of the celestial object. Returns ------- campaigns : list of int A list of the campaigns that cover the given target name. ra, dec : float, float Resolved coordinates in decimal degrees (J2000). Exceptions ---------- Raises an ImportError if AstroPy is not installed. Raises a ValueError if `name` cannot be resolved to coordinates. """ # Is AstroPy (optional dependency) installed? try: from astropy.coordinates import SkyCoord from astropy.coordinates.name_resolve import NameResolveError from astropy.utils.data import conf conf.remote_timeout = 90 except ImportError: print('Error: AstroPy needs to be installed for this feature.') sys.exit(1) # Translate the target name into celestial coordinates try: crd = SkyCoord.from_name(target) except NameResolveError: raise ValueError('Could not find coordinates ' 'for target "{0}".'.format(target)) # Find the campaigns with visibility return findCampaigns(crd.ra.deg, crd.dec.deg), crd.ra.deg, crd.dec.deg
[ "def", "findCampaignsByName", "(", "target", ")", ":", "# Is AstroPy (optional dependency) installed?", "try", ":", "from", "astropy", ".", "coordinates", "import", "SkyCoord", "from", "astropy", ".", "coordinates", ".", "name_resolve", "import", "NameResolveError", "fr...
Returns a list of the campaigns that cover a given target. Parameters ---------- target : str Name of the celestial object. Returns ------- campaigns : list of int A list of the campaigns that cover the given target name. ra, dec : float, float Resolved coordinates in decimal degrees (J2000). Exceptions ---------- Raises an ImportError if AstroPy is not installed. Raises a ValueError if `name` cannot be resolved to coordinates.
[ "Returns", "a", "list", "of", "the", "campaigns", "that", "cover", "a", "given", "target", "." ]
python
train
JNRowe/upoints
upoints/utils.py
https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/utils.py#L206-L228
def prepare_xml_read(data, objectify=False): """Prepare various input types for XML parsing. Args: data (iter): Data to read objectify (bool): Parse using lxml's objectify data binding Returns: etree.ElementTree: Tree suitable for parsing Raises: TypeError: Invalid value for data """ mod = _objectify if objectify else etree if hasattr(data, 'readlines'): data = mod.parse(data).getroot() elif isinstance(data, list): data = mod.fromstring(''.join(data)) elif isinstance(data, basestring): data = mod.parse(open(data)).getroot() else: raise TypeError('Unable to handle data of type %r' % type(data)) return data
[ "def", "prepare_xml_read", "(", "data", ",", "objectify", "=", "False", ")", ":", "mod", "=", "_objectify", "if", "objectify", "else", "etree", "if", "hasattr", "(", "data", ",", "'readlines'", ")", ":", "data", "=", "mod", ".", "parse", "(", "data", "...
Prepare various input types for XML parsing. Args: data (iter): Data to read objectify (bool): Parse using lxml's objectify data binding Returns: etree.ElementTree: Tree suitable for parsing Raises: TypeError: Invalid value for data
[ "Prepare", "various", "input", "types", "for", "XML", "parsing", "." ]
python
train
BlueBrain/NeuroM
neurom/check/neuron_checks.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/neuron_checks.py#L128-L147
def has_all_nonzero_segment_lengths(neuron, threshold=0.0): '''Check presence of neuron segments with length not above threshold Arguments: neuron(Neuron): The neuron object to test threshold(float): value above which a segment length is considered to be non-zero Returns: CheckResult with result including list of (section_id, segment_id) of zero length segments ''' bad_ids = [] for sec in _nf.iter_sections(neuron): p = sec.points for i, s in enumerate(zip(p[:-1], p[1:])): if segment_length(s) <= threshold: bad_ids.append((sec.id, i)) return CheckResult(len(bad_ids) == 0, bad_ids)
[ "def", "has_all_nonzero_segment_lengths", "(", "neuron", ",", "threshold", "=", "0.0", ")", ":", "bad_ids", "=", "[", "]", "for", "sec", "in", "_nf", ".", "iter_sections", "(", "neuron", ")", ":", "p", "=", "sec", ".", "points", "for", "i", ",", "s", ...
Check presence of neuron segments with length not above threshold Arguments: neuron(Neuron): The neuron object to test threshold(float): value above which a segment length is considered to be non-zero Returns: CheckResult with result including list of (section_id, segment_id) of zero length segments
[ "Check", "presence", "of", "neuron", "segments", "with", "length", "not", "above", "threshold" ]
python
train
crossbario/txaio
txaio/aio.py
https://github.com/crossbario/txaio/blob/29c77ff1210cabd4cc03f16f34672612e7eef704/txaio/aio.py#L452-L474
def make_batched_timer(self, bucket_seconds, chunk_size=100): """ Creates and returns an object implementing :class:`txaio.IBatchedTimer`. :param bucket_seconds: the number of seconds in each bucket. That is, a value of 5 means that any timeout within a 5 second window will be in the same bucket, and get notified at the same time. This is only accurate to "milliseconds". :param chunk_size: when "doing" the callbacks in a particular bucket, this controls how many we do at once before yielding to the reactor. """ def get_seconds(): return self._config.loop.time() return _BatchedTimer( bucket_seconds * 1000.0, chunk_size, seconds_provider=get_seconds, delayed_call_creator=self.call_later, )
[ "def", "make_batched_timer", "(", "self", ",", "bucket_seconds", ",", "chunk_size", "=", "100", ")", ":", "def", "get_seconds", "(", ")", ":", "return", "self", ".", "_config", ".", "loop", ".", "time", "(", ")", "return", "_BatchedTimer", "(", "bucket_sec...
Creates and returns an object implementing :class:`txaio.IBatchedTimer`. :param bucket_seconds: the number of seconds in each bucket. That is, a value of 5 means that any timeout within a 5 second window will be in the same bucket, and get notified at the same time. This is only accurate to "milliseconds". :param chunk_size: when "doing" the callbacks in a particular bucket, this controls how many we do at once before yielding to the reactor.
[ "Creates", "and", "returns", "an", "object", "implementing", ":", "class", ":", "txaio", ".", "IBatchedTimer", "." ]
python
train
manns/pyspread
pyspread/src/gui/_gui_interfaces.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_gui_interfaces.py#L132-L165
def get_filepath_findex_from_user(self, wildcard, message, style, filterindex=0): """Opens a file dialog and returns filepath and filterindex Parameters ---------- wildcard: String \tWildcard string for file dialog message: String \tMessage in the file dialog style: Integer \tDialog style, e. g. wx.OPEN | wx.CHANGE_DIR filterindex: Integer, defaults to 0 \tDefault filterindex that is selected when the dialog is displayed """ dlg = wx.FileDialog(self.main_window, wildcard=wildcard, message=message, style=style, defaultDir=os.getcwd(), defaultFile="") # Set the initial filterindex dlg.SetFilterIndex(filterindex) filepath = None filter_index = None if dlg.ShowModal() == wx.ID_OK: filepath = dlg.GetPath() filter_index = dlg.GetFilterIndex() dlg.Destroy() return filepath, filter_index
[ "def", "get_filepath_findex_from_user", "(", "self", ",", "wildcard", ",", "message", ",", "style", ",", "filterindex", "=", "0", ")", ":", "dlg", "=", "wx", ".", "FileDialog", "(", "self", ".", "main_window", ",", "wildcard", "=", "wildcard", ",", "messag...
Opens a file dialog and returns filepath and filterindex Parameters ---------- wildcard: String \tWildcard string for file dialog message: String \tMessage in the file dialog style: Integer \tDialog style, e. g. wx.OPEN | wx.CHANGE_DIR filterindex: Integer, defaults to 0 \tDefault filterindex that is selected when the dialog is displayed
[ "Opens", "a", "file", "dialog", "and", "returns", "filepath", "and", "filterindex" ]
python
train
NLeSC/noodles
noodles/lib/utility.py
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/noodles/lib/utility.py#L111-L128
def inverse_deep_map(f, root): """Sibling to |deep_map|. Recursively maps objects in a nested structure of ``list`` and ``dict`` objects. Where |deep_map| starts at the top, |inverse_deep_map| starts at the bottom. First, if `root` is a ``list`` or ``dict``, its contents are |inverse_deep_map|ed. Then at the end, the entire object is passed through `f`. This function was created with decoding from JSON compatible data in mind. .. |inverse_deep_map| replace:: :py:func:`inverse_deep_map`""" if isinstance(root, dict): r = {k: inverse_deep_map(f, v) for k, v in root.items()} elif isinstance(root, list): r = [inverse_deep_map(f, v) for v in root] else: r = root return f(r)
[ "def", "inverse_deep_map", "(", "f", ",", "root", ")", ":", "if", "isinstance", "(", "root", ",", "dict", ")", ":", "r", "=", "{", "k", ":", "inverse_deep_map", "(", "f", ",", "v", ")", "for", "k", ",", "v", "in", "root", ".", "items", "(", ")"...
Sibling to |deep_map|. Recursively maps objects in a nested structure of ``list`` and ``dict`` objects. Where |deep_map| starts at the top, |inverse_deep_map| starts at the bottom. First, if `root` is a ``list`` or ``dict``, its contents are |inverse_deep_map|ed. Then at the end, the entire object is passed through `f`. This function was created with decoding from JSON compatible data in mind. .. |inverse_deep_map| replace:: :py:func:`inverse_deep_map`
[ "Sibling", "to", "|deep_map|", ".", "Recursively", "maps", "objects", "in", "a", "nested", "structure", "of", "list", "and", "dict", "objects", ".", "Where", "|deep_map|", "starts", "at", "the", "top", "|inverse_deep_map|", "starts", "at", "the", "bottom", "."...
python
train
mitsei/dlkit
dlkit/handcar/type/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/type/managers.py#L72-L93
def get_type_admin_session(self): """Gets the OsidSession associated with the type admin service. return: (osid.type.TypeAdminSession) - a TypeAdminSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_type_admin() is false compliance: optional - This method must be implemented if supports_type_admin() is true. """ pass if not self.supports_type_admin(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() try: session = sessions.TypeAdminSession() except AttributeError: raise # OperationFailed() return session
[ "def", "get_type_admin_session", "(", "self", ")", ":", "pass", "if", "not", "self", ".", "supports_type_admin", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "# Opera...
Gets the OsidSession associated with the type admin service. return: (osid.type.TypeAdminSession) - a TypeAdminSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_type_admin() is false compliance: optional - This method must be implemented if supports_type_admin() is true.
[ "Gets", "the", "OsidSession", "associated", "with", "the", "type", "admin", "service", "." ]
python
train
6809/MC6809
MC6809/components/memory.py
https://github.com/6809/MC6809/blob/6ba2f5106df46689017b5d0b6d84d43b7ee6a240/MC6809/components/memory.py#L311-L315
def get(self, start, end): """ used in unittests """ return [self.read_byte(addr) for addr in range(start, end)]
[ "def", "get", "(", "self", ",", "start", ",", "end", ")", ":", "return", "[", "self", ".", "read_byte", "(", "addr", ")", "for", "addr", "in", "range", "(", "start", ",", "end", ")", "]" ]
used in unittests
[ "used", "in", "unittests" ]
python
train
saltstack/salt
salt/utils/openstack/neutron.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/neutron.py#L411-L420
def create_subnet(self, network, cidr, name=None, ip_version=4): ''' Creates a new subnet ''' net_id = self._find_network_id(network) body = {'cidr': cidr, 'ip_version': ip_version, 'network_id': net_id, 'name': name} return self.network_conn.create_subnet(body={'subnet': body})
[ "def", "create_subnet", "(", "self", ",", "network", ",", "cidr", ",", "name", "=", "None", ",", "ip_version", "=", "4", ")", ":", "net_id", "=", "self", ".", "_find_network_id", "(", "network", ")", "body", "=", "{", "'cidr'", ":", "cidr", ",", "'ip...
Creates a new subnet
[ "Creates", "a", "new", "subnet" ]
python
train
tensorflow/tensorboard
tensorboard/program.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L149-L199
def configure(self, argv=('',), **kwargs): """Configures TensorBoard behavior via flags. This method will populate the "flags" property with an argparse.Namespace representing flag values parsed from the provided argv list, overridden by explicit flags from remaining keyword arguments. Args: argv: Can be set to CLI args equivalent to sys.argv; the first arg is taken to be the name of the path being executed. kwargs: Additional arguments will override what was parsed from argv. They must be passed as Python data structures, e.g. `foo=1` rather than `foo="1"`. Returns: Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism for absl.app.run() compatibility. Raises: ValueError: If flag values are invalid. """ parser = argparse_flags.ArgumentParser( prog='tensorboard', description=('TensorBoard is a suite of web applications for ' 'inspecting and understanding your TensorFlow runs ' 'and graphs. https://github.com/tensorflow/tensorboard ')) for loader in self.plugin_loaders: loader.define_flags(parser) arg0 = argv[0] if argv else '' flags = parser.parse_args(argv[1:]) # Strip binary name from argv. self.cache_key = manager.cache_key( working_directory=os.getcwd(), arguments=argv[1:], configure_kwargs=kwargs, ) if absl_flags and arg0: # Only expose main module Abseil flags as TensorBoard native flags. # This is the same logic Abseil's ArgumentParser uses for determining # which Abseil flags to include in the short helpstring. for flag in set(absl_flags.FLAGS.get_key_flags_for_module(arg0)): if hasattr(flags, flag.name): raise ValueError('Conflicting Abseil flag: %s' % flag.name) setattr(flags, flag.name, flag.value) for k, v in kwargs.items(): if not hasattr(flags, k): raise ValueError('Unknown TensorBoard flag: %s' % k) setattr(flags, k, v) for loader in self.plugin_loaders: loader.fix_flags(flags) self.flags = flags return [arg0]
[ "def", "configure", "(", "self", ",", "argv", "=", "(", "''", ",", ")", ",", "*", "*", "kwargs", ")", ":", "parser", "=", "argparse_flags", ".", "ArgumentParser", "(", "prog", "=", "'tensorboard'", ",", "description", "=", "(", "'TensorBoard is a suite of ...
Configures TensorBoard behavior via flags. This method will populate the "flags" property with an argparse.Namespace representing flag values parsed from the provided argv list, overridden by explicit flags from remaining keyword arguments. Args: argv: Can be set to CLI args equivalent to sys.argv; the first arg is taken to be the name of the path being executed. kwargs: Additional arguments will override what was parsed from argv. They must be passed as Python data structures, e.g. `foo=1` rather than `foo="1"`. Returns: Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism for absl.app.run() compatibility. Raises: ValueError: If flag values are invalid.
[ "Configures", "TensorBoard", "behavior", "via", "flags", "." ]
python
train
yougov/mongo-connector
mongo_connector/doc_managers/mongo_doc_manager.py
https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/mongo_doc_manager.py#L115-L124
def _meta_collections(self): """Provides the meta collections currently being used """ if self.use_single_meta_collection: yield self.meta_collection_name else: for name in self.meta_database.collection_names( include_system_collections=False ): yield name
[ "def", "_meta_collections", "(", "self", ")", ":", "if", "self", ".", "use_single_meta_collection", ":", "yield", "self", ".", "meta_collection_name", "else", ":", "for", "name", "in", "self", ".", "meta_database", ".", "collection_names", "(", "include_system_col...
Provides the meta collections currently being used
[ "Provides", "the", "meta", "collections", "currently", "being", "used" ]
python
train
pyGrowler/Growler
growler/http/parser.py
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/parser.py#L280-L303
def determine_newline(data): """ Looks for a newline character in bytestring parameter 'data'. Currently only looks for strings '\r\n', '\n'. If '\n' is found at the first position of the string, this raises an exception. Parameters: data (bytes): The data to be searched Returns: None: If no-newline is found One of '\n', '\r\n': whichever is found first """ line_end_pos = data.find(b'\n') if line_end_pos == -1: return None elif line_end_pos == 0: return b'\n' prev_char = data[line_end_pos - 1] return b'\r\n' if (prev_char is b'\r'[0]) else b'\n'
[ "def", "determine_newline", "(", "data", ")", ":", "line_end_pos", "=", "data", ".", "find", "(", "b'\\n'", ")", "if", "line_end_pos", "==", "-", "1", ":", "return", "None", "elif", "line_end_pos", "==", "0", ":", "return", "b'\\n'", "prev_char", "=", "d...
Looks for a newline character in bytestring parameter 'data'. Currently only looks for strings '\r\n', '\n'. If '\n' is found at the first position of the string, this raises an exception. Parameters: data (bytes): The data to be searched Returns: None: If no-newline is found One of '\n', '\r\n': whichever is found first
[ "Looks", "for", "a", "newline", "character", "in", "bytestring", "parameter", "data", ".", "Currently", "only", "looks", "for", "strings", "\\", "r", "\\", "n", "\\", "n", ".", "If", "\\", "n", "is", "found", "at", "the", "first", "position", "of", "th...
python
train
Kane610/axis
axis/rtsp.py
https://github.com/Kane610/axis/blob/b2b44ce595c7b722b5e13eabcab7b91f048e1808/axis/rtsp.py#L63-L68
def stop(self): """Stop session.""" if self.transport: self.transport.write(self.method.TEARDOWN().encode()) self.transport.close() self.rtp.stop()
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "transport", ":", "self", ".", "transport", ".", "write", "(", "self", ".", "method", ".", "TEARDOWN", "(", ")", ".", "encode", "(", ")", ")", "self", ".", "transport", ".", "close", "(", "...
Stop session.
[ "Stop", "session", "." ]
python
train
tomekwojcik/flask-htauth
flask_htauth/extension.py
https://github.com/tomekwojcik/flask-htauth/blob/bb89bee3fa7d88de3147ae338048624e01de710b/flask_htauth/extension.py#L81-L109
def authenticated(viewfunc): """Decorate **viewfunc** with this decorator to require HTTP auth on the view.""" @wraps(viewfunc) def wrapper(*args, **kwargs): ctx = stack.top if ctx and hasattr(ctx, 'htauth'): auth_header = request.headers.get('Authorization', None) if not auth_header: return _unauthorized_response() if not auth_header.startswith('Basic '): raise RuntimeError('Flask-HTAuth supports only Basic auth.') auth_header = auth_header.replace('Basic ', '') auth_header = base64.b64decode(auth_header) username, password = auth_header.split(':', 1) if not username in ctx.htauth['users']: return _unauthorized_response() if not check_password(password, ctx.htauth['users'][username]): return _unauthorized_response() g.htauth_user = username return viewfunc(*args, **kwargs) return wrapper
[ "def", "authenticated", "(", "viewfunc", ")", ":", "@", "wraps", "(", "viewfunc", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ctx", "=", "stack", ".", "top", "if", "ctx", "and", "hasattr", "(", "ctx", ",", "'htauth'...
Decorate **viewfunc** with this decorator to require HTTP auth on the view.
[ "Decorate", "**", "viewfunc", "**", "with", "this", "decorator", "to", "require", "HTTP", "auth", "on", "the", "view", "." ]
python
train
twilio/twilio-python
twilio/rest/voice/v1/dialing_permissions/country/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/voice/v1/dialing_permissions/country/__init__.py#L115-L156
def page(self, iso_code=values.unset, continent=values.unset, country_code=values.unset, low_risk_numbers_enabled=values.unset, high_risk_special_numbers_enabled=values.unset, high_risk_tollfraud_numbers_enabled=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of CountryInstance records from the API. Request is executed immediately :param unicode iso_code: Filter to retrieve the country permissions by specifying the ISO country code :param unicode continent: Filter to retrieve the country permissions by specifying the continent :param unicode country_code: Country code filter :param bool low_risk_numbers_enabled: Filter to retrieve the country permissions with dialing to low-risk numbers enabled :param bool high_risk_special_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk special service numbers enabled :param bool high_risk_tollfraud_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk toll fraud numbers enabled :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of CountryInstance :rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryPage """ params = values.of({ 'IsoCode': iso_code, 'Continent': continent, 'CountryCode': country_code, 'LowRiskNumbersEnabled': low_risk_numbers_enabled, 'HighRiskSpecialNumbersEnabled': high_risk_special_numbers_enabled, 'HighRiskTollfraudNumbersEnabled': high_risk_tollfraud_numbers_enabled, 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return CountryPage(self._version, response, self._solution)
[ "def", "page", "(", "self", ",", "iso_code", "=", "values", ".", "unset", ",", "continent", "=", "values", ".", "unset", ",", "country_code", "=", "values", ".", "unset", ",", "low_risk_numbers_enabled", "=", "values", ".", "unset", ",", "high_risk_special_n...
Retrieve a single page of CountryInstance records from the API. Request is executed immediately :param unicode iso_code: Filter to retrieve the country permissions by specifying the ISO country code :param unicode continent: Filter to retrieve the country permissions by specifying the continent :param unicode country_code: Country code filter :param bool low_risk_numbers_enabled: Filter to retrieve the country permissions with dialing to low-risk numbers enabled :param bool high_risk_special_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk special service numbers enabled :param bool high_risk_tollfraud_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk toll fraud numbers enabled :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of CountryInstance :rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryPage
[ "Retrieve", "a", "single", "page", "of", "CountryInstance", "records", "from", "the", "API", ".", "Request", "is", "executed", "immediately" ]
python
train
pywbem/pywbem
pywbem/cim_obj.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_obj.py#L705-L760
def mofval(value, indent=MOF_INDENT, maxline=MAX_MOF_LINE, line_pos=0, end_space=0): """ Low level function that returns the MOF representation of a non-string value (i.e. a value that cannot not be split into multiple parts, for example a numeric or boolean value). If the MOF representation of the value does not fit into the remaining space of the current line, it is put into a new line, considering the specified indentation. If it also does not fit on the remaining space of the new line, ValueError is raised. Parameters: value (:term:`unicode string`): The non-string value. Must not be `None`. indent (:term:`integer`): Number of spaces to indent any new lines that are generated. maxline (:term:`integer`): Maximum line length for the generated MOF. line_pos (:term:`integer`): Length of content already on the current line. end_space (:term:`integer`): Length of space to be left free on the last line. Returns: tuple of * :term:`unicode string`: MOF string. * new line_pos Raises: ValueError: The value does not fit onto an entire new line. """ assert isinstance(value, six.text_type) # Check for output on current line avl_len = maxline - line_pos - end_space if len(value) <= avl_len: line_pos += len(value) return value, line_pos # Check for output on new line avl_len = maxline - indent - end_space if len(value) <= avl_len: mof_str = u'\n' + _indent_str(indent) + value line_pos = indent + len(value) return mof_str, line_pos raise ValueError( _format("Cannot fit value {0!A} onto new MOF line, missing {1} " "characters", value, len(value) - avl_len))
[ "def", "mofval", "(", "value", ",", "indent", "=", "MOF_INDENT", ",", "maxline", "=", "MAX_MOF_LINE", ",", "line_pos", "=", "0", ",", "end_space", "=", "0", ")", ":", "assert", "isinstance", "(", "value", ",", "six", ".", "text_type", ")", "# Check for o...
Low level function that returns the MOF representation of a non-string value (i.e. a value that cannot not be split into multiple parts, for example a numeric or boolean value). If the MOF representation of the value does not fit into the remaining space of the current line, it is put into a new line, considering the specified indentation. If it also does not fit on the remaining space of the new line, ValueError is raised. Parameters: value (:term:`unicode string`): The non-string value. Must not be `None`. indent (:term:`integer`): Number of spaces to indent any new lines that are generated. maxline (:term:`integer`): Maximum line length for the generated MOF. line_pos (:term:`integer`): Length of content already on the current line. end_space (:term:`integer`): Length of space to be left free on the last line. Returns: tuple of * :term:`unicode string`: MOF string. * new line_pos Raises: ValueError: The value does not fit onto an entire new line.
[ "Low", "level", "function", "that", "returns", "the", "MOF", "representation", "of", "a", "non", "-", "string", "value", "(", "i", ".", "e", ".", "a", "value", "that", "cannot", "not", "be", "split", "into", "multiple", "parts", "for", "example", "a", ...
python
train
NuGrid/NuGridPy
nugridpy/nugridse.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L779-L855
def ernst_table_exporter(self, cycle, outfname='table_out', sheetname='Sheet 1'): """ This routine takes NuGrid data (model output) for a given cycle and writes it into an Excel sheet. This is one format as requested by Ernst Zinner in June 2013 (through Marco). If you want all radioactive isotopes, start from the restart file. Empty columns are not written out and you will get a message how many were empty. Please note that only one cycle is written out. Parameters ---------- cycle : integer Number of the cycle to consider. outfname : string, optional File name to write it to, .xlsx is appended automatically. The default is 'table_out'. sheetname : string, optional Name of the sheet in the excel file. The default is 'Sheet 1'. """ from xlsxwriter.workbook import Workbook # https://xlsxwriter.readthedocs.org/ Note: We neex xlswriter. Please meake sure it is installed. Run pip install xlsxwriter to install it using pip. If pip is not installed, install it via easy_install pip. Depending on the system you are on, you might need sudo rights for thesethings.' # isotopes and data all_data = np.array(self.get(cycle,'iso_massf')) header_data = self.se.isotopes # get mass data mass_data = np.array(self.get(cycle,'mass'))[np.newaxis] # stack mass data and header together header_data = np.hstack((['Mass'],header_data)) all_data = np.hstack((mass_data.transpose(),all_data)) # zero the cells with 1.e-99 entry for i in range(len(all_data)): for j in range(len(all_data[i])): if all_data[i][j] == 1.e-99: all_data[i][j] = 0. # check how many columns have all zeros in the file colzero = 0 all_sum = all_data.sum(0) for i in range(len(all_sum)): if all_sum[i] == 0.: colzero += 1 print(str(colzero) + ' columns are empty. Skipping them.') # now filter data all_data_fil = np.zeros((len(all_data),len(all_data[0])-colzero)) header_data_fil = np.zeros((len(header_data)-colzero),dtype='|S9') k = 0 for j in range(len(all_data[0])): if all_sum[j] != 0: for i in range(len(all_data)): all_data_fil[i][k] = all_data[i][j] header_data_fil[k] = header_data[j] k += 1 # write to excel file excelfile = Workbook(outfname + '.xlsx') wsh = excelfile.add_worksheet(sheetname) print('If you run from a restart file, this might take a little bit. Be patient!') for i in range(len(all_data_fil)): for j in range(len(all_data_fil[i])): if i == 0: wsh.write(0,j,header_data_fil[j]) wsh.write(i+1,j,all_data_fil[i][j]) excelfile.close() return None
[ "def", "ernst_table_exporter", "(", "self", ",", "cycle", ",", "outfname", "=", "'table_out'", ",", "sheetname", "=", "'Sheet 1'", ")", ":", "from", "xlsxwriter", ".", "workbook", "import", "Workbook", "# https://xlsxwriter.readthedocs.org/ Note: We neex xlswriter. Please...
This routine takes NuGrid data (model output) for a given cycle and writes it into an Excel sheet. This is one format as requested by Ernst Zinner in June 2013 (through Marco). If you want all radioactive isotopes, start from the restart file. Empty columns are not written out and you will get a message how many were empty. Please note that only one cycle is written out. Parameters ---------- cycle : integer Number of the cycle to consider. outfname : string, optional File name to write it to, .xlsx is appended automatically. The default is 'table_out'. sheetname : string, optional Name of the sheet in the excel file. The default is 'Sheet 1'.
[ "This", "routine", "takes", "NuGrid", "data", "(", "model", "output", ")", "for", "a", "given", "cycle", "and", "writes", "it", "into", "an", "Excel", "sheet", "." ]
python
train