Dataset columns (string columns report min/max value length; categorical columns report the number of distinct values):

Column                        Type             Min      Max
Unnamed: 0                    int64            0        10k
repository_name               string length    7        54
func_path_in_repository       string length    5        223
func_name                     string length    1        134
whole_func_string             string length    100      30.3k
language                      categorical      1 value
func_code_string              string length    100      30.3k
func_code_tokens              string length    138      33.2k
func_documentation_string     string length    1        15k
func_documentation_tokens     string length    5        5.14k
split_name                    categorical      1 value
func_code_url                 string length    91       315
9,700
zalando/patroni
patroni/postgresql.py
Postgresql.restore_configuration_files
def restore_configuration_files(self):
    """ restore a previously saved postgresql.conf """
    try:
        for f in self._configuration_to_save:
            config_file = os.path.join(self._config_dir, f)
            backup_file = os.path.join(self._data_dir, f + '.backup')
            if not os.path.isfile(config_file):
                if os.path.isfile(backup_file):
                    shutil.copy(backup_file, config_file)
                # Previously we didn't backup pg_ident.conf, if file is missing just create empty
                elif f == 'pg_ident.conf':
                    open(config_file, 'w').close()
    except IOError:
        logger.exception('unable to restore configuration files from backup')
python
restore a previously saved postgresql.conf
train
https://github.com/zalando/patroni/blob/f6d29081c90af52064b981cdd877a07338d86038/patroni/postgresql.py#L1532-L1545
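As a rough, self-contained illustration of the restore pattern in the Patroni entry above: copy a saved '<name>.backup' file back into the config directory whenever the live file is missing. The directories and file list below are hypothetical stand-ins, not Patroni's actual values.

import logging
import os
import shutil

logger = logging.getLogger(__name__)

# Hypothetical locations; Patroni derives these from its own configuration.
CONFIG_DIR = '/tmp/demo_config'
DATA_DIR = '/tmp/demo_data'
FILES_TO_RESTORE = ['postgresql.conf', 'pg_hba.conf', 'pg_ident.conf']

def restore_from_backup(config_dir, data_dir, names):
    """Copy '<name>.backup' from data_dir back to config_dir for any missing file."""
    try:
        for name in names:
            config_file = os.path.join(config_dir, name)
            backup_file = os.path.join(data_dir, name + '.backup')
            if not os.path.isfile(config_file):
                if os.path.isfile(backup_file):
                    shutil.copy(backup_file, config_file)
                elif name == 'pg_ident.conf':
                    # No backup was taken for this file; just create an empty one.
                    open(config_file, 'w').close()
    except IOError:
        logger.exception('unable to restore configuration files from backup')

if __name__ == '__main__':
    os.makedirs(CONFIG_DIR, exist_ok=True)
    os.makedirs(DATA_DIR, exist_ok=True)
    restore_from_backup(CONFIG_DIR, DATA_DIR, FILES_TO_RESTORE)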
9,701
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/breakpoint.py
_BufferWatchCondition.remove
def remove(self, bw):
    """
    Removes a buffer watch identifier.

    @type  bw: L{BufferWatch}
    @param bw: Buffer watch identifier.

    @raise KeyError: The buffer watch identifier was already removed.
    """
    try:
        self.__ranges.remove(bw)
    except KeyError:
        if not bw.oneshot:
            raise
python
Removes a buffer watch identifier. @type bw: L{BufferWatch} @param bw: Buffer watch identifier. @raise KeyError: The buffer watch identifier was already removed.
train
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L1840-L1854
9,702
gwastro/pycbc
pycbc/workflow/minifollowups.py
setup_foreground_minifollowups
def setup_foreground_minifollowups(workflow, coinc_file, single_triggers,
                                   tmpltbank_file, insp_segs, insp_data_name,
                                   insp_anal_name, dax_output, out_dir, tags=None):
    """ Create plots that followup the Nth loudest coincident injection
    from a statmap produced HDF file.

    Parameters
    ----------
    workflow: pycbc.workflow.Workflow
        The core workflow instance we are populating
    coinc_file:
    single_triggers: list of pycbc.workflow.File
        A list cointaining the file objects associated with the merged
        single detector trigger files for each ifo.
    tmpltbank_file: pycbc.workflow.File
        The file object pointing to the HDF format template bank
    insp_segs: SegFile
        The segment file containing the data read and analyzed by each
        inspiral job.
    insp_data_name: str
        The name of the segmentlist storing data read.
    insp_anal_name: str
        The name of the segmentlist storing data analyzed.
    out_dir: path
        The directory to store minifollowups result plots and files
    tags: {None, optional}
        Tags to add to the minifollowups executables

    Returns
    -------
    layout: list
        A list of tuples which specify the displayed file layout for the
        minifollops plots.
    """
    logging.info('Entering minifollowups module')

    if not workflow.cp.has_section('workflow-minifollowups'):
        logging.info('There is no [workflow-minifollowups] section in configuration file')
        logging.info('Leaving minifollowups')
        return

    tags = [] if tags is None else tags
    makedir(dax_output)

    # turn the config file into a File class
    config_path = os.path.abspath(dax_output + '/' + '_'.join(tags) + 'foreground_minifollowup.ini')
    workflow.cp.write(open(config_path, 'w'))

    config_file = wdax.File(os.path.basename(config_path))
    config_file.PFN(urlparse.urljoin('file:', urllib.pathname2url(config_path)),
                    site='local')

    exe = Executable(workflow.cp, 'foreground_minifollowup',
                     ifos=workflow.ifos, out_dir=dax_output)

    node = exe.create_node()
    node.add_input_opt('--config-files', config_file)
    node.add_input_opt('--bank-file', tmpltbank_file)
    node.add_input_opt('--statmap-file', coinc_file)
    node.add_multiifo_input_list_opt('--single-detector-triggers', single_triggers)
    node.add_input_opt('--inspiral-segments', insp_segs)
    node.add_opt('--inspiral-data-read-name', insp_data_name)
    node.add_opt('--inspiral-data-analyzed-name', insp_anal_name)
    node.new_output_file_opt(workflow.analysis_time, '.dax', '--output-file', tags=tags)
    node.new_output_file_opt(workflow.analysis_time, '.dax.map', '--output-map', tags=tags)
    node.new_output_file_opt(workflow.analysis_time, '.tc.txt', '--transformation-catalog', tags=tags)

    name = node.output_files[0].name
    map_file = node.output_files[1]
    tc_file = node.output_files[2]

    node.add_opt('--workflow-name', name)
    node.add_opt('--output-dir', out_dir)

    workflow += node

    # execute this in a sub-workflow
    fil = node.output_files[0]

    # determine if a staging site has been specified
    try:
        staging_site = workflow.cp.get('workflow-foreground_minifollowups',
                                       'staging-site')
    except:
        staging_site = None

    job = dax.DAX(fil)
    job.addArguments('--basename %s' % os.path.splitext(os.path.basename(name))[0])
    Workflow.set_job_properties(job, map_file, tc_file, staging_site=staging_site)
    workflow._adag.addJob(job)
    dep = dax.Dependency(parent=node._dax_node, child=job)
    workflow._adag.addDependency(dep)
    logging.info('Leaving minifollowups module')
python
Create plots that followup the Nth loudest coincident injection from a statmap produced HDF file. Parameters ---------- workflow: pycbc.workflow.Workflow The core workflow instance we are populating coinc_file: single_triggers: list of pycbc.workflow.File A list cointaining the file objects associated with the merged single detector trigger files for each ifo. tmpltbank_file: pycbc.workflow.File The file object pointing to the HDF format template bank insp_segs: SegFile The segment file containing the data read and analyzed by each inspiral job. insp_data_name: str The name of the segmentlist storing data read. insp_anal_name: str The name of the segmentlist storing data analyzed. out_dir: path The directory to store minifollowups result plots and files tags: {None, optional} Tags to add to the minifollowups executables Returns ------- layout: list A list of tuples which specify the displayed file layout for the minifollops plots.
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/minifollowups.py#L32-L123
9,703
asyncdef/apyio
apyio/__init__.py
StringIO
def StringIO(*args, **kwargs):
    """StringIO constructor shim for the async wrapper."""
    raw = sync_io.StringIO(*args, **kwargs)
    return AsyncStringIOWrapper(raw)
python
StringIO constructor shim for the async wrapper.
train
https://github.com/asyncdef/apyio/blob/d6b914929269b8795ca4d6b1ede8a393841cbc29/apyio/__init__.py#L882-L885
9,704
sentinel-hub/sentinelhub-py
sentinelhub/download.py
_check_if_must_download
def _check_if_must_download(request_list, redownload):
    """ Updates request.will_download attribute of each request in request_list.

    **Note:** the function mutates the elements of the list!

    :param request_list: a list of ``DownloadRequest`` instances
    :type: list[DownloadRequest]
    :param redownload: tells whether to download the data again or not
    :type: bool
    """
    for request in request_list:
        request.will_download = (request.save_response or request.return_data) \
                                and (not request.is_downloaded() or redownload)
python
Updates request.will_download attribute of each request in request_list. **Note:** the function mutates the elements of the list! :param request_list: a list of ``DownloadRequest`` instances :type: list[DownloadRequest] :param redownload: tells whether to download the data again or not :type: bool
train
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/download.py#L202-L215
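To make the flag logic in the sentinelhub entry above concrete, here is a small self-contained sketch. The stand-in class below is hypothetical; it only mimics the three attributes the function actually reads, not the real DownloadRequest.

class FakeRequest:
    """Minimal stand-in mimicking the attributes _check_if_must_download reads."""
    def __init__(self, save_response, return_data, downloaded):
        self.save_response = save_response
        self.return_data = return_data
        self._downloaded = downloaded
        self.will_download = None

    def is_downloaded(self):
        return self._downloaded

def check_if_must_download(request_list, redownload):
    # Same rule as in the entry above: download only if the result is wanted
    # (saved or returned) and it is either missing or forced to re-download.
    for request in request_list:
        request.will_download = (request.save_response or request.return_data) \
                                and (not request.is_downloaded() or redownload)

requests = [
    FakeRequest(save_response=True, return_data=False, downloaded=False),   # -> True
    FakeRequest(save_response=True, return_data=True, downloaded=True),     # -> False
    FakeRequest(save_response=False, return_data=False, downloaded=False),  # -> False
]
check_if_must_download(requests, redownload=False)
print([r.will_download for r in requests])  # [True, False, False]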
9,705
HDI-Project/MLPrimitives
mlprimitives/candidates/dsp.py
SpectralMask.fit_freq_min_max
def fit_freq_min_max(self, training_signal):
    """Defines a spectral mask based on training data using min and max values of each
    frequency component

    Args:
        training_signal: Training data
    """
    window_length = len(self.window)
    window_weight = sum(self.window)
    max_mask = np.zeros(int(window_length / 2) + 1)
    min_mask = np.zeros(int(window_length / 2) + 1)

    for i in range(0, len(training_signal) - window_length - 1):
        rfft = np.fft.rfft(training_signal[i:i + window_length] * self.window)
        temp = np.abs(rfft) / window_weight
        max_mask = np.maximum(max_mask, temp)
        min_mask = np.minimum(min_mask, temp)

    self.mask_top = self.gain * max_mask
    self.mask_bottom = min_mask / self.gain
python
Defines a spectral mask based on training data using min and max values of each frequency component Args: training_signal: Training data
train
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/candidates/dsp.py#L79-L100
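A self-contained numpy sketch of the same min/max spectral-mask computation on a synthetic signal. The window, gain, and signal are made-up values, and the loop mirrors the method in the MLPrimitives entry above rather than calling the real class.

import numpy as np

# Made-up parameters for illustration.
window = np.hanning(64)
gain = 2.0
rng = np.random.default_rng(0)
training_signal = np.sin(2 * np.pi * 0.05 * np.arange(2048)) + 0.1 * rng.standard_normal(2048)

window_length = len(window)
window_weight = sum(window)
max_mask = np.zeros(int(window_length / 2) + 1)
min_mask = np.zeros(int(window_length / 2) + 1)

# Slide the window over the signal and track per-bin min/max magnitudes.
for i in range(0, len(training_signal) - window_length - 1):
    rfft = np.fft.rfft(training_signal[i:i + window_length] * window)
    temp = np.abs(rfft) / window_weight
    max_mask = np.maximum(max_mask, temp)
    min_mask = np.minimum(min_mask, temp)
    # Note: as in the original, min_mask starts at zero, so np.minimum keeps it
    # at zero for these non-negative magnitudes.

mask_top = gain * max_mask
mask_bottom = min_mask / gain
print(mask_top.shape, mask_bottom.shape)  # (33,) (33,)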
9,706
twisted/txaws
txaws/ec2/client.py
Parser.create_snapshot
def create_snapshot(self, xml_bytes):
    """Parse the XML returned by the C{CreateSnapshot} function.

    @param xml_bytes: XML bytes with a C{CreateSnapshotResponse} root
        element.
    @return: The L{Snapshot} instance created.

    TODO: ownerId, volumeSize, description.
    """
    root = XML(xml_bytes)
    snapshot_id = root.findtext("snapshotId")
    volume_id = root.findtext("volumeId")
    status = root.findtext("status")
    start_time = root.findtext("startTime")
    start_time = datetime.strptime(
        start_time[:19], "%Y-%m-%dT%H:%M:%S")
    progress = root.findtext("progress")[:-1]
    progress = float(progress or "0") / 100.
    return model.Snapshot(
        snapshot_id, volume_id, status, start_time, progress)
python
Parse the XML returned by the C{CreateSnapshot} function. @param xml_bytes: XML bytes with a C{CreateSnapshotResponse} root element. @return: The L{Snapshot} instance created. TODO: ownerId, volumeSize, description.
train
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L868-L887
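To illustrate the parsing steps in the txaws entry above, here is a standalone sketch using xml.etree.ElementTree on a simplified, namespace-free CreateSnapshotResponse payload. Real EC2 responses carry an XML namespace (which txaws' XML helper handles), so the sample document below is a hypothetical simplification.

from datetime import datetime
from xml.etree.ElementTree import XML

xml_bytes = b"""
<CreateSnapshotResponse>
    <snapshotId>snap-1234567890abcdef0</snapshotId>
    <volumeId>vol-049df61146c4d7901</volumeId>
    <status>pending</status>
    <startTime>2019-02-28T21:28:32.000Z</startTime>
    <progress>60%</progress>
</CreateSnapshotResponse>
"""

root = XML(xml_bytes)
snapshot_id = root.findtext("snapshotId")
volume_id = root.findtext("volumeId")
status = root.findtext("status")
# Drop the sub-second/timezone suffix before parsing, as the entry above does.
start_time = datetime.strptime(root.findtext("startTime")[:19], "%Y-%m-%dT%H:%M:%S")
# Strip the trailing '%' and convert to a 0-1 fraction.
progress = float(root.findtext("progress")[:-1] or "0") / 100.0
print(snapshot_id, volume_id, status, start_time, progress)  # ... 0.6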
9,707
prometheus/client_python
prometheus_client/metrics_core.py
Metric.add_sample
def add_sample(self, name, labels, value, timestamp=None, exemplar=None):
    """Add a sample to the metric.

    Internal-only, do not use."""
    self.samples.append(Sample(name, labels, value, timestamp, exemplar))
python
Add a sample to the metric. Internal-only, do not use.
train
https://github.com/prometheus/client_python/blob/31f5557e2e84ca4ffa9a03abf6e3f4d0c8b8c3eb/prometheus_client/metrics_core.py#L38-L42
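The docstring above flags add_sample as internal-only; typical prometheus_client code goes through the public metric classes, which create the underlying samples for you. A brief usage sketch (the metric name and label values are made up):

from prometheus_client import Counter, generate_latest

# A public-API counter; the library builds the samples itself.
REQUESTS = Counter('demo_requests_total', 'Demo request count', ['method'])

REQUESTS.labels(method='GET').inc()   # increment one labelled child
REQUESTS.labels(method='POST').inc(2)

# Render the current samples in the Prometheus text exposition format.
print(generate_latest().decode('utf-8'))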
9,708
edx/xblock-utils
xblockutils/studio_editable.py
StudioEditableXBlockMixin._make_field_info
def _make_field_info(self, field_name, field):
    """
    Create the information that the template needs to render a form field for this field.
    """
    supported_field_types = (
        (Integer, 'integer'),
        (Float, 'float'),
        (Boolean, 'boolean'),
        (String, 'string'),
        (List, 'list'),
        (DateTime, 'datepicker'),
        (JSONField, 'generic'),  # This is last so as a last resort we display a text field w/ the JSON string
    )
    if self.service_declaration("i18n"):
        ugettext = self.ugettext
    else:
        def ugettext(text):
            """ Dummy ugettext method that doesn't do anything """
            return text
    info = {
        'name': field_name,
        'display_name': ugettext(field.display_name) if field.display_name else "",
        'is_set': field.is_set_on(self),
        'default': field.default,
        'value': field.read_from(self),
        'has_values': False,
        'help': ugettext(field.help) if field.help else "",
        'allow_reset': field.runtime_options.get('resettable_editor', True),
        'list_values': None,  # Only available for List fields
        'has_list_values': False,  # True if list_values_provider exists, even if it returned no available options
    }
    for type_class, type_name in supported_field_types:
        if isinstance(field, type_class):
            info['type'] = type_name
            # If String fields are declared like String(..., multiline_editor=True), then call them "text" type:
            editor_type = field.runtime_options.get('multiline_editor')
            if type_class is String and editor_type:
                if editor_type == "html":
                    info['type'] = 'html'
                else:
                    info['type'] = 'text'
            if type_class is List and field.runtime_options.get('list_style') == "set":
                # List represents unordered, unique items, optionally drawn from list_values_provider()
                info['type'] = 'set'
            elif type_class is List:
                info['type'] = "generic"  # disable other types of list for now until properly implemented
            break
    if "type" not in info:
        raise NotImplementedError("StudioEditableXBlockMixin currently only supports fields derived from JSONField")

    if info["type"] in ("list", "set"):
        info["value"] = [json.dumps(val) for val in info["value"]]
        info["default"] = json.dumps(info["default"])
    elif info["type"] == "generic":
        # Convert value to JSON string if we're treating this field generically:
        info["value"] = json.dumps(info["value"])
        info["default"] = json.dumps(info["default"])
    elif info["type"] == "datepicker":
        if info["value"]:
            info["value"] = info["value"].strftime("%m/%d/%Y")
        if info["default"]:
            info["default"] = info["default"].strftime("%m/%d/%Y")

    if 'values_provider' in field.runtime_options:
        values = field.runtime_options["values_provider"](self)
    else:
        values = field.values
    if values and not isinstance(field, Boolean):
        # This field has only a limited number of pre-defined options.
        # Protip: when defining the field, values= can be a callable.
        if isinstance(field.values, dict) and isinstance(field, (Float, Integer)):
            # e.g. {"min": 0 , "max": 10, "step": .1}
            for option in field.values:
                if option in ("min", "max", "step"):
                    info[option] = field.values.get(option)
                else:
                    raise KeyError("Invalid 'values' key. Should be like values={'min': 1, 'max': 10, 'step': 1}")
        elif isinstance(values[0], dict) and "display_name" in values[0] and "value" in values[0]:
            # e.g. [ {"display_name": "Always", "value": "always"}, ... ]
            for value in values:
                assert "display_name" in value and "value" in value
            info['values'] = values
        else:
            # e.g. [1, 2, 3] - we need to convert it to the [{"display_name": x, "value": x}] format
            info['values'] = [{"display_name": text_type(val), "value": val} for val in values]
    info['has_values'] = 'values' in info

    if info["type"] in ("list", "set") and field.runtime_options.get('list_values_provider'):
        list_values = field.runtime_options['list_values_provider'](self)
        # list_values must be a list of values or {"display_name": x, "value": y} objects
        # Furthermore, we need to convert all values to JSON since they could be of any type
        if list_values and isinstance(list_values[0], dict) and "display_name" in list_values[0]:
            # e.g. [ {"display_name": "Always", "value": "always"}, ... ]
            for entry in list_values:
                assert "display_name" in entry and "value" in entry
                entry["value"] = json.dumps(entry["value"])
        else:
            # e.g. [1, 2, 3] - we need to convert it to the [{"display_name": x, "value": x}] format
            list_values = [json.dumps(val) for val in list_values]
            list_values = [{"display_name": text_type(val), "value": val} for val in list_values]
        info['list_values'] = list_values
        info['has_list_values'] = True

    return info
python
Create the information that the template needs to render a form field for this field.
train
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/studio_editable.py#L100-L202
9,709
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
Client.list_flavors
def list_flavors(self, retrieve_all=True, **_params):
    """Fetches a list of all Neutron service flavors for a project."""
    return self.list('flavors', self.flavors_path, retrieve_all,
                     **_params)
python
Fetches a list of all Neutron service flavors for a project.
train
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L1643-L1646
9,710
theislab/scanpy
scanpy/queries/__init__.py
gene_coordinates
def gene_coordinates(host, org, gene, chr_exclude=[]) -> pd.DataFrame:
    """Retrieve gene coordinates for specific organism through BioMart.

    Parameters
    ----------
    host : {{'www.ensembl.org', ...}}
        A valid BioMart host URL. Can be used to control genome build.
    org : {{'hsapiens', 'mmusculus', 'drerio'}}
        Organism to query. Currently available are human ('hsapiens'), mouse
        ('mmusculus') and zebrafish ('drerio').
    gene :
        The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve
        coordinates.
    chr_exclude :
        A list of chromosomes to exclude from query.

    Returns
    -------
    A `pd.DataFrame` containing gene coordinates for the specified gene symbol.
    """
    try:
        from bioservices import biomart
    except ImportError:
        raise ImportError(
            'You need to install the `bioservices` module.')
    from io import StringIO
    s = biomart.BioMart(host=host)

    # building query
    s.new_query()
    if org == 'hsapiens':
        s.add_dataset_to_xml('hsapiens_gene_ensembl')
        s.add_attribute_to_xml('hgnc_symbol')
    elif org == 'mmusculus':
        s.add_dataset_to_xml('mmusculus_gene_ensembl')
        s.add_attribute_to_xml('mgi_symbol')
    elif org == 'drerio':
        s.add_dataset_to_xml('drerio_gene_ensembl')
        s.add_attribute_to_xml('zfin_id_symbol')
    else:
        logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
        return None
    s.add_attribute_to_xml('chromosome_name')
    s.add_attribute_to_xml('start_position')
    s.add_attribute_to_xml('end_position')
    xml = s.get_xml()

    # parsing gene coordinates
    res = pd.read_csv(StringIO(s.query(xml)), sep='\t', header=None)
    res.columns = ['symbol', 'chromosome_name', 'start', 'end']
    res = res.dropna()
    res = res[~res['chromosome_name'].isin(chr_exclude)]
    res = res.set_index('symbol')

    return res.loc[[gene], :]
python
Retrieve gene coordinates for specific organism through BioMart. Parameters ---------- host : {{'www.ensembl.org', ...}} A valid BioMart host URL. Can be used to control genome build. org : {{'hsapiens', 'mmusculus', 'drerio'}} Organism to query. Currently available are human ('hsapiens'), mouse ('mmusculus') and zebrafish ('drerio'). gene : The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve coordinates. chr_exclude : A list of chromosomes to exclude from query. Returns ------- A `pd.DataFrame` containing gene coordinates for the specified gene symbol.
train
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/queries/__init__.py#L56-L108
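A hedged usage sketch for the scanpy entry above, matching the documented signature. It assumes the optional bioservices dependency is installed and that the BioMart host is reachable over the network; the gene symbol 'TP53' is just an example.

from scanpy.queries import gene_coordinates

# Human TP53, queried from the main Ensembl BioMart host (needs network access).
coords = gene_coordinates('www.ensembl.org', 'hsapiens', 'TP53')
print(coords)  # one row per match, indexed by symbol, with chromosome_name/start/end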
9,711
moonlitesolutions/SolrClient
SolrClient/zk.py
ZK.copy_config
def copy_config(self, original, new):
    '''
    Copies collection configs into a new folder. Can be used to create new collections based on existing configs.

    Basically, copies all nodes under /configs/original to /configs/new.

    :param original str: ZK name of original config
    :param new str: New name of the ZK config.
    '''
    if not self.kz.exists('/configs/{}'.format(original)):
        raise ZookeeperError("Collection doesn't exist in Zookeeper. Current Collections are: {}".format(self.kz.get_children('/configs')))
    base = '/configs/{}'.format(original)
    nbase = '/configs/{}'.format(new)
    self._copy_dir(base, nbase)
python
Copies collection configs into a new folder. Can be used to create new collections based on existing configs. Basically, copies all nodes under /configs/original to /configs/new. :param original str: ZK name of original config :param new str: New name of the ZK config.
train
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/zk.py#L98-L111
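A short usage sketch for the SolrClient entry above. How the ZK helper is constructed is not shown in this entry, so the zk object below is assumed to already exist (for example via an already-configured SolrClient instance), and both config names are hypothetical.

# Assuming `zk` is an already-constructed SolrClient ZK helper.
# Clone an existing collection config so a new collection can be created from it;
# this copies every node under /configs/base_collection_config to the new path.
zk.copy_config('base_collection_config', 'base_collection_config_copy')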
9,712
insightindustry/validator-collection
validator_collection/validators.py
email
def email(value, allow_empty = False, **kwargs):
    """Validate that ``value`` is a valid email address.

    .. note::

      Email address validation is...complicated. The methodology that we have
      adopted here is *generally* compliant with
      `RFC 5322 <https://tools.ietf.org/html/rfc5322>`_ and uses a combination of
      string parsing and regular expressions. String parsing in particular is used
      to validate certain *highly unusual* but still valid email patterns,
      including the use of escaped text and comments within an email address'
      local address (the user name part). This approach ensures more complete
      coverage for unusual edge cases, while still letting us use regular
      expressions that perform quickly.

    :param value: The value to validate.
    :type value: :class:`str <python:str>` / :obj:`None <python:None>`

    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is empty. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is empty. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :returns: ``value`` / :obj:`None <python:None>`
    :rtype: :class:`str <python:str>` / :obj:`None <python:None>`

    :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
    :raises CannotCoerceError: if ``value`` is not a :class:`str <python:str>` or
      :obj:`None <python:None>`
    :raises InvalidEmailError: if ``value`` is not a valid email address or
      empty with ``allow_empty`` set to ``True``
    """
    # pylint: disable=too-many-branches,too-many-statements,R0914
    if not value and not allow_empty:
        raise errors.EmptyValueError('value (%s) was empty' % value)
    elif not value:
        return None

    if not isinstance(value, basestring):
        raise errors.CannotCoerceError('value must be a valid string, '
                                       'was %s' % type(value))

    if '@' not in value:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)

    if '(' in value and ')' in value:
        open_parentheses = value.find('(')
        close_parentheses = value.find(')') + 1

        if close_parentheses < open_parentheses:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

        commented_value = value[open_parentheses:close_parentheses]
        value = value.replace(commented_value, '')
    elif '(' in value:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)
    elif ')' in value:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)

    if '<' in value or '>' in value:
        lt_position = value.find('<')
        gt_position = value.find('>')
        first_quote_position = -1
        second_quote_position = -1

        if lt_position >= 0:
            first_quote_position = value.find('"', 0, lt_position)
        if gt_position >= 0:
            second_quote_position = value.find('"', gt_position)

        if first_quote_position < 0 or second_quote_position < 0:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

    at_count = value.count('@')
    if at_count > 1:
        last_at_position = 0
        last_quote_position = 0
        for x in range(0, at_count):  # pylint: disable=W0612
            at_position = value.find('@', last_at_position + 1)
            if at_position >= 0:
                first_quote_position = value.find('"',
                                                  last_quote_position,
                                                  at_position)
                second_quote_position = value.find('"', first_quote_position)
                if first_quote_position < 0 or second_quote_position < 0:
                    raise errors.InvalidEmailError(
                        'value (%s) is not a valid email address' % value
                    )
            last_at_position = at_position
            last_quote_position = second_quote_position

    split_values = value.split('@')
    if len(split_values) < 2:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)

    local_value = ''.join(split_values[:-1])
    domain_value = split_values[-1]

    is_domain = False
    is_ip = False
    try:
        if domain_value.startswith('[') and domain_value.endswith(']'):
            domain_value = domain_value[1:-1]
        domain(domain_value)
        is_domain = True
    except ValueError:
        is_domain = False

    if not is_domain:
        try:
            ip_address(domain_value, force_run = True)  # pylint: disable=E1123
            is_ip = True
        except ValueError:
            is_ip = False

    if not is_domain and is_ip:
        try:
            email(local_value + '@test.com', force_run = True)  # pylint: disable=E1123
        except ValueError:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

        return value

    if not is_domain:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)
    else:
        is_valid = EMAIL_REGEX.search(value)

        if not is_valid:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

        matched_string = is_valid.group(0)
        position = value.find(matched_string)
        if position > 0:
            prefix = value[:position]
            if prefix[0] in string_.punctuation:
                raise errors.InvalidEmailError('value (%s) is not a valid email '
                                               'address' % value)
            if '..' in prefix:
                raise errors.InvalidEmailError('value (%s) is not a valid email '
                                               'address' % value)

        end_of_match = position + len(matched_string)
        suffix = value[end_of_match:]
        if suffix:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

    return value
python
def email(value, allow_empty = False, **kwargs): """Validate that ``value`` is a valid email address. .. note:: Email address validation is...complicated. The methodology that we have adopted here is *generally* compliant with `RFC 5322 <https://tools.ietf.org/html/rfc5322>`_ and uses a combination of string parsing and regular expressions. String parsing in particular is used to validate certain *highly unusual* but still valid email patterns, including the use of escaped text and comments within an email address' local address (the user name part). This approach ensures more complete coverage for unusual edge cases, while still letting us use regular expressions that perform quickly. :param value: The value to validate. :type value: :class:`str <python:str>` / :obj:`None <python:None>` :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :rtype: :class:`str <python:str>` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False`` :raises CannotCoerceError: if ``value`` is not a :class:`str <python:str>` or :obj:`None <python:None>` :raises InvalidEmailError: if ``value`` is not a valid email address or empty with ``allow_empty`` set to ``True`` """ # pylint: disable=too-many-branches,too-many-statements,R0914 if not value and not allow_empty: raise errors.EmptyValueError('value (%s) was empty' % value) elif not value: return None if not isinstance(value, basestring): raise errors.CannotCoerceError('value must be a valid string, ' 'was %s' % type(value)) if '@' not in value: raise errors.InvalidEmailError('value (%s) is not a valid email address' % value) if '(' in value and ')' in value: open_parentheses = value.find('(') close_parentheses = value.find(')') + 1 if close_parentheses < open_parentheses: raise errors.InvalidEmailError('value (%s) is not a valid email ' 'address' % value) commented_value = value[open_parentheses:close_parentheses] value = value.replace(commented_value, '') elif '(' in value: raise errors.InvalidEmailError('value (%s) is not a valid email address' % value) elif ')' in value: raise errors.InvalidEmailError('value (%s) is not a valid email address' % value) if '<' in value or '>' in value: lt_position = value.find('<') gt_position = value.find('>') first_quote_position = -1 second_quote_position = -1 if lt_position >= 0: first_quote_position = value.find('"', 0, lt_position) if gt_position >= 0: second_quote_position = value.find('"', gt_position) if first_quote_position < 0 or second_quote_position < 0: raise errors.InvalidEmailError('value (%s) is not a valid email ' 'address' % value) at_count = value.count('@') if at_count > 1: last_at_position = 0 last_quote_position = 0 for x in range(0, at_count): # pylint: disable=W0612 at_position = value.find('@', last_at_position + 1) if at_position >= 0: first_quote_position = value.find('"', last_quote_position, at_position) second_quote_position = value.find('"', first_quote_position) if first_quote_position < 0 or second_quote_position < 0: raise errors.InvalidEmailError( 'value (%s) is not a valid email address' % value ) last_at_position = at_position last_quote_position = second_quote_position split_values = value.split('@') if len(split_values) < 2: raise errors.InvalidEmailError('value (%s) is 
not a valid email address' % value) local_value = ''.join(split_values[:-1]) domain_value = split_values[-1] is_domain = False is_ip = False try: if domain_value.startswith('[') and domain_value.endswith(']'): domain_value = domain_value[1:-1] domain(domain_value) is_domain = True except ValueError: is_domain = False if not is_domain: try: ip_address(domain_value, force_run = True) # pylint: disable=E1123 is_ip = True except ValueError: is_ip = False if not is_domain and is_ip: try: email(local_value + '@test.com', force_run = True) # pylint: disable=E1123 except ValueError: raise errors.InvalidEmailError('value (%s) is not a valid email ' 'address' % value) return value if not is_domain: raise errors.InvalidEmailError('value (%s) is not a valid email address' % value) else: is_valid = EMAIL_REGEX.search(value) if not is_valid: raise errors.InvalidEmailError('value (%s) is not a valid email ' 'address' % value) matched_string = is_valid.group(0) position = value.find(matched_string) if position > 0: prefix = value[:position] if prefix[0] in string_.punctuation: raise errors.InvalidEmailError('value (%s) is not a valid email ' 'address' % value) if '..' in prefix: raise errors.InvalidEmailError('value (%s) is not a valid email ' 'address' % value) end_of_match = position + len(matched_string) suffix = value[end_of_match:] if suffix: raise errors.InvalidEmailError('value (%s) is not a valid email ' 'address' % value) return value
['def', 'email', '(', 'value', ',', 'allow_empty', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', '# pylint: disable=too-many-branches,too-many-statements,R0914', 'if', 'not', 'value', 'and', 'not', 'allow_empty', ':', 'raise', 'errors', '.', 'EmptyValueError', '(', "'value (%s) was empty'", '%', 'value', ')', 'elif', 'not', 'value', ':', 'return', 'None', 'if', 'not', 'isinstance', '(', 'value', ',', 'basestring', ')', ':', 'raise', 'errors', '.', 'CannotCoerceError', '(', "'value must be a valid string, '", "'was %s'", '%', 'type', '(', 'value', ')', ')', 'if', "'@'", 'not', 'in', 'value', ':', 'raise', 'errors', '.', 'InvalidEmailError', '(', "'value (%s) is not a valid email address'", '%', 'value', ')', 'if', "'('", 'in', 'value', 'and', "')'", 'in', 'value', ':', 'open_parentheses', '=', 'value', '.', 'find', '(', "'('", ')', 'close_parentheses', '=', 'value', '.', 'find', '(', "')'", ')', '+', '1', 'if', 'close_parentheses', '<', 'open_parentheses', ':', 'raise', 'errors', '.', 'InvalidEmailError', '(', "'value (%s) is not a valid email '", "'address'", '%', 'value', ')', 'commented_value', '=', 'value', '[', 'open_parentheses', ':', 'close_parentheses', ']', 'value', '=', 'value', '.', 'replace', '(', 'commented_value', ',', "''", ')', 'elif', "'('", 'in', 'value', ':', 'raise', 'errors', '.', 'InvalidEmailError', '(', "'value (%s) is not a valid email address'", '%', 'value', ')', 'elif', "')'", 'in', 'value', ':', 'raise', 'errors', '.', 'InvalidEmailError', '(', "'value (%s) is not a valid email address'", '%', 'value', ')', 'if', "'<'", 'in', 'value', 'or', "'>'", 'in', 'value', ':', 'lt_position', '=', 'value', '.', 'find', '(', "'<'", ')', 'gt_position', '=', 'value', '.', 'find', '(', "'>'", ')', 'first_quote_position', '=', '-', '1', 'second_quote_position', '=', '-', '1', 'if', 'lt_position', '>=', '0', ':', 'first_quote_position', '=', 'value', '.', 'find', '(', '\'"\'', ',', '0', ',', 'lt_position', ')', 'if', 'gt_position', '>=', '0', ':', 'second_quote_position', '=', 'value', '.', 'find', '(', '\'"\'', ',', 'gt_position', ')', 'if', 'first_quote_position', '<', '0', 'or', 'second_quote_position', '<', '0', ':', 'raise', 'errors', '.', 'InvalidEmailError', '(', "'value (%s) is not a valid email '", "'address'", '%', 'value', ')', 'at_count', '=', 'value', '.', 'count', '(', "'@'", ')', 'if', 'at_count', '>', '1', ':', 'last_at_position', '=', '0', 'last_quote_position', '=', '0', 'for', 'x', 'in', 'range', '(', '0', ',', 'at_count', ')', ':', '# pylint: disable=W0612', 'at_position', '=', 'value', '.', 'find', '(', "'@'", ',', 'last_at_position', '+', '1', ')', 'if', 'at_position', '>=', '0', ':', 'first_quote_position', '=', 'value', '.', 'find', '(', '\'"\'', ',', 'last_quote_position', ',', 'at_position', ')', 'second_quote_position', '=', 'value', '.', 'find', '(', '\'"\'', ',', 'first_quote_position', ')', 'if', 'first_quote_position', '<', '0', 'or', 'second_quote_position', '<', '0', ':', 'raise', 'errors', '.', 'InvalidEmailError', '(', "'value (%s) is not a valid email address'", '%', 'value', ')', 'last_at_position', '=', 'at_position', 'last_quote_position', '=', 'second_quote_position', 'split_values', '=', 'value', '.', 'split', '(', "'@'", ')', 'if', 'len', '(', 'split_values', ')', '<', '2', ':', 'raise', 'errors', '.', 'InvalidEmailError', '(', "'value (%s) is not a valid email address'", '%', 'value', ')', 'local_value', '=', "''", '.', 'join', '(', 'split_values', '[', ':', '-', '1', ']', ')', 'domain_value', '=', 'split_values', '[', '-', '1', ']', 
'is_domain', '=', 'False', 'is_ip', '=', 'False', 'try', ':', 'if', 'domain_value', '.', 'startswith', '(', "'['", ')', 'and', 'domain_value', '.', 'endswith', '(', "']'", ')', ':', 'domain_value', '=', 'domain_value', '[', '1', ':', '-', '1', ']', 'domain', '(', 'domain_value', ')', 'is_domain', '=', 'True', 'except', 'ValueError', ':', 'is_domain', '=', 'False', 'if', 'not', 'is_domain', ':', 'try', ':', 'ip_address', '(', 'domain_value', ',', 'force_run', '=', 'True', ')', '# pylint: disable=E1123', 'is_ip', '=', 'True', 'except', 'ValueError', ':', 'is_ip', '=', 'False', 'if', 'not', 'is_domain', 'and', 'is_ip', ':', 'try', ':', 'email', '(', 'local_value', '+', "'@test.com'", ',', 'force_run', '=', 'True', ')', '# pylint: disable=E1123', 'except', 'ValueError', ':', 'raise', 'errors', '.', 'InvalidEmailError', '(', "'value (%s) is not a valid email '", "'address'", '%', 'value', ')', 'return', 'value', 'if', 'not', 'is_domain', ':', 'raise', 'errors', '.', 'InvalidEmailError', '(', "'value (%s) is not a valid email address'", '%', 'value', ')', 'else', ':', 'is_valid', '=', 'EMAIL_REGEX', '.', 'search', '(', 'value', ')', 'if', 'not', 'is_valid', ':', 'raise', 'errors', '.', 'InvalidEmailError', '(', "'value (%s) is not a valid email '", "'address'", '%', 'value', ')', 'matched_string', '=', 'is_valid', '.', 'group', '(', '0', ')', 'position', '=', 'value', '.', 'find', '(', 'matched_string', ')', 'if', 'position', '>', '0', ':', 'prefix', '=', 'value', '[', ':', 'position', ']', 'if', 'prefix', '[', '0', ']', 'in', 'string_', '.', 'punctuation', ':', 'raise', 'errors', '.', 'InvalidEmailError', '(', "'value (%s) is not a valid email '", "'address'", '%', 'value', ')', 'if', "'..'", 'in', 'prefix', ':', 'raise', 'errors', '.', 'InvalidEmailError', '(', "'value (%s) is not a valid email '", "'address'", '%', 'value', ')', 'end_of_match', '=', 'position', '+', 'len', '(', 'matched_string', ')', 'suffix', '=', 'value', '[', 'end_of_match', ':', ']', 'if', 'suffix', ':', 'raise', 'errors', '.', 'InvalidEmailError', '(', "'value (%s) is not a valid email '", "'address'", '%', 'value', ')', 'return', 'value']
Validate that ``value`` is a valid email address. .. note:: Email address validation is...complicated. The methodology that we have adopted here is *generally* compliant with `RFC 5322 <https://tools.ietf.org/html/rfc5322>`_ and uses a combination of string parsing and regular expressions. String parsing in particular is used to validate certain *highly unusual* but still valid email patterns, including the use of escaped text and comments within an email address' local address (the user name part). This approach ensures more complete coverage for unusual edge cases, while still letting us use regular expressions that perform quickly. :param value: The value to validate. :type value: :class:`str <python:str>` / :obj:`None <python:None>` :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :rtype: :class:`str <python:str>` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False`` :raises CannotCoerceError: if ``value`` is not a :class:`str <python:str>` or :obj:`None <python:None>` :raises InvalidEmailError: if ``value`` is not a valid email address or empty with ``allow_empty`` set to ``True``
['Validate', 'that', 'value', 'is', 'a', 'valid', 'email', 'address', '.']
train
https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/validators.py#L2055-L2212
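A minimal usage sketch for the validator above. The import paths follow the repository layout shown in the URL (validator_collection/validators.py plus a sibling errors module) and should be treated as assumptions rather than documented API.

    # Hedged usage sketch: wrap the validator in try/except to turn the
    # library's exceptions into a simple pass/fail result.
    from validator_collection import validators, errors

    for candidate in ["jane.doe@example.com", "not-an-email", ""]:
        try:
            cleaned = validators.email(candidate)
            print("valid:", cleaned)
        except (errors.InvalidEmailError, errors.EmptyValueError):
            print("invalid:", repr(candidate))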
9,713
datastore/datastore
datastore/core/basic.py
SymlinkDatastore._follow_link
def _follow_link(self, value): '''Returns given `value` or, if it is a symlink, the `value` it names.''' seen_keys = set() while True: link_key = self._link_for_value(value) if not link_key: return value assert link_key not in seen_keys, 'circular symlink reference' seen_keys.add(link_key) value = super(SymlinkDatastore, self).get(link_key)
python
def _follow_link(self, value): '''Returns given `value` or, if it is a symlink, the `value` it names.''' seen_keys = set() while True: link_key = self._link_for_value(value) if not link_key: return value assert link_key not in seen_keys, 'circular symlink reference' seen_keys.add(link_key) value = super(SymlinkDatastore, self).get(link_key)
['def', '_follow_link', '(', 'self', ',', 'value', ')', ':', 'seen_keys', '=', 'set', '(', ')', 'while', 'True', ':', 'link_key', '=', 'self', '.', '_link_for_value', '(', 'value', ')', 'if', 'not', 'link_key', ':', 'return', 'value', 'assert', 'link_key', 'not', 'in', 'seen_keys', ',', "'circular symlink reference'", 'seen_keys', '.', 'add', '(', 'link_key', ')', 'value', '=', 'super', '(', 'SymlinkDatastore', ',', 'self', ')', '.', 'get', '(', 'link_key', ')']
Returns given `value` or, if it is a symlink, the `value` it names.
['Returns', 'given', 'value', 'or', 'if', 'it', 'is', 'a', 'symlink', 'the', 'value', 'it', 'names', '.']
train
https://github.com/datastore/datastore/blob/7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3/datastore/core/basic.py#L739-L749
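To make the cycle-guarded resolution loop above concrete without pulling in the datastore package, here is a self-contained sketch of the same pattern over a plain dict; the "link:" prefix convention is invented for the example.

    # Resolve a chain of links, refusing to loop forever on circular references.
    def follow_link(store, value):
        seen_keys = set()
        while isinstance(value, str) and value.startswith("link:"):
            link_key = value[len("link:"):]
            assert link_key not in seen_keys, "circular symlink reference"
            seen_keys.add(link_key)
            value = store[link_key]
        return value

    store = {"a": "link:b", "b": "link:c", "c": 42}
    print(follow_link(store, store["a"]))  # -> 42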
9,714
python-cmd2/cmd2
cmd2/utils.py
quote_string_if_needed
def quote_string_if_needed(arg: str) -> str: """ Quotes a string if it contains spaces and isn't already quoted """ if is_quoted(arg) or ' ' not in arg: return arg if '"' in arg: quote = "'" else: quote = '"' return quote + arg + quote
python
def quote_string_if_needed(arg: str) -> str: """ Quotes a string if it contains spaces and isn't already quoted """ if is_quoted(arg) or ' ' not in arg: return arg if '"' in arg: quote = "'" else: quote = '"' return quote + arg + quote
['def', 'quote_string_if_needed', '(', 'arg', ':', 'str', ')', '->', 'str', ':', 'if', 'is_quoted', '(', 'arg', ')', 'or', "' '", 'not', 'in', 'arg', ':', 'return', 'arg', 'if', '\'"\'', 'in', 'arg', ':', 'quote', '=', '"\'"', 'else', ':', 'quote', '=', '\'"\'', 'return', 'quote', '+', 'arg', '+', 'quote']
Quotes a string if it contains spaces and isn't already quoted
['Quotes', 'a', 'string', 'if', 'it', 'contains', 'spaces', 'and', 'isn', 't', 'already', 'quoted']
train
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/utils.py#L46-L56
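Because the function above depends on cmd2's own is_quoted helper, the quick check below re-implements both pieces locally (this is_quoted is a simplified stand-in, not cmd2's) just to show the expected behavior.

    def is_quoted(arg: str) -> bool:
        # Simplified stand-in: a string counts as quoted if it starts and ends
        # with the same quote character.
        return len(arg) > 1 and arg[0] == arg[-1] and arg[0] in ("'", '"')

    def quote_string_if_needed(arg: str) -> str:
        if is_quoted(arg) or ' ' not in arg:
            return arg
        quote = "'" if '"' in arg else '"'
        return quote + arg + quote

    print(quote_string_if_needed("hello"))           # hello
    print(quote_string_if_needed("hello world"))     # "hello world"
    print(quote_string_if_needed('say "hi" there'))  # 'say "hi" there'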
9,715
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/dvipdf.py
PDFEmitter
def PDFEmitter(target, source, env): """Strips any .aux or .log files from the input source list. These are created by the TeX Builder that in all likelihood was used to generate the .dvi file we're using as input, and we only care about the .dvi file. """ def strip_suffixes(n): return not SCons.Util.splitext(str(n))[1] in ['.aux', '.log'] source = [src for src in source if strip_suffixes(src)] return (target, source)
python
def PDFEmitter(target, source, env): """Strips any .aux or .log files from the input source list. These are created by the TeX Builder that in all likelihood was used to generate the .dvi file we're using as input, and we only care about the .dvi file. """ def strip_suffixes(n): return not SCons.Util.splitext(str(n))[1] in ['.aux', '.log'] source = [src for src in source if strip_suffixes(src)] return (target, source)
['def', 'PDFEmitter', '(', 'target', ',', 'source', ',', 'env', ')', ':', 'def', 'strip_suffixes', '(', 'n', ')', ':', 'return', 'not', 'SCons', '.', 'Util', '.', 'splitext', '(', 'str', '(', 'n', ')', ')', '[', '1', ']', 'in', '[', "'.aux'", ',', "'.log'", ']', 'source', '=', '[', 'src', 'for', 'src', 'in', 'source', 'if', 'strip_suffixes', '(', 'src', ')', ']', 'return', '(', 'target', ',', 'source', ')']
Strips any .aux or .log files from the input source list. These are created by the TeX Builder that in all likelihood was used to generate the .dvi file we're using as input, and we only care about the .dvi file.
['Strips', 'any', '.', 'aux', 'or', '.', 'log', 'files', 'from', 'the', 'input', 'source', 'list', '.', 'These', 'are', 'created', 'by', 'the', 'TeX', 'Builder', 'that', 'in', 'all', 'likelihood', 'was', 'used', 'to', 'generate', 'the', '.', 'dvi', 'file', 'we', 're', 'using', 'as', 'input', 'and', 'we', 'only', 'care', 'about', 'the', '.', 'dvi', 'file', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/dvipdf.py#L82-L91
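The emitter's only real logic is the suffix filter, which can be checked without SCons; the snippet below mirrors it with os.path.splitext.

    import os

    def strip_aux_and_log(sources):
        # Keep everything except TeX by-products (.aux / .log), matching the
        # emitter's strip_suffixes test.
        return [s for s in sources if os.path.splitext(str(s))[1] not in ('.aux', '.log')]

    print(strip_aux_and_log(['paper.dvi', 'paper.aux', 'paper.log']))  # ['paper.dvi']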
9,716
persephone-tools/persephone
persephone/datasets/bkw.py
pull_en_words
def pull_en_words() -> None: """ Fetches a repository containing English words. """ ENGLISH_WORDS_URL = "https://github.com/dwyl/english-words.git" en_words_path = Path(config.EN_WORDS_PATH) if not en_words_path.is_file(): subprocess.run(["git", "clone", ENGLISH_WORDS_URL, str(en_words_path.parent)])
python
def pull_en_words() -> None: """ Fetches a repository containing English words. """ ENGLISH_WORDS_URL = "https://github.com/dwyl/english-words.git" en_words_path = Path(config.EN_WORDS_PATH) if not en_words_path.is_file(): subprocess.run(["git", "clone", ENGLISH_WORDS_URL, str(en_words_path.parent)])
['def', 'pull_en_words', '(', ')', '->', 'None', ':', 'ENGLISH_WORDS_URL', '=', '"https://github.com/dwyl/english-words.git"', 'en_words_path', '=', 'Path', '(', 'config', '.', 'EN_WORDS_PATH', ')', 'if', 'not', 'en_words_path', '.', 'is_file', '(', ')', ':', 'subprocess', '.', 'run', '(', '[', '"git"', ',', '"clone"', ',', 'ENGLISH_WORDS_URL', ',', 'str', '(', 'en_words_path', '.', 'parent', ')', ']', ')']
Fetches a repository containing English words.
['Fetches', 'a', 'repository', 'containing', 'English', 'words', '.']
train
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/bkw.py#L27-L34
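A generic, self-contained version of the clone-if-missing step; the URL and path are placeholders, and check=True is an addition here so a failed clone raises instead of passing silently.

    import subprocess
    from pathlib import Path

    def clone_if_missing(repo_url: str, marker_file: Path) -> None:
        # Clone the repository into the marker file's parent directory only if
        # the marker file is not already present.
        if not marker_file.is_file():
            subprocess.run(["git", "clone", repo_url, str(marker_file.parent)], check=True)

    clone_if_missing("https://github.com/dwyl/english-words.git",
                     Path("/tmp/english-words/words.txt"))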
9,717
ZELLMECHANIK-DRESDEN/dclab
dclab/rtdc_dataset/fmt_hierarchy.py
map_indices_parent2child
def map_indices_parent2child(child, parent_indices): """Map parent RTDCBase event indices to RTDC_Hierarchy Parameters ---------- parent: RTDC_Hierarchy hierarchy child parent_indices: 1d ndarray hierarchy parent (`child.hparent`) indices to map Returns ------- child_indices: 1d ndarray child indices """ parent = child.hparent # filters pf = parent.filter.all # indices in child child_indices = [] count = 0 for ii in range(len(pf)): if pf[ii]: # only append indices if they exist in child if ii in parent_indices: # current child event count is the child index child_indices.append(count) # increment child event count count += 1 return np.array(child_indices)
python
def map_indices_parent2child(child, parent_indices): """Map parent RTDCBase event indices to RTDC_Hierarchy Parameters ---------- parent: RTDC_Hierarchy hierarchy child parent_indices: 1d ndarray hierarchy parent (`child.hparent`) indices to map Returns ------- child_indices: 1d ndarray child indices """ parent = child.hparent # filters pf = parent.filter.all # indices in child child_indices = [] count = 0 for ii in range(len(pf)): if pf[ii]: # only append indices if they exist in child if ii in parent_indices: # current child event count is the child index child_indices.append(count) # increment child event count count += 1 return np.array(child_indices)
['def', 'map_indices_parent2child', '(', 'child', ',', 'parent_indices', ')', ':', 'parent', '=', 'child', '.', 'hparent', '# filters', 'pf', '=', 'parent', '.', 'filter', '.', 'all', '# indices in child', 'child_indices', '=', '[', ']', 'count', '=', '0', 'for', 'ii', 'in', 'range', '(', 'len', '(', 'pf', ')', ')', ':', 'if', 'pf', '[', 'ii', ']', ':', '# only append indices if they exist in child', 'if', 'ii', 'in', 'parent_indices', ':', '# current child event count is the child index', 'child_indices', '.', 'append', '(', 'count', ')', '# increment child event count', 'count', '+=', '1', 'return', 'np', '.', 'array', '(', 'child_indices', ')']
Map parent RTDCBase event indices to RTDC_Hierarchy Parameters ---------- parent: RTDC_Hierarchy hierarchy child parent_indices: 1d ndarray hierarchy parent (`child.hparent`) indices to map Returns ------- child_indices: 1d ndarray child indices
['Map', 'parent', 'RTDCBase', 'event', 'indices', 'to', 'RTDC_Hierarchy']
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L362-L392
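A worked example of the parent-to-child index mapping using only a boolean filter array (no dclab objects), to show how the running child counter lines up with the parent filter.

    import numpy as np

    pf = np.array([True, False, True, True, False, True])  # parent filter
    parent_indices = [2, 5]                                 # parent events of interest

    child_indices = []
    count = 0
    for ii in range(len(pf)):
        if pf[ii]:
            if ii in parent_indices:
                # this parent event survives the filter, so "count" is its child index
                child_indices.append(count)
            count += 1

    print(np.array(child_indices))  # [1 3]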
9,718
blockstack/blockstack-core
blockstack/lib/atlas.py
atlasdb_init
def atlasdb_init( path, zonefile_dir, db, peer_seeds, peer_blacklist, recover=False, validate=False): """ Set up the atlas node: * create the db if it doesn't exist * go through all the names and verify that we have the *current* zonefiles * if we don't, queue them for fetching. * set up the peer db @db should be an instance of BlockstackDB @initial_peers should be a list of URLs Return the newly-initialized peer table """ global ATLASDB_SQL peer_table = {} if os.path.exists( path ): log.debug("Atlas DB exists at %s" % path) con = atlasdb_open( path ) atlasdb_last_block = atlasdb_get_lastblock( con=con, path=path ) if atlasdb_last_block is None: atlasdb_last_block = FIRST_BLOCK_MAINNET log.debug("Synchronize zonefiles from %s to %s" % (atlasdb_last_block, db.lastblock) ) atlasdb_queue_zonefiles( con, db, atlasdb_last_block, zonefile_dir, recover=recover, validate=validate) log.debug("Refreshing seed peers") for peer in peer_seeds: # forcibly add seed peers atlasdb_add_peer( peer, con=con, peer_table=peer_table, ping_on_evict=False ) # re-try fetching zonefiles from storage if we don't have them yet atlasdb_reset_zonefile_tried_storage( con=con, path=path ) # load up peer table from the db log.debug("Loading peer table") peer_table = atlasdb_load_peer_table( con=con, path=path ) # cache zonefile inventory and count atlasdb_cache_zonefile_info( con=con ) con.close() else: log.debug("Initializing Atlas DB at %s" % path) lines = [l + ";" for l in ATLASDB_SQL.split(";")] con = sqlite3.connect( path, isolation_level=None ) for line in lines: db_query_execute(con, line, ()) con.row_factory = atlasdb_row_factory # populate from db log.debug("Queuing all zonefiles") atlasdb_queue_zonefiles( con, db, FIRST_BLOCK_MAINNET, zonefile_dir, recover=recover, validate=validate) log.debug("Adding seed peers") for peer in peer_seeds: atlasdb_add_peer( peer, con=con, peer_table=peer_table ) atlasdb_cache_zonefile_info( con=con ) con.close() log.debug("peer_table: {}".format(peer_table.keys())) # whitelist and blacklist for peer_url in peer_seeds: host, port = url_to_host_port( peer_url ) peer_hostport = "%s:%s" % (host, port) if peer_hostport not in peer_table.keys(): atlasdb_add_peer( peer_hostport, path=path, peer_table=peer_table ) log.debug("peer_table: {}".format(peer_table.keys())) peer_table[peer_hostport]['whitelisted'] = True for peer_url in peer_blacklist: host, port = url_to_host_port( peer_url ) peer_hostport = "%s:%s" % (host, port) if peer_hostport not in peer_table.keys(): atlasdb_add_peer( peer_hostport, path=path, peer_table=peer_table ) log.debug("peer_table: {}".format(peer_table.keys())) peer_table[peer_hostport]['blacklisted'] = True return peer_table
python
def atlasdb_init( path, zonefile_dir, db, peer_seeds, peer_blacklist, recover=False, validate=False): """ Set up the atlas node: * create the db if it doesn't exist * go through all the names and verify that we have the *current* zonefiles * if we don't, queue them for fetching. * set up the peer db @db should be an instance of BlockstackDB @initial_peers should be a list of URLs Return the newly-initialized peer table """ global ATLASDB_SQL peer_table = {} if os.path.exists( path ): log.debug("Atlas DB exists at %s" % path) con = atlasdb_open( path ) atlasdb_last_block = atlasdb_get_lastblock( con=con, path=path ) if atlasdb_last_block is None: atlasdb_last_block = FIRST_BLOCK_MAINNET log.debug("Synchronize zonefiles from %s to %s" % (atlasdb_last_block, db.lastblock) ) atlasdb_queue_zonefiles( con, db, atlasdb_last_block, zonefile_dir, recover=recover, validate=validate) log.debug("Refreshing seed peers") for peer in peer_seeds: # forcibly add seed peers atlasdb_add_peer( peer, con=con, peer_table=peer_table, ping_on_evict=False ) # re-try fetching zonefiles from storage if we don't have them yet atlasdb_reset_zonefile_tried_storage( con=con, path=path ) # load up peer table from the db log.debug("Loading peer table") peer_table = atlasdb_load_peer_table( con=con, path=path ) # cache zonefile inventory and count atlasdb_cache_zonefile_info( con=con ) con.close() else: log.debug("Initializing Atlas DB at %s" % path) lines = [l + ";" for l in ATLASDB_SQL.split(";")] con = sqlite3.connect( path, isolation_level=None ) for line in lines: db_query_execute(con, line, ()) con.row_factory = atlasdb_row_factory # populate from db log.debug("Queuing all zonefiles") atlasdb_queue_zonefiles( con, db, FIRST_BLOCK_MAINNET, zonefile_dir, recover=recover, validate=validate) log.debug("Adding seed peers") for peer in peer_seeds: atlasdb_add_peer( peer, con=con, peer_table=peer_table ) atlasdb_cache_zonefile_info( con=con ) con.close() log.debug("peer_table: {}".format(peer_table.keys())) # whitelist and blacklist for peer_url in peer_seeds: host, port = url_to_host_port( peer_url ) peer_hostport = "%s:%s" % (host, port) if peer_hostport not in peer_table.keys(): atlasdb_add_peer( peer_hostport, path=path, peer_table=peer_table ) log.debug("peer_table: {}".format(peer_table.keys())) peer_table[peer_hostport]['whitelisted'] = True for peer_url in peer_blacklist: host, port = url_to_host_port( peer_url ) peer_hostport = "%s:%s" % (host, port) if peer_hostport not in peer_table.keys(): atlasdb_add_peer( peer_hostport, path=path, peer_table=peer_table ) log.debug("peer_table: {}".format(peer_table.keys())) peer_table[peer_hostport]['blacklisted'] = True return peer_table
['def', 'atlasdb_init', '(', 'path', ',', 'zonefile_dir', ',', 'db', ',', 'peer_seeds', ',', 'peer_blacklist', ',', 'recover', '=', 'False', ',', 'validate', '=', 'False', ')', ':', 'global', 'ATLASDB_SQL', 'peer_table', '=', '{', '}', 'if', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'log', '.', 'debug', '(', '"Atlas DB exists at %s"', '%', 'path', ')', 'con', '=', 'atlasdb_open', '(', 'path', ')', 'atlasdb_last_block', '=', 'atlasdb_get_lastblock', '(', 'con', '=', 'con', ',', 'path', '=', 'path', ')', 'if', 'atlasdb_last_block', 'is', 'None', ':', 'atlasdb_last_block', '=', 'FIRST_BLOCK_MAINNET', 'log', '.', 'debug', '(', '"Synchronize zonefiles from %s to %s"', '%', '(', 'atlasdb_last_block', ',', 'db', '.', 'lastblock', ')', ')', 'atlasdb_queue_zonefiles', '(', 'con', ',', 'db', ',', 'atlasdb_last_block', ',', 'zonefile_dir', ',', 'recover', '=', 'recover', ',', 'validate', '=', 'validate', ')', 'log', '.', 'debug', '(', '"Refreshing seed peers"', ')', 'for', 'peer', 'in', 'peer_seeds', ':', '# forcibly add seed peers', 'atlasdb_add_peer', '(', 'peer', ',', 'con', '=', 'con', ',', 'peer_table', '=', 'peer_table', ',', 'ping_on_evict', '=', 'False', ')', "# re-try fetching zonefiles from storage if we don't have them yet", 'atlasdb_reset_zonefile_tried_storage', '(', 'con', '=', 'con', ',', 'path', '=', 'path', ')', '# load up peer table from the db', 'log', '.', 'debug', '(', '"Loading peer table"', ')', 'peer_table', '=', 'atlasdb_load_peer_table', '(', 'con', '=', 'con', ',', 'path', '=', 'path', ')', '# cache zonefile inventory and count', 'atlasdb_cache_zonefile_info', '(', 'con', '=', 'con', ')', 'con', '.', 'close', '(', ')', 'else', ':', 'log', '.', 'debug', '(', '"Initializing Atlas DB at %s"', '%', 'path', ')', 'lines', '=', '[', 'l', '+', '";"', 'for', 'l', 'in', 'ATLASDB_SQL', '.', 'split', '(', '";"', ')', ']', 'con', '=', 'sqlite3', '.', 'connect', '(', 'path', ',', 'isolation_level', '=', 'None', ')', 'for', 'line', 'in', 'lines', ':', 'db_query_execute', '(', 'con', ',', 'line', ',', '(', ')', ')', 'con', '.', 'row_factory', '=', 'atlasdb_row_factory', '# populate from db', 'log', '.', 'debug', '(', '"Queuing all zonefiles"', ')', 'atlasdb_queue_zonefiles', '(', 'con', ',', 'db', ',', 'FIRST_BLOCK_MAINNET', ',', 'zonefile_dir', ',', 'recover', '=', 'recover', ',', 'validate', '=', 'validate', ')', 'log', '.', 'debug', '(', '"Adding seed peers"', ')', 'for', 'peer', 'in', 'peer_seeds', ':', 'atlasdb_add_peer', '(', 'peer', ',', 'con', '=', 'con', ',', 'peer_table', '=', 'peer_table', ')', 'atlasdb_cache_zonefile_info', '(', 'con', '=', 'con', ')', 'con', '.', 'close', '(', ')', 'log', '.', 'debug', '(', '"peer_table: {}"', '.', 'format', '(', 'peer_table', '.', 'keys', '(', ')', ')', ')', '# whitelist and blacklist', 'for', 'peer_url', 'in', 'peer_seeds', ':', 'host', ',', 'port', '=', 'url_to_host_port', '(', 'peer_url', ')', 'peer_hostport', '=', '"%s:%s"', '%', '(', 'host', ',', 'port', ')', 'if', 'peer_hostport', 'not', 'in', 'peer_table', '.', 'keys', '(', ')', ':', 'atlasdb_add_peer', '(', 'peer_hostport', ',', 'path', '=', 'path', ',', 'peer_table', '=', 'peer_table', ')', 'log', '.', 'debug', '(', '"peer_table: {}"', '.', 'format', '(', 'peer_table', '.', 'keys', '(', ')', ')', ')', 'peer_table', '[', 'peer_hostport', ']', '[', "'whitelisted'", ']', '=', 'True', 'for', 'peer_url', 'in', 'peer_blacklist', ':', 'host', ',', 'port', '=', 'url_to_host_port', '(', 'peer_url', ')', 'peer_hostport', '=', '"%s:%s"', '%', '(', 'host', ',', 'port', ')', 'if', 
'peer_hostport', 'not', 'in', 'peer_table', '.', 'keys', '(', ')', ':', 'atlasdb_add_peer', '(', 'peer_hostport', ',', 'path', '=', 'path', ',', 'peer_table', '=', 'peer_table', ')', 'log', '.', 'debug', '(', '"peer_table: {}"', '.', 'format', '(', 'peer_table', '.', 'keys', '(', ')', ')', ')', 'peer_table', '[', 'peer_hostport', ']', '[', "'blacklisted'", ']', '=', 'True', 'return', 'peer_table']
Set up the atlas node: * create the db if it doesn't exist * go through all the names and verify that we have the *current* zonefiles * if we don't, queue them for fetching. * set up the peer db @db should be an instance of BlockstackDB @initial_peers should be a list of URLs Return the newly-initialized peer table
['Set', 'up', 'the', 'atlas', 'node', ':', '*', 'create', 'the', 'db', 'if', 'it', 'doesn', 't', 'exist', '*', 'go', 'through', 'all', 'the', 'names', 'and', 'verify', 'that', 'we', 'have', 'the', '*', 'current', '*', 'zonefiles', '*', 'if', 'we', 'don', 't', 'queue', 'them', 'for', 'fetching', '.', '*', 'set', 'up', 'the', 'peer', 'db']
train
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1260-L1352
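The initialization branch boils down to "split a multi-statement schema on semicolons and execute each statement"; here is that step in isolation with sqlite3 and a placeholder schema (the real ATLASDB_SQL is much larger).

    import sqlite3

    SCHEMA_SQL = """
    CREATE TABLE IF NOT EXISTS peers (peer_hostport TEXT PRIMARY KEY, discovery_time INT);
    CREATE TABLE IF NOT EXISTS zonefiles (zonefile_hash TEXT, present INT, block_height INT);
    """

    con = sqlite3.connect(":memory:", isolation_level=None)
    for statement in SCHEMA_SQL.split(";"):
        if statement.strip():          # skip the empty tail after the last ';'
            con.execute(statement)
    con.close()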
9,719
asweigart/pysimplevalidate
src/pysimplevalidate/__init__.py
validateState
def validateState(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None, returnStateName=False): """Raises ValidationException if value is not a USA state. Returns the capitalized state abbreviation, unless returnStateName is True in which case it returns the titlecased state name. * value (str): The value being validated as an email address. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. * returnStateName (bool): If True, the full state name is returned, i.e. 'California'. Otherwise, the abbreviation, i.e. 'CA'. Defaults to False. >>> import pysimplevalidate as pysv >>> pysv.validateState('tx') 'TX' >>> pysv.validateState('california') 'CA' >>> pysv.validateState('WASHINGTON') 'WA' >>> pysv.validateState('WASHINGTON', returnStateName=True) 'Washington' """ # TODO - note that this is USA-centric. I should work on trying to make this more international. # Validate parameters. _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg) if returnNow: return value if value.upper() in USA_STATES_UPPER.keys(): # check if value is a state abbreviation if returnStateName: return USA_STATES[value.upper()] # Return full state name. else: return value.upper() # Return abbreviation. elif value.title() in USA_STATES.values(): # check if value is a state name if returnStateName: return value.title() # Return full state name. else: return USA_STATES_REVERSED[value.title()] # Return abbreviation. _raiseValidationException(_('%r is not a state.') % (_errstr(value)), excMsg)
python
def validateState(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None, returnStateName=False): """Raises ValidationException if value is not a USA state. Returns the capitalized state abbreviation, unless returnStateName is True in which case it returns the titlecased state name. * value (str): The value being validated as an email address. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. * returnStateName (bool): If True, the full state name is returned, i.e. 'California'. Otherwise, the abbreviation, i.e. 'CA'. Defaults to False. >>> import pysimplevalidate as pysv >>> pysv.validateState('tx') 'TX' >>> pysv.validateState('california') 'CA' >>> pysv.validateState('WASHINGTON') 'WA' >>> pysv.validateState('WASHINGTON', returnStateName=True) 'Washington' """ # TODO - note that this is USA-centric. I should work on trying to make this more international. # Validate parameters. _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg) if returnNow: return value if value.upper() in USA_STATES_UPPER.keys(): # check if value is a state abbreviation if returnStateName: return USA_STATES[value.upper()] # Return full state name. else: return value.upper() # Return abbreviation. elif value.title() in USA_STATES.values(): # check if value is a state name if returnStateName: return value.title() # Return full state name. else: return USA_STATES_REVERSED[value.title()] # Return abbreviation. _raiseValidationException(_('%r is not a state.') % (_errstr(value)), excMsg)
['def', 'validateState', '(', 'value', ',', 'blank', '=', 'False', ',', 'strip', '=', 'None', ',', 'allowlistRegexes', '=', 'None', ',', 'blocklistRegexes', '=', 'None', ',', 'excMsg', '=', 'None', ',', 'returnStateName', '=', 'False', ')', ':', '# TODO - note that this is USA-centric. I should work on trying to make this more international.', '# Validate parameters.', '_validateGenericParameters', '(', 'blank', '=', 'blank', ',', 'strip', '=', 'strip', ',', 'allowlistRegexes', '=', 'allowlistRegexes', ',', 'blocklistRegexes', '=', 'blocklistRegexes', ')', 'returnNow', ',', 'value', '=', '_prevalidationCheck', '(', 'value', ',', 'blank', ',', 'strip', ',', 'allowlistRegexes', ',', 'blocklistRegexes', ',', 'excMsg', ')', 'if', 'returnNow', ':', 'return', 'value', 'if', 'value', '.', 'upper', '(', ')', 'in', 'USA_STATES_UPPER', '.', 'keys', '(', ')', ':', '# check if value is a state abbreviation', 'if', 'returnStateName', ':', 'return', 'USA_STATES', '[', 'value', '.', 'upper', '(', ')', ']', '# Return full state name.', 'else', ':', 'return', 'value', '.', 'upper', '(', ')', '# Return abbreviation.', 'elif', 'value', '.', 'title', '(', ')', 'in', 'USA_STATES', '.', 'values', '(', ')', ':', '# check if value is a state name', 'if', 'returnStateName', ':', 'return', 'value', '.', 'title', '(', ')', '# Return full state name.', 'else', ':', 'return', 'USA_STATES_REVERSED', '[', 'value', '.', 'title', '(', ')', ']', '# Return abbreviation.', '_raiseValidationException', '(', '_', '(', "'%r is not a state.'", ')', '%', '(', '_errstr', '(', 'value', ')', ')', ',', 'excMsg', ')']
Raises ValidationException if value is not a USA state. Returns the capitalized state abbreviation, unless returnStateName is True in which case it returns the titlecased state name. * value (str): The value being validated as an email address. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. * returnStateName (bool): If True, the full state name is returned, i.e. 'California'. Otherwise, the abbreviation, i.e. 'CA'. Defaults to False. >>> import pysimplevalidate as pysv >>> pysv.validateState('tx') 'TX' >>> pysv.validateState('california') 'CA' >>> pysv.validateState('WASHINGTON') 'WA' >>> pysv.validateState('WASHINGTON', returnStateName=True) 'Washington'
['Raises', 'ValidationException', 'if', 'value', 'is', 'not', 'a', 'USA', 'state', '.', 'Returns', 'the', 'capitalized', 'state', 'abbreviation', 'unless', 'returnStateName', 'is', 'True', 'in', 'which', 'case', 'it', 'returns', 'the', 'titlecased', 'state', 'name', '.']
train
https://github.com/asweigart/pysimplevalidate/blob/3ca27228abb7355d14bbf8abc225c63366379e44/src/pysimplevalidate/__init__.py#L1235-L1279
9,720
google/grr
grr/server/grr_response_server/flow_utils.py
WaitForFlow
def WaitForFlow(flow_urn, token=None, timeout=DEFAULT_TIMEOUT, max_sleep_time=1, min_sleep_time=0.2, dampening_multiplier=0.9): """Waits for a flow to finish, polling while we wait. Args: flow_urn: The urn of the flow to wait for. token: The datastore access token. timeout: How long to wait before giving up, usually because the client has gone away. max_sleep_time: The initial and longest time to wait in between polls. min_sleep_time: The final and shortest time to wait in between polls. dampening_multiplier: The current sleep time is multiplied by this number on each iteration. Controls how fast the polling reaches its minimum sleep time. You probably want this to be less than 1, unless you want to wait an increasing amount of time in between flows. Raises: IOError: If we time out while waiting for the client. """ start_time = time.time() sleep_time = max_sleep_time while True: # Reopen the AFF4Object to check if its status has changed, and also make # sure it's a flow. with aff4.FACTORY.Open( flow_urn, token=token, aff4_type=flow.GRRFlow) as flow_obj: # Stop if the flow is done or has timed out. if time.time() - start_time > timeout: logging.warning("Timed out after waiting %ss for %s!", timeout, flow_obj) raise IOError("Timed out trying to access client! Is it connected?") if not flow_obj.GetRunner().IsRunning(): break # Decrease the time we sleep each iteration. sleep_time = max(sleep_time * dampening_multiplier, min_sleep_time) time.sleep(sleep_time) logging.debug("Waiting for %s, sleeping for %.3fs", flow_obj, sleep_time)
python
def WaitForFlow(flow_urn, token=None, timeout=DEFAULT_TIMEOUT, max_sleep_time=1, min_sleep_time=0.2, dampening_multiplier=0.9): """Waits for a flow to finish, polling while we wait. Args: flow_urn: The urn of the flow to wait for. token: The datastore access token. timeout: How long to wait before giving up, usually because the client has gone away. max_sleep_time: The initial and longest time to wait in between polls. min_sleep_time: The final and shortest time to wait in between polls. dampening_multiplier: The current sleep time is multiplied by this number on each iteration. Controls how fast the polling reaches its minimum sleep time. You probably want this to be less than 1, unless you want to wait an increasing amount of time in between flows. Raises: IOError: If we time out while waiting for the client. """ start_time = time.time() sleep_time = max_sleep_time while True: # Reopen the AFF4Object to check if its status has changed, and also make # sure it's a flow. with aff4.FACTORY.Open( flow_urn, token=token, aff4_type=flow.GRRFlow) as flow_obj: # Stop if the flow is done or has timed out. if time.time() - start_time > timeout: logging.warning("Timed out after waiting %ss for %s!", timeout, flow_obj) raise IOError("Timed out trying to access client! Is it connected?") if not flow_obj.GetRunner().IsRunning(): break # Decrease the time we sleep each iteration. sleep_time = max(sleep_time * dampening_multiplier, min_sleep_time) time.sleep(sleep_time) logging.debug("Waiting for %s, sleeping for %.3fs", flow_obj, sleep_time)
['def', 'WaitForFlow', '(', 'flow_urn', ',', 'token', '=', 'None', ',', 'timeout', '=', 'DEFAULT_TIMEOUT', ',', 'max_sleep_time', '=', '1', ',', 'min_sleep_time', '=', '0.2', ',', 'dampening_multiplier', '=', '0.9', ')', ':', 'start_time', '=', 'time', '.', 'time', '(', ')', 'sleep_time', '=', 'max_sleep_time', 'while', 'True', ':', '# Reopen the AFF4Object to check if its status has changed, and also make', "# sure it's a flow.", 'with', 'aff4', '.', 'FACTORY', '.', 'Open', '(', 'flow_urn', ',', 'token', '=', 'token', ',', 'aff4_type', '=', 'flow', '.', 'GRRFlow', ')', 'as', 'flow_obj', ':', '# Stop if the flow is done or has timed out.', 'if', 'time', '.', 'time', '(', ')', '-', 'start_time', '>', 'timeout', ':', 'logging', '.', 'warning', '(', '"Timed out after waiting %ss for %s!"', ',', 'timeout', ',', 'flow_obj', ')', 'raise', 'IOError', '(', '"Timed out trying to access client! Is it connected?"', ')', 'if', 'not', 'flow_obj', '.', 'GetRunner', '(', ')', '.', 'IsRunning', '(', ')', ':', 'break', '# Decrease the time we sleep each iteration.', 'sleep_time', '=', 'max', '(', 'sleep_time', '*', 'dampening_multiplier', ',', 'min_sleep_time', ')', 'time', '.', 'sleep', '(', 'sleep_time', ')', 'logging', '.', 'debug', '(', '"Waiting for %s, sleeping for %.3fs"', ',', 'flow_obj', ',', 'sleep_time', ')']
Waits for a flow to finish, polling while we wait. Args: flow_urn: The urn of the flow to wait for. token: The datastore access token. timeout: How long to wait before giving up, usually because the client has gone away. max_sleep_time: The initial and longest time to wait in between polls. min_sleep_time: The final and shortest time to wait in between polls. dampening_multiplier: The current sleep time is multiplied by this number on each iteration. Controls how fast the polling reaches its minimum sleep time. You probably want this to be less than 1, unless you want to wait an increasing amount of time in between flows. Raises: IOError: If we time out while waiting for the client.
['Waits', 'for', 'a', 'flow', 'to', 'finish', 'polling', 'while', 'we', 'wait', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flow_utils.py#L88-L130
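The polling behaviour described in the docstring (sleep time shrinking by dampening_multiplier toward min_sleep_time, with a hard timeout) is easy to show standalone; the toy is_done predicate below stands in for the AFF4 flow check.

    import time

    def wait_until(is_done, timeout=90, max_sleep_time=1.0,
                   min_sleep_time=0.2, dampening_multiplier=0.9):
        start_time = time.time()
        sleep_time = max_sleep_time
        while not is_done():
            if time.time() - start_time > timeout:
                raise IOError("Timed out waiting for condition")
            # each iteration sleeps a little less, down to min_sleep_time
            sleep_time = max(sleep_time * dampening_multiplier, min_sleep_time)
            time.sleep(sleep_time)

    deadline = time.time() + 1.0
    wait_until(lambda: time.time() > deadline, timeout=5)
    print("done")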
9,721
pandas-dev/pandas
pandas/core/frame.py
DataFrame.combine_first
def combine_first(self, other): """ Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ import pandas.core.computation.expressions as expressions def extract_values(arr): # Does two things: # 1. maybe gets the values from the Series / Index # 2. convert datelike to i8 if isinstance(arr, (ABCIndexClass, ABCSeries)): arr = arr._values if needs_i8_conversion(arr): if is_extension_array_dtype(arr.dtype): arr = arr.asi8 else: arr = arr.view('i8') return arr def combiner(x, y): mask = isna(x) if isinstance(mask, (ABCIndexClass, ABCSeries)): mask = mask._values x_values = extract_values(x) y_values = extract_values(y) # If the column y in other DataFrame is not in first DataFrame, # just return y_values. if y.name not in self.columns: return y_values return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False)
python
def combine_first(self, other): """ Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ import pandas.core.computation.expressions as expressions def extract_values(arr): # Does two things: # 1. maybe gets the values from the Series / Index # 2. convert datelike to i8 if isinstance(arr, (ABCIndexClass, ABCSeries)): arr = arr._values if needs_i8_conversion(arr): if is_extension_array_dtype(arr.dtype): arr = arr.asi8 else: arr = arr.view('i8') return arr def combiner(x, y): mask = isna(x) if isinstance(mask, (ABCIndexClass, ABCSeries)): mask = mask._values x_values = extract_values(x) y_values = extract_values(y) # If the column y in other DataFrame is not in first DataFrame, # just return y_values. if y.name not in self.columns: return y_values return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False)
['def', 'combine_first', '(', 'self', ',', 'other', ')', ':', 'import', 'pandas', '.', 'core', '.', 'computation', '.', 'expressions', 'as', 'expressions', 'def', 'extract_values', '(', 'arr', ')', ':', '# Does two things:', '# 1. maybe gets the values from the Series / Index', '# 2. convert datelike to i8', 'if', 'isinstance', '(', 'arr', ',', '(', 'ABCIndexClass', ',', 'ABCSeries', ')', ')', ':', 'arr', '=', 'arr', '.', '_values', 'if', 'needs_i8_conversion', '(', 'arr', ')', ':', 'if', 'is_extension_array_dtype', '(', 'arr', '.', 'dtype', ')', ':', 'arr', '=', 'arr', '.', 'asi8', 'else', ':', 'arr', '=', 'arr', '.', 'view', '(', "'i8'", ')', 'return', 'arr', 'def', 'combiner', '(', 'x', ',', 'y', ')', ':', 'mask', '=', 'isna', '(', 'x', ')', 'if', 'isinstance', '(', 'mask', ',', '(', 'ABCIndexClass', ',', 'ABCSeries', ')', ')', ':', 'mask', '=', 'mask', '.', '_values', 'x_values', '=', 'extract_values', '(', 'x', ')', 'y_values', '=', 'extract_values', '(', 'y', ')', '# If the column y in other DataFrame is not in first DataFrame,', '# just return y_values.', 'if', 'y', '.', 'name', 'not', 'in', 'self', '.', 'columns', ':', 'return', 'y_values', 'return', 'expressions', '.', 'where', '(', 'mask', ',', 'y_values', ',', 'x_values', ')', 'return', 'self', '.', 'combine', '(', 'other', ',', 'combiner', ',', 'overwrite', '=', 'False', ')']
Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0
['Update', 'null', 'elements', 'with', 'value', 'in', 'the', 'same', 'location', 'in', 'other', '.']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5332-L5406
9,722
delfick/harpoon
harpoon/ship/runner.py
Runner.create_container
def create_container(self, conf, detach, tty): """Create a single container""" name = conf.name image_name = conf.image_name if conf.tag is not NotSpecified: image_name = conf.image_name_with_tag container_name = conf.container_name with conf.assumed_role(): env = dict(e.pair for e in conf.env) binds = conf.volumes.binds command = conf.formatted_command volume_names = conf.volumes.volume_names volumes_from = list(conf.volumes.share_with_names) no_tty_option = conf.no_tty_option ports = [p.container_port.port_pair for p in conf.ports] port_bindings = self.exposed(conf.ports) uncreated = [] for name in binds: if not os.path.exists(name): log.info("Making volume for mounting\tvolume=%s", name) try: os.makedirs(name) except OSError as error: uncreated.append((name, error)) if uncreated: raise BadOption("Failed to create some volumes on the host", uncreated=uncreated) log.info("Creating container from %s\timage=%s\tcontainer_name=%s\ttty=%s", image_name, name, container_name, tty) if binds: log.info("\tUsing volumes\tvolumes=%s", volume_names) if env: log.info("\tUsing environment\tenv=%s", sorted(env.keys())) if ports: log.info("\tUsing ports\tports=%s", ports) if port_bindings: log.info("\tPort bindings: %s", port_bindings) if volumes_from: log.info("\tVolumes from: %s", volumes_from) host_config = conf.harpoon.docker_api.create_host_config( binds = binds , volumes_from = volumes_from , port_bindings = port_bindings , devices = conf.devices , lxc_conf = conf.lxc_conf , privileged = conf.privileged , restart_policy = conf.restart_policy , dns = conf.network.dns , dns_search = conf.network.dns_search , extra_hosts = conf.network.extra_hosts , network_mode = conf.network.network_mode , publish_all_ports = conf.network.publish_all_ports , cap_add = conf.cpu.cap_add , cap_drop = conf.cpu.cap_drop , mem_limit = conf.cpu.mem_limit , cpu_shares = conf.cpu.cpu_shares , cpuset_cpus = conf.cpu.cpuset_cpus , cpuset_mems = conf.cpu.cpuset_mems , memswap_limit = conf.cpu.memswap_limit , ulimits = conf.ulimits , read_only = conf.read_only_rootfs , log_config = conf.log_config , security_opt = conf.security_opt , **conf.other_options.host_config ) container_id = conf.harpoon.docker_api.create_container(image_name , name=container_name , detach=detach , command=command , volumes=volume_names , environment=env , tty = False if no_tty_option else tty , user = conf.user , ports = ports , stdin_open = tty , hostname = conf.network.hostname , domainname = conf.network.domainname , network_disabled = conf.network.disabled , host_config = host_config , **conf.other_options.create ) if isinstance(container_id, dict): if "errorDetail" in container_id: raise BadImage("Failed to create container", image=name, error=container_id["errorDetail"]) container_id = container_id["Id"] return container_id
python
def create_container(self, conf, detach, tty): """Create a single container""" name = conf.name image_name = conf.image_name if conf.tag is not NotSpecified: image_name = conf.image_name_with_tag container_name = conf.container_name with conf.assumed_role(): env = dict(e.pair for e in conf.env) binds = conf.volumes.binds command = conf.formatted_command volume_names = conf.volumes.volume_names volumes_from = list(conf.volumes.share_with_names) no_tty_option = conf.no_tty_option ports = [p.container_port.port_pair for p in conf.ports] port_bindings = self.exposed(conf.ports) uncreated = [] for name in binds: if not os.path.exists(name): log.info("Making volume for mounting\tvolume=%s", name) try: os.makedirs(name) except OSError as error: uncreated.append((name, error)) if uncreated: raise BadOption("Failed to create some volumes on the host", uncreated=uncreated) log.info("Creating container from %s\timage=%s\tcontainer_name=%s\ttty=%s", image_name, name, container_name, tty) if binds: log.info("\tUsing volumes\tvolumes=%s", volume_names) if env: log.info("\tUsing environment\tenv=%s", sorted(env.keys())) if ports: log.info("\tUsing ports\tports=%s", ports) if port_bindings: log.info("\tPort bindings: %s", port_bindings) if volumes_from: log.info("\tVolumes from: %s", volumes_from) host_config = conf.harpoon.docker_api.create_host_config( binds = binds , volumes_from = volumes_from , port_bindings = port_bindings , devices = conf.devices , lxc_conf = conf.lxc_conf , privileged = conf.privileged , restart_policy = conf.restart_policy , dns = conf.network.dns , dns_search = conf.network.dns_search , extra_hosts = conf.network.extra_hosts , network_mode = conf.network.network_mode , publish_all_ports = conf.network.publish_all_ports , cap_add = conf.cpu.cap_add , cap_drop = conf.cpu.cap_drop , mem_limit = conf.cpu.mem_limit , cpu_shares = conf.cpu.cpu_shares , cpuset_cpus = conf.cpu.cpuset_cpus , cpuset_mems = conf.cpu.cpuset_mems , memswap_limit = conf.cpu.memswap_limit , ulimits = conf.ulimits , read_only = conf.read_only_rootfs , log_config = conf.log_config , security_opt = conf.security_opt , **conf.other_options.host_config ) container_id = conf.harpoon.docker_api.create_container(image_name , name=container_name , detach=detach , command=command , volumes=volume_names , environment=env , tty = False if no_tty_option else tty , user = conf.user , ports = ports , stdin_open = tty , hostname = conf.network.hostname , domainname = conf.network.domainname , network_disabled = conf.network.disabled , host_config = host_config , **conf.other_options.create ) if isinstance(container_id, dict): if "errorDetail" in container_id: raise BadImage("Failed to create container", image=name, error=container_id["errorDetail"]) container_id = container_id["Id"] return container_id
['def', 'create_container', '(', 'self', ',', 'conf', ',', 'detach', ',', 'tty', ')', ':', 'name', '=', 'conf', '.', 'name', 'image_name', '=', 'conf', '.', 'image_name', 'if', 'conf', '.', 'tag', 'is', 'not', 'NotSpecified', ':', 'image_name', '=', 'conf', '.', 'image_name_with_tag', 'container_name', '=', 'conf', '.', 'container_name', 'with', 'conf', '.', 'assumed_role', '(', ')', ':', 'env', '=', 'dict', '(', 'e', '.', 'pair', 'for', 'e', 'in', 'conf', '.', 'env', ')', 'binds', '=', 'conf', '.', 'volumes', '.', 'binds', 'command', '=', 'conf', '.', 'formatted_command', 'volume_names', '=', 'conf', '.', 'volumes', '.', 'volume_names', 'volumes_from', '=', 'list', '(', 'conf', '.', 'volumes', '.', 'share_with_names', ')', 'no_tty_option', '=', 'conf', '.', 'no_tty_option', 'ports', '=', '[', 'p', '.', 'container_port', '.', 'port_pair', 'for', 'p', 'in', 'conf', '.', 'ports', ']', 'port_bindings', '=', 'self', '.', 'exposed', '(', 'conf', '.', 'ports', ')', 'uncreated', '=', '[', ']', 'for', 'name', 'in', 'binds', ':', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'name', ')', ':', 'log', '.', 'info', '(', '"Making volume for mounting\\tvolume=%s"', ',', 'name', ')', 'try', ':', 'os', '.', 'makedirs', '(', 'name', ')', 'except', 'OSError', 'as', 'error', ':', 'uncreated', '.', 'append', '(', '(', 'name', ',', 'error', ')', ')', 'if', 'uncreated', ':', 'raise', 'BadOption', '(', '"Failed to create some volumes on the host"', ',', 'uncreated', '=', 'uncreated', ')', 'log', '.', 'info', '(', '"Creating container from %s\\timage=%s\\tcontainer_name=%s\\ttty=%s"', ',', 'image_name', ',', 'name', ',', 'container_name', ',', 'tty', ')', 'if', 'binds', ':', 'log', '.', 'info', '(', '"\\tUsing volumes\\tvolumes=%s"', ',', 'volume_names', ')', 'if', 'env', ':', 'log', '.', 'info', '(', '"\\tUsing environment\\tenv=%s"', ',', 'sorted', '(', 'env', '.', 'keys', '(', ')', ')', ')', 'if', 'ports', ':', 'log', '.', 'info', '(', '"\\tUsing ports\\tports=%s"', ',', 'ports', ')', 'if', 'port_bindings', ':', 'log', '.', 'info', '(', '"\\tPort bindings: %s"', ',', 'port_bindings', ')', 'if', 'volumes_from', ':', 'log', '.', 'info', '(', '"\\tVolumes from: %s"', ',', 'volumes_from', ')', 'host_config', '=', 'conf', '.', 'harpoon', '.', 'docker_api', '.', 'create_host_config', '(', 'binds', '=', 'binds', ',', 'volumes_from', '=', 'volumes_from', ',', 'port_bindings', '=', 'port_bindings', ',', 'devices', '=', 'conf', '.', 'devices', ',', 'lxc_conf', '=', 'conf', '.', 'lxc_conf', ',', 'privileged', '=', 'conf', '.', 'privileged', ',', 'restart_policy', '=', 'conf', '.', 'restart_policy', ',', 'dns', '=', 'conf', '.', 'network', '.', 'dns', ',', 'dns_search', '=', 'conf', '.', 'network', '.', 'dns_search', ',', 'extra_hosts', '=', 'conf', '.', 'network', '.', 'extra_hosts', ',', 'network_mode', '=', 'conf', '.', 'network', '.', 'network_mode', ',', 'publish_all_ports', '=', 'conf', '.', 'network', '.', 'publish_all_ports', ',', 'cap_add', '=', 'conf', '.', 'cpu', '.', 'cap_add', ',', 'cap_drop', '=', 'conf', '.', 'cpu', '.', 'cap_drop', ',', 'mem_limit', '=', 'conf', '.', 'cpu', '.', 'mem_limit', ',', 'cpu_shares', '=', 'conf', '.', 'cpu', '.', 'cpu_shares', ',', 'cpuset_cpus', '=', 'conf', '.', 'cpu', '.', 'cpuset_cpus', ',', 'cpuset_mems', '=', 'conf', '.', 'cpu', '.', 'cpuset_mems', ',', 'memswap_limit', '=', 'conf', '.', 'cpu', '.', 'memswap_limit', ',', 'ulimits', '=', 'conf', '.', 'ulimits', ',', 'read_only', '=', 'conf', '.', 'read_only_rootfs', ',', 'log_config', '=', 'conf', '.', 'log_config', 
',', 'security_opt', '=', 'conf', '.', 'security_opt', ',', '*', '*', 'conf', '.', 'other_options', '.', 'host_config', ')', 'container_id', '=', 'conf', '.', 'harpoon', '.', 'docker_api', '.', 'create_container', '(', 'image_name', ',', 'name', '=', 'container_name', ',', 'detach', '=', 'detach', ',', 'command', '=', 'command', ',', 'volumes', '=', 'volume_names', ',', 'environment', '=', 'env', ',', 'tty', '=', 'False', 'if', 'no_tty_option', 'else', 'tty', ',', 'user', '=', 'conf', '.', 'user', ',', 'ports', '=', 'ports', ',', 'stdin_open', '=', 'tty', ',', 'hostname', '=', 'conf', '.', 'network', '.', 'hostname', ',', 'domainname', '=', 'conf', '.', 'network', '.', 'domainname', ',', 'network_disabled', '=', 'conf', '.', 'network', '.', 'disabled', ',', 'host_config', '=', 'host_config', ',', '*', '*', 'conf', '.', 'other_options', '.', 'create', ')', 'if', 'isinstance', '(', 'container_id', ',', 'dict', ')', ':', 'if', '"errorDetail"', 'in', 'container_id', ':', 'raise', 'BadImage', '(', '"Failed to create container"', ',', 'image', '=', 'name', ',', 'error', '=', 'container_id', '[', '"errorDetail"', ']', ')', 'container_id', '=', 'container_id', '[', '"Id"', ']', 'return', 'container_id']
Create a single container
['Create', 'a', 'single', 'container']
train
https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/ship/runner.py#L226-L328
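The harpoon runner above drives docker-py's low-level API. For orientation, a minimal sketch of the same create_host_config / create_container flow against docker-py's APIClient is shown below; the image name, bind mount, port mapping and restart policy are illustrative assumptions rather than harpoon values, and a reachable Docker daemon is required::

    import docker

    # Low-level client, as used indirectly by the runner above (assumed socket path).
    client = docker.APIClient(base_url='unix://var/run/docker.sock')

    # Host-side settings: bind mounts, port bindings and restart policy.
    host_config = client.create_host_config(
        binds=['/tmp/harpoon-demo:/data'],
        port_bindings={8080: 8080},
        restart_policy={'Name': 'on-failure', 'MaximumRetryCount': 3},
    )

    # Container-side settings plus the host_config built above.
    container = client.create_container(
        'busybox:latest',
        name='harpoon-demo',
        command='sleep 60',
        ports=[8080],
        host_config=host_config,
    )
    print(container['Id'])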
9,723
MacHu-GWU/angora-project
angora/math/img2waveform.py
img2wav
def img2wav(path, min_x, max_x, min_y, max_y, window_size=3): """Generate 1-D data ``y=f(x)`` from a black/white image. Suppose we have an image like that: .. image:: images/waveform.png :align: center Put some codes:: >>> from weatherlab.math.img2waveform import img2wav >>> import matplotlib.pyplot as plt >>> x, y = img2wav(r"testdata\img2waveform\waveform.png", ... min_x=0.0, max_x=288, ... min_y=15.0, max_y=35.0, ... window_size=15) >>> plt.plot(x, y) >>> plt.show() Then you got nicely sampled data: .. image:: images\waveform_pyplot.png :align: center :param path: the image file path :type path: string :param min_x: minimum value of x axis :type min_x: number :param max_x: maximum value of x axis :type max_x: number :param min_y: minimum value of y axis :type min_y: number :param max_y: maximum value of y axis :type max_y: number :param window_size: the slide window :type window_size: int Note: In python, a numpy array that represent a image is from left to the right, top to the bottom, but in coordinate, it's from bottom to the top. So we use ::-1 for a reverse output """ image = Image.open(path).convert("L") matrix = np.array(image)[::-1] # you can customize the gray scale fix behavior to fit color image matrix[np.where(matrix >= 128)] = 255 matrix[np.where(matrix < 128)] = 0 tick_x = (max_x - min_x) / matrix.shape[1] tick_y = (max_y - min_y) / matrix.shape[0] x, y = list(), list() for i in range(matrix.shape[1]): window = expand_window( # slide margin window i, window_size, matrix.shape[1]) margin_dots_y_indices = np.where(matrix[:, window] == 0)[0] # if found at least one dots in margin if len(margin_dots_y_indices) > 0: x.append(min_x + (i + 1) * tick_x) y.append(min_y + margin_dots_y_indices.mean() * tick_y) return np.array(x), np.array(y)
python
def img2wav(path, min_x, max_x, min_y, max_y, window_size=3): """Generate 1-D data ``y=f(x)`` from a black/white image. Suppose we have an image like that: .. image:: images/waveform.png :align: center Put some codes:: >>> from weatherlab.math.img2waveform import img2wav >>> import matplotlib.pyplot as plt >>> x, y = img2wav(r"testdata\img2waveform\waveform.png", ... min_x=0.0, max_x=288, ... min_y=15.0, max_y=35.0, ... window_size=15) >>> plt.plot(x, y) >>> plt.show() Then you got nicely sampled data: .. image:: images\waveform_pyplot.png :align: center :param path: the image file path :type path: string :param min_x: minimum value of x axis :type min_x: number :param max_x: maximum value of x axis :type max_x: number :param min_y: minimum value of y axis :type min_y: number :param max_y: maximum value of y axis :type max_y: number :param window_size: the slide window :type window_size: int Note: In python, a numpy array that represent a image is from left to the right, top to the bottom, but in coordinate, it's from bottom to the top. So we use ::-1 for a reverse output """ image = Image.open(path).convert("L") matrix = np.array(image)[::-1] # you can customize the gray scale fix behavior to fit color image matrix[np.where(matrix >= 128)] = 255 matrix[np.where(matrix < 128)] = 0 tick_x = (max_x - min_x) / matrix.shape[1] tick_y = (max_y - min_y) / matrix.shape[0] x, y = list(), list() for i in range(matrix.shape[1]): window = expand_window( # slide margin window i, window_size, matrix.shape[1]) margin_dots_y_indices = np.where(matrix[:, window] == 0)[0] # if found at least one dots in margin if len(margin_dots_y_indices) > 0: x.append(min_x + (i + 1) * tick_x) y.append(min_y + margin_dots_y_indices.mean() * tick_y) return np.array(x), np.array(y)
['def', 'img2wav', '(', 'path', ',', 'min_x', ',', 'max_x', ',', 'min_y', ',', 'max_y', ',', 'window_size', '=', '3', ')', ':', 'image', '=', 'Image', '.', 'open', '(', 'path', ')', '.', 'convert', '(', '"L"', ')', 'matrix', '=', 'np', '.', 'array', '(', 'image', ')', '[', ':', ':', '-', '1', ']', '# you can customize the gray scale fix behavior to fit color image', 'matrix', '[', 'np', '.', 'where', '(', 'matrix', '>=', '128', ')', ']', '=', '255', 'matrix', '[', 'np', '.', 'where', '(', 'matrix', '<', '128', ')', ']', '=', '0', 'tick_x', '=', '(', 'max_x', '-', 'min_x', ')', '/', 'matrix', '.', 'shape', '[', '1', ']', 'tick_y', '=', '(', 'max_y', '-', 'min_y', ')', '/', 'matrix', '.', 'shape', '[', '0', ']', 'x', ',', 'y', '=', 'list', '(', ')', ',', 'list', '(', ')', 'for', 'i', 'in', 'range', '(', 'matrix', '.', 'shape', '[', '1', ']', ')', ':', 'window', '=', 'expand_window', '(', '# slide margin window', 'i', ',', 'window_size', ',', 'matrix', '.', 'shape', '[', '1', ']', ')', 'margin_dots_y_indices', '=', 'np', '.', 'where', '(', 'matrix', '[', ':', ',', 'window', ']', '==', '0', ')', '[', '0', ']', '# if found at least one dots in margin', 'if', 'len', '(', 'margin_dots_y_indices', ')', '>', '0', ':', 'x', '.', 'append', '(', 'min_x', '+', '(', 'i', '+', '1', ')', '*', 'tick_x', ')', 'y', '.', 'append', '(', 'min_y', '+', 'margin_dots_y_indices', '.', 'mean', '(', ')', '*', 'tick_y', ')', 'return', 'np', '.', 'array', '(', 'x', ')', ',', 'np', '.', 'array', '(', 'y', ')']
Generate 1-D data ``y=f(x)`` from a black/white image. Suppose we have an image like that: .. image:: images/waveform.png :align: center Put some codes:: >>> from weatherlab.math.img2waveform import img2wav >>> import matplotlib.pyplot as plt >>> x, y = img2wav(r"testdata\img2waveform\waveform.png", ... min_x=0.0, max_x=288, ... min_y=15.0, max_y=35.0, ... window_size=15) >>> plt.plot(x, y) >>> plt.show() Then you got nicely sampled data: .. image:: images\waveform_pyplot.png :align: center :param path: the image file path :type path: string :param min_x: minimum value of x axis :type min_x: number :param max_x: maximum value of x axis :type max_x: number :param min_y: minimum value of y axis :type min_y: number :param max_y: maximum value of y axis :type max_y: number :param window_size: the slide window :type window_size: int Note: In python, a numpy array that represent a image is from left to the right, top to the bottom, but in coordinate, it's from bottom to the top. So we use ::-1 for a reverse output
['Generate', '1', '-', 'D', 'data', 'y', '=', 'f', '(', 'x', ')', 'from', 'a', 'black', '/', 'white', 'image', '.']
train
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/math/img2waveform.py#L264-L327
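img2wav above depends on an expand_window helper that is not included in this record. A plausible stand-in, assuming it simply returns the column indices of a +/- window around i clamped to the valid range, could look like this (the real helper in the angora package may differ)::

    def expand_window(i, size, length):
        # indices of the sliding window around column i, clipped to [0, length)
        lower = max(0, i - size)
        upper = min(length, i + size + 1)
        return list(range(lower, upper))

    print(expand_window(0, 3, 10))   # [0, 1, 2, 3]
    print(expand_window(9, 3, 10))   # [6, 7, 8, 9]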
9,724
stevearc/dql
dql/models.py
GlobalIndex.pformat
def pformat(self, consumed_capacity=None): """ Pretty format for insertion into table pformat """ consumed_capacity = consumed_capacity or {} lines = [] parts = ["GLOBAL", self.index_type, "INDEX", self.name] if self.status != "ACTIVE": parts.insert(0, "[%s]" % self.status) lines.append(" ".join(parts)) lines.append(" items: {0:,} ({1:,} bytes)".format(self.item_count, self.size)) read = "Read: " + format_throughput( self.read_throughput, consumed_capacity.get("read") ) write = "Write: " + format_throughput( self.write_throughput, consumed_capacity.get("write") ) lines.append(" " + read + " " + write) lines.append(" " + self.hash_key.schema) if self.range_key is not None: lines.append(" " + self.range_key.schema) if self.includes is not None: keys = "[%s]" % ", ".join(("'%s'" % i for i in self.includes)) lines.append(" Projection: %s" % keys) return "\n".join(lines)
python
def pformat(self, consumed_capacity=None): """ Pretty format for insertion into table pformat """ consumed_capacity = consumed_capacity or {} lines = [] parts = ["GLOBAL", self.index_type, "INDEX", self.name] if self.status != "ACTIVE": parts.insert(0, "[%s]" % self.status) lines.append(" ".join(parts)) lines.append(" items: {0:,} ({1:,} bytes)".format(self.item_count, self.size)) read = "Read: " + format_throughput( self.read_throughput, consumed_capacity.get("read") ) write = "Write: " + format_throughput( self.write_throughput, consumed_capacity.get("write") ) lines.append(" " + read + " " + write) lines.append(" " + self.hash_key.schema) if self.range_key is not None: lines.append(" " + self.range_key.schema) if self.includes is not None: keys = "[%s]" % ", ".join(("'%s'" % i for i in self.includes)) lines.append(" Projection: %s" % keys) return "\n".join(lines)
['def', 'pformat', '(', 'self', ',', 'consumed_capacity', '=', 'None', ')', ':', 'consumed_capacity', '=', 'consumed_capacity', 'or', '{', '}', 'lines', '=', '[', ']', 'parts', '=', '[', '"GLOBAL"', ',', 'self', '.', 'index_type', ',', '"INDEX"', ',', 'self', '.', 'name', ']', 'if', 'self', '.', 'status', '!=', '"ACTIVE"', ':', 'parts', '.', 'insert', '(', '0', ',', '"[%s]"', '%', 'self', '.', 'status', ')', 'lines', '.', 'append', '(', '" "', '.', 'join', '(', 'parts', ')', ')', 'lines', '.', 'append', '(', '" items: {0:,} ({1:,} bytes)"', '.', 'format', '(', 'self', '.', 'item_count', ',', 'self', '.', 'size', ')', ')', 'read', '=', '"Read: "', '+', 'format_throughput', '(', 'self', '.', 'read_throughput', ',', 'consumed_capacity', '.', 'get', '(', '"read"', ')', ')', 'write', '=', '"Write: "', '+', 'format_throughput', '(', 'self', '.', 'write_throughput', ',', 'consumed_capacity', '.', 'get', '(', '"write"', ')', ')', 'lines', '.', 'append', '(', '" "', '+', 'read', '+', '" "', '+', 'write', ')', 'lines', '.', 'append', '(', '" "', '+', 'self', '.', 'hash_key', '.', 'schema', ')', 'if', 'self', '.', 'range_key', 'is', 'not', 'None', ':', 'lines', '.', 'append', '(', '" "', '+', 'self', '.', 'range_key', '.', 'schema', ')', 'if', 'self', '.', 'includes', 'is', 'not', 'None', ':', 'keys', '=', '"[%s]"', '%', '", "', '.', 'join', '(', '(', '"\'%s\'"', '%', 'i', 'for', 'i', 'in', 'self', '.', 'includes', ')', ')', 'lines', '.', 'append', '(', '" Projection: %s"', '%', 'keys', ')', 'return', '"\\n"', '.', 'join', '(', 'lines', ')']
Pretty format for insertion into table pformat
['Pretty', 'format', 'for', 'insertion', 'into', 'table', 'pformat']
train
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/models.py#L282-L305
9,725
bachya/pyairvisual
pyairvisual/supported.py
Supported.cities
async def cities(self, country: str, state: str) -> list: """Return a list of supported cities in a country/state.""" data = await self._request( 'get', 'cities', params={ 'state': state, 'country': country }) return [d['city'] for d in data['data']]
python
async def cities(self, country: str, state: str) -> list: """Return a list of supported cities in a country/state.""" data = await self._request( 'get', 'cities', params={ 'state': state, 'country': country }) return [d['city'] for d in data['data']]
['async', 'def', 'cities', '(', 'self', ',', 'country', ':', 'str', ',', 'state', ':', 'str', ')', '->', 'list', ':', 'data', '=', 'await', 'self', '.', '_request', '(', "'get'", ',', "'cities'", ',', 'params', '=', '{', "'state'", ':', 'state', ',', "'country'", ':', 'country', '}', ')', 'return', '[', 'd', '[', "'city'", ']', 'for', 'd', 'in', 'data', '[', "'data'", ']', ']']
Return a list of supported cities in a country/state.
['Return', 'a', 'list', 'of', 'supported', 'cities', 'in', 'a', 'country', '/', 'state', '.']
train
https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/supported.py#L12-L19
9,726
mitsei/dlkit
dlkit/handcar/learning/managers.py
LearningManager.get_objective_bank_lookup_session
def get_objective_bank_lookup_session(self, *args, **kwargs): """Gets the OsidSession associated with the objective bank lookup service. return: (osid.learning.ObjectiveBankLookupSession) - an ObjectiveBankLookupSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_objective_bank_lookup() is false compliance: optional - This method must be implemented if supports_objective_bank_lookup() is true. """ if not self.supports_objective_bank_lookup(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed() try: session = sessions.ObjectiveBankLookupSession(runtime=self._runtime) except AttributeError: raise OperationFailed() return session
python
def get_objective_bank_lookup_session(self, *args, **kwargs): """Gets the OsidSession associated with the objective bank lookup service. return: (osid.learning.ObjectiveBankLookupSession) - an ObjectiveBankLookupSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_objective_bank_lookup() is false compliance: optional - This method must be implemented if supports_objective_bank_lookup() is true. """ if not self.supports_objective_bank_lookup(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed() try: session = sessions.ObjectiveBankLookupSession(runtime=self._runtime) except AttributeError: raise OperationFailed() return session
['def', 'get_objective_bank_lookup_session', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'self', '.', 'supports_objective_bank_lookup', '(', ')', ':', 'raise', 'Unimplemented', '(', ')', 'try', ':', 'from', '.', 'import', 'sessions', 'except', 'ImportError', ':', 'raise', 'OperationFailed', '(', ')', 'try', ':', 'session', '=', 'sessions', '.', 'ObjectiveBankLookupSession', '(', 'runtime', '=', 'self', '.', '_runtime', ')', 'except', 'AttributeError', ':', 'raise', 'OperationFailed', '(', ')', 'return', 'session']
Gets the OsidSession associated with the objective bank lookup service. return: (osid.learning.ObjectiveBankLookupSession) - an ObjectiveBankLookupSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_objective_bank_lookup() is false compliance: optional - This method must be implemented if supports_objective_bank_lookup() is true.
['Gets', 'the', 'OsidSession', 'associated', 'with', 'the', 'objective', 'bank', 'lookup', 'service', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/managers.py#L2082-L2105
9,727
cggh/scikit-allel
allel/stats/diversity.py
tajima_d
def tajima_d(ac, pos=None, start=None, stop=None, min_sites=3): """Calculate the value of Tajima's D over a given region. Parameters ---------- ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. pos : array_like, int, shape (n_items,), optional Variant positions, using 1-based coordinates, in ascending order. start : int, optional The position at which to start (1-based). Defaults to the first position. stop : int, optional The position at which to stop (1-based). Defaults to the last position. min_sites : int, optional Minimum number of segregating sites for which to calculate a value. If there are fewer, np.nan is returned. Defaults to 3. Returns ------- D : float Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 0], [1, 1]], ... [[0, 1], [1, 1]], ... [[1, 1], [1, 1]], ... [[0, 0], [1, 2]], ... [[0, 1], [1, 2]], ... [[0, 1], [-1, -1]], ... [[-1, -1], [-1, -1]]]) >>> ac = g.count_alleles() >>> allel.tajima_d(ac) 3.1445848780213814 >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27] >>> allel.tajima_d(ac, pos=pos, start=7, stop=25) 3.8779735196179366 """ # check inputs if not hasattr(ac, 'count_segregating'): ac = AlleleCountsArray(ac, copy=False) # deal with subregion if pos is not None and (start is not None or stop is not None): if not isinstance(pos, SortedIndex): pos = SortedIndex(pos, copy=False) loc = pos.locate_range(start, stop) ac = ac[loc] # count segregating variants S = ac.count_segregating() if S < min_sites: return np.nan # assume number of chromosomes sampled is constant for all variants n = ac.sum(axis=1).max() # (n-1)th harmonic number a1 = np.sum(1 / np.arange(1, n)) # calculate Watterson's theta (absolute value) theta_hat_w_abs = S / a1 # calculate mean pairwise difference mpd = mean_pairwise_difference(ac, fill=0) # calculate theta_hat pi (sum differences over variants) theta_hat_pi_abs = np.sum(mpd) # N.B., both theta estimates are usually divided by the number of # (accessible) bases but here we want the absolute difference d = theta_hat_pi_abs - theta_hat_w_abs # calculate the denominator (standard deviation) a2 = np.sum(1 / (np.arange(1, n)**2)) b1 = (n + 1) / (3 * (n - 1)) b2 = 2 * (n**2 + n + 3) / (9 * n * (n - 1)) c1 = b1 - (1 / a1) c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1**2)) e1 = c1 / a1 e2 = c2 / (a1**2 + a2) d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1))) # finally calculate Tajima's D D = d / d_stdev return D
python
def tajima_d(ac, pos=None, start=None, stop=None, min_sites=3): """Calculate the value of Tajima's D over a given region. Parameters ---------- ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. pos : array_like, int, shape (n_items,), optional Variant positions, using 1-based coordinates, in ascending order. start : int, optional The position at which to start (1-based). Defaults to the first position. stop : int, optional The position at which to stop (1-based). Defaults to the last position. min_sites : int, optional Minimum number of segregating sites for which to calculate a value. If there are fewer, np.nan is returned. Defaults to 3. Returns ------- D : float Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 0], [1, 1]], ... [[0, 1], [1, 1]], ... [[1, 1], [1, 1]], ... [[0, 0], [1, 2]], ... [[0, 1], [1, 2]], ... [[0, 1], [-1, -1]], ... [[-1, -1], [-1, -1]]]) >>> ac = g.count_alleles() >>> allel.tajima_d(ac) 3.1445848780213814 >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27] >>> allel.tajima_d(ac, pos=pos, start=7, stop=25) 3.8779735196179366 """ # check inputs if not hasattr(ac, 'count_segregating'): ac = AlleleCountsArray(ac, copy=False) # deal with subregion if pos is not None and (start is not None or stop is not None): if not isinstance(pos, SortedIndex): pos = SortedIndex(pos, copy=False) loc = pos.locate_range(start, stop) ac = ac[loc] # count segregating variants S = ac.count_segregating() if S < min_sites: return np.nan # assume number of chromosomes sampled is constant for all variants n = ac.sum(axis=1).max() # (n-1)th harmonic number a1 = np.sum(1 / np.arange(1, n)) # calculate Watterson's theta (absolute value) theta_hat_w_abs = S / a1 # calculate mean pairwise difference mpd = mean_pairwise_difference(ac, fill=0) # calculate theta_hat pi (sum differences over variants) theta_hat_pi_abs = np.sum(mpd) # N.B., both theta estimates are usually divided by the number of # (accessible) bases but here we want the absolute difference d = theta_hat_pi_abs - theta_hat_w_abs # calculate the denominator (standard deviation) a2 = np.sum(1 / (np.arange(1, n)**2)) b1 = (n + 1) / (3 * (n - 1)) b2 = 2 * (n**2 + n + 3) / (9 * n * (n - 1)) c1 = b1 - (1 / a1) c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1**2)) e1 = c1 / a1 e2 = c2 / (a1**2 + a2) d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1))) # finally calculate Tajima's D D = d / d_stdev return D
['def', 'tajima_d', '(', 'ac', ',', 'pos', '=', 'None', ',', 'start', '=', 'None', ',', 'stop', '=', 'None', ',', 'min_sites', '=', '3', ')', ':', '# check inputs', 'if', 'not', 'hasattr', '(', 'ac', ',', "'count_segregating'", ')', ':', 'ac', '=', 'AlleleCountsArray', '(', 'ac', ',', 'copy', '=', 'False', ')', '# deal with subregion', 'if', 'pos', 'is', 'not', 'None', 'and', '(', 'start', 'is', 'not', 'None', 'or', 'stop', 'is', 'not', 'None', ')', ':', 'if', 'not', 'isinstance', '(', 'pos', ',', 'SortedIndex', ')', ':', 'pos', '=', 'SortedIndex', '(', 'pos', ',', 'copy', '=', 'False', ')', 'loc', '=', 'pos', '.', 'locate_range', '(', 'start', ',', 'stop', ')', 'ac', '=', 'ac', '[', 'loc', ']', '# count segregating variants', 'S', '=', 'ac', '.', 'count_segregating', '(', ')', 'if', 'S', '<', 'min_sites', ':', 'return', 'np', '.', 'nan', '# assume number of chromosomes sampled is constant for all variants', 'n', '=', 'ac', '.', 'sum', '(', 'axis', '=', '1', ')', '.', 'max', '(', ')', '# (n-1)th harmonic number', 'a1', '=', 'np', '.', 'sum', '(', '1', '/', 'np', '.', 'arange', '(', '1', ',', 'n', ')', ')', "# calculate Watterson's theta (absolute value)", 'theta_hat_w_abs', '=', 'S', '/', 'a1', '# calculate mean pairwise difference', 'mpd', '=', 'mean_pairwise_difference', '(', 'ac', ',', 'fill', '=', '0', ')', '# calculate theta_hat pi (sum differences over variants)', 'theta_hat_pi_abs', '=', 'np', '.', 'sum', '(', 'mpd', ')', '# N.B., both theta estimates are usually divided by the number of', '# (accessible) bases but here we want the absolute difference', 'd', '=', 'theta_hat_pi_abs', '-', 'theta_hat_w_abs', '# calculate the denominator (standard deviation)', 'a2', '=', 'np', '.', 'sum', '(', '1', '/', '(', 'np', '.', 'arange', '(', '1', ',', 'n', ')', '**', '2', ')', ')', 'b1', '=', '(', 'n', '+', '1', ')', '/', '(', '3', '*', '(', 'n', '-', '1', ')', ')', 'b2', '=', '2', '*', '(', 'n', '**', '2', '+', 'n', '+', '3', ')', '/', '(', '9', '*', 'n', '*', '(', 'n', '-', '1', ')', ')', 'c1', '=', 'b1', '-', '(', '1', '/', 'a1', ')', 'c2', '=', 'b2', '-', '(', '(', 'n', '+', '2', ')', '/', '(', 'a1', '*', 'n', ')', ')', '+', '(', 'a2', '/', '(', 'a1', '**', '2', ')', ')', 'e1', '=', 'c1', '/', 'a1', 'e2', '=', 'c2', '/', '(', 'a1', '**', '2', '+', 'a2', ')', 'd_stdev', '=', 'np', '.', 'sqrt', '(', '(', 'e1', '*', 'S', ')', '+', '(', 'e2', '*', 'S', '*', '(', 'S', '-', '1', ')', ')', ')', "# finally calculate Tajima's D", 'D', '=', 'd', '/', 'd_stdev', 'return', 'D']
Calculate the value of Tajima's D over a given region. Parameters ---------- ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. pos : array_like, int, shape (n_items,), optional Variant positions, using 1-based coordinates, in ascending order. start : int, optional The position at which to start (1-based). Defaults to the first position. stop : int, optional The position at which to stop (1-based). Defaults to the last position. min_sites : int, optional Minimum number of segregating sites for which to calculate a value. If there are fewer, np.nan is returned. Defaults to 3. Returns ------- D : float Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 0], [1, 1]], ... [[0, 1], [1, 1]], ... [[1, 1], [1, 1]], ... [[0, 0], [1, 2]], ... [[0, 1], [1, 2]], ... [[0, 1], [-1, -1]], ... [[-1, -1], [-1, -1]]]) >>> ac = g.count_alleles() >>> allel.tajima_d(ac) 3.1445848780213814 >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27] >>> allel.tajima_d(ac, pos=pos, start=7, stop=25) 3.8779735196179366
['Calculate', 'the', 'value', 'of', 'Tajima', 's', 'D', 'over', 'a', 'given', 'region', '.']
train
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L863-L954
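The denominator of Tajima's D in the function above is built from a fixed set of constants that depend only on the sample size n and the number of segregating sites S. A standalone sketch of just that part, using the same formulas as the function body, with example values for n and S::

    import numpy as np

    def tajima_d_denominator(n, S):
        # same constants as in the function body above
        a1 = np.sum(1 / np.arange(1, n))
        a2 = np.sum(1 / (np.arange(1, n) ** 2))
        b1 = (n + 1) / (3 * (n - 1))
        b2 = 2 * (n ** 2 + n + 3) / (9 * n * (n - 1))
        c1 = b1 - (1 / a1)
        c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1 ** 2))
        e1 = c1 / a1
        e2 = c2 / (a1 ** 2 + a2)
        return np.sqrt((e1 * S) + (e2 * S * (S - 1)))

    # n = 4 chromosomes sampled, S = 7 segregating sites (example values)
    print(tajima_d_denominator(4, 7))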
9,728
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
EventListener.clean_by_request
def clean_by_request(self, request): ''' Remove all futures that were waiting for request `request` since it is done waiting ''' if request not in self.request_map: return for tag, matcher, future in self.request_map[request]: # timeout the future self._timeout_future(tag, matcher, future) # remove the timeout if future in self.timeout_map: tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) del self.timeout_map[future] del self.request_map[request]
python
def clean_by_request(self, request): ''' Remove all futures that were waiting for request `request` since it is done waiting ''' if request not in self.request_map: return for tag, matcher, future in self.request_map[request]: # timeout the future self._timeout_future(tag, matcher, future) # remove the timeout if future in self.timeout_map: tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) del self.timeout_map[future] del self.request_map[request]
['def', 'clean_by_request', '(', 'self', ',', 'request', ')', ':', 'if', 'request', 'not', 'in', 'self', '.', 'request_map', ':', 'return', 'for', 'tag', ',', 'matcher', ',', 'future', 'in', 'self', '.', 'request_map', '[', 'request', ']', ':', '# timeout the future', 'self', '.', '_timeout_future', '(', 'tag', ',', 'matcher', ',', 'future', ')', '# remove the timeout', 'if', 'future', 'in', 'self', '.', 'timeout_map', ':', 'tornado', '.', 'ioloop', '.', 'IOLoop', '.', 'current', '(', ')', '.', 'remove_timeout', '(', 'self', '.', 'timeout_map', '[', 'future', ']', ')', 'del', 'self', '.', 'timeout_map', '[', 'future', ']', 'del', 'self', '.', 'request_map', '[', 'request', ']']
Remove all futures that were waiting for request `request` since it is done waiting
['Remove', 'all', 'futures', 'that', 'were', 'waiting', 'for', 'request', 'request', 'since', 'it', 'is', 'done', 'waiting']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L302-L316
9,729
sorgerlab/indra
indra/explanation/model_checker.py
_get_signed_predecessors
def _get_signed_predecessors(im, node, polarity): """Get upstream nodes in the influence map. Return the upstream nodes along with the overall polarity of the path to that node by accounting for the polarity of the path to the given node and the polarity of the edge between the given node and its immediate predecessors. Parameters ---------- im : networkx.MultiDiGraph Graph containing the influence map. node : str The node (rule name) in the influence map to get predecessors (upstream nodes) for. polarity : int Polarity of the overall path to the given node. Returns ------- generator of tuples, (node, polarity) Each tuple returned contains two elements, a node (string) and the polarity of the overall path (int) to that node. """ signed_pred_list = [] for pred in im.predecessors(node): pred_edge = (pred, node) yield (pred, _get_edge_sign(im, pred_edge) * polarity)
python
def _get_signed_predecessors(im, node, polarity): """Get upstream nodes in the influence map. Return the upstream nodes along with the overall polarity of the path to that node by accounting for the polarity of the path to the given node and the polarity of the edge between the given node and its immediate predecessors. Parameters ---------- im : networkx.MultiDiGraph Graph containing the influence map. node : str The node (rule name) in the influence map to get predecessors (upstream nodes) for. polarity : int Polarity of the overall path to the given node. Returns ------- generator of tuples, (node, polarity) Each tuple returned contains two elements, a node (string) and the polarity of the overall path (int) to that node. """ signed_pred_list = [] for pred in im.predecessors(node): pred_edge = (pred, node) yield (pred, _get_edge_sign(im, pred_edge) * polarity)
['def', '_get_signed_predecessors', '(', 'im', ',', 'node', ',', 'polarity', ')', ':', 'signed_pred_list', '=', '[', ']', 'for', 'pred', 'in', 'im', '.', 'predecessors', '(', 'node', ')', ':', 'pred_edge', '=', '(', 'pred', ',', 'node', ')', 'yield', '(', 'pred', ',', '_get_edge_sign', '(', 'im', ',', 'pred_edge', ')', '*', 'polarity', ')']
Get upstream nodes in the influence map. Return the upstream nodes along with the overall polarity of the path to that node by accounting for the polarity of the path to the given node and the polarity of the edge between the given node and its immediate predecessors. Parameters ---------- im : networkx.MultiDiGraph Graph containing the influence map. node : str The node (rule name) in the influence map to get predecessors (upstream nodes) for. polarity : int Polarity of the overall path to the given node. Returns ------- generator of tuples, (node, polarity) Each tuple returned contains two elements, a node (string) and the polarity of the overall path (int) to that node.
['Get', 'upstream', 'nodes', 'in', 'the', 'influence', 'map', '.']
train
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L1009-L1037
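A toy illustration of the signed-predecessor traversal above on a small networkx MultiDiGraph. The record does not include _get_edge_sign, so this sketch assumes the sign lives in a 'sign' edge attribute (+1 activation, -1 inhibition), which may not match the INDRA implementation::

    import networkx as nx

    im = nx.MultiDiGraph()
    im.add_edge('A', 'C', sign=1)    # A activates C
    im.add_edge('B', 'C', sign=-1)   # B inhibits C

    def signed_predecessors(im, node, polarity):
        for pred in im.predecessors(node):
            # take the sign of the first parallel edge for simplicity
            edge_sign = list(im[pred][node].values())[0]['sign']
            yield pred, edge_sign * polarity

    print(list(signed_predecessors(im, 'C', 1)))    # [('A', 1), ('B', -1)]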
9,730
bukun/TorCMS
torcms/script/autocrud/fetch_html_dic.py
__get_switch_arr
def __get_switch_arr(work_sheet, row_num): ''' if the value of the column of the row is `1`, it will be added to the array. ''' u_dic = [] for col_idx in FILTER_COLUMNS: cell_val = work_sheet['{0}{1}'.format(col_idx, row_num)].value if cell_val in [1, '1']: # Appending the slug name of the switcher. u_dic.append(work_sheet['{0}1'.format(col_idx)].value.strip().split(',')[0]) return u_dic
python
def __get_switch_arr(work_sheet, row_num): ''' if the value of the column of the row is `1`, it will be added to the array. ''' u_dic = [] for col_idx in FILTER_COLUMNS: cell_val = work_sheet['{0}{1}'.format(col_idx, row_num)].value if cell_val in [1, '1']: # Appending the slug name of the switcher. u_dic.append(work_sheet['{0}1'.format(col_idx)].value.strip().split(',')[0]) return u_dic
['def', '__get_switch_arr', '(', 'work_sheet', ',', 'row_num', ')', ':', 'u_dic', '=', '[', ']', 'for', 'col_idx', 'in', 'FILTER_COLUMNS', ':', 'cell_val', '=', 'work_sheet', '[', "'{0}{1}'", '.', 'format', '(', 'col_idx', ',', 'row_num', ')', ']', '.', 'value', 'if', 'cell_val', 'in', '[', '1', ',', "'1'", ']', ':', '# Appending the slug name of the switcher.', 'u_dic', '.', 'append', '(', 'work_sheet', '[', "'{0}1'", '.', 'format', '(', 'col_idx', ')', ']', '.', 'value', '.', 'strip', '(', ')', '.', 'split', '(', "','", ')', '[', '0', ']', ')', 'return', 'u_dic']
if the value of the column of the row is `1`, it will be added to the array.
['if', 'the', 'value', 'of', 'the', 'column', 'of', 'the', 'row', 'is', '1', 'it', 'will', 'be', 'added', 'to', 'the', 'array', '.']
train
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/script/autocrud/fetch_html_dic.py#L135-L146
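The helper above uses openpyxl-style cell addressing (work_sheet['C2'].value) together with a module-level FILTER_COLUMNS constant that is not part of this record. A self-contained sketch with assumed column letters and an assumed workbook file name::

    from openpyxl import load_workbook

    FILTER_COLUMNS = ['C', 'D', 'E']        # assumed switch columns

    wb = load_workbook('crud_dic.xlsx')     # assumed file name
    ws = wb.active

    def switch_slugs(ws, row_num):
        slugs = []
        for col in FILTER_COLUMNS:
            if ws['{0}{1}'.format(col, row_num)].value in [1, '1']:
                # row 1 holds the header; its slug is the part before the comma
                slugs.append(ws['{0}1'.format(col)].value.strip().split(',')[0])
        return slugs

    print(switch_slugs(ws, 2))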
9,731
hydraplatform/hydra-base
hydra_base/db/model.py
Network.add_node
def add_node(self, name, desc, layout, node_x, node_y): """ Add a node to a network. """ existing_node = get_session().query(Node).filter(Node.name==name, Node.network_id==self.id).first() if existing_node is not None: raise HydraError("A node with name %s is already in network %s"%(name, self.id)) node = Node() node.name = name node.description = desc node.layout = str(layout) if layout is not None else None node.x = node_x node.y = node_y #Do not call save here because it is likely that we may want #to bulk insert nodes, not one at a time. get_session().add(node) self.nodes.append(node) return node
python
def add_node(self, name, desc, layout, node_x, node_y): """ Add a node to a network. """ existing_node = get_session().query(Node).filter(Node.name==name, Node.network_id==self.id).first() if existing_node is not None: raise HydraError("A node with name %s is already in network %s"%(name, self.id)) node = Node() node.name = name node.description = desc node.layout = str(layout) if layout is not None else None node.x = node_x node.y = node_y #Do not call save here because it is likely that we may want #to bulk insert nodes, not one at a time. get_session().add(node) self.nodes.append(node) return node
['def', 'add_node', '(', 'self', ',', 'name', ',', 'desc', ',', 'layout', ',', 'node_x', ',', 'node_y', ')', ':', 'existing_node', '=', 'get_session', '(', ')', '.', 'query', '(', 'Node', ')', '.', 'filter', '(', 'Node', '.', 'name', '==', 'name', ',', 'Node', '.', 'network_id', '==', 'self', '.', 'id', ')', '.', 'first', '(', ')', 'if', 'existing_node', 'is', 'not', 'None', ':', 'raise', 'HydraError', '(', '"A node with name %s is already in network %s"', '%', '(', 'name', ',', 'self', '.', 'id', ')', ')', 'node', '=', 'Node', '(', ')', 'node', '.', 'name', '=', 'name', 'node', '.', 'description', '=', 'desc', 'node', '.', 'layout', '=', 'str', '(', 'layout', ')', 'if', 'layout', 'is', 'not', 'None', 'else', 'None', 'node', '.', 'x', '=', 'node_x', 'node', '.', 'y', '=', 'node_y', '#Do not call save here because it is likely that we may want', '#to bulk insert nodes, not one at a time.', 'get_session', '(', ')', '.', 'add', '(', 'node', ')', 'self', '.', 'nodes', '.', 'append', '(', 'node', ')', 'return', 'node']
Add a node to a network.
['Add', 'a', 'node', 'to', 'a', 'network', '.']
train
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/db/model.py#L885-L907
9,732
hermanschaaf/mafan
mafan/encoding.py
detect
def detect(filename, include_confidence=False): """ Detect the encoding of a file. Returns only the predicted current encoding as a string. If `include_confidence` is True, Returns tuple containing: (str encoding, float confidence) """ f = open(filename) detection = chardet.detect(f.read()) f.close() encoding = detection.get('encoding') confidence = detection.get('confidence') if include_confidence: return (encoding, confidence) return encoding
python
def detect(filename, include_confidence=False): """ Detect the encoding of a file. Returns only the predicted current encoding as a string. If `include_confidence` is True, Returns tuple containing: (str encoding, float confidence) """ f = open(filename) detection = chardet.detect(f.read()) f.close() encoding = detection.get('encoding') confidence = detection.get('confidence') if include_confidence: return (encoding, confidence) return encoding
['def', 'detect', '(', 'filename', ',', 'include_confidence', '=', 'False', ')', ':', 'f', '=', 'open', '(', 'filename', ')', 'detection', '=', 'chardet', '.', 'detect', '(', 'f', '.', 'read', '(', ')', ')', 'f', '.', 'close', '(', ')', 'encoding', '=', 'detection', '.', 'get', '(', "'encoding'", ')', 'confidence', '=', 'detection', '.', 'get', '(', "'confidence'", ')', 'if', 'include_confidence', ':', 'return', '(', 'encoding', ',', 'confidence', ')', 'return', 'encoding']
Detect the encoding of a file. Returns only the predicted current encoding as a string. If `include_confidence` is True, Returns tuple containing: (str encoding, float confidence)
['Detect', 'the', 'encoding', 'of', 'a', 'file', '.']
train
https://github.com/hermanschaaf/mafan/blob/373ddf299aeb2bd8413bf921c71768af7a8170ea/mafan/encoding.py#L67-L83
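The same detection can be done with chardet directly; opening the file in binary mode also keeps the call working on Python 3, where chardet expects bytes. The file name is a placeholder::

    import chardet

    with open('unknown.txt', 'rb') as f:      # placeholder file name
        detection = chardet.detect(f.read())

    print(detection['encoding'], detection['confidence'])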
9,733
tensorpack/tensorpack
tensorpack/graph_builder/training.py
AsyncMultiGPUBuilder.call_for_each_tower
def call_for_each_tower(self, tower_fn): """ Call the function `tower_fn` under :class:`TowerContext` for each tower. Returns: a list, contains the return values of `tower_fn` on each tower. """ ps_device = 'cpu' if len(self.towers) >= 4 else 'gpu' raw_devices = ['/gpu:{}'.format(k) for k in self.towers] if ps_device == 'gpu': devices = [LeastLoadedDeviceSetter(d, raw_devices) for d in raw_devices] else: devices = [tf.train.replica_device_setter( worker_device=d, ps_device='/cpu:0', ps_tasks=1) for d in raw_devices] return DataParallelBuilder.build_on_towers(self.towers, tower_fn, devices)
python
def call_for_each_tower(self, tower_fn): """ Call the function `tower_fn` under :class:`TowerContext` for each tower. Returns: a list, contains the return values of `tower_fn` on each tower. """ ps_device = 'cpu' if len(self.towers) >= 4 else 'gpu' raw_devices = ['/gpu:{}'.format(k) for k in self.towers] if ps_device == 'gpu': devices = [LeastLoadedDeviceSetter(d, raw_devices) for d in raw_devices] else: devices = [tf.train.replica_device_setter( worker_device=d, ps_device='/cpu:0', ps_tasks=1) for d in raw_devices] return DataParallelBuilder.build_on_towers(self.towers, tower_fn, devices)
['def', 'call_for_each_tower', '(', 'self', ',', 'tower_fn', ')', ':', 'ps_device', '=', "'cpu'", 'if', 'len', '(', 'self', '.', 'towers', ')', '>=', '4', 'else', "'gpu'", 'raw_devices', '=', '[', "'/gpu:{}'", '.', 'format', '(', 'k', ')', 'for', 'k', 'in', 'self', '.', 'towers', ']', 'if', 'ps_device', '==', "'gpu'", ':', 'devices', '=', '[', 'LeastLoadedDeviceSetter', '(', 'd', ',', 'raw_devices', ')', 'for', 'd', 'in', 'raw_devices', ']', 'else', ':', 'devices', '=', '[', 'tf', '.', 'train', '.', 'replica_device_setter', '(', 'worker_device', '=', 'd', ',', 'ps_device', '=', "'/cpu:0'", ',', 'ps_tasks', '=', '1', ')', 'for', 'd', 'in', 'raw_devices', ']', 'return', 'DataParallelBuilder', '.', 'build_on_towers', '(', 'self', '.', 'towers', ',', 'tower_fn', ',', 'devices', ')']
Call the function `tower_fn` under :class:`TowerContext` for each tower. Returns: a list, contains the return values of `tower_fn` on each tower.
['Call', 'the', 'function', 'tower_fn', 'under', ':', 'class', ':', 'TowerContext', 'for', 'each', 'tower', '.']
train
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/training.py#L369-L385
9,734
openatx/facebook-wda
wda/__init__.py
Client.source
def source(self, format='xml', accessible=False): """ Args: format (str): only 'xml' and 'json' source types are supported accessible (bool): when set to true, format is always 'json' """ if accessible: return self.http.get('/wda/accessibleSource').value return self.http.get('source?format='+format).value
python
def source(self, format='xml', accessible=False): """ Args: format (str): only 'xml' and 'json' source types are supported accessible (bool): when set to true, format is always 'json' """ if accessible: return self.http.get('/wda/accessibleSource').value return self.http.get('source?format='+format).value
['def', 'source', '(', 'self', ',', 'format', '=', "'xml'", ',', 'accessible', '=', 'False', ')', ':', 'if', 'accessible', ':', 'return', 'self', '.', 'http', '.', 'get', '(', "'/wda/accessibleSource'", ')', '.', 'value', 'return', 'self', '.', 'http', '.', 'get', '(', "'source?format='", '+', 'format', ')', '.', 'value']
Args: format (str): only 'xml' and 'json' source types are supported accessible (bool): when set to true, format is always 'json'
['Args', ':', 'format', '(', 'str', ')', ':', 'only', 'xml', 'and', 'json', 'source', 'types', 'are', 'supported', 'accessible', '(', 'bool', ')', ':', 'when', 'set', 'to', 'true', 'format', 'is', 'always', 'json']
train
https://github.com/openatx/facebook-wda/blob/aa644204620c6d5c7705a9c7452d8c0cc39330d5/wda/__init__.py#L229-L237
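A hedged usage sketch for the method above, assuming a running WebDriverAgent server on the usual port and that wda.Client takes the server URL (as documented in the facebook-wda README)::

    import wda

    c = wda.Client('http://localhost:8100')   # assumed WDA endpoint
    xml_source = c.source()                   # XML page source as a string
    json_source = c.source(accessible=True)   # accessibility tree
    print(xml_source[:200])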
9,735
raghakot/keras-vis
vis/utils/utils.py
find_layer_idx
def find_layer_idx(model, layer_name): """Looks up the layer index corresponding to `layer_name` from `model`. Args: model: The `keras.models.Model` instance. layer_name: The name of the layer to lookup. Returns: The layer index if found. Raises an exception otherwise. """ layer_idx = None for idx, layer in enumerate(model.layers): if layer.name == layer_name: layer_idx = idx break if layer_idx is None: raise ValueError("No layer with name '{}' within the model".format(layer_name)) return layer_idx
python
def find_layer_idx(model, layer_name): """Looks up the layer index corresponding to `layer_name` from `model`. Args: model: The `keras.models.Model` instance. layer_name: The name of the layer to lookup. Returns: The layer index if found. Raises an exception otherwise. """ layer_idx = None for idx, layer in enumerate(model.layers): if layer.name == layer_name: layer_idx = idx break if layer_idx is None: raise ValueError("No layer with name '{}' within the model".format(layer_name)) return layer_idx
['def', 'find_layer_idx', '(', 'model', ',', 'layer_name', ')', ':', 'layer_idx', '=', 'None', 'for', 'idx', ',', 'layer', 'in', 'enumerate', '(', 'model', '.', 'layers', ')', ':', 'if', 'layer', '.', 'name', '==', 'layer_name', ':', 'layer_idx', '=', 'idx', 'break', 'if', 'layer_idx', 'is', 'None', ':', 'raise', 'ValueError', '(', '"No layer with name \'{}\' within the model"', '.', 'format', '(', 'layer_name', ')', ')', 'return', 'layer_idx']
Looks up the layer index corresponding to `layer_name` from `model`. Args: model: The `keras.models.Model` instance. layer_name: The name of the layer to lookup. Returns: The layer index if found. Raises an exception otherwise.
['Looks', 'up', 'the', 'layer', 'index', 'corresponding', 'to', 'layer_name', 'from', 'model', '.']
train
https://github.com/raghakot/keras-vis/blob/668b0e11dab93f3487f23c17e07f40554a8939e9/vis/utils/utils.py#L136-L154
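The lookup above only relies on the entries of model.layers exposing a .name attribute, so a tiny stand-in model is enough to exercise the logic without Keras installed; with a real keras.models.Model you would pass the model itself and one of its layer names::

    from collections import namedtuple

    Layer = namedtuple('Layer', 'name')
    Model = namedtuple('Model', 'layers')

    model = Model(layers=[Layer('input_1'), Layer('conv2d_1'), Layer('predictions')])

    def find_layer_idx(model, layer_name):
        for idx, layer in enumerate(model.layers):
            if layer.name == layer_name:
                return idx
        raise ValueError("No layer with name '{}' within the model".format(layer_name))

    print(find_layer_idx(model, 'predictions'))   # 2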
9,736
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
FabricBase.create_dcnm_in_nwk
def create_dcnm_in_nwk(self, tenant_id, fw_dict, is_fw_virt=False): """Create the DCNM In Network and store the result in DB. """ tenant_name = fw_dict.get('tenant_name') ret = self._create_service_nwk(tenant_id, tenant_name, 'in') if ret: res = fw_const.DCNM_IN_NETWORK_CREATE_SUCCESS LOG.info("In Service network created for tenant %s", tenant_id) else: res = fw_const.DCNM_IN_NETWORK_CREATE_FAIL LOG.info("In Service network create failed for tenant %s", tenant_id) self.update_fw_db_result(tenant_id, dcnm_status=res) return ret
python
def create_dcnm_in_nwk(self, tenant_id, fw_dict, is_fw_virt=False): """Create the DCNM In Network and store the result in DB. """ tenant_name = fw_dict.get('tenant_name') ret = self._create_service_nwk(tenant_id, tenant_name, 'in') if ret: res = fw_const.DCNM_IN_NETWORK_CREATE_SUCCESS LOG.info("In Service network created for tenant %s", tenant_id) else: res = fw_const.DCNM_IN_NETWORK_CREATE_FAIL LOG.info("In Service network create failed for tenant %s", tenant_id) self.update_fw_db_result(tenant_id, dcnm_status=res) return ret
['def', 'create_dcnm_in_nwk', '(', 'self', ',', 'tenant_id', ',', 'fw_dict', ',', 'is_fw_virt', '=', 'False', ')', ':', 'tenant_name', '=', 'fw_dict', '.', 'get', '(', "'tenant_name'", ')', 'ret', '=', 'self', '.', '_create_service_nwk', '(', 'tenant_id', ',', 'tenant_name', ',', "'in'", ')', 'if', 'ret', ':', 'res', '=', 'fw_const', '.', 'DCNM_IN_NETWORK_CREATE_SUCCESS', 'LOG', '.', 'info', '(', '"In Service network created for tenant %s"', ',', 'tenant_id', ')', 'else', ':', 'res', '=', 'fw_const', '.', 'DCNM_IN_NETWORK_CREATE_FAIL', 'LOG', '.', 'info', '(', '"In Service network create failed for tenant %s"', ',', 'tenant_id', ')', 'self', '.', 'update_fw_db_result', '(', 'tenant_id', ',', 'dcnm_status', '=', 'res', ')', 'return', 'ret']
Create the DCNM In Network and store the result in DB.
['Create', 'the', 'DCNM', 'In', 'Network', 'and', 'store', 'the', 'result', 'in', 'DB', '.']
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L1289-L1302
9,737
googleapis/dialogflow-python-client-v2
samples/knowledge_base_management.py
list_knowledge_bases
def list_knowledge_bases(project_id): """Lists the Knowledge bases belonging to a project. Args: project_id: The GCP project linked with the agent.""" import dialogflow_v2beta1 as dialogflow client = dialogflow.KnowledgeBasesClient() project_path = client.project_path(project_id) print('Knowledge Bases for: {}'.format(project_id)) for knowledge_base in client.list_knowledge_bases(project_path): print(' - Display Name: {}'.format(knowledge_base.display_name)) print(' - Knowledge ID: {}\n'.format(knowledge_base.name))
python
def list_knowledge_bases(project_id): """Lists the Knowledge bases belonging to a project. Args: project_id: The GCP project linked with the agent.""" import dialogflow_v2beta1 as dialogflow client = dialogflow.KnowledgeBasesClient() project_path = client.project_path(project_id) print('Knowledge Bases for: {}'.format(project_id)) for knowledge_base in client.list_knowledge_bases(project_path): print(' - Display Name: {}'.format(knowledge_base.display_name)) print(' - Knowledge ID: {}\n'.format(knowledge_base.name))
['def', 'list_knowledge_bases', '(', 'project_id', ')', ':', 'import', 'dialogflow_v2beta1', 'as', 'dialogflow', 'client', '=', 'dialogflow', '.', 'KnowledgeBasesClient', '(', ')', 'project_path', '=', 'client', '.', 'project_path', '(', 'project_id', ')', 'print', '(', "'Knowledge Bases for: {}'", '.', 'format', '(', 'project_id', ')', ')', 'for', 'knowledge_base', 'in', 'client', '.', 'list_knowledge_bases', '(', 'project_path', ')', ':', 'print', '(', "' - Display Name: {}'", '.', 'format', '(', 'knowledge_base', '.', 'display_name', ')', ')', 'print', '(', "' - Knowledge ID: {}\\n'", '.', 'format', '(', 'knowledge_base', '.', 'name', ')', ')']
Lists the Knowledge bases belonging to a project. Args: project_id: The GCP project linked with the agent.
['Lists', 'the', 'Knowledge', 'bases', 'belonging', 'to', 'a', 'project', '.']
train
https://github.com/googleapis/dialogflow-python-client-v2/blob/8c9c8709222efe427b76c9c8fcc04a0c4a0760b5/samples/knowledge_base_management.py#L35-L47
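Calling the sample above requires the dialogflow_v2beta1 client library and Google application-default credentials to be configured; the project id below is a placeholder::

    # prints the display name and knowledge id of every knowledge base
    list_knowledge_bases('my-gcp-project-id')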
9,738
daviddrysdale/python-phonenumbers
python/phonenumbers/phonenumberutil.py
_is_viable_phone_number
def _is_viable_phone_number(number): """Checks to see if a string could possibly be a phone number. At the moment, checks to see that the string begins with at least 2 digits, ignoring any punctuation commonly found in phone numbers. This method does not require the number to be normalized in advance - but does assume that leading non-number symbols have been removed, such as by the method _extract_possible_number. Arguments: number -- string to be checked for viability as a phone number Returns True if the number could be a phone number of some sort, otherwise False """ if len(number) < _MIN_LENGTH_FOR_NSN: return False match = fullmatch(_VALID_PHONE_NUMBER_PATTERN, number) return bool(match)
python
def _is_viable_phone_number(number): """Checks to see if a string could possibly be a phone number. At the moment, checks to see that the string begins with at least 2 digits, ignoring any punctuation commonly found in phone numbers. This method does not require the number to be normalized in advance - but does assume that leading non-number symbols have been removed, such as by the method _extract_possible_number. Arguments: number -- string to be checked for viability as a phone number Returns True if the number could be a phone number of some sort, otherwise False """ if len(number) < _MIN_LENGTH_FOR_NSN: return False match = fullmatch(_VALID_PHONE_NUMBER_PATTERN, number) return bool(match)
['def', '_is_viable_phone_number', '(', 'number', ')', ':', 'if', 'len', '(', 'number', ')', '<', '_MIN_LENGTH_FOR_NSN', ':', 'return', 'False', 'match', '=', 'fullmatch', '(', '_VALID_PHONE_NUMBER_PATTERN', ',', 'number', ')', 'return', 'bool', '(', 'match', ')']
Checks to see if a string could possibly be a phone number. At the moment, checks to see that the string begins with at least 2 digits, ignoring any punctuation commonly found in phone numbers. This method does not require the number to be normalized in advance - but does assume that leading non-number symbols have been removed, such as by the method _extract_possible_number. Arguments: number -- string to be checked for viability as a phone number Returns True if the number could be a phone number of some sort, otherwise False
['Checks', 'to', 'see', 'if', 'a', 'string', 'could', 'possibly', 'be', 'a', 'phone', 'number', '.']
train
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L545-L563
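A simplified stand-in for the viability check above: enforce the minimum length, then accept only digits plus common phone punctuation. The real _VALID_PHONE_NUMBER_PATTERN in phonenumbers is considerably more permissive (letters for vanity numbers, extensions, and so on), so this is only an approximation::

    import re

    _MIN_LENGTH_FOR_NSN = 2
    _SIMPLE_PATTERN = re.compile(r'^[0-9+\-().\s/]*[0-9][0-9+\-().\s/]*$')

    def is_viable(number):
        if len(number) < _MIN_LENGTH_FOR_NSN:
            return False
        return bool(_SIMPLE_PATTERN.match(number))

    print(is_viable('020 8366 1177'))   # True
    print(is_viable('1'))               # False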
9,739
welbornprod/colr
colr/progress_frames.py
BarSet.with_wrapper
def with_wrapper(self, wrapper=None, name=None): """ Copy this BarSet, and return a new BarSet with the specified name and wrapper. If no name is given, `{self.name}_custom_wrapper` is used. If no wrapper is given, the new BarSet will have no wrapper. """ name = name or '{}_custom_wrapper'.format(self.name) return self.__class__(self.data, name=name, wrapper=wrapper)
python
def with_wrapper(self, wrapper=None, name=None): """ Copy this BarSet, and return a new BarSet with the specified name and wrapper. If no name is given, `{self.name}_custom_wrapper` is used. If no wrapper is given, the new BarSet will have no wrapper. """ name = name or '{}_custom_wrapper'.format(self.name) return self.__class__(self.data, name=name, wrapper=wrapper)
['def', 'with_wrapper', '(', 'self', ',', 'wrapper', '=', 'None', ',', 'name', '=', 'None', ')', ':', 'name', '=', 'name', 'or', "'{}_custom_wrapper'", '.', 'format', '(', 'self', '.', 'name', ')', 'return', 'self', '.', '__class__', '(', 'self', '.', 'data', ',', 'name', '=', 'name', ',', 'wrapper', '=', 'wrapper', ')']
Copy this BarSet, and return a new BarSet with the specified name and wrapper. If no name is given, `{self.name}_custom_wrapper` is used. If no wrapper is given, the new BarSet will have no wrapper.
['Copy', 'this', 'BarSet', 'and', 'return', 'a', 'new', 'BarSet', 'with', 'the', 'specified', 'name', 'and', 'wrapper', '.', 'If', 'no', 'name', 'is', 'given', '{', 'self', '.', 'name', '}', '_custom_wrapper', 'is', 'used', '.', 'If', 'no', 'wrapper', 'is', 'given', 'the', 'new', 'BarSet', 'will', 'have', 'no', 'wrapper', '.']
train
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/progress_frames.py#L599-L606
9,740
yyuu/botornado
boto/mturk/connection.py
MTurkConnection._process_response
def _process_response(self, response, marker_elems=None): """ Helper to process the xml response from AWS """ body = response.read() #print body if '<Errors>' not in body: rs = ResultSet(marker_elems) h = handler.XmlHandler(rs, self) xml.sax.parseString(body, h) return rs else: raise MTurkRequestError(response.status, response.reason, body)
python
def _process_response(self, response, marker_elems=None): """ Helper to process the xml response from AWS """ body = response.read() #print body if '<Errors>' not in body: rs = ResultSet(marker_elems) h = handler.XmlHandler(rs, self) xml.sax.parseString(body, h) return rs else: raise MTurkRequestError(response.status, response.reason, body)
['def', '_process_response', '(', 'self', ',', 'response', ',', 'marker_elems', '=', 'None', ')', ':', 'body', '=', 'response', '.', 'read', '(', ')', '#print body', 'if', "'<Errors>'", 'not', 'in', 'body', ':', 'rs', '=', 'ResultSet', '(', 'marker_elems', ')', 'h', '=', 'handler', '.', 'XmlHandler', '(', 'rs', ',', 'self', ')', 'xml', '.', 'sax', '.', 'parseString', '(', 'body', ',', 'h', ')', 'return', 'rs', 'else', ':', 'raise', 'MTurkRequestError', '(', 'response', '.', 'status', ',', 'response', '.', 'reason', ',', 'body', ')']
Helper to process the xml response from AWS
['Helper', 'to', 'process', 'the', 'xml', 'response', 'from', 'AWS']
train
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/mturk/connection.py#L734-L746
9,741
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/vcenter.py
Vcenter.add_vcenter
def add_vcenter(self, **kwargs): """ Add vCenter on the switch Args: id(str) : Name of an established vCenter url (bool) : vCenter URL username (str): Username of the vCenter password (str): Password of the vCenter callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None """ config = ET.Element("config") vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch") id = ET.SubElement(vcenter, "id") id.text = kwargs.pop('id') credentials = ET.SubElement(vcenter, "credentials") url = ET.SubElement(credentials, "url") url.text = kwargs.pop('url') username = ET.SubElement(credentials, "username") username.text = kwargs.pop('username') password = ET.SubElement(credentials, "password") password.text = kwargs.pop('password') try: self._callback(config) return True except Exception as error: logging.error(error) return False
python
def add_vcenter(self, **kwargs): """ Add vCenter on the switch Args: id(str) : Name of an established vCenter url (bool) : vCenter URL username (str): Username of the vCenter password (str): Password of the vCenter callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None """ config = ET.Element("config") vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch") id = ET.SubElement(vcenter, "id") id.text = kwargs.pop('id') credentials = ET.SubElement(vcenter, "credentials") url = ET.SubElement(credentials, "url") url.text = kwargs.pop('url') username = ET.SubElement(credentials, "username") username.text = kwargs.pop('username') password = ET.SubElement(credentials, "password") password.text = kwargs.pop('password') try: self._callback(config) return True except Exception as error: logging.error(error) return False
['def', 'add_vcenter', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'vcenter', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"vcenter"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-vswitch"', ')', 'id', '=', 'ET', '.', 'SubElement', '(', 'vcenter', ',', '"id"', ')', 'id', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'id'", ')', 'credentials', '=', 'ET', '.', 'SubElement', '(', 'vcenter', ',', '"credentials"', ')', 'url', '=', 'ET', '.', 'SubElement', '(', 'credentials', ',', '"url"', ')', 'url', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'url'", ')', 'username', '=', 'ET', '.', 'SubElement', '(', 'credentials', ',', '"username"', ')', 'username', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'username'", ')', 'password', '=', 'ET', '.', 'SubElement', '(', 'credentials', ',', '"password"', ')', 'password', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'password'", ')', 'try', ':', 'self', '.', '_callback', '(', 'config', ')', 'return', 'True', 'except', 'Exception', 'as', 'error', ':', 'logging', '.', 'error', '(', 'error', ')', 'return', 'False']
Add vCenter on the switch Args: id(str) : Name of an established vCenter url (bool) : vCenter URL username (str): Username of the vCenter password (str): Password of the vCenter callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
['Add', 'vCenter', 'on', 'the', 'switch']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/vcenter.py#L40-L77
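The payload above is plain xml.etree.ElementTree construction. This standalone sketch builds the same structure with placeholder credentials and prints it instead of pushing it through the NETCONF callback::

    import xml.etree.ElementTree as ET

    config = ET.Element('config')
    vcenter = ET.SubElement(config, 'vcenter',
                            xmlns='urn:brocade.com:mgmt:brocade-vswitch')
    ET.SubElement(vcenter, 'id').text = 'vcenter1'                    # placeholder
    credentials = ET.SubElement(vcenter, 'credentials')
    ET.SubElement(credentials, 'url').text = 'https://vcenter.example.com'
    ET.SubElement(credentials, 'username').text = 'admin'
    ET.SubElement(credentials, 'password').text = 'secret'

    print(ET.tostring(config, encoding='unicode'))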
9,742
yangl1996/libpagure
libpagure/libpagure.py
Pagure.list_tags
def list_tags(self, pattern=None): """ List all tags made on this project. :param pattern: filters the starting letters of the return value :return: """ request_url = "{}tags".format(self.create_basic_url()) params = None if pattern: params = {'pattern': pattern} return_value = self._call_api(request_url, params=params) return return_value['tags']
python
def list_tags(self, pattern=None): """ List all tags made on this project. :param pattern: filters the starting letters of the return value :return: """ request_url = "{}tags".format(self.create_basic_url()) params = None if pattern: params = {'pattern': pattern} return_value = self._call_api(request_url, params=params) return return_value['tags']
['def', 'list_tags', '(', 'self', ',', 'pattern', '=', 'None', ')', ':', 'request_url', '=', '"{}tags"', '.', 'format', '(', 'self', '.', 'create_basic_url', '(', ')', ')', 'params', '=', 'None', 'if', 'pattern', ':', 'params', '=', '{', "'pattern'", ':', 'pattern', '}', 'return_value', '=', 'self', '.', '_call_api', '(', 'request_url', ',', 'params', '=', 'params', ')', 'return', 'return_value', '[', "'tags'", ']']
List all tags made on this project. :param pattern: filters the starting letters of the return value :return:
['List', 'all', 'tags', 'made', 'on', 'this', 'project', '.', ':', 'param', 'pattern', ':', 'filters', 'the', 'starting', 'letters', 'of', 'the', 'return', 'value', ':', 'return', ':']
train
https://github.com/yangl1996/libpagure/blob/dd96ed29142407463790c66ed321984a6ea7465a/libpagure/libpagure.py#L138-L151
9,743
WebarchivCZ/WA-KAT
src/wa_kat/connectors/seeder.py
_send_request
def _send_request(url_id, data=None, json=None, req_type=None): """ Send request to Seeder's API. Args: url_id (str): ID used as identification in Seeder. data (obj, default None): Optional parameter for data. json (obj, default None): Optional parameter for JSON body. req_type (fn, default None): Request method used to send/download the data. If none, `requests.get` is used. Returns: dict: Data from Seeder. """ url = settings.SEEDER_INFO_URL % url_id if not req_type: req_type = requests.get resp = req_type( url, data=data, json=json, timeout=settings.SEEDER_TIMEOUT, headers={ "User-Agent": settings.USER_AGENT, "Authorization": settings.SEEDER_TOKEN, } ) resp.raise_for_status() data = resp.json() return data
python
def _send_request(url_id, data=None, json=None, req_type=None): """ Send request to Seeder's API. Args: url_id (str): ID used as identification in Seeder. data (obj, default None): Optional parameter for data. json (obj, default None): Optional parameter for JSON body. req_type (fn, default None): Request method used to send/download the data. If none, `requests.get` is used. Returns: dict: Data from Seeder. """ url = settings.SEEDER_INFO_URL % url_id if not req_type: req_type = requests.get resp = req_type( url, data=data, json=json, timeout=settings.SEEDER_TIMEOUT, headers={ "User-Agent": settings.USER_AGENT, "Authorization": settings.SEEDER_TOKEN, } ) resp.raise_for_status() data = resp.json() return data
['def', '_send_request', '(', 'url_id', ',', 'data', '=', 'None', ',', 'json', '=', 'None', ',', 'req_type', '=', 'None', ')', ':', 'url', '=', 'settings', '.', 'SEEDER_INFO_URL', '%', 'url_id', 'if', 'not', 'req_type', ':', 'req_type', '=', 'requests', '.', 'get', 'resp', '=', 'req_type', '(', 'url', ',', 'data', '=', 'data', ',', 'json', '=', 'json', ',', 'timeout', '=', 'settings', '.', 'SEEDER_TIMEOUT', ',', 'headers', '=', '{', '"User-Agent"', ':', 'settings', '.', 'USER_AGENT', ',', '"Authorization"', ':', 'settings', '.', 'SEEDER_TOKEN', ',', '}', ')', 'resp', '.', 'raise_for_status', '(', ')', 'data', '=', 'resp', '.', 'json', '(', ')', 'return', 'data']
Send request to Seeder's API. Args: url_id (str): ID used as identification in Seeder. data (obj, default None): Optional parameter for data. json (obj, default None): Optional parameter for JSON body. req_type (fn, default None): Request method used to send/download the data. If none, `requests.get` is used. Returns: dict: Data from Seeder.
['Send', 'request', 'to', 'Seeder', 's', 'API', '.']
train
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/connectors/seeder.py#L123-L155
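An illustrative call to the private `_send_request` helper above; the `url_id` value is a placeholder, the import path is inferred from the file path in this record, and a real call needs valid `settings.SEEDER_*` configuration.

```python
# Illustrative only: _send_request() needs working SEEDER_* settings.
import requests
from wa_kat.connectors.seeder import _send_request

# Default GET against the Seeder info endpoint for a (placeholder) id.
info = _send_request("placeholder-url-id")

# The same helper reused with another HTTP verb and a JSON body.
updated = _send_request(
    "placeholder-url-id",
    json={"comment": "updated from WA-KAT"},
    req_type=requests.patch,
)
```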
9,744
bcbio/bcbio-nextgen
bcbio/heterogeneity/bubbletree.py
_is_possible_loh
def _is_possible_loh(rec, vcf_rec, params, somatic_info, use_status=False, max_normal_depth=None): """Check if the VCF record is a het in the normal with sufficient support. Only returns SNPs, since indels tend to have less precise frequency measurements. """ if _is_biallelic_snp(rec) and _passes_plus_germline(rec, use_status=use_status): stats = _tumor_normal_stats(rec, somatic_info, vcf_rec) depths = [tz.get_in([x, "depth"], stats) for x in ["normal", "tumor"]] depths = [d for d in depths if d is not None] normal_freq = tz.get_in(["normal", "freq"], stats) tumor_freq = tz.get_in(["tumor", "freq"], stats) if all([d > params["min_depth"] for d in depths]): if max_normal_depth and tz.get_in(["normal", "depth"], stats, 0) > max_normal_depth: return None if normal_freq is not None: if normal_freq >= params["min_freq"] and normal_freq <= params["max_freq"]: return stats elif (tumor_freq >= params["tumor_only"]["min_freq"] and tumor_freq <= params["tumor_only"]["max_freq"]): if (vcf_rec and not _has_population_germline(vcf_rec)) or is_population_germline(rec): return stats
python
def _is_possible_loh(rec, vcf_rec, params, somatic_info, use_status=False, max_normal_depth=None): """Check if the VCF record is a het in the normal with sufficient support. Only returns SNPs, since indels tend to have less precise frequency measurements. """ if _is_biallelic_snp(rec) and _passes_plus_germline(rec, use_status=use_status): stats = _tumor_normal_stats(rec, somatic_info, vcf_rec) depths = [tz.get_in([x, "depth"], stats) for x in ["normal", "tumor"]] depths = [d for d in depths if d is not None] normal_freq = tz.get_in(["normal", "freq"], stats) tumor_freq = tz.get_in(["tumor", "freq"], stats) if all([d > params["min_depth"] for d in depths]): if max_normal_depth and tz.get_in(["normal", "depth"], stats, 0) > max_normal_depth: return None if normal_freq is not None: if normal_freq >= params["min_freq"] and normal_freq <= params["max_freq"]: return stats elif (tumor_freq >= params["tumor_only"]["min_freq"] and tumor_freq <= params["tumor_only"]["max_freq"]): if (vcf_rec and not _has_population_germline(vcf_rec)) or is_population_germline(rec): return stats
['def', '_is_possible_loh', '(', 'rec', ',', 'vcf_rec', ',', 'params', ',', 'somatic_info', ',', 'use_status', '=', 'False', ',', 'max_normal_depth', '=', 'None', ')', ':', 'if', '_is_biallelic_snp', '(', 'rec', ')', 'and', '_passes_plus_germline', '(', 'rec', ',', 'use_status', '=', 'use_status', ')', ':', 'stats', '=', '_tumor_normal_stats', '(', 'rec', ',', 'somatic_info', ',', 'vcf_rec', ')', 'depths', '=', '[', 'tz', '.', 'get_in', '(', '[', 'x', ',', '"depth"', ']', ',', 'stats', ')', 'for', 'x', 'in', '[', '"normal"', ',', '"tumor"', ']', ']', 'depths', '=', '[', 'd', 'for', 'd', 'in', 'depths', 'if', 'd', 'is', 'not', 'None', ']', 'normal_freq', '=', 'tz', '.', 'get_in', '(', '[', '"normal"', ',', '"freq"', ']', ',', 'stats', ')', 'tumor_freq', '=', 'tz', '.', 'get_in', '(', '[', '"tumor"', ',', '"freq"', ']', ',', 'stats', ')', 'if', 'all', '(', '[', 'd', '>', 'params', '[', '"min_depth"', ']', 'for', 'd', 'in', 'depths', ']', ')', ':', 'if', 'max_normal_depth', 'and', 'tz', '.', 'get_in', '(', '[', '"normal"', ',', '"depth"', ']', ',', 'stats', ',', '0', ')', '>', 'max_normal_depth', ':', 'return', 'None', 'if', 'normal_freq', 'is', 'not', 'None', ':', 'if', 'normal_freq', '>=', 'params', '[', '"min_freq"', ']', 'and', 'normal_freq', '<=', 'params', '[', '"max_freq"', ']', ':', 'return', 'stats', 'elif', '(', 'tumor_freq', '>=', 'params', '[', '"tumor_only"', ']', '[', '"min_freq"', ']', 'and', 'tumor_freq', '<=', 'params', '[', '"tumor_only"', ']', '[', '"max_freq"', ']', ')', ':', 'if', '(', 'vcf_rec', 'and', 'not', '_has_population_germline', '(', 'vcf_rec', ')', ')', 'or', 'is_population_germline', '(', 'rec', ')', ':', 'return', 'stats']
Check if the VCF record is a het in the normal with sufficient support. Only returns SNPs, since indels tend to have less precise frequency measurements.
['Check', 'if', 'the', 'VCF', 'record', 'is', 'a', 'het', 'in', 'the', 'normal', 'with', 'sufficient', 'support', '.']
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L358-L378
9,745
binbrain/OpenSesame
OpenSesame/keyring.py
OpenKeyring._auto_unlock_key_position
def _auto_unlock_key_position(self): """Find the open sesame password in the default keyring """ found_pos = None default_keyring_ids = gkr.list_item_ids_sync(self.default_keyring) for pos in default_keyring_ids: item_attrs = gkr.item_get_attributes_sync(self.default_keyring, pos) app = 'application' if item_attrs.has_key(app) and item_attrs[app] == "opensesame": found_pos = pos break return found_pos
python
def _auto_unlock_key_position(self): """Find the open sesame password in the default keyring """ found_pos = None default_keyring_ids = gkr.list_item_ids_sync(self.default_keyring) for pos in default_keyring_ids: item_attrs = gkr.item_get_attributes_sync(self.default_keyring, pos) app = 'application' if item_attrs.has_key(app) and item_attrs[app] == "opensesame": found_pos = pos break return found_pos
['def', '_auto_unlock_key_position', '(', 'self', ')', ':', 'found_pos', '=', 'None', 'default_keyring_ids', '=', 'gkr', '.', 'list_item_ids_sync', '(', 'self', '.', 'default_keyring', ')', 'for', 'pos', 'in', 'default_keyring_ids', ':', 'item_attrs', '=', 'gkr', '.', 'item_get_attributes_sync', '(', 'self', '.', 'default_keyring', ',', 'pos', ')', 'app', '=', "'application'", 'if', 'item_attrs', '.', 'has_key', '(', 'app', ')', 'and', 'item_attrs', '[', 'app', ']', '==', '"opensesame"', ':', 'found_pos', '=', 'pos', 'break', 'return', 'found_pos']
Find the open sesame password in the default keyring
['Find', 'the', 'open', 'sesame', 'password', 'in', 'the', 'default', 'keyring']
train
https://github.com/binbrain/OpenSesame/blob/e32c306385012646400ecb49fc65c64b14ce3a93/OpenSesame/keyring.py#L50-L61
9,746
blockstack/blockstack-core
blockstack/lib/nameset/db.py
namedb_get_names_owned_by_address
def namedb_get_names_owned_by_address( cur, address, current_block ): """ Get the list of non-expired, non-revoked names owned by an address. Only works if there is a *singular* address for the name. """ unexpired_fragment, unexpired_args = namedb_select_where_unexpired_names( current_block ) select_query = "SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \ "WHERE name_records.address = ? AND name_records.revoked = 0 AND " + unexpired_fragment + ";" args = (address,) + unexpired_args name_rows = namedb_query_execute( cur, select_query, args ) names = [] for name_row in name_rows: names.append( name_row['name'] ) if len(names) == 0: return None else: return names
python
def namedb_get_names_owned_by_address( cur, address, current_block ): """ Get the list of non-expired, non-revoked names owned by an address. Only works if there is a *singular* address for the name. """ unexpired_fragment, unexpired_args = namedb_select_where_unexpired_names( current_block ) select_query = "SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \ "WHERE name_records.address = ? AND name_records.revoked = 0 AND " + unexpired_fragment + ";" args = (address,) + unexpired_args name_rows = namedb_query_execute( cur, select_query, args ) names = [] for name_row in name_rows: names.append( name_row['name'] ) if len(names) == 0: return None else: return names
['def', 'namedb_get_names_owned_by_address', '(', 'cur', ',', 'address', ',', 'current_block', ')', ':', 'unexpired_fragment', ',', 'unexpired_args', '=', 'namedb_select_where_unexpired_names', '(', 'current_block', ')', 'select_query', '=', '"SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id "', '+', '"WHERE name_records.address = ? AND name_records.revoked = 0 AND "', '+', 'unexpired_fragment', '+', '";"', 'args', '=', '(', 'address', ',', ')', '+', 'unexpired_args', 'name_rows', '=', 'namedb_query_execute', '(', 'cur', ',', 'select_query', ',', 'args', ')', 'names', '=', '[', ']', 'for', 'name_row', 'in', 'name_rows', ':', 'names', '.', 'append', '(', 'name_row', '[', "'name'", ']', ')', 'if', 'len', '(', 'names', ')', '==', '0', ':', 'return', 'None', 'else', ':', 'return', 'names']
Get the list of non-expired, non-revoked names owned by an address. Only works if there is a *singular* address for the name.
['Get', 'the', 'list', 'of', 'non', '-', 'expired', 'non', '-', 'revoked', 'names', 'owned', 'by', 'an', 'address', '.', 'Only', 'works', 'if', 'there', 'is', 'a', '*', 'singular', '*', 'address', 'for', 'the', 'name', '.']
train
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/db.py#L2245-L2266
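A hedged sketch of a call site for the function above; the database path, address and block height are placeholders, and opening the chain-state SQLite database directly like this is purely illustrative.

```python
# Illustrative only: assumes an existing Blockstack chain-state database.
import sqlite3
from blockstack.lib.nameset.db import namedb_get_names_owned_by_address

db = sqlite3.connect("/var/blockstack/blockstack-server.db")  # placeholder path
cur = db.cursor()
names = namedb_get_names_owned_by_address(
    cur, "1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d", 600000  # placeholder address/height
)
print(names or "no unexpired, unrevoked names for this address")
```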
9,747
Kronuz/pyScss
scss/source.py
SourceFile.from_string
def from_string(cls, string, relpath=None, encoding=None, is_sass=None): """Read Sass source from the contents of a string. The origin is always None. `relpath` defaults to "string:...". """ if isinstance(string, six.text_type): # Already decoded; we don't know what encoding to use for output, # though, so still check for a @charset. # TODO what if the given encoding conflicts with the one in the # file? do we care? if encoding is None: encoding = determine_encoding(string) byte_contents = string.encode(encoding) text_contents = string elif isinstance(string, six.binary_type): encoding = determine_encoding(string) byte_contents = string text_contents = string.decode(encoding) else: raise TypeError("Expected text or bytes, got {0!r}".format(string)) origin = None if relpath is None: m = hashlib.sha256() m.update(byte_contents) relpath = repr("string:{0}:{1}".format( m.hexdigest()[:16], text_contents[:100])) return cls( origin, relpath, text_contents, encoding=encoding, is_sass=is_sass, )
python
def from_string(cls, string, relpath=None, encoding=None, is_sass=None): """Read Sass source from the contents of a string. The origin is always None. `relpath` defaults to "string:...". """ if isinstance(string, six.text_type): # Already decoded; we don't know what encoding to use for output, # though, so still check for a @charset. # TODO what if the given encoding conflicts with the one in the # file? do we care? if encoding is None: encoding = determine_encoding(string) byte_contents = string.encode(encoding) text_contents = string elif isinstance(string, six.binary_type): encoding = determine_encoding(string) byte_contents = string text_contents = string.decode(encoding) else: raise TypeError("Expected text or bytes, got {0!r}".format(string)) origin = None if relpath is None: m = hashlib.sha256() m.update(byte_contents) relpath = repr("string:{0}:{1}".format( m.hexdigest()[:16], text_contents[:100])) return cls( origin, relpath, text_contents, encoding=encoding, is_sass=is_sass, )
['def', 'from_string', '(', 'cls', ',', 'string', ',', 'relpath', '=', 'None', ',', 'encoding', '=', 'None', ',', 'is_sass', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'string', ',', 'six', '.', 'text_type', ')', ':', "# Already decoded; we don't know what encoding to use for output,", '# though, so still check for a @charset.', '# TODO what if the given encoding conflicts with the one in the', '# file? do we care?', 'if', 'encoding', 'is', 'None', ':', 'encoding', '=', 'determine_encoding', '(', 'string', ')', 'byte_contents', '=', 'string', '.', 'encode', '(', 'encoding', ')', 'text_contents', '=', 'string', 'elif', 'isinstance', '(', 'string', ',', 'six', '.', 'binary_type', ')', ':', 'encoding', '=', 'determine_encoding', '(', 'string', ')', 'byte_contents', '=', 'string', 'text_contents', '=', 'string', '.', 'decode', '(', 'encoding', ')', 'else', ':', 'raise', 'TypeError', '(', '"Expected text or bytes, got {0!r}"', '.', 'format', '(', 'string', ')', ')', 'origin', '=', 'None', 'if', 'relpath', 'is', 'None', ':', 'm', '=', 'hashlib', '.', 'sha256', '(', ')', 'm', '.', 'update', '(', 'byte_contents', ')', 'relpath', '=', 'repr', '(', '"string:{0}:{1}"', '.', 'format', '(', 'm', '.', 'hexdigest', '(', ')', '[', ':', '16', ']', ',', 'text_contents', '[', ':', '100', ']', ')', ')', 'return', 'cls', '(', 'origin', ',', 'relpath', ',', 'text_contents', ',', 'encoding', '=', 'encoding', ',', 'is_sass', '=', 'is_sass', ',', ')']
Read Sass source from the contents of a string. The origin is always None. `relpath` defaults to "string:...".
['Read', 'Sass', 'source', 'from', 'the', 'contents', 'of', 'a', 'string', '.']
train
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/source.py#L224-L256
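A small sketch of `SourceFile.from_string`; the import path follows the file path in this record, and reading the auto-generated `relpath` back off the instance is an assumption.

```python
# Minimal sketch; the .relpath attribute access is an assumption.
from scss.source import SourceFile

source = SourceFile.from_string(u".btn { color: red; }")
print(source.relpath)  # "string:<hash>:<prefix>" label generated above

# Bytes work too; the encoding is sniffed from an optional @charset rule.
source2 = SourceFile.from_string(b'@charset "utf-8";\n.a { width: 1px; }')
```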
9,748
Boudewijn26/gTTS-token
gtts_token/gtts_token.py
Token.calculate_token
def calculate_token(self, text, seed=None): """ Calculate the request token (`tk`) of a string :param text: str The text to calculate a token for :param seed: str The seed to use. By default this is the number of hours since epoch """ if seed is None: seed = self._get_token_key() [first_seed, second_seed] = seed.split(".") try: d = bytearray(text.encode('UTF-8')) except UnicodeDecodeError: # This will probably only occur when d is actually a str containing UTF-8 chars, which means we don't need # to encode. d = bytearray(text) a = int(first_seed) for value in d: a += value a = self._work_token(a, self.SALT_1) a = self._work_token(a, self.SALT_2) a ^= int(second_seed) if 0 > a: a = (a & 2147483647) + 2147483648 a %= 1E6 a = int(a) return str(a) + "." + str(a ^ int(first_seed))
python
def calculate_token(self, text, seed=None): """ Calculate the request token (`tk`) of a string :param text: str The text to calculate a token for :param seed: str The seed to use. By default this is the number of hours since epoch """ if seed is None: seed = self._get_token_key() [first_seed, second_seed] = seed.split(".") try: d = bytearray(text.encode('UTF-8')) except UnicodeDecodeError: # This will probably only occur when d is actually a str containing UTF-8 chars, which means we don't need # to encode. d = bytearray(text) a = int(first_seed) for value in d: a += value a = self._work_token(a, self.SALT_1) a = self._work_token(a, self.SALT_2) a ^= int(second_seed) if 0 > a: a = (a & 2147483647) + 2147483648 a %= 1E6 a = int(a) return str(a) + "." + str(a ^ int(first_seed))
['def', 'calculate_token', '(', 'self', ',', 'text', ',', 'seed', '=', 'None', ')', ':', 'if', 'seed', 'is', 'None', ':', 'seed', '=', 'self', '.', '_get_token_key', '(', ')', '[', 'first_seed', ',', 'second_seed', ']', '=', 'seed', '.', 'split', '(', '"."', ')', 'try', ':', 'd', '=', 'bytearray', '(', 'text', '.', 'encode', '(', "'UTF-8'", ')', ')', 'except', 'UnicodeDecodeError', ':', "# This will probably only occur when d is actually a str containing UTF-8 chars, which means we don't need", '# to encode.', 'd', '=', 'bytearray', '(', 'text', ')', 'a', '=', 'int', '(', 'first_seed', ')', 'for', 'value', 'in', 'd', ':', 'a', '+=', 'value', 'a', '=', 'self', '.', '_work_token', '(', 'a', ',', 'self', '.', 'SALT_1', ')', 'a', '=', 'self', '.', '_work_token', '(', 'a', ',', 'self', '.', 'SALT_2', ')', 'a', '^=', 'int', '(', 'second_seed', ')', 'if', '0', '>', 'a', ':', 'a', '=', '(', 'a', '&', '2147483647', ')', '+', '2147483648', 'a', '%=', '1E6', 'a', '=', 'int', '(', 'a', ')', 'return', 'str', '(', 'a', ')', '+', '"."', '+', 'str', '(', 'a', '^', 'int', '(', 'first_seed', ')', ')']
Calculate the request token (`tk`) of a string :param text: str The text to calculate a token for :param seed: str The seed to use. By default this is the number of hours since epoch
['Calculate', 'the', 'request', 'token', '(', 'tk', ')', 'of', 'a', 'string', ':', 'param', 'text', ':', 'str', 'The', 'text', 'to', 'calculate', 'a', 'token', 'for', ':', 'param', 'seed', ':', 'str', 'The', 'seed', 'to', 'use', '.', 'By', 'default', 'this', 'is', 'the', 'number', 'of', 'hours', 'since', 'epoch']
train
https://github.com/Boudewijn26/gTTS-token/blob/9a1bb569bcce1ec091bfd9586dd54f9c879e7d3c/gtts_token/gtts_token.py#L21-L49
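A usage sketch for `Token.calculate_token`; without a `seed` the class fetches a time-based key from Google, so a fixed seed in the `<int>.<int>` form the code expects is passed here to keep the example offline. The seed value itself is a placeholder.

```python
# Offline sketch: a fixed, made-up seed avoids the network round trip
# that _get_token_key() would otherwise perform.
from gtts_token.gtts_token import Token

tk = Token().calculate_token("Hello world", seed="406986.2817744745")
print(tk)  # a string of the form "<int>.<int>"
```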
9,749
Azure/azure-event-hubs-python
azure/eventhub/sender.py
Sender.open
def open(self): """ Open the Sender using the supplied conneciton. If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. :param connection: The underlying client shared connection. :type: connection: ~uamqp.connection.Connection """ self.running = True if self.redirected: self.target = self.redirected.address self._handler = SendClient( self.target, auth=self.client.get_auth(), debug=self.client.debug, msg_timeout=self.timeout, error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, properties=self.client.create_properties()) self._handler.open() while not self._handler.client_ready(): time.sleep(0.05)
python
def open(self): """ Open the Sender using the supplied conneciton. If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. :param connection: The underlying client shared connection. :type: connection: ~uamqp.connection.Connection """ self.running = True if self.redirected: self.target = self.redirected.address self._handler = SendClient( self.target, auth=self.client.get_auth(), debug=self.client.debug, msg_timeout=self.timeout, error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, properties=self.client.create_properties()) self._handler.open() while not self._handler.client_ready(): time.sleep(0.05)
['def', 'open', '(', 'self', ')', ':', 'self', '.', 'running', '=', 'True', 'if', 'self', '.', 'redirected', ':', 'self', '.', 'target', '=', 'self', '.', 'redirected', '.', 'address', 'self', '.', '_handler', '=', 'SendClient', '(', 'self', '.', 'target', ',', 'auth', '=', 'self', '.', 'client', '.', 'get_auth', '(', ')', ',', 'debug', '=', 'self', '.', 'client', '.', 'debug', ',', 'msg_timeout', '=', 'self', '.', 'timeout', ',', 'error_policy', '=', 'self', '.', 'retry_policy', ',', 'keep_alive_interval', '=', 'self', '.', 'keep_alive', ',', 'client_name', '=', 'self', '.', 'name', ',', 'properties', '=', 'self', '.', 'client', '.', 'create_properties', '(', ')', ')', 'self', '.', '_handler', '.', 'open', '(', ')', 'while', 'not', 'self', '.', '_handler', '.', 'client_ready', '(', ')', ':', 'time', '.', 'sleep', '(', '0.05', ')']
Open the Sender using the supplied connection. If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. :param connection: The underlying client shared connection. :type: connection: ~uamqp.connection.Connection
['Open', 'the', 'Sender', 'using', 'the', 'supplied', 'conneciton', '.', 'If', 'the', 'handler', 'has', 'previously', 'been', 'redirected', 'the', 'redirect', 'context', 'will', 'be', 'used', 'to', 'create', 'a', 'new', 'handler', 'before', 'opening', 'it', '.']
train
https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventhub/sender.py#L72-L95
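A hedged sketch of how `Sender.open` is normally reached in the 1.x SDK this record comes from: the client builds the sender and opens it during `run()`. The namespace address and SAS policy values are placeholders, and the surrounding client calls are assumptions about that SDK rather than something shown in this record.

```python
# Assumed 1.x azure-eventhub client flow; address and credentials are placeholders.
from azure.eventhub import EventHubClient, EventData

address = "amqps://my-namespace.servicebus.windows.net/my-eventhub"
client = EventHubClient(address, username="policy-name", password="policy-key")
sender = client.add_sender(partition="0")
client.run()  # internally opens the sender and waits for client_ready()
try:
    sender.send(EventData(b"hello"))
finally:
    client.stop()
```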
9,750
frejanordsiek/hdf5storage
hdf5storage/utilities.py
set_attributes_all
def set_attributes_all(target, attributes, discard_others=True): """ Set Attributes in bulk and optionally discard others. Sets each Attribute in turn (modifying it in place if possible if it is already present) and optionally discarding all other Attributes not explicitly set. This function yields much greater performance than the required individual calls to ``set_attribute``, ``set_attribute_string``, ``set_attribute_string_array`` and ``del_attribute`` put together. .. versionadded:: 0.2 Parameters ---------- target : Dataset or Group Dataset or Group to set the Attributes of. attributes : dict The Attributes to set. The keys (``str``) are the names. The values are ``tuple`` of the Attribute kind and the value to set. Valid kinds are ``'string_array'``, ``'string'``, and ``'value'``. The values must correspond to what ``set_attribute_string_array``, ``set_attribute_string`` and ``set_attribute`` would take respectively. discard_others : bool, optional Whether to discard all other Attributes not explicitly set (default) or not. See Also -------- set_attribute set_attribute_string set_attribute_string_array """ attrs = target.attrs existing = dict(attrs.items()) # Generate special dtype for string arrays. if sys.hexversion >= 0x03000000: str_arr_dtype = h5py.special_dtype(vlen=str) else: str_arr_dtype = dtype=h5py.special_dtype(vlen=unicode) # Go through each attribute. If it is already present, modify it if # possible and create it otherwise (deletes old value.) for k, (kind, value) in attributes.items(): if kind == 'string_array': attrs.create(k, [convert_to_str(s) for s in value], dtype=str_arr_dtype) else: if kind == 'string': value = np.bytes_(value) if k not in existing: attrs.create(k, value) else: try: if value.dtype == existing[k].dtype \ and value.shape == existing[k].shape: attrs.modify(k, value) except: attrs.create(k, value) # Discard all other attributes. if discard_others: for k in set(existing) - set(attributes): del attrs[k]
python
def set_attributes_all(target, attributes, discard_others=True): """ Set Attributes in bulk and optionally discard others. Sets each Attribute in turn (modifying it in place if possible if it is already present) and optionally discarding all other Attributes not explicitly set. This function yields much greater performance than the required individual calls to ``set_attribute``, ``set_attribute_string``, ``set_attribute_string_array`` and ``del_attribute`` put together. .. versionadded:: 0.2 Parameters ---------- target : Dataset or Group Dataset or Group to set the Attributes of. attributes : dict The Attributes to set. The keys (``str``) are the names. The values are ``tuple`` of the Attribute kind and the value to set. Valid kinds are ``'string_array'``, ``'string'``, and ``'value'``. The values must correspond to what ``set_attribute_string_array``, ``set_attribute_string`` and ``set_attribute`` would take respectively. discard_others : bool, optional Whether to discard all other Attributes not explicitly set (default) or not. See Also -------- set_attribute set_attribute_string set_attribute_string_array """ attrs = target.attrs existing = dict(attrs.items()) # Generate special dtype for string arrays. if sys.hexversion >= 0x03000000: str_arr_dtype = h5py.special_dtype(vlen=str) else: str_arr_dtype = dtype=h5py.special_dtype(vlen=unicode) # Go through each attribute. If it is already present, modify it if # possible and create it otherwise (deletes old value.) for k, (kind, value) in attributes.items(): if kind == 'string_array': attrs.create(k, [convert_to_str(s) for s in value], dtype=str_arr_dtype) else: if kind == 'string': value = np.bytes_(value) if k not in existing: attrs.create(k, value) else: try: if value.dtype == existing[k].dtype \ and value.shape == existing[k].shape: attrs.modify(k, value) except: attrs.create(k, value) # Discard all other attributes. if discard_others: for k in set(existing) - set(attributes): del attrs[k]
['def', 'set_attributes_all', '(', 'target', ',', 'attributes', ',', 'discard_others', '=', 'True', ')', ':', 'attrs', '=', 'target', '.', 'attrs', 'existing', '=', 'dict', '(', 'attrs', '.', 'items', '(', ')', ')', '# Generate special dtype for string arrays.', 'if', 'sys', '.', 'hexversion', '>=', '0x03000000', ':', 'str_arr_dtype', '=', 'h5py', '.', 'special_dtype', '(', 'vlen', '=', 'str', ')', 'else', ':', 'str_arr_dtype', '=', 'dtype', '=', 'h5py', '.', 'special_dtype', '(', 'vlen', '=', 'unicode', ')', '# Go through each attribute. If it is already present, modify it if', '# possible and create it otherwise (deletes old value.)', 'for', 'k', ',', '(', 'kind', ',', 'value', ')', 'in', 'attributes', '.', 'items', '(', ')', ':', 'if', 'kind', '==', "'string_array'", ':', 'attrs', '.', 'create', '(', 'k', ',', '[', 'convert_to_str', '(', 's', ')', 'for', 's', 'in', 'value', ']', ',', 'dtype', '=', 'str_arr_dtype', ')', 'else', ':', 'if', 'kind', '==', "'string'", ':', 'value', '=', 'np', '.', 'bytes_', '(', 'value', ')', 'if', 'k', 'not', 'in', 'existing', ':', 'attrs', '.', 'create', '(', 'k', ',', 'value', ')', 'else', ':', 'try', ':', 'if', 'value', '.', 'dtype', '==', 'existing', '[', 'k', ']', '.', 'dtype', 'and', 'value', '.', 'shape', '==', 'existing', '[', 'k', ']', '.', 'shape', ':', 'attrs', '.', 'modify', '(', 'k', ',', 'value', ')', 'except', ':', 'attrs', '.', 'create', '(', 'k', ',', 'value', ')', '# Discard all other attributes.', 'if', 'discard_others', ':', 'for', 'k', 'in', 'set', '(', 'existing', ')', '-', 'set', '(', 'attributes', ')', ':', 'del', 'attrs', '[', 'k', ']']
Set Attributes in bulk and optionally discard others. Sets each Attribute in turn (modifying it in place if possible if it is already present) and optionally discarding all other Attributes not explicitly set. This function yields much greater performance than the required individual calls to ``set_attribute``, ``set_attribute_string``, ``set_attribute_string_array`` and ``del_attribute`` put together. .. versionadded:: 0.2 Parameters ---------- target : Dataset or Group Dataset or Group to set the Attributes of. attributes : dict The Attributes to set. The keys (``str``) are the names. The values are ``tuple`` of the Attribute kind and the value to set. Valid kinds are ``'string_array'``, ``'string'``, and ``'value'``. The values must correspond to what ``set_attribute_string_array``, ``set_attribute_string`` and ``set_attribute`` would take respectively. discard_others : bool, optional Whether to discard all other Attributes not explicitly set (default) or not. See Also -------- set_attribute set_attribute_string set_attribute_string_array
['Set', 'Attributes', 'in', 'bulk', 'and', 'optionally', 'discard', 'others', '.']
train
https://github.com/frejanordsiek/hdf5storage/blob/539275141dd3a4efbbbfd9bdb978f3ed59e3f05d/hdf5storage/utilities.py#L1537-L1599
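A minimal sketch of `set_attributes_all` driving h5py Attributes in bulk; the file name and attribute names are placeholders, and the import path follows the file path in this record (the docstring above marks the function as new in version 0.2).

```python
# Sets two fresh attributes in one call and discards the pre-existing one.
import numpy as np
import h5py
from hdf5storage.utilities import set_attributes_all

with h5py.File("example.h5", "w") as f:
    dset = f.create_dataset("data", data=np.arange(3))
    dset.attrs["stale"] = 1  # not in the dict below, so it gets discarded
    set_attributes_all(dset, {
        "kind": ("string", "demo"),
        "fields": ("string_array", ["x", "y"]),
        "shape": ("value", np.uint64([3])),
    }, discard_others=True)
    print(dict(dset.attrs))
```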
9,751
bykof/billomapy
billomapy/billomapy.py
Billomapy.complete_delivery_note
def complete_delivery_note(self, delivery_note_id, complete_dict): """ Completes an delivery note :param complete_dict: the complete dict with the template id :param delivery_note_id: the delivery note id :return: Response """ return self._create_put_request( resource=DELIVERY_NOTES, billomat_id=delivery_note_id, command=COMPLETE, send_data=complete_dict )
python
def complete_delivery_note(self, delivery_note_id, complete_dict): """ Completes an delivery note :param complete_dict: the complete dict with the template id :param delivery_note_id: the delivery note id :return: Response """ return self._create_put_request( resource=DELIVERY_NOTES, billomat_id=delivery_note_id, command=COMPLETE, send_data=complete_dict )
['def', 'complete_delivery_note', '(', 'self', ',', 'delivery_note_id', ',', 'complete_dict', ')', ':', 'return', 'self', '.', '_create_put_request', '(', 'resource', '=', 'DELIVERY_NOTES', ',', 'billomat_id', '=', 'delivery_note_id', ',', 'command', '=', 'COMPLETE', ',', 'send_data', '=', 'complete_dict', ')']
Completes a delivery note :param complete_dict: the complete dict with the template id :param delivery_note_id: the delivery note id :return: Response
['Completes', 'an', 'delivery', 'note']
train
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3564-L3577
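A hypothetical call to `complete_delivery_note`; the credentials and ids are placeholders, the constructor arguments are assumed from billomapy's usual client setup, and the payload layout follows the Billomat "complete" convention as an assumption rather than something shown in this record.

```python
# Placeholders throughout; the complete_dict layout is an assumption.
from billomapy import Billomapy

client = Billomapy("my-billomat-id", "my-api-key", "my-app-id", "my-app-secret")
response = client.complete_delivery_note(
    delivery_note_id=1234,
    complete_dict={"complete": {"template_id": 1}},
)
```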
9,752
bram85/topydo
topydo/ui/prompt/PromptCompleter.py
_dates
def _dates(p_word_before_cursor): """ Generator for date completion. """ to_absolute = lambda s: relative_date_to_date(s).isoformat() start_value_pos = p_word_before_cursor.find(':') + 1 value = p_word_before_cursor[start_value_pos:] for reldate in date_suggestions(): if not reldate.startswith(value): continue yield Completion(reldate, -len(value), display_meta=to_absolute(reldate))
python
def _dates(p_word_before_cursor): """ Generator for date completion. """ to_absolute = lambda s: relative_date_to_date(s).isoformat() start_value_pos = p_word_before_cursor.find(':') + 1 value = p_word_before_cursor[start_value_pos:] for reldate in date_suggestions(): if not reldate.startswith(value): continue yield Completion(reldate, -len(value), display_meta=to_absolute(reldate))
['def', '_dates', '(', 'p_word_before_cursor', ')', ':', 'to_absolute', '=', 'lambda', 's', ':', 'relative_date_to_date', '(', 's', ')', '.', 'isoformat', '(', ')', 'start_value_pos', '=', 'p_word_before_cursor', '.', 'find', '(', "':'", ')', '+', '1', 'value', '=', 'p_word_before_cursor', '[', 'start_value_pos', ':', ']', 'for', 'reldate', 'in', 'date_suggestions', '(', ')', ':', 'if', 'not', 'reldate', '.', 'startswith', '(', 'value', ')', ':', 'continue', 'yield', 'Completion', '(', 'reldate', ',', '-', 'len', '(', 'value', ')', ',', 'display_meta', '=', 'to_absolute', '(', 'reldate', ')', ')']
Generator for date completion.
['Generator', 'for', 'date', 'completion', '.']
train
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/ui/prompt/PromptCompleter.py#L31-L42
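A small illustration of the `_dates` completion generator; feeding it a partial tag such as `due:to` yields prompt_toolkit `Completion` objects whose `display_meta` carries the absolute ISO date. The import path follows the file path in this record.

```python
# Prints each suggested relative date together with its absolute date.
from topydo.ui.prompt.PromptCompleter import _dates

for completion in _dates("due:to"):
    print(completion.text, completion.display_meta)
```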
9,753
nerdvegas/rez
src/rez/vendor/sortedcontainers/sortedset.py
SortedSet.copy
def copy(self): """Create a shallow copy of the sorted set.""" return self._fromset(set(self._set), key=self._key)
python
def copy(self): """Create a shallow copy of the sorted set.""" return self._fromset(set(self._set), key=self._key)
['def', 'copy', '(', 'self', ')', ':', 'return', 'self', '.', '_fromset', '(', 'set', '(', 'self', '.', '_set', ')', ',', 'key', '=', 'self', '.', '_key', ')']
Create a shallow copy of the sorted set.
['Create', 'a', 'shallow', 'copy', 'of', 'the', 'sorted', 'set', '.']
train
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/sortedcontainers/sortedset.py#L168-L170
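A quick demonstration of the copy semantics shown above (a shallow copy that keeps the key function), using the upstream sortedcontainers package that this rez vendor tree mirrors.

```python
# The copy is independent of the original but sorts with the same key.
from sortedcontainers import SortedSet

s = SortedSet([3, 1, 2], key=lambda x: -x)
t = s.copy()
t.add(0)
print(list(s))  # [3, 2, 1] - original unchanged
print(list(t))  # [3, 2, 1, 0]
```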
9,754
gawel/aiocron
aiocron/__init__.py
Cron.call_func
def call_func(self, *args, **kwargs): """Called. Take care of exceptions using gather""" asyncio.gather( self.cron(*args, **kwargs), loop=self.loop, return_exceptions=True ).add_done_callback(self.set_result)
python
def call_func(self, *args, **kwargs): """Called. Take care of exceptions using gather""" asyncio.gather( self.cron(*args, **kwargs), loop=self.loop, return_exceptions=True ).add_done_callback(self.set_result)
['def', 'call_func', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'asyncio', '.', 'gather', '(', 'self', '.', 'cron', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ',', 'loop', '=', 'self', '.', 'loop', ',', 'return_exceptions', '=', 'True', ')', '.', 'add_done_callback', '(', 'self', '.', 'set_result', ')']
Called. Take care of exceptions using gather
['Called', '.', 'Take', 'care', 'of', 'exceptions', 'using', 'gather']
train
https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L83-L88
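Typical aiocron usage that ends up in `call_func`: the decorated coroutine is scheduled on the cron spec, and any exception it raises is captured into the result by the `gather(..., return_exceptions=True)` wrapper shown above.

```python
# Runs the coroutine once a minute until the event loop is stopped.
import asyncio
import aiocron

@aiocron.crontab("* * * * *")
async def heartbeat():
    print("tick")

asyncio.get_event_loop().run_forever()
```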
9,755
thautwarm/Redy
Redy/Collections/Traversal.py
reduce_by
def reduce_by(fn: Callable[[T1, T1], T1]) -> Callable[[ActualIterable[T1]], T1]: """ >>> from Redy.Collections import Traversal, Flow >>> def mul(a: int, b: int): return a * b >>> lst: Iterable[int] = [1, 2, 3] >>> x = Flow(lst)[Traversal.reduce_by(mul)].unbox >>> assert x is 6 """ return lambda collection: functools.reduce(fn, collection)
python
def reduce_by(fn: Callable[[T1, T1], T1]) -> Callable[[ActualIterable[T1]], T1]: """ >>> from Redy.Collections import Traversal, Flow >>> def mul(a: int, b: int): return a * b >>> lst: Iterable[int] = [1, 2, 3] >>> x = Flow(lst)[Traversal.reduce_by(mul)].unbox >>> assert x is 6 """ return lambda collection: functools.reduce(fn, collection)
['def', 'reduce_by', '(', 'fn', ':', 'Callable', '[', '[', 'T1', ',', 'T1', ']', ',', 'T1', ']', ')', '->', 'Callable', '[', '[', 'ActualIterable', '[', 'T1', ']', ']', ',', 'T1', ']', ':', 'return', 'lambda', 'collection', ':', 'functools', '.', 'reduce', '(', 'fn', ',', 'collection', ')']
>>> from Redy.Collections import Traversal, Flow >>> def mul(a: int, b: int): return a * b >>> lst: Iterable[int] = [1, 2, 3] >>> x = Flow(lst)[Traversal.reduce_by(mul)].unbox >>> assert x is 6
['>>>', 'from', 'Redy', '.', 'Collections', 'import', 'Traversal', 'Flow', '>>>', 'def', 'mul', '(', 'a', ':', 'int', 'b', ':', 'int', ')', ':', 'return', 'a', '*', 'b', '>>>', 'lst', ':', 'Iterable', '[', 'int', ']', '=', '[', '1', '2', '3', ']', '>>>', 'x', '=', 'Flow', '(', 'lst', ')', '[', 'Traversal', '.', 'reduce_by', '(', 'mul', ')', ']', '.', 'unbox', '>>>', 'assert', 'x', 'is', '6']
train
https://github.com/thautwarm/Redy/blob/8beee5c5f752edfd2754bb1e6b5f4acb016a7770/Redy/Collections/Traversal.py#L26-L34
9,756
SpamScope/mail-parser
mailparser/mailparser.py
MailParser.from_string
def from_string(cls, s): """ Init a new object from a string. Args: s (string): raw email Returns: Instance of MailParser """ log.debug("Parsing email from string") message = email.message_from_string(s) return cls(message)
python
def from_string(cls, s): """ Init a new object from a string. Args: s (string): raw email Returns: Instance of MailParser """ log.debug("Parsing email from string") message = email.message_from_string(s) return cls(message)
['def', 'from_string', '(', 'cls', ',', 's', ')', ':', 'log', '.', 'debug', '(', '"Parsing email from string"', ')', 'message', '=', 'email', '.', 'message_from_string', '(', 's', ')', 'return', 'cls', '(', 'message', ')']
Init a new object from a string. Args: s (string): raw email Returns: Instance of MailParser
['Init', 'a', 'new', 'object', 'from', 'a', 'string', '.']
train
https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L207-L220
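A usage sketch built on `MailParser.from_string`; the raw message is a tiny hand-written email, and the explicit `parse()` call plus the `subject` attribute are assumptions about mail-parser's interface in the version this record comes from.

```python
# The explicit parse() call is kept as a cautious assumption; some versions
# expose a module-level parse_from_string() convenience wrapper instead.
from mailparser.mailparser import MailParser

raw = (
    "From: alice@example.com\r\n"
    "To: bob@example.com\r\n"
    "Subject: Hello\r\n"
    "\r\n"
    "Just a test body.\r\n"
)
mail = MailParser.from_string(raw)
mail.parse()
print(mail.subject)
```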
9,757
mosdef-hub/mbuild
mbuild/compound.py
Compound.boundingbox
def boundingbox(self): """Compute the bounding box of the compound. Returns ------- mb.Box The bounding box for this Compound """ xyz = self.xyz return Box(mins=xyz.min(axis=0), maxs=xyz.max(axis=0))
python
def boundingbox(self): """Compute the bounding box of the compound. Returns ------- mb.Box The bounding box for this Compound """ xyz = self.xyz return Box(mins=xyz.min(axis=0), maxs=xyz.max(axis=0))
['def', 'boundingbox', '(', 'self', ')', ':', 'xyz', '=', 'self', '.', 'xyz', 'return', 'Box', '(', 'mins', '=', 'xyz', '.', 'min', '(', 'axis', '=', '0', ')', ',', 'maxs', '=', 'xyz', '.', 'max', '(', 'axis', '=', '0', ')', ')']
Compute the bounding box of the compound. Returns ------- mb.Box The bounding box for this Compound
['Compute', 'the', 'bounding', 'box', 'of', 'the', 'compound', '.']
train
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/compound.py#L1078-L1088
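A sketch of `boundingbox` on a hand-built compound; mbuild exposes it as a property in recent versions, which is assumed here, and the particle positions are arbitrary.

```python
# Two particles -> the box spans the element-wise min to max positions.
import mbuild as mb

compound = mb.Compound()
compound.add(mb.Particle(name="C", pos=[0.0, 0.0, 0.0]))
compound.add(mb.Particle(name="H", pos=[0.1, 0.2, 0.3]))

box = compound.boundingbox  # call it as a method instead if it is not a property
print(box.mins, box.maxs)
```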
9,758
Workiva/furious
furious/async.py
decode_async_options
def decode_async_options(options): """Decode Async options from JSON decoding.""" async_options = copy.deepcopy(options) # JSON don't like datetimes. eta = async_options.get('task_args', {}).get('eta') if eta: from datetime import datetime async_options['task_args']['eta'] = datetime.fromtimestamp(eta) # If there are callbacks, reconstitute them. callbacks = async_options.get('callbacks', {}) if callbacks: async_options['callbacks'] = decode_callbacks(callbacks) if '__context_checker' in options: _checker = options['__context_checker'] async_options['_context_checker'] = path_to_reference(_checker) if '__process_results' in options: _processor = options['__process_results'] async_options['_process_results'] = path_to_reference(_processor) return async_options
python
def decode_async_options(options): """Decode Async options from JSON decoding.""" async_options = copy.deepcopy(options) # JSON don't like datetimes. eta = async_options.get('task_args', {}).get('eta') if eta: from datetime import datetime async_options['task_args']['eta'] = datetime.fromtimestamp(eta) # If there are callbacks, reconstitute them. callbacks = async_options.get('callbacks', {}) if callbacks: async_options['callbacks'] = decode_callbacks(callbacks) if '__context_checker' in options: _checker = options['__context_checker'] async_options['_context_checker'] = path_to_reference(_checker) if '__process_results' in options: _processor = options['__process_results'] async_options['_process_results'] = path_to_reference(_processor) return async_options
['def', 'decode_async_options', '(', 'options', ')', ':', 'async_options', '=', 'copy', '.', 'deepcopy', '(', 'options', ')', "# JSON don't like datetimes.", 'eta', '=', 'async_options', '.', 'get', '(', "'task_args'", ',', '{', '}', ')', '.', 'get', '(', "'eta'", ')', 'if', 'eta', ':', 'from', 'datetime', 'import', 'datetime', 'async_options', '[', "'task_args'", ']', '[', "'eta'", ']', '=', 'datetime', '.', 'fromtimestamp', '(', 'eta', ')', '# If there are callbacks, reconstitute them.', 'callbacks', '=', 'async_options', '.', 'get', '(', "'callbacks'", ',', '{', '}', ')', 'if', 'callbacks', ':', 'async_options', '[', "'callbacks'", ']', '=', 'decode_callbacks', '(', 'callbacks', ')', 'if', "'__context_checker'", 'in', 'options', ':', '_checker', '=', 'options', '[', "'__context_checker'", ']', 'async_options', '[', "'_context_checker'", ']', '=', 'path_to_reference', '(', '_checker', ')', 'if', "'__process_results'", 'in', 'options', ':', '_processor', '=', 'options', '[', "'__process_results'", ']', 'async_options', '[', "'_process_results'", ']', '=', 'path_to_reference', '(', '_processor', ')', 'return', 'async_options']
Decode Async options from JSON decoding.
['Decode', 'Async', 'options', 'from', 'JSON', 'decoding', '.']
train
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/async.py#L625-L648
9,759
bitesofcode/projexui
projexui/widgets/xnodewidget/xnodescene.py
XNodeScene.findNodeById
def findNodeById( self, objectId ): """ Looks up the node based on the unique node identifier. :param nodeId """ for item in self.items(): if ( isinstance(item, XNode) and item.objectId() == objectId): return item return None
python
def findNodeById( self, objectId ): """ Looks up the node based on the unique node identifier. :param nodeId """ for item in self.items(): if ( isinstance(item, XNode) and item.objectId() == objectId): return item return None
['def', 'findNodeById', '(', 'self', ',', 'objectId', ')', ':', 'for', 'item', 'in', 'self', '.', 'items', '(', ')', ':', 'if', '(', 'isinstance', '(', 'item', ',', 'XNode', ')', 'and', 'item', '.', 'objectId', '(', ')', '==', 'objectId', ')', ':', 'return', 'item', 'return', 'None']
Looks up the node based on the unique node identifier. :param nodeId
['Looks', 'up', 'the', 'node', 'based', 'on', 'the', 'unique', 'node', 'identifier', '.', ':', 'param', 'nodeId']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnodescene.py#L809-L818
9,760
googlesamples/assistant-sdk-python
google-assistant-sdk/googlesamples/assistant/grpc/audio_helpers.py
ConversationStream.stop_recording
def stop_recording(self): """Stop recording from the audio source.""" self._stop_recording.set() with self._source_lock: self._source.stop() self._recording = False
python
def stop_recording(self): """Stop recording from the audio source.""" self._stop_recording.set() with self._source_lock: self._source.stop() self._recording = False
['def', 'stop_recording', '(', 'self', ')', ':', 'self', '.', '_stop_recording', '.', 'set', '(', ')', 'with', 'self', '.', '_source_lock', ':', 'self', '.', '_source', '.', 'stop', '(', ')', 'self', '.', '_recording', '=', 'False']
Stop recording from the audio source.
['Stop', 'recording', 'from', 'the', 'audio', 'source', '.']
train
https://github.com/googlesamples/assistant-sdk-python/blob/84995692f35be8e085de8dfa7032039a13ae3fab/google-assistant-sdk/googlesamples/assistant/grpc/audio_helpers.py#L281-L286
9,761
bitesofcode/projexui
projexui/widgets/xpopupwidget.py
XPopupWidget.keyPressEvent
def keyPressEvent( self, event ): """ Looks for the Esc key to close the popup. :param event | <QKeyEvent> """ if ( event.key() == Qt.Key_Escape ): self.reject() event.accept() return elif ( event.key() in (Qt.Key_Return, Qt.Key_Enter) ): if self._autoDefault: self.accept() event.accept() return super(XPopupWidget, self).keyPressEvent(event)
python
def keyPressEvent( self, event ): """ Looks for the Esc key to close the popup. :param event | <QKeyEvent> """ if ( event.key() == Qt.Key_Escape ): self.reject() event.accept() return elif ( event.key() in (Qt.Key_Return, Qt.Key_Enter) ): if self._autoDefault: self.accept() event.accept() return super(XPopupWidget, self).keyPressEvent(event)
['def', 'keyPressEvent', '(', 'self', ',', 'event', ')', ':', 'if', '(', 'event', '.', 'key', '(', ')', '==', 'Qt', '.', 'Key_Escape', ')', ':', 'self', '.', 'reject', '(', ')', 'event', '.', 'accept', '(', ')', 'return', 'elif', '(', 'event', '.', 'key', '(', ')', 'in', '(', 'Qt', '.', 'Key_Return', ',', 'Qt', '.', 'Key_Enter', ')', ')', ':', 'if', 'self', '.', '_autoDefault', ':', 'self', '.', 'accept', '(', ')', 'event', '.', 'accept', '(', ')', 'return', 'super', '(', 'XPopupWidget', ',', 'self', ')', '.', 'keyPressEvent', '(', 'event', ')']
Looks for the Esc key to close the popup. :param event | <QKeyEvent>
['Looks', 'for', 'the', 'Esc', 'key', 'to', 'close', 'the', 'popup', '.', ':', 'param', 'event', '|', '<QKeyEvent', '>']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xpopupwidget.py#L677-L694
9,762
piotr-rusin/spam-lists
spam_lists/host_collections.py
SortedHostCollection._get_match
def _get_match(self, host_object): """Get an item matching the given host object. The item may be either a parent domain or identical value. Parent domains and existing identical values always precede insertion point for given value - therefore, we treat an item just before insertion point as potential match. :param host_object: an object representing ip address or hostname whose match we are trying to find """ i = self._get_insertion_point(host_object) potential_match = None try: potential_match = self[i-1] except IndexError: pass if host_object.is_match(potential_match): return potential_match return None
python
def _get_match(self, host_object): """Get an item matching the given host object. The item may be either a parent domain or identical value. Parent domains and existing identical values always precede insertion point for given value - therefore, we treat an item just before insertion point as potential match. :param host_object: an object representing ip address or hostname whose match we are trying to find """ i = self._get_insertion_point(host_object) potential_match = None try: potential_match = self[i-1] except IndexError: pass if host_object.is_match(potential_match): return potential_match return None
['def', '_get_match', '(', 'self', ',', 'host_object', ')', ':', 'i', '=', 'self', '.', '_get_insertion_point', '(', 'host_object', ')', 'potential_match', '=', 'None', 'try', ':', 'potential_match', '=', 'self', '[', 'i', '-', '1', ']', 'except', 'IndexError', ':', 'pass', 'if', 'host_object', '.', 'is_match', '(', 'potential_match', ')', ':', 'return', 'potential_match', 'return', 'None']
Get an item matching the given host object. The item may be either a parent domain or identical value. Parent domains and existing identical values always precede insertion point for given value - therefore, we treat an item just before insertion point as potential match. :param host_object: an object representing ip address or hostname whose match we are trying to find
['Get', 'an', 'item', 'matching', 'the', 'given', 'host', 'object', '.']
train
https://github.com/piotr-rusin/spam-lists/blob/fd616e8761b28f3eaa503fee5e45f7748e8f88f2/spam_lists/host_collections.py#L118-L138
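A standalone illustration (not using spam_lists itself) of the ordering property the docstring relies on: when hostnames are kept sorted by their reversed label sequence, a parent domain sorts directly before its subdomains, so the entry just left of the bisect insertion point is the only candidate worth checking.

```python
# Self-contained sketch of the "item just before the insertion point" idea.
import bisect

def key(hostname):
    return tuple(reversed(hostname.split(".")))

listed = sorted(["example.com", "spam.org"], key=key)
keys = [key(h) for h in listed]

query = "mail.example.com"
i = bisect.bisect_right(keys, key(query))
candidate = listed[i - 1] if i else None
is_match = candidate is not None and (
    query == candidate or query.endswith("." + candidate)
)
print(candidate, is_match)  # example.com True
```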
9,763
aliyun/aliyun-log-python-sdk
aliyun/log/logclient.py
LogClient.get_begin_cursor
def get_begin_cursor(self, project_name, logstore_name, shard_id): """ Get begin cursor from log service for batch pull logs Unsuccessful opertaion will cause an LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :type shard_id: int :param shard_id: the shard id :return: GetLogsResponse :raise: LogException """ return self.get_cursor(project_name, logstore_name, shard_id, "begin")
python
def get_begin_cursor(self, project_name, logstore_name, shard_id): """ Get begin cursor from log service for batch pull logs Unsuccessful opertaion will cause an LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :type shard_id: int :param shard_id: the shard id :return: GetLogsResponse :raise: LogException """ return self.get_cursor(project_name, logstore_name, shard_id, "begin")
['def', 'get_begin_cursor', '(', 'self', ',', 'project_name', ',', 'logstore_name', ',', 'shard_id', ')', ':', 'return', 'self', '.', 'get_cursor', '(', 'project_name', ',', 'logstore_name', ',', 'shard_id', ',', '"begin"', ')']
Get begin cursor from log service for batch pull logs Unsuccessful operation will cause an LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :type shard_id: int :param shard_id: the shard id :return: GetLogsResponse :raise: LogException
['Get', 'begin', 'cursor', 'from', 'log', 'service', 'for', 'batch', 'pull', 'logs', 'Unsuccessful', 'opertaion', 'will', 'cause', 'an', 'LogException', '.', ':', 'type', 'project_name', ':', 'string', ':', 'param', 'project_name', ':', 'the', 'Project', 'name', ':', 'type', 'logstore_name', ':', 'string', ':', 'param', 'logstore_name', ':', 'the', 'logstore', 'name', ':', 'type', 'shard_id', ':', 'int', ':', 'param', 'shard_id', ':', 'the', 'shard', 'id', ':', 'return', ':', 'GetLogsResponse', ':', 'raise', ':', 'LogException']
train
https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logclient.py#L749-L766
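A hedged sketch of `get_begin_cursor`; the endpoint, keys and resource names are placeholders, and reading the cursor off the response object via `get_cursor()` is an assumption about the SDK's response type.

```python
# Placeholders throughout; needs real Aliyun Log Service credentials to run.
from aliyun.log import LogClient

client = LogClient("cn-hangzhou.log.aliyuncs.com",
                   "<access-key-id>", "<access-key-secret>")
resp = client.get_begin_cursor("my-project", "my-logstore", shard_id=0)
print(resp.get_cursor())
```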
9,764
jart/fabulous
fabulous/gotham.py
main
def main(): """I provide a command-line interface for this module """ print() print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-") print(lorem_gotham_title().center(50)) print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-") print() poem = lorem_gotham() for n in range(16): if n in (4, 8, 12): print() print(next(poem)) print()
python
def main(): """I provide a command-line interface for this module """ print() print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-") print(lorem_gotham_title().center(50)) print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-") print() poem = lorem_gotham() for n in range(16): if n in (4, 8, 12): print() print(next(poem)) print()
['def', 'main', '(', ')', ':', 'print', '(', ')', 'print', '(', '"-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-"', ')', 'print', '(', 'lorem_gotham_title', '(', ')', '.', 'center', '(', '50', ')', ')', 'print', '(', '"-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-"', ')', 'print', '(', ')', 'poem', '=', 'lorem_gotham', '(', ')', 'for', 'n', 'in', 'range', '(', '16', ')', ':', 'if', 'n', 'in', '(', '4', ',', '8', ',', '12', ')', ':', 'print', '(', ')', 'print', '(', 'next', '(', 'poem', ')', ')', 'print', '(', ')']
I provide a command-line interface for this module
['I', 'provide', 'a', 'command', '-', 'line', 'interface', 'for', 'this', 'module']
train
https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/gotham.py#L108-L121
9,765
Gorialis/jishaku
jishaku/paginators.py
PaginatorInterface.wait_loop
async def wait_loop(self): """ Waits on a loop for reactions to the message. This should not be called manually - it is handled by `send_to`. """ start, back, forward, end, close = self.emojis def check(payload: discord.RawReactionActionEvent): """ Checks if this reaction is related to the paginator interface. """ owner_check = not self.owner or payload.user_id == self.owner.id emoji = payload.emoji if isinstance(emoji, discord.PartialEmoji) and emoji.is_unicode_emoji(): emoji = emoji.name return payload.message_id == self.message.id and \ emoji and emoji in self.emojis and \ payload.user_id != self.bot.user.id and owner_check try: while not self.bot.is_closed(): payload = await self.bot.wait_for('raw_reaction_add', check=check, timeout=self.timeout) emoji = payload.emoji if isinstance(emoji, discord.PartialEmoji) and emoji.is_unicode_emoji(): emoji = emoji.name if emoji == close: await self.message.delete() return if emoji == start: self._display_page = 0 elif emoji == end: self._display_page = self.page_count - 1 elif emoji == back: self._display_page -= 1 elif emoji == forward: self._display_page += 1 self.bot.loop.create_task(self.update()) try: await self.message.remove_reaction(payload.emoji, discord.Object(id=payload.user_id)) except discord.Forbidden: pass except asyncio.TimeoutError: if self.delete_message: return await self.message.delete() for emoji in filter(None, self.emojis): try: await self.message.remove_reaction(emoji, self.message.guild.me) except (discord.Forbidden, discord.NotFound): pass
python
async def wait_loop(self): """ Waits on a loop for reactions to the message. This should not be called manually - it is handled by `send_to`. """ start, back, forward, end, close = self.emojis def check(payload: discord.RawReactionActionEvent): """ Checks if this reaction is related to the paginator interface. """ owner_check = not self.owner or payload.user_id == self.owner.id emoji = payload.emoji if isinstance(emoji, discord.PartialEmoji) and emoji.is_unicode_emoji(): emoji = emoji.name return payload.message_id == self.message.id and \ emoji and emoji in self.emojis and \ payload.user_id != self.bot.user.id and owner_check try: while not self.bot.is_closed(): payload = await self.bot.wait_for('raw_reaction_add', check=check, timeout=self.timeout) emoji = payload.emoji if isinstance(emoji, discord.PartialEmoji) and emoji.is_unicode_emoji(): emoji = emoji.name if emoji == close: await self.message.delete() return if emoji == start: self._display_page = 0 elif emoji == end: self._display_page = self.page_count - 1 elif emoji == back: self._display_page -= 1 elif emoji == forward: self._display_page += 1 self.bot.loop.create_task(self.update()) try: await self.message.remove_reaction(payload.emoji, discord.Object(id=payload.user_id)) except discord.Forbidden: pass except asyncio.TimeoutError: if self.delete_message: return await self.message.delete() for emoji in filter(None, self.emojis): try: await self.message.remove_reaction(emoji, self.message.guild.me) except (discord.Forbidden, discord.NotFound): pass
['async', 'def', 'wait_loop', '(', 'self', ')', ':', 'start', ',', 'back', ',', 'forward', ',', 'end', ',', 'close', '=', 'self', '.', 'emojis', 'def', 'check', '(', 'payload', ':', 'discord', '.', 'RawReactionActionEvent', ')', ':', '"""\n Checks if this reaction is related to the paginator interface.\n """', 'owner_check', '=', 'not', 'self', '.', 'owner', 'or', 'payload', '.', 'user_id', '==', 'self', '.', 'owner', '.', 'id', 'emoji', '=', 'payload', '.', 'emoji', 'if', 'isinstance', '(', 'emoji', ',', 'discord', '.', 'PartialEmoji', ')', 'and', 'emoji', '.', 'is_unicode_emoji', '(', ')', ':', 'emoji', '=', 'emoji', '.', 'name', 'return', 'payload', '.', 'message_id', '==', 'self', '.', 'message', '.', 'id', 'and', 'emoji', 'and', 'emoji', 'in', 'self', '.', 'emojis', 'and', 'payload', '.', 'user_id', '!=', 'self', '.', 'bot', '.', 'user', '.', 'id', 'and', 'owner_check', 'try', ':', 'while', 'not', 'self', '.', 'bot', '.', 'is_closed', '(', ')', ':', 'payload', '=', 'await', 'self', '.', 'bot', '.', 'wait_for', '(', "'raw_reaction_add'", ',', 'check', '=', 'check', ',', 'timeout', '=', 'self', '.', 'timeout', ')', 'emoji', '=', 'payload', '.', 'emoji', 'if', 'isinstance', '(', 'emoji', ',', 'discord', '.', 'PartialEmoji', ')', 'and', 'emoji', '.', 'is_unicode_emoji', '(', ')', ':', 'emoji', '=', 'emoji', '.', 'name', 'if', 'emoji', '==', 'close', ':', 'await', 'self', '.', 'message', '.', 'delete', '(', ')', 'return', 'if', 'emoji', '==', 'start', ':', 'self', '.', '_display_page', '=', '0', 'elif', 'emoji', '==', 'end', ':', 'self', '.', '_display_page', '=', 'self', '.', 'page_count', '-', '1', 'elif', 'emoji', '==', 'back', ':', 'self', '.', '_display_page', '-=', '1', 'elif', 'emoji', '==', 'forward', ':', 'self', '.', '_display_page', '+=', '1', 'self', '.', 'bot', '.', 'loop', '.', 'create_task', '(', 'self', '.', 'update', '(', ')', ')', 'try', ':', 'await', 'self', '.', 'message', '.', 'remove_reaction', '(', 'payload', '.', 'emoji', ',', 'discord', '.', 'Object', '(', 'id', '=', 'payload', '.', 'user_id', ')', ')', 'except', 'discord', '.', 'Forbidden', ':', 'pass', 'except', 'asyncio', '.', 'TimeoutError', ':', 'if', 'self', '.', 'delete_message', ':', 'return', 'await', 'self', '.', 'message', '.', 'delete', '(', ')', 'for', 'emoji', 'in', 'filter', '(', 'None', ',', 'self', '.', 'emojis', ')', ':', 'try', ':', 'await', 'self', '.', 'message', '.', 'remove_reaction', '(', 'emoji', ',', 'self', '.', 'message', '.', 'guild', '.', 'me', ')', 'except', '(', 'discord', '.', 'Forbidden', ',', 'discord', '.', 'NotFound', ')', ':', 'pass']
Waits on a loop for reactions to the message. This should not be called manually - it is handled by `send_to`.
['Waits', 'on', 'a', 'loop', 'for', 'reactions', 'to', 'the', 'message', '.', 'This', 'should', 'not', 'be', 'called', 'manually', '-', 'it', 'is', 'handled', 'by', 'send_to', '.']
train
https://github.com/Gorialis/jishaku/blob/fc7c479b9d510ede189a929c8aa6f7c8ef7f9a6e/jishaku/paginators.py#L197-L255
9,766
basler/pypylon
scripts/builddoxy2swig/doxy2swig/doxy2swig.py
Doxy2SWIG.handle_typical_memberdefs_no_overload
def handle_typical_memberdefs_no_overload(self, signature, memberdef_nodes): """Produce standard documentation for memberdef_nodes.""" for n in memberdef_nodes: self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n']) if self.with_function_signature: self.add_line_with_subsequent_indent(self.get_function_signature(n)) self.subnode_parse(n, pieces=[], ignore=['definition', 'name']) self.add_text(['";', '\n'])
python
def handle_typical_memberdefs_no_overload(self, signature, memberdef_nodes): """Produce standard documentation for memberdef_nodes.""" for n in memberdef_nodes: self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n']) if self.with_function_signature: self.add_line_with_subsequent_indent(self.get_function_signature(n)) self.subnode_parse(n, pieces=[], ignore=['definition', 'name']) self.add_text(['";', '\n'])
['def', 'handle_typical_memberdefs_no_overload', '(', 'self', ',', 'signature', ',', 'memberdef_nodes', ')', ':', 'for', 'n', 'in', 'memberdef_nodes', ':', 'self', '.', 'add_text', '(', '[', "'\\n'", ',', '\'%feature("docstring") \'', ',', 'signature', ',', '\' "\'', ',', "'\\n'", ']', ')', 'if', 'self', '.', 'with_function_signature', ':', 'self', '.', 'add_line_with_subsequent_indent', '(', 'self', '.', 'get_function_signature', '(', 'n', ')', ')', 'self', '.', 'subnode_parse', '(', 'n', ',', 'pieces', '=', '[', ']', ',', 'ignore', '=', '[', "'definition'", ',', "'name'", ']', ')', 'self', '.', 'add_text', '(', '[', '\'";\'', ',', "'\\n'", ']', ')']
Produce standard documentation for memberdef_nodes.
['Produce', 'standard', 'documentation', 'for', 'memberdef_nodes', '.']
train
https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L431-L438
9,767
bitprophet/botox
botox/aws.py
AWS.get_volumes_for_instance
def get_volumes_for_instance(self, arg, device=None):
    """
    Return all EC2 Volume objects attached to ``arg`` instance name or ID.

    May specify ``device`` to limit to the (single) volume attached as that
    device.
    """
    instance = self.get(arg)
    filters = {'attachment.instance-id': instance.id}
    if device is not None:
        filters['attachment.device'] = device
    return self.get_all_volumes(filters=filters)
python
def get_volumes_for_instance(self, arg, device=None):
    """
    Return all EC2 Volume objects attached to ``arg`` instance name or ID.

    May specify ``device`` to limit to the (single) volume attached as that
    device.
    """
    instance = self.get(arg)
    filters = {'attachment.instance-id': instance.id}
    if device is not None:
        filters['attachment.device'] = device
    return self.get_all_volumes(filters=filters)
['def', 'get_volumes_for_instance', '(', 'self', ',', 'arg', ',', 'device', '=', 'None', ')', ':', 'instance', '=', 'self', '.', 'get', '(', 'arg', ')', 'filters', '=', '{', "'attachment.instance-id'", ':', 'instance', '.', 'id', '}', 'if', 'device', 'is', 'not', 'None', ':', 'filters', '[', "'attachment.device'", ']', '=', 'device', 'return', 'self', '.', 'get_all_volumes', '(', 'filters', '=', 'filters', ')']
Return all EC2 Volume objects attached to ``arg`` instance name or ID. May specify ``device`` to limit to the (single) volume attached as that device.
['Return', 'all', 'EC2', 'Volume', 'objects', 'attached', 'to', 'arg', 'instance', 'name', 'or', 'ID', '.']
train
https://github.com/bitprophet/botox/blob/02c887a28bd2638273548cc7d1e6d6f1d4d38bf9/botox/aws.py#L340-L351
9,768
pgmpy/pgmpy
pgmpy/readwrite/XMLBIF.py
XMLBIFWriter.get_definition
def get_definition(self):
    """
    Add Definition to XMLBIF

    Return
    ------
    dict: dict of type {variable: definition tag}

    Examples
    --------
    >>> writer = XMLBIFWriter(model)
    >>> writer.get_definition()
    {'hear-bark': <Element DEFINITION at 0x7f1d48977408>,
     'family-out': <Element DEFINITION at 0x7f1d489773c8>,
     'dog-out': <Element DEFINITION at 0x7f1d48977388>,
     'bowel-problem': <Element DEFINITION at 0x7f1d48977348>,
     'light-on': <Element DEFINITION at 0x7f1d48977448>}
    """
    cpds = self.model.get_cpds()
    cpds.sort(key=lambda x: x.variable)
    definition_tag = {}
    for cpd in cpds:
        definition_tag[cpd.variable] = etree.SubElement(self.network, "DEFINITION")
        etree.SubElement(definition_tag[cpd.variable], "FOR").text = cpd.variable
        for child in sorted(cpd.variables[:0:-1]):
            etree.SubElement(definition_tag[cpd.variable], "GIVEN").text = child
    return definition_tag
python
def get_definition(self):
    """
    Add Definition to XMLBIF

    Return
    ------
    dict: dict of type {variable: definition tag}

    Examples
    --------
    >>> writer = XMLBIFWriter(model)
    >>> writer.get_definition()
    {'hear-bark': <Element DEFINITION at 0x7f1d48977408>,
     'family-out': <Element DEFINITION at 0x7f1d489773c8>,
     'dog-out': <Element DEFINITION at 0x7f1d48977388>,
     'bowel-problem': <Element DEFINITION at 0x7f1d48977348>,
     'light-on': <Element DEFINITION at 0x7f1d48977448>}
    """
    cpds = self.model.get_cpds()
    cpds.sort(key=lambda x: x.variable)
    definition_tag = {}
    for cpd in cpds:
        definition_tag[cpd.variable] = etree.SubElement(self.network, "DEFINITION")
        etree.SubElement(definition_tag[cpd.variable], "FOR").text = cpd.variable
        for child in sorted(cpd.variables[:0:-1]):
            etree.SubElement(definition_tag[cpd.variable], "GIVEN").text = child
    return definition_tag
['def', 'get_definition', '(', 'self', ')', ':', 'cpds', '=', 'self', '.', 'model', '.', 'get_cpds', '(', ')', 'cpds', '.', 'sort', '(', 'key', '=', 'lambda', 'x', ':', 'x', '.', 'variable', ')', 'definition_tag', '=', '{', '}', 'for', 'cpd', 'in', 'cpds', ':', 'definition_tag', '[', 'cpd', '.', 'variable', ']', '=', 'etree', '.', 'SubElement', '(', 'self', '.', 'network', ',', '"DEFINITION"', ')', 'etree', '.', 'SubElement', '(', 'definition_tag', '[', 'cpd', '.', 'variable', ']', ',', '"FOR"', ')', '.', 'text', '=', 'cpd', '.', 'variable', 'for', 'child', 'in', 'sorted', '(', 'cpd', '.', 'variables', '[', ':', '0', ':', '-', '1', ']', ')', ':', 'etree', '.', 'SubElement', '(', 'definition_tag', '[', 'cpd', '.', 'variable', ']', ',', '"GIVEN"', ')', '.', 'text', '=', 'child', 'return', 'definition_tag']
Add Definition to XMLBIF Return ------ dict: dict of type {variable: definition tag} Examples -------- >>> writer = XMLBIFWriter(model) >>> writer.get_definition() {'hear-bark': <Element DEFINITION at 0x7f1d48977408>, 'family-out': <Element DEFINITION at 0x7f1d489773c8>, 'dog-out': <Element DEFINITION at 0x7f1d48977388>, 'bowel-problem': <Element DEFINITION at 0x7f1d48977348>, 'light-on': <Element DEFINITION at 0x7f1d48977448>}
['Add', 'Definition', 'to', 'XMLBIF']
train
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/XMLBIF.py#L372-L399
9,769
architv/soccer-cli
soccer/writers.py
Stdout.scores
def scores(self, result, add_new_line=True):
    """Prints out the scores in a pretty format"""
    if result.goalsHomeTeam > result.goalsAwayTeam:
        homeColor, awayColor = (self.colors.WIN, self.colors.LOSE)
    elif result.goalsHomeTeam < result.goalsAwayTeam:
        homeColor, awayColor = (self.colors.LOSE, self.colors.WIN)
    else:
        homeColor = awayColor = self.colors.TIE

    click.secho('%-25s %2s' % (result.homeTeam, result.goalsHomeTeam),
                fg=homeColor, nl=False)
    click.secho(" vs ", nl=False)
    click.secho('%2s %s' % (result.goalsAwayTeam, result.awayTeam.rjust(25)),
                fg=awayColor, nl=add_new_line)
python
def scores(self, result, add_new_line=True):
    """Prints out the scores in a pretty format"""
    if result.goalsHomeTeam > result.goalsAwayTeam:
        homeColor, awayColor = (self.colors.WIN, self.colors.LOSE)
    elif result.goalsHomeTeam < result.goalsAwayTeam:
        homeColor, awayColor = (self.colors.LOSE, self.colors.WIN)
    else:
        homeColor = awayColor = self.colors.TIE

    click.secho('%-25s %2s' % (result.homeTeam, result.goalsHomeTeam),
                fg=homeColor, nl=False)
    click.secho(" vs ", nl=False)
    click.secho('%2s %s' % (result.goalsAwayTeam, result.awayTeam.rjust(25)),
                fg=awayColor, nl=add_new_line)
['def', 'scores', '(', 'self', ',', 'result', ',', 'add_new_line', '=', 'True', ')', ':', 'if', 'result', '.', 'goalsHomeTeam', '>', 'result', '.', 'goalsAwayTeam', ':', 'homeColor', ',', 'awayColor', '=', '(', 'self', '.', 'colors', '.', 'WIN', ',', 'self', '.', 'colors', '.', 'LOSE', ')', 'elif', 'result', '.', 'goalsHomeTeam', '<', 'result', '.', 'goalsAwayTeam', ':', 'homeColor', ',', 'awayColor', '=', '(', 'self', '.', 'colors', '.', 'LOSE', ',', 'self', '.', 'colors', '.', 'WIN', ')', 'else', ':', 'homeColor', '=', 'awayColor', '=', 'self', '.', 'colors', '.', 'TIE', 'click', '.', 'secho', '(', "'%-25s %2s'", '%', '(', 'result', '.', 'homeTeam', ',', 'result', '.', 'goalsHomeTeam', ')', ',', 'fg', '=', 'homeColor', ',', 'nl', '=', 'False', ')', 'click', '.', 'secho', '(', '" vs "', ',', 'nl', '=', 'False', ')', 'click', '.', 'secho', '(', "'%2s %s'", '%', '(', 'result', '.', 'goalsAwayTeam', ',', 'result', '.', 'awayTeam', '.', 'rjust', '(', '25', ')', ')', ',', 'fg', '=', 'awayColor', ',', 'nl', '=', 'add_new_line', ')']
Prints out the scores in a pretty format
['Prints', 'out', 'the', 'scores', 'in', 'a', 'pretty', 'format']
train
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L149-L163
9,770
tjcsl/cslbot
cslbot/commands/weather.py
set_default
def set_default(nick, location, session, send, apikey):
    """Sets nick's default location to location."""
    if valid_location(location, apikey):
        send("Setting default location")
        default = session.query(Weather_prefs).filter(Weather_prefs.nick == nick).first()
        if default is None:
            default = Weather_prefs(nick=nick, location=location)
            session.add(default)
        else:
            default.location = location
    else:
        send("Invalid or Ambiguous Location")
python
def set_default(nick, location, session, send, apikey):
    """Sets nick's default location to location."""
    if valid_location(location, apikey):
        send("Setting default location")
        default = session.query(Weather_prefs).filter(Weather_prefs.nick == nick).first()
        if default is None:
            default = Weather_prefs(nick=nick, location=location)
            session.add(default)
        else:
            default.location = location
    else:
        send("Invalid or Ambiguous Location")
['def', 'set_default', '(', 'nick', ',', 'location', ',', 'session', ',', 'send', ',', 'apikey', ')', ':', 'if', 'valid_location', '(', 'location', ',', 'apikey', ')', ':', 'send', '(', '"Setting default location"', ')', 'default', '=', 'session', '.', 'query', '(', 'Weather_prefs', ')', '.', 'filter', '(', 'Weather_prefs', '.', 'nick', '==', 'nick', ')', '.', 'first', '(', ')', 'if', 'default', 'is', 'None', ':', 'default', '=', 'Weather_prefs', '(', 'nick', '=', 'nick', ',', 'location', '=', 'location', ')', 'session', '.', 'add', '(', 'default', ')', 'else', ':', 'default', '.', 'location', '=', 'location', 'else', ':', 'send', '(', '"Invalid or Ambiguous Location"', ')']
Sets nick's default location to location.
['Sets', 'nick', 's', 'default', 'location', 'to', 'location', '.']
train
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/weather.py#L73-L84
9,771
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
get_external_subprocess_output
def get_external_subprocess_output(command_list, print_output=False, indent_string="",
                                   split_lines=True, ignore_called_process_errors=False, env=None):
    """Run the command and arguments in the command_list. Will search the system
    PATH. Returns the output as a list of lines. If print_output is True the
    output is echoed to stdout, indented (or otherwise prefixed) by indent_string.
    Waits for command completion. Called process errors can be set to be
    ignored if necessary."""

    # Note ghostscript bounding box output writes to stderr! So we need to
    # be sure to capture the stderr along with the stdout.
    print_output = False  # Useful for debugging to set True.
    use_popen = True  # Needs to be True to set ignore_called_process_errors True
    if use_popen:  # Use lower-level Popen call.
        p = subprocess.Popen(command_list, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, env=env)
        output, errout = p.communicate()
        returncode = p.poll()
        if not ignore_called_process_errors and returncode != 0:
            raise subprocess.CalledProcessError(returncode, command_list, output=output)
    else:  # Use a check_output call.
        # Note this does not work correctly if shell=True.
        output = subprocess.check_output(command_list, stderr=subprocess.STDOUT,
                                         shell=False, env=env)

    output = output.decode("utf-8")

    if split_lines or print_output:
        split_output = output.splitlines()
        if split_lines:
            output = split_output
        if print_output:
            print()
            for line in split_output:
                print(indent_string + line)
            sys.stdout.flush()
    return output
python
def get_external_subprocess_output(command_list, print_output=False, indent_string="",
                                   split_lines=True, ignore_called_process_errors=False, env=None):
    """Run the command and arguments in the command_list. Will search the system
    PATH. Returns the output as a list of lines. If print_output is True the
    output is echoed to stdout, indented (or otherwise prefixed) by indent_string.
    Waits for command completion. Called process errors can be set to be
    ignored if necessary."""

    # Note ghostscript bounding box output writes to stderr! So we need to
    # be sure to capture the stderr along with the stdout.
    print_output = False  # Useful for debugging to set True.
    use_popen = True  # Needs to be True to set ignore_called_process_errors True
    if use_popen:  # Use lower-level Popen call.
        p = subprocess.Popen(command_list, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, env=env)
        output, errout = p.communicate()
        returncode = p.poll()
        if not ignore_called_process_errors and returncode != 0:
            raise subprocess.CalledProcessError(returncode, command_list, output=output)
    else:  # Use a check_output call.
        # Note this does not work correctly if shell=True.
        output = subprocess.check_output(command_list, stderr=subprocess.STDOUT,
                                         shell=False, env=env)

    output = output.decode("utf-8")

    if split_lines or print_output:
        split_output = output.splitlines()
        if split_lines:
            output = split_output
        if print_output:
            print()
            for line in split_output:
                print(indent_string + line)
            sys.stdout.flush()
    return output
['def', 'get_external_subprocess_output', '(', 'command_list', ',', 'print_output', '=', 'False', ',', 'indent_string', '=', '""', ',', 'split_lines', '=', 'True', ',', 'ignore_called_process_errors', '=', 'False', ',', 'env', '=', 'None', ')', ':', '# Note ghostscript bounding box output writes to stderr! So we need to', '# be sure to capture the stderr along with the stdout.', 'print_output', '=', 'False', '# Useful for debugging to set True.', 'use_popen', '=', 'True', '# Needs to be True to set ignore_called_process_errors True', 'if', 'use_popen', ':', '# Use lower-level Popen call.', 'p', '=', 'subprocess', '.', 'Popen', '(', 'command_list', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'stderr', '=', 'subprocess', '.', 'STDOUT', ',', 'env', '=', 'env', ')', 'output', ',', 'errout', '=', 'p', '.', 'communicate', '(', ')', 'returncode', '=', 'p', '.', 'poll', '(', ')', 'if', 'not', 'ignore_called_process_errors', 'and', 'returncode', '!=', '0', ':', 'raise', 'subprocess', '.', 'CalledProcessError', '(', 'returncode', ',', 'command_list', ',', 'output', '=', 'output', ')', 'else', ':', '# Use a check_output call.', '# Note this does not work correctly if shell=True.', 'output', '=', 'subprocess', '.', 'check_output', '(', 'command_list', ',', 'stderr', '=', 'subprocess', '.', 'STDOUT', ',', 'shell', '=', 'False', ',', 'env', '=', 'env', ')', 'output', '=', 'output', '.', 'decode', '(', '"utf-8"', ')', 'if', 'split_lines', 'or', 'print_output', ':', 'split_output', '=', 'output', '.', 'splitlines', '(', ')', 'if', 'split_lines', ':', 'output', '=', 'split_output', 'if', 'print_output', ':', 'print', '(', ')', 'for', 'line', 'in', 'split_output', ':', 'print', '(', 'indent_string', '+', 'line', ')', 'sys', '.', 'stdout', '.', 'flush', '(', ')', 'return', 'output']
Run the command and arguments in the command_list. Will search the system PATH. Returns the output as a list of lines. If print_output is True the output is echoed to stdout, indented (or otherwise prefixed) by indent_string. Waits for command completion. Called process errors can be set to be ignored if necessary.
['Run', 'the', 'command', 'and', 'arguments', 'in', 'the', 'command_list', '.', 'Will', 'search', 'the', 'system', 'PATH', '.', 'Returns', 'the', 'output', 'as', 'a', 'list', 'of', 'lines', '.', 'If', 'print_output', 'is', 'True', 'the', 'output', 'is', 'echoed', 'to', 'stdout', 'indented', '(', 'or', 'otherwise', 'prefixed', ')', 'by', 'indent_string', '.', 'Waits', 'for', 'command', 'completion', '.', 'Called', 'process', 'errors', 'can', 'be', 'set', 'to', 'be', 'ignored', 'if', 'necessary', '.']
train
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L240-L277
9,772
pavoni/pyvera
pyvera/__init__.py
VeraThermostat.get_current_temperature
def get_current_temperature(self, refresh=False):
    """Get current temperature"""
    if refresh:
        self.refresh()
    try:
        return float(self.get_value('temperature'))
    except (TypeError, ValueError):
        return None
python
def get_current_temperature(self, refresh=False):
    """Get current temperature"""
    if refresh:
        self.refresh()
    try:
        return float(self.get_value('temperature'))
    except (TypeError, ValueError):
        return None
['def', 'get_current_temperature', '(', 'self', ',', 'refresh', '=', 'False', ')', ':', 'if', 'refresh', ':', 'self', '.', 'refresh', '(', ')', 'try', ':', 'return', 'float', '(', 'self', '.', 'get_value', '(', "'temperature'", ')', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'return', 'None']
Get current temperature
['Get', 'current', 'temperature']
train
https://github.com/pavoni/pyvera/blob/e05e3d13f76153444787d31948feb5419d77a8c8/pyvera/__init__.py#L1076-L1083
9,773
spacetelescope/drizzlepac
drizzlepac/tweakutils.py
parse_skypos
def parse_skypos(ra, dec):
    """
    Function to parse RA and Dec input values and turn them into decimal degrees

    Input formats could be:
        ["nn","nn","nn.nn"]
        "nn nn nn.nnn"
        "nn:nn:nn.nn"
        "nnH nnM nn.nnS" or "nnD nnM nn.nnS"
        nn.nnnnnnnn
        "nn.nnnnnnn"
    """
    rval = make_val_float(ra)
    dval = make_val_float(dec)
    if rval is None:
        rval, dval = radec_hmstodd(ra, dec)
    return rval, dval
python
def parse_skypos(ra, dec):
    """
    Function to parse RA and Dec input values and turn them into decimal degrees

    Input formats could be:
        ["nn","nn","nn.nn"]
        "nn nn nn.nnn"
        "nn:nn:nn.nn"
        "nnH nnM nn.nnS" or "nnD nnM nn.nnS"
        nn.nnnnnnnn
        "nn.nnnnnnn"
    """
    rval = make_val_float(ra)
    dval = make_val_float(dec)
    if rval is None:
        rval, dval = radec_hmstodd(ra, dec)
    return rval, dval
['def', 'parse_skypos', '(', 'ra', ',', 'dec', ')', ':', 'rval', '=', 'make_val_float', '(', 'ra', ')', 'dval', '=', 'make_val_float', '(', 'dec', ')', 'if', 'rval', 'is', 'None', ':', 'rval', ',', 'dval', '=', 'radec_hmstodd', '(', 'ra', ',', 'dec', ')', 'return', 'rval', ',', 'dval']
Function to parse RA and Dec input values and turn them into decimal degrees Input formats could be: ["nn","nn","nn.nn"] "nn nn nn.nnn" "nn:nn:nn.nn" "nnH nnM nn.nnS" or "nnD nnM nn.nnS" nn.nnnnnnnn "nn.nnnnnnn"
['Function', 'to', 'parse', 'RA', 'and', 'Dec', 'input', 'values', 'and', 'turn', 'them', 'into', 'decimal', 'degrees']
train
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/tweakutils.py#L173-L191
9,774
steffann/pylisp
pylisp/packet/lisp/control/map_reply_record.py
MapReplyRecord.from_bytes
def from_bytes(cls, bitstream):
    '''
    Parse the given record and update properties accordingly
    '''
    record = cls()

    # Convert to ConstBitStream (if not already provided)
    if not isinstance(bitstream, ConstBitStream):
        if isinstance(bitstream, Bits):
            bitstream = ConstBitStream(auto=bitstream)
        else:
            bitstream = ConstBitStream(bytes=bitstream)

    # Read the record TTL
    record.ttl = bitstream.read('uint:32')

    # Store the locator record count until we need it
    locator_record_count = bitstream.read('uint:8')

    # Store the EID prefix mask length until we need it
    eid_prefix_len = bitstream.read('uint:8')

    # Read the Negative Map_Reply action
    record.action = bitstream.read('uint:3')

    # Read the flag
    record.authoritative = bitstream.read('bool')

    # Read reserved bits
    record._reserved1 = bitstream.read(12 + 4)

    # Read the map version
    record.map_version = bitstream.read('uint:12')

    # Read the EID prefix
    record.eid_prefix = read_afi_address_from_bitstream(bitstream, eid_prefix_len)

    # Read the locator records
    for dummy in range(locator_record_count):
        locator_record = LocatorRecord.from_bytes(bitstream)
        record.locator_records.append(locator_record)

    # Verify that the properties make sense
    record.sanitize()

    return record
python
def from_bytes(cls, bitstream):
    '''
    Parse the given record and update properties accordingly
    '''
    record = cls()

    # Convert to ConstBitStream (if not already provided)
    if not isinstance(bitstream, ConstBitStream):
        if isinstance(bitstream, Bits):
            bitstream = ConstBitStream(auto=bitstream)
        else:
            bitstream = ConstBitStream(bytes=bitstream)

    # Read the record TTL
    record.ttl = bitstream.read('uint:32')

    # Store the locator record count until we need it
    locator_record_count = bitstream.read('uint:8')

    # Store the EID prefix mask length until we need it
    eid_prefix_len = bitstream.read('uint:8')

    # Read the Negative Map_Reply action
    record.action = bitstream.read('uint:3')

    # Read the flag
    record.authoritative = bitstream.read('bool')

    # Read reserved bits
    record._reserved1 = bitstream.read(12 + 4)

    # Read the map version
    record.map_version = bitstream.read('uint:12')

    # Read the EID prefix
    record.eid_prefix = read_afi_address_from_bitstream(bitstream, eid_prefix_len)

    # Read the locator records
    for dummy in range(locator_record_count):
        locator_record = LocatorRecord.from_bytes(bitstream)
        record.locator_records.append(locator_record)

    # Verify that the properties make sense
    record.sanitize()

    return record
['def', 'from_bytes', '(', 'cls', ',', 'bitstream', ')', ':', 'record', '=', 'cls', '(', ')', '# Convert to ConstBitStream (if not already provided)', 'if', 'not', 'isinstance', '(', 'bitstream', ',', 'ConstBitStream', ')', ':', 'if', 'isinstance', '(', 'bitstream', ',', 'Bits', ')', ':', 'bitstream', '=', 'ConstBitStream', '(', 'auto', '=', 'bitstream', ')', 'else', ':', 'bitstream', '=', 'ConstBitStream', '(', 'bytes', '=', 'bitstream', ')', '# Read the record TTL', 'record', '.', 'ttl', '=', 'bitstream', '.', 'read', '(', "'uint:32'", ')', '# Store the locator record count until we need it', 'locator_record_count', '=', 'bitstream', '.', 'read', '(', "'uint:8'", ')', '# Store the EID prefix mask length until we need it', 'eid_prefix_len', '=', 'bitstream', '.', 'read', '(', "'uint:8'", ')', '# Read the Negative Map_Reply action', 'record', '.', 'action', '=', 'bitstream', '.', 'read', '(', "'uint:3'", ')', '# Read the flag', 'record', '.', 'authoritative', '=', 'bitstream', '.', 'read', '(', "'bool'", ')', '# Read reserved bits', 'record', '.', '_reserved1', '=', 'bitstream', '.', 'read', '(', '12', '+', '4', ')', '# Read the map version', 'record', '.', 'map_version', '=', 'bitstream', '.', 'read', '(', "'uint:12'", ')', '# Read the EID prefix', 'record', '.', 'eid_prefix', '=', 'read_afi_address_from_bitstream', '(', 'bitstream', ',', 'eid_prefix_len', ')', '# Read the locator records', 'for', 'dummy', 'in', 'range', '(', 'locator_record_count', ')', ':', 'locator_record', '=', 'LocatorRecord', '.', 'from_bytes', '(', 'bitstream', ')', 'record', '.', 'locator_records', '.', 'append', '(', 'locator_record', ')', '# Verify that the properties make sense', 'record', '.', 'sanitize', '(', ')', 'return', 'record']
Parse the given record and update properties accordingly
['Parse', 'the', 'given', 'record', 'and', 'update', 'properties', 'accordingly']
train
https://github.com/steffann/pylisp/blob/907340f0c7ef2c4d4fe0c8e0a48df5be0d969407/pylisp/packet/lisp/control/map_reply_record.py#L142-L188
9,775
sethmlarson/virtualbox-python
virtualbox/library.py
IInternalSessionControl.on_network_adapter_change
def on_network_adapter_change(self, network_adapter, change_adapter):
    """Triggered when settings of a network adapter of the
    associated virtual machine have changed.

    in network_adapter of type :class:`INetworkAdapter`

    in change_adapter of type bool

    raises :class:`VBoxErrorInvalidVmState`
        Session state prevents operation.

    raises :class:`VBoxErrorInvalidObjectState`
        Session type prevents operation.
    """
    if not isinstance(network_adapter, INetworkAdapter):
        raise TypeError("network_adapter can only be an instance of type INetworkAdapter")
    if not isinstance(change_adapter, bool):
        raise TypeError("change_adapter can only be an instance of type bool")
    self._call("onNetworkAdapterChange",
               in_p=[network_adapter, change_adapter])
python
def on_network_adapter_change(self, network_adapter, change_adapter):
    """Triggered when settings of a network adapter of the
    associated virtual machine have changed.

    in network_adapter of type :class:`INetworkAdapter`

    in change_adapter of type bool

    raises :class:`VBoxErrorInvalidVmState`
        Session state prevents operation.

    raises :class:`VBoxErrorInvalidObjectState`
        Session type prevents operation.
    """
    if not isinstance(network_adapter, INetworkAdapter):
        raise TypeError("network_adapter can only be an instance of type INetworkAdapter")
    if not isinstance(change_adapter, bool):
        raise TypeError("change_adapter can only be an instance of type bool")
    self._call("onNetworkAdapterChange",
               in_p=[network_adapter, change_adapter])
['def', 'on_network_adapter_change', '(', 'self', ',', 'network_adapter', ',', 'change_adapter', ')', ':', 'if', 'not', 'isinstance', '(', 'network_adapter', ',', 'INetworkAdapter', ')', ':', 'raise', 'TypeError', '(', '"network_adapter can only be an instance of type INetworkAdapter"', ')', 'if', 'not', 'isinstance', '(', 'change_adapter', ',', 'bool', ')', ':', 'raise', 'TypeError', '(', '"change_adapter can only be an instance of type bool"', ')', 'self', '.', '_call', '(', '"onNetworkAdapterChange"', ',', 'in_p', '=', '[', 'network_adapter', ',', 'change_adapter', ']', ')']
Triggered when settings of a network adapter of the associated virtual machine have changed. in network_adapter of type :class:`INetworkAdapter` in change_adapter of type bool raises :class:`VBoxErrorInvalidVmState` Session state prevents operation. raises :class:`VBoxErrorInvalidObjectState` Session type prevents operation.
['Triggered', 'when', 'settings', 'of', 'a', 'network', 'adapter', 'of', 'the', 'associated', 'virtual', 'machine', 'have', 'changed', '.']
train
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L28324-L28344
9,776
bitesofcode/projexui
projexui/windows/xdkwindow/xdkwindow.py
XdkWindow.closeContentsWidget
def closeContentsWidget( self ):
    """
    Closes the current contents widget.
    """
    widget = self.currentContentsWidget()
    if ( not widget ):
        return

    widget.close()
    widget.setParent(None)
    widget.deleteLater()
python
def closeContentsWidget( self ):
    """
    Closes the current contents widget.
    """
    widget = self.currentContentsWidget()
    if ( not widget ):
        return

    widget.close()
    widget.setParent(None)
    widget.deleteLater()
['def', 'closeContentsWidget', '(', 'self', ')', ':', 'widget', '=', 'self', '.', 'currentContentsWidget', '(', ')', 'if', '(', 'not', 'widget', ')', ':', 'return', 'widget', '.', 'close', '(', ')', 'widget', '.', 'setParent', '(', 'None', ')', 'widget', '.', 'deleteLater', '(', ')']
Closes the current contents widget.
['Closes', 'the', 'current', 'contents', 'widget', '.']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/windows/xdkwindow/xdkwindow.py#L246-L256
9,777
jsvine/spectra
spectra/grapefruit.py
Color.NewFromLab
def NewFromLab(l, a, b, alpha=1.0, wref=_DEFAULT_WREF):
    '''Create a new instance based on the specifed CIE-LAB values.

    Parameters:
      :l: The L component [0...100]
      :a: The a component [-1...1]
      :b: The a component [-1...1]
      :alpha: The color transparency [0...1], default is opaque
      :wref: The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692))
    '(1, 0.5, 1.09491e-08, 1)'
    >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, wref=Color.WHITE_REFERENCE['std_D50']))
    '(1.01238, 0.492011, -0.14311, 1)'
    >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, 0.5))
    '(1, 0.5, 1.09491e-08, 0.5)'
    >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, 0.5, Color.WHITE_REFERENCE['std_D50']))
    '(1.01238, 0.492011, -0.14311, 0.5)'
    '''
    return Color(Color.XyzToRgb(*Color.LabToXyz(l, a, b, wref)), 'rgb', alpha, wref)
python
def NewFromLab(l, a, b, alpha=1.0, wref=_DEFAULT_WREF):
    '''Create a new instance based on the specifed CIE-LAB values.

    Parameters:
      :l: The L component [0...100]
      :a: The a component [-1...1]
      :b: The a component [-1...1]
      :alpha: The color transparency [0...1], default is opaque
      :wref: The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692))
    '(1, 0.5, 1.09491e-08, 1)'
    >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, wref=Color.WHITE_REFERENCE['std_D50']))
    '(1.01238, 0.492011, -0.14311, 1)'
    >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, 0.5))
    '(1, 0.5, 1.09491e-08, 0.5)'
    >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, 0.5, Color.WHITE_REFERENCE['std_D50']))
    '(1.01238, 0.492011, -0.14311, 0.5)'
    '''
    return Color(Color.XyzToRgb(*Color.LabToXyz(l, a, b, wref)), 'rgb', alpha, wref)
['def', 'NewFromLab', '(', 'l', ',', 'a', ',', 'b', ',', 'alpha', '=', '1.0', ',', 'wref', '=', '_DEFAULT_WREF', ')', ':', 'return', 'Color', '(', 'Color', '.', 'XyzToRgb', '(', '*', 'Color', '.', 'LabToXyz', '(', 'l', ',', 'a', ',', 'b', ',', 'wref', ')', ')', ',', "'rgb'", ',', 'alpha', ',', 'wref', ')']
Create a new instance based on the specifed CIE-LAB values. Parameters: :l: The L component [0...100] :a: The a component [-1...1] :b: The a component [-1...1] :alpha: The color transparency [0...1], default is opaque :wref: The whitepoint reference, default is 2° D65. Returns: A grapefruit.Color instance. >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692)) '(1, 0.5, 1.09491e-08, 1)' >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, wref=Color.WHITE_REFERENCE['std_D50'])) '(1.01238, 0.492011, -0.14311, 1)' >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, 0.5)) '(1, 0.5, 1.09491e-08, 0.5)' >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, 0.5, Color.WHITE_REFERENCE['std_D50'])) '(1.01238, 0.492011, -0.14311, 0.5)'
['Create', 'a', 'new', 'instance', 'based', 'on', 'the', 'specifed', 'CIE', '-', 'LAB', 'values', '.']
train
https://github.com/jsvine/spectra/blob/2269a0ae9b5923154b15bd661fb81179608f7ec2/spectra/grapefruit.py#L1340-L1368
9,778
ska-sa/purr
Purr/Plugins/local_pychart/chart_data.py
write_csv
def write_csv(path, data):
    """This function writes comma-separated <data> to <path>.
    Parameter <path> is either a pathname or a file-like object
    that supports the |write()| method."""
    fd = _try_open_file(path, 'w',
                        'The first argument must be a pathname or an object that supports write() method')
    for v in data:
        fd.write(",".join([str(x) for x in v]))
        fd.write("\n")
    _try_close_file(fd, path)
python
def write_csv(path, data):
    """This function writes comma-separated <data> to <path>.
    Parameter <path> is either a pathname or a file-like object
    that supports the |write()| method."""
    fd = _try_open_file(path, 'w',
                        'The first argument must be a pathname or an object that supports write() method')
    for v in data:
        fd.write(",".join([str(x) for x in v]))
        fd.write("\n")
    _try_close_file(fd, path)
['def', 'write_csv', '(', 'path', ',', 'data', ')', ':', 'fd', '=', '_try_open_file', '(', 'path', ',', "'w'", ',', "'The first argument must be a pathname or an object that supports write() method'", ')', 'for', 'v', 'in', 'data', ':', 'fd', '.', 'write', '(', '","', '.', 'join', '(', '[', 'str', '(', 'x', ')', 'for', 'x', 'in', 'v', ']', ')', ')', 'fd', '.', 'write', '(', '"\\n"', ')', '_try_close_file', '(', 'fd', ',', 'path', ')']
This function writes comma-separated <data> to <path>. Parameter <path> is either a pathname or a file-like object that supports the |write()| method.
['This', 'function', 'writes', 'comma', '-', 'separated', '<data', '>', 'to', '<path', '>', '.', 'Parameter', '<path', '>', 'is', 'either', 'a', 'pathname', 'or', 'a', 'file', '-', 'like', 'object', 'that', 'supports', 'the', '|write', '()', '|', 'method', '.']
train
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Plugins/local_pychart/chart_data.py#L258-L268
9,779
django-danceschool/django-danceschool
danceschool/core/models.py
Event.numRegisteredForRole
def numRegisteredForRole(self, role, includeTemporaryRegs=False):
    '''
    Accepts a DanceRole object and returns the number of registrations of that role.
    '''
    count = self.eventregistration_set.filter(cancelled=False,dropIn=False,role=role).count()
    if includeTemporaryRegs:
        count += self.temporaryeventregistration_set.filter(dropIn=False,role=role).exclude(
            registration__expirationDate__lte=timezone.now()).count()
    return count
python
def numRegisteredForRole(self, role, includeTemporaryRegs=False):
    '''
    Accepts a DanceRole object and returns the number of registrations of that role.
    '''
    count = self.eventregistration_set.filter(cancelled=False,dropIn=False,role=role).count()
    if includeTemporaryRegs:
        count += self.temporaryeventregistration_set.filter(dropIn=False,role=role).exclude(
            registration__expirationDate__lte=timezone.now()).count()
    return count
['def', 'numRegisteredForRole', '(', 'self', ',', 'role', ',', 'includeTemporaryRegs', '=', 'False', ')', ':', 'count', '=', 'self', '.', 'eventregistration_set', '.', 'filter', '(', 'cancelled', '=', 'False', ',', 'dropIn', '=', 'False', ',', 'role', '=', 'role', ')', '.', 'count', '(', ')', 'if', 'includeTemporaryRegs', ':', 'count', '+=', 'self', '.', 'temporaryeventregistration_set', '.', 'filter', '(', 'dropIn', '=', 'False', ',', 'role', '=', 'role', ')', '.', 'exclude', '(', 'registration__expirationDate__lte', '=', 'timezone', '.', 'now', '(', ')', ')', '.', 'count', '(', ')', 'return', 'count']
Accepts a DanceRole object and returns the number of registrations of that role.
['Accepts', 'a', 'DanceRole', 'object', 'and', 'returns', 'the', 'number', 'of', 'registrations', 'of', 'that', 'role', '.']
train
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/models.py#L967-L975
9,780
boriel/zxbasic
zxbparser.py
p_arr_assignment
def p_arr_assignment(p):
    """ statement : ARRAY_ID arg_list EQ expr
                  | LET ARRAY_ID arg_list EQ expr
    """
    i = 2 if p[1].upper() == 'LET' else 1
    id_ = p[i]
    arg_list = p[i + 1]
    expr = p[i + 3]
    p[0] = None

    if arg_list is None or expr is None:
        return  # There were errors

    entry = SYMBOL_TABLE.access_call(id_, p.lineno(i))
    if entry is None:
        return

    if entry.type_ == TYPE.string:
        variable = gl.SYMBOL_TABLE.access_array(id_, p.lineno(i))
        if len(variable.bounds) + 1 == len(arg_list):
            ss = arg_list.children.pop().value
            p[0] = make_array_substr_assign(p.lineno(i), id_, arg_list, (ss, ss), expr)
            return

    arr = make_array_access(id_, p.lineno(i), arg_list)
    if arr is None:
        return

    expr = make_typecast(arr.type_, expr, p.lineno(i))
    if entry is None:
        return

    p[0] = make_sentence('LETARRAY', arr, expr)
python
def p_arr_assignment(p):
    """ statement : ARRAY_ID arg_list EQ expr
                  | LET ARRAY_ID arg_list EQ expr
    """
    i = 2 if p[1].upper() == 'LET' else 1
    id_ = p[i]
    arg_list = p[i + 1]
    expr = p[i + 3]
    p[0] = None

    if arg_list is None or expr is None:
        return  # There were errors

    entry = SYMBOL_TABLE.access_call(id_, p.lineno(i))
    if entry is None:
        return

    if entry.type_ == TYPE.string:
        variable = gl.SYMBOL_TABLE.access_array(id_, p.lineno(i))
        if len(variable.bounds) + 1 == len(arg_list):
            ss = arg_list.children.pop().value
            p[0] = make_array_substr_assign(p.lineno(i), id_, arg_list, (ss, ss), expr)
            return

    arr = make_array_access(id_, p.lineno(i), arg_list)
    if arr is None:
        return

    expr = make_typecast(arr.type_, expr, p.lineno(i))
    if entry is None:
        return

    p[0] = make_sentence('LETARRAY', arr, expr)
['def', 'p_arr_assignment', '(', 'p', ')', ':', 'i', '=', '2', 'if', 'p', '[', '1', ']', '.', 'upper', '(', ')', '==', "'LET'", 'else', '1', 'id_', '=', 'p', '[', 'i', ']', 'arg_list', '=', 'p', '[', 'i', '+', '1', ']', 'expr', '=', 'p', '[', 'i', '+', '3', ']', 'p', '[', '0', ']', '=', 'None', 'if', 'arg_list', 'is', 'None', 'or', 'expr', 'is', 'None', ':', 'return', '# There were errors', 'entry', '=', 'SYMBOL_TABLE', '.', 'access_call', '(', 'id_', ',', 'p', '.', 'lineno', '(', 'i', ')', ')', 'if', 'entry', 'is', 'None', ':', 'return', 'if', 'entry', '.', 'type_', '==', 'TYPE', '.', 'string', ':', 'variable', '=', 'gl', '.', 'SYMBOL_TABLE', '.', 'access_array', '(', 'id_', ',', 'p', '.', 'lineno', '(', 'i', ')', ')', 'if', 'len', '(', 'variable', '.', 'bounds', ')', '+', '1', '==', 'len', '(', 'arg_list', ')', ':', 'ss', '=', 'arg_list', '.', 'children', '.', 'pop', '(', ')', '.', 'value', 'p', '[', '0', ']', '=', 'make_array_substr_assign', '(', 'p', '.', 'lineno', '(', 'i', ')', ',', 'id_', ',', 'arg_list', ',', '(', 'ss', ',', 'ss', ')', ',', 'expr', ')', 'return', 'arr', '=', 'make_array_access', '(', 'id_', ',', 'p', '.', 'lineno', '(', 'i', ')', ',', 'arg_list', ')', 'if', 'arr', 'is', 'None', ':', 'return', 'expr', '=', 'make_typecast', '(', 'arr', '.', 'type_', ',', 'expr', ',', 'p', '.', 'lineno', '(', 'i', ')', ')', 'if', 'entry', 'is', 'None', ':', 'return', 'p', '[', '0', ']', '=', 'make_sentence', '(', "'LETARRAY'", ',', 'arr', ',', 'expr', ')']
statement : ARRAY_ID arg_list EQ expr | LET ARRAY_ID arg_list EQ expr
['statement', ':', 'ARRAY_ID', 'arg_list', 'EQ', 'expr', '|', 'LET', 'ARRAY_ID', 'arg_list', 'EQ', 'expr']
train
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L1069-L1101
9,781
dragnet-org/dragnet
dragnet/model_training.py
evaluate_extracted_tokens
def evaluate_extracted_tokens(gold_content, extr_content):
    """
    Evaluate the similarity between gold-standard and extracted content,
    typically for a single HTML document, as another way of evaluating the
    performance of an extractor model.

    Args:
        gold_content (str or Sequence[str]): Gold-standard content, either as
            a string or as an already-tokenized list of tokens.
        extr_content (str or Sequence[str]): Extracted content, either as
            a string or as an already-tokenized list of tokens.

    Returns:
        Dict[str, float]
    """
    if isinstance(gold_content, string_):
        gold_content = simple_tokenizer(gold_content)
    if isinstance(extr_content, string_):
        extr_content = simple_tokenizer(extr_content)
    gold_set = set(gold_content)
    extr_set = set(extr_content)
    jaccard = len(gold_set & extr_set) / len(gold_set | extr_set)
    levenshtein = dameraulevenshtein(gold_content, extr_content)
    return {'jaccard': jaccard, 'levenshtein': levenshtein}
python
def evaluate_extracted_tokens(gold_content, extr_content):
    """
    Evaluate the similarity between gold-standard and extracted content,
    typically for a single HTML document, as another way of evaluating the
    performance of an extractor model.

    Args:
        gold_content (str or Sequence[str]): Gold-standard content, either as
            a string or as an already-tokenized list of tokens.
        extr_content (str or Sequence[str]): Extracted content, either as
            a string or as an already-tokenized list of tokens.

    Returns:
        Dict[str, float]
    """
    if isinstance(gold_content, string_):
        gold_content = simple_tokenizer(gold_content)
    if isinstance(extr_content, string_):
        extr_content = simple_tokenizer(extr_content)
    gold_set = set(gold_content)
    extr_set = set(extr_content)
    jaccard = len(gold_set & extr_set) / len(gold_set | extr_set)
    levenshtein = dameraulevenshtein(gold_content, extr_content)
    return {'jaccard': jaccard, 'levenshtein': levenshtein}
['def', 'evaluate_extracted_tokens', '(', 'gold_content', ',', 'extr_content', ')', ':', 'if', 'isinstance', '(', 'gold_content', ',', 'string_', ')', ':', 'gold_content', '=', 'simple_tokenizer', '(', 'gold_content', ')', 'if', 'isinstance', '(', 'extr_content', ',', 'string_', ')', ':', 'extr_content', '=', 'simple_tokenizer', '(', 'extr_content', ')', 'gold_set', '=', 'set', '(', 'gold_content', ')', 'extr_set', '=', 'set', '(', 'extr_content', ')', 'jaccard', '=', 'len', '(', 'gold_set', '&', 'extr_set', ')', '/', 'len', '(', 'gold_set', '|', 'extr_set', ')', 'levenshtein', '=', 'dameraulevenshtein', '(', 'gold_content', ',', 'extr_content', ')', 'return', '{', "'jaccard'", ':', 'jaccard', ',', "'levenshtein'", ':', 'levenshtein', '}']
Evaluate the similarity between gold-standard and extracted content, typically for a single HTML document, as another way of evaluating the performance of an extractor model. Args: gold_content (str or Sequence[str]): Gold-standard content, either as a string or as an already-tokenized list of tokens. extr_content (str or Sequence[str]): Extracted content, either as a string or as an already-tokenized list of tokens. Returns: Dict[str, float]
['Evaluate', 'the', 'similarity', 'between', 'gold', '-', 'standard', 'and', 'extracted', 'content', 'typically', 'for', 'a', 'single', 'HTML', 'document', 'as', 'another', 'way', 'of', 'evaluating', 'the', 'performance', 'of', 'an', 'extractor', 'model', '.']
train
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/model_training.py#L51-L74
9,782
esheldon/fitsio
fitsio/hdu/base.py
HDUBase._get_repr_list
def _get_repr_list(self):
    """
    Get some representation data common to all HDU types
    """
    spacing = ' '*2
    text = ['']
    text.append("%sfile: %s" % (spacing, self._filename))
    text.append("%sextension: %d" % (spacing, self._info['hdunum']-1))
    text.append(
        "%stype: %s" % (spacing, _hdu_type_map[self._info['hdutype']]))

    extname = self.get_extname()
    if extname != "":
        text.append("%sextname: %s" % (spacing, extname))
    extver = self.get_extver()
    if extver != 0:
        text.append("%sextver: %s" % (spacing, extver))

    return text, spacing
python
def _get_repr_list(self):
    """
    Get some representation data common to all HDU types
    """
    spacing = ' '*2
    text = ['']
    text.append("%sfile: %s" % (spacing, self._filename))
    text.append("%sextension: %d" % (spacing, self._info['hdunum']-1))
    text.append(
        "%stype: %s" % (spacing, _hdu_type_map[self._info['hdutype']]))

    extname = self.get_extname()
    if extname != "":
        text.append("%sextname: %s" % (spacing, extname))
    extver = self.get_extver()
    if extver != 0:
        text.append("%sextver: %s" % (spacing, extver))

    return text, spacing
['def', '_get_repr_list', '(', 'self', ')', ':', 'spacing', '=', "' '", '*', '2', 'text', '=', '[', "''", ']', 'text', '.', 'append', '(', '"%sfile: %s"', '%', '(', 'spacing', ',', 'self', '.', '_filename', ')', ')', 'text', '.', 'append', '(', '"%sextension: %d"', '%', '(', 'spacing', ',', 'self', '.', '_info', '[', "'hdunum'", ']', '-', '1', ')', ')', 'text', '.', 'append', '(', '"%stype: %s"', '%', '(', 'spacing', ',', '_hdu_type_map', '[', 'self', '.', '_info', '[', "'hdutype'", ']', ']', ')', ')', 'extname', '=', 'self', '.', 'get_extname', '(', ')', 'if', 'extname', '!=', '""', ':', 'text', '.', 'append', '(', '"%sextname: %s"', '%', '(', 'spacing', ',', 'extname', ')', ')', 'extver', '=', 'self', '.', 'get_extver', '(', ')', 'if', 'extver', '!=', '0', ':', 'text', '.', 'append', '(', '"%sextver: %s"', '%', '(', 'spacing', ',', 'extver', ')', ')', 'return', 'text', ',', 'spacing']
Get some representation data common to all HDU types
['Get', 'some', 'representation', 'data', 'common', 'to', 'all', 'HDU', 'types']
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L333-L351
9,783
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
brocade_interface_ext.get_ip_interface_input_request_type_get_request_interface_type
def get_ip_interface_input_request_type_get_request_interface_type(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_ip_interface = ET.Element("get_ip_interface")
    config = get_ip_interface
    input = ET.SubElement(get_ip_interface, "input")
    request_type = ET.SubElement(input, "request-type")
    get_request = ET.SubElement(request_type, "get-request")
    interface_type = ET.SubElement(get_request, "interface-type")
    interface_type.text = kwargs.pop('interface_type')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def get_ip_interface_input_request_type_get_request_interface_type(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_ip_interface = ET.Element("get_ip_interface")
    config = get_ip_interface
    input = ET.SubElement(get_ip_interface, "input")
    request_type = ET.SubElement(input, "request-type")
    get_request = ET.SubElement(request_type, "get-request")
    interface_type = ET.SubElement(get_request, "interface-type")
    interface_type.text = kwargs.pop('interface_type')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
['def', 'get_ip_interface_input_request_type_get_request_interface_type', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_ip_interface', '=', 'ET', '.', 'Element', '(', '"get_ip_interface"', ')', 'config', '=', 'get_ip_interface', 'input', '=', 'ET', '.', 'SubElement', '(', 'get_ip_interface', ',', '"input"', ')', 'request_type', '=', 'ET', '.', 'SubElement', '(', 'input', ',', '"request-type"', ')', 'get_request', '=', 'ET', '.', 'SubElement', '(', 'request_type', ',', '"get-request"', ')', 'interface_type', '=', 'ET', '.', 'SubElement', '(', 'get_request', ',', '"interface-type"', ')', 'interface_type', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'interface_type'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L398-L411
9,784
boriel/zxbasic
arch/zx48k/backend/__8bit.py
_band8
def _band8(ins):
    """ Pops top 2 operands out of the stack, and does
    1st AND (bitwise) 2nd operand (top of the stack),
    pushes the result.

    8 bit un/signed version
    """
    op1, op2 = tuple(ins.quad[2:])

    if _int_ops(op1, op2) is not None:
        op1, op2 = _int_ops(op1, op2)

        output = _8bit_oper(op1)

        if op2 == 0xFF:  # X & 0xFF = X
            output.append('push af')
            return output

        if op2 == 0:  # X and 0 = 0
            output.append('xor a')
            output.append('push af')
            return output

    op1, op2 = tuple(ins.quad[2:])
    output = _8bit_oper(op1, op2)
    output.append('and h')
    output.append('push af')
    return output
python
def _band8(ins):
    """ Pops top 2 operands out of the stack, and does
    1st AND (bitwise) 2nd operand (top of the stack),
    pushes the result.

    8 bit un/signed version
    """
    op1, op2 = tuple(ins.quad[2:])

    if _int_ops(op1, op2) is not None:
        op1, op2 = _int_ops(op1, op2)

        output = _8bit_oper(op1)

        if op2 == 0xFF:  # X & 0xFF = X
            output.append('push af')
            return output

        if op2 == 0:  # X and 0 = 0
            output.append('xor a')
            output.append('push af')
            return output

    op1, op2 = tuple(ins.quad[2:])
    output = _8bit_oper(op1, op2)
    output.append('and h')
    output.append('push af')
    return output
['def', '_band8', '(', 'ins', ')', ':', 'op1', ',', 'op2', '=', 'tuple', '(', 'ins', '.', 'quad', '[', '2', ':', ']', ')', 'if', '_int_ops', '(', 'op1', ',', 'op2', ')', 'is', 'not', 'None', ':', 'op1', ',', 'op2', '=', '_int_ops', '(', 'op1', ',', 'op2', ')', 'output', '=', '_8bit_oper', '(', 'op1', ')', 'if', 'op2', '==', '0xFF', ':', '# X & 0xFF = X', 'output', '.', 'append', '(', "'push af'", ')', 'return', 'output', 'if', 'op2', '==', '0', ':', '# X and 0 = 0', 'output', '.', 'append', '(', "'xor a'", ')', 'output', '.', 'append', '(', "'push af'", ')', 'return', 'output', 'op1', ',', 'op2', '=', 'tuple', '(', 'ins', '.', 'quad', '[', '2', ':', ']', ')', 'output', '=', '_8bit_oper', '(', 'op1', ',', 'op2', ')', 'output', '.', 'append', '(', "'and h'", ')', 'output', '.', 'append', '(', "'push af'", ')', 'return', 'output']
Pops top 2 operands out of the stack, and does 1st AND (bitwise) 2nd operand (top of the stack), pushes the result. 8 bit un/signed version
['Pops', 'top', '2', 'operands', 'out', 'of', 'the', 'stack', 'and', 'does', '1st', 'AND', '(', 'bitwise', ')', '2nd', 'operand', '(', 'top', 'of', 'the', 'stack', ')', 'pushes', 'the', 'result', '.']
train
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__8bit.py#L764-L791
9,785
craffel/mir_eval
mir_eval/io.py
load_time_series
def load_time_series(filename, delimiter=r'\s+'):
    r"""Import a time series from an annotation file.  The file should
    consist of two columns of numeric values corresponding to the time
    and value of each sample of the time series.

    Parameters
    ----------
    filename : str
        Path to the annotation file
    delimiter : str
        Separator regular expression.
        By default, lines will be split by any amount of whitespace.

    Returns
    -------
    times : np.ndarray
        array of timestamps (float)
    values : np.ndarray
        array of corresponding numeric values (float)

    """
    # Use our universal function to load in the events
    times, values = load_delimited(filename, [float, float], delimiter)
    times = np.array(times)
    values = np.array(values)

    return times, values
python
def load_time_series(filename, delimiter=r'\s+'):
    r"""Import a time series from an annotation file.  The file should
    consist of two columns of numeric values corresponding to the time
    and value of each sample of the time series.

    Parameters
    ----------
    filename : str
        Path to the annotation file
    delimiter : str
        Separator regular expression.
        By default, lines will be split by any amount of whitespace.

    Returns
    -------
    times : np.ndarray
        array of timestamps (float)
    values : np.ndarray
        array of corresponding numeric values (float)

    """
    # Use our universal function to load in the events
    times, values = load_delimited(filename, [float, float], delimiter)
    times = np.array(times)
    values = np.array(values)

    return times, values
['def', 'load_time_series', '(', 'filename', ',', 'delimiter', '=', "r'\\s+'", ')', ':', '# Use our universal function to load in the events', 'times', ',', 'values', '=', 'load_delimited', '(', 'filename', ',', '[', 'float', ',', 'float', ']', ',', 'delimiter', ')', 'times', '=', 'np', '.', 'array', '(', 'times', ')', 'values', '=', 'np', '.', 'array', '(', 'values', ')', 'return', 'times', ',', 'values']
r"""Import a time series from an annotation file. The file should consist of two columns of numeric values corresponding to the time and value of each sample of the time series. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- times : np.ndarray array of timestamps (float) values : np.ndarray array of corresponding numeric values (float)
['r', 'Import', 'a', 'time', 'series', 'from', 'an', 'annotation', 'file', '.', 'The', 'file', 'should', 'consist', 'of', 'two', 'columns', 'of', 'numeric', 'values', 'corresponding', 'to', 'the', 'time', 'and', 'value', 'of', 'each', 'sample', 'of', 'the', 'time', 'series', '.']
train
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L245-L271
9,786
miguelgrinberg/python-engineio
engineio/asyncio_server.py
AsyncServer.start_background_task
def start_background_task(self, target, *args, **kwargs):
    """Start a background task using the appropriate async model.

    This is a utility function that applications can use to start a
    background task using the method that is compatible with the
    selected async mode.

    :param target: the target function to execute.
    :param args: arguments to pass to the function.
    :param kwargs: keyword arguments to pass to the function.

    The return value is a ``asyncio.Task`` object.
    """
    return asyncio.ensure_future(target(*args, **kwargs))
python
def start_background_task(self, target, *args, **kwargs):
    """Start a background task using the appropriate async model.

    This is a utility function that applications can use to start a
    background task using the method that is compatible with the
    selected async mode.

    :param target: the target function to execute.
    :param args: arguments to pass to the function.
    :param kwargs: keyword arguments to pass to the function.

    The return value is a ``asyncio.Task`` object.
    """
    return asyncio.ensure_future(target(*args, **kwargs))
['def', 'start_background_task', '(', 'self', ',', 'target', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'asyncio', '.', 'ensure_future', '(', 'target', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ')']
Start a background task using the appropriate async model. This is a utility function that applications can use to start a background task using the method that is compatible with the selected async mode. :param target: the target function to execute. :param args: arguments to pass to the function. :param kwargs: keyword arguments to pass to the function. The return value is a ``asyncio.Task`` object.
['Start', 'a', 'background', 'task', 'using', 'the', 'appropriate', 'async', 'model', '.']
train
https://github.com/miguelgrinberg/python-engineio/blob/261fd67103cb5d9a44369415748e66fdf62de6fb/engineio/asyncio_server.py#L266-L279
9,787
corpusops/pdbclone
lib/pdb_clone/pdb.py
Pdb.precmd
def precmd(self, line):
    """Handle alias expansion and ';;' separator."""
    if not line.strip():
        return line
    args = line.split()
    while args[0] in self.aliases:
        line = self.aliases[args[0]]
        ii = 1
        for tmpArg in args[1:]:
            line = line.replace("%" + str(ii), tmpArg)
            ii += 1
        line = line.replace("%*", ' '.join(args[1:]))
        args = line.split()
    # split into ';;' separated commands
    # unless it's an alias command
    if args[0] != 'alias':
        marker = line.find(';;')
        if marker >= 0:
            # queue up everything after marker
            next = line[marker+2:].lstrip()
            self.cmdqueue.append(next)
            line = line[:marker].rstrip()
    return line
python
def precmd(self, line):
    """Handle alias expansion and ';;' separator."""
    if not line.strip():
        return line
    args = line.split()
    while args[0] in self.aliases:
        line = self.aliases[args[0]]
        ii = 1
        for tmpArg in args[1:]:
            line = line.replace("%" + str(ii), tmpArg)
            ii += 1
        line = line.replace("%*", ' '.join(args[1:]))
        args = line.split()
    # split into ';;' separated commands
    # unless it's an alias command
    if args[0] != 'alias':
        marker = line.find(';;')
        if marker >= 0:
            # queue up everything after marker
            next = line[marker+2:].lstrip()
            self.cmdqueue.append(next)
            line = line[:marker].rstrip()
    return line
['def', 'precmd', '(', 'self', ',', 'line', ')', ':', 'if', 'not', 'line', '.', 'strip', '(', ')', ':', 'return', 'line', 'args', '=', 'line', '.', 'split', '(', ')', 'while', 'args', '[', '0', ']', 'in', 'self', '.', 'aliases', ':', 'line', '=', 'self', '.', 'aliases', '[', 'args', '[', '0', ']', ']', 'ii', '=', '1', 'for', 'tmpArg', 'in', 'args', '[', '1', ':', ']', ':', 'line', '=', 'line', '.', 'replace', '(', '"%"', '+', 'str', '(', 'ii', ')', ',', 'tmpArg', ')', 'ii', '+=', '1', 'line', '=', 'line', '.', 'replace', '(', '"%*"', ',', "' '", '.', 'join', '(', 'args', '[', '1', ':', ']', ')', ')', 'args', '=', 'line', '.', 'split', '(', ')', "# split into ';;' separated commands", "# unless it's an alias command", 'if', 'args', '[', '0', ']', '!=', "'alias'", ':', 'marker', '=', 'line', '.', 'find', '(', "';;'", ')', 'if', 'marker', '>=', '0', ':', '# queue up everything after marker', 'next', '=', 'line', '[', 'marker', '+', '2', ':', ']', '.', 'lstrip', '(', ')', 'self', '.', 'cmdqueue', '.', 'append', '(', 'next', ')', 'line', '=', 'line', '[', ':', 'marker', ']', '.', 'rstrip', '(', ')', 'return', 'line']
Handle alias expansion and ';;' separator.
['Handle', 'alias', 'expansion', 'and', ';;', 'separator', '.']
train
https://github.com/corpusops/pdbclone/blob/f781537c243a4874b246d43dbdef8c4279f0094d/lib/pdb_clone/pdb.py#L683-L706
9,788
chrisspen/webarticle2text
webarticle2text/webarticle2text.py
tidyHTML
def tidyHTML(dirtyHTML):
    """
    Runs an arbitrary HTML string through Tidy.
    """
    try:
        from tidylib import tidy_document
    except ImportError as e:
        raise ImportError(("%s\nYou need to install pytidylib.\n" +
                           "e.g. sudo pip install pytidylib") % e)
    options = {
        'output-xhtml':1,
        #add_xml_decl=1,#option in tidy but not pytidylib
        'indent':1,
        'tidy-mark':1,
        #'char-encoding':'utf8',
        'char-encoding':'raw',
    }
    html, errors = tidy_document(dirtyHTML, options=options)
    return html
python
def tidyHTML(dirtyHTML):
    """
    Runs an arbitrary HTML string through Tidy.
    """
    try:
        from tidylib import tidy_document
    except ImportError as e:
        raise ImportError(("%s\nYou need to install pytidylib.\n" +
                           "e.g. sudo pip install pytidylib") % e)
    options = {
        'output-xhtml':1,
        #add_xml_decl=1,#option in tidy but not pytidylib
        'indent':1,
        'tidy-mark':1,
        #'char-encoding':'utf8',
        'char-encoding':'raw',
    }
    html, errors = tidy_document(dirtyHTML, options=options)
    return html
['def', 'tidyHTML', '(', 'dirtyHTML', ')', ':', 'try', ':', 'from', 'tidylib', 'import', 'tidy_document', 'except', 'ImportError', 'as', 'e', ':', 'raise', 'ImportError', '(', '(', '"%s\\nYou need to install pytidylib.\\n"', '+', '"e.g. sudo pip install pytidylib"', ')', '%', 'e', ')', 'options', '=', '{', "'output-xhtml'", ':', '1', ',', '#add_xml_decl=1,#option in tidy but not pytidylib', "'indent'", ':', '1', ',', "'tidy-mark'", ':', '1', ',', "#'char-encoding':'utf8',", "'char-encoding'", ':', "'raw'", ',', '}', 'html', ',', 'errors', '=', 'tidy_document', '(', 'dirtyHTML', ',', 'options', '=', 'options', ')', 'return', 'html']
Runs an arbitrary HTML string through Tidy.
['Runs', 'an', 'arbitrary', 'HTML', 'string', 'through', 'Tidy', '.']
train
https://github.com/chrisspen/webarticle2text/blob/3c88e948e31aedf1eccfea2106e5848d224771eb/webarticle2text/webarticle2text.py#L337-L355
9,789
ContinuumIO/menuinst
menuinst/win32.py
quoted
def quoted(s):
    """
    quotes a string if necessary.
    """
    # strip any existing quotes
    s = s.strip(u'"')
    # don't add quotes for minus or leading space
    if s[0] in (u'-', u' '):
        return s
    if u' ' in s or u'/' in s:
        return u'"%s"' % s
    else:
        return s
python
def quoted(s):
    """
    quotes a string if necessary.
    """
    # strip any existing quotes
    s = s.strip(u'"')
    # don't add quotes for minus or leading space
    if s[0] in (u'-', u' '):
        return s
    if u' ' in s or u'/' in s:
        return u'"%s"' % s
    else:
        return s
['def', 'quoted', '(', 's', ')', ':', '# strip any existing quotes', 's', '=', 's', '.', 'strip', '(', 'u\'"\'', ')', "# don't add quotes for minus or leading space", 'if', 's', '[', '0', ']', 'in', '(', "u'-'", ',', "u' '", ')', ':', 'return', 's', 'if', "u' '", 'in', 's', 'or', "u'/'", 'in', 's', ':', 'return', 'u\'"%s"\'', '%', 's', 'else', ':', 'return', 's']
quotes a string if necessary.
['quotes', 'a', 'string', 'if', 'necessary', '.']
train
https://github.com/ContinuumIO/menuinst/blob/dae53065e9e82a3352b817cca5895a9b271ddfdb/menuinst/win32.py#L106-L118
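Behaviour sketch, assuming the import below works (menuinst's win32 module only loads on Windows); expected results follow from the code above:

from menuinst.win32 import quoted

quoted(u"C:/Program Files/app.exe")  # -> '"C:/Program Files/app.exe"' (space or slash triggers quoting)
quoted(u"-option")                   # -> '-option' (leading minus is never quoted)
quoted(u"plain")                     # -> 'plain' (no space or slash, returned unchanged)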
9,790
ralphje/imagemounter
imagemounter/filesystems.py
LvmFileSystemType.mount
def mount(self, volume): """Performs mount actions on a LVM. Scans for active volume groups from the loopback device, activates it and fills :attr:`volumes` with the logical volumes. :raises NoLoopbackAvailableError: when no loopback was available :raises IncorrectFilesystemError: when the volume is not a volume group """ os.environ['LVM_SUPPRESS_FD_WARNINGS'] = '1' # find free loopback device volume._find_loopback() time.sleep(0.2) try: # Scan for new lvm volumes result = _util.check_output_(["lvm", "pvscan"]) for l in result.splitlines(): if volume.loopback in l or (volume.offset == 0 and volume.get_raw_path() in l): for vg in re.findall(r'VG (\S+)', l): volume.info['volume_group'] = vg if not volume.info.get('volume_group'): logger.warning("Volume is not a volume group. (Searching for %s)", volume.loopback) raise IncorrectFilesystemError() # Enable lvm volumes _util.check_call_(["lvm", "vgchange", "-a", "y", volume.info['volume_group']], stdout=subprocess.PIPE) except Exception: volume._free_loopback() raise volume.volumes.vstype = 'lvm' # fills it up. for _ in volume.volumes.detect_volumes('lvm'): pass
python
def mount(self, volume): """Performs mount actions on a LVM. Scans for active volume groups from the loopback device, activates it and fills :attr:`volumes` with the logical volumes. :raises NoLoopbackAvailableError: when no loopback was available :raises IncorrectFilesystemError: when the volume is not a volume group """ os.environ['LVM_SUPPRESS_FD_WARNINGS'] = '1' # find free loopback device volume._find_loopback() time.sleep(0.2) try: # Scan for new lvm volumes result = _util.check_output_(["lvm", "pvscan"]) for l in result.splitlines(): if volume.loopback in l or (volume.offset == 0 and volume.get_raw_path() in l): for vg in re.findall(r'VG (\S+)', l): volume.info['volume_group'] = vg if not volume.info.get('volume_group'): logger.warning("Volume is not a volume group. (Searching for %s)", volume.loopback) raise IncorrectFilesystemError() # Enable lvm volumes _util.check_call_(["lvm", "vgchange", "-a", "y", volume.info['volume_group']], stdout=subprocess.PIPE) except Exception: volume._free_loopback() raise volume.volumes.vstype = 'lvm' # fills it up. for _ in volume.volumes.detect_volumes('lvm'): pass
['def', 'mount', '(', 'self', ',', 'volume', ')', ':', 'os', '.', 'environ', '[', "'LVM_SUPPRESS_FD_WARNINGS'", ']', '=', "'1'", '# find free loopback device', 'volume', '.', '_find_loopback', '(', ')', 'time', '.', 'sleep', '(', '0.2', ')', 'try', ':', '# Scan for new lvm volumes', 'result', '=', '_util', '.', 'check_output_', '(', '[', '"lvm"', ',', '"pvscan"', ']', ')', 'for', 'l', 'in', 'result', '.', 'splitlines', '(', ')', ':', 'if', 'volume', '.', 'loopback', 'in', 'l', 'or', '(', 'volume', '.', 'offset', '==', '0', 'and', 'volume', '.', 'get_raw_path', '(', ')', 'in', 'l', ')', ':', 'for', 'vg', 'in', 're', '.', 'findall', '(', "r'VG (\\S+)'", ',', 'l', ')', ':', 'volume', '.', 'info', '[', "'volume_group'", ']', '=', 'vg', 'if', 'not', 'volume', '.', 'info', '.', 'get', '(', "'volume_group'", ')', ':', 'logger', '.', 'warning', '(', '"Volume is not a volume group. (Searching for %s)"', ',', 'volume', '.', 'loopback', ')', 'raise', 'IncorrectFilesystemError', '(', ')', '# Enable lvm volumes', '_util', '.', 'check_call_', '(', '[', '"lvm"', ',', '"vgchange"', ',', '"-a"', ',', '"y"', ',', 'volume', '.', 'info', '[', "'volume_group'", ']', ']', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ')', 'except', 'Exception', ':', 'volume', '.', '_free_loopback', '(', ')', 'raise', 'volume', '.', 'volumes', '.', 'vstype', '=', "'lvm'", '# fills it up.', 'for', '_', 'in', 'volume', '.', 'volumes', '.', 'detect_volumes', '(', "'lvm'", ')', ':', 'pass']
Performs mount actions on a LVM. Scans for active volume groups from the loopback device, activates it and fills :attr:`volumes` with the logical volumes. :raises NoLoopbackAvailableError: when no loopback was available :raises IncorrectFilesystemError: when the volume is not a volume group
['Performs', 'mount', 'actions', 'on', 'a', 'LVM', '.', 'Scans', 'for', 'active', 'volume', 'groups', 'from', 'the', 'loopback', 'device', 'activates', 'it', 'and', 'fills', ':', 'attr', ':', 'volumes', 'with', 'the', 'logical', 'volumes', '.']
train
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/filesystems.py#L460-L494
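A rough sketch of the two LVM steps this mount method drives, assuming root privileges, a loopback-attached LVM image, and a real volume group; the group name below is a placeholder:

import subprocess

scan = subprocess.check_output(["lvm", "pvscan"]).decode()
# ... locate the 'VG <name>' entry for the loopback device in `scan` ...
subprocess.check_call(["lvm", "vgchange", "-a", "y", "my_volume_group"])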
9,791
10gen/mongo-orchestration
mongo_orchestration/replica_sets.py
ReplicaSet.repl_member_add
def repl_member_add(self, params): """create new mongod instances and add it to the replica set. Args: params - mongod params return True if operation success otherwise False """ repl_config = self.config member_id = max([member['_id'] for member in repl_config['members']]) + 1 member_config = self.member_create(params, member_id) repl_config['members'].append(member_config) if not self.repl_update(repl_config): self.member_del(member_id, reconfig=True) raise ReplicaSetError("Could not add member to ReplicaSet.") return member_id
python
def repl_member_add(self, params): """create new mongod instances and add it to the replica set. Args: params - mongod params return True if operation success otherwise False """ repl_config = self.config member_id = max([member['_id'] for member in repl_config['members']]) + 1 member_config = self.member_create(params, member_id) repl_config['members'].append(member_config) if not self.repl_update(repl_config): self.member_del(member_id, reconfig=True) raise ReplicaSetError("Could not add member to ReplicaSet.") return member_id
['def', 'repl_member_add', '(', 'self', ',', 'params', ')', ':', 'repl_config', '=', 'self', '.', 'config', 'member_id', '=', 'max', '(', '[', 'member', '[', "'_id'", ']', 'for', 'member', 'in', 'repl_config', '[', "'members'", ']', ']', ')', '+', '1', 'member_config', '=', 'self', '.', 'member_create', '(', 'params', ',', 'member_id', ')', 'repl_config', '[', "'members'", ']', '.', 'append', '(', 'member_config', ')', 'if', 'not', 'self', '.', 'repl_update', '(', 'repl_config', ')', ':', 'self', '.', 'member_del', '(', 'member_id', ',', 'reconfig', '=', 'True', ')', 'raise', 'ReplicaSetError', '(', '"Could not add member to ReplicaSet."', ')', 'return', 'member_id']
create a new mongod instance and add it to the replica set. Args: params - mongod params return True if the operation succeeds otherwise False
['create', 'new', 'mongod', 'instances', 'and', 'add', 'it', 'to', 'the', 'replica', 'set', '.', 'Args', ':', 'params', '-', 'mongod', 'params', 'return', 'True', 'if', 'operation', 'success', 'otherwise', 'False']
train
https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/replica_sets.py#L245-L258
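Call sketch; the ReplicaSet instance and the parameter dict are placeholders, since the exact member-parameter schema is defined elsewhere in mongo-orchestration:

# rs is an already-initialised ReplicaSet (placeholder)
new_member_id = rs.repl_member_add({"rsParams": {"priority": 1}})
print("added member", new_member_id)  # raises ReplicaSetError if the reconfig fails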
9,792
zetaops/pyoko
pyoko/lib/utils.py
ub_to_str
def ub_to_str(string): """ converts py2 unicode / py3 bytestring into str Args: string (unicode, byte_string): string to be converted Returns: (str) """ if not isinstance(string, str): if six.PY2: return str(string) else: return string.decode() return string
python
def ub_to_str(string): """ converts py2 unicode / py3 bytestring into str Args: string (unicode, byte_string): string to be converted Returns: (str) """ if not isinstance(string, str): if six.PY2: return str(string) else: return string.decode() return string
['def', 'ub_to_str', '(', 'string', ')', ':', 'if', 'not', 'isinstance', '(', 'string', ',', 'str', ')', ':', 'if', 'six', '.', 'PY2', ':', 'return', 'str', '(', 'string', ')', 'else', ':', 'return', 'string', '.', 'decode', '(', ')', 'return', 'string']
converts py2 unicode / py3 bytestring into str Args: string (unicode, byte_string): string to be converted Returns: (str)
['converts', 'py2', 'unicode', '/', 'py3', 'bytestring', 'into', 'str', 'Args', ':', 'string', '(', 'unicode', 'byte_string', ')', ':', 'string', 'to', 'be', 'converted', 'Returns', ':', '(', 'str', ')']
train
https://github.com/zetaops/pyoko/blob/236c509ad85640933ac0f89ad8f7ed95f62adf07/pyoko/lib/utils.py#L22-L36
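Usage example, with the import path taken from the record:

from pyoko.lib.utils import ub_to_str

print(ub_to_str(b"hello"))   # -> 'hello' on Python 3: bytes are decoded
print(ub_to_str("already"))  # -> 'already': str passes through unchanged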
9,793
awslabs/aws-sam-cli
samcli/local/lambda_service/lambda_error_responses.py
LambdaErrorResponses.unsupported_media_type
def unsupported_media_type(content_type): """ Creates a Lambda Service UnsupportedMediaType Response Parameters ---------- content_type str Content Type of the request that was made Returns ------- Flask.Response A response object representing the UnsupportedMediaType Error """ exception_tuple = LambdaErrorResponses.UnsupportedMediaTypeException return BaseLocalService.service_response( LambdaErrorResponses._construct_error_response_body(LambdaErrorResponses.USER_ERROR, "Unsupported content type: {}".format(content_type)), LambdaErrorResponses._construct_headers(exception_tuple[0]), exception_tuple[1] )
python
def unsupported_media_type(content_type): """ Creates a Lambda Service UnsupportedMediaType Response Parameters ---------- content_type str Content Type of the request that was made Returns ------- Flask.Response A response object representing the UnsupportedMediaType Error """ exception_tuple = LambdaErrorResponses.UnsupportedMediaTypeException return BaseLocalService.service_response( LambdaErrorResponses._construct_error_response_body(LambdaErrorResponses.USER_ERROR, "Unsupported content type: {}".format(content_type)), LambdaErrorResponses._construct_headers(exception_tuple[0]), exception_tuple[1] )
['def', 'unsupported_media_type', '(', 'content_type', ')', ':', 'exception_tuple', '=', 'LambdaErrorResponses', '.', 'UnsupportedMediaTypeException', 'return', 'BaseLocalService', '.', 'service_response', '(', 'LambdaErrorResponses', '.', '_construct_error_response_body', '(', 'LambdaErrorResponses', '.', 'USER_ERROR', ',', '"Unsupported content type: {}"', '.', 'format', '(', 'content_type', ')', ')', ',', 'LambdaErrorResponses', '.', '_construct_headers', '(', 'exception_tuple', '[', '0', ']', ')', ',', 'exception_tuple', '[', '1', ']', ')']
Creates a Lambda Service UnsupportedMediaType Response Parameters ---------- content_type str Content Type of the request that was made Returns ------- Flask.Response A response object representing the UnsupportedMediaType Error
['Creates', 'a', 'Lambda', 'Service', 'UnsupportedMediaType', 'Response']
train
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/lambda_service/lambda_error_responses.py#L88-L109
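Call sketch; a Flask request context is set up here because the helper builds a Flask response, and the status code comes from the exception tuple defined on LambdaErrorResponses:

from flask import Flask
from samcli.local.lambda_service.lambda_error_responses import LambdaErrorResponses

app = Flask(__name__)
with app.test_request_context():
    resp = LambdaErrorResponses.unsupported_media_type("text/plain")
    print(resp.status_code, resp.get_data(as_text=True))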
9,794
DLR-RM/RAFCON
source/rafcon/core/states/container_state.py
ContainerState.change_state_id
def change_state_id(self, state_id=None): """ Changes the id of the state to a new id. This functions replaces the old state_id with the new state_id in all data flows and transitions. :param state_id: The new state if of the state """ old_state_id = self.state_id super(ContainerState, self).change_state_id(state_id) # Use private variables to change ids to prevent validity checks # change id in all transitions for transition in self.transitions.values(): if transition.from_state == old_state_id: transition._from_state = self.state_id if transition.to_state == old_state_id: transition._to_state = self.state_id # change id in all data_flows for data_flow in self.data_flows.values(): if data_flow.from_state == old_state_id: data_flow._from_state = self.state_id if data_flow.to_state == old_state_id: data_flow._to_state = self.state_id
python
def change_state_id(self, state_id=None): """ Changes the id of the state to a new id. This functions replaces the old state_id with the new state_id in all data flows and transitions. :param state_id: The new state if of the state """ old_state_id = self.state_id super(ContainerState, self).change_state_id(state_id) # Use private variables to change ids to prevent validity checks # change id in all transitions for transition in self.transitions.values(): if transition.from_state == old_state_id: transition._from_state = self.state_id if transition.to_state == old_state_id: transition._to_state = self.state_id # change id in all data_flows for data_flow in self.data_flows.values(): if data_flow.from_state == old_state_id: data_flow._from_state = self.state_id if data_flow.to_state == old_state_id: data_flow._to_state = self.state_id
['def', 'change_state_id', '(', 'self', ',', 'state_id', '=', 'None', ')', ':', 'old_state_id', '=', 'self', '.', 'state_id', 'super', '(', 'ContainerState', ',', 'self', ')', '.', 'change_state_id', '(', 'state_id', ')', '# Use private variables to change ids to prevent validity checks', '# change id in all transitions', 'for', 'transition', 'in', 'self', '.', 'transitions', '.', 'values', '(', ')', ':', 'if', 'transition', '.', 'from_state', '==', 'old_state_id', ':', 'transition', '.', '_from_state', '=', 'self', '.', 'state_id', 'if', 'transition', '.', 'to_state', '==', 'old_state_id', ':', 'transition', '.', '_to_state', '=', 'self', '.', 'state_id', '# change id in all data_flows', 'for', 'data_flow', 'in', 'self', '.', 'data_flows', '.', 'values', '(', ')', ':', 'if', 'data_flow', '.', 'from_state', '==', 'old_state_id', ':', 'data_flow', '.', '_from_state', '=', 'self', '.', 'state_id', 'if', 'data_flow', '.', 'to_state', '==', 'old_state_id', ':', 'data_flow', '.', '_to_state', '=', 'self', '.', 'state_id']
Changes the id of the state to a new id. This function replaces the old state_id with the new state_id in all data flows and transitions. :param state_id: The new state id of the state
['Changes', 'the', 'id', 'of', 'the', 'state', 'to', 'a', 'new', 'id', '.', 'This', 'function', 'replaces', 'the', 'old', 'state_id', 'with', 'the', 'new', 'state_id', 'in', 'all', 'data', 'flows', 'and', 'transitions', '.']
train
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L1624-L1646
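Call sketch on a hypothetical, fully-constructed ContainerState; passing no argument lets the state generate a fresh id, and the loops above repoint every transition and data flow that referenced the old id:

# state is a hypothetical, already-built ContainerState
old_id = state.state_id
state.change_state_id()              # new id generated and propagated
print(old_id, "->", state.state_id)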
9,795
bram85/topydo
topydo/lib/ChangeSet.py
ChangeSet._trim
def _trim(self): """ Removes oldest backups that exceed the limit configured in backup_count option. Does not write back to file system, make sure to call self._write() afterwards. """ index = self._get_index() backup_limit = config().backup_count() - 1 for changeset in index[backup_limit:]: self.delete(changeset[0], p_write=False)
python
def _trim(self): """ Removes oldest backups that exceed the limit configured in backup_count option. Does not write back to file system, make sure to call self._write() afterwards. """ index = self._get_index() backup_limit = config().backup_count() - 1 for changeset in index[backup_limit:]: self.delete(changeset[0], p_write=False)
['def', '_trim', '(', 'self', ')', ':', 'index', '=', 'self', '.', '_get_index', '(', ')', 'backup_limit', '=', 'config', '(', ')', '.', 'backup_count', '(', ')', '-', '1', 'for', 'changeset', 'in', 'index', '[', 'backup_limit', ':', ']', ':', 'self', '.', 'delete', '(', 'changeset', '[', '0', ']', ',', 'p_write', '=', 'False', ')']
Removes oldest backups that exceed the limit configured in backup_count option. Does not write back to file system, make sure to call self._write() afterwards.
['Removes', 'oldest', 'backups', 'that', 'exceed', 'the', 'limit', 'configured', 'in', 'backup_count', 'option', '.']
train
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/ChangeSet.py#L152-L164
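Standalone sketch of the slicing used above: everything from position backup_count - 1 onward is dropped from the index (values invented):

backups = ["ts5", "ts4", "ts3", "ts2", "ts1"]  # newest first, like the index
backup_count = 3
print(backups[backup_count - 1:])  # ['ts3', 'ts2', 'ts1'] -- the entries _trim would delete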
9,796
SITools2/pySitools2_1.0
sitools2/core/pySitools2.py
SITools2Instance.__parseResponseServer
def __parseResponseServer(self): """Parses the response of the server. Exception --------- A Sitools2Exception is raised when the server does not send back a success.""" self.__logger.debug(Sitools2Abstract.getBaseUrl(self) + SITools2Instance.PROJECTS_URI) result = Util.retrieveJsonResponseFromServer(Sitools2Abstract.getBaseUrl(self) + SITools2Instance.PROJECTS_URI) isSuccess = result['success'] if isSuccess: data = result['data'] self.__logger.debug(data) for i, dataItem in enumerate(data): project = Project(Sitools2Abstract.getBaseUrl(self), dataItem) self.__projects.append(project) else: raise Sitools2Exception("Error when loading the server response")
python
def __parseResponseServer(self): """Parses the response of the server. Exception --------- A Sitools2Exception is raised when the server does not send back a success.""" self.__logger.debug(Sitools2Abstract.getBaseUrl(self) + SITools2Instance.PROJECTS_URI) result = Util.retrieveJsonResponseFromServer(Sitools2Abstract.getBaseUrl(self) + SITools2Instance.PROJECTS_URI) isSuccess = result['success'] if isSuccess: data = result['data'] self.__logger.debug(data) for i, dataItem in enumerate(data): project = Project(Sitools2Abstract.getBaseUrl(self), dataItem) self.__projects.append(project) else: raise Sitools2Exception("Error when loading the server response")
['def', '__parseResponseServer', '(', 'self', ')', ':', 'self', '.', '__logger', '.', 'debug', '(', 'Sitools2Abstract', '.', 'getBaseUrl', '(', 'self', ')', '+', 'SITools2Instance', '.', 'PROJECTS_URI', ')', 'result', '=', 'Util', '.', 'retrieveJsonResponseFromServer', '(', 'Sitools2Abstract', '.', 'getBaseUrl', '(', 'self', ')', '+', 'SITools2Instance', '.', 'PROJECTS_URI', ')', 'isSuccess', '=', 'result', '[', "'success'", ']', 'if', 'isSuccess', ':', 'data', '=', 'result', '[', "'data'", ']', 'self', '.', '__logger', '.', 'debug', '(', 'data', ')', 'for', 'i', ',', 'dataItem', 'in', 'enumerate', '(', 'data', ')', ':', 'project', '=', 'Project', '(', 'Sitools2Abstract', '.', 'getBaseUrl', '(', 'self', ')', ',', 'dataItem', ')', 'self', '.', '__projects', '.', 'append', '(', 'project', ')', 'else', ':', 'raise', 'Sitools2Exception', '(', '"Error when loading the server response"', ')']
Parses the response of the server. Exception --------- A Sitools2Exception is raised when the server does not send back a success.
['Parses', 'the', 'response', 'of', 'the', 'server', '.', 'Exception', '---------', 'A', 'Sitools2Exception', 'is', 'raised', 'when', 'the', 'server', 'does', 'not', 'send', 'back', 'a', 'success', '.']
train
https://github.com/SITools2/pySitools2_1.0/blob/acd13198162456ba401a0b923af989bb29feb3b6/sitools2/core/pySitools2.py#L96-L112
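Sketch of the response shape the parser expects (payload invented):

result = {"success": True,
          "data": [{"name": "project1"}, {"name": "project2"}]}
if result["success"]:
    for item in result["data"]:
        print(item["name"])
else:
    raise Exception("Error when loading the server response")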
9,797
juju/python-libjuju
juju/client/_client1.py
ClientFacade.AddCharm
async def AddCharm(self, channel, url): ''' channel : str url : str Returns -> None ''' # map input types to rpc msg _params = dict() msg = dict(type='Client', request='AddCharm', version=1, params=_params) _params['channel'] = channel _params['url'] = url reply = await self.rpc(msg) return reply
python
async def AddCharm(self, channel, url): ''' channel : str url : str Returns -> None ''' # map input types to rpc msg _params = dict() msg = dict(type='Client', request='AddCharm', version=1, params=_params) _params['channel'] = channel _params['url'] = url reply = await self.rpc(msg) return reply
['async', 'def', 'AddCharm', '(', 'self', ',', 'channel', ',', 'url', ')', ':', '# map input types to rpc msg', '_params', '=', 'dict', '(', ')', 'msg', '=', 'dict', '(', 'type', '=', "'Client'", ',', 'request', '=', "'AddCharm'", ',', 'version', '=', '1', ',', 'params', '=', '_params', ')', '_params', '[', "'channel'", ']', '=', 'channel', '_params', '[', "'url'", ']', '=', 'url', 'reply', '=', 'await', 'self', '.', 'rpc', '(', 'msg', ')', 'return', 'reply']
channel : str url : str Returns -> None
['channel', ':', 'str', 'url', ':', 'str', 'Returns', '-', '>', 'None']
train
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client1.py#L2957-L2972
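Call sketch; the facade must already be connected to a controller, and the charm URL and channel are examples only:

import asyncio

async def add_charm(client_facade):
    await client_facade.AddCharm(channel="stable", url="cs:ubuntu-15")

# asyncio.get_event_loop().run_until_complete(add_charm(facade))  # facade: a connected ClientFacade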
9,798
cs50/lib50
lib50/_api.py
Slug._check_endings
def _check_endings(self): """Check begin/end of slug, raises Error if malformed.""" if self.slug.startswith("/") and self.slug.endswith("/"): raise InvalidSlugError( _("Invalid slug. Did you mean {}, without the leading and trailing slashes?".format(self.slug.strip("/")))) elif self.slug.startswith("/"): raise InvalidSlugError( _("Invalid slug. Did you mean {}, without the leading slash?".format(self.slug.strip("/")))) elif self.slug.endswith("/"): raise InvalidSlugError( _("Invalid slug. Did you mean {}, without the trailing slash?".format(self.slug.strip("/"))))
python
def _check_endings(self): """Check begin/end of slug, raises Error if malformed.""" if self.slug.startswith("/") and self.slug.endswith("/"): raise InvalidSlugError( _("Invalid slug. Did you mean {}, without the leading and trailing slashes?".format(self.slug.strip("/")))) elif self.slug.startswith("/"): raise InvalidSlugError( _("Invalid slug. Did you mean {}, without the leading slash?".format(self.slug.strip("/")))) elif self.slug.endswith("/"): raise InvalidSlugError( _("Invalid slug. Did you mean {}, without the trailing slash?".format(self.slug.strip("/"))))
['def', '_check_endings', '(', 'self', ')', ':', 'if', 'self', '.', 'slug', '.', 'startswith', '(', '"/"', ')', 'and', 'self', '.', 'slug', '.', 'endswith', '(', '"/"', ')', ':', 'raise', 'InvalidSlugError', '(', '_', '(', '"Invalid slug. Did you mean {}, without the leading and trailing slashes?"', '.', 'format', '(', 'self', '.', 'slug', '.', 'strip', '(', '"/"', ')', ')', ')', ')', 'elif', 'self', '.', 'slug', '.', 'startswith', '(', '"/"', ')', ':', 'raise', 'InvalidSlugError', '(', '_', '(', '"Invalid slug. Did you mean {}, without the leading slash?"', '.', 'format', '(', 'self', '.', 'slug', '.', 'strip', '(', '"/"', ')', ')', ')', ')', 'elif', 'self', '.', 'slug', '.', 'endswith', '(', '"/"', ')', ':', 'raise', 'InvalidSlugError', '(', '_', '(', '"Invalid slug. Did you mean {}, without the trailing slash?"', '.', 'format', '(', 'self', '.', 'slug', '.', 'strip', '(', '"/"', ')', ')', ')', ')']
Check begin/end of slug, raises Error if malformed.
['Check', 'begin', '/', 'end', 'of', 'slug', 'raises', 'Error', 'if', 'malformed', '.']
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L420-L430
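Standalone sketch of the leading/trailing-slash check performed above (slug value invented):

slug = "/cs50/problems/2019/x/hello/"
if slug.startswith("/") and slug.endswith("/"):
    print("Invalid slug. Did you mean {}, without the leading and trailing slashes?"
          .format(slug.strip("/")))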
9,799
koszullab/metaTOR
metator/scripts/hicstuff.py
get_missing_bins
def get_missing_bins(original, trimmed): """Retrieve indices of a trimmed matrix with respect to the original matrix. Fairly fast but is only correct if diagonal values are different, which is always the case in practice. """ original_diag = np.diag(original) trimmed_diag = np.diag(trimmed) index = [] m = min(original.shape) for j in range(min(trimmed.shape)): k = 0 while original_diag[j + k] != trimmed_diag[j] and k < 2 * m: k += 1 index.append(k + j) return np.array(index)
python
def get_missing_bins(original, trimmed): """Retrieve indices of a trimmed matrix with respect to the original matrix. Fairly fast but is only correct if diagonal values are different, which is always the case in practice. """ original_diag = np.diag(original) trimmed_diag = np.diag(trimmed) index = [] m = min(original.shape) for j in range(min(trimmed.shape)): k = 0 while original_diag[j + k] != trimmed_diag[j] and k < 2 * m: k += 1 index.append(k + j) return np.array(index)
['def', 'get_missing_bins', '(', 'original', ',', 'trimmed', ')', ':', 'original_diag', '=', 'np', '.', 'diag', '(', 'original', ')', 'trimmed_diag', '=', 'np', '.', 'diag', '(', 'trimmed', ')', 'index', '=', '[', ']', 'm', '=', 'min', '(', 'original', '.', 'shape', ')', 'for', 'j', 'in', 'range', '(', 'min', '(', 'trimmed', '.', 'shape', ')', ')', ':', 'k', '=', '0', 'while', 'original_diag', '[', 'j', '+', 'k', ']', '!=', 'trimmed_diag', '[', 'j', ']', 'and', 'k', '<', '2', '*', 'm', ':', 'k', '+=', '1', 'index', '.', 'append', '(', 'k', '+', 'j', ')', 'return', 'np', '.', 'array', '(', 'index', ')']
Retrieve indices of a trimmed matrix with respect to the original matrix. Fairly fast but is only correct if diagonal values are different, which is always the case in practice.
['Retrieve', 'indices', 'of', 'a', 'trimmed', 'matrix', 'with', 'respect', 'to', 'the', 'original', 'matrix', '.', 'Fairly', 'fast', 'but', 'is', 'only', 'correct', 'if', 'diagonal', 'values', 'are', 'different', 'which', 'is', 'always', 'the', 'case', 'in', 'practice', '.']
train
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L801-L816
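Worked numpy example, with the import path taken from the record; diagonal values are distinct, as the docstring requires:

import numpy as np
from metator.scripts.hicstuff import get_missing_bins

original = np.diag([1., 2., 3., 4.])
trimmed = np.diag([1., 3., 4.])              # bin 1 was trimmed away
print(get_missing_bins(original, trimmed))   # -> [0 2 3]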