Dataset schema. Each column is listed with its type and the observed range of string lengths, numeric values, or number of distinct values:

    Column            Type           Range / distinct values
    ----------------  -------------  -----------------------
    repo              stringlengths  7 - 55
    path              stringlengths  4 - 223
    url               stringlengths  87 - 315
    code              stringlengths  75 - 104k
    code_tokens       sequence       -
    docstring         stringlengths  1 - 46.9k
    docstring_tokens  sequence       -
    language          stringclasses  1 value
    partition         stringclasses  3 values
    avg_line_len      float64        7.91 - 980
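For readers who want to work with rows like the ones below, here is a minimal sketch of how a single record could be represented and how a full export might be iterated. The field names come from the schema table above, and the example values are copied from the first row of the preview; the JSON Lines layout and the `data.jsonl` file name are assumptions made for illustration, not something stated in this preview.

```python
import json

# One record from the preview, rebuilt as a plain dict. Field names follow the
# schema table above; values are copied from the first row. The long "code",
# "code_tokens" and "docstring_tokens" fields are omitted here for brevity.
record = {
    "repo": "honeynet/beeswarm",
    "path": "beeswarm/drones/client/models/dispatcher.py",
    "url": ("https://github.com/honeynet/beeswarm/blob/"
            "db51ea0bc29f631c3e3b5312b479ac9d5e31079a/"
            "beeswarm/drones/client/models/dispatcher.py#L101-L107"),
    "docstring": "Return true if current time is in the active range",
    "language": "python",
    "partition": "train",
    "avg_line_len": 45.142857,
}


def iter_records(path):
    """Stream records from a JSON Lines export, one JSON object per line."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)


# Example usage (hypothetical file name): count training rows per repository.
# from collections import Counter
# repos = Counter(r["repo"] for r in iter_records("data.jsonl")
#                 if r["partition"] == "train")
```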
honeynet/beeswarm
beeswarm/drones/client/models/dispatcher.py
https://github.com/honeynet/beeswarm/blob/db51ea0bc29f631c3e3b5312b479ac9d5e31079a/beeswarm/drones/client/models/dispatcher.py#L101-L107
def time_in_range(self): """Return true if current time is in the active range""" curr = datetime.datetime.now().time() if self.start_time <= self.end_time: return self.start_time <= curr <= self.end_time else: return self.start_time <= curr or curr <= self.end_time
[ "def", "time_in_range", "(", "self", ")", ":", "curr", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "time", "(", ")", "if", "self", ".", "start_time", "<=", "self", ".", "end_time", ":", "return", "self", ".", "start_time", "<=", "curr", "<=", "self", ".", "end_time", "else", ":", "return", "self", ".", "start_time", "<=", "curr", "or", "curr", "<=", "self", ".", "end_time" ]
Return true if current time is in the active range
[ "Return", "true", "if", "current", "time", "is", "in", "the", "active", "range" ]
python
train
45.142857
jaraco/keyring
keyring/core.py
https://github.com/jaraco/keyring/blob/71c798378e365286b7cc03c06e4d7d24c7de8fc4/keyring/core.py#L147-L174
def load_config(): """Load a keyring using the config file in the config root.""" filename = 'keyringrc.cfg' keyring_cfg = os.path.join(platform.config_root(), filename) if not os.path.exists(keyring_cfg): return config = configparser.RawConfigParser() config.read(keyring_cfg) _load_keyring_path(config) # load the keyring class name, and then load this keyring try: if config.has_section("backend"): keyring_name = config.get("backend", "default-keyring").strip() else: raise configparser.NoOptionError('backend', 'default-keyring') except (configparser.NoOptionError, ImportError): logger = logging.getLogger('keyring') logger.warning("Keyring config file contains incorrect values.\n" + "Config file: %s" % keyring_cfg) return return load_keyring(keyring_name)
[ "def", "load_config", "(", ")", ":", "filename", "=", "'keyringrc.cfg'", "keyring_cfg", "=", "os", ".", "path", ".", "join", "(", "platform", ".", "config_root", "(", ")", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "keyring_cfg", ")", ":", "return", "config", "=", "configparser", ".", "RawConfigParser", "(", ")", "config", ".", "read", "(", "keyring_cfg", ")", "_load_keyring_path", "(", "config", ")", "# load the keyring class name, and then load this keyring", "try", ":", "if", "config", ".", "has_section", "(", "\"backend\"", ")", ":", "keyring_name", "=", "config", ".", "get", "(", "\"backend\"", ",", "\"default-keyring\"", ")", ".", "strip", "(", ")", "else", ":", "raise", "configparser", ".", "NoOptionError", "(", "'backend'", ",", "'default-keyring'", ")", "except", "(", "configparser", ".", "NoOptionError", ",", "ImportError", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "'keyring'", ")", "logger", ".", "warning", "(", "\"Keyring config file contains incorrect values.\\n\"", "+", "\"Config file: %s\"", "%", "keyring_cfg", ")", "return", "return", "load_keyring", "(", "keyring_name", ")" ]
Load a keyring using the config file in the config root.
[ "Load", "a", "keyring", "using", "the", "config", "file", "in", "the", "config", "root", "." ]
python
valid
31.535714
tcalmant/ipopo
samples/rsa/helloimpl.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/samples/rsa/helloimpl.py#L25-L38
def sayHello(self, name="Not given", message="nothing"): """ Synchronous implementation of IHello.sayHello synchronous method. The remote calling thread will be blocked until this is executed and responds. """ print( "Python.sayHello called by: {0} " "with message: '{1}'".format(name, message) ) return ( "PythonSync says: Howdy {0} " "that's a nice runtime you got there".format(name) )
[ "def", "sayHello", "(", "self", ",", "name", "=", "\"Not given\"", ",", "message", "=", "\"nothing\"", ")", ":", "print", "(", "\"Python.sayHello called by: {0} \"", "\"with message: '{1}'\"", ".", "format", "(", "name", ",", "message", ")", ")", "return", "(", "\"PythonSync says: Howdy {0} \"", "\"that's a nice runtime you got there\"", ".", "format", "(", "name", ")", ")" ]
Synchronous implementation of IHello.sayHello synchronous method. The remote calling thread will be blocked until this is executed and responds.
[ "Synchronous", "implementation", "of", "IHello", ".", "sayHello", "synchronous", "method", ".", "The", "remote", "calling", "thread", "will", "be", "blocked", "until", "this", "is", "executed", "and", "responds", "." ]
python
train
35.357143
BlockHub/blockhubdpostools
dpostools/legacy.py
https://github.com/BlockHub/blockhubdpostools/blob/27712cd97cd3658ee54a4330ff3135b51a01d7d1/dpostools/legacy.py#L49-L54
def set_connection(host=None, database=None, user=None, password=None): """Set connection parameters. Call set_connection with no arguments to clear.""" c.CONNECTION['HOST'] = host c.CONNECTION['DATABASE'] = database c.CONNECTION['USER'] = user c.CONNECTION['PASSWORD'] = password
[ "def", "set_connection", "(", "host", "=", "None", ",", "database", "=", "None", ",", "user", "=", "None", ",", "password", "=", "None", ")", ":", "c", ".", "CONNECTION", "[", "'HOST'", "]", "=", "host", "c", ".", "CONNECTION", "[", "'DATABASE'", "]", "=", "database", "c", ".", "CONNECTION", "[", "'USER'", "]", "=", "user", "c", ".", "CONNECTION", "[", "'PASSWORD'", "]", "=", "password" ]
Set connection parameters. Call set_connection with no arguments to clear.
[ "Set", "connection", "parameters", ".", "Call", "set_connection", "with", "no", "arguments", "to", "clear", "." ]
python
valid
49.166667
dcaune/perseus-lib-python-common
majormode/perseus/utils/email_util.py
https://github.com/dcaune/perseus-lib-python-common/blob/ba48fe0fd9bb4a75b53e7d10c41ada36a72d4496/majormode/perseus/utils/email_util.py#L44-L75
def __build_author_name_expr(author_name, author_email_address): """ Build the name of the author of a message as described in the Internet Message Format specification: https://tools.ietf.org/html/rfc5322#section-3.6.2 @param author_name: complete name of the originator of the message. @param author_email_address: address of the mailbox to which the author of the message suggests that replies be sent. @return: a string representing the author of the message, that is, the mailbox of the person or system responsible for the writing of the message. This string is intended to be used as the "From:" field of the message. """ assert author_name or author_email_address, 'Both arguments MUST NOT be bull' # Use the specified name of the author or the username of his email # address. author_name_expr = author_name or author_email_address[:author_email_address.find('@')] # Escape the name of the author if it contains a space character. if ' ' in author_name_expr: author_name_expr = '"%s"' % author_name_expr # Complete the name of the author with his email address when specified. if author_email_address: author_name_expr = '%s <%s>' % (author_name_expr, author_email_address) return author_name_expr
[ "def", "__build_author_name_expr", "(", "author_name", ",", "author_email_address", ")", ":", "assert", "author_name", "or", "author_email_address", ",", "'Both arguments MUST NOT be bull'", "# Use the specified name of the author or the username of his email", "# address.", "author_name_expr", "=", "author_name", "or", "author_email_address", "[", ":", "author_email_address", ".", "find", "(", "'@'", ")", "]", "# Escape the name of the author if it contains a space character.", "if", "' '", "in", "author_name_expr", ":", "author_name_expr", "=", "'\"%s\"'", "%", "author_name_expr", "# Complete the name of the author with his email address when specified.", "if", "author_email_address", ":", "author_name_expr", "=", "'%s <%s>'", "%", "(", "author_name_expr", ",", "author_email_address", ")", "return", "author_name_expr" ]
Build the name of the author of a message as described in the Internet Message Format specification: https://tools.ietf.org/html/rfc5322#section-3.6.2 @param author_name: complete name of the originator of the message. @param author_email_address: address of the mailbox to which the author of the message suggests that replies be sent. @return: a string representing the author of the message, that is, the mailbox of the person or system responsible for the writing of the message. This string is intended to be used as the "From:" field of the message.
[ "Build", "the", "name", "of", "the", "author", "of", "a", "message", "as", "described", "in", "the", "Internet", "Message", "Format", "specification", ":", "https", ":", "//", "tools", ".", "ietf", ".", "org", "/", "html", "/", "rfc5322#section", "-", "3", ".", "6", ".", "2" ]
python
train
40.53125
obriencj/python-javatools
javatools/ziputils.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/ziputils.py#L357-L375
def zip_entry_rollup(zipfile): """ returns a tuple of (files, dirs, size_uncompressed, size_compressed). files+dirs will equal len(zipfile.infolist) """ files = dirs = 0 total_c = total_u = 0 for i in zipfile.infolist(): if i.filename[-1] == '/': # I wonder if there's a better detection method than this dirs += 1 else: files += 1 total_c += i.compress_size total_u += i.file_size return files, dirs, total_c, total_u
[ "def", "zip_entry_rollup", "(", "zipfile", ")", ":", "files", "=", "dirs", "=", "0", "total_c", "=", "total_u", "=", "0", "for", "i", "in", "zipfile", ".", "infolist", "(", ")", ":", "if", "i", ".", "filename", "[", "-", "1", "]", "==", "'/'", ":", "# I wonder if there's a better detection method than this", "dirs", "+=", "1", "else", ":", "files", "+=", "1", "total_c", "+=", "i", ".", "compress_size", "total_u", "+=", "i", ".", "file_size", "return", "files", ",", "dirs", ",", "total_c", ",", "total_u" ]
returns a tuple of (files, dirs, size_uncompressed, size_compressed). files+dirs will equal len(zipfile.infolist)
[ "returns", "a", "tuple", "of", "(", "files", "dirs", "size_uncompressed", "size_compressed", ")", ".", "files", "+", "dirs", "will", "equal", "len", "(", "zipfile", ".", "infolist", ")" ]
python
train
26.894737
StanfordVL/robosuite
robosuite/environments/sawyer_stack.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/environments/sawyer_stack.py#L195-L211
def _get_reference(self): """ Sets up references to important components. A reference is typically an index or a list of indices that point to the corresponding elements in a flatten array, which is how MuJoCo stores physical simulation data. """ super()._get_reference() self.cubeA_body_id = self.sim.model.body_name2id("cubeA") self.cubeB_body_id = self.sim.model.body_name2id("cubeB") self.l_finger_geom_ids = [ self.sim.model.geom_name2id(x) for x in self.gripper.left_finger_geoms ] self.r_finger_geom_ids = [ self.sim.model.geom_name2id(x) for x in self.gripper.right_finger_geoms ] self.cubeA_geom_id = self.sim.model.geom_name2id("cubeA") self.cubeB_geom_id = self.sim.model.geom_name2id("cubeB")
[ "def", "_get_reference", "(", "self", ")", ":", "super", "(", ")", ".", "_get_reference", "(", ")", "self", ".", "cubeA_body_id", "=", "self", ".", "sim", ".", "model", ".", "body_name2id", "(", "\"cubeA\"", ")", "self", ".", "cubeB_body_id", "=", "self", ".", "sim", ".", "model", ".", "body_name2id", "(", "\"cubeB\"", ")", "self", ".", "l_finger_geom_ids", "=", "[", "self", ".", "sim", ".", "model", ".", "geom_name2id", "(", "x", ")", "for", "x", "in", "self", ".", "gripper", ".", "left_finger_geoms", "]", "self", ".", "r_finger_geom_ids", "=", "[", "self", ".", "sim", ".", "model", ".", "geom_name2id", "(", "x", ")", "for", "x", "in", "self", ".", "gripper", ".", "right_finger_geoms", "]", "self", ".", "cubeA_geom_id", "=", "self", ".", "sim", ".", "model", ".", "geom_name2id", "(", "\"cubeA\"", ")", "self", ".", "cubeB_geom_id", "=", "self", ".", "sim", ".", "model", ".", "geom_name2id", "(", "\"cubeB\"", ")" ]
Sets up references to important components. A reference is typically an index or a list of indices that point to the corresponding elements in a flatten array, which is how MuJoCo stores physical simulation data.
[ "Sets", "up", "references", "to", "important", "components", ".", "A", "reference", "is", "typically", "an", "index", "or", "a", "list", "of", "indices", "that", "point", "to", "the", "corresponding", "elements", "in", "a", "flatten", "array", "which", "is", "how", "MuJoCo", "stores", "physical", "simulation", "data", "." ]
python
train
48.470588
opendatateam/udata
udata/app.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/app.py#L155-L188
def create_app(config='udata.settings.Defaults', override=None, init_logging=init_logging): '''Factory for a minimal application''' app = UDataApp(APP_NAME) app.config.from_object(config) settings = os.environ.get('UDATA_SETTINGS', join(os.getcwd(), 'udata.cfg')) if exists(settings): app.settings_file = settings # Keep track of loaded settings for diagnostic app.config.from_pyfile(settings) if override: app.config.from_object(override) # Loads defaults from plugins for pkg in entrypoints.get_roots(app): if pkg == 'udata': continue # Defaults are already loaded module = '{}.settings'.format(pkg) if pkgutil.find_loader(module): settings = pkgutil.get_loader(module) for key, default in settings.__dict__.items(): app.config.setdefault(key, default) app.json_encoder = UDataJsonEncoder app.debug = app.config['DEBUG'] and not app.config['TESTING'] app.wsgi_app = ProxyFix(app.wsgi_app) init_logging(app) register_extensions(app) return app
[ "def", "create_app", "(", "config", "=", "'udata.settings.Defaults'", ",", "override", "=", "None", ",", "init_logging", "=", "init_logging", ")", ":", "app", "=", "UDataApp", "(", "APP_NAME", ")", "app", ".", "config", ".", "from_object", "(", "config", ")", "settings", "=", "os", ".", "environ", ".", "get", "(", "'UDATA_SETTINGS'", ",", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'udata.cfg'", ")", ")", "if", "exists", "(", "settings", ")", ":", "app", ".", "settings_file", "=", "settings", "# Keep track of loaded settings for diagnostic", "app", ".", "config", ".", "from_pyfile", "(", "settings", ")", "if", "override", ":", "app", ".", "config", ".", "from_object", "(", "override", ")", "# Loads defaults from plugins", "for", "pkg", "in", "entrypoints", ".", "get_roots", "(", "app", ")", ":", "if", "pkg", "==", "'udata'", ":", "continue", "# Defaults are already loaded", "module", "=", "'{}.settings'", ".", "format", "(", "pkg", ")", "if", "pkgutil", ".", "find_loader", "(", "module", ")", ":", "settings", "=", "pkgutil", ".", "get_loader", "(", "module", ")", "for", "key", ",", "default", "in", "settings", ".", "__dict__", ".", "items", "(", ")", ":", "app", ".", "config", ".", "setdefault", "(", "key", ",", "default", ")", "app", ".", "json_encoder", "=", "UDataJsonEncoder", "app", ".", "debug", "=", "app", ".", "config", "[", "'DEBUG'", "]", "and", "not", "app", ".", "config", "[", "'TESTING'", "]", "app", ".", "wsgi_app", "=", "ProxyFix", "(", "app", ".", "wsgi_app", ")", "init_logging", "(", "app", ")", "register_extensions", "(", "app", ")", "return", "app" ]
Factory for a minimal application
[ "Factory", "for", "a", "minimal", "application" ]
python
train
32.117647
PmagPy/PmagPy
pmagpy/builder2.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/builder2.py#L1193-L1207
def validate_data(self): """ Validate specimen, sample, site, and location data. """ warnings = {} spec_warnings, samp_warnings, site_warnings, loc_warnings = {}, {}, {}, {} if self.specimens: spec_warnings = self.validate_items(self.specimens, 'specimen') if self.samples: samp_warnings = self.validate_items(self.samples, 'sample') if self.sites: site_warnings = self.validate_items(self.sites, 'site') if self.locations: loc_warnings = self.validate_items(self.locations, 'location') return spec_warnings, samp_warnings, site_warnings, loc_warnings
[ "def", "validate_data", "(", "self", ")", ":", "warnings", "=", "{", "}", "spec_warnings", ",", "samp_warnings", ",", "site_warnings", ",", "loc_warnings", "=", "{", "}", ",", "{", "}", ",", "{", "}", ",", "{", "}", "if", "self", ".", "specimens", ":", "spec_warnings", "=", "self", ".", "validate_items", "(", "self", ".", "specimens", ",", "'specimen'", ")", "if", "self", ".", "samples", ":", "samp_warnings", "=", "self", ".", "validate_items", "(", "self", ".", "samples", ",", "'sample'", ")", "if", "self", ".", "sites", ":", "site_warnings", "=", "self", ".", "validate_items", "(", "self", ".", "sites", ",", "'site'", ")", "if", "self", ".", "locations", ":", "loc_warnings", "=", "self", ".", "validate_items", "(", "self", ".", "locations", ",", "'location'", ")", "return", "spec_warnings", ",", "samp_warnings", ",", "site_warnings", ",", "loc_warnings" ]
Validate specimen, sample, site, and location data.
[ "Validate", "specimen", "sample", "site", "and", "location", "data", "." ]
python
train
44.333333
w1ll1am23/pyeconet
src/pyeconet/api.py
https://github.com/w1ll1am23/pyeconet/blob/05abf965f67c7445355508a38f11992d13adac4f/src/pyeconet/api.py#L102-L111
def get_locations(): """ Pull the accounts locations. """ arequest = requests.get(LOCATIONS_URL, headers=HEADERS) status_code = str(arequest.status_code) if status_code == '401': _LOGGER.error("Token expired.") return False return arequest.json()
[ "def", "get_locations", "(", ")", ":", "arequest", "=", "requests", ".", "get", "(", "LOCATIONS_URL", ",", "headers", "=", "HEADERS", ")", "status_code", "=", "str", "(", "arequest", ".", "status_code", ")", "if", "status_code", "==", "'401'", ":", "_LOGGER", ".", "error", "(", "\"Token expired.\"", ")", "return", "False", "return", "arequest", ".", "json", "(", ")" ]
Pull the accounts locations.
[ "Pull", "the", "accounts", "locations", "." ]
python
valid
31.7
openpaperwork/paperwork-backend
paperwork_backend/index.py
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/index.py#L475-L487
def guess_labels(self, doc): """ return a prediction of label names """ if doc.nb_pages <= 0: return set() self.label_guesser.total_nb_documents = len(self._docs_by_id.keys()) label_names = self.label_guesser.guess(doc) labels = set() for label_name in label_names: label = self.labels[label_name] labels.add(label) return labels
[ "def", "guess_labels", "(", "self", ",", "doc", ")", ":", "if", "doc", ".", "nb_pages", "<=", "0", ":", "return", "set", "(", ")", "self", ".", "label_guesser", ".", "total_nb_documents", "=", "len", "(", "self", ".", "_docs_by_id", ".", "keys", "(", ")", ")", "label_names", "=", "self", ".", "label_guesser", ".", "guess", "(", "doc", ")", "labels", "=", "set", "(", ")", "for", "label_name", "in", "label_names", ":", "label", "=", "self", ".", "labels", "[", "label_name", "]", "labels", ".", "add", "(", "label", ")", "return", "labels" ]
return a prediction of label names
[ "return", "a", "prediction", "of", "label", "names" ]
python
train
32.692308
Kronuz/pyScss
scss/compiler.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/compiler.py#L473-L479
def _at_extend(self, calculator, rule, scope, block): """ Implements @extend """ from scss.selector import Selector selectors = calculator.apply_vars(block.argument) rule.extends_selectors.extend(Selector.parse_many(selectors))
[ "def", "_at_extend", "(", "self", ",", "calculator", ",", "rule", ",", "scope", ",", "block", ")", ":", "from", "scss", ".", "selector", "import", "Selector", "selectors", "=", "calculator", ".", "apply_vars", "(", "block", ".", "argument", ")", "rule", ".", "extends_selectors", ".", "extend", "(", "Selector", ".", "parse_many", "(", "selectors", ")", ")" ]
Implements @extend
[ "Implements" ]
python
train
38.428571
ace0/pyrelic
pyrelic/prf.py
https://github.com/ace0/pyrelic/blob/f23d4e6586674675f72304d5938548267d6413bf/pyrelic/prf.py#L46-L71
def wrap(x): """ Wraps an element or integer type by serializing it and base64 encoding the resulting bytes. """ # Detect the type so we can call the proper serialization routine if isinstance(x, G1Element): return _wrap(x, serializeG1) elif isinstance(x, G2Element): return _wrap(x, serializeG2) elif isinstance(x, GtElement): return _wrap(x, serializeGt) elif isinstance(x, str): return x elif isinstance(x, (int, long, BigInt)): return hex(long(x)) # All other items else: raise NotImplementedError("Cannot unwrap {}; only types {} supported". format(type(x), [G1Element, G2Element, GtElement, int, long, BigInt]) )
[ "def", "wrap", "(", "x", ")", ":", "# Detect the type so we can call the proper serialization routine", "if", "isinstance", "(", "x", ",", "G1Element", ")", ":", "return", "_wrap", "(", "x", ",", "serializeG1", ")", "elif", "isinstance", "(", "x", ",", "G2Element", ")", ":", "return", "_wrap", "(", "x", ",", "serializeG2", ")", "elif", "isinstance", "(", "x", ",", "GtElement", ")", ":", "return", "_wrap", "(", "x", ",", "serializeGt", ")", "elif", "isinstance", "(", "x", ",", "str", ")", ":", "return", "x", "elif", "isinstance", "(", "x", ",", "(", "int", ",", "long", ",", "BigInt", ")", ")", ":", "return", "hex", "(", "long", "(", "x", ")", ")", "# All other items", "else", ":", "raise", "NotImplementedError", "(", "\"Cannot unwrap {}; only types {} supported\"", ".", "format", "(", "type", "(", "x", ")", ",", "[", "G1Element", ",", "G2Element", ",", "GtElement", ",", "int", ",", "long", ",", "BigInt", "]", ")", ")" ]
Wraps an element or integer type by serializing it and base64 encoding the resulting bytes.
[ "Wraps", "an", "element", "or", "integer", "type", "by", "serializing", "it", "and", "base64", "encoding", "the", "resulting", "bytes", "." ]
python
train
28.153846
nanoporetech/ont_fast5_api
ont_fast5_api/analysis_tools/event_detection.py
https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/analysis_tools/event_detection.py#L16-L53
def set_event_data(self, data, read_attrs): """ Set event data with the specied attributes. :param data: Event data table. :param read_attrs: Attributes to put on the read group. This must include the read_number, which must refer to a read present in the object. The attributes should not include the standard read attributes: * read_id * start_time * duration * start_mux Those will be pulled from the read information already present in the object for the specified read. """ if self.handle.mode == 'r': raise Exception('File is not open for writing.') read_number = read_attrs['read_number'] read_group = '{}/Reads/Read_{}'.format(self.group_name, read_number) read_info = self.handle.status.read_info read_number_map = self.handle.status.read_number_map index = read_number_map.get(read_number) if index is None: raise Exception('Cannot add event detection data for a read that does not exist.') info = read_info[index] read_attrs.update({'read_id': info.read_id, 'start_time': info.start_time, 'duration': info.duration, 'start_mux': info.start_mux, 'median_before': info.median_before}) attrs = self.handle.get_analysis_attributes(read_group) if attrs is None: self.handle.add_analysis_subgroup(self.group_name, 'Reads/Read_{}'.format(read_number), attrs=read_attrs) self.handle.add_analysis_dataset(read_group, 'Events', data) else: raise Exception('Event detection data already exists for this analysis and read.')
[ "def", "set_event_data", "(", "self", ",", "data", ",", "read_attrs", ")", ":", "if", "self", ".", "handle", ".", "mode", "==", "'r'", ":", "raise", "Exception", "(", "'File is not open for writing.'", ")", "read_number", "=", "read_attrs", "[", "'read_number'", "]", "read_group", "=", "'{}/Reads/Read_{}'", ".", "format", "(", "self", ".", "group_name", ",", "read_number", ")", "read_info", "=", "self", ".", "handle", ".", "status", ".", "read_info", "read_number_map", "=", "self", ".", "handle", ".", "status", ".", "read_number_map", "index", "=", "read_number_map", ".", "get", "(", "read_number", ")", "if", "index", "is", "None", ":", "raise", "Exception", "(", "'Cannot add event detection data for a read that does not exist.'", ")", "info", "=", "read_info", "[", "index", "]", "read_attrs", ".", "update", "(", "{", "'read_id'", ":", "info", ".", "read_id", ",", "'start_time'", ":", "info", ".", "start_time", ",", "'duration'", ":", "info", ".", "duration", ",", "'start_mux'", ":", "info", ".", "start_mux", ",", "'median_before'", ":", "info", ".", "median_before", "}", ")", "attrs", "=", "self", ".", "handle", ".", "get_analysis_attributes", "(", "read_group", ")", "if", "attrs", "is", "None", ":", "self", ".", "handle", ".", "add_analysis_subgroup", "(", "self", ".", "group_name", ",", "'Reads/Read_{}'", ".", "format", "(", "read_number", ")", ",", "attrs", "=", "read_attrs", ")", "self", ".", "handle", ".", "add_analysis_dataset", "(", "read_group", ",", "'Events'", ",", "data", ")", "else", ":", "raise", "Exception", "(", "'Event detection data already exists for this analysis and read.'", ")" ]
Set event data with the specied attributes. :param data: Event data table. :param read_attrs: Attributes to put on the read group. This must include the read_number, which must refer to a read present in the object. The attributes should not include the standard read attributes: * read_id * start_time * duration * start_mux Those will be pulled from the read information already present in the object for the specified read.
[ "Set", "event", "data", "with", "the", "specied", "attributes", ".", ":", "param", "data", ":", "Event", "data", "table", ".", ":", "param", "read_attrs", ":", "Attributes", "to", "put", "on", "the", "read", "group", ".", "This", "must", "include", "the", "read_number", "which", "must", "refer", "to", "a", "read", "present", "in", "the", "object", ".", "The", "attributes", "should", "not", "include", "the", "standard", "read", "attributes", ":" ]
python
train
48.394737
deepmind/sonnet
sonnet/python/modules/conv.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/conv.py#L2631-L2677
def _construct_w(self, inputs): """Connects the module into the graph, with input Tensor `inputs`. Args: inputs: A 4D Tensor of shape: [batch_size, input_height, input_width, input_channels] and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: A tuple of two 4D Tensors, each with the same dtype as `inputs`: 1. w_dw, the depthwise weight matrix, of shape: [kernel_size, input_channels, channel_multiplier] 2. w_pw, the pointwise weight matrix, of shape: [1, 1, channel_multiplier * input_channels, output_channels]. """ depthwise_weight_shape = self._kernel_shape + (self._input_channels, self._channel_multiplier) pointwise_input_size = self._channel_multiplier * self._input_channels pointwise_weight_shape = (1, 1, pointwise_input_size, self._output_channels) if "w_dw" not in self._initializers: fan_in_shape = depthwise_weight_shape[:2] self._initializers["w_dw"] = create_weight_initializer(fan_in_shape, dtype=inputs.dtype) if "w_pw" not in self._initializers: fan_in_shape = pointwise_weight_shape[:3] self._initializers["w_pw"] = create_weight_initializer(fan_in_shape, dtype=inputs.dtype) w_dw = tf.get_variable( "w_dw", shape=depthwise_weight_shape, dtype=inputs.dtype, initializer=self._initializers["w_dw"], partitioner=self._partitioners.get("w_dw", None), regularizer=self._regularizers.get("w_dw", None)) w_pw = tf.get_variable( "w_pw", shape=pointwise_weight_shape, dtype=inputs.dtype, initializer=self._initializers["w_pw"], partitioner=self._partitioners.get("w_pw", None), regularizer=self._regularizers.get("w_pw", None)) return w_dw, w_pw
[ "def", "_construct_w", "(", "self", ",", "inputs", ")", ":", "depthwise_weight_shape", "=", "self", ".", "_kernel_shape", "+", "(", "self", ".", "_input_channels", ",", "self", ".", "_channel_multiplier", ")", "pointwise_input_size", "=", "self", ".", "_channel_multiplier", "*", "self", ".", "_input_channels", "pointwise_weight_shape", "=", "(", "1", ",", "1", ",", "pointwise_input_size", ",", "self", ".", "_output_channels", ")", "if", "\"w_dw\"", "not", "in", "self", ".", "_initializers", ":", "fan_in_shape", "=", "depthwise_weight_shape", "[", ":", "2", "]", "self", ".", "_initializers", "[", "\"w_dw\"", "]", "=", "create_weight_initializer", "(", "fan_in_shape", ",", "dtype", "=", "inputs", ".", "dtype", ")", "if", "\"w_pw\"", "not", "in", "self", ".", "_initializers", ":", "fan_in_shape", "=", "pointwise_weight_shape", "[", ":", "3", "]", "self", ".", "_initializers", "[", "\"w_pw\"", "]", "=", "create_weight_initializer", "(", "fan_in_shape", ",", "dtype", "=", "inputs", ".", "dtype", ")", "w_dw", "=", "tf", ".", "get_variable", "(", "\"w_dw\"", ",", "shape", "=", "depthwise_weight_shape", ",", "dtype", "=", "inputs", ".", "dtype", ",", "initializer", "=", "self", ".", "_initializers", "[", "\"w_dw\"", "]", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "\"w_dw\"", ",", "None", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "\"w_dw\"", ",", "None", ")", ")", "w_pw", "=", "tf", ".", "get_variable", "(", "\"w_pw\"", ",", "shape", "=", "pointwise_weight_shape", ",", "dtype", "=", "inputs", ".", "dtype", ",", "initializer", "=", "self", ".", "_initializers", "[", "\"w_pw\"", "]", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "\"w_pw\"", ",", "None", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "\"w_pw\"", ",", "None", ")", ")", "return", "w_dw", ",", "w_pw" ]
Connects the module into the graph, with input Tensor `inputs`. Args: inputs: A 4D Tensor of shape: [batch_size, input_height, input_width, input_channels] and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: A tuple of two 4D Tensors, each with the same dtype as `inputs`: 1. w_dw, the depthwise weight matrix, of shape: [kernel_size, input_channels, channel_multiplier] 2. w_pw, the pointwise weight matrix, of shape: [1, 1, channel_multiplier * input_channels, output_channels].
[ "Connects", "the", "module", "into", "the", "graph", "with", "input", "Tensor", "inputs", "." ]
python
train
41.361702
sentinel-hub/sentinelhub-py
sentinelhub/geometry.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/geometry.py#L467-L485
def _parse_geometry(geometry): """ Parses given geometry into shapely object :param geometry: :return: Shapely polygon or multipolygon :rtype: shapely.geometry.Polygon or shapely.geometry.MultiPolygon :raises TypeError """ if isinstance(geometry, str): geometry = shapely.wkt.loads(geometry) elif isinstance(geometry, dict): geometry = shapely.geometry.shape(geometry) elif not isinstance(geometry, shapely.geometry.base.BaseGeometry): raise TypeError('Unsupported geometry representation') if not isinstance(geometry, (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)): raise ValueError('Supported geometry types are polygon and multipolygon, got {}'.format(type(geometry))) return geometry
[ "def", "_parse_geometry", "(", "geometry", ")", ":", "if", "isinstance", "(", "geometry", ",", "str", ")", ":", "geometry", "=", "shapely", ".", "wkt", ".", "loads", "(", "geometry", ")", "elif", "isinstance", "(", "geometry", ",", "dict", ")", ":", "geometry", "=", "shapely", ".", "geometry", ".", "shape", "(", "geometry", ")", "elif", "not", "isinstance", "(", "geometry", ",", "shapely", ".", "geometry", ".", "base", ".", "BaseGeometry", ")", ":", "raise", "TypeError", "(", "'Unsupported geometry representation'", ")", "if", "not", "isinstance", "(", "geometry", ",", "(", "shapely", ".", "geometry", ".", "Polygon", ",", "shapely", ".", "geometry", ".", "MultiPolygon", ")", ")", ":", "raise", "ValueError", "(", "'Supported geometry types are polygon and multipolygon, got {}'", ".", "format", "(", "type", "(", "geometry", ")", ")", ")", "return", "geometry" ]
Parses given geometry into shapely object :param geometry: :return: Shapely polygon or multipolygon :rtype: shapely.geometry.Polygon or shapely.geometry.MultiPolygon :raises TypeError
[ "Parses", "given", "geometry", "into", "shapely", "object" ]
python
train
43.157895
ThreatConnect-Inc/tcex
tcex/tcex_bin_run.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L1820-L1846
def _add_arg_python(self, key, value=None, mask=False): """Add CLI Arg formatted specifically for Python. Args: key (string): The CLI Args key (e.g., --name). value (string): The CLI Args value (e.g., bob). mask (boolean, default:False): Indicates whether no mask value. """ self._data[key] = value if not value: # both false boolean values (flags) and empty values should not be added. pass elif value is True: # true boolean values are flags and should not contain a value self._args.append('--{}'.format(key)) self._args_quoted.append('--{}'.format(key)) self._args_masked.append('--{}'.format(key)) else: self._args.append('--{}={}'.format(key, value)) if mask: # mask sensitive values value = 'x' * len(str(value)) else: # quote all values that would get displayed value = self.quote(value) self._args_quoted.append('--{}={}'.format(key, value)) self._args_masked.append('--{}={}'.format(key, value))
[ "def", "_add_arg_python", "(", "self", ",", "key", ",", "value", "=", "None", ",", "mask", "=", "False", ")", ":", "self", ".", "_data", "[", "key", "]", "=", "value", "if", "not", "value", ":", "# both false boolean values (flags) and empty values should not be added.", "pass", "elif", "value", "is", "True", ":", "# true boolean values are flags and should not contain a value", "self", ".", "_args", ".", "append", "(", "'--{}'", ".", "format", "(", "key", ")", ")", "self", ".", "_args_quoted", ".", "append", "(", "'--{}'", ".", "format", "(", "key", ")", ")", "self", ".", "_args_masked", ".", "append", "(", "'--{}'", ".", "format", "(", "key", ")", ")", "else", ":", "self", ".", "_args", ".", "append", "(", "'--{}={}'", ".", "format", "(", "key", ",", "value", ")", ")", "if", "mask", ":", "# mask sensitive values", "value", "=", "'x'", "*", "len", "(", "str", "(", "value", ")", ")", "else", ":", "# quote all values that would get displayed", "value", "=", "self", ".", "quote", "(", "value", ")", "self", ".", "_args_quoted", ".", "append", "(", "'--{}={}'", ".", "format", "(", "key", ",", "value", ")", ")", "self", ".", "_args_masked", ".", "append", "(", "'--{}={}'", ".", "format", "(", "key", ",", "value", ")", ")" ]
Add CLI Arg formatted specifically for Python. Args: key (string): The CLI Args key (e.g., --name). value (string): The CLI Args value (e.g., bob). mask (boolean, default:False): Indicates whether no mask value.
[ "Add", "CLI", "Arg", "formatted", "specifically", "for", "Python", "." ]
python
train
43.259259
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py#L1036-L1051
def _create_page_control(self): """ Creates and connects the underlying paging widget. """ if self.custom_page_control: control = self.custom_page_control() elif self.kind == 'plain': control = QtGui.QPlainTextEdit() elif self.kind == 'rich': control = QtGui.QTextEdit() control.installEventFilter(self) viewport = control.viewport() viewport.installEventFilter(self) control.setReadOnly(True) control.setUndoRedoEnabled(False) control.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn) return control
[ "def", "_create_page_control", "(", "self", ")", ":", "if", "self", ".", "custom_page_control", ":", "control", "=", "self", ".", "custom_page_control", "(", ")", "elif", "self", ".", "kind", "==", "'plain'", ":", "control", "=", "QtGui", ".", "QPlainTextEdit", "(", ")", "elif", "self", ".", "kind", "==", "'rich'", ":", "control", "=", "QtGui", ".", "QTextEdit", "(", ")", "control", ".", "installEventFilter", "(", "self", ")", "viewport", "=", "control", ".", "viewport", "(", ")", "viewport", ".", "installEventFilter", "(", "self", ")", "control", ".", "setReadOnly", "(", "True", ")", "control", ".", "setUndoRedoEnabled", "(", "False", ")", "control", ".", "setVerticalScrollBarPolicy", "(", "QtCore", ".", "Qt", ".", "ScrollBarAlwaysOn", ")", "return", "control" ]
Creates and connects the underlying paging widget.
[ "Creates", "and", "connects", "the", "underlying", "paging", "widget", "." ]
python
test
38.9375
bohea/sanic-limiter
sanic_limiter/extension.py
https://github.com/bohea/sanic-limiter/blob/54c9fc4a3a3f1a9bb69367262637d07701ae5694/sanic_limiter/extension.py#L297-L316
def limit(self, limit_value, key_func=None, per_method=False, methods=None, error_message=None, exempt_when=None): """ decorator to be used for rate limiting individual routes. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param function key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param bool per_method: whether the limit is sub categorized into the http method of the request. :param list methods: if specified, only the methods in this list will be rate limited (default: None). :param error_message: string (or callable that returns one) to override the error message used in the response. :return: """ return self.__limit_decorator(limit_value, key_func, per_method=per_method, methods=methods, error_message=error_message, exempt_when=exempt_when)
[ "def", "limit", "(", "self", ",", "limit_value", ",", "key_func", "=", "None", ",", "per_method", "=", "False", ",", "methods", "=", "None", ",", "error_message", "=", "None", ",", "exempt_when", "=", "None", ")", ":", "return", "self", ".", "__limit_decorator", "(", "limit_value", ",", "key_func", ",", "per_method", "=", "per_method", ",", "methods", "=", "methods", ",", "error_message", "=", "error_message", ",", "exempt_when", "=", "exempt_when", ")" ]
decorator to be used for rate limiting individual routes. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param function key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param bool per_method: whether the limit is sub categorized into the http method of the request. :param list methods: if specified, only the methods in this list will be rate limited (default: None). :param error_message: string (or callable that returns one) to override the error message used in the response. :return:
[ "decorator", "to", "be", "used", "for", "rate", "limiting", "individual", "routes", "." ]
python
train
55.05
crdoconnor/strictyaml
strictyaml/utils.py
https://github.com/crdoconnor/strictyaml/blob/efdac7f89e81679fc95686288cd32b9563fde609/strictyaml/utils.py#L105-L145
def ruamel_structure(data, validator=None): """ Take dicts and lists and return a ruamel.yaml style structure of CommentedMaps, CommentedSeqs and data. If a validator is presented and the type is unknown, it is checked against the validator to see if it will turn it back in to YAML. """ if isinstance(data, dict): if len(data) == 0: raise exceptions.CannotBuildDocumentsFromEmptyDictOrList( "Document must be built with non-empty dicts and lists" ) return CommentedMap( [ (ruamel_structure(key), ruamel_structure(value)) for key, value in data.items() ] ) elif isinstance(data, list): if len(data) == 0: raise exceptions.CannotBuildDocumentsFromEmptyDictOrList( "Document must be built with non-empty dicts and lists" ) return CommentedSeq([ruamel_structure(item) for item in data]) elif isinstance(data, bool): return u"yes" if data else u"no" elif isinstance(data, (int, float)): return str(data) else: if not is_string(data): raise exceptions.CannotBuildDocumentFromInvalidData( ( "Document must be built from a combination of:\n" "string, int, float, bool or nonempty list/dict\n\n" "Instead, found variable with type '{}': '{}'" ).format(type(data).__name__, data) ) return data
[ "def", "ruamel_structure", "(", "data", ",", "validator", "=", "None", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "if", "len", "(", "data", ")", "==", "0", ":", "raise", "exceptions", ".", "CannotBuildDocumentsFromEmptyDictOrList", "(", "\"Document must be built with non-empty dicts and lists\"", ")", "return", "CommentedMap", "(", "[", "(", "ruamel_structure", "(", "key", ")", ",", "ruamel_structure", "(", "value", ")", ")", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", "]", ")", "elif", "isinstance", "(", "data", ",", "list", ")", ":", "if", "len", "(", "data", ")", "==", "0", ":", "raise", "exceptions", ".", "CannotBuildDocumentsFromEmptyDictOrList", "(", "\"Document must be built with non-empty dicts and lists\"", ")", "return", "CommentedSeq", "(", "[", "ruamel_structure", "(", "item", ")", "for", "item", "in", "data", "]", ")", "elif", "isinstance", "(", "data", ",", "bool", ")", ":", "return", "u\"yes\"", "if", "data", "else", "u\"no\"", "elif", "isinstance", "(", "data", ",", "(", "int", ",", "float", ")", ")", ":", "return", "str", "(", "data", ")", "else", ":", "if", "not", "is_string", "(", "data", ")", ":", "raise", "exceptions", ".", "CannotBuildDocumentFromInvalidData", "(", "(", "\"Document must be built from a combination of:\\n\"", "\"string, int, float, bool or nonempty list/dict\\n\\n\"", "\"Instead, found variable with type '{}': '{}'\"", ")", ".", "format", "(", "type", "(", "data", ")", ".", "__name__", ",", "data", ")", ")", "return", "data" ]
Take dicts and lists and return a ruamel.yaml style structure of CommentedMaps, CommentedSeqs and data. If a validator is presented and the type is unknown, it is checked against the validator to see if it will turn it back in to YAML.
[ "Take", "dicts", "and", "lists", "and", "return", "a", "ruamel", ".", "yaml", "style", "structure", "of", "CommentedMaps", "CommentedSeqs", "and", "data", "." ]
python
train
37.097561
DLR-RM/RAFCON
source/rafcon/gui/controllers/graphical_editor_gaphas.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/graphical_editor_gaphas.py#L231-L237
def _copy_selection(self, *event): """Copies the current selection to the clipboard. """ if react_to_event(self.view, self.view.editor, event): logger.debug("copy selection") global_clipboard.copy(self.model.selection) return True
[ "def", "_copy_selection", "(", "self", ",", "*", "event", ")", ":", "if", "react_to_event", "(", "self", ".", "view", ",", "self", ".", "view", ".", "editor", ",", "event", ")", ":", "logger", ".", "debug", "(", "\"copy selection\"", ")", "global_clipboard", ".", "copy", "(", "self", ".", "model", ".", "selection", ")", "return", "True" ]
Copies the current selection to the clipboard.
[ "Copies", "the", "current", "selection", "to", "the", "clipboard", "." ]
python
train
40.571429
tanghaibao/jcvi
jcvi/formats/genbank.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/genbank.py#L429-L478
def getquals(args): """ %prog getquals [--options] gbkfile > qualsfile Read GenBank file and extract all qualifiers per feature type into a tab-delimited file """ p = OptionParser(getquals.__doc__) p.add_option("--types", default="gene,mRNA,CDS", type="str", dest="quals_ftypes", help="Feature types from which to extract qualifiers") p.add_option("--ignore", default="locus_tag,product,codon_start,translation", type="str", dest="quals_ignore", help="Qualifiers to exclude from parsing") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gbkfile, = args quals_ftypes = opts.quals_ftypes.split(",") quals_ignore = opts.quals_ignore.split(",") locus = dict() locus_tag = None for rec in SeqIO.parse(gbkfile, "gb"): for f in rec.features: if f.type in quals_ftypes: locus_tag = f.qualifiers[LT][0] if locus_tag not in locus: locus[locus_tag] = dict() for ftype in quals_ftypes: if ftype not in locus[locus_tag]: locus[locus_tag][ftype] = [] if ftype == "CDS": # store the CDS protein_id locus[locus_tag]["protein_id"] = [] quals = [] for qual in f.qualifiers: if qual in quals_ignore: continue for qval in f.qualifiers[qual]: quals.append((locus_tag, qual, qval)) if qual == "protein_id": locus[locus_tag]["protein_id"].append(qval) if len(quals) > 0: locus[locus_tag][f.type].append(quals) for locus_tag in locus: print_locus_quals(locus_tag, locus, quals_ftypes)
[ "def", "getquals", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "getquals", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--types\"", ",", "default", "=", "\"gene,mRNA,CDS\"", ",", "type", "=", "\"str\"", ",", "dest", "=", "\"quals_ftypes\"", ",", "help", "=", "\"Feature types from which to extract qualifiers\"", ")", "p", ".", "add_option", "(", "\"--ignore\"", ",", "default", "=", "\"locus_tag,product,codon_start,translation\"", ",", "type", "=", "\"str\"", ",", "dest", "=", "\"quals_ignore\"", ",", "help", "=", "\"Qualifiers to exclude from parsing\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "gbkfile", ",", "=", "args", "quals_ftypes", "=", "opts", ".", "quals_ftypes", ".", "split", "(", "\",\"", ")", "quals_ignore", "=", "opts", ".", "quals_ignore", ".", "split", "(", "\",\"", ")", "locus", "=", "dict", "(", ")", "locus_tag", "=", "None", "for", "rec", "in", "SeqIO", ".", "parse", "(", "gbkfile", ",", "\"gb\"", ")", ":", "for", "f", "in", "rec", ".", "features", ":", "if", "f", ".", "type", "in", "quals_ftypes", ":", "locus_tag", "=", "f", ".", "qualifiers", "[", "LT", "]", "[", "0", "]", "if", "locus_tag", "not", "in", "locus", ":", "locus", "[", "locus_tag", "]", "=", "dict", "(", ")", "for", "ftype", "in", "quals_ftypes", ":", "if", "ftype", "not", "in", "locus", "[", "locus_tag", "]", ":", "locus", "[", "locus_tag", "]", "[", "ftype", "]", "=", "[", "]", "if", "ftype", "==", "\"CDS\"", ":", "# store the CDS protein_id", "locus", "[", "locus_tag", "]", "[", "\"protein_id\"", "]", "=", "[", "]", "quals", "=", "[", "]", "for", "qual", "in", "f", ".", "qualifiers", ":", "if", "qual", "in", "quals_ignore", ":", "continue", "for", "qval", "in", "f", ".", "qualifiers", "[", "qual", "]", ":", "quals", ".", "append", "(", "(", "locus_tag", ",", "qual", ",", "qval", ")", ")", "if", "qual", "==", "\"protein_id\"", ":", "locus", "[", "locus_tag", "]", "[", "\"protein_id\"", "]", ".", "append", "(", "qval", ")", "if", "len", "(", "quals", ")", ">", "0", ":", "locus", "[", "locus_tag", "]", "[", "f", ".", "type", "]", ".", "append", "(", "quals", ")", "for", "locus_tag", "in", "locus", ":", "print_locus_quals", "(", "locus_tag", ",", "locus", ",", "quals_ftypes", ")" ]
%prog getquals [--options] gbkfile > qualsfile Read GenBank file and extract all qualifiers per feature type into a tab-delimited file
[ "%prog", "getquals", "[", "--", "options", "]", "gbkfile", ">", "qualsfile" ]
python
train
38.04
google/grr
grr/core/grr_response_core/lib/config_lib.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/config_lib.py#L118-L136
def SetPlatformArchContext(): """Add the running contexts to the config system.""" # Initialize the running platform context: _CONFIG.AddContext("Platform:%s" % platform.system().title()) machine = platform.uname()[4] if machine in ["x86_64", "AMD64", "i686"]: # 32 bit binaries running on AMD64 will still have a i386 arch. if platform.architecture()[0] == "32bit": arch = "i386" else: arch = "amd64" elif machine == "x86": arch = "i386" else: arch = machine _CONFIG.AddContext("Arch:%s" % arch)
[ "def", "SetPlatformArchContext", "(", ")", ":", "# Initialize the running platform context:", "_CONFIG", ".", "AddContext", "(", "\"Platform:%s\"", "%", "platform", ".", "system", "(", ")", ".", "title", "(", ")", ")", "machine", "=", "platform", ".", "uname", "(", ")", "[", "4", "]", "if", "machine", "in", "[", "\"x86_64\"", ",", "\"AMD64\"", ",", "\"i686\"", "]", ":", "# 32 bit binaries running on AMD64 will still have a i386 arch.", "if", "platform", ".", "architecture", "(", ")", "[", "0", "]", "==", "\"32bit\"", ":", "arch", "=", "\"i386\"", "else", ":", "arch", "=", "\"amd64\"", "elif", "machine", "==", "\"x86\"", ":", "arch", "=", "\"i386\"", "else", ":", "arch", "=", "machine", "_CONFIG", ".", "AddContext", "(", "\"Arch:%s\"", "%", "arch", ")" ]
Add the running contexts to the config system.
[ "Add", "the", "running", "contexts", "to", "the", "config", "system", "." ]
python
train
27.842105
titusjan/argos
argos/qt/treeitems.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/qt/treeitems.py#L273-L281
def logBranch(self, indent=0, level=logging.DEBUG): """ Logs the item and all descendants, one line per child """ if 0: print(indent * " " + str(self)) else: logger.log(level, indent * " " + str(self)) for childItems in self.childItems: childItems.logBranch(indent + 1, level=level)
[ "def", "logBranch", "(", "self", ",", "indent", "=", "0", ",", "level", "=", "logging", ".", "DEBUG", ")", ":", "if", "0", ":", "print", "(", "indent", "*", "\" \"", "+", "str", "(", "self", ")", ")", "else", ":", "logger", ".", "log", "(", "level", ",", "indent", "*", "\" \"", "+", "str", "(", "self", ")", ")", "for", "childItems", "in", "self", ".", "childItems", ":", "childItems", ".", "logBranch", "(", "indent", "+", "1", ",", "level", "=", "level", ")" ]
Logs the item and all descendants, one line per child
[ "Logs", "the", "item", "and", "all", "descendants", "one", "line", "per", "child" ]
python
train
39.555556
KarchinLab/probabilistic2020
prob2020/python/utils.py
https://github.com/KarchinLab/probabilistic2020/blob/5d70583b0a7c07cfe32e95f3a70e05df412acb84/prob2020/python/utils.py#L174-L191
def rev_comp(seq): """Get reverse complement of sequence. rev_comp will maintain the case of the sequence. Parameters ---------- seq : str nucleotide sequence. valid {a, c, t, g, n} Returns ------- rev_comp_seq : str reverse complement of sequence """ rev_seq = seq[::-1] rev_comp_seq = ''.join([base_pairing[s] for s in rev_seq]) return rev_comp_seq
[ "def", "rev_comp", "(", "seq", ")", ":", "rev_seq", "=", "seq", "[", ":", ":", "-", "1", "]", "rev_comp_seq", "=", "''", ".", "join", "(", "[", "base_pairing", "[", "s", "]", "for", "s", "in", "rev_seq", "]", ")", "return", "rev_comp_seq" ]
Get reverse complement of sequence. rev_comp will maintain the case of the sequence. Parameters ---------- seq : str nucleotide sequence. valid {a, c, t, g, n} Returns ------- rev_comp_seq : str reverse complement of sequence
[ "Get", "reverse", "complement", "of", "sequence", "." ]
python
train
22.222222
pypa/setuptools
setuptools/package_index.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/package_index.py#L1108-L1136
def local_open(url): """Read a local path, with special support for directories""" scheme, server, path, param, query, frag = urllib.parse.urlparse(url) filename = urllib.request.url2pathname(path) if os.path.isfile(filename): return urllib.request.urlopen(url) elif path.endswith('/') and os.path.isdir(filename): files = [] for f in os.listdir(filename): filepath = os.path.join(filename, f) if f == 'index.html': with open(filepath, 'r') as fp: body = fp.read() break elif os.path.isdir(filepath): f += '/' files.append('<a href="{name}">{name}</a>'.format(name=f)) else: tmpl = ( "<html><head><title>{url}</title>" "</head><body>{files}</body></html>") body = tmpl.format(url=url, files='\n'.join(files)) status, message = 200, "OK" else: status, message, body = 404, "Path not found", "Not found" headers = {'content-type': 'text/html'} body_stream = six.StringIO(body) return urllib.error.HTTPError(url, status, message, headers, body_stream)
[ "def", "local_open", "(", "url", ")", ":", "scheme", ",", "server", ",", "path", ",", "param", ",", "query", ",", "frag", "=", "urllib", ".", "parse", ".", "urlparse", "(", "url", ")", "filename", "=", "urllib", ".", "request", ".", "url2pathname", "(", "path", ")", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "return", "urllib", ".", "request", ".", "urlopen", "(", "url", ")", "elif", "path", ".", "endswith", "(", "'/'", ")", "and", "os", ".", "path", ".", "isdir", "(", "filename", ")", ":", "files", "=", "[", "]", "for", "f", "in", "os", ".", "listdir", "(", "filename", ")", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "filename", ",", "f", ")", "if", "f", "==", "'index.html'", ":", "with", "open", "(", "filepath", ",", "'r'", ")", "as", "fp", ":", "body", "=", "fp", ".", "read", "(", ")", "break", "elif", "os", ".", "path", ".", "isdir", "(", "filepath", ")", ":", "f", "+=", "'/'", "files", ".", "append", "(", "'<a href=\"{name}\">{name}</a>'", ".", "format", "(", "name", "=", "f", ")", ")", "else", ":", "tmpl", "=", "(", "\"<html><head><title>{url}</title>\"", "\"</head><body>{files}</body></html>\"", ")", "body", "=", "tmpl", ".", "format", "(", "url", "=", "url", ",", "files", "=", "'\\n'", ".", "join", "(", "files", ")", ")", "status", ",", "message", "=", "200", ",", "\"OK\"", "else", ":", "status", ",", "message", ",", "body", "=", "404", ",", "\"Path not found\"", ",", "\"Not found\"", "headers", "=", "{", "'content-type'", ":", "'text/html'", "}", "body_stream", "=", "six", ".", "StringIO", "(", "body", ")", "return", "urllib", ".", "error", ".", "HTTPError", "(", "url", ",", "status", ",", "message", ",", "headers", ",", "body_stream", ")" ]
Read a local path, with special support for directories
[ "Read", "a", "local", "path", "with", "special", "support", "for", "directories" ]
python
train
40.586207
portantier/habu
habu/cli/cmd_tcpscan.py
https://github.com/portantier/habu/blob/87091e389dc6332fe1b82830c22b2eefc55816f2/habu/cli/cmd_tcpscan.py#L23-L85
def cmd_tcpscan(ip, port, iface, flags, sleeptime, timeout, show_all, verbose): """TCP Port Scanner. Print the ports that generated a response with the SYN flag or (if show use -a) all the ports that generated a response. It's really basic compared with nmap, but who is comparing? Example: \b # habu.tcpscan -p 22,23,80,443 -s 1 45.77.113.133 22 S -> SA 80 S -> SA 443 S -> SA """ if verbose: logging.basicConfig(level=logging.INFO, format='%(message)s') conf.verb = False if iface: conf.iface = iface port_regex = r'^[0-9,-]+$' if not re.match(port_regex, port): logging.critical("Invalid port specification") return False ports = [] for p in str(port).split(','): if '-' in p: first, last = p.split('-') for n in range(int(first), int(last)+1): ports.append(n) else: ports.append(int(p)) out = "{port} {sflags} -> {rflags}" pkts = IP(dst=ip)/TCP(flags=flags, dport=ports) if sleeptime: res = [] for pkt in pkts: logging.info(pkt.summary()) _ = sr1(pkt) if _: logging.info(_.summary()) res.append((pkt, _)) else: res, unans = sr(pkts, verbose=verbose) for s,r in res: if show_all or 'S' in r.sprintf(r"%TCP.flags%"): print(out.format( port=s[TCP].dport, sflags=s.sprintf(r"%TCP.flags%"), rflags=r.sprintf(r"%TCP.flags%") ))
[ "def", "cmd_tcpscan", "(", "ip", ",", "port", ",", "iface", ",", "flags", ",", "sleeptime", ",", "timeout", ",", "show_all", ",", "verbose", ")", ":", "if", "verbose", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "'%(message)s'", ")", "conf", ".", "verb", "=", "False", "if", "iface", ":", "conf", ".", "iface", "=", "iface", "port_regex", "=", "r'^[0-9,-]+$'", "if", "not", "re", ".", "match", "(", "port_regex", ",", "port", ")", ":", "logging", ".", "critical", "(", "\"Invalid port specification\"", ")", "return", "False", "ports", "=", "[", "]", "for", "p", "in", "str", "(", "port", ")", ".", "split", "(", "','", ")", ":", "if", "'-'", "in", "p", ":", "first", ",", "last", "=", "p", ".", "split", "(", "'-'", ")", "for", "n", "in", "range", "(", "int", "(", "first", ")", ",", "int", "(", "last", ")", "+", "1", ")", ":", "ports", ".", "append", "(", "n", ")", "else", ":", "ports", ".", "append", "(", "int", "(", "p", ")", ")", "out", "=", "\"{port} {sflags} -> {rflags}\"", "pkts", "=", "IP", "(", "dst", "=", "ip", ")", "/", "TCP", "(", "flags", "=", "flags", ",", "dport", "=", "ports", ")", "if", "sleeptime", ":", "res", "=", "[", "]", "for", "pkt", "in", "pkts", ":", "logging", ".", "info", "(", "pkt", ".", "summary", "(", ")", ")", "_", "=", "sr1", "(", "pkt", ")", "if", "_", ":", "logging", ".", "info", "(", "_", ".", "summary", "(", ")", ")", "res", ".", "append", "(", "(", "pkt", ",", "_", ")", ")", "else", ":", "res", ",", "unans", "=", "sr", "(", "pkts", ",", "verbose", "=", "verbose", ")", "for", "s", ",", "r", "in", "res", ":", "if", "show_all", "or", "'S'", "in", "r", ".", "sprintf", "(", "r\"%TCP.flags%\"", ")", ":", "print", "(", "out", ".", "format", "(", "port", "=", "s", "[", "TCP", "]", ".", "dport", ",", "sflags", "=", "s", ".", "sprintf", "(", "r\"%TCP.flags%\"", ")", ",", "rflags", "=", "r", ".", "sprintf", "(", "r\"%TCP.flags%\"", ")", ")", ")" ]
TCP Port Scanner. Print the ports that generated a response with the SYN flag or (if run with -a) all the ports that generated a response. It's really basic compared with nmap, but who is comparing? Example: \b # habu.tcpscan -p 22,23,80,443 -s 1 45.77.113.133 22 S -> SA 80 S -> SA 443 S -> SA
[ "TCP", "Port", "Scanner", "." ]
python
train
24.47619
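A minimal, self-contained sketch of the port-specification parsing step inside cmd_tcpscan above; the sample spec '22,80,8000-8002' is invented for illustration, and scapy is not needed for this part.

    port = '22,80,8000-8002'
    ports = []
    for p in port.split(','):
        if '-' in p:
            first, last = p.split('-')
            for n in range(int(first), int(last) + 1):  # inclusive range
                ports.append(n)
        else:
            ports.append(int(p))
    print(ports)  # [22, 80, 8000, 8001, 8002]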
hendrix/hendrix
hendrix/options.py
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/options.py#L195-L201
def options(argv=[]): """ A helper function that returns a dictionary of the default key-values pairs """ parser = HendrixOptionParser parsed_args = parser.parse_args(argv) return vars(parsed_args[0])
[ "def", "options", "(", "argv", "=", "[", "]", ")", ":", "parser", "=", "HendrixOptionParser", "parsed_args", "=", "parser", ".", "parse_args", "(", "argv", ")", "return", "vars", "(", "parsed_args", "[", "0", "]", ")" ]
A helper function that returns a dictionary of the default key-values pairs
[ "A", "helper", "function", "that", "returns", "a", "dictionary", "of", "the", "default", "key", "-", "values", "pairs" ]
python
train
31.142857
AnthonyBloomer/daftlistings
daftlistings/listing.py
https://github.com/AnthonyBloomer/daftlistings/blob/f6c1b52425bc740f443b5efe6632a4bf18ee997f/daftlistings/listing.py#L627-L649
def ber_code(self): """ This method gets ber code listed in Daft. :return: """ try: alt_text = self._ad_page_content.find( 'span', {'class': 'ber-hover'} ).find('img')['alt'] if ('exempt' in alt_text): return 'exempt' else: alt_arr = alt_text.split() if 'ber' in alt_arr[0].lower(): return alt_arr[1].lower() else: return None except Exception as e: if self._debug: logging.error( "Error getting the Ber Code. Error message: " + e.args[0]) return None
[ "def", "ber_code", "(", "self", ")", ":", "try", ":", "alt_text", "=", "self", ".", "_ad_page_content", ".", "find", "(", "'span'", ",", "{", "'class'", ":", "'ber-hover'", "}", ")", ".", "find", "(", "'img'", ")", "[", "'alt'", "]", "if", "(", "'exempt'", "in", "alt_text", ")", ":", "return", "'exempt'", "else", ":", "alt_arr", "=", "alt_text", ".", "split", "(", ")", "if", "'ber'", "in", "alt_arr", "[", "0", "]", ".", "lower", "(", ")", ":", "return", "alt_arr", "[", "1", "]", ".", "lower", "(", ")", "else", ":", "return", "None", "except", "Exception", "as", "e", ":", "if", "self", ".", "_debug", ":", "logging", ".", "error", "(", "\"Error getting the Ber Code. Error message: \"", "+", "e", ".", "args", "[", "0", "]", ")", "return", "None" ]
This method gets ber code listed in Daft. :return:
[ "This", "method", "gets", "ber", "code", "listed", "in", "Daft", ".", ":", "return", ":" ]
python
train
30.695652
liminspace/dju-image
dju_image/tools.py
https://github.com/liminspace/dju-image/blob/b06eb3be2069cd6cb52cf1e26c2c761883142d4e/dju_image/tools.py#L156-L171
def is_img_id_valid(img_id): """ Checks if img_id is valid. """ t = re.sub(r'[^a-z0-9_:\-\.]', '', img_id, re.IGNORECASE) t = re.sub(r'\.+', '.', t) if img_id != t or img_id.count(':') != 1: return False profile, base_name = img_id.split(':', 1) if not profile or not base_name: return False try: get_profile_configs(profile) except ValueError: return False return True
[ "def", "is_img_id_valid", "(", "img_id", ")", ":", "t", "=", "re", ".", "sub", "(", "r'[^a-z0-9_:\\-\\.]'", ",", "''", ",", "img_id", ",", "re", ".", "IGNORECASE", ")", "t", "=", "re", ".", "sub", "(", "r'\\.+'", ",", "'.'", ",", "t", ")", "if", "img_id", "!=", "t", "or", "img_id", ".", "count", "(", "':'", ")", "!=", "1", ":", "return", "False", "profile", ",", "base_name", "=", "img_id", ".", "split", "(", "':'", ",", "1", ")", "if", "not", "profile", "or", "not", "base_name", ":", "return", "False", "try", ":", "get_profile_configs", "(", "profile", ")", "except", "ValueError", ":", "return", "False", "return", "True" ]
Checks if img_id is valid.
[ "Checks", "if", "img_id", "is", "valid", "." ]
python
train
26.875
thunder-project/thunder
thunder/series/series.py
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L459-L475
def mean_by_panel(self, length): """ Compute the mean across fixed sized panels of each record. Splits each record into panels of size `length`, and then computes the mean across panels. Panel length must subdivide record exactly. Parameters ---------- length : int Fixed length with which to subdivide. """ self._check_panel(length) func = lambda v: v.reshape(-1, length).mean(axis=0) newindex = arange(length) return self.map(func, index=newindex)
[ "def", "mean_by_panel", "(", "self", ",", "length", ")", ":", "self", ".", "_check_panel", "(", "length", ")", "func", "=", "lambda", "v", ":", "v", ".", "reshape", "(", "-", "1", ",", "length", ")", ".", "mean", "(", "axis", "=", "0", ")", "newindex", "=", "arange", "(", "length", ")", "return", "self", ".", "map", "(", "func", ",", "index", "=", "newindex", ")" ]
Compute the mean across fixed sized panels of each record. Splits each record into panels of size `length`, and then computes the mean across panels. Panel length must subdivide record exactly. Parameters ---------- length : int Fixed length with which to subdivide.
[ "Compute", "the", "mean", "across", "fixed", "sized", "panels", "of", "each", "record", "." ]
python
train
32.411765
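A standalone numpy sketch of the reshape-and-mean step that mean_by_panel applies to each record; the sample record and panel length are assumptions, and the thunder Series wrapper itself is not involved here.

    import numpy as np

    record = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])  # one record, length 6
    length = 3                                          # panel length must divide 6
    panels = record.reshape(-1, length)                 # [[1, 2, 3], [4, 5, 6]]
    print(panels.mean(axis=0))                          # [2.5 3.5 4.5], the mean across panels at each within-panel position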
dancsalo/TensorBase
tensorbase/base.py
https://github.com/dancsalo/TensorBase/blob/3d42a326452bd03427034916ff2fb90730020204/tensorbase/base.py#L373-L380
def check_str(obj): """ Returns a string for various input types """ if isinstance(obj, str): return obj if isinstance(obj, float): return str(int(obj)) else: return str(obj)
[ "def", "check_str", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "str", ")", ":", "return", "obj", "if", "isinstance", "(", "obj", ",", "float", ")", ":", "return", "str", "(", "int", "(", "obj", ")", ")", "else", ":", "return", "str", "(", "obj", ")" ]
Returns a string for various input types
[ "Returns", "a", "string", "for", "various", "input", "types" ]
python
train
29.375
uber-common/opentracing-python-instrumentation
opentracing_instrumentation/http_server.py
https://github.com/uber-common/opentracing-python-instrumentation/blob/57b29fb9f647e073cde8c75155f4708cb5661d20/opentracing_instrumentation/http_server.py#L174-L191
def _parse_wsgi_headers(wsgi_environ): """ HTTP headers are presented in WSGI environment with 'HTTP_' prefix. This method finds those headers, removes the prefix, converts underscores to dashes, and converts to lower case. :param wsgi_environ: :return: returns a dictionary of headers """ prefix = 'HTTP_' p_len = len(prefix) # use .items() despite suspected memory pressure bc GC occasionally # collects wsgi_environ.iteritems() during iteration. headers = { key[p_len:].replace('_', '-').lower(): val for (key, val) in wsgi_environ.items() if key.startswith(prefix)} return headers
[ "def", "_parse_wsgi_headers", "(", "wsgi_environ", ")", ":", "prefix", "=", "'HTTP_'", "p_len", "=", "len", "(", "prefix", ")", "# use .items() despite suspected memory pressure bc GC occasionally", "# collects wsgi_environ.iteritems() during iteration.", "headers", "=", "{", "key", "[", "p_len", ":", "]", ".", "replace", "(", "'_'", ",", "'-'", ")", ".", "lower", "(", ")", ":", "val", "for", "(", "key", ",", "val", ")", "in", "wsgi_environ", ".", "items", "(", ")", "if", "key", ".", "startswith", "(", "prefix", ")", "}", "return", "headers" ]
HTTP headers are presented in WSGI environment with 'HTTP_' prefix. This method finds those headers, removes the prefix, converts underscores to dashes, and converts to lower case. :param wsgi_environ: :return: returns a dictionary of headers
[ "HTTP", "headers", "are", "presented", "in", "WSGI", "environment", "with", "HTTP_", "prefix", ".", "This", "method", "finds", "those", "headers", "removes", "the", "prefix", "converts", "underscores", "to", "dashes", "and", "converts", "to", "lower", "case", "." ]
python
train
39.555556
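A self-contained sketch of the header extraction described above; the sample WSGI environ is made up, and the dict comprehension mirrors the one in _parse_wsgi_headers.

    wsgi_environ = {
        'HTTP_USER_AGENT': 'curl/7.68.0',
        'HTTP_X_REQUEST_ID': 'abc-123',
        'PATH_INFO': '/ping',              # no HTTP_ prefix, so it is ignored
    }
    prefix = 'HTTP_'
    headers = {
        key[len(prefix):].replace('_', '-').lower(): val
        for key, val in wsgi_environ.items()
        if key.startswith(prefix)
    }
    print(headers)  # {'user-agent': 'curl/7.68.0', 'x-request-id': 'abc-123'}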
python-rope/rope
rope/base/pycore.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/pycore.py#L336-L341
def consume_changes(self, start, end): """Clear the changed status of lines from start till end""" left, right = self._get_changed(start, end) if left < right: del self.lines[left:right] return left < right
[ "def", "consume_changes", "(", "self", ",", "start", ",", "end", ")", ":", "left", ",", "right", "=", "self", ".", "_get_changed", "(", "start", ",", "end", ")", "if", "left", "<", "right", ":", "del", "self", ".", "lines", "[", "left", ":", "right", "]", "return", "left", "<", "right" ]
Clear the changed status of lines from start till end
[ "Clear", "the", "changed", "status", "of", "lines", "from", "start", "till", "end" ]
python
train
40.833333
johnnoone/aioconsul
aioconsul/client/checks_endpoint.py
https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/client/checks_endpoint.py#L125-L137
async def deregister(self, check): """Deregisters a local check Parameters: check (ObjectID): Check ID Returns: bool: ``True`` on success The agent will take care of deregistering the check from the Catalog. """ check_id = extract_attr(check, keys=["CheckID", "ID"]) response = await self._api.get("/v1/agent/check/deregister", check_id) return response.status == 200
[ "async", "def", "deregister", "(", "self", ",", "check", ")", ":", "check_id", "=", "extract_attr", "(", "check", ",", "keys", "=", "[", "\"CheckID\"", ",", "\"ID\"", "]", ")", "response", "=", "await", "self", ".", "_api", ".", "get", "(", "\"/v1/agent/check/deregister\"", ",", "check_id", ")", "return", "response", ".", "status", "==", "200" ]
Deregisters a local check Parameters: check (ObjectID): Check ID Returns: bool: ``True`` on success The agent will take care of deregistering the check from the Catalog.
[ "Deregisters", "a", "local", "check" ]
python
train
34.230769
bitshares/uptick
uptick/cli.py
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/cli.py#L40-L45
def set(ctx, key, value): """ Set configuration parameters """ if key == "default_account" and value[0] == "@": value = value[1:] ctx.bitshares.config[key] = value
[ "def", "set", "(", "ctx", ",", "key", ",", "value", ")", ":", "if", "key", "==", "\"default_account\"", "and", "value", "[", "0", "]", "==", "\"@\"", ":", "value", "=", "value", "[", "1", ":", "]", "ctx", ".", "bitshares", ".", "config", "[", "key", "]", "=", "value" ]
Set configuration parameters
[ "Set", "configuration", "parameters" ]
python
train
30.333333
AnthonyBloomer/daftlistings
daftlistings/listing.py
https://github.com/AnthonyBloomer/daftlistings/blob/f6c1b52425bc740f443b5efe6632a4bf18ee997f/daftlistings/listing.py#L104-L118
def price_change(self): """ This method returns any price change. :return: """ try: if self._data_from_search: return self._data_from_search.find('div', {'class': 'price-changes-sr'}).text else: return self._ad_page_content.find('div', {'class': 'price-changes-sr'}).text except Exception as e: if self._debug: logging.error( "Error getting price_change. Error message: " + e.args[0]) return
[ "def", "price_change", "(", "self", ")", ":", "try", ":", "if", "self", ".", "_data_from_search", ":", "return", "self", ".", "_data_from_search", ".", "find", "(", "'div'", ",", "{", "'class'", ":", "'price-changes-sr'", "}", ")", ".", "text", "else", ":", "return", "self", ".", "_ad_page_content", ".", "find", "(", "'div'", ",", "{", "'class'", ":", "'price-changes-sr'", "}", ")", ".", "text", "except", "Exception", "as", "e", ":", "if", "self", ".", "_debug", ":", "logging", ".", "error", "(", "\"Error getting price_change. Error message: \"", "+", "e", ".", "args", "[", "0", "]", ")", "return" ]
This method returns any price change. :return:
[ "This", "method", "returns", "any", "price", "change", ".", ":", "return", ":" ]
python
train
36.066667
Esri/ArcREST
src/arcrest/ags/_gpobjects.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/_gpobjects.py#L607-L619
def fromJSON(value): """loads the GP object from a JSON string """ j = json.loads(value) v = GPString() if "defaultValue" in j: v.value = j['defaultValue'] else: v.value = j['value'] if 'paramName' in j: v.paramName = j['paramName'] elif 'name' in j: v.paramName = j['name'] return v
[ "def", "fromJSON", "(", "value", ")", ":", "j", "=", "json", ".", "loads", "(", "value", ")", "v", "=", "GPString", "(", ")", "if", "\"defaultValue\"", "in", "j", ":", "v", ".", "value", "=", "j", "[", "'defaultValue'", "]", "else", ":", "v", ".", "value", "=", "j", "[", "'value'", "]", "if", "'paramName'", "in", "j", ":", "v", ".", "paramName", "=", "j", "[", "'paramName'", "]", "elif", "'name'", "in", "j", ":", "v", ".", "paramName", "=", "j", "[", "'name'", "]", "return", "v" ]
loads the GP object from a JSON string
[ "loads", "the", "GP", "object", "from", "a", "JSON", "string" ]
python
train
29.461538
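A small sketch of the key fallbacks that fromJSON relies on; the JSON snippet is invented, and a pair of plain variables stands in for the GPString object being populated.

    import json

    j = json.loads('{"defaultValue": "streets", "paramName": "basemap"}')
    value = j['defaultValue'] if 'defaultValue' in j else j['value']
    name = j.get('paramName', j.get('name'))
    print(value, name)  # streets basemap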
atl/py-smartdc
smartdc/datacenter.py
https://github.com/atl/py-smartdc/blob/cc5cd5910e19004cc46e376ce035affe28fc798e/smartdc/datacenter.py#L999-L1017
def delete_image(self, identifier): """ :: DELETE /:login/images/:id :param identifier: match on the listed image identifier :type identifier: :py:class:`basestring` or :py:class:`dict` A string or a dictionary containing an ``id`` key may be passed in. Will raise an error if the response was an error. """ if isinstance(identifier, dict): identifier = identifier.get('id', '') j, r = self.request('DELETE', '/images/' + str(identifier)) r.raise_for_status() return j
[ "def", "delete_image", "(", "self", ",", "identifier", ")", ":", "if", "isinstance", "(", "identifier", ",", "dict", ")", ":", "identifier", "=", "identifier", ".", "get", "(", "'id'", ",", "''", ")", "j", ",", "r", "=", "self", ".", "request", "(", "'DELETE'", ",", "'/images/'", "+", "str", "(", "identifier", ")", ")", "r", ".", "raise_for_status", "(", ")", "return", "j" ]
:: DELETE /:login/images/:id :param identifier: match on the listed image identifier :type identifier: :py:class:`basestring` or :py:class:`dict` A string or a dictionary containing an ``id`` key may be passed in. Will raise an error if the response was an error.
[ "::", "DELETE", "/", ":", "login", "/", "images", "/", ":", "id", ":", "param", "identifier", ":", "match", "on", "the", "listed", "image", "identifier", ":", "type", "identifier", ":", ":", "py", ":", "class", ":", "basestring", "or", ":", "py", ":", "class", ":", "dict", "A", "string", "or", "a", "dictionary", "containing", "an", "id", "key", "may", "be", "passed", "in", ".", "Will", "raise", "an", "error", "if", "the", "response", "was", "an", "error", "." ]
python
train
31.947368
hozn/keepassdb
keepassdb/db.py
https://github.com/hozn/keepassdb/blob/cb24985d1ed04e7d7db99ecdddf80dd1a91ee48b/keepassdb/db.py#L487-L500
def remove_entry(self, entry): """ Remove specified entry. :param entry: The Entry object to remove. :type entry: :class:`keepassdb.model.Entry` """ if not isinstance(entry, Entry): raise TypeError("entry param must be of type Entry.") if not entry in self.entries: raise ValueError("Entry doesn't exist / not bound to this datbase.") entry.group.entries.remove(entry) self.entries.remove(entry)
[ "def", "remove_entry", "(", "self", ",", "entry", ")", ":", "if", "not", "isinstance", "(", "entry", ",", "Entry", ")", ":", "raise", "TypeError", "(", "\"entry param must be of type Entry.\"", ")", "if", "not", "entry", "in", "self", ".", "entries", ":", "raise", "ValueError", "(", "\"Entry doesn't exist / not bound to this datbase.\"", ")", "entry", ".", "group", ".", "entries", ".", "remove", "(", "entry", ")", "self", ".", "entries", ".", "remove", "(", "entry", ")" ]
Remove specified entry. :param entry: The Entry object to remove. :type entry: :class:`keepassdb.model.Entry`
[ "Remove", "specified", "entry", ".", ":", "param", "entry", ":", "The", "Entry", "object", "to", "remove", ".", ":", "type", "entry", ":", ":", "class", ":", "keepassdb", ".", "model", ".", "Entry" ]
python
train
35.428571
radjkarl/fancyWidgets
fancywidgets/pyQtBased/Console.py
https://github.com/radjkarl/fancyWidgets/blob/ffe0d5747c5296c78575f0e0909af915a4a5698f/fancywidgets/pyQtBased/Console.py#L62-L68
def addText(self, text): """append text in the chosen color""" # move to the end of the doc self.moveCursor(QtGui.QTextCursor.End) # insert the text self.setTextColor(self._currentColor) self.textCursor().insertText(text)
[ "def", "addText", "(", "self", ",", "text", ")", ":", "# move to the end of the doc", "self", ".", "moveCursor", "(", "QtGui", ".", "QTextCursor", ".", "End", ")", "# insert the text", "self", ".", "setTextColor", "(", "self", ".", "_currentColor", ")", "self", ".", "textCursor", "(", ")", ".", "insertText", "(", "text", ")" ]
append text in the chosen color
[ "append", "text", "in", "the", "chosen", "color" ]
python
train
37.571429
boakley/robotframework-lint
rflint/parser/parser.py
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L92-L105
def robot_files(self): '''Return a list of all folders, and test suite files (.txt, .robot) ''' result = [] for name in os.listdir(self.path): fullpath = os.path.join(self.path, name) if os.path.isdir(fullpath): result.append(RobotFactory(fullpath, parent=self)) else: if ((name.endswith(".txt") or name.endswith(".robot")) and (name not in ("__init__.txt", "__init__.robot"))): result.append(RobotFactory(fullpath, parent=self)) return result
[ "def", "robot_files", "(", "self", ")", ":", "result", "=", "[", "]", "for", "name", "in", "os", ".", "listdir", "(", "self", ".", "path", ")", ":", "fullpath", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "name", ")", "if", "os", ".", "path", ".", "isdir", "(", "fullpath", ")", ":", "result", ".", "append", "(", "RobotFactory", "(", "fullpath", ",", "parent", "=", "self", ")", ")", "else", ":", "if", "(", "(", "name", ".", "endswith", "(", "\".txt\"", ")", "or", "name", ".", "endswith", "(", "\".robot\"", ")", ")", "and", "(", "name", "not", "in", "(", "\"__init__.txt\"", ",", "\"__init__.robot\"", ")", ")", ")", ":", "result", ".", "append", "(", "RobotFactory", "(", "fullpath", ",", "parent", "=", "self", ")", ")", "return", "result" ]
Return a list of all folders, and test suite files (.txt, .robot)
[ "Return", "a", "list", "of", "all", "folders", "and", "test", "suite", "files", "(", ".", "txt", ".", "robot", ")" ]
python
valid
41.357143
fedora-infra/fmn.lib
fmn/lib/__init__.py
https://github.com/fedora-infra/fmn.lib/blob/3120725556153d07c1809530f0fadcf250439110/fmn/lib/__init__.py#L97-L127
def load_preferences(session, config, valid_paths, cull_disabled=False, openid=None, cull_backends=None): """ Every rule for every filter for every context for every user. Any preferences in the DB that are for contexts that are disabled in the config are omitted here. If the `openid` argument is None, then this is an expensive query that loads, practically, the whole database. However, if an openid string is submitted, then only the preferences of that user are returned (and this is less expensive). """ cull_backends = cull_backends or [] query = session.query(fmn.lib.models.Preference) if openid: query = query.filter(fmn.lib.models.Preference.openid==openid) preferences = query.all() return [ preference.__json__(reify=True) for preference in preferences if ( preference.context.name in config['fmn.backends'] and preference.context.name not in cull_backends and (not cull_disabled or preference.enabled) ) ]
[ "def", "load_preferences", "(", "session", ",", "config", ",", "valid_paths", ",", "cull_disabled", "=", "False", ",", "openid", "=", "None", ",", "cull_backends", "=", "None", ")", ":", "cull_backends", "=", "cull_backends", "or", "[", "]", "query", "=", "session", ".", "query", "(", "fmn", ".", "lib", ".", "models", ".", "Preference", ")", "if", "openid", ":", "query", "=", "query", ".", "filter", "(", "fmn", ".", "lib", ".", "models", ".", "Preference", ".", "openid", "==", "openid", ")", "preferences", "=", "query", ".", "all", "(", ")", "return", "[", "preference", ".", "__json__", "(", "reify", "=", "True", ")", "for", "preference", "in", "preferences", "if", "(", "preference", ".", "context", ".", "name", "in", "config", "[", "'fmn.backends'", "]", "and", "preference", ".", "context", ".", "name", "not", "in", "cull_backends", "and", "(", "not", "cull_disabled", "or", "preference", ".", "enabled", ")", ")", "]" ]
Every rule for every filter for every context for every user. Any preferences in the DB that are for contexts that are disabled in the config are omitted here. If the `openid` argument is None, then this is an expensive query that loads, practically, the whole database. However, if an openid string is submitted, then only the preferences of that user are returned (and this is less expensive).
[ "Every", "rule", "for", "every", "filter", "for", "every", "context", "for", "every", "user", "." ]
python
train
34.516129
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L4373-L4388
def GetRowHeaders(self) -> list: """ Call IUIAutomationTablePattern::GetCurrentRowHeaders. Return list, a list of `Control` subclasses, representing all the row headers in a table. Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtablepattern-getcurrentrowheaders """ eleArray = self.pattern.GetCurrentRowHeaders() if eleArray: controls = [] for i in range(eleArray.Length): ele = eleArray.GetElement(i) con = Control.CreateControlFromElement(element=ele) if con: controls.append(con) return controls return []
[ "def", "GetRowHeaders", "(", "self", ")", "->", "list", ":", "eleArray", "=", "self", ".", "pattern", ".", "GetCurrentRowHeaders", "(", ")", "if", "eleArray", ":", "controls", "=", "[", "]", "for", "i", "in", "range", "(", "eleArray", ".", "Length", ")", ":", "ele", "=", "eleArray", ".", "GetElement", "(", "i", ")", "con", "=", "Control", ".", "CreateControlFromElement", "(", "element", "=", "ele", ")", "if", "con", ":", "controls", ".", "append", "(", "con", ")", "return", "controls", "return", "[", "]" ]
Call IUIAutomationTablePattern::GetCurrentRowHeaders. Return list, a list of `Control` subclasses, representing all the row headers in a table. Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtablepattern-getcurrentrowheaders
[ "Call", "IUIAutomationTablePattern", "::", "GetCurrentRowHeaders", ".", "Return", "list", "a", "list", "of", "Control", "subclasses", "representing", "all", "the", "row", "headers", "in", "a", "table", ".", "Refer", "https", ":", "//", "docs", ".", "microsoft", ".", "com", "/", "en", "-", "us", "/", "windows", "/", "desktop", "/", "api", "/", "uiautomationclient", "/", "nf", "-", "uiautomationclient", "-", "iuiautomationtablepattern", "-", "getcurrentrowheaders" ]
python
valid
45.4375
twilio/twilio-python
twilio/rest/api/v2010/account/usage/record/monthly.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/usage/record/monthly.py#L185-L194
def get_instance(self, payload): """ Build an instance of MonthlyInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance :rtype: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance """ return MonthlyInstance(self._version, payload, account_sid=self._solution['account_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "MonthlyInstance", "(", "self", ".", "_version", ",", "payload", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", ")" ]
Build an instance of MonthlyInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance :rtype: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance
[ "Build", "an", "instance", "of", "MonthlyInstance" ]
python
train
42.1
kubernetes-client/python
kubernetes/client/apis/extensions_v1beta1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/extensions_v1beta1_api.py#L1483-L1509
def delete_collection_pod_security_policy(self, **kwargs): """ delete collection of PodSecurityPolicy This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_pod_security_policy(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_pod_security_policy_with_http_info(**kwargs) else: (data) = self.delete_collection_pod_security_policy_with_http_info(**kwargs) return data
[ "def", "delete_collection_pod_security_policy", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "delete_collection_pod_security_policy_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "delete_collection_pod_security_policy_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
delete collection of PodSecurityPolicy This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_pod_security_policy(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread.
[ "delete", "collection", "of", "PodSecurityPolicy", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "delete_collection_pod_security_policy", "(", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
168.925926
hazelcast/hazelcast-python-client
hazelcast/proxy/multi_map.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/multi_map.py#L81-L94
def contains_entry(self, key, value): """ Returns whether the multimap contains an entry with the value. :param key: (object), the specified key. :param value: (object), the specified value. :return: (bool), ``true`` if this multimap contains the key-value tuple. """ check_not_none(key, "key can't be None") check_not_none(value, "value can't be None") key_data = self._to_data(key) value_data = self._to_data(value) return self._encode_invoke_on_key(multi_map_contains_entry_codec, key_data, key=key_data, value=value_data, thread_id=thread_id())
[ "def", "contains_entry", "(", "self", ",", "key", ",", "value", ")", ":", "check_not_none", "(", "key", ",", "\"key can't be None\"", ")", "check_not_none", "(", "value", ",", "\"value can't be None\"", ")", "key_data", "=", "self", ".", "_to_data", "(", "key", ")", "value_data", "=", "self", ".", "_to_data", "(", "value", ")", "return", "self", ".", "_encode_invoke_on_key", "(", "multi_map_contains_entry_codec", ",", "key_data", ",", "key", "=", "key_data", ",", "value", "=", "value_data", ",", "thread_id", "=", "thread_id", "(", ")", ")" ]
Returns whether the multimap contains an entry with the value. :param key: (object), the specified key. :param value: (object), the specified value. :return: (bool), ``true`` if this multimap contains the key-value tuple.
[ "Returns", "whether", "the", "multimap", "contains", "an", "entry", "with", "the", "value", "." ]
python
train
47.571429
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/util.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/util.py#L179-L196
def parse_and_normalize_url_date(date_str): """Parse a ISO 8601 date-time with optional timezone. - Return as datetime with timezone adjusted to UTC. - Return naive date-time set to UTC. """ if date_str is None: return None try: return d1_common.date_time.dt_from_iso8601_str(date_str) except d1_common.date_time.iso8601.ParseError as e: raise d1_common.types.exceptions.InvalidRequest( 0, 'Invalid date format for URL parameter. date="{}" error="{}"'.format( date_str, str(e) ), )
[ "def", "parse_and_normalize_url_date", "(", "date_str", ")", ":", "if", "date_str", "is", "None", ":", "return", "None", "try", ":", "return", "d1_common", ".", "date_time", ".", "dt_from_iso8601_str", "(", "date_str", ")", "except", "d1_common", ".", "date_time", ".", "iso8601", ".", "ParseError", "as", "e", ":", "raise", "d1_common", ".", "types", ".", "exceptions", ".", "InvalidRequest", "(", "0", ",", "'Invalid date format for URL parameter. date=\"{}\" error=\"{}\"'", ".", "format", "(", "date_str", ",", "str", "(", "e", ")", ")", ",", ")" ]
Parse an ISO 8601 date-time with optional timezone. - Return as datetime with timezone adjusted to UTC. - Return naive date-time set to UTC.
[ "Parse", "a", "ISO", "8601", "date", "-", "time", "with", "optional", "timezone", "." ]
python
train
32.111111
projecthamster/hamster
src/hamster/lib/graphics.py
https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/lib/graphics.py#L465-L479
def create_layout(self, size = None): """utility function to create layout with the default font. Size and alignment parameters are shortcuts to according functions of the pango.Layout""" if not self.context: # TODO - this is rather sloppy as far as exception goes # should explain better raise Exception("Can not create layout without existing context!") layout = pangocairo.create_layout(self.context) font_desc = pango.FontDescription(_font_desc) if size: font_desc.set_absolute_size(size * pango.SCALE) layout.set_font_description(font_desc) return layout
[ "def", "create_layout", "(", "self", ",", "size", "=", "None", ")", ":", "if", "not", "self", ".", "context", ":", "# TODO - this is rather sloppy as far as exception goes", "# should explain better", "raise", "Exception", "(", "\"Can not create layout without existing context!\"", ")", "layout", "=", "pangocairo", ".", "create_layout", "(", "self", ".", "context", ")", "font_desc", "=", "pango", ".", "FontDescription", "(", "_font_desc", ")", "if", "size", ":", "font_desc", ".", "set_absolute_size", "(", "size", "*", "pango", ".", "SCALE", ")", "layout", ".", "set_font_description", "(", "font_desc", ")", "return", "layout" ]
utility function to create a layout with the default font. Size and alignment parameters are shortcuts to the corresponding functions of the pango.Layout
[ "utility", "function", "to", "create", "layout", "with", "the", "default", "font", ".", "Size", "and", "alignment", "parameters", "are", "shortcuts", "to", "according", "functions", "of", "the", "pango", ".", "Layout" ]
python
train
44.133333
obriencj/python-javatools
javatools/classdiff.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/classdiff.py#L917-L940
def merge_code(left_code, right_code): """ { relative_line: ((left_abs_line, ((offset, op, args), ...)), (right_abs_line, ((offset, op, args), ...))), ... } """ data = dict() code_lines = (left_code and left_code.iter_code_by_lines()) or tuple() for abs_line, rel_line, dis in code_lines: data[rel_line] = [(abs_line, dis), None] code_lines = (right_code and right_code.iter_code_by_lines()) or tuple() for abs_line, rel_line, dis in code_lines: found = data.get(rel_line, None) if found is None: found = [None, (abs_line, dis)] data[rel_line] = found else: found[1] = (abs_line, dis) return data
[ "def", "merge_code", "(", "left_code", ",", "right_code", ")", ":", "data", "=", "dict", "(", ")", "code_lines", "=", "(", "left_code", "and", "left_code", ".", "iter_code_by_lines", "(", ")", ")", "or", "tuple", "(", ")", "for", "abs_line", ",", "rel_line", ",", "dis", "in", "code_lines", ":", "data", "[", "rel_line", "]", "=", "[", "(", "abs_line", ",", "dis", ")", ",", "None", "]", "code_lines", "=", "(", "right_code", "and", "right_code", ".", "iter_code_by_lines", "(", ")", ")", "or", "tuple", "(", ")", "for", "abs_line", ",", "rel_line", ",", "dis", "in", "code_lines", ":", "found", "=", "data", ".", "get", "(", "rel_line", ",", "None", ")", "if", "found", "is", "None", ":", "found", "=", "[", "None", ",", "(", "abs_line", ",", "dis", ")", "]", "data", "[", "rel_line", "]", "=", "found", "else", ":", "found", "[", "1", "]", "=", "(", "abs_line", ",", "dis", ")", "return", "data" ]
{ relative_line: ((left_abs_line, ((offset, op, args), ...)), (right_abs_line, ((offset, op, args), ...))), ... }
[ "{", "relative_line", ":", "((", "left_abs_line", "((", "offset", "op", "args", ")", "...", "))", "(", "right_abs_line", "((", "offset", "op", "args", ")", "...", ")))", "...", "}" ]
python
train
29.166667
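A hand-rolled sketch of the pairing that merge_code produces, using invented (abs_line, rel_line, dis) tuples in place of the real iter_code_by_lines() iterators.

    left_lines = [(10, 1, ['aload_0']), (11, 2, ['ireturn'])]
    right_lines = [(20, 1, ['aload_0']), (22, 3, ['iconst_1'])]

    data = {}
    for abs_line, rel_line, dis in left_lines:
        data[rel_line] = [(abs_line, dis), None]
    for abs_line, rel_line, dis in right_lines:
        found = data.get(rel_line)
        if found is None:
            data[rel_line] = [None, (abs_line, dis)]
        else:
            found[1] = (abs_line, dis)
    print(data)  # {1: [(10, ...), (20, ...)], 2: [(11, ...), None], 3: [None, (22, ...)]}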
mikedh/trimesh
trimesh/rendering.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/rendering.py#L334-L368
def light_to_gl(light, transform, lightN): """ Convert trimesh.scene.lighting.Light objects into args for gl.glLightFv calls Parameters -------------- light : trimesh.scene.lighting.Light Light object to be converted to GL transform : (4, 4) float Transformation matrix of light lightN : int Result of gl.GL_LIGHT0, gl.GL_LIGHT1, etc Returns -------------- multiarg : [tuple] List of args to pass to gl.glLightFv eg: [gl.glLightfb(*a) for a in multiarg] """ # convert color to opengl gl_color = vector_to_gl(light.color.astype(np.float64) / 255.0) assert len(gl_color) == 4 # cartesian translation from matrix gl_position = vector_to_gl(transform[:3, 3]) # create the different position and color arguments args = [(lightN, gl.GL_POSITION, gl_position), (lightN, gl.GL_SPECULAR, gl_color), (lightN, gl.GL_DIFFUSE, gl_color), (lightN, gl.GL_AMBIENT, gl_color)] return args
[ "def", "light_to_gl", "(", "light", ",", "transform", ",", "lightN", ")", ":", "# convert color to opengl", "gl_color", "=", "vector_to_gl", "(", "light", ".", "color", ".", "astype", "(", "np", ".", "float64", ")", "/", "255.0", ")", "assert", "len", "(", "gl_color", ")", "==", "4", "# cartesian translation from matrix", "gl_position", "=", "vector_to_gl", "(", "transform", "[", ":", "3", ",", "3", "]", ")", "# create the different position and color arguments", "args", "=", "[", "(", "lightN", ",", "gl", ".", "GL_POSITION", ",", "gl_position", ")", ",", "(", "lightN", ",", "gl", ".", "GL_SPECULAR", ",", "gl_color", ")", ",", "(", "lightN", ",", "gl", ".", "GL_DIFFUSE", ",", "gl_color", ")", ",", "(", "lightN", ",", "gl", ".", "GL_AMBIENT", ",", "gl_color", ")", "]", "return", "args" ]
Convert trimesh.scene.lighting.Light objects into args for gl.glLightFv calls Parameters -------------- light : trimesh.scene.lighting.Light Light object to be converted to GL transform : (4, 4) float Transformation matrix of light lightN : int Result of gl.GL_LIGHT0, gl.GL_LIGHT1, etc Returns -------------- multiarg : [tuple] List of args to pass to gl.glLightFv eg: [gl.glLightfb(*a) for a in multiarg]
[ "Convert", "trimesh", ".", "scene", ".", "lighting", ".", "Light", "objects", "into", "args", "for", "gl", ".", "glLightFv", "calls" ]
python
train
28.257143
Cog-Creators/Red-Lavalink
lavalink/player_manager.py
https://github.com/Cog-Creators/Red-Lavalink/blob/5b3fc6eb31ee5db8bd2b633a523cf69749957111/lavalink/player_manager.py#L285-L295
async def pause(self, pause: bool = True): """ Pauses the current song. Parameters ---------- pause : bool Set to ``False`` to resume. """ self._paused = pause await self.node.pause(self.channel.guild.id, pause)
[ "async", "def", "pause", "(", "self", ",", "pause", ":", "bool", "=", "True", ")", ":", "self", ".", "_paused", "=", "pause", "await", "self", ".", "node", ".", "pause", "(", "self", ".", "channel", ".", "guild", ".", "id", ",", "pause", ")" ]
Pauses the current song. Parameters ---------- pause : bool Set to ``False`` to resume.
[ "Pauses", "the", "current", "song", "." ]
python
train
25.272727
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/scene/cameras/arcball.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/scene/cameras/arcball.py#L65-L69
def _rotate_tr(self): """Rotate the transformation matrix based on camera parameters""" rot, x, y, z = self._quaternion.get_axis_angle() up, forward, right = self._get_dim_vectors() self.transform.rotate(180 * rot / np.pi, (x, z, y))
[ "def", "_rotate_tr", "(", "self", ")", ":", "rot", ",", "x", ",", "y", ",", "z", "=", "self", ".", "_quaternion", ".", "get_axis_angle", "(", ")", "up", ",", "forward", ",", "right", "=", "self", ".", "_get_dim_vectors", "(", ")", "self", ".", "transform", ".", "rotate", "(", "180", "*", "rot", "/", "np", ".", "pi", ",", "(", "x", ",", "z", ",", "y", ")", ")" ]
Rotate the transformation matrix based on camera parameters
[ "Rotate", "the", "transformation", "matrix", "based", "on", "camera", "parameters" ]
python
train
52.2
raymontag/kppy
kppy/database.py
https://github.com/raymontag/kppy/blob/a43f1fff7d49da1da4b3d8628a1b3ebbaf47f43a/kppy/database.py#L846-L859
def _cbc_decrypt(self, final_key, crypted_content): """This method decrypts the database""" # Just decrypt the content with the created key aes = AES.new(final_key, AES.MODE_CBC, self._enc_iv) decrypted_content = aes.decrypt(crypted_content) padding = decrypted_content[-1] if sys.version > '3': padding = decrypted_content[-1] else: padding = ord(decrypted_content[-1]) decrypted_content = decrypted_content[:len(decrypted_content)-padding] return decrypted_content
[ "def", "_cbc_decrypt", "(", "self", ",", "final_key", ",", "crypted_content", ")", ":", "# Just decrypt the content with the created key", "aes", "=", "AES", ".", "new", "(", "final_key", ",", "AES", ".", "MODE_CBC", ",", "self", ".", "_enc_iv", ")", "decrypted_content", "=", "aes", ".", "decrypt", "(", "crypted_content", ")", "padding", "=", "decrypted_content", "[", "-", "1", "]", "if", "sys", ".", "version", ">", "'3'", ":", "padding", "=", "decrypted_content", "[", "-", "1", "]", "else", ":", "padding", "=", "ord", "(", "decrypted_content", "[", "-", "1", "]", ")", "decrypted_content", "=", "decrypted_content", "[", ":", "len", "(", "decrypted_content", ")", "-", "padding", "]", "return", "decrypted_content" ]
This method decrypts the database
[ "This", "method", "decrypts", "the", "database" ]
python
train
39.928571
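A small sketch of the final padding-strip step in _cbc_decrypt (the Python 3 branch); the decrypted bytes below are invented rather than produced by AES.

    decrypted = b'secret db' + bytes([7]) * 7   # 9 plaintext bytes + 7 padding bytes of value 7
    padding = decrypted[-1]                     # indexing bytes on Python 3 yields an int: 7
    plaintext = decrypted[:len(decrypted) - padding]
    print(plaintext)                            # b'secret db'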
letuananh/chirptext
chirptext/texttaglib.py
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/texttaglib.py#L603-L614
def add_sent(self, sent_obj): ''' Add a ttl.Sentence object to this document ''' if sent_obj is None: raise Exception("Sentence object cannot be None") elif sent_obj.ID is None: # if sentID is None, create a new ID sent_obj.ID = next(self.__idgen) elif self.has_id(sent_obj.ID): raise Exception("Sentence ID {} exists".format(sent_obj.ID)) self.__sent_map[sent_obj.ID] = sent_obj self.__sents.append(sent_obj) return sent_obj
[ "def", "add_sent", "(", "self", ",", "sent_obj", ")", ":", "if", "sent_obj", "is", "None", ":", "raise", "Exception", "(", "\"Sentence object cannot be None\"", ")", "elif", "sent_obj", ".", "ID", "is", "None", ":", "# if sentID is None, create a new ID", "sent_obj", ".", "ID", "=", "next", "(", "self", ".", "__idgen", ")", "elif", "self", ".", "has_id", "(", "sent_obj", ".", "ID", ")", ":", "raise", "Exception", "(", "\"Sentence ID {} exists\"", ".", "format", "(", "sent_obj", ".", "ID", ")", ")", "self", ".", "__sent_map", "[", "sent_obj", ".", "ID", "]", "=", "sent_obj", "self", ".", "__sents", ".", "append", "(", "sent_obj", ")", "return", "sent_obj" ]
Add a ttl.Sentence object to this document
[ "Add", "a", "ttl", ".", "Sentence", "object", "to", "this", "document" ]
python
train
43.166667
klen/muffin-session
muffin_session.py
https://github.com/klen/muffin-session/blob/f1d14d12b7d09d8cc40be14b0dfa0b1e2f4ae8e9/muffin_session.py#L156-L166
def save(self, set_cookie, **params): """Update cookies if the session has been changed.""" if set(self.store.items()) ^ set(self.items()): value = dict(self.items()) value = json.dumps(value) value = self.encrypt(value) if not isinstance(value, str): value = value.encode(self.encoding) set_cookie(self.key, value, **self.params) return True return False
[ "def", "save", "(", "self", ",", "set_cookie", ",", "*", "*", "params", ")", ":", "if", "set", "(", "self", ".", "store", ".", "items", "(", ")", ")", "^", "set", "(", "self", ".", "items", "(", ")", ")", ":", "value", "=", "dict", "(", "self", ".", "items", "(", ")", ")", "value", "=", "json", ".", "dumps", "(", "value", ")", "value", "=", "self", ".", "encrypt", "(", "value", ")", "if", "not", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "value", ".", "encode", "(", "self", ".", "encoding", ")", "set_cookie", "(", "self", ".", "key", ",", "value", ",", "*", "*", "self", ".", "params", ")", "return", "True", "return", "False" ]
Update cookies if the session has been changed.
[ "Update", "cookies", "if", "the", "session", "has", "been", "changed", "." ]
python
train
41.545455
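A tiny illustration of the change-detection trick used in save() above (symmetric difference of item sets); the sample session dicts are assumptions.

    stored = {'user': 'alice', 'theme': 'dark'}
    current = {'user': 'alice', 'theme': 'light'}
    changed = bool(set(stored.items()) ^ set(current.items()))
    print(changed)  # True, because the 'theme' value differs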
iotile/coretools
iotilecore/iotile/core/hw/transport/server/standard.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/server/standard.py#L198-L225
async def teardown_client(self, client_id): """Release all resources held by a client. This method must be called and awaited whenever a client is disconnected. It ensures that all of the client's resources are properly released and any devices they have connected to are disconnected cleanly. Args: client_id (str): The client that we should tear down. Raises: ArgumentError: The client_id is unknown. """ client_info = self._client_info(client_id) self.adapter.remove_monitor(client_info['monitor']) conns = client_info['connections'] for conn_string, conn_id in conns.items(): try: self._logger.debug("Disconnecting client %s from conn %s at teardown", client_id, conn_string) await self.adapter.disconnect(conn_id) except: #pylint:disable=bare-except; This is a finalization method that should not raise unexpectedly self._logger.exception("Error disconnecting device during teardown_client: conn_string=%s", conn_string) del self._clients[client_id]
[ "async", "def", "teardown_client", "(", "self", ",", "client_id", ")", ":", "client_info", "=", "self", ".", "_client_info", "(", "client_id", ")", "self", ".", "adapter", ".", "remove_monitor", "(", "client_info", "[", "'monitor'", "]", ")", "conns", "=", "client_info", "[", "'connections'", "]", "for", "conn_string", ",", "conn_id", "in", "conns", ".", "items", "(", ")", ":", "try", ":", "self", ".", "_logger", ".", "debug", "(", "\"Disconnecting client %s from conn %s at teardown\"", ",", "client_id", ",", "conn_string", ")", "await", "self", ".", "adapter", ".", "disconnect", "(", "conn_id", ")", "except", ":", "#pylint:disable=bare-except; This is a finalization method that should not raise unexpectedly", "self", ".", "_logger", ".", "exception", "(", "\"Error disconnecting device during teardown_client: conn_string=%s\"", ",", "conn_string", ")", "del", "self", ".", "_clients", "[", "client_id", "]" ]
Release all resources held by a client. This method must be called and awaited whenever a client is disconnected. It ensures that all of the client's resources are properly released and any devices they have connected to are disconnected cleanly. Args: client_id (str): The client that we should tear down. Raises: ArgumentError: The client_id is unknown.
[ "Release", "all", "resources", "held", "by", "a", "client", "." ]
python
train
40.571429
clalancette/pycdlib
pycdlib/udf.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/udf.py#L3042-L3080
def set_data_length(self, length): # type: (int) -> None ''' A method to set the length of the data that this UDF File Entry points to. Parameters: length - The new length for the data. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized') len_diff = length - self.info_len if len_diff > 0: # If we are increasing the length, update the last alloc_desc up # to the max of 0x3ffff800, and throw an exception if we overflow. new_len = self.alloc_descs[-1][0] + len_diff if new_len > 0x3ffff800: raise pycdlibexception.PyCdlibInvalidInput('Cannot increase the size of a UDF file beyond the current descriptor') self.alloc_descs[-1][0] = new_len elif len_diff < 0: # We are decreasing the length. It's possible we are removing one # or more alloc_descs, so run through the list updating all of the # descriptors and remove any we no longer need. len_left = length alloc_descs_needed = 0 index = 0 while len_left > 0: this_len = min(len_left, 0x3ffff800) alloc_descs_needed += 1 self.alloc_descs[index][0] = this_len index += 1 len_left -= this_len self.alloc_descs = self.alloc_descs[:alloc_descs_needed] self.info_len = length
[ "def", "set_data_length", "(", "self", ",", "length", ")", ":", "# type: (int) -> None", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'Directory Record not yet initialized'", ")", "len_diff", "=", "length", "-", "self", ".", "info_len", "if", "len_diff", ">", "0", ":", "# If we are increasing the length, update the last alloc_desc up", "# to the max of 0x3ffff800, and throw an exception if we overflow.", "new_len", "=", "self", ".", "alloc_descs", "[", "-", "1", "]", "[", "0", "]", "+", "len_diff", "if", "new_len", ">", "0x3ffff800", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'Cannot increase the size of a UDF file beyond the current descriptor'", ")", "self", ".", "alloc_descs", "[", "-", "1", "]", "[", "0", "]", "=", "new_len", "elif", "len_diff", "<", "0", ":", "# We are decreasing the length. It's possible we are removing one", "# or more alloc_descs, so run through the list updating all of the", "# descriptors and remove any we no longer need.", "len_left", "=", "length", "alloc_descs_needed", "=", "0", "index", "=", "0", "while", "len_left", ">", "0", ":", "this_len", "=", "min", "(", "len_left", ",", "0x3ffff800", ")", "alloc_descs_needed", "+=", "1", "self", ".", "alloc_descs", "[", "index", "]", "[", "0", "]", "=", "this_len", "index", "+=", "1", "len_left", "-=", "this_len", "self", ".", "alloc_descs", "=", "self", ".", "alloc_descs", "[", ":", "alloc_descs_needed", "]", "self", ".", "info_len", "=", "length" ]
A method to set the length of the data that this UDF File Entry points to. Parameters: length - The new length for the data. Returns: Nothing.
[ "A", "method", "to", "set", "the", "length", "of", "the", "data", "that", "this", "UDF", "File", "Entry", "points", "to", "." ]
python
train
39.641026
FutunnOpen/futuquant
futuquant/examples/TinyQuant/TinyQuantBase.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/TinyQuant/TinyQuantBase.py#L154-L159
def rsi(self, n, array=False): """RSI指标""" result = talib.RSI(self.close, n) if array: return result return result[-1]
[ "def", "rsi", "(", "self", ",", "n", ",", "array", "=", "False", ")", ":", "result", "=", "talib", ".", "RSI", "(", "self", ".", "close", ",", "n", ")", "if", "array", ":", "return", "result", "return", "result", "[", "-", "1", "]" ]
RSI indicator (RSI指标)
[ "RSI指标" ]
python
train
26.166667
alexhayes/django-toolkit
django_toolkit/network.py
https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/network.py#L10-L39
def cidr_notation(ip_address, netmask): """ Retrieve the cidr notation given an ip address and netmask. For example: cidr_notation('12.34.56.78', '255.255.255.248') Would return: 12.34.56.72/29 @see http://terminalmage.net/2012/06/10/how-to-find-out-the-cidr-notation-for-a-subne-given-an-ip-and-netmask/ @see http://www.aelius.com/njh/subnet_sheet.html """ try: inet_aton(ip_address) except: raise Exception("Invalid ip address '%s'" % ip_address) try: inet_aton(netmask) except: raise Exception("Invalid netmask '%s'" % netmask) ip_address_split = ip_address.split('.') netmask_split = netmask.split('.') # calculate network start net_start = [str(int(ip_address_split[x]) & int(netmask_split[x])) for x in range(0,4)] return '.'.join(net_start) + '/' + get_net_size(netmask_split)
[ "def", "cidr_notation", "(", "ip_address", ",", "netmask", ")", ":", "try", ":", "inet_aton", "(", "ip_address", ")", "except", ":", "raise", "Exception", "(", "\"Invalid ip address '%s'\"", "%", "ip_address", ")", "try", ":", "inet_aton", "(", "netmask", ")", "except", ":", "raise", "Exception", "(", "\"Invalid netmask '%s'\"", "%", "netmask", ")", "ip_address_split", "=", "ip_address", ".", "split", "(", "'.'", ")", "netmask_split", "=", "netmask", ".", "split", "(", "'.'", ")", "# calculate network start", "net_start", "=", "[", "str", "(", "int", "(", "ip_address_split", "[", "x", "]", ")", "&", "int", "(", "netmask_split", "[", "x", "]", ")", ")", "for", "x", "in", "range", "(", "0", ",", "4", ")", "]", "return", "'.'", ".", "join", "(", "net_start", ")", "+", "'/'", "+", "get_net_size", "(", "netmask_split", ")" ]
Retrieve the cidr notation given an ip address and netmask. For example: cidr_notation('12.34.56.78', '255.255.255.248') Would return: 12.34.56.72/29 @see http://terminalmage.net/2012/06/10/how-to-find-out-the-cidr-notation-for-a-subne-given-an-ip-and-netmask/ @see http://www.aelius.com/njh/subnet_sheet.html
[ "Retrieve", "the", "cidr", "notation", "given", "an", "ip", "address", "and", "netmask", ".", "For", "example", ":", "cidr_notation", "(", "12", ".", "34", ".", "56", ".", "78", "255", ".", "255", ".", "255", ".", "248", ")", "Would", "return", ":", "12", ".", "34", ".", "56", ".", "72", "/", "29" ]
python
train
30.333333
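The same conversion can be cross-checked with the standard library: the network start is the bitwise AND of address and mask, and the prefix length is the number of set bits in the mask. A short sketch (not part of django-toolkit), assuming Python 3's ipaddress module:

import ipaddress

net = ipaddress.ip_network('12.34.56.78/255.255.255.248', strict=False)
print(net.with_prefixlen)  # 12.34.56.72/29, matching the docstring example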
microhomie/microhomie
homie/device.py
https://github.com/microhomie/microhomie/blob/44de239e05a20026cf7425ee0fe1f10549288fbf/homie/device.py#L79-L95
def subscribe_topics(self): """subscribe to all registered device and node topics""" base = self.topic subscribe = self.mqtt.subscribe # device topics subscribe(b"/".join((base, b"$stats/interval/set"))) subscribe(b"/".join((self.settings.MQTT_BASE_TOPIC, b"$broadcast/#"))) # node topics nodes = self.nodes for node in nodes: for topic in node.subscribe: topic = b"/".join((base, topic)) # print('MQTT SUBSCRIBE: {}'.format(topic)) subscribe(topic) self.topic_callbacks[topic] = node.callback
[ "def", "subscribe_topics", "(", "self", ")", ":", "base", "=", "self", ".", "topic", "subscribe", "=", "self", ".", "mqtt", ".", "subscribe", "# device topics", "subscribe", "(", "b\"/\"", ".", "join", "(", "(", "base", ",", "b\"$stats/interval/set\"", ")", ")", ")", "subscribe", "(", "b\"/\"", ".", "join", "(", "(", "self", ".", "settings", ".", "MQTT_BASE_TOPIC", ",", "b\"$broadcast/#\"", ")", ")", ")", "# node topics", "nodes", "=", "self", ".", "nodes", "for", "node", "in", "nodes", ":", "for", "topic", "in", "node", ".", "subscribe", ":", "topic", "=", "b\"/\"", ".", "join", "(", "(", "base", ",", "topic", ")", ")", "# print('MQTT SUBSCRIBE: {}'.format(topic))", "subscribe", "(", "topic", ")", "self", ".", "topic_callbacks", "[", "topic", "]", "=", "node", ".", "callback" ]
subscribe to all registered device and node topics
[ "subscribe", "to", "all", "registered", "device", "and", "node", "topics" ]
python
train
36.882353
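For context, the byte-string joins above build topics of the usual Homie shape. A quick sketch with hypothetical values (the device base topic and node subtopic are made up; only the join logic is taken from the record):

base = b"homie/super-car"   # hypothetical device base topic
mqtt_base = b"homie"        # hypothetical settings.MQTT_BASE_TOPIC

print(b"/".join((base, b"$stats/interval/set")))  # b'homie/super-car/$stats/interval/set'
print(b"/".join((mqtt_base, b"$broadcast/#")))    # b'homie/$broadcast/#'
print(b"/".join((base, b"light/power/set")))      # a node-provided subtopic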
qubole/qds-sdk-py
qds_sdk/app.py
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/app.py#L143-L148
def stop(cls, app_id): """ Stops an app by issuing a PUT request to the /apps/ID/stop endpoint. """ conn = Qubole.agent() return conn.put(cls.element_path(app_id) + "/stop")
[ "def", "stop", "(", "cls", ",", "app_id", ")", ":", "conn", "=", "Qubole", ".", "agent", "(", ")", "return", "conn", ".", "put", "(", "cls", ".", "element_path", "(", "app_id", ")", "+", "\"/stop\"", ")" ]
Stops an app by issuing a PUT request to the /apps/ID/stop endpoint.
[ "Stops", "an", "app", "by", "issuing", "a", "PUT", "request", "to", "the", "/", "apps", "/", "ID", "/", "stop", "endpoint", "." ]
python
train
34.666667
thebigmunch/google-music
src/google_music/clients/mobileclient.py
https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L798-L832
def playlist_create( self, name, description='', *, make_public=False, songs=None ): """Create a playlist. Parameters: name (str): Name to give the playlist. description (str): Description to give the playlist. make_public (bool, Optional): If ``True`` and account has a subscription, make playlist public. Default: ``False`` songs (list, Optional): A list of song dicts to add to the playlist. Returns: dict: Playlist information. """ share_state = 'PUBLIC' if make_public else 'PRIVATE' playlist = self._call( mc_calls.PlaylistsCreate, name, description, share_state ).body if songs: playlist = self.playlist_songs_add(songs, playlist) return playlist
[ "def", "playlist_create", "(", "self", ",", "name", ",", "description", "=", "''", ",", "*", ",", "make_public", "=", "False", ",", "songs", "=", "None", ")", ":", "share_state", "=", "'PUBLIC'", "if", "make_public", "else", "'PRIVATE'", "playlist", "=", "self", ".", "_call", "(", "mc_calls", ".", "PlaylistsCreate", ",", "name", ",", "description", ",", "share_state", ")", ".", "body", "if", "songs", ":", "playlist", "=", "self", ".", "playlist_songs_add", "(", "songs", ",", "playlist", ")", "return", "playlist" ]
Create a playlist. Parameters: name (str): Name to give the playlist. description (str): Description to give the playlist. make_public (bool, Optional): If ``True`` and account has a subscription, make playlist public. Default: ``False`` songs (list, Optional): A list of song dicts to add to the playlist. Returns: dict: Playlist information.
[ "Create", "a", "playlist", "." ]
python
train
19.828571
materialsvirtuallab/monty
monty/serialization.py
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/serialization.py#L47-L83
def loadfn(fn, *args, **kwargs): """ Loads json/yaml/msgpack directly from a filename instead of a File-like object. For YAML, ruamel.yaml must be installed. The file type is automatically detected. YAML is assumed if the filename contains "yaml" (lower or upper case). Otherwise, json is always assumed. Args: fn (str/Path): filename or pathlib.Path. \*args: Any of the args supported by json/yaml.load. \*\*kwargs: Any of the kwargs supported by json/yaml.load. Returns: (object) Result of json/yaml/msgpack.load. """ if "mpk" in os.path.basename(fn).lower(): if msgpack is None: raise RuntimeError( "Loading of message pack files is not " "possible as msgpack-python is not installed.") if "object_hook" not in kwargs: kwargs["object_hook"] = object_hook with zopen(fn, "rb") as fp: return msgpack.load(fp, *args, **kwargs) else: with zopen(fn) as fp: if "yaml" in os.path.basename(fn).lower(): if yaml is None: raise RuntimeError("Loading of YAML files is not " "possible as ruamel.yaml is not installed.") if "Loader" not in kwargs: kwargs["Loader"] = Loader return yaml.load(fp, *args, **kwargs) else: if "cls" not in kwargs: kwargs["cls"] = MontyDecoder return json.load(fp, *args, **kwargs)
[ "def", "loadfn", "(", "fn", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "\"mpk\"", "in", "os", ".", "path", ".", "basename", "(", "fn", ")", ".", "lower", "(", ")", ":", "if", "msgpack", "is", "None", ":", "raise", "RuntimeError", "(", "\"Loading of message pack files is not \"", "\"possible as msgpack-python is not installed.\"", ")", "if", "\"object_hook\"", "not", "in", "kwargs", ":", "kwargs", "[", "\"object_hook\"", "]", "=", "object_hook", "with", "zopen", "(", "fn", ",", "\"rb\"", ")", "as", "fp", ":", "return", "msgpack", ".", "load", "(", "fp", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "with", "zopen", "(", "fn", ")", "as", "fp", ":", "if", "\"yaml\"", "in", "os", ".", "path", ".", "basename", "(", "fn", ")", ".", "lower", "(", ")", ":", "if", "yaml", "is", "None", ":", "raise", "RuntimeError", "(", "\"Loading of YAML files is not \"", "\"possible as ruamel.yaml is not installed.\"", ")", "if", "\"Loader\"", "not", "in", "kwargs", ":", "kwargs", "[", "\"Loader\"", "]", "=", "Loader", "return", "yaml", ".", "load", "(", "fp", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "if", "\"cls\"", "not", "in", "kwargs", ":", "kwargs", "[", "\"cls\"", "]", "=", "MontyDecoder", "return", "json", ".", "load", "(", "fp", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Loads json/yaml/msgpack directly from a filename instead of a File-like object. For YAML, ruamel.yaml must be installed. The file type is automatically detected. YAML is assumed if the filename contains "yaml" (lower or upper case). Otherwise, json is always assumed. Args: fn (str/Path): filename or pathlib.Path. \*args: Any of the args supported by json/yaml.load. \*\*kwargs: Any of the kwargs supported by json/yaml.load. Returns: (object) Result of json/yaml/msgpack.load.
[ "Loads", "json", "/", "yaml", "/", "msgpack", "directly", "from", "a", "filename", "instead", "of", "a", "File", "-", "like", "object", ".", "For", "YAML", "ruamel", ".", "yaml", "must", "be", "installed", ".", "The", "file", "type", "is", "automatically", "detected", ".", "YAML", "is", "assumed", "if", "the", "filename", "contains", "yaml", "(", "lower", "or", "upper", "case", ")", ".", "Otherwise", "json", "is", "always", "assumed", "." ]
python
train
41.594595
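A hedged usage sketch for the loader above: the file type is inferred from the filename, so one call covers JSON, YAML and msgpack (gzipped variants are handled by zopen). The filenames are placeholders, and YAML/msgpack support depends on the optional ruamel.yaml and msgpack-python packages being installed.

from monty.serialization import loadfn

settings = loadfn("settings.json")  # json.load with MontyDecoder by default
config = loadfn("config.yaml")      # routed to ruamel.yaml because of the name
payload = loadfn("cache.mpk")       # routed to msgpack because of the name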
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Variables/__init__.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Variables/__init__.py#L160-L227
def Update(self, env, args=None): """ Update an environment with the option variables. env - the environment to update. """ values = {} # first set the defaults: for option in self.options: if not option.default is None: values[option.key] = option.default # next set the value specified in the options file for filename in self.files: if os.path.exists(filename): dir = os.path.split(os.path.abspath(filename))[0] if dir: sys.path.insert(0, dir) try: values['__name__'] = filename with open(filename, 'r') as f: contents = f.read() exec(contents, {}, values) finally: if dir: del sys.path[0] del values['__name__'] # set the values specified on the command line if args is None: args = self.args for arg, value in args.items(): added = False for option in self.options: if arg in list(option.aliases) + [ option.key ]: values[option.key] = value added = True if not added: self.unknown[arg] = value # put the variables in the environment: # (don't copy over variables that are not declared as options) for option in self.options: try: env[option.key] = values[option.key] except KeyError: pass # Call the convert functions: for option in self.options: if option.converter and option.key in values: value = env.subst('${%s}'%option.key) try: try: env[option.key] = option.converter(value) except TypeError: env[option.key] = option.converter(value, env) except ValueError as x: raise SCons.Errors.UserError('Error converting option: %s\n%s'%(option.key, x)) # Finally validate the values: for option in self.options: if option.validator and option.key in values: option.validator(option.key, env.subst('${%s}'%option.key), env)
[ "def", "Update", "(", "self", ",", "env", ",", "args", "=", "None", ")", ":", "values", "=", "{", "}", "# first set the defaults:", "for", "option", "in", "self", ".", "options", ":", "if", "not", "option", ".", "default", "is", "None", ":", "values", "[", "option", ".", "key", "]", "=", "option", ".", "default", "# next set the value specified in the options file", "for", "filename", "in", "self", ".", "files", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "dir", "=", "os", ".", "path", ".", "split", "(", "os", ".", "path", ".", "abspath", "(", "filename", ")", ")", "[", "0", "]", "if", "dir", ":", "sys", ".", "path", ".", "insert", "(", "0", ",", "dir", ")", "try", ":", "values", "[", "'__name__'", "]", "=", "filename", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "contents", "=", "f", ".", "read", "(", ")", "exec", "(", "contents", ",", "{", "}", ",", "values", ")", "finally", ":", "if", "dir", ":", "del", "sys", ".", "path", "[", "0", "]", "del", "values", "[", "'__name__'", "]", "# set the values specified on the command line", "if", "args", "is", "None", ":", "args", "=", "self", ".", "args", "for", "arg", ",", "value", "in", "args", ".", "items", "(", ")", ":", "added", "=", "False", "for", "option", "in", "self", ".", "options", ":", "if", "arg", "in", "list", "(", "option", ".", "aliases", ")", "+", "[", "option", ".", "key", "]", ":", "values", "[", "option", ".", "key", "]", "=", "value", "added", "=", "True", "if", "not", "added", ":", "self", ".", "unknown", "[", "arg", "]", "=", "value", "# put the variables in the environment:", "# (don't copy over variables that are not declared as options)", "for", "option", "in", "self", ".", "options", ":", "try", ":", "env", "[", "option", ".", "key", "]", "=", "values", "[", "option", ".", "key", "]", "except", "KeyError", ":", "pass", "# Call the convert functions:", "for", "option", "in", "self", ".", "options", ":", "if", "option", ".", "converter", "and", "option", ".", "key", "in", "values", ":", "value", "=", "env", ".", "subst", "(", "'${%s}'", "%", "option", ".", "key", ")", "try", ":", "try", ":", "env", "[", "option", ".", "key", "]", "=", "option", ".", "converter", "(", "value", ")", "except", "TypeError", ":", "env", "[", "option", ".", "key", "]", "=", "option", ".", "converter", "(", "value", ",", "env", ")", "except", "ValueError", "as", "x", ":", "raise", "SCons", ".", "Errors", ".", "UserError", "(", "'Error converting option: %s\\n%s'", "%", "(", "option", ".", "key", ",", "x", ")", ")", "# Finally validate the values:", "for", "option", "in", "self", ".", "options", ":", "if", "option", ".", "validator", "and", "option", ".", "key", "in", "values", ":", "option", ".", "validator", "(", "option", ".", "key", ",", "env", ".", "subst", "(", "'${%s}'", "%", "option", ".", "key", ")", ",", "env", ")" ]
Update an environment with the option variables. env - the environment to update.
[ "Update", "an", "environment", "with", "the", "option", "variables", "." ]
python
train
34.794118
CalebBell/fluids
fluids/friction.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/friction.py#L1303-L1346
def Sonnad_Goudar_2006(Re, eD): r'''Calculates Darcy friction factor using the method in Sonnad and Goudar (2006) [2]_ as shown in [1]_. .. math:: \frac{1}{\sqrt{f_d}} = 0.8686\ln\left(\frac{0.4587Re}{S^{S/(S+1)}}\right) .. math:: S = 0.1240\times\frac{\epsilon}{D}\times Re + \ln(0.4587Re) Parameters ---------- Re : float Reynolds number, [-] eD : float Relative roughness, [-] Returns ------- fd : float Darcy friction factor [-] Notes ----- Range is 4E3 <= Re <= 1E8; 1E-6 <= eD <= 5E-2 Examples -------- >>> Sonnad_Goudar_2006(1E5, 1E-4) 0.0185971269898162 References ---------- .. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence and Combustion 90, no. 1 (January 1, 2013): 1-27. doi:10.1007/s10494-012-9419-7 .. [2] Travis, Quentin B., and Larry W. Mays."Relationship between Hazen-William and Colebrook-White Roughness Values." Journal of Hydraulic Engineering 133, no. 11 (November 2007): 1270-73. doi:10.1061/(ASCE)0733-9429(2007)133:11(1270). ''' S = 0.124*eD*Re + log(0.4587*Re) return (.8686*log(.4587*Re/S**(S/(S+1))))**-2
[ "def", "Sonnad_Goudar_2006", "(", "Re", ",", "eD", ")", ":", "S", "=", "0.124", "*", "eD", "*", "Re", "+", "log", "(", "0.4587", "*", "Re", ")", "return", "(", ".8686", "*", "log", "(", ".4587", "*", "Re", "/", "S", "**", "(", "S", "/", "(", "S", "+", "1", ")", ")", ")", ")", "**", "-", "2" ]
r'''Calculates Darcy friction factor using the method in Sonnad and Goudar (2006) [2]_ as shown in [1]_. .. math:: \frac{1}{\sqrt{f_d}} = 0.8686\ln\left(\frac{0.4587Re}{S^{S/(S+1)}}\right) .. math:: S = 0.1240\times\frac{\epsilon}{D}\times Re + \ln(0.4587Re) Parameters ---------- Re : float Reynolds number, [-] eD : float Relative roughness, [-] Returns ------- fd : float Darcy friction factor [-] Notes ----- Range is 4E3 <= Re <= 1E8; 1E-6 <= eD <= 5E-2 Examples -------- >>> Sonnad_Goudar_2006(1E5, 1E-4) 0.0185971269898162 References ---------- .. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence and Combustion 90, no. 1 (January 1, 2013): 1-27. doi:10.1007/s10494-012-9419-7 .. [2] Travis, Quentin B., and Larry W. Mays."Relationship between Hazen-William and Colebrook-White Roughness Values." Journal of Hydraulic Engineering 133, no. 11 (November 2007): 1270-73. doi:10.1061/(ASCE)0733-9429(2007)133:11(1270).
[ "r", "Calculates", "Darcy", "friction", "factor", "using", "the", "method", "in", "Sonnad", "and", "Goudar", "(", "2006", ")", "[", "2", "]", "_", "as", "shown", "in", "[", "1", "]", "_", "." ]
python
train
29
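The two-line correlation above is easy to verify in isolation; this standalone sketch reproduces the docstring's example value (all names are local to the sketch).

from math import log

def sonnad_goudar_2006(Re, eD):
    # S = 0.1240*(eps/D)*Re + ln(0.4587*Re); 1/sqrt(fd) = 0.8686*ln(0.4587*Re / S**(S/(S+1)))
    S = 0.124*eD*Re + log(0.4587*Re)
    return (0.8686*log(0.4587*Re/S**(S/(S + 1))))**-2

print(sonnad_goudar_2006(1E5, 1E-4))  # ~0.0186, matching the docstring example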
mikicz/arca
arca/backend/docker.py
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L412-L436
def build_image_from_inherited_image(self, image_name: str, image_tag: str, repo_path: Path, requirements_option: RequirementsOptions): """ Builds a image with installed requirements from the inherited image. (Or just tags the image if there are no requirements.) See :meth:`build_image` for parameters descriptions. :rtype: docker.models.images.Image """ base_name, base_tag = self.get_inherit_image() if requirements_option == RequirementsOptions.no_requirements: image = self.get_image(base_name, base_tag) image.tag(image_name, image_tag) # so ``build_image`` doesn't have to be called next time return image dockerfile = self.get_install_requirements_dockerfile(base_name, base_tag, repo_path, requirements_option) self.get_or_build_image(image_name, image_tag, dockerfile, build_context=repo_path.parent, pull=False) return self.get_image(image_name, image_tag)
[ "def", "build_image_from_inherited_image", "(", "self", ",", "image_name", ":", "str", ",", "image_tag", ":", "str", ",", "repo_path", ":", "Path", ",", "requirements_option", ":", "RequirementsOptions", ")", ":", "base_name", ",", "base_tag", "=", "self", ".", "get_inherit_image", "(", ")", "if", "requirements_option", "==", "RequirementsOptions", ".", "no_requirements", ":", "image", "=", "self", ".", "get_image", "(", "base_name", ",", "base_tag", ")", "image", ".", "tag", "(", "image_name", ",", "image_tag", ")", "# so ``build_image`` doesn't have to be called next time", "return", "image", "dockerfile", "=", "self", ".", "get_install_requirements_dockerfile", "(", "base_name", ",", "base_tag", ",", "repo_path", ",", "requirements_option", ")", "self", ".", "get_or_build_image", "(", "image_name", ",", "image_tag", ",", "dockerfile", ",", "build_context", "=", "repo_path", ".", "parent", ",", "pull", "=", "False", ")", "return", "self", ".", "get_image", "(", "image_name", ",", "image_tag", ")" ]
Builds an image with installed requirements from the inherited image. (Or just tags the image if there are no requirements.) See :meth:`build_image` for parameter descriptions. :rtype: docker.models.images.Image
[ "Builds", "a", "image", "with", "installed", "requirements", "from", "the", "inherited", "image", ".", "(", "Or", "just", "tags", "the", "image", "if", "there", "are", "no", "requirements", ".", ")" ]
python
train
42.32
domainaware/parsedmarc
parsedmarc/elastic.py
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/elastic.py#L272-L384
def save_aggregate_report_to_elasticsearch(aggregate_report, index_suffix=None, monthly_indexes=False): """ Saves a parsed DMARC aggregate report to ElasticSearch Args: aggregate_report (OrderedDict): A parsed forensic report index_suffix (str): The suffix of the name of the index to save to monthly_indexes (bool): Use monthly indexes instead of daily indexes Raises: AlreadySaved """ logger.debug("Saving aggregate report to Elasticsearch") aggregate_report = aggregate_report.copy() metadata = aggregate_report["report_metadata"] org_name = metadata["org_name"] report_id = metadata["report_id"] domain = aggregate_report["policy_published"]["domain"] begin_date = human_timestamp_to_datetime(metadata["begin_date"]) end_date = human_timestamp_to_datetime(metadata["end_date"]) begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%S") end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%S") if monthly_indexes: index_date = begin_date.strftime("%Y-%m") else: index_date = begin_date.strftime("%Y-%m-%d") aggregate_report["begin_date"] = begin_date aggregate_report["end_date"] = end_date date_range = [aggregate_report["begin_date"], aggregate_report["end_date"]] org_name_query = Q(dict(match=dict(org_name=org_name))) report_id_query = Q(dict(match=dict(report_id=report_id))) domain_query = Q(dict(match={"published_policy.domain": domain})) begin_date_query = Q(dict(match=dict(date_range=begin_date))) end_date_query = Q(dict(match=dict(date_range=end_date))) search = Search(index="dmarc_aggregate*") query = org_name_query & report_id_query & domain_query query = query & begin_date_query & end_date_query search.query = query existing = search.execute() if len(existing) > 0: raise AlreadySaved("An aggregate report ID {0} from {1} about {2} " "with a date range of {3} UTC to {4} UTC already " "exists in " "Elasticsearch".format(report_id, org_name, domain, begin_date_human, end_date_human)) published_policy = _PublishedPolicy( domain=aggregate_report["policy_published"]["domain"], adkim=aggregate_report["policy_published"]["adkim"], aspf=aggregate_report["policy_published"]["aspf"], p=aggregate_report["policy_published"]["p"], sp=aggregate_report["policy_published"]["sp"], pct=aggregate_report["policy_published"]["pct"], fo=aggregate_report["policy_published"]["fo"] ) for record in aggregate_report["records"]: agg_doc = _AggregateReportDoc( xml_schemea=aggregate_report["xml_schema"], org_name=metadata["org_name"], org_email=metadata["org_email"], org_extra_contact_info=metadata["org_extra_contact_info"], report_id=metadata["report_id"], date_range=date_range, errors=metadata["errors"], published_policy=published_policy, source_ip_address=record["source"]["ip_address"], source_country=record["source"]["country"], source_reverse_dns=record["source"]["reverse_dns"], source_base_domain=record["source"]["base_domain"], message_count=record["count"], disposition=record["policy_evaluated"]["disposition"], dkim_aligned=record["policy_evaluated"]["dkim"] == "pass", spf_aligned=record["policy_evaluated"]["spf"] == "pass", header_from=record["identifiers"]["header_from"], envelope_from=record["identifiers"]["envelope_from"], envelope_to=record["identifiers"]["envelope_to"] ) for override in record["policy_evaluated"]["policy_override_reasons"]: agg_doc.add_policy_override(type_=override["type"], comment=override["comment"]) for dkim_result in record["auth_results"]["dkim"]: agg_doc.add_dkim_result(domain=dkim_result["domain"], selector=dkim_result["selector"], result=dkim_result["result"]) for spf_result in 
record["auth_results"]["spf"]: agg_doc.add_spf_result(domain=spf_result["domain"], scope=spf_result["scope"], result=spf_result["result"]) index = "dmarc_aggregate" if index_suffix: index = "{0}_{1}".format(index, index_suffix) index = "{0}-{1}".format(index, index_date) create_indexes([index]) agg_doc.meta.index = index try: agg_doc.save() except Exception as e: raise ElasticsearchError( "Elasticsearch error: {0}".format(e.__str__()))
[ "def", "save_aggregate_report_to_elasticsearch", "(", "aggregate_report", ",", "index_suffix", "=", "None", ",", "monthly_indexes", "=", "False", ")", ":", "logger", ".", "debug", "(", "\"Saving aggregate report to Elasticsearch\"", ")", "aggregate_report", "=", "aggregate_report", ".", "copy", "(", ")", "metadata", "=", "aggregate_report", "[", "\"report_metadata\"", "]", "org_name", "=", "metadata", "[", "\"org_name\"", "]", "report_id", "=", "metadata", "[", "\"report_id\"", "]", "domain", "=", "aggregate_report", "[", "\"policy_published\"", "]", "[", "\"domain\"", "]", "begin_date", "=", "human_timestamp_to_datetime", "(", "metadata", "[", "\"begin_date\"", "]", ")", "end_date", "=", "human_timestamp_to_datetime", "(", "metadata", "[", "\"end_date\"", "]", ")", "begin_date_human", "=", "begin_date", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", "end_date_human", "=", "end_date", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", "if", "monthly_indexes", ":", "index_date", "=", "begin_date", ".", "strftime", "(", "\"%Y-%m\"", ")", "else", ":", "index_date", "=", "begin_date", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "aggregate_report", "[", "\"begin_date\"", "]", "=", "begin_date", "aggregate_report", "[", "\"end_date\"", "]", "=", "end_date", "date_range", "=", "[", "aggregate_report", "[", "\"begin_date\"", "]", ",", "aggregate_report", "[", "\"end_date\"", "]", "]", "org_name_query", "=", "Q", "(", "dict", "(", "match", "=", "dict", "(", "org_name", "=", "org_name", ")", ")", ")", "report_id_query", "=", "Q", "(", "dict", "(", "match", "=", "dict", "(", "report_id", "=", "report_id", ")", ")", ")", "domain_query", "=", "Q", "(", "dict", "(", "match", "=", "{", "\"published_policy.domain\"", ":", "domain", "}", ")", ")", "begin_date_query", "=", "Q", "(", "dict", "(", "match", "=", "dict", "(", "date_range", "=", "begin_date", ")", ")", ")", "end_date_query", "=", "Q", "(", "dict", "(", "match", "=", "dict", "(", "date_range", "=", "end_date", ")", ")", ")", "search", "=", "Search", "(", "index", "=", "\"dmarc_aggregate*\"", ")", "query", "=", "org_name_query", "&", "report_id_query", "&", "domain_query", "query", "=", "query", "&", "begin_date_query", "&", "end_date_query", "search", ".", "query", "=", "query", "existing", "=", "search", ".", "execute", "(", ")", "if", "len", "(", "existing", ")", ">", "0", ":", "raise", "AlreadySaved", "(", "\"An aggregate report ID {0} from {1} about {2} \"", "\"with a date range of {3} UTC to {4} UTC already \"", "\"exists in \"", "\"Elasticsearch\"", ".", "format", "(", "report_id", ",", "org_name", ",", "domain", ",", "begin_date_human", ",", "end_date_human", ")", ")", "published_policy", "=", "_PublishedPolicy", "(", "domain", "=", "aggregate_report", "[", "\"policy_published\"", "]", "[", "\"domain\"", "]", ",", "adkim", "=", "aggregate_report", "[", "\"policy_published\"", "]", "[", "\"adkim\"", "]", ",", "aspf", "=", "aggregate_report", "[", "\"policy_published\"", "]", "[", "\"aspf\"", "]", ",", "p", "=", "aggregate_report", "[", "\"policy_published\"", "]", "[", "\"p\"", "]", ",", "sp", "=", "aggregate_report", "[", "\"policy_published\"", "]", "[", "\"sp\"", "]", ",", "pct", "=", "aggregate_report", "[", "\"policy_published\"", "]", "[", "\"pct\"", "]", ",", "fo", "=", "aggregate_report", "[", "\"policy_published\"", "]", "[", "\"fo\"", "]", ")", "for", "record", "in", "aggregate_report", "[", "\"records\"", "]", ":", "agg_doc", "=", "_AggregateReportDoc", "(", "xml_schemea", "=", "aggregate_report", "[", "\"xml_schema\"", 
"]", ",", "org_name", "=", "metadata", "[", "\"org_name\"", "]", ",", "org_email", "=", "metadata", "[", "\"org_email\"", "]", ",", "org_extra_contact_info", "=", "metadata", "[", "\"org_extra_contact_info\"", "]", ",", "report_id", "=", "metadata", "[", "\"report_id\"", "]", ",", "date_range", "=", "date_range", ",", "errors", "=", "metadata", "[", "\"errors\"", "]", ",", "published_policy", "=", "published_policy", ",", "source_ip_address", "=", "record", "[", "\"source\"", "]", "[", "\"ip_address\"", "]", ",", "source_country", "=", "record", "[", "\"source\"", "]", "[", "\"country\"", "]", ",", "source_reverse_dns", "=", "record", "[", "\"source\"", "]", "[", "\"reverse_dns\"", "]", ",", "source_base_domain", "=", "record", "[", "\"source\"", "]", "[", "\"base_domain\"", "]", ",", "message_count", "=", "record", "[", "\"count\"", "]", ",", "disposition", "=", "record", "[", "\"policy_evaluated\"", "]", "[", "\"disposition\"", "]", ",", "dkim_aligned", "=", "record", "[", "\"policy_evaluated\"", "]", "[", "\"dkim\"", "]", "==", "\"pass\"", ",", "spf_aligned", "=", "record", "[", "\"policy_evaluated\"", "]", "[", "\"spf\"", "]", "==", "\"pass\"", ",", "header_from", "=", "record", "[", "\"identifiers\"", "]", "[", "\"header_from\"", "]", ",", "envelope_from", "=", "record", "[", "\"identifiers\"", "]", "[", "\"envelope_from\"", "]", ",", "envelope_to", "=", "record", "[", "\"identifiers\"", "]", "[", "\"envelope_to\"", "]", ")", "for", "override", "in", "record", "[", "\"policy_evaluated\"", "]", "[", "\"policy_override_reasons\"", "]", ":", "agg_doc", ".", "add_policy_override", "(", "type_", "=", "override", "[", "\"type\"", "]", ",", "comment", "=", "override", "[", "\"comment\"", "]", ")", "for", "dkim_result", "in", "record", "[", "\"auth_results\"", "]", "[", "\"dkim\"", "]", ":", "agg_doc", ".", "add_dkim_result", "(", "domain", "=", "dkim_result", "[", "\"domain\"", "]", ",", "selector", "=", "dkim_result", "[", "\"selector\"", "]", ",", "result", "=", "dkim_result", "[", "\"result\"", "]", ")", "for", "spf_result", "in", "record", "[", "\"auth_results\"", "]", "[", "\"spf\"", "]", ":", "agg_doc", ".", "add_spf_result", "(", "domain", "=", "spf_result", "[", "\"domain\"", "]", ",", "scope", "=", "spf_result", "[", "\"scope\"", "]", ",", "result", "=", "spf_result", "[", "\"result\"", "]", ")", "index", "=", "\"dmarc_aggregate\"", "if", "index_suffix", ":", "index", "=", "\"{0}_{1}\"", ".", "format", "(", "index", ",", "index_suffix", ")", "index", "=", "\"{0}-{1}\"", ".", "format", "(", "index", ",", "index_date", ")", "create_indexes", "(", "[", "index", "]", ")", "agg_doc", ".", "meta", ".", "index", "=", "index", "try", ":", "agg_doc", ".", "save", "(", ")", "except", "Exception", "as", "e", ":", "raise", "ElasticsearchError", "(", "\"Elasticsearch error: {0}\"", ".", "format", "(", "e", ".", "__str__", "(", ")", ")", ")" ]
Saves a parsed DMARC aggregate report to ElasticSearch Args: aggregate_report (OrderedDict): A parsed aggregate report index_suffix (str): The suffix of the name of the index to save to monthly_indexes (bool): Use monthly indexes instead of daily indexes Raises: AlreadySaved
[ "Saves", "a", "parsed", "DMARC", "aggregate", "report", "to", "ElasticSearch" ]
python
test
45.283186
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_text.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_text.py#L552-L559
def dictlist_convert_to_bool(dict_list: Iterable[Dict], key: str) -> None: """ Process an iterable of dictionaries. For each dictionary ``d``, convert (in place) ``d[key]`` to a bool. If that fails, convert it to ``None``. """ for d in dict_list: # d[key] = True if d[key] == "Y" else False d[key] = 1 if d[key] == "Y" else 0
[ "def", "dictlist_convert_to_bool", "(", "dict_list", ":", "Iterable", "[", "Dict", "]", ",", "key", ":", "str", ")", "->", "None", ":", "for", "d", "in", "dict_list", ":", "# d[key] = True if d[key] == \"Y\" else False", "d", "[", "key", "]", "=", "1", "if", "d", "[", "key", "]", "==", "\"Y\"", "else", "0" ]
Process an iterable of dictionaries. For each dictionary ``d``, convert (in place) ``d[key]`` to a bool. If that fails, convert it to ``None``.
[ "Process", "an", "iterable", "of", "dictionaries", ".", "For", "each", "dictionary", "d", "convert", "(", "in", "place", ")", "d", "[", "key", "]", "to", "a", "bool", ".", "If", "that", "fails", "convert", "it", "to", "None", "." ]
python
train
44.25
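A quick illustration of the in-place conversion above; note that, as written, the record's code stores 1/0 (the True/False variant is commented out) and never produces None despite the docstring. The dictionary keys and values here are made up.

rows = [{"smoker": "Y"}, {"smoker": "N"}, {"smoker": ""}]
for d in rows:
    d["smoker"] = 1 if d["smoker"] == "Y" else 0
print(rows)  # [{'smoker': 1}, {'smoker': 0}, {'smoker': 0}]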
denisenkom/django-sqlserver
sqlserver/base.py
https://github.com/denisenkom/django-sqlserver/blob/f5d5dc8637799746f1bd11bd8c479d3acd468581/sqlserver/base.py#L216-L220
def _get_new_connection(self, conn_params): """Opens a connection to the database.""" self.__connection_string = conn_params.get('connection_string', '') conn = self.Database.connect(**conn_params) return conn
[ "def", "_get_new_connection", "(", "self", ",", "conn_params", ")", ":", "self", ".", "__connection_string", "=", "conn_params", ".", "get", "(", "'connection_string'", ",", "''", ")", "conn", "=", "self", ".", "Database", ".", "connect", "(", "*", "*", "conn_params", ")", "return", "conn" ]
Opens a connection to the database.
[ "Opens", "a", "connection", "to", "the", "database", "." ]
python
train
44.2
jmgilman/Neolib
neolib/user/Bank.py
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/user/Bank.py#L51-L64
def load(self): """ Loads the user's account details and Raises parseException """ pg = self.usr.getPage("http://www.neopets.com/bank.phtml") # Verifies account exists if not "great to see you again" in pg.content: logging.getLogger("neolib.user").info("Could not load user's bank. Most likely does not have an account.", {'pg': pg}) raise noBankAcct self.__loadDetails(pg)
[ "def", "load", "(", "self", ")", ":", "pg", "=", "self", ".", "usr", ".", "getPage", "(", "\"http://www.neopets.com/bank.phtml\"", ")", "# Verifies account exists", "if", "not", "\"great to see you again\"", "in", "pg", ".", "content", ":", "logging", ".", "getLogger", "(", "\"neolib.user\"", ")", ".", "info", "(", "\"Could not load user's bank. Most likely does not have an account.\"", ",", "{", "'pg'", ":", "pg", "}", ")", "raise", "noBankAcct", "self", ".", "__loadDetails", "(", "pg", ")" ]
Loads the user's account details and Raises parseException
[ "Loads", "the", "user", "s", "account", "details", "and", "Raises", "parseException" ]
python
train
34.714286
manahl/arctic
arctic/store/version_store.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/store/version_store.py#L420-L440
def get_arctic_version(self, symbol, as_of=None): """ Return the numerical representation of the arctic version used to write the last (or as_of) version for the given symbol. Parameters ---------- symbol : `str` symbol name for the item as_of : `str` or int or `datetime.datetime` Return the data as it was as_of the point in time. `int` : specific version number `str` : snapshot name which contains the version `datetime.datetime` : the version of the data that existed as_of the requested point in time Returns ------- arctic_version : int The numerical representation of Arctic version, used to create the specified symbol version """ return self._read_metadata(symbol, as_of=as_of).get('arctic_version', 0)
[ "def", "get_arctic_version", "(", "self", ",", "symbol", ",", "as_of", "=", "None", ")", ":", "return", "self", ".", "_read_metadata", "(", "symbol", ",", "as_of", "=", "as_of", ")", ".", "get", "(", "'arctic_version'", ",", "0", ")" ]
Return the numerical representation of the arctic version used to write the last (or as_of) version for the given symbol. Parameters ---------- symbol : `str` symbol name for the item as_of : `str` or int or `datetime.datetime` Return the data as it was as_of the point in time. `int` : specific version number `str` : snapshot name which contains the version `datetime.datetime` : the version of the data that existed as_of the requested point in time Returns ------- arctic_version : int The numerical representation of Arctic version, used to create the specified symbol version
[ "Return", "the", "numerical", "representation", "of", "the", "arctic", "version", "used", "to", "write", "the", "last", "(", "or", "as_of", ")", "version", "for", "the", "given", "symbol", "." ]
python
train
41.047619
eqcorrscan/EQcorrscan
eqcorrscan/utils/clustering.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L840-L906
def space_cluster(catalog, d_thresh, show=True): """ Cluster a catalog by distance only. Will compute the matrix of physical distances between events and utilize the :mod:`scipy.clustering.hierarchy` module to perform the clustering. :type catalog: obspy.core.event.Catalog :param catalog: Catalog of events to clustered :type d_thresh: float :param d_thresh: Maximum inter-event distance threshold :returns: list of :class:`obspy.core.event.Catalog` objects :rtype: list >>> from eqcorrscan.utils.clustering import space_cluster >>> from obspy.clients.fdsn import Client >>> from obspy import UTCDateTime >>> client = Client("NCEDC") >>> starttime = UTCDateTime("2002-01-01") >>> endtime = UTCDateTime("2002-02-01") >>> cat = client.get_events(starttime=starttime, endtime=endtime, ... minmagnitude=2) >>> groups = space_cluster(catalog=cat, d_thresh=2, show=False) >>> from eqcorrscan.utils.clustering import space_cluster >>> from obspy.clients.fdsn import Client >>> from obspy import UTCDateTime >>> client = Client("https://earthquake.usgs.gov") >>> starttime = UTCDateTime("2002-01-01") >>> endtime = UTCDateTime("2002-02-01") >>> cat = client.get_events(starttime=starttime, endtime=endtime, ... minmagnitude=6) >>> groups = space_cluster(catalog=cat, d_thresh=1000, show=False) """ # Compute the distance matrix and linkage dist_mat = dist_mat_km(catalog) dist_vec = squareform(dist_mat) Z = linkage(dist_vec, method='average') # Cluster the linkage using the given threshold as the cutoff indices = fcluster(Z, t=d_thresh, criterion='distance') group_ids = list(set(indices)) indices = [(indices[i], i) for i in range(len(indices))] if show: # Plot the dendrogram...if it's not way too huge dendrogram(Z, color_threshold=d_thresh, distance_sort='ascending') plt.show() # Sort by group id indices.sort(key=lambda tup: tup[0]) groups = [] for group_id in group_ids: group = Catalog() for ind in indices: if ind[0] == group_id: group.append(catalog[ind[1]]) elif ind[0] > group_id: # Because we have sorted by group id, when the index is greater # than the group_id we can break the inner loop. # Patch applied by CJC 05/11/2015 groups.append(group) break groups.append(group) return groups
[ "def", "space_cluster", "(", "catalog", ",", "d_thresh", ",", "show", "=", "True", ")", ":", "# Compute the distance matrix and linkage", "dist_mat", "=", "dist_mat_km", "(", "catalog", ")", "dist_vec", "=", "squareform", "(", "dist_mat", ")", "Z", "=", "linkage", "(", "dist_vec", ",", "method", "=", "'average'", ")", "# Cluster the linkage using the given threshold as the cutoff", "indices", "=", "fcluster", "(", "Z", ",", "t", "=", "d_thresh", ",", "criterion", "=", "'distance'", ")", "group_ids", "=", "list", "(", "set", "(", "indices", ")", ")", "indices", "=", "[", "(", "indices", "[", "i", "]", ",", "i", ")", "for", "i", "in", "range", "(", "len", "(", "indices", ")", ")", "]", "if", "show", ":", "# Plot the dendrogram...if it's not way too huge", "dendrogram", "(", "Z", ",", "color_threshold", "=", "d_thresh", ",", "distance_sort", "=", "'ascending'", ")", "plt", ".", "show", "(", ")", "# Sort by group id", "indices", ".", "sort", "(", "key", "=", "lambda", "tup", ":", "tup", "[", "0", "]", ")", "groups", "=", "[", "]", "for", "group_id", "in", "group_ids", ":", "group", "=", "Catalog", "(", ")", "for", "ind", "in", "indices", ":", "if", "ind", "[", "0", "]", "==", "group_id", ":", "group", ".", "append", "(", "catalog", "[", "ind", "[", "1", "]", "]", ")", "elif", "ind", "[", "0", "]", ">", "group_id", ":", "# Because we have sorted by group id, when the index is greater", "# than the group_id we can break the inner loop.", "# Patch applied by CJC 05/11/2015", "groups", ".", "append", "(", "group", ")", "break", "groups", ".", "append", "(", "group", ")", "return", "groups" ]
Cluster a catalog by distance only. Will compute the matrix of physical distances between events and utilize the :mod:`scipy.clustering.hierarchy` module to perform the clustering. :type catalog: obspy.core.event.Catalog :param catalog: Catalog of events to clustered :type d_thresh: float :param d_thresh: Maximum inter-event distance threshold :returns: list of :class:`obspy.core.event.Catalog` objects :rtype: list >>> from eqcorrscan.utils.clustering import space_cluster >>> from obspy.clients.fdsn import Client >>> from obspy import UTCDateTime >>> client = Client("NCEDC") >>> starttime = UTCDateTime("2002-01-01") >>> endtime = UTCDateTime("2002-02-01") >>> cat = client.get_events(starttime=starttime, endtime=endtime, ... minmagnitude=2) >>> groups = space_cluster(catalog=cat, d_thresh=2, show=False) >>> from eqcorrscan.utils.clustering import space_cluster >>> from obspy.clients.fdsn import Client >>> from obspy import UTCDateTime >>> client = Client("https://earthquake.usgs.gov") >>> starttime = UTCDateTime("2002-01-01") >>> endtime = UTCDateTime("2002-02-01") >>> cat = client.get_events(starttime=starttime, endtime=endtime, ... minmagnitude=6) >>> groups = space_cluster(catalog=cat, d_thresh=1000, show=False)
[ "Cluster", "a", "catalog", "by", "distance", "only", "." ]
python
train
37.940299
daviddrysdale/python-phonenumbers
python/phonenumbers/phonenumberutil.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L1986-L2019
def is_valid_number_for_region(numobj, region_code): """Tests whether a phone number is valid for a certain region. Note this doesn't verify the number is actually in use, which is impossible to tell by just looking at a number itself. If the country calling code is not the same as the country calling code for the region, this immediately exits with false. After this, the specific number pattern rules for the region are examined. This is useful for determining for example whether a particular number is valid for Canada, rather than just a valid NANPA number. Warning: In most cases, you want to use is_valid_number instead. For example, this method will mark numbers from British Crown dependencies such as the Isle of Man as invalid for the region "GB" (United Kingdom), since it has its own region code, "IM", which may be undesirable. Arguments: numobj -- The phone number object that we want to validate. region_code -- The region that we want to validate the phone number for. Returns a boolean that indicates whether the number is of a valid pattern. """ country_code = numobj.country_code if region_code is None: return False metadata = PhoneMetadata.metadata_for_region_or_calling_code(country_code, region_code.upper()) if (metadata is None or (region_code != REGION_CODE_FOR_NON_GEO_ENTITY and country_code != country_code_for_valid_region(region_code))): # Either the region code was invalid, or the country calling code for # this number does not match that of the region code. return False nsn = national_significant_number(numobj) return (_number_type_helper(nsn, metadata) != PhoneNumberType.UNKNOWN)
[ "def", "is_valid_number_for_region", "(", "numobj", ",", "region_code", ")", ":", "country_code", "=", "numobj", ".", "country_code", "if", "region_code", "is", "None", ":", "return", "False", "metadata", "=", "PhoneMetadata", ".", "metadata_for_region_or_calling_code", "(", "country_code", ",", "region_code", ".", "upper", "(", ")", ")", "if", "(", "metadata", "is", "None", "or", "(", "region_code", "!=", "REGION_CODE_FOR_NON_GEO_ENTITY", "and", "country_code", "!=", "country_code_for_valid_region", "(", "region_code", ")", ")", ")", ":", "# Either the region code was invalid, or the country calling code for", "# this number does not match that of the region code.", "return", "False", "nsn", "=", "national_significant_number", "(", "numobj", ")", "return", "(", "_number_type_helper", "(", "nsn", ",", "metadata", ")", "!=", "PhoneNumberType", ".", "UNKNOWN", ")" ]
Tests whether a phone number is valid for a certain region. Note this doesn't verify the number is actually in use, which is impossible to tell by just looking at a number itself. If the country calling code is not the same as the country calling code for the region, this immediately exits with false. After this, the specific number pattern rules for the region are examined. This is useful for determining for example whether a particular number is valid for Canada, rather than just a valid NANPA number. Warning: In most cases, you want to use is_valid_number instead. For example, this method will mark numbers from British Crown dependencies such as the Isle of Man as invalid for the region "GB" (United Kingdom), since it has its own region code, "IM", which may be undesirable. Arguments: numobj -- The phone number object that we want to validate. region_code -- The region that we want to validate the phone number for. Returns a boolean that indicates whether the number is of a valid pattern.
[ "Tests", "whether", "a", "phone", "number", "is", "valid", "for", "a", "certain", "region", "." ]
python
train
50.970588
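A hedged usage sketch with the published phonenumbers API (parse plus the region-scoped check described above); the number is an arbitrary UK-style example and the exact results depend on the library's current metadata.

import phonenumbers

numobj = phonenumbers.parse("+44 20 7946 0018", None)
print(phonenumbers.is_valid_number(numobj))                   # pattern-valid for some region
print(phonenumbers.is_valid_number_for_region(numobj, "GB"))  # valid specifically for GB
print(phonenumbers.is_valid_number_for_region(numobj, "IM"))  # typically False: GB plan, not Isle of Man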
roclark/sportsreference
sportsreference/nhl/schedule.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nhl/schedule.py#L271-L281
def result(self): """ Returns a ``string`` constant to indicate whether the team lost in regulation, lost in overtime, or won. """ if self._result.lower() == 'w': return WIN if self._result.lower() == 'l' and \ self.overtime != 0: return OVERTIME_LOSS return LOSS
[ "def", "result", "(", "self", ")", ":", "if", "self", ".", "_result", ".", "lower", "(", ")", "==", "'w'", ":", "return", "WIN", "if", "self", ".", "_result", ".", "lower", "(", ")", "==", "'l'", "and", "self", ".", "overtime", "!=", "0", ":", "return", "OVERTIME_LOSS", "return", "LOSS" ]
Returns a ``string`` constant to indicate whether the team lost in regulation, lost in overtime, or won.
[ "Returns", "a", "string", "constant", "to", "indicate", "whether", "the", "team", "lost", "in", "regulation", "lost", "in", "overtime", "or", "won", "." ]
python
train
31.272727
gagneurlab/concise
concise/preprocessing/splines.py
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/splines.py#L8-L16
def _trunc(x, minval=None, maxval=None): """Truncate vector values to have values on range [minval, maxval] """ x = np.copy(x) if minval is not None: x[x < minval] = minval if maxval is not None: x[x > maxval] = maxval return x
[ "def", "_trunc", "(", "x", ",", "minval", "=", "None", ",", "maxval", "=", "None", ")", ":", "x", "=", "np", ".", "copy", "(", "x", ")", "if", "minval", "is", "not", "None", ":", "x", "[", "x", "<", "minval", "]", "=", "minval", "if", "maxval", "is", "not", "None", ":", "x", "[", "x", ">", "maxval", "]", "=", "maxval", "return", "x" ]
Truncate vector values to have values on range [minval, maxval]
[ "Truncate", "vector", "values", "to", "have", "values", "on", "range", "[", "minval", "maxval", "]" ]
python
train
28.777778
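For reference, when both bounds are given the helper above behaves like numpy's clip; a one-line sketch:

import numpy as np

x = np.array([-3.0, 0.5, 2.0, 7.0])
print(np.clip(x, 0.0, 5.0))  # [0.  0.5 2.  5. ], the same values _trunc(x, 0.0, 5.0) returns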
bslatkin/dpxdt
dpxdt/server/work_queue.py
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue.py#L219-L256
def _get_task_with_policy(queue_name, task_id, owner): """Fetches the specified task and enforces ownership policy. Args: queue_name: Name of the queue the work item is on. task_id: ID of the task that is finished. owner: Who or what has the current lease on the task. Returns: The valid WorkQueue task that is currently owned. Raises: TaskDoesNotExistError if the task does not exist. LeaseExpiredError if the lease is no longer active. NotOwnerError if the specified owner no longer owns the task. """ now = datetime.datetime.utcnow() task = ( WorkQueue.query .filter_by(queue_name=queue_name, task_id=task_id) .with_lockmode('update') .first()) if not task: raise TaskDoesNotExistError('task_id=%r' % task_id) # Lease delta should be positive, meaning it has not yet expired! lease_delta = now - task.eta if lease_delta > datetime.timedelta(0): db.session.rollback() raise LeaseExpiredError('queue=%r, task_id=%r expired %s' % ( task.queue_name, task_id, lease_delta)) if task.last_owner != owner: db.session.rollback() raise NotOwnerError('queue=%r, task_id=%r, owner=%r' % ( task.queue_name, task_id, task.last_owner)) return task
[ "def", "_get_task_with_policy", "(", "queue_name", ",", "task_id", ",", "owner", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "task", "=", "(", "WorkQueue", ".", "query", ".", "filter_by", "(", "queue_name", "=", "queue_name", ",", "task_id", "=", "task_id", ")", ".", "with_lockmode", "(", "'update'", ")", ".", "first", "(", ")", ")", "if", "not", "task", ":", "raise", "TaskDoesNotExistError", "(", "'task_id=%r'", "%", "task_id", ")", "# Lease delta should be positive, meaning it has not yet expired!", "lease_delta", "=", "now", "-", "task", ".", "eta", "if", "lease_delta", ">", "datetime", ".", "timedelta", "(", "0", ")", ":", "db", ".", "session", ".", "rollback", "(", ")", "raise", "LeaseExpiredError", "(", "'queue=%r, task_id=%r expired %s'", "%", "(", "task", ".", "queue_name", ",", "task_id", ",", "lease_delta", ")", ")", "if", "task", ".", "last_owner", "!=", "owner", ":", "db", ".", "session", ".", "rollback", "(", ")", "raise", "NotOwnerError", "(", "'queue=%r, task_id=%r, owner=%r'", "%", "(", "task", ".", "queue_name", ",", "task_id", ",", "task", ".", "last_owner", ")", ")", "return", "task" ]
Fetches the specified task and enforces ownership policy. Args: queue_name: Name of the queue the work item is on. task_id: ID of the task that is finished. owner: Who or what has the current lease on the task. Returns: The valid WorkQueue task that is currently owned. Raises: TaskDoesNotExistError if the task does not exist. LeaseExpiredError if the lease is no longer active. NotOwnerError if the specified owner no longer owns the task.
[ "Fetches", "the", "specified", "task", "and", "enforces", "ownership", "policy", "." ]
python
train
35.368421
timothyhahn/rui
rui/rui.py
https://github.com/timothyhahn/rui/blob/ac9f587fb486760d77332866c6e876f78a810f74/rui/rui.py#L52-L65
def register_entity_to_group(self, entity, group): ''' Add entity to a group. If group does not exist, entity will be added as first member entity is of type Entity group is a string that is the name of the group ''' if entity in self._entities: if group in self._groups: self._groups[group].append(entity) else: self._groups[group] = [entity] else: raise UnmanagedEntityError(entity)
[ "def", "register_entity_to_group", "(", "self", ",", "entity", ",", "group", ")", ":", "if", "entity", "in", "self", ".", "_entities", ":", "if", "group", "in", "self", ".", "_groups", ":", "self", ".", "_groups", "[", "group", "]", ".", "append", "(", "entity", ")", "else", ":", "self", ".", "_groups", "[", "group", "]", "=", "[", "entity", "]", "else", ":", "raise", "UnmanagedEntityError", "(", "entity", ")" ]
Add entity to a group. If group does not exist, entity will be added as first member. entity is of type Entity. group is a string that is the name of the group.
[ "Add", "entity", "to", "a", "group", ".", "If", "group", "does", "not", "exist", "entity", "will", "be", "added", "as", "first", "member", "entity", "is", "of", "type", "Entity", "group", "is", "a", "string", "that", "is", "the", "name", "of", "the", "group" ]
python
train
35.928571
flo-compbio/goparser
goparser/annotation.py
https://github.com/flo-compbio/goparser/blob/5e27d7d04a26a70a1d9dc113357041abff72be3f/goparser/annotation.py#L199-L213
def get_gaf_format(self): """Return a GAF 2.0-compatible string representation of the annotation. Parameters ---------- Returns ------- str The formatted string. """ sep = '\t' return sep.join( [self.gene, self.db_ref, self.term.id, self.evidence, '|'.join(self.db_ref), '|'.join(self.with_)])
[ "def", "get_gaf_format", "(", "self", ")", ":", "sep", "=", "'\\t'", "return", "sep", ".", "join", "(", "[", "self", ".", "gene", ",", "self", ".", "db_ref", ",", "self", ".", "term", ".", "id", ",", "self", ".", "evidence", ",", "'|'", ".", "join", "(", "self", ".", "db_ref", ")", ",", "'|'", ".", "join", "(", "self", ".", "with_", ")", "]", ")" ]
Return a GAF 2.0-compatible string representation of the annotation. Parameters ---------- Returns ------- str The formatted string.
[ "Return", "a", "GAF", "2", ".", "0", "-", "compatible", "string", "representation", "of", "the", "annotation", "." ]
python
train
26
cpburnz/python-path-specification
pathspec/patterns/gitwildmatch.py
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/patterns/gitwildmatch.py#L177-L280
def _translate_segment_glob(pattern): """ Translates the glob pattern to a regular expression. This is used in the constructor to translate a path segment glob pattern to its corresponding regular expression. *pattern* (:class:`str`) is the glob pattern. Returns the regular expression (:class:`str`). """ # NOTE: This is derived from `fnmatch.translate()` and is similar to # the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set. escape = False regex = '' i, end = 0, len(pattern) while i < end: # Get next character. char = pattern[i] i += 1 if escape: # Escape the character. escape = False regex += re.escape(char) elif char == '\\': # Escape character, escape next character. escape = True elif char == '*': # Multi-character wildcard. Match any string (except slashes), # including an empty string. regex += '[^/]*' elif char == '?': # Single-character wildcard. Match any single character (except # a slash). regex += '[^/]' elif char == '[': # Braket expression wildcard. Except for the beginning # exclamation mark, the whole braket expression can be used # directly as regex but we have to find where the expression # ends. # - "[][!]" matchs ']', '[' and '!'. # - "[]-]" matchs ']' and '-'. # - "[!]a-]" matchs any character except ']', 'a' and '-'. j = i # Pass brack expression negation. if j < end and pattern[j] == '!': j += 1 # Pass first closing braket if it is at the beginning of the # expression. if j < end and pattern[j] == ']': j += 1 # Find closing braket. Stop once we reach the end or find it. while j < end and pattern[j] != ']': j += 1 if j < end: # Found end of braket expression. Increment j to be one past # the closing braket: # # [...] # ^ ^ # i j # j += 1 expr = '[' if pattern[i] == '!': # Braket expression needs to be negated. expr += '^' i += 1 elif pattern[i] == '^': # POSIX declares that the regex braket expression negation # "[^...]" is undefined in a glob pattern. Python's # `fnmatch.translate()` escapes the caret ('^') as a # literal. To maintain consistency with undefined behavior, # I am escaping the '^' as well. expr += '\\^' i += 1 # Build regex braket expression. Escape slashes so they are # treated as literal slashes by regex as defined by POSIX. expr += pattern[i:j].replace('\\', '\\\\') # Add regex braket expression to regex result. regex += expr # Set i to one past the closing braket. i = j else: # Failed to find closing braket, treat opening braket as a # braket literal instead of as an expression. regex += '\\[' else: # Regular character, escape it for regex. regex += re.escape(char) return regex
[ "def", "_translate_segment_glob", "(", "pattern", ")", ":", "# NOTE: This is derived from `fnmatch.translate()` and is similar to", "# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.", "escape", "=", "False", "regex", "=", "''", "i", ",", "end", "=", "0", ",", "len", "(", "pattern", ")", "while", "i", "<", "end", ":", "# Get next character.", "char", "=", "pattern", "[", "i", "]", "i", "+=", "1", "if", "escape", ":", "# Escape the character.", "escape", "=", "False", "regex", "+=", "re", ".", "escape", "(", "char", ")", "elif", "char", "==", "'\\\\'", ":", "# Escape character, escape next character.", "escape", "=", "True", "elif", "char", "==", "'*'", ":", "# Multi-character wildcard. Match any string (except slashes),", "# including an empty string.", "regex", "+=", "'[^/]*'", "elif", "char", "==", "'?'", ":", "# Single-character wildcard. Match any single character (except", "# a slash).", "regex", "+=", "'[^/]'", "elif", "char", "==", "'['", ":", "# Braket expression wildcard. Except for the beginning", "# exclamation mark, the whole braket expression can be used", "# directly as regex but we have to find where the expression", "# ends.", "# - \"[][!]\" matchs ']', '[' and '!'.", "# - \"[]-]\" matchs ']' and '-'.", "# - \"[!]a-]\" matchs any character except ']', 'a' and '-'.", "j", "=", "i", "# Pass brack expression negation.", "if", "j", "<", "end", "and", "pattern", "[", "j", "]", "==", "'!'", ":", "j", "+=", "1", "# Pass first closing braket if it is at the beginning of the", "# expression.", "if", "j", "<", "end", "and", "pattern", "[", "j", "]", "==", "']'", ":", "j", "+=", "1", "# Find closing braket. Stop once we reach the end or find it.", "while", "j", "<", "end", "and", "pattern", "[", "j", "]", "!=", "']'", ":", "j", "+=", "1", "if", "j", "<", "end", ":", "# Found end of braket expression. Increment j to be one past", "# the closing braket:", "#", "# [...]", "# ^ ^", "# i j", "#", "j", "+=", "1", "expr", "=", "'['", "if", "pattern", "[", "i", "]", "==", "'!'", ":", "# Braket expression needs to be negated.", "expr", "+=", "'^'", "i", "+=", "1", "elif", "pattern", "[", "i", "]", "==", "'^'", ":", "# POSIX declares that the regex braket expression negation", "# \"[^...]\" is undefined in a glob pattern. Python's", "# `fnmatch.translate()` escapes the caret ('^') as a", "# literal. To maintain consistency with undefined behavior,", "# I am escaping the '^' as well.", "expr", "+=", "'\\\\^'", "i", "+=", "1", "# Build regex braket expression. Escape slashes so they are", "# treated as literal slashes by regex as defined by POSIX.", "expr", "+=", "pattern", "[", "i", ":", "j", "]", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "# Add regex braket expression to regex result.", "regex", "+=", "expr", "# Set i to one past the closing braket.", "i", "=", "j", "else", ":", "# Failed to find closing braket, treat opening braket as a", "# braket literal instead of as an expression.", "regex", "+=", "'\\\\['", "else", ":", "# Regular character, escape it for regex.", "regex", "+=", "re", ".", "escape", "(", "char", ")", "return", "regex" ]
Translates the glob pattern to a regular expression. This is used in the constructor to translate a path segment glob pattern to its corresponding regular expression. *pattern* (:class:`str`) is the glob pattern. Returns the regular expression (:class:`str`).
[ "Translates", "the", "glob", "pattern", "to", "a", "regular", "expression", ".", "This", "is", "used", "in", "the", "constructor", "to", "translate", "a", "path", "segment", "glob", "pattern", "to", "its", "corresponding", "regular", "expression", "." ]
python
train
27.432692
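A quick check of the translation above: the single-segment pattern *.py yields the regex [^/]*\.py, which matches within one path segment but never across a slash. A minimal sketch with the standard re module (the sample file names are illustrative, not from the record):

import re

segment_regex = r"[^/]*\.py"   # what the translator above produces for the glob "*.py"

print(bool(re.fullmatch(segment_regex, "module.py")))      # True
print(bool(re.fullmatch(segment_regex, "pkg/module.py")))  # False: '*' never matches '/'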
blockstack/blockstack-core
blockstack/lib/rpc.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/rpc.py#L458-L496
def GET_names( self, path_info ): """ Get all names in existence If `all=true` is set, then include expired names. Returns the list on success Returns 400 on invalid arguments Returns 502 on failure to get names """ include_expired = False qs_values = path_info['qs_values'] page = qs_values.get('page', None) if page is None: log.error("Page required") return self._reply_json({'error': 'page= argument required'}, status_code=400) try: page = int(page) if page < 0: raise ValueError("Page is negative") except ValueError: log.error("Invalid page") return self._reply_json({'error': 'Invalid page= value'}, status_code=400) if qs_values.get('all', '').lower() in ['1', 'true']: include_expired = True offset = page * 100 count = 100 blockstackd_url = get_blockstackd_url() res = blockstackd_client.get_all_names(offset, count, include_expired=include_expired, hostport=blockstackd_url) if json_is_error(res): log.error("Failed to list all names (offset={}, count={}): {}".format(offset, count, res['error'])) return self._reply_json({'error': 'Failed to list all names'}, status_code=res.get('http_status', 502)) return self._reply_json(res)
[ "def", "GET_names", "(", "self", ",", "path_info", ")", ":", "include_expired", "=", "False", "qs_values", "=", "path_info", "[", "'qs_values'", "]", "page", "=", "qs_values", ".", "get", "(", "'page'", ",", "None", ")", "if", "page", "is", "None", ":", "log", ".", "error", "(", "\"Page required\"", ")", "return", "self", ".", "_reply_json", "(", "{", "'error'", ":", "'page= argument required'", "}", ",", "status_code", "=", "400", ")", "try", ":", "page", "=", "int", "(", "page", ")", "if", "page", "<", "0", ":", "raise", "ValueError", "(", "\"Page is negative\"", ")", "except", "ValueError", ":", "log", ".", "error", "(", "\"Invalid page\"", ")", "return", "self", ".", "_reply_json", "(", "{", "'error'", ":", "'Invalid page= value'", "}", ",", "status_code", "=", "400", ")", "if", "qs_values", ".", "get", "(", "'all'", ",", "''", ")", ".", "lower", "(", ")", "in", "[", "'1'", ",", "'true'", "]", ":", "include_expired", "=", "True", "offset", "=", "page", "*", "100", "count", "=", "100", "blockstackd_url", "=", "get_blockstackd_url", "(", ")", "res", "=", "blockstackd_client", ".", "get_all_names", "(", "offset", ",", "count", ",", "include_expired", "=", "include_expired", ",", "hostport", "=", "blockstackd_url", ")", "if", "json_is_error", "(", "res", ")", ":", "log", ".", "error", "(", "\"Failed to list all names (offset={}, count={}): {}\"", ".", "format", "(", "offset", ",", "count", ",", "res", "[", "'error'", "]", ")", ")", "return", "self", ".", "_reply_json", "(", "{", "'error'", ":", "'Failed to list all names'", "}", ",", "status_code", "=", "res", ".", "get", "(", "'http_status'", ",", "502", ")", ")", "return", "self", ".", "_reply_json", "(", "res", ")" ]
Get all names in existence If `all=true` is set, then include expired names. Returns the list on success Returns 400 on invalid arguments Returns 502 on failure to get names
[ "Get", "all", "names", "in", "existence", "If", "all", "=", "true", "is", "set", "then", "include", "expired", "names", ".", "Returns", "the", "list", "on", "success", "Returns", "400", "on", "invalid", "arguments", "Returns", "502", "on", "failure", "to", "get", "names" ]
python
train
35.794872
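A minimal sketch of the paging rules this handler enforces, kept separate from any Blockstack client code; the helper name and sample query values are hypothetical.

def page_to_offset(qs_values):
    page = qs_values.get('page')
    if page is None:
        return {'error': 'page= argument required'}, 400
    try:
        page = int(page)
        if page < 0:
            raise ValueError("Page is negative")
    except ValueError:
        return {'error': 'Invalid page= value'}, 400
    include_expired = qs_values.get('all', '').lower() in ('1', 'true')
    # Each page maps to a fixed window of 100 names.
    return {'offset': page * 100, 'count': 100, 'include_expired': include_expired}, 200

print(page_to_offset({'page': '2', 'all': 'true'}))
# ({'offset': 200, 'count': 100, 'include_expired': True}, 200)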
bcbio/bcbio-nextgen
bcbio/heterogeneity/bubbletree.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L380-L386
def _has_population_germline(rec): """Check if header defines population annotated germline samples for tumor only. """ for k in population_keys: if k in rec.header.info: return True return False
[ "def", "_has_population_germline", "(", "rec", ")", ":", "for", "k", "in", "population_keys", ":", "if", "k", "in", "rec", ".", "header", ".", "info", ":", "return", "True", "return", "False" ]
Check if header defines population annotated germline samples for tumor only.
[ "Check", "if", "header", "defines", "population", "annotated", "germline", "samples", "for", "tumor", "only", "." ]
python
train
32.142857
numenta/htmresearch
projects/nik/hello_ik.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/nik/hello_ik.py#L36-L44
def printSegmentForCell(tm, cell): """Print segment information for this cell""" print "Segments for cell", cell, ":" for seg in tm.basalConnections._cells[cell]._segments: print " ", synapses = seg._synapses for s in synapses: print "%d:%g" %(s.presynapticCell,s.permanence), print
[ "def", "printSegmentForCell", "(", "tm", ",", "cell", ")", ":", "print", "\"Segments for cell\"", ",", "cell", ",", "\":\"", "for", "seg", "in", "tm", ".", "basalConnections", ".", "_cells", "[", "cell", "]", ".", "_segments", ":", "print", "\" \"", ",", "synapses", "=", "seg", ".", "_synapses", "for", "s", "in", "synapses", ":", "print", "\"%d:%g\"", "%", "(", "s", ".", "presynapticCell", ",", "s", ".", "permanence", ")", ",", "print" ]
Print segment information for this cell
[ "Print", "segment", "information", "for", "this", "cell" ]
python
train
33.888889
ergoithz/browsepy
browsepy/stream.py
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/stream.py#L98-L133
def read(self, want=0): ''' Read method, gets data from internal buffer while releasing :meth:`write` locks when needed. The lock usage means it must ran on a different thread than :meth:`fill`, ie. the main thread, otherwise will deadlock. The combination of both write and this method running on different threads makes tarfile being streamed on-the-fly, with data chunks being processed and retrieved on demand. :param want: number bytes to read, defaults to 0 (all available) :type want: int :returns: tarfile data as bytes :rtype: bytes ''' if self._finished: if self._finished == 1: self._finished += 1 return "" return EOFError("EOF reached") # Thread communication self._want = want self._add.set() self._result.wait() self._result.clear() if want: data = self._data[:want] self._data = self._data[want:] else: data = self._data self._data = bytes() return data
[ "def", "read", "(", "self", ",", "want", "=", "0", ")", ":", "if", "self", ".", "_finished", ":", "if", "self", ".", "_finished", "==", "1", ":", "self", ".", "_finished", "+=", "1", "return", "\"\"", "return", "EOFError", "(", "\"EOF reached\"", ")", "# Thread communication", "self", ".", "_want", "=", "want", "self", ".", "_add", ".", "set", "(", ")", "self", ".", "_result", ".", "wait", "(", ")", "self", ".", "_result", ".", "clear", "(", ")", "if", "want", ":", "data", "=", "self", ".", "_data", "[", ":", "want", "]", "self", ".", "_data", "=", "self", ".", "_data", "[", "want", ":", "]", "else", ":", "data", "=", "self", ".", "_data", "self", ".", "_data", "=", "bytes", "(", ")", "return", "data" ]
Read method, gets data from internal buffer while releasing :meth:`write` locks when needed. The lock usage means it must run on a different thread than :meth:`fill`, i.e. the main thread, otherwise it will deadlock. The combination of both write and this method running on different threads makes the tarfile stream on-the-fly, with data chunks being processed and retrieved on demand. :param want: number of bytes to read, defaults to 0 (all available) :type want: int :returns: tarfile data as bytes :rtype: bytes
[ "Read", "method", "gets", "data", "from", "internal", "buffer", "while", "releasing", ":", "meth", ":", "write", "locks", "when", "needed", "." ]
python
train
31.083333
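The read() above is meant to be called repeatedly from the response thread until it reports the end of the stream; a generic drain loop for any such file-like object looks like this (io.BytesIO stands in for the real tarfile buffer, so the sentinel here is b'' rather than '').

import io

stream = io.BytesIO(b"tar bytes produced on the fly")   # stand-in for the streaming buffer

# Consume fixed-size chunks until read() reports the end of the stream.
for chunk in iter(lambda: stream.read(8), b""):
    print(len(chunk), "bytes")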
napalm-automation/napalm-logs
napalm_logs/serializer/__init__.py
https://github.com/napalm-automation/napalm-logs/blob/4b89100a6e4f994aa004f3ea42a06dc803a7ccb0/napalm_logs/serializer/__init__.py#L34-L44
def get_serializer(name): ''' Return the serialize function. ''' try: log.debug('Using %s as serializer', name) return SERIALIZER_LOOKUP[name] except KeyError: msg = 'Serializer {} is not available'.format(name) log.error(msg, exc_info=True) raise InvalidSerializerException(msg)
[ "def", "get_serializer", "(", "name", ")", ":", "try", ":", "log", ".", "debug", "(", "'Using %s as serializer'", ",", "name", ")", "return", "SERIALIZER_LOOKUP", "[", "name", "]", "except", "KeyError", ":", "msg", "=", "'Serializer {} is not available'", ".", "format", "(", "name", ")", "log", ".", "error", "(", "msg", ",", "exc_info", "=", "True", ")", "raise", "InvalidSerializerException", "(", "msg", ")" ]
Return the serialize function.
[ "Return", "the", "serialize", "function", "." ]
python
train
29.909091
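A self-contained sketch of the same lookup-with-error pattern; the table below is hypothetical (napalm-logs builds SERIALIZER_LOOKUP from its own backends) and a plain ValueError stands in for InvalidSerializerException.

import json

SERIALIZER_LOOKUP = {'json': json.dumps}   # hypothetical table with a single backend

def get_serializer(name):
    try:
        return SERIALIZER_LOOKUP[name]
    except KeyError:
        raise ValueError('Serializer {} is not available'.format(name))

print(get_serializer('json')({'host': 'r1'}))   # {"host": "r1"}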
icometrix/dicom2nifti
scripts/shrink_singleframe.py
https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/scripts/shrink_singleframe.py#L16-L72
def _shrink_file(dicom_file_in, subsample_factor): """ Anonimize a single dicomfile :param dicom_file_in: filepath for input file :param dicom_file_out: filepath for output file :param fields_to_keep: dicom tags to keep """ # Default meta_fields # Required fields according to reference dicom_file_out = dicom_file_in # Load dicom_file_in dicom_in = compressed_dicom.read_file(dicom_file_in) # Create new dicom file # Set new file meta information file_meta = pydicom.dataset.Dataset() for key, value in dicom_in.file_meta.items(): file_meta.add(value) # Create the FileDataset instance (initially no data elements, but file_meta supplied) dicom_out = pydicom.dataset.FileDataset(dicom_file_out, {}, file_meta=file_meta, preamble=b'\0' * 128) # Copy transfer syntax dicom_out.is_little_endian = dicom_in.is_little_endian dicom_out.is_implicit_VR = dicom_in.is_implicit_VR rows = 0 columns = 0 # Add the data elements for field_key, field_value in dicom_in.items(): logging.info(field_key) if field_key == (0x7fe0, 0x0010): pixel_array = dicom_in.pixel_array[::subsample_factor, ::subsample_factor] dicom_out.PixelData = pixel_array.tostring() # = byte array (see pydicom docs) rows = pixel_array.shape[1] columns = pixel_array.shape[0] # noinspection PyPep8Naming dicom_out[0x7fe0, 0x0010].VR = 'OB' else: dicom_out.add(field_value) dicom_out.PixelSpacing[0] *= subsample_factor dicom_out.PixelSpacing[1] *= subsample_factor dicom_out.Rows = rows dicom_out.Columns = columns # Save dicom_file_out # Make sure we have a directory if not os.path.exists(os.path.dirname(dicom_file_out)): logging.info('Decompressing files') # Save the file dicom_out.save_as(dicom_file_out, write_like_original=False)
[ "def", "_shrink_file", "(", "dicom_file_in", ",", "subsample_factor", ")", ":", "# Default meta_fields", "# Required fields according to reference", "dicom_file_out", "=", "dicom_file_in", "# Load dicom_file_in", "dicom_in", "=", "compressed_dicom", ".", "read_file", "(", "dicom_file_in", ")", "# Create new dicom file", "# Set new file meta information", "file_meta", "=", "pydicom", ".", "dataset", ".", "Dataset", "(", ")", "for", "key", ",", "value", "in", "dicom_in", ".", "file_meta", ".", "items", "(", ")", ":", "file_meta", ".", "add", "(", "value", ")", "# Create the FileDataset instance (initially no data elements, but file_meta supplied)", "dicom_out", "=", "pydicom", ".", "dataset", ".", "FileDataset", "(", "dicom_file_out", ",", "{", "}", ",", "file_meta", "=", "file_meta", ",", "preamble", "=", "b'\\0'", "*", "128", ")", "# Copy transfer syntax", "dicom_out", ".", "is_little_endian", "=", "dicom_in", ".", "is_little_endian", "dicom_out", ".", "is_implicit_VR", "=", "dicom_in", ".", "is_implicit_VR", "rows", "=", "0", "columns", "=", "0", "# Add the data elements", "for", "field_key", ",", "field_value", "in", "dicom_in", ".", "items", "(", ")", ":", "logging", ".", "info", "(", "field_key", ")", "if", "field_key", "==", "(", "0x7fe0", ",", "0x0010", ")", ":", "pixel_array", "=", "dicom_in", ".", "pixel_array", "[", ":", ":", "subsample_factor", ",", ":", ":", "subsample_factor", "]", "dicom_out", ".", "PixelData", "=", "pixel_array", ".", "tostring", "(", ")", "# = byte array (see pydicom docs)", "rows", "=", "pixel_array", ".", "shape", "[", "1", "]", "columns", "=", "pixel_array", ".", "shape", "[", "0", "]", "# noinspection PyPep8Naming", "dicom_out", "[", "0x7fe0", ",", "0x0010", "]", ".", "VR", "=", "'OB'", "else", ":", "dicom_out", ".", "add", "(", "field_value", ")", "dicom_out", ".", "PixelSpacing", "[", "0", "]", "*=", "subsample_factor", "dicom_out", ".", "PixelSpacing", "[", "1", "]", "*=", "subsample_factor", "dicom_out", ".", "Rows", "=", "rows", "dicom_out", ".", "Columns", "=", "columns", "# Save dicom_file_out", "# Make sure we have a directory", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "dicom_file_out", ")", ")", ":", "logging", ".", "info", "(", "'Decompressing files'", ")", "# Save the file", "dicom_out", ".", "save_as", "(", "dicom_file_out", ",", "write_like_original", "=", "False", ")" ]
Anonymize a single dicomfile :param dicom_file_in: filepath for input file :param dicom_file_out: filepath for output file :param fields_to_keep: dicom tags to keep
[ "Anonymize", "a", "single", "dicomfile", ":", "param", "dicom_file_in", ":", "filepath", "for", "input", "file", ":", "param", "dicom_file_out", ":", "filepath", "for", "output", "file", ":", "param", "fields_to_keep", ":", "dicom", "tags", "to", "keep" ]
python
train
33.561404
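The core of the shrink step is plain NumPy slicing plus a matching pixel-spacing correction; a minimal sketch with made-up numbers (no pydicom involved).

import numpy as np

factor = 2
pixels = np.arange(16, dtype=np.uint16).reshape(4, 4)   # stand-in for dataset.pixel_array
small = pixels[::factor, ::factor]                       # keep every factor-th row and column

spacing = [0.5, 0.5]                                     # stand-in for PixelSpacing in mm
new_spacing = [s * factor for s in spacing]              # coarser grid -> larger spacing

print(small.shape, new_spacing)   # (2, 2) [1.0, 1.0]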
jespino/anillo
anillo/handlers/routing.py
https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/handlers/routing.py#L175-L184
def _build_urlmapping(urls, strict_slashes=False, **kwargs): """Convers the anillo urlmappings list into werkzeug Map instance. :return: a werkzeug Map instance :rtype: Map """ rules = _build_rules(urls) return Map(rules=list(rules), strict_slashes=strict_slashes, **kwargs)
[ "def", "_build_urlmapping", "(", "urls", ",", "strict_slashes", "=", "False", ",", "*", "*", "kwargs", ")", ":", "rules", "=", "_build_rules", "(", "urls", ")", "return", "Map", "(", "rules", "=", "list", "(", "rules", ")", ",", "strict_slashes", "=", "strict_slashes", ",", "*", "*", "kwargs", ")" ]
Converts the anillo urlmappings list into a werkzeug Map instance. :return: a werkzeug Map instance :rtype: Map
[ "Converts", "the", "anillo", "urlmappings", "list", "into", "a", "werkzeug", "Map", "instance", "." ]
python
train
29.5
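For context, this is roughly how the resulting werkzeug Map is used; the rule and endpoint names are illustrative (anillo's _build_rules produces the Rule objects from its own urlmapping format).

from werkzeug.routing import Map, Rule

url_map = Map(rules=[Rule('/users/<int:user_id>', endpoint='user-detail')],
              strict_slashes=False)

adapter = url_map.bind('example.com')   # host name only matters for building/matching URLs
print(adapter.match('/users/7'))        # ('user-detail', {'user_id': 7})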
gmr/tinman
tinman/serializers.py
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/serializers.py#L39-L48
def _deserialize_datetime(self, data): """Take any values coming in as a datetime and deserialize them """ for key in data: if isinstance(data[key], dict): if data[key].get('type') == 'datetime': data[key] = \ datetime.datetime.fromtimestamp(data[key]['value']) return data
[ "def", "_deserialize_datetime", "(", "self", ",", "data", ")", ":", "for", "key", "in", "data", ":", "if", "isinstance", "(", "data", "[", "key", "]", ",", "dict", ")", ":", "if", "data", "[", "key", "]", ".", "get", "(", "'type'", ")", "==", "'datetime'", ":", "data", "[", "key", "]", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "data", "[", "key", "]", "[", "'value'", "]", ")", "return", "data" ]
Take any values coming in as a datetime and deserialize them
[ "Take", "any", "values", "coming", "in", "as", "a", "datetime", "and", "deserialize", "them" ]
python
train
36.9
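A worked example of the wire format this expects: any value shaped like {'type': 'datetime', 'value': <timestamp>} is turned back into a datetime (field names and values below are made up).

import datetime

record = {'name': 'job-1',
          'created_at': {'type': 'datetime', 'value': 1325376000}}

for key, value in record.items():
    if isinstance(value, dict) and value.get('type') == 'datetime':
        record[key] = datetime.datetime.fromtimestamp(value['value'])

print(record['created_at'])   # datetime for 2012-01-01 00:00:00 UTC, rendered in local time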
hvac/hvac
hvac/api/auth_methods/github.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/auth_methods/github.py#L191-L216
def login(self, token, use_token=True, mount_point=DEFAULT_MOUNT_POINT): """Login using GitHub access token. Supported methods: POST: /auth/{mount_point}/login. Produces: 200 application/json :param token: GitHub personal API token. :type token: str | unicode :param use_token: if True, uses the token in the response received from the auth request to set the "token" attribute on the the :py:meth:`hvac.adapters.Adapter` instance under the _adapater Client attribute. :type use_token: bool :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The JSON response of the login request. :rtype: dict """ params = { 'token': token, } api_path = '/v1/auth/{mount_point}/login'.format(mount_point=mount_point) return self._adapter.login( url=api_path, use_token=use_token, json=params, )
[ "def", "login", "(", "self", ",", "token", ",", "use_token", "=", "True", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'token'", ":", "token", ",", "}", "api_path", "=", "'/v1/auth/{mount_point}/login'", ".", "format", "(", "mount_point", "=", "mount_point", ")", "return", "self", ".", "_adapter", ".", "login", "(", "url", "=", "api_path", ",", "use_token", "=", "use_token", ",", "json", "=", "params", ",", ")" ]
Login using GitHub access token. Supported methods: POST: /auth/{mount_point}/login. Produces: 200 application/json :param token: GitHub personal API token. :type token: str | unicode :param use_token: if True, uses the token in the response received from the auth request to set the "token" attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client attribute. :type use_token: bool :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The JSON response of the login request. :rtype: dict
[ "Login", "using", "GitHub", "access", "token", "." ]
python
train
39
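Typical client-side usage of this method in recent hvac releases looks like the following; the Vault address and token are placeholders.

import hvac

client = hvac.Client(url='https://vault.example.com:8200')          # placeholder address
login_response = client.auth.github.login(token='<github personal access token>')

print(client.is_authenticated())            # True once the login call succeeded
print(login_response['auth']['policies'])   # policies attached to the issued Vault token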
trevisanj/f311
f311/explorer/gui/a_XFileMainWindow.py
https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/explorer/gui/a_XFileMainWindow.py#L329-L334
def _on_changed(self): """Slot for changed events""" page = self._get_page() if not page.flag_autosave: page.flag_changed = True self._update_gui_text_tabs()
[ "def", "_on_changed", "(", "self", ")", ":", "page", "=", "self", ".", "_get_page", "(", ")", "if", "not", "page", ".", "flag_autosave", ":", "page", ".", "flag_changed", "=", "True", "self", ".", "_update_gui_text_tabs", "(", ")" ]
Slot for changed events
[ "Slot", "for", "changed", "events" ]
python
train
34.166667
jtwhite79/pyemu
pyemu/utils/smp_utils.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/smp_utils.py#L132-L153
def date_parser(items): """ datetime parser to help load smp files Parameters ---------- items : iterable something or somethings to try to parse into datetimes Returns ------- dt : iterable the cast datetime things """ try: dt = datetime.strptime(items,"%d/%m/%Y %H:%M:%S") except Exception as e: try: dt = datetime.strptime(items,"%m/%d/%Y %H:%M:%S") except Exception as ee: raise Exception("error parsing datetime string" +\ " {0}: \n{1}\n{2}".format(str(items),str(e),str(ee))) return dt
[ "def", "date_parser", "(", "items", ")", ":", "try", ":", "dt", "=", "datetime", ".", "strptime", "(", "items", ",", "\"%d/%m/%Y %H:%M:%S\"", ")", "except", "Exception", "as", "e", ":", "try", ":", "dt", "=", "datetime", ".", "strptime", "(", "items", ",", "\"%m/%d/%Y %H:%M:%S\"", ")", "except", "Exception", "as", "ee", ":", "raise", "Exception", "(", "\"error parsing datetime string\"", "+", "\" {0}: \\n{1}\\n{2}\"", ".", "format", "(", "str", "(", "items", ")", ",", "str", "(", "e", ")", ",", "str", "(", "ee", ")", ")", ")", "return", "dt" ]
datetime parser to help load smp files Parameters ---------- items : iterable something or somethings to try to parse into datetimes Returns ------- dt : iterable the cast datetime things
[ "datetime", "parser", "to", "help", "load", "smp", "files" ]
python
train
27.636364
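The same day-first-then-month-first fallback can be written as a loop over formats; a small self-contained variant (the function name is mine, not pyemu's).

from datetime import datetime

def parse_smp_datetime(text):
    # Try day-first, then month-first, mirroring the fallback order above.
    for fmt in ("%d/%m/%Y %H:%M:%S", "%m/%d/%Y %H:%M:%S"):
        try:
            return datetime.strptime(text, fmt)
        except ValueError:
            pass
    raise ValueError("error parsing datetime string {0}".format(text))

print(parse_smp_datetime("25/12/2020 06:30:00"))   # 2020-12-25 06:30:00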
nuagenetworks/bambou
bambou/nurest_fetcher.py
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_fetcher.py#L481-L495
def _send_content(self, content, connection): """ Send a content array from the connection """ if connection: if connection.async: callback = connection.callbacks['remote'] if callback: callback(self, self.parent_object, content) self.current_connection.reset() self.current_connection = None else: return (self, self.parent_object, content)
[ "def", "_send_content", "(", "self", ",", "content", ",", "connection", ")", ":", "if", "connection", ":", "if", "connection", ".", "async", ":", "callback", "=", "connection", ".", "callbacks", "[", "'remote'", "]", "if", "callback", ":", "callback", "(", "self", ",", "self", ".", "parent_object", ",", "content", ")", "self", ".", "current_connection", ".", "reset", "(", ")", "self", ".", "current_connection", "=", "None", "else", ":", "return", "(", "self", ",", "self", ".", "parent_object", ",", "content", ")" ]
Send a content array from the connection
[ "Send", "a", "content", "array", "from", "the", "connection" ]
python
train
31.933333
calmjs/calmjs.parse
src/calmjs/parse/parsers/es5.py
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/parsers/es5.py#L490-L498
def p_new_expr(self, p): """new_expr : member_expr | NEW new_expr """ if len(p) == 2: p[0] = p[1] else: p[0] = self.asttypes.NewExpr(p[2]) p[0].setpos(p)
[ "def", "p_new_expr", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "else", ":", "p", "[", "0", "]", "=", "self", ".", "asttypes", ".", "NewExpr", "(", "p", "[", "2", "]", ")", "p", "[", "0", "]", ".", "setpos", "(", "p", ")" ]
new_expr : member_expr | NEW new_expr
[ "new_expr", ":", "member_expr", "|", "NEW", "new_expr" ]
python
train
25.888889
tensorflow/datasets
tensorflow_datasets/core/utils/py_utils.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L122-L143
def map_nested(function, data_struct, dict_only=False, map_tuple=False): """Apply a function recursively to each element of a nested data struct.""" # Could add support for more exotic data_struct, like OrderedDict if isinstance(data_struct, dict): return { k: map_nested(function, v, dict_only, map_tuple) for k, v in data_struct.items() } elif not dict_only: types = [list] if map_tuple: types.append(tuple) if isinstance(data_struct, tuple(types)): mapped = [map_nested(function, v, dict_only, map_tuple) for v in data_struct] if isinstance(data_struct, list): return mapped else: return tuple(mapped) # Singleton return function(data_struct)
[ "def", "map_nested", "(", "function", ",", "data_struct", ",", "dict_only", "=", "False", ",", "map_tuple", "=", "False", ")", ":", "# Could add support for more exotic data_struct, like OrderedDict", "if", "isinstance", "(", "data_struct", ",", "dict", ")", ":", "return", "{", "k", ":", "map_nested", "(", "function", ",", "v", ",", "dict_only", ",", "map_tuple", ")", "for", "k", ",", "v", "in", "data_struct", ".", "items", "(", ")", "}", "elif", "not", "dict_only", ":", "types", "=", "[", "list", "]", "if", "map_tuple", ":", "types", ".", "append", "(", "tuple", ")", "if", "isinstance", "(", "data_struct", ",", "tuple", "(", "types", ")", ")", ":", "mapped", "=", "[", "map_nested", "(", "function", ",", "v", ",", "dict_only", ",", "map_tuple", ")", "for", "v", "in", "data_struct", "]", "if", "isinstance", "(", "data_struct", ",", "list", ")", ":", "return", "mapped", "else", ":", "return", "tuple", "(", "mapped", ")", "# Singleton", "return", "function", "(", "data_struct", ")" ]
Apply a function recursively to each element of a nested data struct.
[ "Apply", "a", "function", "recursively", "to", "each", "element", "of", "a", "nested", "data", "struct", "." ]
python
train
33.136364
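Assuming the map_nested definition above is in scope, its behavior on a small structure looks like this (the values are arbitrary):

nested = {"a": [1, 2], "b": {"c": 3}}
print(map_nested(lambda x: x * 10, nested))
# {'a': [10, 20], 'b': {'c': 30}}

# Tuples are only descended into when map_tuple=True.
print(map_nested(lambda x: x + 1, {"shape": (2, 3)}, map_tuple=True))
# {'shape': (3, 4)}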
cuihantao/andes
andes/system.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/system.py#L187-L211
def setup(self): """ Set up the power system object by executing the following workflow: * Sort the loaded models to meet the initialization sequence * Create call strings for routines * Call the ``setup`` function of the loaded models * Assign addresses for the loaded models * Call ``dae.setup`` to assign memory for the numerical dae structure * Convert model parameters to the system base Returns ------- PowerSystem The instance of the PowerSystem """ self.devman.sort_device() self.call.setup() self.model_setup() self.xy_addr0() self.dae.setup() self.to_sysbase() return self
[ "def", "setup", "(", "self", ")", ":", "self", ".", "devman", ".", "sort_device", "(", ")", "self", ".", "call", ".", "setup", "(", ")", "self", ".", "model_setup", "(", ")", "self", ".", "xy_addr0", "(", ")", "self", ".", "dae", ".", "setup", "(", ")", "self", ".", "to_sysbase", "(", ")", "return", "self" ]
Set up the power system object by executing the following workflow: * Sort the loaded models to meet the initialization sequence * Create call strings for routines * Call the ``setup`` function of the loaded models * Assign addresses for the loaded models * Call ``dae.setup`` to assign memory for the numerical dae structure * Convert model parameters to the system base Returns ------- PowerSystem The instance of the PowerSystem
[ "Set", "up", "the", "power", "system", "object", "by", "executing", "the", "following", "workflow", ":" ]
python
train
29.28
ewels/MultiQC
multiqc/modules/rseqc/infer_experiment.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/rseqc/infer_experiment.py#L16-L89
def parse_reports(self): """ Find RSeQC infer_experiment reports and parse their data """ # Set up vars self.infer_exp = dict() regexes = { 'pe_sense': r"\"1\+\+,1--,2\+-,2-\+\": (\d\.\d+)", 'pe_antisense': r"\"1\+-,1-\+,2\+\+,2--\": (\d\.\d+)", 'se_sense': r"\"\+\+,--\": (\d\.\d+)", 'se_antisense': r"\+-,-\+\": (\d\.\d+)", 'failed': r"Fraction of reads failed to determine: (\d\.\d+)" } # Go through files and parse data using regexes for f in self.find_log_files('rseqc/infer_experiment'): d = dict() for k, r in regexes.items(): r_search = re.search(r, f['f'], re.MULTILINE) if r_search: d[k] = float(r_search.group(1)) if len(d) > 0: if f['s_name'] in self.infer_exp: log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name'])) self.add_data_source(f, section='infer_experiment') self.infer_exp[f['s_name']] = d # Filter to strip out ignored sample names self.infer_exp = self.ignore_samples(self.infer_exp) if len(self.infer_exp) > 0: # Write to file self.write_data_file(self.infer_exp, 'multiqc_rseqc_infer_experiment') # Merge PE and SE for plot pdata = dict() for s_name, vals in self.infer_exp.items(): pdata[s_name] = dict() for k, v in vals.items(): v *= 100.0 # Multiply to get percentage if k[:2] == 'pe' or k[:2] == 'se': k = k[3:] pdata[s_name][k] = v + pdata[s_name].get(k, 0) # Plot bar graph of groups keys = OrderedDict() keys['sense'] = {'name': "Sense"} keys['antisense'] = {'name': "Antisense"} keys['failed'] = {'name': "Undetermined"} # Config for the plot pconfig = { 'id': 'rseqc_infer_experiment_plot', 'title': 'RSeQC: Infer experiment', 'ylab': '% Tags', 'ymin': 0, 'ymax': 100, 'tt_percentages': False, 'ylab_format': '{value}%', 'cpswitch': False } self.add_section ( name = 'Infer experiment', anchor = 'rseqc-infer_experiment', description = '<a href="http://rseqc.sourceforge.net/#infer-experiment-py" target="_blank">Infer experiment</a>' \ " counts the percentage of reads and read pairs that match the strandedness of overlapping transcripts." \ " It can be used to infer whether RNA-seq library preps are stranded (sense or antisense).", plot = bargraph.plot(pdata, keys, pconfig) ) # Return number of samples found return len(self.infer_exp)
[ "def", "parse_reports", "(", "self", ")", ":", "# Set up vars", "self", ".", "infer_exp", "=", "dict", "(", ")", "regexes", "=", "{", "'pe_sense'", ":", "r\"\\\"1\\+\\+,1--,2\\+-,2-\\+\\\": (\\d\\.\\d+)\"", ",", "'pe_antisense'", ":", "r\"\\\"1\\+-,1-\\+,2\\+\\+,2--\\\": (\\d\\.\\d+)\"", ",", "'se_sense'", ":", "r\"\\\"\\+\\+,--\\\": (\\d\\.\\d+)\"", ",", "'se_antisense'", ":", "r\"\\+-,-\\+\\\": (\\d\\.\\d+)\"", ",", "'failed'", ":", "r\"Fraction of reads failed to determine: (\\d\\.\\d+)\"", "}", "# Go through files and parse data using regexes", "for", "f", "in", "self", ".", "find_log_files", "(", "'rseqc/infer_experiment'", ")", ":", "d", "=", "dict", "(", ")", "for", "k", ",", "r", "in", "regexes", ".", "items", "(", ")", ":", "r_search", "=", "re", ".", "search", "(", "r", ",", "f", "[", "'f'", "]", ",", "re", ".", "MULTILINE", ")", "if", "r_search", ":", "d", "[", "k", "]", "=", "float", "(", "r_search", ".", "group", "(", "1", ")", ")", "if", "len", "(", "d", ")", ">", "0", ":", "if", "f", "[", "'s_name'", "]", "in", "self", ".", "infer_exp", ":", "log", ".", "debug", "(", "\"Duplicate sample name found! Overwriting: {}\"", ".", "format", "(", "f", "[", "'s_name'", "]", ")", ")", "self", ".", "add_data_source", "(", "f", ",", "section", "=", "'infer_experiment'", ")", "self", ".", "infer_exp", "[", "f", "[", "'s_name'", "]", "]", "=", "d", "# Filter to strip out ignored sample names", "self", ".", "infer_exp", "=", "self", ".", "ignore_samples", "(", "self", ".", "infer_exp", ")", "if", "len", "(", "self", ".", "infer_exp", ")", ">", "0", ":", "# Write to file", "self", ".", "write_data_file", "(", "self", ".", "infer_exp", ",", "'multiqc_rseqc_infer_experiment'", ")", "# Merge PE and SE for plot", "pdata", "=", "dict", "(", ")", "for", "s_name", ",", "vals", "in", "self", ".", "infer_exp", ".", "items", "(", ")", ":", "pdata", "[", "s_name", "]", "=", "dict", "(", ")", "for", "k", ",", "v", "in", "vals", ".", "items", "(", ")", ":", "v", "*=", "100.0", "# Multiply to get percentage", "if", "k", "[", ":", "2", "]", "==", "'pe'", "or", "k", "[", ":", "2", "]", "==", "'se'", ":", "k", "=", "k", "[", "3", ":", "]", "pdata", "[", "s_name", "]", "[", "k", "]", "=", "v", "+", "pdata", "[", "s_name", "]", ".", "get", "(", "k", ",", "0", ")", "# Plot bar graph of groups", "keys", "=", "OrderedDict", "(", ")", "keys", "[", "'sense'", "]", "=", "{", "'name'", ":", "\"Sense\"", "}", "keys", "[", "'antisense'", "]", "=", "{", "'name'", ":", "\"Antisense\"", "}", "keys", "[", "'failed'", "]", "=", "{", "'name'", ":", "\"Undetermined\"", "}", "# Config for the plot", "pconfig", "=", "{", "'id'", ":", "'rseqc_infer_experiment_plot'", ",", "'title'", ":", "'RSeQC: Infer experiment'", ",", "'ylab'", ":", "'% Tags'", ",", "'ymin'", ":", "0", ",", "'ymax'", ":", "100", ",", "'tt_percentages'", ":", "False", ",", "'ylab_format'", ":", "'{value}%'", ",", "'cpswitch'", ":", "False", "}", "self", ".", "add_section", "(", "name", "=", "'Infer experiment'", ",", "anchor", "=", "'rseqc-infer_experiment'", ",", "description", "=", "'<a href=\"http://rseqc.sourceforge.net/#infer-experiment-py\" target=\"_blank\">Infer experiment</a>'", "\" counts the percentage of reads and read pairs that match the strandedness of overlapping transcripts.\"", "\" It can be used to infer whether RNA-seq library preps are stranded (sense or antisense).\"", ",", "plot", "=", "bargraph", ".", "plot", "(", "pdata", ",", "keys", ",", "pconfig", ")", ")", "# Return number of samples found", "return", "len", "(", "self", ".", 
"infer_exp", ")" ]
Find RSeQC infer_experiment reports and parse their data
[ "Find", "RSeQC", "infer_experiment", "reports", "and", "parse", "their", "data" ]
python
train
36.905405
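To make the regexes above concrete, here is one of them applied to a fabricated infer_experiment excerpt (the numbers and log wording are invented for illustration).

import re

log_text = (
    'This is PairEnd Data\n'
    'Fraction of reads failed to determine: 0.0123\n'
    'Fraction of reads explained by "1++,1--,2+-,2-+": 0.9500\n'
    'Fraction of reads explained by "1+-,1-+,2++,2--": 0.0377\n'
)

pe_sense = re.search(r"\"1\+\+,1--,2\+-,2-\+\": (\d\.\d+)", log_text, re.MULTILINE)
print(float(pe_sense.group(1)))   # 0.95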
IdentityPython/pysaml2
src/saml2/sigver.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/sigver.py#L436-L446
def cert_from_instance(instance): """ Find certificates that are part of an instance :param instance: An instance :return: possible empty list of certificates """ if instance.signature: if instance.signature.key_info: return cert_from_key_info(instance.signature.key_info, ignore_age=True) return []
[ "def", "cert_from_instance", "(", "instance", ")", ":", "if", "instance", ".", "signature", ":", "if", "instance", ".", "signature", ".", "key_info", ":", "return", "cert_from_key_info", "(", "instance", ".", "signature", ".", "key_info", ",", "ignore_age", "=", "True", ")", "return", "[", "]" ]
Find certificates that are part of an instance :param instance: An instance :return: possibly empty list of certificates
[ "Find", "certificates", "that", "are", "part", "of", "an", "instance" ]
python
train
33.818182
tensorflow/datasets
tensorflow_datasets/image/sun.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/sun.py#L65-L102
def _decode_image(fobj, session, filename): """Reads and decodes an image from a file object as a Numpy array. The SUN dataset contains images in several formats (despite the fact that all of them have .jpg extension). Some of them are: - BMP (RGB) - PNG (grayscale, RGBA, RGB interlaced) - JPEG (RGB) - GIF (1-frame RGB) Since TFDS assumes that all images have the same number of channels, we convert all of them to RGB. Args: fobj: File object to read from. session: TF session used to decode the images. filename: Filename of the original image in the archive. Returns: Numpy array with shape (height, width, channels). """ buf = fobj.read() image = tfds.core.lazy_imports.cv2.imdecode( np.fromstring(buf, dtype=np.uint8), flags=3) # Note: Converts to RGB. if image is None: logging.warning( "Image %s could not be decoded by OpenCV, falling back to TF", filename) try: image = tf.image.decode_image(buf, channels=3) image = session.run(image) except tf.errors.InvalidArgumentError: logging.fatal("Image %s could not be decoded by Tensorflow", filename) # The GIF images contain a single frame. if len(image.shape) == 4: # rank=4 -> rank=3 image = image.reshape(image.shape[1:]) return image
[ "def", "_decode_image", "(", "fobj", ",", "session", ",", "filename", ")", ":", "buf", "=", "fobj", ".", "read", "(", ")", "image", "=", "tfds", ".", "core", ".", "lazy_imports", ".", "cv2", ".", "imdecode", "(", "np", ".", "fromstring", "(", "buf", ",", "dtype", "=", "np", ".", "uint8", ")", ",", "flags", "=", "3", ")", "# Note: Converts to RGB.", "if", "image", "is", "None", ":", "logging", ".", "warning", "(", "\"Image %s could not be decoded by OpenCV, falling back to TF\"", ",", "filename", ")", "try", ":", "image", "=", "tf", ".", "image", ".", "decode_image", "(", "buf", ",", "channels", "=", "3", ")", "image", "=", "session", ".", "run", "(", "image", ")", "except", "tf", ".", "errors", ".", "InvalidArgumentError", ":", "logging", ".", "fatal", "(", "\"Image %s could not be decoded by Tensorflow\"", ",", "filename", ")", "# The GIF images contain a single frame.", "if", "len", "(", "image", ".", "shape", ")", "==", "4", ":", "# rank=4 -> rank=3", "image", "=", "image", ".", "reshape", "(", "image", ".", "shape", "[", "1", ":", "]", ")", "return", "image" ]
Reads and decodes an image from a file object as a Numpy array. The SUN dataset contains images in several formats (despite the fact that all of them have .jpg extension). Some of them are: - BMP (RGB) - PNG (grayscale, RGBA, RGB interlaced) - JPEG (RGB) - GIF (1-frame RGB) Since TFDS assumes that all images have the same number of channels, we convert all of them to RGB. Args: fobj: File object to read from. session: TF session used to decode the images. filename: Filename of the original image in the archive. Returns: Numpy array with shape (height, width, channels).
[ "Reads", "and", "decodes", "an", "image", "from", "a", "file", "object", "as", "a", "Numpy", "array", "." ]
python
train
33.5
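The OpenCV-first decoding step reduces to a few calls; a simplified sketch that only covers the OpenCV path (it uses cv2.IMREAD_COLOR instead of the flags=3 above, and the input file name is hypothetical).

import numpy as np
import cv2   # opencv-python

def decode_rgb(buf):
    # imdecode returns None rather than raising when it cannot handle the data.
    image = cv2.imdecode(np.frombuffer(buf, dtype=np.uint8), flags=cv2.IMREAD_COLOR)
    if image is None:
        raise ValueError("OpenCV could not decode the buffer")
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)   # OpenCV decodes to BGR order

with open("example.png", "rb") as f:   # hypothetical input file
    print(decode_rgb(f.read()).shape)  # (height, width, 3)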
HewlettPackard/python-hpOneView
hpOneView/oneview_client.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/oneview_client.py#L398-L407
def scopes(self): """ Gets the Scopes API client. Returns: Scopes: """ if not self.__scopes: self.__scopes = Scopes(self.__connection) return self.__scopes
[ "def", "scopes", "(", "self", ")", ":", "if", "not", "self", ".", "__scopes", ":", "self", ".", "__scopes", "=", "Scopes", "(", "self", ".", "__connection", ")", "return", "self", ".", "__scopes" ]
Gets the Scopes API client. Returns: Scopes:
[ "Gets", "the", "Scopes", "API", "client", "." ]
python
train
21.9
awslabs/serverless-application-model
samtranslator/sdk/parameter.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/sdk/parameter.py#L19-L59
def add_default_parameter_values(self, sam_template): """ Method to read default values for template parameters and merge with user supplied values. Example: If the template contains the following parameters defined Parameters: Param1: Type: String Default: default_value Param2: Type: String Default: default_value And, the user explicitly provided the following parameter values: { Param2: "new value" } then, this method will grab default value for Param1 and return the following result: { Param1: "default_value", Param2: "new value" } :param dict sam_template: SAM template :param dict parameter_values: Dictionary of parameter values provided by the user :return dict: Merged parameter values """ parameter_definition = sam_template.get("Parameters", None) if not parameter_definition or not isinstance(parameter_definition, dict): return self.parameter_values for param_name, value in parameter_definition.items(): if param_name not in self.parameter_values and isinstance(value, dict) and "Default" in value: self.parameter_values[param_name] = value["Default"]
[ "def", "add_default_parameter_values", "(", "self", ",", "sam_template", ")", ":", "parameter_definition", "=", "sam_template", ".", "get", "(", "\"Parameters\"", ",", "None", ")", "if", "not", "parameter_definition", "or", "not", "isinstance", "(", "parameter_definition", ",", "dict", ")", ":", "return", "self", ".", "parameter_values", "for", "param_name", ",", "value", "in", "parameter_definition", ".", "items", "(", ")", ":", "if", "param_name", "not", "in", "self", ".", "parameter_values", "and", "isinstance", "(", "value", ",", "dict", ")", "and", "\"Default\"", "in", "value", ":", "self", ".", "parameter_values", "[", "param_name", "]", "=", "value", "[", "\"Default\"", "]" ]
Method to read default values for template parameters and merge with user supplied values. Example: If the template contains the following parameters defined Parameters: Param1: Type: String Default: default_value Param2: Type: String Default: default_value And, the user explicitly provided the following parameter values: { Param2: "new value" } then, this method will grab default value for Param1 and return the following result: { Param1: "default_value", Param2: "new value" } :param dict sam_template: SAM template :param dict parameter_values: Dictionary of parameter values provided by the user :return dict: Merged parameter values
[ "Method", "to", "read", "default", "values", "for", "template", "parameters", "and", "merge", "with", "user", "supplied", "values", "." ]
python
train
32.878049
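The merge semantics documented above, shown on a tiny template (parameter names and values are invented): user-supplied values win and template defaults only fill the gaps.

template = {
    "Parameters": {
        "Stage":   {"Type": "String", "Default": "dev"},
        "Memory":  {"Type": "Number", "Default": 128},
        "Handler": {"Type": "String"},               # no default defined
    }
}
provided = {"Memory": 512}

merged = dict(provided)
for name, spec in template["Parameters"].items():
    if name not in merged and isinstance(spec, dict) and "Default" in spec:
        merged[name] = spec["Default"]

print(merged)   # {'Memory': 512, 'Stage': 'dev'} - 'Handler' stays absent (no default)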