column            type           observed range
repo              stringlengths  7 - 55
path              stringlengths  4 - 223
url               stringlengths  87 - 315
code              stringlengths  75 - 104k
code_tokens       sequence
docstring         stringlengths  1 - 46.9k
docstring_tokens  sequence
language          stringclasses  1 value
partition         stringclasses  3 values
avg_line_len      float64        7.91 - 980
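Each record in the dump that follows carries these ten fields. As an illustration only (not the dataset's actual storage format), the first record below (expfactory/expfactory) could be represented as a Python dict like this, with the long fields abridged:

    # Sketch of a single record as a Python dict. Field names and values are
    # copied from the first record below; long fields are truncated for brevity.
    record = {
        "repo": "expfactory/expfactory",
        "path": "expfactory/database/relational.py",
        "url": ("https://github.com/expfactory/expfactory/blob/"
                "27ce6cc93e17231df8a8024f18e631336afd3501/"
                "expfactory/database/relational.py#L177-L213"),
        "code": "def save_data(self,session, exp_id, content): ...",   # full source text of the function
        "code_tokens": ["def", "save_data", "(", "self", ","],          # truncated token list
        "docstring": "save data will obtain the current subid from the session, ...",
        "docstring_tokens": ["save", "data", "will", "obtain", "the"],  # truncated token list
        "language": "python",
        "partition": "train",
        "avg_line_len": 39.945946,
    }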
expfactory/expfactory
expfactory/database/relational.py
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/database/relational.py#L177-L213
def save_data(self,session, exp_id, content): '''save data will obtain the current subid from the session, and save it depending on the database type. Currently we just support flat files''' from expfactory.database.models import ( Participant, Result ) subid = session.get('subid') token = session.get('token') self.logger.info('Saving data for subid %s' % subid) # We only attempt save if there is a subject id, set at start if subid is not None: p = Participant.query.filter(Participant.id == subid).first() # better query here # Does if self.headless and p.token != token: self.logger.warning('%s attempting to use mismatched token [%s] skipping save' %(p.id, token)) elif self.headless and p.token.endswith(('finished','revoked')): self.logger.warning('%s attempting to use expired token [%s] skipping save' %(p.id, token)) else: # Preference is to save data under 'data', otherwise do all of it if "data" in content: content = content['data'] result = Result(data=content, exp_id=exp_id, participant_id=p.id) # check if changes from str/int # Create and save the result self.session.add(result) p.results.append(result) self.session.commit() self.logger.info("Save [participant] %s [result] %s" %(p, result))
[ "def", "save_data", "(", "self", ",", "session", ",", "exp_id", ",", "content", ")", ":", "from", "expfactory", ".", "database", ".", "models", "import", "(", "Participant", ",", "Result", ")", "subid", "=", "session", ".", "get", "(", "'subid'", ")", "token", "=", "session", ".", "get", "(", "'token'", ")", "self", ".", "logger", ".", "info", "(", "'Saving data for subid %s'", "%", "subid", ")", "# We only attempt save if there is a subject id, set at start", "if", "subid", "is", "not", "None", ":", "p", "=", "Participant", ".", "query", ".", "filter", "(", "Participant", ".", "id", "==", "subid", ")", ".", "first", "(", ")", "# better query here", "# Does ", "if", "self", ".", "headless", "and", "p", ".", "token", "!=", "token", ":", "self", ".", "logger", ".", "warning", "(", "'%s attempting to use mismatched token [%s] skipping save'", "%", "(", "p", ".", "id", ",", "token", ")", ")", "elif", "self", ".", "headless", "and", "p", ".", "token", ".", "endswith", "(", "(", "'finished'", ",", "'revoked'", ")", ")", ":", "self", ".", "logger", ".", "warning", "(", "'%s attempting to use expired token [%s] skipping save'", "%", "(", "p", ".", "id", ",", "token", ")", ")", "else", ":", "# Preference is to save data under 'data', otherwise do all of it", "if", "\"data\"", "in", "content", ":", "content", "=", "content", "[", "'data'", "]", "result", "=", "Result", "(", "data", "=", "content", ",", "exp_id", "=", "exp_id", ",", "participant_id", "=", "p", ".", "id", ")", "# check if changes from str/int", "# Create and save the result", "self", ".", "session", ".", "add", "(", "result", ")", "p", ".", "results", ".", "append", "(", "result", ")", "self", ".", "session", ".", "commit", "(", ")", "self", ".", "logger", ".", "info", "(", "\"Save [participant] %s [result] %s\"", "%", "(", "p", ",", "result", ")", ")" ]
save data will obtain the current subid from the session, and save it depending on the database type. Currently we just support flat files
[ "save", "data", "will", "obtain", "the", "current", "subid", "from", "the", "session", "and", "save", "it", "depending", "on", "the", "database", "type", ".", "Currently", "we", "just", "support", "flat", "files" ]
python
train
39.945946
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L43-L76
def subset_by_supported(input_file, get_coords, calls_by_name, work_dir, data, headers=("#",)): """Limit CNVkit input to calls with support from another caller. get_coords is a function that return chrom, start, end from a line of the input_file, allowing handling of multiple input file types. """ support_files = [(c, tz.get_in([c, "vrn_file"], calls_by_name)) for c in convert.SUBSET_BY_SUPPORT["cnvkit"]] support_files = [(c, f) for (c, f) in support_files if f and vcfutils.vcf_has_variants(f)] if len(support_files) == 0: return input_file else: out_file = os.path.join(work_dir, "%s-havesupport%s" % utils.splitext_plus(os.path.basename(input_file))) if not utils.file_uptodate(out_file, input_file): input_bed = _input_to_bed(input_file, work_dir, get_coords, headers) pass_coords = set([]) with file_transaction(data, out_file) as tx_out_file: support_beds = " ".join([_sv_vcf_to_bed(f, c, out_file) for c, f in support_files]) tmp_cmp_bed = "%s-intersectwith.bed" % utils.splitext_plus(tx_out_file)[0] cmd = "bedtools intersect -wa -f 0.5 -r -a {input_bed} -b {support_beds} > {tmp_cmp_bed}" do.run(cmd.format(**locals()), "Intersect CNVs with support files") for r in pybedtools.BedTool(tmp_cmp_bed): pass_coords.add((str(r.chrom), str(r.start), str(r.stop))) with open(input_file) as in_handle: with open(tx_out_file, "w") as out_handle: for line in in_handle: passes = True if not line.startswith(headers): passes = get_coords(line) in pass_coords if passes: out_handle.write(line) return out_file
[ "def", "subset_by_supported", "(", "input_file", ",", "get_coords", ",", "calls_by_name", ",", "work_dir", ",", "data", ",", "headers", "=", "(", "\"#\"", ",", ")", ")", ":", "support_files", "=", "[", "(", "c", ",", "tz", ".", "get_in", "(", "[", "c", ",", "\"vrn_file\"", "]", ",", "calls_by_name", ")", ")", "for", "c", "in", "convert", ".", "SUBSET_BY_SUPPORT", "[", "\"cnvkit\"", "]", "]", "support_files", "=", "[", "(", "c", ",", "f", ")", "for", "(", "c", ",", "f", ")", "in", "support_files", "if", "f", "and", "vcfutils", ".", "vcf_has_variants", "(", "f", ")", "]", "if", "len", "(", "support_files", ")", "==", "0", ":", "return", "input_file", "else", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-havesupport%s\"", "%", "utils", ".", "splitext_plus", "(", "os", ".", "path", ".", "basename", "(", "input_file", ")", ")", ")", "if", "not", "utils", ".", "file_uptodate", "(", "out_file", ",", "input_file", ")", ":", "input_bed", "=", "_input_to_bed", "(", "input_file", ",", "work_dir", ",", "get_coords", ",", "headers", ")", "pass_coords", "=", "set", "(", "[", "]", ")", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", ":", "support_beds", "=", "\" \"", ".", "join", "(", "[", "_sv_vcf_to_bed", "(", "f", ",", "c", ",", "out_file", ")", "for", "c", ",", "f", "in", "support_files", "]", ")", "tmp_cmp_bed", "=", "\"%s-intersectwith.bed\"", "%", "utils", ".", "splitext_plus", "(", "tx_out_file", ")", "[", "0", "]", "cmd", "=", "\"bedtools intersect -wa -f 0.5 -r -a {input_bed} -b {support_beds} > {tmp_cmp_bed}\"", "do", ".", "run", "(", "cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"Intersect CNVs with support files\"", ")", "for", "r", "in", "pybedtools", ".", "BedTool", "(", "tmp_cmp_bed", ")", ":", "pass_coords", ".", "add", "(", "(", "str", "(", "r", ".", "chrom", ")", ",", "str", "(", "r", ".", "start", ")", ",", "str", "(", "r", ".", "stop", ")", ")", ")", "with", "open", "(", "input_file", ")", "as", "in_handle", ":", "with", "open", "(", "tx_out_file", ",", "\"w\"", ")", "as", "out_handle", ":", "for", "line", "in", "in_handle", ":", "passes", "=", "True", "if", "not", "line", ".", "startswith", "(", "headers", ")", ":", "passes", "=", "get_coords", "(", "line", ")", "in", "pass_coords", "if", "passes", ":", "out_handle", ".", "write", "(", "line", ")", "return", "out_file" ]
Limit CNVkit input to calls with support from another caller. get_coords is a function that return chrom, start, end from a line of the input_file, allowing handling of multiple input file types.
[ "Limit", "CNVkit", "input", "to", "calls", "with", "support", "from", "another", "caller", "." ]
python
train
57.735294
kivy/python-for-android
pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/compiler.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/compiler.py#L125-L128
def add_special(self, name): """Register a special name like `loop`.""" self.undeclared.discard(name) self.declared.add(name)
[ "def", "add_special", "(", "self", ",", "name", ")", ":", "self", ".", "undeclared", ".", "discard", "(", "name", ")", "self", ".", "declared", ".", "add", "(", "name", ")" ]
Register a special name like `loop`.
[ "Register", "a", "special", "name", "like", "loop", "." ]
python
train
36.5
kontron/python-aardvark
pyaardvark/aardvark.py
https://github.com/kontron/python-aardvark/blob/9827f669fbdc5bceb98e7d08a294b4e4e455d0d5/pyaardvark/aardvark.py#L280-L286
def enable_i2c(self): """Set this to `True` to enable the hardware I2C interface. If set to `False` the hardware interface will be disabled and its pins (SDA and SCL) can be used as GPIOs. """ config = self._interface_configuration(CONFIG_QUERY) return config == CONFIG_GPIO_I2C or config == CONFIG_SPI_I2C
[ "def", "enable_i2c", "(", "self", ")", ":", "config", "=", "self", ".", "_interface_configuration", "(", "CONFIG_QUERY", ")", "return", "config", "==", "CONFIG_GPIO_I2C", "or", "config", "==", "CONFIG_SPI_I2C" ]
Set this to `True` to enable the hardware I2C interface. If set to `False` the hardware interface will be disabled and its pins (SDA and SCL) can be used as GPIOs.
[ "Set", "this", "to", "True", "to", "enable", "the", "hardware", "I2C", "interface", ".", "If", "set", "to", "False", "the", "hardware", "interface", "will", "be", "disabled", "and", "its", "pins", "(", "SDA", "and", "SCL", ")", "can", "be", "used", "as", "GPIOs", "." ]
python
train
49.714286
siemens/django-dingos
dingos/models.py
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/models.py#L1822-L1836
def get_user_data_iobject(user=None,group=None,data_kind=DINGOS_USER_DATA_TYPE_NAME): """ Returns either stored settings of a given user or default settings. This behavior reflects the need for views to have some settings at hand when running. The settings are returned as dict object. """ logger.debug("Get user settings called") if not user.is_authenticated(): user = None try: user_config = UserData.objects.get(user=user,group=group,data_kind=data_kind) return user_config.identifier.latest except: return None
[ "def", "get_user_data_iobject", "(", "user", "=", "None", ",", "group", "=", "None", ",", "data_kind", "=", "DINGOS_USER_DATA_TYPE_NAME", ")", ":", "logger", ".", "debug", "(", "\"Get user settings called\"", ")", "if", "not", "user", ".", "is_authenticated", "(", ")", ":", "user", "=", "None", "try", ":", "user_config", "=", "UserData", ".", "objects", ".", "get", "(", "user", "=", "user", ",", "group", "=", "group", ",", "data_kind", "=", "data_kind", ")", "return", "user_config", ".", "identifier", ".", "latest", "except", ":", "return", "None" ]
Returns either stored settings of a given user or default settings. This behavior reflects the need for views to have some settings at hand when running. The settings are returned as dict object.
[ "Returns", "either", "stored", "settings", "of", "a", "given", "user", "or", "default", "settings", ".", "This", "behavior", "reflects", "the", "need", "for", "views", "to", "have", "some", "settings", "at", "hand", "when", "running", ".", "The", "settings", "are", "returned", "as", "dict", "object", "." ]
python
train
41.466667
limix/limix-core
limix_core/util/cobj.py
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/util/cobj.py#L42-L54
def cached_idxs(method): """ this function is used as a decorator for caching """ def method_wrapper(self,*args,**kwargs): tail = '_'.join(str(idx) for idx in args) _cache_attr_name = '_cache_'+method.__name__+'_'+tail _bool_attr_name = '_cached_'+method.__name__+'_'+tail is_cached = getattr(self,_bool_attr_name) if not is_cached: result = method(self, *args, **kwargs) setattr(self, _cache_attr_name, result) setattr(self, _bool_attr_name, True) return getattr(self,_cache_attr_name) return method_wrapper
[ "def", "cached_idxs", "(", "method", ")", ":", "def", "method_wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "tail", "=", "'_'", ".", "join", "(", "str", "(", "idx", ")", "for", "idx", "in", "args", ")", "_cache_attr_name", "=", "'_cache_'", "+", "method", ".", "__name__", "+", "'_'", "+", "tail", "_bool_attr_name", "=", "'_cached_'", "+", "method", ".", "__name__", "+", "'_'", "+", "tail", "is_cached", "=", "getattr", "(", "self", ",", "_bool_attr_name", ")", "if", "not", "is_cached", ":", "result", "=", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "setattr", "(", "self", ",", "_cache_attr_name", ",", "result", ")", "setattr", "(", "self", ",", "_bool_attr_name", ",", "True", ")", "return", "getattr", "(", "self", ",", "_cache_attr_name", ")", "return", "method_wrapper" ]
this function is used as a decorator for caching
[ "this", "function", "is", "used", "as", "a", "decorator", "for", "caching" ]
python
train
45.615385
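A minimal usage sketch for the cached_idxs decorator in the record above. The import path is inferred from the record's path field and the Demo class is hypothetical; note that the wrapper reads the _cached_<method>_<args> flag with getattr() and no default, so the flag has to exist before the first call.

    from limix_core.util.cobj import cached_idxs  # module path taken from the record above

    class Demo(object):
        def __init__(self):
            # the wrapper calls getattr(self, '_cached_square_3') with no
            # default, so the flag must be initialised up front
            self._cached_square_3 = False

        @cached_idxs
        def square(self, idx):
            print('computing %d' % idx)
            return idx * idx

    d = Demo()
    d.square(3)   # prints "computing 3" and stores 9 in d._cache_square_3
    d.square(3)   # returns the cached 9 without recomputing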
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/text.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/text.py#L363-L399
def indent(instr,nspaces=4, ntabs=0, flatten=False): """Indent a string a given number of spaces or tabstops. indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces. Parameters ---------- instr : basestring The string to be indented. nspaces : int (default: 4) The number of spaces to be indented. ntabs : int (default: 0) The number of tabs to be indented. flatten : bool (default: False) Whether to scrub existing indentation. If True, all lines will be aligned to the same indentation. If False, existing indentation will be strictly increased. Returns ------- str|unicode : string indented by ntabs and nspaces. """ if instr is None: return ind = '\t'*ntabs+' '*nspaces if flatten: pat = re.compile(r'^\s*', re.MULTILINE) else: pat = re.compile(r'^', re.MULTILINE) outstr = re.sub(pat, ind, instr) if outstr.endswith(os.linesep+ind): return outstr[:-len(ind)] else: return outstr
[ "def", "indent", "(", "instr", ",", "nspaces", "=", "4", ",", "ntabs", "=", "0", ",", "flatten", "=", "False", ")", ":", "if", "instr", "is", "None", ":", "return", "ind", "=", "'\\t'", "*", "ntabs", "+", "' '", "*", "nspaces", "if", "flatten", ":", "pat", "=", "re", ".", "compile", "(", "r'^\\s*'", ",", "re", ".", "MULTILINE", ")", "else", ":", "pat", "=", "re", ".", "compile", "(", "r'^'", ",", "re", ".", "MULTILINE", ")", "outstr", "=", "re", ".", "sub", "(", "pat", ",", "ind", ",", "instr", ")", "if", "outstr", ".", "endswith", "(", "os", ".", "linesep", "+", "ind", ")", ":", "return", "outstr", "[", ":", "-", "len", "(", "ind", ")", "]", "else", ":", "return", "outstr" ]
Indent a string a given number of spaces or tabstops. indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces. Parameters ---------- instr : basestring The string to be indented. nspaces : int (default: 4) The number of spaces to be indented. ntabs : int (default: 0) The number of tabs to be indented. flatten : bool (default: False) Whether to scrub existing indentation. If True, all lines will be aligned to the same indentation. If False, existing indentation will be strictly increased. Returns ------- str|unicode : string indented by ntabs and nspaces.
[ "Indent", "a", "string", "a", "given", "number", "of", "spaces", "or", "tabstops", "." ]
python
test
27.702703
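Two quick calls against the indent function in the record above, with hypothetical input strings; the results follow directly from the code shown.

    indent('a\nb', nspaces=2)
    # -> '  a\n  b'   (two spaces prepended to every line)

    indent('  a\n    b', nspaces=2, flatten=True)
    # -> '  a\n  b'   (existing leading whitespace is scrubbed first)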
bkg/greenwich
greenwich/tile.py
https://github.com/bkg/greenwich/blob/57ec644dadfe43ce0ecf2cfd32a2de71e0c8c141/greenwich/tile.py#L20-L40
def to_lonlat(xtile, ytile, zoom): """Returns a tuple of (longitude, latitude) from a map tile xyz coordinate. See http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Lon..2Flat._to_tile_numbers_2 Arguments: xtile - x tile location as int or float ytile - y tile location as int or float zoom - zoom level as int or float """ n = 2.0 ** zoom lon = xtile / n * 360.0 - 180.0 # Caculate latitude in radians and convert to degrees constrained from -90 # to 90. Values too big for tile coordinate pairs are invalid and could # overflow. try: lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n))) except OverflowError: raise ValueError('Invalid tile coordinate for zoom level %d' % zoom) lat = math.degrees(lat_rad) return lon, lat
[ "def", "to_lonlat", "(", "xtile", ",", "ytile", ",", "zoom", ")", ":", "n", "=", "2.0", "**", "zoom", "lon", "=", "xtile", "/", "n", "*", "360.0", "-", "180.0", "# Caculate latitude in radians and convert to degrees constrained from -90", "# to 90. Values too big for tile coordinate pairs are invalid and could", "# overflow.", "try", ":", "lat_rad", "=", "math", ".", "atan", "(", "math", ".", "sinh", "(", "math", ".", "pi", "*", "(", "1", "-", "2", "*", "ytile", "/", "n", ")", ")", ")", "except", "OverflowError", ":", "raise", "ValueError", "(", "'Invalid tile coordinate for zoom level %d'", "%", "zoom", ")", "lat", "=", "math", ".", "degrees", "(", "lat_rad", ")", "return", "lon", ",", "lat" ]
Returns a tuple of (longitude, latitude) from a map tile xyz coordinate. See http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Lon..2Flat._to_tile_numbers_2 Arguments: xtile - x tile location as int or float ytile - y tile location as int or float zoom - zoom level as int or float
[ "Returns", "a", "tuple", "of", "(", "longitude", "latitude", ")", "from", "a", "map", "tile", "xyz", "coordinate", "." ]
python
test
38.095238
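A worked example for the to_lonlat record above. The import path mirrors the record's path field; the numbers follow from the formula in the code.

    from greenwich.tile import to_lonlat  # module path taken from the record above

    # At zoom 0 the world is a single tile; its (0, 0) corner maps to the
    # north-west limit of the Web Mercator projection.
    lon, lat = to_lonlat(0, 0, 0)
    # lon == -180.0, lat is approximately 85.0511 degrees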
gmr/queries
queries/session.py
https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L273-L307
def _connect(self): """Connect to PostgreSQL, either by reusing a connection from the pool if possible, or by creating the new connection. :rtype: psycopg2.extensions.connection :raises: pool.NoIdleConnectionsError """ # Attempt to get a cached connection from the connection pool try: connection = self._pool_manager.get(self.pid, self) LOGGER.debug("Re-using connection for %s", self.pid) except pool.NoIdleConnectionsError: if self._pool_manager.is_full(self.pid): raise # Create a new PostgreSQL connection kwargs = utils.uri_to_kwargs(self._uri) LOGGER.debug("Creating a new connection for %s", self.pid) connection = self._psycopg2_connect(kwargs) self._pool_manager.add(self.pid, connection) self._pool_manager.lock(self.pid, connection, self) # Added in because psycopg2ct connects and leaves the connection in # a weird state: consts.STATUS_DATESTYLE, returning from # Connection._setup without setting the state as const.STATUS_OK if utils.PYPY: connection.reset() # Register the custom data types self._register_unicode(connection) self._register_uuid(connection) return connection
[ "def", "_connect", "(", "self", ")", ":", "# Attempt to get a cached connection from the connection pool", "try", ":", "connection", "=", "self", ".", "_pool_manager", ".", "get", "(", "self", ".", "pid", ",", "self", ")", "LOGGER", ".", "debug", "(", "\"Re-using connection for %s\"", ",", "self", ".", "pid", ")", "except", "pool", ".", "NoIdleConnectionsError", ":", "if", "self", ".", "_pool_manager", ".", "is_full", "(", "self", ".", "pid", ")", ":", "raise", "# Create a new PostgreSQL connection", "kwargs", "=", "utils", ".", "uri_to_kwargs", "(", "self", ".", "_uri", ")", "LOGGER", ".", "debug", "(", "\"Creating a new connection for %s\"", ",", "self", ".", "pid", ")", "connection", "=", "self", ".", "_psycopg2_connect", "(", "kwargs", ")", "self", ".", "_pool_manager", ".", "add", "(", "self", ".", "pid", ",", "connection", ")", "self", ".", "_pool_manager", ".", "lock", "(", "self", ".", "pid", ",", "connection", ",", "self", ")", "# Added in because psycopg2ct connects and leaves the connection in", "# a weird state: consts.STATUS_DATESTYLE, returning from", "# Connection._setup without setting the state as const.STATUS_OK", "if", "utils", ".", "PYPY", ":", "connection", ".", "reset", "(", ")", "# Register the custom data types", "self", ".", "_register_unicode", "(", "connection", ")", "self", ".", "_register_uuid", "(", "connection", ")", "return", "connection" ]
Connect to PostgreSQL, either by reusing a connection from the pool if possible, or by creating the new connection. :rtype: psycopg2.extensions.connection :raises: pool.NoIdleConnectionsError
[ "Connect", "to", "PostgreSQL", "either", "by", "reusing", "a", "connection", "from", "the", "pool", "if", "possible", "or", "by", "creating", "the", "new", "connection", "." ]
python
train
38.885714
Crunch-io/crunch-cube
src/cr/cube/measures/wishart_pairwise_significance.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/measures/wishart_pairwise_significance.py#L86-L90
def _factory(slice_, axis, weighted): """return subclass for PairwiseSignificance, based on slice dimension types.""" if slice_.dim_types[0] == DT.MR_SUBVAR: return _MrXCatPairwiseSignificance(slice_, axis, weighted) return _CatXCatPairwiseSignificance(slice_, axis, weighted)
[ "def", "_factory", "(", "slice_", ",", "axis", ",", "weighted", ")", ":", "if", "slice_", ".", "dim_types", "[", "0", "]", "==", "DT", ".", "MR_SUBVAR", ":", "return", "_MrXCatPairwiseSignificance", "(", "slice_", ",", "axis", ",", "weighted", ")", "return", "_CatXCatPairwiseSignificance", "(", "slice_", ",", "axis", ",", "weighted", ")" ]
return subclass for PairwiseSignificance, based on slice dimension types.
[ "return", "subclass", "for", "PairwiseSignificance", "based", "on", "slice", "dimension", "types", "." ]
python
train
61.6
PaulHancock/Aegean
AegeanTools/BANE.py
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/BANE.py#L467-L491
def write_fits(data, header, file_name): """ Combine data and a fits header to write a fits file. Parameters ---------- data : numpy.ndarray The data to be written. header : astropy.io.fits.hduheader The header for the fits file. file_name : string The file to write Returns ------- None """ hdu = fits.PrimaryHDU(data) hdu.header = header hdulist = fits.HDUList([hdu]) hdulist.writeto(file_name, overwrite=True) logging.info("Wrote {0}".format(file_name)) return
[ "def", "write_fits", "(", "data", ",", "header", ",", "file_name", ")", ":", "hdu", "=", "fits", ".", "PrimaryHDU", "(", "data", ")", "hdu", ".", "header", "=", "header", "hdulist", "=", "fits", ".", "HDUList", "(", "[", "hdu", "]", ")", "hdulist", ".", "writeto", "(", "file_name", ",", "overwrite", "=", "True", ")", "logging", ".", "info", "(", "\"Wrote {0}\"", ".", "format", "(", "file_name", ")", ")", "return" ]
Combine data and a fits header to write a fits file. Parameters ---------- data : numpy.ndarray The data to be written. header : astropy.io.fits.hduheader The header for the fits file. file_name : string The file to write Returns ------- None
[ "Combine", "data", "and", "a", "fits", "header", "to", "write", "a", "fits", "file", "." ]
python
train
21.4
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_heliplane.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_heliplane.py#L102-L130
def update_channels(self): '''update which channels provide input''' self.interlock_channel = -1 self.override_channel = -1 self.zero_I_channel = -1 self.no_vtol_channel = -1 # output channels self.rsc_out_channel = 9 self.fwd_thr_channel = 10 for ch in range(1,16): option = self.get_mav_param("RC%u_OPTION" % ch, 0) if option == 32: self.interlock_channel = ch; elif option == 63: self.override_channel = ch; elif option == 64: self.zero_I_channel = ch; elif option == 65: self.override_channel = ch; elif option == 66: self.no_vtol_channel = ch; function = self.get_mav_param("SERVO%u_FUNCTION" % ch, 0) if function == 32: self.rsc_out_channel = ch if function == 70: self.fwd_thr_channel = ch
[ "def", "update_channels", "(", "self", ")", ":", "self", ".", "interlock_channel", "=", "-", "1", "self", ".", "override_channel", "=", "-", "1", "self", ".", "zero_I_channel", "=", "-", "1", "self", ".", "no_vtol_channel", "=", "-", "1", "# output channels", "self", ".", "rsc_out_channel", "=", "9", "self", ".", "fwd_thr_channel", "=", "10", "for", "ch", "in", "range", "(", "1", ",", "16", ")", ":", "option", "=", "self", ".", "get_mav_param", "(", "\"RC%u_OPTION\"", "%", "ch", ",", "0", ")", "if", "option", "==", "32", ":", "self", ".", "interlock_channel", "=", "ch", "elif", "option", "==", "63", ":", "self", ".", "override_channel", "=", "ch", "elif", "option", "==", "64", ":", "self", ".", "zero_I_channel", "=", "ch", "elif", "option", "==", "65", ":", "self", ".", "override_channel", "=", "ch", "elif", "option", "==", "66", ":", "self", ".", "no_vtol_channel", "=", "ch", "function", "=", "self", ".", "get_mav_param", "(", "\"SERVO%u_FUNCTION\"", "%", "ch", ",", "0", ")", "if", "function", "==", "32", ":", "self", ".", "rsc_out_channel", "=", "ch", "if", "function", "==", "70", ":", "self", ".", "fwd_thr_channel", "=", "ch" ]
update which channels provide input
[ "update", "which", "channels", "provide", "input" ]
python
train
33.206897
pvlib/pvlib-python
pvlib/solarposition.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/solarposition.py#L1105-L1144
def declination_spencer71(dayofyear): """ Solar declination from Duffie & Beckman [1] and attributed to Spencer (1971) and Iqbal (1983). .. warning:: Return units are radians, not degrees. Parameters ---------- dayofyear : numeric Returns ------- declination (radians) : numeric Angular position of the sun at solar noon relative to the plane of the equator, approximately between +/-23.45 (degrees). References ---------- [1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal Processes, 3rd Edition" pp. 13-14, J. Wiley and Sons, New York (2006) [2] J. W. Spencer, "Fourier series representation of the position of the sun" in Search 2 (5), p. 172 (1971) [3] Daryl R. Myers, "Solar Radiation: Practical Modeling for Renewable Energy Applications", p. 4 CRC Press (2013) See Also -------- declination_cooper69 """ day_angle = _calculate_simple_day_angle(dayofyear) return ( 0.006918 - 0.399912 * np.cos(day_angle) + 0.070257 * np.sin(day_angle) - 0.006758 * np.cos(2. * day_angle) + 0.000907 * np.sin(2. * day_angle) - 0.002697 * np.cos(3. * day_angle) + 0.00148 * np.sin(3. * day_angle) )
[ "def", "declination_spencer71", "(", "dayofyear", ")", ":", "day_angle", "=", "_calculate_simple_day_angle", "(", "dayofyear", ")", "return", "(", "0.006918", "-", "0.399912", "*", "np", ".", "cos", "(", "day_angle", ")", "+", "0.070257", "*", "np", ".", "sin", "(", "day_angle", ")", "-", "0.006758", "*", "np", ".", "cos", "(", "2.", "*", "day_angle", ")", "+", "0.000907", "*", "np", ".", "sin", "(", "2.", "*", "day_angle", ")", "-", "0.002697", "*", "np", ".", "cos", "(", "3.", "*", "day_angle", ")", "+", "0.00148", "*", "np", ".", "sin", "(", "3.", "*", "day_angle", ")", ")" ]
Solar declination from Duffie & Beckman [1] and attributed to Spencer (1971) and Iqbal (1983). .. warning:: Return units are radians, not degrees. Parameters ---------- dayofyear : numeric Returns ------- declination (radians) : numeric Angular position of the sun at solar noon relative to the plane of the equator, approximately between +/-23.45 (degrees). References ---------- [1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal Processes, 3rd Edition" pp. 13-14, J. Wiley and Sons, New York (2006) [2] J. W. Spencer, "Fourier series representation of the position of the sun" in Search 2 (5), p. 172 (1971) [3] Daryl R. Myers, "Solar Radiation: Practical Modeling for Renewable Energy Applications", p. 4 CRC Press (2013) See Also -------- declination_cooper69
[ "Solar", "declination", "from", "Duffie", "&", "Beckman", "[", "1", "]", "and", "attributed", "to", "Spencer", "(", "1971", ")", "and", "Iqbal", "(", "1983", ")", "." ]
python
train
30.65
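Written out as an equation, the series evaluated in the record above is (with B the day angle returned by _calculate_simple_day_angle, in radians):

    \delta = 0.006918
             - 0.399912 \cos B + 0.070257 \sin B
             - 0.006758 \cos 2B + 0.000907 \sin 2B
             - 0.002697 \cos 3B + 0.00148 \sin 3B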
pennlabs/penn-sdk-python
penn/calendar3year.py
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/calendar3year.py#L12-L48
def pull_3year(self): """Returns a list (in JSON format) containing all the events from the Penn iCal Calendar. List contains events in chronological order. Each element of the list is a dictionary, containing: - Name of the event 'name' - Start date 'start' - End date 'end' """ events = [] for term in ["fall", "summer", "spring"]: url = "{}{}{}term.ics".format(BASE_URL, datetime.datetime.now().year, term) resp = requests.get(url) resp.raise_for_status() r = resp.text lines = r.split("\n") d = {} for line in lines: if line == "BEGIN:VEVENT": d = {} elif line.startswith("DTSTART"): raw_date = line.split(":")[1] start_date = datetime.datetime.strptime(raw_date, '%Y%m%d').date() d['start'] = start_date.strftime('%Y-%m-%d') elif line.startswith("DTEND"): raw_date = line.split(":")[1] end_date = datetime.datetime.strptime(raw_date, '%Y%m%d').date() d['end'] = end_date.strftime('%Y-%m-%d') elif line.startswith("SUMMARY"): name = line.split(":")[1] d['name'] = str(name).strip() elif line == "END:VEVENT": events.append(d) events.sort(key=lambda d: d['start']) return events
[ "def", "pull_3year", "(", "self", ")", ":", "events", "=", "[", "]", "for", "term", "in", "[", "\"fall\"", ",", "\"summer\"", ",", "\"spring\"", "]", ":", "url", "=", "\"{}{}{}term.ics\"", ".", "format", "(", "BASE_URL", ",", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "year", ",", "term", ")", "resp", "=", "requests", ".", "get", "(", "url", ")", "resp", ".", "raise_for_status", "(", ")", "r", "=", "resp", ".", "text", "lines", "=", "r", ".", "split", "(", "\"\\n\"", ")", "d", "=", "{", "}", "for", "line", "in", "lines", ":", "if", "line", "==", "\"BEGIN:VEVENT\"", ":", "d", "=", "{", "}", "elif", "line", ".", "startswith", "(", "\"DTSTART\"", ")", ":", "raw_date", "=", "line", ".", "split", "(", "\":\"", ")", "[", "1", "]", "start_date", "=", "datetime", ".", "datetime", ".", "strptime", "(", "raw_date", ",", "'%Y%m%d'", ")", ".", "date", "(", ")", "d", "[", "'start'", "]", "=", "start_date", ".", "strftime", "(", "'%Y-%m-%d'", ")", "elif", "line", ".", "startswith", "(", "\"DTEND\"", ")", ":", "raw_date", "=", "line", ".", "split", "(", "\":\"", ")", "[", "1", "]", "end_date", "=", "datetime", ".", "datetime", ".", "strptime", "(", "raw_date", ",", "'%Y%m%d'", ")", ".", "date", "(", ")", "d", "[", "'end'", "]", "=", "end_date", ".", "strftime", "(", "'%Y-%m-%d'", ")", "elif", "line", ".", "startswith", "(", "\"SUMMARY\"", ")", ":", "name", "=", "line", ".", "split", "(", "\":\"", ")", "[", "1", "]", "d", "[", "'name'", "]", "=", "str", "(", "name", ")", ".", "strip", "(", ")", "elif", "line", "==", "\"END:VEVENT\"", ":", "events", ".", "append", "(", "d", ")", "events", ".", "sort", "(", "key", "=", "lambda", "d", ":", "d", "[", "'start'", "]", ")", "return", "events" ]
Returns a list (in JSON format) containing all the events from the Penn iCal Calendar. List contains events in chronological order. Each element of the list is a dictionary, containing: - Name of the event 'name' - Start date 'start' - End date 'end'
[ "Returns", "a", "list", "(", "in", "JSON", "format", ")", "containing", "all", "the", "events", "from", "the", "Penn", "iCal", "Calendar", "." ]
python
train
40.864865
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L7187-L7199
def validateOneElement(self, doc, elem): """Try to validate a single element and it's attributes, basically it does the following checks as described by the XML-1.0 recommendation: - [ VC: Element Valid ] - [ VC: Required Attribute ] Then call xmlValidateOneAttribute() for each attribute present. The ID/IDREF checkings are done separately """ if doc is None: doc__o = None else: doc__o = doc._o if elem is None: elem__o = None else: elem__o = elem._o ret = libxml2mod.xmlValidateOneElement(self._o, doc__o, elem__o) return ret
[ "def", "validateOneElement", "(", "self", ",", "doc", ",", "elem", ")", ":", "if", "doc", "is", "None", ":", "doc__o", "=", "None", "else", ":", "doc__o", "=", "doc", ".", "_o", "if", "elem", "is", "None", ":", "elem__o", "=", "None", "else", ":", "elem__o", "=", "elem", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlValidateOneElement", "(", "self", ".", "_o", ",", "doc__o", ",", "elem__o", ")", "return", "ret" ]
Try to validate a single element and it's attributes, basically it does the following checks as described by the XML-1.0 recommendation: - [ VC: Element Valid ] - [ VC: Required Attribute ] Then call xmlValidateOneAttribute() for each attribute present. The ID/IDREF checkings are done separately
[ "Try", "to", "validate", "a", "single", "element", "and", "it", "s", "attributes", "basically", "it", "does", "the", "following", "checks", "as", "described", "by", "the", "XML", "-", "1", ".", "0", "recommendation", ":", "-", "[", "VC", ":", "Element", "Valid", "]", "-", "[", "VC", ":", "Required", "Attribute", "]", "Then", "call", "xmlValidateOneAttribute", "()", "for", "each", "attribute", "present", ".", "The", "ID", "/", "IDREF", "checkings", "are", "done", "separately" ]
python
train
48
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_DGPS.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_DGPS.py#L21-L41
def idle_task(self): '''called in idle time''' try: data = self.port.recv(200) except socket.error as e: if e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK ]: return raise if len(data) > 110: print("DGPS data too large: %u bytes" % len(data)) return try: self.master.mav.gps_inject_data_send( self.target_system, self.target_component, len(data), bytearray(data.ljust(110, '\0'))) except Exception(e): print ("DGPS Failed:", e)
[ "def", "idle_task", "(", "self", ")", ":", "try", ":", "data", "=", "self", ".", "port", ".", "recv", "(", "200", ")", "except", "socket", ".", "error", "as", "e", ":", "if", "e", ".", "errno", "in", "[", "errno", ".", "EAGAIN", ",", "errno", ".", "EWOULDBLOCK", "]", ":", "return", "raise", "if", "len", "(", "data", ")", ">", "110", ":", "print", "(", "\"DGPS data too large: %u bytes\"", "%", "len", "(", "data", ")", ")", "return", "try", ":", "self", ".", "master", ".", "mav", ".", "gps_inject_data_send", "(", "self", ".", "target_system", ",", "self", ".", "target_component", ",", "len", "(", "data", ")", ",", "bytearray", "(", "data", ".", "ljust", "(", "110", ",", "'\\0'", ")", ")", ")", "except", "Exception", "(", "e", ")", ":", "print", "(", "\"DGPS Failed:\"", ",", "e", ")" ]
called in idle time
[ "called", "in", "idle", "time" ]
python
train
29.428571
JohnVinyard/zounds
zounds/spectral/tfrepresentation.py
https://github.com/JohnVinyard/zounds/blob/337b3f98753d09eaab1c72dcd37bb852a3fa5ac6/zounds/spectral/tfrepresentation.py#L57-L69
def validate(self, size): """ Ensure that the size of the dimension matches the number of bands in the scale Raises: ValueError: when the dimension size and number of bands don't match """ msg = 'scale and array size must match, ' \ 'but were scale: {self.scale.n_bands}, array size: {size}' if size != len(self.scale): raise ValueError(msg.format(**locals()))
[ "def", "validate", "(", "self", ",", "size", ")", ":", "msg", "=", "'scale and array size must match, '", "'but were scale: {self.scale.n_bands}, array size: {size}'", "if", "size", "!=", "len", "(", "self", ".", "scale", ")", ":", "raise", "ValueError", "(", "msg", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")" ]
Ensure that the size of the dimension matches the number of bands in the scale Raises: ValueError: when the dimension size and number of bands don't match
[ "Ensure", "that", "the", "size", "of", "the", "dimension", "matches", "the", "number", "of", "bands", "in", "the", "scale" ]
python
train
34.307692
Jajcus/pyxmpp2
pyxmpp2/transport.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/transport.py#L656-L665
def _initiate_starttls(self, **kwargs): """Initiate starttls handshake over the socket. """ if self._tls_state == "connected": raise RuntimeError("Already TLS-connected") kwargs["do_handshake_on_connect"] = False logger.debug("Wrapping the socket into ssl") self._socket = ssl.wrap_socket(self._socket, **kwargs) self._set_state("tls-handshake") self._continue_tls_handshake()
[ "def", "_initiate_starttls", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_tls_state", "==", "\"connected\"", ":", "raise", "RuntimeError", "(", "\"Already TLS-connected\"", ")", "kwargs", "[", "\"do_handshake_on_connect\"", "]", "=", "False", "logger", ".", "debug", "(", "\"Wrapping the socket into ssl\"", ")", "self", ".", "_socket", "=", "ssl", ".", "wrap_socket", "(", "self", ".", "_socket", ",", "*", "*", "kwargs", ")", "self", ".", "_set_state", "(", "\"tls-handshake\"", ")", "self", ".", "_continue_tls_handshake", "(", ")" ]
Initiate starttls handshake over the socket.
[ "Initiate", "starttls", "handshake", "over", "the", "socket", "." ]
python
valid
44.3
apache/incubator-mxnet
plugin/opencv/opencv.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/plugin/opencv/opencv.py#L173-L188
def next(self): """Move iterator position forward""" batch = mx.nd.zeros((self.batch_size, self.size[1], self.size[0], 3)) i = self.cur for i in range(self.cur, min(len(self.list), self.cur+self.batch_size)): str_img = open(self.root+self.list[i]+'.jpg').read() img = imdecode(str_img, 1) img, _ = random_crop(img, self.size) batch[i - self.cur] = img batch = mx.nd.transpose(batch, axes=(0, 3, 1, 2)) ret = mx.io.DataBatch(data=[batch], label=[], pad=self.batch_size-(i-self.cur), index=None) self.cur = i return ret
[ "def", "next", "(", "self", ")", ":", "batch", "=", "mx", ".", "nd", ".", "zeros", "(", "(", "self", ".", "batch_size", ",", "self", ".", "size", "[", "1", "]", ",", "self", ".", "size", "[", "0", "]", ",", "3", ")", ")", "i", "=", "self", ".", "cur", "for", "i", "in", "range", "(", "self", ".", "cur", ",", "min", "(", "len", "(", "self", ".", "list", ")", ",", "self", ".", "cur", "+", "self", ".", "batch_size", ")", ")", ":", "str_img", "=", "open", "(", "self", ".", "root", "+", "self", ".", "list", "[", "i", "]", "+", "'.jpg'", ")", ".", "read", "(", ")", "img", "=", "imdecode", "(", "str_img", ",", "1", ")", "img", ",", "_", "=", "random_crop", "(", "img", ",", "self", ".", "size", ")", "batch", "[", "i", "-", "self", ".", "cur", "]", "=", "img", "batch", "=", "mx", ".", "nd", ".", "transpose", "(", "batch", ",", "axes", "=", "(", "0", ",", "3", ",", "1", ",", "2", ")", ")", "ret", "=", "mx", ".", "io", ".", "DataBatch", "(", "data", "=", "[", "batch", "]", ",", "label", "=", "[", "]", ",", "pad", "=", "self", ".", "batch_size", "-", "(", "i", "-", "self", ".", "cur", ")", ",", "index", "=", "None", ")", "self", ".", "cur", "=", "i", "return", "ret" ]
Move iterator position forward
[ "Move", "iterator", "position", "forward" ]
python
train
44
ubc/github2gitlab
github2gitlab/main.py
https://github.com/ubc/github2gitlab/blob/795898f6d438621fa0c996a7156d70c382ff0493/github2gitlab/main.py#L254-L281
def add_key(self): "Add ssh key to gitlab if necessary" try: with open(self.args.ssh_public_key) as f: public_key = f.read().strip() except: log.debug("No key found in {}".format(self.args.ssh_public_key)) return None g = self.gitlab url = g['url'] + "/user/keys" query = {'private_token': g['token']} keys = requests.get(url, params=query).json() log.debug("looking for '" + public_key + "' in " + str(keys)) if (list(filter(lambda key: key['key'] == public_key, keys))): log.debug(self.args.ssh_public_key + " already exists") return None else: name = 'github2gitlab' log.info("add " + name + " ssh public key from " + self.args.ssh_public_key) query['title'] = name query['key'] = public_key result = requests.post(url, query) if result.status_code != requests.codes.created: log.warn('Key {} already in GitLab. ' 'Possible under a different user. Skipping...' .format(self.args.ssh_public_key)) return public_key
[ "def", "add_key", "(", "self", ")", ":", "try", ":", "with", "open", "(", "self", ".", "args", ".", "ssh_public_key", ")", "as", "f", ":", "public_key", "=", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "except", ":", "log", ".", "debug", "(", "\"No key found in {}\"", ".", "format", "(", "self", ".", "args", ".", "ssh_public_key", ")", ")", "return", "None", "g", "=", "self", ".", "gitlab", "url", "=", "g", "[", "'url'", "]", "+", "\"/user/keys\"", "query", "=", "{", "'private_token'", ":", "g", "[", "'token'", "]", "}", "keys", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "query", ")", ".", "json", "(", ")", "log", ".", "debug", "(", "\"looking for '\"", "+", "public_key", "+", "\"' in \"", "+", "str", "(", "keys", ")", ")", "if", "(", "list", "(", "filter", "(", "lambda", "key", ":", "key", "[", "'key'", "]", "==", "public_key", ",", "keys", ")", ")", ")", ":", "log", ".", "debug", "(", "self", ".", "args", ".", "ssh_public_key", "+", "\" already exists\"", ")", "return", "None", "else", ":", "name", "=", "'github2gitlab'", "log", ".", "info", "(", "\"add \"", "+", "name", "+", "\" ssh public key from \"", "+", "self", ".", "args", ".", "ssh_public_key", ")", "query", "[", "'title'", "]", "=", "name", "query", "[", "'key'", "]", "=", "public_key", "result", "=", "requests", ".", "post", "(", "url", ",", "query", ")", "if", "result", ".", "status_code", "!=", "requests", ".", "codes", ".", "created", ":", "log", ".", "warn", "(", "'Key {} already in GitLab. '", "'Possible under a different user. Skipping...'", ".", "format", "(", "self", ".", "args", ".", "ssh_public_key", ")", ")", "return", "public_key" ]
Add ssh key to gitlab if necessary
[ "Add", "ssh", "key", "to", "gitlab", "if", "necessary" ]
python
train
43.428571
pytorch/vision
torchvision/transforms/functional.py
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L410-L432
def _get_perspective_coeffs(startpoints, endpoints): """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms. In Perspective Transform each pixel (x, y) in the orignal image gets transformed as, (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) ) Args: List containing [top-left, top-right, bottom-right, bottom-left] of the orignal image, List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image Returns: octuple (a, b, c, d, e, f, g, h) for transforming each pixel. """ matrix = [] for p1, p2 in zip(endpoints, startpoints): matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]]) matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]]) A = torch.tensor(matrix, dtype=torch.float) B = torch.tensor(startpoints, dtype=torch.float).view(8) res = torch.gels(B, A)[0] return res.squeeze_(1).tolist()
[ "def", "_get_perspective_coeffs", "(", "startpoints", ",", "endpoints", ")", ":", "matrix", "=", "[", "]", "for", "p1", ",", "p2", "in", "zip", "(", "endpoints", ",", "startpoints", ")", ":", "matrix", ".", "append", "(", "[", "p1", "[", "0", "]", ",", "p1", "[", "1", "]", ",", "1", ",", "0", ",", "0", ",", "0", ",", "-", "p2", "[", "0", "]", "*", "p1", "[", "0", "]", ",", "-", "p2", "[", "0", "]", "*", "p1", "[", "1", "]", "]", ")", "matrix", ".", "append", "(", "[", "0", ",", "0", ",", "0", ",", "p1", "[", "0", "]", ",", "p1", "[", "1", "]", ",", "1", ",", "-", "p2", "[", "1", "]", "*", "p1", "[", "0", "]", ",", "-", "p2", "[", "1", "]", "*", "p1", "[", "1", "]", "]", ")", "A", "=", "torch", ".", "tensor", "(", "matrix", ",", "dtype", "=", "torch", ".", "float", ")", "B", "=", "torch", ".", "tensor", "(", "startpoints", ",", "dtype", "=", "torch", ".", "float", ")", ".", "view", "(", "8", ")", "res", "=", "torch", ".", "gels", "(", "B", ",", "A", ")", "[", "0", "]", "return", "res", ".", "squeeze_", "(", "1", ")", ".", "tolist", "(", ")" ]
Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms. In Perspective Transform each pixel (x, y) in the orignal image gets transformed as, (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) ) Args: List containing [top-left, top-right, bottom-right, bottom-left] of the orignal image, List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image Returns: octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
[ "Helper", "function", "to", "get", "the", "coefficients", "(", "a", "b", "c", "d", "e", "f", "g", "h", ")", "for", "the", "perspective", "transforms", "." ]
python
test
44.434783
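The linear system behind the record above, spelled out: for each correspondence the code pairs a point (x, y) from endpoints with its counterpart (u, v) from startpoints, and the two rows it appends encode

    a x + b y + c - u g x - u h y = u
    d x + e y + f - v g x - v h y = v

which is the cross-multiplied form of u = (ax + by + c)/(gx + hy + 1) and v = (dx + ey + f)/(gx + hy + 1) from the docstring; torch.gels then solves the stacked 8x8 system for (a, ..., h) in the least-squares sense.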
apache/incubator-mxnet
example/gluon/lipnet/utils/preprocess_data.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/utils/preprocess_data.py#L118-L124
def process_frames_mouth(self, frames): """ Preprocess from frames using mouth detector """ self.face = np.array(frames) self.mouth = np.array(frames) self.set_data(frames)
[ "def", "process_frames_mouth", "(", "self", ",", "frames", ")", ":", "self", ".", "face", "=", "np", ".", "array", "(", "frames", ")", "self", ".", "mouth", "=", "np", ".", "array", "(", "frames", ")", "self", ".", "set_data", "(", "frames", ")" ]
Preprocess from frames using mouth detector
[ "Preprocess", "from", "frames", "using", "mouth", "detector" ]
python
train
30.571429
heroku/heroku.py
heroku/helpers.py
https://github.com/heroku/heroku.py/blob/cadc0a074896cf29c65a457c5c5bdb2069470af0/heroku/helpers.py#L99-L131
def to_api(in_dict, int_keys=None, date_keys=None, bool_keys=None): """Extends a given object for API Production.""" # Cast all int_keys to int() if int_keys: for in_key in int_keys: if (in_key in in_dict) and (in_dict.get(in_key, None) is not None): in_dict[in_key] = int(in_dict[in_key]) # Cast all date_keys to datetime.isoformat if date_keys: for in_key in date_keys: if (in_key in in_dict) and (in_dict.get(in_key, None) is not None): _from = in_dict[in_key] if isinstance(_from, basestring): dtime = parse_datetime(_from) elif isinstance(_from, datetime): dtime = _from in_dict[in_key] = dtime.isoformat() elif (in_key in in_dict) and in_dict.get(in_key, None) is None: del in_dict[in_key] # Remove all Nones for k, v in in_dict.items(): if v is None: del in_dict[k] return in_dict
[ "def", "to_api", "(", "in_dict", ",", "int_keys", "=", "None", ",", "date_keys", "=", "None", ",", "bool_keys", "=", "None", ")", ":", "# Cast all int_keys to int()", "if", "int_keys", ":", "for", "in_key", "in", "int_keys", ":", "if", "(", "in_key", "in", "in_dict", ")", "and", "(", "in_dict", ".", "get", "(", "in_key", ",", "None", ")", "is", "not", "None", ")", ":", "in_dict", "[", "in_key", "]", "=", "int", "(", "in_dict", "[", "in_key", "]", ")", "# Cast all date_keys to datetime.isoformat", "if", "date_keys", ":", "for", "in_key", "in", "date_keys", ":", "if", "(", "in_key", "in", "in_dict", ")", "and", "(", "in_dict", ".", "get", "(", "in_key", ",", "None", ")", "is", "not", "None", ")", ":", "_from", "=", "in_dict", "[", "in_key", "]", "if", "isinstance", "(", "_from", ",", "basestring", ")", ":", "dtime", "=", "parse_datetime", "(", "_from", ")", "elif", "isinstance", "(", "_from", ",", "datetime", ")", ":", "dtime", "=", "_from", "in_dict", "[", "in_key", "]", "=", "dtime", ".", "isoformat", "(", ")", "elif", "(", "in_key", "in", "in_dict", ")", "and", "in_dict", ".", "get", "(", "in_key", ",", "None", ")", "is", "None", ":", "del", "in_dict", "[", "in_key", "]", "# Remove all Nones", "for", "k", ",", "v", "in", "in_dict", ".", "items", "(", ")", ":", "if", "v", "is", "None", ":", "del", "in_dict", "[", "k", "]", "return", "in_dict" ]
Extends a given object for API Production.
[ "Extends", "a", "given", "object", "for", "API", "Production", "." ]
python
train
30.424242
aws/aws-xray-sdk-python
aws_xray_sdk/core/sampling/sampler.py
https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/sampling/sampler.py#L83-L96
def load_settings(self, daemon_config, context, origin=None): """ The pollers have dependency on the context manager of the X-Ray recorder. They will respect the customer specified xray client to poll sampling rules/targets. Otherwise they falls back to use the same X-Ray daemon as the emitter. """ self._connector.setup_xray_client(ip=daemon_config.tcp_ip, port=daemon_config.tcp_port, client=self.xray_client) self._connector.context = context self._origin = origin
[ "def", "load_settings", "(", "self", ",", "daemon_config", ",", "context", ",", "origin", "=", "None", ")", ":", "self", ".", "_connector", ".", "setup_xray_client", "(", "ip", "=", "daemon_config", ".", "tcp_ip", ",", "port", "=", "daemon_config", ".", "tcp_port", ",", "client", "=", "self", ".", "xray_client", ")", "self", ".", "_connector", ".", "context", "=", "context", "self", ".", "_origin", "=", "origin" ]
The pollers have dependency on the context manager of the X-Ray recorder. They will respect the customer specified xray client to poll sampling rules/targets. Otherwise they falls back to use the same X-Ray daemon as the emitter.
[ "The", "pollers", "have", "dependency", "on", "the", "context", "manager", "of", "the", "X", "-", "Ray", "recorder", ".", "They", "will", "respect", "the", "customer", "specified", "xray", "client", "to", "poll", "sampling", "rules", "/", "targets", ".", "Otherwise", "they", "falls", "back", "to", "use", "the", "same", "X", "-", "Ray", "daemon", "as", "the", "emitter", "." ]
python
train
44.285714
odlgroup/odl
odl/phantom/transmission.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/phantom/transmission.py#L79-L111
def shepp_logan_ellipsoids(ndim, modified=False): """Ellipsoids for the standard Shepp-Logan phantom in 2 or 3 dimensions. Parameters ---------- ndim : {2, 3} Dimension of the space the ellipsoids should be in. modified : bool, optional True if the modified Shepp-Logan phantom should be given. The modified phantom has greatly amplified contrast to aid visualization. See Also -------- odl.phantom.geometric.ellipsoid_phantom : Function for creating arbitrary ellipsoids phantoms shepp_logan : Create a phantom with these ellipsoids References ---------- .. _Shepp-Logan phantom: https://en.wikipedia.org/wiki/Shepp-Logan_phantom """ if ndim == 2: ellipsoids = _shepp_logan_ellipse_2d() elif ndim == 3: ellipsoids = _shepp_logan_ellipsoids_3d() else: raise ValueError('dimension not 2 or 3, no phantom available') if modified: _modified_shepp_logan_ellipsoids(ellipsoids) return ellipsoids
[ "def", "shepp_logan_ellipsoids", "(", "ndim", ",", "modified", "=", "False", ")", ":", "if", "ndim", "==", "2", ":", "ellipsoids", "=", "_shepp_logan_ellipse_2d", "(", ")", "elif", "ndim", "==", "3", ":", "ellipsoids", "=", "_shepp_logan_ellipsoids_3d", "(", ")", "else", ":", "raise", "ValueError", "(", "'dimension not 2 or 3, no phantom available'", ")", "if", "modified", ":", "_modified_shepp_logan_ellipsoids", "(", "ellipsoids", ")", "return", "ellipsoids" ]
Ellipsoids for the standard Shepp-Logan phantom in 2 or 3 dimensions. Parameters ---------- ndim : {2, 3} Dimension of the space the ellipsoids should be in. modified : bool, optional True if the modified Shepp-Logan phantom should be given. The modified phantom has greatly amplified contrast to aid visualization. See Also -------- odl.phantom.geometric.ellipsoid_phantom : Function for creating arbitrary ellipsoids phantoms shepp_logan : Create a phantom with these ellipsoids References ---------- .. _Shepp-Logan phantom: https://en.wikipedia.org/wiki/Shepp-Logan_phantom
[ "Ellipsoids", "for", "the", "standard", "Shepp", "-", "Logan", "phantom", "in", "2", "or", "3", "dimensions", "." ]
python
train
30.515152
google/transitfeed
transitfeed/serviceperiod.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/serviceperiod.py#L106-L110
def GetCalendarFieldValuesTuple(self): """Return the tuple of calendar.txt values or None if this ServicePeriod should not be in calendar.txt .""" if self.start_date and self.end_date: return [getattr(self, fn) for fn in self._FIELD_NAMES]
[ "def", "GetCalendarFieldValuesTuple", "(", "self", ")", ":", "if", "self", ".", "start_date", "and", "self", ".", "end_date", ":", "return", "[", "getattr", "(", "self", ",", "fn", ")", "for", "fn", "in", "self", ".", "_FIELD_NAMES", "]" ]
Return the tuple of calendar.txt values or None if this ServicePeriod should not be in calendar.txt .
[ "Return", "the", "tuple", "of", "calendar", ".", "txt", "values", "or", "None", "if", "this", "ServicePeriod", "should", "not", "be", "in", "calendar", ".", "txt", "." ]
python
train
50.6
APSL/puput
setup.py
https://github.com/APSL/puput/blob/c3294f6bb0dd784f881ce9e3089cbf40d0528e47/setup.py#L11-L16
def get_version(package): """ Return package version as listed in `__version__` in `init.py`. """ init_py = codecs.open(os.path.join(package, '__init__.py'), encoding='utf-8').read() return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
[ "def", "get_version", "(", "package", ")", ":", "init_py", "=", "codecs", ".", "open", "(", "os", ".", "path", ".", "join", "(", "package", ",", "'__init__.py'", ")", ",", "encoding", "=", "'utf-8'", ")", ".", "read", "(", ")", "return", "re", ".", "search", "(", "\"^__version__ = ['\\\"]([^'\\\"]+)['\\\"]\"", ",", "init_py", ",", "re", ".", "MULTILINE", ")", ".", "group", "(", "1", ")" ]
Return package version as listed in `__version__` in `init.py`.
[ "Return", "package", "version", "as", "listed", "in", "__version__", "in", "init", ".", "py", "." ]
python
train
47.333333
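A call sketch for the record above; the package name is hypothetical.

    # Reads mypackage/__init__.py and extracts the version via the regex shown,
    # expecting a line of the exact form __version__ = '1.2.3'.
    version = get_version('mypackage')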
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L5064-L5074
def setOverlayTexture(self, ulOverlayHandle): """ Texture to draw for the overlay. This function can only be called by the overlay's creator or renderer process (see SetOverlayRenderingPid) . * OpenGL dirty state: glBindTexture """ fn = self.function_table.setOverlayTexture pTexture = Texture_t() result = fn(ulOverlayHandle, byref(pTexture)) return result, pTexture
[ "def", "setOverlayTexture", "(", "self", ",", "ulOverlayHandle", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTexture", "pTexture", "=", "Texture_t", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "byref", "(", "pTexture", ")", ")", "return", "result", ",", "pTexture" ]
Texture to draw for the overlay. This function can only be called by the overlay's creator or renderer process (see SetOverlayRenderingPid) . * OpenGL dirty state: glBindTexture
[ "Texture", "to", "draw", "for", "the", "overlay", ".", "This", "function", "can", "only", "be", "called", "by", "the", "overlay", "s", "creator", "or", "renderer", "process", "(", "see", "SetOverlayRenderingPid", ")", ".", "*", "OpenGL", "dirty", "state", ":", "glBindTexture" ]
python
train
39.090909
BlockHub/blockhubdpostools
dpostools/api.py
https://github.com/BlockHub/blockhubdpostools/blob/27712cd97cd3658ee54a4330ff3135b51a01d7d1/dpostools/api.py#L76-L101
def status(self): """ check the status of the network and the peers :return: network_height, peer_status """ peer = random.choice(self.PEERS) formatted_peer = 'http://{}:4001'.format(peer) peerdata = requests.get(url=formatted_peer + '/api/peers/').json()['peers'] peers_status = {} networkheight = max([x['height'] for x in peerdata]) for i in peerdata: if 'http://{}:4001'.format(i['ip']) in self.PEERS: peers_status.update({i['ip']: { 'height': i['height'], 'status': i['status'], 'version': i['version'], 'delay': i['delay'], }}) return { 'network_height': networkheight, 'peer_status': peers_status }
[ "def", "status", "(", "self", ")", ":", "peer", "=", "random", ".", "choice", "(", "self", ".", "PEERS", ")", "formatted_peer", "=", "'http://{}:4001'", ".", "format", "(", "peer", ")", "peerdata", "=", "requests", ".", "get", "(", "url", "=", "formatted_peer", "+", "'/api/peers/'", ")", ".", "json", "(", ")", "[", "'peers'", "]", "peers_status", "=", "{", "}", "networkheight", "=", "max", "(", "[", "x", "[", "'height'", "]", "for", "x", "in", "peerdata", "]", ")", "for", "i", "in", "peerdata", ":", "if", "'http://{}:4001'", ".", "format", "(", "i", "[", "'ip'", "]", ")", "in", "self", ".", "PEERS", ":", "peers_status", ".", "update", "(", "{", "i", "[", "'ip'", "]", ":", "{", "'height'", ":", "i", "[", "'height'", "]", ",", "'status'", ":", "i", "[", "'status'", "]", ",", "'version'", ":", "i", "[", "'version'", "]", ",", "'delay'", ":", "i", "[", "'delay'", "]", ",", "}", "}", ")", "return", "{", "'network_height'", ":", "networkheight", ",", "'peer_status'", ":", "peers_status", "}" ]
check the status of the network and the peers :return: network_height, peer_status
[ "check", "the", "status", "of", "the", "network", "and", "the", "peers" ]
python
valid
31.846154
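The aggregation inside status() boils down to a max over peer heights plus a per-IP summary dict; a self-contained sketch of just that step, with made-up peer payloads standing in for the /api/peers/ response.

peerdata = [
    {'ip': '10.0.0.1', 'height': 6401234, 'status': 'OK', 'version': '2.1.0', 'delay': 12},
    {'ip': '10.0.0.2', 'height': 6401230, 'status': 'OK', 'version': '2.1.0', 'delay': 40},
]
network_height = max(p['height'] for p in peerdata)
peer_status = {p['ip']: {k: p[k] for k in ('height', 'status', 'version', 'delay')}
               for p in peerdata}
print({'network_height': network_height, 'peer_status': peer_status})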
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/monitoring.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/monitoring.py#L711-L716
def event_listeners(self): """List of registered event listeners.""" return (self.__command_listeners[:], self.__server_heartbeat_listeners[:], self.__server_listeners[:], self.__topology_listeners[:])
[ "def", "event_listeners", "(", "self", ")", ":", "return", "(", "self", ".", "__command_listeners", "[", ":", "]", ",", "self", ".", "__server_heartbeat_listeners", "[", ":", "]", ",", "self", ".", "__server_listeners", "[", ":", "]", ",", "self", ".", "__topology_listeners", "[", ":", "]", ")" ]
List of registered event listeners.
[ "List", "of", "registered", "event", "listeners", "." ]
python
train
43.333333
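The listeners returned by event_listeners() are the ones registered through pymongo's monitoring API; a hedged sketch against the upstream public module (the vendored copy above mirrors it), with a trivial command logger.

from pymongo import monitoring

class CommandLogger(monitoring.CommandListener):
    def started(self, event):
        print('started:', event.command_name)

    def succeeded(self, event):
        print('succeeded:', event.command_name, event.duration_micros)

    def failed(self, event):
        print('failed:', event.command_name)

monitoring.register(CommandLogger())  # now included among the registered command listeners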
deepmind/sonnet
sonnet/python/modules/nets/convnet.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/nets/convnet.py#L588-L712
def transpose(self, name=None, output_channels=None, kernel_shapes=None, strides=None, paddings=None, activation=None, activate_final=None, normalization_ctor=None, normalization_kwargs=None, normalize_final=None, initializers=None, partitioners=None, regularizers=None, use_batch_norm=None, use_bias=None, batch_norm_config=None, data_format=None, custom_getter=None): """Returns transposed version of this network. Args: name: Optional string specifying the name of the transposed module. The default name is constructed by appending "_transpose" to `self.module_name`. output_channels: Optional iterable of numbers of output channels. kernel_shapes: Optional iterable of kernel sizes. The default value is constructed by reversing `self.kernel_shapes`. strides: Optional iterable of kernel strides. The default value is constructed by reversing `self.strides`. paddings: Optional iterable of padding options, either `snt.SAME` or `snt.VALID`; The default value is constructed by reversing `self.paddings`. activation: Optional activation op. Default value is `self.activation`. activate_final: Optional boolean determining if the activation and batch normalization, if turned on, are applied to the final layer. normalization_ctor: Constructor to return a callable which will perform normalization at each layer. Defaults to None / no normalization. Examples of what could go here: `snt.BatchNormV2`, `snt.LayerNorm`. If a string is provided, importlib is used to convert the string to a callable, so either `snt.LayerNorm` or `"snt.LayerNorm"` can be provided. normalization_kwargs: kwargs to be provided to `normalization_ctor` when it is called. normalize_final: Whether to apply normalization after the final conv layer. Default is to take the value of activate_final. initializers: Optional dict containing ops to initialize the filters of the whole network (with key 'w') or biases (with key 'b'). The default value is `self.initializers`. partitioners: Optional dict containing partitioners to partition weights (with key 'w') or biases (with key 'b'). The default value is `self.partitioners`. regularizers: Optional dict containing regularizers for the filters of the whole network (with key 'w') or biases (with key 'b'). The default is `self.regularizers`. use_batch_norm: Optional boolean determining if batch normalization is applied after convolution. The default value is `self.use_batch_norm`. use_bias: Optional boolean or iterable of booleans determining whether to include bias parameters in the convolutional layers. Default is constructed by reversing `self.use_bias`. batch_norm_config: Optional mapping of additional configuration for the `snt.BatchNorm` modules. Default is `self.batch_norm_config`. data_format: Optional string, one of "NCHW" or "NHWC". Specifies whether the channel dimension of the input and output is the last dimension. Default is `self._data_format`. custom_getter: Callable or dictionary of callables to use as custom getters inside the module. If a dictionary, the keys correspond to regexes to match variable names. See the `tf.get_variable` documentation for information about the custom_getter API. Returns: Matching `ConvNet2DTranspose` module. Raises: ValueError: If output_channels is specified and its length does not match the number of layers. ValueError: If the given data_format is not a supported format ("NHWC" or "NCHW"). NotImplementedError: If the convolutions are dilated. 
""" for rate in self._rates: if rate != 1: raise NotImplementedError("Transpose dilated convolutions " "are not supported") output_shapes = [] if data_format is None: data_format = self._data_format if data_format == DATA_FORMAT_NHWC: start_dim, end_dim = 1, -1 elif data_format == DATA_FORMAT_NCHW: start_dim, end_dim = 2, 4 else: raise ValueError("Invalid data_format {:s}. Allowed formats " "{}".format(data_format, SUPPORTED_2D_DATA_FORMATS)) if custom_getter is None and self._custom_getter is not None: tf.logging.warning( "This convnet was constructed with a custom getter, but the " "`transpose` method was not given any. The transposed ConvNet will " "not be using any custom_getter.") for layer in reversed(self._layers): output_shapes.append(lambda l=layer: l.input_shape[start_dim:end_dim]) transpose_constructor = functools.partial(ConvNet2DTranspose, output_shapes=output_shapes, custom_getter=custom_getter) return self._transpose( transpose_constructor=transpose_constructor, name=name, output_channels=output_channels, kernel_shapes=kernel_shapes, strides=strides, paddings=paddings, activation=activation, activate_final=activate_final, normalization_ctor=normalization_ctor, normalization_kwargs=normalization_kwargs, normalize_final=normalize_final, initializers=initializers, partitioners=partitioners, regularizers=regularizers, use_bias=use_bias, data_format=data_format)
[ "def", "transpose", "(", "self", ",", "name", "=", "None", ",", "output_channels", "=", "None", ",", "kernel_shapes", "=", "None", ",", "strides", "=", "None", ",", "paddings", "=", "None", ",", "activation", "=", "None", ",", "activate_final", "=", "None", ",", "normalization_ctor", "=", "None", ",", "normalization_kwargs", "=", "None", ",", "normalize_final", "=", "None", ",", "initializers", "=", "None", ",", "partitioners", "=", "None", ",", "regularizers", "=", "None", ",", "use_batch_norm", "=", "None", ",", "use_bias", "=", "None", ",", "batch_norm_config", "=", "None", ",", "data_format", "=", "None", ",", "custom_getter", "=", "None", ")", ":", "for", "rate", "in", "self", ".", "_rates", ":", "if", "rate", "!=", "1", ":", "raise", "NotImplementedError", "(", "\"Transpose dilated convolutions \"", "\"are not supported\"", ")", "output_shapes", "=", "[", "]", "if", "data_format", "is", "None", ":", "data_format", "=", "self", ".", "_data_format", "if", "data_format", "==", "DATA_FORMAT_NHWC", ":", "start_dim", ",", "end_dim", "=", "1", ",", "-", "1", "elif", "data_format", "==", "DATA_FORMAT_NCHW", ":", "start_dim", ",", "end_dim", "=", "2", ",", "4", "else", ":", "raise", "ValueError", "(", "\"Invalid data_format {:s}. Allowed formats \"", "\"{}\"", ".", "format", "(", "data_format", ",", "SUPPORTED_2D_DATA_FORMATS", ")", ")", "if", "custom_getter", "is", "None", "and", "self", ".", "_custom_getter", "is", "not", "None", ":", "tf", ".", "logging", ".", "warning", "(", "\"This convnet was constructed with a custom getter, but the \"", "\"`transpose` method was not given any. The transposed ConvNet will \"", "\"not be using any custom_getter.\"", ")", "for", "layer", "in", "reversed", "(", "self", ".", "_layers", ")", ":", "output_shapes", ".", "append", "(", "lambda", "l", "=", "layer", ":", "l", ".", "input_shape", "[", "start_dim", ":", "end_dim", "]", ")", "transpose_constructor", "=", "functools", ".", "partial", "(", "ConvNet2DTranspose", ",", "output_shapes", "=", "output_shapes", ",", "custom_getter", "=", "custom_getter", ")", "return", "self", ".", "_transpose", "(", "transpose_constructor", "=", "transpose_constructor", ",", "name", "=", "name", ",", "output_channels", "=", "output_channels", ",", "kernel_shapes", "=", "kernel_shapes", ",", "strides", "=", "strides", ",", "paddings", "=", "paddings", ",", "activation", "=", "activation", ",", "activate_final", "=", "activate_final", ",", "normalization_ctor", "=", "normalization_ctor", ",", "normalization_kwargs", "=", "normalization_kwargs", ",", "normalize_final", "=", "normalize_final", ",", "initializers", "=", "initializers", ",", "partitioners", "=", "partitioners", ",", "regularizers", "=", "regularizers", ",", "use_bias", "=", "use_bias", ",", "data_format", "=", "data_format", ")" ]
Returns transposed version of this network. Args: name: Optional string specifying the name of the transposed module. The default name is constructed by appending "_transpose" to `self.module_name`. output_channels: Optional iterable of numbers of output channels. kernel_shapes: Optional iterable of kernel sizes. The default value is constructed by reversing `self.kernel_shapes`. strides: Optional iterable of kernel strides. The default value is constructed by reversing `self.strides`. paddings: Optional iterable of padding options, either `snt.SAME` or `snt.VALID`; The default value is constructed by reversing `self.paddings`. activation: Optional activation op. Default value is `self.activation`. activate_final: Optional boolean determining if the activation and batch normalization, if turned on, are applied to the final layer. normalization_ctor: Constructor to return a callable which will perform normalization at each layer. Defaults to None / no normalization. Examples of what could go here: `snt.BatchNormV2`, `snt.LayerNorm`. If a string is provided, importlib is used to convert the string to a callable, so either `snt.LayerNorm` or `"snt.LayerNorm"` can be provided. normalization_kwargs: kwargs to be provided to `normalization_ctor` when it is called. normalize_final: Whether to apply normalization after the final conv layer. Default is to take the value of activate_final. initializers: Optional dict containing ops to initialize the filters of the whole network (with key 'w') or biases (with key 'b'). The default value is `self.initializers`. partitioners: Optional dict containing partitioners to partition weights (with key 'w') or biases (with key 'b'). The default value is `self.partitioners`. regularizers: Optional dict containing regularizers for the filters of the whole network (with key 'w') or biases (with key 'b'). The default is `self.regularizers`. use_batch_norm: Optional boolean determining if batch normalization is applied after convolution. The default value is `self.use_batch_norm`. use_bias: Optional boolean or iterable of booleans determining whether to include bias parameters in the convolutional layers. Default is constructed by reversing `self.use_bias`. batch_norm_config: Optional mapping of additional configuration for the `snt.BatchNorm` modules. Default is `self.batch_norm_config`. data_format: Optional string, one of "NCHW" or "NHWC". Specifies whether the channel dimension of the input and output is the last dimension. Default is `self._data_format`. custom_getter: Callable or dictionary of callables to use as custom getters inside the module. If a dictionary, the keys correspond to regexes to match variable names. See the `tf.get_variable` documentation for information about the custom_getter API. Returns: Matching `ConvNet2DTranspose` module. Raises: ValueError: If output_channels is specified and its length does not match the number of layers. ValueError: If the given data_format is not a supported format ("NHWC" or "NCHW"). NotImplementedError: If the convolutions are dilated.
[ "Returns", "transposed", "version", "of", "this", "network", "." ]
python
train
46.488
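A hedged sketch of how transpose() is typically used, written against Sonnet v1 in TF1 graph mode; the layer sizes and placeholder shape are arbitrary, and the constructor arguments are assumptions based on ConvNet2D's usual required parameters.

import tensorflow as tf   # TF1-style graph mode, as Sonnet v1 expects
import sonnet as snt

net = snt.nets.ConvNet2D(output_channels=[8, 16], kernel_shapes=[3],
                         strides=[1], paddings=[snt.SAME])
images = tf.placeholder(tf.float32, [None, 28, 28, 1])
features = net(images)       # connect once so each layer's input_shape is known
decoder = net.transpose()    # ConvNet2DTranspose with the layer order reversed
reconstruction = decoder(features)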
drdoctr/doctr
doctr/local.py
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L314-L335
def generate_ssh_key(): """ Generates an SSH deploy public and private key. Returns (private key, public key), a tuple of byte strings. """ key = rsa.generate_private_key( backend=default_backend(), public_exponent=65537, key_size=4096 ) private_key = key.private_bytes( serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption()) public_key = key.public_key().public_bytes( serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH ) return private_key, public_key
[ "def", "generate_ssh_key", "(", ")", ":", "key", "=", "rsa", ".", "generate_private_key", "(", "backend", "=", "default_backend", "(", ")", ",", "public_exponent", "=", "65537", ",", "key_size", "=", "4096", ")", "private_key", "=", "key", ".", "private_bytes", "(", "serialization", ".", "Encoding", ".", "PEM", ",", "serialization", ".", "PrivateFormat", ".", "PKCS8", ",", "serialization", ".", "NoEncryption", "(", ")", ")", "public_key", "=", "key", ".", "public_key", "(", ")", ".", "public_bytes", "(", "serialization", ".", "Encoding", ".", "OpenSSH", ",", "serialization", ".", "PublicFormat", ".", "OpenSSH", ")", "return", "private_key", ",", "public_key" ]
Generates an SSH deploy public and private key. Returns (private key, public key), a tuple of byte strings.
[ "Generates", "an", "SSH", "deploy", "public", "and", "private", "key", "." ]
python
train
27.136364
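A usage sketch for generate_ssh_key(); the output file names are placeholders, and the import path follows the record's doctr/local.py location.

from doctr.local import generate_ssh_key

private_key, public_key = generate_ssh_key()   # PKCS8 PEM private key, OpenSSH public key (bytes)
with open('github_deploy_key', 'wb') as f:
    f.write(private_key)
with open('github_deploy_key.pub', 'wb') as f:
    f.write(public_key)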
saltstack/salt
salt/modules/boto_apigateway.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_apigateway.py#L856-L879
def create_api_stage(restApiId, stageName, deploymentId, description='', cacheClusterEnabled=False, cacheClusterSize='0.5', variables=None, region=None, key=None, keyid=None, profile=None): ''' Creates a new API stage for a given restApiId and deploymentId. CLI Example: .. code-block:: bash salt myminion boto_apigateway.create_api_stage restApiId stagename deploymentId \\ description='' cacheClusterEnabled=True|False cacheClusterSize='0.5' variables='{"name": "value"}' ''' try: variables = dict() if variables is None else variables conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) stage = conn.create_stage(restApiId=restApiId, stageName=stageName, deploymentId=deploymentId, description=description, cacheClusterEnabled=cacheClusterEnabled, cacheClusterSize=cacheClusterSize, variables=variables) return {'created': True, 'stage': _convert_datetime_str(stage)} except ClientError as e: return {'created': False, 'error': __utils__['boto3.get_error'](e)}
[ "def", "create_api_stage", "(", "restApiId", ",", "stageName", ",", "deploymentId", ",", "description", "=", "''", ",", "cacheClusterEnabled", "=", "False", ",", "cacheClusterSize", "=", "'0.5'", ",", "variables", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "variables", "=", "dict", "(", ")", "if", "variables", "is", "None", "else", "variables", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "stage", "=", "conn", ".", "create_stage", "(", "restApiId", "=", "restApiId", ",", "stageName", "=", "stageName", ",", "deploymentId", "=", "deploymentId", ",", "description", "=", "description", ",", "cacheClusterEnabled", "=", "cacheClusterEnabled", ",", "cacheClusterSize", "=", "cacheClusterSize", ",", "variables", "=", "variables", ")", "return", "{", "'created'", ":", "True", ",", "'stage'", ":", "_convert_datetime_str", "(", "stage", ")", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'created'", ":", "False", ",", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}" ]
Creates a new API stage for a given restApiId and deploymentId. CLI Example: .. code-block:: bash salt myminion boto_apigateway.create_api_stage restApiId stagename deploymentId \\ description='' cacheClusterEnabled=True|False cacheClusterSize='0.5' variables='{"name": "value"}'
[ "Creates", "a", "new", "API", "stage", "for", "a", "given", "restApiId", "and", "deploymentId", "." ]
python
train
48.416667
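The salt module above wraps boto3's create_stage; a hedged sketch of the equivalent direct boto3 call, with placeholder IDs and region.

import boto3

client = boto3.client('apigateway', region_name='us-east-1')
stage = client.create_stage(
    restApiId='abc123',
    stageName='dev',
    deploymentId='dep456',
    description='',
    cacheClusterEnabled=False,
    cacheClusterSize='0.5',
    variables={'name': 'value'},
)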
bmcfee/muda
muda/base.py
https://github.com/bmcfee/muda/blob/ff82efdfaeb98da0a9f9124845826eb20536a9ba/muda/base.py#L227-L242
def transform(self, jam): '''Apply the sequence of transformations to a single jam object. Parameters ---------- jam : jams.JAMS The jam object to transform Yields ------ jam_out : jams.JAMS The jam objects produced by the transformation sequence ''' for output in self.__recursive_transform(jam, self.steps): yield output
[ "def", "transform", "(", "self", ",", "jam", ")", ":", "for", "output", "in", "self", ".", "__recursive_transform", "(", "jam", ",", "self", ".", "steps", ")", ":", "yield", "output" ]
Apply the sequence of transformations to a single jam object. Parameters ---------- jam : jams.JAMS The jam object to transform Yields ------ jam_out : jams.JAMS The jam objects produced by the transformation sequence
[ "Apply", "the", "sequence", "of", "transformations", "to", "a", "single", "jam", "object", "." ]
python
valid
26.125
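A hedged usage sketch for the transform() generator above; the deformer, loader, and save helper names follow muda's documented API as I recall it and should be treated as assumptions, and the file paths are placeholders.

import muda

jam = muda.load_jam_audio('annotation.jams', 'clip.wav')   # pack audio into the JAMS object
pitcher = muda.deformers.PitchShift(n_semitones=[-1, 1])   # one deformation step
for i, jam_out in enumerate(pitcher.transform(jam)):
    muda.save('clip_{}.wav'.format(i), 'annotation_{}.jams'.format(i), jam_out)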
raphaelvallat/pingouin
pingouin/power.py
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/power.py#L523-L667
def power_corr(r=None, n=None, power=None, alpha=0.05, tail='two-sided'): """ Evaluate power, sample size, correlation coefficient or significance level of a correlation test. Parameters ---------- r : float Correlation coefficient. n : int Number of observations (sample size). power : float Test power (= 1 - type II error). alpha : float Significance level (type I error probability). The default is 0.05. tail : str Indicates whether the test is "two-sided" or "one-sided". Notes ----- Exactly ONE of the parameters ``r``, ``n``, ``power`` and ``alpha`` must be passed as None, and that parameter is determined from the others. Notice that ``alpha`` has a default value of 0.05 so None must be explicitly passed if you want to compute it. :py:func:`scipy.optimize.brenth` is used to solve power equations for other variables (i.e. sample size, effect size, or significance level). If the solving fails, a nan value is returned. This function is a mere Python translation of the original `pwr.r.test` function implemented in the `pwr` R package. All credit goes to the author, Stephane Champely. References ---------- .. [1] Cohen, J. (1988). Statistical power analysis for the behavioral sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum. .. [2] https://cran.r-project.org/web/packages/pwr/pwr.pdf Examples -------- 1. Compute achieved power given ``r``, ``n`` and ``alpha`` >>> from pingouin import power_corr >>> print('power: %.4f' % power_corr(r=0.5, n=20)) power: 0.6379 2. Compute required sample size given ``r``, ``power`` and ``alpha`` >>> print('n: %.4f' % power_corr(r=0.5, power=0.80, ... tail='one-sided')) n: 22.6091 3. Compute achieved ``r`` given ``n``, ``power`` and ``alpha`` level >>> print('r: %.4f' % power_corr(n=20, power=0.80, alpha=0.05)) r: 0.5822 4. Compute achieved alpha level given ``r``, ``n`` and ``power`` >>> print('alpha: %.4f' % power_corr(r=0.5, n=20, power=0.80, ... 
alpha=None)) alpha: 0.1377 """ # Check the number of arguments that are None n_none = sum([v is None for v in [r, n, power, alpha]]) if n_none != 1: raise ValueError('Exactly one of n, r, power, and alpha must be None') # Safety checks if r is not None: assert -1 <= r <= 1 r = abs(r) if alpha is not None: assert 0 < alpha <= 1 if power is not None: assert 0 < power <= 1 if n is not None: assert n > 4 # Define main function if tail == 'two-sided': def func(r, n, power, alpha): dof = n - 2 ttt = stats.t.ppf(1 - alpha / 2, dof) rc = np.sqrt(ttt**2 / (ttt**2 + dof)) zr = np.arctanh(r) + r / (2 * (n - 1)) zrc = np.arctanh(rc) power = stats.norm.cdf((zr - zrc) * np.sqrt(n - 3)) + \ stats.norm.cdf((-zr - zrc) * np.sqrt(n - 3)) return power else: def func(r, n, power, alpha): dof = n - 2 ttt = stats.t.ppf(1 - alpha, dof) rc = np.sqrt(ttt**2 / (ttt**2 + dof)) zr = np.arctanh(r) + r / (2 * (n - 1)) zrc = np.arctanh(rc) power = stats.norm.cdf((zr - zrc) * np.sqrt(n - 3)) return power # Evaluate missing variable if power is None and n is not None and r is not None: # Compute achieved power given r, n and alpha return func(r, n, power=None, alpha=alpha) elif n is None and power is not None and r is not None: # Compute required sample size given r, power and alpha def _eval_n(n, r, power, alpha): return func(r, n, power, alpha) - power try: return brenth(_eval_n, 4 + 1e-10, 1e+09, args=(r, power, alpha)) except ValueError: # pragma: no cover return np.nan elif r is None and power is not None and n is not None: # Compute achieved r given sample size, power and alpha level def _eval_r(r, n, power, alpha): return func(r, n, power, alpha) - power try: return brenth(_eval_r, 1e-10, 1 - 1e-10, args=(n, power, alpha)) except ValueError: # pragma: no cover return np.nan else: # Compute achieved alpha (significance) level given r, n and power def _eval_alpha(alpha, r, n, power): return func(r, n, power, alpha) - power try: return brenth(_eval_alpha, 1e-10, 1 - 1e-10, args=(r, n, power)) except ValueError: # pragma: no cover return np.nan
[ "def", "power_corr", "(", "r", "=", "None", ",", "n", "=", "None", ",", "power", "=", "None", ",", "alpha", "=", "0.05", ",", "tail", "=", "'two-sided'", ")", ":", "# Check the number of arguments that are None", "n_none", "=", "sum", "(", "[", "v", "is", "None", "for", "v", "in", "[", "r", ",", "n", ",", "power", ",", "alpha", "]", "]", ")", "if", "n_none", "!=", "1", ":", "raise", "ValueError", "(", "'Exactly one of n, r, power, and alpha must be None'", ")", "# Safety checks", "if", "r", "is", "not", "None", ":", "assert", "-", "1", "<=", "r", "<=", "1", "r", "=", "abs", "(", "r", ")", "if", "alpha", "is", "not", "None", ":", "assert", "0", "<", "alpha", "<=", "1", "if", "power", "is", "not", "None", ":", "assert", "0", "<", "power", "<=", "1", "if", "n", "is", "not", "None", ":", "assert", "n", ">", "4", "# Define main function", "if", "tail", "==", "'two-sided'", ":", "def", "func", "(", "r", ",", "n", ",", "power", ",", "alpha", ")", ":", "dof", "=", "n", "-", "2", "ttt", "=", "stats", ".", "t", ".", "ppf", "(", "1", "-", "alpha", "/", "2", ",", "dof", ")", "rc", "=", "np", ".", "sqrt", "(", "ttt", "**", "2", "/", "(", "ttt", "**", "2", "+", "dof", ")", ")", "zr", "=", "np", ".", "arctanh", "(", "r", ")", "+", "r", "/", "(", "2", "*", "(", "n", "-", "1", ")", ")", "zrc", "=", "np", ".", "arctanh", "(", "rc", ")", "power", "=", "stats", ".", "norm", ".", "cdf", "(", "(", "zr", "-", "zrc", ")", "*", "np", ".", "sqrt", "(", "n", "-", "3", ")", ")", "+", "stats", ".", "norm", ".", "cdf", "(", "(", "-", "zr", "-", "zrc", ")", "*", "np", ".", "sqrt", "(", "n", "-", "3", ")", ")", "return", "power", "else", ":", "def", "func", "(", "r", ",", "n", ",", "power", ",", "alpha", ")", ":", "dof", "=", "n", "-", "2", "ttt", "=", "stats", ".", "t", ".", "ppf", "(", "1", "-", "alpha", ",", "dof", ")", "rc", "=", "np", ".", "sqrt", "(", "ttt", "**", "2", "/", "(", "ttt", "**", "2", "+", "dof", ")", ")", "zr", "=", "np", ".", "arctanh", "(", "r", ")", "+", "r", "/", "(", "2", "*", "(", "n", "-", "1", ")", ")", "zrc", "=", "np", ".", "arctanh", "(", "rc", ")", "power", "=", "stats", ".", "norm", ".", "cdf", "(", "(", "zr", "-", "zrc", ")", "*", "np", ".", "sqrt", "(", "n", "-", "3", ")", ")", "return", "power", "# Evaluate missing variable", "if", "power", "is", "None", "and", "n", "is", "not", "None", "and", "r", "is", "not", "None", ":", "# Compute achieved power given r, n and alpha", "return", "func", "(", "r", ",", "n", ",", "power", "=", "None", ",", "alpha", "=", "alpha", ")", "elif", "n", "is", "None", "and", "power", "is", "not", "None", "and", "r", "is", "not", "None", ":", "# Compute required sample size given r, power and alpha", "def", "_eval_n", "(", "n", ",", "r", ",", "power", ",", "alpha", ")", ":", "return", "func", "(", "r", ",", "n", ",", "power", ",", "alpha", ")", "-", "power", "try", ":", "return", "brenth", "(", "_eval_n", ",", "4", "+", "1e-10", ",", "1e+09", ",", "args", "=", "(", "r", ",", "power", ",", "alpha", ")", ")", "except", "ValueError", ":", "# pragma: no cover", "return", "np", ".", "nan", "elif", "r", "is", "None", "and", "power", "is", "not", "None", "and", "n", "is", "not", "None", ":", "# Compute achieved r given sample size, power and alpha level", "def", "_eval_r", "(", "r", ",", "n", ",", "power", ",", "alpha", ")", ":", "return", "func", "(", "r", ",", "n", ",", "power", ",", "alpha", ")", "-", "power", "try", ":", "return", "brenth", "(", "_eval_r", ",", "1e-10", ",", "1", "-", "1e-10", ",", "args", "=", "(", "n", ",", "power", ",", "alpha", ")", ")", 
"except", "ValueError", ":", "# pragma: no cover", "return", "np", ".", "nan", "else", ":", "# Compute achieved alpha (significance) level given r, n and power", "def", "_eval_alpha", "(", "alpha", ",", "r", ",", "n", ",", "power", ")", ":", "return", "func", "(", "r", ",", "n", ",", "power", ",", "alpha", ")", "-", "power", "try", ":", "return", "brenth", "(", "_eval_alpha", ",", "1e-10", ",", "1", "-", "1e-10", ",", "args", "=", "(", "r", ",", "n", ",", "power", ")", ")", "except", "ValueError", ":", "# pragma: no cover", "return", "np", ".", "nan" ]
Evaluate power, sample size, correlation coefficient or significance level of a correlation test. Parameters ---------- r : float Correlation coefficient. n : int Number of observations (sample size). power : float Test power (= 1 - type II error). alpha : float Significance level (type I error probability). The default is 0.05. tail : str Indicates whether the test is "two-sided" or "one-sided". Notes ----- Exactly ONE of the parameters ``r``, ``n``, ``power`` and ``alpha`` must be passed as None, and that parameter is determined from the others. Notice that ``alpha`` has a default value of 0.05 so None must be explicitly passed if you want to compute it. :py:func:`scipy.optimize.brenth` is used to solve power equations for other variables (i.e. sample size, effect size, or significance level). If the solving fails, a nan value is returned. This function is a mere Python translation of the original `pwr.r.test` function implemented in the `pwr` R package. All credit goes to the author, Stephane Champely. References ---------- .. [1] Cohen, J. (1988). Statistical power analysis for the behavioral sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum. .. [2] https://cran.r-project.org/web/packages/pwr/pwr.pdf Examples -------- 1. Compute achieved power given ``r``, ``n`` and ``alpha`` >>> from pingouin import power_corr >>> print('power: %.4f' % power_corr(r=0.5, n=20)) power: 0.6379 2. Compute required sample size given ``r``, ``power`` and ``alpha`` >>> print('n: %.4f' % power_corr(r=0.5, power=0.80, ... tail='one-sided')) n: 22.6091 3. Compute achieved ``r`` given ``n``, ``power`` and ``alpha`` level >>> print('r: %.4f' % power_corr(n=20, power=0.80, alpha=0.05)) r: 0.5822 4. Compute achieved alpha level given ``r``, ``n`` and ``power`` >>> print('alpha: %.4f' % power_corr(r=0.5, n=20, power=0.80, ... alpha=None)) alpha: 0.1377
[ "Evaluate", "power", "sample", "size", "correlation", "coefficient", "or", "significance", "level", "of", "a", "correlation", "test", "." ]
python
train
32.296552
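The two-sided branch above is a bias-corrected Fisher-z approximation; a small sketch that reproduces the "power: 0.6379" doctest value directly from that formula.

import numpy as np
from scipy import stats

r, n, alpha = 0.5, 20, 0.05
dof = n - 2
ttt = stats.t.ppf(1 - alpha / 2, dof)
rc = np.sqrt(ttt**2 / (ttt**2 + dof))        # critical correlation at this alpha
zr = np.arctanh(r) + r / (2 * (n - 1))       # bias-corrected Fisher z of r
zrc = np.arctanh(rc)
power = (stats.norm.cdf((zr - zrc) * np.sqrt(n - 3))
         + stats.norm.cdf((-zr - zrc) * np.sqrt(n - 3)))
print(round(power, 4))   # 0.6379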
genialis/genesis-pyapi
genesis/genesis.py
https://github.com/genialis/genesis-pyapi/blob/dfe9bcc8b332a8b9873db4ab9994b0cc10eb209a/genesis/genesis.py#L108-L157
def data(self, **query): """Query for Data object annotation.""" objects = self.cache['objects'] data = self.api.data.get(**query)['objects'] data_objects = [] for d in data: _id = d['id'] if _id in objects: # Update existing object objects[_id].update(d) else: # Insert new object objects[_id] = GenData(d, self) data_objects.append(objects[_id]) # Hydrate reference fields for d in data_objects: count += 1 while True: ref_annotation = {} remove_annotation = [] for path, ann in d.annotation.items(): if ann['type'].startswith('data:'): # Referenced data object found # Copy annotation _id = ann['value'] if _id not in objects: try: d_tmp = self.api.data(_id).get() except slumber.exceptions.HttpClientError as ex: if ex.response.status_code == 404: continue else: raise ex objects[_id] = GenData(d_tmp, self) annotation = objects[_id].annotation ref_annotation.update({path + '.' + k: v for k, v in annotation.items()}) remove_annotation.append(path) if ref_annotation: d.annotation.update(ref_annotation) for path in remove_annotation: del d.annotation[path] else: break return data_objects
[ "def", "data", "(", "self", ",", "*", "*", "query", ")", ":", "objects", "=", "self", ".", "cache", "[", "'objects'", "]", "data", "=", "self", ".", "api", ".", "data", ".", "get", "(", "*", "*", "query", ")", "[", "'objects'", "]", "data_objects", "=", "[", "]", "for", "d", "in", "data", ":", "_id", "=", "d", "[", "'id'", "]", "if", "_id", "in", "objects", ":", "# Update existing object", "objects", "[", "_id", "]", ".", "update", "(", "d", ")", "else", ":", "# Insert new object", "objects", "[", "_id", "]", "=", "GenData", "(", "d", ",", "self", ")", "data_objects", ".", "append", "(", "objects", "[", "_id", "]", ")", "# Hydrate reference fields", "for", "d", "in", "data_objects", ":", "count", "+=", "1", "while", "True", ":", "ref_annotation", "=", "{", "}", "remove_annotation", "=", "[", "]", "for", "path", ",", "ann", "in", "d", ".", "annotation", ".", "items", "(", ")", ":", "if", "ann", "[", "'type'", "]", ".", "startswith", "(", "'data:'", ")", ":", "# Referenced data object found", "# Copy annotation", "_id", "=", "ann", "[", "'value'", "]", "if", "_id", "not", "in", "objects", ":", "try", ":", "d_tmp", "=", "self", ".", "api", ".", "data", "(", "_id", ")", ".", "get", "(", ")", "except", "slumber", ".", "exceptions", ".", "HttpClientError", "as", "ex", ":", "if", "ex", ".", "response", ".", "status_code", "==", "404", ":", "continue", "else", ":", "raise", "ex", "objects", "[", "_id", "]", "=", "GenData", "(", "d_tmp", ",", "self", ")", "annotation", "=", "objects", "[", "_id", "]", ".", "annotation", "ref_annotation", ".", "update", "(", "{", "path", "+", "'.'", "+", "k", ":", "v", "for", "k", ",", "v", "in", "annotation", ".", "items", "(", ")", "}", ")", "remove_annotation", ".", "append", "(", "path", ")", "if", "ref_annotation", ":", "d", ".", "annotation", ".", "update", "(", "ref_annotation", ")", "for", "path", "in", "remove_annotation", ":", "del", "d", ".", "annotation", "[", "path", "]", "else", ":", "break", "return", "data_objects" ]
Query for Data object annotation.
[ "Query", "for", "Data", "object", "annotation", "." ]
python
test
37.1
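One thing worth flagging in data() above: the count += 1 inside the hydration loop references a name that is never initialized (and never read afterwards), so as written it raises NameError on the first iteration. A minimal sketch of the fix, assuming the counter was only meant as bookkeeping.

count = 0                      # initialize before the loop (or drop the counter entirely)
for d in data_objects:
    count += 1
    # ... hydrate reference fields as in the original loop body ...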
gccxml/pygccxml
pygccxml/parser/declarations_cache.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/declarations_cache.py#L17-L34
def file_signature(filename): """ Return a signature for a file. """ if not os.path.isfile(filename): return None if not os.path.exists(filename): return None # Duplicate auto-generated files can be recognized with the sha1 hash. sig = hashlib.sha1() with open(filename, "rb") as f: buf = f.read() sig.update(buf) return sig.hexdigest()
[ "def", "file_signature", "(", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "return", "None", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "return", "None", "# Duplicate auto-generated files can be recognized with the sha1 hash.", "sig", "=", "hashlib", ".", "sha1", "(", ")", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "f", ":", "buf", "=", "f", ".", "read", "(", ")", "sig", ".", "update", "(", "buf", ")", "return", "sig", ".", "hexdigest", "(", ")" ]
Return a signature for a file.
[ "Return", "a", "signature", "for", "a", "file", "." ]
python
train
21.722222
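A quick usage sketch for file_signature(); the example file is created on the fly so it is self-contained, and the result is simply hashlib's SHA-1 hex digest of the file bytes. (Note the os.path.exists check in the original is redundant once isfile has passed.)

import hashlib
import os

with open('example.txt', 'wb') as f:
    f.write(b'generated header\n')

def file_signature(filename):
    if not os.path.isfile(filename):
        return None
    sig = hashlib.sha1()
    with open(filename, 'rb') as f:
        sig.update(f.read())
    return sig.hexdigest()

print(file_signature('example.txt'))   # 40-character hex digest
print(file_signature('missing.txt'))   # None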
Nachtfeuer/pipeline
spline/components/tasks.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L98-L127
def prepare_shell_data(self, shells, key, entry): """Prepare one shell or docker task.""" if self.can_process_shell(entry): if key in ['python']: entry['type'] = key if 'with' in entry and isinstance(entry['with'], str): rendered_with = ast.literal_eval(render(entry['with'], variables=self.pipeline.variables, model=self.pipeline.model, env=self.get_merged_env(include_os=True))) elif 'with' in entry: rendered_with = entry['with'] else: rendered_with = [''] for item in rendered_with: shells.append({ 'id': self.next_task_id, 'creator': key, 'entry': entry, 'model': self.pipeline.model, 'env': self.get_merged_env(), 'item': item, 'dry_run': self.pipeline.options.dry_run, 'debug': self.pipeline.options.debug, 'strict': self.pipeline.options.strict, 'variables': self.pipeline.variables, 'temporary_scripts_path': self.pipeline.options.temporary_scripts_path}) self.next_task_id += 1
[ "def", "prepare_shell_data", "(", "self", ",", "shells", ",", "key", ",", "entry", ")", ":", "if", "self", ".", "can_process_shell", "(", "entry", ")", ":", "if", "key", "in", "[", "'python'", "]", ":", "entry", "[", "'type'", "]", "=", "key", "if", "'with'", "in", "entry", "and", "isinstance", "(", "entry", "[", "'with'", "]", ",", "str", ")", ":", "rendered_with", "=", "ast", ".", "literal_eval", "(", "render", "(", "entry", "[", "'with'", "]", ",", "variables", "=", "self", ".", "pipeline", ".", "variables", ",", "model", "=", "self", ".", "pipeline", ".", "model", ",", "env", "=", "self", ".", "get_merged_env", "(", "include_os", "=", "True", ")", ")", ")", "elif", "'with'", "in", "entry", ":", "rendered_with", "=", "entry", "[", "'with'", "]", "else", ":", "rendered_with", "=", "[", "''", "]", "for", "item", "in", "rendered_with", ":", "shells", ".", "append", "(", "{", "'id'", ":", "self", ".", "next_task_id", ",", "'creator'", ":", "key", ",", "'entry'", ":", "entry", ",", "'model'", ":", "self", ".", "pipeline", ".", "model", ",", "'env'", ":", "self", ".", "get_merged_env", "(", ")", ",", "'item'", ":", "item", ",", "'dry_run'", ":", "self", ".", "pipeline", ".", "options", ".", "dry_run", ",", "'debug'", ":", "self", ".", "pipeline", ".", "options", ".", "debug", ",", "'strict'", ":", "self", ".", "pipeline", ".", "options", ".", "strict", ",", "'variables'", ":", "self", ".", "pipeline", ".", "variables", ",", "'temporary_scripts_path'", ":", "self", ".", "pipeline", ".", "options", ".", "temporary_scripts_path", "}", ")", "self", ".", "next_task_id", "+=", "1" ]
Prepare one shell or docker task.
[ "Prepare", "one", "shell", "or", "docker", "task", "." ]
python
train
47.333333
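The 'with' handling above turns a rendered template string into a Python list via ast.literal_eval; a self-contained sketch of just that step, using plain str.format as a stand-in for spline's render helper.

import ast

entry_with = "['{stage}-a', '{stage}-b']"        # a 'with' value as it might appear in the pipeline file
rendered = entry_with.format(stage='build')      # stand-in for render(entry['with'], variables=..., ...)
items = ast.literal_eval(rendered)               # -> ['build-a', 'build-b']
for item in items:
    print('would create one shell task for item:', item)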
nephila/django-knocker
knocker/consumers.py
https://github.com/nephila/django-knocker/blob/d25380d43a1f91285f1581dcf9db8510fe87f354/knocker/consumers.py#L9-L18
def ws_connect(message): """ Channels connection setup. Register the current client on the related Group according to the language """ prefix, language = message['path'].strip('/').split('/') gr = Group('knocker-{0}'.format(language)) gr.add(message.reply_channel) message.channel_session['knocker'] = language message.reply_channel.send({"accept": True})
[ "def", "ws_connect", "(", "message", ")", ":", "prefix", ",", "language", "=", "message", "[", "'path'", "]", ".", "strip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "gr", "=", "Group", "(", "'knocker-{0}'", ".", "format", "(", "language", ")", ")", "gr", ".", "add", "(", "message", ".", "reply_channel", ")", "message", ".", "channel_session", "[", "'knocker'", "]", "=", "language", "message", ".", "reply_channel", ".", "send", "(", "{", "\"accept\"", ":", "True", "}", ")" ]
Channels connection setup. Register the current client on the related Group according to the language
[ "Channels", "connection", "setup", ".", "Register", "the", "current", "client", "on", "the", "related", "Group", "according", "to", "the", "language" ]
python
train
38.2
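The counterpart to this consumer is broadcasting a knock to the same Group; a hedged sketch using the Channels 1.x API the module above is written against, with illustrative payload keys.

import json
from channels import Group

payload = {'title': 'New post', 'message': 'A post was published', 'url': '/blog/new-post/'}
Group('knocker-en').send({'text': json.dumps(payload)})   # reaches every client connected on /en/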
pybel/pybel-tools
src/pybel_tools/mutation/expansion.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/mutation/expansion.py#L299-L324
def expand_internal(universe: BELGraph, graph: BELGraph, edge_predicates: EdgePredicates = None) -> None: """Edges between entities in the sub-graph that pass the given filters. :param universe: The full graph :param graph: A sub-graph to find the upstream information :param edge_predicates: Optional list of edge filter functions (graph, node, node, key, data) -> bool """ edge_filter = and_edge_predicates(edge_predicates) for u, v in itt.product(graph, repeat=2): if graph.has_edge(u, v) or not universe.has_edge(u, v): continue rs = defaultdict(list) for key, data in universe[u][v].items(): if not edge_filter(universe, u, v, key): continue rs[data[RELATION]].append((key, data)) if 1 == len(rs): relation = list(rs)[0] for key, data in rs[relation]: graph.add_edge(u, v, key=key, **data) else: log.debug('Multiple relationship types found between %s and %s', u, v)
[ "def", "expand_internal", "(", "universe", ":", "BELGraph", ",", "graph", ":", "BELGraph", ",", "edge_predicates", ":", "EdgePredicates", "=", "None", ")", "->", "None", ":", "edge_filter", "=", "and_edge_predicates", "(", "edge_predicates", ")", "for", "u", ",", "v", "in", "itt", ".", "product", "(", "graph", ",", "repeat", "=", "2", ")", ":", "if", "graph", ".", "has_edge", "(", "u", ",", "v", ")", "or", "not", "universe", ".", "has_edge", "(", "u", ",", "v", ")", ":", "continue", "rs", "=", "defaultdict", "(", "list", ")", "for", "key", ",", "data", "in", "universe", "[", "u", "]", "[", "v", "]", ".", "items", "(", ")", ":", "if", "not", "edge_filter", "(", "universe", ",", "u", ",", "v", ",", "key", ")", ":", "continue", "rs", "[", "data", "[", "RELATION", "]", "]", ".", "append", "(", "(", "key", ",", "data", ")", ")", "if", "1", "==", "len", "(", "rs", ")", ":", "relation", "=", "list", "(", "rs", ")", "[", "0", "]", "for", "key", ",", "data", "in", "rs", "[", "relation", "]", ":", "graph", ".", "add_edge", "(", "u", ",", "v", ",", "key", "=", "key", ",", "*", "*", "data", ")", "else", ":", "log", ".", "debug", "(", "'Multiple relationship types found between %s and %s'", ",", "u", ",", "v", ")" ]
Edges between entities in the sub-graph that pass the given filters. :param universe: The full graph :param graph: A sub-graph to find the upstream information :param edge_predicates: Optional list of edge filter functions (graph, node, node, key, data) -> bool
[ "Edges", "between", "entities", "in", "the", "sub", "-", "graph", "that", "pass", "the", "given", "filters", "." ]
python
valid
39.346154
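Stripped of the BEL-specific predicate filtering, the core idea is inducing universe edges between nodes already present in the subgraph; a sketch with plain networkx MultiDiGraphs (BELGraph is a MultiDiGraph subclass), using toy relation data.

import itertools as itt
import networkx as nx

universe = nx.MultiDiGraph()
universe.add_edges_from([('a', 'b', {'relation': 'increases'}),
                         ('b', 'c', {'relation': 'decreases'})])
graph = nx.MultiDiGraph()
graph.add_nodes_from(['a', 'b'])                 # subgraph currently has no edges

for u, v in itt.product(graph, repeat=2):
    if graph.has_edge(u, v) or not universe.has_edge(u, v):
        continue
    for key, data in universe[u][v].items():
        graph.add_edge(u, v, key=key, **data)    # copy the internal edge across

print(list(graph.edges(data=True)))              # [('a', 'b', {'relation': 'increases'})]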
Fizzadar/pyinfra
pyinfra/modules/mysql.py
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/mysql.py#L306-L328
def dump( state, host, remote_filename, database=None, # Details for speaking to MySQL via `mysql` CLI mysql_user=None, mysql_password=None, mysql_host=None, mysql_port=None, ): ''' Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``. + database: name of the database to dump + remote_filename: name of the file to dump the SQL to + mysql_*: global module arguments, see above ''' yield '{0} > {1}'.format(make_mysql_command( executable='mysqldump', database=database, user=mysql_user, password=mysql_password, host=mysql_host, port=mysql_port, ), remote_filename)
[ "def", "dump", "(", "state", ",", "host", ",", "remote_filename", ",", "database", "=", "None", ",", "# Details for speaking to MySQL via `mysql` CLI", "mysql_user", "=", "None", ",", "mysql_password", "=", "None", ",", "mysql_host", "=", "None", ",", "mysql_port", "=", "None", ",", ")", ":", "yield", "'{0} > {1}'", ".", "format", "(", "make_mysql_command", "(", "executable", "=", "'mysqldump'", ",", "database", "=", "database", ",", "user", "=", "mysql_user", ",", "password", "=", "mysql_password", ",", "host", "=", "mysql_host", ",", "port", "=", "mysql_port", ",", ")", ",", "remote_filename", ")" ]
Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``. + database: name of the database to dump + remote_filename: name of the file to dump the SQL to + mysql_*: global module arguments, see above
[ "Dump", "a", "MySQL", "database", "into", "a", ".", "sql", "file", ".", "Requires", "mysqldump", "." ]
python
train
28.652174
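The operation above only yields a shell string; a rough sketch of what that string looks like with defaults and no credentials (the exact flag handling in make_mysql_command is not reproduced here, and the paths are placeholders).

executable, database, remote_filename = 'mysqldump', 'mydb', '/tmp/mydb.sql'
mysql_command = '{0} {1}'.format(executable, database)      # stand-in for make_mysql_command(...)
print('{0} > {1}'.format(mysql_command, remote_filename))   # mysqldump mydb > /tmp/mydb.sql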
apache/airflow
scripts/perf/scheduler_ops_metrics.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/scripts/perf/scheduler_ops_metrics.py#L103-L135
def heartbeat(self): """ Override the scheduler heartbeat to determine when the test is complete """ super(SchedulerMetricsJob, self).heartbeat() session = settings.Session() # Get all the relevant task instances TI = TaskInstance successful_tis = ( session .query(TI) .filter(TI.dag_id.in_(DAG_IDS)) .filter(TI.state.in_([State.SUCCESS])) .all() ) session.commit() dagbag = DagBag(SUBDIR) dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS] # the tasks in perf_dag_1 and per_dag_2 have a daily schedule interval. num_task_instances = sum([(timezone.utcnow() - task.start_date).days for dag in dags for task in dag.tasks]) if (len(successful_tis) == num_task_instances or (timezone.utcnow() - self.start_date).total_seconds() > MAX_RUNTIME_SECS): if len(successful_tis) == num_task_instances: self.log.info("All tasks processed! Printing stats.") else: self.log.info("Test timeout reached. Printing available stats.") self.print_stats() set_dags_paused_state(True) sys.exit()
[ "def", "heartbeat", "(", "self", ")", ":", "super", "(", "SchedulerMetricsJob", ",", "self", ")", ".", "heartbeat", "(", ")", "session", "=", "settings", ".", "Session", "(", ")", "# Get all the relevant task instances", "TI", "=", "TaskInstance", "successful_tis", "=", "(", "session", ".", "query", "(", "TI", ")", ".", "filter", "(", "TI", ".", "dag_id", ".", "in_", "(", "DAG_IDS", ")", ")", ".", "filter", "(", "TI", ".", "state", ".", "in_", "(", "[", "State", ".", "SUCCESS", "]", ")", ")", ".", "all", "(", ")", ")", "session", ".", "commit", "(", ")", "dagbag", "=", "DagBag", "(", "SUBDIR", ")", "dags", "=", "[", "dagbag", ".", "dags", "[", "dag_id", "]", "for", "dag_id", "in", "DAG_IDS", "]", "# the tasks in perf_dag_1 and per_dag_2 have a daily schedule interval.", "num_task_instances", "=", "sum", "(", "[", "(", "timezone", ".", "utcnow", "(", ")", "-", "task", ".", "start_date", ")", ".", "days", "for", "dag", "in", "dags", "for", "task", "in", "dag", ".", "tasks", "]", ")", "if", "(", "len", "(", "successful_tis", ")", "==", "num_task_instances", "or", "(", "timezone", ".", "utcnow", "(", ")", "-", "self", ".", "start_date", ")", ".", "total_seconds", "(", ")", ">", "MAX_RUNTIME_SECS", ")", ":", "if", "len", "(", "successful_tis", ")", "==", "num_task_instances", ":", "self", ".", "log", ".", "info", "(", "\"All tasks processed! Printing stats.\"", ")", "else", ":", "self", ".", "log", ".", "info", "(", "\"Test timeout reached. Printing available stats.\"", ")", "self", ".", "print_stats", "(", ")", "set_dags_paused_state", "(", "True", ")", "sys", ".", "exit", "(", ")" ]
Override the scheduler heartbeat to determine when the test is complete
[ "Override", "the", "scheduler", "heartbeat", "to", "determine", "when", "the", "test", "is", "complete" ]
python
test
38.818182
SatelliteQE/nailgun
nailgun/entities.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L4684-L4692
def create(self, create_missing=None): """Manually fetch a complete set of attributes for this entity. For more information, see `Bugzilla #1216236 <https://bugzilla.redhat.com/show_bug.cgi?id=1216236>`_. """ attrs = self.create_json(create_missing) return Location(self._server_config, id=attrs['id']).read()
[ "def", "create", "(", "self", ",", "create_missing", "=", "None", ")", ":", "attrs", "=", "self", ".", "create_json", "(", "create_missing", ")", "return", "Location", "(", "self", ".", "_server_config", ",", "id", "=", "attrs", "[", "'id'", "]", ")", ".", "read", "(", ")" ]
Manually fetch a complete set of attributes for this entity. For more information, see `Bugzilla #1216236 <https://bugzilla.redhat.com/show_bug.cgi?id=1216236>`_.
[ "Manually", "fetch", "a", "complete", "set", "of", "attributes", "for", "this", "entity", "." ]
python
train
39
SpriteLink/NIPAP
nipap/nipap/backend.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap/nipap/backend.py#L3954-L4050
def search_asn(self, auth, query, search_options=None): """ Search ASNs for entries matching 'query' * `auth` [BaseAuth] AAA options. * `query` [dict_to_sql] How the search should be performed. * `search_options` [options_dict] Search options, see below. Returns a list of dicts. The `query` argument passed to this function is designed to be able to specify how quite advanced search operations should be performed in a generic format. It is internally expanded to a SQL WHERE-clause. The `query` is a dict with three elements, where one specifies the operation to perform and the two other specifies its arguments. The arguments can themselves be `query` dicts, to build more complex queries. The :attr:`operator` key specifies what operator should be used for the comparison. Currently the following operators are supported: * :data:`and` - Logical AND * :data:`or` - Logical OR * :data:`equals` - Equality; = * :data:`not_equals` - Inequality; != * :data:`like` - SQL LIKE * :data:`regex_match` - Regular expression match * :data:`regex_not_match` - Regular expression not match The :attr:`val1` and :attr:`val2` keys specifies the values which are subjected to the comparison. :attr:`val1` can be either any prefix attribute or an entire query dict. :attr:`val2` can be either the value you want to compare the prefix attribute to, or an entire `query` dict. The search options can also be used to limit the number of rows returned or set an offset for the result. The following options are available: * :attr:`max_result` - The maximum number of prefixes to return (default :data:`50`). * :attr:`offset` - Offset the result list this many prefixes (default :data:`0`). This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.search_tag` for full understanding. """ if search_options is None: search_options = {} # # sanitize search options and set default if option missing # # max_result if 'max_result' not in search_options: search_options['max_result'] = 50 else: try: search_options['max_result'] = int(search_options['max_result']) except (ValueError, TypeError): raise NipapValueError('Invalid value for option' + ''' 'max_result'. Only integer values allowed.''') # offset if 'offset' not in search_options: search_options['offset'] = 0 else: try: search_options['offset'] = int(search_options['offset']) except (ValueError, TypeError): raise NipapValueError('Invalid value for option' + ''' 'offset'. Only integer values allowed.''') self._logger.debug('search_asn search_options: %s' % unicode(search_options)) opt = None sql = """ SELECT * FROM ip_net_asn """ # add where clause if we have any search terms if query != {}: where, opt = self._expand_asn_query(query) sql += " WHERE " + where sql += " ORDER BY asn LIMIT " + unicode(search_options['max_result']) self._execute(sql, opt) result = list() for row in self._curs_pg: result.append(dict(row)) return { 'search_options': search_options, 'result': result }
[ "def", "search_asn", "(", "self", ",", "auth", ",", "query", ",", "search_options", "=", "None", ")", ":", "if", "search_options", "is", "None", ":", "search_options", "=", "{", "}", "#", "# sanitize search options and set default if option missing", "#", "# max_result", "if", "'max_result'", "not", "in", "search_options", ":", "search_options", "[", "'max_result'", "]", "=", "50", "else", ":", "try", ":", "search_options", "[", "'max_result'", "]", "=", "int", "(", "search_options", "[", "'max_result'", "]", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "raise", "NipapValueError", "(", "'Invalid value for option'", "+", "''' 'max_result'. Only integer values allowed.'''", ")", "# offset", "if", "'offset'", "not", "in", "search_options", ":", "search_options", "[", "'offset'", "]", "=", "0", "else", ":", "try", ":", "search_options", "[", "'offset'", "]", "=", "int", "(", "search_options", "[", "'offset'", "]", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "raise", "NipapValueError", "(", "'Invalid value for option'", "+", "''' 'offset'. Only integer values allowed.'''", ")", "self", ".", "_logger", ".", "debug", "(", "'search_asn search_options: %s'", "%", "unicode", "(", "search_options", ")", ")", "opt", "=", "None", "sql", "=", "\"\"\" SELECT * FROM ip_net_asn \"\"\"", "# add where clause if we have any search terms", "if", "query", "!=", "{", "}", ":", "where", ",", "opt", "=", "self", ".", "_expand_asn_query", "(", "query", ")", "sql", "+=", "\" WHERE \"", "+", "where", "sql", "+=", "\" ORDER BY asn LIMIT \"", "+", "unicode", "(", "search_options", "[", "'max_result'", "]", ")", "self", ".", "_execute", "(", "sql", ",", "opt", ")", "result", "=", "list", "(", ")", "for", "row", "in", "self", ".", "_curs_pg", ":", "result", ".", "append", "(", "dict", "(", "row", ")", ")", "return", "{", "'search_options'", ":", "search_options", ",", "'result'", ":", "result", "}" ]
Search ASNs for entries matching 'query' * `auth` [BaseAuth] AAA options. * `query` [dict_to_sql] How the search should be performed. * `search_options` [options_dict] Search options, see below. Returns a list of dicts. The `query` argument passed to this function is designed to be able to specify how quite advanced search operations should be performed in a generic format. It is internally expanded to a SQL WHERE-clause. The `query` is a dict with three elements, where one specifies the operation to perform and the two other specifies its arguments. The arguments can themselves be `query` dicts, to build more complex queries. The :attr:`operator` key specifies what operator should be used for the comparison. Currently the following operators are supported: * :data:`and` - Logical AND * :data:`or` - Logical OR * :data:`equals` - Equality; = * :data:`not_equals` - Inequality; != * :data:`like` - SQL LIKE * :data:`regex_match` - Regular expression match * :data:`regex_not_match` - Regular expression not match The :attr:`val1` and :attr:`val2` keys specifies the values which are subjected to the comparison. :attr:`val1` can be either any prefix attribute or an entire query dict. :attr:`val2` can be either the value you want to compare the prefix attribute to, or an entire `query` dict. The search options can also be used to limit the number of rows returned or set an offset for the result. The following options are available: * :attr:`max_result` - The maximum number of prefixes to return (default :data:`50`). * :attr:`offset` - Offset the result list this many prefixes (default :data:`0`). This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.search_tag` for full understanding.
[ "Search", "ASNs", "for", "entries", "matching", "query" ]
python
train
39.494845
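A concrete example of the nested query dict format documented above, together with the search options it would run with; the attribute names ('asn', 'name') and values are illustrative placeholders.

query = {
    'operator': 'and',
    'val1': {'operator': 'equals', 'val1': 'asn', 'val2': 64512},
    'val2': {'operator': 'regex_match', 'val1': 'name', 'val2': '^lab-'},
}
search_options = {'max_result': 10, 'offset': 0}
# nipap.search_asn(auth, query, search_options) expands this into a SQL WHERE clause.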
blockstack/blockstack-core
blockstack/lib/snv.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/snv.py#L58-L68
def get_bitcoind_client(): """ Connect to the bitcoind node """ bitcoind_opts = get_bitcoin_opts() bitcoind_host = bitcoind_opts['bitcoind_server'] bitcoind_port = bitcoind_opts['bitcoind_port'] bitcoind_user = bitcoind_opts['bitcoind_user'] bitcoind_passwd = bitcoind_opts['bitcoind_passwd'] return create_bitcoind_service_proxy(bitcoind_user, bitcoind_passwd, server=bitcoind_host, port=bitcoind_port)
[ "def", "get_bitcoind_client", "(", ")", ":", "bitcoind_opts", "=", "get_bitcoin_opts", "(", ")", "bitcoind_host", "=", "bitcoind_opts", "[", "'bitcoind_server'", "]", "bitcoind_port", "=", "bitcoind_opts", "[", "'bitcoind_port'", "]", "bitcoind_user", "=", "bitcoind_opts", "[", "'bitcoind_user'", "]", "bitcoind_passwd", "=", "bitcoind_opts", "[", "'bitcoind_passwd'", "]", "return", "create_bitcoind_service_proxy", "(", "bitcoind_user", ",", "bitcoind_passwd", ",", "server", "=", "bitcoind_host", ",", "port", "=", "bitcoind_port", ")" ]
Connect to the bitcoind node
[ "Connect", "to", "the", "bitcoind", "node" ]
python
train
39.454545
SALib/SALib
src/SALib/sample/morris/local.py
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/local.py#L91-L121
def sum_distances(self, indices, distance_matrix): """Calculate combinatorial distance between a select group of trajectories, indicated by indices Arguments --------- indices : tuple distance_matrix : numpy.ndarray (M,M) Returns ------- numpy.ndarray Notes ----- This function can perhaps be quickened by calculating the sum of the distances. The calculated distances, as they are right now, are only used in a relative way. Purely summing distances would lead to the same result, at a perhaps quicker rate. """ combs_tup = np.array(tuple(combinations(indices, 2))) # Put indices from tuples into two-dimensional array. combs = np.array([[i[0] for i in combs_tup], [i[1] for i in combs_tup]]) # Calculate distance (vectorized) dist = np.sqrt( np.sum(np.square(distance_matrix[combs[0], combs[1]]), axis=0)) return dist
[ "def", "sum_distances", "(", "self", ",", "indices", ",", "distance_matrix", ")", ":", "combs_tup", "=", "np", ".", "array", "(", "tuple", "(", "combinations", "(", "indices", ",", "2", ")", ")", ")", "# Put indices from tuples into two-dimensional array.", "combs", "=", "np", ".", "array", "(", "[", "[", "i", "[", "0", "]", "for", "i", "in", "combs_tup", "]", ",", "[", "i", "[", "1", "]", "for", "i", "in", "combs_tup", "]", "]", ")", "# Calculate distance (vectorized)", "dist", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "np", ".", "square", "(", "distance_matrix", "[", "combs", "[", "0", "]", ",", "combs", "[", "1", "]", "]", ")", ",", "axis", "=", "0", ")", ")", "return", "dist" ]
Calculate combinatorial distance between a select group of trajectories, indicated by indices Arguments --------- indices : tuple distance_matrix : numpy.ndarray (M,M) Returns ------- numpy.ndarray Notes ----- This function can perhaps be quickened by calculating the sum of the distances. The calculated distances, as they are right now, are only used in a relative way. Purely summing distances would lead to the same result, at a perhaps quicker rate.
[ "Calculate", "combinatorial", "distance", "between", "a", "select", "group", "of", "trajectories", "indicated", "by", "indices" ]
python
train
32.548387
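A self-contained sketch of the same combinatorial sum on a toy 4x4 distance matrix; as the Notes above say, the result is a root-of-sum-of-squares over every pair drawn from the candidate set.

import numpy as np
from itertools import combinations

distance_matrix = np.array([[0., 1., 2., 3.],
                            [1., 0., 4., 5.],
                            [2., 4., 0., 6.],
                            [3., 5., 6., 0.]])
indices = (0, 1, 3)                                  # candidate trajectories
combs = np.array(list(combinations(indices, 2))).T   # shape (2, n_pairs)
dist = np.sqrt(np.sum(np.square(distance_matrix[combs[0], combs[1]])))
print(dist)   # sqrt(1**2 + 3**2 + 5**2) = sqrt(35) ~= 5.916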
gholt/swiftly
swiftly/cli/iomanager.py
https://github.com/gholt/swiftly/blob/5bcc1c65323b1caf1f85adbefd9fc4988c072149/swiftly/cli/iomanager.py#L98-L108
def os_path_to_client_path(self, os_path): """ Converts an operating system path into a client path by replacing instances of os.path.sep with '/'. Note: If the client path contains any instances of '/' already, they will be replaced with '-'. """ if os.path.sep == '/': return os_path return os_path.replace('/', '-').replace(os.path.sep, '/')
[ "def", "os_path_to_client_path", "(", "self", ",", "os_path", ")", ":", "if", "os", ".", "path", ".", "sep", "==", "'/'", ":", "return", "os_path", "return", "os_path", ".", "replace", "(", "'/'", ",", "'-'", ")", ".", "replace", "(", "os", ".", "path", ".", "sep", ",", "'/'", ")" ]
Converts an operating system path into a client path by replacing instances of os.path.sep with '/'. Note: If the client path contains any instances of '/' already, they will be replaced with '-'.
[ "Converts", "an", "operating", "system", "path", "into", "a", "client", "path", "by", "replacing", "instances", "of", "os", ".", "path", ".", "sep", "with", "/", "." ]
python
test
37.363636
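The conversion above is easiest to see with a Windows-style separator; a sketch of the same logic with the separator passed in explicitly so the example runs on any platform.

def os_path_to_client_path(os_path, sep):
    # '/' already in the OS path would collide with the client separator, so map it to '-'
    if sep == '/':
        return os_path
    return os_path.replace('/', '-').replace(sep, '/')

print(os_path_to_client_path('photos\\2021/summer\\beach.jpg', sep='\\'))
# -> photos/2021-summer/beach.jpg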
wandb/client
wandb/vendor/prompt_toolkit/key_binding/bindings/named_commands.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/bindings/named_commands.py#L403-L409
def yank_last_arg(event): """ Like `yank_nth_arg`, but if no argument has been given, yank the last word of each line. """ n = (event.arg if event.arg_present else None) event.current_buffer.yank_last_arg(n)
[ "def", "yank_last_arg", "(", "event", ")", ":", "n", "=", "(", "event", ".", "arg", "if", "event", ".", "arg_present", "else", "None", ")", "event", ".", "current_buffer", ".", "yank_last_arg", "(", "n", ")" ]
Like `yank_nth_arg`, but if no argument has been given, yank the last word of each line.
[ "Like", "yank_nth_arg", "but", "if", "no", "argument", "has", "been", "given", "yank", "the", "last", "word", "of", "each", "line", "." ]
python
train
32.142857
rycus86/prometheus_flask_exporter
prometheus_flask_exporter/__init__.py
https://github.com/rycus86/prometheus_flask_exporter/blob/678dbf3097e82a0ddb697268406004cc1f4a26bc/prometheus_flask_exporter/__init__.py#L480-L495
def do_not_track(): """ Decorator to skip the default metrics collection for the method. *Note*: explicit metrics decorators will still collect the data """ def decorator(f): @functools.wraps(f) def func(*args, **kwargs): request.prom_do_not_track = True return f(*args, **kwargs) return func return decorator
[ "def", "do_not_track", "(", ")", ":", "def", "decorator", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "request", ".", "prom_do_not_track", "=", "True", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "func", "return", "decorator" ]
Decorator to skip the default metrics collection for the method. *Note*: explicit metrics decorators will still collect the data
[ "Decorator", "to", "skip", "the", "default", "metrics", "collection", "for", "the", "method", "." ]
python
train
25.875
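In practice the do_not_track decorator above is reached through a PrometheusMetrics instance. A minimal Flask sketch, assuming the usual prometheus_flask_exporter setup from the project's README, looks like this:

from flask import Flask
from prometheus_flask_exporter import PrometheusMetrics

app = Flask(__name__)
metrics = PrometheusMetrics(app)

@app.route('/health')
@metrics.do_not_track()  # sets request.prom_do_not_track, so default metrics skip this view
def health():
    return 'ok'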
rabitt/pysox
sox/file_info.py
https://github.com/rabitt/pysox/blob/eae89bde74567136ec3f723c3e6b369916d9b837/sox/file_info.py#L134-L153
def num_samples(input_filepath): ''' Show number of samples (0 if unavailable). Parameters ---------- input_filepath : str Path to audio file. Returns ------- n_samples : int total number of samples in audio file. Returns 0 if empty or unavailable ''' validate_input_file(input_filepath) output = soxi(input_filepath, 's') if output == '0': logger.warning("Number of samples unavailable for %s", input_filepath) return int(output)
[ "def", "num_samples", "(", "input_filepath", ")", ":", "validate_input_file", "(", "input_filepath", ")", "output", "=", "soxi", "(", "input_filepath", ",", "'s'", ")", "if", "output", "==", "'0'", ":", "logger", ".", "warning", "(", "\"Number of samples unavailable for %s\"", ",", "input_filepath", ")", "return", "int", "(", "output", ")" ]
Show number of samples (0 if unavailable). Parameters ---------- input_filepath : str Path to audio file. Returns ------- n_samples : int total number of samples in audio file. Returns 0 if empty or unavailable
[ "Show", "number", "of", "samples", "(", "0", "if", "unavailable", ")", "." ]
python
valid
24.85
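A usage sketch for num_samples above; it assumes pysox is installed along with the SoX command-line tool it shells out to, and 'example.wav' is a placeholder path:

import sox

n = sox.file_info.num_samples('example.wav')
if n == 0:
    print('sample count unavailable (empty file or soxi could not report it)')
else:
    print('{} samples'.format(n))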
googledatalab/pydatalab
solutionbox/image_classification/mltoolbox/image/classification/_util.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_util.py#L192-L208
def decode_and_resize(image_str_tensor): """Decodes jpeg string, resizes it and returns a uint8 tensor.""" # These constants are set by Inception v3's expectations. height = 299 width = 299 channels = 3 image = tf.image.decode_jpeg(image_str_tensor, channels=channels) # Note resize expects a batch_size, but tf_map supresses that index, # thus we have to expand then squeeze. Resize returns float32 in the # range [0, uint8_max] image = tf.expand_dims(image, 0) image = tf.image.resize_bilinear(image, [height, width], align_corners=False) image = tf.squeeze(image, squeeze_dims=[0]) image = tf.cast(image, dtype=tf.uint8) return image
[ "def", "decode_and_resize", "(", "image_str_tensor", ")", ":", "# These constants are set by Inception v3's expectations.", "height", "=", "299", "width", "=", "299", "channels", "=", "3", "image", "=", "tf", ".", "image", ".", "decode_jpeg", "(", "image_str_tensor", ",", "channels", "=", "channels", ")", "# Note resize expects a batch_size, but tf_map supresses that index,", "# thus we have to expand then squeeze. Resize returns float32 in the", "# range [0, uint8_max]", "image", "=", "tf", ".", "expand_dims", "(", "image", ",", "0", ")", "image", "=", "tf", ".", "image", ".", "resize_bilinear", "(", "image", ",", "[", "height", ",", "width", "]", ",", "align_corners", "=", "False", ")", "image", "=", "tf", ".", "squeeze", "(", "image", ",", "squeeze_dims", "=", "[", "0", "]", ")", "image", "=", "tf", ".", "cast", "(", "image", ",", "dtype", "=", "tf", ".", "uint8", ")", "return", "image" ]
Decodes jpeg string, resizes it and returns a uint8 tensor.
[ "Decodes", "jpeg", "string", "resizes", "it", "and", "returns", "a", "uint8", "tensor", "." ]
python
train
38.294118
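decode_and_resize above is written against the TensorFlow 1.x graph API (resize_bilinear, squeeze_dims), so a hedged way to exercise it is to feed it a scalar JPEG string tensor and evaluate in a session. This assumes the function above is in scope and 'photo.jpg' is a placeholder file:

import tensorflow as tf  # TensorFlow 1.x, matching the API used above

jpeg_bytes = tf.read_file('photo.jpg')        # scalar tf.string tensor
image_tensor = decode_and_resize(jpeg_bytes)  # uint8 tensor of shape (299, 299, 3)

with tf.Session() as sess:
    image = sess.run(image_tensor)
    print(image.shape, image.dtype)           # (299, 299, 3) uint8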
graphql-python/graphql-core
graphql/execution/values.py
https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/values.py#L34-L86
def get_variable_values( schema, # type: GraphQLSchema definition_asts, # type: List[VariableDefinition] inputs, # type: Any ): # type: (...) -> Dict[str, Any] """Prepares an object map of variables of the correct type based on the provided variable definitions and arbitrary input. If the input cannot be parsed to match the variable definitions, a GraphQLError will be thrown.""" if inputs is None: inputs = {} values = {} for def_ast in definition_asts: var_name = def_ast.variable.name.value var_type = type_from_ast(schema, def_ast.type) value = inputs.get(var_name) if not is_input_type(var_type): raise GraphQLError( 'Variable "${var_name}" expected value of type "{var_type}" which cannot be used as an input type.'.format( var_name=var_name, var_type=print_ast(def_ast.type) ), [def_ast], ) elif value is None: if def_ast.default_value is not None: values[var_name] = value_from_ast( def_ast.default_value, var_type ) # type: ignore if isinstance(var_type, GraphQLNonNull): raise GraphQLError( 'Variable "${var_name}" of required type "{var_type}" was not provided.'.format( var_name=var_name, var_type=var_type ), [def_ast], ) else: errors = is_valid_value(value, var_type) if errors: message = u"\n" + u"\n".join(errors) raise GraphQLError( 'Variable "${}" got invalid value {}.{}'.format( var_name, json.dumps(value, sort_keys=True), message ), [def_ast], ) coerced_value = coerce_value(var_type, value) if coerced_value is None: raise Exception("Should have reported error.") values[var_name] = coerced_value return values
[ "def", "get_variable_values", "(", "schema", ",", "# type: GraphQLSchema", "definition_asts", ",", "# type: List[VariableDefinition]", "inputs", ",", "# type: Any", ")", ":", "# type: (...) -> Dict[str, Any]", "if", "inputs", "is", "None", ":", "inputs", "=", "{", "}", "values", "=", "{", "}", "for", "def_ast", "in", "definition_asts", ":", "var_name", "=", "def_ast", ".", "variable", ".", "name", ".", "value", "var_type", "=", "type_from_ast", "(", "schema", ",", "def_ast", ".", "type", ")", "value", "=", "inputs", ".", "get", "(", "var_name", ")", "if", "not", "is_input_type", "(", "var_type", ")", ":", "raise", "GraphQLError", "(", "'Variable \"${var_name}\" expected value of type \"{var_type}\" which cannot be used as an input type.'", ".", "format", "(", "var_name", "=", "var_name", ",", "var_type", "=", "print_ast", "(", "def_ast", ".", "type", ")", ")", ",", "[", "def_ast", "]", ",", ")", "elif", "value", "is", "None", ":", "if", "def_ast", ".", "default_value", "is", "not", "None", ":", "values", "[", "var_name", "]", "=", "value_from_ast", "(", "def_ast", ".", "default_value", ",", "var_type", ")", "# type: ignore", "if", "isinstance", "(", "var_type", ",", "GraphQLNonNull", ")", ":", "raise", "GraphQLError", "(", "'Variable \"${var_name}\" of required type \"{var_type}\" was not provided.'", ".", "format", "(", "var_name", "=", "var_name", ",", "var_type", "=", "var_type", ")", ",", "[", "def_ast", "]", ",", ")", "else", ":", "errors", "=", "is_valid_value", "(", "value", ",", "var_type", ")", "if", "errors", ":", "message", "=", "u\"\\n\"", "+", "u\"\\n\"", ".", "join", "(", "errors", ")", "raise", "GraphQLError", "(", "'Variable \"${}\" got invalid value {}.{}'", ".", "format", "(", "var_name", ",", "json", ".", "dumps", "(", "value", ",", "sort_keys", "=", "True", ")", ",", "message", ")", ",", "[", "def_ast", "]", ",", ")", "coerced_value", "=", "coerce_value", "(", "var_type", ",", "value", ")", "if", "coerced_value", "is", "None", ":", "raise", "Exception", "(", "\"Should have reported error.\"", ")", "values", "[", "var_name", "]", "=", "coerced_value", "return", "values" ]
Prepares an object map of variables of the correct type based on the provided variable definitions and arbitrary input. If the input cannot be parsed to match the variable definitions, a GraphQLError will be thrown.
[ "Prepares", "an", "object", "map", "of", "variables", "of", "the", "correct", "type", "based", "on", "the", "provided", "variable", "definitions", "and", "arbitrary", "input", ".", "If", "the", "input", "cannot", "be", "parsed", "to", "match", "the", "variable", "definitions", "a", "GraphQLError", "will", "be", "thrown", "." ]
python
train
39.226415
Chilipp/psyplot
psyplot/data.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L3855-L4013
def array_info(self, dump=None, paths=None, attrs=True, standardize_dims=True, pwd=None, use_rel_paths=True, alternative_paths={}, ds_description={'fname', 'store'}, full_ds=True, copy=False, **kwargs): """ Get dimension informations on you arrays This method returns a dictionary containing informations on the array in this instance Parameters ---------- dump: bool If True and the dataset has not been dumped so far, it is dumped to a temporary file or the one generated by `paths` is used. If it is False or both, `dump` and `paths` are None, no data will be stored. If it is None and `paths` is not None, `dump` is set to True. %(get_filename_ds.parameters.no_ds|dump)s attrs: bool, optional If True (default), the :attr:`ArrayList.attrs` and :attr:`xarray.DataArray.attrs` attributes are included in the returning dictionary standardize_dims: bool, optional If True (default), the real dimension names in the dataset are replaced by x, y, z and t to be more general. pwd: str Path to the working directory from where the data can be imported. If None, use the current working directory. use_rel_paths: bool, optional If True (default), paths relative to the current working directory are used. Otherwise absolute paths to `pwd` are used ds_description: 'all' or set of {'fname', 'ds', 'num', 'arr', 'store'} Keys to describe the datasets of the arrays. If all, all keys are used. The key descriptions are fname the file name is inserted in the ``'fname'`` key store the data store class and module is inserted in the ``'store'`` key ds the dataset is inserted in the ``'ds'`` key num The unique number assigned to the dataset is inserted in the ``'num'`` key arr The array itself is inserted in the ``'arr'`` key full_ds: bool If True and ``'ds'`` is in `ds_description`, the entire dataset is included. 
Otherwise, only the DataArray converted to a dataset is included copy: bool If True, the arrays and datasets are deep copied Other Parameters ---------------- %(get_filename_ds.other_parameters)s Returns ------- OrderedDict An ordered mapping from array names to dimensions and filename corresponding to the array See Also -------- from_dict""" saved_ds = kwargs.pop('_saved_ds', {}) def get_alternative(f): return next(filter(lambda t: osp.samefile(f, t[0]), six.iteritems(alternative_paths)), [False, f]) if copy: def copy_obj(obj): # try to get the number of the dataset and create only one copy # copy for each dataset try: num = obj.psy.num except AttributeError: pass else: try: return saved_ds[num] except KeyError: saved_ds[num] = obj.psy.copy(True) return saved_ds[num] return obj.psy.copy(True) else: def copy_obj(obj): return obj ret = OrderedDict() if ds_description == 'all': ds_description = {'fname', 'ds', 'num', 'arr', 'store'} if paths is not None: if dump is None: dump = True paths = iter(paths) elif dump is None: dump = False if pwd is None: pwd = getcwd() for arr in self: if isinstance(arr, InteractiveList): ret[arr.arr_name] = arr.array_info( dump, paths, pwd=pwd, attrs=attrs, standardize_dims=standardize_dims, use_rel_paths=use_rel_paths, ds_description=ds_description, alternative_paths=alternative_paths, copy=copy, _saved_ds=saved_ds, **kwargs) else: if standardize_dims: idims = arr.psy.decoder.standardize_dims( next(arr.psy.iter_base_variables), arr.psy.idims) else: idims = arr.psy.idims ret[arr.psy.arr_name] = d = {'dims': idims} if 'variable' in arr.coords: d['name'] = [list(arr.coords['variable'].values)] else: d['name'] = arr.name if 'fname' in ds_description or 'store' in ds_description: fname, store_mod, store_cls = get_filename_ds( arr.psy.base, dump=dump, paths=paths, **kwargs) if 'store' in ds_description: d['store'] = (store_mod, store_cls) if 'fname' in ds_description: d['fname'] = [] for i, f in enumerate(safe_list(fname)): if (f is None or utils.is_remote_url(f)): d['fname'].append(f) else: found, f = get_alternative(f) if use_rel_paths: f = osp.relpath(f, pwd) else: f = osp.abspath(f) d['fname'].append(f) if fname is None or isinstance(fname, six.string_types): d['fname'] = d['fname'][0] else: d['fname'] = tuple(safe_list(fname)) if arr.psy.base.psy._concat_dim is not None: d['concat_dim'] = arr.psy.base.psy._concat_dim if 'ds' in ds_description: if full_ds: d['ds'] = copy_obj(arr.psy.base) else: d['ds'] = copy_obj(arr.to_dataset()) if 'num' in ds_description: d['num'] = arr.psy.base.psy.num if 'arr' in ds_description: d['arr'] = copy_obj(arr) if attrs: d['attrs'] = arr.attrs ret['attrs'] = self.attrs return ret
[ "def", "array_info", "(", "self", ",", "dump", "=", "None", ",", "paths", "=", "None", ",", "attrs", "=", "True", ",", "standardize_dims", "=", "True", ",", "pwd", "=", "None", ",", "use_rel_paths", "=", "True", ",", "alternative_paths", "=", "{", "}", ",", "ds_description", "=", "{", "'fname'", ",", "'store'", "}", ",", "full_ds", "=", "True", ",", "copy", "=", "False", ",", "*", "*", "kwargs", ")", ":", "saved_ds", "=", "kwargs", ".", "pop", "(", "'_saved_ds'", ",", "{", "}", ")", "def", "get_alternative", "(", "f", ")", ":", "return", "next", "(", "filter", "(", "lambda", "t", ":", "osp", ".", "samefile", "(", "f", ",", "t", "[", "0", "]", ")", ",", "six", ".", "iteritems", "(", "alternative_paths", ")", ")", ",", "[", "False", ",", "f", "]", ")", "if", "copy", ":", "def", "copy_obj", "(", "obj", ")", ":", "# try to get the number of the dataset and create only one copy", "# copy for each dataset", "try", ":", "num", "=", "obj", ".", "psy", ".", "num", "except", "AttributeError", ":", "pass", "else", ":", "try", ":", "return", "saved_ds", "[", "num", "]", "except", "KeyError", ":", "saved_ds", "[", "num", "]", "=", "obj", ".", "psy", ".", "copy", "(", "True", ")", "return", "saved_ds", "[", "num", "]", "return", "obj", ".", "psy", ".", "copy", "(", "True", ")", "else", ":", "def", "copy_obj", "(", "obj", ")", ":", "return", "obj", "ret", "=", "OrderedDict", "(", ")", "if", "ds_description", "==", "'all'", ":", "ds_description", "=", "{", "'fname'", ",", "'ds'", ",", "'num'", ",", "'arr'", ",", "'store'", "}", "if", "paths", "is", "not", "None", ":", "if", "dump", "is", "None", ":", "dump", "=", "True", "paths", "=", "iter", "(", "paths", ")", "elif", "dump", "is", "None", ":", "dump", "=", "False", "if", "pwd", "is", "None", ":", "pwd", "=", "getcwd", "(", ")", "for", "arr", "in", "self", ":", "if", "isinstance", "(", "arr", ",", "InteractiveList", ")", ":", "ret", "[", "arr", ".", "arr_name", "]", "=", "arr", ".", "array_info", "(", "dump", ",", "paths", ",", "pwd", "=", "pwd", ",", "attrs", "=", "attrs", ",", "standardize_dims", "=", "standardize_dims", ",", "use_rel_paths", "=", "use_rel_paths", ",", "ds_description", "=", "ds_description", ",", "alternative_paths", "=", "alternative_paths", ",", "copy", "=", "copy", ",", "_saved_ds", "=", "saved_ds", ",", "*", "*", "kwargs", ")", "else", ":", "if", "standardize_dims", ":", "idims", "=", "arr", ".", "psy", ".", "decoder", ".", "standardize_dims", "(", "next", "(", "arr", ".", "psy", ".", "iter_base_variables", ")", ",", "arr", ".", "psy", ".", "idims", ")", "else", ":", "idims", "=", "arr", ".", "psy", ".", "idims", "ret", "[", "arr", ".", "psy", ".", "arr_name", "]", "=", "d", "=", "{", "'dims'", ":", "idims", "}", "if", "'variable'", "in", "arr", ".", "coords", ":", "d", "[", "'name'", "]", "=", "[", "list", "(", "arr", ".", "coords", "[", "'variable'", "]", ".", "values", ")", "]", "else", ":", "d", "[", "'name'", "]", "=", "arr", ".", "name", "if", "'fname'", "in", "ds_description", "or", "'store'", "in", "ds_description", ":", "fname", ",", "store_mod", ",", "store_cls", "=", "get_filename_ds", "(", "arr", ".", "psy", ".", "base", ",", "dump", "=", "dump", ",", "paths", "=", "paths", ",", "*", "*", "kwargs", ")", "if", "'store'", "in", "ds_description", ":", "d", "[", "'store'", "]", "=", "(", "store_mod", ",", "store_cls", ")", "if", "'fname'", "in", "ds_description", ":", "d", "[", "'fname'", "]", "=", "[", "]", "for", "i", ",", "f", "in", "enumerate", "(", "safe_list", "(", "fname", ")", ")", ":", "if", "(", "f", "is", 
"None", "or", "utils", ".", "is_remote_url", "(", "f", ")", ")", ":", "d", "[", "'fname'", "]", ".", "append", "(", "f", ")", "else", ":", "found", ",", "f", "=", "get_alternative", "(", "f", ")", "if", "use_rel_paths", ":", "f", "=", "osp", ".", "relpath", "(", "f", ",", "pwd", ")", "else", ":", "f", "=", "osp", ".", "abspath", "(", "f", ")", "d", "[", "'fname'", "]", ".", "append", "(", "f", ")", "if", "fname", "is", "None", "or", "isinstance", "(", "fname", ",", "six", ".", "string_types", ")", ":", "d", "[", "'fname'", "]", "=", "d", "[", "'fname'", "]", "[", "0", "]", "else", ":", "d", "[", "'fname'", "]", "=", "tuple", "(", "safe_list", "(", "fname", ")", ")", "if", "arr", ".", "psy", ".", "base", ".", "psy", ".", "_concat_dim", "is", "not", "None", ":", "d", "[", "'concat_dim'", "]", "=", "arr", ".", "psy", ".", "base", ".", "psy", ".", "_concat_dim", "if", "'ds'", "in", "ds_description", ":", "if", "full_ds", ":", "d", "[", "'ds'", "]", "=", "copy_obj", "(", "arr", ".", "psy", ".", "base", ")", "else", ":", "d", "[", "'ds'", "]", "=", "copy_obj", "(", "arr", ".", "to_dataset", "(", ")", ")", "if", "'num'", "in", "ds_description", ":", "d", "[", "'num'", "]", "=", "arr", ".", "psy", ".", "base", ".", "psy", ".", "num", "if", "'arr'", "in", "ds_description", ":", "d", "[", "'arr'", "]", "=", "copy_obj", "(", "arr", ")", "if", "attrs", ":", "d", "[", "'attrs'", "]", "=", "arr", ".", "attrs", "ret", "[", "'attrs'", "]", "=", "self", ".", "attrs", "return", "ret" ]
Get dimension informations on you arrays This method returns a dictionary containing informations on the array in this instance Parameters ---------- dump: bool If True and the dataset has not been dumped so far, it is dumped to a temporary file or the one generated by `paths` is used. If it is False or both, `dump` and `paths` are None, no data will be stored. If it is None and `paths` is not None, `dump` is set to True. %(get_filename_ds.parameters.no_ds|dump)s attrs: bool, optional If True (default), the :attr:`ArrayList.attrs` and :attr:`xarray.DataArray.attrs` attributes are included in the returning dictionary standardize_dims: bool, optional If True (default), the real dimension names in the dataset are replaced by x, y, z and t to be more general. pwd: str Path to the working directory from where the data can be imported. If None, use the current working directory. use_rel_paths: bool, optional If True (default), paths relative to the current working directory are used. Otherwise absolute paths to `pwd` are used ds_description: 'all' or set of {'fname', 'ds', 'num', 'arr', 'store'} Keys to describe the datasets of the arrays. If all, all keys are used. The key descriptions are fname the file name is inserted in the ``'fname'`` key store the data store class and module is inserted in the ``'store'`` key ds the dataset is inserted in the ``'ds'`` key num The unique number assigned to the dataset is inserted in the ``'num'`` key arr The array itself is inserted in the ``'arr'`` key full_ds: bool If True and ``'ds'`` is in `ds_description`, the entire dataset is included. Otherwise, only the DataArray converted to a dataset is included copy: bool If True, the arrays and datasets are deep copied Other Parameters ---------------- %(get_filename_ds.other_parameters)s Returns ------- OrderedDict An ordered mapping from array names to dimensions and filename corresponding to the array See Also -------- from_dict
[ "Get", "dimension", "informations", "on", "you", "arrays" ]
python
train
42.314465
brainiak/brainiak
brainiak/isc.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/isc.py#L1386-L1519
def phaseshift_isc(data, pairwise=False, summary_statistic='median', n_shifts=1000, tolerate_nans=True, random_state=None): """Phase randomization for one-sample ISC test For each voxel or ROI, compute the actual ISC and p-values from a null distribution of ISCs where response time series are phase randomized prior to computing ISC. If pairwise, apply phase randomization to each subject and compute pairwise ISCs. If leave-one-out approach is used (pairwise=False), only apply phase randomization to the left-out subject in each iteration of the leave-one-out procedure. Input data should be a list where each item is a time-points by voxels ndarray for a given subject. Multiple input ndarrays must be the same shape. If a single ndarray is supplied, the last dimension is assumed to correspond to subjects. When using leave-one-out approach, NaNs are ignored when computing mean time series of N-1 subjects (default: tolerate_nans=True). Alternatively, you may supply a float between 0 and 1 indicating a threshold proportion of N subjects with non-NaN values required when computing the average time series for a given voxel. For example, if tolerate_nans=.8, ISCs will be computed for any voxel where >= 80% of subjects have non-NaN values, while voxels with < 80% non-NaN values will be assigned NaNs. If set to False, NaNs are not tolerated and voxels with one or more NaNs among the N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False will not affect the pairwise approach; however, if a threshold float is provided, voxels that do not reach this threshold will be excluded. Note that accommodating NaNs may be notably slower than setting tolerate_nans to False. Returns the observed ISC and p-values (two-tailed test), as well as the null distribution of ISCs computed on phase-randomized data. The implementation is based on the work in [Lerner2011]_ and [Simony2016]_. .. [Lerner2011] "Topographic mapping of a hierarchy of temporal receptive windows using a narrated story.", Y. Lerner, C. J. Honey, L. J. Silbert, U. Hasson, 2011, Journal of Neuroscience, 31, 2906-2915. 
https://doi.org/10.1523/jneurosci.3684-10.2011 Parameters ---------- data : list or ndarray (n_TRs x n_voxels x n_subjects) fMRI data for which to compute ISFC pairwise : bool, default: False Whether to use pairwise (True) or leave-one-out (False) approach summary_statistic : str, default: 'median' Summary statistic, either 'median' (default) or 'mean' n_shifts : int, default: 1000 Number of randomly shifted samples tolerate_nans : bool or float, default: True Accommodate NaNs (when averaging in leave-one-out approach) random_state = int, None, or np.random.RandomState, default: None Initial random seed Returns ------- observed : float, observed ISC (without time-shifting) Actual ISCs p : float, p-value p-value based on time-shifting randomization test distribution : ndarray, time-shifts by voxels (optional) Time-shifted null distribution if return_bootstrap=True """ # Check response time series input format data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data) # Get actual observed ISC observed = isc(data, pairwise=pairwise, summary_statistic=summary_statistic, tolerate_nans=tolerate_nans) # Iterate through randomized shifts to create null distribution distribution = [] for i in np.arange(n_shifts): # Random seed to be deterministically re-randomized at each iteration if isinstance(random_state, np.random.RandomState): prng = random_state else: prng = np.random.RandomState(random_state) # Get shifted version of data shifted_data = phase_randomize(data, random_state=prng) # In pairwise approach, apply all shifts then compute pairwise ISCs if pairwise: # Compute null ISC on shifted data for pairwise approach shifted_isc = isc(shifted_data, pairwise=True, summary_statistic=summary_statistic, tolerate_nans=tolerate_nans) # In leave-one-out, apply shift only to each left-out participant elif not pairwise: # Roll subject axis of phase-randomized data shifted_data = np.rollaxis(shifted_data, 2, 0) shifted_isc = [] for s, shifted_subject in enumerate(shifted_data): # ISC of shifted left-out subject vs mean of N-1 subjects nonshifted_mean = np.mean(np.delete(data, s, axis=2), axis=2) loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)), pairwise=False, summary_statistic=None, tolerate_nans=tolerate_nans) shifted_isc.append(loo_isc) # Get summary statistics across left-out subjects shifted_isc = compute_summary_statistic( np.dstack(shifted_isc), summary_statistic=summary_statistic, axis=2) distribution.append(shifted_isc) # Update random state for next iteration random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED)) # Convert distribution to numpy array distribution = np.vstack(distribution) # Get p-value for actual median from shifted distribution p = p_from_null(observed, distribution, side='two-sided', exact=False, axis=0) return observed, p, distribution
[ "def", "phaseshift_isc", "(", "data", ",", "pairwise", "=", "False", ",", "summary_statistic", "=", "'median'", ",", "n_shifts", "=", "1000", ",", "tolerate_nans", "=", "True", ",", "random_state", "=", "None", ")", ":", "# Check response time series input format", "data", ",", "n_TRs", ",", "n_voxels", ",", "n_subjects", "=", "_check_timeseries_input", "(", "data", ")", "# Get actual observed ISC", "observed", "=", "isc", "(", "data", ",", "pairwise", "=", "pairwise", ",", "summary_statistic", "=", "summary_statistic", ",", "tolerate_nans", "=", "tolerate_nans", ")", "# Iterate through randomized shifts to create null distribution", "distribution", "=", "[", "]", "for", "i", "in", "np", ".", "arange", "(", "n_shifts", ")", ":", "# Random seed to be deterministically re-randomized at each iteration", "if", "isinstance", "(", "random_state", ",", "np", ".", "random", ".", "RandomState", ")", ":", "prng", "=", "random_state", "else", ":", "prng", "=", "np", ".", "random", ".", "RandomState", "(", "random_state", ")", "# Get shifted version of data", "shifted_data", "=", "phase_randomize", "(", "data", ",", "random_state", "=", "prng", ")", "# In pairwise approach, apply all shifts then compute pairwise ISCs", "if", "pairwise", ":", "# Compute null ISC on shifted data for pairwise approach", "shifted_isc", "=", "isc", "(", "shifted_data", ",", "pairwise", "=", "True", ",", "summary_statistic", "=", "summary_statistic", ",", "tolerate_nans", "=", "tolerate_nans", ")", "# In leave-one-out, apply shift only to each left-out participant", "elif", "not", "pairwise", ":", "# Roll subject axis of phase-randomized data", "shifted_data", "=", "np", ".", "rollaxis", "(", "shifted_data", ",", "2", ",", "0", ")", "shifted_isc", "=", "[", "]", "for", "s", ",", "shifted_subject", "in", "enumerate", "(", "shifted_data", ")", ":", "# ISC of shifted left-out subject vs mean of N-1 subjects", "nonshifted_mean", "=", "np", ".", "mean", "(", "np", ".", "delete", "(", "data", ",", "s", ",", "axis", "=", "2", ")", ",", "axis", "=", "2", ")", "loo_isc", "=", "isc", "(", "np", ".", "dstack", "(", "(", "shifted_subject", ",", "nonshifted_mean", ")", ")", ",", "pairwise", "=", "False", ",", "summary_statistic", "=", "None", ",", "tolerate_nans", "=", "tolerate_nans", ")", "shifted_isc", ".", "append", "(", "loo_isc", ")", "# Get summary statistics across left-out subjects", "shifted_isc", "=", "compute_summary_statistic", "(", "np", ".", "dstack", "(", "shifted_isc", ")", ",", "summary_statistic", "=", "summary_statistic", ",", "axis", "=", "2", ")", "distribution", ".", "append", "(", "shifted_isc", ")", "# Update random state for next iteration", "random_state", "=", "np", ".", "random", ".", "RandomState", "(", "prng", ".", "randint", "(", "0", ",", "MAX_RANDOM_SEED", ")", ")", "# Convert distribution to numpy array", "distribution", "=", "np", ".", "vstack", "(", "distribution", ")", "# Get p-value for actual median from shifted distribution", "p", "=", "p_from_null", "(", "observed", ",", "distribution", ",", "side", "=", "'two-sided'", ",", "exact", "=", "False", ",", "axis", "=", "0", ")", "return", "observed", ",", "p", ",", "distribution" ]
Phase randomization for one-sample ISC test For each voxel or ROI, compute the actual ISC and p-values from a null distribution of ISCs where response time series are phase randomized prior to computing ISC. If pairwise, apply phase randomization to each subject and compute pairwise ISCs. If leave-one-out approach is used (pairwise=False), only apply phase randomization to the left-out subject in each iteration of the leave-one-out procedure. Input data should be a list where each item is a time-points by voxels ndarray for a given subject. Multiple input ndarrays must be the same shape. If a single ndarray is supplied, the last dimension is assumed to correspond to subjects. When using leave-one-out approach, NaNs are ignored when computing mean time series of N-1 subjects (default: tolerate_nans=True). Alternatively, you may supply a float between 0 and 1 indicating a threshold proportion of N subjects with non-NaN values required when computing the average time series for a given voxel. For example, if tolerate_nans=.8, ISCs will be computed for any voxel where >= 80% of subjects have non-NaN values, while voxels with < 80% non-NaN values will be assigned NaNs. If set to False, NaNs are not tolerated and voxels with one or more NaNs among the N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False will not affect the pairwise approach; however, if a threshold float is provided, voxels that do not reach this threshold will be excluded. Note that accommodating NaNs may be notably slower than setting tolerate_nans to False. Returns the observed ISC and p-values (two-tailed test), as well as the null distribution of ISCs computed on phase-randomized data. The implementation is based on the work in [Lerner2011]_ and [Simony2016]_. .. [Lerner2011] "Topographic mapping of a hierarchy of temporal receptive windows using a narrated story.", Y. Lerner, C. J. Honey, L. J. Silbert, U. Hasson, 2011, Journal of Neuroscience, 31, 2906-2915. https://doi.org/10.1523/jneurosci.3684-10.2011 Parameters ---------- data : list or ndarray (n_TRs x n_voxels x n_subjects) fMRI data for which to compute ISFC pairwise : bool, default: False Whether to use pairwise (True) or leave-one-out (False) approach summary_statistic : str, default: 'median' Summary statistic, either 'median' (default) or 'mean' n_shifts : int, default: 1000 Number of randomly shifted samples tolerate_nans : bool or float, default: True Accommodate NaNs (when averaging in leave-one-out approach) random_state = int, None, or np.random.RandomState, default: None Initial random seed Returns ------- observed : float, observed ISC (without time-shifting) Actual ISCs p : float, p-value p-value based on time-shifting randomization test distribution : ndarray, time-shifts by voxels (optional) Time-shifted null distribution if return_bootstrap=True
[ "Phase", "randomization", "for", "one", "-", "sample", "ISC", "test" ]
python
train
42.850746
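A small synthetic run of phaseshift_isc above; the array shape follows the documented (n_TRs x n_voxels x n_subjects) convention, and n_shifts is kept low here only so the sketch finishes quickly:

import numpy as np
from brainiak.isc import phaseshift_isc

rng = np.random.RandomState(0)
data = rng.randn(100, 10, 5)          # 100 TRs, 10 voxels, 5 subjects

observed, p, distribution = phaseshift_isc(
    data, pairwise=False, summary_statistic='median',
    n_shifts=200, random_state=0)
print(observed.shape, p.shape, distribution.shape)  # per-voxel ISCs, p-values, null ISCs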
astroML/gatspy
gatspy/datasets/rrlyrae.py
https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/datasets/rrlyrae.py#L125-L173
def get_lightcurve(self, star_id, return_1d=True): """Get the light curves for the given ID Parameters ---------- star_id : int A valid integer star id representing an object in the dataset return_1d : boolean (default=True) Specify whether to return 1D arrays of (t, y, dy, filts) or 2D arrays of (t, y, dy) where each column is a filter. Returns ------- t, y, dy : np.ndarrays (if return_1d == False) Times, magnitudes, and magnitude errors. The shape of each array is [Nobs, 5], where the columns refer to [u,g,r,i,z] bands. Non-observations are indicated by NaN. t, y, dy, filts : np.ndarrays (if return_1d == True) Times, magnitudes, magnitude errors, and filters The shape of each array is [Nobs], and non-observations are filtered out. """ filename = '{0}/{1}.dat'.format(self.dirname, star_id) try: data = np.loadtxt(self.data.extractfile(filename)) except KeyError: raise ValueError("invalid star id: {0}".format(star_id)) RA = data[:, 0] DEC = data[:, 1] t = data[:, 2::3] y = data[:, 3::3] dy = data[:, 4::3] nans = (y == -99.99) t[nans] = np.nan y[nans] = np.nan dy[nans] = np.nan if return_1d: t, y, dy, filts = np.broadcast_arrays(t, y, dy, ['u', 'g', 'r', 'i', 'z']) good = ~np.isnan(t) return t[good], y[good], dy[good], filts[good] else: return t, y, dy
[ "def", "get_lightcurve", "(", "self", ",", "star_id", ",", "return_1d", "=", "True", ")", ":", "filename", "=", "'{0}/{1}.dat'", ".", "format", "(", "self", ".", "dirname", ",", "star_id", ")", "try", ":", "data", "=", "np", ".", "loadtxt", "(", "self", ".", "data", ".", "extractfile", "(", "filename", ")", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "\"invalid star id: {0}\"", ".", "format", "(", "star_id", ")", ")", "RA", "=", "data", "[", ":", ",", "0", "]", "DEC", "=", "data", "[", ":", ",", "1", "]", "t", "=", "data", "[", ":", ",", "2", ":", ":", "3", "]", "y", "=", "data", "[", ":", ",", "3", ":", ":", "3", "]", "dy", "=", "data", "[", ":", ",", "4", ":", ":", "3", "]", "nans", "=", "(", "y", "==", "-", "99.99", ")", "t", "[", "nans", "]", "=", "np", ".", "nan", "y", "[", "nans", "]", "=", "np", ".", "nan", "dy", "[", "nans", "]", "=", "np", ".", "nan", "if", "return_1d", ":", "t", ",", "y", ",", "dy", ",", "filts", "=", "np", ".", "broadcast_arrays", "(", "t", ",", "y", ",", "dy", ",", "[", "'u'", ",", "'g'", ",", "'r'", ",", "'i'", ",", "'z'", "]", ")", "good", "=", "~", "np", ".", "isnan", "(", "t", ")", "return", "t", "[", "good", "]", ",", "y", "[", "good", "]", ",", "dy", "[", "good", "]", ",", "filts", "[", "good", "]", "else", ":", "return", "t", ",", "y", ",", "dy" ]
Get the light curves for the given ID Parameters ---------- star_id : int A valid integer star id representing an object in the dataset return_1d : boolean (default=True) Specify whether to return 1D arrays of (t, y, dy, filts) or 2D arrays of (t, y, dy) where each column is a filter. Returns ------- t, y, dy : np.ndarrays (if return_1d == False) Times, magnitudes, and magnitude errors. The shape of each array is [Nobs, 5], where the columns refer to [u,g,r,i,z] bands. Non-observations are indicated by NaN. t, y, dy, filts : np.ndarrays (if return_1d == True) Times, magnitudes, magnitude errors, and filters The shape of each array is [Nobs], and non-observations are filtered out.
[ "Get", "the", "light", "curves", "for", "the", "given", "ID" ]
python
train
33.714286
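get_lightcurve above is normally reached through gatspy's dataset fetcher. A sketch of the documented access pattern follows; the first ID is arbitrary, and the call downloads and caches the survey data on first use:

from gatspy.datasets import fetch_rrlyrae

rrlyrae = fetch_rrlyrae()
star_id = rrlyrae.ids[0]
t, mag, dmag, filts = rrlyrae.get_lightcurve(star_id, return_1d=True)
print(len(t), set(filts))   # observation count and the ugriz bands present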
greenelab/PathCORE-T
pathcore/network.py
https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L568-L652
def _pathway_feature_permutation(pathway_feature_tuples, permutation_max_iters): """Permute the pathways across features for one side in the network. Used in `permute_pathways_across_features` Parameters ----------- pathway_feature_tuples : list(tup(str, int)) a tuple list [(pathway, feature)] where the pathway, feature pairing indicates that a pathway was overrepresented in that feature permutation_max_iters : int specify the maximum number of iterations, limit the number of attempts we have to generate a permutation Returns ----------- list(tup(str, int)), the list of pathway, feature pairings after the permutation """ pathways, features = [list(elements_at_position) for elements_at_position in zip(*pathway_feature_tuples)] original_pathways = pathways[:] random.shuffle(pathways) feature_block_locations = {} i = 0 while i < len(pathways): starting_index = i current_feature = features[i] pathway_set = set() # input is grouped by feature, so we want to keep track of the start # and end of a given "block" of the same feature--this corresponds # to all the pathways overrepresented in that feature. while i < len(pathways) and features[i] == current_feature: # check the results of the permutation. if `pathway_set` does # not contain the current pathway, we are maintaining the # necessary invariants in our permutation thus far. if pathways[i] not in pathway_set: pathway_set.add(pathways[i]) else: k = 0 random_pathway = None while True: # select another random pathway from the list # and get the feature to which it belongs j = random.choice(range(0, len(pathways))) random_pathway = pathways[j] random_feature = features[j] if (random_pathway != pathways[i] and random_pathway not in pathway_set): # if this is a feature we have not already seen, # we are done. if random_feature not in feature_block_locations: break # otherwise, look at the indices that correspond # to that feature's block of pathways feature_block_start, feature_block_end = \ feature_block_locations[random_feature] pathway_block = pathways[feature_block_start: feature_block_end] # make sure that the current pathway is not in # that block--ensures that we maintain the invariant # after the swap if pathways[i] not in pathway_block: break k += 1 if k > permutation_max_iters: print("Permutation step: reached the maximum " "number of iterations {0}.".format( permutation_max_iters)) return None pathway_set.add(random_pathway) pathways[j] = pathways[i] pathways[i] = random_pathway i += 1 ending_index = i feature_block_locations[current_feature] = ( starting_index, ending_index) if original_pathways == pathways: return None return list(zip(pathways, features))
[ "def", "_pathway_feature_permutation", "(", "pathway_feature_tuples", ",", "permutation_max_iters", ")", ":", "pathways", ",", "features", "=", "[", "list", "(", "elements_at_position", ")", "for", "elements_at_position", "in", "zip", "(", "*", "pathway_feature_tuples", ")", "]", "original_pathways", "=", "pathways", "[", ":", "]", "random", ".", "shuffle", "(", "pathways", ")", "feature_block_locations", "=", "{", "}", "i", "=", "0", "while", "i", "<", "len", "(", "pathways", ")", ":", "starting_index", "=", "i", "current_feature", "=", "features", "[", "i", "]", "pathway_set", "=", "set", "(", ")", "# input is grouped by feature, so we want to keep track of the start", "# and end of a given \"block\" of the same feature--this corresponds", "# to all the pathways overrepresented in that feature.", "while", "i", "<", "len", "(", "pathways", ")", "and", "features", "[", "i", "]", "==", "current_feature", ":", "# check the results of the permutation. if `pathway_set` does", "# not contain the current pathway, we are maintaining the", "# necessary invariants in our permutation thus far.", "if", "pathways", "[", "i", "]", "not", "in", "pathway_set", ":", "pathway_set", ".", "add", "(", "pathways", "[", "i", "]", ")", "else", ":", "k", "=", "0", "random_pathway", "=", "None", "while", "True", ":", "# select another random pathway from the list", "# and get the feature to which it belongs", "j", "=", "random", ".", "choice", "(", "range", "(", "0", ",", "len", "(", "pathways", ")", ")", ")", "random_pathway", "=", "pathways", "[", "j", "]", "random_feature", "=", "features", "[", "j", "]", "if", "(", "random_pathway", "!=", "pathways", "[", "i", "]", "and", "random_pathway", "not", "in", "pathway_set", ")", ":", "# if this is a feature we have not already seen,", "# we are done.", "if", "random_feature", "not", "in", "feature_block_locations", ":", "break", "# otherwise, look at the indices that correspond", "# to that feature's block of pathways", "feature_block_start", ",", "feature_block_end", "=", "feature_block_locations", "[", "random_feature", "]", "pathway_block", "=", "pathways", "[", "feature_block_start", ":", "feature_block_end", "]", "# make sure that the current pathway is not in", "# that block--ensures that we maintain the invariant", "# after the swap", "if", "pathways", "[", "i", "]", "not", "in", "pathway_block", ":", "break", "k", "+=", "1", "if", "k", ">", "permutation_max_iters", ":", "print", "(", "\"Permutation step: reached the maximum \"", "\"number of iterations {0}.\"", ".", "format", "(", "permutation_max_iters", ")", ")", "return", "None", "pathway_set", ".", "add", "(", "random_pathway", ")", "pathways", "[", "j", "]", "=", "pathways", "[", "i", "]", "pathways", "[", "i", "]", "=", "random_pathway", "i", "+=", "1", "ending_index", "=", "i", "feature_block_locations", "[", "current_feature", "]", "=", "(", "starting_index", ",", "ending_index", ")", "if", "original_pathways", "==", "pathways", ":", "return", "None", "return", "list", "(", "zip", "(", "pathways", ",", "features", ")", ")" ]
Permute the pathways across features for one side in the network. Used in `permute_pathways_across_features` Parameters ----------- pathway_feature_tuples : list(tup(str, int)) a tuple list [(pathway, feature)] where the pathway, feature pairing indicates that a pathway was overrepresented in that feature permutation_max_iters : int specify the maximum number of iterations, limit the number of attempts we have to generate a permutation Returns ----------- list(tup(str, int)), the list of pathway, feature pairings after the permutation
[ "Permute", "the", "pathways", "across", "features", "for", "one", "side", "in", "the", "network", ".", "Used", "in", "permute_pathways_across_features" ]
python
train
43.964706
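_pathway_feature_permutation above is module-private, but its contract is easy to show on a toy input: tuples grouped by feature go in, and either a permuted pairing with no pathway repeated inside a feature block comes out, or None when the invariant could not be kept. The pathway names here are invented, and the function is assumed to be in scope as defined above:

# input grouped by feature, as the function expects
tuples = [('TCA cycle', 0), ('Flagellar assembly', 0),
          ('TCA cycle', 1), ('Heat shock response', 1)]

shuffled = _pathway_feature_permutation(tuples, permutation_max_iters=100)
# e.g. [('Heat shock response', 0), ('TCA cycle', 0), ('Flagellar assembly', 1), ('TCA cycle', 1)],
# or None if the shuffle equalled the original pairing or no valid swap was found in time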
tensorflow/cleverhans
cleverhans/plot/success_fail.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/plot/success_fail.py#L18-L54
def plot_report_from_path(path, success_name=DEFAULT_SUCCESS_NAME, fail_names=DEFAULT_FAIL_NAMES, label=None, is_max_confidence=True, linewidth=LINEWIDTH, plot_upper_bound=True): """ Plots a success-fail curve from a confidence report stored on disk, :param path: string filepath for the stored report. (Should be the output of make_confidence_report*.py) :param success_name: The name (confidence report key) of the data that should be used to measure success rate :param fail_names: A list of names (confidence report keys) of the data that should be used to measure failure rate. *Only one of these keys will be plotted*. Each key will be tried in order until one is found in the report. This is to support both the output of `make_confidence_report` and `make_confidence_report_bundled`. :param label: Optional string. Name to use for this curve in the legend. :param is_max_confidence: bool. If True, when measuring the failure rate, treat the data as the output of a maximum confidence attack procedure. This means that the attack is optimal (assuming the underlying optimizer is good enough, *which is probably false*, so interpret the plot accordingly) for thresholds >= .5 but for lower thresholds the observed failure rate is a lower bound on the true worst failure rate and the observed coverage is an upper bound (assuming good enough optimization) on the true failure rate. The plot thus draws the threshold >= .5 portion of the curve with a solid line and the upper and lower bounds with a dashed line. See https://openreview.net/forum?id=H1g0piA9tQ for details. If False, the attack procedure is regarded as an ad hoc way of obtaining a loose lower bound, and thus the whole curve is drawn with dashed lines. :param linewidth: thickness of the line to draw :param plot_upper_bound: include upper bound on error rate in plot """ report = load(path) plot_report(report, success_name, fail_names, label, is_max_confidence, linewidth, plot_upper_bound)
[ "def", "plot_report_from_path", "(", "path", ",", "success_name", "=", "DEFAULT_SUCCESS_NAME", ",", "fail_names", "=", "DEFAULT_FAIL_NAMES", ",", "label", "=", "None", ",", "is_max_confidence", "=", "True", ",", "linewidth", "=", "LINEWIDTH", ",", "plot_upper_bound", "=", "True", ")", ":", "report", "=", "load", "(", "path", ")", "plot_report", "(", "report", ",", "success_name", ",", "fail_names", ",", "label", ",", "is_max_confidence", ",", "linewidth", ",", "plot_upper_bound", ")" ]
Plots a success-fail curve from a confidence report stored on disk, :param path: string filepath for the stored report. (Should be the output of make_confidence_report*.py) :param success_name: The name (confidence report key) of the data that should be used to measure success rate :param fail_names: A list of names (confidence report keys) of the data that should be used to measure failure rate. *Only one of these keys will be plotted*. Each key will be tried in order until one is found in the report. This is to support both the output of `make_confidence_report` and `make_confidence_report_bundled`. :param label: Optional string. Name to use for this curve in the legend. :param is_max_confidence: bool. If True, when measuring the failure rate, treat the data as the output of a maximum confidence attack procedure. This means that the attack is optimal (assuming the underlying optimizer is good enough, *which is probably false*, so interpret the plot accordingly) for thresholds >= .5 but for lower thresholds the observed failure rate is a lower bound on the true worst failure rate and the observed coverage is an upper bound (assuming good enough optimization) on the true failure rate. The plot thus draws the threshold >= .5 portion of the curve with a solid line and the upper and lower bounds with a dashed line. See https://openreview.net/forum?id=H1g0piA9tQ for details. If False, the attack procedure is regarded as an ad hoc way of obtaining a loose lower bound, and thus the whole curve is drawn with dashed lines. :param linewidth: thickness of the line to draw :param plot_upper_bound: include upper bound on error rate in plot
[ "Plots", "a", "success", "-", "fail", "curve", "from", "a", "confidence", "report", "stored", "on", "disk", ":", "param", "path", ":", "string", "filepath", "for", "the", "stored", "report", ".", "(", "Should", "be", "the", "output", "of", "make_confidence_report", "*", ".", "py", ")", ":", "param", "success_name", ":", "The", "name", "(", "confidence", "report", "key", ")", "of", "the", "data", "that", "should", "be", "used", "to", "measure", "success", "rate", ":", "param", "fail_names", ":", "A", "list", "of", "names", "(", "confidence", "report", "keys", ")", "of", "the", "data", "that", "should", "be", "used", "to", "measure", "failure", "rate", ".", "*", "Only", "one", "of", "these", "keys", "will", "be", "plotted", "*", ".", "Each", "key", "will", "be", "tried", "in", "order", "until", "one", "is", "found", "in", "the", "report", ".", "This", "is", "to", "support", "both", "the", "output", "of", "make_confidence_report", "and", "make_confidence_report_bundled", ".", ":", "param", "label", ":", "Optional", "string", ".", "Name", "to", "use", "for", "this", "curve", "in", "the", "legend", ".", ":", "param", "is_max_confidence", ":", "bool", ".", "If", "True", "when", "measuring", "the", "failure", "rate", "treat", "the", "data", "as", "the", "output", "of", "a", "maximum", "confidence", "attack", "procedure", ".", "This", "means", "that", "the", "attack", "is", "optimal", "(", "assuming", "the", "underlying", "optimizer", "is", "good", "enough", "*", "which", "is", "probably", "false", "*", "so", "interpret", "the", "plot", "accordingly", ")", "for", "thresholds", ">", "=", ".", "5", "but", "for", "lower", "thresholds", "the", "observed", "failure", "rate", "is", "a", "lower", "bound", "on", "the", "true", "worst", "failure", "rate", "and", "the", "observed", "coverage", "is", "an", "upper", "bound", "(", "assuming", "good", "enough", "optimization", ")", "on", "the", "true", "failure", "rate", ".", "The", "plot", "thus", "draws", "the", "threshold", ">", "=", ".", "5", "portion", "of", "the", "curve", "with", "a", "solid", "line", "and", "the", "upper", "and", "lower", "bounds", "with", "a", "dashed", "line", ".", "See", "https", ":", "//", "openreview", ".", "net", "/", "forum?id", "=", "H1g0piA9tQ", "for", "details", ".", "If", "False", "the", "attack", "procedure", "is", "regarded", "as", "an", "ad", "hoc", "way", "of", "obtaining", "a", "loose", "lower", "bound", "and", "thus", "the", "whole", "curve", "is", "drawn", "with", "dashed", "lines", ".", ":", "param", "linewidth", ":", "thickness", "of", "the", "line", "to", "draw", ":", "param", "plot_upper_bound", ":", "include", "upper", "bound", "on", "error", "rate", "in", "plot" ]
python
train
57.972973
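A hedged plotting sketch for plot_report_from_path above: the report path is a placeholder for whatever make_confidence_report*.py wrote to disk, and the explicit legend/show calls assume the usual matplotlib pyplot workflow:

import matplotlib.pyplot as plt
from cleverhans.plot.success_fail import plot_report_from_path

plot_report_from_path('clean_model_report.joblib', label='baseline model')
plt.legend()
plt.show()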
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/textio.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L197-L235
def integer_list_file(cls, filename): """ Read a list of integers from a file. The file format is: - # anywhere in the line begins a comment - leading and trailing spaces are ignored - empty lines are ignored - integers can be specified as: - decimal numbers ("100" is 100) - hexadecimal numbers ("0x100" is 256) - binary numbers ("0b100" is 4) - octal numbers ("0100" is 64) @type filename: str @param filename: Name of the file to read. @rtype: list( int ) @return: List of integers read from the file. """ count = 0 result = list() fd = open(filename, 'r') for line in fd: count = count + 1 if '#' in line: line = line[ : line.find('#') ] line = line.strip() if line: try: value = cls.integer(line) except ValueError: e = sys.exc_info()[1] msg = "Error in line %d of %s: %s" msg = msg % (count, filename, str(e)) raise ValueError(msg) result.append(value) return result
[ "def", "integer_list_file", "(", "cls", ",", "filename", ")", ":", "count", "=", "0", "result", "=", "list", "(", ")", "fd", "=", "open", "(", "filename", ",", "'r'", ")", "for", "line", "in", "fd", ":", "count", "=", "count", "+", "1", "if", "'#'", "in", "line", ":", "line", "=", "line", "[", ":", "line", ".", "find", "(", "'#'", ")", "]", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ":", "try", ":", "value", "=", "cls", ".", "integer", "(", "line", ")", "except", "ValueError", ":", "e", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "msg", "=", "\"Error in line %d of %s: %s\"", "msg", "=", "msg", "%", "(", "count", ",", "filename", ",", "str", "(", "e", ")", ")", "raise", "ValueError", "(", "msg", ")", "result", ".", "append", "(", "value", ")", "return", "result" ]
Read a list of integers from a file. The file format is: - # anywhere in the line begins a comment - leading and trailing spaces are ignored - empty lines are ignored - integers can be specified as: - decimal numbers ("100" is 100) - hexadecimal numbers ("0x100" is 256) - binary numbers ("0b100" is 4) - octal numbers ("0100" is 64) @type filename: str @param filename: Name of the file to read. @rtype: list( int ) @return: List of integers read from the file.
[ "Read", "a", "list", "of", "integers", "from", "a", "file", "." ]
python
train
31.923077
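In WinAppDbg this classmethod lives on the HexInput helper class; assuming that, a standalone winappdbg install rather than the vendored copy shown in the path, and a small text file in the documented format, usage looks like the following:

from winappdbg.textio import HexInput

# addresses.txt, for example:
#   0x401000   # module entry point
#   0b100      # binary literal -> 4
#   64
addresses = HexInput.integer_list_file('addresses.txt')
print(addresses)   # -> [4198400, 4, 64]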
shaunduncan/smokesignal
smokesignal.py
https://github.com/shaunduncan/smokesignal/blob/7906ad0e469b5d4121377c9ee67f77d2f140f2b9/smokesignal.py#L242-L249
def clear(*signals): """ Clears all callbacks for a particular signal or signals """ signals = signals if signals else receivers.keys() for signal in signals: receivers[signal].clear()
[ "def", "clear", "(", "*", "signals", ")", ":", "signals", "=", "signals", "if", "signals", "else", "receivers", ".", "keys", "(", ")", "for", "signal", "in", "signals", ":", "receivers", "[", "signal", "]", ".", "clear", "(", ")" ]
Clears all callbacks for a particular signal or signals
[ "Clears", "all", "callbacks", "for", "a", "particular", "signal", "or", "signals" ]
python
train
25.75
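A small sketch of the registration and teardown cycle around clear(), using smokesignal's documented on() decorator and emit(); the signal name is arbitrary:

import smokesignal

@smokesignal.on('data_ready')
def handler(*args, **kwargs):
    print('got it')

smokesignal.emit('data_ready')     # -> prints 'got it'
smokesignal.clear('data_ready')    # drop callbacks for just this signal
smokesignal.clear()                # or, with no arguments, clear every signal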
icholy/durationpy
durationpy/duration.py
https://github.com/icholy/durationpy/blob/c67ec62dc6e28807c8a06a23074459cf1671aac0/durationpy/duration.py#L33-L58
def from_str(duration): """Parse a duration string to a datetime.timedelta""" if duration in ("0", "+0", "-0"): return datetime.timedelta() pattern = re.compile('([\d\.]+)([a-zµμ]+)') total = 0 sign = -1 if duration[0] == '-' else 1 matches = pattern.findall(duration) if not len(matches): raise Exception("Invalid duration {}".format(duration)) for (value, unit) in matches: if unit not in units: raise Exception( "Unknown unit {} in duration {}".format(unit, duration)) try: total += float(value) * units[unit] except: raise Exception( "Invalid value {} in duration {}".format(value, duration)) microseconds = total / _microsecond_size return datetime.timedelta(microseconds=sign * microseconds)
[ "def", "from_str", "(", "duration", ")", ":", "if", "duration", "in", "(", "\"0\"", ",", "\"+0\"", ",", "\"-0\"", ")", ":", "return", "datetime", ".", "timedelta", "(", ")", "pattern", "=", "re", ".", "compile", "(", "'([\\d\\.]+)([a-zµμ]+)')", "", "total", "=", "0", "sign", "=", "-", "1", "if", "duration", "[", "0", "]", "==", "'-'", "else", "1", "matches", "=", "pattern", ".", "findall", "(", "duration", ")", "if", "not", "len", "(", "matches", ")", ":", "raise", "Exception", "(", "\"Invalid duration {}\"", ".", "format", "(", "duration", ")", ")", "for", "(", "value", ",", "unit", ")", "in", "matches", ":", "if", "unit", "not", "in", "units", ":", "raise", "Exception", "(", "\"Unknown unit {} in duration {}\"", ".", "format", "(", "unit", ",", "duration", ")", ")", "try", ":", "total", "+=", "float", "(", "value", ")", "*", "units", "[", "unit", "]", "except", ":", "raise", "Exception", "(", "\"Invalid value {} in duration {}\"", ".", "format", "(", "value", ",", "duration", ")", ")", "microseconds", "=", "total", "/", "_microsecond_size", "return", "datetime", ".", "timedelta", "(", "microseconds", "=", "sign", "*", "microseconds", ")" ]
Parse a duration string to a datetime.timedelta
[ "Parse", "a", "duration", "string", "to", "a", "datetime", ".", "timedelta" ]
python
train
31.730769
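A round-trip sketch for from_str above, assuming the package also exposes its to_str counterpart as the project describes; the duration strings follow Go's duration syntax:

import durationpy

delta = durationpy.from_str('1h30m')   # datetime.timedelta of 5400 seconds
print(delta.total_seconds())           # 5400.0
print(durationpy.to_str(delta))        # back to a Go-style duration string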
deepmipt/DeepPavlov
deeppavlov/agents/rich_content/default_rich_content.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/agents/rich_content/default_rich_content.py#L152-L170
def json(self) -> dict: """Returns json compatible state of the ButtonsFrame instance. Returns json compatible state of the ButtonsFrame instance including all nested buttons. Returns: control_json: Json representation of ButtonsFrame state. """ content = {} if self.text: content['text'] = self.text content['controls'] = [control.json() for control in self.content] self.control_json['content'] = content return self.control_json
[ "def", "json", "(", "self", ")", "->", "dict", ":", "content", "=", "{", "}", "if", "self", ".", "text", ":", "content", "[", "'text'", "]", "=", "self", ".", "text", "content", "[", "'controls'", "]", "=", "[", "control", ".", "json", "(", ")", "for", "control", "in", "self", ".", "content", "]", "self", ".", "control_json", "[", "'content'", "]", "=", "content", "return", "self", ".", "control_json" ]
Returns json compatible state of the ButtonsFrame instance. Returns json compatible state of the ButtonsFrame instance including all nested buttons. Returns: control_json: Json representation of ButtonsFrame state.
[ "Returns", "json", "compatible", "state", "of", "the", "ButtonsFrame", "instance", "." ]
python
test
27.526316
corydolphin/flask-cors
flask_cors/core.py
https://github.com/corydolphin/flask-cors/blob/13fbb1ea4c1bb422de91a726c3c7f1038d3743a3/flask_cors/core.py#L349-L378
def serialize_options(opts): """ A helper method to serialize and processes the options dictionary. """ options = (opts or {}).copy() for key in opts.keys(): if key not in DEFAULT_OPTIONS: LOG.warning("Unknown option passed to Flask-CORS: %s", key) # Ensure origins is a list of allowed origins with at least one entry. options['origins'] = sanitize_regex_param(options.get('origins')) options['allow_headers'] = sanitize_regex_param(options.get('allow_headers')) # This is expressly forbidden by the spec. Raise a value error so people # don't get burned in production. if r'.*' in options['origins'] and options['supports_credentials'] and options['send_wildcard']: raise ValueError("Cannot use supports_credentials in conjunction with" "an origin string of '*'. See: " "http://www.w3.org/TR/cors/#resource-requests") serialize_option(options, 'expose_headers') serialize_option(options, 'methods', upper=True) if isinstance(options.get('max_age'), timedelta): options['max_age'] = str(int(options['max_age'].total_seconds())) return options
[ "def", "serialize_options", "(", "opts", ")", ":", "options", "=", "(", "opts", "or", "{", "}", ")", ".", "copy", "(", ")", "for", "key", "in", "opts", ".", "keys", "(", ")", ":", "if", "key", "not", "in", "DEFAULT_OPTIONS", ":", "LOG", ".", "warning", "(", "\"Unknown option passed to Flask-CORS: %s\"", ",", "key", ")", "# Ensure origins is a list of allowed origins with at least one entry.", "options", "[", "'origins'", "]", "=", "sanitize_regex_param", "(", "options", ".", "get", "(", "'origins'", ")", ")", "options", "[", "'allow_headers'", "]", "=", "sanitize_regex_param", "(", "options", ".", "get", "(", "'allow_headers'", ")", ")", "# This is expressly forbidden by the spec. Raise a value error so people", "# don't get burned in production.", "if", "r'.*'", "in", "options", "[", "'origins'", "]", "and", "options", "[", "'supports_credentials'", "]", "and", "options", "[", "'send_wildcard'", "]", ":", "raise", "ValueError", "(", "\"Cannot use supports_credentials in conjunction with\"", "\"an origin string of '*'. See: \"", "\"http://www.w3.org/TR/cors/#resource-requests\"", ")", "serialize_option", "(", "options", ",", "'expose_headers'", ")", "serialize_option", "(", "options", ",", "'methods'", ",", "upper", "=", "True", ")", "if", "isinstance", "(", "options", ".", "get", "(", "'max_age'", ")", ",", "timedelta", ")", ":", "options", "[", "'max_age'", "]", "=", "str", "(", "int", "(", "options", "[", "'max_age'", "]", ".", "total_seconds", "(", ")", ")", ")", "return", "options" ]
A helper method to serialize and processes the options dictionary.
[ "A", "helper", "method", "to", "serialize", "and", "processes", "the", "options", "dictionary", "." ]
python
valid
38.966667
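The serialize_options record above centres on one safety rule: a wildcard origin cannot be combined with supports_credentials and send_wildcard. Below is a minimal stand-alone sketch of that check; the option names mirror the record, but the function is illustrative and is not the Flask-CORS implementation itself.

# Hedged sketch of the wildcard-origin + credentials sanity check.
def check_cors_options(options):
    origins = options.get("origins", ["*"])
    if isinstance(origins, str):
        origins = [origins]                      # loosely mirrors sanitize_regex_param
    wildcard = "*" in origins or r".*" in origins
    if wildcard and options.get("supports_credentials") and options.get("send_wildcard"):
        raise ValueError(
            "Cannot use supports_credentials in conjunction with "
            "an origin string of '*'. See: "
            "http://www.w3.org/TR/cors/#resource-requests")
    return options

if __name__ == "__main__":
    print(check_cors_options({"origins": ["https://example.com"],
                              "supports_credentials": True,
                              "send_wildcard": False}))
    try:
        check_cors_options({"origins": "*",
                            "supports_credentials": True,
                            "send_wildcard": True})
    except ValueError as err:
        print("rejected:", err)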
openearth/bmi-python
bmi/wrapper.py
https://github.com/openearth/bmi-python/blob/2f53f24d45515eb0711c2d28ddd6c1582045248f/bmi/wrapper.py#L429-L438
def inq_compound(self, name): """ Return the number of fields and size (not yet) of a compound type. """ name = create_string_buffer(name) self.library.inq_compound.argtypes = [c_char_p, POINTER(c_int)] self.library.inq_compound.restype = None nfields = c_int() self.library.inq_compound(name, byref(nfields)) return nfields.value
[ "def", "inq_compound", "(", "self", ",", "name", ")", ":", "name", "=", "create_string_buffer", "(", "name", ")", "self", ".", "library", ".", "inq_compound", ".", "argtypes", "=", "[", "c_char_p", ",", "POINTER", "(", "c_int", ")", "]", "self", ".", "library", ".", "inq_compound", ".", "restype", "=", "None", "nfields", "=", "c_int", "(", ")", "self", ".", "library", ".", "inq_compound", "(", "name", ",", "byref", "(", "nfields", ")", ")", "return", "nfields", ".", "value" ]
Return the number of fields and size (not yet) of a compound type.
[ "Return", "the", "number", "of", "fields", "and", "size", "(", "not", "yet", ")", "of", "a", "compound", "type", "." ]
python
train
39.3
apache/airflow
airflow/contrib/hooks/mongo_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/mongo_hook.py#L281-L297
def delete_many(self, mongo_collection, filter_doc, mongo_db=None, **kwargs): """ Deletes one or more documents in a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many :param mongo_collection: The name of the collection to delete from. :type mongo_collection: str :param filter_doc: A query that matches the documents to delete. :type filter_doc: dict :param mongo_db: The name of the database to use. Can be omitted; then the database from the connection string is used. :type mongo_db: str """ collection = self.get_collection(mongo_collection, mongo_db=mongo_db) return collection.delete_many(filter_doc, **kwargs)
[ "def", "delete_many", "(", "self", ",", "mongo_collection", ",", "filter_doc", ",", "mongo_db", "=", "None", ",", "*", "*", "kwargs", ")", ":", "collection", "=", "self", ".", "get_collection", "(", "mongo_collection", ",", "mongo_db", "=", "mongo_db", ")", "return", "collection", ".", "delete_many", "(", "filter_doc", ",", "*", "*", "kwargs", ")" ]
Deletes one or more documents in a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many :param mongo_collection: The name of the collection to delete from. :type mongo_collection: str :param filter_doc: A query that matches the documents to delete. :type filter_doc: dict :param mongo_db: The name of the database to use. Can be omitted; then the database from the connection string is used. :type mongo_db: str
[ "Deletes", "one", "or", "more", "documents", "in", "a", "mongo", "collection", ".", "https", ":", "//", "api", ".", "mongodb", ".", "com", "/", "python", "/", "current", "/", "api", "/", "pymongo", "/", "collection", ".", "html#pymongo", ".", "collection", ".", "Collection", ".", "delete_many" ]
python
test
46.352941
learningequality/morango
morango/api/permissions.py
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/api/permissions.py#L15-L43
def authenticate_credentials(self, userargs, password, request=None): """ Authenticate the userargs and password against Django auth backends. The "userargs" string may be just the username, or a querystring-encoded set of params. """ credentials = { 'password': password } if "=" not in userargs: # if it doesn't seem to be in querystring format, just use it as the username credentials[get_user_model().USERNAME_FIELD] = userargs else: # parse out the user args from querystring format into the credentials dict for arg in userargs.split("&"): key, val = arg.split("=") credentials[key] = val # authenticate the user via Django's auth backends user = authenticate(**credentials) if user is None: raise exceptions.AuthenticationFailed('Invalid credentials.') if not user.is_active: raise exceptions.AuthenticationFailed('User inactive or deleted.') return (user, None)
[ "def", "authenticate_credentials", "(", "self", ",", "userargs", ",", "password", ",", "request", "=", "None", ")", ":", "credentials", "=", "{", "'password'", ":", "password", "}", "if", "\"=\"", "not", "in", "userargs", ":", "# if it doesn't seem to be in querystring format, just use it as the username", "credentials", "[", "get_user_model", "(", ")", ".", "USERNAME_FIELD", "]", "=", "userargs", "else", ":", "# parse out the user args from querystring format into the credentials dict", "for", "arg", "in", "userargs", ".", "split", "(", "\"&\"", ")", ":", "key", ",", "val", "=", "arg", ".", "split", "(", "\"=\"", ")", "credentials", "[", "key", "]", "=", "val", "# authenticate the user via Django's auth backends", "user", "=", "authenticate", "(", "*", "*", "credentials", ")", "if", "user", "is", "None", ":", "raise", "exceptions", ".", "AuthenticationFailed", "(", "'Invalid credentials.'", ")", "if", "not", "user", ".", "is_active", ":", "raise", "exceptions", ".", "AuthenticationFailed", "(", "'User inactive or deleted.'", ")", "return", "(", "user", ",", "None", ")" ]
Authenticate the userargs and password against Django auth backends. The "userargs" string may be just the username, or a querystring-encoded set of params.
[ "Authenticate", "the", "userargs", "and", "password", "against", "Django", "auth", "backends", ".", "The", "userargs", "string", "may", "be", "just", "the", "username", "or", "a", "querystring", "-", "encoded", "set", "of", "params", "." ]
python
valid
36.793103
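The credential-parsing branch in the record above (a plain username versus a querystring-encoded parameter set) can be exercised without Django. In this sketch, USERNAME_FIELD is an assumed stand-in for get_user_model().USERNAME_FIELD, and no authentication backend is called.

# Hedged sketch of the "username or querystring" credential parsing.
USERNAME_FIELD = "username"          # assumed stand-in for the Django user model field

def parse_userargs(userargs, password):
    credentials = {"password": password}
    if "=" not in userargs:
        # Plain username, e.g. "alice"
        credentials[USERNAME_FIELD] = userargs
    else:
        # Querystring form, e.g. "username=alice&facility=f1"
        for arg in userargs.split("&"):
            key, val = arg.split("=")
            credentials[key] = val
    return credentials

if __name__ == "__main__":
    print(parse_userargs("alice", "s3cret"))
    print(parse_userargs("username=alice&facility=f1", "s3cret"))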
apache/incubator-mxnet
example/gluon/lipnet/utils/download_data.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/utils/download_data.py#L28-L52
def download_mp4(from_idx, to_idx, _params): """ download mp4s """ succ = set() fail = set() for idx in range(from_idx, to_idx): name = 's' + str(idx) save_folder = '{src_path}/{nm}'.format(src_path=_params['src_path'], nm=name) if idx == 0 or os.path.isdir(save_folder): continue script = "http://spandh.dcs.shef.ac.uk/gridcorpus/{nm}/video/{nm}.mpg_vcd.zip".format( \ nm=name) down_sc = 'cd {src_path} && curl {script} --output {nm}.mpg_vcd.zip && \ unzip {nm}.mpg_vcd.zip'.format(script=script, nm=name, src_path=_params['src_path']) try: print(down_sc) os.system(down_sc) succ.add(idx) except OSError as error: print(error) fail.add(idx) return (succ, fail)
[ "def", "download_mp4", "(", "from_idx", ",", "to_idx", ",", "_params", ")", ":", "succ", "=", "set", "(", ")", "fail", "=", "set", "(", ")", "for", "idx", "in", "range", "(", "from_idx", ",", "to_idx", ")", ":", "name", "=", "'s'", "+", "str", "(", "idx", ")", "save_folder", "=", "'{src_path}/{nm}'", ".", "format", "(", "src_path", "=", "_params", "[", "'src_path'", "]", ",", "nm", "=", "name", ")", "if", "idx", "==", "0", "or", "os", ".", "path", ".", "isdir", "(", "save_folder", ")", ":", "continue", "script", "=", "\"http://spandh.dcs.shef.ac.uk/gridcorpus/{nm}/video/{nm}.mpg_vcd.zip\"", ".", "format", "(", "nm", "=", "name", ")", "down_sc", "=", "'cd {src_path} && curl {script} --output {nm}.mpg_vcd.zip && \\\n unzip {nm}.mpg_vcd.zip'", ".", "format", "(", "script", "=", "script", ",", "nm", "=", "name", ",", "src_path", "=", "_params", "[", "'src_path'", "]", ")", "try", ":", "print", "(", "down_sc", ")", "os", ".", "system", "(", "down_sc", ")", "succ", ".", "add", "(", "idx", ")", "except", "OSError", "as", "error", ":", "print", "(", "error", ")", "fail", ".", "add", "(", "idx", ")", "return", "(", "succ", ",", "fail", ")" ]
download mp4s
[ "download", "mp4s" ]
python
train
37.4
daviddrysdale/python-phonenumbers
python/phonenumbers/phonenumberutil.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L2552-L2572
def _parse_prefix_as_idd(idd_pattern, number): """Strips the IDD from the start of the number if present. Helper function used by _maybe_strip_i18n_prefix_and_normalize(). Returns a 2-tuple: - Boolean indicating if IDD was stripped - Number with IDD stripped """ match = idd_pattern.match(number) if match: match_end = match.end() # Only strip this if the first digit after the match is not a 0, since # country calling codes cannot begin with 0. digit_match = _CAPTURING_DIGIT_PATTERN.search(number[match_end:]) if digit_match: normalized_group = normalize_digits_only(digit_match.group(1)) if normalized_group == U_ZERO: return (False, number) return (True, number[match_end:]) return (False, number)
[ "def", "_parse_prefix_as_idd", "(", "idd_pattern", ",", "number", ")", ":", "match", "=", "idd_pattern", ".", "match", "(", "number", ")", "if", "match", ":", "match_end", "=", "match", ".", "end", "(", ")", "# Only strip this if the first digit after the match is not a 0, since", "# country calling codes cannot begin with 0.", "digit_match", "=", "_CAPTURING_DIGIT_PATTERN", ".", "search", "(", "number", "[", "match_end", ":", "]", ")", "if", "digit_match", ":", "normalized_group", "=", "normalize_digits_only", "(", "digit_match", ".", "group", "(", "1", ")", ")", "if", "normalized_group", "==", "U_ZERO", ":", "return", "(", "False", ",", "number", ")", "return", "(", "True", ",", "number", "[", "match_end", ":", "]", ")", "return", "(", "False", ",", "number", ")" ]
Strips the IDD from the start of the number if present. Helper function used by _maybe_strip_i18n_prefix_and_normalize(). Returns a 2-tuple: - Boolean indicating if IDD was stripped - Number with IDD stripped
[ "Strips", "the", "IDD", "from", "the", "start", "of", "the", "number", "if", "present", "." ]
python
train
38.666667
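The IDD-stripping logic above is easy to demonstrate in isolation. The sketch below follows the same rule (strip the international dialling prefix only if the next digit is not 0), but the example prefix is assumed and this is not the phonenumbers API.

# Hedged sketch of IDD-prefix stripping.
import re

_CAPTURING_DIGIT = re.compile(r"(\d)")

def strip_idd(idd_pattern, number):
    """Return a 2-tuple: (was the IDD stripped?, remaining number)."""
    match = idd_pattern.match(number)
    if match:
        rest = number[match.end():]
        digit = _CAPTURING_DIGIT.search(rest)
        # Country calling codes never start with 0, so refuse to strip then.
        if digit and digit.group(1) == "0":
            return (False, number)
        return (True, rest)
    return (False, number)

if __name__ == "__main__":
    idd = re.compile(r"00")                    # assumed IDD prefix for illustration
    print(strip_idd(idd, "0044123456"))        # (True, '44123456')
    print(strip_idd(idd, "0004123456"))        # (False, '0004123456')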
Unidata/siphon
siphon/simplewebservice/iastate.py
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/iastate.py#L53-L75
def request_all_data(cls, time, pressure=None, **kwargs): """Retrieve upper air observations from Iowa State's archive for all stations. Parameters ---------- time : datetime The date and time of the desired observation. pressure : float, optional The mandatory pressure level at which to request data (in hPa). If none is given, all the available data in the profiles is returned. kwargs Arbitrary keyword arguments to use to initialize source Returns ------- :class:`pandas.DataFrame` containing the data """ endpoint = cls() df = endpoint._get_data(time, None, pressure, **kwargs) return df
[ "def", "request_all_data", "(", "cls", ",", "time", ",", "pressure", "=", "None", ",", "*", "*", "kwargs", ")", ":", "endpoint", "=", "cls", "(", ")", "df", "=", "endpoint", ".", "_get_data", "(", "time", ",", "None", ",", "pressure", ",", "*", "*", "kwargs", ")", "return", "df" ]
Retrieve upper air observations from Iowa State's archive for all stations. Parameters ---------- time : datetime The date and time of the desired observation. pressure : float, optional The mandatory pressure level at which to request data (in hPa). If none is given, all the available data in the profiles is returned. kwargs Arbitrary keyword arguments to use to initialize source Returns ------- :class:`pandas.DataFrame` containing the data
[ "Retrieve", "upper", "air", "observations", "from", "Iowa", "State", "s", "archive", "for", "all", "stations", "." ]
python
train
31.826087
cortical-io/retina-sdk.py
retinasdk/full_client.py
https://github.com/cortical-io/retina-sdk.py/blob/474c13ad399fe1e974d2650335537608f4456b07/retinasdk/full_client.py#L116-L126
def getTokensForText(self, body, POStags=None): """Get tokenized input text Args: body, str: The text to be tokenized (required) POStags, str: Specify desired POS types (optional) Returns: list of str Raises: CorticalioException: if the request was not successful """ return self._text.getTokensForText(self._retina, body, POStags)
[ "def", "getTokensForText", "(", "self", ",", "body", ",", "POStags", "=", "None", ")", ":", "return", "self", ".", "_text", ".", "getTokensForText", "(", "self", ".", "_retina", ",", "body", ",", "POStags", ")" ]
Get tokenized input text Args: body, str: The text to be tokenized (required) POStags, str: Specify desired POS types (optional) Returns: list of str Raises: CorticalioException: if the request was not successful
[ "Get", "tokenized", "input", "text", "Args", ":", "body", "str", ":", "The", "text", "to", "be", "tokenized", "(", "required", ")", "POStags", "str", ":", "Specify", "desired", "POS", "types", "(", "optional", ")", "Returns", ":", "list", "of", "str", "Raises", ":", "CorticalioException", ":", "if", "the", "request", "was", "not", "successful" ]
python
train
37.909091
pypa/pipenv
pipenv/vendor/pexpect/screen.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/screen.py#L138-L144
def pretty (self): '''This returns a copy of the screen as a unicode string with an ASCII text box around the screen border. This is similar to __str__/__unicode__ except that it adds a box.''' top_bot = u'+' + u'-'*self.cols + u'+\n' return top_bot + u'\n'.join([u'|'+line+u'|' for line in unicode(self).split(u'\n')]) + u'\n' + top_bot
[ "def", "pretty", "(", "self", ")", ":", "top_bot", "=", "u'+'", "+", "u'-'", "*", "self", ".", "cols", "+", "u'+\\n'", "return", "top_bot", "+", "u'\\n'", ".", "join", "(", "[", "u'|'", "+", "line", "+", "u'|'", "for", "line", "in", "unicode", "(", "self", ")", ".", "split", "(", "u'\\n'", ")", "]", ")", "+", "u'\\n'", "+", "top_bot" ]
This returns a copy of the screen as a unicode string with an ASCII text box around the screen border. This is similar to __str__/__unicode__ except that it adds a box.
[ "This", "returns", "a", "copy", "of", "the", "screen", "as", "a", "unicode", "string", "with", "an", "ASCII", "text", "box", "around", "the", "screen", "border", ".", "This", "is", "similar", "to", "__str__", "/", "__unicode__", "except", "that", "it", "adds", "a", "box", "." ]
python
train
53.142857
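The pretty() record above draws an ASCII box around screen text. Below is a Python 3 sketch of the same rendering (str in place of the Python 2 unicode() call), with an added ljust() so ragged lines still line up; the original assumes fixed-width screen rows.

# Hedged sketch of the ASCII text-box rendering.
def boxed(text, cols):
    top_bot = "+" + "-" * cols + "+\n"
    body = "\n".join("|" + line.ljust(cols) + "|" for line in text.split("\n"))
    return top_bot + body + "\n" + top_bot

if __name__ == "__main__":
    print(boxed("hello\nworld", cols=5))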
marcomusy/vtkplotter
vtkplotter/analysis.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/analysis.py#L1696-L1729
def convexHull(actor_or_list, alphaConstant=0): """ Create a 3D Delaunay triangulation of input points. :param actor_or_list: can be either an ``Actor`` or a list of 3D points. :param float alphaConstant: For a non-zero alpha value, only verts, edges, faces, or tetra contained within the circumsphere (of radius alpha) will be output. Otherwise, only tetrahedra will be output. .. hint:: |convexHull| |convexHull.py|_ """ if vu.isSequence(actor_or_list): actor = vs.Points(actor_or_list) else: actor = actor_or_list apoly = actor.clean().polydata() triangleFilter = vtk.vtkTriangleFilter() triangleFilter.SetInputData(apoly) triangleFilter.Update() poly = triangleFilter.GetOutput() delaunay = vtk.vtkDelaunay3D() # Create the convex hull of the pointcloud if alphaConstant: delaunay.SetAlpha(alphaConstant) delaunay.SetInputData(poly) delaunay.Update() surfaceFilter = vtk.vtkDataSetSurfaceFilter() surfaceFilter.SetInputConnection(delaunay.GetOutputPort()) surfaceFilter.Update() chuact = Actor(surfaceFilter.GetOutput()) return chuact
[ "def", "convexHull", "(", "actor_or_list", ",", "alphaConstant", "=", "0", ")", ":", "if", "vu", ".", "isSequence", "(", "actor_or_list", ")", ":", "actor", "=", "vs", ".", "Points", "(", "actor_or_list", ")", "else", ":", "actor", "=", "actor_or_list", "apoly", "=", "actor", ".", "clean", "(", ")", ".", "polydata", "(", ")", "triangleFilter", "=", "vtk", ".", "vtkTriangleFilter", "(", ")", "triangleFilter", ".", "SetInputData", "(", "apoly", ")", "triangleFilter", ".", "Update", "(", ")", "poly", "=", "triangleFilter", ".", "GetOutput", "(", ")", "delaunay", "=", "vtk", ".", "vtkDelaunay3D", "(", ")", "# Create the convex hull of the pointcloud", "if", "alphaConstant", ":", "delaunay", ".", "SetAlpha", "(", "alphaConstant", ")", "delaunay", ".", "SetInputData", "(", "poly", ")", "delaunay", ".", "Update", "(", ")", "surfaceFilter", "=", "vtk", ".", "vtkDataSetSurfaceFilter", "(", ")", "surfaceFilter", ".", "SetInputConnection", "(", "delaunay", ".", "GetOutputPort", "(", ")", ")", "surfaceFilter", ".", "Update", "(", ")", "chuact", "=", "Actor", "(", "surfaceFilter", ".", "GetOutput", "(", ")", ")", "return", "chuact" ]
Create a 3D Delaunay triangulation of input points. :param actor_or_list: can be either an ``Actor`` or a list of 3D points. :param float alphaConstant: For a non-zero alpha value, only verts, edges, faces, or tetra contained within the circumsphere (of radius alpha) will be output. Otherwise, only tetrahedra will be output. .. hint:: |convexHull| |convexHull.py|_
[ "Create", "a", "3D", "Delaunay", "triangulation", "of", "input", "points", "." ]
python
train
33.529412
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAData/base_datastruct.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAData/base_datastruct.py#L1180-L1199
def select_day(self, day): """选取日期(一般用于分钟线) Arguments: day {[type]} -- [description] Raises: ValueError -- [description] Returns: [type] -- [description] """ def _select_day(day): return self.data.loc[day, slice(None)] try: return self.new(_select_day(day), self.type, self.if_fq) except: raise ValueError('QA CANNOT GET THIS Day {} '.format(day))
[ "def", "select_day", "(", "self", ",", "day", ")", ":", "def", "_select_day", "(", "day", ")", ":", "return", "self", ".", "data", ".", "loc", "[", "day", ",", "slice", "(", "None", ")", "]", "try", ":", "return", "self", ".", "new", "(", "_select_day", "(", "day", ")", ",", "self", ".", "type", ",", "self", ".", "if_fq", ")", "except", ":", "raise", "ValueError", "(", "'QA CANNOT GET THIS Day {} '", ".", "format", "(", "day", ")", ")" ]
Select a date (generally used for minute-bar data) Arguments: day {[type]} -- [description] Raises: ValueError -- [description] Returns: [type] -- [description]
[ "Select", "a", "date", "(", "generally", "used", "for", "minute", "-", "bar", "data", ")" ]
python
train
23.45
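The select_day record above relies on a pandas MultiIndex lookup of the form data.loc[day, slice(None)]. The self-contained sketch below builds an assumed (datetime, code) index with made-up values to show that lookup; it is an illustration, not the QUANTAXIS data structure.

# Hedged sketch: select one date from a (datetime, code) MultiIndex.
import pandas as pd

idx = pd.MultiIndex.from_product(
    [pd.to_datetime(["2024-01-02", "2024-01-03"]), ["000001", "600000"]],
    names=["datetime", "code"])
df = pd.DataFrame({"close": [10.0, 20.0, 10.5, 20.5]}, index=idx)

def select_day(data, day):
    try:
        # Same lookup shape as the record: one datetime level, all codes.
        return data.loc[day, slice(None)]
    except KeyError:
        raise ValueError("Cannot get this day {}".format(day))

if __name__ == "__main__":
    print(select_day(df, pd.Timestamp("2024-01-03")))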
tanghaibao/jcvi
jcvi/formats/bed.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L956-L981
def mergebydepth(args): """ %prog mergebydepth reads.bed genome.fasta Similar to mergeBed, but only returns regions beyond certain depth. """ p = OptionParser(mergebydepth.__doc__) p.add_option("--mindepth", default=3, type="int", help="Minimum depth required") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) bedfile, fastafile = args mindepth = opts.mindepth bedgraph = make_bedgraph(bedfile) bedgraphfiltered = bedgraph + ".d{0}".format(mindepth) if need_update(bedgraph, bedgraphfiltered): filter([bedgraph, "--minaccn={0}".format(mindepth), "--outfile={0}".format(bedgraphfiltered)]) merged = bedgraphfiltered + ".merge.fasta" if need_update(bedgraphfiltered, merged): mergeBed(bedgraphfiltered, sorted=True)
[ "def", "mergebydepth", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "mergebydepth", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--mindepth\"", ",", "default", "=", "3", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Minimum depth required\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "bedfile", ",", "fastafile", "=", "args", "mindepth", "=", "opts", ".", "mindepth", "bedgraph", "=", "make_bedgraph", "(", "bedfile", ")", "bedgraphfiltered", "=", "bedgraph", "+", "\".d{0}\"", ".", "format", "(", "mindepth", ")", "if", "need_update", "(", "bedgraph", ",", "bedgraphfiltered", ")", ":", "filter", "(", "[", "bedgraph", ",", "\"--minaccn={0}\"", ".", "format", "(", "mindepth", ")", ",", "\"--outfile={0}\"", ".", "format", "(", "bedgraphfiltered", ")", "]", ")", "merged", "=", "bedgraphfiltered", "+", "\".merge.fasta\"", "if", "need_update", "(", "bedgraphfiltered", ",", "merged", ")", ":", "mergeBed", "(", "bedgraphfiltered", ",", "sorted", "=", "True", ")" ]
%prog mergebydepth reads.bed genome.fasta Similar to mergeBed, but only returns regions beyond certain depth.
[ "%prog", "mergebydepth", "reads", ".", "bed", "genome", ".", "fasta" ]
python
train
32.384615
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/tex.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/tex.py#L865-L976
def generate_common(env): """Add internal Builders and construction variables for LaTeX to an Environment.""" # Add OSX system paths so TeX tools can be found # when a list of tools is given the exists() method is not called generate_darwin(env) # A generic tex file Action, sufficient for all tex files. global TeXAction if TeXAction is None: TeXAction = SCons.Action.Action("$TEXCOM", "$TEXCOMSTR") # An Action to build a latex file. This might be needed more # than once if we are dealing with labels and bibtex. global LaTeXAction if LaTeXAction is None: LaTeXAction = SCons.Action.Action("$LATEXCOM", "$LATEXCOMSTR") # Define an action to run BibTeX on a file. global BibTeXAction if BibTeXAction is None: BibTeXAction = SCons.Action.Action("$BIBTEXCOM", "$BIBTEXCOMSTR") # Define an action to run Biber on a file. global BiberAction if BiberAction is None: BiberAction = SCons.Action.Action("$BIBERCOM", "$BIBERCOMSTR") # Define an action to run MakeIndex on a file. global MakeIndexAction if MakeIndexAction is None: MakeIndexAction = SCons.Action.Action("$MAKEINDEXCOM", "$MAKEINDEXCOMSTR") # Define an action to run MakeIndex on a file for nomenclatures. global MakeNclAction if MakeNclAction is None: MakeNclAction = SCons.Action.Action("$MAKENCLCOM", "$MAKENCLCOMSTR") # Define an action to run MakeIndex on a file for glossaries. global MakeGlossaryAction if MakeGlossaryAction is None: MakeGlossaryAction = SCons.Action.Action("$MAKEGLOSSARYCOM", "$MAKEGLOSSARYCOMSTR") # Define an action to run MakeIndex on a file for acronyms. global MakeAcronymsAction if MakeAcronymsAction is None: MakeAcronymsAction = SCons.Action.Action("$MAKEACRONYMSCOM", "$MAKEACRONYMSCOMSTR") try: environ = env['ENV'] except KeyError: environ = {} env['ENV'] = environ # Some Linux platforms have pdflatex set up in a way # that requires that the HOME environment variable be set. # Add it here if defined. 
v = os.environ.get('HOME') if v: environ['HOME'] = v CDCOM = 'cd ' if platform.system() == 'Windows': # allow cd command to change drives on Windows CDCOM = 'cd /D ' env['TEX'] = 'tex' env['TEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder') env['TEXCOM'] = CDCOM + '${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}' env['PDFTEX'] = 'pdftex' env['PDFTEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder') env['PDFTEXCOM'] = CDCOM + '${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}' env['LATEX'] = 'latex' env['LATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder') env['LATEXCOM'] = CDCOM + '${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}' env['LATEXRETRIES'] = 4 env['PDFLATEX'] = 'pdflatex' env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder') env['PDFLATEXCOM'] = CDCOM + '${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}' env['BIBTEX'] = 'bibtex' env['BIBTEXFLAGS'] = SCons.Util.CLVar('') env['BIBTEXCOM'] = CDCOM + '${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}' env['BIBER'] = 'biber' env['BIBERFLAGS'] = SCons.Util.CLVar('') env['BIBERCOM'] = CDCOM + '${TARGET.dir} && $BIBER $BIBERFLAGS ${SOURCE.filebase}' env['MAKEINDEX'] = 'makeindex' env['MAKEINDEXFLAGS'] = SCons.Util.CLVar('') env['MAKEINDEXCOM'] = CDCOM + '${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}' env['MAKEGLOSSARY'] = 'makeindex' env['MAKEGLOSSARYSTYLE'] = '${SOURCE.filebase}.ist' env['MAKEGLOSSARYFLAGS'] = SCons.Util.CLVar('-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg') env['MAKEGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls' env['MAKEACRONYMS'] = 'makeindex' env['MAKEACRONYMSSTYLE'] = '${SOURCE.filebase}.ist' env['MAKEACRONYMSFLAGS'] = SCons.Util.CLVar('-s ${MAKEACRONYMSSTYLE} -t ${SOURCE.filebase}.alg') env['MAKEACRONYMSCOM'] = CDCOM + '${TARGET.dir} && $MAKEACRONYMS ${SOURCE.filebase}.acn $MAKEACRONYMSFLAGS -o ${SOURCE.filebase}.acr' env['MAKENCL'] = 'makeindex' env['MAKENCLSTYLE'] = 'nomencl.ist' env['MAKENCLFLAGS'] = '-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg' env['MAKENCLCOM'] = CDCOM + '${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls' env['MAKENEWGLOSSARY'] = 'makeindex' env['MAKENEWGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKENEWGLOSSARY '
[ "def", "generate_common", "(", "env", ")", ":", "# Add OSX system paths so TeX tools can be found", "# when a list of tools is given the exists() method is not called", "generate_darwin", "(", "env", ")", "# A generic tex file Action, sufficient for all tex files.", "global", "TeXAction", "if", "TeXAction", "is", "None", ":", "TeXAction", "=", "SCons", ".", "Action", ".", "Action", "(", "\"$TEXCOM\"", ",", "\"$TEXCOMSTR\"", ")", "# An Action to build a latex file. This might be needed more", "# than once if we are dealing with labels and bibtex.", "global", "LaTeXAction", "if", "LaTeXAction", "is", "None", ":", "LaTeXAction", "=", "SCons", ".", "Action", ".", "Action", "(", "\"$LATEXCOM\"", ",", "\"$LATEXCOMSTR\"", ")", "# Define an action to run BibTeX on a file.", "global", "BibTeXAction", "if", "BibTeXAction", "is", "None", ":", "BibTeXAction", "=", "SCons", ".", "Action", ".", "Action", "(", "\"$BIBTEXCOM\"", ",", "\"$BIBTEXCOMSTR\"", ")", "# Define an action to run Biber on a file.", "global", "BiberAction", "if", "BiberAction", "is", "None", ":", "BiberAction", "=", "SCons", ".", "Action", ".", "Action", "(", "\"$BIBERCOM\"", ",", "\"$BIBERCOMSTR\"", ")", "# Define an action to run MakeIndex on a file.", "global", "MakeIndexAction", "if", "MakeIndexAction", "is", "None", ":", "MakeIndexAction", "=", "SCons", ".", "Action", ".", "Action", "(", "\"$MAKEINDEXCOM\"", ",", "\"$MAKEINDEXCOMSTR\"", ")", "# Define an action to run MakeIndex on a file for nomenclatures.", "global", "MakeNclAction", "if", "MakeNclAction", "is", "None", ":", "MakeNclAction", "=", "SCons", ".", "Action", ".", "Action", "(", "\"$MAKENCLCOM\"", ",", "\"$MAKENCLCOMSTR\"", ")", "# Define an action to run MakeIndex on a file for glossaries.", "global", "MakeGlossaryAction", "if", "MakeGlossaryAction", "is", "None", ":", "MakeGlossaryAction", "=", "SCons", ".", "Action", ".", "Action", "(", "\"$MAKEGLOSSARYCOM\"", ",", "\"$MAKEGLOSSARYCOMSTR\"", ")", "# Define an action to run MakeIndex on a file for acronyms.", "global", "MakeAcronymsAction", "if", "MakeAcronymsAction", "is", "None", ":", "MakeAcronymsAction", "=", "SCons", ".", "Action", ".", "Action", "(", "\"$MAKEACRONYMSCOM\"", ",", "\"$MAKEACRONYMSCOMSTR\"", ")", "try", ":", "environ", "=", "env", "[", "'ENV'", "]", "except", "KeyError", ":", "environ", "=", "{", "}", "env", "[", "'ENV'", "]", "=", "environ", "# Some Linux platforms have pdflatex set up in a way", "# that requires that the HOME environment variable be set.", "# Add it here if defined.", "v", "=", "os", ".", "environ", ".", "get", "(", "'HOME'", ")", "if", "v", ":", "environ", "[", "'HOME'", "]", "=", "v", "CDCOM", "=", "'cd '", "if", "platform", ".", "system", "(", ")", "==", "'Windows'", ":", "# allow cd command to change drives on Windows", "CDCOM", "=", "'cd /D '", "env", "[", "'TEX'", "]", "=", "'tex'", "env", "[", "'TEXFLAGS'", "]", "=", "SCons", ".", "Util", ".", "CLVar", "(", "'-interaction=nonstopmode -recorder'", ")", "env", "[", "'TEXCOM'", "]", "=", "CDCOM", "+", "'${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}'", "env", "[", "'PDFTEX'", "]", "=", "'pdftex'", "env", "[", "'PDFTEXFLAGS'", "]", "=", "SCons", ".", "Util", ".", "CLVar", "(", "'-interaction=nonstopmode -recorder'", ")", "env", "[", "'PDFTEXCOM'", "]", "=", "CDCOM", "+", "'${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}'", "env", "[", "'LATEX'", "]", "=", "'latex'", "env", "[", "'LATEXFLAGS'", "]", "=", "SCons", ".", "Util", ".", "CLVar", "(", "'-interaction=nonstopmode -recorder'", ")", "env", "[", "'LATEXCOM'", 
"]", "=", "CDCOM", "+", "'${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}'", "env", "[", "'LATEXRETRIES'", "]", "=", "4", "env", "[", "'PDFLATEX'", "]", "=", "'pdflatex'", "env", "[", "'PDFLATEXFLAGS'", "]", "=", "SCons", ".", "Util", ".", "CLVar", "(", "'-interaction=nonstopmode -recorder'", ")", "env", "[", "'PDFLATEXCOM'", "]", "=", "CDCOM", "+", "'${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'", "env", "[", "'BIBTEX'", "]", "=", "'bibtex'", "env", "[", "'BIBTEXFLAGS'", "]", "=", "SCons", ".", "Util", ".", "CLVar", "(", "''", ")", "env", "[", "'BIBTEXCOM'", "]", "=", "CDCOM", "+", "'${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}'", "env", "[", "'BIBER'", "]", "=", "'biber'", "env", "[", "'BIBERFLAGS'", "]", "=", "SCons", ".", "Util", ".", "CLVar", "(", "''", ")", "env", "[", "'BIBERCOM'", "]", "=", "CDCOM", "+", "'${TARGET.dir} && $BIBER $BIBERFLAGS ${SOURCE.filebase}'", "env", "[", "'MAKEINDEX'", "]", "=", "'makeindex'", "env", "[", "'MAKEINDEXFLAGS'", "]", "=", "SCons", ".", "Util", ".", "CLVar", "(", "''", ")", "env", "[", "'MAKEINDEXCOM'", "]", "=", "CDCOM", "+", "'${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}'", "env", "[", "'MAKEGLOSSARY'", "]", "=", "'makeindex'", "env", "[", "'MAKEGLOSSARYSTYLE'", "]", "=", "'${SOURCE.filebase}.ist'", "env", "[", "'MAKEGLOSSARYFLAGS'", "]", "=", "SCons", ".", "Util", ".", "CLVar", "(", "'-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg'", ")", "env", "[", "'MAKEGLOSSARYCOM'", "]", "=", "CDCOM", "+", "'${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls'", "env", "[", "'MAKEACRONYMS'", "]", "=", "'makeindex'", "env", "[", "'MAKEACRONYMSSTYLE'", "]", "=", "'${SOURCE.filebase}.ist'", "env", "[", "'MAKEACRONYMSFLAGS'", "]", "=", "SCons", ".", "Util", ".", "CLVar", "(", "'-s ${MAKEACRONYMSSTYLE} -t ${SOURCE.filebase}.alg'", ")", "env", "[", "'MAKEACRONYMSCOM'", "]", "=", "CDCOM", "+", "'${TARGET.dir} && $MAKEACRONYMS ${SOURCE.filebase}.acn $MAKEACRONYMSFLAGS -o ${SOURCE.filebase}.acr'", "env", "[", "'MAKENCL'", "]", "=", "'makeindex'", "env", "[", "'MAKENCLSTYLE'", "]", "=", "'nomencl.ist'", "env", "[", "'MAKENCLFLAGS'", "]", "=", "'-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg'", "env", "[", "'MAKENCLCOM'", "]", "=", "CDCOM", "+", "'${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls'", "env", "[", "'MAKENEWGLOSSARY'", "]", "=", "'makeindex'", "env", "[", "'MAKENEWGLOSSARYCOM'", "]", "=", "CDCOM", "+", "'${TARGET.dir} && $MAKENEWGLOSSARY '" ]
Add internal Builders and construction variables for LaTeX to an Environment.
[ "Add", "internal", "Builders", "and", "construction", "variables", "for", "LaTeX", "to", "an", "Environment", "." ]
python
train
41.75
awacha/credolib
credolib/atsas.py
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/atsas.py#L443-L518
def gnom(curve, Rmax, outputfilename=None, Npoints_realspace=None, initial_alpha=None): """Run GNOM on the dataset. Inputs: curve: an instance of sastool.classes2.Curve or anything which has a save() method, saving the scattering curve to a given .dat file, in q=4*pi*sin(theta)/lambda [1/nm] units Rmax: the estimated maximum extent of the scattering object, in nm. outputfilename: the preferred name of the output file. If not given, the .out file produced by gnom will be lost. Npoints_realspace: the expected number of points in the real space initial_alpha: the initial value of the regularization parameter. Outputs: the same as of read_gnom_pr() """ with tempfile.TemporaryDirectory(prefix='credolib_gnom') as td: curve.save(os.path.join(td, 'curve.dat')) print('Using curve for GNOM: qrange from {} to {}'.format(curve.q.min(), curve.q.max())) if Npoints_realspace is None: Npoints_realspace = "" else: Npoints_realspace = str(Npoints_realspace) if initial_alpha is None: initial_alpha = "" else: initial_alpha = str(initial_alpha) # GNOM questions and our answers: # Printer type [ postscr ] : <ENTER> # Input data, first file : <curve.dat in the temporary directory><ENTER> # Output file [ gnom.out ] : <gnom.out in the temporary directory><ENTER> # No of start points to skip [ 0 ] : 0<ENTER> # ... (just GNOM output) # ... (just GNOM output) # Input data, second file [ none ] : <ENTER> # No of end points to omit [ 0 ] : 0<ENTER> # ... (just GNOM output) # ... (just GNOM output) # Angular scale (1/2/3/4) [ 1 ] : 2<ENTER> # Plot input dataa (Y/N) [ Yes ] : N<ENTER> # File containing expert parameters [ none ] : <ENTER> # Kernel already calculated (Y/N) [ No ] : N<ENTER> # Type of system (0/1/2/3/4/5/6) [ 0 ] : 0<ENTER> # Zero condition at r=min (Y/N) [ Yes ] : Y<ENTER> # Zero condition at r=max (Y/N) [ Yes ] : Y<ENTER> # -- Arbitrary monodisperse system -- # Rmin=0, Rmax is maximum particle diameter # Rmax for evaluating p(r) : <Rmax * 10><ENTER> # Number of points in real space [(always different)] : <Npoints_realspace><ENTER> # Kernel-storage file name [ kern.bin ] : <ENTER> # Experimental setup (0/1/2) [ 0 ] : 0<ENTER> # Initial ALPHA [ 0.0 ] : <initial_alpha><ENTER> # Plot alpha distribution (Y/N) [ Yes ] : N<ENTER> # Plot results (Y/N) [ Yes ] : N<ENTER> # ... solution ... # Your choice : <ENTER> # Evaluate errors (Y/N) [ Yes ] : Y<ENTER> # Plot p(r) with errors (Y/N) [ Yes ] : N<ENTER> # Next data set (Yes/No/Same) [ No ] : N<ENTER> gnominput = "\n%s\n%s\n0\n\n0\n2\nN\n\nN\n0\nY\nY\n%f\n%s\n\n0\n%s\nN\nN\n\nY\nN\nN\n" % ( os.path.join(td, 'curve.dat'), os.path.join(td, 'gnom.out'), 10 * Rmax, Npoints_realspace, initial_alpha) result = subprocess.run(['gnom'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, input=gnominput.encode('utf-8')) pr, metadata = read_gnom_pr(os.path.join(td, 'gnom.out'), True) pr[:, 0] /= 10 metadata['q'] *= 10 metadata['qj'] *= 10 metadata['qmin'] *= 10 metadata['qmax'] *= 10 metadata['dmax'] /= 10 metadata['dmin'] /= 10 metadata['Rg_guinier'] /= 10 metadata['Rg_gnom'] /= 10 if outputfilename is not None: shutil.copy(os.path.join(td, 'gnom.out'), outputfilename) return pr, metadata
[ "def", "gnom", "(", "curve", ",", "Rmax", ",", "outputfilename", "=", "None", ",", "Npoints_realspace", "=", "None", ",", "initial_alpha", "=", "None", ")", ":", "with", "tempfile", ".", "TemporaryDirectory", "(", "prefix", "=", "'credolib_gnom'", ")", "as", "td", ":", "curve", ".", "save", "(", "os", ".", "path", ".", "join", "(", "td", ",", "'curve.dat'", ")", ")", "print", "(", "'Using curve for GNOM: qrange from {} to {}'", ".", "format", "(", "curve", ".", "q", ".", "min", "(", ")", ",", "curve", ".", "q", ".", "max", "(", ")", ")", ")", "if", "Npoints_realspace", "is", "None", ":", "Npoints_realspace", "=", "\"\"", "else", ":", "Npoints_realspace", "=", "str", "(", "Npoints_realspace", ")", "if", "initial_alpha", "is", "None", ":", "initial_alpha", "=", "\"\"", "else", ":", "initial_alpha", "=", "str", "(", "initial_alpha", ")", "# GNOM questions and our answers:", "# Printer type [ postscr ] : <ENTER>", "# Input data, first file : <curve.dat in the temporary directory><ENTER>", "# Output file [ gnom.out ] : <gnom.out in the temporary directory><ENTER>", "# No of start points to skip [ 0 ] : 0<ENTER>", "# ... (just GNOM output)", "# ... (just GNOM output)", "# Input data, second file [ none ] : <ENTER>", "# No of end points to omit [ 0 ] : 0<ENTER>", "# ... (just GNOM output)", "# ... (just GNOM output)", "# Angular scale (1/2/3/4) [ 1 ] : 2<ENTER>", "# Plot input dataa (Y/N) [ Yes ] : N<ENTER>", "# File containing expert parameters [ none ] : <ENTER>", "# Kernel already calculated (Y/N) [ No ] : N<ENTER>", "# Type of system (0/1/2/3/4/5/6) [ 0 ] : 0<ENTER>", "# Zero condition at r=min (Y/N) [ Yes ] : Y<ENTER>", "# Zero condition at r=max (Y/N) [ Yes ] : Y<ENTER>", "# -- Arbitrary monodisperse system --", "# Rmin=0, Rmax is maximum particle diameter", "# Rmax for evaluating p(r) : <Rmax * 10><ENTER>", "# Number of points in real space [(always different)] : <Npoints_realspace><ENTER>", "# Kernel-storage file name [ kern.bin ] : <ENTER>", "# Experimental setup (0/1/2) [ 0 ] : 0<ENTER>", "# Initial ALPHA [ 0.0 ] : <initial_alpha><ENTER>", "# Plot alpha distribution (Y/N) [ Yes ] : N<ENTER>", "# Plot results (Y/N) [ Yes ] : N<ENTER>", "# ... 
solution ...", "# Your choice : <ENTER>", "# Evaluate errors (Y/N) [ Yes ] : Y<ENTER>", "# Plot p(r) with errors (Y/N) [ Yes ] : N<ENTER>", "# Next data set (Yes/No/Same) [ No ] : N<ENTER>", "gnominput", "=", "\"\\n%s\\n%s\\n0\\n\\n0\\n2\\nN\\n\\nN\\n0\\nY\\nY\\n%f\\n%s\\n\\n0\\n%s\\nN\\nN\\n\\nY\\nN\\nN\\n\"", "%", "(", "os", ".", "path", ".", "join", "(", "td", ",", "'curve.dat'", ")", ",", "os", ".", "path", ".", "join", "(", "td", ",", "'gnom.out'", ")", ",", "10", "*", "Rmax", ",", "Npoints_realspace", ",", "initial_alpha", ")", "result", "=", "subprocess", ".", "run", "(", "[", "'gnom'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "input", "=", "gnominput", ".", "encode", "(", "'utf-8'", ")", ")", "pr", ",", "metadata", "=", "read_gnom_pr", "(", "os", ".", "path", ".", "join", "(", "td", ",", "'gnom.out'", ")", ",", "True", ")", "pr", "[", ":", ",", "0", "]", "/=", "10", "metadata", "[", "'q'", "]", "*=", "10", "metadata", "[", "'qj'", "]", "*=", "10", "metadata", "[", "'qmin'", "]", "*=", "10", "metadata", "[", "'qmax'", "]", "*=", "10", "metadata", "[", "'dmax'", "]", "/=", "10", "metadata", "[", "'dmin'", "]", "/=", "10", "metadata", "[", "'Rg_guinier'", "]", "/=", "10", "metadata", "[", "'Rg_gnom'", "]", "/=", "10", "if", "outputfilename", "is", "not", "None", ":", "shutil", ".", "copy", "(", "os", ".", "path", ".", "join", "(", "td", ",", "'gnom.out'", ")", ",", "outputfilename", ")", "return", "pr", ",", "metadata" ]
Run GNOM on the dataset. Inputs: curve: an instance of sastool.classes2.Curve or anything which has a save() method, saving the scattering curve to a given .dat file, in q=4*pi*sin(theta)/lambda [1/nm] units Rmax: the estimated maximum extent of the scattering object, in nm. outputfilename: the preferred name of the output file. If not given, the .out file produced by gnom will be lost. Npoints_realspace: the expected number of points in the real space initial_alpha: the initial value of the regularization parameter. Outputs: the same as of read_gnom_pr()
[ "Run", "GNOM", "on", "the", "dataset", "." ]
python
train
55.210526
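The gnom() record above drives an interactive console program by piping a prepared answer script into its stdin via subprocess.run. The sketch below shows the same pattern with 'sort' standing in for the GNOM binary, since GNOM itself is not assumed to be installed.

# Hedged sketch: feed a whole "answer script" to a command's stdin.
import subprocess

answers = "banana\napple\ncherry\n"            # one answer per prompt/line
result = subprocess.run(
    ["sort"],                                  # assumed to be on PATH (POSIX)
    input=answers.encode("utf-8"),             # everything sent at once, like gnominput
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
)
print(result.stdout.decode("utf-8"))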
brutasse/graphite-api
graphite_api/_vendor/whisper.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/_vendor/whisper.py#L727-L739
def info(path): """info(path) path is a string """ fh = None try: fh = open(path,'rb') return __readHeader(fh) finally: if fh: fh.close() return None
[ "def", "info", "(", "path", ")", ":", "fh", "=", "None", "try", ":", "fh", "=", "open", "(", "path", ",", "'rb'", ")", "return", "__readHeader", "(", "fh", ")", "finally", ":", "if", "fh", ":", "fh", ".", "close", "(", ")", "return", "None" ]
info(path) path is a string
[ "info", "(", "path", ")" ]
python
train
12.769231
shoebot/shoebot
lib/graph/proximity.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/graph/proximity.py#L23-L46
def depth_first_search(root, visit=lambda node: False, traversable=lambda node, edge: True): """ Simple, multi-purpose depth-first search. Visits all the nodes connected to the root, depth-first. The visit function is called on each node. Recursion will stop if it returns True, and ubsequently dfs() will return True. The traversable function takes the current node and edge, and returns True if we are allowed to follow this connection to the next node. For example, the traversable for directed edges is follows: lambda node, edge: node == edge.node1 Note: node._visited is expected to be False for all nodes. """ stop = visit(root) root._visited = True for node in root.links: if stop: return True if not traversable(root, root.links.edge(node)): continue if not node._visited: stop = depth_first_search(node, visit, traversable) return stop
[ "def", "depth_first_search", "(", "root", ",", "visit", "=", "lambda", "node", ":", "False", ",", "traversable", "=", "lambda", "node", ",", "edge", ":", "True", ")", ":", "stop", "=", "visit", "(", "root", ")", "root", ".", "_visited", "=", "True", "for", "node", "in", "root", ".", "links", ":", "if", "stop", ":", "return", "True", "if", "not", "traversable", "(", "root", ",", "root", ".", "links", ".", "edge", "(", "node", ")", ")", ":", "continue", "if", "not", "node", ".", "_visited", ":", "stop", "=", "depth_first_search", "(", "node", ",", "visit", ",", "traversable", ")", "return", "stop" ]
Simple, multi-purpose depth-first search. Visits all the nodes connected to the root, depth-first. The visit function is called on each node. Recursion will stop if it returns True, and ubsequently dfs() will return True. The traversable function takes the current node and edge, and returns True if we are allowed to follow this connection to the next node. For example, the traversable for directed edges is follows: lambda node, edge: node == edge.node1 Note: node._visited is expected to be False for all nodes.
[ "Simple", "multi", "-", "purpose", "depth", "-", "first", "search", ".", "Visits", "all", "the", "nodes", "connected", "to", "the", "root", "depth", "-", "first", ".", "The", "visit", "function", "is", "called", "on", "each", "node", ".", "Recursion", "will", "stop", "if", "it", "returns", "True", "and", "ubsequently", "dfs", "()", "will", "return", "True", ".", "The", "traversable", "function", "takes", "the", "current", "node", "and", "edge", "and", "returns", "True", "if", "we", "are", "allowed", "to", "follow", "this", "connection", "to", "the", "next", "node", ".", "For", "example", "the", "traversable", "for", "directed", "edges", "is", "follows", ":", "lambda", "node", "edge", ":", "node", "==", "edge", ".", "node1", "Note", ":", "node", ".", "_visited", "is", "expected", "to", "be", "False", "for", "all", "nodes", "." ]
python
valid
38.791667
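The callback-driven DFS above depends on node.links and node._visited attributes. The sketch below keeps the same visit/traversable callbacks but works on a plain adjacency dict so it stays self-contained; it is an illustration, not the shoebot graph API.

# Hedged sketch of callback-driven depth-first search on an adjacency dict.
def depth_first_search(graph, root, visit=lambda n: False,
                       traversable=lambda n, m: True, visited=None):
    if visited is None:
        visited = set()
    visited.add(root)
    stop = visit(root)                          # stop recursion if visit() returns True
    for neighbour in graph.get(root, []):
        if stop:
            return True
        if not traversable(root, neighbour):
            continue
        if neighbour not in visited:
            stop = depth_first_search(graph, neighbour, visit, traversable, visited)
    return stop

if __name__ == "__main__":
    graph = {"a": ["b", "c"], "b": ["d"], "c": [], "d": []}
    order = []
    # The visit callback records the order and returns False to keep going.
    depth_first_search(graph, "a", visit=lambda n: (order.append(n), False)[1])
    print(order)                                # ['a', 'b', 'd', 'c']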
numenta/nupic
examples/tm/tm_high_order.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/examples/tm/tm_high_order.py#L53-L75
def corruptVector(v1, noiseLevel, numActiveCols): """ Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits. @param v1 (array) binary vector whose copy will be corrupted @param noiseLevel (float) amount of noise to be applied on the new vector @param numActiveCols (int) number of sparse columns that represent an input @return v2 (array) corrupted binary vector """ size = len(v1) v2 = np.zeros(size, dtype="uint32") bitsToSwap = int(noiseLevel * numActiveCols) # Copy the contents of v1 into v2 for i in range(size): v2[i] = v1[i] for _ in range(bitsToSwap): i = random.randrange(size) if v2[i] == 1: v2[i] = 0 else: v2[i] = 1 return v2
[ "def", "corruptVector", "(", "v1", ",", "noiseLevel", ",", "numActiveCols", ")", ":", "size", "=", "len", "(", "v1", ")", "v2", "=", "np", ".", "zeros", "(", "size", ",", "dtype", "=", "\"uint32\"", ")", "bitsToSwap", "=", "int", "(", "noiseLevel", "*", "numActiveCols", ")", "# Copy the contents of v1 into v2", "for", "i", "in", "range", "(", "size", ")", ":", "v2", "[", "i", "]", "=", "v1", "[", "i", "]", "for", "_", "in", "range", "(", "bitsToSwap", ")", ":", "i", "=", "random", ".", "randrange", "(", "size", ")", "if", "v2", "[", "i", "]", "==", "1", ":", "v2", "[", "i", "]", "=", "0", "else", ":", "v2", "[", "i", "]", "=", "1", "return", "v2" ]
Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits. @param v1 (array) binary vector whose copy will be corrupted @param noiseLevel (float) amount of noise to be applied on the new vector @param numActiveCols (int) number of sparse columns that represent an input @return v2 (array) corrupted binary vector
[ "Corrupts", "a", "copy", "of", "a", "binary", "vector", "by", "inverting", "noiseLevel", "percent", "of", "its", "bits", "." ]
python
valid
31
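The corruptVector record above flips a fraction of the active-column count in a binary vector. Below is a near-direct NumPy sketch of the same routine; the snake_case name and the demo vector are the only changes.

# Hedged sketch: invert noise_level * num_active_cols randomly chosen bits.
import random
import numpy as np

def corrupt_vector(v1, noise_level, num_active_cols):
    v2 = np.array(v1, dtype="uint32")           # copy of the input vector
    bits_to_swap = int(noise_level * num_active_cols)
    for _ in range(bits_to_swap):
        i = random.randrange(len(v2))
        v2[i] = 0 if v2[i] == 1 else 1          # invert the chosen bit
    return v2

if __name__ == "__main__":
    v = np.zeros(20, dtype="uint32")
    v[:5] = 1                                   # 5 active bits
    print(corrupt_vector(v, noise_level=0.4, num_active_cols=5))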
Esri/ArcREST
src/arcrest/common/symbology.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/symbology.py#L240-L247
def value(self): """gets the color value""" return { "type" : self._type, "style" : self._style, "color" : self._color.value, "width" : self._width }
[ "def", "value", "(", "self", ")", ":", "return", "{", "\"type\"", ":", "self", ".", "_type", ",", "\"style\"", ":", "self", ".", "_style", ",", "\"color\"", ":", "self", ".", "_color", ".", "value", ",", "\"width\"", ":", "self", ".", "_width", "}" ]
gets the color value
[ "gets", "the", "color", "value" ]
python
train
26.75
tensorflow/tensor2tensor
tensor2tensor/utils/pruning_utils.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/pruning_utils.py#L27-L33
def weight(w, sparsity): """Weight-level magnitude pruning.""" w_shape = common_layers.shape_list(w) k = int(np.prod(w_shape[:-1])) count = tf.to_int32(k * sparsity) mask = common_layers.weight_targeting(w, count) return (1 - mask) * w
[ "def", "weight", "(", "w", ",", "sparsity", ")", ":", "w_shape", "=", "common_layers", ".", "shape_list", "(", "w", ")", "k", "=", "int", "(", "np", ".", "prod", "(", "w_shape", "[", ":", "-", "1", "]", ")", ")", "count", "=", "tf", ".", "to_int32", "(", "k", "*", "sparsity", ")", "mask", "=", "common_layers", ".", "weight_targeting", "(", "w", ",", "count", ")", "return", "(", "1", "-", "mask", ")", "*", "w" ]
Weight-level magnitude pruning.
[ "Weight", "-", "level", "magnitude", "pruning", "." ]
python
train
34.428571
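The pruning record above uses TensorFlow ops (shape_list, weight_targeting). The sketch below expresses the same weight-level magnitude idea with NumPy only and prunes globally rather than per last axis, so it is a simplified stand-in rather than the tensor2tensor function.

# Hedged sketch: zero out the smallest-magnitude weights to reach a sparsity level.
import numpy as np

def prune_by_magnitude(w, sparsity):
    flat = np.abs(w).ravel()
    count = int(flat.size * sparsity)           # how many weights to drop
    if count == 0:
        return w.copy()
    threshold = np.sort(flat)[count - 1]        # magnitude of the k-th smallest weight
    mask = np.abs(w) <= threshold               # True where the weight is pruned
    return np.where(mask, 0.0, w)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    w = rng.normal(size=(4, 4))
    pruned = prune_by_magnitude(w, sparsity=0.5)
    print((pruned == 0).mean())                 # roughly half the entries are zero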
mwouts/jupytext
jupytext/contentsmanager.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/contentsmanager.py#L222-L293
def save(self, model, path=''): """Save the file model and return the model with no content.""" if model['type'] != 'notebook': return super(TextFileContentsManager, self).save(model, path) nbk = model['content'] try: metadata = nbk.get('metadata') rearrange_jupytext_metadata(metadata) jupytext_metadata = metadata.setdefault('jupytext', {}) jupytext_formats = jupytext_metadata.get('formats') or self.default_formats(path) if not jupytext_formats: text_representation = jupytext_metadata.get('text_representation', {}) ext = os.path.splitext(path)[1] fmt = {'extension': ext} if ext == text_representation.get('extension') and text_representation.get('format_name'): fmt['format_name'] = text_representation.get('format_name') jupytext_formats = [fmt] jupytext_formats = long_form_multiple_formats(jupytext_formats, metadata) # Set preferred formats if not format name is given yet jupytext_formats = [preferred_format(fmt, self.preferred_jupytext_formats_save) for fmt in jupytext_formats] base, fmt = find_base_path_and_format(path, jupytext_formats) self.update_paired_notebooks(path, fmt, jupytext_formats) self.set_default_format_options(jupytext_metadata) if not jupytext_metadata: metadata.pop('jupytext') # Save as ipynb first return_value = None value = None for fmt in jupytext_formats[::-1]: if fmt['extension'] != '.ipynb': continue alt_path = full_path(base, fmt) self.create_prefix_dir(alt_path, fmt) self.log.info("Saving %s", os.path.basename(alt_path)) value = super(TextFileContentsManager, self).save(model, alt_path) if alt_path == path: return_value = value # And then to the other formats, in reverse order so that # the first format is the most recent for fmt in jupytext_formats[::-1]: if fmt['extension'] == '.ipynb': continue alt_path = full_path(base, fmt) self.create_prefix_dir(alt_path, fmt) if 'format_name' in fmt and fmt['extension'] not in ['.Rmd', '.md']: self.log.info("Saving %s in format %s:%s", os.path.basename(alt_path), fmt['extension'][1:], fmt['format_name']) else: self.log.info("Saving %s", os.path.basename(alt_path)) with mock.patch('nbformat.writes', _jupytext_writes(fmt)): value = super(TextFileContentsManager, self).save(model, alt_path) if alt_path == path: return_value = value # Update modified timestamp to match that of the pair #207 return_value['last_modified'] = value['last_modified'] return return_value except Exception as err: raise HTTPError(400, str(err))
[ "def", "save", "(", "self", ",", "model", ",", "path", "=", "''", ")", ":", "if", "model", "[", "'type'", "]", "!=", "'notebook'", ":", "return", "super", "(", "TextFileContentsManager", ",", "self", ")", ".", "save", "(", "model", ",", "path", ")", "nbk", "=", "model", "[", "'content'", "]", "try", ":", "metadata", "=", "nbk", ".", "get", "(", "'metadata'", ")", "rearrange_jupytext_metadata", "(", "metadata", ")", "jupytext_metadata", "=", "metadata", ".", "setdefault", "(", "'jupytext'", ",", "{", "}", ")", "jupytext_formats", "=", "jupytext_metadata", ".", "get", "(", "'formats'", ")", "or", "self", ".", "default_formats", "(", "path", ")", "if", "not", "jupytext_formats", ":", "text_representation", "=", "jupytext_metadata", ".", "get", "(", "'text_representation'", ",", "{", "}", ")", "ext", "=", "os", ".", "path", ".", "splitext", "(", "path", ")", "[", "1", "]", "fmt", "=", "{", "'extension'", ":", "ext", "}", "if", "ext", "==", "text_representation", ".", "get", "(", "'extension'", ")", "and", "text_representation", ".", "get", "(", "'format_name'", ")", ":", "fmt", "[", "'format_name'", "]", "=", "text_representation", ".", "get", "(", "'format_name'", ")", "jupytext_formats", "=", "[", "fmt", "]", "jupytext_formats", "=", "long_form_multiple_formats", "(", "jupytext_formats", ",", "metadata", ")", "# Set preferred formats if not format name is given yet", "jupytext_formats", "=", "[", "preferred_format", "(", "fmt", ",", "self", ".", "preferred_jupytext_formats_save", ")", "for", "fmt", "in", "jupytext_formats", "]", "base", ",", "fmt", "=", "find_base_path_and_format", "(", "path", ",", "jupytext_formats", ")", "self", ".", "update_paired_notebooks", "(", "path", ",", "fmt", ",", "jupytext_formats", ")", "self", ".", "set_default_format_options", "(", "jupytext_metadata", ")", "if", "not", "jupytext_metadata", ":", "metadata", ".", "pop", "(", "'jupytext'", ")", "# Save as ipynb first", "return_value", "=", "None", "value", "=", "None", "for", "fmt", "in", "jupytext_formats", "[", ":", ":", "-", "1", "]", ":", "if", "fmt", "[", "'extension'", "]", "!=", "'.ipynb'", ":", "continue", "alt_path", "=", "full_path", "(", "base", ",", "fmt", ")", "self", ".", "create_prefix_dir", "(", "alt_path", ",", "fmt", ")", "self", ".", "log", ".", "info", "(", "\"Saving %s\"", ",", "os", ".", "path", ".", "basename", "(", "alt_path", ")", ")", "value", "=", "super", "(", "TextFileContentsManager", ",", "self", ")", ".", "save", "(", "model", ",", "alt_path", ")", "if", "alt_path", "==", "path", ":", "return_value", "=", "value", "# And then to the other formats, in reverse order so that", "# the first format is the most recent", "for", "fmt", "in", "jupytext_formats", "[", ":", ":", "-", "1", "]", ":", "if", "fmt", "[", "'extension'", "]", "==", "'.ipynb'", ":", "continue", "alt_path", "=", "full_path", "(", "base", ",", "fmt", ")", "self", ".", "create_prefix_dir", "(", "alt_path", ",", "fmt", ")", "if", "'format_name'", "in", "fmt", "and", "fmt", "[", "'extension'", "]", "not", "in", "[", "'.Rmd'", ",", "'.md'", "]", ":", "self", ".", "log", ".", "info", "(", "\"Saving %s in format %s:%s\"", ",", "os", ".", "path", ".", "basename", "(", "alt_path", ")", ",", "fmt", "[", "'extension'", "]", "[", "1", ":", "]", ",", "fmt", "[", "'format_name'", "]", ")", "else", ":", "self", ".", "log", ".", "info", "(", "\"Saving %s\"", ",", "os", ".", "path", ".", "basename", "(", "alt_path", ")", ")", "with", "mock", ".", "patch", "(", "'nbformat.writes'", ",", "_jupytext_writes", "(", "fmt", ")", ")", 
":", "value", "=", "super", "(", "TextFileContentsManager", ",", "self", ")", ".", "save", "(", "model", ",", "alt_path", ")", "if", "alt_path", "==", "path", ":", "return_value", "=", "value", "# Update modified timestamp to match that of the pair #207", "return_value", "[", "'last_modified'", "]", "=", "value", "[", "'last_modified'", "]", "return", "return_value", "except", "Exception", "as", "err", ":", "raise", "HTTPError", "(", "400", ",", "str", "(", "err", ")", ")" ]
Save the file model and return the model with no content.
[ "Save", "the", "file", "model", "and", "return", "the", "model", "with", "no", "content", "." ]
python
train
44.458333
cmorisse/ikp3db
ikp3db.py
https://github.com/cmorisse/ikp3db/blob/a0f318d4e8494b2e6f2f07ec0f1202ca023c920f/ikp3db.py#L228-L284
def send(self, command, _id=None, result={}, frames=[], threads=None, error_messages=[], warning_messages=[], info_messages=[], exception=None): """ Build a message from parameters and send it to debugger. :param command: The command sent to the debugger client. :type command: str :param _id: Unique id of the sent message. Right now, it's always `None` for messages by debugger to client. :type _id: int :param result: Used to send `exit_code` and updated `executionStatus` to debugger client. :type result: dict :param frames: contains the complete stack frames when debugger sends the `programBreak` message. :type frames: list :param error_messages: A list of error messages the debugger client must display to the user. :type error_messages: list of str :param warning_messages: A list of warning messages the debugger client must display to the user. :type warning_messages: list of str :param info_messages: A list of info messages the debugger client must display to the user. :type info_messages: list of str :param exception: If debugger encounter an exception, this dict contains 2 keys: `type` and `info` (the later is the message). :type exception: dict """ with self._connection_lock: payload = { '_id': _id, 'command': command, 'result': result, 'commandExecStatus': 'ok', 'frames': frames, 'info_messages': info_messages, 'warning_messages': warning_messages, 'error_messages': error_messages, 'exception': exception } if threads: payload['threads'] = threads msg = self.encode(payload) if self._connection: msg_bytes = bytearray(msg, 'utf-8') send_bytes_count = self._connection.sendall(msg_bytes) self.log_sent(msg) return send_bytes_count raise IKPdbConnectionError("Connection lost!")
[ "def", "send", "(", "self", ",", "command", ",", "_id", "=", "None", ",", "result", "=", "{", "}", ",", "frames", "=", "[", "]", ",", "threads", "=", "None", ",", "error_messages", "=", "[", "]", ",", "warning_messages", "=", "[", "]", ",", "info_messages", "=", "[", "]", ",", "exception", "=", "None", ")", ":", "with", "self", ".", "_connection_lock", ":", "payload", "=", "{", "'_id'", ":", "_id", ",", "'command'", ":", "command", ",", "'result'", ":", "result", ",", "'commandExecStatus'", ":", "'ok'", ",", "'frames'", ":", "frames", ",", "'info_messages'", ":", "info_messages", ",", "'warning_messages'", ":", "warning_messages", ",", "'error_messages'", ":", "error_messages", ",", "'exception'", ":", "exception", "}", "if", "threads", ":", "payload", "[", "'threads'", "]", "=", "threads", "msg", "=", "self", ".", "encode", "(", "payload", ")", "if", "self", ".", "_connection", ":", "msg_bytes", "=", "bytearray", "(", "msg", ",", "'utf-8'", ")", "send_bytes_count", "=", "self", ".", "_connection", ".", "sendall", "(", "msg_bytes", ")", "self", ".", "log_sent", "(", "msg", ")", "return", "send_bytes_count", "raise", "IKPdbConnectionError", "(", "\"Connection lost!\"", ")" ]
Build a message from parameters and send it to the debugger. :param command: The command sent to the debugger client. :type command: str :param _id: Unique id of the sent message. Right now, it's always `None` for messages by debugger to client. :type _id: int :param result: Used to send `exit_code` and updated `executionStatus` to debugger client. :type result: dict :param frames: contains the complete stack frames when debugger sends the `programBreak` message. :type frames: list :param error_messages: A list of error messages the debugger client must display to the user. :type error_messages: list of str :param warning_messages: A list of warning messages the debugger client must display to the user. :type warning_messages: list of str :param info_messages: A list of info messages the debugger client must display to the user. :type info_messages: list of str :param exception: If the debugger encounters an exception, this dict contains 2 keys: `type` and `info` (the latter is the message). :type exception: dict
[ "Build", "a", "message", "from", "parameters", "and", "send", "it", "to", "debugger", ".", ":", "param", "command", ":", "The", "command", "sent", "to", "the", "debugger", "client", ".", ":", "type", "command", ":", "str", ":", "param", "_id", ":", "Unique", "id", "of", "the", "sent", "message", ".", "Right", "now", "it", "s", "always", "None", "for", "messages", "by", "debugger", "to", "client", ".", ":", "type", "_id", ":", "int", ":", "param", "result", ":", "Used", "to", "send", "exit_code", "and", "updated", "executionStatus", "to", "debugger", "client", ".", ":", "type", "result", ":", "dict", ":", "param", "frames", ":", "contains", "the", "complete", "stack", "frames", "when", "debugger", "sends", "the", "programBreak", "message", ".", ":", "type", "frames", ":", "list" ]
python
train
41.526316
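The send() record above lists the payload fields but leaves the wire framing to an encode() helper that is not included here. The sketch below shows one plausible framing, JSON behind a length header, purely as an illustration; the header format and trailing newline are assumptions, not ikp3db's actual protocol.

import json

def encode_debugger_message(payload, magic="length="):
    """Serialize a payload dict as JSON behind a byte-length header.

    The 'length=<n>' header line is an illustrative assumption; it is
    not taken from ikp3db's real encode() implementation.
    """
    body = json.dumps(payload)
    # The length is computed over the UTF-8 bytes of the body so the
    # reader knows exactly how much to consume after the header line.
    header = "%s%d\n" % (magic, len(body.encode("utf-8")))
    return header + body

message = encode_debugger_message({
    "_id": None,
    "command": "programBreak",
    "commandExecStatus": "ok",
    "result": {},
    "frames": [],
    "info_messages": [],
    "warning_messages": [],
    "error_messages": [],
    "exception": None,
})
print(message.splitlines()[0])  # header line, e.g. length=<byte count of the JSON body>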
onnx/onnxmltools
onnxmltools/convert/coreml/shape_calculators/OneHotEncoder.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/coreml/shape_calculators/OneHotEncoder.py#L12-L36
def calculate_one_hot_encoder_output_shapes(operator): ''' Allowed input/output patterns are 1. [N, 1] ---> [N, C'] C' is the total number of categorical values. ''' check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1) if operator.inputs[0].type.shape[1] != 1 or len(operator.inputs[0].type.shape) > 2: raise RuntimeError('Input must be [N, 1]-tensor') int_categories = operator.raw_operator.oneHotEncoder.int64Categories.vector str_categories = operator.raw_operator.oneHotEncoder.stringCategories.vector N = operator.inputs[0].type.shape[0] if len(int_categories) > 0: operator.outputs[0].type = FloatTensorType([N, len(int_categories)], doc_string=operator.outputs[0].type.doc_string) elif len(str_categories) > 0 and type(operator.inputs[0].type) == StringTensorType: operator.outputs[0].type = FloatTensorType([N, len(str_categories)], doc_string=operator.outputs[0].type.doc_string) else: raise ValueError('Categorical indexes are missing')
[ "def", "calculate_one_hot_encoder_output_shapes", "(", "operator", ")", ":", "check_input_and_output_numbers", "(", "operator", ",", "input_count_range", "=", "1", ",", "output_count_range", "=", "1", ")", "if", "operator", ".", "inputs", "[", "0", "]", ".", "type", ".", "shape", "[", "1", "]", "!=", "1", "or", "len", "(", "operator", ".", "inputs", "[", "0", "]", ".", "type", ".", "shape", ")", ">", "2", ":", "raise", "RuntimeError", "(", "'Input must be [N, 1]-tensor'", ")", "int_categories", "=", "operator", ".", "raw_operator", ".", "oneHotEncoder", ".", "int64Categories", ".", "vector", "str_categories", "=", "operator", ".", "raw_operator", ".", "oneHotEncoder", ".", "stringCategories", ".", "vector", "N", "=", "operator", ".", "inputs", "[", "0", "]", ".", "type", ".", "shape", "[", "0", "]", "if", "len", "(", "int_categories", ")", ">", "0", ":", "operator", ".", "outputs", "[", "0", "]", ".", "type", "=", "FloatTensorType", "(", "[", "N", ",", "len", "(", "int_categories", ")", "]", ",", "doc_string", "=", "operator", ".", "outputs", "[", "0", "]", ".", "type", ".", "doc_string", ")", "elif", "len", "(", "str_categories", ")", ">", "0", "and", "type", "(", "operator", ".", "inputs", "[", "0", "]", ".", "type", ")", "==", "StringTensorType", ":", "operator", ".", "outputs", "[", "0", "]", ".", "type", "=", "FloatTensorType", "(", "[", "N", ",", "len", "(", "str_categories", ")", "]", ",", "doc_string", "=", "operator", ".", "outputs", "[", "0", "]", ".", "type", ".", "doc_string", ")", "else", ":", "raise", "ValueError", "(", "'Categorical indexes are missing'", ")" ]
Allowed input/output patterns are 1. [N, 1] ---> [N, C'] C' is the total number of categorical values.
[ "Allowed", "input", "/", "output", "patterns", "are", "1", ".", "[", "N", "1", "]", "---", ">", "[", "N", "C", "]" ]
python
train
45.92
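The shape rule in this record, [N, 1] mapped to [N, C'] where C' is the number of categories, can be checked without the onnxmltools operator machinery. The sketch below keeps only the arithmetic and the two error cases; the function name and plain-list types are made up for illustration.

def one_hot_output_shape(input_shape, categories):
    """Return the [N, C'] output shape for a [N, 1] categorical input."""
    if len(input_shape) != 2 or input_shape[1] != 1:
        raise RuntimeError('Input must be [N, 1]-tensor')
    if not categories:
        raise ValueError('Categorical indexes are missing')
    # N rows in, one indicator column per known category out.
    return [input_shape[0], len(categories)]

print(one_hot_output_shape([8, 1], ['red', 'green', 'blue']))  # [8, 3]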
tanghaibao/jcvi
jcvi/compara/quota.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/quota.py#L152-L198
def format_lp(nodes, constraints_x, qa, constraints_y, qb): """ Maximize 4 x1 + 2 x2 + 3 x3 + x4 Subject To x1 + x2 <= 1 End """ lp_handle = cStringIO.StringIO() lp_handle.write("Maximize\n ") records = 0 for i, score in nodes: lp_handle.write("+ %d x%d " % (score, i)) # SCIP does not like really long string per row records += 1 if records % 10 == 0: lp_handle.write("\n") lp_handle.write("\n") num_of_constraints = 0 lp_handle.write("Subject To\n") for c in constraints_x: additions = " + ".join("x%d" % (x+1) for x in c) lp_handle.write(" %s <= %d\n" % (additions, qa)) num_of_constraints += len(constraints_x) # non-self if not (constraints_x is constraints_y): for c in constraints_y: additions = " + ".join("x%d" % (x+1) for x in c) lp_handle.write(" %s <= %d\n" % (additions, qb)) num_of_constraints += len(constraints_y) print("number of variables (%d), number of constraints (%d)" % (len(nodes), num_of_constraints), file=sys.stderr) lp_handle.write("Binary\n") for i, score in nodes: lp_handle.write(" x%d\n" % i) lp_handle.write("End\n") lp_data = lp_handle.getvalue() lp_handle.close() return lp_data
[ "def", "format_lp", "(", "nodes", ",", "constraints_x", ",", "qa", ",", "constraints_y", ",", "qb", ")", ":", "lp_handle", "=", "cStringIO", ".", "StringIO", "(", ")", "lp_handle", ".", "write", "(", "\"Maximize\\n \"", ")", "records", "=", "0", "for", "i", ",", "score", "in", "nodes", ":", "lp_handle", ".", "write", "(", "\"+ %d x%d \"", "%", "(", "score", ",", "i", ")", ")", "# SCIP does not like really long string per row", "records", "+=", "1", "if", "records", "%", "10", "==", "0", ":", "lp_handle", ".", "write", "(", "\"\\n\"", ")", "lp_handle", ".", "write", "(", "\"\\n\"", ")", "num_of_constraints", "=", "0", "lp_handle", ".", "write", "(", "\"Subject To\\n\"", ")", "for", "c", "in", "constraints_x", ":", "additions", "=", "\" + \"", ".", "join", "(", "\"x%d\"", "%", "(", "x", "+", "1", ")", "for", "x", "in", "c", ")", "lp_handle", ".", "write", "(", "\" %s <= %d\\n\"", "%", "(", "additions", ",", "qa", ")", ")", "num_of_constraints", "+=", "len", "(", "constraints_x", ")", "# non-self", "if", "not", "(", "constraints_x", "is", "constraints_y", ")", ":", "for", "c", "in", "constraints_y", ":", "additions", "=", "\" + \"", ".", "join", "(", "\"x%d\"", "%", "(", "x", "+", "1", ")", "for", "x", "in", "c", ")", "lp_handle", ".", "write", "(", "\" %s <= %d\\n\"", "%", "(", "additions", ",", "qb", ")", ")", "num_of_constraints", "+=", "len", "(", "constraints_y", ")", "print", "(", "\"number of variables (%d), number of constraints (%d)\"", "%", "(", "len", "(", "nodes", ")", ",", "num_of_constraints", ")", ",", "file", "=", "sys", ".", "stderr", ")", "lp_handle", ".", "write", "(", "\"Binary\\n\"", ")", "for", "i", ",", "score", "in", "nodes", ":", "lp_handle", ".", "write", "(", "\" x%d\\n\"", "%", "i", ")", "lp_handle", ".", "write", "(", "\"End\\n\"", ")", "lp_data", "=", "lp_handle", ".", "getvalue", "(", ")", "lp_handle", ".", "close", "(", ")", "return", "lp_data" ]
Maximize 4 x1 + 2 x2 + 3 x3 + x4 Subject To x1 + x2 <= 1 End
[ "Maximize", "4", "x1", "+", "2", "x2", "+", "3", "x3", "+", "x4", "Subject", "To", "x1", "+", "x2", "<", "=", "1", "End" ]
python
train
27.553191
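format_lp emits CPLEX-style LP text, and the docstring's tiny example (maximize 4 x1 + 2 x2 subject to x1 + x2 <= 1) can be reproduced with a Python 3 sketch that keeps only the layout; io.StringIO stands in for the original cStringIO, and the full constraint bookkeeping of the jcvi version is omitted.

import io

def tiny_lp(nodes, constraints, quota):
    """Write a minimal LP file: nodes are (index, score), constraints are 0-based index tuples."""
    out = io.StringIO()
    out.write("Maximize\n ")
    for i, score in nodes:
        out.write("+ %d x%d " % (score, i))
    out.write("\nSubject To\n")
    for c in constraints:
        additions = " + ".join("x%d" % (x + 1) for x in c)
        out.write(" %s <= %d\n" % (additions, quota))
    out.write("Binary\n")
    for i, _ in nodes:
        out.write(" x%d\n" % i)
    out.write("End\n")
    return out.getvalue()

print(tiny_lp(nodes=[(1, 4), (2, 2)], constraints=[(0, 1)], quota=1))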
Kortemme-Lab/pull_into_place
pull_into_place/pipeline.py
https://github.com/Kortemme-Lab/pull_into_place/blob/247f303100a612cc90cf31c86e4fe5052eb28c8d/pull_into_place/pipeline.py#L844-L863
def load_loops(directory, loops_path=None): """ Return a list of tuples indicating the start and end points of the loops that were sampled in the given directory. """ if loops_path is None: workspace = workspace_from_dir(directory) loops_path = workspace.loops_path from klab.rosetta.input_files import LoopsFile loops_parser = LoopsFile.from_filepath(loops_path) # We have to account for some weird indexing behavior in the loops file # parser that I don't really understand. It seems to shrink the loop by # one residue on each side. At first I thought it might be trying to # convert the indices to python indexing, but on second thought I have no # idea what it's trying to do. return [(x-1, y+1) for x, y in loops_parser.get_distinct_segments()]
[ "def", "load_loops", "(", "directory", ",", "loops_path", "=", "None", ")", ":", "if", "loops_path", "is", "None", ":", "workspace", "=", "workspace_from_dir", "(", "directory", ")", "loops_path", "=", "workspace", ".", "loops_path", "from", "klab", ".", "rosetta", ".", "input_files", "import", "LoopsFile", "loops_parser", "=", "LoopsFile", ".", "from_filepath", "(", "loops_path", ")", "# We have to account for some weird indexing behavior in the loops file", "# parser that I don't really understand. It seems to shrink the loop by", "# one residue on each side. At first I thought it might be trying to", "# convert the indices to python indexing, but on second thought I have no", "# idea what it's trying to do.", "return", "[", "(", "x", "-", "1", ",", "y", "+", "1", ")", "for", "x", ",", "y", "in", "loops_parser", ".", "get_distinct_segments", "(", ")", "]" ]
Return a list of tuples indicating the start and end points of the loops that were sampled in the given directory.
[ "Return", "a", "list", "of", "tuples", "indicating", "the", "start", "and", "end", "points", "of", "the", "loops", "that", "were", "sampled", "in", "the", "given", "directory", "." ]
python
train
40.25
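The (x-1, y+1) adjustment in load_loops widens every segment by one residue on each side to undo the parser's shrinking. A toy check of just that arithmetic, with a hand-written segment list instead of a real loops file:

def widen_segments(segments):
    """Undo the one-residue shrink on both ends of each (start, end) pair."""
    return [(start - 1, end + 1) for start, end in segments]

# A parser that reports (35, 42) is treated as the loop spanning 34..43.
print(widen_segments([(35, 42), (101, 110)]))  # [(34, 43), (100, 111)]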
eumis/pyviews
pyviews/core/ioc.py
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/ioc.py#L81-L83
def register(key, initializer: callable, param=None): '''Adds resolver to global container''' get_current_scope().container.register(key, initializer, param)
[ "def", "register", "(", "key", ",", "initializer", ":", "callable", ",", "param", "=", "None", ")", ":", "get_current_scope", "(", ")", ".", "container", ".", "register", "(", "key", ",", "initializer", ",", "param", ")" ]
Adds resolver to global container
[ "Adds", "resolver", "to", "global", "container" ]
python
train
54.333333
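register() above only forwards to the current scope's container, which is not shown in this record. A minimal container sketch, an assumption rather than pyviews' real Container class, illustrates how key, initializer and param fit together.

class TinyContainer:
    """Illustrative resolver registry: key -> (initializer, param)."""

    def __init__(self):
        self._factories = {}

    def register(self, key, initializer, param=None):
        self._factories[key] = (initializer, param)

    def resolve(self, key):
        initializer, param = self._factories[key]
        # Call the initializer with its registered parameter, if any.
        return initializer(param) if param is not None else initializer()

container = TinyContainer()
container.register('greeting', lambda name: 'hello ' + name, param='ioc')
print(container.resolve('greeting'))  # hello ioc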
ClearcodeHQ/matchbox
src/matchbox/index.py
https://github.com/ClearcodeHQ/matchbox/blob/22f5bd163ad22ceacb0fcd5d4ddae9069d1a94f4/src/matchbox/index.py#L153-L180
def add_match(self, entity, *traits): """ Add a matching entity to the index. We have to maintain the constraints of the data layout: - `self.mismatch_unknown` must still contain all matched entities - each key of the index must mismatch all known matching entities except those this particular key explicitly includes For data layout description, see the class-level docstring. :param collections.Hashable entity: an object to be matching the values of `traits_indexed_by` :param list traits: a list of hashable values to index the object with """ # The index traits of `traits_indexed_by` might have already been used to index some other entities. Those # relations are to be preserved. If the trait was not used to index any entity, we initialize them to mismatch # all matching entities known so far. for trait in traits: if trait not in self.index: self.index[trait] = self.mismatch_unknown.copy() # Now each known trait this entity is not matching, will explicitly mismatch currently added entity. for existing_trait in self.index: if existing_trait not in traits: self.index[existing_trait].add(entity) # From now on, any new matching or mismatching index will mismatch this entity by default. self.mismatch_unknown.add(entity)
[ "def", "add_match", "(", "self", ",", "entity", ",", "*", "traits", ")", ":", "# The index traits of `traits_indexed_by` might have already been used to index some other entities. Those", "# relations are to be preserved. If the trait was not used to index any entity, we initialize them to mismatch", "# all matching entities known so far.", "for", "trait", "in", "traits", ":", "if", "trait", "not", "in", "self", ".", "index", ":", "self", ".", "index", "[", "trait", "]", "=", "self", ".", "mismatch_unknown", ".", "copy", "(", ")", "# Now each known trait this entity is not matching, will explicitly mismatch currently added entity.", "for", "existing_trait", "in", "self", ".", "index", ":", "if", "existing_trait", "not", "in", "traits", ":", "self", ".", "index", "[", "existing_trait", "]", ".", "add", "(", "entity", ")", "# From now on, any new matching or mismatching index will mismatch this entity by default.", "self", ".", "mismatch_unknown", ".", "add", "(", "entity", ")" ]
Add a matching entity to the index. We have to maintain the constraints of the data layout: - `self.mismatch_unknown` must still contain all matched entities - each key of the index must mismatch all known matching entities except those this particular key explicitly includes For data layout description, see the class-level docstring. :param collections.Hashable entity: an object to be matching the values of `traits_indexed_by` :param list traits: a list of hashable values to index the object with
[ "Add", "a", "matching", "entity", "to", "the", "index", "." ]
python
train
50.928571
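The two invariants spelled out in add_match (every indexed trait mismatches all known matching entities except the ones it explicitly covers, and mismatch_unknown accumulates everything seen so far) can be reproduced with a plain dict of sets. The class below is a self-contained sketch of that bookkeeping, not matchbox's real index type.

class TinyMatchIndex:
    """Sketch of the add_match bookkeeping; index values are MISmatching entities."""

    def __init__(self):
        self.index = {}
        self.mismatch_unknown = set()

    def add_match(self, entity, *traits):
        # Traits seen for the first time start out mismatching every known entity.
        for trait in traits:
            if trait not in self.index:
                self.index[trait] = set(self.mismatch_unknown)
        # Every other known trait now explicitly mismatches this entity.
        for existing_trait in self.index:
            if existing_trait not in traits:
                self.index[existing_trait].add(entity)
        # Future traits will mismatch this entity by default.
        self.mismatch_unknown.add(entity)

idx = TinyMatchIndex()
idx.add_match('server-1', 'linux')
idx.add_match('server-2', 'windows')
print(idx.index['linux'])    # {'server-2'}
print(idx.index['windows'])  # {'server-1'}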
jupyter-widgets/ipywidgets
ipywidgets/widgets/widget.py
https://github.com/jupyter-widgets/ipywidgets/blob/36fe37594cd5a268def228709ca27e37b99ac606/ipywidgets/widgets/widget.py#L117-L130
def _remove_buffers(state): """Return (state_without_buffers, buffer_paths, buffers) for binary message parts A binary message part is a memoryview, bytearray, or python 3 bytes object. As an example: >>> state = {'plain': [0, 'text'], 'x': {'ar': memoryview(ar1)}, 'y': {'shape': (10,10), 'data': memoryview(ar2)}} >>> _remove_buffers(state) ({'plain': [0, 'text']}, {'x': {}, 'y': {'shape': (10, 10)}}, [['x', 'ar'], ['y', 'data']], [<memory at 0x107ffec48>, <memory at 0x107ffed08>]) """ buffer_paths, buffers = [], [] state = _separate_buffers(state, [], buffer_paths, buffers) return state, buffer_paths, buffers
[ "def", "_remove_buffers", "(", "state", ")", ":", "buffer_paths", ",", "buffers", "=", "[", "]", ",", "[", "]", "state", "=", "_separate_buffers", "(", "state", ",", "[", "]", ",", "buffer_paths", ",", "buffers", ")", "return", "state", ",", "buffer_paths", ",", "buffers" ]
Return (state_without_buffers, buffer_paths, buffers) for binary message parts A binary message part is a memoryview, bytearray, or python 3 bytes object. As an example: >>> state = {'plain': [0, 'text'], 'x': {'ar': memoryview(ar1)}, 'y': {'shape': (10,10), 'data': memoryview(ar2)}} >>> _remove_buffers(state) ({'plain': [0, 'text']}, {'x': {}, 'y': {'shape': (10, 10)}}, [['x', 'ar'], ['y', 'data']], [<memory at 0x107ffec48>, <memory at 0x107ffed08>])
[ "Return", "(", "state_without_buffers", "buffer_paths", "buffers", ")", "for", "binary", "message", "parts" ]
python
train
46.5
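_remove_buffers leans on a _separate_buffers helper that is not part of this record. The sketch below is one way that split could work, recursively pulling memoryview, bytearray and bytes leaves out of dicts and lists while recording their paths; it reproduces the documented example but is an assumption, not ipywidgets' exact implementation.

def separate_buffers(substate, path, buffer_paths, buffers):
    """Strip binary leaves out of nested state, recording their paths."""
    binary = (memoryview, bytearray, bytes)
    if isinstance(substate, dict):
        cleaned = {}
        for key, value in substate.items():
            if isinstance(value, binary):
                buffer_paths.append(path + [key])
                buffers.append(value)
            else:
                cleaned[key] = separate_buffers(value, path + [key], buffer_paths, buffers)
        return cleaned
    if isinstance(substate, list):
        cleaned = []
        for i, value in enumerate(substate):
            if isinstance(value, binary):
                buffer_paths.append(path + [i])
                buffers.append(value)
                cleaned.append(None)  # keep list indices stable
            else:
                cleaned.append(separate_buffers(value, path + [i], buffer_paths, buffers))
        return cleaned
    return substate

state = {'plain': [0, 'text'], 'x': {'ar': memoryview(b'abc')}}
paths, bufs = [], []
print(separate_buffers(state, [], paths, bufs), paths)
# {'plain': [0, 'text'], 'x': {}} [['x', 'ar']]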
pmichali/whodunit
whodunit/__init__.py
https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L195-L218
def collect_blame_info(cls, matches): """Runs git blame on files, for the specified sets of line ranges. If no line range tuples are provided, it will do all lines. """ old_area = None for filename, ranges in matches: area, name = os.path.split(filename) if not area: area = '.' if area != old_area: print("\n\n%s/\n" % area) old_area = area print("%s " % name, end="") filter = cls.build_line_range_filter(ranges) command = ['git', 'blame', '--line-porcelain'] + filter + [name] os.chdir(area) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if err: print(" <<<<<<<<<< Unable to collect 'git blame' info:", err) else: yield out
[ "def", "collect_blame_info", "(", "cls", ",", "matches", ")", ":", "old_area", "=", "None", "for", "filename", ",", "ranges", "in", "matches", ":", "area", ",", "name", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "if", "not", "area", ":", "area", "=", "'.'", "if", "area", "!=", "old_area", ":", "print", "(", "\"\\n\\n%s/\\n\"", "%", "area", ")", "old_area", "=", "area", "print", "(", "\"%s \"", "%", "name", ",", "end", "=", "\"\"", ")", "filter", "=", "cls", ".", "build_line_range_filter", "(", "ranges", ")", "command", "=", "[", "'git'", ",", "'blame'", ",", "'--line-porcelain'", "]", "+", "filter", "+", "[", "name", "]", "os", ".", "chdir", "(", "area", ")", "p", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "if", "err", ":", "print", "(", "\" <<<<<<<<<< Unable to collect 'git blame' info:\"", ",", "err", ")", "else", ":", "yield", "out" ]
Runs git blame on files, for the specified sets of line ranges. If no line range tuples are provided, it will do all lines.
[ "Runs", "git", "blame", "on", "files", "for", "the", "specified", "sets", "of", "line", "ranges", "." ]
python
train
39.5
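build_line_range_filter is referenced but not included in this record. Assuming it maps (start, end) tuples onto git blame's repeated -L options, the resulting command line can be previewed without spawning git, as below; the option layout is a guess for illustration.

def build_line_range_filter(ranges):
    """Assumed helper shape: (start, end) tuples -> ['-L', 'start,end', ...]."""
    args = []
    for start, end in ranges:
        args += ['-L', '%d,%d' % (start, end)]
    return args

ranges = [(10, 20), (35, 40)]
command = ['git', 'blame', '--line-porcelain'] + build_line_range_filter(ranges) + ['whodunit.py']
print(' '.join(command))
# git blame --line-porcelain -L 10,20 -L 35,40 whodunit.py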
pydata/xarray
xarray/core/coordinates.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/coordinates.py#L93-L101
def _merge_raw(self, other): """For use with binary arithmetic.""" if other is None: variables = OrderedDict(self.variables) else: # don't align because we already called xarray.align variables = expand_and_merge_variables( [self.variables, other.variables]) return variables
[ "def", "_merge_raw", "(", "self", ",", "other", ")", ":", "if", "other", "is", "None", ":", "variables", "=", "OrderedDict", "(", "self", ".", "variables", ")", "else", ":", "# don't align because we already called xarray.align", "variables", "=", "expand_and_merge_variables", "(", "[", "self", ".", "variables", ",", "other", ".", "variables", "]", ")", "return", "variables" ]
For use with binary arithmetic.
[ "For", "use", "with", "binary", "arithmetic", "." ]
python
train
39
GNS3/gns3-server
gns3server/controller/snapshot.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/snapshot.py#L75-L92
def restore(self): """ Restore the snapshot """ yield from self._project.delete_on_computes() # We don't send close notif to clients because the close / open dance is purely internal yield from self._project.close(ignore_notification=True) self._project.controller.notification.emit("snapshot.restored", self.__json__()) try: if os.path.exists(os.path.join(self._project.path, "project-files")): shutil.rmtree(os.path.join(self._project.path, "project-files")) with open(self._path, "rb") as f: project = yield from import_project(self._project.controller, self._project.id, f, location=self._project.path) except (OSError, PermissionError) as e: raise aiohttp.web.HTTPConflict(text=str(e)) yield from project.open() return project
[ "def", "restore", "(", "self", ")", ":", "yield", "from", "self", ".", "_project", ".", "delete_on_computes", "(", ")", "# We don't send close notif to clients because the close / open dance is purely internal", "yield", "from", "self", ".", "_project", ".", "close", "(", "ignore_notification", "=", "True", ")", "self", ".", "_project", ".", "controller", ".", "notification", ".", "emit", "(", "\"snapshot.restored\"", ",", "self", ".", "__json__", "(", ")", ")", "try", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_project", ".", "path", ",", "\"project-files\"", ")", ")", ":", "shutil", ".", "rmtree", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_project", ".", "path", ",", "\"project-files\"", ")", ")", "with", "open", "(", "self", ".", "_path", ",", "\"rb\"", ")", "as", "f", ":", "project", "=", "yield", "from", "import_project", "(", "self", ".", "_project", ".", "controller", ",", "self", ".", "_project", ".", "id", ",", "f", ",", "location", "=", "self", ".", "_project", ".", "path", ")", "except", "(", "OSError", ",", "PermissionError", ")", "as", "e", ":", "raise", "aiohttp", ".", "web", ".", "HTTPConflict", "(", "text", "=", "str", "(", "e", ")", ")", "yield", "from", "project", ".", "open", "(", ")", "return", "project" ]
Restore the snapshot
[ "Restore", "the", "snapshot" ]
python
train
48.388889
rerb/django-fortune
fortune/views.py
https://github.com/rerb/django-fortune/blob/f84d34f616ecabd4fab8351ad7d3062cc9d6b127/fortune/views.py#L30-L35
def loaded(self, request, *args, **kwargs): """Return a list of loaded Packs. """ serializer = self.get_serializer(list(Pack.objects.all()), many=True) return Response(serializer.data)
[ "def", "loaded", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "serializer", "=", "self", ".", "get_serializer", "(", "list", "(", "Pack", ".", "objects", ".", "all", "(", ")", ")", ",", "many", "=", "True", ")", "return", "Response", "(", "serializer", ".", "data", ")" ]
Return a list of loaded Packs.
[ "Return", "a", "list", "of", "loaded", "Packs", "." ]
python
train
42
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py#L825-L839
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_src_port(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_fabric_trunk_info = ET.Element("show_fabric_trunk_info") config = show_fabric_trunk_info output = ET.SubElement(show_fabric_trunk_info, "output") show_trunk_list = ET.SubElement(output, "show-trunk-list") trunk_list_groups = ET.SubElement(show_trunk_list, "trunk-list-groups") trunk_list_member = ET.SubElement(trunk_list_groups, "trunk-list-member") trunk_list_src_port = ET.SubElement(trunk_list_member, "trunk-list-src-port") trunk_list_src_port.text = kwargs.pop('trunk_list_src_port') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_src_port", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_fabric_trunk_info", "=", "ET", ".", "Element", "(", "\"show_fabric_trunk_info\"", ")", "config", "=", "show_fabric_trunk_info", "output", "=", "ET", ".", "SubElement", "(", "show_fabric_trunk_info", ",", "\"output\"", ")", "show_trunk_list", "=", "ET", ".", "SubElement", "(", "output", ",", "\"show-trunk-list\"", ")", "trunk_list_groups", "=", "ET", ".", "SubElement", "(", "show_trunk_list", ",", "\"trunk-list-groups\"", ")", "trunk_list_member", "=", "ET", ".", "SubElement", "(", "trunk_list_groups", ",", "\"trunk-list-member\"", ")", "trunk_list_src_port", "=", "ET", ".", "SubElement", "(", "trunk_list_member", ",", "\"trunk-list-src-port\"", ")", "trunk_list_src_port", ".", "text", "=", "kwargs", ".", "pop", "(", "'trunk_list_src_port'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
55.933333
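The generated helper above is easier to read as the element tree it assembles. The snippet below builds the same nesting directly with ElementTree and prints it; the port value is a made-up example, and the NETCONF envelope added by the callback is out of scope here.

import xml.etree.ElementTree as ET

show = ET.Element("show_fabric_trunk_info")
output = ET.SubElement(show, "output")
trunk_list = ET.SubElement(output, "show-trunk-list")
groups = ET.SubElement(trunk_list, "trunk-list-groups")
member = ET.SubElement(groups, "trunk-list-member")
src_port = ET.SubElement(member, "trunk-list-src-port")
src_port.text = "0/15"  # example value; the real one comes from kwargs

# Prints the nested request body the generated method hands to its callback.
print(ET.tostring(show, encoding="unicode"))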
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/generator/mavgen_swift.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/generator/mavgen_swift.py#L328-L362
def generate_enums_info(enums, msgs): """Add camel case swift names for enums an entries, descriptions and sort enums alphabetically""" for enum in enums: enum.swift_name = camel_case_from_underscores(enum.name) enum.raw_value_type = get_enum_raw_type(enum, msgs) enum.formatted_description = "" if enum.description: enum.description = " ".join(enum.description.split()) enum.formatted_description = "\n/**\n %s\n*/\n" % enum.description all_entities = [] entities_info = [] for entry in enum.entry: name = entry.name.replace(enum.name + '_', '') """Ensure that enums entry name does not start from digit""" if name[0].isdigit(): name = "MAV_" + name entry.swift_name = camel_case_from_underscores(name) entry.formatted_description = "" if entry.description: entry.description = " ".join(entry.description.split()) entry.formatted_description = "\n\t/// " + entry.description + "\n" all_entities.append(entry.swift_name) entities_info.append('("%s", "%s")' % (entry.name, entry.description.replace('"','\\"'))) enum.all_entities = ", ".join(all_entities) enum.entities_info = ", ".join(entities_info) enum.entity_description = enum.description.replace('"','\\"') enums.sort(key = lambda enum : enum.swift_name)
[ "def", "generate_enums_info", "(", "enums", ",", "msgs", ")", ":", "for", "enum", "in", "enums", ":", "enum", ".", "swift_name", "=", "camel_case_from_underscores", "(", "enum", ".", "name", ")", "enum", ".", "raw_value_type", "=", "get_enum_raw_type", "(", "enum", ",", "msgs", ")", "enum", ".", "formatted_description", "=", "\"\"", "if", "enum", ".", "description", ":", "enum", ".", "description", "=", "\" \"", ".", "join", "(", "enum", ".", "description", ".", "split", "(", ")", ")", "enum", ".", "formatted_description", "=", "\"\\n/**\\n %s\\n*/\\n\"", "%", "enum", ".", "description", "all_entities", "=", "[", "]", "entities_info", "=", "[", "]", "for", "entry", "in", "enum", ".", "entry", ":", "name", "=", "entry", ".", "name", ".", "replace", "(", "enum", ".", "name", "+", "'_'", ",", "''", ")", "\"\"\"Ensure that enums entry name does not start from digit\"\"\"", "if", "name", "[", "0", "]", ".", "isdigit", "(", ")", ":", "name", "=", "\"MAV_\"", "+", "name", "entry", ".", "swift_name", "=", "camel_case_from_underscores", "(", "name", ")", "entry", ".", "formatted_description", "=", "\"\"", "if", "entry", ".", "description", ":", "entry", ".", "description", "=", "\" \"", ".", "join", "(", "entry", ".", "description", ".", "split", "(", ")", ")", "entry", ".", "formatted_description", "=", "\"\\n\\t/// \"", "+", "entry", ".", "description", "+", "\"\\n\"", "all_entities", ".", "append", "(", "entry", ".", "swift_name", ")", "entities_info", ".", "append", "(", "'(\"%s\", \"%s\")'", "%", "(", "entry", ".", "name", ",", "entry", ".", "description", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")", ")", ")", "enum", ".", "all_entities", "=", "\", \"", ".", "join", "(", "all_entities", ")", "enum", ".", "entities_info", "=", "\", \"", ".", "join", "(", "entities_info", ")", "enum", ".", "entity_description", "=", "enum", ".", "description", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")", "enums", ".", "sort", "(", "key", "=", "lambda", "enum", ":", "enum", ".", "swift_name", ")" ]
Add camel case swift names for enums and entries, add descriptions, and sort enums alphabetically
[ "Add", "camel", "case", "swift", "names", "for", "enums", "an", "entries", "descriptions", "and", "sort", "enums", "alphabetically" ]
python
train
41.685714
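camel_case_from_underscores is used throughout this generator but not shown in the record. The sketch below is a plausible behaviour (lower camel case from underscore-separated names); the exact capitalization convention of the real mavgen_swift helper is an assumption here.

def camel_case_from_underscores(name):
    """Assumed behaviour: MAV_CMD_NAV_TAKEOFF -> mavCmdNavTakeoff."""
    parts = [part for part in name.lower().split('_') if part]
    return parts[0] + ''.join(part.capitalize() for part in parts[1:])

print(camel_case_from_underscores('MAV_CMD_NAV_TAKEOFF'))  # mavCmdNavTakeoff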
pycontribs/pyrax
pyrax/clouddatabases.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddatabases.py#L683-L696
def _configure_manager(self): """ Creates a manager to handle the instances, and another to handle flavors. """ self._manager = CloudDatabaseManager(self, resource_class=CloudDatabaseInstance, response_key="instance", uri_base="instances") self._flavor_manager = BaseManager(self, resource_class=CloudDatabaseFlavor, response_key="flavor", uri_base="flavors") self._backup_manager = CloudDatabaseBackupManager(self, resource_class=CloudDatabaseBackup, response_key="backup", uri_base="backups")
[ "def", "_configure_manager", "(", "self", ")", ":", "self", ".", "_manager", "=", "CloudDatabaseManager", "(", "self", ",", "resource_class", "=", "CloudDatabaseInstance", ",", "response_key", "=", "\"instance\"", ",", "uri_base", "=", "\"instances\"", ")", "self", ".", "_flavor_manager", "=", "BaseManager", "(", "self", ",", "resource_class", "=", "CloudDatabaseFlavor", ",", "response_key", "=", "\"flavor\"", ",", "uri_base", "=", "\"flavors\"", ")", "self", ".", "_backup_manager", "=", "CloudDatabaseBackupManager", "(", "self", ",", "resource_class", "=", "CloudDatabaseBackup", ",", "response_key", "=", "\"backup\"", ",", "uri_base", "=", "\"backups\"", ")" ]
Creates a manager to handle the instances, and another to handle flavors.
[ "Creates", "a", "manager", "to", "handle", "the", "instances", "and", "another", "to", "handle", "flavors", "." ]
python
train
45.214286
anrosent/perg
enum.py
https://github.com/anrosent/perg/blob/53293f7ed5e0f2e45fb185b60e344ea8436f02aa/enum.py#L88-L100
def enum_subpattern(p): subpattern_id, d = p patterns = list(enum_gen(d)) ''' if subpattern_id: subpat_iter = EnumSubpatternIterator(subpattern_id, patterns) SUBPATTERNS[subpattern_id] = subpat_iter return subpat_iter else: return patterns ''' return patterns
[ "def", "enum_subpattern", "(", "p", ")", ":", "subpattern_id", ",", "d", "=", "p", "patterns", "=", "list", "(", "enum_gen", "(", "d", ")", ")", "return", "patterns" ]
if subpattern_id: subpat_iter = EnumSubpatternIterator(subpattern_id, patterns) SUBPATTERNS[subpattern_id] = subpat_iter return subpat_iter else: return patterns
[ "if", "subpattern_id", ":", "subpat_iter", "=", "EnumSubpatternIterator", "(", "subpattern_id", "patterns", ")", "SUBPATTERNS", "[", "subpattern_id", "]", "=", "subpat_iter", "return", "subpat_iter", "else", ":", "return", "patterns" ]
python
train
24
SetBased/py-stratum
pystratum/RoutineLoaderHelper.py
https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L599-L617
def _print_sql_with_error(self, sql, error_line): """ Writes a SQL statement with an syntax error to the output. The line where the error occurs is highlighted. :param str sql: The SQL statement. :param int error_line: The line where the error occurs. """ if os.linesep in sql: lines = sql.split(os.linesep) digits = math.ceil(math.log(len(lines) + 1, 10)) i = 1 for line in lines: if i == error_line: self._io.text('<error>{0:{width}} {1}</error>'.format(i, line, width=digits, )) else: self._io.text('{0:{width}} {1}'.format(i, line, width=digits, )) i += 1 else: self._io.text(sql)
[ "def", "_print_sql_with_error", "(", "self", ",", "sql", ",", "error_line", ")", ":", "if", "os", ".", "linesep", "in", "sql", ":", "lines", "=", "sql", ".", "split", "(", "os", ".", "linesep", ")", "digits", "=", "math", ".", "ceil", "(", "math", ".", "log", "(", "len", "(", "lines", ")", "+", "1", ",", "10", ")", ")", "i", "=", "1", "for", "line", "in", "lines", ":", "if", "i", "==", "error_line", ":", "self", ".", "_io", ".", "text", "(", "'<error>{0:{width}} {1}</error>'", ".", "format", "(", "i", ",", "line", ",", "width", "=", "digits", ",", ")", ")", "else", ":", "self", ".", "_io", ".", "text", "(", "'{0:{width}} {1}'", ".", "format", "(", "i", ",", "line", ",", "width", "=", "digits", ",", ")", ")", "i", "+=", "1", "else", ":", "self", ".", "_io", ".", "text", "(", "sql", ")" ]
Writes a SQL statement with a syntax error to the output. The line where the error occurs is highlighted. :param str sql: The SQL statement. :param int error_line: The line where the error occurs.
[ "Writes", "a", "SQL", "statement", "with", "an", "syntax", "error", "to", "the", "output", ".", "The", "line", "where", "the", "error", "occurs", "is", "highlighted", "." ]
python
train
40.578947
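The highlighting logic above boils down to a digit-width computation plus a per-line comparison against error_line. A stand-alone sketch that prints to stdout, with a plain '>>' marker instead of the styled <error> tag used by the IO wrapper:

import math

def print_sql_with_error(sql, error_line):
    """Number each line with a consistent width and flag the offending one."""
    lines = sql.split('\n')
    digits = math.ceil(math.log(len(lines) + 1, 10))
    for i, line in enumerate(lines, start=1):
        marker = '>>' if i == error_line else '  '
        print('{0} {1:{width}} {2}'.format(marker, i, line, width=digits))

print_sql_with_error('SELECT *\nFROM t1\nWHERE x ==== 1', error_line=3)
#    1 SELECT *
#    2 FROM t1
# >> 3 WHERE x ==== 1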
IntelPython/mkl_fft
mkl_fft/_numpy_fft.py
https://github.com/IntelPython/mkl_fft/blob/54b3271d64666f9af9f11418b4ca43d69054eb94/mkl_fft/_numpy_fft.py#L682-L778
def ifftn(a, s=None, axes=None, norm=None): """ Compute the N-dimensional inverse discrete Fourier Transform. This function computes the inverse of the N-dimensional discrete Fourier Transform over any number of axes in an M-dimensional array by means of the Fast Fourier Transform (FFT). In other words, ``ifftn(fftn(a)) == a`` to within numerical accuracy. For a description of the definitions and conventions used, see `numpy.fft`. The input, analogously to `ifft`, should be ordered in the same way as is returned by `fftn`, i.e. it should have the term for zero frequency in all axes in the low-order corner, the positive frequency terms in the first half of all axes, the term for the Nyquist frequency in the middle of all axes and the negative frequency terms in the second half of all axes, in order of decreasingly negative frequency. Parameters ---------- a : array_like Input array, can be complex. s : sequence of ints, optional Shape (length of each transformed axis) of the output (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). This corresponds to ``n`` for ``ifft(x, n)``. Along any axis, if the given shape is smaller than that of the input, the input is cropped. If it is larger, the input is padded with zeros. if `s` is not given, the shape of the input along the axes specified by `axes` is used. See notes for issue on `ifft` zero padding. axes : sequence of ints, optional Axes over which to compute the IFFT. If not given, the last ``len(s)`` axes are used, or all axes if `s` is also not specified. Repeated indices in `axes` means that the inverse transform over that axis is performed multiple times. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axes indicated by `axes`, or by a combination of `s` or `a`, as explained in the parameters section above. Raises ------ ValueError If `s` and `axes` have different length. IndexError If an element of `axes` is larger than than the number of axes of `a`. See Also -------- numpy.fft : Overall view of discrete Fourier transforms, with definitions and conventions used. fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse. ifft : The one-dimensional inverse FFT. ifft2 : The two-dimensional inverse FFT. ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning of array. Notes ----- See `numpy.fft` for definitions and conventions used. Zero-padding, analogously with `ifft`, is performed by appending zeros to the input along the specified dimension. Although this is the common approach, it might lead to surprising results. If another form of zero padding is desired, it must be performed before `ifftn` is called. Examples -------- >>> a = np.eye(4) >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) Create and plot an image with band-limited frequency content: >>> import matplotlib.pyplot as plt >>> n = np.zeros((200,200), dtype=complex) >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) >>> im = np.fft.ifftn(n).real >>> plt.imshow(im) <matplotlib.image.AxesImage object at 0x...> >>> plt.show() """ unitary = _unitary(norm) output = mkl_fft.ifftn(a, s, axes) if unitary: output *= sqrt(_tot_size(output, axes)) return output
[ "def", "ifftn", "(", "a", ",", "s", "=", "None", ",", "axes", "=", "None", ",", "norm", "=", "None", ")", ":", "unitary", "=", "_unitary", "(", "norm", ")", "output", "=", "mkl_fft", ".", "ifftn", "(", "a", ",", "s", ",", "axes", ")", "if", "unitary", ":", "output", "*=", "sqrt", "(", "_tot_size", "(", "output", ",", "axes", ")", ")", "return", "output" ]
Compute the N-dimensional inverse discrete Fourier Transform. This function computes the inverse of the N-dimensional discrete Fourier Transform over any number of axes in an M-dimensional array by means of the Fast Fourier Transform (FFT). In other words, ``ifftn(fftn(a)) == a`` to within numerical accuracy. For a description of the definitions and conventions used, see `numpy.fft`. The input, analogously to `ifft`, should be ordered in the same way as is returned by `fftn`, i.e. it should have the term for zero frequency in all axes in the low-order corner, the positive frequency terms in the first half of all axes, the term for the Nyquist frequency in the middle of all axes and the negative frequency terms in the second half of all axes, in order of decreasingly negative frequency. Parameters ---------- a : array_like Input array, can be complex. s : sequence of ints, optional Shape (length of each transformed axis) of the output (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). This corresponds to ``n`` for ``ifft(x, n)``. Along any axis, if the given shape is smaller than that of the input, the input is cropped. If it is larger, the input is padded with zeros. if `s` is not given, the shape of the input along the axes specified by `axes` is used. See notes for issue on `ifft` zero padding. axes : sequence of ints, optional Axes over which to compute the IFFT. If not given, the last ``len(s)`` axes are used, or all axes if `s` is also not specified. Repeated indices in `axes` means that the inverse transform over that axis is performed multiple times. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axes indicated by `axes`, or by a combination of `s` or `a`, as explained in the parameters section above. Raises ------ ValueError If `s` and `axes` have different length. IndexError If an element of `axes` is larger than than the number of axes of `a`. See Also -------- numpy.fft : Overall view of discrete Fourier transforms, with definitions and conventions used. fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse. ifft : The one-dimensional inverse FFT. ifft2 : The two-dimensional inverse FFT. ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning of array. Notes ----- See `numpy.fft` for definitions and conventions used. Zero-padding, analogously with `ifft`, is performed by appending zeros to the input along the specified dimension. Although this is the common approach, it might lead to surprising results. If another form of zero padding is desired, it must be performed before `ifftn` is called. Examples -------- >>> a = np.eye(4) >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) Create and plot an image with band-limited frequency content: >>> import matplotlib.pyplot as plt >>> n = np.zeros((200,200), dtype=complex) >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) >>> im = np.fft.ifftn(n).real >>> plt.imshow(im) <matplotlib.image.AxesImage object at 0x...> >>> plt.show()
[ "Compute", "the", "N", "-", "dimensional", "inverse", "discrete", "Fourier", "Transform", "." ]
python
train
39.845361
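The wrapper above only layers the 'ortho' scaling, a multiplication by the square root of the transformed size, on top of mkl_fft.ifftn. The same relationship holds for NumPy's reference ifftn, which is used below in place of mkl_fft so the check stays self-contained.

import numpy as np

a = np.random.rand(4, 6) + 1j * np.random.rand(4, 6)

# Backward-normalized inverse transform versus the 'ortho' variant:
# ifftn(a, norm='ortho') == ifftn(a) * sqrt(total number of elements).
default = np.fft.ifftn(a)
ortho = np.fft.ifftn(a, norm='ortho')
print(np.allclose(ortho, default * np.sqrt(a.size)))  # True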
r4fek/django-cassandra-engine
django_cassandra_engine/base/operations.py
https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/base/operations.py#L36-L47
def sql_flush(self, style, tables, sequences, allow_cascade=False): """ Truncate all existing tables in current keyspace. :returns: an empty list """ for table in tables: qs = "TRUNCATE {}".format(table) self.connection.connection.execute(qs) return []
[ "def", "sql_flush", "(", "self", ",", "style", ",", "tables", ",", "sequences", ",", "allow_cascade", "=", "False", ")", ":", "for", "table", "in", "tables", ":", "qs", "=", "\"TRUNCATE {}\"", ".", "format", "(", "table", ")", "self", ".", "connection", ".", "connection", ".", "execute", "(", "qs", ")", "return", "[", "]" ]
Truncate all existing tables in the current keyspace. :returns: an empty list
[ "Truncate", "all", "existing", "tables", "in", "current", "keyspace", "." ]
python
train
26.333333
bukun/TorCMS
torcms/model/user_model.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/user_model.py#L186-L195
def update_time_login(u_name): ''' Update the login time for user. ''' entry = TabMember.update( time_login=tools.timestamp() ).where( TabMember.user_name == u_name ) entry.execute()
[ "def", "update_time_login", "(", "u_name", ")", ":", "entry", "=", "TabMember", ".", "update", "(", "time_login", "=", "tools", ".", "timestamp", "(", ")", ")", ".", "where", "(", "TabMember", ".", "user_name", "==", "u_name", ")", "entry", ".", "execute", "(", ")" ]
Update the login time for the user.
[ "Update", "the", "login", "time", "for", "user", "." ]
python
train
25.3