repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
pytroll/satpy
satpy/readers/seviri_base.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/seviri_base.py#L203-L242
def dec10216(inbuf):
    """Decode packed 10-bit data into 16-bit words.

    Every 5 input bytes hold four 10-bit samples; each group is unpacked
    into four uint16 values using the same bit layout as the reference C
    implementation::

        op[0] = ip[0]*4            + ip[1]/64;
        op[1] = (ip[1] & 0x3F)*16  + ip[2]/16;
        op[2] = (ip[2] & 0x0F)*64  + ip[3]/4;
        op[3] = (ip[3] & 0x03)*256 + ip[4];
    """
    data = inbuf.astype(np.uint16)
    # Trim the buffer so it holds a whole number of 5-byte groups.
    n_out = int(len(data) * 4 / 5)
    n_in = int((n_out * 5) / 4)
    data = data[:n_in]
    # Strided views of bytes 0..4 of every group (dask is slow with
    # per-element indexing, so slice once per byte position instead).
    b0, b1, b2, b3, b4 = (data[k::5] for k in range(5))
    w0 = (b0 << 2) + (b1 >> 6)
    w1 = ((b1 & 0x3F) << 4) + (b2 >> 4)
    w2 = ((b2 & 0x0F) << 6) + (b3 >> 2)
    w3 = ((b3 & 0x03) << 8) + b4
    words = da.stack([w0, w1, w2, w3], axis=-1).ravel()
    words = da.rechunk(words, words.shape[0])
    return words
[ "def", "dec10216", "(", "inbuf", ")", ":", "arr10", "=", "inbuf", ".", "astype", "(", "np", ".", "uint16", ")", "arr16_len", "=", "int", "(", "len", "(", "arr10", ")", "*", "4", "/", "5", ")", "arr10_len", "=", "int", "(", "(", "arr16_len", "*", ...
Decode 10 bits data into 16 bits words. :: /* * pack 4 10-bit words in 5 bytes into 4 16-bit words * * 0 1 2 3 4 5 * 01234567890123456789012345678901234567890 * 0 1 2 3 4 */ ip = &in_buffer[i]; op = &out_buffer[j]; op[0] = ip[0]*4 + ip[1]/64; op[1] = (ip[1] & 0x3F)*16 + ip[2]/16; op[2] = (ip[2] & 0x0F)*64 + ip[3]/4; op[3] = (ip[3] & 0x03)*256 +ip[4];
[ "Decode", "10", "bits", "data", "into", "16", "bits", "words", "." ]
python
train
annoviko/pyclustering
pyclustering/cluster/kmeans.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/kmeans.py#L232-L268
def animate_cluster_allocation(data, observer, animation_velocity = 500, movie_fps = 1, save_movie = None):
    """!
    @brief Animates clustering process that is performed by K-Means algorithm.

    @param[in] data (list): Dataset that is used for clustering.
    @param[in] observer (kmeans_observer): EM observer that was used for collection information about clustering process.
    @param[in] animation_velocity (uint): Interval between frames in milliseconds (for run-time animation only).
    @param[in] movie_fps (uint): Defines frames per second (for rendering movie only).
    @param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.

    """
    figure = plt.figure()

    def render_frame(iteration):
        # Redraw the whole figure for the requested iteration.
        figure.clf()
        figure.suptitle("K-Means algorithm (iteration: " + str(iteration) + ")",
                        fontsize=18, fontweight='bold')

        kmeans_visualizer.show_clusters(data,
                                        observer.get_clusters(iteration),
                                        observer.get_centers(iteration),
                                        None, figure=figure, display=False)
        figure.subplots_adjust(top=0.85)
        return [figure.gca()]

    def render_first_frame():
        return render_frame(0)

    cluster_animation = animation.FuncAnimation(figure, render_frame, len(observer),
                                                interval=animation_velocity,
                                                init_func=render_first_frame,
                                                repeat_delay=5000)

    if save_movie is not None:
        cluster_animation.save(save_movie, writer='ffmpeg', fps=movie_fps, bitrate=3000)
    else:
        plt.show()
[ "def", "animate_cluster_allocation", "(", "data", ",", "observer", ",", "animation_velocity", "=", "500", ",", "movie_fps", "=", "1", ",", "save_movie", "=", "None", ")", ":", "figure", "=", "plt", ".", "figure", "(", ")", "def", "init_frame", "(", ")", ...
! @brief Animates clustering process that is performed by K-Means algorithm. @param[in] data (list): Dataset that is used for clustering. @param[in] observer (kmeans_observer): EM observer that was used for collection information about clustering process. @param[in] animation_velocity (uint): Interval between frames in milliseconds (for run-time animation only). @param[in] movie_fps (uint): Defines frames per second (for rendering movie only). @param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.
[ "!" ]
python
valid
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/displaypub.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/displaypub.py#L46-L66
def _validate_data(self, source, data, metadata=None):
    """Validate the display data.

    Parameters
    ----------
    source : str
        The fully dotted name of the callable that created the data, like
        :func:`foo.bar.my_formatter`.
    data : dict
        The format data dictionary.
    metadata : dict
        Any metadata for the data.

    Raises
    ------
    TypeError
        If ``source`` is not a string, or ``data``/``metadata`` is not a
        dict.
    """
    if not isinstance(source, basestring):
        raise TypeError('source must be a str, got: %r' % source)
    if not isinstance(data, dict):
        raise TypeError('data must be a dict, got: %r' % data)
    if metadata is not None:
        if not isinstance(metadata, dict):
            # Bug fix: report the offending ``metadata`` value, not ``data``.
            raise TypeError('metadata must be a dict, got: %r' % metadata)
[ "def", "_validate_data", "(", "self", ",", "source", ",", "data", ",", "metadata", "=", "None", ")", ":", "if", "not", "isinstance", "(", "source", ",", "basestring", ")", ":", "raise", "TypeError", "(", "'source must be a str, got: %r'", "%", "source", ")",...
Validate the display data. Parameters ---------- source : str The fully dotted name of the callable that created the data, like :func:`foo.bar.my_formatter`. data : dict The formata data dictionary. metadata : dict Any metadata for the data.
[ "Validate", "the", "display", "data", "." ]
python
test
materialsproject/pymatgen
pymatgen/io/qchem/outputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/qchem/outputs.py#L651-L696
def _check_completion_errors(self):
    """
    Parse known fatal error messages that can cause jobs to crash.

    Checks, in priority order: inability to transform coordinates due to
    a bad symmetry specification, an input file that fails to pass
    inspection, errors opening the input stream, premature end-of-file
    while reading, a missing $molecule section, and Q-Chem never having
    started at all.  The first match is appended to
    ``self.data["errors"]``; if nothing matches, "unknown_error" is
    recorded.
    """
    # (pattern, error keyword) pairs, tried in priority order.
    known_errors = [
        (r"Coordinates do not transform within specified threshold",
         "failed_to_transform_coords"),
        (r"The Q\-Chem input file has failed to pass inspection",
         "input_file_error"),
        (r"Error opening input stream",
         "failed_to_read_input"),
        (r"FileMan error: End of file reached prematurely",
         "IO_error"),
        (r"Could not find \$molecule section in ParseQInput",
         "read_molecule_error"),
    ]
    for pattern, error in known_errors:
        match = read_pattern(
            self.text, {"key": pattern},
            terminate_on_match=True).get('key')
        if match == [[]]:
            self.data["errors"] += [error]
            return
    # None of the known failure messages matched: either Q-Chem never
    # started, or the failure mode is one we do not recognize.
    if read_pattern(
            self.text, {"key": r"Welcome to Q-Chem"},
            terminate_on_match=True).get('key') != [[]]:
        self.data["errors"] += ["never_called_qchem"]
    else:
        self.data["errors"] += ["unknown_error"]
[ "def", "_check_completion_errors", "(", "self", ")", ":", "if", "read_pattern", "(", "self", ".", "text", ",", "{", "\"key\"", ":", "r\"Coordinates do not transform within specified threshold\"", "}", ",", "terminate_on_match", "=", "True", ")", ".", "get", "(", "...
Parses four potential errors that can cause jobs to crash: inability to transform coordinates due to a bad symmetric specification, an input file that fails to pass inspection, and errors reading and writing files.
[ "Parses", "four", "potential", "errors", "that", "can", "cause", "jobs", "to", "crash", ":", "inability", "to", "transform", "coordinates", "due", "to", "a", "bad", "symmetric", "specification", "an", "input", "file", "that", "fails", "to", "pass", "inspection...
python
train
chaoss/grimoirelab-perceval
perceval/backends/core/launchpad.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/launchpad.py#L347-L356
def issue_collection(self, issue_id, collection_name):
    """Get a collection list of a given issue"""
    collection_path = urijoin("bugs", str(issue_id), collection_name)
    collection_url = self.__get_url(collection_path)

    # Paginated, oldest-updated-first fetch starting at item 0.
    params = {
        'ws.size': self.items_per_page,
        'ws.start': 0,
        'order_by': 'date_last_updated'
    }

    return self.__fetch_items(path=collection_url, payload=params)
[ "def", "issue_collection", "(", "self", ",", "issue_id", ",", "collection_name", ")", ":", "path", "=", "urijoin", "(", "\"bugs\"", ",", "str", "(", "issue_id", ")", ",", "collection_name", ")", "url_collection", "=", "self", ".", "__get_url", "(", "path", ...
Get a collection list of a given issue
[ "Get", "a", "collection", "list", "of", "a", "given", "issue" ]
python
test
happyleavesaoc/python-myusps
myusps/__init__.py
https://github.com/happyleavesaoc/python-myusps/blob/827e74f25d1c1ef0149bb7fda7c606297b743c02/myusps/__init__.py#L221-L235
def get_profile(session):
    """Get profile data."""
    response = session.get(PROFILE_URL, allow_redirects=False)
    # A redirect means the session is no longer authenticated.
    if response.status_code == 302:
        raise USPSError('expired session')

    parsed = BeautifulSoup(response.text, HTML_PARSER)
    profile = parsed.find('div', {'class': 'atg_store_myProfileInfo'})

    data = {}
    for row in profile.find_all('tr'):
        cells = row.find_all('td')
        # Only two-cell rows are key/value pairs.
        if len(cells) != 2:
            continue
        key = ' '.join(cells[0].find_all(text=True)).strip().lower().replace(' ', '_')
        value = ' '.join(cells[1].find_all(text=True)).strip()
        data[key] = value
    return data
[ "def", "get_profile", "(", "session", ")", ":", "response", "=", "session", ".", "get", "(", "PROFILE_URL", ",", "allow_redirects", "=", "False", ")", "if", "response", ".", "status_code", "==", "302", ":", "raise", "USPSError", "(", "'expired session'", ")"...
Get profile data.
[ "Get", "profile", "data", "." ]
python
train
NoneGG/aredis
aredis/commands/streams.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/commands/streams.py#L52-L93
async def xadd(self, name: str, entry: dict, max_len=None, stream_id='*', approximate=True) -> str:
    """
    Appends the specified stream entry to the stream at the specified key.
    If the key does not exist, as a side effect of running
    this command the key is created with a stream value.
    Available since 5.0.0.
    Time complexity: O(log(N)) with N being the number of items already into the stream.

    :param name: name of the stream
    :param entry: key-values to be appended to the stream
    :param max_len: max length of the stream
        length will not be limited max_len is set to None
        notice: max_len should be int greater than 0,
        if set to 0 or negative, the stream length will not be limited
    :param stream_id: id of the options appended to the stream.
        The XADD command will auto-generate a unique id for you
        if the id argument specified is the * character.
        ID are specified by two numbers separated by a "-" character
    :param approximate: whether redis will limit
        the stream with given max length exactly, if set to True,
        there will be a few tens of entries more,
        but never less than 1000 items
    :return: id auto generated or the specified id given.
        notice: specified id without "-" character will be completed like "id-0"
    """
    args = []
    if max_len is not None:
        if not isinstance(max_len, int) or max_len < 1:
            raise RedisError("XADD maxlen must be a positive integer")
        args.append('MAXLEN')
        # '~' lets redis trim approximately, which is much cheaper.
        if approximate:
            args.append('~')
        args.append(str(max_len))
    args.append(stream_id)
    for key, value in entry.items():
        args.append(key)
        args.append(value)
    return await self.execute_command('XADD', name, *args)
[ "async", "def", "xadd", "(", "self", ",", "name", ":", "str", ",", "entry", ":", "dict", ",", "max_len", "=", "None", ",", "stream_id", "=", "'*'", ",", "approximate", "=", "True", ")", "->", "str", ":", "pieces", "=", "[", "]", "if", "max_len", ...
Appends the specified stream entry to the stream at the specified key. If the key does not exist, as a side effect of running this command the key is created with a stream value. Available since 5.0.0. Time complexity: O(log(N)) with N being the number of items already into the stream. :param name: name of the stream :param entry: key-values to be appended to the stream :param max_len: max length of the stream length will not be limited max_len is set to None notice: max_len should be int greater than 0, if set to 0 or negative, the stream length will not be limited :param stream_id: id of the options appended to the stream. The XADD command will auto-generate a unique id for you if the id argument specified is the * character. ID are specified by two numbers separated by a "-" character :param approximate: whether redis will limit the stream with given max length exactly, if set to True, there will be a few tens of entries more, but never less than 1000 items :return: id auto generated or the specified id given. notice: specified id without "-" character will be completed like "id-0"
[ "Appends", "the", "specified", "stream", "entry", "to", "the", "stream", "at", "the", "specified", "key", ".", "If", "the", "key", "does", "not", "exist", "as", "a", "side", "effect", "of", "running", "this", "command", "the", "key", "is", "created", "wi...
python
train
wummel/linkchecker
linkcheck/logger/__init__.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/__init__.py#L273-L280
def check_date(self):
    """
    Check for special dates.
    """
    today = datetime.date.today()
    # LinkChecker's birthday is January 7th, 2000.
    if (today.month, today.day) == (1, 7):
        msg = _("Happy birthday for LinkChecker, I'm %d years old today!")
        self.comment(msg % (today.year - 2000))
[ "def", "check_date", "(", "self", ")", ":", "now", "=", "datetime", ".", "date", ".", "today", "(", ")", "if", "now", ".", "day", "==", "7", "and", "now", ".", "month", "==", "1", ":", "msg", "=", "_", "(", "\"Happy birthday for LinkChecker, I'm %d yea...
Check for special dates.
[ "Check", "for", "special", "dates", "." ]
python
train
cloudendpoints/endpoints-management-python
endpoints_management/control/quota_request.py
https://github.com/cloudendpoints/endpoints-management-python/blob/ec3c4a330ae9d65738861ce6df4dd6c3cb9f7731/endpoints_management/control/quota_request.py#L82-L107
def convert_response(allocate_quota_response, project_id):
    """Computes a http status code and message `AllocateQuotaResponse`

    The return value a tuple (code, message) where

    code: is the http status code
    message: is the message to return

    Args:
       allocate_quota_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaResponse`):
         the response from calling an api

    Returns:
       tuple(code, message)
    """
    if not allocate_quota_response or not allocate_quota_response.allocateErrors:
        return _IS_OK

    # only allocate_quota the first error for now, as per ESP
    first_error = allocate_quota_response.allocateErrors[0]
    code, template = _QUOTA_ERROR_CONVERSION.get(first_error.code, _IS_UNKNOWN)

    # Templates without '{' need no substitution; return them verbatim.
    if template.find(u'{') == -1:
        return code, template

    message = template.format(project_id=project_id,
                              detail=first_error.description or u'')
    return code, message
[ "def", "convert_response", "(", "allocate_quota_response", ",", "project_id", ")", ":", "if", "not", "allocate_quota_response", "or", "not", "allocate_quota_response", ".", "allocateErrors", ":", "return", "_IS_OK", "# only allocate_quota the first error for now, as per ESP", ...
Computes a http status code and message `AllocateQuotaResponse` The return value a tuple (code, message) where code: is the http status code message: is the message to return Args: allocate_quota_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaResponse`): the response from calling an api Returns: tuple(code, message)
[ "Computes", "a", "http", "status", "code", "and", "message", "AllocateQuotaResponse" ]
python
train
sdispater/eloquent
eloquent/orm/relations/has_one_or_many.py
https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/has_one_or_many.py#L193-L211
def first_or_new(self, _attributes=None, **attributes):
    """
    Get the first related model record matching the attributes or instantiate it.

    :param attributes: The attributes
    :type attributes: dict

    :rtype: Model
    """
    if _attributes is not None:
        attributes.update(_attributes)

    instance = self.where(attributes).first()
    if instance is not None:
        return instance

    # No match found: build (but do not persist) a new related instance
    # linked to the parent through the foreign key.
    instance = self._related.new_instance()
    instance.set_attribute(self.get_plain_foreign_key(), self.get_parent_key())
    return instance
[ "def", "first_or_new", "(", "self", ",", "_attributes", "=", "None", ",", "*", "*", "attributes", ")", ":", "if", "_attributes", "is", "not", "None", ":", "attributes", ".", "update", "(", "_attributes", ")", "instance", "=", "self", ".", "where", "(", ...
Get the first related model record matching the attributes or instantiate it. :param attributes: The attributes :type attributes: dict :rtype: Model
[ "Get", "the", "first", "related", "model", "record", "matching", "the", "attributes", "or", "instantiate", "it", "." ]
python
train
trevisanj/a99
a99/textinterface.py
https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/textinterface.py#L168-L202
def yesno(question, default=None):
    """Asks a yes/no question

    Args:
        question: string **without** the question mark and without the options.
            Example: 'Create links'
        default: default option. Accepted values are 'Y', 'YES', 'N', 'NO' or
            lowercase versions of these valus (this argument is case-insensitive)

    Returns:
        bool: True if user answered Yes, False otherwise
    """
    # Normalize a string default into a bool; bool defaults pass through.
    if default is not None and not isinstance(default, bool):
        option = default.upper()
        if option not in ('Y', 'YES', 'N', 'NO'):
            raise RuntimeError("Invalid default value: '{}'".format(default))
        default = option in ('Y', 'YES')

    # Capitalize the hint letter that matches the default answer.
    yes_hint = "Y" if default == True else "y"
    no_hint = "N" if default == False else "n"
    prompt = "{} ({}/{})? ".format(question, yes_hint, no_hint)

    while True:
        answer = input(prompt).upper()
        if answer == "" and default is not None:
            return default
        if answer in ("Y", "YES"):
            return True
        if answer in ("N", "NO"):
            return False
[ "def", "yesno", "(", "question", ",", "default", "=", "None", ")", ":", "if", "default", "is", "not", "None", ":", "if", "isinstance", "(", "default", ",", "bool", ")", ":", "pass", "else", ":", "default_", "=", "default", ".", "upper", "(", ")", "...
Asks a yes/no question Args: question: string **without** the question mark and without the options. Example: 'Create links' default: default option. Accepted values are 'Y', 'YES', 'N', 'NO' or lowercase versions of these valus (this argument is case-insensitive) Returns: bool: True if user answered Yes, False otherwise
[ "Asks", "a", "yes", "/", "no", "question", "Args", ":", "question", ":", "string", "**", "without", "**", "the", "question", "mark", "and", "without", "the", "options", ".", "Example", ":", "Create", "links", "default", ":", "default", "option", ".", "Ac...
python
train
TaurusOlson/incisive
incisive/core.py
https://github.com/TaurusOlson/incisive/blob/25bb9f53495985c1416c82e26f54158df4050cb0/incisive/core.py#L21-L25
def determine_type(x):
    """Determine the type of x

    Tries ``int``, ``float`` and ``str`` in order (via ``is_type``) and
    converts ``x`` with the first type that matches.

    :raises ValueError: if no candidate type matches ``x``.
    """
    types = (int, float, str)
    # Bug fix: ``filter`` returns a lazy iterator on Python 3, so the
    # original ``filter(...)[0]`` raised TypeError there.  ``next`` on a
    # generator expression works on both Python 2 and 3.
    try:
        _type = next(a for a in types if is_type(a, x))
    except StopIteration:
        raise ValueError("cannot determine type of %r" % (x,))
    return _type(x)
[ "def", "determine_type", "(", "x", ")", ":", "types", "=", "(", "int", ",", "float", ",", "str", ")", "_type", "=", "filter", "(", "lambda", "a", ":", "is_type", "(", "a", ",", "x", ")", ",", "types", ")", "[", "0", "]", "return", "_type", "(",...
Determine the type of x
[ "Determine", "the", "type", "of", "x" ]
python
valid
pandas-dev/pandas
pandas/io/formats/printing.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/printing.py#L405-L428
def format_object_attrs(obj):
    """
    Return a list of tuples of the (attr, formatted_value)
    for common attrs, including dtype, name, length

    Parameters
    ----------
    obj : object
        must be iterable

    Returns
    -------
    list

    """
    attrs = []

    # dtype is quoted so it renders like a string literal in the repr.
    if hasattr(obj, 'dtype'):
        attrs.append(('dtype', "'{}'".format(obj.dtype)))

    name = getattr(obj, 'name', None)
    if name is not None:
        attrs.append(('name', default_pprint(name)))

    # Only report the length when the display would be truncated.
    max_seq_items = get_option('display.max_seq_items') or len(obj)
    if len(obj) > max_seq_items:
        attrs.append(('length', len(obj)))
    return attrs
[ "def", "format_object_attrs", "(", "obj", ")", ":", "attrs", "=", "[", "]", "if", "hasattr", "(", "obj", ",", "'dtype'", ")", ":", "attrs", ".", "append", "(", "(", "'dtype'", ",", "\"'{}'\"", ".", "format", "(", "obj", ".", "dtype", ")", ")", ")",...
Return a list of tuples of the (attr, formatted_value) for common attrs, including dtype, name, length Parameters ---------- obj : object must be iterable Returns ------- list
[ "Return", "a", "list", "of", "tuples", "of", "the", "(", "attr", "formatted_value", ")", "for", "common", "attrs", "including", "dtype", "name", "length" ]
python
train
SpringerPE/python-cfconfigurator
cfconfigurator/cf.py
https://github.com/SpringerPE/python-cfconfigurator/blob/198b4e00cd9e362abee726c0242c1d5f986eb073/cfconfigurator/cf.py#L192-L198
def clean_blobstore_cache(self):
    """Deletes all of the existing buildpack caches in the blobstore"""
    url = self.api_url + self.blobstores_builpack_cache_url
    resp, rcode = self.request('DELETE', url)
    # The API answers 202 Accepted for this delete job; anything else
    # is an error.
    if rcode == 202:
        return resp
    raise CFException(resp, rcode)
[ "def", "clean_blobstore_cache", "(", "self", ")", ":", "url", "=", "self", ".", "api_url", "+", "self", ".", "blobstores_builpack_cache_url", "resp", ",", "rcode", "=", "self", ".", "request", "(", "'DELETE'", ",", "url", ")", "if", "rcode", "!=", "202", ...
Deletes all of the existing buildpack caches in the blobstore
[ "Deletes", "all", "of", "the", "existing", "buildpack", "caches", "in", "the", "blobstore" ]
python
train
reorx/torext
torext/utils.py
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/utils.py#L56-L60
def instance(cls, *args, **kwgs):
    """Will be the only instance"""
    # EAFP: return the cached singleton if it exists, otherwise create
    # and memoize it on the class.  Arguments of later calls are ignored.
    try:
        return cls._instance
    except AttributeError:
        cls._instance = cls(*args, **kwgs)
        return cls._instance
[ "def", "instance", "(", "cls", ",", "*", "args", ",", "*", "*", "kwgs", ")", ":", "if", "not", "hasattr", "(", "cls", ",", "\"_instance\"", ")", ":", "cls", ".", "_instance", "=", "cls", "(", "*", "args", ",", "*", "*", "kwgs", ")", "return", "...
Will be the only instance
[ "Will", "be", "the", "only", "instance" ]
python
train
aouyar/PyMunin
pysysinfo/diskio.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L367-L380
def getDevStats(self, dev, devtype = None):
    """Returns I/O stats for block device.

    @param dev:     Device name
    @param devtype: Device type. (Ignored if None.)
    @return:        Dict of stats, or None if the device does not match
                    the requested type.
    """
    if devtype is not None:
        # Device classes are resolved lazily on first use.
        if self._devClassTree is None:
            self._initDevClasses()
        # Bug fix: the Python-2-only ``<>`` operator is replaced with the
        # equivalent ``!=`` (identical semantics, valid on Python 3).
        if devtype != self._mapDevType.get(dev):
            return None
    return self._diskStats.get(dev)
[ "def", "getDevStats", "(", "self", ",", "dev", ",", "devtype", "=", "None", ")", ":", "if", "devtype", "is", "not", "None", ":", "if", "self", ".", "_devClassTree", "is", "None", ":", "self", ".", "_initDevClasses", "(", ")", "if", "devtype", "<>", "...
Returns I/O stats for block device. @param dev: Device name @param devtype: Device type. (Ignored if None.) @return: Dict of stats.
[ "Returns", "I", "/", "O", "stats", "for", "block", "device", "." ]
python
train
jopohl/urh
src/urh/signalprocessing/Spectrogram.py
https://github.com/jopohl/urh/blob/2eb33b125c8407964cd1092843cde5010eb88aae/src/urh/signalprocessing/Spectrogram.py#L100-L126
def export_to_fta(self, sample_rate, filename: str, include_amplitude=False):
    """
    Export to Frequency, Time, Amplitude file. Frequency is double,
    Time (nanosecond) is uint32, Amplitude is float32

    :return:
    """
    spectrogram = self.__calculate_spectrogram(self.samples)
    # Transpose + vertical flip so rows index frequency bins and columns
    # index time slices.
    spectrogram = np.flipud(spectrogram.T)
    if include_amplitude:
        # NOTE(review): the trailing dimension of 3 combined with the
        # 3-field structured dtype means each ``result[i, j] = (...)``
        # below broadcasts the same record into all 3 slots of that cell
        # — confirm whether a plain 2-D structured array was intended.
        result = np.empty((spectrogram.shape[0], spectrogram.shape[1], 3),
                          dtype=[('f', np.float64), ('t', np.uint32), ('a', np.float32)])
    else:
        result = np.empty((spectrogram.shape[0], spectrogram.shape[1], 2),
                          dtype=[('f', np.float64), ('t', np.uint32)])

    # Center-ordered FFT bin frequencies for the rows.
    fft_freqs = np.fft.fftshift(np.fft.fftfreq(spectrogram.shape[0], 1/sample_rate))
    # Duration of one spectrogram column, in nanoseconds.
    time_width = 1e9 * ((len(self.samples) / sample_rate) / spectrogram.shape[1])

    for i in range(spectrogram.shape[0]):
        for j in range(spectrogram.shape[1]):
            if include_amplitude:
                result[i, j] = (fft_freqs[i], int(j*time_width), spectrogram[i, j])
            else:
                result[i, j] = (fft_freqs[i], int(j * time_width))

    result.tofile(filename)
[ "def", "export_to_fta", "(", "self", ",", "sample_rate", ",", "filename", ":", "str", ",", "include_amplitude", "=", "False", ")", ":", "spectrogram", "=", "self", ".", "__calculate_spectrogram", "(", "self", ".", "samples", ")", "spectrogram", "=", "np", "....
Export to Frequency, Time, Amplitude file. Frequency is double, Time (nanosecond) is uint32, Amplitude is float32 :return:
[ "Export", "to", "Frequency", "Time", "Amplitude", "file", ".", "Frequency", "is", "double", "Time", "(", "nanosecond", ")", "is", "uint32", "Amplitude", "is", "float32" ]
python
train
bharadwaj-raju/libdesktop
libdesktop/system.py
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/system.py#L177-L202
def is_in_path(program):
    '''Check if a program is in the system ``PATH``.

    Checks if a given program is in the user's ``PATH`` or not.

    Args:
        program (str): The program to try to find in ``PATH``.

    Returns:
        bool: Is the program in ``PATH``?
    '''
    if sys.version_info.major == 2:
        # os.get_exec_path() does not exist on Python 2; split PATH on
        # the platform separator (';' on Windows, ':' elsewhere) —
        # os.pathsep replaces the original manual os.name check.
        path = os.getenv('PATH', '').split(os.pathsep)
    else:
        path = os.get_exec_path()

    for directory in path:
        if os.path.isdir(directory):
            try:
                if program in os.listdir(directory):
                    return True
            except OSError:
                # Unreadable PATH entries should not abort the search.
                continue
    # Bug fix: the original fell off the end and returned None; return
    # an explicit False (backward compatible — both are falsy).
    return False
[ "def", "is_in_path", "(", "program", ")", ":", "if", "sys", ".", "version_info", ".", "major", "==", "2", ":", "path", "=", "os", ".", "getenv", "(", "'PATH'", ")", "if", "os", ".", "name", "==", "'nt'", ":", "path", "=", "path", ".", "split", "(...
Check if a program is in the system ``PATH``. Checks if a given program is in the user's ``PATH`` or not. Args: program (str): The program to try to find in ``PATH``. Returns: bool: Is the program in ``PATH``?
[ "Check", "if", "a", "program", "is", "in", "the", "system", "PATH", "." ]
python
train
worldcompany/djangoembed
oembed/views.py
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/views.py#L19-L56
def json(request, *args, **kwargs):
    """
    The oembed endpoint, or the url to which requests for metadata are
    passed.  Third parties will want to access this view with URLs for
    your site's content and be returned OEmbed metadata.
    """
    # coerce to dictionary
    params = dict(request.GET.items())

    callback = params.pop('callback', None)
    url = params.pop('url', None)

    if not url:
        return HttpResponseBadRequest('Required parameter missing: URL')

    try:
        provider = oembed.site.provider_for_url(url)
        if not provider.provides:
            raise OEmbedMissingEndpoint()
    except OEmbedMissingEndpoint:
        raise Http404('No provider found for %s' % url)

    query = dict([(smart_str(k), smart_str(v)) for k, v in params.items() if v])

    # Bug fix: the Python-2-only ``except X, e`` syntax is replaced with
    # ``except X as e`` (valid on Python 2.6+ and Python 3).
    try:
        resource = oembed.site.embed(url, **query)
    except OEmbedException as e:
        raise Http404('Error embedding %s: %s' % (url, str(e)))

    response = HttpResponse(mimetype='application/json')
    json = resource.json

    if callback:
        response.write('%s(%s)' % (defaultfilters.force_escape(callback), json))
    else:
        response.write(json)

    return response
[ "def", "json", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# coerce to dictionary", "params", "=", "dict", "(", "request", ".", "GET", ".", "items", "(", ")", ")", "callback", "=", "params", ".", "pop", "(", "'callback'", ",...
The oembed endpoint, or the url to which requests for metadata are passed. Third parties will want to access this view with URLs for your site's content and be returned OEmbed metadata.
[ "The", "oembed", "endpoint", "or", "the", "url", "to", "which", "requests", "for", "metadata", "are", "passed", ".", "Third", "parties", "will", "want", "to", "access", "this", "view", "with", "URLs", "for", "your", "site", "s", "content", "and", "be", "...
python
valid
agoragames/leaderboard-python
leaderboard/leaderboard.py
https://github.com/agoragames/leaderboard-python/blob/ec309859b197a751ac0322374b36d134d8c5522f/leaderboard/leaderboard.py#L654-L680
def percentile_for_in(self, leaderboard_name, member):
    '''
    Retrieve the percentile for a member in the named leaderboard.

    @param leaderboard_name [String] Name of the leaderboard.
    @param member [String] Member name.
    @return the percentile for a member in the named leaderboard.
    '''
    if not self.check_member_in(leaderboard_name, member):
        return None

    # Single pipelined round trip: total member count, then the member's
    # 0-based rank in descending score order.
    total_members, rank = self.redis_connection.pipeline().zcard(
        leaderboard_name).zrevrank(leaderboard_name, member).execute()

    percentile = math.ceil(
        float(total_members - rank - 1) / float(total_members) * 100)

    # For ascending leaderboards the percentile is mirrored.
    if self.order == self.ASC:
        return 100 - percentile
    return percentile
[ "def", "percentile_for_in", "(", "self", ",", "leaderboard_name", ",", "member", ")", ":", "if", "not", "self", ".", "check_member_in", "(", "leaderboard_name", ",", "member", ")", ":", "return", "None", "responses", "=", "self", ".", "redis_connection", ".", ...
Retrieve the percentile for a member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @return the percentile for a member in the named leaderboard.
[ "Retrieve", "the", "percentile", "for", "a", "member", "in", "the", "named", "leaderboard", "." ]
python
train
pyblish/pyblish-qml
pyblish_qml/host.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/host.py#L532-L540
def _install_houdini(use_threaded_wrapper):
    """Helper function to SideFx Houdini support"""
    import hdefereval

    def threaded_wrapper(func, *args, **kwargs):
        # Houdini requires UI work to run on its main thread; defer the
        # call there and hand back its result.
        result = hdefereval.executeInMainThreadWithResult(func, *args, **kwargs)
        return result

    _common_setup("Houdini", threaded_wrapper, use_threaded_wrapper)
[ "def", "_install_houdini", "(", "use_threaded_wrapper", ")", ":", "import", "hdefereval", "def", "threaded_wrapper", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "hdefereval", ".", "executeInMainThreadWithResult", "(", "func", ",",...
Helper function to SideFx Houdini support
[ "Helper", "function", "to", "SideFx", "Houdini", "support" ]
python
train
PetrochukM/PyTorch-NLP
torchnlp/utils.py
https://github.com/PetrochukM/PyTorch-NLP/blob/5f7320da5c8d781df072fab3f7e421c6347e5bfa/torchnlp/utils.py#L241-L281
def tensors_to(tensors, *args, **kwargs): """ Apply ``torch.Tensor.to`` to tensors in a generic data structure. Inspired by: https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31 Args: tensors (tensor, dict, list, namedtuple or tuple): Data structure with tensor values to move. *args: Arguments passed to ``torch.Tensor.to``. **kwargs: Keyword arguments passed to ``torch.Tensor.to``. Example use case: This is useful as a complementary function to ``collate_tensors``. Following collating, it's important to move your tensors to the appropriate device. Returns: The inputted ``tensors`` with ``torch.Tensor.to`` applied. Example: >>> import torch >>> batch = [ ... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) }, ... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) }, ... ] >>> tensors_to(batch, torch.device('cpu')) # doctest: +ELLIPSIS [{'column_a': tensor(...}] """ if torch.is_tensor(tensors): return tensors.to(*args, **kwargs) elif isinstance(tensors, dict): return {k: tensors_to(v, *args, **kwargs) for k, v in tensors.items()} elif hasattr(tensors, '_asdict') and isinstance(tensors, tuple): # Handle ``namedtuple`` return tensors.__class__(**tensors_to(tensors._asdict(), *args, **kwargs)) elif isinstance(tensors, list): return [tensors_to(t, *args, **kwargs) for t in tensors] elif isinstance(tensors, tuple): return tuple([tensors_to(t, *args, **kwargs) for t in tensors]) else: return tensors
[ "def", "tensors_to", "(", "tensors", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "torch", ".", "is_tensor", "(", "tensors", ")", ":", "return", "tensors", ".", "to", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "isinstance...
Apply ``torch.Tensor.to`` to tensors in a generic data structure. Inspired by: https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31 Args: tensors (tensor, dict, list, namedtuple or tuple): Data structure with tensor values to move. *args: Arguments passed to ``torch.Tensor.to``. **kwargs: Keyword arguments passed to ``torch.Tensor.to``. Example use case: This is useful as a complementary function to ``collate_tensors``. Following collating, it's important to move your tensors to the appropriate device. Returns: The inputted ``tensors`` with ``torch.Tensor.to`` applied. Example: >>> import torch >>> batch = [ ... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) }, ... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) }, ... ] >>> tensors_to(batch, torch.device('cpu')) # doctest: +ELLIPSIS [{'column_a': tensor(...}]
[ "Apply", "torch", ".", "Tensor", ".", "to", "to", "tensors", "in", "a", "generic", "data", "structure", "." ]
python
train
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L786-L809
def rotation_and_translation_from_matrix(matrix): """Helper to convert 4x4 matrix to rotation matrix and translation vector. Parameters ---------- matrix : :obj:`numpy.ndarray` of float 4x4 rigid transformation matrix to be converted. Returns ------- :obj:`tuple` of :obj:`numpy.ndarray` of float A 3x3 rotation matrix and a 3-entry translation vector. Raises ------ ValueError If the incoming matrix isn't a 4x4 ndarray. """ if not isinstance(matrix, np.ndarray) or \ matrix.shape[0] != 4 or matrix.shape[1] != 4: raise ValueError('Matrix must be specified as a 4x4 ndarray') rotation = matrix[:3,:3] translation = matrix[:3,3] return rotation, translation
[ "def", "rotation_and_translation_from_matrix", "(", "matrix", ")", ":", "if", "not", "isinstance", "(", "matrix", ",", "np", ".", "ndarray", ")", "or", "matrix", ".", "shape", "[", "0", "]", "!=", "4", "or", "matrix", ".", "shape", "[", "1", "]", "!=",...
Helper to convert 4x4 matrix to rotation matrix and translation vector. Parameters ---------- matrix : :obj:`numpy.ndarray` of float 4x4 rigid transformation matrix to be converted. Returns ------- :obj:`tuple` of :obj:`numpy.ndarray` of float A 3x3 rotation matrix and a 3-entry translation vector. Raises ------ ValueError If the incoming matrix isn't a 4x4 ndarray.
[ "Helper", "to", "convert", "4x4", "matrix", "to", "rotation", "matrix", "and", "translation", "vector", "." ]
python
train
thomasdelaet/python-velbus
velbus/message.py
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/message.py#L95-L102
def to_json_basic(self): """ Create JSON structure with generic attributes :return: dict """ return {'name': self.__class__.__name__, 'priority': self.priority, 'address': self.address, 'rtr': self.rtr}
[ "def", "to_json_basic", "(", "self", ")", ":", "return", "{", "'name'", ":", "self", ".", "__class__", ".", "__name__", ",", "'priority'", ":", "self", ".", "priority", ",", "'address'", ":", "self", ".", "address", ",", "'rtr'", ":", "self", ".", "rtr...
Create JSON structure with generic attributes :return: dict
[ "Create", "JSON", "structure", "with", "generic", "attributes", ":", "return", ":", "dict" ]
python
train
hydpy-dev/hydpy
hydpy/models/dam/dam_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/dam/dam_model.py#L47-L51
def pic_totalremotedischarge_v1(self): """Update the receiver link sequence.""" flu = self.sequences.fluxes.fastaccess rec = self.sequences.receivers.fastaccess flu.totalremotedischarge = rec.q[0]
[ "def", "pic_totalremotedischarge_v1", "(", "self", ")", ":", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "rec", "=", "self", ".", "sequences", ".", "receivers", ".", "fastaccess", "flu", ".", "totalremotedischarge", "=", "rec", "....
Update the receiver link sequence.
[ "Update", "the", "receiver", "link", "sequence", "." ]
python
train
closeio/tasktiger
tasktiger/__init__.py
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/__init__.py#L396-L424
def get_queue_stats(self): """ Returns a dict with stats about all the queues. The keys are the queue names, the values are dicts representing how many tasks are in a given status ("queued", "active", "error" or "scheduled"). Example return value: { "default": { "queued": 1, "error": 2 } } """ states = (QUEUED, ACTIVE, SCHEDULED, ERROR) pipeline = self.connection.pipeline() for state in states: pipeline.smembers(self._key(state)) queue_results = pipeline.execute() pipeline = self.connection.pipeline() for state, result in zip(states, queue_results): for queue in result: pipeline.zcard(self._key(state, queue)) card_results = pipeline.execute() queue_stats = defaultdict(dict) for state, result in zip(states, queue_results): for queue in result: queue_stats[queue][state] = card_results.pop(0) return queue_stats
[ "def", "get_queue_stats", "(", "self", ")", ":", "states", "=", "(", "QUEUED", ",", "ACTIVE", ",", "SCHEDULED", ",", "ERROR", ")", "pipeline", "=", "self", ".", "connection", ".", "pipeline", "(", ")", "for", "state", "in", "states", ":", "pipeline", "...
Returns a dict with stats about all the queues. The keys are the queue names, the values are dicts representing how many tasks are in a given status ("queued", "active", "error" or "scheduled"). Example return value: { "default": { "queued": 1, "error": 2 } }
[ "Returns", "a", "dict", "with", "stats", "about", "all", "the", "queues", ".", "The", "keys", "are", "the", "queue", "names", "the", "values", "are", "dicts", "representing", "how", "many", "tasks", "are", "in", "a", "given", "status", "(", "queued", "ac...
python
train
Erotemic/utool
utool/util_list.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L78-L113
def replace_nones(list_, repl=-1): r""" Recursively removes Nones in all lists and sublists and replaces them with the repl variable Args: list_ (list): repl (obj): replacement value Returns: list CommandLine: python -m utool.util_list --test-replace_nones Example: >>> # ENABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> # build test data >>> list_ = [None, 0, 1, 2] >>> repl = -1 >>> # execute function >>> repl_list = replace_nones(list_, repl) >>> # verify results >>> result = str(repl_list) >>> print(result) [-1, 0, 1, 2] """ repl_list = [ repl if item is None else ( replace_nones(item, repl) if isinstance(item, list) else item ) for item in list_ ] return repl_list
[ "def", "replace_nones", "(", "list_", ",", "repl", "=", "-", "1", ")", ":", "repl_list", "=", "[", "repl", "if", "item", "is", "None", "else", "(", "replace_nones", "(", "item", ",", "repl", ")", "if", "isinstance", "(", "item", ",", "list", ")", "...
r""" Recursively removes Nones in all lists and sublists and replaces them with the repl variable Args: list_ (list): repl (obj): replacement value Returns: list CommandLine: python -m utool.util_list --test-replace_nones Example: >>> # ENABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> # build test data >>> list_ = [None, 0, 1, 2] >>> repl = -1 >>> # execute function >>> repl_list = replace_nones(list_, repl) >>> # verify results >>> result = str(repl_list) >>> print(result) [-1, 0, 1, 2]
[ "r", "Recursively", "removes", "Nones", "in", "all", "lists", "and", "sublists", "and", "replaces", "them", "with", "the", "repl", "variable" ]
python
train
adrn/gala
gala/dynamics/_genfunc/genfunc_3d.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/_genfunc/genfunc_3d.py#L314-L386
def plot3D_stacktriax(initial,final_t,N_MAT,file_output): """ For producing plots from paper """ # Setup Stackel potential TT = pot.stackel_triax() times = choose_NT(N_MAT) timeseries=np.linspace(0.,final_t,times) # Integrate orbit results = odeint(pot.orbit_derivs2,initial,timeseries,args=(TT,),rtol=1e-13,atol=1e-13) # Find actions, angles and frequencies (act,ang,n_vec,toy_aa, pars),loop = find_actions(results, timeseries,N_matrix=N_MAT,ifloop=True) toy_pot = 0 if(loop[2]>0.5 or loop[0]>0.5): toy_pot = pot.isochrone(par=np.append(pars,0.)) else: toy_pot = pot.harmonic_oscillator(omega=pars[:3]) # Integrate initial condition in toy potential timeseries_2=np.linspace(0.,2.*final_t,3500) results_toy = odeint(pot.orbit_derivs2,initial,timeseries_2,args=(toy_pot,)) # and plot f,a = plt.subplots(2,3,figsize=[3.32,5.5]) a[0,0] = plt.subplot2grid((3,2), (0, 0)) a[1,0] = plt.subplot2grid((3,2), (0, 1)) a[0,1] = plt.subplot2grid((3,2), (1, 0)) a[1,1] = plt.subplot2grid((3,2), (1, 1)) a[0,2] = plt.subplot2grid((3,2), (2, 0),colspan=2) plt.subplots_adjust(wspace=0.5,hspace=0.45) # xy orbit a[0,0].plot(results.T[0],results.T[1],'k') a[0,0].set_xlabel(r'$x/{\rm kpc}$') a[0,0].set_ylabel(r'$y/{\rm kpc}$') a[0,0].xaxis.set_major_locator(MaxNLocator(5)) # xz orbit a[1,0].plot(results.T[0],results.T[2],'k') a[1,0].set_xlabel(r'$x/{\rm kpc}$') a[1,0].set_ylabel(r'$z/{\rm kpc}$') a[1,0].xaxis.set_major_locator(MaxNLocator(5)) # toy orbits a[0,0].plot(results_toy.T[0],results_toy.T[1],'r',alpha=0.2,linewidth=0.3) a[1,0].plot(results_toy.T[0],results_toy.T[2],'r',alpha=0.2,linewidth=0.3) # Toy actions a[0,2].plot(Conv*timeseries,toy_aa.T[0],'k:',label='Toy action') a[0,2].plot(Conv*timeseries,toy_aa.T[1],'r:') a[0,2].plot(Conv*timeseries,toy_aa.T[2],'b:') # Arrows to show approx. 
actions arrow_end = a[0,2].get_xlim()[1] arrowd = 0.08*(arrow_end-a[0,2].get_xlim()[0]) a[0,2].annotate('',(arrow_end+arrowd,act[0]),(arrow_end,act[0]),arrowprops=dict(arrowstyle='<-',color='k'),annotation_clip=False) a[0,2].annotate('',(arrow_end+arrowd,act[1]),(arrow_end,act[1]),arrowprops=dict(arrowstyle='<-',color='r'),annotation_clip=False) a[0,2].annotate('',(arrow_end+arrowd,act[2]),(arrow_end,act[2]),arrowprops=dict(arrowstyle='<-',color='b'),annotation_clip=False) # True actions a[0,2].plot(Conv*timeseries,TT.action(results[0])[0]*np.ones(len(timeseries)),'k',label='True action') a[0,2].plot(Conv*timeseries,TT.action(results[0])[1]*np.ones(len(timeseries)),'k') a[0,2].plot(Conv*timeseries,TT.action(results[0])[2]*np.ones(len(timeseries)),'k') a[0,2].set_xlabel(r'$t/{\rm Gyr}$') a[0,2].set_ylabel(r'$J/{\rm kpc\,km\,s}^{-1}$') leg = a[0,2].legend(loc='upper center',bbox_to_anchor=(0.5,1.2),ncol=3, numpoints = 1) leg.draw_frame(False) # Toy angle coverage a[0,1].plot(toy_aa.T[3]/(np.pi),toy_aa.T[4]/(np.pi),'k.',markersize=0.4) a[0,1].set_xlabel(r'$\theta_1/\pi$') a[0,1].set_ylabel(r'$\theta_2/\pi$') a[1,1].plot(toy_aa.T[3]/(np.pi),toy_aa.T[5]/(np.pi),'k.',markersize=0.4) a[1,1].set_xlabel(r'$\theta_1/\pi$') a[1,1].set_ylabel(r'$\theta_3/\pi$') plt.savefig(file_output,bbox_inches='tight') return act
[ "def", "plot3D_stacktriax", "(", "initial", ",", "final_t", ",", "N_MAT", ",", "file_output", ")", ":", "# Setup Stackel potential", "TT", "=", "pot", ".", "stackel_triax", "(", ")", "times", "=", "choose_NT", "(", "N_MAT", ")", "timeseries", "=", "np", ".",...
For producing plots from paper
[ "For", "producing", "plots", "from", "paper" ]
python
train
idlesign/uwsgiconf
uwsgiconf/options/alarms.py
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/alarms.py#L88-L119
def alarm_on_fd_ready(self, alarm, fd, message, byte_count=None): """Triggers the alarm when the specified file descriptor is ready for read. This is really useful for integration with the Linux eventfd() facility. Pretty low-level and the basis of most of the alarm plugins. * http://uwsgi-docs.readthedocs.io/en/latest/Changelog-1.9.7.html#alarm-fd :param AlarmType|list[AlarmType] alarm: Alarm. :param str|unicode fd: File descriptor. :param str|unicode message: Message to send. :param int byte_count: Files to read. Default: 1 byte. .. note:: For ``eventfd`` set 8. """ self.register_alarm(alarm) value = fd if byte_count: value += ':%s' % byte_count value += ' %s' % message for alarm in listify(alarm): self._set('alarm-fd', '%s %s' % (alarm.alias, value), multi=True) return self._section
[ "def", "alarm_on_fd_ready", "(", "self", ",", "alarm", ",", "fd", ",", "message", ",", "byte_count", "=", "None", ")", ":", "self", ".", "register_alarm", "(", "alarm", ")", "value", "=", "fd", "if", "byte_count", ":", "value", "+=", "':%s'", "%", "byt...
Triggers the alarm when the specified file descriptor is ready for read. This is really useful for integration with the Linux eventfd() facility. Pretty low-level and the basis of most of the alarm plugins. * http://uwsgi-docs.readthedocs.io/en/latest/Changelog-1.9.7.html#alarm-fd :param AlarmType|list[AlarmType] alarm: Alarm. :param str|unicode fd: File descriptor. :param str|unicode message: Message to send. :param int byte_count: Files to read. Default: 1 byte. .. note:: For ``eventfd`` set 8.
[ "Triggers", "the", "alarm", "when", "the", "specified", "file", "descriptor", "is", "ready", "for", "read", "." ]
python
train
stephenmcd/sphinx-me
sphinx_me.py
https://github.com/stephenmcd/sphinx-me/blob/9f51a04d58a90834a787246ce475a564b4f9e5ee/sphinx_me.py#L87-L94
def get_setup_attribute(attribute, setup_path): """ Runs the project's setup.py script in a process with an arg that will print out the value for a particular attribute such as author or version, and returns the value. """ args = ["python", setup_path, "--%s" % attribute] return Popen(args, stdout=PIPE).communicate()[0].decode('utf-8').strip()
[ "def", "get_setup_attribute", "(", "attribute", ",", "setup_path", ")", ":", "args", "=", "[", "\"python\"", ",", "setup_path", ",", "\"--%s\"", "%", "attribute", "]", "return", "Popen", "(", "args", ",", "stdout", "=", "PIPE", ")", ".", "communicate", "("...
Runs the project's setup.py script in a process with an arg that will print out the value for a particular attribute such as author or version, and returns the value.
[ "Runs", "the", "project", "s", "setup", ".", "py", "script", "in", "a", "process", "with", "an", "arg", "that", "will", "print", "out", "the", "value", "for", "a", "particular", "attribute", "such", "as", "author", "or", "version", "and", "returns", "the...
python
train
portfors-lab/sparkle
sparkle/gui/plotting/pyqtgraph_widgets.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/plotting/pyqtgraph_widgets.py#L450-L472
def setSpecArgs(**kwargs): """Sets optional arguments for the spectrogram appearance. Available options: :param nfft: size of FFT window to use :type nfft: int :param overlap: percent overlap of window :type overlap: number :param window: Type of window to use, choices are hanning, hamming, blackman, bartlett or none (rectangular) :type window: string :param colormap: Gets set by colormap editor. Holds the information to generate the colormap. Items: :meth:`lut<pyqtgraph:pyqtgraph.ImageItem.setLookupTable>`, :meth:`levels<pyqtgraph:pyqtgraph.ImageItem.setLevels>`, state (info for editor) :type colormap: dict """ for key, value in kwargs.items(): if key == 'colormap': SpecWidget.imgArgs['lut'] = value['lut'] SpecWidget.imgArgs['levels'] = value['levels'] SpecWidget.imgArgs['state'] = value['state'] for w in SpecWidget.instances: w.updateColormap() else: SpecWidget.specgramArgs[key] = value
[ "def", "setSpecArgs", "(", "*", "*", "kwargs", ")", ":", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "if", "key", "==", "'colormap'", ":", "SpecWidget", ".", "imgArgs", "[", "'lut'", "]", "=", "value", "[", "'lut'", "]"...
Sets optional arguments for the spectrogram appearance. Available options: :param nfft: size of FFT window to use :type nfft: int :param overlap: percent overlap of window :type overlap: number :param window: Type of window to use, choices are hanning, hamming, blackman, bartlett or none (rectangular) :type window: string :param colormap: Gets set by colormap editor. Holds the information to generate the colormap. Items: :meth:`lut<pyqtgraph:pyqtgraph.ImageItem.setLookupTable>`, :meth:`levels<pyqtgraph:pyqtgraph.ImageItem.setLevels>`, state (info for editor) :type colormap: dict
[ "Sets", "optional", "arguments", "for", "the", "spectrogram", "appearance", "." ]
python
train
fermiPy/fermipy
fermipy/hpx_utils.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/hpx_utils.py#L504-L556
def create_from_header(cls, header, ebins=None, pixels=None): """ Creates an HPX object from a FITS header. header : The FITS header ebins : Energy bin edges [optional] """ convname = HPX.identify_HPX_convention(header) conv = HPX_FITS_CONVENTIONS[convname] if conv.convname not in ['GALPROP', 'GALPROP2']: if header["PIXTYPE"] != "HEALPIX": raise Exception("PIXTYPE != HEALPIX") if header["PIXTYPE"] != "HEALPIX": raise Exception("PIXTYPE != HEALPIX") if header["ORDERING"] == "RING": nest = False elif header["ORDERING"] == "NESTED": nest = True else: raise Exception("ORDERING != RING | NESTED") try: order = header["ORDER"] except KeyError: order = -1 if order < 0: nside = header["NSIDE"] else: nside = -1 try: coordsys = header[conv.coordsys] except KeyError: coordsys = header['COORDSYS'] try: region = header["HPX_REG"] except KeyError: try: region = header["HPXREGION"] except KeyError: region = None try: if header['INDXSCHM'] in ['EXPLICIT', 'PARTIAL']: use_pixels = pixels else: use_pixels = None except KeyError: use_pixels = None return cls(nside, nest, coordsys, order, ebins, region=region, conv=conv, pixels=use_pixels)
[ "def", "create_from_header", "(", "cls", ",", "header", ",", "ebins", "=", "None", ",", "pixels", "=", "None", ")", ":", "convname", "=", "HPX", ".", "identify_HPX_convention", "(", "header", ")", "conv", "=", "HPX_FITS_CONVENTIONS", "[", "convname", "]", ...
Creates an HPX object from a FITS header. header : The FITS header ebins : Energy bin edges [optional]
[ "Creates", "an", "HPX", "object", "from", "a", "FITS", "header", "." ]
python
train
jaywink/federation
federation/entities/diaspora/mappers.py
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/entities/diaspora/mappers.py#L76-L135
def element_to_objects( element: etree.ElementTree, sender: str, sender_key_fetcher:Callable[[str], str]=None, user: UserType =None, ) -> List: """Transform an Element to a list of entities recursively. Possible child entities are added to each entity ``_children`` list. :param tree: Element :param sender: Payload sender id :param sender_key_fetcher: Function to fetch sender public key. If not given, key will always be fetched over network. The function should take sender handle as the only parameter. :param user: Optional receiving user object. If given, should have an ``id``. :returns: list of entities """ entities = [] cls = MAPPINGS.get(element.tag) if not cls: return [] attrs = xml_children_as_dict(element) transformed = transform_attributes(attrs, cls) if hasattr(cls, "fill_extra_attributes"): transformed = cls.fill_extra_attributes(transformed) entity = cls(**transformed) # Add protocol name entity._source_protocol = "diaspora" # Save element object to entity for possible later use entity._source_object = etree.tostring(element) # Save receiving id to object if user: entity._receiving_actor_id = user.id if issubclass(cls, DiasporaRelayableMixin): # If relayable, fetch sender key for validation entity._xml_tags = get_element_child_info(element, "tag") if sender_key_fetcher: entity._sender_key = sender_key_fetcher(entity.actor_id) else: profile = retrieve_and_parse_profile(entity.handle) if profile: entity._sender_key = profile.public_key else: # If not relayable, ensure handles match if not check_sender_and_entity_handle_match(sender, entity.handle): return [] try: entity.validate() except ValueError as ex: logger.error("Failed to validate entity %s: %s", entity, ex, extra={ "attrs": attrs, "transformed": transformed, }) return [] # Extract mentions entity._mentions = entity.extract_mentions() # Do child elements for child in element: entity._children.extend(element_to_objects(child, sender, user=user)) # Add to entities list entities.append(entity) return 
entities
[ "def", "element_to_objects", "(", "element", ":", "etree", ".", "ElementTree", ",", "sender", ":", "str", ",", "sender_key_fetcher", ":", "Callable", "[", "[", "str", "]", ",", "str", "]", "=", "None", ",", "user", ":", "UserType", "=", "None", ",", ")...
Transform an Element to a list of entities recursively. Possible child entities are added to each entity ``_children`` list. :param tree: Element :param sender: Payload sender id :param sender_key_fetcher: Function to fetch sender public key. If not given, key will always be fetched over network. The function should take sender handle as the only parameter. :param user: Optional receiving user object. If given, should have an ``id``. :returns: list of entities
[ "Transform", "an", "Element", "to", "a", "list", "of", "entities", "recursively", "." ]
python
train
tensorflow/cleverhans
cleverhans/devtools/version.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/devtools/version.py#L11-L24
def dev_version(): """ Returns a hexdigest of all the python files in the module. """ md5_hash = hashlib.md5() py_files = sorted(list_files(suffix=".py")) if not py_files: return '' for filename in py_files: with open(filename, 'rb') as fobj: content = fobj.read() md5_hash.update(content) return md5_hash.hexdigest()
[ "def", "dev_version", "(", ")", ":", "md5_hash", "=", "hashlib", ".", "md5", "(", ")", "py_files", "=", "sorted", "(", "list_files", "(", "suffix", "=", "\".py\"", ")", ")", "if", "not", "py_files", ":", "return", "''", "for", "filename", "in", "py_fil...
Returns a hexdigest of all the python files in the module.
[ "Returns", "a", "hexdigest", "of", "all", "the", "python", "files", "in", "the", "module", "." ]
python
train
aalireza/SimpleAudioIndexer
SimpleAudioIndexer/__init__.py
https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L841-L956
def _index_audio_ibm(self, basename=None, replace_already_indexed=False, continuous=True, model="en-US_BroadbandModel", word_confidence=True, word_alternatives_threshold=0.9, profanity_filter_for_US_results=False): """ Implements a search-suitable interface for Watson speech API. Some explaination of the parameters here have been taken from [1]_ Parameters ---------- basename : str, optional A specific basename to be indexed and is placed in src_dir e.g `audio.wav`. If `None` is selected, all the valid audio files would be indexed. Default is `None`. replace_already_indexed : bool `True`, To reindex some audio file that's already in the timestamps. Default is `False`. continuous : bool Indicates whether multiple final results that represent consecutive phrases separated by long pauses are returned. If true, such phrases are returned; if false (the default), recognition ends after the first end-of-speech (EOS) incident is detected. Default is `True`. model : { 'ar-AR_BroadbandModel', 'en-UK_BroadbandModel' 'en-UK_NarrowbandModel', 'en-US_BroadbandModel', (the default) 'en-US_NarrowbandModel', 'es-ES_BroadbandModel', 'es-ES_NarrowbandModel', 'fr-FR_BroadbandModel', 'ja-JP_BroadbandModel', 'ja-JP_NarrowbandModel', 'pt-BR_BroadbandModel', 'pt-BR_NarrowbandModel', 'zh-CN_BroadbandModel', 'zh-CN_NarrowbandModel' } The identifier of the model to be used for the recognition Default is 'en-US_BroadbandModel' word_confidence : bool Indicates whether a confidence measure in the range of 0 to 1 is returned for each word. The default is True. (It's False in the original) word_alternatives_threshold : numeric A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. Default is `0.9`. 
profanity_filter_for_US_results : bool Indicates whether profanity filtering is performed on the transcript. If true, the service filters profanity from all output by replacing inappropriate words with a series of asterisks. If false, the service returns results with no censoring. Applies to US English transcription only. Default is `False`. References ---------- .. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/ """ params = {'continuous': continuous, 'model': model, 'word_alternatives_threshold': word_alternatives_threshold, 'word_confidence': word_confidence, 'timestamps': True, 'inactivity_timeout': str(-1), 'profanity_filter': profanity_filter_for_US_results} self._prepare_audio(basename=basename, replace_already_indexed=replace_already_indexed) for staging_audio_basename in self._list_audio_files( sub_dir="staging"): original_audio_name = ''.join( staging_audio_basename.split('.')[:-1])[:-3] with open("{}/staging/{}".format( self.src_dir, staging_audio_basename), "rb") as f: if self.get_verbosity(): print("Uploading {}...".format(staging_audio_basename)) response = requests.post( url=("https://stream.watsonplatform.net/" "speech-to-text/api/v1/recognize"), auth=(self.get_username_ibm(), self.get_password_ibm()), headers={'content-type': 'audio/wav'}, data=f.read(), params=params) if self.get_verbosity(): print("Indexing {}...".format(staging_audio_basename)) self.__timestamps_unregulated[ original_audio_name + ".wav"].append( self._timestamp_extractor_ibm( staging_audio_basename, json.loads(response.text))) if self.get_verbosity(): print("Done indexing {}".format(staging_audio_basename)) self._timestamp_regulator() if self.get_verbosity(): print("Indexing procedure finished")
[ "def", "_index_audio_ibm", "(", "self", ",", "basename", "=", "None", ",", "replace_already_indexed", "=", "False", ",", "continuous", "=", "True", ",", "model", "=", "\"en-US_BroadbandModel\"", ",", "word_confidence", "=", "True", ",", "word_alternatives_threshold"...
Implements a search-suitable interface for Watson speech API. Some explaination of the parameters here have been taken from [1]_ Parameters ---------- basename : str, optional A specific basename to be indexed and is placed in src_dir e.g `audio.wav`. If `None` is selected, all the valid audio files would be indexed. Default is `None`. replace_already_indexed : bool `True`, To reindex some audio file that's already in the timestamps. Default is `False`. continuous : bool Indicates whether multiple final results that represent consecutive phrases separated by long pauses are returned. If true, such phrases are returned; if false (the default), recognition ends after the first end-of-speech (EOS) incident is detected. Default is `True`. model : { 'ar-AR_BroadbandModel', 'en-UK_BroadbandModel' 'en-UK_NarrowbandModel', 'en-US_BroadbandModel', (the default) 'en-US_NarrowbandModel', 'es-ES_BroadbandModel', 'es-ES_NarrowbandModel', 'fr-FR_BroadbandModel', 'ja-JP_BroadbandModel', 'ja-JP_NarrowbandModel', 'pt-BR_BroadbandModel', 'pt-BR_NarrowbandModel', 'zh-CN_BroadbandModel', 'zh-CN_NarrowbandModel' } The identifier of the model to be used for the recognition Default is 'en-US_BroadbandModel' word_confidence : bool Indicates whether a confidence measure in the range of 0 to 1 is returned for each word. The default is True. (It's False in the original) word_alternatives_threshold : numeric A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. Default is `0.9`. profanity_filter_for_US_results : bool Indicates whether profanity filtering is performed on the transcript. If true, the service filters profanity from all output by replacing inappropriate words with a series of asterisks. If false, the service returns results with no censoring. 
Applies to US English transcription only. Default is `False`. References ---------- .. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
[ "Implements", "a", "search", "-", "suitable", "interface", "for", "Watson", "speech", "API", "." ]
python
train
DecBayComp/RWA-python
rwa/generic.py
https://github.com/DecBayComp/RWA-python/blob/734a52e15a0e8c244d84d74acf3fd64721074732/rwa/generic.py#L438-L462
def defaultStorable(self, python_type=None, storable_type=None, version=None, **kwargs): """ Generate a default storable instance. Arguments: python_type (type): Python type of the object. storable_type (str): storable type name. version (tuple): version number of the storable handler. Returns: StorableHandler: storable instance. Extra keyword arguments are passed to :meth:`registerStorable`. """ if python_type is None: python_type = lookup_type(storable_type) if self.verbose: print('generating storable instance for type: {}'.format(python_type)) self.storables.registerStorable(default_storable(python_type, \ version=version, storable_type=storable_type), **kwargs) return self.byPythonType(python_type, True).asVersion(version)
[ "def", "defaultStorable", "(", "self", ",", "python_type", "=", "None", ",", "storable_type", "=", "None", ",", "version", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "python_type", "is", "None", ":", "python_type", "=", "lookup_type", "(", "st...
Generate a default storable instance. Arguments: python_type (type): Python type of the object. storable_type (str): storable type name. version (tuple): version number of the storable handler. Returns: StorableHandler: storable instance. Extra keyword arguments are passed to :meth:`registerStorable`.
[ "Generate", "a", "default", "storable", "instance", "." ]
python
train
projectatomic/atomic-reactor
docs/manpage/generate_manpage.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/docs/manpage/generate_manpage.py#L77-L87
def create_main_synopsis(self, parser): """ create synopsis from main parser """ self.add_usage(parser.usage, parser._actions, parser._mutually_exclusive_groups, prefix='') usage = self._format_usage(None, parser._actions, parser._mutually_exclusive_groups, '') usage = usage.replace('%s ' % self._prog, '') usage = '.SH SYNOPSIS\n \\fB%s\\fR %s\n' % (self._markup(self._prog), usage) return usage
[ "def", "create_main_synopsis", "(", "self", ",", "parser", ")", ":", "self", ".", "add_usage", "(", "parser", ".", "usage", ",", "parser", ".", "_actions", ",", "parser", ".", "_mutually_exclusive_groups", ",", "prefix", "=", "''", ")", "usage", "=", "self...
create synopsis from main parser
[ "create", "synopsis", "from", "main", "parser" ]
python
train
NoMore201/googleplay-api
gpapi/googleplay.py
https://github.com/NoMore201/googleplay-api/blob/e5e60b83563055bd7e13778ad13a260d2547cbf2/gpapi/googleplay.py#L417-L428
def browse(self, cat=None, subCat=None): """Browse categories. If neither cat nor subcat are specified, return a list of categories, otherwise it return a list of apps using cat (category ID) and subCat (subcategory ID) as filters.""" path = BROWSE_URL + "?c=3" if cat is not None: path += "&cat={}".format(requests.utils.quote(cat)) if subCat is not None: path += "&ctr={}".format(requests.utils.quote(subCat)) data = self.executeRequestApi2(path) return utils.parseProtobufObj(data.payload.browseResponse)
[ "def", "browse", "(", "self", ",", "cat", "=", "None", ",", "subCat", "=", "None", ")", ":", "path", "=", "BROWSE_URL", "+", "\"?c=3\"", "if", "cat", "is", "not", "None", ":", "path", "+=", "\"&cat={}\"", ".", "format", "(", "requests", ".", "utils",...
Browse categories. If neither cat nor subcat are specified, return a list of categories, otherwise it return a list of apps using cat (category ID) and subCat (subcategory ID) as filters.
[ "Browse", "categories", ".", "If", "neither", "cat", "nor", "subcat", "are", "specified", "return", "a", "list", "of", "categories", "otherwise", "it", "return", "a", "list", "of", "apps", "using", "cat", "(", "category", "ID", ")", "and", "subCat", "(", ...
python
valid
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_snmp.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_snmp.py#L187-L199
def snmp_server_user_ipv4_acl(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp") user = ET.SubElement(snmp_server, "user") username_key = ET.SubElement(user, "username") username_key.text = kwargs.pop('username') ipv4_acl = ET.SubElement(user, "ipv4-acl") ipv4_acl.text = kwargs.pop('ipv4_acl') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "snmp_server_user_ipv4_acl", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "snmp_server", "=", "ET", ".", "SubElement", "(", "config", ",", "\"snmp-server\"", ",", "xmlns", "=", "\"urn:b...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
blockstack/blockstack-core
blockstack/lib/nameset/namedb.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/namedb.py#L1462-L1473
def is_new_preorder( self, preorder_hash, lastblock=None ): """ Given a preorder hash of a name, determine whether or not it is unseen before. """ if lastblock is None: lastblock = self.lastblock preorder = namedb_get_name_preorder( self.db, preorder_hash, lastblock ) if preorder is not None: return False else: return True
[ "def", "is_new_preorder", "(", "self", ",", "preorder_hash", ",", "lastblock", "=", "None", ")", ":", "if", "lastblock", "is", "None", ":", "lastblock", "=", "self", ".", "lastblock", "preorder", "=", "namedb_get_name_preorder", "(", "self", ".", "db", ",", ...
Given a preorder hash of a name, determine whether or not it is unseen before.
[ "Given", "a", "preorder", "hash", "of", "a", "name", "determine", "whether", "or", "not", "it", "is", "unseen", "before", "." ]
python
train
wavefrontHQ/python-client
wavefront_api_client/api/source_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/source_api.py#L36-L57
def add_source_tag(self, id, tag_value, **kwargs): # noqa: E501 """Add a tag to a specific source # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.add_source_tag(id, tag_value, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param str tag_value: (required) :return: ResponseContainer If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.add_source_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501 else: (data) = self.add_source_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501 return data
[ "def", "add_source_tag", "(", "self", ",", "id", ",", "tag_value", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ...
Add a tag to a specific source # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.add_source_tag(id, tag_value, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param str tag_value: (required) :return: ResponseContainer If the method is called asynchronously, returns the request thread.
[ "Add", "a", "tag", "to", "a", "specific", "source", "#", "noqa", ":", "E501" ]
python
train
briancappello/flask-unchained
flask_unchained/bundles/security/commands/roles.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/security/commands/roles.py#L33-L42
def create_role(name): """ Create a new role. """ role = role_manager.create(name=name) if click.confirm(f'Are you sure you want to create {role!r}?'): role_manager.save(role, commit=True) click.echo(f'Successfully created {role!r}') else: click.echo('Cancelled.')
[ "def", "create_role", "(", "name", ")", ":", "role", "=", "role_manager", ".", "create", "(", "name", "=", "name", ")", "if", "click", ".", "confirm", "(", "f'Are you sure you want to create {role!r}?'", ")", ":", "role_manager", ".", "save", "(", "role", ",...
Create a new role.
[ "Create", "a", "new", "role", "." ]
python
train
ml4ai/delphi
delphi/utils/fp.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/utils/fp.py#L211-L213
def ptake(n: int, xs: Iterable[T]) -> Iterable[T]: """ take with a tqdm progress bar. """ return tqdm(take(n, xs), total=n)
[ "def", "ptake", "(", "n", ":", "int", ",", "xs", ":", "Iterable", "[", "T", "]", ")", "->", "Iterable", "[", "T", "]", ":", "return", "tqdm", "(", "take", "(", "n", ",", "xs", ")", ",", "total", "=", "n", ")" ]
take with a tqdm progress bar.
[ "take", "with", "a", "tqdm", "progress", "bar", "." ]
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAFetch/QAQuery.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QAQuery.py#L568-L582
def QA_fetch_user(user_cookie, db=DATABASE): """ get the user Arguments: user_cookie : str the unique cookie_id for a user Keyword Arguments: db: database for query Returns: list --- [ACCOUNT] """ collection = DATABASE.account return [res for res in collection.find({'user_cookie': user_cookie}, {"_id": 0})]
[ "def", "QA_fetch_user", "(", "user_cookie", ",", "db", "=", "DATABASE", ")", ":", "collection", "=", "DATABASE", ".", "account", "return", "[", "res", "for", "res", "in", "collection", ".", "find", "(", "{", "'user_cookie'", ":", "user_cookie", "}", ",", ...
get the user Arguments: user_cookie : str the unique cookie_id for a user Keyword Arguments: db: database for query Returns: list --- [ACCOUNT]
[ "get", "the", "user" ]
python
train
thieman/dagobah
dagobah/core/core.py
https://github.com/thieman/dagobah/blob/e624180c2291034960302c9e0b818b65b5a7ee11/dagobah/core/core.py#L508-L547
def edit_task(self, task_name, **kwargs): """ Change the name of a Task owned by this Job. This will affect the historical data available for this Task, e.g. past run logs will no longer be accessible. """ logger.debug('Job {0} editing task {1}'.format(self.name, task_name)) if not self.state.allow_edit_task: raise DagobahError("tasks cannot be edited in this job's " + "current state") if task_name not in self.tasks: raise DagobahError('task %s not found' % task_name) if 'name' in kwargs and isinstance(kwargs['name'], str): if kwargs['name'] in self.tasks: raise DagobahError('task name %s is unavailable' % kwargs['name']) task = self.tasks[task_name] for key in ['name', 'command']: if key in kwargs and isinstance(kwargs[key], str): setattr(task, key, kwargs[key]) if 'soft_timeout' in kwargs: task.set_soft_timeout(kwargs['soft_timeout']) if 'hard_timeout' in kwargs: task.set_hard_timeout(kwargs['hard_timeout']) if 'hostname' in kwargs: task.set_hostname(kwargs['hostname']) if 'name' in kwargs and isinstance(kwargs['name'], str): self.rename_edges(task_name, kwargs['name']) self.tasks[kwargs['name']] = task del self.tasks[task_name] self.parent.commit(cascade=True)
[ "def", "edit_task", "(", "self", ",", "task_name", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "'Job {0} editing task {1}'", ".", "format", "(", "self", ".", "name", ",", "task_name", ")", ")", "if", "not", "self", ".", "state", "."...
Change the name of a Task owned by this Job. This will affect the historical data available for this Task, e.g. past run logs will no longer be accessible.
[ "Change", "the", "name", "of", "a", "Task", "owned", "by", "this", "Job", "." ]
python
train
xeBuz/Flask-Validator
flask_validator/validator.py
https://github.com/xeBuz/Flask-Validator/blob/ef3dd0a24300c88cb728e6dc1a221e7e7127e1f9/flask_validator/validator.py#L64-L70
def __create_event(self): """ Create an SQLAlchemy event listening the 'set' in a particular column. :rtype : object """ if not event.contains(self.field, 'set', self.__validate): event.listen(self.field, 'set', self.__validate, retval=True)
[ "def", "__create_event", "(", "self", ")", ":", "if", "not", "event", ".", "contains", "(", "self", ".", "field", ",", "'set'", ",", "self", ".", "__validate", ")", ":", "event", ".", "listen", "(", "self", ".", "field", ",", "'set'", ",", "self", ...
Create an SQLAlchemy event listening the 'set' in a particular column. :rtype : object
[ "Create", "an", "SQLAlchemy", "event", "listening", "the", "set", "in", "a", "particular", "column", "." ]
python
train
ttinies/sc2players
sc2players/playerRecord.py
https://github.com/ttinies/sc2players/blob/fd9b37c268bf1005d9ef73a25e65ed97c8b7895f/sc2players/playerRecord.py#L229-L233
def apmAggregate(self, **criteria): """collect all match history's apm data to report player's calculated MMR""" apms = [m.apm(self) for m in self.matchSubset(**criteria)] if not apms: return 0 # no apm information without match history return sum(apms) / len(apms)
[ "def", "apmAggregate", "(", "self", ",", "*", "*", "criteria", ")", ":", "apms", "=", "[", "m", ".", "apm", "(", "self", ")", "for", "m", "in", "self", ".", "matchSubset", "(", "*", "*", "criteria", ")", "]", "if", "not", "apms", ":", "return", ...
collect all match history's apm data to report player's calculated MMR
[ "collect", "all", "match", "history", "s", "apm", "data", "to", "report", "player", "s", "calculated", "MMR" ]
python
train
tensorflow/tensorboard
tensorboard/backend/event_processing/db_import_multiplexer.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/db_import_multiplexer.py#L329-L350
def _process_event(self, event, tagged_data): """Processes a single tf.Event and records it in tagged_data.""" event_type = event.WhichOneof('what') # Handle the most common case first. if event_type == 'summary': for value in event.summary.value: value = data_compat.migrate_value(value) tag, metadata, values = tagged_data.get(value.tag, (None, None, [])) values.append((event.step, event.wall_time, value.tensor)) if tag is None: # Store metadata only from the first event. tagged_data[value.tag] = sqlite_writer.TagData( value.tag, value.metadata, values) elif event_type == 'file_version': pass # TODO: reject file version < 2 (at loader level) elif event_type == 'session_log': if event.session_log.status == event_pb2.SessionLog.START: pass # TODO: implement purging via sqlite writer truncation method elif event_type in ('graph_def', 'meta_graph_def'): pass # TODO: support graphs elif event_type == 'tagged_run_metadata': pass
[ "def", "_process_event", "(", "self", ",", "event", ",", "tagged_data", ")", ":", "event_type", "=", "event", ".", "WhichOneof", "(", "'what'", ")", "# Handle the most common case first.", "if", "event_type", "==", "'summary'", ":", "for", "value", "in", "event"...
Processes a single tf.Event and records it in tagged_data.
[ "Processes", "a", "single", "tf", ".", "Event", "and", "records", "it", "in", "tagged_data", "." ]
python
train
fgmacedo/django-export-action
export_action/introspection.py
https://github.com/fgmacedo/django-export-action/blob/215fecb9044d22e3ae19d86c3b220041a11fad07/export_action/introspection.py#L36-L47
def _get_all_field_names(model): """ 100% compatible version of the old API of model._meta.get_all_field_names() From: https://docs.djangoproject.com/en/1.9/ref/models/meta/#migrating-from-the-old-api """ return list(set(chain.from_iterable( (field.name, field.attname) if hasattr(field, 'attname') else (field.name,) for field in model._meta.get_fields() # For complete backwards compatibility, you may want to exclude # GenericForeignKey from the results. if not (field.many_to_one and field.related_model is None) )))
[ "def", "_get_all_field_names", "(", "model", ")", ":", "return", "list", "(", "set", "(", "chain", ".", "from_iterable", "(", "(", "field", ".", "name", ",", "field", ".", "attname", ")", "if", "hasattr", "(", "field", ",", "'attname'", ")", "else", "(...
100% compatible version of the old API of model._meta.get_all_field_names() From: https://docs.djangoproject.com/en/1.9/ref/models/meta/#migrating-from-the-old-api
[ "100%", "compatible", "version", "of", "the", "old", "API", "of", "model", ".", "_meta", ".", "get_all_field_names", "()", "From", ":", "https", ":", "//", "docs", ".", "djangoproject", ".", "com", "/", "en", "/", "1", ".", "9", "/", "ref", "/", "mod...
python
train
raiden-network/raiden
raiden/blockchain_events_handler.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/blockchain_events_handler.py#L276-L351
def handle_channel_settled(raiden: 'RaidenService', event: Event): data = event.event_data token_network_identifier = event.originating_contract channel_identifier = data['args']['channel_identifier'] block_number = data['block_number'] block_hash = data['block_hash'] transaction_hash = data['transaction_hash'] chain_state = views.state_from_raiden(raiden) channel_state = views.get_channelstate_by_canonical_identifier( chain_state=chain_state, canonical_identifier=CanonicalIdentifier( chain_identifier=chain_state.chain_id, token_network_address=token_network_identifier, channel_identifier=channel_identifier, ), ) # This may happen for two reasons: # - This node is not a participant for the given channel (normal operation, # the event should be ignored). # - Something went wrong in our code and the channel state was cleared # before settle (a bug, this should raise an exception on development # mode). # Because we cannot distinguish the two cases, assume the channel is not of # interest and ignore the event. if not channel_state: return """ This is resolving a corner case where the current node view of the channel state does not reflect what the blockchain contains. The corner case goes as follows in a setup of nodes: A -> B: - A sends out a LockedTransfer to B - B sends a refund to A - B goes offline - A sends LockExpired to B Here: (1) the lock is removed from A's state (2) B never received the message - A closes the channel with B's refund - B comes back online and calls updateNonClosingBalanceProof with A's LockedTransfer (LockExpired was never processed). - When channel is settled, B unlocks it's refund transfer lock provided that it gains from doing so. - A does NOT try to unlock its lock because its side of the channel state is empty (lock expired and was removed). The above is resolved by providing the state machine with the onchain locksroots for both participants in the channel so that the channel state is updated to store these locksroots. 
In `raiden_event_handler:handle_contract_send_channelunlock`, those values are used to restore the channel state back to where the locksroots values existed and this channel state is used to calculate the gain and potentially perform unlocks in case there is value to be gained. """ our_locksroot, partner_locksroot = get_onchain_locksroots( chain=raiden.chain, canonical_identifier=channel_state.canonical_identifier, participant1=channel_state.our_state.address, participant2=channel_state.partner_state.address, block_identifier=block_hash, ) channel_settled = ContractReceiveChannelSettled( transaction_hash=transaction_hash, canonical_identifier=channel_state.canonical_identifier, our_onchain_locksroot=our_locksroot, partner_onchain_locksroot=partner_locksroot, block_number=block_number, block_hash=block_hash, ) raiden.handle_and_track_state_change(channel_settled)
[ "def", "handle_channel_settled", "(", "raiden", ":", "'RaidenService'", ",", "event", ":", "Event", ")", ":", "data", "=", "event", ".", "event_data", "token_network_identifier", "=", "event", ".", "originating_contract", "channel_identifier", "=", "data", "[", "'...
This is resolving a corner case where the current node view of the channel state does not reflect what the blockchain contains. The corner case goes as follows in a setup of nodes: A -> B: - A sends out a LockedTransfer to B - B sends a refund to A - B goes offline - A sends LockExpired to B Here: (1) the lock is removed from A's state (2) B never received the message - A closes the channel with B's refund - B comes back online and calls updateNonClosingBalanceProof with A's LockedTransfer (LockExpired was never processed). - When channel is settled, B unlocks it's refund transfer lock provided that it gains from doing so. - A does NOT try to unlock its lock because its side of the channel state is empty (lock expired and was removed). The above is resolved by providing the state machine with the onchain locksroots for both participants in the channel so that the channel state is updated to store these locksroots. In `raiden_event_handler:handle_contract_send_channelunlock`, those values are used to restore the channel state back to where the locksroots values existed and this channel state is used to calculate the gain and potentially perform unlocks in case there is value to be gained.
[ "This", "is", "resolving", "a", "corner", "case", "where", "the", "current", "node", "view", "of", "the", "channel", "state", "does", "not", "reflect", "what", "the", "blockchain", "contains", ".", "The", "corner", "case", "goes", "as", "follows", "in", "a...
python
train
OCA/vertical-hotel
hotel/models/hotel.py
https://github.com/OCA/vertical-hotel/blob/a01442e92b5ea1fda7fb9e6180b3211e8749a35a/hotel/models/hotel.py#L749-L773
def unlink(self): """ Overrides orm unlink method. @param self: The object pointer @return: True/False. """ sale_line_obj = self.env['sale.order.line'] fr_obj = self.env['folio.room.line'] for line in self: if line.order_line_id: sale_unlink_obj = (sale_line_obj.browse ([line.order_line_id.id])) for rec in sale_unlink_obj: room_obj = self.env['hotel.room' ].search([('name', '=', rec.name)]) if room_obj.id: folio_arg = [('folio_id', '=', line.folio_id.id), ('room_id', '=', room_obj.id)] folio_room_line_myobj = fr_obj.search(folio_arg) if folio_room_line_myobj.id: folio_room_line_myobj.unlink() room_obj.write({'isroom': True, 'status': 'available'}) sale_unlink_obj.unlink() return super(HotelFolioLine, self).unlink()
[ "def", "unlink", "(", "self", ")", ":", "sale_line_obj", "=", "self", ".", "env", "[", "'sale.order.line'", "]", "fr_obj", "=", "self", ".", "env", "[", "'folio.room.line'", "]", "for", "line", "in", "self", ":", "if", "line", ".", "order_line_id", ":", ...
Overrides orm unlink method. @param self: The object pointer @return: True/False.
[ "Overrides", "orm", "unlink", "method", "." ]
python
train
mitsei/dlkit
dlkit/json_/resource/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/managers.py#L1015-L1032
def get_resource_search_session(self, proxy): """Gets a resource search session. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.resource.ResourceSearchSession) - ``a ResourceSearchSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_resource_search()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_resource_search()`` is ``true``.* """ if not self.supports_resource_search(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.ResourceSearchSession(proxy=proxy, runtime=self._runtime)
[ "def", "get_resource_search_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_resource_search", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "ResourceSe...
Gets a resource search session. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.resource.ResourceSearchSession) - ``a ResourceSearchSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_resource_search()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_resource_search()`` is ``true``.*
[ "Gets", "a", "resource", "search", "session", "." ]
python
train
CityOfZion/neo-python-core
neocore/IO/BinaryWriter.py
https://github.com/CityOfZion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/IO/BinaryWriter.py#L358-L375
def WriteVarString(self, value, encoding="utf-8"): """ Write a string value to the stream. Read more about variable size encoding here: http://docs.neo.org/en-us/node/network-protocol.html#convention Args: value (string): value to write to the stream. encoding (str): string encoding format. """ if type(value) is str: value = value.encode(encoding) length = len(value) ba = bytearray(value) byts = binascii.hexlify(ba) string = byts.decode(encoding) self.WriteVarInt(length) self.WriteBytes(string)
[ "def", "WriteVarString", "(", "self", ",", "value", ",", "encoding", "=", "\"utf-8\"", ")", ":", "if", "type", "(", "value", ")", "is", "str", ":", "value", "=", "value", ".", "encode", "(", "encoding", ")", "length", "=", "len", "(", "value", ")", ...
Write a string value to the stream. Read more about variable size encoding here: http://docs.neo.org/en-us/node/network-protocol.html#convention Args: value (string): value to write to the stream. encoding (str): string encoding format.
[ "Write", "a", "string", "value", "to", "the", "stream", ".", "Read", "more", "about", "variable", "size", "encoding", "here", ":", "http", ":", "//", "docs", ".", "neo", ".", "org", "/", "en", "-", "us", "/", "node", "/", "network", "-", "protocol", ...
python
train
ejeschke/ginga
ginga/rv/plugins/Command.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Command.py#L218-L228
def cmd_reload_local(self, plname): """reload_local `plname` Reload the *local* plugin named `plname`. You should close all instances of the plugin before attempting to reload. """ self.fv.mm.load_module(plname) for chname in self.fv.get_channel_names(): chinfo = self.fv.get_channel(chname) chinfo.opmon.reload_plugin(plname, chinfo=chinfo) return True
[ "def", "cmd_reload_local", "(", "self", ",", "plname", ")", ":", "self", ".", "fv", ".", "mm", ".", "load_module", "(", "plname", ")", "for", "chname", "in", "self", ".", "fv", ".", "get_channel_names", "(", ")", ":", "chinfo", "=", "self", ".", "fv"...
reload_local `plname` Reload the *local* plugin named `plname`. You should close all instances of the plugin before attempting to reload.
[ "reload_local", "plname" ]
python
train
Gandi/gandi.cli
gandi/cli/commands/domain.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/commands/domain.py#L108-L114
def renew(gandi, domain, duration, background): """Renew a domain.""" result = gandi.domain.renew(domain, duration, background) if background: gandi.pretty_echo(result) return result
[ "def", "renew", "(", "gandi", ",", "domain", ",", "duration", ",", "background", ")", ":", "result", "=", "gandi", ".", "domain", ".", "renew", "(", "domain", ",", "duration", ",", "background", ")", "if", "background", ":", "gandi", ".", "pretty_echo", ...
Renew a domain.
[ "Renew", "a", "domain", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/mcmc/hmc.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/hmc.py#L1189-L1192
def _log_sum_sq(x, axis=None): """Computes log(sum(x**2)).""" return tf.reduce_logsumexp( input_tensor=2. * tf.math.log(tf.abs(x)), axis=axis)
[ "def", "_log_sum_sq", "(", "x", ",", "axis", "=", "None", ")", ":", "return", "tf", ".", "reduce_logsumexp", "(", "input_tensor", "=", "2.", "*", "tf", ".", "math", ".", "log", "(", "tf", ".", "abs", "(", "x", ")", ")", ",", "axis", "=", "axis", ...
Computes log(sum(x**2)).
[ "Computes", "log", "(", "sum", "(", "x", "**", "2", "))", "." ]
python
test
BlackEarth/bxml
bxml/xml.py
https://github.com/BlackEarth/bxml/blob/8fbea5dad7fadc7b854ddbeff6ecfb55aaceeb77/bxml/xml.py#L112-L120
def prefixed_to_namespaced(C, prefixed_name, namespaces): """for a given prefix:name, return {namespace}name from the given namespaces dict """ if ':' not in prefixed_name: return prefixed_name else: prefix, name = prefixed_name.split(':') namespace = namespaces[prefix] return "{%s}%s" % (namespace, name)
[ "def", "prefixed_to_namespaced", "(", "C", ",", "prefixed_name", ",", "namespaces", ")", ":", "if", "':'", "not", "in", "prefixed_name", ":", "return", "prefixed_name", "else", ":", "prefix", ",", "name", "=", "prefixed_name", ".", "split", "(", "':'", ")", ...
for a given prefix:name, return {namespace}name from the given namespaces dict
[ "for", "a", "given", "prefix", ":", "name", "return", "{", "namespace", "}", "name", "from", "the", "given", "namespaces", "dict" ]
python
train
inasafe/inasafe
safe/definitions/utilities.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/definitions/utilities.py#L145-L155
def get_classifications(subcategory_key): """Get hazard or exposure classifications. :param subcategory_key: The hazard or exposure key :type subcategory_key: str :returns: List of hazard or exposure classifications :rtype: list """ classifications = definition(subcategory_key)['classifications'] return sorted(classifications, key=lambda k: k['key'])
[ "def", "get_classifications", "(", "subcategory_key", ")", ":", "classifications", "=", "definition", "(", "subcategory_key", ")", "[", "'classifications'", "]", "return", "sorted", "(", "classifications", ",", "key", "=", "lambda", "k", ":", "k", "[", "'key'", ...
Get hazard or exposure classifications. :param subcategory_key: The hazard or exposure key :type subcategory_key: str :returns: List of hazard or exposure classifications :rtype: list
[ "Get", "hazard", "or", "exposure", "classifications", "." ]
python
train
vaab/colour
colour.py
https://github.com/vaab/colour/blob/11f138eb7841d2045160b378a2eec0c2321144c0/colour.py#L693-L730
def color_scale(begin_hsl, end_hsl, nb): """Returns a list of nb color HSL tuples between begin_hsl and end_hsl >>> from colour import color_scale >>> [rgb2hex(hsl2rgb(hsl)) for hsl in color_scale((0, 1, 0.5), ... (1, 1, 0.5), 3)] ['#f00', '#0f0', '#00f', '#f00'] >>> [rgb2hex(hsl2rgb(hsl)) ... for hsl in color_scale((0, 0, 0), ... (0, 0, 1), ... 15)] # doctest: +ELLIPSIS ['#000', '#111', '#222', ..., '#ccc', '#ddd', '#eee', '#fff'] Of course, asking for negative values is not supported: >>> color_scale((0, 1, 0.5), (1, 1, 0.5), -2) Traceback (most recent call last): ... ValueError: Unsupported negative number of colors (nb=-2). """ if nb < 0: raise ValueError( "Unsupported negative number of colors (nb=%r)." % nb) step = tuple([float(end_hsl[i] - begin_hsl[i]) / nb for i in range(0, 3)]) \ if nb > 0 else (0, 0, 0) def mul(step, value): return tuple([v * value for v in step]) def add_v(step, step2): return tuple([v + step2[i] for i, v in enumerate(step)]) return [add_v(begin_hsl, mul(step, r)) for r in range(0, nb + 1)]
[ "def", "color_scale", "(", "begin_hsl", ",", "end_hsl", ",", "nb", ")", ":", "if", "nb", "<", "0", ":", "raise", "ValueError", "(", "\"Unsupported negative number of colors (nb=%r).\"", "%", "nb", ")", "step", "=", "tuple", "(", "[", "float", "(", "end_hsl",...
Returns a list of nb color HSL tuples between begin_hsl and end_hsl >>> from colour import color_scale >>> [rgb2hex(hsl2rgb(hsl)) for hsl in color_scale((0, 1, 0.5), ... (1, 1, 0.5), 3)] ['#f00', '#0f0', '#00f', '#f00'] >>> [rgb2hex(hsl2rgb(hsl)) ... for hsl in color_scale((0, 0, 0), ... (0, 0, 1), ... 15)] # doctest: +ELLIPSIS ['#000', '#111', '#222', ..., '#ccc', '#ddd', '#eee', '#fff'] Of course, asking for negative values is not supported: >>> color_scale((0, 1, 0.5), (1, 1, 0.5), -2) Traceback (most recent call last): ... ValueError: Unsupported negative number of colors (nb=-2).
[ "Returns", "a", "list", "of", "nb", "color", "HSL", "tuples", "between", "begin_hsl", "and", "end_hsl" ]
python
train
jepegit/cellpy
cellpy/readers/instruments/pec.py
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/readers/instruments/pec.py#L75-L97
def get_raw_limits(self): """Include the settings for how to decide what kind of step you are examining here. The raw limits are 'epsilons' used to check if the current and/or voltage is stable (for example for galvanostatic steps, one would expect that the current is stable (constant) and non-zero). It is expected that different instruments (with different resolution etc.) have different 'epsilons'. Returns: the raw limits (dict) """ warnings.warn("raw limits have not been subject for testing yet") raw_limits = dict() raw_limits["current_hard"] = 0.1 # There is a bug in PEC raw_limits["current_soft"] = 1.0 raw_limits["stable_current_hard"] = 2.0 raw_limits["stable_current_soft"] = 4.0 raw_limits["stable_voltage_hard"] = 2.0 raw_limits["stable_voltage_soft"] = 4.0 raw_limits["stable_charge_hard"] = 2.0 raw_limits["stable_charge_soft"] = 5.0 raw_limits["ir_change"] = 0.00001 return raw_limits
[ "def", "get_raw_limits", "(", "self", ")", ":", "warnings", ".", "warn", "(", "\"raw limits have not been subject for testing yet\"", ")", "raw_limits", "=", "dict", "(", ")", "raw_limits", "[", "\"current_hard\"", "]", "=", "0.1", "# There is a bug in PEC", "raw_limi...
Include the settings for how to decide what kind of step you are examining here. The raw limits are 'epsilons' used to check if the current and/or voltage is stable (for example for galvanostatic steps, one would expect that the current is stable (constant) and non-zero). It is expected that different instruments (with different resolution etc.) have different 'epsilons'. Returns: the raw limits (dict)
[ "Include", "the", "settings", "for", "how", "to", "decide", "what", "kind", "of", "step", "you", "are", "examining", "here", "." ]
python
train
treasure-data/pandas-td
pandas_td/td.py
https://github.com/treasure-data/pandas-td/blob/0f4e1ee726f593dbb9cc74312c286e62afdf7de2/pandas_td/td.py#L439-L486
def create_engine(url, con=None, header=True, show_progress=5.0, clear_progress=True): '''Create a handler for query engine based on a URL. The following environment variables are used for default connection: TD_API_KEY API key TD_API_SERVER API server (default: api.treasuredata.com) HTTP_PROXY HTTP proxy (optional) Parameters ---------- url : string Engine descriptor in the form "type://apikey@host/database?params..." Use shorthand notation "type:database?params..." for the default connection. con : Connection, optional Handler returned by connect. If not given, default connection is used. header : string or boolean, default True Prepend comment strings, in the form "-- comment", as a header of queries. Set False to disable header. show_progress : double or boolean, default 5.0 Number of seconds to wait before printing progress. Set False to disable progress entirely. clear_progress : boolean, default True If True, clear progress when query completed. Returns ------- QueryEngine ''' url = urlparse(url) engine_type = url.scheme if url.scheme else 'presto' if con is None: if url.netloc: # create connection apikey, host = url.netloc.split('@') con = Connection(apikey=apikey, endpoint="https://{0}/".format(host)) else: # default connection con = Connection() database = url.path[1:] if url.path.startswith('/') else url.path params = { 'type': engine_type, } params.update(parse_qsl(url.query)) return QueryEngine(con, database, params, header=header, show_progress=show_progress, clear_progress=clear_progress)
[ "def", "create_engine", "(", "url", ",", "con", "=", "None", ",", "header", "=", "True", ",", "show_progress", "=", "5.0", ",", "clear_progress", "=", "True", ")", ":", "url", "=", "urlparse", "(", "url", ")", "engine_type", "=", "url", ".", "scheme", ...
Create a handler for query engine based on a URL. The following environment variables are used for default connection: TD_API_KEY API key TD_API_SERVER API server (default: api.treasuredata.com) HTTP_PROXY HTTP proxy (optional) Parameters ---------- url : string Engine descriptor in the form "type://apikey@host/database?params..." Use shorthand notation "type:database?params..." for the default connection. con : Connection, optional Handler returned by connect. If not given, default connection is used. header : string or boolean, default True Prepend comment strings, in the form "-- comment", as a header of queries. Set False to disable header. show_progress : double or boolean, default 5.0 Number of seconds to wait before printing progress. Set False to disable progress entirely. clear_progress : boolean, default True If True, clear progress when query completed. Returns ------- QueryEngine
[ "Create", "a", "handler", "for", "query", "engine", "based", "on", "a", "URL", "." ]
python
train
yvesalexandre/bandicoot
bandicoot/helper/maths.py
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/maths.py#L220-L240
def great_circle_distance(pt1, pt2): """ Return the great-circle distance in kilometers between two points, defined by a tuple (lat, lon). Examples -------- >>> brussels = (50.8503, 4.3517) >>> paris = (48.8566, 2.3522) >>> great_circle_distance(brussels, paris) 263.9754164080347 """ r = 6371. delta_latitude = math.radians(pt1[0] - pt2[0]) delta_longitude = math.radians(pt1[1] - pt2[1]) latitude1 = math.radians(pt1[0]) latitude2 = math.radians(pt2[0]) a = math.sin(delta_latitude / 2) ** 2 + math.cos(latitude1) * math.cos(latitude2) * math.sin(delta_longitude / 2) ** 2 return r * 2. * math.asin(math.sqrt(a))
[ "def", "great_circle_distance", "(", "pt1", ",", "pt2", ")", ":", "r", "=", "6371.", "delta_latitude", "=", "math", ".", "radians", "(", "pt1", "[", "0", "]", "-", "pt2", "[", "0", "]", ")", "delta_longitude", "=", "math", ".", "radians", "(", "pt1",...
Return the great-circle distance in kilometers between two points, defined by a tuple (lat, lon). Examples -------- >>> brussels = (50.8503, 4.3517) >>> paris = (48.8566, 2.3522) >>> great_circle_distance(brussels, paris) 263.9754164080347
[ "Return", "the", "great", "-", "circle", "distance", "in", "kilometers", "between", "two", "points", "defined", "by", "a", "tuple", "(", "lat", "lon", ")", "." ]
python
train
IdentityPython/pysaml2
src/saml2/mcache.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/mcache.py#L124-L133
def reset(self, subject_id, entity_id): """ Scrap the assertions received from a IdP or an AA about a special subject. :param subject_id: The subjects identifier :param entity_id: The identifier of the entity_id of the assertion :return: """ if not self._cache.set(_key(subject_id, entity_id), {}, 0): raise CacheError("reset failed")
[ "def", "reset", "(", "self", ",", "subject_id", ",", "entity_id", ")", ":", "if", "not", "self", ".", "_cache", ".", "set", "(", "_key", "(", "subject_id", ",", "entity_id", ")", ",", "{", "}", ",", "0", ")", ":", "raise", "CacheError", "(", "\"res...
Scrap the assertions received from a IdP or an AA about a special subject. :param subject_id: The subjects identifier :param entity_id: The identifier of the entity_id of the assertion :return:
[ "Scrap", "the", "assertions", "received", "from", "a", "IdP", "or", "an", "AA", "about", "a", "special", "subject", "." ]
python
train
uw-it-aca/uw-restclients-sws
uw_sws/section.py
https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/section.py#L267-L280
def get_joint_sections(section, include_instructor_not_on_time_schedule=True): """ Returns a list of uw_sws.models.Section objects, representing joint sections for the passed section. """ joint_sections = [] for url in section.joint_section_urls: section = get_section_by_url(url, include_instructor_not_on_time_schedule) joint_sections.append(section) return joint_sections
[ "def", "get_joint_sections", "(", "section", ",", "include_instructor_not_on_time_schedule", "=", "True", ")", ":", "joint_sections", "=", "[", "]", "for", "url", "in", "section", ".", "joint_section_urls", ":", "section", "=", "get_section_by_url", "(", "url", ",...
Returns a list of uw_sws.models.Section objects, representing joint sections for the passed section.
[ "Returns", "a", "list", "of", "uw_sws", ".", "models", ".", "Section", "objects", "representing", "joint", "sections", "for", "the", "passed", "section", "." ]
python
train
LonamiWebs/Telethon
telethon/tl/custom/message.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/custom/message.py#L489-L497
def geo(self): """ If the message media is geo, geo live or a venue, this returns the :tl:`GeoPoint`. """ if isinstance(self.media, (types.MessageMediaGeo, types.MessageMediaGeoLive, types.MessageMediaVenue)): return self.media.geo
[ "def", "geo", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "media", ",", "(", "types", ".", "MessageMediaGeo", ",", "types", ".", "MessageMediaGeoLive", ",", "types", ".", "MessageMediaVenue", ")", ")", ":", "return", "self", ".", "media"...
If the message media is geo, geo live or a venue, this returns the :tl:`GeoPoint`.
[ "If", "the", "message", "media", "is", "geo", "geo", "live", "or", "a", "venue", "this", "returns", "the", ":", "tl", ":", "GeoPoint", "." ]
python
train
tensorflow/tensorboard
tensorboard/plugins/debugger/interactive_debugger_server_lib.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/interactive_debugger_server_lib.py#L467-L498
def get_file_tracebacks(self, file_path): """Get the lists of ops created at lines of a specified source file. Args: file_path: Path to the source file. Returns: A dict mapping line number to a list of 2-tuples, `(op_name, stack_position)` `op_name` is the name of the name of the op whose creation traceback includes the line. `stack_position` is the position of the line in the op's creation traceback, represented as a 0-based integer. Raises: ValueError: If `file_path` does not point to a source file that has been received by this instance of `SourceManager`. """ if file_path not in self._source_file_content: raise ValueError( 'Source file of path "%s" has not been received by this instance of ' 'SourceManager.' % file_path) lineno_to_op_names_and_stack_position = dict() for op_log_entry in self._graph_traceback.log_entries: for stack_pos, trace in enumerate(op_log_entry.code_def.traces): if self._graph_traceback.id_to_string[trace.file_id] == file_path: if trace.lineno not in lineno_to_op_names_and_stack_position: lineno_to_op_names_and_stack_position[trace.lineno] = [] lineno_to_op_names_and_stack_position[trace.lineno].append( (op_log_entry.name, stack_pos)) return lineno_to_op_names_and_stack_position
[ "def", "get_file_tracebacks", "(", "self", ",", "file_path", ")", ":", "if", "file_path", "not", "in", "self", ".", "_source_file_content", ":", "raise", "ValueError", "(", "'Source file of path \"%s\" has not been received by this instance of '", "'SourceManager.'", "%", ...
Get the lists of ops created at lines of a specified source file. Args: file_path: Path to the source file. Returns: A dict mapping line number to a list of 2-tuples, `(op_name, stack_position)` `op_name` is the name of the name of the op whose creation traceback includes the line. `stack_position` is the position of the line in the op's creation traceback, represented as a 0-based integer. Raises: ValueError: If `file_path` does not point to a source file that has been received by this instance of `SourceManager`.
[ "Get", "the", "lists", "of", "ops", "created", "at", "lines", "of", "a", "specified", "source", "file", "." ]
python
train
KartikTalwar/Duolingo
duolingo.py
https://github.com/KartikTalwar/Duolingo/blob/0f7e9a0d4bfa864ade82890fca3789679ef38bee/duolingo.py#L65-L86
def get_activity_stream(self, before=None): """ Get user's activity stream from ``https://www.duolingo.com/stream/<user_id>?before=<date> if before date is given or else ``https://www.duolingo.com/activity/<user_id>`` :param before: Datetime in format '2015-07-06 05:42:24' :type before: str :rtype: dict """ if before: url = "https://www.duolingo.com/stream/{}?before={}" url = url.format(self.user_data.id, before) else: url = "https://www.duolingo.com/activity/{}" url = url.format(self.user_data.id) request = self._make_req(url) try: return request.json() except: raise Exception('Could not get activity stream')
[ "def", "get_activity_stream", "(", "self", ",", "before", "=", "None", ")", ":", "if", "before", ":", "url", "=", "\"https://www.duolingo.com/stream/{}?before={}\"", "url", "=", "url", ".", "format", "(", "self", ".", "user_data", ".", "id", ",", "before", "...
Get user's activity stream from ``https://www.duolingo.com/stream/<user_id>?before=<date> if before date is given or else ``https://www.duolingo.com/activity/<user_id>`` :param before: Datetime in format '2015-07-06 05:42:24' :type before: str :rtype: dict
[ "Get", "user", "s", "activity", "stream", "from", "https", ":", "//", "www", ".", "duolingo", ".", "com", "/", "stream", "/", "<user_id", ">", "?before", "=", "<date", ">", "if", "before", "date", "is", "given", "or", "else", "https", ":", "//", "www...
python
train
h2oai/h2o-3
h2o-py/h2o/utils/typechecks.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/typechecks.py#L563-L603
def _check_type(var, vtype): """ Return True if the variable is of the specified type, and False otherwise. :param var: variable to check :param vtype: expected variable's type """ if vtype is None: return var is None if isinstance(vtype, _primitive_type): return var == vtype if vtype is str: return isinstance(var, _str_type) if vtype is int: return isinstance(var, _int_type) if vtype is numeric: return isinstance(var, _num_type) if isinstance(vtype, MagicType): return vtype.check(var) if isinstance(vtype, type): # ``vtype`` is a name of the class, or a built-in type such as "list", "tuple", etc return isinstance(var, vtype) if isinstance(vtype, list): # ``vtype`` is a list literal elem_type = U(*vtype) return isinstance(var, list) and all(_check_type(item, elem_type) for item in var) if isinstance(vtype, set): # ``vtype`` is a set literal elem_type = U(*vtype) return isinstance(var, set) and all(_check_type(item, elem_type) for item in var) if isinstance(vtype, tuple): # ``vtype`` is a tuple literal return (isinstance(var, tuple) and len(vtype) == len(var) and all(_check_type(var[i], vtype[i]) for i in range(len(vtype)))) if isinstance(vtype, dict): # ``vtype`` is a dict literal ttkv = U(*viewitems(vtype)) return isinstance(var, dict) and all(_check_type(kv, ttkv) for kv in viewitems(var)) if isinstance(vtype, (FunctionType, BuiltinFunctionType)): return vtype(var) raise RuntimeError("Ivalid type %r in _check_type()" % vtype)
[ "def", "_check_type", "(", "var", ",", "vtype", ")", ":", "if", "vtype", "is", "None", ":", "return", "var", "is", "None", "if", "isinstance", "(", "vtype", ",", "_primitive_type", ")", ":", "return", "var", "==", "vtype", "if", "vtype", "is", "str", ...
Return True if the variable is of the specified type, and False otherwise. :param var: variable to check :param vtype: expected variable's type
[ "Return", "True", "if", "the", "variable", "is", "of", "the", "specified", "type", "and", "False", "otherwise", "." ]
python
test
senaite/senaite.core.supermodel
src/senaite/core/supermodel/model.py
https://github.com/senaite/senaite.core.supermodel/blob/1819154332b8776f187aa98a2e299701983a0119/src/senaite/core/supermodel/model.py#L252-L258
def catalog(self): """Primary registered catalog for the wrapped portal type """ if self._catalog is None: logger.debug("SuperModel::catalog: *Fetch catalog*") self._catalog = self.get_catalog_for(self.brain) return self._catalog
[ "def", "catalog", "(", "self", ")", ":", "if", "self", ".", "_catalog", "is", "None", ":", "logger", ".", "debug", "(", "\"SuperModel::catalog: *Fetch catalog*\"", ")", "self", ".", "_catalog", "=", "self", ".", "get_catalog_for", "(", "self", ".", "brain", ...
Primary registered catalog for the wrapped portal type
[ "Primary", "registered", "catalog", "for", "the", "wrapped", "portal", "type" ]
python
train
crytic/slither
slither/core/declarations/function.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/core/declarations/function.py#L621-L627
def all_state_variables_read(self): """ recursive version of variables_read """ if self._all_state_variables_read is None: self._all_state_variables_read = self._explore_functions( lambda x: x.state_variables_read) return self._all_state_variables_read
[ "def", "all_state_variables_read", "(", "self", ")", ":", "if", "self", ".", "_all_state_variables_read", "is", "None", ":", "self", ".", "_all_state_variables_read", "=", "self", ".", "_explore_functions", "(", "lambda", "x", ":", "x", ".", "state_variables_read"...
recursive version of variables_read
[ "recursive", "version", "of", "variables_read" ]
python
train
datastore/datastore
datastore/core/basic.py
https://github.com/datastore/datastore/blob/7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3/datastore/core/basic.py#L853-L864
def directoryRemove(self, dir_key, key): '''Removes directory entry `key` from directory at `dir_key`. If either the directory `dir_key` or the directory entry `key` don't exist, this method is a no-op. ''' key = str(key) dir_items = self.get(dir_key) or [] if key in dir_items: dir_items = [k for k in dir_items if k != key] self.put(dir_key, dir_items)
[ "def", "directoryRemove", "(", "self", ",", "dir_key", ",", "key", ")", ":", "key", "=", "str", "(", "key", ")", "dir_items", "=", "self", ".", "get", "(", "dir_key", ")", "or", "[", "]", "if", "key", "in", "dir_items", ":", "dir_items", "=", "[", ...
Removes directory entry `key` from directory at `dir_key`. If either the directory `dir_key` or the directory entry `key` don't exist, this method is a no-op.
[ "Removes", "directory", "entry", "key", "from", "directory", "at", "dir_key", "." ]
python
train
wmayner/pyphi
pyphi/partition.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L169-L190
def directed_bipartition_of_one(seq): """Generate directed bipartitions where one part is of length 1. Args: seq (Iterable): The sequence to partition. Returns: list[tuple[tuple]]: A list of tuples containing each of the two partitions. Example: >>> partitions = directed_bipartition_of_one((1, 2, 3)) >>> list(partitions) # doctest: +NORMALIZE_WHITESPACE [((1,), (2, 3)), ((2,), (1, 3)), ((3,), (1, 2)), ((2, 3), (1,)), ((1, 3), (2,)), ((1, 2), (3,))] """ bipartitions = list(bipartition_of_one(seq)) return chain(bipartitions, reverse_elements(bipartitions))
[ "def", "directed_bipartition_of_one", "(", "seq", ")", ":", "bipartitions", "=", "list", "(", "bipartition_of_one", "(", "seq", ")", ")", "return", "chain", "(", "bipartitions", ",", "reverse_elements", "(", "bipartitions", ")", ")" ]
Generate directed bipartitions where one part is of length 1. Args: seq (Iterable): The sequence to partition. Returns: list[tuple[tuple]]: A list of tuples containing each of the two partitions. Example: >>> partitions = directed_bipartition_of_one((1, 2, 3)) >>> list(partitions) # doctest: +NORMALIZE_WHITESPACE [((1,), (2, 3)), ((2,), (1, 3)), ((3,), (1, 2)), ((2, 3), (1,)), ((1, 3), (2,)), ((1, 2), (3,))]
[ "Generate", "directed", "bipartitions", "where", "one", "part", "is", "of", "length", "1", "." ]
python
train
NiklasRosenstein-Python/nr-deprecated
nr/futures.py
https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/futures.py#L371-L388
def set_result(self, result): """ Allows you to set the result of the future without requiring the future to actually be executed. This can be used if the result is available before the future is run, allowing you to keep the future as the interface for retrieving the result data. :param result: The result of the future. :raise RuntimeError: If the future is already enqueued. """ with self._lock: if self._enqueued: raise RuntimeError('can not set result of enqueued Future') self._result = result self._completed = True callbacks = self._prepare_done_callbacks() callbacks()
[ "def", "set_result", "(", "self", ",", "result", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "_enqueued", ":", "raise", "RuntimeError", "(", "'can not set result of enqueued Future'", ")", "self", ".", "_result", "=", "result", "self", "....
Allows you to set the result of the future without requiring the future to actually be executed. This can be used if the result is available before the future is run, allowing you to keep the future as the interface for retrieving the result data. :param result: The result of the future. :raise RuntimeError: If the future is already enqueued.
[ "Allows", "you", "to", "set", "the", "result", "of", "the", "future", "without", "requiring", "the", "future", "to", "actually", "be", "executed", ".", "This", "can", "be", "used", "if", "the", "result", "is", "available", "before", "the", "future", "is", ...
python
train
python-openxml/python-docx
docx/opc/pkgwriter.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/opc/pkgwriter.py#L48-L56
def _write_parts(phys_writer, parts): """ Write the blob of each part in *parts* to the package, along with a rels item for its relationships if and only if it has any. """ for part in parts: phys_writer.write(part.partname, part.blob) if len(part._rels): phys_writer.write(part.partname.rels_uri, part._rels.xml)
[ "def", "_write_parts", "(", "phys_writer", ",", "parts", ")", ":", "for", "part", "in", "parts", ":", "phys_writer", ".", "write", "(", "part", ".", "partname", ",", "part", ".", "blob", ")", "if", "len", "(", "part", ".", "_rels", ")", ":", "phys_wr...
Write the blob of each part in *parts* to the package, along with a rels item for its relationships if and only if it has any.
[ "Write", "the", "blob", "of", "each", "part", "in", "*", "parts", "*", "to", "the", "package", "along", "with", "a", "rels", "item", "for", "its", "relationships", "if", "and", "only", "if", "it", "has", "any", "." ]
python
train
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1059-L1062
def p_expression_eql(self, p): 'expression : expression EQL expression' p[0] = Eql(p[1], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_expression_eql", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "Eql", "(", "p", "[", "1", "]", ",", "p", "[", "3", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",...
expression : expression EQL expression
[ "expression", ":", "expression", "EQL", "expression" ]
python
train
jbloomlab/phydms
phydmslib/models.py
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/models.py#L2082-L2085
def M(self, k, t, tips=None, gaps=None): """See docs for `DistributionModel` abstract base class.""" assert 0 <= k < self.ncats return self._models[k].M(t, tips=tips, gaps=gaps)
[ "def", "M", "(", "self", ",", "k", ",", "t", ",", "tips", "=", "None", ",", "gaps", "=", "None", ")", ":", "assert", "0", "<=", "k", "<", "self", ".", "ncats", "return", "self", ".", "_models", "[", "k", "]", ".", "M", "(", "t", ",", "tips"...
See docs for `DistributionModel` abstract base class.
[ "See", "docs", "for", "DistributionModel", "abstract", "base", "class", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/quasiharmonic.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/quasiharmonic.py#L185-L214
def debye_temperature(self, volume): """ Calculates the debye temperature. Eq(6) in doi.org/10.1016/j.comphy.2003.12.001. Thanks to Joey. Eq(6) above is equivalent to Eq(3) in doi.org/10.1103/PhysRevB.37.790 which does not consider anharmonic effects. Eq(20) in the same paper and Eq(18) in doi.org/10.1016/j.commatsci.2009.12.006 both consider anharmonic contributions to the Debye temperature through the Gruneisen parameter at 0K (Gruneisen constant). The anharmonic contribution is toggled by setting the anharmonic_contribution to True or False in the QuasiharmonicDebyeApprox constructor. Args: volume (float): in Ang^3 Returns: float: debye temperature in K """ term1 = (2./3. * (1. + self.poisson) / (1. - 2. * self.poisson))**1.5 term2 = (1./3. * (1. + self.poisson) / (1. - self.poisson))**1.5 f = (3. / (2. * term1 + term2))**(1. / 3.) debye = 2.9772e-11 * (volume / self.natoms) ** (-1. / 6.) * f * \ np.sqrt(self.bulk_modulus/self.avg_mass) if self.anharmonic_contribution: gamma = self.gruneisen_parameter(0, self.ev_eos_fit.v0) # 0K equilibrium Gruneisen parameter return debye * (self.ev_eos_fit.v0 / volume) ** (gamma) else: return debye
[ "def", "debye_temperature", "(", "self", ",", "volume", ")", ":", "term1", "=", "(", "2.", "/", "3.", "*", "(", "1.", "+", "self", ".", "poisson", ")", "/", "(", "1.", "-", "2.", "*", "self", ".", "poisson", ")", ")", "**", "1.5", "term2", "=",...
Calculates the debye temperature. Eq(6) in doi.org/10.1016/j.comphy.2003.12.001. Thanks to Joey. Eq(6) above is equivalent to Eq(3) in doi.org/10.1103/PhysRevB.37.790 which does not consider anharmonic effects. Eq(20) in the same paper and Eq(18) in doi.org/10.1016/j.commatsci.2009.12.006 both consider anharmonic contributions to the Debye temperature through the Gruneisen parameter at 0K (Gruneisen constant). The anharmonic contribution is toggled by setting the anharmonic_contribution to True or False in the QuasiharmonicDebyeApprox constructor. Args: volume (float): in Ang^3 Returns: float: debye temperature in K
[ "Calculates", "the", "debye", "temperature", ".", "Eq", "(", "6", ")", "in", "doi", ".", "org", "/", "10", ".", "1016", "/", "j", ".", "comphy", ".", "2003", ".", "12", ".", "001", ".", "Thanks", "to", "Joey", "." ]
python
train
markovmodel/PyEMMA
pyemma/util/types.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/util/types.py#L46-L51
def is_iterable_of_int(l): r""" Checks if l is iterable and contains only integral types """ if not is_iterable(l): return False return all(is_int(value) for value in l)
[ "def", "is_iterable_of_int", "(", "l", ")", ":", "if", "not", "is_iterable", "(", "l", ")", ":", "return", "False", "return", "all", "(", "is_int", "(", "value", ")", "for", "value", "in", "l", ")" ]
r""" Checks if l is iterable and contains only integral types
[ "r", "Checks", "if", "l", "is", "iterable", "and", "contains", "only", "integral", "types" ]
python
train
Phylliade/ikpy
src/ikpy/geometry_utils.py
https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/src/ikpy/geometry_utils.py#L19-L25
def Rz_matrix(theta): """Rotation matrix around the Z axis""" return np.array([ [np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1] ])
[ "def", "Rz_matrix", "(", "theta", ")", ":", "return", "np", ".", "array", "(", "[", "[", "np", ".", "cos", "(", "theta", ")", ",", "-", "np", ".", "sin", "(", "theta", ")", ",", "0", "]", ",", "[", "np", ".", "sin", "(", "theta", ")", ",", ...
Rotation matrix around the Z axis
[ "Rotation", "matrix", "around", "the", "Z", "axis" ]
python
train
xtuml/pyxtuml
xtuml/load.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/xtuml/load.py#L381-L422
def populate_connections(self, metamodel): ''' Populate links in a *metamodel* with connections between them. ''' storage = dict() for ass in metamodel.associations: source_class = ass.source_link.to_metaclass target_class = ass.target_link.to_metaclass if target_class not in storage: storage[target_class] = dict() link_key = frozenset(ass.source_link.key_map.values()) if link_key not in storage[target_class]: storage[target_class][link_key] = dict() for other_inst in target_class.storage: inst_key = ass.source_link.compute_index_key(other_inst) if inst_key is None: continue if inst_key not in storage[target_class][link_key]: storage[target_class][link_key][inst_key] = xtuml.OrderedSet() storage[target_class][link_key][inst_key].add(other_inst) for inst in source_class.storage: inst_key = ass.source_link.compute_lookup_key(inst) if inst_key is None: continue if inst_key not in storage[target_class][link_key]: continue for other_inst in storage[target_class][link_key][inst_key]: ass.source_link.connect(other_inst, inst, check=False) ass.target_link.connect(inst, other_inst, check=False) for inst in metamodel.instances: metaclass = xtuml.get_metaclass(inst) for attr in metaclass.referential_attributes: if attr in inst.__dict__: delattr(inst, attr)
[ "def", "populate_connections", "(", "self", ",", "metamodel", ")", ":", "storage", "=", "dict", "(", ")", "for", "ass", "in", "metamodel", ".", "associations", ":", "source_class", "=", "ass", ".", "source_link", ".", "to_metaclass", "target_class", "=", "as...
Populate links in a *metamodel* with connections between them.
[ "Populate", "links", "in", "a", "*", "metamodel", "*", "with", "connections", "between", "them", "." ]
python
test
deepmipt/DeepPavlov
deeppavlov/configs/__init__.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/configs/__init__.py#L56-L67
def _repr_pretty_(self, p, cycle): """method that defines ``Struct``'s pretty printing rules for iPython Args: p (IPython.lib.pretty.RepresentationPrinter): pretty printer object cycle (bool): is ``True`` if pretty detected a cycle """ if cycle: p.text('Struct(...)') else: with p.group(7, 'Struct(', ')'): p.pretty(self._asdict())
[ "def", "_repr_pretty_", "(", "self", ",", "p", ",", "cycle", ")", ":", "if", "cycle", ":", "p", ".", "text", "(", "'Struct(...)'", ")", "else", ":", "with", "p", ".", "group", "(", "7", ",", "'Struct('", ",", "')'", ")", ":", "p", ".", "pretty", ...
method that defines ``Struct``'s pretty printing rules for iPython Args: p (IPython.lib.pretty.RepresentationPrinter): pretty printer object cycle (bool): is ``True`` if pretty detected a cycle
[ "method", "that", "defines", "Struct", "s", "pretty", "printing", "rules", "for", "iPython" ]
python
test
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L7889-L7903
def flexifunction_buffer_function_encode(self, target_system, target_component, func_index, func_count, data_address, data_size, data):
    '''
    Flexifunction type and parameters for component at function index from
    buffer.

    target_system     : System ID (uint8_t)
    target_component  : Component ID (uint8_t)
    func_index        : Function index (uint16_t)
    func_count        : Total count of functions (uint16_t)
    data_address      : Address in the flexifunction data, 0xFFFF to use
                        address in target memory (uint16_t)
    data_size         : Size of the data (uint16_t)
    data              : Settings data (int8_t)
    '''
    message = MAVLink_flexifunction_buffer_function_message(
        target_system,
        target_component,
        func_index,
        func_count,
        data_address,
        data_size,
        data,
    )
    return message
[ "def", "flexifunction_buffer_function_encode", "(", "self", ",", "target_system", ",", "target_component", ",", "func_index", ",", "func_count", ",", "data_address", ",", "data_size", ",", "data", ")", ":", "return", "MAVLink_flexifunction_buffer_function_message", "(", ...
Flexifunction type and parameters for component at function index from buffer target_system : System ID (uint8_t) target_component : Component ID (uint8_t) func_index : Function index (uint16_t) func_count : Total count of functions (uint16_t) data_address : Address in the flexifunction data, Set to 0xFFFF to use address in target memory (uint16_t) data_size : Size of the (uint16_t) data : Settings data (int8_t)
[ "Flexifunction", "type", "and", "parameters", "for", "component", "at", "function", "index", "from", "buffer" ]
python
train
cjdrake/pyeda
pyeda/boolalg/bfarray.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/bfarray.py#L1208-L1218
def _iter_coords(nsls):
    """Yield every coordinate tuple selected by a sequence of slices.

    Each element of *nsls* is either an int (treated as a single index)
    or a slice with concrete ``start``/``stop`` bounds.
    """
    # Normalize every int/slice into an equivalent range object.
    ranges = [
        range(nsl, nsl + 1) if isinstance(nsl, int)
        else range(nsl.start, nsl.stop)
        for nsl in nsls
    ]
    # Cartesian product enumerates all matching coordinates.
    yield from itertools.product(*ranges)
[ "def", "_iter_coords", "(", "nsls", ")", ":", "# First convert all slices to ranges", "ranges", "=", "list", "(", ")", "for", "nsl", "in", "nsls", ":", "if", "isinstance", "(", "nsl", ",", "int", ")", ":", "ranges", ".", "append", "(", "range", "(", "nsl...
Iterate through all matching coordinates in a sequence of slices.
[ "Iterate", "through", "all", "matching", "coordinates", "in", "a", "sequence", "of", "slices", "." ]
python
train
numenta/nupic
src/nupic/encoders/logarithm.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/logarithm.py#L162-L177
def encodeIntoArray(self, inpt, output): """ See the function description in base.py """ # Get the scaled value scaledVal = self._getScaledValue(inpt) if scaledVal is None: output[0:] = 0 else: self.encoder.encodeIntoArray(scaledVal, output) if self.verbosity >= 2: print "input:", inpt, "scaledVal:", scaledVal, "output:", output print "decoded:", self.decodedToStr(self.decode(output))
[ "def", "encodeIntoArray", "(", "self", ",", "inpt", ",", "output", ")", ":", "# Get the scaled value", "scaledVal", "=", "self", ".", "_getScaledValue", "(", "inpt", ")", "if", "scaledVal", "is", "None", ":", "output", "[", "0", ":", "]", "=", "0", "else...
See the function description in base.py
[ "See", "the", "function", "description", "in", "base", ".", "py" ]
python
valid
materialsproject/pymatgen
pymatgen/io/abinit/tasks.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L1536-L1539
def can_run(self):
    """The task can run if its status is < S_SUB and all the other dependencies (if any) are done!"""
    deps_done = all(status == self.S_OK for status in self.deps_status)
    not_yet_submitted = self.status < self.S_SUB
    not_locked = self.status != self.S_LOCKED
    return not_yet_submitted and not_locked and deps_done
[ "def", "can_run", "(", "self", ")", ":", "all_ok", "=", "all", "(", "stat", "==", "self", ".", "S_OK", "for", "stat", "in", "self", ".", "deps_status", ")", "return", "self", ".", "status", "<", "self", ".", "S_SUB", "and", "self", ".", "status", "...
The task can run if its status is < S_SUB and all the other dependencies (if any) are done!
[ "The", "task", "can", "run", "if", "its", "status", "is", "<", "S_SUB", "and", "all", "the", "other", "dependencies", "(", "if", "any", ")", "are", "done!" ]
python
train
jonathf/chaospy
chaospy/quad/collection/gauss_legendre.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/quad/collection/gauss_legendre.py#L94-L124
def _gauss_legendre(order, composite=1):
    """Backend function.

    Computes ``order + 1`` Gauss-Legendre nodes and weights on [0, 1] via
    the eigen-decomposition of the Jacobi matrix, then replicates and
    rescales them onto each sub-interval defined by *composite*.
    """
    # Jacobi matrix for the (shifted) Legendre recurrence.
    diag_main = numpy.ones(order + 1) * 0.5
    k2 = numpy.arange(order + 1) ** 2
    off = k2 / (16 * k2 - 4.)
    sub_diag = numpy.sqrt(off[1:])
    jacobi = numpy.diag(sub_diag, k=-1) + numpy.diag(diag_main) + \
        numpy.diag(sub_diag, k=1)

    # Golub-Welsch: eigenvalues are nodes, squared first eigenvector
    # components are weights.
    eigvals, eigvecs = numpy.linalg.eig(jacobi)
    nodes, wts = eigvals.real, eigvecs[0, :] ** 2
    order_idx = numpy.argsort(nodes)
    nodes, wts = nodes[order_idx], wts[order_idx]
    n_nodes = len(nodes)

    # Interior break points strictly inside (0, 1), deduplicated and sorted.
    segments = sorted(
        comp for comp in set(numpy.array(composite).flatten())
        if (comp < 1) and (comp > 0)
    )
    segments = [0] + segments + [1]

    abscissas = numpy.empty(n_nodes * (len(segments) - 1))
    weights = numpy.empty(n_nodes * (len(segments) - 1))
    for idx in range(len(segments) - 1):
        lo, hi = segments[idx], segments[idx + 1]
        span = slice(idx * n_nodes, (idx + 1) * n_nodes)
        abscissas[span] = nodes * (hi - lo) + lo
        weights[span] = wts * (hi - lo)

    return abscissas, weights
[ "def", "_gauss_legendre", "(", "order", ",", "composite", "=", "1", ")", ":", "inner", "=", "numpy", ".", "ones", "(", "order", "+", "1", ")", "*", "0.5", "outer", "=", "numpy", ".", "arange", "(", "order", "+", "1", ")", "**", "2", "outer", "=",...
Backend function.
[ "Backend", "function", "." ]
python
train
reiinakano/xcessiv
xcessiv/rqtasks.py
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/rqtasks.py#L175-L221
def start_automated_run(path, automated_run_id):
    """Starts automated run. This will automatically create
    base learners until the run finishes or errors out.

    Args:
        path (str): Path to Xcessiv notebook

        automated_run_id (str): Automated Run ID

    Raises:
        exceptions.UserError: if no automated run with the given id exists.
    """
    with functions.DBContextManager(path) as session:
        automated_run = session.query(models.AutomatedRun).filter_by(id=automated_run_id).first()
        if not automated_run:
            raise exceptions.UserError('Automated run {} '
                                       'does not exist'.format(automated_run_id))
        # Record the RQ worker job id and flag the run as started before
        # doing any real work, so the UI can observe progress.
        automated_run.job_id = get_current_job().id
        automated_run.job_status = 'started'
        session.add(automated_run)
        session.commit()
        try:
            # Dispatch on the run category to the matching search strategy.
            if automated_run.category == 'bayes':
                automatedruns.start_naive_bayes(automated_run, session, path)

            elif automated_run.category == 'tpot':
                automatedruns.start_tpot(automated_run, session, path)

            elif automated_run.category == 'greedy_ensemble_search':
                automatedruns.start_greedy_ensemble_search(automated_run, session, path)

            else:
                raise Exception('Something went wrong. Invalid category for automated run')

            automated_run.job_status = 'finished'
            session.add(automated_run)
            session.commit()

        except:
            # Roll back any half-finished transaction, then persist the error
            # details on the run record before re-raising so RQ also sees the
            # failure. (Bare except: even SystemExit/KeyboardInterrupt mark
            # the run as errored.)
            session.rollback()
            automated_run.job_status = 'errored'
            automated_run.description['error_type'] = repr(sys.exc_info()[0])
            automated_run.description['error_value'] = repr(sys.exc_info()[1])
            automated_run.description['error_traceback'] = \
                traceback.format_exception(*sys.exc_info())
            session.add(automated_run)
            session.commit()
            raise
[ "def", "start_automated_run", "(", "path", ",", "automated_run_id", ")", ":", "with", "functions", ".", "DBContextManager", "(", "path", ")", "as", "session", ":", "automated_run", "=", "session", ".", "query", "(", "models", ".", "AutomatedRun", ")", ".", "...
Starts automated run. This will automatically create base learners until the run finishes or errors out. Args: path (str): Path to Xcessiv notebook automated_run_id (str): Automated Run ID
[ "Starts", "automated", "run", ".", "This", "will", "automatically", "create", "base", "learners", "until", "the", "run", "finishes", "or", "errors", "out", "." ]
python
train
rocky/python-spark
spark_parser/spark.py
https://github.com/rocky/python-spark/blob/8899954bcf0e166726841a43e87c23790eb3441f/spark_parser/spark.py#L882-L918
def check_grammar(self, ok_start_symbols=set(), out=sys.stderr):
    '''
    Check grammar for:

    -  unused left-hand side nonterminals that are neither start symbols
       or listed in ok_start_symbols
    -  unused right-hand side nonterminals, i.e. not tokens
    -  right-recursive rules. These can slow down parsing.

    Returns the number of warning categories triggered; diagnostics are
    written to *out*.
    '''
    warning_count = 0
    lhs, rhs, tokens, right_recursive, dup_rhs = self.check_sets()

    unused_lhs = lhs - ok_start_symbols
    if unused_lhs:
        warning_count += 1
        out.write("LHS symbols not used on the RHS:\n")
        out.write(" " + (', '.join(sorted(lhs)) + "\n"))

    if rhs:
        warning_count += 1
        out.write("RHS symbols not used on the LHS:\n")
        out.write(', '.join(sorted(rhs)) + "\n")

    if right_recursive:
        warning_count += 1
        out.write("Right recursive rules:\n")
        for rule in sorted(right_recursive):
            out.write(" %s ::= %s\n" % (rule[0], ' '.join(rule[1])))

    if dup_rhs:
        warning_count += 1
        out.write("Nonterminals with the same RHS\n")
        for shared_rhs in sorted(dup_rhs.keys()):
            out.write(" RHS: %s\n" % ' '.join(shared_rhs))
            out.write(" LHS: %s\n" % ', '.join(dup_rhs[shared_rhs]))
            out.write(" ---\n")

    return warning_count
[ "def", "check_grammar", "(", "self", ",", "ok_start_symbols", "=", "set", "(", ")", ",", "out", "=", "sys", ".", "stderr", ")", ":", "warnings", "=", "0", "(", "lhs", ",", "rhs", ",", "tokens", ",", "right_recursive", ",", "dup_rhs", ")", "=", "self"...
Check grammar for: - unused left-hand side nonterminals that are neither start symbols or listed in ok_start_symbols - unused right-hand side nonterminals, i.e. not tokens - right-recursive rules. These can slow down parsing.
[ "Check", "grammar", "for", ":", "-", "unused", "left", "-", "hand", "side", "nonterminals", "that", "are", "neither", "start", "symbols", "or", "listed", "in", "ok_start_symbols", "-", "unused", "right", "-", "hand", "side", "nonterminals", "i", ".", "e", ...
python
train
MicroPyramid/django-blog-it
django_blog_it/django_blog_it/views.py
https://github.com/MicroPyramid/django-blog-it/blob/c18dd08e3ead8c932471547595e22e77e5858383/django_blog_it/django_blog_it/views.py#L463-L472
def recent_photos(request):
    '''
    Render the media browser populated with every uploaded image from the
    database, newest first.
    '''
    images = []
    queryset = Image_File.objects.filter(is_image=True).order_by("-date_created")
    for image_file in queryset:
        entry = {
            'src': "/" + image_file.upload.url,
            # Fall back to an empty thumb URL when no thumbnail exists.
            'thumb': ("/" + image_file.thumbnail.url) if image_file.thumbnail else "",
            'is_image': True,
        }
        images.append(entry)
    return render_to_response('dashboard/browse.html', {'files': images})
[ "def", "recent_photos", "(", "request", ")", ":", "imgs", "=", "[", "]", "for", "obj", "in", "Image_File", ".", "objects", ".", "filter", "(", "is_image", "=", "True", ")", ".", "order_by", "(", "\"-date_created\"", ")", ":", "upurl", "=", "\"/\"", "+"...
returns all the images from the data base
[ "returns", "all", "the", "images", "from", "the", "data", "base" ]
python
train
openstack/proliantutils
proliantutils/ilo/ribcl.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L420-L428
def insert_virtual_media(self, url, device='FLOPPY'):
    """Notifies iLO of the location of a virtual media diskette image.

    :param url: URL of the image to attach.
    :param device: virtual media device name; uppercased before sending.
    :returns: whatever the underlying RIBCL command returns.
    """
    payload = {
        'DEVICE': device.upper(),
        'IMAGE_URL': url,
    }
    return self._execute_command(
        'INSERT_VIRTUAL_MEDIA', 'RIB_INFO', 'write', payload)
[ "def", "insert_virtual_media", "(", "self", ",", "url", ",", "device", "=", "'FLOPPY'", ")", ":", "dic", "=", "{", "'DEVICE'", ":", "device", ".", "upper", "(", ")", ",", "'IMAGE_URL'", ":", "url", ",", "}", "data", "=", "self", ".", "_execute_command"...
Notifies iLO of the location of a virtual media diskette image.
[ "Notifies", "iLO", "of", "the", "location", "of", "a", "virtual", "media", "diskette", "image", "." ]
python
train
BD2KGenomics/toil-scripts
src/toil_scripts/exome_variant_pipeline/exome_variant_pipeline.py
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/exome_variant_pipeline/exome_variant_pipeline.py#L29-L43
def download_shared_files(job, samples, config):
    """
    Downloads files shared by all samples in the pipeline

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param Namespace config: Argparse Namespace object containing argument inputs
    :param list[list] samples: A nested list of samples containing sample information
    """
    job.fileStore.logToMaster('Downloaded shared files')
    shared = [
        ('reference', config.reference),
        ('phase', config.phase),
        ('mills', config.mills),
        ('dbsnp', config.dbsnp),
        ('cosmic', config.cosmic),
    ]
    for attr_name, attr_url in shared:
        # Only schedule a download for inputs the user actually supplied.
        if attr_url:
            vars(config)[attr_name] = job.addChildJobFn(download_url_job, url=attr_url).rv()
    job.addFollowOnJobFn(reference_preprocessing, samples, config)
[ "def", "download_shared_files", "(", "job", ",", "samples", ",", "config", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Downloaded shared files'", ")", "file_names", "=", "[", "'reference'", ",", "'phase'", ",", "'mills'", ",", "'dbsnp'", ",",...
Downloads files shared by all samples in the pipeline :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs :param list[list] samples: A nested list of samples containing sample information
[ "Downloads", "files", "shared", "by", "all", "samples", "in", "the", "pipeline" ]
python
train
brocade/pynos
pynos/versions/base/interface.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/interface.py#L2139-L2184
def port_channel_minimum_links(self, **kwargs):
    """Set minimum number of links in a port channel.

    Args:
        name (str): Port-channel number. (1, 5, etc)
        minimum_links (str): Minimum number of links in channel group.
        callback (function): A function executed upon completion of the
            method.  The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `name` or `minimum_links` is not specified.
        ValueError: if `name` is not a valid value.
    """
    # Required keyword arguments; pop() raises KeyError when missing.
    channel_name = str(kwargs.pop('name'))
    min_links = str(kwargs.pop('minimum_links'))
    callback = kwargs.pop('callback', self._callback)

    if not pynos.utilities.valid_interface('port_channel', channel_name):
        raise ValueError("`name` must match `^[0-9]{1,3}${1,3}$`")

    build_config = getattr(self._interface,
                           'interface_port_channel_minimum_links')
    config = build_config(name=channel_name, minimum_links=min_links)
    return callback(config)
[ "def", "port_channel_minimum_links", "(", "self", ",", "*", "*", "kwargs", ")", ":", "name", "=", "str", "(", "kwargs", ".", "pop", "(", "'name'", ")", ")", "minimum_links", "=", "str", "(", "kwargs", ".", "pop", "(", "'minimum_links'", ")", ")", "call...
Set minimum number of links in a port channel. Args: name (str): Port-channel number. (1, 5, etc) minimum_links (str): Minimum number of links in channel group. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `name` or `minimum_links` is not specified. ValueError: if `name` is not a valid value. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.port_channel_minimum_links( ... name='1', minimum_links='2') ... dev.interface.port_channel_minimum_links() ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): KeyError
[ "Set", "minimum", "number", "of", "links", "in", "a", "port", "channel", "." ]
python
train
saltstack/salt
salt/modules/mysql.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mysql.py#L742-L813
def file_query(database, file_name, **connection_args):
    '''
    Run an arbitrary SQL query from the specified file and return the
    the number of affected rows.

    .. versionadded:: 2017.7.0

    database

        database to run script inside

    file_name

        File name of the script.  This can be on the minion, or a file that
        is reachable by the fileserver

    CLI Example:

    .. code-block:: bash

        salt '*' mysql.file_query mydb file_name=/tmp/sqlfile.sql
        salt '*' mysql.file_query mydb file_name=salt://sqlfile.sql

    Return data:

    .. code-block:: python

        {'query time': {'human': '39.0ms', 'raw': '0.03899'}, 'rows affected': 1L}
    '''
    # Remote sources are cached to a local path through the fileserver first.
    if any(file_name.startswith(proto) for proto in ('salt://', 'http://', 'https://', 'swift://', 's3://')):
        file_name = __salt__['cp.cache_file'](file_name)

    if os.path.exists(file_name):
        with salt.utils.files.fopen(file_name, 'r') as ifile:
            contents = salt.utils.stringutils.to_unicode(ifile.read())
    else:
        log.error('File "%s" does not exist', file_name)
        return False

    query_string = ""
    # Accumulator for the aggregated results of every statement in the file.
    ret = {'rows returned': 0, 'columns': [], 'results': [], 'rows affected': 0, 'query time': {'raw': 0}}
    for line in contents.splitlines():
        if re.match(r'--', line):  # ignore sql comments
            continue
        if not re.search(r'[^-;]+;', line):  # keep appending lines that don't end in ;
            query_string = query_string + line
        else:
            query_string = query_string + line  # append lines that end with ; and run query
            query_result = query(database, query_string, **connection_args)
            query_string = ""

            if query_result is False:
                # Fail out on error
                return False

            # Fold this statement's result into the aggregate totals.
            if 'query time' in query_result:
                ret['query time']['raw'] += float(query_result['query time']['raw'])
            if 'rows returned' in query_result:
                ret['rows returned'] += query_result['rows returned']
            if 'columns' in query_result:
                ret['columns'].append(query_result['columns'])
            if 'results' in query_result:
                ret['results'].append(query_result['results'])
            if 'rows affected' in query_result:
                ret['rows affected'] += query_result['rows affected']

    ret['query time']['human'] = six.text_type(round(float(ret['query time']['raw']), 2)) + 's'
    ret['query time']['raw'] = round(float(ret['query time']['raw']), 5)

    # Remove empty keys in ret
    ret = {k: v for k, v in six.iteritems(ret) if v}

    return ret
[ "def", "file_query", "(", "database", ",", "file_name", ",", "*", "*", "connection_args", ")", ":", "if", "any", "(", "file_name", ".", "startswith", "(", "proto", ")", "for", "proto", "in", "(", "'salt://'", ",", "'http://'", ",", "'https://'", ",", "'s...
Run an arbitrary SQL query from the specified file and return the the number of affected rows. .. versionadded:: 2017.7.0 database database to run script inside file_name File name of the script. This can be on the minion, or a file that is reachable by the fileserver CLI Example: .. code-block:: bash salt '*' mysql.file_query mydb file_name=/tmp/sqlfile.sql salt '*' mysql.file_query mydb file_name=salt://sqlfile.sql Return data: .. code-block:: python {'query time': {'human': '39.0ms', 'raw': '0.03899'}, 'rows affected': 1L}
[ "Run", "an", "arbitrary", "SQL", "query", "from", "the", "specified", "file", "and", "return", "the", "the", "number", "of", "affected", "rows", "." ]
python
train
cgoldberg/sauceclient
sauceclient.py
https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L121-L126
def get_subaccounts(self):
    """Get a list of sub accounts associated with a parent account."""
    endpoint = '/rest/v1/users/{}/list-subaccounts'.format(
        self.client.sauce_username)
    return self.client.request('GET', endpoint)
[ "def", "get_subaccounts", "(", "self", ")", ":", "method", "=", "'GET'", "endpoint", "=", "'/rest/v1/users/{}/list-subaccounts'", ".", "format", "(", "self", ".", "client", ".", "sauce_username", ")", "return", "self", ".", "client", ".", "request", "(", "meth...
Get a list of sub accounts associated with a parent account.
[ "Get", "a", "list", "of", "sub", "accounts", "associated", "with", "a", "parent", "account", "." ]
python
train
cloud-custodian/cloud-custodian
tools/sandbox/c7n_sphere11/c7n_sphere11/wsgigw.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/sandbox/c7n_sphere11/c7n_sphere11/wsgigw.py#L25-L53
def create_gw_response(app, wsgi_env):
    """Create an api gw response from a wsgi app and environ.

    Runs *app* against *wsgi_env*, collecting both write()-style chunks and
    the returned iterable into the body, and returns a dict with ``body``,
    ``statusCode`` and ``headers`` as API Gateway expects. Content-Length
    and Content-Type are defaulted when the app did not set them.
    """
    status_and_headers = []
    chunks = []

    def start_response(status, headers, exc_info=None):
        status_and_headers[:] = [status, headers]
        # Per WSGI, return a write() callable for legacy apps.
        return chunks.append

    app_iter = app(wsgi_env, start_response)
    closer = getattr(app_iter, 'close', None)
    try:
        chunks.extend(app_iter)
    finally:
        if closer:
            closer()

    body = ''.join(chunks)
    headers = {name: value for name, value in status_and_headers[1]}
    headers.setdefault('Content-Length', str(len(body)))
    headers.setdefault('Content-Type', 'text/plain')
    return {
        'body': body,
        'statusCode': status_and_headers[0].split(' ', 1)[0],
        'headers': headers,
    }
[ "def", "create_gw_response", "(", "app", ",", "wsgi_env", ")", ":", "response", "=", "{", "}", "buf", "=", "[", "]", "result", "=", "[", "]", "def", "start_response", "(", "status", ",", "headers", ",", "exc_info", "=", "None", ")", ":", "result", "[...
Create an api gw response from a wsgi app and environ.
[ "Create", "an", "api", "gw", "response", "from", "a", "wsgi", "app", "and", "environ", "." ]
python
train
noxdafox/clipspy
clips/facts.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/facts.py#L67-L74
def facts(self):
    """Iterate over the asserted Facts.

    Walks the CLIPS environment's fact list, starting from NULL, and
    wraps each native fact pointer before yielding it.
    """
    current = lib.EnvGetNextFact(self._env, ffi.NULL)
    while current != ffi.NULL:
        yield new_fact(self._env, current)
        current = lib.EnvGetNextFact(self._env, current)
[ "def", "facts", "(", "self", ")", ":", "fact", "=", "lib", ".", "EnvGetNextFact", "(", "self", ".", "_env", ",", "ffi", ".", "NULL", ")", "while", "fact", "!=", "ffi", ".", "NULL", ":", "yield", "new_fact", "(", "self", ".", "_env", ",", "fact", ...
Iterate over the asserted Facts.
[ "Iterate", "over", "the", "asserted", "Facts", "." ]
python
train
lrq3000/pyFileFixity
pyFileFixity/header_ecc.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/header_ecc.py#L174-L205
def ecc_correct_intra(ecc_manager_intra, ecc_params_intra, field, ecc, enable_erasures=False, erasures_char="\x00", only_erasures=False):
    """ Correct an intra-field with its corresponding intra-ecc if necessary

    Splits *field* into blocks paired with their ecc codes, verifies each
    block, attempts Reed-Solomon repair of corrupted blocks, and returns a
    4-tuple ``(field, fcorrupted, fcorrected, errmsg)``.

    NOTE: this is Python 2 code (``except X, e:`` syntax below).
    """
    fentry_fields = {"ecc_field": ecc}
    field_correct = []  # will store each block of the corrected (or already correct) filepath
    fcorrupted = False  # check if field was corrupted
    fcorrected = True  # check if field was corrected (if it was corrupted)
    errmsg = ''
    # Decode each block of the filepath
    for e in entry_assemble(fentry_fields, ecc_params_intra, len(field), '', field):
        # Check if this block of the filepath is OK, if yes then we just copy it over
        if ecc_manager_intra.check(e["message"], e["ecc"]):
            field_correct.append(e["message"])
        else:  # Else this block is corrupted, we will try to fix it using the ecc
            fcorrupted = True
            # Repair the message block and the ecc
            try:
                repaired_block, repaired_ecc = ecc_manager_intra.decode(e["message"], e["ecc"], enable_erasures=enable_erasures, erasures_char=erasures_char, only_erasures=only_erasures)
            except (ReedSolomonError, RSCodecError), exc:  # the reedsolo lib may raise an exception when it can't decode. We ensure that we can still continue to decode the rest of the file, and the other files.
                repaired_block = None
                repaired_ecc = None
                # NOTE(review): `entry_pos` is not defined anywhere in this
                # function — if decoding raises, this line looks like a latent
                # NameError; confirm whether it is meant to be a global or a
                # leftover from a sibling function.
                errmsg += "- Error: metadata field at offset %i: %s\n" % (entry_pos[0], exc)
            # Check if the block was successfully repaired: if yes then we copy the repaired block...
            if repaired_block is not None and ecc_manager_intra.check(repaired_block, repaired_ecc):
                field_correct.append(repaired_block)
            else:  # ... else it failed, then we copy the original corrupted block and report an error later
                field_correct.append(e["message"])
                fcorrected = False
    # Join all the blocks into one string to build the final filepath
    if isinstance(field_correct[0], bytearray):
        field_correct = [str(x) for x in field_correct]  # workaround when using --ecc_algo 3 or 4, because we get a list of bytearrays instead of str
    field = ''.join(field_correct)
    # Report errors
    return (field, fcorrupted, fcorrected, errmsg)
[ "def", "ecc_correct_intra", "(", "ecc_manager_intra", ",", "ecc_params_intra", ",", "field", ",", "ecc", ",", "enable_erasures", "=", "False", ",", "erasures_char", "=", "\"\\x00\"", ",", "only_erasures", "=", "False", ")", ":", "fentry_fields", "=", "{", "\"ecc...
Correct an intra-field with its corresponding intra-ecc if necessary
[ "Correct", "an", "intra", "-", "field", "with", "its", "corresponding", "intra", "-", "ecc", "if", "necessary" ]
python
train
django-py/django-doberman
doberman/models.py
https://github.com/django-py/django-doberman/blob/2e5959737a1b64234ed5a179c93f96a0de1c3e5c/doberman/models.py#L50-L70
def get_last_failed_access_attempt(cls, **kwargs):
    """
    Return Failed access attempt of Client
    :param ip_adress: String
    :return: the matching lockout record, or None when absent or expired
    """
    try:
        lockout = cls.objects.get(**kwargs)
    except cls.DoesNotExist:
        return None

    if lockout:
        remaining = lockout.expiration_time
        # Expired lockouts are flagged and hidden from the caller.
        if remaining and remaining <= 0:
            lockout.is_expired = True
            lockout.save()
            return None

    return lockout
[ "def", "get_last_failed_access_attempt", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "try", ":", "lockout", "=", "cls", ".", "objects", ".", "get", "(", "*", "*", "kwargs", ")", "except", "cls", ".", "DoesNotExist", ":", "lockout", "=", "None", "if"...
Return Failed access attempt of Client :param ip_adress: String :return:
[ "Return", "Failed", "access", "attempt", "of", "Client", ":", "param", "ip_adress", ":", "String", ":", "return", ":" ]
python
train
a1ezzz/wasp-general
wasp_general/network/web/cookies.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/web/cookies.py#L145-L157
def __attr_name(self, name):
    """ Return suitable and valid attribute name. This method replaces underscore with dash
    and lowercases the name when the given spelling is not already known.
    If name is invalid, a ValueError exception is raised.

    :param name: cookie attribute name
    :return: str
    """
    known_names = self.cookie_attr_value_compliance.keys()
    if name in known_names:
        return name
    candidate = name.replace('_', '-').lower()
    if candidate not in known_names:
        raise ValueError('Invalid attribute name is specified')
    return candidate
[ "def", "__attr_name", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "cookie_attr_value_compliance", ".", "keys", "(", ")", ":", "suggested_name", "=", "name", ".", "replace", "(", "'_'", ",", "'-'", ")", ".", "lower", "(",...
Return suitable and valid attribute name. This method replaces dash char to underscore. If name is invalid ValueError exception is raised :param name: cookie attribute name :return: str
[ "Return", "suitable", "and", "valid", "attribute", "name", ".", "This", "method", "replaces", "dash", "char", "to", "underscore", ".", "If", "name", "is", "invalid", "ValueError", "exception", "is", "raised" ]
python
train
sendwithus/sendwithus_python
sendwithus/__init__.py
https://github.com/sendwithus/sendwithus_python/blob/8ae50d514febd44f7d9be3c838b4d92f99412832/sendwithus/__init__.py#L142-L160
def _parse_response(self, response):
    """Parses the API response and raises appropriate errors if
    raise_errors was set to True; otherwise returns the response as-is.
    """
    if not self._raise_errors:
        return response

    # Read the payload once so every error path reports the same content.
    content = response.content
    status = str(response.status_code)

    if response.status_code == 403:
        raise AuthenticationError(content)
    if status.startswith('4'):
        raise APIError(content)
    if status.startswith('5'):
        raise ServerError(content)

    return response
[ "def", "_parse_response", "(", "self", ",", "response", ")", ":", "if", "not", "self", ".", "_raise_errors", ":", "return", "response", "is_4xx_error", "=", "str", "(", "response", ".", "status_code", ")", "[", "0", "]", "==", "'4'", "is_5xx_error", "=", ...
Parses the API response and raises appropriate errors if raise_errors was set to True
[ "Parses", "the", "API", "response", "and", "raises", "appropriate", "errors", "if", "raise_errors", "was", "set", "to", "True" ]
python
valid