Dataset schema (column name, dtype, observed lengths/values):

    repo              string    7 to 54 chars
    path              string    4 to 192 chars
    url               string    87 to 284 chars
    code              string    78 to 104k chars
    code_tokens       sequence
    docstring         string    1 to 46.9k chars
    docstring_tokens  sequence
    language          string    1 distinct value
    partition         string    3 distinct values
dhermes/bezier
scripts/check_doc_templates.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/scripts/check_doc_templates.py#L229-L242
def doc_replace(match, sphinx_docs):
    """Convert Sphinx ``:doc:`` to plain reST link.

    Args:
        match (_sre.SRE_Match): A match (from ``re``) to be used in
            substitution.
        sphinx_docs (list): List used to track the documents that have
            been encountered.

    Returns:
        str: The ``match`` converted to a link.
    """
    sphinx_docs.append(match.group("path"))
    return "`{}`_".format(match.group("value"))
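A minimal usage sketch; the regex below is a hypothetical stand-in with the ``value`` and ``path`` named groups that doc_replace expects (check_doc_templates.py defines its own pattern):

    import re

    # Hypothetical pattern; only the "value" and "path" group names matter here.
    DOC_RE = re.compile(r":doc:`(?P<value>[^<`]+?)\s*<(?P<path>[^>`]+)>`")

    sphinx_docs = []
    text = "See :doc:`Quickstart <quickstart>` for details."
    converted = DOC_RE.sub(lambda m: doc_replace(m, sphinx_docs), text)
    print(converted)    # See `Quickstart`_ for details.
    print(sphinx_docs)  # ['quickstart']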
[ "def", "doc_replace", "(", "match", ",", "sphinx_docs", ")", ":", "sphinx_docs", ".", "append", "(", "match", ".", "group", "(", "\"path\"", ")", ")", "return", "\"`{}`_\"", ".", "format", "(", "match", ".", "group", "(", "\"value\"", ")", ")" ]
Convert Sphinx ``:doc:`` to plain reST link.

Args:
    match (_sre.SRE_Match): A match (from ``re``) to be used in
        substitution.
    sphinx_docs (list): List used to track the documents that have
        been encountered.

Returns:
    str: The ``match`` converted to a link.
[ "Convert", "Sphinx", ":", "doc", ":", "to", "plain", "reST", "link", "." ]
python
train
bwohlberg/sporco
sporco/admm/ccmodmd.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/ccmodmd.py#L978-L1032
def compute_residuals(self):
    """Compute residuals and stopping thresholds. The parent class
    method is overridden to ensure that the residual calculations
    include the additional variables introduced in the modification
    to the baseline algorithm.
    """

    # The full primary residual is straightforward to compute from
    # the primary residuals for the baseline algorithm and for the
    # additional variables
    r0 = self.rsdl_r(self.AXnr, self.Y)
    r1 = self.AX1nr - self.Y1 - self.S
    r = np.sqrt(np.sum(r0**2) + np.sum(r1**2))

    # The full dual residual is more complicated to compute than the
    # full primary residual
    ATU = self.swapaxes(self.U) + sl.irfftn(
        np.conj(self.Zf) * sl.rfftn(self.U1, self.cri.Nv, self.cri.axisN),
        self.cri.Nv, self.cri.axisN)
    s = self.rho * np.linalg.norm(ATU)

    # The normalisation factor for the full primal residual is also not
    # straightforward
    nAX = np.sqrt(np.linalg.norm(self.AXnr)**2 +
                  np.linalg.norm(self.AX1nr)**2)
    nY = np.sqrt(np.linalg.norm(self.Y)**2 +
                 np.linalg.norm(self.Y1)**2)
    rn = max(nAX, nY, np.linalg.norm(self.S))

    # The normalisation factor for the full dual residual is
    # straightforward to compute
    sn = self.rho * np.sqrt(np.linalg.norm(self.U)**2 +
                            np.linalg.norm(self.U1)**2)

    # Final residual values and stopping tolerances depend on
    # whether standard or normalised residuals are specified via the
    # options object
    if self.opt['AutoRho', 'StdResiduals']:
        epri = np.sqrt(self.Nc)*self.opt['AbsStopTol'] + \
            rn*self.opt['RelStopTol']
        edua = np.sqrt(self.Nx)*self.opt['AbsStopTol'] + \
            sn*self.opt['RelStopTol']
    else:
        if rn == 0.0:
            rn = 1.0
        if sn == 0.0:
            sn = 1.0
        r /= rn
        s /= sn
        epri = np.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \
            self.opt['RelStopTol']
        edua = np.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \
            self.opt['RelStopTol']

    return r, s, epri, edua
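For orientation: these quantities instantiate the standard ADMM stopping criteria of Boyd et al. (2011), with the residuals of the two variable blocks stacked before taking norms. In the usual notation the tolerances are

    \epsilon^{\mathrm{pri}}  = \sqrt{p}\,\epsilon^{\mathrm{abs}}
        + \epsilon^{\mathrm{rel}} \max\{\lVert A x^k \rVert_2,\ \lVert B z^k \rVert_2,\ \lVert c \rVert_2\}
    \epsilon^{\mathrm{dual}} = \sqrt{n}\,\epsilon^{\mathrm{abs}}
        + \epsilon^{\mathrm{rel}}\,\lVert \rho A^{\mathsf{T}} u^k \rVert_2

where sqrt(Nc) and sqrt(Nx) in the code play the roles of sqrt(p) and sqrt(n). Reading AbsStopTol/RelStopTol as epsilon_abs/epsilon_rel is our interpretation of this snippet, not a statement from the SPORCO docs.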
[ "def", "compute_residuals", "(", "self", ")", ":", "# The full primary residual is straightforward to compute from", "# the primary residuals for the baseline algorithm and for the", "# additional variables", "r0", "=", "self", ".", "rsdl_r", "(", "self", ".", "AXnr", ",", "self", ".", "Y", ")", "r1", "=", "self", ".", "AX1nr", "-", "self", ".", "Y1", "-", "self", ".", "S", "r", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "r0", "**", "2", ")", "+", "np", ".", "sum", "(", "r1", "**", "2", ")", ")", "# The full dual residual is more complicated to compute than the", "# full primary residual", "ATU", "=", "self", ".", "swapaxes", "(", "self", ".", "U", ")", "+", "sl", ".", "irfftn", "(", "np", ".", "conj", "(", "self", ".", "Zf", ")", "*", "sl", ".", "rfftn", "(", "self", ".", "U1", ",", "self", ".", "cri", ".", "Nv", ",", "self", ".", "cri", ".", "axisN", ")", ",", "self", ".", "cri", ".", "Nv", ",", "self", ".", "cri", ".", "axisN", ")", "s", "=", "self", ".", "rho", "*", "np", ".", "linalg", ".", "norm", "(", "ATU", ")", "# The normalisation factor for the full primal residual is also not", "# straightforward", "nAX", "=", "np", ".", "sqrt", "(", "np", ".", "linalg", ".", "norm", "(", "self", ".", "AXnr", ")", "**", "2", "+", "np", ".", "linalg", ".", "norm", "(", "self", ".", "AX1nr", ")", "**", "2", ")", "nY", "=", "np", ".", "sqrt", "(", "np", ".", "linalg", ".", "norm", "(", "self", ".", "Y", ")", "**", "2", "+", "np", ".", "linalg", ".", "norm", "(", "self", ".", "Y1", ")", "**", "2", ")", "rn", "=", "max", "(", "nAX", ",", "nY", ",", "np", ".", "linalg", ".", "norm", "(", "self", ".", "S", ")", ")", "# The normalisation factor for the full dual residual is", "# straightforward to compute", "sn", "=", "self", ".", "rho", "*", "np", ".", "sqrt", "(", "np", ".", "linalg", ".", "norm", "(", "self", ".", "U", ")", "**", "2", "+", "np", ".", "linalg", ".", "norm", "(", "self", ".", "U1", ")", "**", "2", ")", "# Final residual values and stopping tolerances depend on", "# whether standard or normalised residuals are specified via the", "# options object", "if", "self", ".", "opt", "[", "'AutoRho'", ",", "'StdResiduals'", "]", ":", "epri", "=", "np", ".", "sqrt", "(", "self", ".", "Nc", ")", "*", "self", ".", "opt", "[", "'AbsStopTol'", "]", "+", "rn", "*", "self", ".", "opt", "[", "'RelStopTol'", "]", "edua", "=", "np", ".", "sqrt", "(", "self", ".", "Nx", ")", "*", "self", ".", "opt", "[", "'AbsStopTol'", "]", "+", "sn", "*", "self", ".", "opt", "[", "'RelStopTol'", "]", "else", ":", "if", "rn", "==", "0.0", ":", "rn", "=", "1.0", "if", "sn", "==", "0.0", ":", "sn", "=", "1.0", "r", "/=", "rn", "s", "/=", "sn", "epri", "=", "np", ".", "sqrt", "(", "self", ".", "Nc", ")", "*", "self", ".", "opt", "[", "'AbsStopTol'", "]", "/", "rn", "+", "self", ".", "opt", "[", "'RelStopTol'", "]", "edua", "=", "np", ".", "sqrt", "(", "self", ".", "Nx", ")", "*", "self", ".", "opt", "[", "'AbsStopTol'", "]", "/", "sn", "+", "self", ".", "opt", "[", "'RelStopTol'", "]", "return", "r", ",", "s", ",", "epri", ",", "edua" ]
Compute residuals and stopping thresholds. The parent class method is overridden to ensure that the residual calculations include the additional variables introduced in the modification to the baseline algorithm.
[ "Compute", "residuals", "and", "stopping", "thresholds", ".", "The", "parent", "class", "method", "is", "overridden", "to", "ensure", "that", "the", "residual", "calculations", "include", "the", "additional", "variables", "introduced", "in", "the", "modification", "to", "the", "baseline", "algorithm", "." ]
python
train
qubell/contrib-python-qubell-client
qubell/api/private/platform.py
https://github.com/qubell/contrib-python-qubell-client/blob/4586ea11d5103c2ff9607d3ed922b5a0991b8845/qubell/api/private/platform.py#L62-L72
def connect_to_another_user(self, user, password, token=None, is_public=False):
    """
    Authenticates a user within the same tenant as the current platform
    and returns a new platform for that user.
    :rtype: QubellPlatform
    :param str user: user email
    :param str password: user password
    :param str token: session token
    :param bool is_public: whether to use the public or private api (public is not fully supported; use with caution)
    :return: New Platform instance
    """
    return QubellPlatform.connect(self._router.base_url, user, password, token, is_public)
[ "def", "connect_to_another_user", "(", "self", ",", "user", ",", "password", ",", "token", "=", "None", ",", "is_public", "=", "False", ")", ":", "return", "QubellPlatform", ".", "connect", "(", "self", ".", "_router", ".", "base_url", ",", "user", ",", "password", ",", "token", ",", "is_public", ")" ]
Authenticates a user within the same tenant as the current platform
and returns a new platform for that user.
:rtype: QubellPlatform
:param str user: user email
:param str password: user password
:param str token: session token
:param bool is_public: whether to use the public or private api (public is not fully supported; use with caution)
:return: New Platform instance
[ "Authenticates", "user", "with", "the", "same", "tenant", "as", "current", "platform", "using", "and", "returns", "new", "platform", "to", "user", ".", ":", "rtype", ":", "QubellPlatform", ":", "param", "str", "user", ":", "user", "email", ":", "param", "str", "password", ":", "user", "password", ":", "param", "str", "token", ":", "session", "token", ":", "param", "bool", "is_public", ":", "either", "to", "use", "public", "or", "private", "api", "(", "public", "is", "not", "fully", "supported", "use", "with", "caution", ")", ":", "return", ":", "New", "Platform", "instance" ]
python
train
PedalPi/Application
application/controller/current_controller.py
https://github.com/PedalPi/Application/blob/3fdf6f97cfef97a7f1d90a5881dd04324c229f9d/application/controller/current_controller.py#L217-L247
def set_bank(self, bank, try_preserve_index=False):
    """
    Set the current :class:`Bank` only if ``bank != current_bank``.

    The current pedalboard will be the first pedalboard of the new
    current bank **if it contains any pedalboard**, else will be
    ``None``.

    .. warning::

        If the current :attr:`.pedalboard` is ``None``, a
        :class:`.CurrentPedalboardError` is raised.

    :param Bank bank: Bank that will be the current
    :param bool try_preserve_index: Tries to preserve the index of the
        current pedalboard when changing the bank. That is, if the
        current pedalboard is the fifth, when updating the bank, it
        will attempt to place the fifth pedalboard of the new bank as
        the current one. If it cannot (``len(bank.pedalboards) < 6``),
        the current pedalboard will be the first pedalboard.
    """
    if bank not in self._manager:
        raise CurrentPedalboardError('Bank {} has not added in banks manager'.format(bank))

    if self.bank == bank:
        return

    if bank.pedalboards:
        pedalboard = self._equivalent_pedalboard(bank) if try_preserve_index else bank.pedalboards[0]
        self.set_pedalboard(pedalboard)
    else:
        self.set_pedalboard(None)
[ "def", "set_bank", "(", "self", ",", "bank", ",", "try_preserve_index", "=", "False", ")", ":", "if", "bank", "not", "in", "self", ".", "_manager", ":", "raise", "CurrentPedalboardError", "(", "'Bank {} has not added in banks manager'", ".", "format", "(", "bank", ")", ")", "if", "self", ".", "bank", "==", "bank", ":", "return", "if", "bank", ".", "pedalboards", ":", "pedalboard", "=", "self", ".", "_equivalent_pedalboard", "(", "bank", ")", "if", "try_preserve_index", "else", "bank", ".", "pedalboards", "[", "0", "]", "self", ".", "set_pedalboard", "(", "pedalboard", ")", "else", ":", "self", ".", "set_pedalboard", "(", "None", ")" ]
Set the current :class:`Bank` only if ``bank != current_bank``.

The current pedalboard will be the first pedalboard of the new current
bank **if it contains any pedalboard**, else will be ``None``.

.. warning::

    If the current :attr:`.pedalboard` is ``None``, a
    :class:`.CurrentPedalboardError` is raised.

:param Bank bank: Bank that will be the current
:param bool try_preserve_index: Tries to preserve the index of the
    current pedalboard when changing the bank. That is, if the current
    pedalboard is the fifth, when updating the bank, it will attempt to
    place the fifth pedalboard of the new bank as the current one. If it
    cannot (``len(bank.pedalboards) < 6``), the current pedalboard will
    be the first pedalboard.
[ "Set", "the", "current", ":", "class", ":", "Bank", "for", "the", "bank", "only", "if", "the", "bank", "!", "=", "current_bank" ]
python
train
HewlettPackard/python-hpOneView
hpOneView/image_streamer/resources/golden_images.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/image_streamer/resources/golden_images.py#L108-L121
def download_archive(self, id_or_uri, file_path):
    """
    Download the details of the Golden Image capture logs, which have
    been archived based on the specific attribute ID.

    Args:
        id_or_uri: ID or URI of the Golden Image.
        file_path (str): File name to save the archive.

    Returns:
        bool: Success.
    """
    uri = self.URI + "/archive/" + extract_id_from_uri(id_or_uri)

    return self._client.download(uri, file_path)
[ "def", "download_archive", "(", "self", ",", "id_or_uri", ",", "file_path", ")", ":", "uri", "=", "self", ".", "URI", "+", "\"/archive/\"", "+", "extract_id_from_uri", "(", "id_or_uri", ")", "return", "self", ".", "_client", ".", "download", "(", "uri", ",", "file_path", ")" ]
Download the details of the Golden Image capture logs, which have been
archived based on the specific attribute ID.

Args:
    id_or_uri: ID or URI of the Golden Image.
    file_path (str): File name to save the archive.

Returns:
    bool: Success.
[ "Download", "the", "details", "of", "the", "Golden", "Image", "capture", "logs", "which", "has", "been", "archived", "based", "on", "the", "specific", "attribute", "ID", "." ]
python
train
sosy-lab/benchexec
benchexec/util.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/util.py#L174-L190
def parse_int_list(s):
    """
    Parse a comma-separated list of strings.
    The list may additionally contain ranges such as "1-5",
    which will be expanded into "1,2,3,4,5".
    """
    result = []
    for item in s.split(','):
        item = item.strip().split('-')
        if len(item) == 1:
            result.append(int(item[0]))
        elif len(item) == 2:
            start, end = item
            result.extend(range(int(start), int(end)+1))
        else:
            raise ValueError("invalid range: '{0}'".format(s))
    return result
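A quick sketch of the expansion behaviour described in the docstring:

    assert parse_int_list("1,3") == [1, 3]
    assert parse_int_list("1-3, 7") == [1, 2, 3, 7]
    # parse_int_list("1-2-3") raises ValueError: invalid range: '1-2-3'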
[ "def", "parse_int_list", "(", "s", ")", ":", "result", "=", "[", "]", "for", "item", "in", "s", ".", "split", "(", "','", ")", ":", "item", "=", "item", ".", "strip", "(", ")", ".", "split", "(", "'-'", ")", "if", "len", "(", "item", ")", "==", "1", ":", "result", ".", "append", "(", "int", "(", "item", "[", "0", "]", ")", ")", "elif", "len", "(", "item", ")", "==", "2", ":", "start", ",", "end", "=", "item", "result", ".", "extend", "(", "range", "(", "int", "(", "start", ")", ",", "int", "(", "end", ")", "+", "1", ")", ")", "else", ":", "raise", "ValueError", "(", "\"invalid range: '{0}'\"", ".", "format", "(", "s", ")", ")", "return", "result" ]
Parse a comma-separated list of strings. The list may additionally contain ranges such as "1-5", which will be expanded into "1,2,3,4,5".
[ "Parse", "a", "comma", "-", "separated", "list", "of", "strings", ".", "The", "list", "may", "additionally", "contain", "ranges", "such", "as", "1", "-", "5", "which", "will", "be", "expanded", "into", "1", "2", "3", "4", "5", "." ]
python
train
RonenNess/Fileter
fileter/files_iterator.py
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L121-L130
def add_filter(self, files_filter, filter_type=DefaultFilterType):
    """
    Add a files filter to this iterator.
    For a file to be processed, it must match ALL filters, i.e. they
    are combined with AND, not OR.

    :param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
    :param filter_type: filter behavior, see FilterType for details.
    """
    self.__filters.append((files_filter, filter_type))
    return self
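Because the method returns ``self``, filters can be chained fluently. A sketch of that style; the class and filter names below are hypothetical stand-ins, not names taken from the fileter API:

    # Hypothetical names, used only to illustrate chaining on the returned self.
    iterator = (FilesIterator()
                .add_filter(ExtensionFilter(".py"))
                .add_filter(NotHiddenFilter()))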
[ "def", "add_filter", "(", "self", ",", "files_filter", ",", "filter_type", "=", "DefaultFilterType", ")", ":", "self", ".", "__filters", ".", "append", "(", "(", "files_filter", ",", "filter_type", ")", ")", "return", "self" ]
Add a files filter to this iterator.
For a file to be processed, it must match ALL filters, i.e. they are
combined with AND, not OR.

:param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
:param filter_type: filter behavior, see FilterType for details.
[ "Add", "a", "files", "filter", "to", "this", "iterator", ".", "For", "a", "file", "to", "be", "processed", "it", "must", "match", "ALL", "filters", "eg", "they", "are", "added", "with", "ADD", "not", "OR", "." ]
python
train
Shizmob/pydle
pydle/features/rfc1459/client.py
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/rfc1459/client.py#L974-L978
async def on_raw_422(self, message):
    """ MOTD is missing. """
    await self._registration_completed(message)
    self.motd = None
    await self.on_connect()
[ "async", "def", "on_raw_422", "(", "self", ",", "message", ")", ":", "await", "self", ".", "_registration_completed", "(", "message", ")", "self", ".", "motd", "=", "None", "await", "self", ".", "on_connect", "(", ")" ]
MOTD is missing.
[ "MOTD", "is", "missing", "." ]
python
train
saltstack/salt
salt/modules/keystone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/keystone.py#L867-L907
def project_update(project_id=None, name=None, description=None,
                   enabled=None, profile=None, **connection_args):
    '''
    Update a tenant's information (keystone project-update)
    The following fields may be updated: name, description, enabled.
    Can only update name if targeting by ID

    Overrides keystone tenant_update from api V2. For keystone api V3 only.

    .. versionadded:: 2016.11.0

    project_id
        The project id.

    name
        The project name, which must be unique within the owning domain.

    description
        The project description.

    enabled
        Enables or disables the project.

    profile
        Configuration profile - if configuration for multiple openstack
        accounts required.

    CLI Examples:

    .. code-block:: bash

        salt '*' keystone.project_update name=admin enabled=True
        salt '*' keystone.project_update c965f79c4f864eaaa9c3b41904e67082 name=admin email=admin@domain.com
    '''
    auth(profile, **connection_args)

    if _OS_IDENTITY_API_VERSION > 2:
        return tenant_update(tenant_id=project_id,
                             name=name,
                             description=description,
                             enabled=enabled,
                             profile=profile,
                             **connection_args)
    else:
        return False
[ "def", "project_update", "(", "project_id", "=", "None", ",", "name", "=", "None", ",", "description", "=", "None", ",", "enabled", "=", "None", ",", "profile", "=", "None", ",", "*", "*", "connection_args", ")", ":", "auth", "(", "profile", ",", "*", "*", "connection_args", ")", "if", "_OS_IDENTITY_API_VERSION", ">", "2", ":", "return", "tenant_update", "(", "tenant_id", "=", "project_id", ",", "name", "=", "name", ",", "description", "=", "description", ",", "enabled", "=", "enabled", ",", "profile", "=", "profile", ",", "*", "*", "connection_args", ")", "else", ":", "return", "False" ]
Update a tenant's information (keystone project-update)
The following fields may be updated: name, description, enabled.
Can only update name if targeting by ID

Overrides keystone tenant_update from api V2. For keystone api V3 only.

.. versionadded:: 2016.11.0

project_id
    The project id.

name
    The project name, which must be unique within the owning domain.

description
    The project description.

enabled
    Enables or disables the project.

profile
    Configuration profile - if configuration for multiple openstack
    accounts required.

CLI Examples:

.. code-block:: bash

    salt '*' keystone.project_update name=admin enabled=True
    salt '*' keystone.project_update c965f79c4f864eaaa9c3b41904e67082 name=admin email=admin@domain.com
[ "Update", "a", "tenant", "s", "information", "(", "keystone", "project", "-", "update", ")", "The", "following", "fields", "may", "be", "updated", ":", "name", "description", "enabled", ".", "Can", "only", "update", "name", "if", "targeting", "by", "ID" ]
python
train
darkfeline/mir.anidb
mir/anidb/api.py
https://github.com/darkfeline/mir.anidb/blob/a0d25908f85fb1ff4bc595954bfc3f223f1b5acc/mir/anidb/api.py#L65-L68
def _check_for_errors(etree: ET.ElementTree):
    """Check AniDB response XML tree for errors."""
    if etree.getroot().tag == 'error':
        raise APIError(etree.getroot().text)
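A minimal sketch of the error path, using the standard library's ElementTree and assuming ``APIError`` is defined as in mir/anidb/api.py:

    import xml.etree.ElementTree as ET

    tree = ET.ElementTree(ET.fromstring("<error>Banned</error>"))
    try:
        _check_for_errors(tree)
    except APIError as exc:
        print(exc)  # Banned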
[ "def", "_check_for_errors", "(", "etree", ":", "ET", ".", "ElementTree", ")", ":", "if", "etree", ".", "getroot", "(", ")", ".", "tag", "==", "'error'", ":", "raise", "APIError", "(", "etree", ".", "getroot", "(", ")", ".", "text", ")" ]
Check AniDB response XML tree for errors.
[ "Check", "AniDB", "response", "XML", "tree", "for", "errors", "." ]
python
train
davedoesdev/python-jwt
bench/generate_token_bench.py
https://github.com/davedoesdev/python-jwt/blob/5c753a26955cc666f00f6ff8e601406d95071368/bench/generate_token_bench.py#L24-L30
def make_bench_generate_token(alg):
    """ Return function which will generate token for particular algorithm """
    def f(_):
        """ Generate token """
        privk = priv_keys[alg].get('default', priv_key)
        jwt.generate_jwt(payload, privk, alg, timedelta(seconds=5))
    return f
[ "def", "make_bench_generate_token", "(", "alg", ")", ":", "def", "f", "(", "_", ")", ":", "\"\"\" Generate token \"\"\"", "privk", "=", "priv_keys", "[", "alg", "]", ".", "get", "(", "'default'", ",", "priv_key", ")", "jwt", ".", "generate_jwt", "(", "payload", ",", "privk", ",", "alg", ",", "timedelta", "(", "seconds", "=", "5", ")", ")", "return", "f" ]
Return function which will generate token for particular algorithm
[ "Return", "function", "which", "will", "generate", "token", "for", "particular", "algorithm" ]
python
train
adrn/gala
gala/coordinates/greatcircle.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/coordinates/greatcircle.py#L299-L322
def sph_midpoint(coord1, coord2):
    """Compute the midpoint between two points on the sphere.

    Parameters
    ----------
    coord1 : `~astropy.coordinates.SkyCoord`
        Coordinate of one point on a great circle.
    coord2 : `~astropy.coordinates.SkyCoord`
        Coordinate of the other point on a great circle.

    Returns
    -------
    midpt : `~astropy.coordinates.SkyCoord`
        The coordinates of the spherical midpoint.
    """
    c1 = coord1.cartesian / coord1.cartesian.norm()
    coord2 = coord2.transform_to(coord1.frame)
    c2 = coord2.cartesian / coord2.cartesian.norm()

    midpt = 0.5 * (c1 + c2)
    usph = midpt.represent_as(coord.UnitSphericalRepresentation)

    return coord1.frame.realize_frame(usph)
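A usage sketch with two equatorial points; the import path is an assumption based on this row's file path (gala/coordinates/greatcircle.py):

    import astropy.units as u
    from astropy.coordinates import SkyCoord
    from gala.coordinates.greatcircle import sph_midpoint  # assumed import path

    c1 = SkyCoord(ra=0 * u.deg, dec=0 * u.deg)
    c2 = SkyCoord(ra=90 * u.deg, dec=0 * u.deg)
    midpt = sph_midpoint(c1, c2)  # expect ra ~ 45 deg, dec ~ 0 deg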
[ "def", "sph_midpoint", "(", "coord1", ",", "coord2", ")", ":", "c1", "=", "coord1", ".", "cartesian", "/", "coord1", ".", "cartesian", ".", "norm", "(", ")", "coord2", "=", "coord2", ".", "transform_to", "(", "coord1", ".", "frame", ")", "c2", "=", "coord2", ".", "cartesian", "/", "coord2", ".", "cartesian", ".", "norm", "(", ")", "midpt", "=", "0.5", "*", "(", "c1", "+", "c2", ")", "usph", "=", "midpt", ".", "represent_as", "(", "coord", ".", "UnitSphericalRepresentation", ")", "return", "coord1", ".", "frame", ".", "realize_frame", "(", "usph", ")" ]
Compute the midpoint between two points on the sphere.

Parameters
----------
coord1 : `~astropy.coordinates.SkyCoord`
    Coordinate of one point on a great circle.
coord2 : `~astropy.coordinates.SkyCoord`
    Coordinate of the other point on a great circle.

Returns
-------
midpt : `~astropy.coordinates.SkyCoord`
    The coordinates of the spherical midpoint.
[ "Compute", "the", "midpoint", "between", "two", "points", "on", "the", "sphere", "." ]
python
train
blockstack/blockstack-core
blockstack/lib/nameset/db.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/db.py#L840-L854
def namedb_namespace_fields_check( namespace_rec ):
    """
    Given a namespace record, make sure the following fields are present:
    * namespace_id
    * buckets

    Makes the record suitable for insertion/update.
    NOTE: MODIFIES namespace_rec
    """
    assert namespace_rec.has_key('namespace_id'), "BUG: namespace record has no ID"
    assert namespace_rec.has_key('buckets'), 'BUG: missing price buckets'
    assert isinstance(namespace_rec['buckets'], str), 'BUG: namespace data is not in canonical form'

    return namespace_rec
[ "def", "namedb_namespace_fields_check", "(", "namespace_rec", ")", ":", "assert", "namespace_rec", ".", "has_key", "(", "'namespace_id'", ")", ",", "\"BUG: namespace record has no ID\"", "assert", "namespace_rec", ".", "has_key", "(", "'buckets'", ")", ",", "'BUG: missing price buckets'", "assert", "isinstance", "(", "namespace_rec", "[", "'buckets'", "]", ",", "str", ")", ",", "'BUG: namespace data is not in canonical form'", "return", "namespace_rec" ]
Given a namespace record, make sure the following fields are present:
* namespace_id
* buckets

Makes the record suitable for insertion/update.
NOTE: MODIFIES namespace_rec
[ "Given", "a", "namespace", "record", "make", "sure", "the", "following", "fields", "are", "present", ":", "*", "namespace_id", "*", "buckets" ]
python
train
Chilipp/psy-simple
psy_simple/widgets/colors.py
https://github.com/Chilipp/psy-simple/blob/7d916406a6d3c3c27c0b7102f98fef07a4da0a61/psy_simple/widgets/colors.py#L76-L83
def headerData(self, section, orientation, role=Qt.DisplayRole):
    """Set header data"""
    if role != Qt.DisplayRole:
        return None
    if orientation == Qt.Vertical:
        return six.text_type(self.color_da.cmap[section].values)
    return super(ColormapModel, self).headerData(section, orientation, role)
[ "def", "headerData", "(", "self", ",", "section", ",", "orientation", ",", "role", "=", "Qt", ".", "DisplayRole", ")", ":", "if", "role", "!=", "Qt", ".", "DisplayRole", ":", "return", "None", "if", "orientation", "==", "Qt", ".", "Vertical", ":", "return", "six", ".", "text_type", "(", "self", ".", "color_da", ".", "cmap", "[", "section", "]", ".", "values", ")", "return", "super", "(", "ColormapModel", ",", "self", ")", ".", "headerData", "(", "section", ",", "orientation", ",", "role", ")" ]
Set header data
[ "Set", "header", "data" ]
python
train
kislyuk/aegea
aegea/deploy.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/deploy.py#L138-L162
def grant(args):
    """
    Given an IAM role or instance name, attach an IAM policy granting
    appropriate permissions to subscribe to deployments. Given a GitHub repo
    URL, create and record deployment keys for the repo and any of its
    private submodules, making the keys accessible to the IAM role.
    """
    try:
        role = resources.iam.Role(args.iam_role_or_instance)
        role.load()
    except ClientError:
        role = get_iam_role_for_instance(args.iam_role_or_instance)
    role.attach_policy(PolicyArn=ensure_deploy_iam_policy().arn)
    for private_repo in [args.repo] + list(private_submodules(args.repo)):
        gh_owner_name, gh_repo_name = parse_repo_name(private_repo)
        secret = secrets.put(argparse.Namespace(secret_name="deploy.{}.{}".format(gh_owner_name, gh_repo_name),
                                                iam_role=role.name,
                                                instance_profile=None,
                                                iam_group=None,
                                                iam_user=None,
                                                generate_ssh_key=True))
        get_repo(private_repo).create_key(__name__ + "." + role.name, secret["ssh_public_key"])
        logger.info("Created deploy key %s for IAM role %s to access GitHub repo %s",
                    secret["ssh_key_fingerprint"], role.name, private_repo)
[ "def", "grant", "(", "args", ")", ":", "try", ":", "role", "=", "resources", ".", "iam", ".", "Role", "(", "args", ".", "iam_role_or_instance", ")", "role", ".", "load", "(", ")", "except", "ClientError", ":", "role", "=", "get_iam_role_for_instance", "(", "args", ".", "iam_role_or_instance", ")", "role", ".", "attach_policy", "(", "PolicyArn", "=", "ensure_deploy_iam_policy", "(", ")", ".", "arn", ")", "for", "private_repo", "in", "[", "args", ".", "repo", "]", "+", "list", "(", "private_submodules", "(", "args", ".", "repo", ")", ")", ":", "gh_owner_name", ",", "gh_repo_name", "=", "parse_repo_name", "(", "private_repo", ")", "secret", "=", "secrets", ".", "put", "(", "argparse", ".", "Namespace", "(", "secret_name", "=", "\"deploy.{}.{}\"", ".", "format", "(", "gh_owner_name", ",", "gh_repo_name", ")", ",", "iam_role", "=", "role", ".", "name", ",", "instance_profile", "=", "None", ",", "iam_group", "=", "None", ",", "iam_user", "=", "None", ",", "generate_ssh_key", "=", "True", ")", ")", "get_repo", "(", "private_repo", ")", ".", "create_key", "(", "__name__", "+", "\".\"", "+", "role", ".", "name", ",", "secret", "[", "\"ssh_public_key\"", "]", ")", "logger", ".", "info", "(", "\"Created deploy key %s for IAM role %s to access GitHub repo %s\"", ",", "secret", "[", "\"ssh_key_fingerprint\"", "]", ",", "role", ".", "name", ",", "private_repo", ")" ]
Given an IAM role or instance name, attach an IAM policy granting appropriate permissions to subscribe to deployments. Given a GitHub repo URL, create and record deployment keys for the repo and any of its private submodules, making the keys accessible to the IAM role.
[ "Given", "an", "IAM", "role", "or", "instance", "name", "attach", "an", "IAM", "policy", "granting", "appropriate", "permissions", "to", "subscribe", "to", "deployments", ".", "Given", "a", "GitHub", "repo", "URL", "create", "and", "record", "deployment", "keys", "for", "the", "repo", "and", "any", "of", "its", "private", "submodules", "making", "the", "keys", "accessible", "to", "the", "IAM", "role", "." ]
python
train
Apstra/aeon-venos
pylib/aeon/nxos/autoload/install_os.py
https://github.com/Apstra/aeon-venos/blob/4d4f73d5904831ddc78c30922a8a226c90cf7d90/pylib/aeon/nxos/autoload/install_os.py#L56-L76
def copy_from(self, location, timeout=10 * 60):
    """
    This method will fetch the image; the fetch will happen from the
    device-side using the 'copy' command.  Note that the NXAPI appears to
    be single-threaded, so the code needs to wait until this operation has
    completed before attempting another API call.  Therefore the
    :timeout: value is set very high (10min)

    :param location: URL to the location of the file.  This URL must be
                     a valid source field to the NXOS 'copy' command

    :keyword timeout: Timeout in seconds

    :return:
    """
    cmd = 'copy {location} {dir}: vrf {vrf_name}'.format(
        location=location, dir=self.DESTDIR, vrf_name=self.VRF_NAME)

    run = self.device.api.exec_opcmd
    run(cmd, msg_type='cli_show_ascii', timeout=timeout)
[ "def", "copy_from", "(", "self", ",", "location", ",", "timeout", "=", "10", "*", "60", ")", ":", "cmd", "=", "'copy {location} {dir}: vrf {vrf_name}'", ".", "format", "(", "location", "=", "location", ",", "dir", "=", "self", ".", "DESTDIR", ",", "vrf_name", "=", "self", ".", "VRF_NAME", ")", "run", "=", "self", ".", "device", ".", "api", ".", "exec_opcmd", "run", "(", "cmd", ",", "msg_type", "=", "'cli_show_ascii'", ",", "timeout", "=", "timeout", ")" ]
This method will fetch the image; the fetch will happen from the
device-side using the 'copy' command.  Note that the NXAPI appears to
be single-threaded, so the code needs to wait until this operation has
completed before attempting another API call.  Therefore the
:timeout: value is set very high (10min)

:param location: URL to the location of the file.  This URL must be
                 a valid source field to the NXOS 'copy' command

:keyword timeout: Timeout in seconds

:return:
[ "This", "method", "will", "fetch", "the", "image", ";", "the", "fetch", "will", "happen", "from", "the", "device", "-", "side", "using", "the", "copy", "command", ".", "Note", "that", "the", "NXAPI", "appears", "to", "be", "single", "-", "threaded", "so", "the", "code", "needs", "to", "wait", "until", "this", "operation", "has", "completed", "before", "attempting", "another", "API", "call", ".", "Therefore", "the", ":", "timeout", ":", "value", "is", "set", "very", "high", "(", "10min", ")" ]
python
train
guaix-ucm/pyemir
emirdrp/processing/wavecal/slitlet2d.py
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/slitlet2d.py#L316-L351
def extract_slitlet2d(self, image_2k2k):
    """Extract slitlet 2d image from image with original EMIR dimensions.

    Parameters
    ----------
    image_2k2k : numpy array
        Original image (dimensions EMIR_NAXIS1 * EMIR_NAXIS2)

    Returns
    -------
    slitlet2d : numpy array
        Image corresponding to the slitlet region defined by its
        bounding box.
    """

    # protections
    naxis2, naxis1 = image_2k2k.shape
    if naxis1 != EMIR_NAXIS1:
        raise ValueError('Unexpected naxis1')
    if naxis2 != EMIR_NAXIS2:
        raise ValueError('Unexpected naxis2')

    # extract slitlet region
    slitlet2d = image_2k2k[(self.bb_ns1_orig - 1):self.bb_ns2_orig,
                           (self.bb_nc1_orig - 1):self.bb_nc2_orig]

    # transform to float
    slitlet2d = slitlet2d.astype(np.float)

    # display slitlet2d with boundaries and middle spectrum trail
    if abs(self.debugplot) in [21, 22]:
        self.ximshow_unrectified(slitlet2d)

    # return slitlet image
    return slitlet2d
[ "def", "extract_slitlet2d", "(", "self", ",", "image_2k2k", ")", ":", "# protections", "naxis2", ",", "naxis1", "=", "image_2k2k", ".", "shape", "if", "naxis1", "!=", "EMIR_NAXIS1", ":", "raise", "ValueError", "(", "'Unexpected naxis1'", ")", "if", "naxis2", "!=", "EMIR_NAXIS2", ":", "raise", "ValueError", "(", "'Unexpected naxis2'", ")", "# extract slitlet region", "slitlet2d", "=", "image_2k2k", "[", "(", "self", ".", "bb_ns1_orig", "-", "1", ")", ":", "self", ".", "bb_ns2_orig", ",", "(", "self", ".", "bb_nc1_orig", "-", "1", ")", ":", "self", ".", "bb_nc2_orig", "]", "# transform to float", "slitlet2d", "=", "slitlet2d", ".", "astype", "(", "np", ".", "float", ")", "# display slitlet2d with boundaries and middle spectrum trail", "if", "abs", "(", "self", ".", "debugplot", ")", "in", "[", "21", ",", "22", "]", ":", "self", ".", "ximshow_unrectified", "(", "slitlet2d", ")", "# return slitlet image", "return", "slitlet2d" ]
Extract slitlet 2d image from image with original EMIR dimensions.

Parameters
----------
image_2k2k : numpy array
    Original image (dimensions EMIR_NAXIS1 * EMIR_NAXIS2)

Returns
-------
slitlet2d : numpy array
    Image corresponding to the slitlet region defined by its
    bounding box.
[ "Extract", "slitlet", "2d", "image", "from", "image", "with", "original", "EMIR", "dimensions", "." ]
python
train
Erotemic/ubelt
ubelt/util_dict.py
https://github.com/Erotemic/ubelt/blob/db802f3ad8abba025db74b54f86e6892b8927325/ubelt/util_dict.py#L415-L448
def dict_isect(*args):
    """
    Constructs a dictionary that contains keys common between all inputs.
    The returned values will only belong to the first dictionary.

    Args:
        *args : a sequence of dictionaries (or sets of keys)

    Returns:
        Dict | OrderedDict :
            OrderedDict if the first argument is an OrderedDict, otherwise dict

    Notes:
        This function can be used as an alternative to `dict_subset` where
        any key not in the dictionary is ignored. See the following example:

        >>> dict_isect({'a': 1, 'b': 2, 'c': 3}, ['a', 'c', 'd'])
        {'a': 1, 'c': 3}

    Example:
        >>> dict_isect({'a': 1, 'b': 1}, {'b': 2, 'c': 2})
        {'b': 1}
        >>> dict_isect(odict([('a', 1), ('b', 2)]), odict([('c', 3)]))
        OrderedDict()
        >>> dict_isect()
        {}
    """
    if not args:
        return {}
    else:
        dictclass = OrderedDict if isinstance(args[0], OrderedDict) else dict
        common_keys = set.intersection(*map(set, args))
        first_dict = args[0]
        return dictclass((k, first_dict[k]) for k in common_keys)
[ "def", "dict_isect", "(", "*", "args", ")", ":", "if", "not", "args", ":", "return", "{", "}", "else", ":", "dictclass", "=", "OrderedDict", "if", "isinstance", "(", "args", "[", "0", "]", ",", "OrderedDict", ")", "else", "dict", "common_keys", "=", "set", ".", "intersection", "(", "*", "map", "(", "set", ",", "args", ")", ")", "first_dict", "=", "args", "[", "0", "]", "return", "dictclass", "(", "(", "k", ",", "first_dict", "[", "k", "]", ")", "for", "k", "in", "common_keys", ")" ]
Constructs a dictionary that contains keys common between all inputs.
The returned values will only belong to the first dictionary.

Args:
    *args : a sequence of dictionaries (or sets of keys)

Returns:
    Dict | OrderedDict :
        OrderedDict if the first argument is an OrderedDict, otherwise dict

Notes:
    This function can be used as an alternative to `dict_subset` where
    any key not in the dictionary is ignored. See the following example:

    >>> dict_isect({'a': 1, 'b': 2, 'c': 3}, ['a', 'c', 'd'])
    {'a': 1, 'c': 3}

Example:
    >>> dict_isect({'a': 1, 'b': 1}, {'b': 2, 'c': 2})
    {'b': 1}
    >>> dict_isect(odict([('a', 1), ('b', 2)]), odict([('c', 3)]))
    OrderedDict()
    >>> dict_isect()
    {}
[ "Constructs", "a", "dictionary", "that", "contains", "keys", "common", "between", "all", "inputs", ".", "The", "returned", "values", "will", "only", "belong", "to", "the", "first", "dictionary", "." ]
python
valid
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L7932-L8019
def compare_dicts(i):
    """
    Input:  {
              dict1 - dictionary 1
              dict2 - dictionary 2
              (ignore_case) - ignore case of letters

              Note that if dict1 and dict2 has lists, the results will be as follows:

              * dict1={"key":['a','b','c']}
                dict2={"key":['a','b']}
                EQUAL

              * dict1={"key":['a','b']}
                dict2={"key":['a','b','c']}
                NOT EQUAL
            }

    Output: {
              return  - return code =  0, if successful
                                    >  0, if error
              (error) - error text if return > 0

              equal   - if 'yes' dictionaries are equal
            }
    """

    d1 = i.get('dict1', {})
    d2 = i.get('dict2', {})

    equal = 'yes'

    bic = False
    ic = i.get('ignore_case', '')
    if ic == 'yes':
        bic = True

    for q2 in d2:
        v2 = d2[q2]

        if type(v2) == dict:
            if q2 not in d1:
                equal = 'no'
                break

            v1 = d1[q2]

            rx = compare_dicts({'dict1': v1, 'dict2': v2, 'ignore_case': ic})
            if rx['return'] > 0:
                return rx

            equal = rx['equal']
            if equal == 'no':
                break
        elif type(v2) == list:
            # For now can check only values in list
            if q2 not in d1:
                equal = 'no'
                break

            v1 = d1[q2]
            if type(v1) != list:
                equal = 'no'
                break

            for m in v2:
                if m not in v1:
                    equal = 'no'
                    break

            if equal == 'no':
                break
        else:
            if q2 not in d1:
                equal = 'no'
                break

            if equal == 'no':
                break

            v1 = d1[q2]
            if bic and type(v1) != int and type(v1) != float and type(v1) != bool:
                v1 = v1.lower()
                v2 = v2.lower()

            if v2 != v1:
                equal = 'no'
                break

    return {'return': 0, 'equal': equal}
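A usage sketch of the asymmetric list behaviour spelled out in the docstring (every value in dict2's list must appear in dict1's list, but not vice versa):

    r = compare_dicts({'dict1': {'key': ['a', 'b', 'c']},
                       'dict2': {'key': ['a', 'b']}})
    assert r == {'return': 0, 'equal': 'yes'}

    r = compare_dicts({'dict1': {'key': ['a', 'b']},
                       'dict2': {'key': ['a', 'b', 'c']}})
    assert r == {'return': 0, 'equal': 'no'}  # 'c' is missing from dict1's list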
[ "def", "compare_dicts", "(", "i", ")", ":", "d1", "=", "i", ".", "get", "(", "'dict1'", ",", "{", "}", ")", "d2", "=", "i", ".", "get", "(", "'dict2'", ",", "{", "}", ")", "equal", "=", "'yes'", "bic", "=", "False", "ic", "=", "i", ".", "get", "(", "'ignore_case'", ",", "''", ")", "if", "ic", "==", "'yes'", ":", "bic", "=", "True", "for", "q2", "in", "d2", ":", "v2", "=", "d2", "[", "q2", "]", "if", "type", "(", "v2", ")", "==", "dict", ":", "if", "q2", "not", "in", "d1", ":", "equal", "=", "'no'", "break", "v1", "=", "d1", "[", "q2", "]", "rx", "=", "compare_dicts", "(", "{", "'dict1'", ":", "v1", ",", "'dict2'", ":", "v2", ",", "'ignore_case'", ":", "ic", "}", ")", "if", "rx", "[", "'return'", "]", ">", "0", ":", "return", "rx", "equal", "=", "rx", "[", "'equal'", "]", "if", "equal", "==", "'no'", ":", "break", "elif", "type", "(", "v2", ")", "==", "list", ":", "# For now can check only values in list", "if", "q2", "not", "in", "d1", ":", "equal", "=", "'no'", "break", "v1", "=", "d1", "[", "q2", "]", "if", "type", "(", "v1", ")", "!=", "list", ":", "equal", "=", "'no'", "break", "for", "m", "in", "v2", ":", "if", "m", "not", "in", "v1", ":", "equal", "=", "'no'", "break", "if", "equal", "==", "'no'", ":", "break", "else", ":", "if", "q2", "not", "in", "d1", ":", "equal", "=", "'no'", "break", "if", "equal", "==", "'no'", ":", "break", "v1", "=", "d1", "[", "q2", "]", "if", "bic", "and", "type", "(", "v1", ")", "!=", "int", "and", "type", "(", "v1", ")", "!=", "float", "and", "type", "(", "v1", ")", "!=", "bool", ":", "v1", "=", "v1", ".", "lower", "(", ")", "v2", "=", "v2", ".", "lower", "(", ")", "if", "v2", "!=", "v1", ":", "equal", "=", "'no'", "break", "return", "{", "'return'", ":", "0", ",", "'equal'", ":", "equal", "}" ]
Input:  {
          dict1 - dictionary 1
          dict2 - dictionary 2
          (ignore_case) - ignore case of letters

          Note that if dict1 and dict2 has lists, the results will be as follows:

          * dict1={"key":['a','b','c']}
            dict2={"key":['a','b']}
            EQUAL

          * dict1={"key":['a','b']}
            dict2={"key":['a','b','c']}
            NOT EQUAL
        }

Output: {
          return  - return code =  0, if successful
                                >  0, if error
          (error) - error text if return > 0

          equal   - if 'yes' dictionaries are equal
        }
[ "Input", ":", "{", "dict1", "-", "dictionary", "1", "dict2", "-", "dictionary", "2", "(", "ignore_case", ")", "-", "ignore", "case", "of", "letters" ]
python
train
google/prettytensor
prettytensor/replay_queue.py
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/replay_queue.py#L26-L39
def _make_tuple(x):
  """TF has an obnoxious habit of being lenient with single vs tuple."""
  if isinstance(x, prettytensor.PrettyTensor):
    if x.is_sequence():
      return tuple(x.sequence)
    else:
      return (x.tensor,)
  elif isinstance(x, tuple):
    return x
  elif (isinstance(x, collections.Sequence) and
        not isinstance(x, six.string_types)):
    return tuple(x)
  else:
    return (x,)
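A quick sketch of the normalisation for plain Python values (the PrettyTensor branch needs a live graph and is omitted; note the snippet predates Python 3.10, where ``collections.Sequence`` moved to ``collections.abc``):

    assert _make_tuple((1, 2)) == (1, 2)  # tuples pass through
    assert _make_tuple([1, 2]) == (1, 2)  # other sequences are converted
    assert _make_tuple("ab") == ("ab",)   # strings count as a single value
    assert _make_tuple(3) == (3,)         # scalars are wrapped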
[ "def", "_make_tuple", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "prettytensor", ".", "PrettyTensor", ")", ":", "if", "x", ".", "is_sequence", "(", ")", ":", "return", "tuple", "(", "x", ".", "sequence", ")", "else", ":", "return", "(", "x", ".", "tensor", ",", ")", "elif", "isinstance", "(", "x", ",", "tuple", ")", ":", "return", "x", "elif", "(", "isinstance", "(", "x", ",", "collections", ".", "Sequence", ")", "and", "not", "isinstance", "(", "x", ",", "six", ".", "string_types", ")", ")", ":", "return", "tuple", "(", "x", ")", "else", ":", "return", "(", "x", ",", ")" ]
TF has an obnoxious habit of being lenient with single vs tuple.
[ "TF", "has", "an", "obnoxious", "habit", "of", "being", "lenient", "with", "single", "vs", "tuple", "." ]
python
train
saltstack/salt
salt/daemons/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/__init__.py#L200-L240
def parse_hostname(hostname, default_port):
    '''
    Parse hostname string and return a tuple of (host, port)
    If port missing in hostname string then use default_port
    If anything is not a valid then return None

    hostname should contain a host and an option space delimited port
    host port

    As an attempt to prevent foolish mistakes the parser also tries to
    identify the port when it is colon delimited not space delimited.
    As in host:port. This is problematic since IPV6 addresses may have
    colons in them. Consequently the use of colon delimited ports is
    strongly discouraged. An ipv6 address must have at least 2 colons.
    '''
    try:
        host, sep, port = hostname.strip().rpartition(' ')
        if not port:  # invalid nothing there
            return None
        if not host:  # no space separated port, only host as port use default port
            host = port
            port = default_port
            # ipv6 must have two or more colons
            if host.count(':') == 1:  # only one so may be using colon delimited port
                host, sep, port = host.rpartition(':')
                if not host:  # colon but not host so invalid
                    return None
                if not port:  # colon but no port so use default
                    port = default_port
        host = host.strip()
        try:
            port = int(port)
        except ValueError:
            return None
    except AttributeError:
        return None
    return (host, port)
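A usage sketch of the addressing forms the docstring describes:

    assert parse_hostname("salt.example.com 4506", 4505) == ("salt.example.com", 4506)
    assert parse_hostname("salt.example.com", 4505) == ("salt.example.com", 4505)
    # colon-delimited port: accepted, but discouraged per the docstring
    assert parse_hostname("salt.example.com:4506", 4505) == ("salt.example.com", 4506)
    # an ipv6 address has two or more colons, so it is kept whole
    assert parse_hostname("2001:db8::1", 4505) == ("2001:db8::1", 4505)
    assert parse_hostname("host bad-port", 4505) is None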
[ "def", "parse_hostname", "(", "hostname", ",", "default_port", ")", ":", "try", ":", "host", ",", "sep", ",", "port", "=", "hostname", ".", "strip", "(", ")", ".", "rpartition", "(", "' '", ")", "if", "not", "port", ":", "# invalid nothing there", "return", "None", "if", "not", "host", ":", "# no space separated port, only host as port use default port", "host", "=", "port", "port", "=", "default_port", "# ipv6 must have two or more colons", "if", "host", ".", "count", "(", "':'", ")", "==", "1", ":", "# only one so may be using colon delimited port", "host", ",", "sep", ",", "port", "=", "host", ".", "rpartition", "(", "':'", ")", "if", "not", "host", ":", "# colon but not host so invalid", "return", "None", "if", "not", "port", ":", "# colon but no port so use default", "port", "=", "default_port", "host", "=", "host", ".", "strip", "(", ")", "try", ":", "port", "=", "int", "(", "port", ")", "except", "ValueError", ":", "return", "None", "except", "AttributeError", ":", "return", "None", "return", "(", "host", ",", "port", ")" ]
Parse hostname string and return a tuple of (host, port)
If port missing in hostname string then use default_port
If anything is not a valid then return None

hostname should contain a host and an option space delimited port
host port

As an attempt to prevent foolish mistakes the parser also tries to
identify the port when it is colon delimited not space delimited.
As in host:port. This is problematic since IPV6 addresses may have
colons in them. Consequently the use of colon delimited ports is
strongly discouraged. An ipv6 address must have at least 2 colons.
[ "Parse", "hostname", "string", "and", "return", "a", "tuple", "of", "(", "host", "port", ")", "If", "port", "missing", "in", "hostname", "string", "then", "use", "default_port", "If", "anything", "is", "not", "a", "valid", "then", "return", "None" ]
python
train
GoogleCloudPlatform/google-cloud-datastore
python/googledatastore/helper.py
https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L422-L432
def micros_to_timestamp(micros, timestamp):
  """Convert microseconds from utc epoch to google.protobuf.timestamp.

  Args:
    micros: a long, number of microseconds since utc epoch.
    timestamp: a google.protobuf.timestamp.Timestamp to populate.
  """
  seconds = long(micros / _MICROS_PER_SECOND)
  micro_remainder = micros % _MICROS_PER_SECOND
  timestamp.seconds = seconds
  timestamp.nanos = micro_remainder * _NANOS_PER_MICRO
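A worked example of the split, assuming the usual constants ``_MICROS_PER_SECOND = 10**6`` and ``_NANOS_PER_MICRO = 10**3`` (their definitions are not shown here; the snippet itself is Python 2, note the ``long`` call):

    from google.protobuf.timestamp_pb2 import Timestamp

    ts = Timestamp()
    micros_to_timestamp(1500000, ts)  # 1.5 s after the epoch
    assert ts.seconds == 1
    assert ts.nanos == 500000000      # 500000 us * 1000 ns/us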
[ "def", "micros_to_timestamp", "(", "micros", ",", "timestamp", ")", ":", "seconds", "=", "long", "(", "micros", "/", "_MICROS_PER_SECOND", ")", "micro_remainder", "=", "micros", "%", "_MICROS_PER_SECOND", "timestamp", ".", "seconds", "=", "seconds", "timestamp", ".", "nanos", "=", "micro_remainder", "*", "_NANOS_PER_MICRO" ]
Convert microseconds from utc epoch to google.protobuf.timestamp. Args: micros: a long, number of microseconds since utc epoch. timestamp: a google.protobuf.timestamp.Timestamp to populate.
[ "Convert", "microseconds", "from", "utc", "epoch", "to", "google", ".", "protobuf", ".", "timestamp", "." ]
python
train
hyperledger/indy-crypto
wrappers/python/indy_crypto/bls.py
https://github.com/hyperledger/indy-crypto/blob/1675e29a2a5949b44899553d3d128335cf7a61b3/wrappers/python/indy_crypto/bls.py#L229-L250
def sign(message: bytes, sign_key: SignKey) -> Signature:
    """
    Signs the message and returns signature.

    :param: message - Message to sign
    :param: sign_key - Sign key
    :return: Signature
    """
    logger = logging.getLogger(__name__)
    logger.debug("Bls::sign: >>> message: %r, sign_key: %r", message, sign_key)

    c_instance = c_void_p()
    do_call('indy_crypto_bls_sign',
            message, len(message),
            sign_key.c_instance,
            byref(c_instance))

    res = Signature(c_instance)

    logger.debug("Bls::sign: <<< res: %r", res)
    return res
[ "def", "sign", "(", "message", ":", "bytes", ",", "sign_key", ":", "SignKey", ")", "->", "Signature", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"Bls::sign: >>> message: %r, sign_key: %r\"", ",", "message", ",", "sign_key", ")", "c_instance", "=", "c_void_p", "(", ")", "do_call", "(", "'indy_crypto_bls_sign'", ",", "message", ",", "len", "(", "message", ")", ",", "sign_key", ".", "c_instance", ",", "byref", "(", "c_instance", ")", ")", "res", "=", "Signature", "(", "c_instance", ")", "logger", ".", "debug", "(", "\"Bls::sign: <<< res: %r\"", ",", "res", ")", "return", "res" ]
Signs the message and returns signature.

:param: message - Message to sign
:param: sign_key - Sign key
:return: Signature
[ "Signs", "the", "message", "and", "returns", "signature", "." ]
python
train
googleapis/google-auth-library-python
google/auth/compute_engine/_metadata.py
https://github.com/googleapis/google-auth-library-python/blob/2c6ad78917e936f38f87c946209c8031166dc96e/google/auth/compute_engine/_metadata.py#L92-L140
def get(request, path, root=_METADATA_ROOT, recursive=False):
    """Fetch a resource from the metadata server.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        path (str): The resource to retrieve. For example,
            ``'instance/service-accounts/default'``.
        root (str): The full path to the metadata server root.
        recursive (bool): Whether to do a recursive query of metadata. See
            https://cloud.google.com/compute/docs/metadata#aggcontents for more
            details.

    Returns:
        Union[Mapping, str]: If the metadata server returns JSON, a mapping of
            the decoded JSON is returned. Otherwise, the response content is
            returned as a string.

    Raises:
        google.auth.exceptions.TransportError: if an error occurred while
            retrieving metadata.
    """
    base_url = urlparse.urljoin(root, path)
    query_params = {}

    if recursive:
        query_params['recursive'] = 'true'

    url = _helpers.update_query(base_url, query_params)

    response = request(url=url, method='GET', headers=_METADATA_HEADERS)

    if response.status == http_client.OK:
        content = _helpers.from_bytes(response.data)
        if response.headers['content-type'] == 'application/json':
            try:
                return json.loads(content)
            except ValueError as caught_exc:
                new_exc = exceptions.TransportError(
                    'Received invalid JSON from the Google Compute Engine'
                    'metadata service: {:.20}'.format(content))
                six.raise_from(new_exc, caught_exc)
        else:
            return content
    else:
        raise exceptions.TransportError(
            'Failed to retrieve {} from the Google Compute Engine'
            'metadata service. Status: {} Response:\n{}'.format(
                url, response.status, response.data), response)
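A usage sketch, only meaningful on a GCE instance where the metadata server is reachable; ``google.auth.transport.requests.Request`` is the stock transport shipped with google-auth (it requires the ``requests`` package):

    import google.auth.transport.requests

    request = google.auth.transport.requests.Request()
    email = get(request, 'instance/service-accounts/default/email')            # plain string
    info = get(request, 'instance/service-accounts/default/', recursive=True)  # decoded JSON mapping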
[ "def", "get", "(", "request", ",", "path", ",", "root", "=", "_METADATA_ROOT", ",", "recursive", "=", "False", ")", ":", "base_url", "=", "urlparse", ".", "urljoin", "(", "root", ",", "path", ")", "query_params", "=", "{", "}", "if", "recursive", ":", "query_params", "[", "'recursive'", "]", "=", "'true'", "url", "=", "_helpers", ".", "update_query", "(", "base_url", ",", "query_params", ")", "response", "=", "request", "(", "url", "=", "url", ",", "method", "=", "'GET'", ",", "headers", "=", "_METADATA_HEADERS", ")", "if", "response", ".", "status", "==", "http_client", ".", "OK", ":", "content", "=", "_helpers", ".", "from_bytes", "(", "response", ".", "data", ")", "if", "response", ".", "headers", "[", "'content-type'", "]", "==", "'application/json'", ":", "try", ":", "return", "json", ".", "loads", "(", "content", ")", "except", "ValueError", "as", "caught_exc", ":", "new_exc", "=", "exceptions", ".", "TransportError", "(", "'Received invalid JSON from the Google Compute Engine'", "'metadata service: {:.20}'", ".", "format", "(", "content", ")", ")", "six", ".", "raise_from", "(", "new_exc", ",", "caught_exc", ")", "else", ":", "return", "content", "else", ":", "raise", "exceptions", ".", "TransportError", "(", "'Failed to retrieve {} from the Google Compute Engine'", "'metadata service. Status: {} Response:\\n{}'", ".", "format", "(", "url", ",", "response", ".", "status", ",", "response", ".", "data", ")", ",", "response", ")" ]
Fetch a resource from the metadata server.

Args:
    request (google.auth.transport.Request): A callable used to make
        HTTP requests.
    path (str): The resource to retrieve. For example,
        ``'instance/service-accounts/default'``.
    root (str): The full path to the metadata server root.
    recursive (bool): Whether to do a recursive query of metadata. See
        https://cloud.google.com/compute/docs/metadata#aggcontents for more
        details.

Returns:
    Union[Mapping, str]: If the metadata server returns JSON, a mapping of
        the decoded JSON is returned. Otherwise, the response content is
        returned as a string.

Raises:
    google.auth.exceptions.TransportError: if an error occurred while
        retrieving metadata.
[ "Fetch", "a", "resource", "from", "the", "metadata", "server", "." ]
python
train
saltstack/salt
salt/utils/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/virt.py#L32-L64
def accept(self, pub):
    '''
    Accept the provided key
    '''
    try:
        with salt.utils.files.fopen(self.path, 'r') as fp_:
            expiry = int(fp_.read())
    except (OSError, IOError):
        log.error(
            'Request to sign key for minion \'%s\' on hyper \'%s\' '
            'denied: no authorization', self.id, self.hyper
        )
        return False
    except ValueError:
        log.error('Invalid expiry data in %s', self.path)
        return False

    # Limit acceptance window to 10 minutes
    # TODO: Move this value to the master config file
    if (time.time() - expiry) > 600:
        log.warning(
            'Request to sign key for minion "%s" on hyper "%s" denied: '
            'authorization expired', self.id, self.hyper
        )
        return False

    pubfn = os.path.join(self.opts['pki_dir'], 'minions', self.id)
    with salt.utils.files.fopen(pubfn, 'w+') as fp_:
        fp_.write(pub)
    self.void()
    return True
[ "def", "accept", "(", "self", ",", "pub", ")", ":", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "self", ".", "path", ",", "'r'", ")", "as", "fp_", ":", "expiry", "=", "int", "(", "fp_", ".", "read", "(", ")", ")", "except", "(", "OSError", ",", "IOError", ")", ":", "log", ".", "error", "(", "'Request to sign key for minion \\'%s\\' on hyper \\'%s\\' '", "'denied: no authorization'", ",", "self", ".", "id", ",", "self", ".", "hyper", ")", "return", "False", "except", "ValueError", ":", "log", ".", "error", "(", "'Invalid expiry data in %s'", ",", "self", ".", "path", ")", "return", "False", "# Limit acceptance window to 10 minutes", "# TODO: Move this value to the master config file", "if", "(", "time", ".", "time", "(", ")", "-", "expiry", ")", ">", "600", ":", "log", ".", "warning", "(", "'Request to sign key for minion \"%s\" on hyper \"%s\" denied: '", "'authorization expired'", ",", "self", ".", "id", ",", "self", ".", "hyper", ")", "return", "False", "pubfn", "=", "os", ".", "path", ".", "join", "(", "self", ".", "opts", "[", "'pki_dir'", "]", ",", "'minions'", ",", "self", ".", "id", ")", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "pubfn", ",", "'w+'", ")", "as", "fp_", ":", "fp_", ".", "write", "(", "pub", ")", "self", ".", "void", "(", ")", "return", "True" ]
Accept the provided key
[ "Accept", "the", "provided", "key" ]
python
train
albertz/py_better_exchook
better_exchook.py
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L736-L743
def hide_button_span(self, mode, file=sys.stdout):
    """
    :param int mode: 1 or 2
    :param io.TextIOBase|io.StringIO file:
    """
    file.write("\033[83;%iu" % mode)
    yield
    file.write("\033[83;0u")
[ "def", "hide_button_span", "(", "self", ",", "mode", ",", "file", "=", "sys", ".", "stdout", ")", ":", "file", ".", "write", "(", "\"\\033[83;%iu\"", "%", "mode", ")", "yield", "file", ".", "write", "(", "\"\\033[83;0u\"", ")" ]
:param int mode: 1 or 2
:param io.TextIOBase|io.StringIO file:
[ ":", "param", "int", "mode", ":", "1", "or", "2", ":", "param", "io", ".", "TextIOBase|io", ".", "StringIO", "file", ":" ]
python
train
jobovy/galpy
galpy/df/evolveddiskdf.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/evolveddiskdf.py#L339-L482
def vmomentsurfacemass(self,R,n,m,t=0.,nsigma=None,deg=False,
                       epsrel=1.e-02,epsabs=1.e-05,phi=0.,
                       grid=None,gridpoints=101,returnGrid=False,
                       hierarchgrid=False,nlevels=2,
                       print_progress=False,
                       integrate_method='dopr54_c',
                       deriv=None):
    """
    NAME:

       vmomentsurfacemass

    PURPOSE:

       calculate an arbitrary moment of the velocity distribution
       at (R,phi) times the surfacemass

    INPUT:

       R - radius at which to calculate the moment (in natural units)

       phi= azimuth (rad unless deg=True)

       n - vR^n

       m - vT^m

       t= time at which to evaluate the DF (can be a list or ndarray;
          if this is the case, list needs to be in descending order and
          equally spaced)

       nsigma - number of sigma to integrate the velocities over (based
                on an estimate, so be generous, but not too generous)

       deg= azimuth is in degree (default=False)

       epsrel, epsabs - scipy.integrate keywords (the integration
                        calculates the ratio of this vmoment to that of
                        the initial DF)

       grid= if set to True, build a grid and use that to evaluate
             integrals; if set to a grid-objects (such as returned by
             this procedure), use this grid; if this was created for a
             list of times, moments are calculated for each time

       gridpoints= number of points to use for the grid in 1D
                   (default=101)

       returnGrid= if True, return the grid object (default=False)

       hierarchgrid= if True, use a hierarchical grid (default=False)

       nlevels= number of hierarchical levels for the hierarchical grid

       print_progress= if True, print progress updates

       integrate_method= orbit.integrate method argument

       deriv= None, 'R', or 'phi': calculates derivative of the moment
              wrt R or phi **only with grid options**

    OUTPUT:

       <vR^n vT^m x surface-mass> at R,phi (no support for units)

    COMMENT:

       grid-based calculation is the only one that is heavily tested
       (although the test suite also tests the direct calculation)

    HISTORY:

       2011-03-30 - Written - Bovy (NYU)
    """
    #if we have already precalculated a grid, use that
    if not grid is None and isinstance(grid,evolveddiskdfGrid):
        if returnGrid:
            return (self._vmomentsurfacemassGrid(n,m,grid),grid)
        else:
            return self._vmomentsurfacemassGrid(n,m,grid)
    elif not grid is None \
            and isinstance(grid,evolveddiskdfHierarchicalGrid):
        if returnGrid:
            return (self._vmomentsurfacemassHierarchicalGrid(n,m,grid),
                    grid)
        else:
            return self._vmomentsurfacemassHierarchicalGrid(n,m,grid)
    #Otherwise we need to do some more work
    if deg:
        az= phi*_DEGTORAD
    else:
        az= phi
    if nsigma is None:
        nsigma= _NSIGMA
    if _PROFILE: #pragma: no cover
        start= time_module.time()
    if hasattr(self._initdf,'_estimatemeanvR') \
            and hasattr(self._initdf,'_estimatemeanvT') \
            and hasattr(self._initdf,'_estimateSigmaR2') \
            and hasattr(self._initdf,'_estimateSigmaT2'):
        sigmaR1= nu.sqrt(self._initdf._estimateSigmaR2(R,phi=az))
        sigmaT1= nu.sqrt(self._initdf._estimateSigmaT2(R,phi=az))
        meanvR= self._initdf._estimatemeanvR(R,phi=az)
        meanvT= self._initdf._estimatemeanvT(R,phi=az)
    else:
        warnings.warn("No '_estimateSigmaR2' etc. functions found for initdf in evolveddf; thus using potentially slow sigmaR2 etc functions",
                      galpyWarning)
        sigmaR1= nu.sqrt(self._initdf.sigmaR2(R,phi=az,use_physical=False))
        sigmaT1= nu.sqrt(self._initdf.sigmaT2(R,phi=az,use_physical=False))
        meanvR= self._initdf.meanvR(R,phi=az,use_physical=False)
        meanvT= self._initdf.meanvT(R,phi=az,use_physical=False)
    if _PROFILE: #pragma: no cover
        setup_time= (time_module.time()-start)
    if not grid is None and isinstance(grid,bool) and grid:
        if not hierarchgrid:
            if _PROFILE: #pragma: no cover
                start= time_module.time()
            grido= self._buildvgrid(R,az,nsigma,t,
                                    sigmaR1,sigmaT1,meanvR,meanvT,
                                    gridpoints,print_progress,
                                    integrate_method,deriv)
            if _PROFILE: #pragma: no cover
                grid_time= (time_module.time()-start)
                print(setup_time/(setup_time+grid_time), \
                      grid_time/(setup_time+grid_time), \
                      setup_time+grid_time)
            if returnGrid:
                return (self._vmomentsurfacemassGrid(n,m,grido),grido)
            else:
                return self._vmomentsurfacemassGrid(n,m,grido)
        else: #hierarchical grid
            grido= evolveddiskdfHierarchicalGrid(self,R,az,nsigma,t,
                                                 sigmaR1,sigmaT1,meanvR,
                                                 meanvT,
                                                 gridpoints,nlevels,deriv,
                                                 print_progress=print_progress)
            if returnGrid:
                return (self._vmomentsurfacemassHierarchicalGrid(n,m,
                                                                 grido),
                        grido)
            else:
                return self._vmomentsurfacemassHierarchicalGrid(n,m,grido)
    #Calculate the initdf moment and then calculate the ratio
    initvmoment= self._initdf.vmomentsurfacemass(R,n,m,nsigma=nsigma,
                                                 phi=phi)
    if initvmoment == 0.: initvmoment= 1.
    norm= sigmaR1**(n+1)*sigmaT1**(m+1)*initvmoment
    if isinstance(t,(list,nu.ndarray)):
        raise IOError("list of times is only supported with grid-based calculation")
    return dblquad(_vmomentsurfaceIntegrand,
                   meanvT/sigmaT1-nsigma,
                   meanvT/sigmaT1+nsigma,
                   lambda x: meanvR/sigmaR1
                   -nu.sqrt(nsigma**2.-(x-meanvT/sigmaT1)**2.),
                   lambda x: meanvR/sigmaR1
                   +nu.sqrt(nsigma**2.-(x-meanvT/sigmaT1)**2.),
                   (R,az,self,n,m,sigmaR1,sigmaT1,t,initvmoment),
                   epsrel=epsrel,epsabs=epsabs)[0]*norm
[ "def", "vmomentsurfacemass", "(", "self", ",", "R", ",", "n", ",", "m", ",", "t", "=", "0.", ",", "nsigma", "=", "None", ",", "deg", "=", "False", ",", "epsrel", "=", "1.e-02", ",", "epsabs", "=", "1.e-05", ",", "phi", "=", "0.", ",", "grid", "=", "None", ",", "gridpoints", "=", "101", ",", "returnGrid", "=", "False", ",", "hierarchgrid", "=", "False", ",", "nlevels", "=", "2", ",", "print_progress", "=", "False", ",", "integrate_method", "=", "'dopr54_c'", ",", "deriv", "=", "None", ")", ":", "#if we have already precalculated a grid, use that", "if", "not", "grid", "is", "None", "and", "isinstance", "(", "grid", ",", "evolveddiskdfGrid", ")", ":", "if", "returnGrid", ":", "return", "(", "self", ".", "_vmomentsurfacemassGrid", "(", "n", ",", "m", ",", "grid", ")", ",", "grid", ")", "else", ":", "return", "self", ".", "_vmomentsurfacemassGrid", "(", "n", ",", "m", ",", "grid", ")", "elif", "not", "grid", "is", "None", "and", "isinstance", "(", "grid", ",", "evolveddiskdfHierarchicalGrid", ")", ":", "if", "returnGrid", ":", "return", "(", "self", ".", "_vmomentsurfacemassHierarchicalGrid", "(", "n", ",", "m", ",", "grid", ")", ",", "grid", ")", "else", ":", "return", "self", ".", "_vmomentsurfacemassHierarchicalGrid", "(", "n", ",", "m", ",", "grid", ")", "#Otherwise we need to do some more work", "if", "deg", ":", "az", "=", "phi", "*", "_DEGTORAD", "else", ":", "az", "=", "phi", "if", "nsigma", "is", "None", ":", "nsigma", "=", "_NSIGMA", "if", "_PROFILE", ":", "#pragma: no cover", "start", "=", "time_module", ".", "time", "(", ")", "if", "hasattr", "(", "self", ".", "_initdf", ",", "'_estimatemeanvR'", ")", "and", "hasattr", "(", "self", ".", "_initdf", ",", "'_estimatemeanvT'", ")", "and", "hasattr", "(", "self", ".", "_initdf", ",", "'_estimateSigmaR2'", ")", "and", "hasattr", "(", "self", ".", "_initdf", ",", "'_estimateSigmaT2'", ")", ":", "sigmaR1", "=", "nu", ".", "sqrt", "(", "self", ".", "_initdf", ".", "_estimateSigmaR2", "(", "R", ",", "phi", "=", "az", ")", ")", "sigmaT1", "=", "nu", ".", "sqrt", "(", "self", ".", "_initdf", ".", "_estimateSigmaT2", "(", "R", ",", "phi", "=", "az", ")", ")", "meanvR", "=", "self", ".", "_initdf", ".", "_estimatemeanvR", "(", "R", ",", "phi", "=", "az", ")", "meanvT", "=", "self", ".", "_initdf", ".", "_estimatemeanvT", "(", "R", ",", "phi", "=", "az", ")", "else", ":", "warnings", ".", "warn", "(", "\"No '_estimateSigmaR2' etc. 
functions found for initdf in evolveddf; thus using potentially slow sigmaR2 etc functions\"", ",", "galpyWarning", ")", "sigmaR1", "=", "nu", ".", "sqrt", "(", "self", ".", "_initdf", ".", "sigmaR2", "(", "R", ",", "phi", "=", "az", ",", "use_physical", "=", "False", ")", ")", "sigmaT1", "=", "nu", ".", "sqrt", "(", "self", ".", "_initdf", ".", "sigmaT2", "(", "R", ",", "phi", "=", "az", ",", "use_physical", "=", "False", ")", ")", "meanvR", "=", "self", ".", "_initdf", ".", "meanvR", "(", "R", ",", "phi", "=", "az", ",", "use_physical", "=", "False", ")", "meanvT", "=", "self", ".", "_initdf", ".", "meanvT", "(", "R", ",", "phi", "=", "az", ",", "use_physical", "=", "False", ")", "if", "_PROFILE", ":", "#pragma: no cover", "setup_time", "=", "(", "time_module", ".", "time", "(", ")", "-", "start", ")", "if", "not", "grid", "is", "None", "and", "isinstance", "(", "grid", ",", "bool", ")", "and", "grid", ":", "if", "not", "hierarchgrid", ":", "if", "_PROFILE", ":", "#pragma: no cover", "start", "=", "time_module", ".", "time", "(", ")", "grido", "=", "self", ".", "_buildvgrid", "(", "R", ",", "az", ",", "nsigma", ",", "t", ",", "sigmaR1", ",", "sigmaT1", ",", "meanvR", ",", "meanvT", ",", "gridpoints", ",", "print_progress", ",", "integrate_method", ",", "deriv", ")", "if", "_PROFILE", ":", "#pragma: no cover", "grid_time", "=", "(", "time_module", ".", "time", "(", ")", "-", "start", ")", "print", "(", "setup_time", "/", "(", "setup_time", "+", "grid_time", ")", ",", "grid_time", "/", "(", "setup_time", "+", "grid_time", ")", ",", "setup_time", "+", "grid_time", ")", "if", "returnGrid", ":", "return", "(", "self", ".", "_vmomentsurfacemassGrid", "(", "n", ",", "m", ",", "grido", ")", ",", "grido", ")", "else", ":", "return", "self", ".", "_vmomentsurfacemassGrid", "(", "n", ",", "m", ",", "grido", ")", "else", ":", "#hierarchical grid", "grido", "=", "evolveddiskdfHierarchicalGrid", "(", "self", ",", "R", ",", "az", ",", "nsigma", ",", "t", ",", "sigmaR1", ",", "sigmaT1", ",", "meanvR", ",", "meanvT", ",", "gridpoints", ",", "nlevels", ",", "deriv", ",", "print_progress", "=", "print_progress", ")", "if", "returnGrid", ":", "return", "(", "self", ".", "_vmomentsurfacemassHierarchicalGrid", "(", "n", ",", "m", ",", "grido", ")", ",", "grido", ")", "else", ":", "return", "self", ".", "_vmomentsurfacemassHierarchicalGrid", "(", "n", ",", "m", ",", "grido", ")", "#Calculate the initdf moment and then calculate the ratio", "initvmoment", "=", "self", ".", "_initdf", ".", "vmomentsurfacemass", "(", "R", ",", "n", ",", "m", ",", "nsigma", "=", "nsigma", ",", "phi", "=", "phi", ")", "if", "initvmoment", "==", "0.", ":", "initvmoment", "=", "1.", "norm", "=", "sigmaR1", "**", "(", "n", "+", "1", ")", "*", "sigmaT1", "**", "(", "m", "+", "1", ")", "*", "initvmoment", "if", "isinstance", "(", "t", ",", "(", "list", ",", "nu", ".", "ndarray", ")", ")", ":", "raise", "IOError", "(", "\"list of times is only supported with grid-based calculation\"", ")", "return", "dblquad", "(", "_vmomentsurfaceIntegrand", ",", "meanvT", "/", "sigmaT1", "-", "nsigma", ",", "meanvT", "/", "sigmaT1", "+", "nsigma", ",", "lambda", "x", ":", "meanvR", "/", "sigmaR1", "-", "nu", ".", "sqrt", "(", "nsigma", "**", "2.", "-", "(", "x", "-", "meanvT", "/", "sigmaT1", ")", "**", "2.", ")", ",", "lambda", "x", ":", "meanvR", "/", "sigmaR1", "+", "nu", ".", "sqrt", "(", "nsigma", "**", "2.", "-", "(", "x", "-", "meanvT", "/", "sigmaT1", ")", "**", "2.", ")", ",", "(", "R", ",", "az", ",", "self", ",", "n", ",", "m", ",", 
"sigmaR1", ",", "sigmaT1", ",", "t", ",", "initvmoment", ")", ",", "epsrel", "=", "epsrel", ",", "epsabs", "=", "epsabs", ")", "[", "0", "]", "*", "norm" ]
NAME:

   vmomentsurfacemass

PURPOSE:

   calculate an arbitrary moment of the velocity distribution
   at (R,phi) times the surface mass

INPUT:

   R - radius at which to calculate the moment (in natural units)
   phi= azimuth (rad unless deg=True)
   n - vR^n
   m - vT^m
   t= time at which to evaluate the DF (can be a list or ndarray;
      if this is the case, list needs to be in descending order and
      equally spaced)
   nsigma - number of sigma to integrate the velocities over (based
      on an estimate, so be generous, but not too generous)
   deg= azimuth is in degrees (default=False)
   epsrel, epsabs - scipy.integrate keywords (the integration
      calculates the ratio of this vmoment to that of the initial DF)
   grid= if set to True, build a grid and use that to evaluate
      integrals; if set to a grid-object (such as returned by this
      procedure), use this grid; if this was created for a list of
      times, moments are calculated for each time
   gridpoints= number of points to use for the grid in 1D (default=101)
   returnGrid= if True, return the grid object (default=False)
   hierarchgrid= if True, use a hierarchical grid (default=False)
   nlevels= number of hierarchical levels for the hierarchical grid
   print_progress= if True, print progress updates
   integrate_method= orbit.integrate method argument
   deriv= None, 'R', or 'phi': calculates derivative of the moment wrt
      R or phi **only with grid options**

OUTPUT:

   <vR^n vT^m x surface-mass> at R,phi (no support for units)

COMMENT:

   grid-based calculation is the only one that is heavily tested
   (although the test suite also tests the direct calculation)

HISTORY:

   2011-03-30 - Written - Bovy (NYU)
[ "NAME", ":" ]
python
train
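A hedged usage sketch for the record above; the evolveddiskdf instance edf is assumed to exist, and grid=True is the code path the docstring describes as well tested.

# Zeroth moment (n=0, m=0) gives the surface mass itself; reusing the
# returned grid makes further moments at the same (R, phi, t) cheap.
surfmass, grido = edf.vmomentsurfacemass(1., 0, 0, t=0., phi=0.,
                                         grid=True, gridpoints=101,
                                         returnGrid=True)
meanvr_times_surfmass = edf.vmomentsurfacemass(1., 1, 0, grid=grido)
mean_vr = meanvr_times_surfmass / surfmass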
saltstack/salt
salt/modules/pagerduty_util.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pagerduty_util.py#L371-L407
def resource_absent(resource, identifier_fields, profile='pagerduty', subdomain=None,
                    api_key=None, **kwargs):
    '''
    Generic resource.absent state method. Pagerduty state modules should be a
    thin wrapper over this method, with a custom diff function.

    This method calls delete_resource() and formats the result as a salt state
    return value.

    example:
        resource_absent("users", ["id", "name", "email"])
    '''
    ret = {'name': kwargs['name'],
           'changes': {},
           'result': None,
           'comment': ''}
    for k, v in kwargs.items():
        if k not in identifier_fields:
            continue
        result = delete_resource(resource,
                                 v,
                                 identifier_fields,
                                 profile=profile,
                                 subdomain=subdomain,
                                 api_key=api_key)
        if result is None:
            ret['result'] = True
            ret['comment'] = '{0} deleted'.format(v)
            return ret
        elif result is True:
            continue
        elif __opts__['test']:
            ret['comment'] = result
            return ret
        elif 'error' in result:
            ret['result'] = False
            ret['comment'] = result
            return ret
    return ret
[ "def", "resource_absent", "(", "resource", ",", "identifier_fields", ",", "profile", "=", "'pagerduty'", ",", "subdomain", "=", "None", ",", "api_key", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "kwargs", "[", "'name'", "]", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "if", "k", "not", "in", "identifier_fields", ":", "continue", "result", "=", "delete_resource", "(", "resource", ",", "v", ",", "identifier_fields", ",", "profile", "=", "profile", ",", "subdomain", "=", "subdomain", ",", "api_key", "=", "api_key", ")", "if", "result", "is", "None", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'{0} deleted'", ".", "format", "(", "v", ")", "return", "ret", "elif", "result", "is", "True", ":", "continue", "elif", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "result", "return", "ret", "elif", "'error'", "in", "result", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "result", "return", "ret", "return", "ret" ]
Generic resource.absent state method. Pagerduty state modules should be a
thin wrapper over this method, with a custom diff function.

This method calls delete_resource() and formats the result as a salt state
return value.

example:
    resource_absent("users", ["id", "name", "email"])
[ "Generic", "resource", ".", "absent", "state", "method", ".", "Pagerduty", "state", "modules", "should", "be", "a", "thin", "wrapper", "over", "this", "method", "with", "a", "custom", "diff", "function", "." ]
python
train
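As the docstring suggests, Pagerduty state modules wrap this helper thinly. A hypothetical sketch of such a wrapper (the state module name and the cross-call are illustrative, not from the repo):

# e.g. in a hypothetical salt/states/pagerduty_user.py
def absent(profile='pagerduty', subdomain=None, api_key=None, **kwargs):
    '''Ensure that the named PagerDuty user is absent.'''
    return __salt__['pagerduty_util.resource_absent'](
        'users', ['id', 'name', 'email'],
        profile=profile, subdomain=subdomain, api_key=api_key, **kwargs)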
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavwp.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavwp.py#L599-L604
def polygon(self):
    '''return a polygon for the fence'''
    points = []
    for fp in self.points[1:]:
        points.append((fp.lat, fp.lng))
    return points
[ "def", "polygon", "(", "self", ")", ":", "points", "=", "[", "]", "for", "fp", "in", "self", ".", "points", "[", "1", ":", "]", ":", "points", ".", "append", "(", "(", "fp", ".", "lat", ",", "fp", ".", "lng", ")", ")", "return", "points" ]
return a polygon for the fence
[ "return", "a", "polygon", "for", "the", "fence" ]
python
train
DarkEnergySurvey/ugali
ugali/scratch/simulation/survey_selection_function.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/scratch/simulation/survey_selection_function.py#L425-L449
def predict(self, lon, lat, **kwargs):
    """
    distance, abs_mag, r_physical
    """
    assert self.classifier is not None, 'ERROR'

    pred = np.zeros(len(lon))

    cut_geometry, flags_geometry = self.applyGeometry(lon, lat)

    x_test = []
    for key, operation in self.config['operation']['params_intrinsic']:
        assert operation.lower() in ['linear', 'log'], 'ERROR'
        if operation.lower() == 'linear':
            x_test.append(kwargs[key])
        else:
            x_test.append(np.log10(kwargs[key]))
    x_test = np.vstack(x_test).T

    #import pdb; pdb.set_trace()

    pred[cut_geometry] = self.classifier.predict_proba(x_test[cut_geometry])[:,1]

    self.validatePredict(pred, flags_geometry, lon, lat,
                         kwargs['r_physical'], kwargs['abs_mag'], kwargs['distance'])

    return pred, flags_geometry
[ "def", "predict", "(", "self", ",", "lon", ",", "lat", ",", "*", "*", "kwargs", ")", ":", "assert", "self", ".", "classifier", "is", "not", "None", ",", "'ERROR'", "pred", "=", "np", ".", "zeros", "(", "len", "(", "lon", ")", ")", "cut_geometry", ",", "flags_geometry", "=", "self", ".", "applyGeometry", "(", "lon", ",", "lat", ")", "x_test", "=", "[", "]", "for", "key", ",", "operation", "in", "self", ".", "config", "[", "'operation'", "]", "[", "'params_intrinsic'", "]", ":", "assert", "operation", ".", "lower", "(", ")", "in", "[", "'linear'", ",", "'log'", "]", ",", "'ERROR'", "if", "operation", ".", "lower", "(", ")", "==", "'linear'", ":", "x_test", ".", "append", "(", "kwargs", "[", "key", "]", ")", "else", ":", "x_test", ".", "append", "(", "np", ".", "log10", "(", "kwargs", "[", "key", "]", ")", ")", "x_test", "=", "np", ".", "vstack", "(", "x_test", ")", ".", "T", "#import pdb; pdb.set_trace()", "pred", "[", "cut_geometry", "]", "=", "self", ".", "classifier", ".", "predict_proba", "(", "x_test", "[", "cut_geometry", "]", ")", "[", ":", ",", "1", "]", "self", ".", "validatePredict", "(", "pred", ",", "flags_geometry", ",", "lon", ",", "lat", ",", "kwargs", "[", "'r_physical'", "]", ",", "kwargs", "[", "'abs_mag'", "]", ",", "kwargs", "[", "'distance'", "]", ")", "return", "pred", ",", "flags_geometry" ]
distance, abs_mag, r_physical
[ "distance", "abs_mag", "r_physical" ]
python
train
wavefrontHQ/python-client
wavefront_api_client/api/search_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/search_api.py#L4431-L4451
def search_user_entities(self, **kwargs):  # noqa: E501
    """Search over a customer's users  # noqa: E501

     # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.search_user_entities(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SortableSearchRequest body:
    :return: ResponseContainerPagedCustomerFacingUserObject
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.search_user_entities_with_http_info(**kwargs)  # noqa: E501
    else:
        (data) = self.search_user_entities_with_http_info(**kwargs)  # noqa: E501
        return data
[ "def", "search_user_entities", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "search_user_entities_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "search_user_entities_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Search over a customer's users  # noqa: E501

 # noqa: E501

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_entities(async_req=True)
>>> result = thread.get()

:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedCustomerFacingUserObject
         If the method is called asynchronously,
         returns the request thread.
[ "Search", "over", "a", "customer", "s", "users", "#", "noqa", ":", "E501" ]
python
train
raiden-network/raiden
raiden/network/rpc/client.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/rpc/client.py#L145-L151
def parity_discover_next_available_nonce(
        web3: Web3,
        address: AddressHex,
) -> Nonce:
    """Returns the next available nonce for `address`."""
    next_nonce_encoded = web3.manager.request_blocking('parity_nextNonce', [address])
    return Nonce(int(next_nonce_encoded, 16))
[ "def", "parity_discover_next_available_nonce", "(", "web3", ":", "Web3", ",", "address", ":", "AddressHex", ",", ")", "->", "Nonce", ":", "next_nonce_encoded", "=", "web3", ".", "manager", ".", "request_blocking", "(", "'parity_nextNonce'", ",", "[", "address", "]", ")", "return", "Nonce", "(", "int", "(", "next_nonce_encoded", ",", "16", ")", ")" ]
Returns the next available nonce for `address`.
[ "Returns", "the", "next", "available", "nonce", "for", "address", "." ]
python
train
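A usage sketch; the endpoint and address are placeholders. parity_nextNonce is used here because, unlike a plain transaction count, it is meant to account for transactions still queued in the node.

from web3 import HTTPProvider, Web3

web3 = Web3(HTTPProvider('http://localhost:8545'))
address = '0x' + '11' * 20  # placeholder 20-byte address
nonce = parity_discover_next_available_nonce(web3, address)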
mikicz/arca
arca/backend/vagrant.py
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/vagrant.py#L87-L106
def validate_configuration(self):
    """ Runs :meth:`arca.DockerBackend.validate_configuration` and checks extra:

    * ``box`` format
    * ``provider`` format
    * ``use_registry_name`` is set and ``registry_pull_only`` is not enabled.
    """
    super().validate_configuration()

    if self.use_registry_name is None:
        raise ArcaMisconfigured("Use registry name setting is required for VagrantBackend")

    if not re.match(r"^[a-z]+/[a-zA-Z0-9\-_]+$", self.box):
        raise ArcaMisconfigured("Provided Vagrant box is not valid")

    if not re.match(r"^[a-z_]+$", self.provider):
        raise ArcaMisconfigured("Provided Vagrant provider is not valid")

    if self.registry_pull_only:
        raise ArcaMisconfigured("Push must be enabled for VagrantBackend")
[ "def", "validate_configuration", "(", "self", ")", ":", "super", "(", ")", ".", "validate_configuration", "(", ")", "if", "self", ".", "use_registry_name", "is", "None", ":", "raise", "ArcaMisconfigured", "(", "\"Use registry name setting is required for VagrantBackend\"", ")", "if", "not", "re", ".", "match", "(", "r\"^[a-z]+/[a-zA-Z0-9\\-_]+$\"", ",", "self", ".", "box", ")", ":", "raise", "ArcaMisconfigured", "(", "\"Provided Vagrant box is not valid\"", ")", "if", "not", "re", ".", "match", "(", "r\"^[a-z_]+$\"", ",", "self", ".", "provider", ")", ":", "raise", "ArcaMisconfigured", "(", "\"Provided Vagrant provider is not valid\"", ")", "if", "self", ".", "registry_pull_only", ":", "raise", "ArcaMisconfigured", "(", "\"Push must be enabled for VagrantBackend\"", ")" ]
Runs :meth:`arca.DockerBackend.validate_configuration` and checks extra:

* ``box`` format
* ``provider`` format
* ``use_registry_name`` is set and ``registry_pull_only`` is not enabled.
[ "Runs", ":", "meth", ":", "arca", ".", "DockerBackend", ".", "validate_configuration", "and", "checks", "extra", ":" ]
python
train
adeel/timed
timed/client.py
https://github.com/adeel/timed/blob/9f85e004de491cd4863d31b09991a1e2591b1b66/timed/client.py#L52-L63
def start(project, logfile, time_format):
    "start tracking for <project>"

    records = read(logfile, time_format)

    if records and not records[-1][1][1]:
        print "error: there is a project already active"
        return

    write(server.start(project, records), logfile, time_format)

    print "starting work on %s" % colored(project, attrs=['bold'])
    print "  at %s" % colored(server.date_to_txt(now(), time_format), 'green')
[ "def", "start", "(", "project", ",", "logfile", ",", "time_format", ")", ":", "records", "=", "read", "(", "logfile", ",", "time_format", ")", "if", "records", "and", "not", "records", "[", "-", "1", "]", "[", "1", "]", "[", "1", "]", ":", "print", "\"error: there is a project already active\"", "return", "write", "(", "server", ".", "start", "(", "project", ",", "records", ")", ",", "logfile", ",", "time_format", ")", "print", "\"starting work on %s\"", "%", "colored", "(", "project", ",", "attrs", "=", "[", "'bold'", "]", ")", "print", "\" at %s\"", "%", "colored", "(", "server", ".", "date_to_txt", "(", "now", "(", ")", ",", "time_format", ")", ",", "'green'", ")" ]
start tracking for <project>
[ "start", "tracking", "for", "<project", ">" ]
python
train
gwastro/pycbc-glue
pycbc_glue/pipeline.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/pipeline.py#L3440-L3469
def invert(self):
    """
    Inverts the ScienceSegments in the class (i.e. set NOT). Returns the
    number of ScienceSegments after inversion.
    """
    # check for an empty list
    if len(self) == 0:
        # return a segment representing all time
        self.__sci_segs = ScienceSegment(tuple([0,0,1999999999,1999999999]))

    # go through the list checking for validity as we go
    outlist = []
    ostart = 0
    for seg in self:
        start = seg.start()
        stop = seg.end()
        if start < 0 or stop < start or start < ostart:
            raise SegmentError, "Invalid list"
        if start > 0:
            x = ScienceSegment(tuple([0,ostart,start,start-ostart]))
            outlist.append(x)
        ostart = stop

    if ostart < 1999999999:
        x = ScienceSegment(tuple([0,ostart,1999999999,1999999999-ostart]))
        outlist.append(x)

    self.__sci_segs = outlist
    return len(self)
[ "def", "invert", "(", "self", ")", ":", "# check for an empty list", "if", "len", "(", "self", ")", "==", "0", ":", "# return a segment representing all time", "self", ".", "__sci_segs", "=", "ScienceSegment", "(", "tuple", "(", "[", "0", ",", "0", ",", "1999999999", ",", "1999999999", "]", ")", ")", "# go through the list checking for validity as we go", "outlist", "=", "[", "]", "ostart", "=", "0", "for", "seg", "in", "self", ":", "start", "=", "seg", ".", "start", "(", ")", "stop", "=", "seg", ".", "end", "(", ")", "if", "start", "<", "0", "or", "stop", "<", "start", "or", "start", "<", "ostart", ":", "raise", "SegmentError", ",", "\"Invalid list\"", "if", "start", ">", "0", ":", "x", "=", "ScienceSegment", "(", "tuple", "(", "[", "0", ",", "ostart", ",", "start", ",", "start", "-", "ostart", "]", ")", ")", "outlist", ".", "append", "(", "x", ")", "ostart", "=", "stop", "if", "ostart", "<", "1999999999", ":", "x", "=", "ScienceSegment", "(", "tuple", "(", "[", "0", ",", "ostart", ",", "1999999999", ",", "1999999999", "-", "ostart", "]", ")", ")", "outlist", ".", "append", "(", "x", ")", "self", ".", "__sci_segs", "=", "outlist", "return", "len", "(", "self", ")" ]
Inverts the ScienceSegments in the class (i.e. set NOT). Returns the number of ScienceSegments after inversion.
[ "Inverts", "the", "ScienceSegments", "in", "the", "class", "(", "i", ".", "e", ".", "set", "NOT", ")", ".", "Returns", "the", "number", "of", "ScienceSegments", "after", "inversion", "." ]
python
train
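A stand-alone sketch of the same interval-complement logic over [0, 1999999999), using plain tuples instead of ScienceSegment (validation omitted):

def invert_intervals(segs, horizon=1999999999):
    # complement of a sorted list of half-open intervals within [0, horizon)
    out, ostart = [], 0
    for start, stop in segs:
        if start > 0:
            out.append((ostart, start))
        ostart = stop
    if ostart < horizon:
        out.append((ostart, horizon))
    return out

assert invert_intervals([(100, 200), (500, 800)]) == \
    [(0, 100), (200, 500), (800, 1999999999)]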
Feneric/doxypypy
doxypypy/doxypypy.py
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L616-L656
def visit_Assign(self, node, **kwargs):
    """
    Handles assignments within code.

    Variable assignments in Python are used to represent interface
    attributes in addition to basic variables.  If an assignment appears
    to be an attribute, it gets labeled as such for Doxygen.  If a
    variable name uses Python mangling or is just a bed lump, it is
    labeled as private for Doxygen.
    """
    lineNum = node.lineno - 1
    # Assignments have one Doxygen-significant special case:
    # interface attributes.
    match = AstWalker.__attributeRE.match(self.lines[lineNum])
    if match:
        self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
                              '{0}# @hideinitializer{2}{4}{2}'.format(
            match.group(1),
            match.group(2),
            linesep,
            match.group(3),
            self.lines[lineNum].rstrip()
        )
        if self.options.debug:
            stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
                                                        linesep))
    if isinstance(node.targets[0], Name):
        match = AstWalker.__indentRE.match(self.lines[lineNum])
        indentStr = match and match.group(1) or ''
        restrictionLevel = self._checkMemberName(node.targets[0].id)
        if restrictionLevel:
            self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
                                  '# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
                indentStr,
                node.targets[0].id,
                linesep,
                restrictionLevel,
                self.lines[lineNum].rstrip()
            )
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=kwargs['containingNodes'])
[ "def", "visit_Assign", "(", "self", ",", "node", ",", "*", "*", "kwargs", ")", ":", "lineNum", "=", "node", ".", "lineno", "-", "1", "# Assignments have one Doxygen-significant special case:", "# interface attributes.", "match", "=", "AstWalker", ".", "__attributeRE", ".", "match", "(", "self", ".", "lines", "[", "lineNum", "]", ")", "if", "match", ":", "self", ".", "lines", "[", "lineNum", "]", "=", "'{0}## @property {1}{2}{0}# {3}{2}'", "'{0}# @hideinitializer{2}{4}{2}'", ".", "format", "(", "match", ".", "group", "(", "1", ")", ",", "match", ".", "group", "(", "2", ")", ",", "linesep", ",", "match", ".", "group", "(", "3", ")", ",", "self", ".", "lines", "[", "lineNum", "]", ".", "rstrip", "(", ")", ")", "if", "self", ".", "options", ".", "debug", ":", "stderr", ".", "write", "(", "\"# Attribute {0.id}{1}\"", ".", "format", "(", "node", ".", "targets", "[", "0", "]", ",", "linesep", ")", ")", "if", "isinstance", "(", "node", ".", "targets", "[", "0", "]", ",", "Name", ")", ":", "match", "=", "AstWalker", ".", "__indentRE", ".", "match", "(", "self", ".", "lines", "[", "lineNum", "]", ")", "indentStr", "=", "match", "and", "match", ".", "group", "(", "1", ")", "or", "''", "restrictionLevel", "=", "self", ".", "_checkMemberName", "(", "node", ".", "targets", "[", "0", "]", ".", "id", ")", "if", "restrictionLevel", ":", "self", ".", "lines", "[", "lineNum", "]", "=", "'{0}## @var {1}{2}{0}'", "'# @hideinitializer{2}{0}# @{3}{2}{4}{2}'", ".", "format", "(", "indentStr", ",", "node", ".", "targets", "[", "0", "]", ".", "id", ",", "linesep", ",", "restrictionLevel", ",", "self", ".", "lines", "[", "lineNum", "]", ".", "rstrip", "(", ")", ")", "# Visit any contained nodes.", "self", ".", "generic_visit", "(", "node", ",", "containingNodes", "=", "kwargs", "[", "'containingNodes'", "]", ")" ]
Handles assignments within code.

Variable assignments in Python are used to represent interface
attributes in addition to basic variables.  If an assignment appears
to be an attribute, it gets labeled as such for Doxygen.  If a
variable name uses Python mangling or is just a bed lump, it is
labeled as private for Doxygen.
[ "Handles", "assignments", "within", "code", "." ]
python
train
spyder-ide/conda-manager
conda_manager/api/conda_api.py
https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/conda_api.py#L603-L621
def update(self, *pkgs, **kwargs):
    """Update package(s) (in an environment) by name."""
    cmd_list = ['update', '--json', '--yes']

    if not pkgs and not kwargs.get('all'):
        raise TypeError("Must specify at least one package to update, or "
                        "all=True.")

    cmd_list.extend(
        self._setup_install_commands_from_kwargs(
            kwargs,
            ('dry_run', 'no_deps', 'override_channels',
             'no_pin', 'force', 'all', 'use_index_cache', 'use_local',
             'alt_hint')))

    cmd_list.extend(pkgs)

    return self._call_and_parse(cmd_list,
                                abspath=kwargs.get('abspath', True))
[ "def", "update", "(", "self", ",", "*", "pkgs", ",", "*", "*", "kwargs", ")", ":", "cmd_list", "=", "[", "'update'", ",", "'--json'", ",", "'--yes'", "]", "if", "not", "pkgs", "and", "not", "kwargs", ".", "get", "(", "'all'", ")", ":", "raise", "TypeError", "(", "\"Must specify at least one package to update, or \"", "\"all=True.\"", ")", "cmd_list", ".", "extend", "(", "self", ".", "_setup_install_commands_from_kwargs", "(", "kwargs", ",", "(", "'dry_run'", ",", "'no_deps'", ",", "'override_channels'", ",", "'no_pin'", ",", "'force'", ",", "'all'", ",", "'use_index_cache'", ",", "'use_local'", ",", "'alt_hint'", ")", ")", ")", "cmd_list", ".", "extend", "(", "pkgs", ")", "return", "self", ".", "_call_and_parse", "(", "cmd_list", ",", "abspath", "=", "kwargs", ".", "get", "(", "'abspath'", ",", "True", ")", ")" ]
Update package(s) (in an environment) by name.
[ "Update", "package", "(", "s", ")", "(", "in", "an", "environment", ")", "by", "name", "." ]
python
train
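A hedged usage sketch; CondaAPI stands in for whatever class these methods live on in conda_api.py, and the return value is the project's asynchronous call wrapper rather than a plain dict:

api = CondaAPI()
# roughly `conda update --json --yes --dry-run numpy scipy`
worker = api.update('numpy', 'scipy', dry_run=True)
# or update everything in the environment
worker = api.update(all=True)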
elastic/apm-agent-python
elasticapm/processors.py
https://github.com/elastic/apm-agent-python/blob/2975663d7bd22282dc39336b2c37b37c12c7a774/elasticapm/processors.py#L174-L187
def sanitize_http_wsgi_env(client, event):
    """
    Sanitizes WSGI environment variables

    :param client: an ElasticAPM client
    :param event: a transaction or error event
    :return: The modified event
    """
    try:
        env = event["context"]["request"]["env"]
        event["context"]["request"]["env"] = varmap(_sanitize, env)
    except (KeyError, TypeError):
        pass
    return event
[ "def", "sanitize_http_wsgi_env", "(", "client", ",", "event", ")", ":", "try", ":", "env", "=", "event", "[", "\"context\"", "]", "[", "\"request\"", "]", "[", "\"env\"", "]", "event", "[", "\"context\"", "]", "[", "\"request\"", "]", "[", "\"env\"", "]", "=", "varmap", "(", "_sanitize", ",", "env", ")", "except", "(", "KeyError", ",", "TypeError", ")", ":", "pass", "return", "event" ]
Sanitizes WSGI environment variables

:param client: an ElasticAPM client
:param event: a transaction or error event
:return: The modified event
[ "Sanitizes", "WSGI", "environment", "variables" ]
python
train
rigetti/pyquil
pyquil/pyqvm.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/pyqvm.py#L502-L526
def execute(self, program: Program):
    """
    Execute a program on the QVM.

    Note that the QAM is stateful. Subsequent calls to :py:func:`execute` will not
    automatically reset the wavefunction or the classical RAM. If this is desired,
    consider starting your program with ``RESET``.

    :return: ``self`` to support method chaining.
    """
    # TODO: why are DEFGATEs not just included in the list of instructions?
    for dg in program.defined_gates:
        if dg.parameters is not None:
            raise NotImplementedError("PyQVM does not support parameterized DEFGATEs")
        self.defined_gates[dg.name] = dg.matrix

    # initialize program counter
    self.program = program
    self.program_counter = 0

    halted = len(program) == 0
    while not halted:
        halted = self.transition()

    return self
[ "def", "execute", "(", "self", ",", "program", ":", "Program", ")", ":", "# TODO: why are DEFGATEs not just included in the list of instructions?", "for", "dg", "in", "program", ".", "defined_gates", ":", "if", "dg", ".", "parameters", "is", "not", "None", ":", "raise", "NotImplementedError", "(", "\"PyQVM does not support parameterized DEFGATEs\"", ")", "self", ".", "defined_gates", "[", "dg", ".", "name", "]", "=", "dg", ".", "matrix", "# initialize program counter", "self", ".", "program", "=", "program", "self", ".", "program_counter", "=", "0", "halted", "=", "len", "(", "program", ")", "==", "0", "while", "not", "halted", ":", "halted", "=", "self", ".", "transition", "(", ")", "return", "self" ]
Execute a program on the QVM.

Note that the QAM is stateful. Subsequent calls to :py:func:`execute` will not
automatically reset the wavefunction or the classical RAM. If this is desired,
consider starting your program with ``RESET``.

:return: ``self`` to support method chaining.
[ "Execute", "a", "program", "on", "the", "QVM", "." ]
python
train
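A usage sketch with pyQuil (constructor details may differ between versions):

from pyquil import Program
from pyquil.gates import CNOT, H
from pyquil.pyqvm import PyQVM

qvm = PyQVM(n_qubits=2)
qvm.execute(Program(H(0), CNOT(0, 1)))  # returns self, so calls can be chained
# state persists: a second execute() continues from the current wavefunction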
googledatalab/pydatalab
datalab/bigquery/_api.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L465-L477
def table_delete(self, table_name):
    """Issues a request to delete a table.

    Args:
      table_name: the name of the table as a tuple of components.
    Returns:
      A parsed result object.
    Raises:
      Exception if there is an error performing the operation.
    """
    url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
    return datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,
                                      raw_response=True)
[ "def", "table_delete", "(", "self", ",", "table_name", ")", ":", "url", "=", "Api", ".", "_ENDPOINT", "+", "(", "Api", ".", "_TABLES_PATH", "%", "table_name", ")", "return", "datalab", ".", "utils", ".", "Http", ".", "request", "(", "url", ",", "method", "=", "'DELETE'", ",", "credentials", "=", "self", ".", "_credentials", ",", "raw_response", "=", "True", ")" ]
Issues a request to delete a table.

Args:
  table_name: the name of the table as a tuple of components.
Returns:
  A parsed result object.
Raises:
  Exception if there is an error performing the operation.
[ "Issues", "a", "request", "to", "delete", "a", "table", "." ]
python
train
pkkid/python-plexapi
plexapi/library.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/library.py#L868-L870
def searchAlbums(self, title, **kwargs):
    """ Search for an album. See :func:`~plexapi.library.LibrarySection.search()` for usage. """
    return self.search(libtype='photoalbum', title=title, **kwargs)
[ "def", "searchAlbums", "(", "self", ",", "title", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "search", "(", "libtype", "=", "'photoalbum'", ",", "title", "=", "title", ",", "*", "*", "kwargs", ")" ]
Search for an album. See :func:`~plexapi.library.LibrarySection.search()` for usage.
[ "Search", "for", "an", "album", ".", "See", ":", "func", ":", "~plexapi", ".", "library", ".", "LibrarySection", ".", "search", "()", "for", "usage", "." ]
python
train
widdowquinn/pyani
pyani/anim.py
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anim.py#L101-L143
def construct_nucmer_cmdline(
    fname1,
    fname2,
    outdir=".",
    nucmer_exe=pyani_config.NUCMER_DEFAULT,
    filter_exe=pyani_config.FILTER_DEFAULT,
    maxmatch=False,
):
    """Returns a tuple of NUCmer and delta-filter commands

    The split into a tuple was made necessary by changes to SGE/OGE.
    The delta-filter command must now be run as a dependency of the NUCmer
    command, and be wrapped in a Python script to capture STDOUT.

    NOTE: This command-line writes output data to a subdirectory of the
    passed outdir, called "nucmer_output".

    - fname1 - query FASTA filepath
    - fname2 - subject FASTA filepath
    - outdir - path to output directory
    - maxmatch - Boolean flag indicating whether to use NUCmer's -maxmatch
      option. If not, the -mum option is used instead
    """
    outsubdir = os.path.join(outdir, pyani_config.ALIGNDIR["ANIm"])
    outprefix = os.path.join(
        outsubdir,
        "%s_vs_%s"
        % (
            os.path.splitext(os.path.split(fname1)[-1])[0],
            os.path.splitext(os.path.split(fname2)[-1])[0],
        ),
    )
    if maxmatch:
        mode = "--maxmatch"
    else:
        mode = "--mum"
    nucmercmd = "{0} {1} -p {2} {3} {4}".format(
        nucmer_exe, mode, outprefix, fname1, fname2
    )
    filtercmd = "delta_filter_wrapper.py " + "{0} -1 {1} {2}".format(
        filter_exe, outprefix + ".delta", outprefix + ".filter"
    )
    return (nucmercmd, filtercmd)
[ "def", "construct_nucmer_cmdline", "(", "fname1", ",", "fname2", ",", "outdir", "=", "\".\"", ",", "nucmer_exe", "=", "pyani_config", ".", "NUCMER_DEFAULT", ",", "filter_exe", "=", "pyani_config", ".", "FILTER_DEFAULT", ",", "maxmatch", "=", "False", ",", ")", ":", "outsubdir", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "pyani_config", ".", "ALIGNDIR", "[", "\"ANIm\"", "]", ")", "outprefix", "=", "os", ".", "path", ".", "join", "(", "outsubdir", ",", "\"%s_vs_%s\"", "%", "(", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "split", "(", "fname1", ")", "[", "-", "1", "]", ")", "[", "0", "]", ",", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "split", "(", "fname2", ")", "[", "-", "1", "]", ")", "[", "0", "]", ",", ")", ",", ")", "if", "maxmatch", ":", "mode", "=", "\"--maxmatch\"", "else", ":", "mode", "=", "\"--mum\"", "nucmercmd", "=", "\"{0} {1} -p {2} {3} {4}\"", ".", "format", "(", "nucmer_exe", ",", "mode", ",", "outprefix", ",", "fname1", ",", "fname2", ")", "filtercmd", "=", "\"delta_filter_wrapper.py \"", "+", "\"{0} -1 {1} {2}\"", ".", "format", "(", "filter_exe", ",", "outprefix", "+", "\".delta\"", ",", "outprefix", "+", "\".filter\"", ")", "return", "(", "nucmercmd", ",", "filtercmd", ")" ]
Returns a tuple of NUCmer and delta-filter commands

The split into a tuple was made necessary by changes to SGE/OGE.
The delta-filter command must now be run as a dependency of the NUCmer
command, and be wrapped in a Python script to capture STDOUT.

NOTE: This command-line writes output data to a subdirectory of the
passed outdir, called "nucmer_output".

- fname1 - query FASTA filepath
- fname2 - subject FASTA filepath
- outdir - path to output directory
- maxmatch - Boolean flag indicating whether to use NUCmer's -maxmatch
  option. If not, the -mum option is used instead
[ "Returns", "a", "tuple", "of", "NUCmer", "and", "delta", "-", "filter", "commands" ]
python
train
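An illustrative call; assuming the executable defaults resolve to nucmer and delta-filter, the returned strings look roughly as in the comments (paths hypothetical):

nucmercmd, filtercmd = construct_nucmer_cmdline(
    "genomes/A.fna", "genomes/B.fna", outdir="output")
# nucmercmd ~ "nucmer --mum -p output/nucmer_output/A_vs_B genomes/A.fna genomes/B.fna"
# filtercmd ~ "delta_filter_wrapper.py delta-filter -1 output/nucmer_output/A_vs_B.delta
#              output/nucmer_output/A_vs_B.filter"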
django-danceschool/django-danceschool
danceschool/core/constants.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/constants.py#L15-L29
def getConstant(name):
    '''
    This is a convenience function that makes it easy to access the value of a
    preference/constant without needing to check if the django_dynamic_preferences
    app has been set up and without needing to load from that model directly.
    '''
    # We instantiate a manager for our global preferences
    if 'dynamic_preferences_globalpreferencemodel' in connection.introspection.table_names() and not isPreliminaryRun():
        params = global_preferences_registry.manager()
        try:
            return params.get(name)
        except NotFoundInRegistry as e:
            logger.error('Error in getting constant: %s' % e)
            return None
[ "def", "getConstant", "(", "name", ")", ":", "# We instantiate a manager for our global preferences", "if", "'dynamic_preferences_globalpreferencemodel'", "in", "connection", ".", "introspection", ".", "table_names", "(", ")", "and", "not", "isPreliminaryRun", "(", ")", ":", "params", "=", "global_preferences_registry", ".", "manager", "(", ")", "try", ":", "return", "params", ".", "get", "(", "name", ")", "except", "NotFoundInRegistry", "as", "e", ":", "logger", ".", "error", "(", "'Error in getting constant: %s'", "%", "e", ")", "return", "None" ]
This is a convenience function that makes it easy to access the value of a
preference/constant without needing to check if the django_dynamic_preferences
app has been set up and without needing to load from that model directly.
[ "This", "is", "a", "convenience", "function", "that", "makes", "it", "easy", "to", "access", "the", "value", "of", "a", "preference", "/", "constant", "without", "needing", "to", "check", "if", "the", "django_dynamic_preferences", "app", "has", "been", "set", "up", "and", "without", "needing", "to", "load", "from", "that", "model", "directly", "." ]
python
train
v1k45/python-qBittorrent
qbittorrent/client.py
https://github.com/v1k45/python-qBittorrent/blob/04f9482a022dcc78c56b0b9acb9ca455f855ae24/qbittorrent/client.py#L309-L330
def download_from_file(self, file_buffer, **kwargs):
    """
    Download torrent using a file.

    :param file_buffer: Single file() buffer or list of.
    :param save_path: Path to download the torrent.
    :param label: Label of the torrent(s).

    :return: Empty JSON data.
    """
    if isinstance(file_buffer, list):
        torrent_files = {}
        for i, f in enumerate(file_buffer):
            torrent_files.update({'torrents%s' % i: f})
    else:
        torrent_files = {'torrents': file_buffer}

    data = kwargs.copy()

    if data.get('save_path'):
        data.update({'savepath': data['save_path']})
    return self._post('command/upload', data=data, files=torrent_files)
[ "def", "download_from_file", "(", "self", ",", "file_buffer", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "file_buffer", ",", "list", ")", ":", "torrent_files", "=", "{", "}", "for", "i", ",", "f", "in", "enumerate", "(", "file_buffer", ")", ":", "torrent_files", ".", "update", "(", "{", "'torrents%s'", "%", "i", ":", "f", "}", ")", "else", ":", "torrent_files", "=", "{", "'torrents'", ":", "file_buffer", "}", "data", "=", "kwargs", ".", "copy", "(", ")", "if", "data", ".", "get", "(", "'save_path'", ")", ":", "data", ".", "update", "(", "{", "'savepath'", ":", "data", "[", "'save_path'", "]", "}", ")", "return", "self", ".", "_post", "(", "'command/upload'", ",", "data", "=", "data", ",", "files", "=", "torrent_files", ")" ]
Download torrent using a file.

:param file_buffer: Single file() buffer or list of.
:param save_path: Path to download the torrent.
:param label: Label of the torrent(s).

:return: Empty JSON data.
[ "Download", "torrent", "using", "a", "file", "." ]
python
train
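A usage sketch for the record above; host, credentials, and paths are placeholders:

from qbittorrent import Client

qb = Client('http://127.0.0.1:8080/')
qb.login('admin', 'adminadmin')
with open('example.torrent', 'rb') as f:
    qb.download_from_file(f, save_path='/downloads/', label='test')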
michaeljohnbarr/django-timezone-utils
timezone_utils/fields.py
https://github.com/michaeljohnbarr/django-timezone-utils/blob/61c8b50c59049cb7eccd4e3892f332f88b890f00/timezone_utils/fields.py#L349-L364
def _get_time_override(self):
    """
    Retrieves the datetime.time or None from the `time_override` attribute.
    """
    if callable(self.time_override):
        time_override = self.time_override()
    else:
        time_override = self.time_override

    if not isinstance(time_override, datetime_time):
        raise ValueError(
            'Invalid type. Must be a datetime.time instance.'
        )
    return time_override
[ "def", "_get_time_override", "(", "self", ")", ":", "if", "callable", "(", "self", ".", "time_override", ")", ":", "time_override", "=", "self", ".", "time_override", "(", ")", "else", ":", "time_override", "=", "self", ".", "time_override", "if", "not", "isinstance", "(", "time_override", ",", "datetime_time", ")", ":", "raise", "ValueError", "(", "'Invalid type. Must be a datetime.time instance.'", ")", "return", "time_override" ]
Retrieves the datetime.time or None from the `time_override` attribute.
[ "Retrieves", "the", "datetime", ".", "time", "or", "None", "from", "the", "time_override", "attribute", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xchartwidget/xchartwidgetitem.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchartwidget/xchartwidgetitem.py#L225-L234
def keyColor( self, key ):
    """
    Returns a color for the inputted key (used in pie charts).

    :param      key | <str>

    :return     <QColor>
    """
    self._keyColors.setdefault(nativestring(key), self.color())
    return self._keyColors[nativestring(key)]
[ "def", "keyColor", "(", "self", ",", "key", ")", ":", "self", ".", "_keyColors", ".", "setdefault", "(", "nativestring", "(", "key", ")", ",", "self", ".", "color", "(", ")", ")", "return", "self", ".", "_keyColors", "[", "nativestring", "(", "key", ")", "]" ]
Returns a color for the inputted key (used in pie charts).

:param      key | <str>

:return     <QColor>
[ "Returns", "a", "color", "for", "the", "inputed", "key", "(", "used", "in", "pie", "charts", ")", ".", ":", "param", "key", "|", "<str", ">", ":", "return", "<QColor", ">" ]
python
train
inveniosoftware/invenio-files-rest
invenio_files_rest/tasks.py
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/tasks.py#L256-L267
def remove_expired_multipartobjects():
    """Remove expired multipart objects."""
    delta = current_app.config['FILES_REST_MULTIPART_EXPIRES']
    expired_dt = datetime.utcnow() - delta

    file_ids = []
    for mp in MultipartObject.query_expired(expired_dt):
        file_ids.append(str(mp.file_id))
        mp.delete()
    for fid in file_ids:
        remove_file_data.delay(fid)
[ "def", "remove_expired_multipartobjects", "(", ")", ":", "delta", "=", "current_app", ".", "config", "[", "'FILES_REST_MULTIPART_EXPIRES'", "]", "expired_dt", "=", "datetime", ".", "utcnow", "(", ")", "-", "delta", "file_ids", "=", "[", "]", "for", "mp", "in", "MultipartObject", ".", "query_expired", "(", "expired_dt", ")", ":", "file_ids", ".", "append", "(", "str", "(", "mp", ".", "file_id", ")", ")", "mp", ".", "delete", "(", ")", "for", "fid", "in", "file_ids", ":", "remove_file_data", ".", "delay", "(", "fid", ")" ]
Remove expired multipart objects.
[ "Remove", "expired", "multipart", "objects", "." ]
python
train
opencobra/cobrapy
cobra/flux_analysis/gapfilling.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/flux_analysis/gapfilling.py#L164-L200
def add_switches_and_objective(self):
    """ Update gapfilling model with switches and the indicator objective. """
    constraints = list()
    big_m = max(max(abs(b) for b in r.bounds) for r in self.model.reactions)
    prob = self.model.problem
    for rxn in self.model.reactions:
        if not hasattr(rxn, 'gapfilling_type'):
            continue
        indicator = prob.Variable(
            name='indicator_{}'.format(rxn.id), lb=0, ub=1, type='binary')
        if rxn.id in self.penalties:
            indicator.cost = self.penalties[rxn.id]
        else:
            indicator.cost = self.penalties[rxn.gapfilling_type]
        indicator.rxn_id = rxn.id
        self.indicators.append(indicator)

        # if z = 1 v_i is allowed non-zero
        # v_i - Mz <= 0   and   v_i + Mz >= 0
        constraint_lb = prob.Constraint(
            rxn.flux_expression - big_m * indicator, ub=0,
            name='constraint_lb_{}'.format(rxn.id), sloppy=True)
        constraint_ub = prob.Constraint(
            rxn.flux_expression + big_m * indicator, lb=0,
            name='constraint_ub_{}'.format(rxn.id), sloppy=True)
        constraints.extend([constraint_lb, constraint_ub])

    self.model.add_cons_vars(self.indicators)
    self.model.add_cons_vars(constraints, sloppy=True)
    self.model.objective = prob.Objective(
        Zero, direction='min', sloppy=True)
    self.model.objective.set_linear_coefficients({
        i: 1 for i in self.indicators})
    self.update_costs()
[ "def", "add_switches_and_objective", "(", "self", ")", ":", "constraints", "=", "list", "(", ")", "big_m", "=", "max", "(", "max", "(", "abs", "(", "b", ")", "for", "b", "in", "r", ".", "bounds", ")", "for", "r", "in", "self", ".", "model", ".", "reactions", ")", "prob", "=", "self", ".", "model", ".", "problem", "for", "rxn", "in", "self", ".", "model", ".", "reactions", ":", "if", "not", "hasattr", "(", "rxn", ",", "'gapfilling_type'", ")", ":", "continue", "indicator", "=", "prob", ".", "Variable", "(", "name", "=", "'indicator_{}'", ".", "format", "(", "rxn", ".", "id", ")", ",", "lb", "=", "0", ",", "ub", "=", "1", ",", "type", "=", "'binary'", ")", "if", "rxn", ".", "id", "in", "self", ".", "penalties", ":", "indicator", ".", "cost", "=", "self", ".", "penalties", "[", "rxn", ".", "id", "]", "else", ":", "indicator", ".", "cost", "=", "self", ".", "penalties", "[", "rxn", ".", "gapfilling_type", "]", "indicator", ".", "rxn_id", "=", "rxn", ".", "id", "self", ".", "indicators", ".", "append", "(", "indicator", ")", "# if z = 1 v_i is allowed non-zero", "# v_i - Mz <= 0 and v_i + Mz >= 0", "constraint_lb", "=", "prob", ".", "Constraint", "(", "rxn", ".", "flux_expression", "-", "big_m", "*", "indicator", ",", "ub", "=", "0", ",", "name", "=", "'constraint_lb_{}'", ".", "format", "(", "rxn", ".", "id", ")", ",", "sloppy", "=", "True", ")", "constraint_ub", "=", "prob", ".", "Constraint", "(", "rxn", ".", "flux_expression", "+", "big_m", "*", "indicator", ",", "lb", "=", "0", ",", "name", "=", "'constraint_ub_{}'", ".", "format", "(", "rxn", ".", "id", ")", ",", "sloppy", "=", "True", ")", "constraints", ".", "extend", "(", "[", "constraint_lb", ",", "constraint_ub", "]", ")", "self", ".", "model", ".", "add_cons_vars", "(", "self", ".", "indicators", ")", "self", ".", "model", ".", "add_cons_vars", "(", "constraints", ",", "sloppy", "=", "True", ")", "self", ".", "model", ".", "objective", "=", "prob", ".", "Objective", "(", "Zero", ",", "direction", "=", "'min'", ",", "sloppy", "=", "True", ")", "self", ".", "model", ".", "objective", ".", "set_linear_coefficients", "(", "{", "i", ":", "1", "for", "i", "in", "self", ".", "indicators", "}", ")", "self", ".", "update_costs", "(", ")" ]
Update gapfilling model with switches and the indicator objective.
[ "Update", "gapfilling", "model", "with", "switches", "and", "the", "indicator", "objective", "." ]
python
valid
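Spelled out, the indicator construction in the record above is the standard big-M formulation: with M the largest bound magnitude, flux v_i, penalty c_i, and binary z_i per candidate reaction,

% z_i = 0 forces v_i = 0; z_i = 1 relaxes the pair to -M <= v_i <= M
\begin{aligned}
  v_i - M z_i &\le 0,\\
  v_i + M z_i &\ge 0,\\
  \text{minimize}\quad &\sum_i c_i z_i .
\end{aligned}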
alerta/python-alerta-client
alertaclient/commands/cmd_heartbeat.py
https://github.com/alerta/python-alerta-client/blob/7eb367b5fe87d5fc20b54dea8cddd7f09e251afa/alertaclient/commands/cmd_heartbeat.py#L17-L28
def cli(obj, origin, tags, timeout, customer, delete):
    """Send or delete a heartbeat."""
    client = obj['client']
    if delete:
        client.delete_heartbeat(delete)
    else:
        try:
            heartbeat = client.heartbeat(origin=origin, tags=tags, timeout=timeout, customer=customer)
        except Exception as e:
            click.echo('ERROR: {}'.format(e))
            sys.exit(1)
        click.echo(heartbeat.id)
[ "def", "cli", "(", "obj", ",", "origin", ",", "tags", ",", "timeout", ",", "customer", ",", "delete", ")", ":", "client", "=", "obj", "[", "'client'", "]", "if", "delete", ":", "client", ".", "delete_heartbeat", "(", "delete", ")", "else", ":", "try", ":", "heartbeat", "=", "client", ".", "heartbeat", "(", "origin", "=", "origin", ",", "tags", "=", "tags", ",", "timeout", "=", "timeout", ",", "customer", "=", "customer", ")", "except", "Exception", "as", "e", ":", "click", ".", "echo", "(", "'ERROR: {}'", ".", "format", "(", "e", ")", ")", "sys", ".", "exit", "(", "1", ")", "click", ".", "echo", "(", "heartbeat", ".", "id", ")" ]
Send or delete a heartbeat.
[ "Send", "or", "delete", "a", "heartbeat", "." ]
python
train
slightlynybbled/tk_tools
tk_tools/groups.py
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L71-L83
def remove_row(self, row_number: int=-1):
    """
    Removes a specified row of data

    :param row_number: the row to remove (defaults to the last row)
    :return: None
    """
    if len(self._rows) == 0:
        return

    row = self._rows.pop(row_number)
    for widget in row:
        widget.destroy()
[ "def", "remove_row", "(", "self", ",", "row_number", ":", "int", "=", "-", "1", ")", ":", "if", "len", "(", "self", ".", "_rows", ")", "==", "0", ":", "return", "row", "=", "self", ".", "_rows", ".", "pop", "(", "row_number", ")", "for", "widget", "in", "row", ":", "widget", ".", "destroy", "(", ")" ]
Removes a specified row of data

:param row_number: the row to remove (defaults to the last row)
:return: None
[ "Removes", "a", "specified", "row", "of", "data" ]
python
train
aparsons/threadfix_api
threadfix_api/threadfix.py
https://github.com/aparsons/threadfix_api/blob/76fd1bd26e9ac863636112cd30d733543807ff7d/threadfix_api/threadfix.py#L263-L332
def get_vulnerabilities(self, teams=None, applications=None, channel_types=None, start_date=None, end_date=None,
                        generic_severities=None, generic_vulnerabilities=None, number_merged=None,
                        number_vulnerabilities=None, parameter=None, path=None, show_open=None, show_closed=None,
                        show_defect_open=None, show_defect_closed=None, show_defect_present=None,
                        show_defect_not_present=None, show_false_positive=None, show_hidden=None):
    """
    Returns filtered list of vulnerabilities.
    :param teams: List of team ids.
    :param applications: List of application ids.
    :param channel_types: List of scanner names.
    :param start_date: Lower bound on scan dates.
    :param end_date: Upper bound on scan dates.
    :param generic_severities: List of generic severity values.
    :param generic_vulnerabilities: List of generic vulnerability ids.
    :param number_merged: Number of vulnerabilities merged from different scans.
    :param number_vulnerabilities: Number of vulnerabilities to return.
    :param parameter: Application input that the vulnerability affects.
    :param path: Path to the web page where the vulnerability was found.
    :param show_open: Flag to show all open vulnerabilities.
    :param show_closed: Flag to show all closed vulnerabilities.
    :param show_defect_open: Flag to show any vulnerabilities with open defects.
    :param show_defect_closed: Flag to show any vulnerabilities with closed defects.
    :param show_defect_present: Flag to show any vulnerabilities with a defect.
    :param show_defect_not_present: Flag to show any vulnerabilities without a defect.
    :param show_false_positive: Flag to show any false positives from vulnerabilities.
    :param show_hidden: Flag to show all hidden vulnerabilities.
    """
    params = {}

    # Build parameter list
    if teams:
        params.update(self._build_list_params('teams', 'id', teams))
    if applications:
        params.update(self._build_list_params('applications', 'id', applications))
    if channel_types:
        params.update(self._build_list_params('channelTypes', 'name', channel_types))
    if start_date:
        params['startDate'] = start_date
    if end_date:
        params['endDate'] = end_date
    if generic_severities:
        params.update(self._build_list_params('genericSeverities', 'intValue', generic_severities))
    if generic_vulnerabilities:
        params.update(self._build_list_params('genericVulnerabilities', 'id', generic_vulnerabilities))
    if number_merged:
        params['numberMerged'] = number_merged
    if number_vulnerabilities:
        params['numberVulnerabilities'] = number_vulnerabilities
    if parameter:
        params['parameter'] = parameter
    if path:
        params['path'] = path
    if show_open:
        params['showOpen'] = show_open
    if show_closed:
        params['showClosed'] = show_closed
    if show_defect_open:
        params['showDefectOpen'] = show_defect_open
    if show_defect_closed:
        params['showDefectClosed'] = show_defect_closed
    if show_defect_present:
        params['showDefectPresent'] = show_defect_present
    if show_defect_not_present:
        params['showDefectNotPresent'] = show_defect_not_present
    if show_false_positive:
        params['showFalsePositive'] = show_false_positive
    if show_hidden:
        params['showHidden'] = show_hidden

    return self._request('POST', 'rest/vulnerabilities', params)
[ "def", "get_vulnerabilities", "(", "self", ",", "teams", "=", "None", ",", "applications", "=", "None", ",", "channel_types", "=", "None", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "generic_severities", "=", "None", ",", "generic_vulnerabilities", "=", "None", ",", "number_merged", "=", "None", ",", "number_vulnerabilities", "=", "None", ",", "parameter", "=", "None", ",", "path", "=", "None", ",", "show_open", "=", "None", ",", "show_closed", "=", "None", ",", "show_defect_open", "=", "None", ",", "show_defect_closed", "=", "None", ",", "show_defect_present", "=", "None", ",", "show_defect_not_present", "=", "None", ",", "show_false_positive", "=", "None", ",", "show_hidden", "=", "None", ")", ":", "params", "=", "{", "}", "# Build parameter list", "if", "teams", ":", "params", ".", "update", "(", "self", ".", "_build_list_params", "(", "'teams'", ",", "'id'", ",", "teams", ")", ")", "if", "applications", ":", "params", ".", "update", "(", "self", ".", "_build_list_params", "(", "'applications'", ",", "'id'", ",", "applications", ")", ")", "if", "channel_types", ":", "params", ".", "update", "(", "self", ".", "_build_list_params", "(", "'channelTypes'", ",", "'name'", ",", "channel_types", ")", ")", "if", "start_date", ":", "params", "[", "'startDate'", "]", "=", "start_date", "if", "end_date", ":", "params", "[", "'endDate'", "]", "=", "end_date", "if", "generic_severities", ":", "params", ".", "update", "(", "self", ".", "_build_list_params", "(", "'genericSeverities'", ",", "'intValue'", ",", "generic_severities", ")", ")", "if", "generic_vulnerabilities", ":", "params", ".", "update", "(", "self", ".", "_build_list_params", "(", "'genericVulnerabilities'", ",", "'id'", ",", "generic_vulnerabilities", ")", ")", "if", "number_merged", ":", "params", "[", "'numberMerged'", "]", "=", "number_merged", "if", "number_vulnerabilities", ":", "params", "[", "'numberVulnerabilities'", "]", "=", "number_vulnerabilities", "if", "parameter", ":", "params", "[", "'parameter'", "]", "=", "parameter", "if", "path", ":", "params", "[", "'path'", "]", "=", "path", "if", "show_open", ":", "params", "[", "'showOpen'", "]", "=", "show_open", "if", "show_closed", ":", "params", "[", "'showClosed'", "]", "=", "show_closed", "if", "show_defect_open", ":", "params", "[", "'showDefectOpen'", "]", "=", "show_defect_open", "if", "show_defect_closed", ":", "params", "[", "'showDefectClosed'", "]", "=", "show_defect_closed", "if", "show_defect_present", ":", "params", "[", "'showDefectPresent'", "]", "=", "show_defect_present", "if", "show_defect_not_present", ":", "params", "[", "'showDefectNotPresent'", "]", "=", "show_defect_not_present", "if", "show_false_positive", ":", "params", "[", "'showFalsePositive'", "]", "=", "show_false_positive", "if", "show_hidden", ":", "params", "[", "'showHidden'", "]", "=", "show_hidden", "return", "self", ".", "_request", "(", "'POST'", ",", "'rest/vulnerabilities'", ",", "params", ")" ]
Returns filtered list of vulnerabilities. :param teams: List of team ids. :param applications: List of application ids. :param channel_types: List of scanner names. :param start_date: Lower bound on scan dates. :param end_date: Upper bound on scan dates. :param generic_severities: List of generic severity values. :param generic_vulnerabilities: List of generic vulnerability ids. :param number_merged: Number of vulnerabilities merged from different scans. :param number_vulnerabilities: Number of vulnerabilities to return. :param parameter: Application input that the vulnerability affects. :param path: Path to the web page where the vulnerability was found. :param show_open: Flag to show all open vulnerabilities. :param show_closed: Flag to show all closed vulnerabilities. :param show_defect_open: Flag to show any vulnerabilities with open defects. :param show_defect_closed: Flag to show any vulnerabilities with closed defects. :param show_defect_present: Flag to show any vulnerabilities with a defect. :param show_defect_not_present: Flag to show any vulnerabilities without a defect. :param show_false_positive: Flag to show any false positives from vulnerabilities. :param show_hidden: Flag to show all hidden vulnerabilities.
[ "Returns", "filtered", "list", "of", "vulnerabilities", ".", ":", "param", "teams", ":", "List", "of", "team", "ids", ".", ":", "param", "applications", ":", "List", "of", "application", "ids", ".", ":", "param", "channel_types", ":", "List", "of", "scanner", "names", ".", ":", "param", "start_date", ":", "Lower", "bound", "on", "scan", "dates", ".", ":", "param", "end_date", ":", "Upper", "bound", "on", "scan", "dates", ".", ":", "param", "generic_severities", ":", "List", "of", "generic", "severity", "values", ".", ":", "param", "generic_vulnerabilities", ":", "List", "of", "generic", "vulnerability", "ids", ".", ":", "param", "number_merged", ":", "Number", "of", "vulnerabilities", "merged", "from", "different", "scans", ".", ":", "param", "number_vulnerabilities", ":", "Number", "of", "vulnerabilities", "to", "return", ".", ":", "param", "parameter", ":", "Application", "input", "that", "the", "vulnerability", "affects", ".", ":", "param", "path", ":", "Path", "to", "the", "web", "page", "where", "the", "vulnerability", "was", "found", ".", ":", "param", "show_open", ":", "Flag", "to", "show", "all", "open", "vulnerabilities", ".", ":", "param", "show_closed", ":", "Flag", "to", "show", "all", "closed", "vulnerabilities", ".", ":", "param", "show_defect_open", ":", "Flag", "to", "show", "any", "vulnerabilities", "with", "open", "defects", ".", ":", "param", "show_defect_closed", ":", "Flag", "to", "show", "any", "vulnerabilities", "with", "closed", "defects", ".", ":", "param", "show_defect_present", ":", "Flag", "to", "show", "any", "vulnerabilities", "with", "a", "defect", ".", ":", "param", "show_defect_not_present", ":", "Flag", "to", "show", "any", "vulnerabilities", "without", "a", "defect", ".", ":", "param", "show_false_positive", ":", "Flag", "to", "show", "any", "false", "positives", "from", "vulnerabilities", ".", ":", "param", "show_hidden", ":", "Flag", "to", "show", "all", "hidden", "vulnerabilities", "." ]
python
train
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L1126-L1128
def p_expr_require_once(p): 'expr : REQUIRE_ONCE expr' p[0] = ast.Require(p[2], True, lineno=p.lineno(1))
[ "def", "p_expr_require_once", "(", "p", ")", ":", "p", "[", "0", "]", "=", "ast", ".", "Require", "(", "p", "[", "2", "]", ",", "True", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")" ]
expr : REQUIRE_ONCE expr
[ "expr", ":", "REQUIRE_ONCE", "expr" ]
python
train
apache/spark
python/pyspark/sql/streaming.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L546-L572
def text(self, path, wholetext=False, lineSep=None):
        """
        Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a
        string column named "value", and followed by partitioned columns if there
        are any.
        The text files must be encoded as UTF-8.

        By default, each line in the text file is a new row in the resulting DataFrame.

        .. note:: Evolving.

        :param path: string for the input path.
        :param wholetext: if true, read each file from input path(s) as a single row.
        :param lineSep: defines the line separator that should be used for parsing. If None is
                        set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.

        >>> text_sdf = spark.readStream.text(tempfile.mkdtemp())
        >>> text_sdf.isStreaming
        True
        >>> "value" in str(text_sdf.schema)
        True
        """
        self._set_opts(wholetext=wholetext, lineSep=lineSep)
        if isinstance(path, basestring):
            return self._df(self._jreader.text(path))
        else:
            raise TypeError("path can be only a single string")
[ "def", "text", "(", "self", ",", "path", ",", "wholetext", "=", "False", ",", "lineSep", "=", "None", ")", ":", "self", ".", "_set_opts", "(", "wholetext", "=", "wholetext", ",", "lineSep", "=", "lineSep", ")", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "text", "(", "path", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path can be only a single string\"", ")" ]
Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8.

By default, each line in the text file is a new row in the resulting DataFrame.

.. note:: Evolving.

:param path: string for the input path. :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.

>>> text_sdf = spark.readStream.text(tempfile.mkdtemp()) >>> text_sdf.isStreaming True >>> "value" in str(text_sdf.schema) True
[ "Loads", "a", "text", "file", "stream", "and", "returns", "a", ":", "class", ":", "DataFrame", "whose", "schema", "starts", "with", "a", "string", "column", "named", "value", "and", "followed", "by", "partitioned", "columns", "if", "there", "are", "any", ".", "The", "text", "files", "must", "be", "encoded", "as", "UTF", "-", "8", "." ]
python
train
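A minimal usage sketch for the streaming text reader above; it assumes an active SparkSession bound to `spark` and a writable temporary directory, mirroring the doctest in the docstring.

import tempfile

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("text-stream-demo").getOrCreate()

# Every line of each file that lands in the directory becomes one row
# in the streaming DataFrame's single "value" string column.
text_sdf = spark.readStream.text(tempfile.mkdtemp())
assert text_sdf.isStreaming
assert "value" in str(text_sdf.schema)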
evhub/coconut
coconut/exceptions.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/exceptions.py#L121-L139
def message(self, message, source, point, ln): """Creates a SyntaxError-like message.""" if message is None: message = "parsing failed" if ln is not None: message += " (line " + str(ln) + ")" if source: if point is None: message += "\n" + " " * taberrfmt + clean(source) else: part = clean(source.splitlines()[lineno(point, source) - 1], False).lstrip() point -= len(source) - len(part) # adjust all points based on lstrip part = part.rstrip() # adjust only points that are too large based on rstrip message += "\n" + " " * taberrfmt + part if point > 0: if point >= len(part): point = len(part) - 1 message += "\n" + " " * (taberrfmt + point) + "^" return message
[ "def", "message", "(", "self", ",", "message", ",", "source", ",", "point", ",", "ln", ")", ":", "if", "message", "is", "None", ":", "message", "=", "\"parsing failed\"", "if", "ln", "is", "not", "None", ":", "message", "+=", "\" (line \"", "+", "str", "(", "ln", ")", "+", "\")\"", "if", "source", ":", "if", "point", "is", "None", ":", "message", "+=", "\"\\n\"", "+", "\" \"", "*", "taberrfmt", "+", "clean", "(", "source", ")", "else", ":", "part", "=", "clean", "(", "source", ".", "splitlines", "(", ")", "[", "lineno", "(", "point", ",", "source", ")", "-", "1", "]", ",", "False", ")", ".", "lstrip", "(", ")", "point", "-=", "len", "(", "source", ")", "-", "len", "(", "part", ")", "# adjust all points based on lstrip", "part", "=", "part", ".", "rstrip", "(", ")", "# adjust only points that are too large based on rstrip", "message", "+=", "\"\\n\"", "+", "\" \"", "*", "taberrfmt", "+", "part", "if", "point", ">", "0", ":", "if", "point", ">=", "len", "(", "part", ")", ":", "point", "=", "len", "(", "part", ")", "-", "1", "message", "+=", "\"\\n\"", "+", "\" \"", "*", "(", "taberrfmt", "+", "point", ")", "+", "\"^\"", "return", "message" ]
Creates a SyntaxError-like message.
[ "Creates", "a", "SyntaxError", "-", "like", "message", "." ]
python
train
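A self-contained sketch of the caret-pointer idea used by `message` above; the fixed 2-space indent is an assumed stand-in for Coconut's `taberrfmt` constant.

TABERRFMT = 2  # assumed indent width, not Coconut's actual setting

def point_at(source, point):
    """Return `source` plus a second line with '^' under column `point`."""
    part = source.rstrip()
    point = min(max(point, 0), len(part) - 1)  # clamp overlong points, as message() does
    return (" " * TABERRFMT + part + "\n" +
            " " * (TABERRFMT + point) + "^")

print(point_at("x = 1 +", 6))  # caret lands under the dangling '+'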
Jayin/ETipsService
service/wyulibrary.py
https://github.com/Jayin/ETipsService/blob/1a42612a5e5d11bec0ec1a26c99dec6fe216fca4/service/wyulibrary.py#L51-L89
def search_book(self, anywords, page=1):
        """
        Search for books
        :param anywords: search keywords
        :param page: page number
        :return: list of books
        """
        result = []
        html = self.__search_book_html(anywords, page)
        soup = BeautifulSoup(html)
        tds = soup.select(selector='tbody')[0].select('td')
        cursor = 1
        while cursor < len(tds) / 9:
            s = (cursor - 1) * 9
            num = tds[s].get_text()
            ctrlno = tds[s].input.attrs['value']
            name = tds[s + 1].get_text()
            author = tds[s + 2].get_text()
            press = tds[s + 3].get_text()
            press_time = tds[s + 4].get_text()
            index_num = tds[s + 5].get_text()
            total = tds[s + 6].get_text()
            left = tds[s + 7].get_text()
            addtion = tds[s + 8].get_text().strip('\r\n')  # 相关资源
            book = {
                'num': num,  # 序号
                'ctrlno': ctrlno,  # 图书馆系统控制号(在图书馆的唯一编号)
                'name': name,  # 名称
                'author': author,  # 作者
                'press': press,  # 出版社
                'press_time': press_time,  # 出版时间
                'index_num': index_num,  # 索取号
                'total': total,  # 馆藏
                'left': left,  # 剩余
            }
            result.append(book)
            cursor += 1
        return result
[ "def", "search_book", "(", "self", ",", "anywords", ",", "page", "=", "1", ")", ":", "result", "=", "[", "]", "html", "=", "self", ".", "__search_book_html", "(", "anywords", ",", "page", ")", "soup", "=", "BeautifulSoup", "(", "html", ")", "tds", "=", "soup", ".", "select", "(", "selector", "=", "'tbody'", ")", "[", "0", "]", ".", "select", "(", "'td'", ")", "cursor", "=", "1", "while", "cursor", "<", "len", "(", "tds", ")", "/", "9", ":", "s", "=", "(", "cursor", "-", "1", ")", "*", "9", "num", "=", "tds", "[", "s", "]", ".", "get_text", "(", ")", "ctrlno", "=", "tds", "[", "s", "]", ".", "input", ".", "attrs", "[", "'value'", "]", "name", "=", "tds", "[", "s", "+", "1", "]", ".", "get_text", "(", ")", "author", "=", "tds", "[", "s", "+", "2", "]", ".", "get_text", "(", ")", "press", "=", "tds", "[", "s", "+", "3", "]", ".", "get_text", "(", ")", "press_time", "=", "tds", "[", "s", "+", "4", "]", ".", "get_text", "(", ")", "index_num", "=", "tds", "[", "s", "+", "5", "]", ".", "get_text", "(", ")", "total", "=", "tds", "[", "s", "+", "6", "]", ".", "get_text", "(", ")", "left", "=", "tds", "[", "s", "+", "7", "]", ".", "get_text", "(", ")", "addtion", "=", "tds", "[", "s", "+", "8", "]", ".", "get_text", "(", ")", ".", "strip", "(", "'\\r\\n'", ")", "# 相关资源", "book", "=", "{", "'num'", ":", "num", ",", "# 序号", "'ctrlno'", ":", "ctrlno", ",", "# 图书馆系统控制号(在图书馆的唯一编号)", "'name'", ":", "name", ",", "# 名称", "'author'", ":", "author", ",", "# 作者", "'press'", ":", "press", ",", "# 出版社", "'press_time'", ":", "press_time", ",", "# 出版时间", "'index_num'", ":", "index_num", ",", "# 索取号", "'total'", ":", "total", ",", "# 馆藏", "'left'", ":", "left", ",", "# 剩余", "}", "result", ".", "append", "(", "book", ")", "cursor", "+=", "1", "return", "result" ]
Search for books
:param anywords: search keywords
:param page: page number
:return: list of books
[ "检索图书", ":", "param", "anywords", ":", "检索关键字", ":", "param", "page", ":", "页码", ":", "return", ":", "图书列表" ]
python
train
OpenTreeOfLife/peyotl
peyotl/nexson_proxy.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_proxy.py#L52-L64
def tree_iter_nexson_proxy(nexson_proxy): """Iterates over NexsonTreeProxy objects in order determined by the nexson blob""" nexml_el = nexson_proxy._nexml_el tg_order = nexml_el['^ot:treesElementOrder'] tgd = nexml_el['treesById'] for tg_id in tg_order: tg = tgd[tg_id] tree_order = tg['^ot:treeElementOrder'] tbid = tg['treeById'] otus = tg['@otus'] for k in tree_order: v = tbid[k] yield nexson_proxy._create_tree_proxy(tree_id=k, tree=v, otus=otus)
[ "def", "tree_iter_nexson_proxy", "(", "nexson_proxy", ")", ":", "nexml_el", "=", "nexson_proxy", ".", "_nexml_el", "tg_order", "=", "nexml_el", "[", "'^ot:treesElementOrder'", "]", "tgd", "=", "nexml_el", "[", "'treesById'", "]", "for", "tg_id", "in", "tg_order", ":", "tg", "=", "tgd", "[", "tg_id", "]", "tree_order", "=", "tg", "[", "'^ot:treeElementOrder'", "]", "tbid", "=", "tg", "[", "'treeById'", "]", "otus", "=", "tg", "[", "'@otus'", "]", "for", "k", "in", "tree_order", ":", "v", "=", "tbid", "[", "k", "]", "yield", "nexson_proxy", ".", "_create_tree_proxy", "(", "tree_id", "=", "k", ",", "tree", "=", "v", ",", "otus", "=", "otus", ")" ]
Iterates over NexsonTreeProxy objects in order determined by the nexson blob
[ "Iterates", "over", "NexsonTreeProxy", "objects", "in", "order", "determined", "by", "the", "nexson", "blob" ]
python
train
mojaie/chorus
chorus/mcsdr.py
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/mcsdr.py#L117-L126
def node_desc(self, atoms): """default 9 bits descriptor 7 bits of atomic number (0-127) and 2 bits of pi electrons (0-3) """ a1 = self.mol.atom(atoms[0]) a2 = self.mol.atom(atoms[1]) a1t = a1.number << 2 | a1.pi a2t = a2.number << 2 | a2.pi pair = sorted((a1t, a2t)) return pair[0] << 9 | pair[1]
[ "def", "node_desc", "(", "self", ",", "atoms", ")", ":", "a1", "=", "self", ".", "mol", ".", "atom", "(", "atoms", "[", "0", "]", ")", "a2", "=", "self", ".", "mol", ".", "atom", "(", "atoms", "[", "1", "]", ")", "a1t", "=", "a1", ".", "number", "<<", "2", "|", "a1", ".", "pi", "a2t", "=", "a2", ".", "number", "<<", "2", "|", "a2", ".", "pi", "pair", "=", "sorted", "(", "(", "a1t", ",", "a2t", ")", ")", "return", "pair", "[", "0", "]", "<<", "9", "|", "pair", "[", "1", "]" ]
default 9 bits descriptor 7 bits of atomic number (0-127) and 2 bits of pi electrons (0-3)
[ "default", "9", "bits", "descriptor", "7", "bits", "of", "atomic", "number", "(", "0", "-", "127", ")", "and", "2", "bits", "of", "pi", "electrons", "(", "0", "-", "3", ")" ]
python
train
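A worked example of the pair descriptor computed by `node_desc` above, using carbon (atomic number 6, one pi electron) and oxygen (8, none) as assumed atom attributes.

a1t = 6 << 2 | 1          # 7 bits atomic number + 2 bits pi -> 25
a2t = 8 << 2 | 0          # -> 32
lo, hi = sorted((a1t, a2t))
desc = lo << 9 | hi       # smaller 9-bit code goes into the high bits
assert desc == 25 * 512 + 32 == 12832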
wummel/linkchecker
linkcheck/logger/text.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/text.py#L177-L181
def write_dltime (self, url_data): """Write url_data.dltime.""" self.write(self.part("dltime") + self.spaces("dltime")) self.writeln(_("%.3f seconds") % url_data.dltime, color=self.colordltime)
[ "def", "write_dltime", "(", "self", ",", "url_data", ")", ":", "self", ".", "write", "(", "self", ".", "part", "(", "\"dltime\"", ")", "+", "self", ".", "spaces", "(", "\"dltime\"", ")", ")", "self", ".", "writeln", "(", "_", "(", "\"%.3f seconds\"", ")", "%", "url_data", ".", "dltime", ",", "color", "=", "self", ".", "colordltime", ")" ]
Write url_data.dltime.
[ "Write", "url_data", ".", "dltime", "." ]
python
train
pycontribs/pyrax
pyrax/object_storage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L1295-L1306
def list_object_names(self, container, marker=None, limit=None, prefix=None,
            delimiter=None, end_marker=None, full_listing=False):
        """
        Return a list of the names of the objects in this container. You can
        use the marker, end_marker, and limit params to handle pagination, and
        the prefix and delimiter params to filter the objects returned. By
        default only the first 10,000 objects are returned; if you need to
        access more than that, set the 'full_listing' parameter to True.
        """
        return container.list_object_names(marker=marker, limit=limit,
                prefix=prefix, delimiter=delimiter, end_marker=end_marker,
                full_listing=full_listing)
[ "def", "list_object_names", "(", "self", ",", "container", ",", "marker", "=", "None", ",", "limit", "=", "None", ",", "prefix", "=", "None", ",", "delimiter", "=", "None", ",", "end_marker", "=", "None", ",", "full_listing", "=", "False", ")", ":", "return", "container", ".", "list_object_names", "(", "marker", "=", "marker", ",", "limit", "=", "limit", ",", "prefix", "=", "prefix", ",", "delimiter", "=", "delimiter", ",", "end_marker", "=", "end_marker", ",", "full_listing", "=", "full_listing", ")" ]
Return a list of the names of the objects in this container. You can use the marker, end_marker, and limit params to handle pagination, and the prefix and delimiter params to filter the objects returned. By default only the first 10,000 objects are returned; if you need to access more than that, set the 'full_listing' parameter to True.
[ "Return", "a", "list", "of", "then", "names", "of", "the", "objects", "in", "this", "container", ".", "You", "can", "use", "the", "marker", "end_marker", "and", "limit", "params", "to", "handle", "pagination", "and", "the", "prefix", "and", "delimiter", "params", "to", "filter", "the", "objects", "returned", ".", "By", "default", "only", "the", "first", "10", "000", "objects", "are", "returned", ";", "if", "you", "need", "to", "access", "more", "than", "that", "set", "the", "full_listing", "parameter", "to", "True", "." ]
python
train
boriel/zxbasic
symbols/boundlist.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/symbols/boundlist.py#L35-L49
def make_node(cls, node, *args): ''' Creates an array BOUND LIST. ''' if node is None: return cls.make_node(SymbolBOUNDLIST(), *args) if node.token != 'BOUNDLIST': return cls.make_node(None, node, *args) for arg in args: if arg is None: continue node.appendChild(arg) return node
[ "def", "make_node", "(", "cls", ",", "node", ",", "*", "args", ")", ":", "if", "node", "is", "None", ":", "return", "cls", ".", "make_node", "(", "SymbolBOUNDLIST", "(", ")", ",", "*", "args", ")", "if", "node", ".", "token", "!=", "'BOUNDLIST'", ":", "return", "cls", ".", "make_node", "(", "None", ",", "node", ",", "*", "args", ")", "for", "arg", "in", "args", ":", "if", "arg", "is", "None", ":", "continue", "node", ".", "appendChild", "(", "arg", ")", "return", "node" ]
Creates an array BOUND LIST.
[ "Creates", "an", "array", "BOUND", "LIST", "." ]
python
train
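A generic sketch of the normalising pattern `make_node` uses above: recurse until the head argument is the right container type, then append the non-None children; plain lists stand in for the symbol classes here.

def make_list(node, *args):
    # None head: start a fresh container and retry with the same args
    if node is None:
        return make_list([], *args)
    # wrong head type: treat it as the first child instead
    if not isinstance(node, list):
        return make_list(None, node, *args)
    node.extend(a for a in args if a is not None)
    return node

print(make_list(None, 1, None, 2))  # -> [1, 2]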
ChrisBeaumont/soupy
soupy.py
https://github.com/ChrisBeaumont/soupy/blob/795f2f61f711f574d5218fc8a3375d02bda1104f/soupy.py#L629-L643
def dropwhile(self, func=None):
        """
        Return a new Collection with the first few items removed.

        Parameters:

            func : function(Node) -> Node

        Returns:

            A new Collection, discarding all items
            before the first item where bool(func(item)) == False
        """
        func = _make_callable(func)
        return Collection(dropwhile(func, self._items))
[ "def", "dropwhile", "(", "self", ",", "func", "=", "None", ")", ":", "func", "=", "_make_callable", "(", "func", ")", "return", "Collection", "(", "dropwhile", "(", "func", ",", "self", ".", "_items", ")", ")" ]
Return a new Collection with the first few items removed.

Parameters:

    func : function(Node) -> Node

Returns:

    A new Collection, discarding all items
    before the first item where bool(func(item)) == False
[ "Return", "a", "new", "Collection", "with", "the", "first", "few", "items", "removed", "." ]
python
test
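The Collection method above delegates to `itertools.dropwhile`, which discards items while the predicate is truthy and yields everything from the first falsy item on; a standalone demo:

from itertools import dropwhile

items = [1, 3, 5, 4, 7]
# 1, 3 and 5 are odd (predicate truthy) and dropped; 4 stops the
# dropping, so 7 is kept as well even though it is odd again.
print(list(dropwhile(lambda x: x % 2, items)))  # -> [4, 7]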
Julius2342/pyvlx
pyvlx/heartbeat.py
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/heartbeat.py#L30-L35
async def stop(self): """Stop heartbeat.""" self.stopped = True self.loop_event.set() # Waiting for shutdown of loop() await self.stopped_event.wait()
[ "async", "def", "stop", "(", "self", ")", ":", "self", ".", "stopped", "=", "True", "self", ".", "loop_event", ".", "set", "(", ")", "# Waiting for shutdown of loop()", "await", "self", ".", "stopped_event", ".", "wait", "(", ")" ]
Stop heartbeat.
[ "Stop", "heartbeat", "." ]
python
train
kurtbrose/pyjks
jks/jks.py
https://github.com/kurtbrose/pyjks/blob/1cbe7f060e2ad076b6462f3273f11d635771ea3d/jks/jks.py#L229-L253
def encrypt(self, key_password): """ Encrypts the private key, so that it can be saved to a keystore. This will make it necessary to decrypt it again if it is going to be used later. Has no effect if the entry is already encrypted. :param str key_password: The password to encrypt the entry with. """ if not self.is_decrypted(): return encrypted_private_key = sun_crypto.jks_pkey_encrypt(self.pkey_pkcs8, key_password) a = AlgorithmIdentifier() a.setComponentByName('algorithm', sun_crypto.SUN_JKS_ALGO_ID) a.setComponentByName('parameters', '\x05\x00') epki = rfc5208.EncryptedPrivateKeyInfo() epki.setComponentByName('encryptionAlgorithm',a) epki.setComponentByName('encryptedData', encrypted_private_key) self._encrypted = encoder.encode(epki) self._pkey = None self._pkey_pkcs8 = None self._algorithm_oid = None
[ "def", "encrypt", "(", "self", ",", "key_password", ")", ":", "if", "not", "self", ".", "is_decrypted", "(", ")", ":", "return", "encrypted_private_key", "=", "sun_crypto", ".", "jks_pkey_encrypt", "(", "self", ".", "pkey_pkcs8", ",", "key_password", ")", "a", "=", "AlgorithmIdentifier", "(", ")", "a", ".", "setComponentByName", "(", "'algorithm'", ",", "sun_crypto", ".", "SUN_JKS_ALGO_ID", ")", "a", ".", "setComponentByName", "(", "'parameters'", ",", "'\\x05\\x00'", ")", "epki", "=", "rfc5208", ".", "EncryptedPrivateKeyInfo", "(", ")", "epki", ".", "setComponentByName", "(", "'encryptionAlgorithm'", ",", "a", ")", "epki", ".", "setComponentByName", "(", "'encryptedData'", ",", "encrypted_private_key", ")", "self", ".", "_encrypted", "=", "encoder", ".", "encode", "(", "epki", ")", "self", ".", "_pkey", "=", "None", "self", ".", "_pkey_pkcs8", "=", "None", "self", ".", "_algorithm_oid", "=", "None" ]
Encrypts the private key, so that it can be saved to a keystore. This will make it necessary to decrypt it again if it is going to be used later. Has no effect if the entry is already encrypted. :param str key_password: The password to encrypt the entry with.
[ "Encrypts", "the", "private", "key", "so", "that", "it", "can", "be", "saved", "to", "a", "keystore", "." ]
python
train
prometheus/client_python
prometheus_client/metrics.py
https://github.com/prometheus/client_python/blob/31f5557e2e84ca4ffa9a03abf6e3f4d0c8b8c3eb/prometheus_client/metrics.py#L160-L169
def remove(self, *labelvalues):
    """Remove the given labelset from the metric."""
    if not self._labelnames:
        raise ValueError('No label names were set when constructing %s' % self)

    if len(labelvalues) != len(self._labelnames):
        raise ValueError('Incorrect label count (expected %d, got %s)' % (len(self._labelnames), labelvalues))

    labelvalues = tuple(unicode(l) for l in labelvalues)
    with self._lock:
        del self._metrics[labelvalues]
[ "def", "remove", "(", "self", ",", "*", "labelvalues", ")", ":", "if", "not", "self", ".", "_labelnames", ":", "raise", "ValueError", "(", "'No label names were set when constructing %s'", "%", "self", ")", "if", "len", "(", "labelvalues", ")", "!=", "len", "(", "self", ".", "_labelnames", ")", ":", "raise", "ValueError", "(", "'Incorrect label count (expected %d, got %s)'", "%", "(", "len", "(", "self", ".", "_labelnames", ")", ",", "labelvalues", ")", ")", "labelvalues", "=", "tuple", "(", "unicode", "(", "l", ")", "for", "l", "in", "labelvalues", ")", "with", "self", ".", "_lock", ":", "del", "self", ".", "_metrics", "[", "labelvalues", "]" ]
Remove the given labelset from the metric.
[ "Remove", "the", "given", "labelset", "from", "the", "metric", "." ]
python
train
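A short usage sketch for removing a labelset with the public prometheus_client API; the metric and label names here are illustrative only.

from prometheus_client import Counter

c = Counter('http_requests_total', 'Total HTTP requests', ['method'])
c.labels('GET').inc()  # creates the ('GET',) child and increments it
c.remove('GET')        # deletes that labelset from the metric again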
xeroc/python-graphenelib
graphenecommon/chain.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenecommon/chain.py#L144-L227
def finalizeOp(self, ops, account, permission, **kwargs):
        """ This method obtains the required private keys if present in
            the wallet, finalizes the transaction, signs it and
            broadcasts it

            :param operation ops: The operation (or list of operations) to
                broadcast
            :param operation account: The account that authorizes the
                operation
            :param string permission: The required permission for
                signing (active, owner, posting)
            :param object append_to: This allows providing an instance of
                ProposalsBuilder (see :func:`new_proposal`) or
                TransactionBuilder (see :func:`new_tx()`) to specify
                where to put a specific operation.

            .. note:: ``append_to`` is exposed to every method used in
                this class

            .. note:: If ``ops`` is a list of operation, they all need to be
                signable by the same key! Thus, you cannot combine ops
                that require active permission with ops that require
                posting permission. Neither can you use different
                accounts for different operations!

            .. note:: This uses ``txbuffer`` as instance of
                :class:`transactionbuilder.TransactionBuilder`.
                You may want to use your own txbuffer
        """
        if "append_to" in kwargs and kwargs["append_to"]:
            if self.proposer:
                log.warning(
                    "You may not use append_to and self.proposer at "
                    "the same time. Append new_proposal(..) instead"
                )
            # Append to the append_to and return
            append_to = kwargs["append_to"]
            parent = append_to.get_parent()
            assert isinstance(
                append_to, (self.transactionbuilder_class, self.proposalbuilder_class)
            )
            append_to.appendOps(ops)
            # Add the signer to the buffer so we sign the tx properly
            if isinstance(append_to, self.proposalbuilder_class):
                parent.appendSigner(append_to.proposer, permission)
            else:
                parent.appendSigner(account, permission)
            # This returns as we used append_to, it does NOT broadcast, or sign
            return append_to.get_parent()
        elif self.proposer:
            # Legacy proposer mode!
            proposal = self.proposal()
            proposal.set_proposer(self.proposer)
            proposal.set_expiration(self.proposal_expiration)
            proposal.set_review(self.proposal_review)
            proposal.appendOps(ops)
            # Go forward to see what the other options do ...
        else:
            # Append tot he default buffer
            self.txbuffer.appendOps(ops)

        # The API that obtains the fee only allows to specify one particular
        # fee asset for all operations in that transaction even though the
        # blockchain itself could allow to pay multiple operations with
        # different fee assets.
        if "fee_asset" in kwargs and kwargs["fee_asset"]:
            self.txbuffer.set_fee_asset(kwargs["fee_asset"])

        # Add signing information, signer, sign and optionally broadcast
        if self.unsigned:
            # In case we don't want to sign anything
            self.txbuffer.addSigningInformation(account, permission)
            return self.txbuffer
        elif self.bundle:
            # In case we want to add more ops to the tx (bundle)
            self.txbuffer.appendSigner(account, permission)
            return self.txbuffer.json()
        else:
            # default behavior: sign + broadcast
            self.txbuffer.appendSigner(account, permission)
            self.txbuffer.sign()
            return self.txbuffer.broadcast()
[ "def", "finalizeOp", "(", "self", ",", "ops", ",", "account", ",", "permission", ",", "*", "*", "kwargs", ")", ":", "if", "\"append_to\"", "in", "kwargs", "and", "kwargs", "[", "\"append_to\"", "]", ":", "if", "self", ".", "proposer", ":", "log", ".", "warning", "(", "\"You may not use append_to and self.proposer at \"", "\"the same time. Append new_proposal(..) instead\"", ")", "# Append to the append_to and return", "append_to", "=", "kwargs", "[", "\"append_to\"", "]", "parent", "=", "append_to", ".", "get_parent", "(", ")", "assert", "isinstance", "(", "append_to", ",", "(", "self", ".", "transactionbuilder_class", ",", "self", ".", "proposalbuilder_class", ")", ")", "append_to", ".", "appendOps", "(", "ops", ")", "# Add the signer to the buffer so we sign the tx properly", "if", "isinstance", "(", "append_to", ",", "self", ".", "proposalbuilder_class", ")", ":", "parent", ".", "appendSigner", "(", "append_to", ".", "proposer", ",", "permission", ")", "else", ":", "parent", ".", "appendSigner", "(", "account", ",", "permission", ")", "# This returns as we used append_to, it does NOT broadcast, or sign", "return", "append_to", ".", "get_parent", "(", ")", "elif", "self", ".", "proposer", ":", "# Legacy proposer mode!", "proposal", "=", "self", ".", "proposal", "(", ")", "proposal", ".", "set_proposer", "(", "self", ".", "proposer", ")", "proposal", ".", "set_expiration", "(", "self", ".", "proposal_expiration", ")", "proposal", ".", "set_review", "(", "self", ".", "proposal_review", ")", "proposal", ".", "appendOps", "(", "ops", ")", "# Go forward to see what the other options do ...", "else", ":", "# Append tot he default buffer", "self", ".", "txbuffer", ".", "appendOps", "(", "ops", ")", "# The API that obtains the fee only allows to specify one particular", "# fee asset for all operations in that transaction even though the", "# blockchain itself could allow to pay multiple operations with", "# different fee assets.", "if", "\"fee_asset\"", "in", "kwargs", "and", "kwargs", "[", "\"fee_asset\"", "]", ":", "self", ".", "txbuffer", ".", "set_fee_asset", "(", "kwargs", "[", "\"fee_asset\"", "]", ")", "# Add signing information, signer, sign and optionally broadcast", "if", "self", ".", "unsigned", ":", "# In case we don't want to sign anything", "self", ".", "txbuffer", ".", "addSigningInformation", "(", "account", ",", "permission", ")", "return", "self", ".", "txbuffer", "elif", "self", ".", "bundle", ":", "# In case we want to add more ops to the tx (bundle)", "self", ".", "txbuffer", ".", "appendSigner", "(", "account", ",", "permission", ")", "return", "self", ".", "txbuffer", ".", "json", "(", ")", "else", ":", "# default behavior: sign + broadcast", "self", ".", "txbuffer", ".", "appendSigner", "(", "account", ",", "permission", ")", "self", ".", "txbuffer", ".", "sign", "(", ")", "return", "self", ".", "txbuffer", ".", "broadcast", "(", ")" ]
This method obtains the required private keys if present in
the wallet, finalizes the transaction, signs it and
broadcasts it

:param operation ops: The operation (or list of operations) to
    broadcast
:param operation account: The account that authorizes the
    operation
:param string permission: The required permission for
    signing (active, owner, posting)
:param object append_to: This allows providing an instance of
    ProposalsBuilder (see :func:`new_proposal`) or
    TransactionBuilder (see :func:`new_tx()`) to specify
    where to put a specific operation.

.. note:: ``append_to`` is exposed to every method used in
    this class

.. note:: If ``ops`` is a list of operation, they all need to be
    signable by the same key! Thus, you cannot combine ops
    that require active permission with ops that require
    posting permission. Neither can you use different
    accounts for different operations!

.. note:: This uses ``txbuffer`` as instance of
    :class:`transactionbuilder.TransactionBuilder`.
    You may want to use your own txbuffer
[ "This", "method", "obtains", "the", "required", "private", "keys", "if", "present", "in", "the", "wallet", "finalizes", "the", "transaction", "signs", "it", "and", "broadacasts", "it" ]
python
valid
ldomic/lintools
lintools/ligand_description.py
https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/ligand_description.py#L57-L66
def get_rotatable_bonds(self,mol): """Determines rotatable bonds in a ligand molecule Takes: * mol * - mol file in rdkit environment Output: * bonds * - tuples of atom ids """ RotatableBondSmarts = Chem.MolFromSmarts('[!$(*#*)&!D1]-&!@[!$(*#*)&!D1]') bonds = mol.GetSubstructMatches(RotatableBondSmarts,uniquify=1) return bonds
[ "def", "get_rotatable_bonds", "(", "self", ",", "mol", ")", ":", "RotatableBondSmarts", "=", "Chem", ".", "MolFromSmarts", "(", "'[!$(*#*)&!D1]-&!@[!$(*#*)&!D1]'", ")", "bonds", "=", "mol", ".", "GetSubstructMatches", "(", "RotatableBondSmarts", ",", "uniquify", "=", "1", ")", "return", "bonds" ]
Determines rotatable bonds in a ligand molecule Takes: * mol * - mol file in rdkit environment Output: * bonds * - tuples of atom ids
[ "Determines", "rotatable", "bonds", "in", "a", "ligand", "molecule", "Takes", ":", "*", "mol", "*", "-", "mol", "file", "in", "rdkit", "environment", "Output", ":", "*", "bonds", "*", "-", "tuples", "of", "atom", "ids" ]
python
train
Opentrons/opentrons
api/src/opentrons/util/linal.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/util/linal.py#L117-L133
def apply_transform(
        t: Union[List[List[float]], np.ndarray],
        pos: Tuple[float, float, float],
        with_offsets=True) -> Tuple[float, float, float]:
    """ Change of base using a transform matrix. Primarily used to render
    a point in space in a way that is more readable for the user.

    :param t: A transformation matrix from one 3D space [A] to another [B]
    :param pos: XYZ point in space A
    :param with_offsets: Whether to apply the transform as an affine transform
                         or as a standard transform. You might use
                         with_offsets=False to apply only the linear part of
                         the transform, leaving the offsets out.
    :return: corresponding XYZ point in space B
    """
    extended = 1 if with_offsets else 0
    return tuple(dot(t, list(pos) + [extended])[:3])
[ "def", "apply_transform", "(", "t", ":", "Union", "[", "List", "[", "List", "[", "float", "]", "]", ",", "np", ".", "ndarray", "]", ",", "pos", ":", "Tuple", "[", "float", ",", "float", ",", "float", "]", ",", "with_offsets", "=", "True", ")", "->", "Tuple", "[", "float", ",", "float", ",", "float", "]", ":", "extended", "=", "1", "if", "with_offsets", "else", "0", "return", "tuple", "(", "dot", "(", "t", ",", "list", "(", "pos", ")", "+", "[", "extended", "]", ")", "[", ":", "3", "]", ")" ]
Change of base using a transform matrix. Primarily used to render a point in space in a way that is more readable for the user.

:param t: A transformation matrix from one 3D space [A] to another [B] :param pos: XYZ point in space A :param with_offsets: Whether to apply the transform as an affine transform or as a standard transform. You might use with_offsets=False to apply only the linear part of the transform, leaving the offsets out. :return: corresponding XYZ point in space B
[ "Change", "of", "base", "using", "a", "transform", "matrix", ".", "Primarily", "used", "to", "render", "a", "point", "in", "space", "in", "a", "way", "that", "is", "more", "readable", "for", "the", "user", "." ]
python
train
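A self-contained illustration of the change of base performed by `apply_transform`; the 4x4 matrix is an assumed example (a pure +10 offset on x), not an Opentrons calibration.

import numpy as np

t = np.array([[1., 0., 0., 10.],
              [0., 1., 0., 0.],
              [0., 0., 1., 0.],
              [0., 0., 0., 1.]])
pos = (1.0, 2.0, 3.0)
# with_offsets=True appends a 1, so the last column (the offsets)
# participates in the product; with 0 appended it would be ignored.
print(tuple(np.dot(t, list(pos) + [1])[:3]))  # -> (11.0, 2.0, 3.0)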
fossasia/knittingpattern
knittingpattern/Parser.py
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/Parser.py#L247-L254
def _create_pattern_set(self, pattern, values): """Create a new pattern set.""" type_ = self._get_type(values) version = self._get_version(values) comment = values.get(COMMENT) self._pattern_set = self._spec.new_pattern_set( type_, version, pattern, self, comment )
[ "def", "_create_pattern_set", "(", "self", ",", "pattern", ",", "values", ")", ":", "type_", "=", "self", ".", "_get_type", "(", "values", ")", "version", "=", "self", ".", "_get_version", "(", "values", ")", "comment", "=", "values", ".", "get", "(", "COMMENT", ")", "self", ".", "_pattern_set", "=", "self", ".", "_spec", ".", "new_pattern_set", "(", "type_", ",", "version", ",", "pattern", ",", "self", ",", "comment", ")" ]
Create a new pattern set.
[ "Create", "a", "new", "pattern", "set", "." ]
python
valid
ska-sa/katcp-python
katcp/client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/client.py#L550-L569
def handle_reply(self, msg): """Dispatch a reply message to the appropriate method. Parameters ---------- msg : Message object The reply message to dispatch. """ method = self.__class__.unhandled_reply if msg.name in self._reply_handlers: method = self._reply_handlers[msg.name] try: return method(self, msg) except Exception: e_type, e_value, trace = sys.exc_info() reason = "\n".join(traceback.format_exception( e_type, e_value, trace, self._tb_limit)) self._logger.error("Reply %s FAIL: %s" % (msg.name, reason))
[ "def", "handle_reply", "(", "self", ",", "msg", ")", ":", "method", "=", "self", ".", "__class__", ".", "unhandled_reply", "if", "msg", ".", "name", "in", "self", ".", "_reply_handlers", ":", "method", "=", "self", ".", "_reply_handlers", "[", "msg", ".", "name", "]", "try", ":", "return", "method", "(", "self", ",", "msg", ")", "except", "Exception", ":", "e_type", ",", "e_value", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "reason", "=", "\"\\n\"", ".", "join", "(", "traceback", ".", "format_exception", "(", "e_type", ",", "e_value", ",", "trace", ",", "self", ".", "_tb_limit", ")", ")", "self", ".", "_logger", ".", "error", "(", "\"Reply %s FAIL: %s\"", "%", "(", "msg", ".", "name", ",", "reason", ")", ")" ]
Dispatch a reply message to the appropriate method. Parameters ---------- msg : Message object The reply message to dispatch.
[ "Dispatch", "a", "reply", "message", "to", "the", "appropriate", "method", "." ]
python
train
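A bare-bones version of the name-based dispatch used by `handle_reply` above; unknown reply names fall through to a default handler, much like `unhandled_reply`.

handlers = {'version': lambda msg: 'version ok'}

def dispatch(name, msg):
    # look up a handler by message name, defaulting to a catch-all
    return handlers.get(name, lambda m: 'unhandled %s' % name)(msg)

print(dispatch('version', None))  # -> version ok
print(dispatch('help', None))     # -> unhandled help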
quiltdata/quilt
compiler/quilt/nodes.py
https://github.com/quiltdata/quilt/blob/651853e7e89a8af86e0ff26167e752efa5878c12/compiler/quilt/nodes.py#L184-L220
def _data(self, asa=None): """ Merges all child dataframes. Only works for dataframes stored on disk - not in memory. """ hash_list = [] stack = [self] alldfs = True store = None while stack: node = stack.pop() if isinstance(node, GroupNode): stack.extend(child for _, child in sorted(node._items(), reverse=True)) else: if node._target() != TargetType.PANDAS: alldfs = False if node._store is None or node._hashes is None: msg = "Can only merge built dataframes. Build this package and try again." raise NotImplementedError(msg) node_store = node._store if store is None: store = node_store if node_store != store: raise NotImplementedError("Can only merge dataframes from the same store") hash_list += node._hashes if asa is None: if not hash_list: return None if not alldfs: raise ValueError("Group contains non-dataframe nodes") return store.load_dataframe(hash_list) else: if hash_list: assert store is not None return asa(self, [store.object_path(obj) for obj in hash_list]) else: return asa(self, [])
[ "def", "_data", "(", "self", ",", "asa", "=", "None", ")", ":", "hash_list", "=", "[", "]", "stack", "=", "[", "self", "]", "alldfs", "=", "True", "store", "=", "None", "while", "stack", ":", "node", "=", "stack", ".", "pop", "(", ")", "if", "isinstance", "(", "node", ",", "GroupNode", ")", ":", "stack", ".", "extend", "(", "child", "for", "_", ",", "child", "in", "sorted", "(", "node", ".", "_items", "(", ")", ",", "reverse", "=", "True", ")", ")", "else", ":", "if", "node", ".", "_target", "(", ")", "!=", "TargetType", ".", "PANDAS", ":", "alldfs", "=", "False", "if", "node", ".", "_store", "is", "None", "or", "node", ".", "_hashes", "is", "None", ":", "msg", "=", "\"Can only merge built dataframes. Build this package and try again.\"", "raise", "NotImplementedError", "(", "msg", ")", "node_store", "=", "node", ".", "_store", "if", "store", "is", "None", ":", "store", "=", "node_store", "if", "node_store", "!=", "store", ":", "raise", "NotImplementedError", "(", "\"Can only merge dataframes from the same store\"", ")", "hash_list", "+=", "node", ".", "_hashes", "if", "asa", "is", "None", ":", "if", "not", "hash_list", ":", "return", "None", "if", "not", "alldfs", ":", "raise", "ValueError", "(", "\"Group contains non-dataframe nodes\"", ")", "return", "store", ".", "load_dataframe", "(", "hash_list", ")", "else", ":", "if", "hash_list", ":", "assert", "store", "is", "not", "None", "return", "asa", "(", "self", ",", "[", "store", ".", "object_path", "(", "obj", ")", "for", "obj", "in", "hash_list", "]", ")", "else", ":", "return", "asa", "(", "self", ",", "[", "]", ")" ]
Merges all child dataframes. Only works for dataframes stored on disk - not in memory.
[ "Merges", "all", "child", "dataframes", ".", "Only", "works", "for", "dataframes", "stored", "on", "disk", "-", "not", "in", "memory", "." ]
python
train
shidenggui/easytrader
easytrader/xqtrader.py
https://github.com/shidenggui/easytrader/blob/e5ae4daeda4ea125763a95b280dd694c7f68257d/easytrader/xqtrader.py#L143-L165
def get_balance(self):
        """
        Get account funds status
        :return:
        """
        portfolio_code = self.account_config.get("portfolio_code", "ch")
        portfolio_info = self._get_portfolio_info(portfolio_code)
        asset_balance = self._virtual_to_balance(
            float(portfolio_info["net_value"])
        )  # 总资产
        position = portfolio_info["view_rebalancing"]  # 仓位结构
        cash = asset_balance * float(position["cash"]) / 100
        market = asset_balance - cash
        return [
            {
                "asset_balance": asset_balance,
                "current_balance": cash,
                "enable_balance": cash,
                "market_value": market,
                "money_type": u"人民币",
                "pre_interest": 0.25,
            }
        ]
[ "def", "get_balance", "(", "self", ")", ":", "portfolio_code", "=", "self", ".", "account_config", ".", "get", "(", "\"portfolio_code\"", ",", "\"ch\"", ")", "portfolio_info", "=", "self", ".", "_get_portfolio_info", "(", "portfolio_code", ")", "asset_balance", "=", "self", ".", "_virtual_to_balance", "(", "float", "(", "portfolio_info", "[", "\"net_value\"", "]", ")", ")", "# 总资产", "position", "=", "portfolio_info", "[", "\"view_rebalancing\"", "]", "# 仓位结构", "cash", "=", "asset_balance", "*", "float", "(", "position", "[", "\"cash\"", "]", ")", "/", "100", "market", "=", "asset_balance", "-", "cash", "return", "[", "{", "\"asset_balance\"", ":", "asset_balance", ",", "\"current_balance\"", ":", "cash", ",", "\"enable_balance\"", ":", "cash", ",", "\"market_value\"", ":", "market", ",", "\"money_type\"", ":", "u\"人民币\",", "", "\"pre_interest\"", ":", "0.25", ",", "}", "]" ]
Get account funds status
:return:
[ "获取账户资金状况", ":", "return", ":" ]
python
train
TorkamaniLab/metapipe
metapipe/models/command_template_factory.py
https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/command_template_factory.py#L15-L30
def get_command_templates(command_tokens, file_tokens=[], path_tokens=[], job_options=[]): """ Given a list of tokens from the grammar, return a list of commands. """ files = get_files(file_tokens) paths = get_paths(path_tokens) job_options = get_options(job_options) templates = _get_command_templates(command_tokens, files, paths, job_options) for command_template in templates: command_template._dependencies = _get_prelim_dependencies( command_template, templates) return templates
[ "def", "get_command_templates", "(", "command_tokens", ",", "file_tokens", "=", "[", "]", ",", "path_tokens", "=", "[", "]", ",", "job_options", "=", "[", "]", ")", ":", "files", "=", "get_files", "(", "file_tokens", ")", "paths", "=", "get_paths", "(", "path_tokens", ")", "job_options", "=", "get_options", "(", "job_options", ")", "templates", "=", "_get_command_templates", "(", "command_tokens", ",", "files", ",", "paths", ",", "job_options", ")", "for", "command_template", "in", "templates", ":", "command_template", ".", "_dependencies", "=", "_get_prelim_dependencies", "(", "command_template", ",", "templates", ")", "return", "templates" ]
Given a list of tokens from the grammar, return a list of commands.
[ "Given", "a", "list", "of", "tokens", "from", "the", "grammar", "return", "a", "list", "of", "commands", "." ]
python
train
spyder-ide/spyder
spyder/plugins/profiler/widgets/profilergui.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/profiler/widgets/profilergui.py#L637-L699
def populate_tree(self, parentItem, children_list): """Recursive method to create each item (and associated data) in the tree.""" for child_key in children_list: self.item_depth += 1 (filename, line_number, function_name, file_and_line, node_type ) = self.function_info(child_key) ((total_calls, total_calls_dif), (loc_time, loc_time_dif), (cum_time, cum_time_dif)) = self.format_output(child_key) child_item = TreeWidgetItem(parentItem) self.item_list.append(child_item) self.set_item_data(child_item, filename, line_number) # FIXME: indexes to data should be defined by a dictionary on init child_item.setToolTip(0, _('Function or module name')) child_item.setData(0, Qt.DisplayRole, function_name) child_item.setIcon(0, self.icon_list[node_type]) child_item.setToolTip(1, _('Time in function '\ '(including sub-functions)')) child_item.setData(1, Qt.DisplayRole, cum_time) child_item.setTextAlignment(1, Qt.AlignRight) child_item.setData(2, Qt.DisplayRole, cum_time_dif[0]) child_item.setForeground(2, QColor(cum_time_dif[1])) child_item.setTextAlignment(2, Qt.AlignLeft) child_item.setToolTip(3, _('Local time in function '\ '(not in sub-functions)')) child_item.setData(3, Qt.DisplayRole, loc_time) child_item.setTextAlignment(3, Qt.AlignRight) child_item.setData(4, Qt.DisplayRole, loc_time_dif[0]) child_item.setForeground(4, QColor(loc_time_dif[1])) child_item.setTextAlignment(4, Qt.AlignLeft) child_item.setToolTip(5, _('Total number of calls '\ '(including recursion)')) child_item.setData(5, Qt.DisplayRole, total_calls) child_item.setTextAlignment(5, Qt.AlignRight) child_item.setData(6, Qt.DisplayRole, total_calls_dif[0]) child_item.setForeground(6, QColor(total_calls_dif[1])) child_item.setTextAlignment(6, Qt.AlignLeft) child_item.setToolTip(7, _('File:line '\ 'where function is defined')) child_item.setData(7, Qt.DisplayRole, file_and_line) #child_item.setExpanded(True) if self.is_recursive(child_item): child_item.setData(7, Qt.DisplayRole, '(%s)' % _('recursion')) child_item.setDisabled(True) else: callees = self.find_callees(child_key) if self.item_depth < 3: self.populate_tree(child_item, callees) elif callees: child_item.setChildIndicatorPolicy(child_item.ShowIndicator) self.items_to_be_shown[id(child_item)] = callees self.item_depth -= 1
[ "def", "populate_tree", "(", "self", ",", "parentItem", ",", "children_list", ")", ":", "for", "child_key", "in", "children_list", ":", "self", ".", "item_depth", "+=", "1", "(", "filename", ",", "line_number", ",", "function_name", ",", "file_and_line", ",", "node_type", ")", "=", "self", ".", "function_info", "(", "child_key", ")", "(", "(", "total_calls", ",", "total_calls_dif", ")", ",", "(", "loc_time", ",", "loc_time_dif", ")", ",", "(", "cum_time", ",", "cum_time_dif", ")", ")", "=", "self", ".", "format_output", "(", "child_key", ")", "child_item", "=", "TreeWidgetItem", "(", "parentItem", ")", "self", ".", "item_list", ".", "append", "(", "child_item", ")", "self", ".", "set_item_data", "(", "child_item", ",", "filename", ",", "line_number", ")", "# FIXME: indexes to data should be defined by a dictionary on init\r", "child_item", ".", "setToolTip", "(", "0", ",", "_", "(", "'Function or module name'", ")", ")", "child_item", ".", "setData", "(", "0", ",", "Qt", ".", "DisplayRole", ",", "function_name", ")", "child_item", ".", "setIcon", "(", "0", ",", "self", ".", "icon_list", "[", "node_type", "]", ")", "child_item", ".", "setToolTip", "(", "1", ",", "_", "(", "'Time in function '", "'(including sub-functions)'", ")", ")", "child_item", ".", "setData", "(", "1", ",", "Qt", ".", "DisplayRole", ",", "cum_time", ")", "child_item", ".", "setTextAlignment", "(", "1", ",", "Qt", ".", "AlignRight", ")", "child_item", ".", "setData", "(", "2", ",", "Qt", ".", "DisplayRole", ",", "cum_time_dif", "[", "0", "]", ")", "child_item", ".", "setForeground", "(", "2", ",", "QColor", "(", "cum_time_dif", "[", "1", "]", ")", ")", "child_item", ".", "setTextAlignment", "(", "2", ",", "Qt", ".", "AlignLeft", ")", "child_item", ".", "setToolTip", "(", "3", ",", "_", "(", "'Local time in function '", "'(not in sub-functions)'", ")", ")", "child_item", ".", "setData", "(", "3", ",", "Qt", ".", "DisplayRole", ",", "loc_time", ")", "child_item", ".", "setTextAlignment", "(", "3", ",", "Qt", ".", "AlignRight", ")", "child_item", ".", "setData", "(", "4", ",", "Qt", ".", "DisplayRole", ",", "loc_time_dif", "[", "0", "]", ")", "child_item", ".", "setForeground", "(", "4", ",", "QColor", "(", "loc_time_dif", "[", "1", "]", ")", ")", "child_item", ".", "setTextAlignment", "(", "4", ",", "Qt", ".", "AlignLeft", ")", "child_item", ".", "setToolTip", "(", "5", ",", "_", "(", "'Total number of calls '", "'(including recursion)'", ")", ")", "child_item", ".", "setData", "(", "5", ",", "Qt", ".", "DisplayRole", ",", "total_calls", ")", "child_item", ".", "setTextAlignment", "(", "5", ",", "Qt", ".", "AlignRight", ")", "child_item", ".", "setData", "(", "6", ",", "Qt", ".", "DisplayRole", ",", "total_calls_dif", "[", "0", "]", ")", "child_item", ".", "setForeground", "(", "6", ",", "QColor", "(", "total_calls_dif", "[", "1", "]", ")", ")", "child_item", ".", "setTextAlignment", "(", "6", ",", "Qt", ".", "AlignLeft", ")", "child_item", ".", "setToolTip", "(", "7", ",", "_", "(", "'File:line '", "'where function is defined'", ")", ")", "child_item", ".", "setData", "(", "7", ",", "Qt", ".", "DisplayRole", ",", "file_and_line", ")", "#child_item.setExpanded(True)\r", "if", "self", ".", "is_recursive", "(", "child_item", ")", ":", "child_item", ".", "setData", "(", "7", ",", "Qt", ".", "DisplayRole", ",", "'(%s)'", "%", "_", "(", "'recursion'", ")", ")", "child_item", ".", "setDisabled", "(", "True", ")", "else", ":", "callees", "=", "self", ".", "find_callees", "(", "child_key", ")", "if", "self", ".", "item_depth", 
"<", "3", ":", "self", ".", "populate_tree", "(", "child_item", ",", "callees", ")", "elif", "callees", ":", "child_item", ".", "setChildIndicatorPolicy", "(", "child_item", ".", "ShowIndicator", ")", "self", ".", "items_to_be_shown", "[", "id", "(", "child_item", ")", "]", "=", "callees", "self", ".", "item_depth", "-=", "1" ]
Recursive method to create each item (and associated data) in the tree.
[ "Recursive", "method", "to", "create", "each", "item", "(", "and", "associated", "data", ")", "in", "the", "tree", "." ]
python
train
hyperledger/indy-plenum
plenum/server/replica_freshness_checker.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica_freshness_checker.py#L21-L48
def check_freshness(self, ts): ''' Get all ledger IDs for which A) not updated for more than Freshness Timeout B) hasn't been attempted to update (returned from this method) for more than Freshness Timeout Should be called whenever we need to decide if ledgers need to be updated. :param ts: the current time check the freshness against :return: an ordered dict of outdated ledgers sorted by the time from the last update (from oldest to newest) and then by ledger ID (in case of equal update time) ''' outdated_ledgers = {} for ledger_id, freshness_state in self._ledger_freshness.items(): if ts - freshness_state.last_updated <= self.freshness_timeout: continue if ts - freshness_state.last_marked_as_outdated <= self.freshness_timeout: continue outdated_ledgers[ledger_id] = ts - freshness_state.last_updated freshness_state.last_marked_as_outdated = ts # sort by last update time and then by ledger_id return OrderedDict( sorted( outdated_ledgers.items(), key=lambda item: (-item[1], item[0]) ) )
[ "def", "check_freshness", "(", "self", ",", "ts", ")", ":", "outdated_ledgers", "=", "{", "}", "for", "ledger_id", ",", "freshness_state", "in", "self", ".", "_ledger_freshness", ".", "items", "(", ")", ":", "if", "ts", "-", "freshness_state", ".", "last_updated", "<=", "self", ".", "freshness_timeout", ":", "continue", "if", "ts", "-", "freshness_state", ".", "last_marked_as_outdated", "<=", "self", ".", "freshness_timeout", ":", "continue", "outdated_ledgers", "[", "ledger_id", "]", "=", "ts", "-", "freshness_state", ".", "last_updated", "freshness_state", ".", "last_marked_as_outdated", "=", "ts", "# sort by last update time and then by ledger_id", "return", "OrderedDict", "(", "sorted", "(", "outdated_ledgers", ".", "items", "(", ")", ",", "key", "=", "lambda", "item", ":", "(", "-", "item", "[", "1", "]", ",", "item", "[", "0", "]", ")", ")", ")" ]
Get all ledger IDs for which A) not updated for more than Freshness Timeout B) hasn't been attempted to update (returned from this method) for more than Freshness Timeout Should be called whenever we need to decide if ledgers need to be updated. :param ts: the current time check the freshness against :return: an ordered dict of outdated ledgers sorted by the time from the last update (from oldest to newest) and then by ledger ID (in case of equal update time)
[ "Get", "all", "ledger", "IDs", "for", "which", "A", ")", "not", "updated", "for", "more", "than", "Freshness", "Timeout", "B", ")", "hasn", "t", "been", "attempted", "to", "update", "(", "returned", "from", "this", "method", ")", "for", "more", "than", "Freshness", "Timeout", "Should", "be", "called", "whenever", "we", "need", "to", "decide", "if", "ledgers", "need", "to", "be", "updated", "." ]
python
train
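A small demo of the ordering rule in `check_freshness` above: outdated ledgers are sorted oldest-first by time since update, with ties broken by ascending ledger id via the key `(-age, ledger_id)`.

from collections import OrderedDict

ages = {2: 30, 1: 45, 3: 45}  # ledger_id -> seconds since last update
ordered = OrderedDict(sorted(ages.items(), key=lambda kv: (-kv[1], kv[0])))
print(list(ordered))  # -> [1, 3, 2]: the two 45-second ledgers first, in id order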
urinieto/msaf
msaf/pymf/cnmf.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/pymf/cnmf.py#L108-L187
def factorize(self, niter=10, compute_w=True, compute_h=True, compute_err=True, show_progress=False): """ Factorize s.t. WH = data Parameters ---------- niter : int number of iterations. show_progress : bool print some extra information to stdout. compute_h : bool iteratively update values for H. compute_w : bool iteratively update values for W. compute_err : bool compute Frobenius norm |data-WH| after each update and store it to .ferr[k]. Updated Values -------------- .W : updated values for W. .H : updated values for H. .ferr : Frobenius norm |data-WH| for each iteration. """ if not hasattr(self,'W'): self.init_w() if not hasattr(self,'H'): self.init_h() def separate_positive(m): return (np.abs(m) + m)/2.0 def separate_negative(m): return (np.abs(m) - m)/2.0 if show_progress: self._logger.setLevel(logging.INFO) else: self._logger.setLevel(logging.ERROR) XtX = np.dot(self.data[:,:].T, self.data[:,:]) XtX_pos = separate_positive(XtX) XtX_neg = separate_negative(XtX) self.ferr = np.zeros(niter) # iterate over W and H for i in range(niter): # update H XtX_neg_x_W = np.dot(XtX_neg, self.G) XtX_pos_x_W = np.dot(XtX_pos, self.G) if compute_h: H_x_WT = np.dot(self.H.T, self.G.T) ha = XtX_pos_x_W + np.dot(H_x_WT, XtX_neg_x_W) hb = XtX_neg_x_W + np.dot(H_x_WT, XtX_pos_x_W) + 10**-9 self.H = (self.H.T*np.sqrt(ha/hb)).T # update W if compute_w: HT_x_H = np.dot(self.H, self.H.T) wa = np.dot(XtX_pos, self.H.T) + np.dot(XtX_neg_x_W, HT_x_H) wb = np.dot(XtX_neg, self.H.T) + np.dot(XtX_pos_x_W, HT_x_H) + 10**-9 self.G *= np.sqrt(wa/wb) self.W = np.dot(self.data[:,:], self.G) if compute_err: self.ferr[i] = self.frobenius_norm() self._logger.info('Iteration ' + str(i+1) + '/' + str(niter) + ' FN:' + str(self.ferr[i])) else: self._logger.info('Iteration ' + str(i+1) + '/' + str(niter)) if i > 1 and compute_err: if self.converged(i): self.ferr = self.ferr[:i] break
[ "def", "factorize", "(", "self", ",", "niter", "=", "10", ",", "compute_w", "=", "True", ",", "compute_h", "=", "True", ",", "compute_err", "=", "True", ",", "show_progress", "=", "False", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'W'", ")", ":", "self", ".", "init_w", "(", ")", "if", "not", "hasattr", "(", "self", ",", "'H'", ")", ":", "self", ".", "init_h", "(", ")", "def", "separate_positive", "(", "m", ")", ":", "return", "(", "np", ".", "abs", "(", "m", ")", "+", "m", ")", "/", "2.0", "def", "separate_negative", "(", "m", ")", ":", "return", "(", "np", ".", "abs", "(", "m", ")", "-", "m", ")", "/", "2.0", "if", "show_progress", ":", "self", ".", "_logger", ".", "setLevel", "(", "logging", ".", "INFO", ")", "else", ":", "self", ".", "_logger", ".", "setLevel", "(", "logging", ".", "ERROR", ")", "XtX", "=", "np", ".", "dot", "(", "self", ".", "data", "[", ":", ",", ":", "]", ".", "T", ",", "self", ".", "data", "[", ":", ",", ":", "]", ")", "XtX_pos", "=", "separate_positive", "(", "XtX", ")", "XtX_neg", "=", "separate_negative", "(", "XtX", ")", "self", ".", "ferr", "=", "np", ".", "zeros", "(", "niter", ")", "# iterate over W and H", "for", "i", "in", "range", "(", "niter", ")", ":", "# update H", "XtX_neg_x_W", "=", "np", ".", "dot", "(", "XtX_neg", ",", "self", ".", "G", ")", "XtX_pos_x_W", "=", "np", ".", "dot", "(", "XtX_pos", ",", "self", ".", "G", ")", "if", "compute_h", ":", "H_x_WT", "=", "np", ".", "dot", "(", "self", ".", "H", ".", "T", ",", "self", ".", "G", ".", "T", ")", "ha", "=", "XtX_pos_x_W", "+", "np", ".", "dot", "(", "H_x_WT", ",", "XtX_neg_x_W", ")", "hb", "=", "XtX_neg_x_W", "+", "np", ".", "dot", "(", "H_x_WT", ",", "XtX_pos_x_W", ")", "+", "10", "**", "-", "9", "self", ".", "H", "=", "(", "self", ".", "H", ".", "T", "*", "np", ".", "sqrt", "(", "ha", "/", "hb", ")", ")", ".", "T", "# update W", "if", "compute_w", ":", "HT_x_H", "=", "np", ".", "dot", "(", "self", ".", "H", ",", "self", ".", "H", ".", "T", ")", "wa", "=", "np", ".", "dot", "(", "XtX_pos", ",", "self", ".", "H", ".", "T", ")", "+", "np", ".", "dot", "(", "XtX_neg_x_W", ",", "HT_x_H", ")", "wb", "=", "np", ".", "dot", "(", "XtX_neg", ",", "self", ".", "H", ".", "T", ")", "+", "np", ".", "dot", "(", "XtX_pos_x_W", ",", "HT_x_H", ")", "+", "10", "**", "-", "9", "self", ".", "G", "*=", "np", ".", "sqrt", "(", "wa", "/", "wb", ")", "self", ".", "W", "=", "np", ".", "dot", "(", "self", ".", "data", "[", ":", ",", ":", "]", ",", "self", ".", "G", ")", "if", "compute_err", ":", "self", ".", "ferr", "[", "i", "]", "=", "self", ".", "frobenius_norm", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Iteration '", "+", "str", "(", "i", "+", "1", ")", "+", "'/'", "+", "str", "(", "niter", ")", "+", "' FN:'", "+", "str", "(", "self", ".", "ferr", "[", "i", "]", ")", ")", "else", ":", "self", ".", "_logger", ".", "info", "(", "'Iteration '", "+", "str", "(", "i", "+", "1", ")", "+", "'/'", "+", "str", "(", "niter", ")", ")", "if", "i", ">", "1", "and", "compute_err", ":", "if", "self", ".", "converged", "(", "i", ")", ":", "self", ".", "ferr", "=", "self", ".", "ferr", "[", ":", "i", "]", "break" ]
Factorize s.t. WH = data Parameters ---------- niter : int number of iterations. show_progress : bool print some extra information to stdout. compute_h : bool iteratively update values for H. compute_w : bool iteratively update values for W. compute_err : bool compute Frobenius norm |data-WH| after each update and store it to .ferr[k]. Updated Values -------------- .W : updated values for W. .H : updated values for H. .ferr : Frobenius norm |data-WH| for each iteration.
[ "Factorize", "s", ".", "t", ".", "WH", "=", "data" ]
python
test
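The multiplicative updates in the factorize record above hinge on splitting a matrix into elementwise positive and negative parts. A minimal self-contained sketch of that decomposition (NumPy only, independent of the surrounding class):

import numpy as np

def separate_positive(m):
    # elementwise positive part: (|m| + m) / 2
    return (np.abs(m) + m) / 2.0

def separate_negative(m):
    # elementwise negative part: (|m| - m) / 2
    return (np.abs(m) - m) / 2.0

m = np.array([[1.0, -2.0], [-3.0, 4.0]])
# both parts are nonnegative and reconstruct m as pos - neg
assert np.allclose(separate_positive(m) - separate_negative(m), m)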
markovmodel/msmtools
msmtools/estimation/sparse/transition_matrix.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/estimation/sparse/transition_matrix.py#L27-L37
def transition_matrix_non_reversible(C): """implementation of transition_matrix""" if not scipy.sparse.issparse(C): C = scipy.sparse.csr_matrix(C) rowsum = C.tocsr().sum(axis=1) # catch div by zero if np.min(rowsum) == 0.0: raise ValueError("matrix C contains rows with sum zero.") rowsum = np.array(1. / rowsum).flatten() norm = scipy.sparse.diags(rowsum, 0) return norm * C
[ "def", "transition_matrix_non_reversible", "(", "C", ")", ":", "if", "not", "scipy", ".", "sparse", ".", "issparse", "(", "C", ")", ":", "C", "=", "scipy", ".", "sparse", ".", "csr_matrix", "(", "C", ")", "rowsum", "=", "C", ".", "tocsr", "(", ")", ".", "sum", "(", "axis", "=", "1", ")", "# catch div by zero", "if", "np", ".", "min", "(", "rowsum", ")", "==", "0.0", ":", "raise", "ValueError", "(", "\"matrix C contains rows with sum zero.\"", ")", "rowsum", "=", "np", ".", "array", "(", "1.", "/", "rowsum", ")", ".", "flatten", "(", ")", "norm", "=", "scipy", ".", "sparse", ".", "diags", "(", "rowsum", ",", "0", ")", "return", "norm", "*", "C" ]
implementation of transition_matrix
[ "implementation", "of", "transition_matrix" ]
python
train
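A quick usage sketch for transition_matrix_non_reversible (assuming the function above is in scope; the count matrix is made up): row-normalizing a count matrix yields a stochastic matrix.

import numpy as np

# dense input is accepted and converted to CSR internally
C = np.array([[5, 1], [2, 8]])
T = transition_matrix_non_reversible(C)
# every row of the resulting transition matrix sums to one
assert np.allclose(np.asarray(T.sum(axis=1)).ravel(), 1.0)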
apache/incubator-heron
heron/tools/tracker/src/python/handlers/metricstimelinehandler.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/handlers/metricstimelinehandler.py#L57-L79
def get(self): """ get method """ try: cluster = self.get_argument_cluster() role = self.get_argument_role() environ = self.get_argument_environ() topology_name = self.get_argument_topology() component = self.get_argument_component() metric_names = self.get_required_arguments_metricnames() start_time = self.get_argument_starttime() end_time = self.get_argument_endtime() self.validateInterval(start_time, end_time) instances = self.get_arguments(constants.PARAM_INSTANCE) topology = self.tracker.getTopologyByClusterRoleEnvironAndName( cluster, role, environ, topology_name) metrics = yield tornado.gen.Task(metricstimeline.getMetricsTimeline, topology.tmaster, component, metric_names, instances, int(start_time), int(end_time)) self.write_success_response(metrics) except Exception as e: Log.debug(traceback.format_exc()) self.write_error_response(e)
[ "def", "get", "(", "self", ")", ":", "try", ":", "cluster", "=", "self", ".", "get_argument_cluster", "(", ")", "role", "=", "self", ".", "get_argument_role", "(", ")", "environ", "=", "self", ".", "get_argument_environ", "(", ")", "topology_name", "=", "self", ".", "get_argument_topology", "(", ")", "component", "=", "self", ".", "get_argument_component", "(", ")", "metric_names", "=", "self", ".", "get_required_arguments_metricnames", "(", ")", "start_time", "=", "self", ".", "get_argument_starttime", "(", ")", "end_time", "=", "self", ".", "get_argument_endtime", "(", ")", "self", ".", "validateInterval", "(", "start_time", ",", "end_time", ")", "instances", "=", "self", ".", "get_arguments", "(", "constants", ".", "PARAM_INSTANCE", ")", "topology", "=", "self", ".", "tracker", ".", "getTopologyByClusterRoleEnvironAndName", "(", "cluster", ",", "role", ",", "environ", ",", "topology_name", ")", "metrics", "=", "yield", "tornado", ".", "gen", ".", "Task", "(", "metricstimeline", ".", "getMetricsTimeline", ",", "topology", ".", "tmaster", ",", "component", ",", "metric_names", ",", "instances", ",", "int", "(", "start_time", ")", ",", "int", "(", "end_time", ")", ")", "self", ".", "write_success_response", "(", "metrics", ")", "except", "Exception", "as", "e", ":", "Log", ".", "debug", "(", "traceback", ".", "format_exc", "(", ")", ")", "self", ".", "write_error_response", "(", "e", ")" ]
get method
[ "get", "method" ]
python
valid
has2k1/plotnine
plotnine/facets/facet_wrap.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet_wrap.py#L302-L318
def n2mfrow(nr_plots): """ Compute the rows and columns given the number of plots. This is a port of grDevices::n2mfrow from R """ if nr_plots <= 3: nrow, ncol = nr_plots, 1 elif nr_plots <= 6: nrow, ncol = (nr_plots + 1) // 2, 2 elif nr_plots <= 12: nrow, ncol = (nr_plots + 2) // 3, 3 else: nrow = int(np.ceil(np.sqrt(nr_plots))) ncol = int(np.ceil(nr_plots/nrow)) return (nrow, ncol)
[ "def", "n2mfrow", "(", "nr_plots", ")", ":", "if", "nr_plots", "<=", "3", ":", "nrow", ",", "ncol", "=", "nr_plots", ",", "1", "elif", "nr_plots", "<=", "6", ":", "nrow", ",", "ncol", "=", "(", "nr_plots", "+", "1", ")", "//", "2", ",", "2", "elif", "nr_plots", "<=", "12", ":", "nrow", ",", "ncol", "=", "(", "nr_plots", "+", "2", ")", "//", "3", ",", "3", "else", ":", "nrow", "=", "int", "(", "np", ".", "ceil", "(", "np", ".", "sqrt", "(", "nr_plots", ")", ")", ")", "ncol", "=", "int", "(", "np", ".", "ceil", "(", "nr_plots", "/", "nrow", ")", ")", "return", "(", "nrow", ",", "ncol", ")" ]
Compute the rows and columns given the number of plots. This is a port of grDevices::n2mfrow from R
[ "Compute", "the", "rows", "and", "columns", "given", "the", "number", "of", "plots", "." ]
python
train
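n2mfrow is pure arithmetic, so its behaviour is easy to spot-check; a short sketch (assuming the function above is in scope):

# the grid always has enough cells and grows roughly square
for n in [1, 4, 7, 13]:
    nrow, ncol = n2mfrow(n)
    assert nrow * ncol >= n
    print(n, (nrow, ncol))  # 1 -> (1, 1), 4 -> (2, 2), 7 -> (3, 3), 13 -> (4, 4)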
adamziel/python_translate
python_translate/operations.py
https://github.com/adamziel/python_translate/blob/0aee83f434bd2d1b95767bcd63adb7ac7036c7df/python_translate/operations.py#L70-L82
def get_new_messages(self, domain): """ Returns new valid messages after operation. @type domain: str @rtype: dict """ if domain not in self.domains: raise ValueError('Invalid domain: {0}'.format(domain)) if domain not in self.messages or 'new' not in self.messages[domain]: self._process_domain(domain) return self.messages[domain]['new']
[ "def", "get_new_messages", "(", "self", ",", "domain", ")", ":", "if", "domain", "not", "in", "self", ".", "domains", ":", "raise", "ValueError", "(", "'Invalid domain: {0}'", ".", "format", "(", "domain", ")", ")", "if", "domain", "not", "in", "self", ".", "messages", "or", "'new'", "not", "in", "self", ".", "messages", "[", "domain", "]", ":", "self", ".", "_process_domain", "(", "domain", ")", "return", "self", ".", "messages", "[", "domain", "]", "[", "'new'", "]" ]
Returns new valid messages after operation. @type domain: str @rtype: dict
[ "Returns", "new", "valid", "messages", "after", "operation", "." ]
python
train
ioam/lancet
lancet/core.py
https://github.com/ioam/lancet/blob/1fbbf88fa0e8974ff9ed462e3cb11722ddebdd6e/lancet/core.py#L210-L221
def _collect_by_key(self,specs): """ Returns a dictionary like object with the lists of values collapsed by their respective key. Useful to find varying vs constant keys and to find how fast keys vary. """ # Collect (key, value) tuples as list of lists, flatten with chain allkeys = itertools.chain.from_iterable( [[(k, run[k]) for k in run] for run in specs]) collection = defaultdict(list) for (k,v) in allkeys: collection[k].append(v) return collection
[ "def", "_collect_by_key", "(", "self", ",", "specs", ")", ":", "# Collect (key, value) tuples as list of lists, flatten with chain", "allkeys", "=", "itertools", ".", "chain", ".", "from_iterable", "(", "[", "[", "(", "k", ",", "run", "[", "k", "]", ")", "for", "k", "in", "run", "]", "for", "run", "in", "specs", "]", ")", "collection", "=", "defaultdict", "(", "list", ")", "for", "(", "k", ",", "v", ")", "in", "allkeys", ":", "collection", "[", "k", "]", ".", "append", "(", "v", ")", "return", "collection" ]
Returns a dictionary like object with the lists of values collapsed by their respective key. Useful to find varying vs constant keys and to find how fast keys vary.
[ "Returns", "a", "dictionary", "like", "object", "with", "the", "lists", "of", "values", "collapsed", "by", "their", "respective", "key", ".", "Useful", "to", "find", "varying", "vs", "constant", "keys", "and", "to", "find", "how", "fast", "keys", "vary", "." ]
python
valid
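The collapse-by-key idiom in _collect_by_key works on any list of dicts; a self-contained sketch (standard library only, no Lancet dependency):

import itertools
from collections import defaultdict

specs = [{'a': 1, 'b': 10}, {'a': 2, 'b': 10}]
# collect (key, value) tuples as list of lists, flatten with chain
allkeys = itertools.chain.from_iterable(
    [[(k, run[k]) for k in run] for run in specs])
collection = defaultdict(list)
for k, v in allkeys:
    collection[k].append(v)
# 'a' varies across runs while 'b' is constant
assert collection['a'] == [1, 2] and collection['b'] == [10, 10]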
zqfang/GSEApy
gseapy/enrichr.py
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L154-L170
def get_results(self, gene_list): """Enrichr API""" ADDLIST_URL = 'http://amp.pharm.mssm.edu/%sEnrichr/addList'%self._organism job_id = self.send_genes(gene_list, ADDLIST_URL) user_list_id = job_id['userListId'] RESULTS_URL = 'http://amp.pharm.mssm.edu/%sEnrichr/export'%self._organism query_string = '?userListId=%s&filename=%s&backgroundType=%s' # set max retries num =5 s = retry(num=5) filename = "%s.%s.reports" % (self._gs, self.descriptions) url = RESULTS_URL + query_string % (user_list_id, filename, self._gs) response = s.get(url, stream=True, timeout=None) # response = requests.get(RESULTS_URL + query_string % (user_list_id, gene_set)) sleep(1) res = pd.read_csv(StringIO(response.content.decode('utf-8')),sep="\t") return [job_id['shortId'], res]
[ "def", "get_results", "(", "self", ",", "gene_list", ")", ":", "ADDLIST_URL", "=", "'http://amp.pharm.mssm.edu/%sEnrichr/addList'", "%", "self", ".", "_organism", "job_id", "=", "self", ".", "send_genes", "(", "gene_list", ",", "ADDLIST_URL", ")", "user_list_id", "=", "job_id", "[", "'userListId'", "]", "RESULTS_URL", "=", "'http://amp.pharm.mssm.edu/%sEnrichr/export'", "%", "self", ".", "_organism", "query_string", "=", "'?userListId=%s&filename=%s&backgroundType=%s'", "# set max retries num =5", "s", "=", "retry", "(", "num", "=", "5", ")", "filename", "=", "\"%s.%s.reports\"", "%", "(", "self", ".", "_gs", ",", "self", ".", "descriptions", ")", "url", "=", "RESULTS_URL", "+", "query_string", "%", "(", "user_list_id", ",", "filename", ",", "self", ".", "_gs", ")", "response", "=", "s", ".", "get", "(", "url", ",", "stream", "=", "True", ",", "timeout", "=", "None", ")", "# response = requests.get(RESULTS_URL + query_string % (user_list_id, gene_set))", "sleep", "(", "1", ")", "res", "=", "pd", ".", "read_csv", "(", "StringIO", "(", "response", ".", "content", ".", "decode", "(", "'utf-8'", ")", ")", ",", "sep", "=", "\"\\t\"", ")", "return", "[", "job_id", "[", "'shortId'", "]", ",", "res", "]" ]
Enrichr API
[ "Enrichr", "API" ]
python
test
spry-group/python-vultr
vultr/v1_server.py
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L107-L116
def neighbors(self, subid, params=None): ''' v1/server/neighbors GET - account Determine what other subscriptions are hosted on the same physical host as a given subscription. Link: https://www.vultr.com/api/#server_neighbors ''' params = update_params(params, {'SUBID': subid}) return self.request('/v1/server/neighbors', params, 'GET')
[ "def", "neighbors", "(", "self", ",", "subid", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SUBID'", ":", "subid", "}", ")", "return", "self", ".", "request", "(", "'/v1/server/neighbors'", ",", "params", ",", "'GET'", ")" ]
v1/server/neighbors GET - account Determine what other subscriptions are hosted on the same physical host as a given subscription. Link: https://www.vultr.com/api/#server_neighbors
[ "v1", "/", "server", "/", "neighbors", "GET", "-", "account", "Determine", "what", "other", "subscriptions", "are", "hosted", "on", "the", "same", "physical", "host", "as", "a", "given", "subscription", "." ]
python
train
Chilipp/funcargparse
funcargparse/__init__.py
https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L333-L398
def setup_subparser( self, func=None, setup_as=None, insert_at=None, interprete=True, epilog_sections=None, overwrite=False, append_epilog=True, return_parser=False, name=None, **kwargs): """ Create a subparser with the name of the given function Parameters are the same as for the :meth:`setup_args` function, other parameters are parsed to the :meth:`add_subparsers` method if (and only if) this method has not already been called. Parameters ---------- %(FuncArgParser.setup_args.parameters)s return_parser: bool If True, the create parser is returned instead of the function name: str The name of the created parser. If None, the function name is used and underscores (``'_'``) are replaced by minus (``'-'``) ``**kwargs`` Any other parameter that is passed to the add_parser method that creates the parser Other Parameters ---------------- Returns ------- FuncArgParser or %(FuncArgParser.setup_args.returns)s If return_parser is True, the created subparser is returned Examples -------- Use this method as a decorator:: >>> from funcargparser import FuncArgParser >>> parser = FuncArgParser() >>> @parser.setup_subparser ... def my_func(my_argument=None): ... pass >>> args = parser.parse_args('my-func -my-argument 1'.split()) """ def setup(func): if self._subparsers_action is None: raise RuntimeError( "No subparsers have yet been created! Run the " "add_subparsers method first!") # replace underscore by '-' name2use = name if name2use is None: name2use = func.__name__.replace('_', '-') kwargs.setdefault('help', docstrings.get_summary( docstrings.dedents(inspect.getdoc(func)))) parser = self._subparsers_action.add_parser(name2use, **kwargs) parser.setup_args( func, setup_as=setup_as, insert_at=insert_at, interprete=interprete, epilog_sections=epilog_sections, overwrite=overwrite, append_epilog=append_epilog) return func, parser if func is None: return lambda f: setup(f)[0] else: return setup(func)[int(return_parser)]
[ "def", "setup_subparser", "(", "self", ",", "func", "=", "None", ",", "setup_as", "=", "None", ",", "insert_at", "=", "None", ",", "interprete", "=", "True", ",", "epilog_sections", "=", "None", ",", "overwrite", "=", "False", ",", "append_epilog", "=", "True", ",", "return_parser", "=", "False", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "def", "setup", "(", "func", ")", ":", "if", "self", ".", "_subparsers_action", "is", "None", ":", "raise", "RuntimeError", "(", "\"No subparsers have yet been created! Run the \"", "\"add_subparsers method first!\"", ")", "# replace underscore by '-'", "name2use", "=", "name", "if", "name2use", "is", "None", ":", "name2use", "=", "func", ".", "__name__", ".", "replace", "(", "'_'", ",", "'-'", ")", "kwargs", ".", "setdefault", "(", "'help'", ",", "docstrings", ".", "get_summary", "(", "docstrings", ".", "dedents", "(", "inspect", ".", "getdoc", "(", "func", ")", ")", ")", ")", "parser", "=", "self", ".", "_subparsers_action", ".", "add_parser", "(", "name2use", ",", "*", "*", "kwargs", ")", "parser", ".", "setup_args", "(", "func", ",", "setup_as", "=", "setup_as", ",", "insert_at", "=", "insert_at", ",", "interprete", "=", "interprete", ",", "epilog_sections", "=", "epilog_sections", ",", "overwrite", "=", "overwrite", ",", "append_epilog", "=", "append_epilog", ")", "return", "func", ",", "parser", "if", "func", "is", "None", ":", "return", "lambda", "f", ":", "setup", "(", "f", ")", "[", "0", "]", "else", ":", "return", "setup", "(", "func", ")", "[", "int", "(", "return_parser", ")", "]" ]
Create a subparser with the name of the given function Parameters are the same as for the :meth:`setup_args` function, other parameters are parsed to the :meth:`add_subparsers` method if (and only if) this method has not already been called. Parameters ---------- %(FuncArgParser.setup_args.parameters)s return_parser: bool If True, the create parser is returned instead of the function name: str The name of the created parser. If None, the function name is used and underscores (``'_'``) are replaced by minus (``'-'``) ``**kwargs`` Any other parameter that is passed to the add_parser method that creates the parser Other Parameters ---------------- Returns ------- FuncArgParser or %(FuncArgParser.setup_args.returns)s If return_parser is True, the created subparser is returned Examples -------- Use this method as a decorator:: >>> from funcargparser import FuncArgParser >>> parser = FuncArgParser() >>> @parser.setup_subparser ... def my_func(my_argument=None): ... pass >>> args = parser.parse_args('my-func -my-argument 1'.split())
[ "Create", "a", "subparser", "with", "the", "name", "of", "the", "given", "function" ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/dfa_server.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1651-L1667
def listener_delete_event(self, listener_info): """Process listener delete event. This is lbaas v2: the vif will be plugged into ovs when the first listener is created and unplugged from ovs when the last listener is deleted. As the data only contains the listener id, we scan all loadbalancers from the db and delete the vdp if their admin state is down in that loadbalancer. """ lb_list = self.neutronclient.list_loadbalancers() for lb in lb_list.get('loadbalancers'): if not lb.get("listeners"): lb_id = lb.get('id') LOG.info("Deleting lb %s port" % lb_id) self.delete_lbaas_port(lb_id)
[ "def", "listener_delete_event", "(", "self", ",", "listener_info", ")", ":", "lb_list", "=", "self", ".", "neutronclient", ".", "list_loadbalancers", "(", ")", "for", "lb", "in", "lb_list", ".", "get", "(", "'loadbalancers'", ")", ":", "if", "not", "lb", ".", "get", "(", "\"listeners\"", ")", ":", "lb_id", "=", "lb", ".", "get", "(", "'id'", ")", "LOG", ".", "info", "(", "\"Deleting lb %s port\"", "%", "lb_id", ")", "self", ".", "delete_lbaas_port", "(", "lb_id", ")" ]
Process listener delete event. This is lbaas v2: the vif will be plugged into ovs when the first listener is created and unplugged from ovs when the last listener is deleted. As the data only contains the listener id, we scan all loadbalancers from the db and delete the vdp if their admin state is down in that loadbalancer.
[ "Process", "listener", "delete", "event", "." ]
python
train
MIT-LCP/wfdb-python
wfdb/processing/qrs.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/processing/qrs.py#L603-L648
def xqrs_detect(sig, fs, sampfrom=0, sampto='end', conf=None, learn=True, verbose=True): """ Run the 'xqrs' qrs detection algorithm on a signal. See the docstring of the XQRS class for algorithm details. Parameters ---------- sig : numpy array The input ecg signal to apply the qrs detection on. fs : int or float The sampling frequency of the input signal. sampfrom : int, optional The starting sample number to run the detection on. sampto : The final sample number to run the detection on. Set as 'end' to run on the entire signal. conf : XQRS.Conf object, optional The configuration object specifying signal configuration parameters. See the docstring of the XQRS.Conf class. learn : bool, optional Whether to apply learning on the signal before running the main detection. If learning fails or is not conducted, the default configuration parameters will be used to initialize these variables. verbose : bool, optional Whether to display the stages and outcomes of the detection process. Returns ------- qrs_inds : numpy array The indices of the detected qrs complexes Examples -------- >>> import wfdb >>> from wfdb import processing >>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0]) >>> qrs_inds = processing.xqrs_detect(sig=sig[:,0], fs=fields['fs']) """ xqrs = XQRS(sig=sig, fs=fs, conf=conf) xqrs.detect(sampfrom=sampfrom, sampto=sampto, verbose=verbose) return xqrs.qrs_inds
[ "def", "xqrs_detect", "(", "sig", ",", "fs", ",", "sampfrom", "=", "0", ",", "sampto", "=", "'end'", ",", "conf", "=", "None", ",", "learn", "=", "True", ",", "verbose", "=", "True", ")", ":", "xqrs", "=", "XQRS", "(", "sig", "=", "sig", ",", "fs", "=", "fs", ",", "conf", "=", "conf", ")", "xqrs", ".", "detect", "(", "sampfrom", "=", "sampfrom", ",", "sampto", "=", "sampto", ",", "verbose", "=", "verbose", ")", "return", "xqrs", ".", "qrs_inds" ]
Run the 'xqrs' qrs detection algorithm on a signal. See the docstring of the XQRS class for algorithm details. Parameters ---------- sig : numpy array The input ecg signal to apply the qrs detection on. fs : int or float The sampling frequency of the input signal. sampfrom : int, optional The starting sample number to run the detection on. sampto : The final sample number to run the detection on. Set as 'end' to run on the entire signal. conf : XQRS.Conf object, optional The configuration object specifying signal configuration parameters. See the docstring of the XQRS.Conf class. learn : bool, optional Whether to apply learning on the signal before running the main detection. If learning fails or is not conducted, the default configuration parameters will be used to initialize these variables. verbose : bool, optional Whether to display the stages and outcomes of the detection process. Returns ------- qrs_inds : numpy array The indices of the detected qrs complexes Examples -------- >>> import wfdb >>> from wfdb import processing >>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0]) >>> qrs_inds = processing.xqrs_detect(sig=sig[:,0], fs=fields['fs'])
[ "Run", "the", "xqrs", "qrs", "detection", "algorithm", "on", "a", "signal", ".", "See", "the", "docstring", "of", "the", "XQRS", "class", "for", "algorithm", "details", "." ]
python
train
Autodesk/aomi
aomi/helpers.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/helpers.py#L44-L53
def hard_path(path, prefix_dir): """Returns an absolute path to either the relative or absolute file.""" relative = abspath("%s/%s" % (prefix_dir, path)) a_path = abspath(path) if os.path.exists(relative): LOG.debug("using relative path %s (%s)", relative, path) return relative LOG.debug("using absolute path %s", a_path) return a_path
[ "def", "hard_path", "(", "path", ",", "prefix_dir", ")", ":", "relative", "=", "abspath", "(", "\"%s/%s\"", "%", "(", "prefix_dir", ",", "path", ")", ")", "a_path", "=", "abspath", "(", "path", ")", "if", "os", ".", "path", ".", "exists", "(", "relative", ")", ":", "LOG", ".", "debug", "(", "\"using relative path %s (%s)\"", ",", "relative", ",", "path", ")", "return", "relative", "LOG", ".", "debug", "(", "\"using absolute path %s\"", ",", "a_path", ")", "return", "a_path" ]
Returns an absolute path to either the relative or absolute file.
[ "Returns", "an", "absolute", "path", "to", "either", "the", "relative", "or", "absolute", "file", "." ]
python
train
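A runnable sketch of the same relative-before-absolute lookup with the logging stripped out (the file names are hypothetical; standard library only):

import os
from os.path import abspath

def hard_path(path, prefix_dir):
    # prefer the path rooted at prefix_dir when that file exists
    relative = abspath("%s/%s" % (prefix_dir, path))
    if os.path.exists(relative):
        return relative
    return abspath(path)

# falls back to the absolute interpretation for a missing relative file
print(hard_path("no_such_file.yml", "/tmp"))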
ga4gh/ga4gh-client
ga4gh/client/client.py
https://github.com/ga4gh/ga4gh-client/blob/d23b00b89112ef0930d45ee75aa3c6de3db615c5/ga4gh/client/client.py#L802-L812
def search_rna_quantification_sets(self, dataset_id): """ Returns an iterator over the RnaQuantificationSet objects from the server """ request = protocol.SearchRnaQuantificationSetsRequest() request.dataset_id = dataset_id request.page_size = pb.int(self._page_size) return self._run_search_request( request, "rnaquantificationsets", protocol.SearchRnaQuantificationSetsResponse)
[ "def", "search_rna_quantification_sets", "(", "self", ",", "dataset_id", ")", ":", "request", "=", "protocol", ".", "SearchRnaQuantificationSetsRequest", "(", ")", "request", ".", "dataset_id", "=", "dataset_id", "request", ".", "page_size", "=", "pb", ".", "int", "(", "self", ".", "_page_size", ")", "return", "self", ".", "_run_search_request", "(", "request", ",", "\"rnaquantificationsets\"", ",", "protocol", ".", "SearchRnaQuantificationSetsResponse", ")" ]
Returns an iterator over the RnaQuantificationSet objects from the server
[ "Returns", "an", "iterator", "over", "the", "RnaQuantificationSet", "objects", "from", "the", "server" ]
python
train
saltstack/salt
salt/modules/pip.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pip.py#L1466-L1563
def list_all_versions(pkg, bin_env=None, include_alpha=False, include_beta=False, include_rc=False, user=None, cwd=None, index_url=None, extra_index_url=None): ''' .. versionadded:: 2017.7.3 List all available versions of a pip package pkg The package to check bin_env Path to pip (or to a virtualenv). This can be used to specify the path to the pip to use when more than one Python release is installed (e.g. ``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is specified, it is assumed to be a virtualenv. include_alpha Include alpha versions in the list include_beta Include beta versions in the list include_rc Include release candidates versions in the list user The user under which to run pip cwd Directory from which to run pip index_url Base URL of Python Package Index .. versionadded:: 2019.2.0 extra_index_url Additional URL of Python Package Index .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' pip.list_all_versions <package name> ''' cmd = _get_pip_bin(bin_env) cmd.extend(['install', '{0}==versions'.format(pkg)]) if index_url: if not salt.utils.url.validate(index_url, VALID_PROTOS): raise CommandExecutionError( '\'{0}\' is not a valid URL'.format(index_url) ) cmd.extend(['--index-url', index_url]) if extra_index_url: if not salt.utils.url.validate(extra_index_url, VALID_PROTOS): raise CommandExecutionError( '\'{0}\' is not a valid URL'.format(extra_index_url) ) cmd.extend(['--extra-index-url', extra_index_url]) cmd_kwargs = dict(cwd=cwd, runas=user, output_loglevel='quiet', redirect_stderr=True) if bin_env and os.path.isdir(bin_env): cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env} result = __salt__['cmd.run_all'](cmd, **cmd_kwargs) filtered = [] if not include_alpha: filtered.append('a') if not include_beta: filtered.append('b') if not include_rc: filtered.append('rc') if filtered: excludes = re.compile(r'^((?!{0}).)*$'.format('|'.join(filtered))) else: excludes = re.compile(r'') versions = [] for line in result['stdout'].splitlines(): match = re.search(r'\s*Could not find a version.* \(from versions: (.*)\)', line) if match: versions = [v for v in match.group(1).split(', ') if v and excludes.match(v)] versions.sort(key=pkg_resources.parse_version) break if not versions: return None return versions
[ "def", "list_all_versions", "(", "pkg", ",", "bin_env", "=", "None", ",", "include_alpha", "=", "False", ",", "include_beta", "=", "False", ",", "include_rc", "=", "False", ",", "user", "=", "None", ",", "cwd", "=", "None", ",", "index_url", "=", "None", ",", "extra_index_url", "=", "None", ")", ":", "cmd", "=", "_get_pip_bin", "(", "bin_env", ")", "cmd", ".", "extend", "(", "[", "'install'", ",", "'{0}==versions'", ".", "format", "(", "pkg", ")", "]", ")", "if", "index_url", ":", "if", "not", "salt", ".", "utils", ".", "url", ".", "validate", "(", "index_url", ",", "VALID_PROTOS", ")", ":", "raise", "CommandExecutionError", "(", "'\\'{0}\\' is not a valid URL'", ".", "format", "(", "index_url", ")", ")", "cmd", ".", "extend", "(", "[", "'--index-url'", ",", "index_url", "]", ")", "if", "extra_index_url", ":", "if", "not", "salt", ".", "utils", ".", "url", ".", "validate", "(", "extra_index_url", ",", "VALID_PROTOS", ")", ":", "raise", "CommandExecutionError", "(", "'\\'{0}\\' is not a valid URL'", ".", "format", "(", "extra_index_url", ")", ")", "cmd", ".", "extend", "(", "[", "'--extra-index-url'", ",", "extra_index_url", "]", ")", "cmd_kwargs", "=", "dict", "(", "cwd", "=", "cwd", ",", "runas", "=", "user", ",", "output_loglevel", "=", "'quiet'", ",", "redirect_stderr", "=", "True", ")", "if", "bin_env", "and", "os", ".", "path", ".", "isdir", "(", "bin_env", ")", ":", "cmd_kwargs", "[", "'env'", "]", "=", "{", "'VIRTUAL_ENV'", ":", "bin_env", "}", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "*", "*", "cmd_kwargs", ")", "filtered", "=", "[", "]", "if", "not", "include_alpha", ":", "filtered", ".", "append", "(", "'a'", ")", "if", "not", "include_beta", ":", "filtered", ".", "append", "(", "'b'", ")", "if", "not", "include_rc", ":", "filtered", ".", "append", "(", "'rc'", ")", "if", "filtered", ":", "excludes", "=", "re", ".", "compile", "(", "r'^((?!{0}).)*$'", ".", "format", "(", "'|'", ".", "join", "(", "filtered", ")", ")", ")", "else", ":", "excludes", "=", "re", ".", "compile", "(", "r''", ")", "versions", "=", "[", "]", "for", "line", "in", "result", "[", "'stdout'", "]", ".", "splitlines", "(", ")", ":", "match", "=", "re", ".", "search", "(", "r'\\s*Could not find a version.* \\(from versions: (.*)\\)'", ",", "line", ")", "if", "match", ":", "versions", "=", "[", "v", "for", "v", "in", "match", ".", "group", "(", "1", ")", ".", "split", "(", "', '", ")", "if", "v", "and", "excludes", ".", "match", "(", "v", ")", "]", "versions", ".", "sort", "(", "key", "=", "pkg_resources", ".", "parse_version", ")", "break", "if", "not", "versions", ":", "return", "None", "return", "versions" ]
.. versionadded:: 2017.7.3 List all available versions of a pip package pkg The package to check bin_env Path to pip (or to a virtualenv). This can be used to specify the path to the pip to use when more than one Python release is installed (e.g. ``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is specified, it is assumed to be a virtualenv. include_alpha Include alpha versions in the list include_beta Include beta versions in the list include_rc Include release candidates versions in the list user The user under which to run pip cwd Directory from which to run pip index_url Base URL of Python Package Index .. versionadded:: 2019.2.0 extra_index_url Additional URL of Python Package Index .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' pip.list_all_versions <package name>
[ "..", "versionadded", "::", "2017", ".", "7", ".", "3" ]
python
train
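The interesting step in list_all_versions is recovering the version list from pip's error output; a self-contained sketch of just that parsing, run on a simulated line (regexes copied from the function above):

import re

line = ("Could not find a version that satisfies the requirement foo==versions "
        "(from versions: 1.0a1, 1.0b2, 1.0rc1, 1.0, 1.1)")
# exclude pre-releases the same way the function does
excludes = re.compile(r'^((?!{0}).)*$'.format('|'.join(['a', 'b', 'rc'])))
match = re.search(r'\s*Could not find a version.* \(from versions: (.*)\)', line)
versions = [v for v in match.group(1).split(', ') if v and excludes.match(v)]
assert versions == ['1.0', '1.1']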
latchset/jwcrypto
jwcrypto/jwt.py
https://github.com/latchset/jwcrypto/blob/961df898dc08f63fe3d900f2002618740bc66b4a/jwcrypto/jwt.py#L416-L428
def make_signed_token(self, key): """Signs the payload. Creates a JWS token with the header as the JWS protected header and the claims as the payload. See (:class:`jwcrypto.jws.JWS`) for details on the exceptions that may be raised. :param key: A (:class:`jwcrypto.jwk.JWK`) key. """ t = JWS(self.claims) t.add_signature(key, protected=self.header) self.token = t
[ "def", "make_signed_token", "(", "self", ",", "key", ")", ":", "t", "=", "JWS", "(", "self", ".", "claims", ")", "t", ".", "add_signature", "(", "key", ",", "protected", "=", "self", ".", "header", ")", "self", ".", "token", "=", "t" ]
Signs the payload. Creates a JWS token with the header as the JWS protected header and the claims as the payload. See (:class:`jwcrypto.jws.JWS`) for details on the exceptions that may be raised. :param key: A (:class:`jwcrypto.jwk.JWK`) key.
[ "Signs", "the", "payload", "." ]
python
train
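End-to-end usage of make_signed_token follows the jwcrypto documentation; a sketch (assumes the jwcrypto package is installed; the key and claims are throwaway values):

from jwcrypto import jwk, jwt

key = jwk.JWK.generate(kty='oct', size=256)            # symmetric HMAC key
token = jwt.JWT(header={'alg': 'HS256'}, claims={'sub': 'demo'})
token.make_signed_token(key)                           # the method shown above
print(token.serialize())                               # compact JWS serialization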
psss/did
did/plugins/sentry.py
https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/plugins/sentry.py#L81-L85
def issues(self, kind, email): """ Filter unique issues for given activity type and email """ return list(set([unicode(activity.issue) for activity in self.activities() if kind == activity.kind and activity.user['email'] == email]))
[ "def", "issues", "(", "self", ",", "kind", ",", "email", ")", ":", "return", "list", "(", "set", "(", "[", "unicode", "(", "activity", ".", "issue", ")", "for", "activity", "in", "self", ".", "activities", "(", ")", "if", "kind", "==", "activity", ".", "kind", "and", "activity", ".", "user", "[", "'email'", "]", "==", "email", "]", ")", ")" ]
Filter unique issues for given activity type and email
[ "Filter", "unique", "issues", "for", "given", "activity", "type", "and", "email" ]
python
train
openstack/proliantutils
proliantutils/redfish/redfish.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/redfish.py#L1133-L1170
def set_bios_settings(self, data=None, only_allowed_settings=True): """Sets current BIOS settings to the provided data. :param: only_allowed_settings: True when only allowed BIOS settings are to be set. If False, all the BIOS settings supported by iLO and present in the 'data' are set. :param: data: a dictionary of BIOS settings to be applied. Depending on the 'only_allowed_settings', either only the allowed settings are set or all the supported settings that are in the 'data' are set. :raises: IloError, on an error from iLO. :raises: IloCommandNotSupportedError, if the command is not supported on the server. """ if not data: raise exception.IloError("Could not apply settings with" " empty data") sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID) if only_allowed_settings: unsupported_settings = [key for key in data if key not in ( ilo_cons.SUPPORTED_REDFISH_BIOS_PROPERTIES)] if unsupported_settings: msg = ("Could not apply settings as one or more settings are" " not supported. Unsupported settings are %s." " Supported settings are %s." % ( unsupported_settings, ilo_cons.SUPPORTED_REDFISH_BIOS_PROPERTIES)) raise exception.IloError(msg) try: settings_required = sushy_system.bios_settings.pending_settings settings_required.update_bios_data_by_patch(data) except sushy.exceptions.SushyError as e: msg = (self._('The pending BIOS Settings resource not found.' ' Error %(error)s') % {'error': str(e)}) LOG.debug(msg) raise exception.IloError(msg)
[ "def", "set_bios_settings", "(", "self", ",", "data", "=", "None", ",", "only_allowed_settings", "=", "True", ")", ":", "if", "not", "data", ":", "raise", "exception", ".", "IloError", "(", "\"Could not apply settings with\"", "\" empty data\"", ")", "sushy_system", "=", "self", ".", "_get_sushy_system", "(", "PROLIANT_SYSTEM_ID", ")", "if", "only_allowed_settings", ":", "unsupported_settings", "=", "[", "key", "for", "key", "in", "data", "if", "key", "not", "in", "(", "ilo_cons", ".", "SUPPORTED_REDFISH_BIOS_PROPERTIES", ")", "]", "if", "unsupported_settings", ":", "msg", "=", "(", "\"Could not apply settings as one or more settings are\"", "\" not supported. Unsupported settings are %s.\"", "\" Supported settings are %s.\"", "%", "(", "unsupported_settings", ",", "ilo_cons", ".", "SUPPORTED_REDFISH_BIOS_PROPERTIES", ")", ")", "raise", "exception", ".", "IloError", "(", "msg", ")", "try", ":", "settings_required", "=", "sushy_system", ".", "bios_settings", ".", "pending_settings", "settings_required", ".", "update_bios_data_by_patch", "(", "data", ")", "except", "sushy", ".", "exceptions", ".", "SushyError", "as", "e", ":", "msg", "=", "(", "self", ".", "_", "(", "'The pending BIOS Settings resource not found.'", "' Error %(error)s'", ")", "%", "{", "'error'", ":", "str", "(", "e", ")", "}", ")", "LOG", ".", "debug", "(", "msg", ")", "raise", "exception", ".", "IloError", "(", "msg", ")" ]
Sets current BIOS settings to the provided data. :param: only_allowed_settings: True when only allowed BIOS settings are to be set. If False, all the BIOS settings supported by iLO and present in the 'data' are set. :param: data: a dictionary of BIOS settings to be applied. Depending on the 'only_allowed_settings', either only the allowed settings are set or all the supported settings that are in the 'data' are set. :raises: IloError, on an error from iLO. :raises: IloCommandNotSupportedError, if the command is not supported on the server.
[ "Sets", "current", "BIOS", "settings", "to", "the", "provided", "data", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/inputsplitter.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/inputsplitter.py#L816-L821
def transform_cell(self, cell): """Process and translate a cell of input. """ self.reset() self.push(cell) return self.source_reset()
[ "def", "transform_cell", "(", "self", ",", "cell", ")", ":", "self", ".", "reset", "(", ")", "self", ".", "push", "(", "cell", ")", "return", "self", ".", "source_reset", "(", ")" ]
Process and translate a cell of input.
[ "Process", "and", "translate", "a", "cell", "of", "input", "." ]
python
test
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_nameserver.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_nameserver.py#L103-L117
def get_nameserver_detail_output_show_nameserver_nameserver_portsymb(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_nameserver_detail = ET.Element("get_nameserver_detail") config = get_nameserver_detail output = ET.SubElement(get_nameserver_detail, "output") show_nameserver = ET.SubElement(output, "show-nameserver") nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid") nameserver_portid_key.text = kwargs.pop('nameserver_portid') nameserver_portsymb = ET.SubElement(show_nameserver, "nameserver-portsymb") nameserver_portsymb.text = kwargs.pop('nameserver_portsymb') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_nameserver_detail_output_show_nameserver_nameserver_portsymb", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_nameserver_detail", "=", "ET", ".", "Element", "(", "\"get_nameserver_detail\"", ")", "config", "=", "get_nameserver_detail", "output", "=", "ET", ".", "SubElement", "(", "get_nameserver_detail", ",", "\"output\"", ")", "show_nameserver", "=", "ET", ".", "SubElement", "(", "output", ",", "\"show-nameserver\"", ")", "nameserver_portid_key", "=", "ET", ".", "SubElement", "(", "show_nameserver", ",", "\"nameserver-portid\"", ")", "nameserver_portid_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'nameserver_portid'", ")", "nameserver_portsymb", "=", "ET", ".", "SubElement", "(", "show_nameserver", ",", "\"nameserver-portsymb\"", ")", "nameserver_portsymb", ".", "text", "=", "kwargs", ".", "pop", "(", "'nameserver_portsymb'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
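The generated method above is plain ElementTree underneath; a standalone sketch of building the same nested request (the port id and symbol values are made up):

import xml.etree.ElementTree as ET

config = ET.Element("get_nameserver_detail")
output = ET.SubElement(config, "output")
show_ns = ET.SubElement(output, "show-nameserver")
ET.SubElement(show_ns, "nameserver-portid").text = "010100"
ET.SubElement(show_ns, "nameserver-portsymb").text = "port0"
print(ET.tostring(config).decode())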
dropbox/stone
stone/backends/obj_c_types.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/obj_c_types.py#L497-L517
def _generate_struct_cstor_default(self, struct): """Emits struct convenience constructor. Default arguments are omitted.""" if not self._struct_has_defaults(struct): return fields_no_default = [ f for f in struct.all_fields if not f.has_default and not is_nullable_type(f.data_type) ] with self.block_func( func=self._cstor_name_from_fields(fields_no_default), args=fmt_func_args_from_fields(fields_no_default), return_type='instancetype'): args = ([(fmt_var(f.name), fmt_var(f.name) if not f.has_default and not is_nullable_type(f.data_type) else 'nil') for f in struct.all_fields]) cstor_args = fmt_func_args(args) self.emit('return [self {}:{}];'.format( self._cstor_name_from_fields(struct.all_fields), cstor_args)) self.emit()
[ "def", "_generate_struct_cstor_default", "(", "self", ",", "struct", ")", ":", "if", "not", "self", ".", "_struct_has_defaults", "(", "struct", ")", ":", "return", "fields_no_default", "=", "[", "f", "for", "f", "in", "struct", ".", "all_fields", "if", "not", "f", ".", "has_default", "and", "not", "is_nullable_type", "(", "f", ".", "data_type", ")", "]", "with", "self", ".", "block_func", "(", "func", "=", "self", ".", "_cstor_name_from_fields", "(", "fields_no_default", ")", ",", "args", "=", "fmt_func_args_from_fields", "(", "fields_no_default", ")", ",", "return_type", "=", "'instancetype'", ")", ":", "args", "=", "(", "[", "(", "fmt_var", "(", "f", ".", "name", ")", ",", "fmt_var", "(", "f", ".", "name", ")", "if", "not", "f", ".", "has_default", "and", "not", "is_nullable_type", "(", "f", ".", "data_type", ")", "else", "'nil'", ")", "for", "f", "in", "struct", ".", "all_fields", "]", ")", "cstor_args", "=", "fmt_func_args", "(", "args", ")", "self", ".", "emit", "(", "'return [self {}:{}];'", ".", "format", "(", "self", ".", "_cstor_name_from_fields", "(", "struct", ".", "all_fields", ")", ",", "cstor_args", ")", ")", "self", ".", "emit", "(", ")" ]
Emits struct convenience constructor. Default arguments are omitted.
[ "Emits", "struct", "convenience", "constructor", ".", "Default", "arguments", "are", "omitted", "." ]
python
train
dalloriam/engel
engel/application.py
https://github.com/dalloriam/engel/blob/f3477cd546e885bc53e755b3eb1452ce43ef5697/engel/application.py#L82-L90
def unregister(self, event, callback, selector=None): """ Unregisters an event that was being monitored. :param event: Name of the event to monitor :param callback: Callback function for when the event is received (Params: event, interface). :param selector: `(Optional)` CSS selector for the element(s) you want to monitor """ self.processor.unregister(event, callback, selector)
[ "def", "unregister", "(", "self", ",", "event", ",", "callback", ",", "selector", "=", "None", ")", ":", "self", ".", "processor", ".", "unregister", "(", "event", ",", "callback", ",", "selector", ")" ]
Unregisters an event that was being monitored. :param event: Name of the event to monitor :param callback: Callback function for when the event is received (Params: event, interface). :param selector: `(Optional)` CSS selector for the element(s) you want to monitor
[ "Unregisters", "an", "event", "that", "was", "being", "monitored", "." ]
python
train
sassoftware/saspy
saspy/sasets.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasets.py#L87-L121
def timeseries(self, data: ['SASdata', str] = None, by: str = None, corr: str = None, crosscorr: str = None, crossvar: str = None, decomp: str = None, id: str = None, out: [str, 'SASdata'] = None, season: str = None, trend: str = None, var: str = None, procopts: str = None, stmtpassthrough: str = None, **kwargs: dict) -> 'SASresults': """ Python method to call the TIMESERIES procedure Documentation link: http://support.sas.com/documentation/cdl//en/etsug/68148/HTML/default/viewer.htm#etsug_timeseries_syntax.htm :param data: SASdata object or string. This parameter is required. :param by: The by variable can only be a string type. :param corr: The corr variable can only be a string type. :param crosscorr: The crosscorr variable can only be a string type. :param crossvar: The crossvar variable can only be a string type. :param decomp: The decomp variable can only be a string type. :param id: The id variable can only be a string type. :param out: The out variable can be a string or SASdata type. :param season: The season variable can only be a string type. :param trend: The trend variable can only be a string type. :param var: The var variable can only be a string type. :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type. :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type. :return: SAS Result Object """
[ "def", "timeseries", "(", "self", ",", "data", ":", "[", "'SASdata'", ",", "str", "]", "=", "None", ",", "by", ":", "str", "=", "None", ",", "corr", ":", "str", "=", "None", ",", "crosscorr", ":", "str", "=", "None", ",", "crossvar", ":", "str", "=", "None", ",", "decomp", ":", "str", "=", "None", ",", "id", ":", "str", "=", "None", ",", "out", ":", "[", "str", ",", "'SASdata'", "]", "=", "None", ",", "season", ":", "str", "=", "None", ",", "trend", ":", "str", "=", "None", ",", "var", ":", "str", "=", "None", ",", "procopts", ":", "str", "=", "None", ",", "stmtpassthrough", ":", "str", "=", "None", ",", "*", "*", "kwargs", ":", "dict", ")", "->", "'SASresults'", ":" ]
Python method to call the TIMESERIES procedure Documentation link: http://support.sas.com/documentation/cdl//en/etsug/68148/HTML/default/viewer.htm#etsug_timeseries_syntax.htm :param data: SASdata object or string. This parameter is required. :param by: The by variable can only be a string type. :param corr: The corr variable can only be a string type. :param crosscorr: The crosscorr variable can only be a string type. :param crossvar: The crossvar variable can only be a string type. :param decomp: The decomp variable can only be a string type. :param id: The id variable can only be a string type. :param out: The out variable can be a string or SASdata type. :param season: The season variable can only be a string type. :param trend: The trend variable can only be a string type. :param var: The var variable can only be a string type. :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type. :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type. :return: SAS Result Object
[ "Python", "method", "to", "call", "the", "TIMESERIES", "procedure" ]
python
train
pantsbuild/pants
src/python/pants/option/options.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/option/options.py#L292-L298
def register(self, scope, *args, **kwargs): """Register an option in the given scope.""" self._assert_not_frozen() self.get_parser(scope).register(*args, **kwargs) deprecated_scope = self.known_scope_to_info[scope].deprecated_scope if deprecated_scope: self.get_parser(deprecated_scope).register(*args, **kwargs)
[ "def", "register", "(", "self", ",", "scope", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_assert_not_frozen", "(", ")", "self", ".", "get_parser", "(", "scope", ")", ".", "register", "(", "*", "args", ",", "*", "*", "kwargs", ")", "deprecated_scope", "=", "self", ".", "known_scope_to_info", "[", "scope", "]", ".", "deprecated_scope", "if", "deprecated_scope", ":", "self", ".", "get_parser", "(", "deprecated_scope", ")", ".", "register", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Register an option in the given scope.
[ "Register", "an", "option", "in", "the", "given", "scope", "." ]
python
train
MillionIntegrals/vel
vel/rl/buffers/backend/prioritized_vec_buffer_backend.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/buffers/backend/prioritized_vec_buffer_backend.py#L72-L75
def update_priority(self, tree_idx_list, priority_list): """ Update priorities of the elements in the tree """ for tree_idx, priority, segment_tree in zip(tree_idx_list, priority_list, self.segment_trees): segment_tree.update(tree_idx, priority)
[ "def", "update_priority", "(", "self", ",", "tree_idx_list", ",", "priority_list", ")", ":", "for", "tree_idx", ",", "priority", ",", "segment_tree", "in", "zip", "(", "tree_idx_list", ",", "priority_list", ",", "self", ".", "segment_trees", ")", ":", "segment_tree", ".", "update", "(", "tree_idx", ",", "priority", ")" ]
Update priorities of the elements in the tree
[ "Update", "priorities", "of", "the", "elements", "in", "the", "tree" ]
python
train
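update_priority delegates to per-environment segment trees; a minimal sum segment tree with the point update it relies on (a generic sketch, not the project's actual SegmentTree class):

import numpy as np

class SumSegmentTree:
    def __init__(self, capacity):
        # capacity must be a power of two; leaves live at [capacity, 2*capacity)
        self.capacity = capacity
        self.tree = np.zeros(2 * capacity)

    def update(self, idx, priority):
        pos = idx + self.capacity
        self.tree[pos] = priority
        pos //= 2
        while pos >= 1:                       # propagate sums up to the root
            self.tree[pos] = self.tree[2 * pos] + self.tree[2 * pos + 1]
            pos //= 2

tree = SumSegmentTree(4)
for i, p in enumerate([0.5, 1.0, 0.25, 0.25]):
    tree.update(i, p)
assert tree.tree[1] == 2.0  # the root holds the total priority mass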
MKLab-ITI/reveal-graph-embedding
reveal_graph_embedding/learning/holdout.py
https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/learning/holdout.py#L202-L270
def iterative_stratification(node_label_matrix, training_set_size, number_of_categories, random_seed=0): """ Iterative data fold stratification/balancing for two folds. Based on: Sechidis, K., Tsoumakas, G., & Vlahavas, I. (2011). On the stratification of multi-label data. In Machine Learning and Knowledge Discovery in Databases (pp. 145-158). Springer Berlin Heidelberg. Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format. - training_set_size: The minimum required size for the training set. - number_of_categories: The number of categories/classes in the learning. - random_seed: A seed for numpy random. Outputs: - train_set: A NumPy array containing the training set node ids. - test_set: A NumPy array containing the testing set node ids. """ number_of_labelled_nodes = node_label_matrix.shape[0] testing_set_size = number_of_labelled_nodes - training_set_size training_set_proportion = training_set_size/number_of_labelled_nodes testing_set_proportion = testing_set_size/number_of_labelled_nodes # Calculate the desired number of examples of each label at each subset. desired_label_number = np.zeros((2, number_of_categories), dtype=np.int64) node_label_matrix = node_label_matrix.tocsc() for j in range(number_of_categories): category_label_number = node_label_matrix.getcol(j).indices.size desired_label_number[0, j] = math.ceil(category_label_number*training_set_proportion) desired_label_number[1, j] = category_label_number - desired_label_number[0, j] train_ids = list() test_ids = list() append_train_id = train_ids.append append_test_id = test_ids.append # Randomize process np.random.seed(random_seed) while True: if len(train_ids) + len(test_ids) >= number_of_labelled_nodes: break # Find the label with the fewest (but at least one) remaining examples, breaking the ties randomly remaining_label_distribution = desired_label_number.sum(axis=0) min_label = np.min(remaining_label_distribution[np.where(remaining_label_distribution > 0)[0]]) label_indices = np.where(remaining_label_distribution == min_label)[0] chosen_label = int(np.random.choice(label_indices, 1)[0]) # Find the subset with the largest number of desired examples for this label, # breaking ties by considering the largest number of desired examples, breaking further ties randomly. fold_max_remaining_labels = np.max(desired_label_number[:, chosen_label]) fold_indices = np.where(desired_label_number[:, chosen_label] == fold_max_remaining_labels)[0] chosen_fold = int(np.random.choice(fold_indices, 1)[0]) # Choose a random example for the selected label. relevant_nodes = node_label_matrix.getcol(chosen_label).indices chosen_node = int(np.random.choice(np.setdiff1d(relevant_nodes, np.union1d(np.array(train_ids), np.array(test_ids))), 1)[0]) if chosen_fold == 0: append_train_id(chosen_node) desired_label_number[0, node_label_matrix.getrow(chosen_node).indices] -= 1 elif chosen_fold == 1: append_test_id(chosen_node) desired_label_number[1, node_label_matrix.getrow(chosen_node).indices] -= 1 else: raise RuntimeError return np.array(train_ids), np.array(test_ids)
[ "def", "iterative_stratification", "(", "node_label_matrix", ",", "training_set_size", ",", "number_of_categories", ",", "random_seed", "=", "0", ")", ":", "number_of_labelled_nodes", "=", "node_label_matrix", ".", "shape", "[", "0", "]", "testing_set_size", "=", "number_of_labelled_nodes", "-", "training_set_size", "training_set_proportion", "=", "training_set_size", "/", "number_of_labelled_nodes", "testing_set_proportion", "=", "testing_set_size", "/", "number_of_labelled_nodes", "# Calculate the desired number of examples of each label at each subset.", "desired_label_number", "=", "np", ".", "zeros", "(", "(", "2", ",", "number_of_categories", ")", ",", "dtype", "=", "np", ".", "int64", ")", "node_label_matrix", "=", "node_label_matrix", ".", "tocsc", "(", ")", "for", "j", "in", "range", "(", "number_of_categories", ")", ":", "category_label_number", "=", "node_label_matrix", ".", "getcol", "(", "j", ")", ".", "indices", ".", "size", "desired_label_number", "[", "0", ",", "j", "]", "=", "math", ".", "ceil", "(", "category_label_number", "*", "training_set_proportion", ")", "desired_label_number", "[", "1", ",", "j", "]", "=", "category_label_number", "-", "desired_label_number", "[", "0", ",", "j", "]", "train_ids", "=", "list", "(", ")", "test_ids", "=", "list", "(", ")", "append_train_id", "=", "train_ids", ".", "append", "append_test_id", "=", "test_ids", ".", "append", "# Randomize process", "np", ".", "random", ".", "seed", "(", "random_seed", ")", "while", "True", ":", "if", "len", "(", "train_ids", ")", "+", "len", "(", "test_ids", ")", ">=", "number_of_labelled_nodes", ":", "break", "# Find the label with the fewest (but at least one) remaining examples, breaking the ties randomly", "remaining_label_distribution", "=", "desired_label_number", ".", "sum", "(", "axis", "=", "0", ")", "min_label", "=", "np", ".", "min", "(", "remaining_label_distribution", "[", "np", ".", "where", "(", "remaining_label_distribution", ">", "0", ")", "[", "0", "]", "]", ")", "label_indices", "=", "np", ".", "where", "(", "remaining_label_distribution", "==", "min_label", ")", "[", "0", "]", "chosen_label", "=", "int", "(", "np", ".", "random", ".", "choice", "(", "label_indices", ",", "1", ")", "[", "0", "]", ")", "# Find the subset with the largest number of desired examples for this label,", "# breaking ties by considering the largest number of desired examples, breaking further ties randomly.", "fold_max_remaining_labels", "=", "np", ".", "max", "(", "desired_label_number", "[", ":", ",", "chosen_label", "]", ")", "fold_indices", "=", "np", ".", "where", "(", "desired_label_number", "[", ":", ",", "chosen_label", "]", "==", "fold_max_remaining_labels", ")", "[", "0", "]", "chosen_fold", "=", "int", "(", "np", ".", "random", ".", "choice", "(", "fold_indices", ",", "1", ")", "[", "0", "]", ")", "# Choose a random example for the selected label.", "relevant_nodes", "=", "node_label_matrix", ".", "getcol", "(", "chosen_label", ")", ".", "indices", "chosen_node", "=", "int", "(", "np", ".", "random", ".", "choice", "(", "np", ".", "setdiff1d", "(", "relevant_nodes", ",", "np", ".", "union1d", "(", "np", ".", "array", "(", "train_ids", ")", ",", "np", ".", "array", "(", "test_ids", ")", ")", ")", ",", "1", ")", "[", "0", "]", ")", "if", "chosen_fold", "==", "0", ":", "append_train_id", "(", "chosen_node", ")", "desired_label_number", "[", "0", ",", "node_label_matrix", ".", "getrow", "(", "chosen_node", ")", ".", "indices", "]", "-=", "1", "elif", "chosen_fold", "==", "1", ":", "append_test_id", "(", "chosen_node", ")", "desired_label_number", "[", "1", ",", "node_label_matrix", ".", "getrow", "(", "chosen_node", ")", ".", "indices", "]", "-=", "1", "else", ":", "raise", "RuntimeError", "return", "np", ".", "array", "(", "train_ids", ")", ",", "np", ".", "array", "(", "test_ids", ")" ]
Iterative data fold stratification/balancing for two folds. Based on: Sechidis, K., Tsoumakas, G., & Vlahavas, I. (2011). On the stratification of multi-label data. In Machine Learning and Knowledge Discovery in Databases (pp. 145-158). Springer Berlin Heidelberg. Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format. - training_set_size: The minimum required size for the training set. - number_of_categories: The number of categories/classes in the learning. - random_seed: A seed for numpy random. Outputs: - train_set: A NumPy array containing the training set node ids. - test_set: A NumPy array containing the testing set node ids.
[ "Iterative", "data", "fold", "stratification", "/", "balancing", "for", "two", "folds", "." ]
python
train
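A usage sketch for iterative_stratification on a toy multi-label matrix (assumes the function above plus numpy and scipy are importable; the label matrix is made up):

import numpy as np
import scipy.sparse as sp

# 6 nodes, 2 labels; rows are nodes, columns are binary label indicators
labels = sp.csr_matrix(np.array([[1, 0], [1, 0], [1, 1],
                                 [0, 1], [0, 1], [1, 1]]))
train, test = iterative_stratification(labels, training_set_size=4,
                                       number_of_categories=2)
# together the folds partition all labelled nodes
assert sorted(list(train) + list(test)) == list(range(6))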
gwastro/pycbc
pycbc/conversions.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/conversions.py#L226-L266
def _mass_from_knownmass_eta(known_mass, eta, known_is_secondary=False, force_real=True): r"""Returns the other component mass given one of the component masses and the symmetric mass ratio. This requires finding the roots of the quadratic equation: .. math:: \eta m_2^2 + (2\eta - 1)m_1 m_2 + \eta m_1^2 = 0. This has two solutions which correspond to :math:`m_1` being the heavier mass or it being the lighter mass. By default, `known_mass` is assumed to be the heavier (primary) mass, and the smaller solution is returned. Use the `other_is_secondary` to invert. Parameters ---------- known_mass : float The known component mass. eta : float The symmetric mass ratio. known_is_secondary : {False, bool} Whether the known component mass is the primary or the secondary. If True, `known_mass` is assumed to be the secondary (lighter) mass and the larger solution is returned. Otherwise, the smaller solution is returned. Default is False. force_real : {True, bool} Force the returned mass to be real. Returns ------- float The other component mass. """ roots = numpy.roots([eta, (2*eta - 1)*known_mass, eta*known_mass**2.]) if force_real: roots = numpy.real(roots) if known_is_secondary: return roots[roots.argmax()] else: return roots[roots.argmin()]
[ "def", "_mass_from_knownmass_eta", "(", "known_mass", ",", "eta", ",", "known_is_secondary", "=", "False", ",", "force_real", "=", "True", ")", ":", "roots", "=", "numpy", ".", "roots", "(", "[", "eta", ",", "(", "2", "*", "eta", "-", "1", ")", "*", "known_mass", ",", "eta", "*", "known_mass", "**", "2.", "]", ")", "if", "force_real", ":", "roots", "=", "numpy", ".", "real", "(", "roots", ")", "if", "known_is_secondary", ":", "return", "roots", "[", "roots", ".", "argmax", "(", ")", "]", "else", ":", "return", "roots", "[", "roots", ".", "argmin", "(", ")", "]" ]
r"""Returns the other component mass given one of the component masses and the symmetric mass ratio. This requires finding the roots of the quadratic equation: .. math:: \eta m_2^2 + (2\eta - 1)m_1 m_2 + \eta m_1^2 = 0. This has two solutions which correspond to :math:`m_1` being the heavier mass or it being the lighter mass. By default, `known_mass` is assumed to be the heavier (primary) mass, and the smaller solution is returned. Use the `other_is_secondary` to invert. Parameters ---------- known_mass : float The known component mass. eta : float The symmetric mass ratio. known_is_secondary : {False, bool} Whether the known component mass is the primary or the secondary. If True, `known_mass` is assumed to be the secondary (lighter) mass and the larger solution is returned. Otherwise, the smaller solution is returned. Default is False. force_real : {True, bool} Force the returned mass to be real. Returns ------- float The other component mass.
[ "r", "Returns", "the", "other", "component", "mass", "given", "one", "of", "the", "component", "masses", "and", "the", "symmetric", "mass", "ratio", "." ]
python
train
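A quick numerical check of the quadratic inversion in _mass_from_knownmass_eta (self-contained, no pycbc needed): pick two masses, compute eta = m1*m2/(m1+m2)**2, and recover the secondary mass from m1 and eta.

import numpy as np

m1, m2 = 10.0, 4.0
eta = m1 * m2 / (m1 + m2)**2
# roots of eta*x**2 + (2*eta - 1)*m1*x + eta*m1**2 = 0 are m2 and m1**2/m2
roots = np.real(np.roots([eta, (2 * eta - 1) * m1, eta * m1**2]))
# with m1 as the primary (heavier) mass, the secondary is the smaller root
assert np.isclose(roots.min(), m2)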