repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
pyviz/holoviews
holoviews/plotting/plot.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/plot.py#L1341-L1378
def _get_subplot_extents(self, overlay, ranges, range_type):
    """
    Iterates over all subplots and collects the extents of each.

    Returns a dict mapping range-type name to a list of per-subplot
    extents; for 'combined' the 'extents', 'soft', 'hard' and 'data'
    range types are all collected in one pass.
    """
    if range_type == 'combined':
        extents = {'extents': [], 'soft': [], 'hard': [], 'data': []}
    else:
        extents = {range_type: []}

    overlay_items = overlay.items()
    if self.batched and self.subplots:
        # In batched mode a single subplot renders every overlay element,
        # so pair each overlay key with that one shared subplot.
        shared = next(iter(self.subplots.values()))
        subplot_pairs = [(k, shared) for k in overlay.data.keys()]
    else:
        subplot_pairs = self.subplots.items()

    for key, subplot in subplot_pairs:
        if subplot is None:
            continue
        layer = overlay.data.get(key, None)
        if isinstance(self.hmap, DynamicMap) and layer is None:
            # Dynamic overlays may not share keys with the subplots; fall
            # back to the first overlay layer matching the subplot's type.
            matched = False
            for _, layer in overlay_items:
                if isinstance(layer, subplot.hmap.type):
                    matched = True
                    break
            if not matched:
                layer = None
        if layer is None or not subplot.apply_ranges:
            continue
        if isinstance(layer, CompositeOverlay):
            layer_ranges = ranges
        else:
            layer_ranges = util.match_spec(layer, ranges) if ranges else {}
        for rt in extents:
            extents[rt].append(
                subplot.get_extents(layer, layer_ranges, range_type=rt))
    return extents
[ "def", "_get_subplot_extents", "(", "self", ",", "overlay", ",", "ranges", ",", "range_type", ")", ":", "if", "range_type", "==", "'combined'", ":", "extents", "=", "{", "'extents'", ":", "[", "]", ",", "'soft'", ":", "[", "]", ",", "'hard'", ":", "[",...
Iterates over all subplots and collects the extents of each.
[ "Iterates", "over", "all", "subplots", "and", "collects", "the", "extents", "of", "each", "." ]
python
train
asyncdef/apyio
apyio/__init__.py
https://github.com/asyncdef/apyio/blob/d6b914929269b8795ca4d6b1ede8a393841cbc29/apyio/__init__.py#L150-L184
def wrap_file(file_like_obj):
    """Wrap a file like object in an async stream wrapper.

    Files generated with `open()` may be one of several types. This
    convenience function returns the stream wrapped in the most
    appropriate wrapper for the type. If the stream is already wrapped
    it is returned unaltered.
    """
    if isinstance(file_like_obj, AsyncIOBaseWrapper):
        return file_like_obj
    # Checked in order: BufferedRandom must precede BufferedReader and
    # BufferedWriter, which it subclasses.
    wrapper_for = (
        (sync_io.FileIO, AsyncFileIOWrapper),
        (sync_io.BufferedRandom, AsyncBufferedRandomWrapper),
        (sync_io.BufferedReader, AsyncBufferedReaderWrapper),
        (sync_io.BufferedWriter, AsyncBufferedWriterWrapper),
        (sync_io.TextIOWrapper, AsyncTextIOWrapperWrapper),
    )
    for sync_type, wrapper in wrapper_for:
        if isinstance(file_like_obj, sync_type):
            return wrapper(file_like_obj)
    raise TypeError(
        'Unrecognized file stream type {}.'.format(file_like_obj.__class__),
    )
[ "def", "wrap_file", "(", "file_like_obj", ")", ":", "if", "isinstance", "(", "file_like_obj", ",", "AsyncIOBaseWrapper", ")", ":", "return", "file_like_obj", "if", "isinstance", "(", "file_like_obj", ",", "sync_io", ".", "FileIO", ")", ":", "return", "AsyncFileI...
Wrap a file like object in an async stream wrapper. Files generated with `open()` may be one of several types. This convenience function returns the stream wrapped in the most appropriate wrapper for the type. If the stream is already wrapped it is returned unaltered.
[ "Wrap", "a", "file", "like", "object", "in", "an", "async", "stream", "wrapper", "." ]
python
train
thombashi/SimpleSQLite
simplesqlite/core.py
https://github.com/thombashi/SimpleSQLite/blob/b16f212132b9b98773e68bf7395abc2f60f56fe5/simplesqlite/core.py#L1439-L1464
def create_table_from_dataframe(
    self,
    dataframe,
    table_name="",
    primary_key=None,
    add_primary_key_column=False,
    index_attrs=None,
):
    """
    Create a table from a pandas.DataFrame instance.

    :param pandas.DataFrame dataframe: DataFrame instance to convert.
    :param str table_name: Table name to create.
    :param str primary_key: |primary_key|
    :param bool add_primary_key_column:
        If True, add a primary key column to the created table.
    :param tuple index_attrs: |index_attrs|

    :Examples:
        :ref:`example-create-table-from-df`
    """
    table_data = TableData.from_dataframe(
        dataframe=dataframe, table_name=table_name)
    self.__create_table_from_tabledata(
        table_data, primary_key, add_primary_key_column, index_attrs)
[ "def", "create_table_from_dataframe", "(", "self", ",", "dataframe", ",", "table_name", "=", "\"\"", ",", "primary_key", "=", "None", ",", "add_primary_key_column", "=", "False", ",", "index_attrs", "=", "None", ",", ")", ":", "self", ".", "__create_table_from_t...
Create a table from a pandas.DataFrame instance. :param pandas.DataFrame dataframe: DataFrame instance to convert. :param str table_name: Table name to create. :param str primary_key: |primary_key| :param tuple index_attrs: |index_attrs| :Examples: :ref:`example-create-table-from-df`
[ "Create", "a", "table", "from", "a", "pandas", ".", "DataFrame", "instance", "." ]
python
train
sorgerlab/indra
rest_api/api.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L89-L97
def trips_process_xml():
    """Process TRIPS EKB XML and return INDRA Statements."""
    # CORS preflight request: respond with an empty body.
    if request.method == 'OPTIONS':
        return {}
    payload = json.loads(request.body.read().decode('utf-8'))
    processor = trips.process_xml(payload.get('xml_str'))
    return _stmts_from_proc(processor)
[ "def", "trips_process_xml", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "load...
Process TRIPS EKB XML and return INDRA Statements.
[ "Process", "TRIPS", "EKB", "XML", "and", "return", "INDRA", "Statements", "." ]
python
train
pandas-dev/pandas
pandas/core/reshape/merge.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/merge.py#L735-L740
def _get_join_indexers(self):
    """Return the join indexers for this merge operation.

    Delegates to the module-level ``_get_join_indexers`` helper with the
    instance's join keys, sort flag and join type.
    """
    return _get_join_indexers(
        self.left_join_keys,
        self.right_join_keys,
        sort=self.sort,
        how=self.how,
    )
[ "def", "_get_join_indexers", "(", "self", ")", ":", "return", "_get_join_indexers", "(", "self", ".", "left_join_keys", ",", "self", ".", "right_join_keys", ",", "sort", "=", "self", ".", "sort", ",", "how", "=", "self", ".", "how", ")" ]
return the join indexers
[ "return", "the", "join", "indexers" ]
python
train
hydraplatform/hydra-base
hydra_base/lib/network.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/network.py#L2523-L2630
def clone_network(network_id, recipient_user_id=None, new_network_name=None,
                  project_id=None, project_name=None, new_project=True,
                  **kwargs):
    """
    Create an exact clone of the specified network for the specified user.

    If project_id is specified, put the new network in there.

    Otherwise create a new project with the specified name and put it in
    there.
    """
    user_id = kwargs['user_id']

    ex_net = db.DBSession.query(Network).filter(Network.id==network_id).one()
    ex_net.check_read_permission(user_id)

    if project_id is None and new_project == True:
        log.info("Creating a new project for cloned network")

        ex_proj = db.DBSession.query(Project).filter(Project.id==ex_net.project_id).one()
        user = db.DBSession.query(User).filter(User.id==user_id).one()

        project = Project()
        if project_name is None or project_name=="":
            project_name=ex_proj.name + " (Cloned by %s)" % user.display_name

        # A project with this name may already exist for this user; if so,
        # reuse it rather than creating a duplicate.
        ex_project = db.DBSession.query(Project).filter(Project.name==project_name,
                                                        Project.created_by==user_id).all()
        if len(ex_project) > 0:
            project=ex_project[0]
        else:
            project.name = project_name
            project.created_by = user_id
            project.set_owner(user_id)
            if recipient_user_id!=None:
                project.set_owner(recipient_user_id)
            db.DBSession.add(project)
            db.DBSession.flush()
        project_id=project.id
    elif project_id is None:
        log.info("Using current project for cloned network")
        project_id=ex_net.project_id

    if new_network_name is None or new_network_name == "":
        new_network_name=ex_net.name

    log.info('Cloning Network...')

    # Disambiguate the name if networks with this prefix already exist in
    # the target project.
    ex_network = db.DBSession.query(Network).filter(Network.project_id==project_id,
                                                    Network.name.like("{0}%".format(new_network_name))).all()
    if len(ex_network) > 0:
        new_network_name = new_network_name + " " + str(len(ex_network))

    # Copy the network-level attributes onto a fresh Network row.
    newnet = Network()
    newnet.project_id = project_id
    newnet.name = new_network_name
    newnet.description = ex_net.description
    newnet.layout = ex_net.layout
    newnet.status = ex_net.status
    newnet.projection = ex_net.projection
    newnet.created_by = user_id
    newnet.set_owner(user_id)
    if recipient_user_id is not None:
        newnet.set_owner(recipient_user_id)

    db.DBSession.add(newnet)
    db.DBSession.flush()

    newnetworkid = newnet.id

    # Clone the network contents in dependency order: nodes first, then
    # links (which reference nodes), then groups, attributes, types and
    # finally scenarios (which reference the cloned resource attributes).
    log.info('CLoning Nodes')
    node_id_map = _clone_nodes(network_id, newnetworkid)

    log.info('Cloning Links')
    link_id_map = _clone_links(network_id, newnetworkid, node_id_map)

    log.info('CLoning Groups')
    group_id_map = _clone_groups(network_id,
                                 newnetworkid,
                                 node_id_map,
                                 link_id_map)

    log.info("Cloning Resource Attributes")
    ra_id_map = _clone_resourceattrs(network_id,
                                     newnetworkid,
                                     node_id_map,
                                     link_id_map,
                                     group_id_map)

    log.info("Cloning Resource Types")
    _clone_resourcetypes(network_id,
                         newnetworkid,
                         node_id_map,
                         link_id_map,
                         group_id_map)

    log.info('Cloning Scenarios')
    _clone_scenarios(network_id,
                     newnetworkid,
                     ra_id_map,
                     node_id_map,
                     link_id_map,
                     group_id_map,
                     user_id)

    db.DBSession.flush()

    return newnetworkid
[ "def", "clone_network", "(", "network_id", ",", "recipient_user_id", "=", "None", ",", "new_network_name", "=", "None", ",", "project_id", "=", "None", ",", "project_name", "=", "None", ",", "new_project", "=", "True", ",", "*", "*", "kwargs", ")", ":", "u...
Create an exact clone of the specified network for the specified user. If project_id is specified, put the new network in there. Otherwise create a new project with the specified name and put it in there.
[ "Create", "an", "exact", "clone", "of", "the", "specified", "network", "for", "the", "specified", "user", "." ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L24914-L24944
def notify_update_image(self, x, y, width, height, image):
    """Informs about an update and provides 32bpp bitmap.

    in x of type int

    in y of type int

    in width of type int

    in height of type int

    in image of type str
        Array with 32BPP image data.
    """
    # Validate the scalar coordinates/dimensions in one pass.
    for arg_name, arg_value in (('x', x), ('y', y),
                                ('width', width), ('height', height)):
        if not isinstance(arg_value, baseinteger):
            raise TypeError(
                "%s can only be an instance of type baseinteger" % arg_name)
    if not isinstance(image, list):
        raise TypeError("image can only be an instance of type list")
    # Only spot-check the first few entries to keep validation cheap.
    for entry in image[:10]:
        if not isinstance(entry, basestring):
            raise TypeError(
                "array can only contain objects of type basestring")
    self._call("notifyUpdateImage",
               in_p=[x, y, width, height, image])
[ "def", "notify_update_image", "(", "self", ",", "x", ",", "y", ",", "width", ",", "height", ",", "image", ")", ":", "if", "not", "isinstance", "(", "x", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"x can only be an instance of type baseinteger\"...
Informs about an update and provides 32bpp bitmap. in x of type int in y of type int in width of type int in height of type int in image of type str Array with 32BPP image data.
[ "Informs", "about", "an", "update", "and", "provides", "32bpp", "bitmap", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/breakpoint.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L3104-L3122
def get_process_hardware_breakpoints(self, dwProcessId):
    """
    @see: L{get_thread_hardware_breakpoints}

    @type  dwProcessId: int
    @param dwProcessId: Process global ID.

    @rtype:  list of tuple( int, L{HardwareBreakpoint} )
    @return: All hardware breakpoints for each thread in the given process
        as a list of tuples (tid, bp).
    """
    aProcess = self.system.get_process(dwProcessId)
    # Threads with no registered hardware breakpoints contribute nothing.
    return [
        (dwThreadId, bp)
        for dwThreadId in aProcess.iter_thread_ids()
        for bp in self.__hardwareBP.get(dwThreadId, ())
    ]
[ "def", "get_process_hardware_breakpoints", "(", "self", ",", "dwProcessId", ")", ":", "result", "=", "list", "(", ")", "aProcess", "=", "self", ".", "system", ".", "get_process", "(", "dwProcessId", ")", "for", "dwThreadId", "in", "aProcess", ".", "iter_thread...
@see: L{get_thread_hardware_breakpoints} @type dwProcessId: int @param dwProcessId: Process global ID. @rtype: list of tuple( int, L{HardwareBreakpoint} ) @return: All hardware breakpoints for each thread in the given process as a list of tuples (tid, bp).
[ "@see", ":", "L", "{", "get_thread_hardware_breakpoints", "}" ]
python
train
ray-project/ray
python/ray/utils.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/utils.py#L160-L181
def decode(byte_str, allow_none=False):
    """Make this unicode in Python 3, otherwise leave it as bytes.

    Args:
        byte_str: The byte string to decode.
        allow_none: If true, then we will allow byte_str to be None in which
            case we will return an empty string. TODO(rkn): Remove this flag.
            This is only here to simplify upgrading to flatbuffers 1.10.0.

    Returns:
        A byte string in Python 2 and a unicode string in Python 3.
    """
    if allow_none and byte_str is None:
        return ""
    if not isinstance(byte_str, bytes):
        raise ValueError(
            "The argument {} must be a bytes object.".format(byte_str))
    # Python 2 keeps raw bytes; Python 3 decodes to a str.
    return byte_str.decode("ascii") if sys.version_info >= (3, 0) else byte_str
[ "def", "decode", "(", "byte_str", ",", "allow_none", "=", "False", ")", ":", "if", "byte_str", "is", "None", "and", "allow_none", ":", "return", "\"\"", "if", "not", "isinstance", "(", "byte_str", ",", "bytes", ")", ":", "raise", "ValueError", "(", "\"Th...
Make this unicode in Python 3, otherwise leave it as bytes. Args: byte_str: The byte string to decode. allow_none: If true, then we will allow byte_str to be None in which case we will return an empty string. TODO(rkn): Remove this flag. This is only here to simplify upgrading to flatbuffers 1.10.0. Returns: A byte string in Python 2 and a unicode string in Python 3.
[ "Make", "this", "unicode", "in", "Python", "3", "otherwise", "leave", "it", "as", "bytes", "." ]
python
train
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L17716-L17802
def create(cls, currency, all_co_owner, description=None, daily_limit=None,
           overdraft_limit=None, alias=None, avatar_uuid=None, status=None,
           sub_status=None, reason=None, reason_description=None,
           notification_filters=None, setting=None, custom_headers=None):
    """
    Create a MonetaryAccountJoint.

    :type user_id: int
    :param currency: The currency of the MonetaryAccountJoint as an ISO 4217
        formatted currency code.
    :type currency: str
    :param all_co_owner: The users the account will be joint with.
    :type all_co_owner: list[object_.CoOwner]
    :param description: The description of the MonetaryAccountJoint. Defaults
        to 'bunq account'.
    :type description: str
    :param daily_limit: The daily spending limit Amount. Defaults to 1000 EUR;
        must match the account currency and is limited to 10000 EUR.
    :type daily_limit: object_.Amount
    :param overdraft_limit: The maximum Amount the account can be 'in the
        red'. Must be 0 EUR or omitted.
    :type overdraft_limit: object_.Amount
    :param alias: The Aliases to add; must all be confirmed first.
    :type alias: list[object_.Pointer]
    :param avatar_uuid: The UUID of the Avatar of the MonetaryAccountJoint.
    :type avatar_uuid: str
    :param status: The status of the account. Ignored in POST requests
        (always set to ACTIVE); can be CANCELLED or PENDING_REOPEN in PUT
        requests. When updating status and/or sub_status no other fields can
        be updated in the same request (and vice versa).
    :type status: str
    :param sub_status: The sub-status providing extra status information.
        Should be ignored for POST requests. With status CANCELLED it can
        only be REDEMPTION_VOLUNTARY; with PENDING_REOPEN only NONE.
    :type sub_status: str
    :param reason: The reason for voluntarily cancelling (closing) the
        account; can only be OTHER. Only with status CANCELLED.
    :type reason: str
    :param reason_description: Optional free-form cancellation reason. Only
        with status CANCELLED.
    :type reason_description: str
    :param notification_filters: The notification types that trigger a push
        notification or URL callback for this account.
    :type notification_filters: list[object_.NotificationFilter]
    :param setting: The settings of the MonetaryAccountJoint.
    :type setting: object_.MonetaryAccountSetting
    :type custom_headers: dict[str, str]|None

    :rtype: BunqResponseInt
    """
    custom_headers = {} if custom_headers is None else custom_headers

    request_map = {
        cls.FIELD_CURRENCY: currency,
        cls.FIELD_DESCRIPTION: description,
        cls.FIELD_DAILY_LIMIT: daily_limit,
        cls.FIELD_OVERDRAFT_LIMIT: overdraft_limit,
        cls.FIELD_ALIAS: alias,
        cls.FIELD_AVATAR_UUID: avatar_uuid,
        cls.FIELD_STATUS: status,
        cls.FIELD_SUB_STATUS: sub_status,
        cls.FIELD_REASON: reason,
        cls.FIELD_REASON_DESCRIPTION: reason_description,
        cls.FIELD_ALL_CO_OWNER: all_co_owner,
        cls.FIELD_NOTIFICATION_FILTERS: notification_filters,
        cls.FIELD_SETTING: setting,
    }
    # Serialize, strip unset fields, and POST to the user's create endpoint.
    payload = cls._remove_field_for_request(
        converter.class_to_json(request_map))

    api_client = client.ApiClient(cls._get_api_context())
    endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id())
    response_raw = api_client.post(endpoint_url, payload.encode(),
                                   custom_headers)

    return BunqResponseInt.cast_from_bunq_response(
        cls._process_for_id(response_raw)
    )
[ "def", "create", "(", "cls", ",", "currency", ",", "all_co_owner", ",", "description", "=", "None", ",", "daily_limit", "=", "None", ",", "overdraft_limit", "=", "None", ",", "alias", "=", "None", ",", "avatar_uuid", "=", "None", ",", "status", "=", "Non...
:type user_id: int :param currency: The currency of the MonetaryAccountJoint as an ISO 4217 formatted currency code. :type currency: str :param all_co_owner: The users the account will be joint with. :type all_co_owner: list[object_.CoOwner] :param description: The description of the MonetaryAccountJoint. Defaults to 'bunq account'. :type description: str :param daily_limit: The daily spending limit Amount of the MonetaryAccountJoint. Defaults to 1000 EUR. Currency must match the MonetaryAccountJoint's currency. Limited to 10000 EUR. :type daily_limit: object_.Amount :param overdraft_limit: The maximum Amount the MonetaryAccountJoint can be 'in the red'. Must be 0 EUR or omitted. :type overdraft_limit: object_.Amount :param alias: The Aliases to add to MonetaryAccountJoint. Must all be confirmed first. Can mostly be ignored. :type alias: list[object_.Pointer] :param avatar_uuid: The UUID of the Avatar of the MonetaryAccountJoint. :type avatar_uuid: str :param status: The status of the MonetaryAccountJoint. Ignored in POST requests (always set to ACTIVE) can be CANCELLED or PENDING_REOPEN in PUT requests to cancel (close) or reopen the MonetaryAccountJoint. When updating the status and/or sub_status no other fields can be updated in the same request (and vice versa). :type status: str :param sub_status: The sub-status of the MonetaryAccountJoint providing extra information regarding the status. Should be ignored for POST requests. In case of PUT requests with status CANCELLED it can only be REDEMPTION_VOLUNTARY, while with status PENDING_REOPEN it can only be NONE. When updating the status and/or sub_status no other fields can be updated in the same request (and vice versa). :type sub_status: str :param reason: The reason for voluntarily cancelling (closing) the MonetaryAccountJoint, can only be OTHER. Should only be specified if updating the status to CANCELLED. 
:type reason: str :param reason_description: The optional free-form reason for voluntarily cancelling (closing) the MonetaryAccountJoint. Can be any user provided message. Should only be specified if updating the status to CANCELLED. :type reason_description: str :param notification_filters: The types of notifications that will result in a push notification or URL callback for this MonetaryAccountJoint. :type notification_filters: list[object_.NotificationFilter] :param setting: The settings of the MonetaryAccountJoint. :type setting: object_.MonetaryAccountSetting :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt
[ ":", "type", "user_id", ":", "int", ":", "param", "currency", ":", "The", "currency", "of", "the", "MonetaryAccountJoint", "as", "an", "ISO", "4217", "formatted", "currency", "code", ".", ":", "type", "currency", ":", "str", ":", "param", "all_co_owner", "...
python
train
AshleySetter/optoanalysis
optoanalysis/optoanalysis/optoanalysis.py
https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L3780-L3842
def fit_radius_from_potentials(z, SampleFreq, Damping, HistBins=100, show_fig=False):
    """
    Fits the dynamical potential to the Steady State Potential by varying
    the radius of the nanoparticle.

    Parameters
    ----------
    z : ndarray
        Position data
    SampleFreq : float
        frequency at which the position data was sampled
    Damping : float
        value of damping (in radians/second)
    HistBins : int
        number of values at which to evaluate the steady state potential /
        perform the fitting to the dynamical potential
    show_fig : bool
        if True, display the figure before returning

    Returns
    -------
    Radius : float
        Radius of the nanoparticle (in metres)
    RadiusError : float
        One Standard Deviation Error in the Radius from the Fit
        (doesn't take into account possible error in damping)
    fig : matplotlib.figure.Figure object
        figure showing fitted dynamical potential and stationary potential
    ax : matplotlib.axes.Axes object
        axes for above figure
    """
    dt = 1 / SampleFreq
    boltzmann = Boltzmann
    temp = 300  # bath temperature in Kelvin -- NOTE(review): assumed, confirm for setup
    density = 1800  # particle density in kg/m^3 -- NOTE(review): assumed silica, confirm

    # Steady-state potential from the position histogram, shifted so its
    # minimum sits at zero.
    SteadyStatePotnl = list(steady_state_potential(z, HistBins=HistBins))
    yoffset = min(SteadyStatePotnl[1])
    SteadyStatePotnl[1] -= yoffset

    # Dynamical (spring) potential derived from the motion itself.
    SpringPotnlFunc = dynamical_potential(z, dt)
    SpringPotnl = SpringPotnlFunc(z)

    kBT_Gamma = temp * boltzmann * 1 / Damping
    DynamicPotentialFunc = make_dynamical_potential_func(kBT_Gamma, density,
                                                         SpringPotnlFunc)
    # Fit the radius (initial guess 50, in nm); popt holds the fitted
    # radius, pcov its covariance (scipy.optimize.curve_fit semantics).
    popt, pcov = _curve_fit(DynamicPotentialFunc, SteadyStatePotnl[0],
                            SteadyStatePotnl[1], p0=50)
    perr = _np.sqrt(_np.diag(pcov))
    Radius, RadiusError = popt[0], perr[0]

    # Rescale the spring potential into units of kB*T via the fitted mass.
    mass = ((4 / 3) * pi * ((Radius * 10**-9)**3)) * density
    yfit = kBT_Gamma / mass
    Y = yfit * SpringPotnl

    fig, ax = _plt.subplots()
    ax.plot(SteadyStatePotnl[0], SteadyStatePotnl[1], 'bo',
            label="Steady State Potential")
    _plt.plot(z, Y, 'r-', label="Dynamical Potential")
    ax.legend(loc='best')
    ax.set_ylabel('U ($k_{B} T $ Joules)')
    ax.set_xlabel('Distance (mV)')
    _plt.tight_layout()
    if show_fig:
        _plt.show()
    return Radius * 1e-9, RadiusError * 1e-9, fig, ax
[ "def", "fit_radius_from_potentials", "(", "z", ",", "SampleFreq", ",", "Damping", ",", "HistBins", "=", "100", ",", "show_fig", "=", "False", ")", ":", "dt", "=", "1", "/", "SampleFreq", "boltzmann", "=", "Boltzmann", "temp", "=", "300", "# why halved??", ...
Fits the dynamical potential to the Steady State Potential by varying the Radius. z : ndarray Position data SampleFreq : float frequency at which the position data was sampled Damping : float value of damping (in radians/second) HistBins : int number of values at which to evaluate the steady state potential / perform the fitting to the dynamical potential Returns ------- Radius : float Radius of the nanoparticle RadiusError : float One Standard Deviation Error in the Radius from the Fit (doesn't take into account possible error in damping) fig : matplotlib.figure.Figure object figure showing fitted dynamical potential and stationary potential ax : matplotlib.axes.Axes object axes for above figure
[ "Fits", "the", "dynamical", "potential", "to", "the", "Steady", "State", "Potential", "by", "varying", "the", "Radius", ".", "z", ":", "ndarray", "Position", "data", "SampleFreq", ":", "float", "frequency", "at", "which", "the", "position", "data", "was", "s...
python
train
hyperledger-archives/indy-anoncreds
anoncreds/protocol/utils.py
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/utils.py#L264-L274
def genPrime():
    """
    Generate a random prime `prime` of LARGE_PRIME bits such that
    `2 * prime + 1` is also prime, i.e. `prime` is a Sophie Germain prime
    (making `2 * prime + 1` a safe prime). Candidates are drawn repeatedly
    until the safe-prime condition holds.
    """
    prime = cmod.randomPrime(LARGE_PRIME)
    # Keep drawing fresh candidates until 2*prime + 1 is itself prime.
    while not cmod.isPrime(2 * prime + 1):
        prime = cmod.randomPrime(LARGE_PRIME)
    return prime
[ "def", "genPrime", "(", ")", ":", "prime", "=", "cmod", ".", "randomPrime", "(", "LARGE_PRIME", ")", "i", "=", "0", "while", "not", "cmod", ".", "isPrime", "(", "2", "*", "prime", "+", "1", ")", ":", "prime", "=", "cmod", ".", "randomPrime", "(", ...
Generate 2 large primes `p_prime` and `q_prime` and use them to generate another 2 primes `p` and `q` of 1024 bits
[ "Generate", "2", "large", "primes", "p_prime", "and", "q_prime", "and", "use", "them", "to", "generate", "another", "2", "primes", "p", "and", "q", "of", "1024", "bits" ]
python
train
blakev/python-syncthing
syncthing/__init__.py
https://github.com/blakev/python-syncthing/blob/a7f4930f86f7543cd96990277945467896fb523d/syncthing/__init__.py#L288-L298
def config_insync(self):
    """ Returns whether the config is in sync, i.e. whether the
    running configuration is the same as that on disk.

    Returns:
        bool
    """
    in_sync = self.get('config/insync').get('configInSync', False)
    # A JSON null comes back as None -- normalize it to False.
    return False if in_sync is None else in_sync
[ "def", "config_insync", "(", "self", ")", ":", "status", "=", "self", ".", "get", "(", "'config/insync'", ")", ".", "get", "(", "'configInSync'", ",", "False", ")", "if", "status", "is", "None", ":", "status", "=", "False", "return", "status" ]
Returns whether the config is in sync, i.e. whether the running configuration is the same as that on disk. Returns: bool
[ "Returns", "whether", "the", "config", "is", "in", "sync", "i", ".", "e", ".", "whether", "the", "running", "configuration", "is", "the", "same", "as", "that", "on", "disk", "." ]
python
train
coleifer/walrus
walrus/cache.py
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/cache.py#L217-L249
def cache_async(self, key_fn=_key_fn, timeout=3600):
    """
    Decorator that will execute the cached function in a separate
    thread. The function will immediately return, returning a
    callable to the user. This callable can be used to check for a
    return value.

    For details, see the :ref:`cache-async` section of the docs.

    :param key_fn: Function used to generate cache key.
    :param int timeout: Cache timeout in seconds.
    :returns: A new function which can be called to retrieve the
        return value of the decorated function.
    """
    def decorator(fn):
        cached_fn = self.cached(key_fn, timeout)(fn)

        @wraps(fn)
        def inner(*args, **kwargs):
            result_queue = Queue()

            def worker():
                # Runs in a background thread; delivers the (possibly
                # cached) result through the queue.
                result_queue.put(cached_fn(*args, **kwargs))

            def _get_value(block=True, timeout=None):
                # Memoize the first queue read on the function object so
                # repeated calls return the same value without blocking.
                if not hasattr(_get_value, '_return_value'):
                    _get_value._return_value = result_queue.get(
                        block=block, timeout=timeout)
                return _get_value._return_value

            threading.Thread(target=worker).start()
            return _get_value
        return inner
    return decorator
[ "def", "cache_async", "(", "self", ",", "key_fn", "=", "_key_fn", ",", "timeout", "=", "3600", ")", ":", "def", "decorator", "(", "fn", ")", ":", "wrapped", "=", "self", ".", "cached", "(", "key_fn", ",", "timeout", ")", "(", "fn", ")", "@", "wraps...
Decorator that will execute the cached function in a separate thread. The function will immediately return, returning a callable to the user. This callable can be used to check for a return value. For details, see the :ref:`cache-async` section of the docs. :param key_fn: Function used to generate cache key. :param int timeout: Cache timeout in seconds. :returns: A new function which can be called to retrieve the return value of the decorated function.
[ "Decorator", "that", "will", "execute", "the", "cached", "function", "in", "a", "separate", "thread", ".", "The", "function", "will", "immediately", "return", "returning", "a", "callable", "to", "the", "user", ".", "This", "callable", "can", "be", "used", "t...
python
train
xoolive/traffic
traffic/algorithms/cpa.py
https://github.com/xoolive/traffic/blob/d1a8878098f16759f6b6e0e8d8b8f32e34a680a8/traffic/algorithms/cpa.py#L84-L216
def closest_point_of_approach( traffic: Traffic, lateral_separation: float, vertical_separation: float, projection: Union[pyproj.Proj, crs.Projection, None] = None, round_t: str = "d", max_workers: int = 4, ) -> CPA: """ Computes a CPA dataframe for all pairs of trajectories candidates for being separated by less than lateral_separation in vertical_separation. In order to be computed efficiently, the method needs the following parameters: - projection: a first filtering is applied on the bounding boxes of trajectories, expressed in meters. You need to provide a decent projection able to approximate distances by Euclide formula. By default, EuroPP() projection is considered, but a non explicit argument will raise a warning. - round_t: an additional column will be added in the DataFrame to group trajectories by relevant time frames. Distance computations will be considered only between trajectories flown in the same time frame. By default, the 'd' pandas freq parameter is considered, to group trajectories by day, but other ways of splitting ('h') may be more relevant and impact performance. - max_workers: distance computations are spread over a given number of processors. """ if projection is None: logging.warn("Defaulting to projection EuroPP()") projection = crs.EuroPP() if isinstance(projection, crs.Projection): projection = pyproj.Proj(projection.proj4_init) def yield_pairs(t_chunk: Traffic): """ This function yields all pairs of possible candidates for a CPA calculation. 
""" # combinations types Iterator[Tuple[T, ...]] for first, second in cast( Iterator[Tuple[Flight, Flight]], combinations(t_chunk, 2) ): # cast are necessary because of the lru_cache × property bug if ( cast(pd.Timestamp, first.start) > cast(pd.Timestamp, second.stop) ) or ( cast(pd.Timestamp, second.start) > cast(pd.Timestamp, first.stop) ): # Flights must fly at the same time continue if ( first.min("altitude") > second.max("altitude") + vertical_separation ): # Bounding boxes in altitude must cross continue if ( second.min("altitude") > first.max("altitude") + vertical_separation ): # Bounding boxes in altitude must cross continue if first.min("x") > second.max("x") + lateral_separation: # Bounding boxes in x must cross continue if second.min("x") > first.max("x") + lateral_separation: # Bounding boxes in x must cross continue if first.min("y") > second.max("y") + lateral_separation: # Bounding boxes in y must cross continue if second.min("y") > first.max("y") + lateral_separation: # Bounding boxes in y must cross continue # Next step is to check the 2D footprint of the trajectories # intersect. Before computing the intersection we bufferize the # trajectories by half the requested separation. first_shape = first.project_shape(projection) second_shape = second.project_shape(projection) if first_shape is None or second_shape is None: continue first_shape = first_shape.simplify(1e3).buffer( lateral_separation / 2 ) second_shape = first_shape.simplify(1e3).buffer( lateral_separation / 2 ) if first_shape.intersects(second_shape): yield first, second t_xyt = ( traffic.airborne() .compute_xy(projection) .assign(round_t=lambda df: df.timestamp.dt.round(round_t)) ) cumul = list() # Multiprocessing is implemented on each timerange slot only. # TODO: it would probably be more efficient to multiprocess over each # t_chunk rather than multiprocess the distance computation. 
for _, t_chunk in tqdm( t_xyt.groupby("round_t"), total=len(set(t_xyt.data.round_t)) ): with ProcessPoolExecutor(max_workers=max_workers) as executor: tasks = { # TODO submit(Flight.distance, first, second) executor.submit(first.distance, second): ( first.flight_id, second.flight_id, ) for (first, second) in yield_pairs(Traffic(t_chunk)) } for future in as_completed(tasks): cumul.append(future.result()) return CPA(pd.concat(cumul, sort=False))
[ "def", "closest_point_of_approach", "(", "traffic", ":", "Traffic", ",", "lateral_separation", ":", "float", ",", "vertical_separation", ":", "float", ",", "projection", ":", "Union", "[", "pyproj", ".", "Proj", ",", "crs", ".", "Projection", ",", "None", "]",...
Computes a CPA dataframe for all pairs of trajectories candidates for being separated by less than lateral_separation in vertical_separation. In order to be computed efficiently, the method needs the following parameters: - projection: a first filtering is applied on the bounding boxes of trajectories, expressed in meters. You need to provide a decent projection able to approximate distances by Euclide formula. By default, EuroPP() projection is considered, but a non explicit argument will raise a warning. - round_t: an additional column will be added in the DataFrame to group trajectories by relevant time frames. Distance computations will be considered only between trajectories flown in the same time frame. By default, the 'd' pandas freq parameter is considered, to group trajectories by day, but other ways of splitting ('h') may be more relevant and impact performance. - max_workers: distance computations are spread over a given number of processors.
[ "Computes", "a", "CPA", "dataframe", "for", "all", "pairs", "of", "trajectories", "candidates", "for", "being", "separated", "by", "less", "than", "lateral_separation", "in", "vertical_separation", "." ]
python
train
openstack/monasca-common
monasca_common/kafka_lib/protocol.py
https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/protocol.py#L601-L619
def create_gzip_message(payloads, key=None, compresslevel=None): """ Construct a Gzipped Message containing multiple Messages The given payloads will be encoded, compressed, and sent as a single atomic message to Kafka. Arguments: payloads: list(bytes), a list of payload to send be sent to Kafka key: bytes, a key used for partition routing (optional) """ message_set = KafkaProtocol._encode_message_set( [create_message(payload, pl_key) for payload, pl_key in payloads]) gzipped = gzip_encode(message_set, compresslevel=compresslevel) codec = ATTRIBUTE_CODEC_MASK & CODEC_GZIP return Message(0, 0x00 | codec, key, gzipped)
[ "def", "create_gzip_message", "(", "payloads", ",", "key", "=", "None", ",", "compresslevel", "=", "None", ")", ":", "message_set", "=", "KafkaProtocol", ".", "_encode_message_set", "(", "[", "create_message", "(", "payload", ",", "pl_key", ")", "for", "payloa...
Construct a Gzipped Message containing multiple Messages The given payloads will be encoded, compressed, and sent as a single atomic message to Kafka. Arguments: payloads: list(bytes), a list of payload to send be sent to Kafka key: bytes, a key used for partition routing (optional)
[ "Construct", "a", "Gzipped", "Message", "containing", "multiple", "Messages" ]
python
train
saltstack/salt
salt/modules/win_certutil.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_certutil.py#L55-L72
def get_stored_cert_serials(store): ''' Get all of the certificate serials in the specified store store The store to get all the certificate serials from CLI Example: .. code-block:: bash salt '*' certutil.get_stored_cert_serials <store> ''' cmd = "certutil.exe -store {0}".format(store) out = __salt__['cmd.run'](cmd) # match serial numbers by header position to work with multiple languages matches = re.findall(r"={16}\r\n.*:\s*(\w*)\r\n", out) return matches
[ "def", "get_stored_cert_serials", "(", "store", ")", ":", "cmd", "=", "\"certutil.exe -store {0}\"", ".", "format", "(", "store", ")", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", "# match serial numbers by header position to work with multiple langua...
Get all of the certificate serials in the specified store store The store to get all the certificate serials from CLI Example: .. code-block:: bash salt '*' certutil.get_stored_cert_serials <store>
[ "Get", "all", "of", "the", "certificate", "serials", "in", "the", "specified", "store" ]
python
train
Nic30/hwt
hwt/hdl/types/hdlType.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/types/hdlType.py#L28-L33
def fromPy(self, v, vldMask=None): """ Construct value of this type. Delegated on value class for this type """ return self.getValueCls().fromPy(v, self, vldMask=vldMask)
[ "def", "fromPy", "(", "self", ",", "v", ",", "vldMask", "=", "None", ")", ":", "return", "self", ".", "getValueCls", "(", ")", ".", "fromPy", "(", "v", ",", "self", ",", "vldMask", "=", "vldMask", ")" ]
Construct value of this type. Delegated on value class for this type
[ "Construct", "value", "of", "this", "type", ".", "Delegated", "on", "value", "class", "for", "this", "type" ]
python
test
saltstack/salt
salt/modules/libcloud_compute.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_compute.py#L155-L178
def list_locations(profile, **libcloud_kwargs): ''' Return a list of locations for this cloud :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's list_locations method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_compute.list_locations profile1 ''' conn = _get_driver(profile=profile) libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) locations = conn.list_locations(**libcloud_kwargs) ret = [] for loc in locations: ret.append(_simple_location(loc)) return ret
[ "def", "list_locations", "(", "profile", ",", "*", "*", "libcloud_kwargs", ")", ":", "conn", "=", "_get_driver", "(", "profile", "=", "profile", ")", "libcloud_kwargs", "=", "salt", ".", "utils", ".", "args", ".", "clean_kwargs", "(", "*", "*", "libcloud_k...
Return a list of locations for this cloud :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's list_locations method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_compute.list_locations profile1
[ "Return", "a", "list", "of", "locations", "for", "this", "cloud" ]
python
train
zyga/python-glibc
glibc.py
https://github.com/zyga/python-glibc/blob/d6fdb306b123a995471584a5201155c60a34448a/glibc.py#L159-L164
def lazily(self, name, callable, args): """ Load something lazily """ self._lazy[name] = callable, args self._all.add(name)
[ "def", "lazily", "(", "self", ",", "name", ",", "callable", ",", "args", ")", ":", "self", ".", "_lazy", "[", "name", "]", "=", "callable", ",", "args", "self", ".", "_all", ".", "add", "(", "name", ")" ]
Load something lazily
[ "Load", "something", "lazily" ]
python
train
narfman0/helga-markovify
helga_markovify/markov.py
https://github.com/narfman0/helga-markovify/blob/b5a82de070102e6da1fd3f5f81cad12d0a9185d8/helga_markovify/markov.py#L7-L12
def punctuate(current_text, new_text, add_punctuation): """ Add punctuation as needed """ if add_punctuation and current_text and not current_text[-1] in string.punctuation: current_text += '. ' spacer = ' ' if not current_text or (not current_text[-1].isspace() and not new_text[0].isspace()) else '' return current_text + spacer + new_text
[ "def", "punctuate", "(", "current_text", ",", "new_text", ",", "add_punctuation", ")", ":", "if", "add_punctuation", "and", "current_text", "and", "not", "current_text", "[", "-", "1", "]", "in", "string", ".", "punctuation", ":", "current_text", "+=", "'. '",...
Add punctuation as needed
[ "Add", "punctuation", "as", "needed" ]
python
train
vintasoftware/django-role-permissions
rolepermissions/roles.py
https://github.com/vintasoftware/django-role-permissions/blob/28924361e689e994e0c3575e18104a1a5abd8de6/rolepermissions/roles.py#L233-L240
def clear_roles(user): """Remove all roles from a user.""" roles = get_user_roles(user) for role in roles: role.remove_role_from_user(user) return roles
[ "def", "clear_roles", "(", "user", ")", ":", "roles", "=", "get_user_roles", "(", "user", ")", "for", "role", "in", "roles", ":", "role", ".", "remove_role_from_user", "(", "user", ")", "return", "roles" ]
Remove all roles from a user.
[ "Remove", "all", "roles", "from", "a", "user", "." ]
python
train
google/grr
grr/server/grr_response_server/databases/mysql_users.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_users.py#L254-L289
def ReadApprovalRequests(self, requestor_username, approval_type, subject_id=None, include_expired=False, cursor=None): """Reads approval requests of a given type for a given user.""" query = """ SELECT ar.approval_id, UNIX_TIMESTAMP(ar.timestamp), ar.approval_request, u.username, UNIX_TIMESTAMP(ag.timestamp) FROM approval_request ar LEFT JOIN approval_grant AS ag USING (username_hash, approval_id) LEFT JOIN grr_users u ON u.username_hash = ag.grantor_username_hash WHERE ar.username_hash = %s AND ar.approval_type = %s """ args = [mysql_utils.Hash(requestor_username), int(approval_type)] if subject_id: query += " AND ar.subject_id = %s" args.append(subject_id) query += " ORDER BY ar.approval_id" ret = [] now = rdfvalue.RDFDatetime.Now() cursor.execute(query, args) for approval_request in _ResponseToApprovalsWithGrants(cursor.fetchall()): if include_expired or approval_request.expiration_time >= now: ret.append(approval_request) return ret
[ "def", "ReadApprovalRequests", "(", "self", ",", "requestor_username", ",", "approval_type", ",", "subject_id", "=", "None", ",", "include_expired", "=", "False", ",", "cursor", "=", "None", ")", ":", "query", "=", "\"\"\"\n SELECT\n ar.approval_id,\...
Reads approval requests of a given type for a given user.
[ "Reads", "approval", "requests", "of", "a", "given", "type", "for", "a", "given", "user", "." ]
python
train
marcomusy/vtkplotter
vtkplotter/actors.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L1920-L1942
def addGaussNoise(self, sigma): """ Add gaussian noise. :param float sigma: sigma is expressed in percent of the diagonal size of actor. :Example: .. code-block:: python from vtkplotter import Sphere Sphere().addGaussNoise(1.0).show() """ sz = self.diagonalSize() pts = self.coordinates() n = len(pts) ns = np.random.randn(n, 3) * sigma * sz / 100 vpts = vtk.vtkPoints() vpts.SetNumberOfPoints(n) vpts.SetData(numpy_to_vtk(pts + ns, deep=True)) self.poly.SetPoints(vpts) self.poly.GetPoints().Modified() return self
[ "def", "addGaussNoise", "(", "self", ",", "sigma", ")", ":", "sz", "=", "self", ".", "diagonalSize", "(", ")", "pts", "=", "self", ".", "coordinates", "(", ")", "n", "=", "len", "(", "pts", ")", "ns", "=", "np", ".", "random", ".", "randn", "(", ...
Add gaussian noise. :param float sigma: sigma is expressed in percent of the diagonal size of actor. :Example: .. code-block:: python from vtkplotter import Sphere Sphere().addGaussNoise(1.0).show()
[ "Add", "gaussian", "noise", "." ]
python
train
DataDog/integrations-core
mongo/datadog_checks/mongo/mongo.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/mongo/datadog_checks/mongo/mongo.py#L537-L557
def _resolve_metric(self, original_metric_name, metrics_to_collect, prefix=""): """ Return the submit method and the metric name to use. The metric name is defined as follow: * If available, the normalized metric name alias * (Or) the normalized original metric name """ submit_method = ( metrics_to_collect[original_metric_name][0] if isinstance(metrics_to_collect[original_metric_name], tuple) else metrics_to_collect[original_metric_name] ) metric_name = ( metrics_to_collect[original_metric_name][1] if isinstance(metrics_to_collect[original_metric_name], tuple) else original_metric_name ) return submit_method, self._normalize(metric_name, submit_method, prefix)
[ "def", "_resolve_metric", "(", "self", ",", "original_metric_name", ",", "metrics_to_collect", ",", "prefix", "=", "\"\"", ")", ":", "submit_method", "=", "(", "metrics_to_collect", "[", "original_metric_name", "]", "[", "0", "]", "if", "isinstance", "(", "metri...
Return the submit method and the metric name to use. The metric name is defined as follow: * If available, the normalized metric name alias * (Or) the normalized original metric name
[ "Return", "the", "submit", "method", "and", "the", "metric", "name", "to", "use", "." ]
python
train
openstack/swauth
swauth/authtypes.py
https://github.com/openstack/swauth/blob/0c8eaf50a9e2b3317f3eba62f205546904bc6d74/swauth/authtypes.py#L42-L60
def validate_creds(creds): """Parse and validate user credentials whether format is right :param creds: User credentials :returns: Auth_type class instance and parsed user credentials in dict :raises ValueError: If credential format is wrong (eg: bad auth_type) """ try: auth_type, auth_rest = creds.split(':', 1) except ValueError: raise ValueError("Missing ':' in %s" % creds) authtypes = sys.modules[__name__] auth_encoder = getattr(authtypes, auth_type.title(), None) if auth_encoder is None: raise ValueError('Invalid auth_type: %s' % auth_type) auth_encoder = auth_encoder() parsed_creds = dict(type=auth_type, salt=None, hash=None) parsed_creds.update(auth_encoder.validate(auth_rest)) return auth_encoder, parsed_creds
[ "def", "validate_creds", "(", "creds", ")", ":", "try", ":", "auth_type", ",", "auth_rest", "=", "creds", ".", "split", "(", "':'", ",", "1", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Missing ':' in %s\"", "%", "creds", ")", "authtyp...
Parse and validate user credentials whether format is right :param creds: User credentials :returns: Auth_type class instance and parsed user credentials in dict :raises ValueError: If credential format is wrong (eg: bad auth_type)
[ "Parse", "and", "validate", "user", "credentials", "whether", "format", "is", "right" ]
python
train
stephen-bunn/file-config
tasks/package.py
https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/tasks/package.py#L145-L155
def stub(ctx): """ Generate typing stubs for the package. """ report.info(ctx, "package.stub", f"generating typing stubs for package") ctx.run( f"stubgen --include-private --no-import " f"--output {ctx.directory.joinpath('stubs')!s} " f"--search-path {ctx.directory.joinpath('src')!s} " f"--package {ctx.metadata['package_name']}" )
[ "def", "stub", "(", "ctx", ")", ":", "report", ".", "info", "(", "ctx", ",", "\"package.stub\"", ",", "f\"generating typing stubs for package\"", ")", "ctx", ".", "run", "(", "f\"stubgen --include-private --no-import \"", "f\"--output {ctx.directory.joinpath('stubs')!s} \""...
Generate typing stubs for the package.
[ "Generate", "typing", "stubs", "for", "the", "package", "." ]
python
train
tuxu/python-samplerate
samplerate/lowlevel.py
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L86-L102
def src_simple(input_data, output_data, ratio, converter_type, channels): """Perform a single conversion from an input buffer to an output buffer. Simple interface for performing a single conversion from input buffer to output buffer at a fixed conversion ratio. Simple interface does not require initialisation as it can only operate on a single buffer worth of audio. """ input_frames, _ = _check_data(input_data) output_frames, _ = _check_data(output_data) data = ffi.new('SRC_DATA*') data.input_frames = input_frames data.output_frames = output_frames data.src_ratio = ratio data.data_in = ffi.cast('float*', ffi.from_buffer(input_data)) data.data_out = ffi.cast('float*', ffi.from_buffer(output_data)) error = _lib.src_simple(data, converter_type, channels) return error, data.input_frames_used, data.output_frames_gen
[ "def", "src_simple", "(", "input_data", ",", "output_data", ",", "ratio", ",", "converter_type", ",", "channels", ")", ":", "input_frames", ",", "_", "=", "_check_data", "(", "input_data", ")", "output_frames", ",", "_", "=", "_check_data", "(", "output_data",...
Perform a single conversion from an input buffer to an output buffer. Simple interface for performing a single conversion from input buffer to output buffer at a fixed conversion ratio. Simple interface does not require initialisation as it can only operate on a single buffer worth of audio.
[ "Perform", "a", "single", "conversion", "from", "an", "input", "buffer", "to", "an", "output", "buffer", "." ]
python
train
krukas/Trionyx
trionyx/utils.py
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/utils.py#L24-L28
def import_object_by_string(namespace): """Import object by complete namespace""" segments = namespace.split('.') module = importlib.import_module('.'.join(segments[:-1])) return getattr(module, segments[-1])
[ "def", "import_object_by_string", "(", "namespace", ")", ":", "segments", "=", "namespace", ".", "split", "(", "'.'", ")", "module", "=", "importlib", ".", "import_module", "(", "'.'", ".", "join", "(", "segments", "[", ":", "-", "1", "]", ")", ")", "r...
Import object by complete namespace
[ "Import", "object", "by", "complete", "namespace" ]
python
train
odlgroup/odl
odl/operator/operator.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/operator.py#L1801-L1824
def derivative(self, x): """Return the derivative at ``x``. The derivative of the right scalar operator multiplication follows the chain rule: ``OperatorRightScalarMult(op, s).derivative(y) == OperatorLeftScalarMult(op.derivative(s * y), s)`` Parameters ---------- x : `domain` `element-like` Evaluation point of the derivative. Examples -------- >>> space = odl.rn(3) >>> operator = odl.IdentityOperator(space) - space.element([1, 1, 1]) >>> left_mul_op = OperatorRightScalarMult(operator, 3) >>> derivative = left_mul_op.derivative([0, 0, 0]) >>> derivative([1, 1, 1]) rn(3).element([ 3., 3., 3.]) """ return self.scalar * self.operator.derivative(self.scalar * x)
[ "def", "derivative", "(", "self", ",", "x", ")", ":", "return", "self", ".", "scalar", "*", "self", ".", "operator", ".", "derivative", "(", "self", ".", "scalar", "*", "x", ")" ]
Return the derivative at ``x``. The derivative of the right scalar operator multiplication follows the chain rule: ``OperatorRightScalarMult(op, s).derivative(y) == OperatorLeftScalarMult(op.derivative(s * y), s)`` Parameters ---------- x : `domain` `element-like` Evaluation point of the derivative. Examples -------- >>> space = odl.rn(3) >>> operator = odl.IdentityOperator(space) - space.element([1, 1, 1]) >>> left_mul_op = OperatorRightScalarMult(operator, 3) >>> derivative = left_mul_op.derivative([0, 0, 0]) >>> derivative([1, 1, 1]) rn(3).element([ 3., 3., 3.])
[ "Return", "the", "derivative", "at", "x", "." ]
python
train
kubernetes-client/python
kubernetes/client/apis/rbac_authorization_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/rbac_authorization_v1_api.py#L2794-L2814
def read_cluster_role(self, name, **kwargs): """ read the specified ClusterRole This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_cluster_role(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ClusterRole (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1ClusterRole If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_cluster_role_with_http_info(name, **kwargs) else: (data) = self.read_cluster_role_with_http_info(name, **kwargs) return data
[ "def", "read_cluster_role", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "read_cluster_role_with_htt...
read the specified ClusterRole This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_cluster_role(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ClusterRole (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1ClusterRole If the method is called asynchronously, returns the request thread.
[ "read", "the", "specified", "ClusterRole", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ...
python
train
upsight/doctor
doctor/types.py
https://github.com/upsight/doctor/blob/2cf1d433f6f1aa1355644b449a757c0660793cdd/doctor/types.py#L881-L889
def number(description, **kwargs) -> typing.Type: """Create a :class:`~doctor.types.Number` type. :param description: A description of the type. :param kwargs: Can include any attribute defined in :class:`~doctor.types.Number` """ kwargs['description'] = description return type('Number', (Number,), kwargs)
[ "def", "number", "(", "description", ",", "*", "*", "kwargs", ")", "->", "typing", ".", "Type", ":", "kwargs", "[", "'description'", "]", "=", "description", "return", "type", "(", "'Number'", ",", "(", "Number", ",", ")", ",", "kwargs", ")" ]
Create a :class:`~doctor.types.Number` type. :param description: A description of the type. :param kwargs: Can include any attribute defined in :class:`~doctor.types.Number`
[ "Create", "a", ":", "class", ":", "~doctor", ".", "types", ".", "Number", "type", "." ]
python
train
vkorn/pyvizio
custom_components/vizio/media_player.py
https://github.com/vkorn/pyvizio/blob/7153c9ad544195c867c14f8f03c97dba416c0a7a/custom_components/vizio/media_player.py#L233-L236
def volume_down(self): """Decreasing volume of the device.""" self._volume_level -= self._volume_step / self._max_volume self._device.vol_down(num=self._volume_step)
[ "def", "volume_down", "(", "self", ")", ":", "self", ".", "_volume_level", "-=", "self", ".", "_volume_step", "/", "self", ".", "_max_volume", "self", ".", "_device", ".", "vol_down", "(", "num", "=", "self", ".", "_volume_step", ")" ]
Decreasing volume of the device.
[ "Decreasing", "volume", "of", "the", "device", "." ]
python
test
polysquare/polysquare-generic-file-linter
polysquarelinter/spelling.py
https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L600-L641
def get_transition(self, # suppress(too-many-arguments) line, line_index, column, is_escaped, comment_system_transitions, eof=False): """Get transition from InCommentParser.""" del comment_system_transitions if (_token_at_col_in_line(line, column, "```", 3) and not _is_escaped(line, column, is_escaped)): # Hit a disable token, so resume the last parser return (DisabledParser((line_index, column + 3), self.__class__, self._waiting_until), 3, self._started_at) elif self._waiting_until != ParserState.EOL: wait_until_len = len(self._waiting_until) if (_token_at_col_in_line(line, column, self._waiting_until, wait_until_len) and not _is_escaped(line, column, is_escaped)): # Skip ahead to end of this token return (InTextParser(), len(self._waiting_until), self._started_at) elif self._waiting_until == ParserState.EOL and column == 0: # We hit a new line and the state ends here. Return # corresponding state return (InTextParser(), 0, self._started_at) elif eof: # We hit the end of the file and were still in a comment # state. Grab everything up to here. return (InTextParser(), 0, self._started_at) # Move ahead by one character otherwise return (self, 1, None)
[ "def", "get_transition", "(", "self", ",", "# suppress(too-many-arguments)", "line", ",", "line_index", ",", "column", ",", "is_escaped", ",", "comment_system_transitions", ",", "eof", "=", "False", ")", ":", "del", "comment_system_transitions", "if", "(", "_token_a...
Get transition from InCommentParser.
[ "Get", "transition", "from", "InCommentParser", "." ]
python
train
apache/incubator-heron
third_party/python/cpplint/cpplint.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L1545-L1551
def FindNextMultiLineCommentEnd(lines, lineix): """We are inside a comment, find the end marker.""" while lineix < len(lines): if lines[lineix].strip().endswith('*/'): return lineix lineix += 1 return len(lines)
[ "def", "FindNextMultiLineCommentEnd", "(", "lines", ",", "lineix", ")", ":", "while", "lineix", "<", "len", "(", "lines", ")", ":", "if", "lines", "[", "lineix", "]", ".", "strip", "(", ")", ".", "endswith", "(", "'*/'", ")", ":", "return", "lineix", ...
We are inside a comment, find the end marker.
[ "We", "are", "inside", "a", "comment", "find", "the", "end", "marker", "." ]
python
valid
freshbooks/statsdecor
statsdecor/__init__.py
https://github.com/freshbooks/statsdecor/blob/1c4a98e120799b430fd40c8fede9020a91162d31/statsdecor/__init__.py#L97-L103
def timing(name, delta, rate=1, tags=None): """Sends new timing information. `delta` is in milliseconds. >>> import statsdecor >>> statsdecor.timing('my.metric', 314159265359) """ return client().timing(name, delta, rate=rate, tags=tags)
[ "def", "timing", "(", "name", ",", "delta", ",", "rate", "=", "1", ",", "tags", "=", "None", ")", ":", "return", "client", "(", ")", ".", "timing", "(", "name", ",", "delta", ",", "rate", "=", "rate", ",", "tags", "=", "tags", ")" ]
Sends new timing information. `delta` is in milliseconds. >>> import statsdecor >>> statsdecor.timing('my.metric', 314159265359)
[ "Sends", "new", "timing", "information", ".", "delta", "is", "in", "milliseconds", "." ]
python
train
jgm/pandocfilters
pandocfilters.py
https://github.com/jgm/pandocfilters/blob/0d6b4f9be9d8e54b18b8a97e6120dd85ece53de5/pandocfilters.py#L41-L49
def get_value(kv, key, value = None): """get value from the keyvalues (options)""" res = [] for k, v in kv: if k == key: value = v else: res.append([k, v]) return value, res
[ "def", "get_value", "(", "kv", ",", "key", ",", "value", "=", "None", ")", ":", "res", "=", "[", "]", "for", "k", ",", "v", "in", "kv", ":", "if", "k", "==", "key", ":", "value", "=", "v", "else", ":", "res", ".", "append", "(", "[", "k", ...
get value from the keyvalues (options)
[ "get", "value", "from", "the", "keyvalues", "(", "options", ")" ]
python
train
librosa/librosa
librosa/util/utils.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L1170-L1203
def buf_to_float(x, n_bytes=2, dtype=np.float32): """Convert an integer buffer to floating point values. This is primarily useful when loading integer-valued wav data into numpy arrays. See Also -------- buf_to_float Parameters ---------- x : np.ndarray [dtype=int] The integer-valued data buffer n_bytes : int [1, 2, 4] The number of bytes per sample in `x` dtype : numeric type The target output type (default: 32-bit float) Returns ------- x_float : np.ndarray [dtype=float] The input data buffer cast to floating point """ # Invert the scale of the data scale = 1./float(1 << ((8 * n_bytes) - 1)) # Construct the format string fmt = '<i{:d}'.format(n_bytes) # Rescale and format the data buffer return scale * np.frombuffer(x, fmt).astype(dtype)
[ "def", "buf_to_float", "(", "x", ",", "n_bytes", "=", "2", ",", "dtype", "=", "np", ".", "float32", ")", ":", "# Invert the scale of the data", "scale", "=", "1.", "/", "float", "(", "1", "<<", "(", "(", "8", "*", "n_bytes", ")", "-", "1", ")", ")"...
Convert an integer buffer to floating point values. This is primarily useful when loading integer-valued wav data into numpy arrays. See Also -------- buf_to_float Parameters ---------- x : np.ndarray [dtype=int] The integer-valued data buffer n_bytes : int [1, 2, 4] The number of bytes per sample in `x` dtype : numeric type The target output type (default: 32-bit float) Returns ------- x_float : np.ndarray [dtype=float] The input data buffer cast to floating point
[ "Convert", "an", "integer", "buffer", "to", "floating", "point", "values", ".", "This", "is", "primarily", "useful", "when", "loading", "integer", "-", "valued", "wav", "data", "into", "numpy", "arrays", "." ]
python
test
Dani4kor/stockfishpy
stockfishpy/stockfishpy.py
https://github.com/Dani4kor/stockfishpy/blob/af26e8180a7d186ca0cb48d06bac9f2561432f4f/stockfishpy/stockfishpy.py#L107-L156
def setposition(self, position): """ The move format is in long algebraic notation. Takes list of stirngs = ['e2e4', 'd7d5'] OR FEN = 'rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1' """ try: if isinstance(position, list): self.send('position startpos moves {}'.format( self.__listtostring(position))) self.isready() elif re.match('\s*^(((?:[rnbqkpRNBQKP1-8]+\/){7})[rnbqkpRNBQKP1-8]+)\s([b|w])\s([K|Q|k|q|-]{1,4})\s(-|[a-h][1-8])\s(\d+\s\d+)$', position): regexList = re.match('\s*^(((?:[rnbqkpRNBQKP1-8]+\/){7})[rnbqkpRNBQKP1-8]+)\s([b|w])\s([K|Q|k|q|-]{1,4})\s(-|[a-h][1-8])\s(\d+\s\d+)$', position).groups() fen = regexList[0].split("/") if len(fen) != 8: raise ValueError("expected 8 rows in position part of fen: {0}".format(repr(fen))) for fenPart in fen: field_sum = 0 previous_was_digit, previous_was_piece = False, False for c in fenPart: if c in ["1", "2", "3", "4", "5", "6", "7", "8"]: if previous_was_digit: raise ValueError("two subsequent digits in position part of fen: {0}".format(repr(fen))) field_sum += int(c) previous_was_digit = True previous_was_piece = False elif c == "~": if not previous_was_piece: raise ValueError("~ not after piece in position part of fen: {0}".format(repr(fen))) previous_was_digit, previous_was_piece = False, False elif c.lower() in ["p", "n", "b", "r", "q", "k"]: field_sum += 1 previous_was_digit = False previous_was_piece = True else: raise ValueError("invalid character in position part of fen: {0}".format(repr(fen))) if field_sum != 8: raise ValueError("expected 8 columns per row in position part of fen: {0}".format(repr(fen))) self.send('position fen {}'.format(position)) self.isready() else: raise ValueError("fen doesn`t match follow this example: rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1 ") except ValueError as e: print('\nCheck position correctness\n') sys.exit(e.message)
[ "def", "setposition", "(", "self", ",", "position", ")", ":", "try", ":", "if", "isinstance", "(", "position", ",", "list", ")", ":", "self", ".", "send", "(", "'position startpos moves {}'", ".", "format", "(", "self", ".", "__listtostring", "(", "positio...
The move format is in long algebraic notation. Takes list of stirngs = ['e2e4', 'd7d5'] OR FEN = 'rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1'
[ "The", "move", "format", "is", "in", "long", "algebraic", "notation", "." ]
python
train
coin-or/GiMPy
src/gimpy/graph.py
https://github.com/coin-or/GiMPy/blob/51853122a50eb6019d06bbdedbfc396a833b5a22/src/gimpy/graph.py#L1367-L1404
def process_edge_flow(self, source, sink, i, j, algo, q): ''' API: process_edge_flow(self, source, sink, i, j, algo, q) Description: Used by by max_flow_preflowpush() method. Processes edges along prefolow push. Input: source: Source node name of flow graph. sink: Sink node name of flow graph. i: Source node in the processed edge (tail of arc). j: Sink node in the processed edge (head of arc). Post: The 'flow' and 'excess' attributes of nodes may get updated. Return: Returns False if residual capacity is 0, True otherwise. ''' if (self.get_node_attr(i, 'distance') != self.get_node_attr(j, 'distance') + 1): return False if (i, j) in self.edge_attr: edge = (i, j) capacity = self.get_edge_attr(i, j, 'capacity') mult = 1 else: edge = (j, i) capacity = 0 mult = -1 flow = mult*self.edge_attr[edge]['flow'] residual_capacity = capacity - flow if residual_capacity == 0: return False excess_i = self.get_node_attr(i, 'excess') excess_j = self.get_node_attr(j, 'excess') push_amount = min(excess_i, residual_capacity) self.edge_attr[edge]['flow'] = mult*(flow + push_amount) self.set_node_attr(i, 'excess', excess_i - push_amount) self.set_node_attr(j, 'excess', excess_j + push_amount) return True
[ "def", "process_edge_flow", "(", "self", ",", "source", ",", "sink", ",", "i", ",", "j", ",", "algo", ",", "q", ")", ":", "if", "(", "self", ".", "get_node_attr", "(", "i", ",", "'distance'", ")", "!=", "self", ".", "get_node_attr", "(", "j", ",", ...
API: process_edge_flow(self, source, sink, i, j, algo, q) Description: Used by by max_flow_preflowpush() method. Processes edges along prefolow push. Input: source: Source node name of flow graph. sink: Sink node name of flow graph. i: Source node in the processed edge (tail of arc). j: Sink node in the processed edge (head of arc). Post: The 'flow' and 'excess' attributes of nodes may get updated. Return: Returns False if residual capacity is 0, True otherwise.
[ "API", ":", "process_edge_flow", "(", "self", "source", "sink", "i", "j", "algo", "q", ")", "Description", ":", "Used", "by", "by", "max_flow_preflowpush", "()", "method", ".", "Processes", "edges", "along", "prefolow", "push", ".", "Input", ":", "source", ...
python
train
raiden-network/raiden
raiden/raiden_service.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/raiden_service.py#L684-L730
def handle_state_change(self, state_change: StateChange) -> List[Greenlet]: """ Dispatch the state change and return the processing threads. Use this for error reporting, failures in the returned greenlets, should be re-raised using `gevent.joinall` with `raise_error=True`. """ assert self.wal, f'WAL not restored. node:{self!r}' log.debug( 'State change', node=pex(self.address), state_change=_redact_secret(serialize.JSONSerializer.serialize(state_change)), ) old_state = views.state_from_raiden(self) raiden_event_list = self.wal.log_and_dispatch(state_change) current_state = views.state_from_raiden(self) for changed_balance_proof in views.detect_balance_proof_change(old_state, current_state): update_services_from_balance_proof(self, current_state, changed_balance_proof) log.debug( 'Raiden events', node=pex(self.address), raiden_events=[ _redact_secret(serialize.JSONSerializer.serialize(event)) for event in raiden_event_list ], ) greenlets: List[Greenlet] = list() if self.ready_to_process_events: for raiden_event in raiden_event_list: greenlets.append( self.handle_event(raiden_event=raiden_event), ) state_changes_count = self.wal.storage.count_state_changes() new_snapshot_group = ( state_changes_count // SNAPSHOT_STATE_CHANGES_COUNT ) if new_snapshot_group > self.snapshot_group: log.debug('Storing snapshot', snapshot_id=new_snapshot_group) self.wal.snapshot() self.snapshot_group = new_snapshot_group return greenlets
[ "def", "handle_state_change", "(", "self", ",", "state_change", ":", "StateChange", ")", "->", "List", "[", "Greenlet", "]", ":", "assert", "self", ".", "wal", ",", "f'WAL not restored. node:{self!r}'", "log", ".", "debug", "(", "'State change'", ",", "node", ...
Dispatch the state change and return the processing threads. Use this for error reporting, failures in the returned greenlets, should be re-raised using `gevent.joinall` with `raise_error=True`.
[ "Dispatch", "the", "state", "change", "and", "return", "the", "processing", "threads", "." ]
python
train
uchicago-cs/deepdish
deepdish/image.py
https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/image.py#L109-L120
def crop_or_pad(im, size, value=0): """ Crops an image in the center. Parameters ---------- size : tuple, (height, width) Finally size after cropping. """ diff = [im.shape[index] - size[index] for index in (0, 1)] im2 = im[diff[0]//2:diff[0]//2 + size[0], diff[1]//2:diff[1]//2 + size[1]] return im2
[ "def", "crop_or_pad", "(", "im", ",", "size", ",", "value", "=", "0", ")", ":", "diff", "=", "[", "im", ".", "shape", "[", "index", "]", "-", "size", "[", "index", "]", "for", "index", "in", "(", "0", ",", "1", ")", "]", "im2", "=", "im", "...
Crops an image in the center. Parameters ---------- size : tuple, (height, width) Finally size after cropping.
[ "Crops", "an", "image", "in", "the", "center", "." ]
python
train
azogue/i2csense
i2csense/__init__.py
https://github.com/azogue/i2csense/blob/ecc6806dcee9de827a5414a9e836d271fedca9b9/i2csense/__init__.py#L102-L120
def current_state_str(self): """Return string representation of the current state of the sensor.""" if self.sample_ok: msg = '' temperature = self._get_value_opc_attr('temperature') if temperature is not None: msg += 'Temp: %s ºC, ' % temperature humidity = self._get_value_opc_attr('humidity') if humidity is not None: msg += 'Humid: %s %%, ' % humidity pressure = self._get_value_opc_attr('pressure') if pressure is not None: msg += 'Press: %s mb, ' % pressure light_level = self._get_value_opc_attr('light_level') if light_level is not None: msg += 'Light: %s lux, ' % light_level return msg[:-2] else: return "Bad sample"
[ "def", "current_state_str", "(", "self", ")", ":", "if", "self", ".", "sample_ok", ":", "msg", "=", "''", "temperature", "=", "self", ".", "_get_value_opc_attr", "(", "'temperature'", ")", "if", "temperature", "is", "not", "None", ":", "msg", "+=", "'Temp:...
Return string representation of the current state of the sensor.
[ "Return", "string", "representation", "of", "the", "current", "state", "of", "the", "sensor", "." ]
python
train
senaite/senaite.core
bika/lims/browser/dashboard/dashboard.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/dashboard/dashboard.py#L252-L263
def _create_raw_data(self): """ Gathers the different sections ids and creates a string as first cookie data. :return: A dictionary like: {'analyses':'all','analysisrequest':'all','worksheets':'all'} """ result = {} for section in self.get_sections(): result[section.get('id')] = 'all' return result
[ "def", "_create_raw_data", "(", "self", ")", ":", "result", "=", "{", "}", "for", "section", "in", "self", ".", "get_sections", "(", ")", ":", "result", "[", "section", ".", "get", "(", "'id'", ")", "]", "=", "'all'", "return", "result" ]
Gathers the different sections ids and creates a string as first cookie data. :return: A dictionary like: {'analyses':'all','analysisrequest':'all','worksheets':'all'}
[ "Gathers", "the", "different", "sections", "ids", "and", "creates", "a", "string", "as", "first", "cookie", "data", "." ]
python
train
iamteem/redisco
redisco/models/base.py
https://github.com/iamteem/redisco/blob/a7ba19ff3c38061d6d8bc0c10fa754baadcfeb91/redisco/models/base.py#L45-L51
def _initialize_lists(model_class, name, bases, attrs): """Stores the list fields descriptors of a model.""" model_class._lists = {} for k, v in attrs.iteritems(): if isinstance(v, ListField): model_class._lists[k] = v v.name = v.name or k
[ "def", "_initialize_lists", "(", "model_class", ",", "name", ",", "bases", ",", "attrs", ")", ":", "model_class", ".", "_lists", "=", "{", "}", "for", "k", ",", "v", "in", "attrs", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "v", ",", ...
Stores the list fields descriptors of a model.
[ "Stores", "the", "list", "fields", "descriptors", "of", "a", "model", "." ]
python
train
dereneaton/ipyrad
ipyrad/assemble/cluster_within.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_within.py#L1161-L1263
def cluster(data, sample, nthreads, force): """ Calls vsearch for clustering. cov varies by data type, values were chosen based on experience, but could be edited by users """ ## get the dereplicated reads if "reference" in data.paramsdict["assembly_method"]: derephandle = os.path.join(data.dirs.edits, sample.name+"-refmap_derep.fastq") ## In the event all reads for all samples map successfully then clustering ## the unmapped reads makes no sense, so just bail out. if not os.stat(derephandle).st_size: ## In this case you do have to create empty, dummy vsearch output ## files so building_clusters will not fail. uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp") usort = os.path.join(data.dirs.clusts, sample.name+".utemp.sort") hhandle = os.path.join(data.dirs.clusts, sample.name+".htemp") for f in [uhandle, usort, hhandle]: open(f, 'a').close() return else: derephandle = os.path.join(data.dirs.edits, sample.name+"_derep.fastq") ## create handles for the outfiles uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp") temphandle = os.path.join(data.dirs.clusts, sample.name+".htemp") ## If derep file doesn't exist then bail out if not os.path.isfile(derephandle): LOGGER.warn("Bad derephandle - {}".format(derephandle)) raise IPyradError("Input file for clustering doesn't exist - {}"\ .format(derephandle)) ## testing one sample fail #if sample.name == "1C_0": # x ## datatype specific optimization ## minsl: the percentage of the seed that must be matched ## smaller values for RAD/ddRAD where we might want to combine, say 50bp ## reads and 100bp reads in the same analysis. ## query_cov: the percentage of the query sequence that must match seed ## smaller values are needed for gbs where only the tips might overlap ## larger values for pairgbs where they should overlap near completely ## small minsl and high query cov allows trimmed reads to match to untrim ## seed for rad/ddrad/pairddrad. 
strand = "plus" cov = 0.75 minsl = 0.5 if data.paramsdict["datatype"] in ["gbs", "2brad"]: strand = "both" cov = 0.5 minsl = 0.5 elif data.paramsdict["datatype"] == 'pairgbs': strand = "both" cov = 0.75 minsl = 0.75 ## If this value is not null (which is the default) then override query cov if data._hackersonly["query_cov"]: cov = str(data._hackersonly["query_cov"]) assert float(cov) <= 1, "query_cov must be <= 1.0" ## get call string cmd = [ipyrad.bins.vsearch, "-cluster_smallmem", derephandle, "-strand", strand, "-query_cov", str(cov), "-id", str(data.paramsdict["clust_threshold"]), "-minsl", str(minsl), "-userout", uhandle, "-userfields", "query+target+id+gaps+qstrand+qcov", "-maxaccepts", "1", "-maxrejects", "0", "-threads", str(nthreads), "-notmatched", temphandle, "-fasta_width", "0", "-fastq_qmax", "100", "-fulldp", "-usersort"] ## not sure what the benefit of this option is exactly, needs testing, ## might improve indel detection on left side, but we don't want to enforce ## aligning on left side if not necessarily, since quality trimmed reads ## might lose bases on left side in step2 and no longer align. #if data.paramsdict["datatype"] in ["rad", "ddrad", "pairddrad"]: # cmd += ["-leftjust"] ## run vsearch LOGGER.debug("%s", cmd) proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE, close_fds=True) ## This is long running so we wrap it to make sure we can kill it try: res = proc.communicate()[0] except KeyboardInterrupt: proc.kill() raise KeyboardInterrupt ## check for errors if proc.returncode: LOGGER.error("error %s: %s", cmd, res) raise IPyradWarningExit("cmd {}: {}".format(cmd, res))
[ "def", "cluster", "(", "data", ",", "sample", ",", "nthreads", ",", "force", ")", ":", "## get the dereplicated reads", "if", "\"reference\"", "in", "data", ".", "paramsdict", "[", "\"assembly_method\"", "]", ":", "derephandle", "=", "os", ".", "path", ".", ...
Calls vsearch for clustering. cov varies by data type, values were chosen based on experience, but could be edited by users
[ "Calls", "vsearch", "for", "clustering", ".", "cov", "varies", "by", "data", "type", "values", "were", "chosen", "based", "on", "experience", "but", "could", "be", "edited", "by", "users" ]
python
valid
cnobile2012/pololu-motors
pololu/motors/qik.py
https://github.com/cnobile2012/pololu-motors/blob/453d2283a63cfe15cda96cad6dffa73372d52a7c/pololu/motors/qik.py#L361-L388
def _getSerialTimeout(self, device): """ Get the serial timeout stored on the hardware device. Caution, more that one value returned from the Qik can have the same actual timeout value according the the formula below. I have verified this as an idiosyncrasy of the Qik itself. There are only a total of 72 unique values that the Qik can logically use the remaining 56 values are repeats of the 72. :Parameters: device : `int` The device is the integer number of the hardware devices ID and is only used with the Pololu Protocol. :Returns: The timeout value in seconds. """ num = self._getConfig(self.SERIAL_TIMEOUT, device) if isinstance(num, int): x = num & 0x0F y = (num >> 4) & 0x07 result = self.DEFAULT_SERIAL_TIMEOUT * x * pow(2, y) else: result = num return result
[ "def", "_getSerialTimeout", "(", "self", ",", "device", ")", ":", "num", "=", "self", ".", "_getConfig", "(", "self", ".", "SERIAL_TIMEOUT", ",", "device", ")", "if", "isinstance", "(", "num", ",", "int", ")", ":", "x", "=", "num", "&", "0x0F", "y", ...
Get the serial timeout stored on the hardware device. Caution, more that one value returned from the Qik can have the same actual timeout value according the the formula below. I have verified this as an idiosyncrasy of the Qik itself. There are only a total of 72 unique values that the Qik can logically use the remaining 56 values are repeats of the 72. :Parameters: device : `int` The device is the integer number of the hardware devices ID and is only used with the Pololu Protocol. :Returns: The timeout value in seconds.
[ "Get", "the", "serial", "timeout", "stored", "on", "the", "hardware", "device", "." ]
python
train
Becksteinlab/GromacsWrapper
gromacs/config.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/config.py#L401-L425
def get_template(t): """Find template file *t* and return its real path. *t* can be a single string or a list of strings. A string should be one of 1. a relative or absolute path, 2. a file in one of the directories listed in :data:`gromacs.config.path`, 3. a filename in the package template directory (defined in the template dictionary :data:`gromacs.config.templates`) or 4. a key into :data:`~gromacs.config.templates`. The first match (in this order) is returned. If the argument is a single string then a single string is returned, otherwise a list of strings. :Arguments: *t* : template file or key (string or list of strings) :Returns: os.path.realpath(*t*) (or a list thereof) :Raises: :exc:`ValueError` if no file can be located. """ templates = [_get_template(s) for s in utilities.asiterable(t)] if len(templates) == 1: return templates[0] return templates
[ "def", "get_template", "(", "t", ")", ":", "templates", "=", "[", "_get_template", "(", "s", ")", "for", "s", "in", "utilities", ".", "asiterable", "(", "t", ")", "]", "if", "len", "(", "templates", ")", "==", "1", ":", "return", "templates", "[", ...
Find template file *t* and return its real path. *t* can be a single string or a list of strings. A string should be one of 1. a relative or absolute path, 2. a file in one of the directories listed in :data:`gromacs.config.path`, 3. a filename in the package template directory (defined in the template dictionary :data:`gromacs.config.templates`) or 4. a key into :data:`~gromacs.config.templates`. The first match (in this order) is returned. If the argument is a single string then a single string is returned, otherwise a list of strings. :Arguments: *t* : template file or key (string or list of strings) :Returns: os.path.realpath(*t*) (or a list thereof) :Raises: :exc:`ValueError` if no file can be located.
[ "Find", "template", "file", "*", "t", "*", "and", "return", "its", "real", "path", "." ]
python
valid
goerz/clusterjob
clusterjob/__init__.py
https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/__init__.py#L1050-L1079
def run_epilogue(self): """Run the epilogue script in the current working directory. raises: subprocess.CalledProcessError: if the script does not finish with exit code zero. """ logger = logging.getLogger(__name__) if self.epilogue is not None: with tempfile.NamedTemporaryFile('w', delete=False) as epilogue_fh: epilogue_fh.write(self.epilogue) tempfilename = epilogue_fh.name set_executable(tempfilename) try: sp.check_output( [tempfilename, ], stderr=sp.STDOUT) except sp.CalledProcessError as e: logger.error(dedent(r''' Epilogue script did not exit cleanly. CWD: {cwd} epilogue: --- {epilogue} --- response: --- {response} --- ''').format(cwd=os.getcwd(), epilogue=self.epilogue, response=e.output)) raise finally: os.unlink(tempfilename)
[ "def", "run_epilogue", "(", "self", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "if", "self", ".", "epilogue", "is", "not", "None", ":", "with", "tempfile", ".", "NamedTemporaryFile", "(", "'w'", ",", "delete", "=", "False...
Run the epilogue script in the current working directory. raises: subprocess.CalledProcessError: if the script does not finish with exit code zero.
[ "Run", "the", "epilogue", "script", "in", "the", "current", "working", "directory", "." ]
python
train
dmlc/gluon-nlp
scripts/natural_language_inference/preprocess.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/natural_language_inference/preprocess.py#L42-L58
def main(args): """ Read tokens from the provided parse tree in the SNLI dataset. Illegal examples are removed. """ examples = [] with open(args.input, 'r') as fin: reader = csv.DictReader(fin, delimiter='\t') for cols in reader: s1 = read_tokens(cols['sentence1_parse']) s2 = read_tokens(cols['sentence2_parse']) label = cols['gold_label'] if label in ('neutral', 'contradiction', 'entailment'): examples.append((s1, s2, label)) with open(args.output, 'w') as fout: for s1, s2, l in examples: fout.write('{}\t{}\t{}\n'.format(' '.join(s1), ' '.join(s2), l))
[ "def", "main", "(", "args", ")", ":", "examples", "=", "[", "]", "with", "open", "(", "args", ".", "input", ",", "'r'", ")", "as", "fin", ":", "reader", "=", "csv", ".", "DictReader", "(", "fin", ",", "delimiter", "=", "'\\t'", ")", "for", "cols"...
Read tokens from the provided parse tree in the SNLI dataset. Illegal examples are removed.
[ "Read", "tokens", "from", "the", "provided", "parse", "tree", "in", "the", "SNLI", "dataset", ".", "Illegal", "examples", "are", "removed", "." ]
python
train
BlueBrain/hpcbench
hpcbench/toolbox/functools_ext.py
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/functools_ext.py#L8-L17
def compose(*functions): """Define functions composition like f ∘ g ∘ h :return: callable object that will perform function composition of callables given in argument. """ def _compose2(f, g): # pylint: disable=invalid-name return lambda x: f(g(x)) return functools.reduce(_compose2, functions, lambda x: x)
[ "def", "compose", "(", "*", "functions", ")", ":", "def", "_compose2", "(", "f", ",", "g", ")", ":", "# pylint: disable=invalid-name", "return", "lambda", "x", ":", "f", "(", "g", "(", "x", ")", ")", "return", "functools", ".", "reduce", "(", "_compose...
Define functions composition like f ∘ g ∘ h :return: callable object that will perform function composition of callables given in argument.
[ "Define", "functions", "composition", "like", "f", "∘", "g", "∘", "h", ":", "return", ":", "callable", "object", "that", "will", "perform", "function", "composition", "of", "callables", "given", "in", "argument", "." ]
python
train
robotools/fontParts
Lib/fontParts/base/normalizers.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/normalizers.py#L299-L316
def normalizeGlyphUnicodes(value): """ Normalizes glyph unicodes. * **value** must be a ``list``. * **value** items must normalize as glyph unicodes with :func:`normalizeGlyphUnicode`. * **value** must not repeat unicode values. * Returned value will be a ``tuple`` of ints. """ if not isinstance(value, (tuple, list)): raise TypeError("Glyph unicodes must be a list, not %s." % type(value).__name__) values = [normalizeGlyphUnicode(v) for v in value] duplicates = [v for v, count in Counter(value).items() if count > 1] if len(duplicates) != 0: raise ValueError("Duplicate unicode values are not allowed.") return tuple(values)
[ "def", "normalizeGlyphUnicodes", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "(", "tuple", ",", "list", ")", ")", ":", "raise", "TypeError", "(", "\"Glyph unicodes must be a list, not %s.\"", "%", "type", "(", "value", ")", ".", "__...
Normalizes glyph unicodes. * **value** must be a ``list``. * **value** items must normalize as glyph unicodes with :func:`normalizeGlyphUnicode`. * **value** must not repeat unicode values. * Returned value will be a ``tuple`` of ints.
[ "Normalizes", "glyph", "unicodes", "." ]
python
train
JnyJny/Geometry
Geometry/ellipse.py
https://github.com/JnyJny/Geometry/blob/3500f815fa56c535b36d1b6fd0afe69ce5d055be/Geometry/ellipse.py#L111-L115
def xAxisIsMajor(self): ''' Returns True if the major axis is parallel to the X axis, boolean. ''' return max(self.radius.x, self.radius.y) == self.radius.x
[ "def", "xAxisIsMajor", "(", "self", ")", ":", "return", "max", "(", "self", ".", "radius", ".", "x", ",", "self", ".", "radius", ".", "y", ")", "==", "self", ".", "radius", ".", "x" ]
Returns True if the major axis is parallel to the X axis, boolean.
[ "Returns", "True", "if", "the", "major", "axis", "is", "parallel", "to", "the", "X", "axis", "boolean", "." ]
python
train
explosion/spaCy
spacy/cli/init_model.py
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/init_model.py#L39-L91
def init_model( lang, output_dir, freqs_loc=None, clusters_loc=None, jsonl_loc=None, vectors_loc=None, prune_vectors=-1, ): """ Create a new model from raw data, like word frequencies, Brown clusters and word vectors. If vectors are provided in Word2Vec format, they can be either a .txt or zipped as a .zip or .tar.gz. """ if jsonl_loc is not None: if freqs_loc is not None or clusters_loc is not None: settings = ["-j"] if freqs_loc: settings.append("-f") if clusters_loc: settings.append("-c") msg.warn( "Incompatible arguments", "The -f and -c arguments are deprecated, and not compatible " "with the -j argument, which should specify the same " "information. Either merge the frequencies and clusters data " "into the JSONL-formatted file (recommended), or use only the " "-f and -c files, without the other lexical attributes.", ) jsonl_loc = ensure_path(jsonl_loc) lex_attrs = srsly.read_jsonl(jsonl_loc) else: clusters_loc = ensure_path(clusters_loc) freqs_loc = ensure_path(freqs_loc) if freqs_loc is not None and not freqs_loc.exists(): msg.fail("Can't find words frequencies file", freqs_loc, exits=1) lex_attrs = read_attrs_from_deprecated(freqs_loc, clusters_loc) with msg.loading("Creating model..."): nlp = create_model(lang, lex_attrs) msg.good("Successfully created model") if vectors_loc is not None: add_vectors(nlp, vectors_loc, prune_vectors) vec_added = len(nlp.vocab.vectors) lex_added = len(nlp.vocab) msg.good( "Sucessfully compiled vocab", "{} entries, {} vectors".format(lex_added, vec_added), ) if not output_dir.exists(): output_dir.mkdir() nlp.to_disk(output_dir) return nlp
[ "def", "init_model", "(", "lang", ",", "output_dir", ",", "freqs_loc", "=", "None", ",", "clusters_loc", "=", "None", ",", "jsonl_loc", "=", "None", ",", "vectors_loc", "=", "None", ",", "prune_vectors", "=", "-", "1", ",", ")", ":", "if", "jsonl_loc", ...
Create a new model from raw data, like word frequencies, Brown clusters and word vectors. If vectors are provided in Word2Vec format, they can be either a .txt or zipped as a .zip or .tar.gz.
[ "Create", "a", "new", "model", "from", "raw", "data", "like", "word", "frequencies", "Brown", "clusters", "and", "word", "vectors", ".", "If", "vectors", "are", "provided", "in", "Word2Vec", "format", "they", "can", "be", "either", "a", ".", "txt", "or", ...
python
train
pyviz/param
param/parameterized.py
https://github.com/pyviz/param/blob/8f0dafa78defa883247b40635f96cc6d5c1b3481/param/parameterized.py#L1504-L1537
def get_value_generator(self_,name): # pylint: disable-msg=E0213 """ Return the value or value-generating object of the named attribute. For most parameters, this is simply the parameter's value (i.e. the same as getattr()), but Dynamic parameters have their value-generating object returned. """ cls_or_slf = self_.self_or_cls param_obj = cls_or_slf.param.objects('existing').get(name) if not param_obj: value = getattr(cls_or_slf,name) # CompositeParameter detected by being a Parameter and having 'attribs' elif hasattr(param_obj,'attribs'): value = [cls_or_slf.param.get_value_generator(a) for a in param_obj.attribs] # not a Dynamic Parameter elif not hasattr(param_obj,'_value_is_dynamic'): value = getattr(cls_or_slf,name) # Dynamic Parameter... else: internal_name = "_%s_param_value"%name if hasattr(cls_or_slf,internal_name): # dealing with object and it's been set on this object value = getattr(cls_or_slf,internal_name) else: # dealing with class or isn't set on the object value = param_obj.default return value
[ "def", "get_value_generator", "(", "self_", ",", "name", ")", ":", "# pylint: disable-msg=E0213", "cls_or_slf", "=", "self_", ".", "self_or_cls", "param_obj", "=", "cls_or_slf", ".", "param", ".", "objects", "(", "'existing'", ")", ".", "get", "(", "name", ")"...
Return the value or value-generating object of the named attribute. For most parameters, this is simply the parameter's value (i.e. the same as getattr()), but Dynamic parameters have their value-generating object returned.
[ "Return", "the", "value", "or", "value", "-", "generating", "object", "of", "the", "named", "attribute", "." ]
python
train
HewlettPackard/python-hpOneView
hpOneView/resources/networking/interconnects.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/networking/interconnects.py#L274-L285
def get_pluggable_module_information(self, id_or_uri): """ Gets all the pluggable module information. Args: id_or_uri: Can be either the interconnect id or uri. Returns: array: dicts of the pluggable module information. """ uri = self._client.build_uri(id_or_uri) + "/pluggableModuleInformation" return self._client.get(uri)
[ "def", "get_pluggable_module_information", "(", "self", ",", "id_or_uri", ")", ":", "uri", "=", "self", ".", "_client", ".", "build_uri", "(", "id_or_uri", ")", "+", "\"/pluggableModuleInformation\"", "return", "self", ".", "_client", ".", "get", "(", "uri", "...
Gets all the pluggable module information. Args: id_or_uri: Can be either the interconnect id or uri. Returns: array: dicts of the pluggable module information.
[ "Gets", "all", "the", "pluggable", "module", "information", "." ]
python
train
gwastro/pycbc
pycbc/population/rates_functions.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/population/rates_functions.py#L509-L525
def mchirp_sampler_flat(**kwargs): ''' Draw chirp mass samples for flat in mass model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- mchirp-astro: array The chirp mass samples for the population ''' m1, m2 = draw_flat_samples(**kwargs) mchirp_astro = mchirp_from_mass1_mass2(m1, m2) return mchirp_astro
[ "def", "mchirp_sampler_flat", "(", "*", "*", "kwargs", ")", ":", "m1", ",", "m2", "=", "draw_flat_samples", "(", "*", "*", "kwargs", ")", "mchirp_astro", "=", "mchirp_from_mass1_mass2", "(", "m1", ",", "m2", ")", "return", "mchirp_astro" ]
Draw chirp mass samples for flat in mass model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- mchirp-astro: array The chirp mass samples for the population
[ "Draw", "chirp", "mass", "samples", "for", "flat", "in", "mass", "model" ]
python
train
monkeython/scriba
scriba/content_types/scriba_x_tar.py
https://github.com/monkeython/scriba/blob/fb8e7636ed07c3d035433fdd153599ac8b24dfc4/scriba/content_types/scriba_x_tar.py#L16-L27
def parse(binary, **params): """Turns a TAR file into a frozen sample.""" binary = io.BytesIO(binary) collection = list() with tarfile.TarFile(fileobj=binary, mode='r') as tar: for tar_info in tar.getmembers(): content_type, encoding = mimetypes.guess_type(tar_info.name) content = tar.extractfile(tar_info) content = content_encodings.get(encoding).decode(content) content = content_types.get(content_type).parse(content, **params) collection.apppend((tar_info.name, content)) return collection
[ "def", "parse", "(", "binary", ",", "*", "*", "params", ")", ":", "binary", "=", "io", ".", "BytesIO", "(", "binary", ")", "collection", "=", "list", "(", ")", "with", "tarfile", ".", "TarFile", "(", "fileobj", "=", "binary", ",", "mode", "=", "'r'...
Turns a TAR file into a frozen sample.
[ "Turns", "a", "TAR", "file", "into", "a", "frozen", "sample", "." ]
python
train
quantmind/ccy
ccy/core/currency.py
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/core/currency.py#L100-L111
def swap(self, c2): ''' put the order of currencies as market standard ''' inv = False c1 = self if c1.order > c2.order: ct = c1 c1 = c2 c2 = ct inv = True return inv, c1, c2
[ "def", "swap", "(", "self", ",", "c2", ")", ":", "inv", "=", "False", "c1", "=", "self", "if", "c1", ".", "order", ">", "c2", ".", "order", ":", "ct", "=", "c1", "c1", "=", "c2", "c2", "=", "ct", "inv", "=", "True", "return", "inv", ",", "c...
put the order of currencies as market standard
[ "put", "the", "order", "of", "currencies", "as", "market", "standard" ]
python
train
datawire/quark
quarkc/docmaker.py
https://github.com/datawire/quark/blob/df0058a148b077c0aff535eb6ee382605c556273/quarkc/docmaker.py#L75-L83
def get_doc(node): """ Return a node's documentation as a string, pulling from annotations or constructing a simple fake as needed. """ res = " ".join(get_doc_annotations(node)) if not res: res = "(%s)" % node.__class__.__name__.lower() return res
[ "def", "get_doc", "(", "node", ")", ":", "res", "=", "\" \"", ".", "join", "(", "get_doc_annotations", "(", "node", ")", ")", "if", "not", "res", ":", "res", "=", "\"(%s)\"", "%", "node", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "...
Return a node's documentation as a string, pulling from annotations or constructing a simple fake as needed.
[ "Return", "a", "node", "s", "documentation", "as", "a", "string", "pulling", "from", "annotations", "or", "constructing", "a", "simple", "fake", "as", "needed", "." ]
python
train
rwl/godot
godot/edge.py
https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/edge.py#L707-L729
def arrange_all(self): """ Arrange the components of the node using Graphviz. """ # FIXME: Circular reference avoidance. import godot.dot_data_parser import godot.graph graph = godot.graph.Graph( ID="g", directed=True ) self.conn = "->" graph.edges.append( self ) xdot_data = graph.create( format="xdot" ) # print "XDOT DATA:", xdot_data parser = godot.dot_data_parser.GodotDataParser() ndata = xdot_data.replace('\\\n','') tokens = parser.dotparser.parseString(ndata)[0] for element in tokens[3]: cmd = element[0] if cmd == "add_edge": cmd, src, dest, opts = element self.set( **opts )
[ "def", "arrange_all", "(", "self", ")", ":", "# FIXME: Circular reference avoidance.", "import", "godot", ".", "dot_data_parser", "import", "godot", ".", "graph", "graph", "=", "godot", ".", "graph", ".", "Graph", "(", "ID", "=", "\"g\"", ",", "directed", "=",...
Arrange the components of the node using Graphviz.
[ "Arrange", "the", "components", "of", "the", "node", "using", "Graphviz", "." ]
python
test
konstantint/matplotlib-venn
matplotlib_venn/_arc.py
https://github.com/konstantint/matplotlib-venn/blob/c26796c9925bdac512edf48387452fbd1848c791/matplotlib_venn/_arc.py#L195-L210
def approximately_equal(self, arc, tolerance=tol): ''' Returns true if the parameters of this arc are within <tolerance> of the parameters of the other arc, and the direction is the same. Note that no angle simplification is performed (i.e. some arcs that might be equal in principle are not declared as such by this method) >>> Arc((0, 0), 10, 20, 30, True).approximately_equal(Arc((tol/2, tol/2), 10+tol/2, 20-tol/2, 30-tol/2, True)) True >>> Arc((0, 0), 10, 20, 30, True).approximately_equal(Arc((0, 0), 10, 20, 30, False)) False >>> Arc((0, 0), 10, 20, 30, True).approximately_equal(Arc((0, 0+tol), 10, 20, 30, True)) False ''' return self.direction == arc.direction \ and np.all(abs(self.center - arc.center) < tolerance) and abs(self.radius - arc.radius) < tolerance \ and abs(self.from_angle - arc.from_angle) < tolerance and abs(self.to_angle - arc.to_angle) < tolerance
[ "def", "approximately_equal", "(", "self", ",", "arc", ",", "tolerance", "=", "tol", ")", ":", "return", "self", ".", "direction", "==", "arc", ".", "direction", "and", "np", ".", "all", "(", "abs", "(", "self", ".", "center", "-", "arc", ".", "cente...
Returns true if the parameters of this arc are within <tolerance> of the parameters of the other arc, and the direction is the same. Note that no angle simplification is performed (i.e. some arcs that might be equal in principle are not declared as such by this method) >>> Arc((0, 0), 10, 20, 30, True).approximately_equal(Arc((tol/2, tol/2), 10+tol/2, 20-tol/2, 30-tol/2, True)) True >>> Arc((0, 0), 10, 20, 30, True).approximately_equal(Arc((0, 0), 10, 20, 30, False)) False >>> Arc((0, 0), 10, 20, 30, True).approximately_equal(Arc((0, 0+tol), 10, 20, 30, True)) False
[ "Returns", "true", "if", "the", "parameters", "of", "this", "arc", "are", "within", "<tolerance", ">", "of", "the", "parameters", "of", "the", "other", "arc", "and", "the", "direction", "is", "the", "same", ".", "Note", "that", "no", "angle", "simplificati...
python
train
wummel/linkchecker
linkcheck/director/__init__.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/director/__init__.py#L128-L135
def get_aggregate (config): """Get an aggregator instance with given configuration.""" _urlqueue = urlqueue.UrlQueue(max_allowed_urls=config["maxnumurls"]) _robots_txt = robots_txt.RobotsTxt(config["useragent"]) plugin_manager = plugins.PluginManager(config) result_cache = results.ResultCache() return aggregator.Aggregate(config, _urlqueue, _robots_txt, plugin_manager, result_cache)
[ "def", "get_aggregate", "(", "config", ")", ":", "_urlqueue", "=", "urlqueue", ".", "UrlQueue", "(", "max_allowed_urls", "=", "config", "[", "\"maxnumurls\"", "]", ")", "_robots_txt", "=", "robots_txt", ".", "RobotsTxt", "(", "config", "[", "\"useragent\"", "]...
Get an aggregator instance with given configuration.
[ "Get", "an", "aggregator", "instance", "with", "given", "configuration", "." ]
python
train
vpelletier/python-libaio
libaio/__init__.py
https://github.com/vpelletier/python-libaio/blob/5b5a2fed5418e2bd1ac9197fa46c69dae86c6fe3/libaio/__init__.py#L221-L232
def buffer_list(self): """ The buffer list this instance operates on. Only available in mode != AIOBLOCK_MODE_POLL. Changes on a submitted transfer are not fully applied until its next submission: kernel will still be using original buffer list. """ if self._iocb.aio_lio_opcode == libaio.IO_CMD_POLL: raise AttributeError return self._buffer_list
[ "def", "buffer_list", "(", "self", ")", ":", "if", "self", ".", "_iocb", ".", "aio_lio_opcode", "==", "libaio", ".", "IO_CMD_POLL", ":", "raise", "AttributeError", "return", "self", ".", "_buffer_list" ]
The buffer list this instance operates on. Only available in mode != AIOBLOCK_MODE_POLL. Changes on a submitted transfer are not fully applied until its next submission: kernel will still be using original buffer list.
[ "The", "buffer", "list", "this", "instance", "operates", "on", "." ]
python
test
funkybob/knights-templater
knights/tags.py
https://github.com/funkybob/knights-templater/blob/b15cdbaae7d824d02f7f03ca04599ae94bb759dd/knights/tags.py#L215-L221
def macro(parser, token): ''' Works just like block, but does not render. ''' name = token.strip() parser.build_method(name, endnodes=['endmacro']) return ast.Yield(value=ast.Str(s=''))
[ "def", "macro", "(", "parser", ",", "token", ")", ":", "name", "=", "token", ".", "strip", "(", ")", "parser", ".", "build_method", "(", "name", ",", "endnodes", "=", "[", "'endmacro'", "]", ")", "return", "ast", ".", "Yield", "(", "value", "=", "a...
Works just like block, but does not render.
[ "Works", "just", "like", "block", "but", "does", "not", "render", "." ]
python
train
secdev/scapy
scapy/layers/tls/session.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/session.py#L469-L513
def mirror(self): """ This function takes a tlsSession object and swaps the IP addresses, ports, connection ends and connection states. The triggered_commit are also swapped (though it is probably overkill, it is cleaner this way). It is useful for static analysis of a series of messages from both the client and the server. In such a situation, it should be used every time the message being read comes from a different side than the one read right before, as the reading state becomes the writing state, and vice versa. For instance you could do: client_hello = open('client_hello.raw').read() <read other messages> m1 = TLS(client_hello) m2 = TLS(server_hello, tls_session=m1.tls_session.mirror()) m3 = TLS(server_cert, tls_session=m2.tls_session) m4 = TLS(client_keyexchange, tls_session=m3.tls_session.mirror()) """ self.ipdst, self.ipsrc = self.ipsrc, self.ipdst self.dport, self.sport = self.sport, self.dport self.rcs, self.wcs = self.wcs, self.rcs if self.rcs: self.rcs.row = "read" if self.wcs: self.wcs.row = "write" self.prcs, self.pwcs = self.pwcs, self.prcs if self.prcs: self.prcs.row = "read" if self.pwcs: self.pwcs.row = "write" self.triggered_prcs_commit, self.triggered_pwcs_commit = \ self.triggered_pwcs_commit, self.triggered_prcs_commit if self.connection_end == "client": self.connection_end = "server" elif self.connection_end == "server": self.connection_end = "client" return self
[ "def", "mirror", "(", "self", ")", ":", "self", ".", "ipdst", ",", "self", ".", "ipsrc", "=", "self", ".", "ipsrc", ",", "self", ".", "ipdst", "self", ".", "dport", ",", "self", ".", "sport", "=", "self", ".", "sport", ",", "self", ".", "dport", ...
This function takes a tlsSession object and swaps the IP addresses, ports, connection ends and connection states. The triggered_commit are also swapped (though it is probably overkill, it is cleaner this way). It is useful for static analysis of a series of messages from both the client and the server. In such a situation, it should be used every time the message being read comes from a different side than the one read right before, as the reading state becomes the writing state, and vice versa. For instance you could do: client_hello = open('client_hello.raw').read() <read other messages> m1 = TLS(client_hello) m2 = TLS(server_hello, tls_session=m1.tls_session.mirror()) m3 = TLS(server_cert, tls_session=m2.tls_session) m4 = TLS(client_keyexchange, tls_session=m3.tls_session.mirror())
[ "This", "function", "takes", "a", "tlsSession", "object", "and", "swaps", "the", "IP", "addresses", "ports", "connection", "ends", "and", "connection", "states", ".", "The", "triggered_commit", "are", "also", "swapped", "(", "though", "it", "is", "probably", "...
python
train
marcomusy/vtkplotter
vtkplotter/actors.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L30-L42
def mergeActors(actors, tol=0): """ Build a new actor formed by the fusion of the polydatas of input objects. Similar to Assembly, but in this case the input objects become a single mesh. .. hint:: |thinplate_grid| |thinplate_grid.py|_ """ polylns = vtk.vtkAppendPolyData() for a in actors: polylns.AddInputData(a.polydata()) polylns.Update() pd = polylns.GetOutput() return Actor(pd)
[ "def", "mergeActors", "(", "actors", ",", "tol", "=", "0", ")", ":", "polylns", "=", "vtk", ".", "vtkAppendPolyData", "(", ")", "for", "a", "in", "actors", ":", "polylns", ".", "AddInputData", "(", "a", ".", "polydata", "(", ")", ")", "polylns", ".",...
Build a new actor formed by the fusion of the polydatas of input objects. Similar to Assembly, but in this case the input objects become a single mesh. .. hint:: |thinplate_grid| |thinplate_grid.py|_
[ "Build", "a", "new", "actor", "formed", "by", "the", "fusion", "of", "the", "polydatas", "of", "input", "objects", ".", "Similar", "to", "Assembly", "but", "in", "this", "case", "the", "input", "objects", "become", "a", "single", "mesh", "." ]
python
train
davesque/django-rest-framework-simplejwt
rest_framework_simplejwt/tokens.py
https://github.com/davesque/django-rest-framework-simplejwt/blob/d6084c595aefbf97865d15254b56017e710e8e47/rest_framework_simplejwt/tokens.py#L137-L153
def check_exp(self, claim='exp', current_time=None): """ Checks whether a timestamp value in the given claim has passed (since the given datetime value in `current_time`). Raises a TokenError with a user-facing error message if so. """ if current_time is None: current_time = self.current_time try: claim_value = self.payload[claim] except KeyError: raise TokenError(format_lazy(_("Token has no '{}' claim"), claim)) claim_time = datetime_from_epoch(claim_value) if claim_time <= current_time: raise TokenError(format_lazy(_("Token '{}' claim has expired"), claim))
[ "def", "check_exp", "(", "self", ",", "claim", "=", "'exp'", ",", "current_time", "=", "None", ")", ":", "if", "current_time", "is", "None", ":", "current_time", "=", "self", ".", "current_time", "try", ":", "claim_value", "=", "self", ".", "payload", "[...
Checks whether a timestamp value in the given claim has passed (since the given datetime value in `current_time`). Raises a TokenError with a user-facing error message if so.
[ "Checks", "whether", "a", "timestamp", "value", "in", "the", "given", "claim", "has", "passed", "(", "since", "the", "given", "datetime", "value", "in", "current_time", ")", ".", "Raises", "a", "TokenError", "with", "a", "user", "-", "facing", "error", "me...
python
train
opencobra/memote
memote/support/basic.py
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L541-L544
def find_external_metabolites(model): """Return all metabolites in the external compartment.""" ex_comp = find_external_compartment(model) return [met for met in model.metabolites if met.compartment == ex_comp]
[ "def", "find_external_metabolites", "(", "model", ")", ":", "ex_comp", "=", "find_external_compartment", "(", "model", ")", "return", "[", "met", "for", "met", "in", "model", ".", "metabolites", "if", "met", ".", "compartment", "==", "ex_comp", "]" ]
Return all metabolites in the external compartment.
[ "Return", "all", "metabolites", "in", "the", "external", "compartment", "." ]
python
train
Dallinger/Dallinger
dallinger/data.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/data.py#L266-L290
def ingest_zip(path, engine=None): """Given a path to a zip file created with `export()`, recreate the database with the data stored in the included .csv files. """ import_order = [ "network", "participant", "node", "info", "notification", "question", "transformation", "vector", "transmission", ] with ZipFile(path, "r") as archive: filenames = archive.namelist() for name in import_order: filename = [f for f in filenames if name in f][0] model_name = name.capitalize() model = getattr(models, model_name) file = archive.open(filename) if six.PY3: file = io.TextIOWrapper(file, encoding="utf8", newline="") ingest_to_model(file, model, engine)
[ "def", "ingest_zip", "(", "path", ",", "engine", "=", "None", ")", ":", "import_order", "=", "[", "\"network\"", ",", "\"participant\"", ",", "\"node\"", ",", "\"info\"", ",", "\"notification\"", ",", "\"question\"", ",", "\"transformation\"", ",", "\"vector\"",...
Given a path to a zip file created with `export()`, recreate the database with the data stored in the included .csv files.
[ "Given", "a", "path", "to", "a", "zip", "file", "created", "with", "export", "()", "recreate", "the", "database", "with", "the", "data", "stored", "in", "the", "included", ".", "csv", "files", "." ]
python
train
chrisjsewell/jsonextended
jsonextended/edict.py
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1398-L1449
def split_key(d, key, new_keys, before=True, list_of_dicts=False, deepcopy=True): """ split an existing key(s) into multiple levels Parameters ---------- d : dict or dict like key: str existing key value new_keys: list[str] new levels to add before: bool add level before existing key (else after) list_of_dicts: bool treat list of dicts as additional branches Examples -------- >>> from pprint import pprint >>> d = {'a':1,'b':2} >>> pprint(split_key(d,'a',['c','d'])) {'b': 2, 'c': {'d': {'a': 1}}} >>> pprint(split_key(d,'a',['c','d'],before=False)) {'a': {'c': {'d': 1}}, 'b': 2} >>> d2 = [{'a':1},{'a':2},{'a':3}] >>> pprint(split_key(d2,'a',['b'],list_of_dicts=True)) [{'b': {'a': 1}}, {'b': {'a': 2}}, {'b': {'a': 3}}] """ list_of_dicts = '__list__' if list_of_dicts else None flatd = flatten(d, list_of_dicts=list_of_dicts) newd = {} for path, v in flatd.items(): if key in path: newk = [] for k in path: if k == key: if before: newk = newk + new_keys + [k] else: newk = newk + [k] + new_keys else: newk.append(k) newd[tuple(newk)] = v else: newd[path] = v return unflatten(newd, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
[ "def", "split_key", "(", "d", ",", "key", ",", "new_keys", ",", "before", "=", "True", ",", "list_of_dicts", "=", "False", ",", "deepcopy", "=", "True", ")", ":", "list_of_dicts", "=", "'__list__'", "if", "list_of_dicts", "else", "None", "flatd", "=", "f...
split an existing key(s) into multiple levels Parameters ---------- d : dict or dict like key: str existing key value new_keys: list[str] new levels to add before: bool add level before existing key (else after) list_of_dicts: bool treat list of dicts as additional branches Examples -------- >>> from pprint import pprint >>> d = {'a':1,'b':2} >>> pprint(split_key(d,'a',['c','d'])) {'b': 2, 'c': {'d': {'a': 1}}} >>> pprint(split_key(d,'a',['c','d'],before=False)) {'a': {'c': {'d': 1}}, 'b': 2} >>> d2 = [{'a':1},{'a':2},{'a':3}] >>> pprint(split_key(d2,'a',['b'],list_of_dicts=True)) [{'b': {'a': 1}}, {'b': {'a': 2}}, {'b': {'a': 3}}]
[ "split", "an", "existing", "key", "(", "s", ")", "into", "multiple", "levels" ]
python
train
Kronuz/pyScss
scss/source.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/source.py#L194-L198
def from_filename(cls, path_string, origin=MISSING, **kwargs): """ Read Sass source from a String specifying the path """ path = Path(path_string) return cls.from_path(path, origin, **kwargs)
[ "def", "from_filename", "(", "cls", ",", "path_string", ",", "origin", "=", "MISSING", ",", "*", "*", "kwargs", ")", ":", "path", "=", "Path", "(", "path_string", ")", "return", "cls", ".", "from_path", "(", "path", ",", "origin", ",", "*", "*", "kwa...
Read Sass source from a String specifying the path
[ "Read", "Sass", "source", "from", "a", "String", "specifying", "the", "path" ]
python
train
instaloader/instaloader
instaloader/instaloadercontext.py
https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/instaloadercontext.py#L526-L534
def root_rhx_gis(self) -> Optional[str]: """rhx_gis string returned in the / query.""" if self.is_logged_in: # At the moment, rhx_gis seems to be required for anonymous requests only. By returning None when logged # in, we can save the root_rhx_gis lookup query. return None if not self._root_rhx_gis: self._root_rhx_gis = self.get_json('', {})['rhx_gis'] return self._root_rhx_gis
[ "def", "root_rhx_gis", "(", "self", ")", "->", "Optional", "[", "str", "]", ":", "if", "self", ".", "is_logged_in", ":", "# At the moment, rhx_gis seems to be required for anonymous requests only. By returning None when logged", "# in, we can save the root_rhx_gis lookup query.", ...
rhx_gis string returned in the / query.
[ "rhx_gis", "string", "returned", "in", "the", "/", "query", "." ]
python
train
dogoncouch/logdissect
logdissect/core.py
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L197-L202
def load_filters(self): """Load filter module(s)""" for f in sorted(logdissect.filters.__filters__): self.filter_modules[f] = \ __import__('logdissect.filters.' + f, globals(), \ locals(), [logdissect]).FilterModule(args=self.filter_args)
[ "def", "load_filters", "(", "self", ")", ":", "for", "f", "in", "sorted", "(", "logdissect", ".", "filters", ".", "__filters__", ")", ":", "self", ".", "filter_modules", "[", "f", "]", "=", "__import__", "(", "'logdissect.filters.'", "+", "f", ",", "glob...
Load filter module(s)
[ "Load", "filter", "module", "(", "s", ")" ]
python
train
log2timeline/plaso
plaso/parsers/winreg.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/winreg.py#L141-L161
def _NormalizeKeyPath(self, key_path): """Normalizes a Windows Registry key path. Args: key_path (str): Windows Registry key path. Returns: str: normalized Windows Registry key path. """ normalized_key_path = key_path.lower() # The Registry key path should start with: # HKEY_LOCAL_MACHINE\System\ControlSet followed by 3 digits # which makes 39 characters. if (len(normalized_key_path) < 39 or not normalized_key_path.startswith(self._CONTROL_SET_PREFIX)): return normalized_key_path # Key paths that contain ControlSet### must be normalized to # CurrentControlSet. return ''.join([ self._NORMALIZED_CONTROL_SET_PREFIX, normalized_key_path[39:]])
[ "def", "_NormalizeKeyPath", "(", "self", ",", "key_path", ")", ":", "normalized_key_path", "=", "key_path", ".", "lower", "(", ")", "# The Registry key path should start with:", "# HKEY_LOCAL_MACHINE\\System\\ControlSet followed by 3 digits", "# which makes 39 characters.", "if",...
Normalizes a Windows Registry key path. Args: key_path (str): Windows Registry key path. Returns: str: normalized Windows Registry key path.
[ "Normalizes", "a", "Windows", "Registry", "key", "path", "." ]
python
train
mitsei/dlkit
dlkit/json_/learning/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/sessions.py#L2321-L2348
def assign_objective_requisite(self, objective_id, requisite_objective_id): """Creates a requirement dependency between two ``Objectives``. arg: objective_id (osid.id.Id): the ``Id`` of the dependent ``Objective`` arg: requisite_objective_id (osid.id.Id): the ``Id`` of the required ``Objective`` raise: AlreadyExists - ``objective_id`` already mapped to ``requisite_objective_id`` raise: NotFound - ``objective_id`` or ``requisite_objective_id`` not found raise: NullArgument - ``objective_id`` or ``requisite_objective_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ requisite_type = Type(**Relationship().get_type_data('OBJECTIVE.REQUISITE')) ras = self._get_provider_manager( 'RELATIONSHIP').get_relationship_admin_session_for_family( self.get_objective_bank_id(), proxy=self._proxy) rfc = ras.get_relationship_form_for_create(objective_id, requisite_objective_id, []) rfc.set_display_name('Objective Requisite') rfc.set_description('An Objective Requisite created by the ObjectiveRequisiteAssignmentSession') rfc.set_genus_type(requisite_type) ras.create_relationship(rfc)
[ "def", "assign_objective_requisite", "(", "self", ",", "objective_id", ",", "requisite_objective_id", ")", ":", "requisite_type", "=", "Type", "(", "*", "*", "Relationship", "(", ")", ".", "get_type_data", "(", "'OBJECTIVE.REQUISITE'", ")", ")", "ras", "=", "sel...
Creates a requirement dependency between two ``Objectives``. arg: objective_id (osid.id.Id): the ``Id`` of the dependent ``Objective`` arg: requisite_objective_id (osid.id.Id): the ``Id`` of the required ``Objective`` raise: AlreadyExists - ``objective_id`` already mapped to ``requisite_objective_id`` raise: NotFound - ``objective_id`` or ``requisite_objective_id`` not found raise: NullArgument - ``objective_id`` or ``requisite_objective_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Creates", "a", "requirement", "dependency", "between", "two", "Objectives", "." ]
python
train
dsoprea/PySchedules
pyschedules/examples/read.py
https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/examples/read.py#L73-L84
def new_schedule(self, program, station, time, duration, new, stereo, subtitled, hdtv, closeCaptioned, ei, tvRating, dolby, partNumber, partTotal): """Callback run for each new schedule entry""" if self.__v_schedule: # [Schedule: EP012964250031, 70387, 2013-01-16 21:00:00.00, 30, False, True, False, False, True, False, TV-PG, None, None, None] print("[Schedule: %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, " "%s, %s]" % (program, station, time, duration, new, stereo, subtitled, hdtv, closeCaptioned, ei, tvRating, dolby, partNumber, partTotal))
[ "def", "new_schedule", "(", "self", ",", "program", ",", "station", ",", "time", ",", "duration", ",", "new", ",", "stereo", ",", "subtitled", ",", "hdtv", ",", "closeCaptioned", ",", "ei", ",", "tvRating", ",", "dolby", ",", "partNumber", ",", "partTota...
Callback run for each new schedule entry
[ "Callback", "run", "for", "each", "new", "schedule", "entry" ]
python
train
webrecorder/pywb
pywb/apps/frontendapp.py
https://github.com/webrecorder/pywb/blob/77f8bb647639dd66f6b92b7a9174c28810e4b1d9/pywb/apps/frontendapp.py#L300-L328
def serve_cdx(self, environ, coll='$root'): """Make the upstream CDX query for a collection and response with the results of the query :param dict environ: The WSGI environment dictionary for the request :param str coll: The name of the collection this CDX query is for :return: The WbResponse containing the results of the CDX query :rtype: WbResponse """ base_url = self.rewriterapp.paths['cdx-server'] #if coll == self.all_coll: # coll = '*' cdx_url = base_url.format(coll=coll) if environ.get('QUERY_STRING'): cdx_url += '&' if '?' in cdx_url else '?' cdx_url += environ.get('QUERY_STRING') try: res = requests.get(cdx_url, stream=True) content_type = res.headers.get('Content-Type') return WbResponse.bin_stream(StreamIter(res.raw), content_type=content_type) except Exception as e: return WbResponse.text_response('Error: ' + str(e), status='400 Bad Request')
[ "def", "serve_cdx", "(", "self", ",", "environ", ",", "coll", "=", "'$root'", ")", ":", "base_url", "=", "self", ".", "rewriterapp", ".", "paths", "[", "'cdx-server'", "]", "#if coll == self.all_coll:", "# coll = '*'", "cdx_url", "=", "base_url", ".", "form...
Make the upstream CDX query for a collection and response with the results of the query :param dict environ: The WSGI environment dictionary for the request :param str coll: The name of the collection this CDX query is for :return: The WbResponse containing the results of the CDX query :rtype: WbResponse
[ "Make", "the", "upstream", "CDX", "query", "for", "a", "collection", "and", "response", "with", "the", "results", "of", "the", "query" ]
python
train
yyuu/botornado
boto/cloudfront/distribution.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/cloudfront/distribution.py#L617-L633
def _custom_policy(resource, expires=None, valid_after=None, ip_address=None): """ Creates a custom policy string based on the supplied parameters. """ condition = {} if expires: condition["DateLessThan"] = {"AWS:EpochTime": expires} if valid_after: condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after} if ip_address: if '/' not in ip_address: ip_address += "/32" condition["IpAddress"] = {"AWS:SourceIp": ip_address} policy = {"Statement": [{ "Resource": resource, "Condition": condition}]} return json.dumps(policy, separators=(",", ":"))
[ "def", "_custom_policy", "(", "resource", ",", "expires", "=", "None", ",", "valid_after", "=", "None", ",", "ip_address", "=", "None", ")", ":", "condition", "=", "{", "}", "if", "expires", ":", "condition", "[", "\"DateLessThan\"", "]", "=", "{", "\"AW...
Creates a custom policy string based on the supplied parameters.
[ "Creates", "a", "custom", "policy", "string", "based", "on", "the", "supplied", "parameters", "." ]
python
train
Skype4Py/Skype4Py
Skype4Py/skype.py
https://github.com/Skype4Py/Skype4Py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/skype.py#L477-L488
def ClearCallHistory(self, Username='ALL', Type=chsAllCalls): """Clears the call history. :Parameters: Username : str Skypename of the user. A special value of 'ALL' means that entries of all users should be removed. Type : `enums`.clt* Call type. """ cmd = 'CLEAR CALLHISTORY %s %s' % (str(Type), Username) self._DoCommand(cmd, cmd)
[ "def", "ClearCallHistory", "(", "self", ",", "Username", "=", "'ALL'", ",", "Type", "=", "chsAllCalls", ")", ":", "cmd", "=", "'CLEAR CALLHISTORY %s %s'", "%", "(", "str", "(", "Type", ")", ",", "Username", ")", "self", ".", "_DoCommand", "(", "cmd", ","...
Clears the call history. :Parameters: Username : str Skypename of the user. A special value of 'ALL' means that entries of all users should be removed. Type : `enums`.clt* Call type.
[ "Clears", "the", "call", "history", "." ]
python
train
debrouwere/google-analytics
googleanalytics/query.py
https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L1012-L1020
def next(self): """ Return a new query with a modified `start_index`. Mainly used internally to paginate through results. """ step = self.raw.get('max_results', 1000) start = self.raw.get('start_index', 1) + step self.raw['start_index'] = start return self
[ "def", "next", "(", "self", ")", ":", "step", "=", "self", ".", "raw", ".", "get", "(", "'max_results'", ",", "1000", ")", "start", "=", "self", ".", "raw", ".", "get", "(", "'start_index'", ",", "1", ")", "+", "step", "self", ".", "raw", "[", ...
Return a new query with a modified `start_index`. Mainly used internally to paginate through results.
[ "Return", "a", "new", "query", "with", "a", "modified", "start_index", ".", "Mainly", "used", "internally", "to", "paginate", "through", "results", "." ]
python
train
horazont/aioxmpp
aioxmpp/presence/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/presence/service.py#L344-L354
def make_stanza(self): """ Create and return a presence stanza with the current settings. :return: Presence stanza :rtype: :class:`aioxmpp.Presence` """ stanza = aioxmpp.Presence() self._state.apply_to_stanza(stanza) stanza.status.update(self._status) return stanza
[ "def", "make_stanza", "(", "self", ")", ":", "stanza", "=", "aioxmpp", ".", "Presence", "(", ")", "self", ".", "_state", ".", "apply_to_stanza", "(", "stanza", ")", "stanza", ".", "status", ".", "update", "(", "self", ".", "_status", ")", "return", "st...
Create and return a presence stanza with the current settings. :return: Presence stanza :rtype: :class:`aioxmpp.Presence`
[ "Create", "and", "return", "a", "presence", "stanza", "with", "the", "current", "settings", "." ]
python
train
NICTA/revrand
revrand/btypes.py
https://github.com/NICTA/revrand/blob/4c1881b6c1772d2b988518e49dde954f165acfb6/revrand/btypes.py#L13-L47
def check(self, value): """ Check a value falls within a bound. Parameters ---------- value : scalar or ndarray value to test Returns ------- bool: If all values fall within bounds Example ------- >>> bnd = Bound(1, 2) >>> bnd.check(1.5) True >>> bnd.check(3) False >>> bnd.check(np.ones(10)) True >>> bnd.check(np.array([1, 3, 1.5])) False """ if self.lower: if np.any(value < self.lower): return False if self.upper: if np.any(value > self.upper): return False return True
[ "def", "check", "(", "self", ",", "value", ")", ":", "if", "self", ".", "lower", ":", "if", "np", ".", "any", "(", "value", "<", "self", ".", "lower", ")", ":", "return", "False", "if", "self", ".", "upper", ":", "if", "np", ".", "any", "(", ...
Check a value falls within a bound. Parameters ---------- value : scalar or ndarray value to test Returns ------- bool: If all values fall within bounds Example ------- >>> bnd = Bound(1, 2) >>> bnd.check(1.5) True >>> bnd.check(3) False >>> bnd.check(np.ones(10)) True >>> bnd.check(np.array([1, 3, 1.5])) False
[ "Check", "a", "value", "falls", "within", "a", "bound", "." ]
python
train
Cairnarvon/uptime
src/__init__.py
https://github.com/Cairnarvon/uptime/blob/1ddfd06bb300c00e6dc4bd2a9ddf9bf1aa27b1bb/src/__init__.py#L325-L349
def uptime(): """Returns uptime in seconds if even remotely possible, or None if not.""" if __boottime is not None: return time.time() - __boottime return {'amiga': _uptime_amiga, 'aros12': _uptime_amiga, 'beos5': _uptime_beos, 'cygwin': _uptime_linux, 'darwin': _uptime_osx, 'haiku1': _uptime_beos, 'linux': _uptime_linux, 'linux-armv71': _uptime_linux, 'linux2': _uptime_linux, 'mac': _uptime_mac, 'minix3': _uptime_minix, 'riscos': _uptime_riscos, 'sunos5': _uptime_solaris, 'syllable': _uptime_syllable, 'win32': _uptime_windows, 'wince': _uptime_windows}.get(sys.platform, _uptime_bsd)() or \ _uptime_bsd() or _uptime_plan9() or _uptime_linux() or \ _uptime_windows() or _uptime_solaris() or _uptime_beos() or \ _uptime_amiga() or _uptime_riscos() or _uptime_posix() or \ _uptime_syllable() or _uptime_mac() or _uptime_osx()
[ "def", "uptime", "(", ")", ":", "if", "__boottime", "is", "not", "None", ":", "return", "time", ".", "time", "(", ")", "-", "__boottime", "return", "{", "'amiga'", ":", "_uptime_amiga", ",", "'aros12'", ":", "_uptime_amiga", ",", "'beos5'", ":", "_uptime...
Returns uptime in seconds if even remotely possible, or None if not.
[ "Returns", "uptime", "in", "seconds", "if", "even", "remotely", "possible", "or", "None", "if", "not", "." ]
python
valid
apache/incubator-mxnet
python/mxnet/ndarray/ndarray.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L3949-L3992
def split_v2(ary, indices_or_sections, axis=0, squeeze_axis=False): """Split an array into multiple sub-arrays. Parameters ---------- ary : NDArray Array to be divided into sub-arrays. indices_or_sections : int or tuple of ints If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays along `axis`. If such a split is not possible, an error is raised. If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where along `axis` the array is split. For example, ``[2, 3]`` would, for ``axis=0``, result in - ary[:2] - ary[2:3] - ary[3:] If an index exceeds the dimension of the array along `axis`, an empty sub-array is returned correspondingly. axis : int, optional The axis along which to split, default is 0. squeeze_axis: boolean, optional Whether to squeeze the axis of sub-arrays or not, only useful when size of the sub-arrays are 1 on the `axis`. Default is False. Returns ------- NDArray A created array. """ indices = [] axis_size = ary.shape[axis] if isinstance(indices_or_sections, int): sections = indices_or_sections if axis_size % sections: raise ValueError('array split does not result in an equal division') section_size = int(axis_size / sections) indices = [i * section_size for i in range(sections)] elif isinstance(indices_or_sections, tuple): indices = [0] + list(indices_or_sections) else: raise ValueError('indices_or_sections must either int or tuple of ints') return _internal._split_v2(ary, indices, axis, squeeze_axis)
[ "def", "split_v2", "(", "ary", ",", "indices_or_sections", ",", "axis", "=", "0", ",", "squeeze_axis", "=", "False", ")", ":", "indices", "=", "[", "]", "axis_size", "=", "ary", ".", "shape", "[", "axis", "]", "if", "isinstance", "(", "indices_or_section...
Split an array into multiple sub-arrays. Parameters ---------- ary : NDArray Array to be divided into sub-arrays. indices_or_sections : int or tuple of ints If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays along `axis`. If such a split is not possible, an error is raised. If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where along `axis` the array is split. For example, ``[2, 3]`` would, for ``axis=0``, result in - ary[:2] - ary[2:3] - ary[3:] If an index exceeds the dimension of the array along `axis`, an empty sub-array is returned correspondingly. axis : int, optional The axis along which to split, default is 0. squeeze_axis: boolean, optional Whether to squeeze the axis of sub-arrays or not, only useful when size of the sub-arrays are 1 on the `axis`. Default is False. Returns ------- NDArray A created array.
[ "Split", "an", "array", "into", "multiple", "sub", "-", "arrays", "." ]
python
train
twidi/django-extended-choices
extended_choices/choices.py
https://github.com/twidi/django-extended-choices/blob/bb310c5da4d53685c69173541172e4b813a6afb2/extended_choices/choices.py#L930-L972
def _convert_choices(self, choices): """Auto create display values then call super method""" final_choices = [] for choice in choices: if isinstance(choice, ChoiceEntry): final_choices.append(choice) continue original_choice = choice choice = list(choice) length = len(choice) assert 2 <= length <= 4, 'Invalid number of entries in %s' % (original_choice,) final_choice = [] # do we have attributes? if length > 2 and isinstance(choice[-1], Mapping): final_choice.append(choice.pop()) elif length == 4: attributes = choice.pop() assert attributes is None or isinstance(attributes, Mapping), 'Last argument must be a dict-like object in %s' % (original_choice,) if attributes: final_choice.append(attributes) # the constant final_choice.insert(0, choice.pop(0)) # the db value final_choice.insert(1, choice.pop(0)) if len(choice): # we were given a display value final_choice.insert(2, choice.pop(0)) else: # no display value, we compute it from the constant final_choice.insert(2, self.display_transform(final_choice[0])) final_choices.append(final_choice) return super(AutoDisplayChoices, self)._convert_choices(final_choices)
[ "def", "_convert_choices", "(", "self", ",", "choices", ")", ":", "final_choices", "=", "[", "]", "for", "choice", "in", "choices", ":", "if", "isinstance", "(", "choice", ",", "ChoiceEntry", ")", ":", "final_choices", ".", "append", "(", "choice", ")", ...
Auto create display values then call super method
[ "Auto", "create", "display", "values", "then", "call", "super", "method" ]
python
train
LabKey/labkey-api-python
labkey/security.py
https://github.com/LabKey/labkey-api-python/blob/3c8d393384d7cbb2785f8a7f5fe34007b17a76b8/labkey/security.py#L123-L141
def get_user_by_email(server_context, email): """ Get the user with the provided email. Throws a ValueError if not found. :param server_context: A LabKey server context. See utils.create_server_context. :param email: :return: """ url = server_context.build_url(user_controller, 'getUsers.api') payload = dict(includeDeactivatedAccounts=True) result = server_context.make_request(url, payload) if result is None or result['users'] is None: raise ValueError("No Users in container" + email) for user in result['users']: if user['email'] == email: return user else: raise ValueError("User not found: " + email)
[ "def", "get_user_by_email", "(", "server_context", ",", "email", ")", ":", "url", "=", "server_context", ".", "build_url", "(", "user_controller", ",", "'getUsers.api'", ")", "payload", "=", "dict", "(", "includeDeactivatedAccounts", "=", "True", ")", "result", ...
Get the user with the provided email. Throws a ValueError if not found. :param server_context: A LabKey server context. See utils.create_server_context. :param email: :return:
[ "Get", "the", "user", "with", "the", "provided", "email", ".", "Throws", "a", "ValueError", "if", "not", "found", ".", ":", "param", "server_context", ":", "A", "LabKey", "server", "context", ".", "See", "utils", ".", "create_server_context", ".", ":", "pa...
python
train
apache/incubator-mxnet
python/mxnet/operator.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L682-L688
def inc(self): """Get index for new entry.""" self.lock.acquire() cur = self.counter self.counter += 1 self.lock.release() return cur
[ "def", "inc", "(", "self", ")", ":", "self", ".", "lock", ".", "acquire", "(", ")", "cur", "=", "self", ".", "counter", "self", ".", "counter", "+=", "1", "self", ".", "lock", ".", "release", "(", ")", "return", "cur" ]
Get index for new entry.
[ "Get", "index", "for", "new", "entry", "." ]
python
train
JasonKessler/scattertext
scattertext/TermDocMatrix.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/TermDocMatrix.py#L108-L117
def get_term_freq_mat(self): ''' Returns ------- np.array with columns as categories and rows as terms ''' freq_mat = np.zeros(shape=(self.get_num_terms(), self.get_num_categories()), dtype=int) for cat_i in range(self.get_num_categories()): freq_mat[:, cat_i] = self._X[self._y == cat_i, :].sum(axis=0) return freq_mat
[ "def", "get_term_freq_mat", "(", "self", ")", ":", "freq_mat", "=", "np", ".", "zeros", "(", "shape", "=", "(", "self", ".", "get_num_terms", "(", ")", ",", "self", ".", "get_num_categories", "(", ")", ")", ",", "dtype", "=", "int", ")", "for", "cat_...
Returns ------- np.array with columns as categories and rows as terms
[ "Returns", "-------", "np", ".", "array", "with", "columns", "as", "categories", "and", "rows", "as", "terms" ]
python
train
casastorta/python-sar
sar/multiparser.py
https://github.com/casastorta/python-sar/blob/e6d8bb86524102d677f37e985302fad34e3297c1/sar/multiparser.py#L182-L209
def __get_part_date(self, part=''): ''' Retrieves date of the combo part from the file :param part: Part of the combo file (parsed out whole SAR file from the combo :type part: str. :return: string containing date in ISO format (YYY-MM-DD) ''' if (type(part) is not StringType): # We can cope with strings only return False firstline = part.split("\n")[0] info = firstline.split() datevalue = '' try: datevalue = info[3] except KeyError: datevalue = False except: traceback.print_exc() datevalue = False return(datevalue)
[ "def", "__get_part_date", "(", "self", ",", "part", "=", "''", ")", ":", "if", "(", "type", "(", "part", ")", "is", "not", "StringType", ")", ":", "# We can cope with strings only", "return", "False", "firstline", "=", "part", ".", "split", "(", "\"\\n\"",...
Retrieves date of the combo part from the file :param part: Part of the combo file (parsed out whole SAR file from the combo :type part: str. :return: string containing date in ISO format (YYY-MM-DD)
[ "Retrieves", "date", "of", "the", "combo", "part", "from", "the", "file", ":", "param", "part", ":", "Part", "of", "the", "combo", "file", "(", "parsed", "out", "whole", "SAR", "file", "from", "the", "combo", ":", "type", "part", ":", "str", ".", ":"...
python
train
DataDog/integrations-core
haproxy/datadog_checks/haproxy/haproxy.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/haproxy/datadog_checks/haproxy/haproxy.py#L601-L637
def _process_metrics( self, data, url, services_incl_filter=None, services_excl_filter=None, custom_tags=None, active_tag=None ): """ Data is a dictionary related to one host (one line) extracted from the csv. It should look like: {'pxname':'dogweb', 'svname':'i-4562165', 'scur':'42', ...} """ hostname = data['svname'] service_name = data['pxname'] back_or_front = data['back_or_front'] custom_tags = [] if custom_tags is None else custom_tags active_tag = [] if active_tag is None else active_tag tags = ["type:%s" % back_or_front, "instance_url:%s" % url, "service:%s" % service_name] tags.extend(custom_tags) tags.extend(active_tag) if self._is_service_excl_filtered(service_name, services_incl_filter, services_excl_filter): return if back_or_front == Services.BACKEND: tags.append('backend:%s' % hostname) if data.get('addr'): tags.append('server_address:{}'.format(data.get('addr'))) for key, value in data.items(): if HAProxy.METRICS.get(key): suffix = HAProxy.METRICS[key][1] name = "haproxy.%s.%s" % (back_or_front.lower(), suffix) try: if HAProxy.METRICS[key][0] == 'rate': self.rate(name, float(value), tags=tags) else: self.gauge(name, float(value), tags=tags) except ValueError: pass
[ "def", "_process_metrics", "(", "self", ",", "data", ",", "url", ",", "services_incl_filter", "=", "None", ",", "services_excl_filter", "=", "None", ",", "custom_tags", "=", "None", ",", "active_tag", "=", "None", ")", ":", "hostname", "=", "data", "[", "'...
Data is a dictionary related to one host (one line) extracted from the csv. It should look like: {'pxname':'dogweb', 'svname':'i-4562165', 'scur':'42', ...}
[ "Data", "is", "a", "dictionary", "related", "to", "one", "host", "(", "one", "line", ")", "extracted", "from", "the", "csv", ".", "It", "should", "look", "like", ":", "{", "pxname", ":", "dogweb", "svname", ":", "i", "-", "4562165", "scur", ":", "42"...
python
train
casebeer/audiogen
audiogen/sampler.py
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/sampler.py#L97-L108
def buffer(stream, buffer_size=BUFFER_SIZE): ''' Buffer the generator into byte strings of buffer_size samples Return a generator that outputs reasonably sized byte strings containing buffer_size samples from the generator stream. This allows us to outputing big chunks of the audio stream to disk at once for faster writes. ''' i = iter(stream) return iter(lambda: "".join(itertools.islice(i, buffer_size)), "")
[ "def", "buffer", "(", "stream", ",", "buffer_size", "=", "BUFFER_SIZE", ")", ":", "i", "=", "iter", "(", "stream", ")", "return", "iter", "(", "lambda", ":", "\"\"", ".", "join", "(", "itertools", ".", "islice", "(", "i", ",", "buffer_size", ")", ")"...
Buffer the generator into byte strings of buffer_size samples Return a generator that outputs reasonably sized byte strings containing buffer_size samples from the generator stream. This allows us to outputing big chunks of the audio stream to disk at once for faster writes.
[ "Buffer", "the", "generator", "into", "byte", "strings", "of", "buffer_size", "samples" ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras_converter.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras_converter.py#L103-L110
def _get_layer_converter_fn(layer): """Get the right converter function for Keras """ layer_type = type(layer) if layer_type in _KERAS_LAYER_REGISTRY: return _KERAS_LAYER_REGISTRY[layer_type] else: raise TypeError("Keras layer of type %s is not supported." % type(layer))
[ "def", "_get_layer_converter_fn", "(", "layer", ")", ":", "layer_type", "=", "type", "(", "layer", ")", "if", "layer_type", "in", "_KERAS_LAYER_REGISTRY", ":", "return", "_KERAS_LAYER_REGISTRY", "[", "layer_type", "]", "else", ":", "raise", "TypeError", "(", "\"...
Get the right converter function for Keras
[ "Get", "the", "right", "converter", "function", "for", "Keras" ]
python
train
fozzle/python-brotherprint
brotherprint/brotherprint.py
https://github.com/fozzle/python-brotherprint/blob/5fb92df11b599c30a7da3d6ac7ed60acff230044/brotherprint/brotherprint.py#L906-L924
def machine_op(self, operation): '''Perform machine operations Args: operations: which operation you would like Returns: None Raises: RuntimeError: Invalid operation ''' operations = {'feed2start': 1, 'feedone': 2, 'cut': 3 } if operation in operations: self.send('^'+'O'+'P'+chr(operations[operation])) else: raise RuntimeError('Invalid operation.')
[ "def", "machine_op", "(", "self", ",", "operation", ")", ":", "operations", "=", "{", "'feed2start'", ":", "1", ",", "'feedone'", ":", "2", ",", "'cut'", ":", "3", "}", "if", "operation", "in", "operations", ":", "self", ".", "send", "(", "'^'", "+",...
Perform machine operations Args: operations: which operation you would like Returns: None Raises: RuntimeError: Invalid operation
[ "Perform", "machine", "operations", "Args", ":", "operations", ":", "which", "operation", "you", "would", "like", "Returns", ":", "None", "Raises", ":", "RuntimeError", ":", "Invalid", "operation" ]
python
train
twisted/mantissa
xmantissa/liveform.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/liveform.py#L451-L468
def getInitialLiveForms(self): """ Make and return as many L{LiveForm} instances as are necessary to hold our default values. @return: some subforms. @rtype: C{list} of L{LiveForm} """ liveForms = [] if self._defaultStuff: for values in self._defaultStuff: liveForms.append(self._makeDefaultLiveForm(values)) else: # or only one, for the first new thing liveForms.append( self._makeALiveForm( self.parameters, self._newIdentifier(), False)) return liveForms
[ "def", "getInitialLiveForms", "(", "self", ")", ":", "liveForms", "=", "[", "]", "if", "self", ".", "_defaultStuff", ":", "for", "values", "in", "self", ".", "_defaultStuff", ":", "liveForms", ".", "append", "(", "self", ".", "_makeDefaultLiveForm", "(", "...
Make and return as many L{LiveForm} instances as are necessary to hold our default values. @return: some subforms. @rtype: C{list} of L{LiveForm}
[ "Make", "and", "return", "as", "many", "L", "{", "LiveForm", "}", "instances", "as", "are", "necessary", "to", "hold", "our", "default", "values", "." ]
python
train
MKLab-ITI/reveal-graph-embedding
reveal_graph_embedding/datautil/insight_datautil/insight_read_data.py
https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/datautil/insight_datautil/insight_read_data.py#L139-L174
def scipy_sparse_to_csv(filepath, matrix, separator=",", directed=False, numbering="matlab"): """ Writes sparse matrix in separated value format. """ matrix = spsp.coo_matrix(matrix) shape = matrix.shape nnz = matrix.getnnz() if numbering == "matlab": row = matrix.row + 1 col = matrix.col + 1 data = matrix.data elif numbering == "c": row = matrix.row col = matrix.col data = matrix.data else: print("Invalid numbering style.") raise RuntimeError with open(filepath, "w") as f: # Write metadata. file_row = "n_rows:" + separator + str(shape[0]) + separator +\ "n_cols:" + separator + str(shape[1]) + separator +\ "nnz:" + separator + str(nnz) + separator +\ "directed:" + separator + str(directed) +\ "\n" f.write(file_row) for edge in range(row.size): if directed is False: if col[edge] < row[edge]: continue file_row = str(row[edge]) + separator + str(col[edge]) + separator + str(data[edge]) + "\n" f.write(file_row)
[ "def", "scipy_sparse_to_csv", "(", "filepath", ",", "matrix", ",", "separator", "=", "\",\"", ",", "directed", "=", "False", ",", "numbering", "=", "\"matlab\"", ")", ":", "matrix", "=", "spsp", ".", "coo_matrix", "(", "matrix", ")", "shape", "=", "matrix"...
Writes sparse matrix in separated value format.
[ "Writes", "sparse", "matrix", "in", "separated", "value", "format", "." ]
python
train
release-engineering/productmd
productmd/common.py
https://github.com/release-engineering/productmd/blob/49256bf2e8c84124f42346241140b986ad7bfc38/productmd/common.py#L430-L462
def create_release_id(short, version, type, bp_short=None, bp_version=None, bp_type=None): """ Create release_id from given parts. :param short: Release short name :type short: str :param version: Release version :type version: str :param version: Release type :type version: str :param bp_short: Base Product short name :type bp_short: str :param bp_version: Base Product version :type bp_version: str :param bp_type: Base Product type :rtype: str """ if not is_valid_release_short(short): raise ValueError("Release short name is not valid: %s" % short) if not is_valid_release_version(version): raise ValueError("Release short version is not valid: %s" % version) if not is_valid_release_type(type): raise ValueError("Release type is not valid: %s" % type) if type == "ga": result = "%s-%s" % (short, version) else: result = "%s-%s-%s" % (short, version, type) if bp_short: result += "@%s" % create_release_id(bp_short, bp_version, bp_type) return result
[ "def", "create_release_id", "(", "short", ",", "version", ",", "type", ",", "bp_short", "=", "None", ",", "bp_version", "=", "None", ",", "bp_type", "=", "None", ")", ":", "if", "not", "is_valid_release_short", "(", "short", ")", ":", "raise", "ValueError"...
Create release_id from given parts. :param short: Release short name :type short: str :param version: Release version :type version: str :param version: Release type :type version: str :param bp_short: Base Product short name :type bp_short: str :param bp_version: Base Product version :type bp_version: str :param bp_type: Base Product type :rtype: str
[ "Create", "release_id", "from", "given", "parts", "." ]
python
train
angr/pyvex
pyvex/lifting/lifter.py
https://github.com/angr/pyvex/blob/c418edc1146982b2a0579bf56e5993c1c7046b19/pyvex/lifting/lifter.py#L36-L78
def _lift(self, data, bytes_offset=None, max_bytes=None, max_inst=None, opt_level=1, traceflags=None, allow_arch_optimizations=None, strict_block_end=None, skip_stmts=False, collect_data_refs=False): """ Wrapper around the `lift` method on Lifters. Should not be overridden in child classes. :param data: The bytes to lift as either a python string of bytes or a cffi buffer object. :param bytes_offset: The offset into `data` to start lifting at. :param max_bytes: The maximum number of bytes to lift. If set to None, no byte limit is used. :param max_inst: The maximum number of instructions to lift. If set to None, no instruction limit is used. :param opt_level: The level of optimization to apply to the IR, 0-2. Most likely will be ignored in any lifter other then LibVEX. :param traceflags: The libVEX traceflags, controlling VEX debug prints. Most likely will be ignored in any lifter other than LibVEX. :param allow_arch_optimizations: Should the LibVEX lifter be allowed to perform lift-time preprocessing optimizations (e.g., lookback ITSTATE optimization on THUMB) Most likely will be ignored in any lifter other than LibVEX. :param strict_block_end: Should the LibVEX arm-thumb split block at some instructions, for example CB{N}Z. :param skip_stmts: Should the lifter skip transferring IRStmts from C to Python. :param collect_data_refs: Should the LibVEX lifter collect data references in C. """ irsb = IRSB.empty_block(self.arch, self.addr) self.data = data self.bytes_offset = bytes_offset self.opt_level = opt_level self.traceflags = traceflags self.allow_arch_optimizations = allow_arch_optimizations self.strict_block_end = strict_block_end self.collect_data_refs = collect_data_refs self.max_inst = max_inst self.max_bytes = max_bytes self.skip_stmts = skip_stmts self.irsb = irsb self.lift() return self.irsb
[ "def", "_lift", "(", "self", ",", "data", ",", "bytes_offset", "=", "None", ",", "max_bytes", "=", "None", ",", "max_inst", "=", "None", ",", "opt_level", "=", "1", ",", "traceflags", "=", "None", ",", "allow_arch_optimizations", "=", "None", ",", "stric...
Wrapper around the `lift` method on Lifters. Should not be overridden in child classes. :param data: The bytes to lift as either a python string of bytes or a cffi buffer object. :param bytes_offset: The offset into `data` to start lifting at. :param max_bytes: The maximum number of bytes to lift. If set to None, no byte limit is used. :param max_inst: The maximum number of instructions to lift. If set to None, no instruction limit is used. :param opt_level: The level of optimization to apply to the IR, 0-2. Most likely will be ignored in any lifter other then LibVEX. :param traceflags: The libVEX traceflags, controlling VEX debug prints. Most likely will be ignored in any lifter other than LibVEX. :param allow_arch_optimizations: Should the LibVEX lifter be allowed to perform lift-time preprocessing optimizations (e.g., lookback ITSTATE optimization on THUMB) Most likely will be ignored in any lifter other than LibVEX. :param strict_block_end: Should the LibVEX arm-thumb split block at some instructions, for example CB{N}Z. :param skip_stmts: Should the lifter skip transferring IRStmts from C to Python. :param collect_data_refs: Should the LibVEX lifter collect data references in C.
[ "Wrapper", "around", "the", "lift", "method", "on", "Lifters", ".", "Should", "not", "be", "overridden", "in", "child", "classes", "." ]
python
train
rabitt/pysox
sox/core.py
https://github.com/rabitt/pysox/blob/eae89bde74567136ec3f723c3e6b369916d9b837/sox/core.py#L129-L168
def play(args): '''Pass an argument list to play. Parameters ---------- args : iterable Argument list for play. The first item can, but does not need to, be 'play'. Returns: -------- status : bool True on success. ''' if args[0].lower() != "play": args.insert(0, "play") else: args[0] = "play" try: logger.info("Executing: %s", " ".join(args)) process_handle = subprocess.Popen( args, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) status = process_handle.wait() if process_handle.stderr is not None: logger.info(process_handle.stderr) if status == 0: return True else: logger.info("Play returned with error code %s", status) return False except OSError as error_msg: logger.error("OSError: Play failed! %s", error_msg) except TypeError as error_msg: logger.error("TypeError: %s", error_msg) return False
[ "def", "play", "(", "args", ")", ":", "if", "args", "[", "0", "]", ".", "lower", "(", ")", "!=", "\"play\"", ":", "args", ".", "insert", "(", "0", ",", "\"play\"", ")", "else", ":", "args", "[", "0", "]", "=", "\"play\"", "try", ":", "logger", ...
Pass an argument list to play. Parameters ---------- args : iterable Argument list for play. The first item can, but does not need to, be 'play'. Returns: -------- status : bool True on success.
[ "Pass", "an", "argument", "list", "to", "play", "." ]
python
valid
kwikteam/phy
phy/utils/event.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/utils/event.py#L68-L101
def connect(self, func=None, event=None, set_method=False): """Register a callback function to a given event. To register a callback function to the `spam` event, where `obj` is an instance of a class deriving from `EventEmitter`: ```python @obj.connect def on_spam(arg1, arg2): pass ``` This is called when `obj.emit('spam', arg1, arg2)` is called. Several callback functions can be registered for a given event. The registration order is conserved and may matter in applications. """ if func is None: return partial(self.connect, set_method=set_method) # Get the event name from the function. if event is None: event = self._get_on_name(func) # We register the callback function. self._callbacks[event].append(func) # A new method self.event() emitting the event is created. if set_method: self._create_emitter(event) return func
[ "def", "connect", "(", "self", ",", "func", "=", "None", ",", "event", "=", "None", ",", "set_method", "=", "False", ")", ":", "if", "func", "is", "None", ":", "return", "partial", "(", "self", ".", "connect", ",", "set_method", "=", "set_method", ")...
Register a callback function to a given event. To register a callback function to the `spam` event, where `obj` is an instance of a class deriving from `EventEmitter`: ```python @obj.connect def on_spam(arg1, arg2): pass ``` This is called when `obj.emit('spam', arg1, arg2)` is called. Several callback functions can be registered for a given event. The registration order is conserved and may matter in applications.
[ "Register", "a", "callback", "function", "to", "a", "given", "event", "." ]
python
train