Dataset schema (per-column type and value range):

    repo              string, length 7–55
    path              string, length 4–223
    url               string, length 87–315
    code              string, length 75–104k
    code_tokens       sequence
    docstring         string, length 1–46.9k
    docstring_tokens  sequence
    language          string class, 1 value
    partition         string class, 3 values
    avg_line_len      float64, 7.91–980
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/api/processing_block.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/api/processing_block.py#L45-L61
def delete(block_id):
    """Processing block detail resource."""
    _url = get_root_url()
    try:
        DB.delete_processing_block(block_id)
        response = dict(message='Deleted block',
                        id='{}'.format(block_id),
                        links=dict(list='{}/processing-blocks'.format(_url),
                                   home='{}'.format(_url)))
        return response, HTTPStatus.OK
    except RuntimeError as error:
        response = dict(error='Failed to delete Processing Block: {}'
                        .format(block_id),
                        reason=str(error),
                        links=dict(list='{}/processing-blocks'.format(_url),
                                   home='{}'.format(_url)))
        return response, HTTPStatus.OK
[ "def", "delete", "(", "block_id", ")", ":", "_url", "=", "get_root_url", "(", ")", "try", ":", "DB", ".", "delete_processing_block", "(", "block_id", ")", "response", "=", "dict", "(", "message", "=", "'Deleted block'", ",", "id", "=", "'{}'", ".", "format", "(", "block_id", ")", ",", "links", "=", "dict", "(", "list", "=", "'{}/processing-blocks'", ".", "format", "(", "_url", ")", ",", "home", "=", "'{}'", ".", "format", "(", "_url", ")", ")", ")", "return", "response", ",", "HTTPStatus", ".", "OK", "except", "RuntimeError", "as", "error", ":", "response", "=", "dict", "(", "error", "=", "'Failed to delete Processing Block: {}'", ".", "format", "(", "block_id", ")", ",", "reason", "=", "str", "(", "error", ")", ",", "links", "=", "dict", "(", "list", "=", "'{}/processing-blocks'", ".", "format", "(", "_url", ")", ",", "home", "=", "'{}'", ".", "format", "(", "_url", ")", ")", ")", "return", "response", ",", "HTTPStatus", ".", "OK" ]
Processing block detail resource.
[ "Processing", "block", "detail", "resource", "." ]
python
train
45.294118
SpockBotMC/SpockBot
spockbot/plugins/helpers/inventory.py
https://github.com/SpockBotMC/SpockBot/blob/f89911551f18357720034fbaa52837a0d09f66ea/spockbot/plugins/helpers/inventory.py#L24-L35
def total_stored(self, wanted, slots=None):
    """
    Calculates the total number of items of that type
    in the current window or given slot range.

    Args:
        wanted: function(Slot) or Slot or itemID or (itemID, metadata)
    """
    if slots is None:
        slots = self.window.slots
    wanted = make_slot_check(wanted)
    return sum(slot.amount for slot in slots if wanted(slot))
[ "def", "total_stored", "(", "self", ",", "wanted", ",", "slots", "=", "None", ")", ":", "if", "slots", "is", "None", ":", "slots", "=", "self", ".", "window", ".", "slots", "wanted", "=", "make_slot_check", "(", "wanted", ")", "return", "sum", "(", "slot", ".", "amount", "for", "slot", "in", "slots", "if", "wanted", "(", "slot", ")", ")" ]
Calculates the total number of items of that type in the current window or given slot range. Args: wanted: function(Slot) or Slot or itemID or (itemID, metadata)
[ "Calculates", "the", "total", "number", "of", "items", "of", "that", "type", "in", "the", "current", "window", "or", "given", "slot", "range", "." ]
python
train
35.5
DLR-RM/RAFCON
source/rafcon/gui/controllers/utils/tree_view_controller.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/utils/tree_view_controller.py#L614-L623
def select_entry(self, core_element_id, by_cursor=True):
    """Selects the row entry belonging to the given core_element_id by cursor or tree selection"""
    for row_num, element_row in enumerate(self.list_store):
        # Compare data port ids
        if element_row[self.ID_STORAGE_ID] == core_element_id:
            if by_cursor:
                self.tree_view.set_cursor(row_num)
            else:
                self.tree_view.get_selection().select_path((row_num, ))
            break
[ "def", "select_entry", "(", "self", ",", "core_element_id", ",", "by_cursor", "=", "True", ")", ":", "for", "row_num", ",", "element_row", "in", "enumerate", "(", "self", ".", "list_store", ")", ":", "# Compare data port ids", "if", "element_row", "[", "self", ".", "ID_STORAGE_ID", "]", "==", "core_element_id", ":", "if", "by_cursor", ":", "self", ".", "tree_view", ".", "set_cursor", "(", "row_num", ")", "else", ":", "self", ".", "tree_view", ".", "get_selection", "(", ")", ".", "select_path", "(", "(", "row_num", ",", ")", ")", "break" ]
Selects the row entry belonging to the given core_element_id by cursor or tree selection
[ "Selects", "the", "row", "entry", "belonging", "to", "the", "given", "core_element_id", "by", "cursor", "or", "tree", "selection" ]
python
train
52.2
mitsei/dlkit
dlkit/handcar/learning/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/sessions.py#L2576-L2599
def get_activities_for_objective(self, objective_id=None):
    """Gets the activities for the given objective.

    In plenary mode, the returned list contains all of the activities
    mapped to the objective Id or an error results if an Id in the
    supplied list is not found or inaccessible. Otherwise, inaccessible
    Activities may be omitted from the list and may present the elements
    in any order including returning a unique set.

    arg:    objectiveId (osid.id.Id): Id of the Objective
    return: (osid.learning.ActivityList) - list of enrollments
    raise:  NotFound - objectiveId not found
    raise:  NullArgument - objectiveId is null
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    compliance: mandatory - This method is must be implemented.
    """
    if objective_id is None:
        raise NullArgument()
    # Should also check if objective_id exists?
    url_path = construct_url('activities',
                             bank_id=self._catalog_idstr,
                             obj_id=objective_id)
    return objects.ActivityList(self._get_request(url_path))
[ "def", "get_activities_for_objective", "(", "self", ",", "objective_id", "=", "None", ")", ":", "if", "objective_id", "is", "None", ":", "raise", "NullArgument", "(", ")", "# Should also check if objective_id exists?", "url_path", "=", "construct_url", "(", "'activities'", ",", "bank_id", "=", "self", ".", "_catalog_idstr", ",", "obj_id", "=", "objective_id", ")", "return", "objects", ".", "ActivityList", "(", "self", ".", "_get_request", "(", "url_path", ")", ")" ]
Gets the activities for the given objective. In plenary mode, the returned list contains all of the activities mapped to the objective Id or an error results if an Id in the supplied list is not found or inaccessible. Otherwise, inaccessible Activities may be omitted from the list and may present the elements in any order including returning a unique set. arg: objectiveId (osid.id.Id): Id of the Objective return: (osid.learning.ActivityList) - list of enrollments raise: NotFound - objectiveId not found raise: NullArgument - objectiveId is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method is must be implemented.
[ "Gets", "the", "activities", "for", "the", "given", "objective", ".", "In", "plenary", "mode", "the", "returned", "list", "contains", "all", "of", "the", "activities", "mapped", "to", "the", "objective", "Id", "or", "an", "error", "results", "if", "an", "Id", "in", "the", "supplied", "list", "is", "not", "found", "or", "inaccessible", ".", "Otherwise", "inaccessible", "Activities", "may", "be", "omitted", "from", "the", "list", "and", "may", "present", "the", "elements", "in", "any", "order", "including", "returning", "a", "unique", "set", ".", "arg", ":", "objectiveId", "(", "osid", ".", "id", ".", "Id", ")", ":", "Id", "of", "the", "Objective", "return", ":", "(", "osid", ".", "learning", ".", "ActivityList", ")", "-", "list", "of", "enrollments", "raise", ":", "NotFound", "-", "objectiveId", "not", "found", "raise", ":", "NullArgument", "-", "objectiveId", "is", "null", "raise", ":", "OperationFailed", "-", "unable", "to", "complete", "request", "raise", ":", "PermissionDenied", "-", "authorization", "failure", "compliance", ":", "mandatory", "-", "This", "method", "is", "must", "be", "implemented", "." ]
python
train
51.083333
StevenMaude/bbc-radio-tracklisting-downloader
bbc_tracklist.py
https://github.com/StevenMaude/bbc-radio-tracklisting-downloader/blob/9fe9096b4d889888f65756444e4fd71352b92458/bbc_tracklist.py#L72-L79
def get_broadcast_date(pid):
    """Take BBC pid (string); extract and return broadcast date as string."""
    print("Extracting first broadcast date...")
    broadcast_etree = open_listing_page(pid + '/broadcasts.inc')
    original_broadcast_date, = broadcast_etree.xpath(
        '(//div[@class="grid__inner"]//div'
        '[@class="broadcast-event__time beta"]/@title)[1]')
    return original_broadcast_date
[ "def", "get_broadcast_date", "(", "pid", ")", ":", "print", "(", "\"Extracting first broadcast date...\"", ")", "broadcast_etree", "=", "open_listing_page", "(", "pid", "+", "'/broadcasts.inc'", ")", "original_broadcast_date", ",", "=", "broadcast_etree", ".", "xpath", "(", "'(//div[@class=\"grid__inner\"]//div'", "'[@class=\"broadcast-event__time beta\"]/@title)[1]'", ")", "return", "original_broadcast_date" ]
Take BBC pid (string); extract and return broadcast date as string.
[ "Take", "BBC", "pid", "(", "string", ")", ";", "extract", "and", "return", "broadcast", "date", "as", "string", "." ]
python
train
50.625
SoCo/SoCo
soco/core.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/core.py#L629-L643
def seek(self, timestamp):
    """Seek to a given timestamp in the current track, specified in the
    format of HH:MM:SS or H:MM:SS.

    Raises:
        ValueError: if the given timestamp is invalid.
    """
    if not re.match(r'^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$', timestamp):
        raise ValueError('invalid timestamp, use HH:MM:SS format')
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'REL_TIME'),
        ('Target', timestamp)
    ])
[ "def", "seek", "(", "self", ",", "timestamp", ")", ":", "if", "not", "re", ".", "match", "(", "r'^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$'", ",", "timestamp", ")", ":", "raise", "ValueError", "(", "'invalid timestamp, use HH:MM:SS format'", ")", "self", ".", "avTransport", ".", "Seek", "(", "[", "(", "'InstanceID'", ",", "0", ")", ",", "(", "'Unit'", ",", "'REL_TIME'", ")", ",", "(", "'Target'", ",", "timestamp", ")", "]", ")" ]
Seek to a given timestamp in the current track, specified in the format of HH:MM:SS or H:MM:SS. Raises: ValueError: if the given timestamp is invalid.
[ "Seek", "to", "a", "given", "timestamp", "in", "the", "current", "track", "specified", "in", "the", "format", "of", "HH", ":", "MM", ":", "SS", "or", "H", ":", "MM", ":", "SS", "." ]
python
train
33.666667
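As a quick standalone check of the validation step above (only the regex is taken from the source), note that the pattern accepts one- or two-digit hours but requires exactly two digits for minutes and seconds:

import re

TIMESTAMP = re.compile(r'^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$')

for ts in ('0:01:30', '00:01:30', '1:2:3'):
    # '1:2:3' fails: minutes and seconds must be two digits
    print(ts, bool(TIMESTAMP.match(ts)))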
wdecoster/nanoget
nanoget/extraction_functions.py
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L309-L322
def stream_fastq_full(fastq, threads):
    """Generator for returning metrics extracted from fastq.

    Extract from a fastq file:
    -readname
    -average and median quality
    -read_lenght
    """
    logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
    inputfastq = handle_compressed_input(fastq)
    with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
        for results in executor.map(extract_all_from_fastq,
                                    SeqIO.parse(inputfastq, "fastq")):
            yield results
    logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
[ "def", "stream_fastq_full", "(", "fastq", ",", "threads", ")", ":", "logging", ".", "info", "(", "\"Nanoget: Starting to collect full metrics from plain fastq file.\"", ")", "inputfastq", "=", "handle_compressed_input", "(", "fastq", ")", "with", "cfutures", ".", "ProcessPoolExecutor", "(", "max_workers", "=", "threads", ")", "as", "executor", ":", "for", "results", "in", "executor", ".", "map", "(", "extract_all_from_fastq", ",", "SeqIO", ".", "parse", "(", "inputfastq", ",", "\"fastq\"", ")", ")", ":", "yield", "results", "logging", ".", "info", "(", "\"Nanoget: Finished collecting statistics from plain fastq file.\"", ")" ]
Generator for returning metrics extracted from fastq. Extract from a fastq file: -readname -average and median quality -read_lenght
[ "Generator", "for", "returning", "metrics", "extracted", "from", "fastq", "." ]
python
train
42.714286
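The function above streams results out of a process pool, in input order, as a generator. A minimal self-contained sketch of the same executor.map pattern (the square worker is a placeholder, not part of nanoget):

import concurrent.futures as cfutures

def square(x):
    # stand-in for extract_all_from_fastq; must be a top-level
    # function so the process pool can pickle it
    return x * x

def stream_results(items, threads=2):
    with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
        # executor.map yields results lazily, preserving input order
        yield from executor.map(square, items)

if __name__ == '__main__':
    print(list(stream_results(range(5))))  # [0, 1, 4, 9, 16]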
squdle/baseconvert
baseconvert/baseconvert.py
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L571-L592
def expand_recurring(number, repeat=5):
    """
    Expands a recurring pattern within a number.

    Args:
        number(tuple): the number to process in the form:
            (int, int, int, ... ".", ... , int int int)
        repeat: the number of times to expand the pattern.

    Returns:
        The original number with recurring pattern expanded.

    Example:
        >>> expand_recurring((1, ".", 0, "[", 9, "]"), repeat=3)
        (1, '.', 0, 9, 9, 9, 9)
    """
    if "[" in number:
        pattern_index = number.index("[")
        pattern = number[pattern_index + 1:-1]
        number = number[:pattern_index]
        number = number + pattern * (repeat + 1)
    return number
[ "def", "expand_recurring", "(", "number", ",", "repeat", "=", "5", ")", ":", "if", "\"[\"", "in", "number", ":", "pattern_index", "=", "number", ".", "index", "(", "\"[\"", ")", "pattern", "=", "number", "[", "pattern_index", "+", "1", ":", "-", "1", "]", "number", "=", "number", "[", ":", "pattern_index", "]", "number", "=", "number", "+", "pattern", "*", "(", "repeat", "+", "1", ")", "return", "number" ]
Expands a recurring pattern within a number. Args: number(tuple): the number to process in the form: (int, int, int, ... ".", ... , int int int) repeat: the number of times to expand the pattern. Returns: The original number with recurring pattern expanded. Example: >>> expand_recurring((1, ".", 0, "[", 9, "]"), repeat=3) (1, '.', 0, 9, 9, 9, 9)
[ "Expands", "a", "recurring", "pattern", "within", "a", "number", ".", "Args", ":", "number", "(", "tuple", ")", ":", "the", "number", "to", "process", "in", "the", "form", ":", "(", "int", "int", "int", "...", ".", "...", "int", "int", "int", ")", "repeat", ":", "the", "number", "of", "times", "to", "expand", "the", "pattern", ".", "Returns", ":", "The", "original", "number", "with", "recurring", "pattern", "expanded", ".", "Example", ":", ">>>", "expand_recurring", "((", "1", ".", "0", "[", "9", "]", ")", "repeat", "=", "3", ")", "(", "1", ".", "0", "9", "9", "9", "9", ")" ]
python
train
31.454545
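The doctest above can be checked directly; with expand_recurring from the code field in scope, the bracketed pattern (9,) is stripped and appended repeat + 1 times:

# assumes expand_recurring from the code above is in scope
number = (1, ".", 0, "[", 9, "]")
print(expand_recurring(number, repeat=3))   # (1, '.', 0, 9, 9, 9, 9)
print(expand_recurring((1, ".", 5)))        # no '[' -> returned unchanged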
DataONEorg/d1_python
gmn/src/d1_gmn/app/did.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/did.py#L207-L212
def is_revision_chain_placeholder(pid):
    """For replicas, the PIDs referenced in revision chains are reserved for
    use by other replicas."""
    return d1_gmn.app.models.ReplicaRevisionChainReference.objects.filter(
        pid__did=pid
    ).exists()
[ "def", "is_revision_chain_placeholder", "(", "pid", ")", ":", "return", "d1_gmn", ".", "app", ".", "models", ".", "ReplicaRevisionChainReference", ".", "objects", ".", "filter", "(", "pid__did", "=", "pid", ")", ".", "exists", "(", ")" ]
For replicas, the PIDs referenced in revision chains are reserved for use by other replicas.
[ "For", "replicas", "the", "PIDs", "referenced", "in", "revision", "chains", "are", "reserved", "for", "use", "by", "other", "replicas", "." ]
python
train
42
HewlettPackard/python-hpOneView
hpOneView/resources/networking/interconnects.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/networking/interconnects.py#L203-L220
def update_ports(self, ports, id_or_uri, timeout=-1):
    """
    Updates the interconnect ports.

    Args:
        id_or_uri: Can be either the interconnect id or the interconnect uri.
        ports (list): Ports to update.
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView;
            it just stops waiting for its completion.

    Returns:
        dict: The interconnect.
    """
    resources = merge_default_values(ports, {'type': 'port'})
    uri = self._client.build_uri(id_or_uri) + "/update-ports"
    return self._client.update(resources, uri, timeout)
[ "def", "update_ports", "(", "self", ",", "ports", ",", "id_or_uri", ",", "timeout", "=", "-", "1", ")", ":", "resources", "=", "merge_default_values", "(", "ports", ",", "{", "'type'", ":", "'port'", "}", ")", "uri", "=", "self", ".", "_client", ".", "build_uri", "(", "id_or_uri", ")", "+", "\"/update-ports\"", "return", "self", ".", "_client", ".", "update", "(", "resources", ",", "uri", ",", "timeout", ")" ]
Updates the interconnect ports. Args: id_or_uri: Can be either the interconnect id or the interconnect uri. ports (list): Ports to update. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: The interconnect.
[ "Updates", "the", "interconnect", "ports", "." ]
python
train
37.611111
silverlogic/djangorestframework-expander
expander/parse_qs.py
https://github.com/silverlogic/djangorestframework-expander/blob/b1cf60c7076169cbd6ad65350841c86080564f97/expander/parse_qs.py#L29-L41
def qs_from_dict(qsdict, prefix=""):
    '''
    Same as dict_from_qs, but in reverse
    i.e. {"period": {"di": {}, "fhr": {}}} => "period.di,period.fhr"
    '''
    prefix = prefix + '.' if prefix else ""

    def descend(qsd):
        for key, val in sorted(qsd.items()):
            if val:
                yield qs_from_dict(val, prefix + key)
            else:
                yield prefix + key

    return ",".join(descend(qsdict))
[ "def", "qs_from_dict", "(", "qsdict", ",", "prefix", "=", "\"\"", ")", ":", "prefix", "=", "prefix", "+", "'.'", "if", "prefix", "else", "\"\"", "def", "descend", "(", "qsd", ")", ":", "for", "key", ",", "val", "in", "sorted", "(", "qsd", ".", "items", "(", ")", ")", ":", "if", "val", ":", "yield", "qs_from_dict", "(", "val", ",", "prefix", "+", "key", ")", "else", ":", "yield", "prefix", "+", "key", "return", "\",\"", ".", "join", "(", "descend", "(", "qsdict", ")", ")" ]
Same as dict_from_qs, but in reverse i.e. {"period": {"di": {}, "fhr": {}}} => "period.di,period.fhr"
[ "Same", "as", "dict_from_qs", "but", "in", "reverse", "i", ".", "e", ".", "{", "period", ":", "{", "di", ":", "{}", "fhr", ":", "{}", "}}", "=", ">", "period", ".", "di", "period", ".", "fhr" ]
python
train
32.769231
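With qs_from_dict from the code above in scope, a nested dict folds into the dotted, comma-separated form, with leaf keys carrying their full prefix:

# assumes qs_from_dict from the code above is in scope
print(qs_from_dict({"period": {"di": {}, "fhr": {}}}))  # period.di,period.fhr
print(qs_from_dict({"a": {}, "b": {"c": {}}}))          # a,b.c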
CamDavidsonPilon/lifetimes
lifetimes/plotting.py
https://github.com/CamDavidsonPilon/lifetimes/blob/f926308bc03c17c1d12fead729de43885cf13321/lifetimes/plotting.py#L570-L618
def plot_transaction_rate_heterogeneity(
    model,
    suptitle="Heterogeneity in Transaction Rate",
    xlabel="Transaction Rate",
    ylabel="Density",
    suptitle_fontsize=14,
    **kwargs
):
    """
    Plot the estimated gamma distribution of lambda (customers' propensities to purchase).

    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model, for now only for BG/NBD
    suptitle: str, optional
        Figure suptitle
    xlabel: str, optional
        Figure xlabel
    ylabel: str, optional
        Figure ylabel
    kwargs
        Passed into the matplotlib.pyplot.plot command.

    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    from matplotlib import pyplot as plt

    r, alpha = model._unload_params("r", "alpha")
    rate_mean = r / alpha
    rate_var = r / alpha ** 2
    rv = stats.gamma(r, scale=1 / alpha)
    lim = rv.ppf(0.99)
    x = np.linspace(0, lim, 100)

    fig, ax = plt.subplots(1)
    fig.suptitle("Heterogeneity in Transaction Rate", fontsize=suptitle_fontsize, fontweight="bold")
    ax.set_title("mean: {:.3f}, var: {:.3f}".format(rate_mean, rate_var))
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.plot(x, rv.pdf(x), **kwargs)
    return ax
[ "def", "plot_transaction_rate_heterogeneity", "(", "model", ",", "suptitle", "=", "\"Heterogeneity in Transaction Rate\"", ",", "xlabel", "=", "\"Transaction Rate\"", ",", "ylabel", "=", "\"Density\"", ",", "suptitle_fontsize", "=", "14", ",", "*", "*", "kwargs", ")", ":", "from", "matplotlib", "import", "pyplot", "as", "plt", "r", ",", "alpha", "=", "model", ".", "_unload_params", "(", "\"r\"", ",", "\"alpha\"", ")", "rate_mean", "=", "r", "/", "alpha", "rate_var", "=", "r", "/", "alpha", "**", "2", "rv", "=", "stats", ".", "gamma", "(", "r", ",", "scale", "=", "1", "/", "alpha", ")", "lim", "=", "rv", ".", "ppf", "(", "0.99", ")", "x", "=", "np", ".", "linspace", "(", "0", ",", "lim", ",", "100", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "1", ")", "fig", ".", "suptitle", "(", "\"Heterogeneity in Transaction Rate\"", ",", "fontsize", "=", "suptitle_fontsize", ",", "fontweight", "=", "\"bold\"", ")", "ax", ".", "set_title", "(", "\"mean: {:.3f}, var: {:.3f}\"", ".", "format", "(", "rate_mean", ",", "rate_var", ")", ")", "ax", ".", "set_xlabel", "(", "xlabel", ")", "ax", ".", "set_ylabel", "(", "ylabel", ")", "fig", ".", "tight_layout", "(", "rect", "=", "[", "0", ",", "0.03", ",", "1", ",", "0.95", "]", ")", "plt", ".", "plot", "(", "x", ",", "rv", ".", "pdf", "(", "x", ")", ",", "*", "*", "kwargs", ")", "return", "ax" ]
Plot the estimated gamma distribution of lambda (customers' propensities to purchase). Parameters ---------- model: lifetimes model A fitted lifetimes model, for now only for BG/NBD suptitle: str, optional Figure suptitle xlabel: str, optional Figure xlabel ylabel: str, optional Figure ylabel kwargs Passed into the matplotlib.pyplot.plot command. Returns ------- axes: matplotlib.AxesSubplot
[ "Plot", "the", "estimated", "gamma", "distribution", "of", "lambda", "(", "customers", "propensities", "to", "purchase", ")", "." ]
python
train
25.469388
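The plotting routine above reduces to evaluating a gamma pdf up to its 99th percentile. A self-contained sketch with hypothetical fitted parameters r and alpha (not taken from any real model fit):

import numpy as np
from scipy import stats
from matplotlib import pyplot as plt

r, alpha = 0.25, 4.0                  # hypothetical BG/NBD fit results
rv = stats.gamma(r, scale=1 / alpha)  # lambda ~ Gamma(shape=r, rate=alpha)
x = np.linspace(0, rv.ppf(0.99), 100)

plt.plot(x, rv.pdf(x))
plt.xlabel("Transaction Rate")
plt.ylabel("Density")
plt.show()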
RockFeng0/rtsf-web
webuidriver/actions.py
https://github.com/RockFeng0/rtsf-web/blob/ceabcf62ddf1c969a97b5c7a4a4c547198b6ea71/webuidriver/actions.py#L637-L643
def Ctrl(cls, key):
    """ Perform a Ctrl key-combination event on the specified element
    @note: key event -> control + key
    @param key: e.g. 'X'
    """
    element = cls._element()
    element.send_keys(Keys.CONTROL, key)
[ "def", "Ctrl", "(", "cls", ",", "key", ")", ":", "element", "=", "cls", ".", "_element", "(", ")", "element", ".", "send_keys", "(", "Keys", ".", "CONTROL", ",", "key", ")" ]
Perform a Ctrl key-combination event on the specified element @note: key event -> control + key @param key: e.g. 'X'
[ "在指定元素上执行ctrl组合键事件" ]
python
train
31.142857
bloomreach/s4cmd
s4cmd.py
https://github.com/bloomreach/s4cmd/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/s4cmd.py#L680-L688
def connect(self):
    '''Connect to S3 storage'''
    try:
        if S3Handler.S3_KEYS:
            self.s3 = BotoClient(self.opt, S3Handler.S3_KEYS[0], S3Handler.S3_KEYS[1])
        else:
            self.s3 = BotoClient(self.opt)
    except Exception as e:
        raise RetryFailure('Unable to connect to s3: %s' % e)
[ "def", "connect", "(", "self", ")", ":", "try", ":", "if", "S3Handler", ".", "S3_KEYS", ":", "self", ".", "s3", "=", "BotoClient", "(", "self", ".", "opt", ",", "S3Handler", ".", "S3_KEYS", "[", "0", "]", ",", "S3Handler", ".", "S3_KEYS", "[", "1", "]", ")", "else", ":", "self", ".", "s3", "=", "BotoClient", "(", "self", ".", "opt", ")", "except", "Exception", "as", "e", ":", "raise", "RetryFailure", "(", "'Unable to connect to s3: %s'", "%", "e", ")" ]
Connect to S3 storage
[ "Connect", "to", "S3", "storage" ]
python
test
33.333333
materialsproject/pymatgen
pymatgen/phonon/dos.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/phonon/dos.py#L216-L250
def internal_energy(self, t, structure=None):
    """
    Phonon contribution to the internal energy at temperature T obtained
    from the integration of the DOS. Only positive frequencies will be used.
    Result in J/mol-c. A mol-c is the abbreviation of a mole-cell, that is,
    the number of Avogadro times the atoms in a unit cell. To compare with
    experimental data the result should be divided by the number of unit
    formulas in the cell. If the structure is provided the division is
    performed internally and the result is in J/mol

    Args:
        t: a temperature in K
        structure: the structure of the system. If not None it will be used
            to determine the numer of formula units

    Returns:
        Phonon contribution to the internal energy
    """
    if t == 0:
        return self.zero_point_energy(structure=structure)

    freqs = self._positive_frequencies
    dens = self._positive_densities

    coth = lambda x: 1.0 / np.tanh(x)
    wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
    e = np.trapz(freqs * coth(wd2kt) * dens, x=freqs) / 2
    e *= THZ_TO_J * const.Avogadro

    if structure:
        formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
        e /= formula_units
    return e
[ "def", "internal_energy", "(", "self", ",", "t", ",", "structure", "=", "None", ")", ":", "if", "t", "==", "0", ":", "return", "self", ".", "zero_point_energy", "(", "structure", "=", "structure", ")", "freqs", "=", "self", ".", "_positive_frequencies", "dens", "=", "self", ".", "_positive_densities", "coth", "=", "lambda", "x", ":", "1.0", "/", "np", ".", "tanh", "(", "x", ")", "wd2kt", "=", "freqs", "/", "(", "2", "*", "BOLTZ_THZ_PER_K", "*", "t", ")", "e", "=", "np", ".", "trapz", "(", "freqs", "*", "coth", "(", "wd2kt", ")", "*", "dens", ",", "x", "=", "freqs", ")", "/", "2", "e", "*=", "THZ_TO_J", "*", "const", ".", "Avogadro", "if", "structure", ":", "formula_units", "=", "structure", ".", "composition", ".", "num_atoms", "/", "structure", ".", "composition", ".", "reduced_composition", ".", "num_atoms", "e", "/=", "formula_units", "return", "e" ]
Phonon contribution to the internal energy at temperature T obtained from the integration of the DOS. Only positive frequencies will be used. Result in J/mol-c. A mol-c is the abbreviation of a mole-cell, that is, the number of Avogadro times the atoms in a unit cell. To compare with experimental data the result should be divided by the number of unit formulas in the cell. If the structure is provided the division is performed internally and the result is in J/mol Args: t: a temperature in K structure: the structure of the system. If not None it will be used to determine the numer of formula units Returns: Phonon contribution to the internal energy
[ "Phonon", "contribution", "to", "the", "internal", "energy", "at", "temperature", "T", "obtained", "from", "the", "integration", "of", "the", "DOS", ".", "Only", "positive", "frequencies", "will", "be", "used", ".", "Result", "in", "J", "/", "mol", "-", "c", ".", "A", "mol", "-", "c", "is", "the", "abbreviation", "of", "a", "mole", "-", "cell", "that", "is", "the", "number", "of", "Avogadro", "times", "the", "atoms", "in", "a", "unit", "cell", ".", "To", "compare", "with", "experimental", "data", "the", "result", "should", "be", "divided", "by", "the", "number", "of", "unit", "formulas", "in", "the", "cell", ".", "If", "the", "structure", "is", "provided", "the", "division", "is", "performed", "internally", "and", "the", "result", "is", "in", "J", "/", "mol" ]
python
train
38.742857
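In formula form, the quantity the code integrates is (a sketch; the code works with frequencies nu in THz, with BOLTZ_THZ_PER_K and THZ_TO_J absorbing the unit conversions):

    U(T) = \frac{N_A}{2} \int_0^{\infty} h\nu \, \coth\!\left(\frac{h\nu}{2 k_B T}\right) g(\nu)\, \mathrm{d}\nu

where g(\nu) is the phonon density of states. As T -> 0 the coth factor tends to 1 and the expression reduces to the zero-point energy, which is why the t == 0 case is delegated to zero_point_energy.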
saltstack/salt
salt/utils/kickstart.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/kickstart.py#L560-L601
def parse_raid(rule):
    '''
    Parse the raid line
    '''
    parser = argparse.ArgumentParser()
    rules = shlex.split(rule)
    rules.pop(0)

    partitions = []
    newrules = []
    for count in range(0, len(rules)):
        if count == 0:
            newrules.append(rules[count])
            continue
        elif rules[count].startswith('--'):
            newrules.append(rules[count])
            continue
        else:
            partitions.append(rules[count])
    rules = newrules

    parser.add_argument('mntpoint')
    parser.add_argument('--level', dest='level', action='store')
    parser.add_argument('--device', dest='device', action='store')
    parser.add_argument('--spares', dest='spares', action='store')
    parser.add_argument('--fstype', dest='fstype', action='store')
    parser.add_argument('--fsoptions', dest='fsoptions', action='store')
    parser.add_argument('--label', dest='label', action='store')
    parser.add_argument('--noformat', dest='noformat', action='store_true')
    parser.add_argument('--useexisting', dest='useexisting', action='store_true')
    parser.add_argument('--encrypted', dest='encrypted', action='store_true')
    parser.add_argument('--passphrase', dest='passphrase', action='store')
    parser.add_argument('--escrowcert', dest='escrowcert', action='store')
    parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store')

    args = clean_args(vars(parser.parse_args(rules)))
    if partitions:
        args['partitions'] = partitions
    parser = None
    return args
[ "def", "parse_raid", "(", "rule", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "rules", "=", "shlex", ".", "split", "(", "rule", ")", "rules", ".", "pop", "(", "0", ")", "partitions", "=", "[", "]", "newrules", "=", "[", "]", "for", "count", "in", "range", "(", "0", ",", "len", "(", "rules", ")", ")", ":", "if", "count", "==", "0", ":", "newrules", ".", "append", "(", "rules", "[", "count", "]", ")", "continue", "elif", "rules", "[", "count", "]", ".", "startswith", "(", "'--'", ")", ":", "newrules", ".", "append", "(", "rules", "[", "count", "]", ")", "continue", "else", ":", "partitions", ".", "append", "(", "rules", "[", "count", "]", ")", "rules", "=", "newrules", "parser", ".", "add_argument", "(", "'mntpoint'", ")", "parser", ".", "add_argument", "(", "'--level'", ",", "dest", "=", "'level'", ",", "action", "=", "'store'", ")", "parser", ".", "add_argument", "(", "'--device'", ",", "dest", "=", "'device'", ",", "action", "=", "'store'", ")", "parser", ".", "add_argument", "(", "'--spares'", ",", "dest", "=", "'spares'", ",", "action", "=", "'store'", ")", "parser", ".", "add_argument", "(", "'--fstype'", ",", "dest", "=", "'fstype'", ",", "action", "=", "'store'", ")", "parser", ".", "add_argument", "(", "'--fsoptions'", ",", "dest", "=", "'fsoptions'", ",", "action", "=", "'store'", ")", "parser", ".", "add_argument", "(", "'--label'", ",", "dest", "=", "'label'", ",", "action", "=", "'store'", ")", "parser", ".", "add_argument", "(", "'--noformat'", ",", "dest", "=", "'noformat'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--useexisting'", ",", "dest", "=", "'useexisting'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--encrypted'", ",", "dest", "=", "'encrypted'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--passphrase'", ",", "dest", "=", "'passphrase'", ",", "action", "=", "'store'", ")", "parser", ".", "add_argument", "(", "'--escrowcert'", ",", "dest", "=", "'escrowcert'", ",", "action", "=", "'store'", ")", "parser", ".", "add_argument", "(", "'--backuppassphrase'", ",", "dest", "=", "'backuppassphrase'", ",", "action", "=", "'store'", ")", "args", "=", "clean_args", "(", "vars", "(", "parser", ".", "parse_args", "(", "rules", ")", ")", ")", "if", "partitions", ":", "args", "[", "'partitions'", "]", "=", "partitions", "parser", "=", "None", "return", "args" ]
Parse the raid line
[ "Parse", "the", "raid", "line" ]
python
train
37.285714
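Before argparse sees anything, parse_raid separates the positional partition names from the -- option tokens. A standalone sketch of just that splitting step, on a hypothetical kickstart line:

import shlex

rule = 'raid / --level=1 --device=md0 raid.01 raid.02'  # hypothetical line
tokens = shlex.split(rule)[1:]    # drop the leading 'raid' keyword
mntpoint, rest = tokens[0], tokens[1:]
options = [t for t in rest if t.startswith('--')]
partitions = [t for t in rest if not t.startswith('--')]
print(mntpoint, options, partitions)
# / ['--level=1', '--device=md0'] ['raid.01', 'raid.02']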
DomainTools/python_api
domaintools/api.py
https://github.com/DomainTools/python_api/blob/17be85fd4913fbe14d7660a4f4829242f1663e60/domaintools/api.py#L163-L168
def reverse_whois(self, query, exclude=[], scope='current', mode=None, **kwargs):
    """List of one or more terms to search for in the Whois record,
    as a Python list or separated with the pipe character ( | ).
    """
    return self._results('reverse-whois', '/v1/reverse-whois', terms=delimited(query),
                         exclude=delimited(exclude), scope=scope, mode=mode, **kwargs)
[ "def", "reverse_whois", "(", "self", ",", "query", ",", "exclude", "=", "[", "]", ",", "scope", "=", "'current'", ",", "mode", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_results", "(", "'reverse-whois'", ",", "'/v1/reverse-whois'", ",", "terms", "=", "delimited", "(", "query", ")", ",", "exclude", "=", "delimited", "(", "exclude", ")", ",", "scope", "=", "scope", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")" ]
List of one or more terms to search for in the Whois record, as a Python list or separated with the pipe character ( | ).
[ "List", "of", "one", "or", "more", "terms", "to", "search", "for", "in", "the", "Whois", "record", "as", "a", "Python", "list", "or", "separated", "with", "the", "pipe", "character", "(", "|", ")", "." ]
python
train
68.833333
codelv/enaml-native
src/enamlnative/core/dev.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/core/dev.py#L491-L514
def render_files(self, root=None):
    """ Render the file path as accordions """
    if root is None:
        tmp = os.environ.get('TMP')
        root = sys.path[1 if tmp and tmp in sys.path else 0]
    items = []
    for filename in os.listdir(root):
        # for subdirname in dirnames:
        #     path = os.path.join(dirname, subdirname)
        #     items.append(FOLDER_TMPL.format(
        #         name=subdirname,
        #         id=path,
        #         items=self.render_files(path)
        #     ))
        #for filename in filenames:
        f, ext = os.path.splitext(filename)
        if ext in ['.py', '.enaml']:
            items.append(FILE_TMPL.format(
                name=filename,
                id=filename
            ))
    return "".join(items)
[ "def", "render_files", "(", "self", ",", "root", "=", "None", ")", ":", "if", "root", "is", "None", ":", "tmp", "=", "os", ".", "environ", ".", "get", "(", "'TMP'", ")", "root", "=", "sys", ".", "path", "[", "1", "if", "tmp", "and", "tmp", "in", "sys", ".", "path", "else", "0", "]", "items", "=", "[", "]", "for", "filename", "in", "os", ".", "listdir", "(", "root", ")", ":", "# for subdirname in dirnames:", "# path = os.path.join(dirname, subdirname)", "# items.append(FOLDER_TMPL.format(", "# name=subdirname,", "# id=path,", "# items=self.render_files(path)", "# ))", "#for filename in filenames:", "f", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "ext", "in", "[", "'.py'", ",", "'.enaml'", "]", ":", "items", ".", "append", "(", "FILE_TMPL", ".", "format", "(", "name", "=", "filename", ",", "id", "=", "filename", ")", ")", "return", "\"\"", ".", "join", "(", "items", ")" ]
Render the file path as accordions
[ "Render", "the", "file", "path", "as", "accordions" ]
python
train
35.375
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L915-L921
def org_update(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /org-xxxx/update API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2Fupdate
    """
    return DXHTTPRequest('/%s/update' % object_id, input_params, always_retry=always_retry, **kwargs)
[ "def", "org_update", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/update'", "%", "object_id", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
Invokes the /org-xxxx/update API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2Fupdate
[ "Invokes", "the", "/", "org", "-", "xxxx", "/", "update", "API", "method", "." ]
python
train
50.571429
ThreatResponse/margaritashotgun
margaritashotgun/remote_shell.py
https://github.com/ThreatResponse/margaritashotgun/blob/6dee53ef267959b214953439968244cc46a19690/margaritashotgun/remote_shell.py#L200-L218
def upload_file(self, local_path, remote_path):
    """ Upload a file from the local filesystem to the remote host

    :type local_path: str
    :param local_path: path of local file to upload
    :type remote_path: str
    :param remote_path: destination path of upload on remote host
    """
    logger.debug("{0}: uploading {1} to {0}:{2}".format(self.target_address,
                                                        local_path,
                                                        remote_path))
    try:
        sftp = paramiko.SFTPClient.from_transport(self.transport())
        sftp.put(local_path, remote_path)
        sftp.close()
    except SSHException as ex:
        logger.warn(("{0}: LiME module upload failed with exception:"
                     "{1}".format(self.target_address, ex)))
[ "def", "upload_file", "(", "self", ",", "local_path", ",", "remote_path", ")", ":", "logger", ".", "debug", "(", "\"{0}: uploading {1} to {0}:{2}\"", ".", "format", "(", "self", ".", "target_address", ",", "local_path", ",", "remote_path", ")", ")", "try", ":", "sftp", "=", "paramiko", ".", "SFTPClient", ".", "from_transport", "(", "self", ".", "transport", "(", ")", ")", "sftp", ".", "put", "(", "local_path", ",", "remote_path", ")", "sftp", ".", "close", "(", ")", "except", "SSHException", "as", "ex", ":", "logger", ".", "warn", "(", "(", "\"{0}: LiME module upload failed with exception:\"", "\"{1}\"", ".", "format", "(", "self", ".", "target_address", ",", "ex", ")", ")", ")" ]
Upload a file from the local filesystem to the remote host :type local_path: str :param local_path: path of local file to upload :type remote_path: str :param remote_path: destination path of upload on remote host
[ "Upload", "a", "file", "from", "the", "local", "filesystem", "to", "the", "remote", "host" ]
python
train
45.526316
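A minimal standalone sketch of the same SFTP upload over a paramiko transport; the host, credentials, and paths here are hypothetical:

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('host.example.com', username='analyst', password='secret')  # hypothetical
sftp = paramiko.SFTPClient.from_transport(client.get_transport())
sftp.put('/tmp/lime.ko', '/tmp/lime.ko')  # local path, then remote path
sftp.close()
client.close()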
ronaldguillen/wave
wave/compat.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/compat.py#L251-L261
def get_all_related_many_to_many_objects(opts):
    """
    Django 1.8 changed meta api, see docstr in compat.get_all_related_objects()

    :param opts: Options instance
    :return: list of many-to-many relations
    """
    if django.VERSION < (1, 9):
        return opts.get_all_related_many_to_many_objects()
    else:
        return [r for r in opts.related_objects if r.field.many_to_many]
[ "def", "get_all_related_many_to_many_objects", "(", "opts", ")", ":", "if", "django", ".", "VERSION", "<", "(", "1", ",", "9", ")", ":", "return", "opts", ".", "get_all_related_many_to_many_objects", "(", ")", "else", ":", "return", "[", "r", "for", "r", "in", "opts", ".", "related_objects", "if", "r", ".", "field", ".", "many_to_many", "]" ]
Django 1.8 changed meta api, see docstr in compat.get_all_related_objects() :param opts: Options instance :return: list of many-to-many relations
[ "Django", "1", ".", "8", "changed", "meta", "api", "see", "docstr", "in", "compat", ".", "get_all_related_objects", "()" ]
python
train
35.090909
mfitzp/padua
padua/process.py
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/process.py#L330-L377
def fold_columns_to_rows(df, levels_from=2):
    """
    Take a levels from the columns and fold down into the row index.
    This destroys the existing index; existing rows will appear as
    columns under the new column index

    :param df:
    :param levels_from: The level (inclusive) from which column index will be folded
    :return:
    """
    df = df.copy()
    df.reset_index(inplace=True, drop=True)  # Wipe out the current index
    df = df.T

    # Build all index combinations
    a = [list(set(df.index.get_level_values(i))) for i in range(0, levels_from)]
    combinations = list(itertools.product(*a))
    names = df.index.names[:levels_from]

    concats = []
    for c in combinations:
        try:
            dfcc = df.loc[c]
        except KeyError:
            continue
        else:
            # Silly pandas
            if len(dfcc.shape) == 1:
                continue
            dfcc.columns = pd.MultiIndex.from_tuples([c]*dfcc.shape[1], names=names)
            concats.append(dfcc)

    # Concatenate
    dfc = pd.concat(concats, axis=1)
    dfc.sort_index(axis=1, inplace=True)

    # Fix name if collapsed
    if dfc.index.name is None:
        dfc.index.name = df.index.names[-1]

    return dfc
[ "def", "fold_columns_to_rows", "(", "df", ",", "levels_from", "=", "2", ")", ":", "df", "=", "df", ".", "copy", "(", ")", "df", ".", "reset_index", "(", "inplace", "=", "True", ",", "drop", "=", "True", ")", "# Wipe out the current index", "df", "=", "df", ".", "T", "# Build all index combinations", "a", "=", "[", "list", "(", "set", "(", "df", ".", "index", ".", "get_level_values", "(", "i", ")", ")", ")", "for", "i", "in", "range", "(", "0", ",", "levels_from", ")", "]", "combinations", "=", "list", "(", "itertools", ".", "product", "(", "*", "a", ")", ")", "names", "=", "df", ".", "index", ".", "names", "[", ":", "levels_from", "]", "concats", "=", "[", "]", "for", "c", "in", "combinations", ":", "try", ":", "dfcc", "=", "df", ".", "loc", "[", "c", "]", "except", "KeyError", ":", "continue", "else", ":", "# Silly pandas", "if", "len", "(", "dfcc", ".", "shape", ")", "==", "1", ":", "continue", "dfcc", ".", "columns", "=", "pd", ".", "MultiIndex", ".", "from_tuples", "(", "[", "c", "]", "*", "dfcc", ".", "shape", "[", "1", "]", ",", "names", "=", "names", ")", "concats", ".", "append", "(", "dfcc", ")", "# Concatenate", "dfc", "=", "pd", ".", "concat", "(", "concats", ",", "axis", "=", "1", ")", "dfc", ".", "sort_index", "(", "axis", "=", "1", ",", "inplace", "=", "True", ")", "# Fix name if collapsed", "if", "dfc", ".", "index", ".", "name", "is", "None", ":", "dfc", ".", "index", ".", "name", "=", "df", ".", "index", ".", "names", "[", "-", "1", "]", "return", "dfc" ]
Take a levels from the columns and fold down into the row index. This destroys the existing index; existing rows will appear as columns under the new column index :param df: :param levels_from: The level (inclusive) from which column index will be folded :return:
[ "Take", "a", "levels", "from", "the", "columns", "and", "fold", "down", "into", "the", "row", "index", ".", "This", "destroys", "the", "existing", "index", ";", "existing", "rows", "will", "appear", "as", "columns", "under", "the", "new", "column", "index" ]
python
train
25.5625
markperdue/pyvesync
src/pyvesync/vesync.py
https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesync.py#L186-L189
def update_energy(self, bypass_check=False):
    """Fetch updated energy information about devices"""
    for outlet in self.outlets:
        outlet.update_energy(bypass_check)
[ "def", "update_energy", "(", "self", ",", "bypass_check", "=", "False", ")", ":", "for", "outlet", "in", "self", ".", "outlets", ":", "outlet", ".", "update_energy", "(", "bypass_check", ")" ]
Fetch updated energy information about devices
[ "Fetch", "updated", "energy", "information", "about", "devices" ]
python
train
46.25
phoebe-project/phoebe2
phoebe/parameters/dataset.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/dataset.py#L345-L367
def mesh(**kwargs):
    """
    Create parameters for a new mesh dataset.

    Generally, this will be used as an input to the kind argument in
    :meth:`phoebe.frontend.bundle.Bundle.add_dataset`

    :parameter **kwargs: defaults for the values of any of the parameters
    :return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
        created :class:`phoebe.parameters.parameters.Parameter`s
    """
    obs_params = []

    syn_params, constraints = mesh_syn(syn=False, **kwargs)
    obs_params += syn_params.to_list()

    obs_params += [SelectParameter(qualifier='include_times', value=kwargs.get('include_times', []), description='append to times from the following datasets/time standards', choices=['t0@system'])]
    obs_params += [SelectParameter(qualifier='columns', value=kwargs.get('columns', []), description='columns to expose within the mesh', choices=_mesh_columns)]

    #obs_params += mesh_dep(**kwargs).to_list()

    return ParameterSet(obs_params), constraints
[ "def", "mesh", "(", "*", "*", "kwargs", ")", ":", "obs_params", "=", "[", "]", "syn_params", ",", "constraints", "=", "mesh_syn", "(", "syn", "=", "False", ",", "*", "*", "kwargs", ")", "obs_params", "+=", "syn_params", ".", "to_list", "(", ")", "obs_params", "+=", "[", "SelectParameter", "(", "qualifier", "=", "'include_times'", ",", "value", "=", "kwargs", ".", "get", "(", "'include_times'", ",", "[", "]", ")", ",", "description", "=", "'append to times from the following datasets/time standards'", ",", "choices", "=", "[", "'t0@system'", "]", ")", "]", "obs_params", "+=", "[", "SelectParameter", "(", "qualifier", "=", "'columns'", ",", "value", "=", "kwargs", ".", "get", "(", "'columns'", ",", "[", "]", ")", ",", "description", "=", "'columns to expose within the mesh'", ",", "choices", "=", "_mesh_columns", ")", "]", "#obs_params += mesh_dep(**kwargs).to_list()", "return", "ParameterSet", "(", "obs_params", ")", ",", "constraints" ]
Create parameters for a new mesh dataset. Generally, this will be used as an input to the kind argument in :meth:`phoebe.frontend.bundle.Bundle.add_dataset` :parameter **kwargs: defaults for the values of any of the parameters :return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly created :class:`phoebe.parameters.parameters.Parameter`s
[ "Create", "parameters", "for", "a", "new", "mesh", "dataset", "." ]
python
train
42.782609
frejanordsiek/hdf5storage
hdf5storage/__init__.py
https://github.com/frejanordsiek/hdf5storage/blob/539275141dd3a4efbbbfd9bdb978f3ed59e3f05d/hdf5storage/__init__.py#L1727-L1836
def reads(paths, filename='data.h5', options=None, **keywords):
    """ Reads data from an HDF5 file (high level).

    High level function to read one or more pieces of data from an HDF5
    file located at the paths specified in `paths` into Python
    types. Each path is specified as a POSIX style path where the data
    to read is located.

    There are various options that can be used to influence how the data
    is read. They can be passed as an already constructed ``Options``
    into `options` or as additional keywords that will be used to make
    one by ``options = Options(**keywords)``.

    Paths are POSIX style and can either be given directly as ``str`` or
    ``bytes``, or the separated path can be given as an iterable of
    ``str`` and ``bytes``. Each part of a separated path is escaped
    using ``utilities.escape_path``. Otherwise, the path is assumed to
    be already escaped. Escaping is done so that targets with a part
    that starts with one or more periods, contain slashes, and/or
    contain nulls can be used without causing the wrong Group to be
    looked in or the wrong target to be looked at. It essentially allows
    one to make a Dataset named ``'..'`` or ``'a/a'`` instead of moving
    around in the Dataset hierarchy.

    Parameters
    ----------
    paths : iterable of paths
        An iterable of paths to read data from. Each must be a POSIX
        style path where the directory name is the Group to put it in
        and the basename is the name to write it to. The format of
        paths is described in the paragraph above.
    filename : str, optional
        The name of the HDF5 file to read data from.
    options : Options, optional
        The options to use when reading. Is mutually exclusive with any
        additional keyword arguments given (set to ``None`` or don't
        provide to use them).
    **keywords :
        If `options` was not provided or was ``None``, these are used
        as arguments to make a ``Options``.

    Returns
    -------
    datas : iterable
        An iterable holding the piece of data for each path in `paths`
        in the same order.

    Raises
    ------
    exceptions.CantReadError
        If reading the data can't be done.

    See Also
    --------
    utilities.process_path
    utilities.escape_path
    read : Reads just a single piece of data
    writes
    write
    Options
    utilities.read_data : Low level version.
    """
    # Pack the different options into an Options class if an Options was
    # not given. By default, the matlab_compatible option is set to
    # False. So, if it wasn't passed in the keywords, this needs to be
    # added to override the default value (True) for a new Options.
    if not isinstance(options, Options):
        kw = copy.deepcopy(keywords)
        if 'matlab_compatible' not in kw:
            kw['matlab_compatible'] = False
        options = Options(**kw)

    # Process the paths and stuff the group names and target names as
    # tuples into toread.
    toread = []
    for p in paths:
        groupname, targetname = utilities.process_path(p)

        # Pack them into toread
        toread.append((groupname, targetname))

    # Open the hdf5 file and start reading the data. This is all wrapped
    # in a try block, so that the file can be closed if any errors
    # happen (the error is re-raised).
    try:
        f = None
        f = h5py.File(filename, mode='r')

        # Read the data item by item
        datas = []
        for groupname, targetname in toread:
            # Check that the containing group is in f and is indeed a
            # group. If it isn't an error needs to be thrown.
            if groupname not in f \
                    or not isinstance(f[groupname], h5py.Group):
                raise exceptions.CantReadError(
                    'Could not find containing Group '
                    + groupname + '.')

            # Hand off everything to the low level reader.
            datas.append(utilities.read_data(f, f[groupname],
                                             targetname, options))
    except:
        raise
    finally:
        if f is not None:
            f.close()

    return datas
[ "def", "reads", "(", "paths", ",", "filename", "=", "'data.h5'", ",", "options", "=", "None", ",", "*", "*", "keywords", ")", ":", "# Pack the different options into an Options class if an Options was", "# not given. By default, the matlab_compatible option is set to", "# False. So, if it wasn't passed in the keywords, this needs to be", "# added to override the default value (True) for a new Options.", "if", "not", "isinstance", "(", "options", ",", "Options", ")", ":", "kw", "=", "copy", ".", "deepcopy", "(", "keywords", ")", "if", "'matlab_compatible'", "not", "in", "kw", ":", "kw", "[", "'matlab_compatible'", "]", "=", "False", "options", "=", "Options", "(", "*", "*", "kw", ")", "# Process the paths and stuff the group names and target names as", "# tuples into toread.", "toread", "=", "[", "]", "for", "p", "in", "paths", ":", "groupname", ",", "targetname", "=", "utilities", ".", "process_path", "(", "p", ")", "# Pack them into toread", "toread", ".", "append", "(", "(", "groupname", ",", "targetname", ")", ")", "# Open the hdf5 file and start reading the data. This is all wrapped", "# in a try block, so that the file can be closed if any errors", "# happen (the error is re-raised).", "try", ":", "f", "=", "None", "f", "=", "h5py", ".", "File", "(", "filename", ",", "mode", "=", "'r'", ")", "# Read the data item by item", "datas", "=", "[", "]", "for", "groupname", ",", "targetname", "in", "toread", ":", "# Check that the containing group is in f and is indeed a", "# group. If it isn't an error needs to be thrown.", "if", "groupname", "not", "in", "f", "or", "not", "isinstance", "(", "f", "[", "groupname", "]", ",", "h5py", ".", "Group", ")", ":", "raise", "exceptions", ".", "CantReadError", "(", "'Could not find containing Group '", "+", "groupname", "+", "'.'", ")", "# Hand off everything to the low level reader.", "datas", ".", "append", "(", "utilities", ".", "read_data", "(", "f", ",", "f", "[", "groupname", "]", ",", "targetname", ",", "options", ")", ")", "except", ":", "raise", "finally", ":", "if", "f", "is", "not", "None", ":", "f", ".", "close", "(", ")", "return", "datas" ]
Reads data from an HDF5 file (high level). High level function to read one or more pieces of data from an HDF5 file located at the paths specified in `paths` into Python types. Each path is specified as a POSIX style path where the data to read is located. There are various options that can be used to influence how the data is read. They can be passed as an already constructed ``Options`` into `options` or as additional keywords that will be used to make one by ``options = Options(**keywords)``. Paths are POSIX style and can either be given directly as ``str`` or ``bytes``, or the separated path can be given as an iterable of ``str`` and ``bytes``. Each part of a separated path is escaped using ``utilities.escape_path``. Otherwise, the path is assumed to be already escaped. Escaping is done so that targets with a part that starts with one or more periods, contain slashes, and/or contain nulls can be used without causing the wrong Group to be looked in or the wrong target to be looked at. It essentially allows one to make a Dataset named ``'..'`` or ``'a/a'`` instead of moving around in the Dataset hierarchy. Parameters ---------- paths : iterable of paths An iterable of paths to read data from. Each must be a POSIX style path where the directory name is the Group to put it in and the basename is the name to write it to. The format of paths is described in the paragraph above. filename : str, optional The name of the HDF5 file to read data from. options : Options, optional The options to use when reading. Is mutually exclusive with any additional keyword arguments given (set to ``None`` or don't provide to use them). **keywords : If `options` was not provided or was ``None``, these are used as arguments to make a ``Options``. Returns ------- datas : iterable An iterable holding the piece of data for each path in `paths` in the same order. Raises ------ exceptions.CantReadError If reading the data can't be done. See Also -------- utilities.process_path utilities.escape_path read : Reads just a single piece of data writes write Options utilities.read_data : Low level version.
[ "Reads", "data", "from", "an", "HDF5", "file", "(", "high", "level", ")", "." ]
python
train
37.436364
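Usage follows directly from the signature above; a sketch assuming a data.h5 previously written with matching paths (the file contents here are hypothetical):

import hdf5storage

# read two items in one open/close of the file; order matches `paths`
a, b = hdf5storage.reads(paths=['/a', '/b'], filename='data.h5')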
krukas/Trionyx
trionyx/navigation.py
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/navigation.py#L238-L266
def register(self, model_alias, code='general', name=None, order=None, display_filter=None):
    """Register new tab

    :param model_alias:
    :param code:
    :param name:
    :param order:
    :return:
    """
    model_alias = self.get_model_alias(model_alias)

    def wrapper(create_layout):
        item = TabItem(
            code=code,
            create_layout=create_layout,
            name=name,
            order=order,
            display_filter=display_filter
        )

        if item in self.tabs[model_alias]:
            raise Exception("Tab {} already registered for model {}".format(code, model_alias))

        self.tabs[model_alias].append(item)
        self.tabs[model_alias] = sorted(self.tabs[model_alias], key=lambda item: item.order if item.order else 999)

        return create_layout
    return wrapper
[ "def", "register", "(", "self", ",", "model_alias", ",", "code", "=", "'general'", ",", "name", "=", "None", ",", "order", "=", "None", ",", "display_filter", "=", "None", ")", ":", "model_alias", "=", "self", ".", "get_model_alias", "(", "model_alias", ")", "def", "wrapper", "(", "create_layout", ")", ":", "item", "=", "TabItem", "(", "code", "=", "code", ",", "create_layout", "=", "create_layout", ",", "name", "=", "name", ",", "order", "=", "order", ",", "display_filter", "=", "display_filter", ")", "if", "item", "in", "self", ".", "tabs", "[", "model_alias", "]", ":", "raise", "Exception", "(", "\"Tab {} already registered for model {}\"", ".", "format", "(", "code", ",", "model_alias", ")", ")", "self", ".", "tabs", "[", "model_alias", "]", ".", "append", "(", "item", ")", "self", ".", "tabs", "[", "model_alias", "]", "=", "sorted", "(", "self", ".", "tabs", "[", "model_alias", "]", ",", "key", "=", "lambda", "item", ":", "item", ".", "order", "if", "item", ".", "order", "else", "999", ")", "return", "create_layout", "return", "wrapper" ]
Register new tab :param model_alias: :param code: :param name: :param order: :return:
[ "Register", "new", "tab" ]
python
train
31.241379
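Since register returns a decorator, layout functions attach to the registry at import time. A runnable sketch of the decorator-with-arguments shape using a tiny stand-in registry (FakeTabs and the model alias are hypothetical, not Trionyx's real registry):

# tiny stand-in registry to exercise the decorator shape
class FakeTabs:
    def __init__(self):
        self.registered = []

    def register(self, model_alias, code='general', name=None, order=None, display_filter=None):
        def wrapper(create_layout):
            self.registered.append((model_alias, code, name, order))
            return create_layout
        return wrapper

tabs = FakeTabs()

@tabs.register('myapp.Project', code='history', name='History', order=10)
def create_history_layout(obj):
    return None

print(tabs.registered)  # [('myapp.Project', 'history', 'History', 10)]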
pmacosta/peng
docs/support/requirements_to_rst.py
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/docs/support/requirements_to_rst.py#L77-L102
def ops_to_words(item):
    """Translate requirement specification to words."""
    unsupp_ops = ["~=", "==="]
    # Ordered for "pleasant" word specification
    supp_ops = [">=", ">", "==", "<=", "<", "!="]
    tokens = sorted(item.split(","), reverse=True)
    actual_tokens = []
    for req in tokens:
        for op in unsupp_ops:
            if req.startswith(op):
                raise RuntimeError("Unsupported version specification: {0}".format(op))
        for op in supp_ops:
            if req.startswith(op):
                actual_tokens.append(op)
                break
        else:
            raise RuntimeError("Illegal comparison operator: {0}".format(op))
    if len(list(set(actual_tokens))) != len(actual_tokens):
        raise RuntimeError("Multiple comparison operators of the same type")
    if "!=" in actual_tokens:
        return (
            " and ".join([op_to_words(token) for token in tokens[:-1]])
            + " "
            + op_to_words(tokens[-1])
        )
    return " and ".join([op_to_words(token) for token in tokens])
[ "def", "ops_to_words", "(", "item", ")", ":", "unsupp_ops", "=", "[", "\"~=\"", ",", "\"===\"", "]", "# Ordered for \"pleasant\" word specification", "supp_ops", "=", "[", "\">=\"", ",", "\">\"", ",", "\"==\"", ",", "\"<=\"", ",", "\"<\"", ",", "\"!=\"", "]", "tokens", "=", "sorted", "(", "item", ".", "split", "(", "\",\"", ")", ",", "reverse", "=", "True", ")", "actual_tokens", "=", "[", "]", "for", "req", "in", "tokens", ":", "for", "op", "in", "unsupp_ops", ":", "if", "req", ".", "startswith", "(", "op", ")", ":", "raise", "RuntimeError", "(", "\"Unsupported version specification: {0}\"", ".", "format", "(", "op", ")", ")", "for", "op", "in", "supp_ops", ":", "if", "req", ".", "startswith", "(", "op", ")", ":", "actual_tokens", ".", "append", "(", "op", ")", "break", "else", ":", "raise", "RuntimeError", "(", "\"Illegal comparison operator: {0}\"", ".", "format", "(", "op", ")", ")", "if", "len", "(", "list", "(", "set", "(", "actual_tokens", ")", ")", ")", "!=", "len", "(", "actual_tokens", ")", ":", "raise", "RuntimeError", "(", "\"Multiple comparison operators of the same type\"", ")", "if", "\"!=\"", "in", "actual_tokens", ":", "return", "(", "\" and \"", ".", "join", "(", "[", "op_to_words", "(", "token", ")", "for", "token", "in", "tokens", "[", ":", "-", "1", "]", "]", ")", "+", "\" \"", "+", "op_to_words", "(", "tokens", "[", "-", "1", "]", ")", ")", "return", "\" and \"", ".", "join", "(", "[", "op_to_words", "(", "token", ")", "for", "token", "in", "tokens", "]", ")" ]
Translate requirement specification to words.
[ "Translate", "requirement", "specification", "to", "words", "." ]
python
test
40
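A minimal usage sketch for ops_to_words, assuming the module's op_to_words helper (defined alongside it) renders a single pinned comparison into words; the import path is taken from this record:

    from requirements_to_rst import ops_to_words  # path assumed

    # Exact wording comes from op_to_words; these calls just exercise the branches.
    print(ops_to_words(">=1.5"))         # single comparison
    print(ops_to_words(">=2.0,<3.0"))    # two pins joined with " and "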
synw/goerr
goerr/messages.py
https://github.com/synw/goerr/blob/08b3809d6715bffe26899a769d96fa5de8573faf/goerr/messages.py#L18-L25
def error(self, i: int=None) -> str: """ Returns an error message """ head = "[" + colors.red("error") + "]" if i is not None: head = str(i) + " " + head return head
[ "def", "error", "(", "self", ",", "i", ":", "int", "=", "None", ")", "->", "str", ":", "head", "=", "\"[\"", "+", "colors", ".", "red", "(", "\"error\"", ")", "+", "\"]\"", "if", "i", "is", "not", "None", ":", "head", "=", "str", "(", "i", ")", "+", "\" \"", "+", "head", "return", "head" ]
Returns an error message
[ "Returns", "an", "error", "message" ]
python
train
27.25
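A short, hypothetical use of this formatter; the enclosing class name (here Msgs) is an assumption, and colors.red wraps the word in ANSI escape codes:

    msgs = Msgs()  # hypothetical instance of the enclosing messages class
    print(msgs.error() + " disk not found")    # -> [error] disk not found (colored)
    print(msgs.error(3) + " disk not found")   # -> 3 [error] disk not found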
archman/beamline
beamline/models.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/models.py#L303-L354
def anoteElements(ax, anotelist, showAccName=False, efilter=None, textypos=None, **kwargs):
    """ annotate elements to axes

    :param ax: matplotlib axes object
    :param anotelist: element annotation object list
    :param showAccName: tag accelerator tubes with their name? default is False,
        which shows the acceleration band type instead,
        e.g. 'S', 'C', 'X', or '[S,C,X]D' for a cavity
    :param efilter: element type filter, default is None, annotate all elements
        could be defined to be one type name or type name list/tuple,
        e.g. efilter='QUAD' or efilter=('QUAD', 'CSRCSBEN')
    :param textypos: y coordinate of annotated text string
    :param kwargs: alpha=0.8, arrowprops=dict(arrowstyle='->'), rotation=-60, fontsize='small'

    return list of annotation objects
    """
    defaultstyle = {'alpha': 0.8, 'arrowprops': dict(arrowstyle='->'),
                    'rotation': -60, 'fontsize': 'small'}
    defaultstyle.update(kwargs)
    anote_list = []
    if efilter is None:
        for anote in anotelist:
            if textypos is None:
                textxypos = tuple(anote['textpos'])
            else:
                textxypos = tuple((anote['textpos'][0], textypos))
            if not showAccName and anote['type'] in ('RFCW', 'RFDF'):
                kwstyle = {k: v for k, v in defaultstyle.items()}
                kwstyle.pop('arrowprops')
                note_text = ax.text(anote['atext']['xypos'][0],
                                    anote['atext']['xypos'][1],
                                    anote['atext']['text'],
                                    **kwstyle)
            else:
                # (newer matplotlib renames annotate's s= keyword to text=)
                note_text = ax.annotate(s=anote['name'], xy=anote['xypos'],
                                        xytext=textxypos,
                                        **defaultstyle)
            anote_list.append(note_text)
    else:
        if not isinstance(efilter, (list, tuple)):
            # Normalise a single type name to a tuple; the original assigned
            # tuple(efilter) to the unused name `filter`, so a string filter
            # was never applied correctly.
            efilter = (efilter,)
        for anote in anotelist:
            if anote['type'] in efilter:
                if textypos is None:
                    textxypos = tuple(anote['textpos'])
                else:
                    textxypos = tuple((anote['textpos'][0], textypos))
                if not showAccName and anote['type'] in ('RFCW', 'RFDF'):
                    kwstyle = {k: v for k, v in defaultstyle.items()}
                    kwstyle.pop('arrowprops')
                    note_text = ax.text(anote['atext']['xypos'][0],
                                        anote['atext']['xypos'][1],
                                        anote['atext']['text'],
                                        **kwstyle)
                else:
                    note_text = ax.annotate(s=anote['name'], xy=anote['xypos'],
                                            xytext=textxypos,
                                            **defaultstyle)
                anote_list.append(note_text)
    return anote_list
[ "def", "anoteElements", "(", "ax", ",", "anotelist", ",", "showAccName", "=", "False", ",", "efilter", "=", "None", ",", "textypos", "=", "None", ",", "*", "*", "kwargs", ")", ":", "defaultstyle", "=", "{", "'alpha'", ":", "0.8", ",", "'arrowprops'", ":", "dict", "(", "arrowstyle", "=", "'->'", ")", ",", "'rotation'", ":", "-", "60", ",", "'fontsize'", ":", "'small'", "}", "defaultstyle", ".", "update", "(", "kwargs", ")", "anote_list", "=", "[", "]", "if", "efilter", "is", "None", ":", "for", "anote", "in", "anotelist", ":", "if", "textypos", "is", "None", ":", "textxypos", "=", "tuple", "(", "anote", "[", "'textpos'", "]", ")", "else", ":", "textxypos", "=", "tuple", "(", "(", "anote", "[", "'textpos'", "]", "[", "0", "]", ",", "textypos", ")", ")", "if", "not", "showAccName", "and", "anote", "[", "'type'", "]", "in", "(", "'RFCW'", ",", "'RFDF'", ")", ":", "kwstyle", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "defaultstyle", ".", "items", "(", ")", "}", "kwstyle", ".", "pop", "(", "'arrowprops'", ")", "note_text", "=", "ax", ".", "text", "(", "anote", "[", "'atext'", "]", "[", "'xypos'", "]", "[", "0", "]", ",", "anote", "[", "'atext'", "]", "[", "'xypos'", "]", "[", "1", "]", ",", "anote", "[", "'atext'", "]", "[", "'text'", "]", ",", "*", "*", "kwstyle", ")", "else", ":", "note_text", "=", "ax", ".", "annotate", "(", "s", "=", "anote", "[", "'name'", "]", ",", "xy", "=", "anote", "[", "'xypos'", "]", ",", "xytext", "=", "textxypos", ",", "*", "*", "defaultstyle", ")", "anote_list", ".", "append", "(", "note_text", ")", "else", ":", "if", "not", "isinstance", "(", "efilter", ",", "tuple", ")", ":", "filter", "=", "tuple", "(", "efilter", ")", "for", "anote", "in", "anotelist", ":", "if", "anote", "[", "'type'", "]", "in", "efilter", ":", "if", "textypos", "is", "None", ":", "textxypos", "=", "tuple", "(", "anote", "[", "'textpos'", "]", ")", "else", ":", "textxypos", "=", "tuple", "(", "(", "anote", "[", "'textpos'", "]", "[", "0", "]", ",", "textypos", ")", ")", "if", "not", "showAccName", "and", "anote", "[", "'type'", "]", "in", "(", "'RFCW'", ",", "'RFDF'", ")", ":", "kwstyle", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "defaultstyle", ".", "items", "(", ")", "}", "kwstyle", ".", "pop", "(", "'arrowprops'", ")", "note_text", "=", "ax", ".", "text", "(", "anote", "[", "'atext'", "]", "[", "'xypos'", "]", "[", "0", "]", ",", "anote", "[", "'atext'", "]", "[", "'xypos'", "]", "[", "1", "]", ",", "anote", "[", "'atext'", "]", "[", "'text'", "]", ",", "*", "*", "kwstyle", ")", "else", ":", "note_text", "=", "ax", ".", "annotate", "(", "s", "=", "anote", "[", "'name'", "]", ",", "xy", "=", "anote", "[", "'xypos'", "]", ",", "xytext", "=", "textxypos", ",", "*", "*", "defaultstyle", ")", "anote_list", ".", "append", "(", "note_text", ")", "return", "anote_list" ]
annotate elements to axes

:param ax: matplotlib axes object
:param anotelist: element annotation object list
:param showAccName: tag accelerator tubes with their name? default is False,
    which shows the acceleration band type instead,
    e.g. 'S', 'C', 'X', or '[S,C,X]D' for a cavity
:param efilter: element type filter, default is None, annotate all elements
    could be defined to be one type name or type name list/tuple,
    e.g. efilter='QUAD' or efilter=('QUAD', 'CSRCSBEN')
:param textypos: y coordinate of annotated text string
:param kwargs: alpha=0.8, arrowprops=dict(arrowstyle='->'), rotation=-60, fontsize='small'

return list of annotation objects
[ "annotate", "elements", "to", "axes", ":", "param", "ax", ":", "matplotlib", "axes", "object", ":", "param", "anotelist", ":", "element", "annotation", "object", "list", ":", "param", "showAccName", ":", "tag", "name", "for", "accelerator", "tubes?", "default", "is", "False", "show", "acceleration", "band", "type", "e", ".", "g", ".", "S", "C", "X", "or", "for", "[", "S", "C", "X", "]", "D", "for", "cavity", ":", "param", "efilter", ":", "element", "type", "filter", "default", "is", "None", "annotate", "all", "elements", "could", "be", "defined", "to", "be", "one", "type", "name", "or", "type", "name", "list", "/", "tuple", "e", ".", "g", ".", "filter", "=", "QUAD", "or", "filter", "=", "(", "QUAD", "CSRCSBEN", ")", ":", "param", "textypos", ":", "y", "coordinator", "of", "annotated", "text", "string", ":", "param", "kwargs", ":", "alpha", "=", "0", ".", "8", "arrowprops", "=", "dict", "(", "arrowstyle", "=", "-", ">", ")", "rotation", "=", "-", "60", "fontsize", "=", "small" ]
python
train
54.346154
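A sketch of calling anoteElements; the anotelist dicts follow the keys the function body reads ('name', 'type', 'xypos', 'textpos', plus 'atext' for RF elements), which is an assumption about the upstream data:

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    anotelist = [
        {'name': 'Q01', 'type': 'QUAD', 'xypos': (1.0, 0.0), 'textpos': (1.0, 0.5)},
        {'name': 'B01', 'type': 'CSRCSBEN', 'xypos': (2.0, 0.0), 'textpos': (2.0, 0.5)},
    ]
    # annotate only the quadrupoles, overriding the default font size
    notes = anoteElements(ax, anotelist, efilter='QUAD', fontsize='x-small')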
callowayproject/Transmogrify
transmogrify/images2gif.py
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/images2gif.py#L977-L991
def palette_image(self): """ PIL weird interface for making a paletted image: create an image which already has the palette, and use that in Image.quantize. This function returns this palette image. """ if self.pimage is None: palette = [] for i in range(self.NETSIZE): palette.extend(self.colormap[i][:3]) palette.extend([0] * (256 - self.NETSIZE) * 3) # a palette image to use for quant self.pimage = Image.new("P", (1, 1), 0) self.pimage.putpalette(palette) return self.pimage
[ "def", "palette_image", "(", "self", ")", ":", "if", "self", ".", "pimage", "is", "None", ":", "palette", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "NETSIZE", ")", ":", "palette", ".", "extend", "(", "self", ".", "colormap", "[", "i", "]", "[", ":", "3", "]", ")", "palette", ".", "extend", "(", "[", "0", "]", "*", "(", "256", "-", "self", ".", "NETSIZE", ")", "*", "3", ")", "# a palette image to use for quant", "self", ".", "pimage", "=", "Image", ".", "new", "(", "\"P\"", ",", "(", "1", ",", "1", ")", ",", "0", ")", "self", ".", "pimage", ".", "putpalette", "(", "palette", ")", "return", "self", ".", "pimage" ]
PIL weird interface for making a paletted image: create an image which already has the palette, and use that in Image.quantize. This function returns this palette image.
[ "PIL", "weird", "interface", "for", "making", "a", "paletted", "image", ":", "create", "an", "image", "which", "already", "has", "the", "palette", "and", "use", "that", "in", "Image", ".", "quantize", ".", "This", "function", "returns", "this", "palette", "image", "." ]
python
train
40.2
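A hedged sketch of the quantization flow this helper supports; quant stands for a fitted instance of the enclosing quantizer class and im for a PIL RGB image, both assumptions:

    pal_image = quant.palette_image()           # 1x1 'P' image carrying the palette
    quantized = im.quantize(palette=pal_image)  # let PIL map pixels onto that palette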
asyncee/django-easy-select2
docs/source/_ext/djangodocs.py
https://github.com/asyncee/django-easy-select2/blob/f81bbaa91d0266029be7ef6d075d85f13273e3a5/docs/source/_ext/djangodocs.py#L364-L403
def visit_console_html(self, node): """Generate HTML for the console directive.""" if self.builder.name in ('djangohtml', 'json') and node['win_console_text']: # Put a mark on the document object signaling the fact the directive # has been used on it. self.document._console_directive_used_flag = True uid = node['uid'] self.body.append('''\ <div class="console-block" id="console-block-%(id)s"> <input class="c-tab-unix" id="c-tab-%(id)s-unix" type="radio" name="console-%(id)s" checked> <label for="c-tab-%(id)s-unix" title="Linux/macOS">&#xf17c/&#xf179</label> <input class="c-tab-win" id="c-tab-%(id)s-win" type="radio" name="console-%(id)s"> <label for="c-tab-%(id)s-win" title="Windows">&#xf17a</label> <section class="c-content-unix" id="c-content-%(id)s-unix">\n''' % {'id': uid}) try: self.visit_literal_block(node) except nodes.SkipNode: pass self.body.append('</section>\n') self.body.append('<section class="c-content-win" id="c-content-%(id)s-win">\n' % {'id': uid}) win_text = node['win_console_text'] highlight_args = {'force': True} if 'linenos' in node: linenos = node['linenos'] else: linenos = win_text.count('\n') >= self.highlightlinenothreshold - 1 def warner(msg): self.builder.warn(msg, (self.builder.current_docname, node.line)) highlighted = self.highlighter.highlight_block( win_text, 'doscon', warn=warner, linenos=linenos, **highlight_args ) self.body.append(highlighted) self.body.append('</section>\n') self.body.append('</div>\n') raise nodes.SkipNode else: self.visit_literal_block(node)
[ "def", "visit_console_html", "(", "self", ",", "node", ")", ":", "if", "self", ".", "builder", ".", "name", "in", "(", "'djangohtml'", ",", "'json'", ")", "and", "node", "[", "'win_console_text'", "]", ":", "# Put a mark on the document object signaling the fact the directive", "# has been used on it.", "self", ".", "document", ".", "_console_directive_used_flag", "=", "True", "uid", "=", "node", "[", "'uid'", "]", "self", ".", "body", ".", "append", "(", "'''\\\n<div class=\"console-block\" id=\"console-block-%(id)s\">\n<input class=\"c-tab-unix\" id=\"c-tab-%(id)s-unix\" type=\"radio\" name=\"console-%(id)s\" checked>\n<label for=\"c-tab-%(id)s-unix\" title=\"Linux/macOS\">&#xf17c/&#xf179</label>\n<input class=\"c-tab-win\" id=\"c-tab-%(id)s-win\" type=\"radio\" name=\"console-%(id)s\">\n<label for=\"c-tab-%(id)s-win\" title=\"Windows\">&#xf17a</label>\n<section class=\"c-content-unix\" id=\"c-content-%(id)s-unix\">\\n'''", "%", "{", "'id'", ":", "uid", "}", ")", "try", ":", "self", ".", "visit_literal_block", "(", "node", ")", "except", "nodes", ".", "SkipNode", ":", "pass", "self", ".", "body", ".", "append", "(", "'</section>\\n'", ")", "self", ".", "body", ".", "append", "(", "'<section class=\"c-content-win\" id=\"c-content-%(id)s-win\">\\n'", "%", "{", "'id'", ":", "uid", "}", ")", "win_text", "=", "node", "[", "'win_console_text'", "]", "highlight_args", "=", "{", "'force'", ":", "True", "}", "if", "'linenos'", "in", "node", ":", "linenos", "=", "node", "[", "'linenos'", "]", "else", ":", "linenos", "=", "win_text", ".", "count", "(", "'\\n'", ")", ">=", "self", ".", "highlightlinenothreshold", "-", "1", "def", "warner", "(", "msg", ")", ":", "self", ".", "builder", ".", "warn", "(", "msg", ",", "(", "self", ".", "builder", ".", "current_docname", ",", "node", ".", "line", ")", ")", "highlighted", "=", "self", ".", "highlighter", ".", "highlight_block", "(", "win_text", ",", "'doscon'", ",", "warn", "=", "warner", ",", "linenos", "=", "linenos", ",", "*", "*", "highlight_args", ")", "self", ".", "body", ".", "append", "(", "highlighted", ")", "self", ".", "body", ".", "append", "(", "'</section>\\n'", ")", "self", ".", "body", ".", "append", "(", "'</div>\\n'", ")", "raise", "nodes", ".", "SkipNode", "else", ":", "self", ".", "visit_literal_block", "(", "node", ")" ]
Generate HTML for the console directive.
[ "Generate", "HTML", "for", "the", "console", "directive", "." ]
python
train
43.4
saltstack/salt
salt/modules/genesis.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/genesis.py#L264-L278
def _mkfs(root, fs_format, fs_opts=None): ''' Make a filesystem using the appropriate module .. versionadded:: Beryllium ''' if fs_opts is None: fs_opts = {} if fs_format in ('ext2', 'ext3', 'ext4'): __salt__['extfs.mkfs'](root, fs_format, **fs_opts) elif fs_format in ('btrfs',): __salt__['btrfs.mkfs'](root, **fs_opts) elif fs_format in ('xfs',): __salt__['xfs.mkfs'](root, **fs_opts)
[ "def", "_mkfs", "(", "root", ",", "fs_format", ",", "fs_opts", "=", "None", ")", ":", "if", "fs_opts", "is", "None", ":", "fs_opts", "=", "{", "}", "if", "fs_format", "in", "(", "'ext2'", ",", "'ext3'", ",", "'ext4'", ")", ":", "__salt__", "[", "'extfs.mkfs'", "]", "(", "root", ",", "fs_format", ",", "*", "*", "fs_opts", ")", "elif", "fs_format", "in", "(", "'btrfs'", ",", ")", ":", "__salt__", "[", "'btrfs.mkfs'", "]", "(", "root", ",", "*", "*", "fs_opts", ")", "elif", "fs_format", "in", "(", "'xfs'", ",", ")", ":", "__salt__", "[", "'xfs.mkfs'", "]", "(", "root", ",", "*", "*", "fs_opts", ")" ]
Make a filesystem using the appropriate module .. versionadded:: Beryllium
[ "Make", "a", "filesystem", "using", "the", "appropriate", "module" ]
python
train
29.2
Erotemic/utool
utool/util_numpy.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_numpy.py#L142-L175
def random_indexes(max_index, subset_size=None, seed=None, rng=None):
    """ random unrepeated indices

    Args:
        max_index (int):
        subset_size (None): (default = None)
        seed (None): (default = None)
        rng (RandomState): random number generator(default = None)

    Returns:
        ndarray: subst

    CommandLine:
        python -m utool.util_numpy --exec-random_indexes

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_numpy import *  # NOQA
        >>> max_index = 10
        >>> subset_size = None
        >>> seed = None
        >>> rng = np.random.RandomState(0)
        >>> subst = random_indexes(max_index, subset_size, seed, rng)
        >>> result = ('subst = %s' % (str(subst),))
        >>> print(result)
    """
    subst_ = np.arange(0, max_index)
    rng = ensure_rng(seed if rng is None else rng)
    rng.shuffle(subst_)
    if subset_size is None:
        subst = subst_
    else:
        subst = subst_[0:min(subset_size, max_index)]
    return subst
[ "def", "random_indexes", "(", "max_index", ",", "subset_size", "=", "None", ",", "seed", "=", "None", ",", "rng", "=", "None", ")", ":", "subst_", "=", "np", ".", "arange", "(", "0", ",", "max_index", ")", "rng", "=", "ensure_rng", "(", "seed", "if", "rng", "is", "None", "else", "rng", ")", "rng", ".", "shuffle", "(", "subst_", ")", "if", "subset_size", "is", "None", ":", "subst", "=", "subst_", "else", ":", "subst", "=", "subst_", "[", "0", ":", "min", "(", "subset_size", ",", "max_index", ")", "]", "return", "subst" ]
random unrepeated indices

Args:
    max_index (int):
    subset_size (None): (default = None)
    seed (None): (default = None)
    rng (RandomState): random number generator(default = None)

Returns:
    ndarray: subst

CommandLine:
    python -m utool.util_numpy --exec-random_indexes

Example:
    >>> # DISABLE_DOCTEST
    >>> from utool.util_numpy import *  # NOQA
    >>> max_index = 10
    >>> subset_size = None
    >>> seed = None
    >>> rng = np.random.RandomState(0)
    >>> subst = random_indexes(max_index, subset_size, seed, rng)
    >>> result = ('subst = %s' % (str(subst),))
    >>> print(result)
[ "random", "unrepeated", "indicies" ]
python
train
28.882353
arogozhnikov/einops
einops/einops.py
https://github.com/arogozhnikov/einops/blob/9698f0f5efa6c5a79daa75253137ba5d79a95615/einops/einops.py#L191-L254
def parse_expression(expression: str) -> Tuple[Set[str], List[CompositeAxis]]: """ Parses an indexing expression (for a single tensor). Checks uniqueness of names, checks usage of '...' (allowed only once) Returns set of all used identifiers and a list of axis groups """ identifiers = set() composite_axes = [] if '.' in expression: if '...' not in expression: raise EinopsError('Expression may contain dots only inside ellipsis (...)') if str.count(expression, '...') != 1 or str.count(expression, '.') != 3: raise EinopsError('Expression may contain dots only inside ellipsis (...); only one ellipsis for tensor ') expression = expression.replace('...', _ellipsis) bracket_group = None def add_axis_name(x): if x is not None: if x in identifiers: raise ValueError('Indexing expression contains duplicate dimension "{}"'.format(x)) identifiers.add(x) if bracket_group is None: composite_axes.append([x]) else: bracket_group.append(x) current_identifier = None for char in expression: if char in '() ' + _ellipsis: add_axis_name(current_identifier) current_identifier = None if char == _ellipsis: if bracket_group is not None: raise EinopsError("Ellipsis can't be used inside the composite axis (inside brackets)") composite_axes.append(_ellipsis) identifiers.add(_ellipsis) elif char == '(': if bracket_group is not None: raise EinopsError("Axis composition is one-level (brackets inside brackets not allowed)") bracket_group = [] elif char == ')': if bracket_group is None: raise EinopsError('Brackets are not balanced') composite_axes.append(bracket_group) bracket_group = None elif '0' <= char <= '9': if current_identifier is None: raise EinopsError("Axis name can't start with a digit") current_identifier += char elif 'a' <= char <= 'z': if current_identifier is None: current_identifier = char else: current_identifier += char else: if 'A' <= char <= 'Z': raise EinopsError("Only lower-case latin letters allowed in names, not '{}'".format(char)) raise EinopsError("Unknown character '{}'".format(char)) if bracket_group is not None: raise EinopsError('Imbalanced parentheses in expression: "{}"'.format(expression)) add_axis_name(current_identifier) return identifiers, composite_axes
[ "def", "parse_expression", "(", "expression", ":", "str", ")", "->", "Tuple", "[", "Set", "[", "str", "]", ",", "List", "[", "CompositeAxis", "]", "]", ":", "identifiers", "=", "set", "(", ")", "composite_axes", "=", "[", "]", "if", "'.'", "in", "expression", ":", "if", "'...'", "not", "in", "expression", ":", "raise", "EinopsError", "(", "'Expression may contain dots only inside ellipsis (...)'", ")", "if", "str", ".", "count", "(", "expression", ",", "'...'", ")", "!=", "1", "or", "str", ".", "count", "(", "expression", ",", "'.'", ")", "!=", "3", ":", "raise", "EinopsError", "(", "'Expression may contain dots only inside ellipsis (...); only one ellipsis for tensor '", ")", "expression", "=", "expression", ".", "replace", "(", "'...'", ",", "_ellipsis", ")", "bracket_group", "=", "None", "def", "add_axis_name", "(", "x", ")", ":", "if", "x", "is", "not", "None", ":", "if", "x", "in", "identifiers", ":", "raise", "ValueError", "(", "'Indexing expression contains duplicate dimension \"{}\"'", ".", "format", "(", "x", ")", ")", "identifiers", ".", "add", "(", "x", ")", "if", "bracket_group", "is", "None", ":", "composite_axes", ".", "append", "(", "[", "x", "]", ")", "else", ":", "bracket_group", ".", "append", "(", "x", ")", "current_identifier", "=", "None", "for", "char", "in", "expression", ":", "if", "char", "in", "'() '", "+", "_ellipsis", ":", "add_axis_name", "(", "current_identifier", ")", "current_identifier", "=", "None", "if", "char", "==", "_ellipsis", ":", "if", "bracket_group", "is", "not", "None", ":", "raise", "EinopsError", "(", "\"Ellipsis can't be used inside the composite axis (inside brackets)\"", ")", "composite_axes", ".", "append", "(", "_ellipsis", ")", "identifiers", ".", "add", "(", "_ellipsis", ")", "elif", "char", "==", "'('", ":", "if", "bracket_group", "is", "not", "None", ":", "raise", "EinopsError", "(", "\"Axis composition is one-level (brackets inside brackets not allowed)\"", ")", "bracket_group", "=", "[", "]", "elif", "char", "==", "')'", ":", "if", "bracket_group", "is", "None", ":", "raise", "EinopsError", "(", "'Brackets are not balanced'", ")", "composite_axes", ".", "append", "(", "bracket_group", ")", "bracket_group", "=", "None", "elif", "'0'", "<=", "char", "<=", "'9'", ":", "if", "current_identifier", "is", "None", ":", "raise", "EinopsError", "(", "\"Axis name can't start with a digit\"", ")", "current_identifier", "+=", "char", "elif", "'a'", "<=", "char", "<=", "'z'", ":", "if", "current_identifier", "is", "None", ":", "current_identifier", "=", "char", "else", ":", "current_identifier", "+=", "char", "else", ":", "if", "'A'", "<=", "char", "<=", "'Z'", ":", "raise", "EinopsError", "(", "\"Only lower-case latin letters allowed in names, not '{}'\"", ".", "format", "(", "char", ")", ")", "raise", "EinopsError", "(", "\"Unknown character '{}'\"", ".", "format", "(", "char", ")", ")", "if", "bracket_group", "is", "not", "None", ":", "raise", "EinopsError", "(", "'Imbalanced parentheses in expression: \"{}\"'", ".", "format", "(", "expression", ")", ")", "add_axis_name", "(", "current_identifier", ")", "return", "identifiers", ",", "composite_axes" ]
Parses an indexing expression (for a single tensor). Checks uniqueness of names, checks usage of '...' (allowed only once) Returns set of all used identifiers and a list of axis groups
[ "Parses", "an", "indexing", "expression", "(", "for", "a", "single", "tensor", ")", ".", "Checks", "uniqueness", "of", "names", "checks", "usage", "of", "...", "(", "allowed", "only", "once", ")", "Returns", "set", "of", "all", "used", "identifiers", "and", "a", "list", "of", "axis", "groups" ]
python
train
43.296875
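For example, on a composed pattern the parser splits plain names and bracket groups (traceable directly from the code above):

    identifiers, composite_axes = parse_expression('b (h w) c')
    # identifiers    == {'b', 'h', 'w', 'c'}
    # composite_axes == [['b'], ['h', 'w'], ['c']]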
andreikop/qutepart
qutepart/completer.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/completer.py#L408-L433
def invokeCompletionIfAvailable(self, requestedByUser=False): """Invoke completion, if available. Called after text has been typed in qpart Returns True, if invoked """ if self._qpart.completionEnabled and self._wordSet is not None: wordBeforeCursor = self._wordBeforeCursor() wholeWord = wordBeforeCursor + self._wordAfterCursor() forceShow = requestedByUser or self._completionOpenedManually if wordBeforeCursor: if len(wordBeforeCursor) >= self._qpart.completionThreshold or forceShow: if self._widget is None: model = _CompletionModel(self._wordSet) model.setData(wordBeforeCursor, wholeWord) if self._shouldShowModel(model, forceShow): self._createWidget(model) return True else: self._widget.model().setData(wordBeforeCursor, wholeWord) if self._shouldShowModel(self._widget.model(), forceShow): self._widget.updateGeometry() return True self._closeCompletion() return False
[ "def", "invokeCompletionIfAvailable", "(", "self", ",", "requestedByUser", "=", "False", ")", ":", "if", "self", ".", "_qpart", ".", "completionEnabled", "and", "self", ".", "_wordSet", "is", "not", "None", ":", "wordBeforeCursor", "=", "self", ".", "_wordBeforeCursor", "(", ")", "wholeWord", "=", "wordBeforeCursor", "+", "self", ".", "_wordAfterCursor", "(", ")", "forceShow", "=", "requestedByUser", "or", "self", ".", "_completionOpenedManually", "if", "wordBeforeCursor", ":", "if", "len", "(", "wordBeforeCursor", ")", ">=", "self", ".", "_qpart", ".", "completionThreshold", "or", "forceShow", ":", "if", "self", ".", "_widget", "is", "None", ":", "model", "=", "_CompletionModel", "(", "self", ".", "_wordSet", ")", "model", ".", "setData", "(", "wordBeforeCursor", ",", "wholeWord", ")", "if", "self", ".", "_shouldShowModel", "(", "model", ",", "forceShow", ")", ":", "self", ".", "_createWidget", "(", "model", ")", "return", "True", "else", ":", "self", ".", "_widget", ".", "model", "(", ")", ".", "setData", "(", "wordBeforeCursor", ",", "wholeWord", ")", "if", "self", ".", "_shouldShowModel", "(", "self", ".", "_widget", ".", "model", "(", ")", ",", "forceShow", ")", ":", "self", ".", "_widget", ".", "updateGeometry", "(", ")", "return", "True", "self", ".", "_closeCompletion", "(", ")", "return", "False" ]
Invoke completion, if available. Called after text has been typed in qpart Returns True, if invoked
[ "Invoke", "completion", "if", "available", ".", "Called", "after", "text", "has", "been", "typed", "in", "qpart", "Returns", "True", "if", "invoked" ]
python
train
47.769231
edx/edx-django-utils
edx_django_utils/cache/utils.py
https://github.com/edx/edx-django-utils/blob/16cb4ac617e53c572bf68ccd19d24afeff1ca769/edx_django_utils/cache/utils.py#L240-L250
def _set_request_cache_if_django_cache_hit(key, django_cached_response): """ Sets the value in the request cache if the django cached response was a hit. Args: key (string) django_cached_response (CachedResponse) """ if django_cached_response.is_found: DEFAULT_REQUEST_CACHE.set(key, django_cached_response.value)
[ "def", "_set_request_cache_if_django_cache_hit", "(", "key", ",", "django_cached_response", ")", ":", "if", "django_cached_response", ".", "is_found", ":", "DEFAULT_REQUEST_CACHE", ".", "set", "(", "key", ",", "django_cached_response", ".", "value", ")" ]
Sets the value in the request cache if the django cached response was a hit. Args: key (string) django_cached_response (CachedResponse)
[ "Sets", "the", "value", "in", "the", "request", "cache", "if", "the", "django", "cached", "response", "was", "a", "hit", "." ]
python
train
34.636364
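A sketch of the tiered-read pattern this private helper serves: after the Django-cache lookup step, a hit is copied into the request cache so later reads in the same request stay local. The variable `cached` here stands for the CachedResponse produced by that lookup, an assumption about the caller:

    # `cached` is the CachedResponse from the preceding Django-cache lookup.
    _set_request_cache_if_django_cache_hit(key, cached)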
pantsbuild/pants
src/python/pants/backend/jvm/tasks/jvm_dependency_usage.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/jvm_dependency_usage.py#L422-L443
def to_json(self): """Outputs the entire graph.""" res_dict = {} def gen_dep_edge(node, edge, dep_tgt, aliases): return { 'target': dep_tgt.address.spec, 'dependency_type': self._edge_type(node.concrete_target, edge, dep_tgt), 'products_used': len(edge.products_used), 'products_used_ratio': self._used_ratio(dep_tgt, edge), 'aliases': [alias.address.spec for alias in aliases], } for node in self._nodes.values(): res_dict[node.concrete_target.address.spec] = { 'cost': self._cost(node.concrete_target), 'cost_transitive': self._trans_cost(node.concrete_target), 'products_total': node.products_total, 'dependencies': [gen_dep_edge(node, edge, dep_tgt, node.dep_aliases.get(dep_tgt, {})) for dep_tgt, edge in node.dep_edges.items()], } yield str(json.dumps(res_dict, indent=2, sort_keys=True))
[ "def", "to_json", "(", "self", ")", ":", "res_dict", "=", "{", "}", "def", "gen_dep_edge", "(", "node", ",", "edge", ",", "dep_tgt", ",", "aliases", ")", ":", "return", "{", "'target'", ":", "dep_tgt", ".", "address", ".", "spec", ",", "'dependency_type'", ":", "self", ".", "_edge_type", "(", "node", ".", "concrete_target", ",", "edge", ",", "dep_tgt", ")", ",", "'products_used'", ":", "len", "(", "edge", ".", "products_used", ")", ",", "'products_used_ratio'", ":", "self", ".", "_used_ratio", "(", "dep_tgt", ",", "edge", ")", ",", "'aliases'", ":", "[", "alias", ".", "address", ".", "spec", "for", "alias", "in", "aliases", "]", ",", "}", "for", "node", "in", "self", ".", "_nodes", ".", "values", "(", ")", ":", "res_dict", "[", "node", ".", "concrete_target", ".", "address", ".", "spec", "]", "=", "{", "'cost'", ":", "self", ".", "_cost", "(", "node", ".", "concrete_target", ")", ",", "'cost_transitive'", ":", "self", ".", "_trans_cost", "(", "node", ".", "concrete_target", ")", ",", "'products_total'", ":", "node", ".", "products_total", ",", "'dependencies'", ":", "[", "gen_dep_edge", "(", "node", ",", "edge", ",", "dep_tgt", ",", "node", ".", "dep_aliases", ".", "get", "(", "dep_tgt", ",", "{", "}", ")", ")", "for", "dep_tgt", ",", "edge", "in", "node", ".", "dep_edges", ".", "items", "(", ")", "]", ",", "}", "yield", "str", "(", "json", ".", "dumps", "(", "res_dict", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ")", ")" ]
Outputs the entire graph.
[ "Outputs", "the", "entire", "graph", "." ]
python
train
41.636364
Kortemme-Lab/klab
klab/db/mysql.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/db/mysql.py#L327-L347
def run_transaction(self, command_list, do_commit=True):
    '''This can be used to stage multiple commands and roll back the transaction if an error occurs.
    This is useful if you want to remove multiple records in multiple tables for one entity but do not
    want the deletion to occur if the entity is tied to a table not specified in the list of commands.
    Performing this as a transaction avoids the situation where the records are partially removed.
    If do_commit is false, the entire transaction is cancelled.'''
    pass
    # I decided against creating this for now.
    # It may be more useful to create a stored procedure like in e.g. _create_protein_deletion_stored_procedure
    # in the DDGadmin project and then use callproc
    # NOTE: the statements below only validate input and build `sql`; nothing is ever executed.
    for c in command_list:
        if c.find(";") != -1 or c.find("\\G") != -1:
            # Catches *some* injections
            raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % c)
    if do_commit:
        sql = "START TRANSACTION;\n%s;\nCOMMIT" % "\n".join(command_list)
    else:
        sql = "START TRANSACTION;\n%s;" % "\n".join(command_list)
    #print(sql)
    return
[ "def", "run_transaction", "(", "self", ",", "command_list", ",", "do_commit", "=", "True", ")", ":", "pass", "# I decided against creating this for now.", "# It may be more useful to create a stored procedure like in e.g. _create_protein_deletion_stored_procedure", "# in the DDGadmin project and then use callproc", "for", "c", "in", "command_list", ":", "if", "c", ".", "find", "(", "\";\"", ")", "!=", "-", "1", "or", "c", ".", "find", "(", "\"\\\\G\"", ")", "!=", "-", "1", ":", "# Catches *some* injections", "raise", "Exception", "(", "\"The SQL command '%s' contains a semi-colon or \\\\G. This is a potential SQL injection.\"", "%", "c", ")", "if", "do_commit", ":", "sql", "=", "\"START TRANSACTION;\\n%s;\\nCOMMIT\"", "%", "\"\\n\"", ".", "join", "(", "command_list", ")", "else", ":", "sql", "=", "\"START TRANSACTION;\\n%s;\"", "%", "\"\\n\"", ".", "join", "(", "command_list", ")", "#print(sql)", "return" ]
This can be used to stage multiple commands and roll back the transaction if an error occurs. This is useful
if you want to remove multiple records in multiple tables for one entity but do not want the deletion to occur
if the entity is tied to a table not specified in the list of commands. Performing this as a transaction avoids
the situation where the records are partially removed. If do_commit is false, the entire transaction is cancelled.
[ "This", "can", "be", "used", "to", "stage", "multiple", "commands", "and", "roll", "back", "the", "transaction", "if", "an", "error", "occurs", ".", "This", "is", "useful", "if", "you", "want", "to", "remove", "multiple", "records", "in", "multiple", "tables", "for", "one", "entity", "but", "do", "not", "want", "the", "deletion", "to", "occur", "if", "the", "entity", "is", "tied", "to", "table", "not", "specified", "in", "the", "list", "of", "commands", ".", "Performing", "this", "as", "a", "transaction", "avoids", "the", "situation", "where", "the", "records", "are", "partially", "removed", ".", "If", "do_commit", "is", "false", "the", "entire", "transaction", "is", "cancelled", "." ]
python
train
59.285714
koordinates/python-client
koordinates/base.py
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/base.py#L492-L505
def _serialize_value(self, value): """ Called by :py:meth:`._serialize` to serialise an individual value. """ if isinstance(value, (list, tuple, set)): return [self._serialize_value(v) for v in value] elif isinstance(value, dict): return dict([(k, self._serialize_value(v)) for k, v in value.items()]) elif isinstance(value, ModelBase): return value._serialize() elif isinstance(value, datetime.date): # includes datetime.datetime return value.isoformat() else: return value
[ "def", "_serialize_value", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", ":", "return", "[", "self", ".", "_serialize_value", "(", "v", ")", "for", "v", "in", "value", "]", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "dict", "(", "[", "(", "k", ",", "self", ".", "_serialize_value", "(", "v", ")", ")", "for", "k", ",", "v", "in", "value", ".", "items", "(", ")", "]", ")", "elif", "isinstance", "(", "value", ",", "ModelBase", ")", ":", "return", "value", ".", "_serialize", "(", ")", "elif", "isinstance", "(", "value", ",", "datetime", ".", "date", ")", ":", "# includes datetime.datetime", "return", "value", ".", "isoformat", "(", ")", "else", ":", "return", "value" ]
Called by :py:meth:`._serialize` to serialise an individual value.
[ "Called", "by", ":", "py", ":", "meth", ":", ".", "_serialize", "to", "serialise", "an", "individual", "value", "." ]
python
train
41.857143
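The recursion is easiest to see on a mixed payload; obj stands for any ModelBase instance (the method is internal, so this is illustrative only):

    import datetime

    payload = obj._serialize_value({
        'when': datetime.date(2020, 1, 1),   # date -> ISO string
        'tags': ('a', 'b'),                  # tuple -> list
    })
    # -> {'when': '2020-01-01', 'tags': ['a', 'b']}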
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L7711-L7732
def latcyl(radius, lon, lat):
    """
    Convert from latitudinal coordinates to cylindrical coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latcyl_c.html

    :param radius: Distance of a point from the origin.
    :type radius: float
    :param lon: Angle of the point from the XZ plane in radians.
    :type lon: float
    :param lat: Angle of the point from the XY plane in radians.
    :type lat: float
    :return: (r, lonc, z)
    :rtype: tuple
    """
    radius = ctypes.c_double(radius)
    lon = ctypes.c_double(lon)
    lat = ctypes.c_double(lat)
    r = ctypes.c_double()
    lonc = ctypes.c_double()
    z = ctypes.c_double()
    libspice.latcyl_c(radius, lon, lat, ctypes.byref(r), ctypes.byref(lonc), ctypes.byref(z))
    return r.value, lonc.value, z.value
[ "def", "latcyl", "(", "radius", ",", "lon", ",", "lat", ")", ":", "radius", "=", "ctypes", ".", "c_double", "(", "radius", ")", "lon", "=", "ctypes", ".", "c_double", "(", "lon", ")", "lat", "=", "ctypes", ".", "c_double", "(", "lat", ")", "r", "=", "ctypes", ".", "c_double", "(", ")", "lonc", "=", "ctypes", ".", "c_double", "(", ")", "z", "=", "ctypes", ".", "c_double", "(", ")", "libspice", ".", "latcyl_c", "(", "radius", ",", "lon", ",", "lat", ",", "ctypes", ".", "byref", "(", "r", ")", ",", "ctypes", ".", "byref", "(", "lonc", ")", ",", "ctypes", ".", "byref", "(", "z", ")", ")", "return", "r", ".", "value", ",", "lonc", ".", "value", ",", "z", ".", "value" ]
Convert from latitudinal coordinates to cylindrical coordinates.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latcyl_c.html

:param radius: Distance of a point from the origin.
:type radius: float
:param lon: Angle of the point from the XZ plane in radians.
:type lon: float
:param lat: Angle of the point from the XY plane in radians.
:type lat: float
:return: (r, lonc, z)
:rtype: tuple
[ "Convert", "from", "latitudinal", "coordinates", "to", "cylindrical", "coordinates", "." ]
python
train
34.227273
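Per the linked CSPICE docs, the cylindrical radius is radius*cos(lat) and the height is radius*sin(lat), with the longitude carried through; a quick numeric sketch:

    import math
    import spiceypy

    r, lonc, z = spiceypy.latcyl(1.0, math.radians(30.0), math.radians(45.0))
    # r ~= cos(45 deg) ~= 0.7071, lonc == math.radians(30.0), z ~= sin(45 deg) ~= 0.7071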
mabuchilab/QNET
src/qnet/algebra/_rules.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/_rules.py#L965-L971
def _pull_out_perm_rhs(rest, rhs, out_port, in_port): """Similar to :func:`_pull_out_perm_lhs` but on the RHS of a series product self-feedback.""" in_im, rhs_red = rhs._factor_rhs(in_port) return (Feedback.create( SeriesProduct.create(*rest), out_port=out_port, in_port=in_im) << rhs_red)
[ "def", "_pull_out_perm_rhs", "(", "rest", ",", "rhs", ",", "out_port", ",", "in_port", ")", ":", "in_im", ",", "rhs_red", "=", "rhs", ".", "_factor_rhs", "(", "in_port", ")", "return", "(", "Feedback", ".", "create", "(", "SeriesProduct", ".", "create", "(", "*", "rest", ")", ",", "out_port", "=", "out_port", ",", "in_port", "=", "in_im", ")", "<<", "rhs_red", ")" ]
Similar to :func:`_pull_out_perm_lhs` but on the RHS of a series product self-feedback.
[ "Similar", "to", ":", "func", ":", "_pull_out_perm_lhs", "but", "on", "the", "RHS", "of", "a", "series", "product", "self", "-", "feedback", "." ]
python
train
47.285714
PyCQA/astroid
astroid/node_classes.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/node_classes.py#L4558-L4572
def postinit(self, value, conversion=None, format_spec=None): """Do some setup after initialisation. :param value: The value to be formatted into the string. :type value: NodeNG :param conversion: The type of formatting to be applied to the value. :type conversion: int or None :param format_spec: The formatting to be applied to the value. :type format_spec: JoinedStr or None """ self.value = value self.conversion = conversion self.format_spec = format_spec
[ "def", "postinit", "(", "self", ",", "value", ",", "conversion", "=", "None", ",", "format_spec", "=", "None", ")", ":", "self", ".", "value", "=", "value", "self", ".", "conversion", "=", "conversion", "self", ".", "format_spec", "=", "format_spec" ]
Do some setup after initialisation. :param value: The value to be formatted into the string. :type value: NodeNG :param conversion: The type of formatting to be applied to the value. :type conversion: int or None :param format_spec: The formatting to be applied to the value. :type format_spec: JoinedStr or None
[ "Do", "some", "setup", "after", "initialisation", "." ]
python
train
35.8
assamite/creamas
creamas/examples/spiro/spiro.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro.py#L39-L53
def spiro_image(R, r, r_, resolution=2*PI/1000, spins=50, size=[32, 32]):
    '''Create image with given Spirograph parameters using numpy and scipy.
    '''
    # NOTE: the outer radius and the number of spins are currently hardcoded
    # to 200 and 20; the R, resolution and spins arguments are not forwarded
    # to give_dots, and the 500x500 canvas below assumes that scale.
    x, y = give_dots(200, r, r_, spins=20)
    xy = np.array([x, y]).T
    xy = np.array(np.around(xy), dtype=np.int64)
    # Keep only the dots inside the canvas, then shift from [-250, 250) to [0, 500).
    xy = xy[(xy[:, 0] >= -250) & (xy[:, 1] >= -250) &
            (xy[:, 0] < 250) & (xy[:, 1] < 250)]
    xy = xy + 250
    img = np.ones([500, 500], dtype=np.uint8)
    img[:] = 255
    img[xy[:, 0], xy[:, 1]] = 0
    # scipy.misc.imresize is deprecated in newer SciPy releases.
    img = misc.imresize(img, size)
    fimg = img / 255.0
    return fimg
[ "def", "spiro_image", "(", "R", ",", "r", ",", "r_", ",", "resolution", "=", "2", "*", "PI", "/", "1000", ",", "spins", "=", "50", ",", "size", "=", "[", "32", ",", "32", "]", ")", ":", "x", ",", "y", "=", "give_dots", "(", "200", ",", "r", ",", "r_", ",", "spins", "=", "20", ")", "xy", "=", "np", ".", "array", "(", "[", "x", ",", "y", "]", ")", ".", "T", "xy", "=", "np", ".", "array", "(", "np", ".", "around", "(", "xy", ")", ",", "dtype", "=", "np", ".", "int64", ")", "xy", "=", "xy", "[", "(", "xy", "[", ":", ",", "0", "]", ">=", "-", "250", ")", "&", "(", "xy", "[", ":", ",", "1", "]", ">=", "-", "250", ")", "&", "(", "xy", "[", ":", ",", "0", "]", "<", "250", ")", "&", "(", "xy", "[", ":", ",", "1", "]", "<", "250", ")", "]", "xy", "=", "xy", "+", "250", "img", "=", "np", ".", "ones", "(", "[", "500", ",", "500", "]", ",", "dtype", "=", "np", ".", "uint8", ")", "img", "[", ":", "]", "=", "255", "img", "[", "xy", "[", ":", ",", "0", "]", ",", "xy", "[", ":", ",", "1", "]", "]", "=", "0", "img", "=", "misc", ".", "imresize", "(", "img", ",", "size", ")", "fimg", "=", "img", "/", "255.0", "return", "fimg" ]
Create image with given Spirograph parameters using numpy and scipy.
[ "Create", "image", "with", "given", "Spirograph", "parameters", "using", "numpy", "and", "scipy", "." ]
python
train
36.866667
scopely-devops/skew
skew/arn/__init__.py
https://github.com/scopely-devops/skew/blob/e90d5e2220b2284502a06430bb94b4aba9ea60db/skew/arn/__init__.py#L53-L71
def match(self, pattern, context=None): """ This method returns a (possibly empty) list of strings that match the regular expression ``pattern`` provided. You can also provide a ``context`` as described above. This method calls ``choices`` to get a list of all possible choices and then filters the list by performing a regular expression search on each choice using the supplied ``pattern``. """ matches = [] regex = pattern if regex == '*': regex = '.*' regex = re.compile(regex) for choice in self.choices(context): if regex.search(choice): matches.append(choice) return matches
[ "def", "match", "(", "self", ",", "pattern", ",", "context", "=", "None", ")", ":", "matches", "=", "[", "]", "regex", "=", "pattern", "if", "regex", "==", "'*'", ":", "regex", "=", "'.*'", "regex", "=", "re", ".", "compile", "(", "regex", ")", "for", "choice", "in", "self", ".", "choices", "(", "context", ")", ":", "if", "regex", ".", "search", "(", "choice", ")", ":", "matches", ".", "append", "(", "choice", ")", "return", "matches" ]
This method returns a (possibly empty) list of strings that match the regular expression ``pattern`` provided. You can also provide a ``context`` as described above. This method calls ``choices`` to get a list of all possible choices and then filters the list by performing a regular expression search on each choice using the supplied ``pattern``.
[ "This", "method", "returns", "a", "(", "possibly", "empty", ")", "list", "of", "strings", "that", "match", "the", "regular", "expression", "pattern", "provided", ".", "You", "can", "also", "provide", "a", "context", "as", "described", "above", "." ]
python
train
37.736842
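Assuming component is an instance of this class, the filtering reads as a regex search over choices():

    component.match('^us-')   # choices beginning with 'us-'
    component.match('*')      # '*' is rewritten to '.*', so every choice matches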
developersociety/django-glitter
glitter/reminders/admin.py
https://github.com/developersociety/django-glitter/blob/2c0280ec83afee80deee94ee3934fc54239c2e87/glitter/reminders/admin.py#L15-L20
def get_formset(self, request, obj=None, **kwargs): """ Default user to the current version owner. """ data = super().get_formset(request, obj, **kwargs) if obj: data.form.base_fields['user'].initial = request.user.id return data
[ "def", "get_formset", "(", "self", ",", "request", ",", "obj", "=", "None", ",", "*", "*", "kwargs", ")", ":", "data", "=", "super", "(", ")", ".", "get_formset", "(", "request", ",", "obj", ",", "*", "*", "kwargs", ")", "if", "obj", ":", "data", ".", "form", ".", "base_fields", "[", "'user'", "]", ".", "initial", "=", "request", ".", "user", ".", "id", "return", "data" ]
Default user to the current version owner.
[ "Default", "user", "to", "the", "current", "version", "owner", "." ]
python
train
44.666667
mpenning/polymer
polymer/Polymer.py
https://github.com/mpenning/polymer/blob/1cdf4ed2573c894bde9d398fa173816b6b47e9f3/polymer/Polymer.py#L73-L161
def r_q_send(self, msg_dict):
    """Send message dicts through r_q, and throw explicit errors for pickle
    problems"""

    # Check whether msg_dict can be pickled...
    no_pickle_keys = self.invalid_dict_pickle_keys(msg_dict)
    if no_pickle_keys == []:
        self.r_q.put(msg_dict)

    else:
        ## Explicit pickle error handling
        hash_func = md5()
        # str() must be encoded to bytes before hashing under Python 3.
        hash_func.update(str(msg_dict).encode("utf-8"))
        dict_hash = str(hash_func.hexdigest())[-7:]  # Last 7 digits of hash
        linesep = os.linesep
        sys.stderr.write(
            "{0} {1}r_q_send({2}) Can't pickle this dict:{3} '''{7}{4} {5}{7}{6}''' {7}".format(
                datetime.now(),
                Style.BRIGHT,
                dict_hash,
                Style.RESET_ALL,
                Fore.MAGENTA,
                msg_dict,
                Style.RESET_ALL,
                linesep,
            )
        )

        ## Verbose list of the offending key(s) / object attrs
        ## Send all output to stderr...
        err_frag1 = (
            Style.BRIGHT
            + " r_q_send({0}) Offending dict keys:".format(dict_hash)
            + Style.RESET_ALL
        )
        err_frag2 = Fore.YELLOW + " {0}".format(no_pickle_keys) + Style.RESET_ALL
        err_frag3 = "{0}".format(linesep)
        sys.stderr.write(err_frag1 + err_frag2 + err_frag3)
        for key in sorted(no_pickle_keys):
            sys.stderr.write(
                " msg_dict['{0}']: {1}'{2}'{3}{4}".format(
                    key,
                    Fore.MAGENTA,
                    repr(msg_dict.get(key)),
                    Style.RESET_ALL,
                    linesep,
                )
            )
            # Every Python value is an object, so this branch always runs.
            if isinstance(msg_dict.get(key), object):
                thisobj = msg_dict.get(key)
                no_pickle_attrs = self.invalid_obj_pickle_attrs(thisobj)
                err_frag1 = (
                    Style.BRIGHT
                    + " r_q_send({0}) Offending attrs:".format(dict_hash)
                    + Style.RESET_ALL
                )
                err_frag2 = (
                    Fore.YELLOW + " {0}".format(no_pickle_attrs) + Style.RESET_ALL
                )
                err_frag3 = "{0}".format(linesep)
                sys.stderr.write(err_frag1 + err_frag2 + err_frag3)
                for attr in no_pickle_attrs:
                    sys.stderr.write(
                        " msg_dict['{0}'].{1}: {2}'{3}'{4}{5}".format(
                            key,
                            attr,
                            Fore.RED,
                            repr(getattr(thisobj, attr)),
                            Style.RESET_ALL,
                            linesep,
                        )
                    )
        sys.stderr.write(
            " {0}r_q_send({1}) keys (no problems):{2}{3}".format(
                Style.BRIGHT, dict_hash, Style.RESET_ALL, linesep
            )
        )
        for key in sorted(set(msg_dict.keys()).difference(no_pickle_keys)):
            sys.stderr.write(
                " msg_dict['{0}']: {1}{2}{3}{4}".format(
                    key,
                    Fore.GREEN,
                    repr(msg_dict.get(key)),
                    Style.RESET_ALL,
                    linesep,
                )
            )
[ "def", "r_q_send", "(", "self", ",", "msg_dict", ")", ":", "# Check whether msg_dict can be pickled...", "no_pickle_keys", "=", "self", ".", "invalid_dict_pickle_keys", "(", "msg_dict", ")", "if", "no_pickle_keys", "==", "[", "]", ":", "self", ".", "r_q", ".", "put", "(", "msg_dict", ")", "else", ":", "## Explicit pickle error handling", "hash_func", "=", "md5", "(", ")", "hash_func", ".", "update", "(", "str", "(", "msg_dict", ")", ")", "dict_hash", "=", "str", "(", "hash_func", ".", "hexdigest", "(", ")", ")", "[", "-", "7", ":", "]", "# Last 7 digits of hash", "linesep", "=", "os", ".", "linesep", "sys", ".", "stderr", ".", "write", "(", "\"{0} {1}r_q_send({2}) Can't pickle this dict:{3} '''{7}{4} {5}{7}{6}''' {7}\"", ".", "format", "(", "datetime", ".", "now", "(", ")", ",", "Style", ".", "BRIGHT", ",", "dict_hash", ",", "Style", ".", "RESET_ALL", ",", "Fore", ".", "MAGENTA", ",", "msg_dict", ",", "Style", ".", "RESET_ALL", ",", "linesep", ",", ")", ")", "## Verbose list of the offending key(s) / object attrs", "## Send all output to stderr...", "err_frag1", "=", "(", "Style", ".", "BRIGHT", "+", "\" r_q_send({0}) Offending dict keys:\"", ".", "format", "(", "dict_hash", ")", "+", "Style", ".", "RESET_ALL", ")", "err_frag2", "=", "Fore", ".", "YELLOW", "+", "\" {0}\"", ".", "format", "(", "no_pickle_keys", ")", "+", "Style", ".", "RESET_ALL", "err_frag3", "=", "\"{0}\"", ".", "format", "(", "linesep", ")", "sys", ".", "stderr", ".", "write", "(", "err_frag1", "+", "err_frag2", "+", "err_frag3", ")", "for", "key", "in", "sorted", "(", "no_pickle_keys", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\" msg_dict['{0}']: {1}'{2}'{3}{4}\"", ".", "format", "(", "key", ",", "Fore", ".", "MAGENTA", ",", "repr", "(", "msg_dict", ".", "get", "(", "key", ")", ")", ",", "Style", ".", "RESET_ALL", ",", "linesep", ",", ")", ")", "if", "isinstance", "(", "msg_dict", ".", "get", "(", "key", ")", ",", "object", ")", ":", "thisobj", "=", "msg_dict", ".", "get", "(", "key", ")", "no_pickle_attrs", "=", "self", ".", "invalid_obj_pickle_attrs", "(", "thisobj", ")", "err_frag1", "=", "(", "Style", ".", "BRIGHT", "+", "\" r_q_send({0}) Offending attrs:\"", ".", "format", "(", "dict_hash", ")", "+", "Style", ".", "RESET_ALL", ")", "err_frag2", "=", "(", "Fore", ".", "YELLOW", "+", "\" {0}\"", ".", "format", "(", "no_pickle_attrs", ")", "+", "Style", ".", "RESET_ALL", ")", "err_frag3", "=", "\"{0}\"", ".", "format", "(", "linesep", ")", "sys", ".", "stderr", ".", "write", "(", "err_frag1", "+", "err_frag2", "+", "err_frag3", ")", "for", "attr", "in", "no_pickle_attrs", ":", "sys", ".", "stderr", ".", "write", "(", "\" msg_dict['{0}'].{1}: {2}'{3}'{4}{5}\"", ".", "format", "(", "key", ",", "attr", ",", "Fore", ".", "RED", ",", "repr", "(", "getattr", "(", "thisobj", ",", "attr", ")", ")", ",", "Style", ".", "RESET_ALL", ",", "linesep", ",", ")", ")", "sys", ".", "stderr", ".", "write", "(", "\" {0}r_q_send({1}) keys (no problems):{2}{3}\"", ".", "format", "(", "Style", ".", "BRIGHT", ",", "dict_hash", ",", "Style", ".", "RESET_ALL", ",", "linesep", ")", ")", "for", "key", "in", "sorted", "(", "set", "(", "msg_dict", ".", "keys", "(", ")", ")", ".", "difference", "(", "no_pickle_keys", ")", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\" msg_dict['{0}']: {1}{2}{3}{4}\"", ".", "format", "(", "key", ",", "Fore", ".", "GREEN", ",", "repr", "(", "msg_dict", ".", "get", "(", "key", ")", ")", ",", "Style", ".", "RESET_ALL", ",", "linesep", ",", ")", ")" ]
Send message dicts through r_q, and throw explicit errors for pickle problems
[ "Send", "message", "dicts", "through", "r_q", "and", "throw", "explicit", "errors", "for", "pickle", "problems" ]
python
test
39.988764
srittau/python-asserts
asserts/__init__.py
https://github.com/srittau/python-asserts/blob/1d5c797031c68ee27552d1c94e7f918c3d3d0453/asserts/__init__.py#L177-L194
def assert_not_equal(first, second, msg_fmt="{msg}"): """Fail if first equals second, as determined by the '==' operator. >>> assert_not_equal(5, 8) >>> assert_not_equal(-7, -7.0) Traceback (most recent call last): ... AssertionError: -7 == -7.0 The following msg_fmt arguments are supported: * msg - the default error message * first - the first argument * second - the second argument """ if first == second: msg = "{!r} == {!r}".format(first, second) fail(msg_fmt.format(msg=msg, first=first, second=second))
[ "def", "assert_not_equal", "(", "first", ",", "second", ",", "msg_fmt", "=", "\"{msg}\"", ")", ":", "if", "first", "==", "second", ":", "msg", "=", "\"{!r} == {!r}\"", ".", "format", "(", "first", ",", "second", ")", "fail", "(", "msg_fmt", ".", "format", "(", "msg", "=", "msg", ",", "first", "=", "first", ",", "second", "=", "second", ")", ")" ]
Fail if first equals second, as determined by the '==' operator. >>> assert_not_equal(5, 8) >>> assert_not_equal(-7, -7.0) Traceback (most recent call last): ... AssertionError: -7 == -7.0 The following msg_fmt arguments are supported: * msg - the default error message * first - the first argument * second - the second argument
[ "Fail", "if", "first", "equals", "second", "as", "determined", "by", "the", "==", "operator", "." ]
python
train
31.388889
mfcloud/python-zvm-sdk
zvmsdk/smtclient.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/smtclient.py#L374-L384
def guest_stop(self, userid, **kwargs):
    """Power off VM."""
    requestData = "PowerVM " + userid + " off"

    if kwargs.get('timeout'):
        requestData += ' --maxwait ' + str(kwargs['timeout'])
    if kwargs.get('poll_interval'):
        requestData += ' --poll ' + str(kwargs['poll_interval'])

    with zvmutils.log_and_reraise_smt_request_failed():
        self._request(requestData)
[ "def", "guest_stop", "(", "self", ",", "userid", ",", "*", "*", "kwargs", ")", ":", "requestData", "=", "\"PowerVM \"", "+", "userid", "+", "\" off\"", "if", "'timeout'", "in", "kwargs", ".", "keys", "(", ")", "and", "kwargs", "[", "'timeout'", "]", ":", "requestData", "+=", "' --maxwait '", "+", "str", "(", "kwargs", "[", "'timeout'", "]", ")", "if", "'poll_interval'", "in", "kwargs", ".", "keys", "(", ")", "and", "kwargs", "[", "'poll_interval'", "]", ":", "requestData", "+=", "' --poll '", "+", "str", "(", "kwargs", "[", "'poll_interval'", "]", ")", "with", "zvmutils", ".", "log_and_reraise_smt_request_failed", "(", ")", ":", "self", ".", "_request", "(", "requestData", ")" ]
Power off VM.
[ "Power", "off", "VM", "." ]
python
train
43.454545
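A hypothetical call (the guest name is invented) and the SMT request string it builds, derivable from the code above:

    smt_client.guest_stop('TESTVM1', timeout=300, poll_interval=10)
    # -> sends: 'PowerVM TESTVM1 off --maxwait 300 --poll 10'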
coinkite/connectrum
connectrum/svr_info.py
https://github.com/coinkite/connectrum/blob/99948f92cc5c3ecb1a8a70146294014e608e50fc/connectrum/svr_info.py#L180-L201
def from_irc(self, irc_nickname=None, irc_password=None):
    '''
        Connect to the IRC channel and find all servers presently connected.

        Slow; takes 30+ seconds but authoritative and current.

        OBSOLETE.
    '''
    if have_bottom:
        from .findall import IrcListener

        # connect and fetch current set of servers who are
        # on #electrum channel at freenode
        bot = IrcListener(irc_nickname=irc_nickname, irc_password=irc_password)
        results = bot.loop.run_until_complete(bot.collect_data())
        bot.loop.close()

        # merge by nick name
        self.update(results)
    else:
        return False
[ "def", "from_irc", "(", "self", ",", "irc_nickname", "=", "None", ",", "irc_password", "=", "None", ")", ":", "if", "have_bottom", ":", "from", ".", "findall", "import", "IrcListener", "# connect and fetch current set of servers who are", "# on #electrum channel at freenode", "bot", "=", "IrcListener", "(", "irc_nickname", "=", "irc_nickname", ",", "irc_password", "=", "irc_password", ")", "results", "=", "bot", ".", "loop", ".", "run_until_complete", "(", "bot", ".", "collect_data", "(", ")", ")", "bot", ".", "loop", ".", "close", "(", ")", "# merge by nick name", "self", ".", "update", "(", "results", ")", "else", ":", "return", "(", "False", ")" ]
Connect to the IRC channel and find all servers presently connected. Slow; takes 30+ seconds but authoritative and current. OBSOLETE.
[ "Connect", "to", "the", "IRC", "channel", "and", "find", "all", "servers", "presently", "connected", "." ]
python
train
31.954545
django-fluent/django-fluent-contents
fluent_contents/rendering/media.py
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/rendering/media.py#L6-L14
def register_frontend_media(request, media): """ Add a :class:`~django.forms.Media` class to the current request. This will be rendered by the ``render_plugin_media`` template tag. """ if not hasattr(request, '_fluent_contents_frontend_media'): request._fluent_contents_frontend_media = Media() add_media(request._fluent_contents_frontend_media, media)
[ "def", "register_frontend_media", "(", "request", ",", "media", ")", ":", "if", "not", "hasattr", "(", "request", ",", "'_fluent_contents_frontend_media'", ")", ":", "request", ".", "_fluent_contents_frontend_media", "=", "Media", "(", ")", "add_media", "(", "request", ".", "_fluent_contents_frontend_media", ",", "media", ")" ]
Add a :class:`~django.forms.Media` class to the current request. This will be rendered by the ``render_plugin_media`` template tag.
[ "Add", "a", ":", "class", ":", "~django", ".", "forms", ".", "Media", "class", "to", "the", "current", "request", ".", "This", "will", "be", "rendered", "by", "the", "render_plugin_media", "template", "tag", "." ]
python
train
41.888889
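A minimal sketch from a view; the JS asset path is an assumption, and the collected media is later emitted by the render_plugin_media template tag mentioned in the docstring:

    from django.forms import Media

    def product_page(request):
        register_frontend_media(request, Media(js=['myapp/gallery.js']))
        ...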
jobovy/galpy
galpy/df/streamdf.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/streamdf.py#L2606-L2639
def _parse_call_args(self,*args,**kwargs): """Helper function to parse the arguments to the __call__ and related functions, return [6,nobj] array of frequencies (:3) and angles (3:)""" interp= kwargs.get('interp',self._useInterp) if len(args) == 5: raise IOError("Must specify phi for streamdf") elif len(args) == 6: if kwargs.get('aAInput',False): if isinstance(args[0],(int,float,numpy.float32,numpy.float64)): out= numpy.empty((6,1)) else: out= numpy.empty((6,len(args[0]))) for ii in range(6): out[ii,:]= args[ii] return out else: return self._approxaA(*args,interp=interp) elif isinstance(args[0],Orbit): o= args[0] return self._approxaA(o.R(),o.vR(),o.vT(),o.z(),o.vz(),o.phi(), interp=interp) elif isinstance(args[0],list) and isinstance(args[0][0],Orbit): R, vR, vT, z, vz, phi= [], [], [], [], [], [] for o in args[0]: R.append(o.R()) vR.append(o.vR()) vT.append(o.vT()) z.append(o.z()) vz.append(o.vz()) phi.append(o.phi()) return self._approxaA(numpy.array(R),numpy.array(vR), numpy.array(vT),numpy.array(z), numpy.array(vz),numpy.array(phi), interp=interp)
[ "def", "_parse_call_args", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "interp", "=", "kwargs", ".", "get", "(", "'interp'", ",", "self", ".", "_useInterp", ")", "if", "len", "(", "args", ")", "==", "5", ":", "raise", "IOError", "(", "\"Must specify phi for streamdf\"", ")", "elif", "len", "(", "args", ")", "==", "6", ":", "if", "kwargs", ".", "get", "(", "'aAInput'", ",", "False", ")", ":", "if", "isinstance", "(", "args", "[", "0", "]", ",", "(", "int", ",", "float", ",", "numpy", ".", "float32", ",", "numpy", ".", "float64", ")", ")", ":", "out", "=", "numpy", ".", "empty", "(", "(", "6", ",", "1", ")", ")", "else", ":", "out", "=", "numpy", ".", "empty", "(", "(", "6", ",", "len", "(", "args", "[", "0", "]", ")", ")", ")", "for", "ii", "in", "range", "(", "6", ")", ":", "out", "[", "ii", ",", ":", "]", "=", "args", "[", "ii", "]", "return", "out", "else", ":", "return", "self", ".", "_approxaA", "(", "*", "args", ",", "interp", "=", "interp", ")", "elif", "isinstance", "(", "args", "[", "0", "]", ",", "Orbit", ")", ":", "o", "=", "args", "[", "0", "]", "return", "self", ".", "_approxaA", "(", "o", ".", "R", "(", ")", ",", "o", ".", "vR", "(", ")", ",", "o", ".", "vT", "(", ")", ",", "o", ".", "z", "(", ")", ",", "o", ".", "vz", "(", ")", ",", "o", ".", "phi", "(", ")", ",", "interp", "=", "interp", ")", "elif", "isinstance", "(", "args", "[", "0", "]", ",", "list", ")", "and", "isinstance", "(", "args", "[", "0", "]", "[", "0", "]", ",", "Orbit", ")", ":", "R", ",", "vR", ",", "vT", ",", "z", ",", "vz", ",", "phi", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "for", "o", "in", "args", "[", "0", "]", ":", "R", ".", "append", "(", "o", ".", "R", "(", ")", ")", "vR", ".", "append", "(", "o", ".", "vR", "(", ")", ")", "vT", ".", "append", "(", "o", ".", "vT", "(", ")", ")", "z", ".", "append", "(", "o", ".", "z", "(", ")", ")", "vz", ".", "append", "(", "o", ".", "vz", "(", ")", ")", "phi", ".", "append", "(", "o", ".", "phi", "(", ")", ")", "return", "self", ".", "_approxaA", "(", "numpy", ".", "array", "(", "R", ")", ",", "numpy", ".", "array", "(", "vR", ")", ",", "numpy", ".", "array", "(", "vT", ")", ",", "numpy", ".", "array", "(", "z", ")", ",", "numpy", ".", "array", "(", "vz", ")", ",", "numpy", ".", "array", "(", "phi", ")", ",", "interp", "=", "interp", ")" ]
Helper function to parse the arguments to the __call__ and related functions, return [6,nobj] array of frequencies (:3) and angles (3:)
[ "Helper", "function", "to", "parse", "the", "arguments", "to", "the", "__call__", "and", "related", "functions", "return", "[", "6", "nobj", "]", "array", "of", "frequencies", "(", ":", "3", ")", "and", "angles", "(", "3", ":", ")" ]
python
train
45.852941
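A minimal runnable sketch of the aAInput branch above: six equal-length inputs are stacked row-wise into a [6,nobj] array, rows 0-2 holding frequencies and rows 3-5 angles. The values here are placeholders, not real stream frequencies or angles.

import numpy as np

args = [np.linspace(0., 1., 4) + ii for ii in range(6)]  # six fake input arrays
out = np.empty((6, len(args[0])))
for ii in range(6):
    out[ii, :] = args[ii]  # row-wise copy, as in the aAInput branch
assert out.shape == (6, 4)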
horazont/aioxmpp
aioxmpp/pubsub/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/pubsub/service.py#L985-L1020
def change_node_subscriptions(self, jid, node, subscriptions_to_set): """ Update the subscriptions at a node. :param jid: Address of the PubSub service. :type jid: :class:`aioxmpp.JID` :param node: Name of the node to modify :type node: :class:`str` :param subscriptions_to_set: The subscriptions to set at the node. :type subscriptions_to_set: :class:`~collections.abc.Iterable` of tuples consisting of the JID to (un)subscribe and the subscription level to use. :raises aioxmpp.errors.XMPPError: as returned by the service `subscriptions_to_set` must be an iterable of pairs (`jid`, `subscription`), where the `jid` indicates the JID for which the `subscription` is to be set. """ iq = aioxmpp.stanza.IQ( type_=aioxmpp.structs.IQType.SET, to=jid, payload=pubsub_xso.OwnerRequest( pubsub_xso.OwnerSubscriptions( node, subscriptions=[ pubsub_xso.OwnerSubscription( jid, subscription ) for jid, subscription in subscriptions_to_set ] ) ) ) yield from self.client.send(iq)
[ "def", "change_node_subscriptions", "(", "self", ",", "jid", ",", "node", ",", "subscriptions_to_set", ")", ":", "iq", "=", "aioxmpp", ".", "stanza", ".", "IQ", "(", "type_", "=", "aioxmpp", ".", "structs", ".", "IQType", ".", "SET", ",", "to", "=", "jid", ",", "payload", "=", "pubsub_xso", ".", "OwnerRequest", "(", "pubsub_xso", ".", "OwnerSubscriptions", "(", "node", ",", "subscriptions", "=", "[", "pubsub_xso", ".", "OwnerSubscription", "(", "jid", ",", "subscription", ")", "for", "jid", ",", "subscription", "in", "subscriptions_to_set", "]", ")", ")", ")", "yield", "from", "self", ".", "client", ".", "send", "(", "iq", ")" ]
Update the subscriptions at a node. :param jid: Address of the PubSub service. :type jid: :class:`aioxmpp.JID` :param node: Name of the node to modify :type node: :class:`str` :param subscriptions_to_set: The subscriptions to set at the node. :type subscriptions_to_set: :class:`~collections.abc.Iterable` of tuples consisting of the JID to (un)subscribe and the subscription level to use. :raises aioxmpp.errors.XMPPError: as returned by the service `subscriptions_to_set` must be an iterable of pairs (`jid`, `subscription`), where the `jid` indicates the JID for which the `subscription` is to be set.
[ "Update", "the", "subscriptions", "at", "a", "node", "." ]
python
train
37.722222
yahoo/TensorFlowOnSpark
examples/imagenet/inception/image_processing.py
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/imagenet/inception/image_processing.py#L304-L336
def image_preprocessing(image_buffer, bbox, train, thread_id=0): """Decode and preprocess one image for evaluation or training. Args: image_buffer: JPEG encoded string Tensor bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] where each coordinate is [0, 1) and the coordinates are arranged as [ymin, xmin, ymax, xmax]. train: boolean thread_id: integer indicating preprocessing thread Returns: 3-D float Tensor containing an appropriately scaled image Raises: ValueError: if user does not provide bounding box """ if bbox is None: raise ValueError('Please supply a bounding box.') image = decode_jpeg(image_buffer) height = FLAGS.image_size width = FLAGS.image_size if train: image = distort_image(image, height, width, bbox, thread_id) else: image = eval_image(image, height, width) # Finally, rescale to [-1,1] instead of [0, 1) image = tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) return image
[ "def", "image_preprocessing", "(", "image_buffer", ",", "bbox", ",", "train", ",", "thread_id", "=", "0", ")", ":", "if", "bbox", "is", "None", ":", "raise", "ValueError", "(", "'Please supply a bounding box.'", ")", "image", "=", "decode_jpeg", "(", "image_buffer", ")", "height", "=", "FLAGS", ".", "image_size", "width", "=", "FLAGS", ".", "image_size", "if", "train", ":", "image", "=", "distort_image", "(", "image", ",", "height", ",", "width", ",", "bbox", ",", "thread_id", ")", "else", ":", "image", "=", "eval_image", "(", "image", ",", "height", ",", "width", ")", "# Finally, rescale to [-1,1] instead of [0, 1)", "image", "=", "tf", ".", "subtract", "(", "image", ",", "0.5", ")", "image", "=", "tf", ".", "multiply", "(", "image", ",", "2.0", ")", "return", "image" ]
Decode and preprocess one image for evaluation or training. Args: image_buffer: JPEG encoded string Tensor bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] where each coordinate is [0, 1) and the coordinates are arranged as [ymin, xmin, ymax, xmax]. train: boolean thread_id: integer indicating preprocessing thread Returns: 3-D float Tensor containing an appropriately scaled image Raises: ValueError: if user does not provide bounding box
[ "Decode", "and", "preprocess", "one", "image", "for", "evaluation", "or", "training", "." ]
python
train
29.757576
dArignac/pelican-extended-sitemap
extended_sitemap/__init__.py
https://github.com/dArignac/pelican-extended-sitemap/blob/1cf7746c071c303db3b321955a91b3a78d9585f8/extended_sitemap/__init__.py#L218-L251
def __create_url_node_for_content(self, content, content_type, url=None, modification_time=None):
    """
    Creates the required <url> node for the sitemap xml.
    :param content: the content class to handle
    :type content: pelican.contents.Content | None
    :param content_type: the type of the given content to match settings.EXTENDED_SITEMAP_PLUGIN
    :type content_type: str
    :param url: if given, the URL to use instead of the url of the content instance
    :type url: str
    :param modification_time: the modification time of the url, will be used instead of content date if given
    :type modification_time: datetime.datetime | None
    :returns: the text node
    :rtype: str
    """
    loc = url
    if loc is None:
        loc = urljoin(self.url_site, self.context.get('ARTICLE_URL').format(**content.url_format))
    lastmod = None
    if modification_time is not None:
        lastmod = modification_time.strftime('%Y-%m-%d')
    else:
        if content is not None:
            if getattr(content, 'modified', None) is not None:
                lastmod = getattr(content, 'modified').strftime('%Y-%m-%d')
            elif getattr(content, 'date', None) is not None:
                lastmod = getattr(content, 'date').strftime('%Y-%m-%d')
    output = "<loc>{}</loc>".format(loc)
    if lastmod is not None:
        output += "\n<lastmod>{}</lastmod>".format(lastmod)
    output += "\n<changefreq>{}</changefreq>".format(self.settings.get('changefrequencies').get(content_type))
    output += "\n<priority>{:.2f}</priority>".format(self.settings.get('priorities').get(content_type))
    return self.template_url.format(output)
[ "def", "__create_url_node_for_content", "(", "self", ",", "content", ",", "content_type", ",", "url", "=", "None", ",", "modification_time", "=", "None", ")", ":", "loc", "=", "url", "if", "loc", "is", "None", ":", "loc", "=", "urljoin", "(", "self", ".", "url_site", ",", "self", ".", "context", ".", "get", "(", "'ARTICLE_URL'", ")", ".", "format", "(", "*", "*", "content", ".", "url_format", ")", ")", "lastmod", "=", "None", "if", "modification_time", "is", "not", "None", ":", "lastmod", "=", "modification_time", ".", "strftime", "(", "'%Y-%m-%d'", ")", "else", ":", "if", "content", "is", "not", "None", ":", "if", "getattr", "(", "content", ",", "'modified'", ",", "None", ")", "is", "not", "None", ":", "lastmod", "=", "getattr", "(", "content", ",", "'modified'", ")", ".", "strftime", "(", "'%Y-%m-%d'", ")", "elif", "getattr", "(", "content", ",", "'date'", ",", "None", ")", "is", "not", "None", ":", "lastmod", "=", "getattr", "(", "content", ",", "'date'", ")", ".", "strftime", "(", "'%Y-%m-%d'", ")", "output", "=", "\"<loc>{}</loc>\"", ".", "format", "(", "loc", ")", "if", "lastmod", "is", "not", "None", ":", "output", "+=", "\"\\n<lastmod>{}</lastmod>\"", ".", "format", "(", "lastmod", ")", "output", "+=", "\"\\n<changefreq>{}</changefreq>\"", ".", "format", "(", "self", ".", "settings", ".", "get", "(", "'changefrequencies'", ")", ".", "get", "(", "content_type", ")", ")", "output", "+=", "\"\\n<priority>{:.2f}</priority>\"", ".", "format", "(", "self", ".", "settings", ".", "get", "(", "'priorities'", ")", ".", "get", "(", "content_type", ")", ")", "return", "self", ".", "template_url", ".", "format", "(", "output", ")" ]
Creates the required <url> node for the sitemap xml.
:param content: the content class to handle
:type content: pelican.contents.Content | None
:param content_type: the type of the given content to match settings.EXTENDED_SITEMAP_PLUGIN
:type content_type: str
:param url: if given, the URL to use instead of the url of the content instance
:type url: str
:param modification_time: the modification time of the url, will be used instead of content date if given
:type modification_time: datetime.datetime | None
:returns: the text node
:rtype: str
[ "Creates", "the", "required", "<url", ">", "node", "for", "the", "sitemap", "xml", ".", ":", "param", "content", ":", "the", "content", "class", "to", "handle", ":", "type", "content", ":", "pelican", ".", "contents", ".", "Content", "|", "None", ":", "param", "content_type", ":", "the", "type", "of", "the", "given", "content", "to", "match", "settings", ".", "EXTENDED_SITEMAP_PLUGIN", ":", "type", "content_type", ";", "str", ":", "param", "url", ";", "if", "given", "the", "URL", "to", "use", "instead", "of", "the", "url", "of", "the", "content", "instance", ":", "type", "url", ":", "str", ":", "param", "modification_time", ":", "the", "modification", "time", "of", "the", "url", "will", "be", "used", "instead", "of", "content", "date", "if", "given", ":", "type", "modification_time", ":", "datetime", ".", "datetime", "|", "None", ":", "returns", ":", "the", "text", "node", ":", "rtype", ":", "str" ]
python
train
51.382353
stain/forgetSQL
lib/forgetSQL.py
https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L608-L629
def _nextSequence(cls, name=None):
    """Return a new sequence number for insertion in self._sqlTable.

    Note that if your sequences are not named tablename_primarykey_seq
    (i.e. for table 'blapp' with primary key 'john_id', sequence name
    blapp_john_id_seq) you must give the full sequence name as an
    optional argument to _nextSequence.
    """
    if not name:
        name = cls._sqlSequence
    if not name:
        # Assume it's tablename_primarykey_seq
        if len(cls._sqlPrimary) != 1:
            raise ValueError("Could not guess sequence name for multi-primary-key")
        primary = cls._sqlPrimary[0]
        name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
        # Don't have . as a tablename or column name! =)
    curs = cls.cursor()
    curs.execute("SELECT nextval('%s')" % name)
    value = curs.fetchone()[0]
    curs.close()
    return value
[ "def", "_nextSequence", "(", "cls", ",", "name", "=", "None", ")", ":", "if", "not", "name", ":", "name", "=", "cls", ".", "_sqlSequence", "if", "not", "name", ":", "# Assume it's tablename_primarykey_seq", "if", "len", "(", "cls", ".", "_sqlPrimary", ")", "<>", "1", ":", "raise", "\"Could not guess sequence name for multi-primary-key\"", "primary", "=", "cls", ".", "_sqlPrimary", "[", "0", "]", "name", "=", "'%s_%s_seq'", "%", "(", "cls", ".", "_sqlTable", ",", "primary", ".", "replace", "(", "'.'", ",", "'_'", ")", ")", "# Don't have . as a tablename or column name! =)", "curs", "=", "cls", ".", "cursor", "(", ")", "curs", ".", "execute", "(", "\"SELECT nextval('%s')\"", "%", "name", ")", "value", "=", "curs", ".", "fetchone", "(", ")", "[", "0", "]", "curs", ".", "close", "(", ")", "return", "value" ]
Return a new sequence number for insertion in self._sqlTable.

Note that if your sequences are not named tablename_primarykey_seq (i.e. for table 'blapp' with primary key 'john_id', sequence name blapp_john_id_seq) you must give the full sequence name as an optional argument to _nextSequence.
[ "Return", "a", "new", "sequence", "number", "for", "insertion", "in", "self", ".", "_sqlTable", "." ]
python
train
42.954545
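A one-line illustration of the sequence-name guess above, using the docstring's own example of table 'blapp' with primary key 'john_id' (no live database involved):

table, primary = 'blapp', 'john_id'
name = '%s_%s_seq' % (table, primary.replace('.', '_'))
assert name == 'blapp_john_id_seq'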
DataDog/integrations-core
datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py#L263-L311
def from_time(
    year=None, month=None, day=None, hours=None, minutes=None, seconds=None, microseconds=None, timezone=None
):
    """Convenience wrapper to take a series of date/time elements and return a WMI time
    of the form `yyyymmddHHMMSS.mmmmmm+UUU`. All elements may be int, string or
    omitted altogether. If omitted, they will be replaced in the output string
    by a series of stars of the appropriate length.

    :param year: The year element of the date/time
    :param month: The month element of the date/time
    :param day: The day element of the date/time
    :param hours: The hours element of the date/time
    :param minutes: The minutes element of the date/time
    :param seconds: The seconds element of the date/time
    :param microseconds: The microseconds element of the date/time
    :param timezone: The timezone element of the date/time

    :returns: A WMI datetime string of the form: `yyyymmddHHMMSS.mmmmmm+UUU`
    """

    def str_or_stars(i, length):
        if i is None:
            return "*" * length
        else:
            return str(i).rjust(length, "0")

    wmi_time = ""
    wmi_time += str_or_stars(year, 4)
    wmi_time += str_or_stars(month, 2)
    wmi_time += str_or_stars(day, 2)
    wmi_time += str_or_stars(hours, 2)
    wmi_time += str_or_stars(minutes, 2)
    wmi_time += str_or_stars(seconds, 2)
    wmi_time += "."
    wmi_time += str_or_stars(microseconds, 6)
    if timezone is None:
        wmi_time += "+"
    else:
        try:
            int(timezone)
        except ValueError:
            wmi_time += "+"
        else:
            if timezone >= 0:
                wmi_time += "+"
            else:
                wmi_time += "-"
                timezone = abs(timezone)
            wmi_time += str_or_stars(timezone, 3)

    return wmi_time
[ "def", "from_time", "(", "year", "=", "None", ",", "month", "=", "None", ",", "day", "=", "None", ",", "hours", "=", "None", ",", "minutes", "=", "None", ",", "seconds", "=", "None", ",", "microseconds", "=", "None", ",", "timezone", "=", "None", ")", ":", "def", "str_or_stars", "(", "i", ",", "length", ")", ":", "if", "i", "is", "None", ":", "return", "\"*\"", "*", "length", "else", ":", "return", "str", "(", "i", ")", ".", "rjust", "(", "length", ",", "\"0\"", ")", "wmi_time", "=", "\"\"", "wmi_time", "+=", "str_or_stars", "(", "year", ",", "4", ")", "wmi_time", "+=", "str_or_stars", "(", "month", ",", "2", ")", "wmi_time", "+=", "str_or_stars", "(", "day", ",", "2", ")", "wmi_time", "+=", "str_or_stars", "(", "hours", ",", "2", ")", "wmi_time", "+=", "str_or_stars", "(", "minutes", ",", "2", ")", "wmi_time", "+=", "str_or_stars", "(", "seconds", ",", "2", ")", "wmi_time", "+=", "\".\"", "wmi_time", "+=", "str_or_stars", "(", "microseconds", ",", "6", ")", "if", "timezone", "is", "None", ":", "wmi_time", "+=", "\"+\"", "else", ":", "try", ":", "int", "(", "timezone", ")", "except", "ValueError", ":", "wmi_time", "+=", "\"+\"", "else", ":", "if", "timezone", ">=", "0", ":", "wmi_time", "+=", "\"+\"", "else", ":", "wmi_time", "+=", "\"-\"", "timezone", "=", "abs", "(", "timezone", ")", "wmi_time", "+=", "str_or_stars", "(", "timezone", ",", "3", ")", "return", "wmi_time" ]
Convenience wrapper to take a series of date/time elements and return a WMI time
of the form `yyyymmddHHMMSS.mmmmmm+UUU`. All elements may be int, string or
omitted altogether. If omitted, they will be replaced in the output string
by a series of stars of the appropriate length.

:param year: The year element of the date/time
:param month: The month element of the date/time
:param day: The day element of the date/time
:param hours: The hours element of the date/time
:param minutes: The minutes element of the date/time
:param seconds: The seconds element of the date/time
:param microseconds: The microseconds element of the date/time
:param timezone: The timezone element of the date/time

:returns: A WMI datetime string of the form: `yyyymmddHHMMSS.mmmmmm+UUU`
[ "Convenience", "wrapper", "to", "take", "a", "series", "of", "date", "/", "time", "elements", "and", "return", "a", "WMI", "time", "of", "the", "form", "yyyymmddHHMMSS", ".", "mmmmmm", "+", "UUU", ".", "All", "elements", "may", "be", "int", "string", "or", "omitted", "altogether", ".", "If", "omitted", "they", "will", "be", "replaced", "in", "the", "output", "string", "by", "a", "series", "of", "stars", "of", "the", "appropriate", "length", ".", ":", "param", "year", ":", "The", "year", "element", "of", "the", "date", "/", "time", ":", "param", "month", ":", "The", "month", "element", "of", "the", "date", "/", "time", ":", "param", "day", ":", "The", "day", "element", "of", "the", "date", "/", "time", ":", "param", "hours", ":", "The", "hours", "element", "of", "the", "date", "/", "time", ":", "param", "minutes", ":", "The", "minutes", "element", "of", "the", "date", "/", "time", ":", "param", "seconds", ":", "The", "seconds", "element", "of", "the", "date", "/", "time", ":", "param", "microseconds", ":", "The", "microseconds", "element", "of", "the", "date", "/", "time", ":", "param", "timezone", ":", "The", "timeezone", "element", "of", "the", "date", "/", "time", ":", "returns", ":", "A", "WMI", "datetime", "string", "of", "the", "form", ":", "yyyymmddHHMMSS", ".", "mmmmmm", "+", "UUU" ]
python
train
36.163265
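A standalone sketch of the WMI timestamp layout produced above; the helper name _field is hypothetical, introduced only to mirror str_or_stars:

def _field(value, width):
    # None becomes a run of stars, anything else is zero-padded to width
    return '*' * width if value is None else str(value).rjust(width, '0')

date_parts = [(2024, 4), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2)]
stamp = ''.join(_field(v, w) for v, w in date_parts)
stamp += '.' + _field(6, 6) + '+' + _field(60, 3)
assert stamp == '20240102030405.000006+060'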
robotools/fontParts
Lib/fontParts/base/normalizers.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/normalizers.py#L379-L389
def normalizeGlyphHeight(value): """ Normalizes glyph height. * **value** must be a :ref:`type-int-float`. * Returned value is the same type as the input value. """ if not isinstance(value, (int, float)): raise TypeError("Glyph height must be an :ref:`type-int-float`, not " "%s." % type(value).__name__) return value
[ "def", "normalizeGlyphHeight", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "(", "int", ",", "float", ")", ")", ":", "raise", "TypeError", "(", "\"Glyph height must be an :ref:`type-int-float`, not \"", "\"%s.\"", "%", "type", "(", "value", ")", ".", "__name__", ")", "return", "value" ]
Normalizes glyph height. * **value** must be a :ref:`type-int-float`. * Returned value is the same type as the input value.
[ "Normalizes", "glyph", "height", "." ]
python
train
33.454545
mcrute/pydora
pydora/player.py
https://github.com/mcrute/pydora/blob/d9e353e7f19da741dcf372246b4d5640cb788488/pydora/player.py#L223-L232
def input(self, input, song): """Input callback, handles key presses """ try: cmd = getattr(self, self.CMD_MAP[input][1]) except (IndexError, KeyError): return self.screen.print_error( "Invalid command {!r}!".format(input)) cmd(song)
[ "def", "input", "(", "self", ",", "input", ",", "song", ")", ":", "try", ":", "cmd", "=", "getattr", "(", "self", ",", "self", ".", "CMD_MAP", "[", "input", "]", "[", "1", "]", ")", "except", "(", "IndexError", ",", "KeyError", ")", ":", "return", "self", ".", "screen", ".", "print_error", "(", "\"Invalid command {!r}!\"", ".", "format", "(", "input", ")", ")", "cmd", "(", "song", ")" ]
Input callback, handles key presses
[ "Input", "callback", "handles", "key", "presses" ]
python
valid
30.5
genialis/resolwe
resolwe/process/runtime.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/process/runtime.py#L177-L193
def run_process(self, slug, inputs): """Run a new process from a running process.""" def export_files(value): """Export input files of spawned process.""" if isinstance(value, str) and os.path.isfile(value): # TODO: Use the protocol to export files and get the # process schema to check field type. print("export {}".format(value)) elif isinstance(value, dict): for item in value.values(): export_files(item) elif isinstance(value, list): for item in value: export_files(item) export_files(inputs) print('run {}'.format(json.dumps({'process': slug, 'input': inputs}, separators=(',', ':'))))
[ "def", "run_process", "(", "self", ",", "slug", ",", "inputs", ")", ":", "def", "export_files", "(", "value", ")", ":", "\"\"\"Export input files of spawned process.\"\"\"", "if", "isinstance", "(", "value", ",", "str", ")", "and", "os", ".", "path", ".", "isfile", "(", "value", ")", ":", "# TODO: Use the protocol to export files and get the", "# process schema to check field type.", "print", "(", "\"export {}\"", ".", "format", "(", "value", ")", ")", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "for", "item", "in", "value", ".", "values", "(", ")", ":", "export_files", "(", "item", ")", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "for", "item", "in", "value", ":", "export_files", "(", "item", ")", "export_files", "(", "inputs", ")", "print", "(", "'run {}'", ".", "format", "(", "json", ".", "dumps", "(", "{", "'process'", ":", "slug", ",", "'input'", ":", "inputs", "}", ",", "separators", "=", "(", "','", ",", "':'", ")", ")", ")", ")" ]
Run a new process from a running process.
[ "Run", "a", "new", "process", "from", "a", "running", "process", "." ]
python
train
45.647059
crs4/pydoop
pydoop/hadut.py
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L338-L395
def run_pipes(executable, input_path, output_path, more_args=None, properties=None, force_pydoop_submitter=False, hadoop_conf_dir=None, logger=None, keep_streams=False): """ Run a pipes command. ``more_args`` (after setting input/output path) and ``properties`` are passed to :func:`run_cmd`. If not specified otherwise, this function sets the properties ``mapreduce.pipes.isjavarecordreader`` and ``mapreduce.pipes.isjavarecordwriter`` to ``"true"``. This function works around a bug in Hadoop pipes that affects versions of Hadoop with security when the local file system is used as the default FS (no HDFS); see https://issues.apache.org/jira/browse/MAPREDUCE-4000. In those set-ups, the function uses Pydoop's own pipes submitter application. You can force the use of Pydoop's submitter by passing the argument force_pydoop_submitter=True. """ if logger is None: logger = utils.NullLogger() if not hdfs.path.exists(executable): raise IOError("executable %s not found" % executable) if not hdfs.path.exists(input_path) and not (set(input_path) & GLOB_CHARS): raise IOError("input path %s not found" % input_path) if properties is None: properties = {} properties.setdefault('mapreduce.pipes.isjavarecordreader', 'true') properties.setdefault('mapreduce.pipes.isjavarecordwriter', 'true') if force_pydoop_submitter: use_pydoop_submit = True else: use_pydoop_submit = False ver = pydoop.hadoop_version_info() if ver.has_security(): if ver.is_cdh_mrv2() and hdfs.default_is_local(): raise RuntimeError("mrv2 on local fs not supported yet") use_pydoop_submit = hdfs.default_is_local() args = [ "-program", executable, "-input", input_path, "-output", output_path, ] if more_args is not None: args.extend(more_args) if use_pydoop_submit: submitter = "it.crs4.pydoop.pipes.Submitter" pydoop_jar = pydoop.jar_path() args.extend(("-libjars", pydoop_jar)) return run_class(submitter, args, properties, classpath=pydoop_jar, logger=logger, keep_streams=keep_streams) else: return run_mapred_cmd("pipes", args=args, properties=properties, hadoop_conf_dir=hadoop_conf_dir, logger=logger, keep_streams=keep_streams)
[ "def", "run_pipes", "(", "executable", ",", "input_path", ",", "output_path", ",", "more_args", "=", "None", ",", "properties", "=", "None", ",", "force_pydoop_submitter", "=", "False", ",", "hadoop_conf_dir", "=", "None", ",", "logger", "=", "None", ",", "keep_streams", "=", "False", ")", ":", "if", "logger", "is", "None", ":", "logger", "=", "utils", ".", "NullLogger", "(", ")", "if", "not", "hdfs", ".", "path", ".", "exists", "(", "executable", ")", ":", "raise", "IOError", "(", "\"executable %s not found\"", "%", "executable", ")", "if", "not", "hdfs", ".", "path", ".", "exists", "(", "input_path", ")", "and", "not", "(", "set", "(", "input_path", ")", "&", "GLOB_CHARS", ")", ":", "raise", "IOError", "(", "\"input path %s not found\"", "%", "input_path", ")", "if", "properties", "is", "None", ":", "properties", "=", "{", "}", "properties", ".", "setdefault", "(", "'mapreduce.pipes.isjavarecordreader'", ",", "'true'", ")", "properties", ".", "setdefault", "(", "'mapreduce.pipes.isjavarecordwriter'", ",", "'true'", ")", "if", "force_pydoop_submitter", ":", "use_pydoop_submit", "=", "True", "else", ":", "use_pydoop_submit", "=", "False", "ver", "=", "pydoop", ".", "hadoop_version_info", "(", ")", "if", "ver", ".", "has_security", "(", ")", ":", "if", "ver", ".", "is_cdh_mrv2", "(", ")", "and", "hdfs", ".", "default_is_local", "(", ")", ":", "raise", "RuntimeError", "(", "\"mrv2 on local fs not supported yet\"", ")", "use_pydoop_submit", "=", "hdfs", ".", "default_is_local", "(", ")", "args", "=", "[", "\"-program\"", ",", "executable", ",", "\"-input\"", ",", "input_path", ",", "\"-output\"", ",", "output_path", ",", "]", "if", "more_args", "is", "not", "None", ":", "args", ".", "extend", "(", "more_args", ")", "if", "use_pydoop_submit", ":", "submitter", "=", "\"it.crs4.pydoop.pipes.Submitter\"", "pydoop_jar", "=", "pydoop", ".", "jar_path", "(", ")", "args", ".", "extend", "(", "(", "\"-libjars\"", ",", "pydoop_jar", ")", ")", "return", "run_class", "(", "submitter", ",", "args", ",", "properties", ",", "classpath", "=", "pydoop_jar", ",", "logger", "=", "logger", ",", "keep_streams", "=", "keep_streams", ")", "else", ":", "return", "run_mapred_cmd", "(", "\"pipes\"", ",", "args", "=", "args", ",", "properties", "=", "properties", ",", "hadoop_conf_dir", "=", "hadoop_conf_dir", ",", "logger", "=", "logger", ",", "keep_streams", "=", "keep_streams", ")" ]
Run a pipes command. ``more_args`` (after setting input/output path) and ``properties`` are passed to :func:`run_cmd`. If not specified otherwise, this function sets the properties ``mapreduce.pipes.isjavarecordreader`` and ``mapreduce.pipes.isjavarecordwriter`` to ``"true"``. This function works around a bug in Hadoop pipes that affects versions of Hadoop with security when the local file system is used as the default FS (no HDFS); see https://issues.apache.org/jira/browse/MAPREDUCE-4000. In those set-ups, the function uses Pydoop's own pipes submitter application. You can force the use of Pydoop's submitter by passing the argument force_pydoop_submitter=True.
[ "Run", "a", "pipes", "command", "." ]
python
train
42.741379
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py#L338-L354
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_lldp_pdu_received(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail") config = get_lldp_neighbor_detail output = ET.SubElement(get_lldp_neighbor_detail, "output") lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail") local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name") local_interface_name_key.text = kwargs.pop('local_interface_name') remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name") remote_interface_name_key.text = kwargs.pop('remote_interface_name') lldp_pdu_received = ET.SubElement(lldp_neighbor_detail, "lldp-pdu-received") lldp_pdu_received.text = kwargs.pop('lldp_pdu_received') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_lldp_neighbor_detail_output_lldp_neighbor_detail_lldp_pdu_received", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_lldp_neighbor_detail", "=", "ET", ".", "Element", "(", "\"get_lldp_neighbor_detail\"", ")", "config", "=", "get_lldp_neighbor_detail", "output", "=", "ET", ".", "SubElement", "(", "get_lldp_neighbor_detail", ",", "\"output\"", ")", "lldp_neighbor_detail", "=", "ET", ".", "SubElement", "(", "output", ",", "\"lldp-neighbor-detail\"", ")", "local_interface_name_key", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"local-interface-name\"", ")", "local_interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'local_interface_name'", ")", "remote_interface_name_key", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"remote-interface-name\"", ")", "remote_interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'remote_interface_name'", ")", "lldp_pdu_received", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"lldp-pdu-received\"", ")", "lldp_pdu_received", ".", "text", "=", "kwargs", ".", "pop", "(", "'lldp_pdu_received'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
58.882353
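A minimal sketch of the ElementTree pattern the generated method relies on, with made-up interface names:

import xml.etree.ElementTree as ET

config = ET.Element('config')
output = ET.SubElement(config, 'output')
detail = ET.SubElement(output, 'lldp-neighbor-detail')
ET.SubElement(detail, 'local-interface-name').text = 'Te 0/1'
ET.SubElement(detail, 'remote-interface-name').text = 'Te 0/2'
print(ET.tostring(config).decode())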
farshidce/touchworks-python
touchworks/api/http.py
https://github.com/farshidce/touchworks-python/blob/ea8f93a0f4273de1317a318e945a571f5038ba62/touchworks/api/http.py#L869-L897
def get_schedule(self, ehr_username, start_date, changed_since, include_pix, other_user='All', end_date='', appointment_types=None, status_filter='All'): """ invokes TouchWorksMagicConstants.ACTION_GET_SCHEDULE action :return: JSON response """ if not start_date: raise ValueError('start_date can not be null') if end_date: start_date = '%s|%s' % (start_date, end_date) if not changed_since: changed_since = '' magic = self._magic_json(action=TouchWorksMagicConstants.ACTION_GET_SCHEDULE, app_name=self._app_name, user_id=ehr_username, token=self._token.token, parameter1=start_date, parameter2=changed_since, parameter3=include_pix, parameter4=other_user, parameter5=appointment_types, parameter6=status_filter) response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic) result = self._get_results_or_raise_if_magic_invalid( magic, response, TouchWorksMagicConstants.RESULT_GET_SCHEDULE) return result
[ "def", "get_schedule", "(", "self", ",", "ehr_username", ",", "start_date", ",", "changed_since", ",", "include_pix", ",", "other_user", "=", "'All'", ",", "end_date", "=", "''", ",", "appointment_types", "=", "None", ",", "status_filter", "=", "'All'", ")", ":", "if", "not", "start_date", ":", "raise", "ValueError", "(", "'start_date can not be null'", ")", "if", "end_date", ":", "start_date", "=", "'%s|%s'", "%", "(", "start_date", ",", "end_date", ")", "if", "not", "changed_since", ":", "changed_since", "=", "''", "magic", "=", "self", ".", "_magic_json", "(", "action", "=", "TouchWorksMagicConstants", ".", "ACTION_GET_SCHEDULE", ",", "app_name", "=", "self", ".", "_app_name", ",", "user_id", "=", "ehr_username", ",", "token", "=", "self", ".", "_token", ".", "token", ",", "parameter1", "=", "start_date", ",", "parameter2", "=", "changed_since", ",", "parameter3", "=", "include_pix", ",", "parameter4", "=", "other_user", ",", "parameter5", "=", "appointment_types", ",", "parameter6", "=", "status_filter", ")", "response", "=", "self", ".", "_http_request", "(", "TouchWorksEndPoints", ".", "MAGIC_JSON", ",", "data", "=", "magic", ")", "result", "=", "self", ".", "_get_results_or_raise_if_magic_invalid", "(", "magic", ",", "response", ",", "TouchWorksMagicConstants", ".", "RESULT_GET_SCHEDULE", ")", "return", "result" ]
invokes TouchWorksMagicConstants.ACTION_GET_SCHEDULE action :return: JSON response
[ "invokes", "TouchWorksMagicConstants", ".", "ACTION_GET_SCHEDULE", "action", ":", "return", ":", "JSON", "response" ]
python
train
47.448276
RRZE-HPC/kerncraft
kerncraft/kerncraft.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kerncraft.py#L70-L80
def get_last_modified_datetime(dir_path=os.path.dirname(__file__)): """Return datetime object of latest change in kerncraft module directory.""" max_mtime = 0 for root, dirs, files in os.walk(dir_path): for f in files: p = os.path.join(root, f) try: max_mtime = max(max_mtime, os.stat(p).st_mtime) except FileNotFoundError: pass return datetime.utcfromtimestamp(max_mtime)
[ "def", "get_last_modified_datetime", "(", "dir_path", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", ":", "max_mtime", "=", "0", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "dir_path", ")", ":", "for", "f", "in", "files", ":", "p", "=", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", "try", ":", "max_mtime", "=", "max", "(", "max_mtime", ",", "os", ".", "stat", "(", "p", ")", ".", "st_mtime", ")", "except", "FileNotFoundError", ":", "pass", "return", "datetime", ".", "utcfromtimestamp", "(", "max_mtime", ")" ]
Return datetime object of latest change in kerncraft module directory.
[ "Return", "datetime", "object", "of", "latest", "change", "in", "kerncraft", "module", "directory", "." ]
python
test
41.272727
tensorflow/cleverhans
cleverhans/dataset.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/dataset.py#L269-L307
def data_cifar10(train_start=0, train_end=50000, test_start=0, test_end=10000): """ Preprocess CIFAR10 dataset :return: """ # These values are specific to CIFAR10 img_rows = 32 img_cols = 32 nb_classes = 10 # the data, shuffled and split between train and test sets (x_train, y_train), (x_test, y_test) = cifar10.load_data() if tf.keras.backend.image_data_format() == 'channels_first': x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols) x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols) else: x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') # convert class vectors to binary class matrices y_train = np_utils.to_categorical(y_train, nb_classes) y_test = np_utils.to_categorical(y_test, nb_classes) x_train = x_train[train_start:train_end, :, :, :] y_train = y_train[train_start:train_end, :] x_test = x_test[test_start:test_end, :] y_test = y_test[test_start:test_end, :] return x_train, y_train, x_test, y_test
[ "def", "data_cifar10", "(", "train_start", "=", "0", ",", "train_end", "=", "50000", ",", "test_start", "=", "0", ",", "test_end", "=", "10000", ")", ":", "# These values are specific to CIFAR10", "img_rows", "=", "32", "img_cols", "=", "32", "nb_classes", "=", "10", "# the data, shuffled and split between train and test sets", "(", "x_train", ",", "y_train", ")", ",", "(", "x_test", ",", "y_test", ")", "=", "cifar10", ".", "load_data", "(", ")", "if", "tf", ".", "keras", ".", "backend", ".", "image_data_format", "(", ")", "==", "'channels_first'", ":", "x_train", "=", "x_train", ".", "reshape", "(", "x_train", ".", "shape", "[", "0", "]", ",", "3", ",", "img_rows", ",", "img_cols", ")", "x_test", "=", "x_test", ".", "reshape", "(", "x_test", ".", "shape", "[", "0", "]", ",", "3", ",", "img_rows", ",", "img_cols", ")", "else", ":", "x_train", "=", "x_train", ".", "reshape", "(", "x_train", ".", "shape", "[", "0", "]", ",", "img_rows", ",", "img_cols", ",", "3", ")", "x_test", "=", "x_test", ".", "reshape", "(", "x_test", ".", "shape", "[", "0", "]", ",", "img_rows", ",", "img_cols", ",", "3", ")", "x_train", "=", "x_train", ".", "astype", "(", "'float32'", ")", "x_test", "=", "x_test", ".", "astype", "(", "'float32'", ")", "x_train", "/=", "255", "x_test", "/=", "255", "print", "(", "'x_train shape:'", ",", "x_train", ".", "shape", ")", "print", "(", "x_train", ".", "shape", "[", "0", "]", ",", "'train samples'", ")", "print", "(", "x_test", ".", "shape", "[", "0", "]", ",", "'test samples'", ")", "# convert class vectors to binary class matrices", "y_train", "=", "np_utils", ".", "to_categorical", "(", "y_train", ",", "nb_classes", ")", "y_test", "=", "np_utils", ".", "to_categorical", "(", "y_test", ",", "nb_classes", ")", "x_train", "=", "x_train", "[", "train_start", ":", "train_end", ",", ":", ",", ":", ",", ":", "]", "y_train", "=", "y_train", "[", "train_start", ":", "train_end", ",", ":", "]", "x_test", "=", "x_test", "[", "test_start", ":", "test_end", ",", ":", "]", "y_test", "=", "y_test", "[", "test_start", ":", "test_end", ",", ":", "]", "return", "x_train", ",", "y_train", ",", "x_test", ",", "y_test" ]
Preprocess CIFAR10 dataset :return:
[ "Preprocess", "CIFAR10", "dataset", ":", "return", ":" ]
python
train
32.846154
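The two preprocessing steps above, sketched with plain NumPy (np.eye stands in for np_utils.to_categorical; the labels are arbitrary):

import numpy as np

x = np.random.randint(0, 256, size=(2, 32, 32, 3)).astype('float32') / 255  # scale to [0, 1]
y = np.eye(10)[np.array([0, 3])]  # one-hot encode two labels
assert x.max() <= 1.0 and y.shape == (2, 10)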
saltstack/salt
salt/utils/zfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/zfs.py#L495-L527
def to_size(value, convert_to_human=True): ''' Convert python int (bytes) to zfs size NOTE: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/pyzfs/common/util.py#114 ''' value = from_size(value) if value is None: value = 'none' if isinstance(value, Number) and value > 1024 and convert_to_human: v_power = int(math.floor(math.log(value, 1024))) v_multiplier = math.pow(1024, v_power) # NOTE: zfs is a bit odd on how it does the rounding, # see libzfs implementation linked above v_size_float = float(value) / v_multiplier if v_size_float == int(v_size_float): value = "{:.0f}{}".format( v_size_float, zfs_size[v_power-1], ) else: for v_precision in ["{:.2f}{}", "{:.1f}{}", "{:.0f}{}"]: v_size = v_precision.format( v_size_float, zfs_size[v_power-1], ) if len(v_size) <= 5: value = v_size break return value
[ "def", "to_size", "(", "value", ",", "convert_to_human", "=", "True", ")", ":", "value", "=", "from_size", "(", "value", ")", "if", "value", "is", "None", ":", "value", "=", "'none'", "if", "isinstance", "(", "value", ",", "Number", ")", "and", "value", ">", "1024", "and", "convert_to_human", ":", "v_power", "=", "int", "(", "math", ".", "floor", "(", "math", ".", "log", "(", "value", ",", "1024", ")", ")", ")", "v_multiplier", "=", "math", ".", "pow", "(", "1024", ",", "v_power", ")", "# NOTE: zfs is a bit odd on how it does the rounding,", "# see libzfs implementation linked above", "v_size_float", "=", "float", "(", "value", ")", "/", "v_multiplier", "if", "v_size_float", "==", "int", "(", "v_size_float", ")", ":", "value", "=", "\"{:.0f}{}\"", ".", "format", "(", "v_size_float", ",", "zfs_size", "[", "v_power", "-", "1", "]", ",", ")", "else", ":", "for", "v_precision", "in", "[", "\"{:.2f}{}\"", ",", "\"{:.1f}{}\"", ",", "\"{:.0f}{}\"", "]", ":", "v_size", "=", "v_precision", ".", "format", "(", "v_size_float", ",", "zfs_size", "[", "v_power", "-", "1", "]", ",", ")", "if", "len", "(", "v_size", ")", "<=", "5", ":", "value", "=", "v_size", "break", "return", "value" ]
Convert python int (bytes) to zfs size NOTE: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/pyzfs/common/util.py#114
[ "Convert", "python", "int", "(", "bytes", ")", "to", "zfs", "size" ]
python
train
32.969697
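A self-contained sketch of the rounding rule above, assuming the suffix table runs K, M, G, ... as in the module's zfs_size list:

import math

def human(value, suffixes='KMGTPEZ'):
    power = int(math.floor(math.log(value, 1024)))
    size = float(value) / math.pow(1024, power)
    if size == int(size):
        return '{:.0f}{}'.format(size, suffixes[power - 1])
    # otherwise use the most precise format that stays within five characters
    for fmt in ('{:.2f}{}', '{:.1f}{}', '{:.0f}{}'):
        out = fmt.format(size, suffixes[power - 1])
        if len(out) <= 5:
            return out

assert human(2048) == '2K' and human(1536) == '1.50K'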
CityOfZion/neo-python
examples/build_raw_transactions.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/examples/build_raw_transactions.py#L219-L232
def address_to_scripthash(address: str) -> UInt160: """Just a helper method""" AddressVersion = 23 # fixed at this point data = b58decode(address) if len(data) != 25: raise ValueError('Not correct Address, wrong length.') if data[0] != AddressVersion: raise ValueError('Not correct Coin Version') checksum_data = data[:21] checksum = hashlib.sha256(hashlib.sha256(checksum_data).digest()).digest()[:4] if checksum != data[21:]: raise Exception('Address format error') return UInt160(data=data[1:21])
[ "def", "address_to_scripthash", "(", "address", ":", "str", ")", "->", "UInt160", ":", "AddressVersion", "=", "23", "# fixed at this point", "data", "=", "b58decode", "(", "address", ")", "if", "len", "(", "data", ")", "!=", "25", ":", "raise", "ValueError", "(", "'Not correct Address, wrong length.'", ")", "if", "data", "[", "0", "]", "!=", "AddressVersion", ":", "raise", "ValueError", "(", "'Not correct Coin Version'", ")", "checksum_data", "=", "data", "[", ":", "21", "]", "checksum", "=", "hashlib", ".", "sha256", "(", "hashlib", ".", "sha256", "(", "checksum_data", ")", ".", "digest", "(", ")", ")", ".", "digest", "(", ")", "[", ":", "4", "]", "if", "checksum", "!=", "data", "[", "21", ":", "]", ":", "raise", "Exception", "(", "'Address format error'", ")", "return", "UInt160", "(", "data", "=", "data", "[", "1", ":", "21", "]", ")" ]
Just a helper method
[ "Just", "a", "helper", "method" ]
python
train
39.142857
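A round-trip sketch of the Base58Check layout validated above: version byte 23, a 20-byte script hash (zeros here, purely illustrative), and a 4-byte double-SHA256 checksum. Requires the third-party base58 package:

import hashlib
from base58 import b58decode, b58encode

payload = bytes([23]) + bytes(20)  # version byte + dummy script hash
checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
address = b58encode(payload + checksum).decode()
data = b58decode(address)
assert len(data) == 25 and data[0] == 23 and data[21:] == checksum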
openego/ding0
ding0/core/network/grids.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/network/grids.py#L158-L165
def remove_cable_distributor(self, cable_dist): """Removes a cable distributor from _cable_distributors if existing""" if cable_dist in self.cable_distributors() and isinstance(cable_dist, MVCableDistributorDing0): # remove from array and graph self._cable_distributors.remove(cable_dist) if self._graph.has_node(cable_dist): self._graph.remove_node(cable_dist)
[ "def", "remove_cable_distributor", "(", "self", ",", "cable_dist", ")", ":", "if", "cable_dist", "in", "self", ".", "cable_distributors", "(", ")", "and", "isinstance", "(", "cable_dist", ",", "MVCableDistributorDing0", ")", ":", "# remove from array and graph", "self", ".", "_cable_distributors", ".", "remove", "(", "cable_dist", ")", "if", "self", ".", "_graph", ".", "has_node", "(", "cable_dist", ")", ":", "self", ".", "_graph", ".", "remove_node", "(", "cable_dist", ")" ]
Removes a cable distributor from _cable_distributors if existing
[ "Removes", "a", "cable", "distributor", "from", "_cable_distributors", "if", "existing" ]
python
train
61
chrislit/abydos
abydos/stats/_confusion_table.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/stats/_confusion_table.py#L1347-L1378
def kappa_statistic(self): r"""Return κ statistic. The κ statistic is defined as: :math:`\kappa = \frac{accuracy - random~ accuracy} {1 - random~ accuracy}` The κ statistic compares the performance of the classifier relative to the performance of a random classifier. :math:`\kappa` = 0 indicates performance identical to random. :math:`\kappa` = 1 indicates perfect predictive success. :math:`\kappa` = -1 indicates perfect predictive failure. Returns ------- float The κ statistic of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.kappa_statistic() 0.5344129554655871 """ if self.population() == 0: return float('NaN') random_accuracy = ( (self._tn + self._fp) * (self._tn + self._fn) + (self._fn + self._tp) * (self._fp + self._tp) ) / self.population() ** 2 return (self.accuracy() - random_accuracy) / (1 - random_accuracy)
[ "def", "kappa_statistic", "(", "self", ")", ":", "if", "self", ".", "population", "(", ")", "==", "0", ":", "return", "float", "(", "'NaN'", ")", "random_accuracy", "=", "(", "(", "self", ".", "_tn", "+", "self", ".", "_fp", ")", "*", "(", "self", ".", "_tn", "+", "self", ".", "_fn", ")", "+", "(", "self", ".", "_fn", "+", "self", ".", "_tp", ")", "*", "(", "self", ".", "_fp", "+", "self", ".", "_tp", ")", ")", "/", "self", ".", "population", "(", ")", "**", "2", "return", "(", "self", ".", "accuracy", "(", ")", "-", "random_accuracy", ")", "/", "(", "1", "-", "random_accuracy", ")" ]
r"""Return κ statistic. The κ statistic is defined as: :math:`\kappa = \frac{accuracy - random~ accuracy} {1 - random~ accuracy}` The κ statistic compares the performance of the classifier relative to the performance of a random classifier. :math:`\kappa` = 0 indicates performance identical to random. :math:`\kappa` = 1 indicates perfect predictive success. :math:`\kappa` = -1 indicates perfect predictive failure. Returns ------- float The κ statistic of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.kappa_statistic() 0.5344129554655871
[ "r", "Return", "κ", "statistic", "." ]
python
valid
33.21875
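Reproducing the docstring example by hand confirms the formula: with tp=120, tn=60, fp=20, fn=30, κ works out to exactly 132/247.

tp, tn, fp, fn = 120, 60, 20, 30
population = tp + tn + fp + fn
accuracy = (tp + tn) / population
random_accuracy = ((tn + fp) * (tn + fn) + (fn + tp) * (fp + tp)) / population ** 2
kappa = (accuracy - random_accuracy) / (1 - random_accuracy)
assert abs(kappa - 0.5344129554655871) < 1e-12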
klahnakoski/pyLibrary
mo_math/stats.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_math/stats.py#L353-L370
def percentile(values, percent): """ PERCENTILE WITH INTERPOLATION RETURN VALUE AT, OR ABOVE, percentile OF THE VALUES snagged from http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/ """ N = sorted(values) if not N: return None k = (len(N) - 1) * percent f = int(math.floor(k)) c = int(math.ceil(k)) if f == c: return N[int(k)] d0 = N[f] * (c - k) d1 = N[c] * (k - f) return d0 + d1
[ "def", "percentile", "(", "values", ",", "percent", ")", ":", "N", "=", "sorted", "(", "values", ")", "if", "not", "N", ":", "return", "None", "k", "=", "(", "len", "(", "N", ")", "-", "1", ")", "*", "percent", "f", "=", "int", "(", "math", ".", "floor", "(", "k", ")", ")", "c", "=", "int", "(", "math", ".", "ceil", "(", "k", ")", ")", "if", "f", "==", "c", ":", "return", "N", "[", "int", "(", "k", ")", "]", "d0", "=", "N", "[", "f", "]", "*", "(", "c", "-", "k", ")", "d1", "=", "N", "[", "c", "]", "*", "(", "k", "-", "f", ")", "return", "d0", "+", "d1" ]
PERCENTILE WITH INTERPOLATION RETURN VALUE AT, OR ABOVE, percentile OF THE VALUES snagged from http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/
[ "PERCENTILE", "WITH", "INTERPOLATION", "RETURN", "VALUE", "AT", "OR", "ABOVE", "percentile", "OF", "THE", "VALUES" ]
python
train
26
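A worked instance of the interpolation above: for [10, 20, 30, 40] at percent=0.4, k = 1.2, so the result blends N[1] and N[2] as 20*0.8 + 30*0.2 = 22.

import math

N = sorted([40, 10, 30, 20])
k = (len(N) - 1) * 0.4
f, c = int(math.floor(k)), int(math.ceil(k))
result = N[f] * (c - k) + N[c] * (k - f)
assert abs(result - 22.0) < 1e-12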
praekeltfoundation/molo.commenting
molo/commenting/templatetags/molo_commenting_tags.py
https://github.com/praekeltfoundation/molo.commenting/blob/94549bd75e4a5c5b3db43149e32d636330b3969c/molo/commenting/templatetags/molo_commenting_tags.py#L75-L95
def get_comments_content_object(parser, token):
    """
    Get a limited set of comments for a given object.

    Defaults to a limit of 5. Setting the limit to -1 disables limiting.

    usage: {% get_comments_content_object for form_object as variable_name %}
    """
    keywords = token.contents.split()
    if len(keywords) != 5:
        raise template.TemplateSyntaxError(
            "'%s' tag takes exactly 2 arguments" % (keywords[0],))
    if keywords[1] != 'for':
        raise template.TemplateSyntaxError(
            "first argument to '%s' tag must be 'for'" % (keywords[0],))
    if keywords[3] != 'as':
        raise template.TemplateSyntaxError(
            "third argument to '%s' tag must be 'as'" % (keywords[0],))
    return GetCommentsContentObject(keywords[2], keywords[4])
[ "def", "get_comments_content_object", "(", "parser", ",", "token", ")", ":", "keywords", "=", "token", ".", "contents", ".", "split", "(", ")", "if", "len", "(", "keywords", ")", "!=", "5", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"'%s' tag takes exactly 2 arguments\"", "%", "(", "keywords", "[", "0", "]", ",", ")", ")", "if", "keywords", "[", "1", "]", "!=", "'for'", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"first argument to '%s' tag must be 'for'\"", "%", "(", "keywords", "[", "0", "]", ",", ")", ")", "if", "keywords", "[", "3", "]", "!=", "'as'", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"first argument to '%s' tag must be 'as'\"", "%", "(", "keywords", "[", "0", "]", ",", ")", ")", "return", "GetCommentsContentObject", "(", "keywords", "[", "2", "]", ",", "keywords", "[", "4", "]", ")" ]
Get a limited set of comments for a given object. Defaults to a limit of 5. Setting the limit to -1 disables limiting. usage: {% get_comments_content_object for form_object as variable_name %}
[ "Get", "a", "limited", "set", "of", "comments", "for", "a", "given", "object", ".", "Defaults", "to", "a", "limit", "of", "5", ".", "Setting", "the", "limit", "to", "-", "1", "disables", "limiting", "." ]
python
train
37.47619
ericsomdahl/python-bittrex
bittrex/bittrex.py
https://github.com/ericsomdahl/python-bittrex/blob/2dbc08e3221e07a9e618eaa025d98ed197d28e31/bittrex/bittrex.py#L401-L418
def get_open_orders(self, market=None): """ Get all orders that you currently have opened. A specific market can be requested. Endpoint: 1.1 /market/getopenorders 2.0 /key/market/getopenorders :param market: String literal for the market (ie. BTC-LTC) :type market: str :return: Open orders info in JSON :rtype : dict """ return self._api_query(path_dict={ API_V1_1: '/market/getopenorders', API_V2_0: '/key/market/getopenorders' }, options={'market': market, 'marketname': market} if market else None, protection=PROTECTION_PRV)
[ "def", "get_open_orders", "(", "self", ",", "market", "=", "None", ")", ":", "return", "self", ".", "_api_query", "(", "path_dict", "=", "{", "API_V1_1", ":", "'/market/getopenorders'", ",", "API_V2_0", ":", "'/key/market/getopenorders'", "}", ",", "options", "=", "{", "'market'", ":", "market", ",", "'marketname'", ":", "market", "}", "if", "market", "else", "None", ",", "protection", "=", "PROTECTION_PRV", ")" ]
Get all orders that you currently have opened. A specific market can be requested. Endpoint: 1.1 /market/getopenorders 2.0 /key/market/getopenorders :param market: String literal for the market (ie. BTC-LTC) :type market: str :return: Open orders info in JSON :rtype : dict
[ "Get", "all", "orders", "that", "you", "currently", "have", "opened", ".", "A", "specific", "market", "can", "be", "requested", "." ]
python
train
35.666667
aaren/notedown
notedown/notedown.py
https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L322-L327
def create_markdown_cell(block): """Create a markdown cell from a block.""" kwargs = {'cell_type': block['type'], 'source': block['content']} markdown_cell = nbbase.new_markdown_cell(**kwargs) return markdown_cell
[ "def", "create_markdown_cell", "(", "block", ")", ":", "kwargs", "=", "{", "'cell_type'", ":", "block", "[", "'type'", "]", ",", "'source'", ":", "block", "[", "'content'", "]", "}", "markdown_cell", "=", "nbbase", ".", "new_markdown_cell", "(", "*", "*", "kwargs", ")", "return", "markdown_cell" ]
Create a markdown cell from a block.
[ "Create", "a", "markdown", "cell", "from", "a", "block", "." ]
python
train
43
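Assuming nbbase maps to nbformat's v4 builders (an assumption about the import, not confirmed by this record), a block dict turns into a notebook cell like so:

from nbformat.v4 import new_markdown_cell, new_notebook

block = {'type': 'markdown', 'content': '# Title'}
cell = new_markdown_cell(source=block['content'])
nb = new_notebook(cells=[cell])
assert nb.cells[0].cell_type == 'markdown'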
calston/rhumba
rhumba/backends/redis.py
https://github.com/calston/rhumba/blob/05e3cbf4e531cc51b4777912eb98a4f006893f5e/rhumba/backends/redis.py#L207-L226
def clusterQueues(self): """ Return a dict of queues in cluster and servers running them """ servers = yield self.getClusterServers() queues = {} for sname in servers: qs = yield self.get('rhumba.server.%s.queues' % sname) uuid = yield self.get('rhumba.server.%s.uuid' % sname) qs = json.loads(qs) for q in qs: if q not in queues: queues[q] = [] queues[q].append({'host': sname, 'uuid': uuid}) defer.returnValue(queues)
[ "def", "clusterQueues", "(", "self", ")", ":", "servers", "=", "yield", "self", ".", "getClusterServers", "(", ")", "queues", "=", "{", "}", "for", "sname", "in", "servers", ":", "qs", "=", "yield", "self", ".", "get", "(", "'rhumba.server.%s.queues'", "%", "sname", ")", "uuid", "=", "yield", "self", ".", "get", "(", "'rhumba.server.%s.uuid'", "%", "sname", ")", "qs", "=", "json", ".", "loads", "(", "qs", ")", "for", "q", "in", "qs", ":", "if", "q", "not", "in", "queues", ":", "queues", "[", "q", "]", "=", "[", "]", "queues", "[", "q", "]", ".", "append", "(", "{", "'host'", ":", "sname", ",", "'uuid'", ":", "uuid", "}", ")", "defer", ".", "returnValue", "(", "queues", ")" ]
Return a dict of queues in cluster and servers running them
[ "Return", "a", "dict", "of", "queues", "in", "cluster", "and", "servers", "running", "them" ]
python
train
28.05
OSLL/jabba
jabba/synonym_parser.py
https://github.com/OSLL/jabba/blob/71c1d008ab497020fba6ffa12a600721eb3f5ef7/jabba/synonym_parser.py#L79-L94
def parse_from_array(arr): """ Parse 2d array into synonym set Every array inside arr is considered a set of synonyms """ syn_set = SynonymSet() for synonyms in arr: _set = set() for synonym in synonyms: _set.add(synonym) syn_set.add_set(_set) return syn_set
[ "def", "parse_from_array", "(", "arr", ")", ":", "syn_set", "=", "SynonymSet", "(", ")", "for", "synonyms", "in", "arr", ":", "_set", "=", "set", "(", ")", "for", "synonym", "in", "synonyms", ":", "_set", ".", "add", "(", "synonym", ")", "syn_set", ".", "add_set", "(", "_set", ")", "return", "syn_set" ]
Parse 2d array into synonym set Every array inside arr is considered a set of synonyms
[ "Parse", "2d", "array", "into", "synonym", "set", "Every", "array", "inside", "arr", "is", "considered", "a", "set", "of", "synonyms" ]
python
train
19.4375
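The grouping behaviour, mirrored with plain Python sets (SynonymSet itself lives in jabba; the words are arbitrary):

arr = [['job', 'task'], ['node', 'host', 'machine']]
groups = [set(synonyms) for synonyms in arr]
assert {'node', 'host', 'machine'} in groups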
coumbole/mailscanner
mailscanner/reader.py
https://github.com/coumbole/mailscanner/blob/ead19ac8c7dee27e507c1593032863232c13f636/mailscanner/reader.py#L75-L111
def fetch_all_messages(self, conn, directory, readonly): """ Fetches all messages at @conn from @directory. Params: conn IMAP4_SSL connection directory The IMAP directory to look for readonly readonly mode, true or false Returns: List of subject-body tuples """ conn.select(directory, readonly) message_data = [] typ, data = conn.search(None, 'All') # Loop through each message object for num in data[0].split(): typ, data = conn.fetch(num, '(RFC822)') for response_part in data: if isinstance(response_part, tuple): email_parser = email.parser.BytesFeedParser() email_parser.feed(response_part[1]) msg = email_parser.close() body = self.get_body(msg) subject = self.get_subject(msg) message_data.append((subject, body)) return message_data
[ "def", "fetch_all_messages", "(", "self", ",", "conn", ",", "directory", ",", "readonly", ")", ":", "conn", ".", "select", "(", "directory", ",", "readonly", ")", "message_data", "=", "[", "]", "typ", ",", "data", "=", "conn", ".", "search", "(", "None", ",", "'All'", ")", "# Loop through each message object", "for", "num", "in", "data", "[", "0", "]", ".", "split", "(", ")", ":", "typ", ",", "data", "=", "conn", ".", "fetch", "(", "num", ",", "'(RFC822)'", ")", "for", "response_part", "in", "data", ":", "if", "isinstance", "(", "response_part", ",", "tuple", ")", ":", "email_parser", "=", "email", ".", "parser", ".", "BytesFeedParser", "(", ")", "email_parser", ".", "feed", "(", "response_part", "[", "1", "]", ")", "msg", "=", "email_parser", ".", "close", "(", ")", "body", "=", "self", ".", "get_body", "(", "msg", ")", "subject", "=", "self", ".", "get_subject", "(", "msg", ")", "message_data", ".", "append", "(", "(", "subject", ",", "body", ")", ")", "return", "message_data" ]
Fetches all messages at @conn from @directory. Params: conn IMAP4_SSL connection directory The IMAP directory to look for readonly readonly mode, true or false Returns: List of subject-body tuples
[ "Fetches", "all", "messages", "at", "@conn", "from", "@directory", "." ]
python
train
28.135135
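The parsing step above, run on an inline RFC822 message instead of a live IMAP fetch; the message contents are made up:

import email.parser

feed_parser = email.parser.BytesFeedParser()
feed_parser.feed(b'Subject: Hello\r\n\r\nBody text\r\n')
msg = feed_parser.close()
assert msg['Subject'] == 'Hello' and msg.get_payload().strip() == 'Body text'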
KelSolaar/Umbra
umbra/components/factory/script_editor/search_in_files.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/search_in_files.py#L1405-L1443
def save_files(self, nodes):
    """
    Saves user defined files using given nodes.

    :param nodes: Nodes.
    :type nodes: list
    :return: Method success.
    :rtype: bool
    """

    metrics = {"Opened": 0, "Cached": 0}

    for node in nodes:
        file = node.file
        if self.__container.get_editor(file):
            if self.__container.save_file(file):
                metrics["Opened"] += 1
                self.__uncache(file)
        else:
            cache_data = self.__files_cache.get_content(file)
            if cache_data is None:
                LOGGER.warning(
                    "!> {0} | '{1}' file doesn't exist in files cache!".format(self.__class__.__name__, file))
                continue

            if cache_data.document:
                file_handle = File(file)
                file_handle.content = [cache_data.document.toPlainText().toUtf8()]
                if file_handle.write():
                    metrics["Cached"] += 1
                    self.__uncache(file)
            else:
                LOGGER.warning(
                    "!> {0} | '{1}' file document doesn't exist in files cache!".format(self.__class__.__name__,
                                                                                        file))

    self.__container.engine.notifications_manager.notify(
        "{0} | '{1}' opened file(s) and '{2}' cached file(s) saved!".format(self.__class__.__name__,
                                                                            metrics["Opened"],
                                                                            metrics["Cached"]))
[ "def", "save_files", "(", "self", ",", "nodes", ")", ":", "metrics", "=", "{", "\"Opened\"", ":", "0", ",", "\"Cached\"", ":", "0", "}", "for", "node", "in", "nodes", ":", "file", "=", "node", ".", "file", "if", "self", ".", "__container", ".", "get_editor", "(", "file", ")", ":", "if", "self", ".", "__container", ".", "save_file", "(", "file", ")", ":", "metrics", "[", "\"Opened\"", "]", "+=", "1", "self", ".", "__uncache", "(", "file", ")", "else", ":", "cache_data", "=", "self", ".", "__files_cache", ".", "get_content", "(", "file", ")", "if", "cache_data", "is", "None", ":", "LOGGER", ".", "warning", "(", "\"!> {0} | '{1}' file doesn't exists in files cache!\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "file", ")", ")", "continue", "if", "cache_data", ".", "document", ":", "file_handle", "=", "File", "(", "file", ")", "file_handle", ".", "content", "=", "[", "cache_data", ".", "document", ".", "toPlainText", "(", ")", ".", "toUtf8", "(", ")", "]", "if", "file_handle", ".", "write", "(", ")", ":", "metrics", "[", "\"Cached\"", "]", "+=", "1", "self", ".", "__uncache", "(", "file", ")", "else", ":", "LOGGER", ".", "warning", "(", "\"!> {0} | '{1}' file document doesn't exists in files cache!\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "file", ")", ")", "self", ".", "__container", ".", "engine", ".", "notifications_manager", ".", "notify", "(", "\"{0} | '{1}' opened file(s) and '{2}' cached file(s) saved!\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "metrics", "[", "\"Opened\"", "]", ",", "metrics", "[", "\"Cached\"", "]", ")", ")" ]
Saves user defined files using give nodes. :param nodes: Nodes. :type nodes: list :return: Method success. :rtype: bool
[ "Saves", "user", "defined", "files", "using", "give", "nodes", "." ]
python
train
44.102564
saltstack/salt
salt/pillar/reclass_adapter.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/reclass_adapter.py#L93-L135
def ext_pillar(minion_id, pillar, **kwargs): ''' Obtain the Pillar data from **reclass** for the given ``minion_id``. ''' # If reclass is installed, __virtual__ put it onto the search path, so we # don't need to protect against ImportError: # pylint: disable=3rd-party-module-not-gated from reclass.adapters.salt import ext_pillar as reclass_ext_pillar from reclass.errors import ReclassException # pylint: enable=3rd-party-module-not-gated try: # the source path we used above isn't something reclass needs to care # about, so filter it: filter_out_source_path_option(kwargs) # if no inventory_base_uri was specified, initialize it to the first # file_roots of class 'base' (if that exists): set_inventory_base_uri_default(__opts__, kwargs) # I purposely do not pass any of __opts__ or __salt__ or __grains__ # to reclass, as I consider those to be Salt-internal and reclass # should not make any assumptions about it. return reclass_ext_pillar(minion_id, pillar, **kwargs) except TypeError as e: if 'unexpected keyword argument' in six.text_type(e): arg = six.text_type(e).split()[-1] raise SaltInvocationError('ext_pillar.reclass: unexpected option: ' + arg) else: raise except KeyError as e: if 'id' in six.text_type(e): raise SaltInvocationError('ext_pillar.reclass: __opts__ does not ' 'define minion ID') else: raise except ReclassException as e: raise SaltInvocationError('ext_pillar.reclass: {0}'.format(e))
[ "def", "ext_pillar", "(", "minion_id", ",", "pillar", ",", "*", "*", "kwargs", ")", ":", "# If reclass is installed, __virtual__ put it onto the search path, so we", "# don't need to protect against ImportError:", "# pylint: disable=3rd-party-module-not-gated", "from", "reclass", ".", "adapters", ".", "salt", "import", "ext_pillar", "as", "reclass_ext_pillar", "from", "reclass", ".", "errors", "import", "ReclassException", "# pylint: enable=3rd-party-module-not-gated", "try", ":", "# the source path we used above isn't something reclass needs to care", "# about, so filter it:", "filter_out_source_path_option", "(", "kwargs", ")", "# if no inventory_base_uri was specified, initialize it to the first", "# file_roots of class 'base' (if that exists):", "set_inventory_base_uri_default", "(", "__opts__", ",", "kwargs", ")", "# I purposely do not pass any of __opts__ or __salt__ or __grains__", "# to reclass, as I consider those to be Salt-internal and reclass", "# should not make any assumptions about it.", "return", "reclass_ext_pillar", "(", "minion_id", ",", "pillar", ",", "*", "*", "kwargs", ")", "except", "TypeError", "as", "e", ":", "if", "'unexpected keyword argument'", "in", "six", ".", "text_type", "(", "e", ")", ":", "arg", "=", "six", ".", "text_type", "(", "e", ")", ".", "split", "(", ")", "[", "-", "1", "]", "raise", "SaltInvocationError", "(", "'ext_pillar.reclass: unexpected option: '", "+", "arg", ")", "else", ":", "raise", "except", "KeyError", "as", "e", ":", "if", "'id'", "in", "six", ".", "text_type", "(", "e", ")", ":", "raise", "SaltInvocationError", "(", "'ext_pillar.reclass: __opts__ does not '", "'define minion ID'", ")", "else", ":", "raise", "except", "ReclassException", "as", "e", ":", "raise", "SaltInvocationError", "(", "'ext_pillar.reclass: {0}'", ".", "format", "(", "e", ")", ")" ]
Obtain the Pillar data from **reclass** for the given ``minion_id``.
[ "Obtain", "the", "Pillar", "data", "from", "**", "reclass", "**", "for", "the", "given", "minion_id", "." ]
python
train
39.27907
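The TypeError branch above turns Python's own "unexpected keyword argument" message into a friendlier error; a toy sketch of that extraction trick (all names here are made up):

def call_checked(**kwargs):
    def inner(a=1):          # accepts only 'a'
        return a
    try:
        return inner(**kwargs)
    except TypeError as e:
        if 'unexpected keyword argument' in str(e):
            arg = str(e).split()[-1]   # e.g. "'b'"
            raise ValueError('unexpected option: ' + arg)
        raise

call_checked(a=2)    # fine
# call_checked(b=3) -> ValueError: unexpected option: 'b'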
raiden-network/raiden
raiden/encoding/format.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/encoding/format.py#L58-L189
def namedbuffer(buffer_name, fields_spec): # noqa (ignore ciclomatic complexity) """ Class factory, returns a class to wrap a buffer instance and expose the data as fields. The field spec specifies how many bytes should be used for a field and what is the encoding / decoding function. """ # pylint: disable=protected-access,unused-argument if not len(buffer_name): raise ValueError('buffer_name is empty') if not len(fields_spec): raise ValueError('fields_spec is empty') fields = [ field for field in fields_spec if not isinstance(field, Pad) ] if any(field.size_bytes < 0 for field in fields): raise ValueError('negative size_bytes') if any(len(field.name) < 0 for field in fields): raise ValueError('field missing name') names_fields = { field.name: field for field in fields } if 'data' in names_fields: raise ValueError('data field shadowing underlying buffer') if any(count > 1 for count in Counter(field.name for field in fields).values()): raise ValueError('repeated field name') # big endian format fields_format = '>' + ''.join(field.format_string for field in fields_spec) size = sum(field.size_bytes for field in fields_spec) names_slices = compute_slices(fields_spec) sorted_names = sorted(names_fields.keys()) @staticmethod def get_bytes_from(buffer_, name): slice_ = names_slices[name] return buffer_[slice_] def __init__(self, data): if len(data) < size: raise InvalidProtocolMessage( 'data buffer has less than the expected size {}'.format(size), ) object.__setattr__(self, 'data', data) # Intentionally exposing only the attributes from the spec, since the idea # is for the instance to expose the underlying buffer as attributes def __getattribute__(self, name): if name in names_slices: slice_ = names_slices[name] field = names_fields[name] data = object.__getattribute__(self, 'data') value = data[slice_] if field.encoder: value = field.encoder.decode(value) return value if name == 'data': return object.__getattribute__(self, 'data') raise AttributeError def __setattr__(self, name, value): if name in names_slices: slice_ = names_slices[name] field = names_fields[name] if field.encoder: field.encoder.validate(value) value = field.encoder.encode(value, field.size_bytes) length = len(value) if length > field.size_bytes: msg = 'value with length {length} for {attr} is too big'.format( length=length, attr=name, ) raise ValueError(msg) elif length < field.size_bytes: pad_size = field.size_bytes - length pad_value = b'\x00' * pad_size value = pad_value + value data = object.__getattribute__(self, 'data') if isinstance(value, str): value = value.encode() data[slice_] = value else: super(self.__class__, self).__setattr__(name, value) def __repr__(self): return '<{} [...]>'.format(buffer_name) def __len__(self): return size def __dir__(self): return sorted_names attributes = { '__init__': __init__, '__slots__': ('data',), '__getattribute__': __getattribute__, '__setattr__': __setattr__, '__repr__': __repr__, '__len__': __len__, '__dir__': __dir__, # These are class attributes hidden from instance, i.e. must be # accessed through the class instance. 'fields_spec': fields_spec, 'format': fields_format, 'size': size, 'get_bytes_from': get_bytes_from, } return type(buffer_name, (), attributes)
[ "def", "namedbuffer", "(", "buffer_name", ",", "fields_spec", ")", ":", "# noqa (ignore ciclomatic complexity)", "# pylint: disable=protected-access,unused-argument", "if", "not", "len", "(", "buffer_name", ")", ":", "raise", "ValueError", "(", "'buffer_name is empty'", ")", "if", "not", "len", "(", "fields_spec", ")", ":", "raise", "ValueError", "(", "'fields_spec is empty'", ")", "fields", "=", "[", "field", "for", "field", "in", "fields_spec", "if", "not", "isinstance", "(", "field", ",", "Pad", ")", "]", "if", "any", "(", "field", ".", "size_bytes", "<", "0", "for", "field", "in", "fields", ")", ":", "raise", "ValueError", "(", "'negative size_bytes'", ")", "if", "any", "(", "len", "(", "field", ".", "name", ")", "<", "0", "for", "field", "in", "fields", ")", ":", "raise", "ValueError", "(", "'field missing name'", ")", "names_fields", "=", "{", "field", ".", "name", ":", "field", "for", "field", "in", "fields", "}", "if", "'data'", "in", "names_fields", ":", "raise", "ValueError", "(", "'data field shadowing underlying buffer'", ")", "if", "any", "(", "count", ">", "1", "for", "count", "in", "Counter", "(", "field", ".", "name", "for", "field", "in", "fields", ")", ".", "values", "(", ")", ")", ":", "raise", "ValueError", "(", "'repeated field name'", ")", "# big endian format", "fields_format", "=", "'>'", "+", "''", ".", "join", "(", "field", ".", "format_string", "for", "field", "in", "fields_spec", ")", "size", "=", "sum", "(", "field", ".", "size_bytes", "for", "field", "in", "fields_spec", ")", "names_slices", "=", "compute_slices", "(", "fields_spec", ")", "sorted_names", "=", "sorted", "(", "names_fields", ".", "keys", "(", ")", ")", "@", "staticmethod", "def", "get_bytes_from", "(", "buffer_", ",", "name", ")", ":", "slice_", "=", "names_slices", "[", "name", "]", "return", "buffer_", "[", "slice_", "]", "def", "__init__", "(", "self", ",", "data", ")", ":", "if", "len", "(", "data", ")", "<", "size", ":", "raise", "InvalidProtocolMessage", "(", "'data buffer has less than the expected size {}'", ".", "format", "(", "size", ")", ",", ")", "object", ".", "__setattr__", "(", "self", ",", "'data'", ",", "data", ")", "# Intentionally exposing only the attributes from the spec, since the idea", "# is for the instance to expose the underlying buffer as attributes", "def", "__getattribute__", "(", "self", ",", "name", ")", ":", "if", "name", "in", "names_slices", ":", "slice_", "=", "names_slices", "[", "name", "]", "field", "=", "names_fields", "[", "name", "]", "data", "=", "object", ".", "__getattribute__", "(", "self", ",", "'data'", ")", "value", "=", "data", "[", "slice_", "]", "if", "field", ".", "encoder", ":", "value", "=", "field", ".", "encoder", ".", "decode", "(", "value", ")", "return", "value", "if", "name", "==", "'data'", ":", "return", "object", ".", "__getattribute__", "(", "self", ",", "'data'", ")", "raise", "AttributeError", "def", "__setattr__", "(", "self", ",", "name", ",", "value", ")", ":", "if", "name", "in", "names_slices", ":", "slice_", "=", "names_slices", "[", "name", "]", "field", "=", "names_fields", "[", "name", "]", "if", "field", ".", "encoder", ":", "field", ".", "encoder", ".", "validate", "(", "value", ")", "value", "=", "field", ".", "encoder", ".", "encode", "(", "value", ",", "field", ".", "size_bytes", ")", "length", "=", "len", "(", "value", ")", "if", "length", ">", "field", ".", "size_bytes", ":", "msg", "=", "'value with length {length} for {attr} is too big'", ".", "format", "(", "length", "=", "length", ",", "attr", "=", "name", 
",", ")", "raise", "ValueError", "(", "msg", ")", "elif", "length", "<", "field", ".", "size_bytes", ":", "pad_size", "=", "field", ".", "size_bytes", "-", "length", "pad_value", "=", "b'\\x00'", "*", "pad_size", "value", "=", "pad_value", "+", "value", "data", "=", "object", ".", "__getattribute__", "(", "self", ",", "'data'", ")", "if", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "value", ".", "encode", "(", ")", "data", "[", "slice_", "]", "=", "value", "else", ":", "super", "(", "self", ".", "__class__", ",", "self", ")", ".", "__setattr__", "(", "name", ",", "value", ")", "def", "__repr__", "(", "self", ")", ":", "return", "'<{} [...]>'", ".", "format", "(", "buffer_name", ")", "def", "__len__", "(", "self", ")", ":", "return", "size", "def", "__dir__", "(", "self", ")", ":", "return", "sorted_names", "attributes", "=", "{", "'__init__'", ":", "__init__", ",", "'__slots__'", ":", "(", "'data'", ",", ")", ",", "'__getattribute__'", ":", "__getattribute__", ",", "'__setattr__'", ":", "__setattr__", ",", "'__repr__'", ":", "__repr__", ",", "'__len__'", ":", "__len__", ",", "'__dir__'", ":", "__dir__", ",", "# These are class attributes hidden from instance, i.e. must be", "# accessed through the class instance.", "'fields_spec'", ":", "fields_spec", ",", "'format'", ":", "fields_format", ",", "'size'", ":", "size", ",", "'get_bytes_from'", ":", "get_bytes_from", ",", "}", "return", "type", "(", "buffer_name", ",", "(", ")", ",", "attributes", ")" ]
Class factory, returns a class to wrap a buffer instance and expose the data as fields. The field spec specifies how many bytes should be used for a field and what is the encoding / decoding function.
[ "Class", "factory", "returns", "a", "class", "to", "wrap", "a", "buffer", "instance", "and", "expose", "the", "data", "as", "fields", "." ]
python
train
30.280303
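A minimal usage sketch; the Field/Pad stand-ins and the compute_slices stub below only mimic the attributes and helper the factory reads (the real definitions live elsewhere in raiden/encoding/format.py, so treat them as assumptions):

from collections import namedtuple

Field = namedtuple('Field', 'name size_bytes format_string encoder')

class Pad:                       # stand-in: marks unnamed padding bytes
    pass

def compute_slices(fields_spec):
    # Stub: map each named field to its byte slice within the buffer.
    slices, offset = {}, 0
    for field in fields_spec:
        if not isinstance(field, Pad):
            slices[field.name] = slice(offset, offset + field.size_bytes)
        offset += field.size_bytes
    return slices

Ping = namedbuffer('Ping', [
    Field('cmdid', 1, 'B', None),
    Field('nonce', 8, '8s', None),
])
msg = Ping(bytearray(Ping.size))   # 9 zeroed bytes
msg.cmdid = b'\x01'                # written straight into the buffer
assert len(msg) == 9 and msg.cmdid == b'\x01'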
robertpeteuil/multi-cloud-control
mcc/uimode.py
https://github.com/robertpeteuil/multi-cloud-control/blob/f1565af1c0b6ed465ff312d3ccc592ba0609f4a2/mcc/uimode.py#L343-L350
def input_yn(conf_mess): """Print Confirmation Message and Get Y/N response from user.""" ui_erase_ln() ui_print(conf_mess) with term.cbreak(): input_flush() val = input_by_key() return bool(val.lower() == 'y')
[ "def", "input_yn", "(", "conf_mess", ")", ":", "ui_erase_ln", "(", ")", "ui_print", "(", "conf_mess", ")", "with", "term", ".", "cbreak", "(", ")", ":", "input_flush", "(", ")", "val", "=", "input_by_key", "(", ")", "return", "bool", "(", "val", ".", "lower", "(", ")", "==", "'y'", ")" ]
Print Confirmation Message and Get Y/N response from user.
[ "Print", "Confirmation", "Message", "and", "Get", "Y", "/", "N", "response", "from", "user", "." ]
python
train
29.875
JoseAntFer/pyny3d
pyny3d/geoms.py
https://github.com/JoseAntFer/pyny3d/blob/fb81684935a24f7e50c975cb4383c81a63ab56df/pyny3d/geoms.py#L415-L424
def get_domain(self): """ :returns: opposite vertices of the bounding prism for this object. :rtype: ndarray([min], [max]) """ if self.domain is None: return np.array([self.points.min(axis=0), self.points.max(axis=0)]) return self.domain
[ "def", "get_domain", "(", "self", ")", ":", "if", "self", ".", "domain", "is", "None", ":", "return", "np", ".", "array", "(", "[", "self", ".", "points", ".", "min", "(", "axis", "=", "0", ")", ",", "self", ".", "points", ".", "max", "(", "axis", "=", "0", ")", "]", ")", "return", "self", ".", "domain" ]
:returns: opposite vertices of the bounding prism for this object. :rtype: ndarray([min], [max])
[ ":", "returns", ":", "opposite", "vertices", "of", "the", "bounding", "prism", "for", "this", "object", ".", ":", "rtype", ":", "ndarray", "(", "[", "min", "]", "[", "max", "]", ")" ]
python
train
34
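The bounding "domain" is just the per-axis min/max of the point cloud; a quick NumPy illustration:

import numpy as np

points = np.array([[0., 0., 0.],
                   [2., 1., 3.],
                   [1., 4., 2.]])
domain = np.array([points.min(axis=0), points.max(axis=0)])
# domain -> [[0. 0. 0.]   (minimum corner)
#            [2. 4. 3.]]  (maximum corner)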
datacamp/sqlwhat
sqlwhat/checks/check_funcs.py
https://github.com/datacamp/sqlwhat/blob/9ae798c63124f994607a0e2c120b24ebbb2bdbe9/sqlwhat/checks/check_funcs.py#L471-L540
def check_query(state, query, error_msg=None, expand_msg=None):
    """Run arbitrary queries against the DB connection to verify the database state.

    For queries that do not return any output (INSERTs, UPDATEs, ...),
    you cannot use functions like ``check_col()`` and ``is_equal()`` to verify the query result.

    ``check_query()`` will rerun the solution query in the transaction prepared by sqlbackend,
    and immediately afterwards run the query specified in ``query``.

    Next, it will also run this query after rerunning the student query in a transaction.

    Finally, it produces a child state with these results, that you can then chain off of
    with functions like ``check_column()`` and ``has_equal_value()``.

    Args:
        query: A SQL query as a string that is executed after the student query is re-executed.
        error_msg: if specified, this overrides the automatically generated feedback
            message in case the query generated an error.
        expand_msg: if specified, this overrides the automatically generated feedback
            message that is prepended to feedback messages that are thrown
            further in the SCT chain.

    :Example:

        Suppose we are checking whether an INSERT happened correctly: ::

            INSERT INTO company VALUES (2, 'filip', 28, 'sql-lane', 42)

        We can write the following SCT: ::

            Ex().check_query('SELECT COUNT(*) AS c FROM company').has_equal_value()

    """
    if error_msg is None:
        error_msg = "Running `{{query}}` after your submission generated an error."
    if expand_msg is None:
        expand_msg = "The autograder verified the result of running `{{query}}` against the database. "
    msg_kwargs = {"query": query}

    # before redoing the query,
    # make sure that it didn't generate any errors
    has_no_error(state)

    _msg = state.build_message(error_msg, fmt_kwargs=msg_kwargs)

    # sqlbackend makes sure all queries are run in transactions.
    # Rerun the solution code first, after which we run the provided query
    with dbconn(state.solution_conn) as conn:
        _ = runQuery(conn, state.solution_code)
        sol_res = runQuery(conn, query)

    if sol_res is None:
        raise ValueError("Solution failed: " + _msg)

    # sqlbackend makes sure all queries are run in transactions.
    # Rerun the student code first, after which we run the provided query
    with dbconn(state.student_conn) as conn:
        _ = runQuery(conn, state.student_code)
        stu_res = runQuery(conn, query)

    if stu_res is None:
        state.do_test(_msg)

    return state.to_child(
        append_message={"msg": expand_msg, "kwargs": msg_kwargs},
        student_result=stu_res,
        solution_result=sol_res,
    )
[ "def", "check_query", "(", "state", ",", "query", ",", "error_msg", "=", "None", ",", "expand_msg", "=", "None", ")", ":", "if", "error_msg", "is", "None", ":", "error_msg", "=", "\"Running `{{query}}` after your submission generated an error.\"", "if", "expand_msg", "is", "None", ":", "expand_msg", "=", "\"The autograder verified the result of running `{{query}}` against the database. \"", "msg_kwargs", "=", "{", "\"query\"", ":", "query", "}", "# before redoing the query,", "# make sure that it didn't generate any errors", "has_no_error", "(", "state", ")", "_msg", "=", "state", ".", "build_message", "(", "error_msg", ",", "fmt_kwargs", "=", "msg_kwargs", ")", "# sqlbackend makes sure all queries are run in transactions.", "# Rerun the solution code first, after which we run the provided query", "with", "dbconn", "(", "state", ".", "solution_conn", ")", "as", "conn", ":", "_", "=", "runQuery", "(", "conn", ",", "state", ".", "solution_code", ")", "sol_res", "=", "runQuery", "(", "conn", ",", "query", ")", "if", "sol_res", "is", "None", ":", "raise", "ValueError", "(", "\"Solution failed: \"", "+", "_msg", ")", "# sqlbackend makes sure all queries are run in transactions.", "# Rerun the student code first, after which we run the provided query", "with", "dbconn", "(", "state", ".", "student_conn", ")", "as", "conn", ":", "_", "=", "runQuery", "(", "conn", ",", "state", ".", "student_code", ")", "stu_res", "=", "runQuery", "(", "conn", ",", "query", ")", "if", "stu_res", "is", "None", ":", "state", ".", "do_test", "(", "_msg", ")", "return", "state", ".", "to_child", "(", "append_message", "=", "{", "\"msg\"", ":", "expand_msg", ",", "\"kwargs\"", ":", "msg_kwargs", "}", ",", "student_result", "=", "stu_res", ",", "solution_result", "=", "sol_res", ",", ")" ]
Run arbitrary queries against the DB connection to verify the database state.

For queries that do not return any output (INSERTs, UPDATEs, ...),
you cannot use functions like ``check_col()`` and ``is_equal()`` to verify the query result.

``check_query()`` will rerun the solution query in the transaction prepared by sqlbackend,
and immediately afterwards run the query specified in ``query``.

Next, it will also run this query after rerunning the student query in a transaction.

Finally, it produces a child state with these results, that you can then chain off of
with functions like ``check_column()`` and ``has_equal_value()``.

Args:
    query: A SQL query as a string that is executed after the student query is re-executed.
    error_msg: if specified, this overrides the automatically generated feedback
        message in case the query generated an error.
    expand_msg: if specified, this overrides the automatically generated feedback
        message that is prepended to feedback messages that are thrown
        further in the SCT chain.

:Example:

    Suppose we are checking whether an INSERT happened correctly: ::

        INSERT INTO company VALUES (2, 'filip', 28, 'sql-lane', 42)

    We can write the following SCT: ::

        Ex().check_query('SELECT COUNT(*) AS c FROM company').has_equal_value()
[ "Run", "arbitrary", "queries", "against", "the", "DB", "connection", "to", "verify", "the", "database", "state", "." ]
python
train
39.057143
Yubico/python-yubico
yubico/yubikey_config.py
https://github.com/Yubico/python-yubico/blob/a72e8eddb90da6ee96e29f60912ca1f2872c9aea/yubico/yubikey_config.py#L188-L205
def fixed_string(self, data=None): """ The fixed string is used to identify a particular Yubikey device. The fixed string is referred to as the 'Token Identifier' in OATH-HOTP mode. The length of the fixed string can be set between 0 and 16 bytes. Tip: This can also be used to extend the length of a static password. """ old = self.fixed if data != None: new = self._decode_input_string(data) if len(new) <= 16: self.fixed = new else: raise yubico_exception.InputError('The "fixed" string must be 0..16 bytes') return old
[ "def", "fixed_string", "(", "self", ",", "data", "=", "None", ")", ":", "old", "=", "self", ".", "fixed", "if", "data", "!=", "None", ":", "new", "=", "self", ".", "_decode_input_string", "(", "data", ")", "if", "len", "(", "new", ")", "<=", "16", ":", "self", ".", "fixed", "=", "new", "else", ":", "raise", "yubico_exception", ".", "InputError", "(", "'The \"fixed\" string must be 0..16 bytes'", ")", "return", "old" ]
The fixed string is used to identify a particular Yubikey device. The fixed string is referred to as the 'Token Identifier' in OATH-HOTP mode. The length of the fixed string can be set between 0 and 16 bytes. Tip: This can also be used to extend the length of a static password.
[ "The", "fixed", "string", "is", "used", "to", "identify", "a", "particular", "Yubikey", "device", "." ]
python
train
36
mozilla/mozdownload
mozdownload/scraper.py
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L552-L564
def path_regex(self): """Return the regex for the path to the build folder.""" try: path = '%s/' % urljoin(self.monthly_build_list_regex, self.builds[self.build_index]) if self.application in APPLICATIONS_MULTI_LOCALE \ and self.locale != 'multi': path = '%s/' % urljoin(path, self.locale) return path except Exception: folder = urljoin(self.base_url, self.monthly_build_list_regex) raise errors.NotFoundError("Specified sub folder cannot be found", folder)
[ "def", "path_regex", "(", "self", ")", ":", "try", ":", "path", "=", "'%s/'", "%", "urljoin", "(", "self", ".", "monthly_build_list_regex", ",", "self", ".", "builds", "[", "self", ".", "build_index", "]", ")", "if", "self", ".", "application", "in", "APPLICATIONS_MULTI_LOCALE", "and", "self", ".", "locale", "!=", "'multi'", ":", "path", "=", "'%s/'", "%", "urljoin", "(", "path", ",", "self", ".", "locale", ")", "return", "path", "except", "Exception", ":", "folder", "=", "urljoin", "(", "self", ".", "base_url", ",", "self", ".", "monthly_build_list_regex", ")", "raise", "errors", ".", "NotFoundError", "(", "\"Specified sub folder cannot be found\"", ",", "folder", ")" ]
Return the regex for the path to the build folder.
[ "Return", "the", "regex", "for", "the", "path", "to", "the", "build", "folder", "." ]
python
train
49.153846
INM-6/hybridLFPy
examples/example_microcircuit_params.py
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_microcircuit_params.py#L44-L58
def get_F_y(fname='binzegger_connectivity_table.json', y=['p23']): ''' Extract frequency of occurrences of those cell types that are modeled. The data set contains cell types that are not modeled (TCs etc.) The returned percentages are renormalized onto modeled cell-types, i.e. they sum up to 1 ''' # Load data from json dictionary f = open(fname,'r') data = json.load(f) f.close() occurr = [] for cell_type in y: occurr += [data['data'][cell_type]['occurrence']] return list(np.array(occurr)/np.sum(occurr))
[ "def", "get_F_y", "(", "fname", "=", "'binzegger_connectivity_table.json'", ",", "y", "=", "[", "'p23'", "]", ")", ":", "# Load data from json dictionary", "f", "=", "open", "(", "fname", ",", "'r'", ")", "data", "=", "json", ".", "load", "(", "f", ")", "f", ".", "close", "(", ")", "occurr", "=", "[", "]", "for", "cell_type", "in", "y", ":", "occurr", "+=", "[", "data", "[", "'data'", "]", "[", "cell_type", "]", "[", "'occurrence'", "]", "]", "return", "list", "(", "np", ".", "array", "(", "occurr", ")", "/", "np", ".", "sum", "(", "occurr", ")", ")" ]
Extract frequency of occurrences of those cell types that are modeled. The data set contains cell types that are not modeled (TCs etc.) The returned percentages are renormalized onto modeled cell-types, i.e. they sum up to 1
[ "Extract", "frequency", "of", "occurrences", "of", "those", "cell", "types", "that", "are", "modeled", ".", "The", "data", "set", "contains", "cell", "types", "that", "are", "not", "modeled", "(", "TCs", "etc", ".", ")", "The", "returned", "percentages", "are", "renormalized", "onto", "modeled", "cell", "-", "types", "i", ".", "e", ".", "they", "sum", "up", "to", "1" ]
python
train
37.2
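The renormalization is a plain divide-by-sum; with made-up occurrence values:

import numpy as np

occurr = [30.0, 10.0, 20.0]               # made-up occurrences
F_y = list(np.array(occurr) / np.sum(occurr))
# F_y -> [0.5, 0.1666..., 0.3333...], summing to 1 over the modeled cell types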
Metatab/metapack
metapack/jupyter/ipython.py
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/ipython.py#L48-L103
def open_package(locals=None, dr=None): """Try to open a package with the metatab_doc variable, which is set when a Notebook is run as a resource. If that does not exist, try the local _packages directory""" if locals is None: locals = caller_locals() try: # Running in a package build return op(locals['metatab_doc']) except KeyError: # Running interactively in Jupyter package_name = None build_package_dir = None source_package = None if dr is None: dr = getcwd() for i, e in enumerate(walk_up(dr)): intr = set([DEFAULT_METATAB_FILE, LINES_METATAB_FILE, IPYNB_METATAB_FILE]) & set(e[2]) if intr: source_package = join(e[0], list(intr)[0]) p = op(source_package) package_name = p.find_first_value("Root.Name") if not package_name: raise PackageError("Source package in {} does not have root.Name term".format(e[0])) if PACKAGE_PREFIX in e[1]: build_package_dir = join(e[0], PACKAGE_PREFIX) break if i > 2: break if build_package_dir and package_name and exists(join(build_package_dir, package_name)): # Open the previously built package built_package = join(build_package_dir, package_name) try: return op(built_package) except RowGeneratorError as e: pass # Probably could not open the metadata file. if source_package: # Open the source package return op(source_package) raise PackageError("Failed to find package, either in locals() or above dir '{}' ".format(dr))
[ "def", "open_package", "(", "locals", "=", "None", ",", "dr", "=", "None", ")", ":", "if", "locals", "is", "None", ":", "locals", "=", "caller_locals", "(", ")", "try", ":", "# Running in a package build", "return", "op", "(", "locals", "[", "'metatab_doc'", "]", ")", "except", "KeyError", ":", "# Running interactively in Jupyter", "package_name", "=", "None", "build_package_dir", "=", "None", "source_package", "=", "None", "if", "dr", "is", "None", ":", "dr", "=", "getcwd", "(", ")", "for", "i", ",", "e", "in", "enumerate", "(", "walk_up", "(", "dr", ")", ")", ":", "intr", "=", "set", "(", "[", "DEFAULT_METATAB_FILE", ",", "LINES_METATAB_FILE", ",", "IPYNB_METATAB_FILE", "]", ")", "&", "set", "(", "e", "[", "2", "]", ")", "if", "intr", ":", "source_package", "=", "join", "(", "e", "[", "0", "]", ",", "list", "(", "intr", ")", "[", "0", "]", ")", "p", "=", "op", "(", "source_package", ")", "package_name", "=", "p", ".", "find_first_value", "(", "\"Root.Name\"", ")", "if", "not", "package_name", ":", "raise", "PackageError", "(", "\"Source package in {} does not have root.Name term\"", ".", "format", "(", "e", "[", "0", "]", ")", ")", "if", "PACKAGE_PREFIX", "in", "e", "[", "1", "]", ":", "build_package_dir", "=", "join", "(", "e", "[", "0", "]", ",", "PACKAGE_PREFIX", ")", "break", "if", "i", ">", "2", ":", "break", "if", "build_package_dir", "and", "package_name", "and", "exists", "(", "join", "(", "build_package_dir", ",", "package_name", ")", ")", ":", "# Open the previously built package", "built_package", "=", "join", "(", "build_package_dir", ",", "package_name", ")", "try", ":", "return", "op", "(", "built_package", ")", "except", "RowGeneratorError", "as", "e", ":", "pass", "# Probably could not open the metadata file.", "if", "source_package", ":", "# Open the source package", "return", "op", "(", "source_package", ")", "raise", "PackageError", "(", "\"Failed to find package, either in locals() or above dir '{}' \"", ".", "format", "(", "dr", ")", ")" ]
Try to open a package with the metatab_doc variable, which is set when a Notebook is run as a resource. If that does not exist, try the local _packages directory
[ "Try", "to", "open", "a", "package", "with", "the", "metatab_doc", "variable", "which", "is", "set", "when", "a", "Notebook", "is", "run", "as", "a", "resource", ".", "If", "that", "does", "not", "exist", "try", "the", "local", "_packages", "directory" ]
python
train
31.214286
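A hypothetical interactive call from a notebook cell; with no metatab_doc in locals(), the function walks up from the working directory looking for the package metadata:

from metapack.jupyter.ipython import open_package

pkg = open_package()                       # or open_package(dr='/path/to/project')
print(pkg.find_first_value('Root.Name'))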
puiterwijk/flask-oidc
flask_oidc/__init__.py
https://github.com/puiterwijk/flask-oidc/blob/7f16e27b926fc12953d6b2ae78a9b9cc9b8d1769/flask_oidc/__init__.py#L669-L683
def custom_callback(self, view_func): """ Wrapper function to use a custom callback. The custom OIDC callback will get the custom state field passed in with redirect_to_auth_server. """ @wraps(view_func) def decorated(*args, **kwargs): plainreturn, data = self._process_callback('custom') if plainreturn: return data else: return view_func(data, *args, **kwargs) self._custom_callback = decorated return decorated
[ "def", "custom_callback", "(", "self", ",", "view_func", ")", ":", "@", "wraps", "(", "view_func", ")", "def", "decorated", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "plainreturn", ",", "data", "=", "self", ".", "_process_callback", "(", "'custom'", ")", "if", "plainreturn", ":", "return", "data", "else", ":", "return", "view_func", "(", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "_custom_callback", "=", "decorated", "return", "decorated" ]
Wrapper function to use a custom callback. The custom OIDC callback will get the custom state field passed in with redirect_to_auth_server.
[ "Wrapper", "function", "to", "use", "a", "custom", "callback", ".", "The", "custom", "OIDC", "callback", "will", "get", "the", "custom", "state", "field", "passed", "in", "with", "redirect_to_auth_server", "." ]
python
train
35.933333
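A hypothetical wiring sketch; it assumes an existing Flask app, an OpenIDConnect instance named oidc, and whatever redirect-URI configuration flask-oidc expects:

@app.route('/custom_callback')
@oidc.custom_callback
def oidc_callback(data):
    # 'data' is the custom state field passed to redirect_to_auth_server
    return 'Signed in with custom state: {}'.format(data)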
SMTG-UCL/sumo
sumo/cli/bandplot.py
https://github.com/SMTG-UCL/sumo/blob/47aec6bbfa033a624435a65bd4edabd18bfb437f/sumo/cli/bandplot.py#L306-L343
def save_data_files(vr, bs, prefix=None, directory=None): """Write the band structure data files to disk. Args: vs (`Vasprun`): Pymatgen `Vasprun` object. bs (`BandStructureSymmLine`): Calculated band structure. prefix (`str`, optional): Prefix for data file. directory (`str`, optional): Directory in which to save the data. Returns: The filename of the written data file. """ filename = '{}_band.dat'.format(prefix) if prefix else 'band.dat' directory = directory if directory else '.' filename = os.path.join(directory, filename) if bs.is_metal(): zero = vr.efermi else: zero = bs.get_vbm()['energy'] with open(filename, 'w') as f: header = '#k-distance eigenvalue[eV]\n' f.write(header) # write the spin up eigenvalues for band in bs.bands[Spin.up]: for d, e in zip(bs.distance, band): f.write('{:.8f} {:.8f}\n'.format(d, e - zero)) f.write('\n') # calculation is spin polarised, write spin down bands at end of file if bs.is_spin_polarized: for band in bs.bands[Spin.down]: for d, e in zip(bs.distance, band): f.write('{:.8f} {:.8f}\n'.format(d, e - zero)) f.write('\n') return filename
[ "def", "save_data_files", "(", "vr", ",", "bs", ",", "prefix", "=", "None", ",", "directory", "=", "None", ")", ":", "filename", "=", "'{}_band.dat'", ".", "format", "(", "prefix", ")", "if", "prefix", "else", "'band.dat'", "directory", "=", "directory", "if", "directory", "else", "'.'", "filename", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "filename", ")", "if", "bs", ".", "is_metal", "(", ")", ":", "zero", "=", "vr", ".", "efermi", "else", ":", "zero", "=", "bs", ".", "get_vbm", "(", ")", "[", "'energy'", "]", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "header", "=", "'#k-distance eigenvalue[eV]\\n'", "f", ".", "write", "(", "header", ")", "# write the spin up eigenvalues", "for", "band", "in", "bs", ".", "bands", "[", "Spin", ".", "up", "]", ":", "for", "d", ",", "e", "in", "zip", "(", "bs", ".", "distance", ",", "band", ")", ":", "f", ".", "write", "(", "'{:.8f} {:.8f}\\n'", ".", "format", "(", "d", ",", "e", "-", "zero", ")", ")", "f", ".", "write", "(", "'\\n'", ")", "# calculation is spin polarised, write spin down bands at end of file", "if", "bs", ".", "is_spin_polarized", ":", "for", "band", "in", "bs", ".", "bands", "[", "Spin", ".", "down", "]", ":", "for", "d", ",", "e", "in", "zip", "(", "bs", ".", "distance", ",", "band", ")", ":", "f", ".", "write", "(", "'{:.8f} {:.8f}\\n'", ".", "format", "(", "d", ",", "e", "-", "zero", ")", ")", "f", ".", "write", "(", "'\\n'", ")", "return", "filename" ]
Write the band structure data files to disk. Args: vs (`Vasprun`): Pymatgen `Vasprun` object. bs (`BandStructureSymmLine`): Calculated band structure. prefix (`str`, optional): Prefix for data file. directory (`str`, optional): Directory in which to save the data. Returns: The filename of the written data file.
[ "Write", "the", "band", "structure", "data", "files", "to", "disk", "." ]
python
train
34.526316
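Given the format the writer emits (a single '#' header, one "k-distance eigenvalue" pair per line, bands separated by blank lines), reading the file back might look like this sketch:

import numpy as np

bands = []
block = []
with open('band.dat') as f:
    for line in f:
        line = line.strip()
        if line.startswith('#'):
            continue                      # skip the header
        if not line:                      # blank line closes one band
            if block:
                bands.append(np.array(block))
                block = []
            continue
        block.append([float(x) for x in line.split()])
if block:
    bands.append(np.array(block))
# bands[i][:, 0] is k-distance, bands[i][:, 1] the zero-referenced eigenvalue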
QInfer/python-qinfer
src/qinfer/tomography/plotting_tools.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/tomography/plotting_tools.py#L156-L202
def plot_rebit_prior(prior, rebit_axes=REBIT_AXES, n_samples=2000, true_state=None, true_size=250, force_mean=None, legend=True, mean_color_index=2 ): """ Plots rebit states drawn from a given prior. :param qinfer.tomography.DensityOperatorDistribution prior: Distribution over rebit states to plot. :param list rebit_axes: List containing indices for the :math:`x` and :math:`z` axes. :param int n_samples: Number of samples to draw from the prior. :param np.ndarray true_state: State to be plotted as a "true" state for comparison. """ pallette = plt.rcParams['axes.color_cycle'] plot_rebit_modelparams(prior.sample(n_samples), c=pallette[0], label='Prior', rebit_axes=rebit_axes ) if true_state is not None: plot_rebit_modelparams(true_state, c=pallette[1], label='True', marker='*', s=true_size, rebit_axes=rebit_axes ) if hasattr(prior, '_mean') or force_mean is not None: mean = force_mean if force_mean is not None else prior._mean plot_rebit_modelparams( prior._basis.state_to_modelparams(mean)[None, :], edgecolors=pallette[mean_color_index], s=250, facecolors='none', linewidth=3, label='Mean', rebit_axes=rebit_axes ) plot_decorate_rebits(prior.basis, rebit_axes=rebit_axes ) if legend: plt.legend(loc='lower left', ncol=3, scatterpoints=1)
[ "def", "plot_rebit_prior", "(", "prior", ",", "rebit_axes", "=", "REBIT_AXES", ",", "n_samples", "=", "2000", ",", "true_state", "=", "None", ",", "true_size", "=", "250", ",", "force_mean", "=", "None", ",", "legend", "=", "True", ",", "mean_color_index", "=", "2", ")", ":", "pallette", "=", "plt", ".", "rcParams", "[", "'axes.color_cycle'", "]", "plot_rebit_modelparams", "(", "prior", ".", "sample", "(", "n_samples", ")", ",", "c", "=", "pallette", "[", "0", "]", ",", "label", "=", "'Prior'", ",", "rebit_axes", "=", "rebit_axes", ")", "if", "true_state", "is", "not", "None", ":", "plot_rebit_modelparams", "(", "true_state", ",", "c", "=", "pallette", "[", "1", "]", ",", "label", "=", "'True'", ",", "marker", "=", "'*'", ",", "s", "=", "true_size", ",", "rebit_axes", "=", "rebit_axes", ")", "if", "hasattr", "(", "prior", ",", "'_mean'", ")", "or", "force_mean", "is", "not", "None", ":", "mean", "=", "force_mean", "if", "force_mean", "is", "not", "None", "else", "prior", ".", "_mean", "plot_rebit_modelparams", "(", "prior", ".", "_basis", ".", "state_to_modelparams", "(", "mean", ")", "[", "None", ",", ":", "]", ",", "edgecolors", "=", "pallette", "[", "mean_color_index", "]", ",", "s", "=", "250", ",", "facecolors", "=", "'none'", ",", "linewidth", "=", "3", ",", "label", "=", "'Mean'", ",", "rebit_axes", "=", "rebit_axes", ")", "plot_decorate_rebits", "(", "prior", ".", "basis", ",", "rebit_axes", "=", "rebit_axes", ")", "if", "legend", ":", "plt", ".", "legend", "(", "loc", "=", "'lower left'", ",", "ncol", "=", "3", ",", "scatterpoints", "=", "1", ")" ]
Plots rebit states drawn from a given prior. :param qinfer.tomography.DensityOperatorDistribution prior: Distribution over rebit states to plot. :param list rebit_axes: List containing indices for the :math:`x` and :math:`z` axes. :param int n_samples: Number of samples to draw from the prior. :param np.ndarray true_state: State to be plotted as a "true" state for comparison.
[ "Plots", "rebit", "states", "drawn", "from", "a", "given", "prior", "." ]
python
train
31.957447
xmikos/soapy_power
soapypower/power.py
https://github.com/xmikos/soapy_power/blob/46e12659b8d08af764dc09a1f31b0e85a68f808f/soapypower/power.py#L73-L81
def nearest_overlap(self, overlap, bins): """Return nearest overlap/crop factor based on number of bins""" bins_overlap = overlap * bins if bins_overlap % 2 != 0: bins_overlap = math.ceil(bins_overlap / 2) * 2 overlap = bins_overlap / bins logger.warning('number of overlapping FFT bins should be even, ' 'changing overlap/crop factor to {:.5f}'.format(overlap)) return overlap
[ "def", "nearest_overlap", "(", "self", ",", "overlap", ",", "bins", ")", ":", "bins_overlap", "=", "overlap", "*", "bins", "if", "bins_overlap", "%", "2", "!=", "0", ":", "bins_overlap", "=", "math", ".", "ceil", "(", "bins_overlap", "/", "2", ")", "*", "2", "overlap", "=", "bins_overlap", "/", "bins", "logger", ".", "warning", "(", "'number of overlapping FFT bins should be even, '", "'changing overlap/crop factor to {:.5f}'", ".", "format", "(", "overlap", ")", ")", "return", "overlap" ]
Return nearest overlap/crop factor based on number of bins
[ "Return", "nearest", "overlap", "/", "crop", "factor", "based", "on", "number", "of", "bins" ]
python
test
51.555556
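A worked example of the rounding: 50 bins at a requested overlap of 0.3 give 15 overlapping bins, which is odd, so it is rounded up to 16 and the effective factor becomes 0.32:

import math

overlap, bins = 0.3, 50
bins_overlap = overlap * bins                       # 15.0 overlapping bins
if bins_overlap % 2 != 0:
    bins_overlap = math.ceil(bins_overlap / 2) * 2  # -> 16
    overlap = bins_overlap / bins                   # -> 0.32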
fermiPy/fermipy
fermipy/diffuse/name_policy.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L311-L322
def ft2file(self, **kwargs): """ return the name of the input ft2 file list """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) kwargs_copy['data_time'] = kwargs.get( 'data_time', self.dataset(**kwargs)) self._replace_none(kwargs_copy) localpath = NameFactory.ft2file_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
[ "def", "ft2file", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "kwargs_copy", "[", "'data_time'", "]", "=", "kwargs", ".", "get", "(", "'data_time'", ",", "self", ".", "dataset", "(", "*", "*", "kwargs", ")", ")", "self", ".", "_replace_none", "(", "kwargs_copy", ")", "localpath", "=", "NameFactory", ".", "ft2file_format", ".", "format", "(", "*", "*", "kwargs_copy", ")", "if", "kwargs", ".", "get", "(", "'fullpath'", ",", "False", ")", ":", "return", "self", ".", "fullpath", "(", "localpath", "=", "localpath", ")", "return", "localpath" ]
return the name of the input ft2 file list
[ "return", "the", "name", "of", "the", "input", "ft2", "file", "list" ]
python
train
41.583333
bertrandvidal/parse_this
parse_this/core.py
https://github.com/bertrandvidal/parse_this/blob/aa2e3737f19642300ef1ca65cae21c90049718a2/parse_this/core.py#L118-L148
def _get_parseable_methods(cls): """Return all methods of cls that are parseable i.e. have been decorated by '@create_parser'. Args: cls: the class currently being decorated Note: classmethods will not be included as they can only be referenced once the class has been defined Returns: a 2-tuple with the parser of the __init__ method if any and a dict of the form {'method_name': associated_parser} """ _LOG.debug("Retrieving parseable methods for '%s'", cls.__name__) init_parser = None methods_to_parse = {} for name, obj in vars(cls).items(): # Every callable object that has a 'parser' attribute will be # added as a subparser. # This won't work for classmethods because reference to # classmethods are only possible once the class has been defined if callable(obj) and hasattr(obj, "parser"): _LOG.debug("Found method '%s'", name) if name == "__init__": # If we find the decorated __init__ method it will be # used as the top level parser init_parser = obj.parser else: methods_to_parse[obj.__name__] = obj.parser return (init_parser, methods_to_parse)
[ "def", "_get_parseable_methods", "(", "cls", ")", ":", "_LOG", ".", "debug", "(", "\"Retrieving parseable methods for '%s'\"", ",", "cls", ".", "__name__", ")", "init_parser", "=", "None", "methods_to_parse", "=", "{", "}", "for", "name", ",", "obj", "in", "vars", "(", "cls", ")", ".", "items", "(", ")", ":", "# Every callable object that has a 'parser' attribute will be", "# added as a subparser.", "# This won't work for classmethods because reference to", "# classmethods are only possible once the class has been defined", "if", "callable", "(", "obj", ")", "and", "hasattr", "(", "obj", ",", "\"parser\"", ")", ":", "_LOG", ".", "debug", "(", "\"Found method '%s'\"", ",", "name", ")", "if", "name", "==", "\"__init__\"", ":", "# If we find the decorated __init__ method it will be", "# used as the top level parser", "init_parser", "=", "obj", ".", "parser", "else", ":", "methods_to_parse", "[", "obj", ".", "__name__", "]", "=", "obj", ".", "parser", "return", "(", "init_parser", ",", "methods_to_parse", ")" ]
Return all methods of cls that are parseable i.e. have been decorated by '@create_parser'. Args: cls: the class currently being decorated Note: classmethods will not be included as they can only be referenced once the class has been defined Returns: a 2-tuple with the parser of the __init__ method if any and a dict of the form {'method_name': associated_parser}
[ "Return", "all", "methods", "of", "cls", "that", "are", "parseable", "i", ".", "e", ".", "have", "been", "decorated", "by", "@create_parser", "." ]
python
train
40.451613
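Since the scan only looks for a 'parser' attribute on callables, a toy class with a stub attribute is enough to exercise it (the stub stands in for what @create_parser would attach):

class Demo(object):
    def __init__(self):
        pass

    def run(self):
        pass

Demo.run.parser = 'stub-parser'    # what @create_parser would normally attach

init_parser, methods = _get_parseable_methods(Demo)
# init_parser is None (plain __init__ has no 'parser' attribute)
# methods == {'run': 'stub-parser'}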
dropbox/stone
stone/frontend/parser.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/parser.py#L157-L167
def p_namespace(self, p): """namespace : KEYWORD ID NL | KEYWORD ID NL INDENT docsection DEDENT""" if p[1] == 'namespace': doc = None if len(p) > 4: doc = p[5] p[0] = AstNamespace( self.path, p.lineno(1), p.lexpos(1), p[2], doc) else: raise ValueError('Expected namespace keyword')
[ "def", "p_namespace", "(", "self", ",", "p", ")", ":", "if", "p", "[", "1", "]", "==", "'namespace'", ":", "doc", "=", "None", "if", "len", "(", "p", ")", ">", "4", ":", "doc", "=", "p", "[", "5", "]", "p", "[", "0", "]", "=", "AstNamespace", "(", "self", ".", "path", ",", "p", ".", "lineno", "(", "1", ")", ",", "p", ".", "lexpos", "(", "1", ")", ",", "p", "[", "2", "]", ",", "doc", ")", "else", ":", "raise", "ValueError", "(", "'Expected namespace keyword'", ")" ]
namespace : KEYWORD ID NL | KEYWORD ID NL INDENT docsection DEDENT
[ "namespace", ":", "KEYWORD", "ID", "NL", "|", "KEYWORD", "ID", "NL", "INDENT", "docsection", "DEDENT" ]
python
train
36
MLAB-project/pymlab
src/pymlab/sensors/iic.py
https://github.com/MLAB-project/pymlab/blob/d18d858ae83b203defcf2aead0dbd11b3c444658/src/pymlab/sensors/iic.py#L171-L184
def write_block_data(self, address, register, value): """ SMBus Block Write: i2c_smbus_write_block_data() ================================================ The opposite of the Block Read command, this writes up to 32 bytes to a device, to a designated register that is specified through the Comm byte. The amount of data is specified in the Count byte. S Addr Wr [A] Comm [A] Count [A] Data [A] Data [A] ... [A] Data [A] P Functionality flag: I2C_FUNC_SMBUS_WRITE_BLOCK_DATA """ return self.smbus.write_block_data(address, register, value)
[ "def", "write_block_data", "(", "self", ",", "address", ",", "register", ",", "value", ")", ":", "return", "self", ".", "smbus", ".", "write_block_data", "(", "address", ",", "register", ",", "value", ")" ]
SMBus Block Write: i2c_smbus_write_block_data() ================================================ The opposite of the Block Read command, this writes up to 32 bytes to a device, to a designated register that is specified through the Comm byte. The amount of data is specified in the Count byte. S Addr Wr [A] Comm [A] Count [A] Data [A] Data [A] ... [A] Data [A] P Functionality flag: I2C_FUNC_SMBUS_WRITE_BLOCK_DATA
[ "SMBus", "Block", "Write", ":", "i2c_smbus_write_block_data", "()", "================================================" ]
python
train
43.5
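A hypothetical call, assuming an already-constructed pymlab I2C driver instance; SMBus block writes carry at most 32 data bytes:

# 'bus' is assumed to be a configured pymlab I2C driver instance;
# device address and register below are placeholders.
bus.write_block_data(0x70, 0x01, [0x10, 0x20, 0x30])
# wire format: S 0x70 Wr [A] 0x01 [A] Count [A] 0x10 [A] 0x20 [A] 0x30 [A] P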
woolfson-group/isambard
isambard/optimisation/optimizer.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/optimisation/optimizer.py#L674-L697
def generate(self): """Generates a particle using the creator function. Notes ----- Position and speed are uniformly randomly seeded within allowed bounds. The particle also has speed limit settings taken from global values. Returns ------- particle object """ part = creator.Particle( [random.uniform(-1, 1) for _ in range(len(self._params['value_means']))]) part.speed = [ random.uniform(-self._params['max_speed'], self._params['max_speed']) for _ in range(len(self._params['value_means']))] part.smin = -self._params['max_speed'] part.smax = self._params['max_speed'] part.ident = None part.neighbours = None return part
[ "def", "generate", "(", "self", ")", ":", "part", "=", "creator", ".", "Particle", "(", "[", "random", ".", "uniform", "(", "-", "1", ",", "1", ")", "for", "_", "in", "range", "(", "len", "(", "self", ".", "_params", "[", "'value_means'", "]", ")", ")", "]", ")", "part", ".", "speed", "=", "[", "random", ".", "uniform", "(", "-", "self", ".", "_params", "[", "'max_speed'", "]", ",", "self", ".", "_params", "[", "'max_speed'", "]", ")", "for", "_", "in", "range", "(", "len", "(", "self", ".", "_params", "[", "'value_means'", "]", ")", ")", "]", "part", ".", "smin", "=", "-", "self", ".", "_params", "[", "'max_speed'", "]", "part", ".", "smax", "=", "self", ".", "_params", "[", "'max_speed'", "]", "part", ".", "ident", "=", "None", "part", ".", "neighbours", "=", "None", "return", "part" ]
Generates a particle using the creator function. Notes ----- Position and speed are uniformly randomly seeded within allowed bounds. The particle also has speed limit settings taken from global values. Returns ------- particle object
[ "Generates", "a", "particle", "using", "the", "creator", "function", ".", "Notes", "-----", "Position", "and", "speed", "are", "uniformly", "randomly", "seeded", "within", "allowed", "bounds", ".", "The", "particle", "also", "has", "speed", "limit", "settings", "taken", "from", "global", "values", "." ]
python
train
33.916667
nschloe/matplotlib2tikz
matplotlib2tikz/save.py
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/save.py#L337-L415
def _recurse(data, obj): """Iterates over all children of the current object, gathers the contents contributing to the resulting PGFPlots file, and returns those. """ content = _ContentManager() for child in obj.get_children(): # Some patches are Spines, too; skip those entirely. # See <https://github.com/nschloe/matplotlib2tikz/issues/277>. if isinstance(child, mpl.spines.Spine): continue if isinstance(child, mpl.axes.Axes): ax = axes.Axes(data, child) if ax.is_colorbar: continue # add extra axis options if data["extra axis options [base]"]: ax.axis_options.extend(data["extra axis options [base]"]) data["current mpl axes obj"] = child data["current axes"] = ax # Run through the child objects, gather the content. data, children_content = _recurse(data, child) # populate content and add axis environment if desired if data["add axis environment"]: content.extend( ax.get_begin_code() + children_content + [ax.get_end_code(data)], 0 ) else: content.extend(children_content, 0) # print axis environment options, if told to show infos if data["show_info"]: print("=========================================================") print("These would have been the properties of the environment:") print("".join(ax.get_begin_code()[1:])) print("=========================================================") elif isinstance(child, mpl.lines.Line2D): data, cont = line2d.draw_line2d(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, mpl.image.AxesImage): data, cont = img.draw_image(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, mpl.patches.Patch): data, cont = patch.draw_patch(data, child) content.extend(cont, child.get_zorder()) elif isinstance( child, (mpl.collections.PatchCollection, mpl.collections.PolyCollection) ): data, cont = patch.draw_patchcollection(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, mpl.collections.PathCollection): data, cont = path.draw_pathcollection(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, mpl.collections.LineCollection): data, cont = line2d.draw_linecollection(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, mpl.collections.QuadMesh): data, cont = qmsh.draw_quadmesh(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, mpl.legend.Legend): data = legend.draw_legend(data, child) if data["legend colors"]: content.extend(data["legend colors"], 0) elif isinstance(child, (mpl.text.Text, mpl.text.Annotation)): data, cont = text.draw_text(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, (mpl.axis.XAxis, mpl.axis.YAxis)): pass else: warnings.warn( "matplotlib2tikz: Don't know how to handle object {}.".format( type(child) ) ) return data, content.flatten()
[ "def", "_recurse", "(", "data", ",", "obj", ")", ":", "content", "=", "_ContentManager", "(", ")", "for", "child", "in", "obj", ".", "get_children", "(", ")", ":", "# Some patches are Spines, too; skip those entirely.", "# See <https://github.com/nschloe/matplotlib2tikz/issues/277>.", "if", "isinstance", "(", "child", ",", "mpl", ".", "spines", ".", "Spine", ")", ":", "continue", "if", "isinstance", "(", "child", ",", "mpl", ".", "axes", ".", "Axes", ")", ":", "ax", "=", "axes", ".", "Axes", "(", "data", ",", "child", ")", "if", "ax", ".", "is_colorbar", ":", "continue", "# add extra axis options", "if", "data", "[", "\"extra axis options [base]\"", "]", ":", "ax", ".", "axis_options", ".", "extend", "(", "data", "[", "\"extra axis options [base]\"", "]", ")", "data", "[", "\"current mpl axes obj\"", "]", "=", "child", "data", "[", "\"current axes\"", "]", "=", "ax", "# Run through the child objects, gather the content.", "data", ",", "children_content", "=", "_recurse", "(", "data", ",", "child", ")", "# populate content and add axis environment if desired", "if", "data", "[", "\"add axis environment\"", "]", ":", "content", ".", "extend", "(", "ax", ".", "get_begin_code", "(", ")", "+", "children_content", "+", "[", "ax", ".", "get_end_code", "(", "data", ")", "]", ",", "0", ")", "else", ":", "content", ".", "extend", "(", "children_content", ",", "0", ")", "# print axis environment options, if told to show infos", "if", "data", "[", "\"show_info\"", "]", ":", "print", "(", "\"=========================================================\"", ")", "print", "(", "\"These would have been the properties of the environment:\"", ")", "print", "(", "\"\"", ".", "join", "(", "ax", ".", "get_begin_code", "(", ")", "[", "1", ":", "]", ")", ")", "print", "(", "\"=========================================================\"", ")", "elif", "isinstance", "(", "child", ",", "mpl", ".", "lines", ".", "Line2D", ")", ":", "data", ",", "cont", "=", "line2d", ".", "draw_line2d", "(", "data", ",", "child", ")", "content", ".", "extend", "(", "cont", ",", "child", ".", "get_zorder", "(", ")", ")", "elif", "isinstance", "(", "child", ",", "mpl", ".", "image", ".", "AxesImage", ")", ":", "data", ",", "cont", "=", "img", ".", "draw_image", "(", "data", ",", "child", ")", "content", ".", "extend", "(", "cont", ",", "child", ".", "get_zorder", "(", ")", ")", "elif", "isinstance", "(", "child", ",", "mpl", ".", "patches", ".", "Patch", ")", ":", "data", ",", "cont", "=", "patch", ".", "draw_patch", "(", "data", ",", "child", ")", "content", ".", "extend", "(", "cont", ",", "child", ".", "get_zorder", "(", ")", ")", "elif", "isinstance", "(", "child", ",", "(", "mpl", ".", "collections", ".", "PatchCollection", ",", "mpl", ".", "collections", ".", "PolyCollection", ")", ")", ":", "data", ",", "cont", "=", "patch", ".", "draw_patchcollection", "(", "data", ",", "child", ")", "content", ".", "extend", "(", "cont", ",", "child", ".", "get_zorder", "(", ")", ")", "elif", "isinstance", "(", "child", ",", "mpl", ".", "collections", ".", "PathCollection", ")", ":", "data", ",", "cont", "=", "path", ".", "draw_pathcollection", "(", "data", ",", "child", ")", "content", ".", "extend", "(", "cont", ",", "child", ".", "get_zorder", "(", ")", ")", "elif", "isinstance", "(", "child", ",", "mpl", ".", "collections", ".", "LineCollection", ")", ":", "data", ",", "cont", "=", "line2d", ".", "draw_linecollection", "(", "data", ",", "child", ")", "content", ".", "extend", "(", "cont", ",", "child", ".", "get_zorder", "(", ")", ")", 
"elif", "isinstance", "(", "child", ",", "mpl", ".", "collections", ".", "QuadMesh", ")", ":", "data", ",", "cont", "=", "qmsh", ".", "draw_quadmesh", "(", "data", ",", "child", ")", "content", ".", "extend", "(", "cont", ",", "child", ".", "get_zorder", "(", ")", ")", "elif", "isinstance", "(", "child", ",", "mpl", ".", "legend", ".", "Legend", ")", ":", "data", "=", "legend", ".", "draw_legend", "(", "data", ",", "child", ")", "if", "data", "[", "\"legend colors\"", "]", ":", "content", ".", "extend", "(", "data", "[", "\"legend colors\"", "]", ",", "0", ")", "elif", "isinstance", "(", "child", ",", "(", "mpl", ".", "text", ".", "Text", ",", "mpl", ".", "text", ".", "Annotation", ")", ")", ":", "data", ",", "cont", "=", "text", ".", "draw_text", "(", "data", ",", "child", ")", "content", ".", "extend", "(", "cont", ",", "child", ".", "get_zorder", "(", ")", ")", "elif", "isinstance", "(", "child", ",", "(", "mpl", ".", "axis", ".", "XAxis", ",", "mpl", ".", "axis", ".", "YAxis", ")", ")", ":", "pass", "else", ":", "warnings", ".", "warn", "(", "\"matplotlib2tikz: Don't know how to handle object {}.\"", ".", "format", "(", "type", "(", "child", ")", ")", ")", "return", "data", ",", "content", ".", "flatten", "(", ")" ]
Iterates over all children of the current object, gathers the contents contributing to the resulting PGFPlots file, and returns those.
[ "Iterates", "over", "all", "children", "of", "the", "current", "object", "gathers", "the", "contents", "contributing", "to", "the", "resulting", "PGFPlots", "file", "and", "returns", "those", "." ]
python
train
45.075949
ladybug-tools/ladybug
ladybug/datatype/base.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datatype/base.py#L139-L172
def is_in_range(self, values, unit=None, raise_exception=True): """Check if a list of values is within physically/mathematically possible range. Args: values: A list of values. unit: The unit of the values. If not specified, the default metric unit will be assumed. raise_exception: Set to True to raise an exception if not in range. """ self._is_numeric(values) if unit is None or unit == self.units[0]: minimum = self.min maximum = self.max else: namespace = {'self': self} self.is_unit_acceptable(unit, True) min_statement = "self._{}_to_{}(self.min)".format( self._clean(self.units[0]), self._clean(unit)) max_statement = "self._{}_to_{}(self.max)".format( self._clean(self.units[0]), self._clean(unit)) minimum = eval(min_statement, namespace) maximum = eval(max_statement, namespace) for value in values: if value < minimum or value > maximum: if not raise_exception: return False else: raise ValueError( '{0} should be between {1} and {2}. Got {3}'.format( self.__class__.__name__, self.min, self.max, value ) ) return True
[ "def", "is_in_range", "(", "self", ",", "values", ",", "unit", "=", "None", ",", "raise_exception", "=", "True", ")", ":", "self", ".", "_is_numeric", "(", "values", ")", "if", "unit", "is", "None", "or", "unit", "==", "self", ".", "units", "[", "0", "]", ":", "minimum", "=", "self", ".", "min", "maximum", "=", "self", ".", "max", "else", ":", "namespace", "=", "{", "'self'", ":", "self", "}", "self", ".", "is_unit_acceptable", "(", "unit", ",", "True", ")", "min_statement", "=", "\"self._{}_to_{}(self.min)\"", ".", "format", "(", "self", ".", "_clean", "(", "self", ".", "units", "[", "0", "]", ")", ",", "self", ".", "_clean", "(", "unit", ")", ")", "max_statement", "=", "\"self._{}_to_{}(self.max)\"", ".", "format", "(", "self", ".", "_clean", "(", "self", ".", "units", "[", "0", "]", ")", ",", "self", ".", "_clean", "(", "unit", ")", ")", "minimum", "=", "eval", "(", "min_statement", ",", "namespace", ")", "maximum", "=", "eval", "(", "max_statement", ",", "namespace", ")", "for", "value", "in", "values", ":", "if", "value", "<", "minimum", "or", "value", ">", "maximum", ":", "if", "not", "raise_exception", ":", "return", "False", "else", ":", "raise", "ValueError", "(", "'{0} should be between {1} and {2}. Got {3}'", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "self", ".", "min", ",", "self", ".", "max", ",", "value", ")", ")", "return", "True" ]
Check if a list of values is within physically/mathematically possible range. Args: values: A list of values. unit: The unit of the values. If not specified, the default metric unit will be assumed. raise_exception: Set to True to raise an exception if not in range.
[ "Check", "if", "a", "list", "of", "values", "is", "within", "physically", "/", "mathematically", "possible", "range", "." ]
python
train
41.882353
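A short usage sketch for is_in_range, assuming ladybug's Temperature data type as the concrete subclass (the class, its import path, and its Celsius default unit are assumptions, not part of this record):

from ladybug.datatype.temperature import Temperature  # assumed subclass

temp = Temperature()

# Values in the default unit are compared against self.min/self.max directly.
temp.is_in_range([18.0, 21.5, 24.0])  # True

# For another accepted unit, min/max are first converted through the
# generated self._<unit>_to_<unit>() methods that the eval() calls resolve.
temp.is_in_range([65.0, 72.0], unit='F')  # True

# Out-of-range values raise ValueError unless raise_exception=False.
temp.is_in_range([-500.0], raise_exception=False)  # False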
Fizzadar/pyinfra
pyinfra/modules/server.py
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/server.py#L316-L453
def user( state, host, name, present=True, home=None, shell=None, group=None, groups=None, public_keys=None, delete_keys=False, ensure_home=True, system=False, uid=None, ): ''' Add/remove/update system users & their ssh `authorized_keys`. + name: name of the user to ensure + present: whether this user should exist + home: the users home directory + shell: the users shell + group: the users primary group + groups: the users secondary groups + public_keys: list of public keys to attach to this user, ``home`` must be specified + delete_keys: whether to remove any keys not specified in ``public_keys`` + ensure_home: whether to ensure the ``home`` directory exists + system: whether to create a system account Home directory: When ``ensure_home`` or ``public_keys`` are provided, ``home`` defaults to ``/home/{name}``. ''' users = host.fact.users or {} user = users.get(name) if groups is None: groups = [] if home is None: home = '/home/{0}'.format(name) # User not wanted? if not present: if user: yield 'userdel {0}'.format(name) return # User doesn't exist but we want them? if present and user is None: # Create the user w/home/shell args = [] if home: args.append('-d {0}'.format(home)) if shell: args.append('-s {0}'.format(shell)) if group: args.append('-g {0}'.format(group)) if groups: args.append('-G {0}'.format(','.join(groups))) if system and host.fact.os not in ('OpenBSD', 'NetBSD'): args.append('-r') if uid: args.append('--uid {0}'.format(uid)) yield 'useradd {0} {1}'.format(' '.join(args), name) # User exists and we want them, check home/shell/keys else: args = [] # Check homedir if home and user['home'] != home: args.append('-d {0}'.format(home)) # Check shell if shell and user['shell'] != shell: args.append('-s {0}'.format(shell)) # Check primary group if group and user['group'] != group: args.append('-g {0}'.format(group)) # Check secondary groups, if defined if groups and set(user['groups']) != set(groups): args.append('-G {0}'.format(','.join(groups))) # Need to mod the user? if args: yield 'usermod {0} {1}'.format(' '.join(args), name) # Ensure home directory ownership if ensure_home: yield files.directory( state, host, home, user=name, group=name, ) # Add SSH keys if public_keys is not None: # Ensure .ssh directory # note that this always outputs commands unless the SSH user has access to the # authorized_keys file, ie the SSH user is the user defined in this function yield files.directory( state, host, '{0}/.ssh'.format(home), user=name, group=name, mode=700, ) filename = '{0}/.ssh/authorized_keys'.format(home) if delete_keys: # Create a whole new authorized_keys file keys_file = six.StringIO('{0}\n'.format( '\n'.join(public_keys), )) # And ensure it exists yield files.put( state, host, keys_file, filename, user=name, group=name, mode=600, ) else: # Ensure authorized_keys exists yield files.file( state, host, filename, user=name, group=name, mode=600, ) # And every public key is present for key in public_keys: yield files.line( state, host, filename, key, )
[ "def", "user", "(", "state", ",", "host", ",", "name", ",", "present", "=", "True", ",", "home", "=", "None", ",", "shell", "=", "None", ",", "group", "=", "None", ",", "groups", "=", "None", ",", "public_keys", "=", "None", ",", "delete_keys", "=", "False", ",", "ensure_home", "=", "True", ",", "system", "=", "False", ",", "uid", "=", "None", ",", ")", ":", "users", "=", "host", ".", "fact", ".", "users", "or", "{", "}", "user", "=", "users", ".", "get", "(", "name", ")", "if", "groups", "is", "None", ":", "groups", "=", "[", "]", "if", "home", "is", "None", ":", "home", "=", "'/home/{0}'", ".", "format", "(", "name", ")", "# User not wanted?", "if", "not", "present", ":", "if", "user", ":", "yield", "'userdel {0}'", ".", "format", "(", "name", ")", "return", "# User doesn't exist but we want them?", "if", "present", "and", "user", "is", "None", ":", "# Create the user w/home/shell", "args", "=", "[", "]", "if", "home", ":", "args", ".", "append", "(", "'-d {0}'", ".", "format", "(", "home", ")", ")", "if", "shell", ":", "args", ".", "append", "(", "'-s {0}'", ".", "format", "(", "shell", ")", ")", "if", "group", ":", "args", ".", "append", "(", "'-g {0}'", ".", "format", "(", "group", ")", ")", "if", "groups", ":", "args", ".", "append", "(", "'-G {0}'", ".", "format", "(", "','", ".", "join", "(", "groups", ")", ")", ")", "if", "system", "and", "host", ".", "fact", ".", "os", "not", "in", "(", "'OpenBSD'", ",", "'NetBSD'", ")", ":", "args", ".", "append", "(", "'-r'", ")", "if", "uid", ":", "args", ".", "append", "(", "'--uid {0}'", ".", "format", "(", "uid", ")", ")", "yield", "'useradd {0} {1}'", ".", "format", "(", "' '", ".", "join", "(", "args", ")", ",", "name", ")", "# User exists and we want them, check home/shell/keys", "else", ":", "args", "=", "[", "]", "# Check homedir", "if", "home", "and", "user", "[", "'home'", "]", "!=", "home", ":", "args", ".", "append", "(", "'-d {0}'", ".", "format", "(", "home", ")", ")", "# Check shell", "if", "shell", "and", "user", "[", "'shell'", "]", "!=", "shell", ":", "args", ".", "append", "(", "'-s {0}'", ".", "format", "(", "shell", ")", ")", "# Check primary group", "if", "group", "and", "user", "[", "'group'", "]", "!=", "group", ":", "args", ".", "append", "(", "'-g {0}'", ".", "format", "(", "group", ")", ")", "# Check secondary groups, if defined", "if", "groups", "and", "set", "(", "user", "[", "'groups'", "]", ")", "!=", "set", "(", "groups", ")", ":", "args", ".", "append", "(", "'-G {0}'", ".", "format", "(", "','", ".", "join", "(", "groups", ")", ")", ")", "# Need to mod the user?", "if", "args", ":", "yield", "'usermod {0} {1}'", ".", "format", "(", "' '", ".", "join", "(", "args", ")", ",", "name", ")", "# Ensure home directory ownership", "if", "ensure_home", ":", "yield", "files", ".", "directory", "(", "state", ",", "host", ",", "home", ",", "user", "=", "name", ",", "group", "=", "name", ",", ")", "# Add SSH keys", "if", "public_keys", "is", "not", "None", ":", "# Ensure .ssh directory", "# note that this always outputs commands unless the SSH user has access to the", "# authorized_keys file, ie the SSH user is the user defined in this function", "yield", "files", ".", "directory", "(", "state", ",", "host", ",", "'{0}/.ssh'", ".", "format", "(", "home", ")", ",", "user", "=", "name", ",", "group", "=", "name", ",", "mode", "=", "700", ",", ")", "filename", "=", "'{0}/.ssh/authorized_keys'", ".", "format", "(", "home", ")", "if", "delete_keys", ":", "# Create a whole new authorized_keys file", "keys_file", "=", "six", 
".", "StringIO", "(", "'{0}\\n'", ".", "format", "(", "'\\n'", ".", "join", "(", "public_keys", ")", ",", ")", ")", "# And ensure it exists", "yield", "files", ".", "put", "(", "state", ",", "host", ",", "keys_file", ",", "filename", ",", "user", "=", "name", ",", "group", "=", "name", ",", "mode", "=", "600", ",", ")", "else", ":", "# Ensure authorized_keys exists", "yield", "files", ".", "file", "(", "state", ",", "host", ",", "filename", ",", "user", "=", "name", ",", "group", "=", "name", ",", "mode", "=", "600", ",", ")", "# And every public key is present", "for", "key", "in", "public_keys", ":", "yield", "files", ".", "line", "(", "state", ",", "host", ",", "filename", ",", "key", ",", ")" ]
Add/remove/update system users & their ssh `authorized_keys`. + name: name of the user to ensure + present: whether this user should exist + home: the users home directory + shell: the users shell + group: the users primary group + groups: the users secondary groups + public_keys: list of public keys to attach to this user, ``home`` must be specified + delete_keys: whether to remove any keys not specified in ``public_keys`` + ensure_home: whether to ensure the ``home`` directory exists + system: whether to create a system account Home directory: When ``ensure_home`` or ``public_keys`` are provided, ``home`` defaults to ``/home/{name}``.
[ "Add", "/", "remove", "/", "update", "system", "users", "&", "their", "ssh", "authorized_keys", "." ]
python
train
28.050725
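How this operation reads from a deploy file, sketched in the pyinfra 0.x style this record targets (state and host are injected by the operation decorator rather than passed by the caller; the user name, groups, and key are illustrative):

# deploy.py -- run with: pyinfra inventory.py deploy.py
from pyinfra.modules import server

server.user(
    'deploy',                 # name
    home='/home/deploy',
    shell='/bin/bash',
    groups=['docker'],
    public_keys=['ssh-ed25519 AAAA... deploy@example.com'],
    sudo=True,                # op-level meta kwarg: useradd/usermod need root
)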
prometheus/client_python
prometheus_client/exposition.py
https://github.com/prometheus/client_python/blob/31f5557e2e84ca4ffa9a03abf6e3f4d0c8b8c3eb/prometheus_client/exposition.py#L165-L177
def factory(cls, registry): """Returns a dynamic MetricsHandler class tied to the passed registry. """ # This implementation relies on MetricsHandler.registry # (defined above and defaulted to REGISTRY). # As we have unicode_literals, we need to create a str() # object for type(). cls_name = str(cls.__name__) MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry}) return MyMetricsHandler
[ "def", "factory", "(", "cls", ",", "registry", ")", ":", "# This implementation relies on MetricsHandler.registry", "# (defined above and defaulted to REGISTRY).", "# As we have unicode_literals, we need to create a str()", "# object for type().", "cls_name", "=", "str", "(", "cls", ".", "__name__", ")", "MyMetricsHandler", "=", "type", "(", "cls_name", ",", "(", "cls", ",", "object", ")", ",", "{", "\"registry\"", ":", "registry", "}", ")", "return", "MyMetricsHandler" ]
Returns a dynamic MetricsHandler class tied to the passed registry.
[ "Returns", "a", "dynamic", "MetricsHandler", "class", "tied", "to", "the", "passed", "registry", "." ]
python
train
39.461538
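A usage sketch: binding the handler class to a non-global registry so the exposed endpoint serves only the metrics registered there (port and metric name are illustrative):

from http.server import HTTPServer
from prometheus_client import CollectorRegistry, Counter
from prometheus_client.exposition import MetricsHandler

registry = CollectorRegistry()
requests_total = Counter('requests_total', 'Total requests', registry=registry)
requests_total.inc()

# factory() returns a new handler class whose `registry` attribute is ours
# instead of the module-level REGISTRY default.
handler_cls = MetricsHandler.factory(registry)
httpd = HTTPServer(('0.0.0.0', 8000), handler_cls)
# httpd.serve_forever()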
twisted/mantissa
xmantissa/webapp.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/webapp.py#L50-L60
def _reorderForPreference(themeList, preferredThemeName): """ Re-order the input themeList according to the preferred theme. Returns None. """ for theme in themeList: if preferredThemeName == theme.themeName: themeList.remove(theme) themeList.insert(0, theme) return
[ "def", "_reorderForPreference", "(", "themeList", ",", "preferredThemeName", ")", ":", "for", "theme", "in", "themeList", ":", "if", "preferredThemeName", "==", "theme", ".", "themeName", ":", "themeList", ".", "remove", "(", "theme", ")", "themeList", ".", "insert", "(", "0", ",", "theme", ")", "return" ]
Re-order the input themeList according to the preferred theme. Returns None.
[ "Re", "-", "order", "the", "input", "themeList", "according", "to", "the", "preferred", "theme", "." ]
python
train
29.181818
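A self-contained sketch with stub objects standing in for Mantissa's real theme instances (importing the private helper directly, for illustration only). Note that removing an element mid-iteration is safe here only because the function returns as soon as the first match is moved:

from collections import namedtuple

from xmantissa.webapp import _reorderForPreference

Theme = namedtuple('Theme', 'themeName')
themes = [Theme('base'), Theme('dark'), Theme('light')]

_reorderForPreference(themes, 'dark')
# themes is mutated in place:
# [Theme(themeName='dark'), Theme(themeName='base'), Theme(themeName='light')]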
RudolfCardinal/pythonlib
cardinal_pythonlib/tools/pdf_to_booklet.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/pdf_to_booklet.py#L161-L185
def page_sequence(n_sheets: int, one_based: bool = True) -> List[int]: """ Generates the final page sequence from the starting number of sheets. """ n_pages = calc_n_virtual_pages(n_sheets) assert n_pages % 4 == 0 half_n_pages = n_pages // 2 firsthalf = list(range(half_n_pages)) secondhalf = list(reversed(range(half_n_pages, n_pages))) # Seen from the top of an UNFOLDED booklet (e.g. a stack of paper that's # come out of your printer), "firsthalf" are on the right (from top to # bottom: recto facing up, then verso facing down, then recto, then verso) # and "secondhalf" are on the left (from top to bottom: verso facing up, # then recto facing down, etc.). sequence = [] # type: List[int] top = True for left, right in zip(secondhalf, firsthalf): if not top: left, right = right, left sequence += [left, right] top = not top if one_based: sequence = [x + 1 for x in sequence] log.debug("{} sheets => page sequence {!r}", n_sheets, sequence) return sequence
[ "def", "page_sequence", "(", "n_sheets", ":", "int", ",", "one_based", ":", "bool", "=", "True", ")", "->", "List", "[", "int", "]", ":", "n_pages", "=", "calc_n_virtual_pages", "(", "n_sheets", ")", "assert", "n_pages", "%", "4", "==", "0", "half_n_pages", "=", "n_pages", "//", "2", "firsthalf", "=", "list", "(", "range", "(", "half_n_pages", ")", ")", "secondhalf", "=", "list", "(", "reversed", "(", "range", "(", "half_n_pages", ",", "n_pages", ")", ")", ")", "# Seen from the top of an UNFOLDED booklet (e.g. a stack of paper that's", "# come out of your printer), \"firsthalf\" are on the right (from top to", "# bottom: recto facing up, then verso facing down, then recto, then verso)", "# and \"secondhalf\" are on the left (from top to bottom: verso facing up,", "# then recto facing down, etc.).", "sequence", "=", "[", "]", "# type: List[int]", "top", "=", "True", "for", "left", ",", "right", "in", "zip", "(", "secondhalf", ",", "firsthalf", ")", ":", "if", "not", "top", ":", "left", ",", "right", "=", "right", ",", "left", "sequence", "+=", "[", "left", ",", "right", "]", "top", "=", "not", "top", "if", "one_based", ":", "sequence", "=", "[", "x", "+", "1", "for", "x", "in", "sequence", "]", "log", ".", "debug", "(", "\"{} sheets => page sequence {!r}\"", ",", "n_sheets", ",", "sequence", ")", "return", "sequence" ]
Generates the final page sequence from the starting number of sheets.
[ "Generates", "the", "final", "page", "sequence", "from", "the", "starting", "number", "of", "sheets", "." ]
python
train
42.36
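Worked examples, assuming calc_n_virtual_pages() rounds one physical sheet up to four virtual pages, two per side (the n_pages % 4 == 0 assertion implies this, but the helper itself is not shown in the record):

from cardinal_pythonlib.tools.pdf_to_booklet import page_sequence

page_sequence(1)                   # [4, 1, 2, 3] -- front: 4|1, back: 2|3
page_sequence(2)                   # [8, 1, 2, 7, 6, 3, 4, 5]
page_sequence(1, one_based=False)  # [3, 0, 1, 2]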