Dataset schema (one record per extracted Python function; string columns list min/max value lengths):

    nwo                 string    5 .. 91       repository "name with owner"
    sha                 string    40 .. 40      commit SHA
    path                string    5 .. 174      file path within the repository
    language            string    1 class       always "python"
    identifier          string    1 .. 120      function or method name
    parameters          string    0 .. 3.15k    parameter list
    argument_list       string    1 class       single constant value
    return_statement    string    0 .. 24.1k    final return statement, if any
    docstring           string    0 .. 27.3k    full docstring
    docstring_summary   string    0 .. 13.8k    first paragraph of the docstring
    docstring_tokens    sequence                tokenized docstring
    function            string    22 .. 139k    full function source
    function_tokens     sequence                tokenized function source
    url                 string    87 .. 283     GitHub permalink with line range
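The records below follow this schema, one field per line, each ending with the GitHub permalink. As a minimal loading sketch for data shaped like this (the JSONL file name is a placeholder; this viewer export itself is not directly loadable):

# Sketch: iterate records with this schema using the `datasets` library;
# "functions.jsonl" stands in for a real export of these rows.
from datasets import load_dataset

ds = load_dataset("json", data_files="functions.jsonl", split="train")
for row in ds.select(range(3)):
    print(row["nwo"], row["identifier"], row["url"])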
Befzz/blender3d_import_psk_psa
47f1418aef7642f300e0fccbe3c96654ab275a52
addons/io_import_scene_unreal_psa_psk_270.py
python
obj_select_get
(obj)
return obj.select
[]
def obj_select_get(obj):
    return obj.select
[ "def", "obj_select_get", "(", "obj", ")", ":", "return", "obj", ".", "select" ]
https://github.com/Befzz/blender3d_import_psk_psa/blob/47f1418aef7642f300e0fccbe3c96654ab275a52/addons/io_import_scene_unreal_psa_psk_270.py#L100-L101
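This getter reads the Blender 2.7x `obj.select` attribute; in Blender 2.80+ that attribute was replaced by `obj.select_get()`. A sketch of the version-dispatching shim such addons typically use (the dispatch itself is illustrative, not quoted from this file):

# Sketch: pick the right selection getter for the running Blender version.
import bpy

if bpy.app.version >= (2, 80, 0):
    def obj_select_get(obj):
        return obj.select_get()
else:
    def obj_select_get(obj):
        return obj.select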
openvinotoolkit/training_extensions
e7aa33af94a1f8004d3ea2df259d99234dfca046
ote_cli/ote_cli/utils/io.py
python
save_model_data
(model, folder)
Saves model data to folder. Folder is created if it does not exist.
Saves model data to folder. Folder is created if it does not exist.
[ "Saves", "model", "data", "to", "folder", ".", "Folder", "is", "created", "if", "it", "does", "not", "exist", "." ]
def save_model_data(model, folder):
    """
    Saves model data to folder. Folder is created if it does not exist.
    """
    os.makedirs(folder, exist_ok=True)
    for filename, model_adapter in model.model_adapters.items():
        with open(os.path.join(folder, filename), "wb") as write_file:
            write_file.write(model_adapter.data)
[ "def", "save_model_data", "(", "model", ",", "folder", ")", ":", "os", ".", "makedirs", "(", "folder", ",", "exist_ok", "=", "True", ")", "for", "filename", ",", "model_adapter", "in", "model", ".", "model_adapters", ".", "items", "(", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "folder", ",", "filename", ")", ",", "\"wb\"", ")", "as", "write_file", ":", "write_file", ".", "write", "(", "model_adapter", ".", "data", ")" ]
https://github.com/openvinotoolkit/training_extensions/blob/e7aa33af94a1f8004d3ea2df259d99234dfca046/ote_cli/ote_cli/utils/io.py#L31-L39
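A usage sketch with stand-in objects, since the real ote_cli model types are not shown here; the only assumed contract is the `model_adapters` mapping used above:

from types import SimpleNamespace

# Stand-ins for the real model/adapter classes.
adapter = SimpleNamespace(data=b"serialized-weights")
model = SimpleNamespace(model_adapters={"model.bin": adapter})
save_model_data(model, "./exported_model")  # folder is created if missing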
aichallenge/aichallenge
4237971f22767ef7f439d297e4e6ad7c458415dc
setup/worker_setup.py
python
main
(argv=["worker_setup.py"])
Completely set everything up from a fresh ec2 instance
Completely set everything up from a fresh ec2 instance
[ "Completely", "set", "everything", "up", "from", "a", "fresh", "ec2", "instance" ]
def main(argv=["worker_setup.py"]):
    """ Completely set everything up from a fresh ec2 instance """
    _, ubuntu_arch = check_ubuntu_version()
    opts = get_options(argv)
    opts.arch = ubuntu_arch
    with Environ("DEBIAN_FRONTEND", "noninteractive"):
        if opts.update_system:
            run_cmd("apt-get update")
            run_cmd("apt-get upgrade -y")
        if opts.install_required:
            install_required_packages()
        if opts.install_utilities:
            install_utility_packages()
        if opts.install_pkg_languages:
            install_packaged_languages()
        if opts.install_languages:
            install_all_languages(opts)
        if opts.install_jailguard:
            install_jailguard(opts)
        if opts.create_jails:
            setup_base_chroot(opts)
    if opts.packages_only:
        return
    setup_contest_files(opts)
    if opts.create_jails:
        setup_base_jail(opts)
        setup_jailusers(opts)
    start_script = os.path.join(opts.root_dir, opts.local_repo,
                                "worker/start_worker.sh")
    if opts.install_cronjob:
        cron_file = "/etc/cron.d/ai-contest"
        if not file_contains(cron_file, start_script):
            append_line(cron_file, "@reboot %s %s" % (opts.username, start_script,))
    if opts.run_worker:
        run_cmd("sudo -u %s %s" % (opts.username, start_script))
[ "def", "main", "(", "argv", "=", "[", "\"worker_setup.py\"", "]", ")", ":", "_", ",", "ubuntu_arch", "=", "check_ubuntu_version", "(", ")", "opts", "=", "get_options", "(", "argv", ")", "opts", ".", "arch", "=", "ubuntu_arch", "with", "Environ", "(", "\"DEBIAN_FRONTEND\"", ",", "\"noninteractive\"", ")", ":", "if", "opts", ".", "update_system", ":", "run_cmd", "(", "\"apt-get update\"", ")", "run_cmd", "(", "\"apt-get upgrade -y\"", ")", "if", "opts", ".", "install_required", ":", "install_required_packages", "(", ")", "if", "opts", ".", "install_utilities", ":", "install_utility_packages", "(", ")", "if", "opts", ".", "install_pkg_languages", ":", "install_packaged_languages", "(", ")", "if", "opts", ".", "install_languages", ":", "install_all_languages", "(", "opts", ")", "if", "opts", ".", "install_jailguard", ":", "install_jailguard", "(", "opts", ")", "if", "opts", ".", "create_jails", ":", "setup_base_chroot", "(", "opts", ")", "if", "opts", ".", "packages_only", ":", "return", "setup_contest_files", "(", "opts", ")", "if", "opts", ".", "create_jails", ":", "setup_base_jail", "(", "opts", ")", "setup_jailusers", "(", "opts", ")", "start_script", "=", "os", ".", "path", ".", "join", "(", "opts", ".", "root_dir", ",", "opts", ".", "local_repo", ",", "\"worker/start_worker.sh\"", ")", "if", "opts", ".", "install_cronjob", ":", "cron_file", "=", "\"/etc/cron.d/ai-contest\"", "if", "not", "file_contains", "(", "cron_file", ",", "start_script", ")", ":", "append_line", "(", "cron_file", ",", "\"@reboot %s %s\"", "%", "(", "opts", ".", "username", ",", "start_script", ",", ")", ")", "if", "opts", ".", "run_worker", ":", "run_cmd", "(", "\"sudo -u %s %s\"", "%", "(", "opts", ".", "username", ",", "start_script", ")", ")" ]
https://github.com/aichallenge/aichallenge/blob/4237971f22767ef7f439d297e4e6ad7c458415dc/setup/worker_setup.py#L439-L474
jgagneastro/coffeegrindsize
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/optimize/_lsq/trf_linear.py
python
backtracking
(A, g, x, p, theta, p_dot_g, lb, ub)
return x, step, cost_change
Find an appropriate step size using backtracking line search.
Find an appropriate step size using backtracking line search.
[ "Find", "an", "appropriate", "step", "size", "using", "backtracking", "line", "search", "." ]
def backtracking(A, g, x, p, theta, p_dot_g, lb, ub):
    """Find an appropriate step size using backtracking line search."""
    alpha = 1
    while True:
        x_new, _ = reflective_transformation(x + alpha * p, lb, ub)
        step = x_new - x
        cost_change = -evaluate_quadratic(A, g, step)
        if cost_change > -0.1 * alpha * p_dot_g:
            break
        alpha *= 0.5

    active = find_active_constraints(x_new, lb, ub)
    if np.any(active != 0):
        x_new, _ = reflective_transformation(x + theta * alpha * p, lb, ub)
        x_new = make_strictly_feasible(x_new, lb, ub, rstep=0)
        step = x_new - x
        cost_change = -evaluate_quadratic(A, g, step)

    return x, step, cost_change
[ "def", "backtracking", "(", "A", ",", "g", ",", "x", ",", "p", ",", "theta", ",", "p_dot_g", ",", "lb", ",", "ub", ")", ":", "alpha", "=", "1", "while", "True", ":", "x_new", ",", "_", "=", "reflective_transformation", "(", "x", "+", "alpha", "*", "p", ",", "lb", ",", "ub", ")", "step", "=", "x_new", "-", "x", "cost_change", "=", "-", "evaluate_quadratic", "(", "A", ",", "g", ",", "step", ")", "if", "cost_change", ">", "-", "0.1", "*", "alpha", "*", "p_dot_g", ":", "break", "alpha", "*=", "0.5", "active", "=", "find_active_constraints", "(", "x_new", ",", "lb", ",", "ub", ")", "if", "np", ".", "any", "(", "active", "!=", "0", ")", ":", "x_new", ",", "_", "=", "reflective_transformation", "(", "x", "+", "theta", "*", "alpha", "*", "p", ",", "lb", ",", "ub", ")", "x_new", "=", "make_strictly_feasible", "(", "x_new", ",", "lb", ",", "ub", ",", "rstep", "=", "0", ")", "step", "=", "x_new", "-", "x", "cost_change", "=", "-", "evaluate_quadratic", "(", "A", ",", "g", ",", "step", ")", "return", "x", ",", "step", ",", "cost_change" ]
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/optimize/_lsq/trf_linear.py#L72-L90
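The loop accepts the first alpha whose actual cost decrease beats a fixed fraction of the predicted linear decrease, an Armijo-style test; the tail handles steps that hit the bounds. A self-contained numpy sketch of just the acceptance test (illustrative, not the scipy-private helpers used above):

import numpy as np

def simple_backtracking(f, x, p, g, c=0.1, alpha=1.0):
    # Halve alpha until f(x + alpha*p) decreases by at least c*alpha*g.p,
    # mirroring the `cost_change > -0.1 * alpha * p_dot_g` test above.
    p_dot_g = p @ g
    while f(x + alpha * p) - f(x) > c * alpha * p_dot_g:
        alpha *= 0.5
    return alpha

f = lambda x: 0.5 * (x @ x)      # simple convex quadratic
x = np.array([3.0, -4.0])
g = x                            # gradient of f at x
print(simple_backtracking(f, x, -g, g))  # accepts alpha = 1.0 here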
hidasib/GRU4Rec
8184fd9b3769469dfc1a2b583e401c794016fbbf
baselines.py
python
ItemKNN.predict_next
(self, session_id, input_item_id, predict_for_item_ids)
return pd.Series(data=preds, index=predict_for_item_ids)
Gives predicton scores for a selected set of items on how likely they be the next item in the session. Parameters -------- session_id : int or string The session IDs of the event. input_item_id : int or string The item ID of the event. Must be in the set of item IDs of the training set. predict_for_item_ids : 1D array IDs of items for which the network should give prediction scores. Every ID must be in the set of item IDs of the training set. Returns -------- out : pandas.Series Prediction scores for selected items on how likely to be the next item of this session. Indexed by the item IDs.
Gives predicton scores for a selected set of items on how likely they be the next item in the session. Parameters -------- session_id : int or string The session IDs of the event. input_item_id : int or string The item ID of the event. Must be in the set of item IDs of the training set. predict_for_item_ids : 1D array IDs of items for which the network should give prediction scores. Every ID must be in the set of item IDs of the training set. Returns -------- out : pandas.Series Prediction scores for selected items on how likely to be the next item of this session. Indexed by the item IDs.
[ "Gives", "predicton", "scores", "for", "a", "selected", "set", "of", "items", "on", "how", "likely", "they", "be", "the", "next", "item", "in", "the", "session", ".", "Parameters", "--------", "session_id", ":", "int", "or", "string", "The", "session", "IDs", "of", "the", "event", ".", "input_item_id", ":", "int", "or", "string", "The", "item", "ID", "of", "the", "event", ".", "Must", "be", "in", "the", "set", "of", "item", "IDs", "of", "the", "training", "set", ".", "predict_for_item_ids", ":", "1D", "array", "IDs", "of", "items", "for", "which", "the", "network", "should", "give", "prediction", "scores", ".", "Every", "ID", "must", "be", "in", "the", "set", "of", "item", "IDs", "of", "the", "training", "set", ".", "Returns", "--------", "out", ":", "pandas", ".", "Series", "Prediction", "scores", "for", "selected", "items", "on", "how", "likely", "to", "be", "the", "next", "item", "of", "this", "session", ".", "Indexed", "by", "the", "item", "IDs", "." ]
def predict_next(self, session_id, input_item_id, predict_for_item_ids):
    '''
    Gives predicton scores for a selected set of items on how likely they be the next item in the session.

    Parameters
    --------
    session_id : int or string
        The session IDs of the event.
    input_item_id : int or string
        The item ID of the event. Must be in the set of item IDs of the training set.
    predict_for_item_ids : 1D array
        IDs of items for which the network should give prediction scores. Every ID must be in the set of item IDs of the training set.

    Returns
    --------
    out : pandas.Series
        Prediction scores for selected items on how likely to be the next item of this session. Indexed by the item IDs.

    '''
    preds = np.zeros(len(predict_for_item_ids))
    sim_list = self.sims[input_item_id]
    mask = np.in1d(predict_for_item_ids, sim_list.index)
    preds[mask] = sim_list[predict_for_item_ids[mask]]
    return pd.Series(data=preds, index=predict_for_item_ids)
[ "def", "predict_next", "(", "self", ",", "session_id", ",", "input_item_id", ",", "predict_for_item_ids", ")", ":", "preds", "=", "np", ".", "zeros", "(", "len", "(", "predict_for_item_ids", ")", ")", "sim_list", "=", "self", ".", "sims", "[", "input_item_id", "]", "mask", "=", "np", ".", "in1d", "(", "predict_for_item_ids", ",", "sim_list", ".", "index", ")", "preds", "[", "mask", "]", "=", "sim_list", "[", "predict_for_item_ids", "[", "mask", "]", "]", "return", "pd", ".", "Series", "(", "data", "=", "preds", ",", "index", "=", "predict_for_item_ids", ")" ]
https://github.com/hidasib/GRU4Rec/blob/8184fd9b3769469dfc1a2b583e401c794016fbbf/baselines.py#L278-L301
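A usage sketch; `self.sims` is assumed (from the body above) to map an item ID to a pandas Series of similarity scores indexed by item ID, so a faked object stands in for a trained ItemKNN:

import numpy as np
import pandas as pd

class FakeItemKNN:
    sims = {42: pd.Series({7: 0.9, 9: 0.4})}  # stand-in similarity table
    predict_next = predict_next               # reuse the method above

scores = FakeItemKNN().predict_next(session_id=1, input_item_id=42,
                                    predict_for_item_ids=np.array([7, 8, 9]))
# scores: item 7 -> 0.9, item 8 -> 0.0, item 9 -> 0.4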
securityclippy/elasticintel
aa08d3e9f5ab1c000128e95161139ce97ff0e334
ingest_feed_lambda/numpy/distutils/misc_util.py
python
default_config_dict
(name = None, parent_name = None, local_path=None)
return c.todict()
Return a configuration dictionary for usage in configuration() function defined in file setup_<name>.py.
Return a configuration dictionary for usage in configuration() function defined in file setup_<name>.py.
[ "Return", "a", "configuration", "dictionary", "for", "usage", "in", "configuration", "()", "function", "defined", "in", "file", "setup_<name", ">", ".", "py", "." ]
def default_config_dict(name = None, parent_name = None, local_path=None):
    """Return a configuration dictionary for usage in
    configuration() function defined in file setup_<name>.py.
    """
    import warnings
    warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
                  'deprecated default_config_dict(%r,%r,%r)'
                  % (name, parent_name, local_path,
                     name, parent_name, local_path,
                     ), stacklevel=2)
    c = Configuration(name, parent_name, local_path)
    return c.todict()
[ "def", "default_config_dict", "(", "name", "=", "None", ",", "parent_name", "=", "None", ",", "local_path", "=", "None", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "'Use Configuration(%r,%r,top_path=%r) instead of '", "'deprecated default_config_dict(%r,%r,%r)'", "%", "(", "name", ",", "parent_name", ",", "local_path", ",", "name", ",", "parent_name", ",", "local_path", ",", ")", ",", "stacklevel", "=", "2", ")", "c", "=", "Configuration", "(", "name", ",", "parent_name", ",", "local_path", ")", "return", "c", ".", "todict", "(", ")" ]
https://github.com/securityclippy/elasticintel/blob/aa08d3e9f5ab1c000128e95161139ce97ff0e334/ingest_feed_lambda/numpy/distutils/misc_util.py#L2230-L2241
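The replacement the deprecation warning itself recommends:

from numpy.distutils.misc_util import Configuration

# 'mypkg' and 'parent' are example names.
config = Configuration('mypkg', 'parent', top_path=None)
config_dict = config.todict()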
richq/folders2flickr
0b735057dbf3c0ea132668af36d30ded52e7b6d9
f2flickr/flickr.py
python
Blogs.getList
(self,auth=True)
return myReturn
blogs.getList requires READ authentication
blogs.getList requires READ authentication
[ "blogs", ".", "getList", "requires", "READ", "authentication" ]
def getList(self,auth=True):
    """blogs.getList requires READ authentication"""
    # please read documentation on how to use this
    method = 'flickr.blogs.getList'
    if auth==True :
        data = _doget(method, auth=True)
    if not auth==True :
        data = _doget(method, auth=False)
    bID = []
    bName = []
    bNeedsPword = []
    bURL = []
    try:
        for plog in data.rsp.blogs.blog:
            bID.append(plog.id)
            bName.append(plog.name)
            bNeedsPword.append(plog.needspassword)
            bURL.append(plog.url)
    except TypeError:
        try:
            bID.append(data.rsp.blogs.blog.id)
            bName.append(data.rsp.blogs.blog.name)
            bNeedsPword.append(data.rsp.blogs.blog.needspassword)
            bURL.append(data.rsp.blogs.blog.url)
        except AttributeError:
            return "AttributeError, unexplained!"
        except:
            return "Unknown error!"
    except AttributeError:
        return "There are no blogs!"
    myReturn = [bID,bName,bNeedsPword,bURL]
    return myReturn
[ "def", "getList", "(", "self", ",", "auth", "=", "True", ")", ":", "# please read documentation on how to use this", "method", "=", "'flickr.blogs.getList'", "if", "auth", "==", "True", ":", "data", "=", "_doget", "(", "method", ",", "auth", "=", "True", ")", "if", "not", "auth", "==", "True", ":", "data", "=", "_doget", "(", "method", ",", "auth", "=", "False", ")", "bID", "=", "[", "]", "bName", "=", "[", "]", "bNeedsPword", "=", "[", "]", "bURL", "=", "[", "]", "try", ":", "for", "plog", "in", "data", ".", "rsp", ".", "blogs", ".", "blog", ":", "bID", ".", "append", "(", "plog", ".", "id", ")", "bName", ".", "append", "(", "plog", ".", "name", ")", "bNeedsPword", ".", "append", "(", "plog", ".", "needspassword", ")", "bURL", ".", "append", "(", "plog", ".", "url", ")", "except", "TypeError", ":", "try", ":", "bID", ".", "append", "(", "data", ".", "rsp", ".", "blogs", ".", "blog", ".", "id", ")", "bName", ".", "append", "(", "data", ".", "rsp", ".", "blogs", ".", "blog", ".", "name", ")", "bNeedsPword", ".", "append", "(", "data", ".", "rsp", ".", "blogs", ".", "blog", ".", "needspassword", ")", "bURL", ".", "append", "(", "data", ".", "rsp", ".", "blogs", ".", "blog", ".", "url", ")", "except", "AttributeError", ":", "return", "\"AttributeError, unexplained!\"", "except", ":", "return", "\"Unknown error!\"", "except", "AttributeError", ":", "return", "\"There are no blogs!\"", "myReturn", "=", "[", "bID", ",", "bName", ",", "bNeedsPword", ",", "bURL", "]", "return", "myReturn" ]
https://github.com/richq/folders2flickr/blob/0b735057dbf3c0ea132668af36d30ded52e7b6d9/f2flickr/flickr.py#L1414-L1447
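Note the method signals failure by returning a string instead of raising, so a caller has to type-check before unpacking the four parallel lists; a sketch:

result = Blogs().getList(auth=True)
if isinstance(result, str):
    print("lookup failed:", result)
else:
    bID, bName, bNeedsPword, bURL = result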
miketeo/pysmb
fc3faca073385b8abc4a503bb4439f849840f94c
python2/smb/SMBConnection.py
python
SMBConnection.echo
(self, data, timeout = 10)
return results[0]
Send an echo command containing *data* to the remote SMB/CIFS server. The remote SMB/CIFS will reply with the same *data*. :param bytes data: Data to send to the remote server. Must be a bytes object. :return: The *data* parameter
Send an echo command containing *data* to the remote SMB/CIFS server. The remote SMB/CIFS will reply with the same *data*.
[ "Send", "an", "echo", "command", "containing", "*", "data", "*", "to", "the", "remote", "SMB", "/", "CIFS", "server", ".", "The", "remote", "SMB", "/", "CIFS", "will", "reply", "with", "the", "same", "*", "data", "*", "." ]
def echo(self, data, timeout = 10):
    """
    Send an echo command containing *data* to the remote SMB/CIFS server. The remote SMB/CIFS will reply with the same *data*.

    :param bytes data: Data to send to the remote server. Must be a bytes object.

    :return: The *data* parameter
    """
    if not self.sock:
        raise NotConnectedError('Not connected to server')

    results = [ ]

    def cb(r):
        self.is_busy = False
        results.append(r)

    def eb(failure):
        self.is_busy = False
        raise failure

    self.is_busy = True
    try:
        self._echo(data, cb, eb)
        while self.is_busy:
            self._pollForNetBIOSPacket(timeout)
    finally:
        self.is_busy = False

    return results[0]
[ "def", "echo", "(", "self", ",", "data", ",", "timeout", "=", "10", ")", ":", "if", "not", "self", ".", "sock", ":", "raise", "NotConnectedError", "(", "'Not connected to server'", ")", "results", "=", "[", "]", "def", "cb", "(", "r", ")", ":", "self", ".", "is_busy", "=", "False", "results", ".", "append", "(", "r", ")", "def", "eb", "(", "failure", ")", ":", "self", ".", "is_busy", "=", "False", "raise", "failure", "self", ".", "is_busy", "=", "True", "try", ":", "self", ".", "_echo", "(", "data", ",", "cb", ",", "eb", ")", "while", "self", ".", "is_busy", ":", "self", ".", "_pollForNetBIOSPacket", "(", "timeout", ")", "finally", ":", "self", ".", "is_busy", "=", "False", "return", "results", "[", "0", "]" ]
https://github.com/miketeo/pysmb/blob/fc3faca073385b8abc4a503bb4439f849840f94c/python2/smb/SMBConnection.py#L549-L577
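A usage sketch with placeholder connection parameters:

from smb.SMBConnection import SMBConnection

conn = SMBConnection('user', 'password', 'my_client', 'remote_server')
if conn.connect('192.168.1.10', 139):
    assert conn.echo(b'ping') == b'ping'  # server echoes the same bytes
    conn.close()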
googleads/google-ads-python
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
google/ads/googleads/v7/services/services/gender_view_service/client.py
python
GenderViewServiceClient.parse_common_organization_path
(path: str)
return m.groupdict() if m else {}
Parse a organization path into its component segments.
Parse a organization path into its component segments.
[ "Parse", "a", "organization", "path", "into", "its", "component", "segments", "." ]
def parse_common_organization_path(path: str) -> Dict[str, str]:
    """Parse a organization path into its component segments."""
    m = re.match(r"^organizations/(?P<organization>.+?)$", path)
    return m.groupdict() if m else {}
[ "def", "parse_common_organization_path", "(", "path", ":", "str", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "m", "=", "re", ".", "match", "(", "r\"^organizations/(?P<organization>.+?)$\"", ",", "path", ")", "return", "m", ".", "groupdict", "(", ")", "if", "m", "else", "{", "}" ]
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v7/services/services/gender_view_service/client.py#L208-L211
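A round-trip sketch; the helper inverts the `organizations/{organization}` path template:

path = "organizations/1234567890"
segments = GenderViewServiceClient.parse_common_organization_path(path)
# segments == {'organization': '1234567890'}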
obspy/obspy
0ee5a0d2db293c8d5d4c3b1f148a6c5a85fea55f
obspy/clients/filesystem/msriterator.py
python
_MSRIterator.__next__
(self)
return self
Read next record from file.
Read next record from file.
[ "Read", "next", "record", "from", "file", "." ]
def __next__(self):
    """
    Read next record from file.
    """
    errcode = clibmseed.ms_readmsr_r(C.pointer(self.msf),
                                     C.pointer(self.msr),
                                     self.file.encode('ascii', 'strict'),
                                     self.reclen, C.byref(self.fpos),
                                     None, self.skipnotdata,
                                     self.dataflag, self.verbose)
    if errcode == MS_ENDOFFILE:
        raise StopIteration()
    if self.raise_errors:
        if errcode != MS_NOERROR:
            raise Exception("Error %d in ms_readmsr_r" % errcode)
    return self
[ "def", "__next__", "(", "self", ")", ":", "errcode", "=", "clibmseed", ".", "ms_readmsr_r", "(", "C", ".", "pointer", "(", "self", ".", "msf", ")", ",", "C", ".", "pointer", "(", "self", ".", "msr", ")", ",", "self", ".", "file", ".", "encode", "(", "'ascii'", ",", "'strict'", ")", ",", "self", ".", "reclen", ",", "C", ".", "byref", "(", "self", ".", "fpos", ")", ",", "None", ",", "self", ".", "skipnotdata", ",", "self", ".", "dataflag", ",", "self", ".", "verbose", ")", "if", "errcode", "==", "MS_ENDOFFILE", ":", "raise", "StopIteration", "(", ")", "if", "self", ".", "raise_errors", ":", "if", "errcode", "!=", "MS_NOERROR", ":", "raise", "Exception", "(", "\"Error %d in ms_readmsr_r\"", "%", "errcode", ")", "return", "self" ]
https://github.com/obspy/obspy/blob/0ee5a0d2db293c8d5d4c3b1f148a6c5a85fea55f/obspy/clients/filesystem/msriterator.py#L88-L109
Qirky/FoxDot
76318f9630bede48ff3994146ed644affa27bfa4
FoxDot/lib/Workspace/TextBox.py
python
ThreadedText.update
(self)
return
Recursively called method that monitors as queue of Tkinter tasks.
Recursively called method that monitors as queue of Tkinter tasks.
[ "Recursively", "called", "method", "that", "monitors", "as", "queue", "of", "Tkinter", "tasks", "." ]
def update(self):
    """ Recursively called method that monitors as queue of Tkinter tasks.  """
    try:
        while True:
            task, args, kwargs = self.queue.get_nowait()
            task(*args, **kwargs)
            self.update_idletasks()
    # Break when the queue is empty
    except Queue.Empty:
        pass
    except Exception as e:
        print(e)
    # Recursive call
    self.after(10, self.update)
    return
[ "def", "update", "(", "self", ")", ":", "try", ":", "while", "True", ":", "task", ",", "args", ",", "kwargs", "=", "self", ".", "queue", ".", "get_nowait", "(", ")", "task", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "update_idletasks", "(", ")", "# Break when the queue is empty", "except", "Queue", ".", "Empty", ":", "pass", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "# Recursive call", "self", ".", "after", "(", "10", ",", "self", ".", "update", ")", "return" ]
https://github.com/Qirky/FoxDot/blob/76318f9630bede48ff3994146ed644affa27bfa4/FoxDot/lib/Workspace/TextBox.py#L40-L65
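The consumer above drains (task, args, kwargs) tuples; on the producer side, any worker thread enqueues callables instead of touching Tkinter directly. A sketch, where `widget` is a ThreadedText instance:

# Safe cross-thread UI update: queue the call, let update() run it.
widget.queue.put((widget.insert, ("end", "hello\n"), {}))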
Net-ng/kansha
85b5816da126b1c7098707c98f217d8b2e524ff2
kansha/user/usermanager.py
python
UserManager.get_confirmed_users
()
return DataUser.get_confirmed_users()
Return confirmed user Return all user which have email validated Return: - list of DataUser instance
Return confirmed user
[ "Return", "confirmed", "user" ]
def get_confirmed_users():
    """Return confirmed user

    Return all user which have email validated

    Return:
        - list of DataUser instance
    """
    return DataUser.get_confirmed_users()
[ "def", "get_confirmed_users", "(", ")", ":", "return", "DataUser", ".", "get_confirmed_users", "(", ")" ]
https://github.com/Net-ng/kansha/blob/85b5816da126b1c7098707c98f217d8b2e524ff2/kansha/user/usermanager.py#L73-L81
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/messaging/smsbackends/telerivet/views.py
python
TelerivetSetupView.page_context
(self)
return { 'outgoing_sms_form': TelerivetOutgoingSMSForm(), 'test_sms_form': TelerivetPhoneNumberForm(), 'finalize_gateway_form': FinalizeGatewaySetupForm( initial={ 'name': 'TELERIVET', 'set_as_default': (FinalizeGatewaySetupForm.NO if domain_has_default_gateway else FinalizeGatewaySetupForm.YES), } ), 'webhook_url': webhook_url, 'include_https_notice': webhook_url.startswith('https'), 'webhook_secret': webhook_secret, 'request_token': request_token, 'gateway_list_url': reverse(DomainSmsGatewayListView.urlname, args=[self.domain]), }
[]
def page_context(self):
    # The webhook secret is a piece of data that is sent to hq on each
    # Telerivet inbound request. It's used to tie an inbound request to
    # a Telerivet backend.
    webhook_secret = uuid.uuid4().hex

    # The request token is only used for the purposes of using this UI to
    # setup a Telerivet backend. We need a way to post the webhook_secret
    # to create the backend, but we want hq to be the origin of the secret
    # generation. So instead, the request_token resolves to the webhook_secret
    # via a redis lookup which expires in 1 week.
    request_token = uuid.uuid4().hex

    TelerivetSetupView.set_cached_webhook_secret(request_token, webhook_secret)
    domain_has_default_gateway = SQLMobileBackend.get_domain_default_backend(
        SQLMobileBackend.SMS,
        self.domain,
        id_only=True
    ) is not None

    webhook_url = absolute_reverse('telerivet_in')

    return {
        'outgoing_sms_form': TelerivetOutgoingSMSForm(),
        'test_sms_form': TelerivetPhoneNumberForm(),
        'finalize_gateway_form': FinalizeGatewaySetupForm(
            initial={
                'name': 'TELERIVET',
                'set_as_default': (FinalizeGatewaySetupForm.NO
                                   if domain_has_default_gateway
                                   else FinalizeGatewaySetupForm.YES),
            }
        ),
        'webhook_url': webhook_url,
        'include_https_notice': webhook_url.startswith('https'),
        'webhook_secret': webhook_secret,
        'request_token': request_token,
        'gateway_list_url': reverse(DomainSmsGatewayListView.urlname, args=[self.domain]),
    }
[ "def", "page_context", "(", "self", ")", ":", "# The webhook secret is a piece of data that is sent to hq on each", "# Telerivet inbound request. It's used to tie an inbound request to", "# a Telerivet backend.", "webhook_secret", "=", "uuid", ".", "uuid4", "(", ")", ".", "hex", "# The request token is only used for the purposes of using this UI to", "# setup a Telerivet backend. We need a way to post the webhook_secret", "# to create the backend, but we want hq to be the origin of the secret", "# generation. So instead, the request_token resolves to the webhook_secret", "# via a redis lookup which expires in 1 week.", "request_token", "=", "uuid", ".", "uuid4", "(", ")", ".", "hex", "TelerivetSetupView", ".", "set_cached_webhook_secret", "(", "request_token", ",", "webhook_secret", ")", "domain_has_default_gateway", "=", "SQLMobileBackend", ".", "get_domain_default_backend", "(", "SQLMobileBackend", ".", "SMS", ",", "self", ".", "domain", ",", "id_only", "=", "True", ")", "is", "not", "None", "webhook_url", "=", "absolute_reverse", "(", "'telerivet_in'", ")", "return", "{", "'outgoing_sms_form'", ":", "TelerivetOutgoingSMSForm", "(", ")", ",", "'test_sms_form'", ":", "TelerivetPhoneNumberForm", "(", ")", ",", "'finalize_gateway_form'", ":", "FinalizeGatewaySetupForm", "(", "initial", "=", "{", "'name'", ":", "'TELERIVET'", ",", "'set_as_default'", ":", "(", "FinalizeGatewaySetupForm", ".", "NO", "if", "domain_has_default_gateway", "else", "FinalizeGatewaySetupForm", ".", "YES", ")", ",", "}", ")", ",", "'webhook_url'", ":", "webhook_url", ",", "'include_https_notice'", ":", "webhook_url", ".", "startswith", "(", "'https'", ")", ",", "'webhook_secret'", ":", "webhook_secret", ",", "'request_token'", ":", "request_token", ",", "'gateway_list_url'", ":", "reverse", "(", "DomainSmsGatewayListView", ".", "urlname", ",", "args", "=", "[", "self", ".", "domain", "]", ")", ",", "}" ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/messaging/smsbackends/telerivet/views.py#L122-L159
cdhigh/KindleEar
7c4ecf9625239f12a829210d1760b863ef5a23aa
lib/calibre/ebooks/BeautifulSoup.py
python
BeautifulStoneSoup._smartPop
(self, name)
We need to pop up to the previous tag of this type, unless one of this tag's nesting reset triggers comes between this tag and the previous tag of this type, OR unless this tag is a generic nesting trigger and another generic nesting trigger comes between this tag and the previous tag of this type. Examples: <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'. <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'. <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'. <li><ul><li> *<li>* should pop to 'ul', not the first 'li'. <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr' <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
We need to pop up to the previous tag of this type, unless one of this tag's nesting reset triggers comes between this tag and the previous tag of this type, OR unless this tag is a generic nesting trigger and another generic nesting trigger comes between this tag and the previous tag of this type.
[ "We", "need", "to", "pop", "up", "to", "the", "previous", "tag", "of", "this", "type", "unless", "one", "of", "this", "tag", "s", "nesting", "reset", "triggers", "comes", "between", "this", "tag", "and", "the", "previous", "tag", "of", "this", "type", "OR", "unless", "this", "tag", "is", "a", "generic", "nesting", "trigger", "and", "another", "generic", "nesting", "trigger", "comes", "between", "this", "tag", "and", "the", "previous", "tag", "of", "this", "type", "." ]
def _smartPop(self, name):
    """We need to pop up to the previous tag of this type, unless
    one of this tag's nesting reset triggers comes between this
    tag and the previous tag of this type, OR unless this tag is a
    generic nesting trigger and another generic nesting trigger
    comes between this tag and the previous tag of this type.

    Examples:
     <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
     <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
     <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.

     <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
     <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
     <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
    """
    nestingResetTriggers = self.NESTABLE_TAGS.get(name)
    isNestable = nestingResetTriggers != None
    isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
    popTo = None
    inclusive = True
    for i in range(len(self.tagStack)-1, 0, -1):
        p = self.tagStack[i]
        if (not p or p.name == name) and not isNestable:
            #Non-nestable tags get popped to the top or to their
            #last occurance.
            popTo = name
            break
        if (nestingResetTriggers is not None
            and p.name in nestingResetTriggers) \
            or (nestingResetTriggers is None and isResetNesting
                and self.RESET_NESTING_TAGS.has_key(p.name)):
            #If we encounter one of the nesting reset triggers
            #peculiar to this tag, or we encounter another tag
            #that causes nesting to reset, pop up to but not
            #including that tag.
            popTo = p.name
            inclusive = False
            break
        p = p.parent
    if popTo:
        self._popToTag(popTo, inclusive)
[ "def", "_smartPop", "(", "self", ",", "name", ")", ":", "nestingResetTriggers", "=", "self", ".", "NESTABLE_TAGS", ".", "get", "(", "name", ")", "isNestable", "=", "nestingResetTriggers", "!=", "None", "isResetNesting", "=", "self", ".", "RESET_NESTING_TAGS", ".", "has_key", "(", "name", ")", "popTo", "=", "None", "inclusive", "=", "True", "for", "i", "in", "range", "(", "len", "(", "self", ".", "tagStack", ")", "-", "1", ",", "0", ",", "-", "1", ")", ":", "p", "=", "self", ".", "tagStack", "[", "i", "]", "if", "(", "not", "p", "or", "p", ".", "name", "==", "name", ")", "and", "not", "isNestable", ":", "#Non-nestable tags get popped to the top or to their", "#last occurance.", "popTo", "=", "name", "break", "if", "(", "nestingResetTriggers", "is", "not", "None", "and", "p", ".", "name", "in", "nestingResetTriggers", ")", "or", "(", "nestingResetTriggers", "is", "None", "and", "isResetNesting", "and", "self", ".", "RESET_NESTING_TAGS", ".", "has_key", "(", "p", ".", "name", ")", ")", ":", "#If we encounter one of the nesting reset triggers", "#peculiar to this tag, or we encounter another tag", "#that causes nesting to reset, pop up to but not", "#including that tag.", "popTo", "=", "p", ".", "name", "inclusive", "=", "False", "break", "p", "=", "p", ".", "parent", "if", "popTo", ":", "self", ".", "_popToTag", "(", "popTo", ",", "inclusive", ")" ]
https://github.com/cdhigh/KindleEar/blob/7c4ecf9625239f12a829210d1760b863ef5a23aa/lib/calibre/ebooks/BeautifulSoup.py#L1284-L1328
OneDrive/onedrive-sdk-python
e5642f8cad8eea37a4f653c1a23dfcfc06c37110
src/python2/request/effective_roles_collection.py
python
EffectiveRolesCollectionRequest.get
(self)
return self._page_from_response(collection_response)
Gets the EffectiveRolesCollectionPage Returns: :class:`EffectiveRolesCollectionPage<onedrivesdk.request.effective_roles_collection.EffectiveRolesCollectionPage>`: The EffectiveRolesCollectionPage
Gets the EffectiveRolesCollectionPage
[ "Gets", "the", "EffectiveRolesCollectionPage" ]
def get(self):
    """Gets the EffectiveRolesCollectionPage

    Returns:
        :class:`EffectiveRolesCollectionPage<onedrivesdk.request.effective_roles_collection.EffectiveRolesCollectionPage>`:
            The EffectiveRolesCollectionPage
    """
    self.method = "GET"
    collection_response = EffectiveRolesCollectionResponse(json.loads(self.send().content))
    return self._page_from_response(collection_response)
[ "def", "get", "(", "self", ")", ":", "self", ".", "method", "=", "\"GET\"", "collection_response", "=", "EffectiveRolesCollectionResponse", "(", "json", ".", "loads", "(", "self", ".", "send", "(", ")", ".", "content", ")", ")", "return", "self", ".", "_page_from_response", "(", "collection_response", ")" ]
https://github.com/OneDrive/onedrive-sdk-python/blob/e5642f8cad8eea37a4f653c1a23dfcfc06c37110/src/python2/request/effective_roles_collection.py#L46-L55
nosmokingbandit/watcher
dadacd21a5790ee609058a98a17fcc8954d24439
core/notification.py
python
Notification.add
(data)
return
Adds notification to core.NOTIFICATIONS :param data: dict of notification information Merges supplied 'data' with 'base' dict to ensure no fields are missing Appends 'base' to core.NOTIFICATIONS If data['param'] includes an on_click function, remember to add it to the notifications javascript handler. Does not return
Adds notification to core.NOTIFICATIONS :param data: dict of notification information
[ "Adds", "notification", "to", "core", ".", "NOTIFICATIONS", ":", "param", "data", ":", "dict", "of", "notification", "information" ]
def add(data):
    '''
    Adds notification to core.NOTIFICATIONS
    :param data: dict of notification information

    Merges supplied 'data' with 'base' dict to ensure no fields are missing
    Appends 'base' to core.NOTIFICATIONS

    If data['param'] includes an on_click function, remember to add it to the
        notifications javascript handler.

    Does not return
    '''
    base = {'type': 'success',
            'title': '',
            'body': '',
            'params': None
            }
    base.update(data)

    logging.debug(u'Creating new notification:')
    logging.debug(base)

    # if it already exists, ignore it
    if base in core.NOTIFICATIONS:
        return

    # if this is an update notif, remove other update notifs first
    if base['type'] == u'update':
        for i, v in enumerate(core.NOTIFICATIONS):
            if v['type'] == u'update':
                core.NOTIFICATIONS[i] = None

    # if there is a None in the list, overwrite it. If not, just append
    for i, v in enumerate(core.NOTIFICATIONS):
        if v is None:
            core.NOTIFICATIONS[i] = base
            return
    core.NOTIFICATIONS.append(base)
    return
[ "def", "add", "(", "data", ")", ":", "base", "=", "{", "'type'", ":", "'success'", ",", "'title'", ":", "''", ",", "'body'", ":", "''", ",", "'params'", ":", "None", "}", "base", ".", "update", "(", "data", ")", "logging", ".", "debug", "(", "u'Creating new notification:'", ")", "logging", ".", "debug", "(", "base", ")", "# if it already exists, ignore it", "if", "base", "in", "core", ".", "NOTIFICATIONS", ":", "return", "# if this is an update notif, remove other update notifs first", "if", "base", "[", "'type'", "]", "==", "u'update'", ":", "for", "i", ",", "v", "in", "enumerate", "(", "core", ".", "NOTIFICATIONS", ")", ":", "if", "v", "[", "'type'", "]", "==", "u'update'", ":", "core", ".", "NOTIFICATIONS", "[", "i", "]", "=", "None", "# if there is a None in the list, overwrite it. If not, just append", "for", "i", ",", "v", "in", "enumerate", "(", "core", ".", "NOTIFICATIONS", ")", ":", "if", "v", "is", "None", ":", "core", ".", "NOTIFICATIONS", "[", "i", "]", "=", "base", "return", "core", ".", "NOTIFICATIONS", ".", "append", "(", "base", ")", "return" ]
https://github.com/nosmokingbandit/watcher/blob/dadacd21a5790ee609058a98a17fcc8954d24439/core/notification.py#L14-L55
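A usage sketch following the docstring: missing fields are filled from the defaults, and an 'update' notification replaces any earlier one:

Notification.add({'title': 'Update available',
                  'body': 'Version 1.2 is ready to install.',
                  'type': 'update'})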
rytilahti/python-miio
b6e53dd16fac77915426e7592e2528b78ef65190
miio/integrations/vacuum/roborock/vacuum.py
python
RoborockVacuum.get_room_mapping
(self)
return self.send("get_room_mapping")
Retrieves a list of segments.
Retrieves a list of segments.
[ "Retrieves", "a", "list", "of", "segments", "." ]
def get_room_mapping(self):
    """Retrieves a list of segments."""
    return self.send("get_room_mapping")
[ "def", "get_room_mapping", "(", "self", ")", ":", "return", "self", ".", "send", "(", "\"get_room_mapping\"", ")" ]
https://github.com/rytilahti/python-miio/blob/b6e53dd16fac77915426e7592e2528b78ef65190/miio/integrations/vacuum/roborock/vacuum.py#L812-L814
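A usage sketch with python-miio; the IP and token are placeholders, and the reply shape can vary by firmware:

from miio import RoborockVacuum

vac = RoborockVacuum("192.168.1.42", "ffffffffffffffffffffffffffffffff")
rooms = vac.get_room_mapping()  # e.g. [[16, '2362048'], [17, '2362044']]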
rwth-i6/returnn
f2d718a197a280b0d5f0fd91a7fcb8658560dddb
returnn/tf/layers/basic.py
python
CtcLoss.get_soft_alignment
(self)
return soft_align
Also called the Baum-Welch-alignment. This is basically p_t(s|x_1^T,w_1^N), where s are the output labels (including blank), and w are the real target labels. :return: shape (time, batch, dim) :rtype: tf.Tensor
Also called the Baum-Welch-alignment. This is basically p_t(s|x_1^T,w_1^N), where s are the output labels (including blank), and w are the real target labels.
[ "Also", "called", "the", "Baum", "-", "Welch", "-", "alignment", ".", "This", "is", "basically", "p_t", "(", "s|x_1^T", "w_1^N", ")", "where", "s", "are", "the", "output", "labels", "(", "including", "blank", ")", "and", "w", "are", "the", "real", "target", "labels", "." ]
def get_soft_alignment(self):
    """
    Also called the Baum-Welch-alignment.
    This is basically p_t(s|x_1^T,w_1^N), where s are the output labels (including blank),
    and w are the real target labels.

    :return: shape (time, batch, dim)
    :rtype: tf.Tensor
    """
    assert self._ctc_loss is not None
    assert isinstance(self._ctc_loss, tf.Tensor)
    assert self._ctc_loss.op.type == "CTCLoss"
    # See grad definition of CTCLoss.
    # The op will calculate the gradient w.r.t. the logits.
    # I.e. with y = softmax(z), this is \partial loss / \partial z = y - soft_align.
    ctc_grad_z = self._ctc_loss.op.outputs[1]  # time major, i.e. (time, batch, dim)
    y = self.output.get_placeholder_as_time_major()  # (time, batch, dim)
    soft_align = y - ctc_grad_z
    soft_align.set_shape(tf.TensorShape((None, None, self.output.dim)))
    return soft_align
[ "def", "get_soft_alignment", "(", "self", ")", ":", "assert", "self", ".", "_ctc_loss", "is", "not", "None", "assert", "isinstance", "(", "self", ".", "_ctc_loss", ",", "tf", ".", "Tensor", ")", "assert", "self", ".", "_ctc_loss", ".", "op", ".", "type", "==", "\"CTCLoss\"", "# See grad definition of CTCLoss.", "# The op will calculate the gradient w.r.t. the logits.", "# I.e. with y = softmax(z), this is \\partial loss / \\partial z = y - soft_align.", "ctc_grad_z", "=", "self", ".", "_ctc_loss", ".", "op", ".", "outputs", "[", "1", "]", "# time major, i.e. (time, batch, dim)", "y", "=", "self", ".", "output", ".", "get_placeholder_as_time_major", "(", ")", "# (time, batch, dim)", "soft_align", "=", "y", "-", "ctc_grad_z", "soft_align", ".", "set_shape", "(", "tf", ".", "TensorShape", "(", "(", "None", ",", "None", ",", "self", ".", "output", ".", "dim", ")", ")", ")", "return", "soft_align" ]
https://github.com/rwth-i6/returnn/blob/f2d718a197a280b0d5f0fd91a7fcb8658560dddb/returnn/tf/layers/basic.py#L9355-L9374
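The method recovers the alignment from the gradient the CTCLoss op already cached, using the identity d(loss)/d(logits) = softmax(logits) - soft_align. A sketch of the same identity in plain TF2 eager code, under the assumption that it carries over to tf.nn.ctc_loss (this is not the returnn code path):

import tensorflow as tf

logits = tf.random.normal([20, 2, 6])           # (time, batch, dim), blank=5
labels = tf.constant([[1, 2, 3], [2, 4, 1]])    # (batch, max_label_len)
with tf.GradientTape() as tape:
    tape.watch(logits)
    loss = tf.nn.ctc_loss(labels, logits, label_length=[3, 3],
                          logit_length=[20, 20], logits_time_major=True,
                          blank_index=5)
grad = tape.gradient(tf.reduce_sum(loss), logits)
soft_align = tf.nn.softmax(logits) - grad        # p_t(s | x, w), per the identity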
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/PIL/ImageOps.py
python
_lut
(image, lut)
[]
def _lut(image, lut):
    if image.mode == "P":
        # FIXME: apply to lookup table, not image data
        raise NotImplementedError("mode P support coming soon")
    elif image.mode in ("L", "RGB"):
        if image.mode == "RGB" and len(lut) == 256:
            lut = lut + lut + lut
        return image.point(lut)
    else:
        raise IOError("not supported for this image mode")
[ "def", "_lut", "(", "image", ",", "lut", ")", ":", "if", "image", ".", "mode", "==", "\"P\"", ":", "# FIXME: apply to lookup table, not image data", "raise", "NotImplementedError", "(", "\"mode P support coming soon\"", ")", "elif", "image", ".", "mode", "in", "(", "\"L\"", ",", "\"RGB\"", ")", ":", "if", "image", ".", "mode", "==", "\"RGB\"", "and", "len", "(", "lut", ")", "==", "256", ":", "lut", "=", "lut", "+", "lut", "+", "lut", "return", "image", ".", "point", "(", "lut", ")", "else", ":", "raise", "IOError", "(", "\"not supported for this image mode\"", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/PIL/ImageOps.py#L48-L57
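A usage sketch with an inversion lookup table, which is exactly the shape of table this helper expects for "L" and "RGB" images:

from PIL import Image

image = Image.new("RGB", (4, 4), (10, 20, 30))
lut = [255 - i for i in range(256)]      # classic invert LUT
inverted = _lut(image, lut)              # pixel becomes (245, 235, 225)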
collinsctk/PyQYT
7af3673955f94ff1b2df2f94220cd2dab2e252af
Practice_Lab/ms08_067_check/ms08_067_check.py
python
MS08_067.__connect
(self)
SMB connect to the Computer Browser service named pipe Reference: http://www.hsc.fr/ressources/articles/win_net_srv/msrpc_browser.html
SMB connect to the Computer Browser service named pipe Reference: http://www.hsc.fr/ressources/articles/win_net_srv/msrpc_browser.html
[ "SMB", "connect", "to", "the", "Computer", "Browser", "service", "named", "pipe", "Reference", ":", "http", ":", "//", "www", ".", "hsc", ".", "fr", "/", "ressources", "/", "articles", "/", "win_net_srv", "/", "msrpc_browser", ".", "html" ]
def __connect(self):
    '''
    SMB connect to the Computer Browser service named pipe
    Reference: http://www.hsc.fr/ressources/articles/win_net_srv/msrpc_browser.html
    '''
    try:
        self.__trans = transport.DCERPCTransportFactory('ncacn_np:%s[\\pipe\\browser]' % self.target)
        self.__trans.connect()
    except smb.SessionError, _:
        raise connectionException, 'access denied (RestrictAnonymous is probably set to 2)'
    except:
        #raise Exception, 'unhandled exception (%s)' % format_exc()
        raise connectionException, 'unexpected exception'
[ "def", "__connect", "(", "self", ")", ":", "try", ":", "self", ".", "__trans", "=", "transport", ".", "DCERPCTransportFactory", "(", "'ncacn_np:%s[\\\\pipe\\\\browser]'", "%", "self", ".", "target", ")", "self", ".", "__trans", ".", "connect", "(", ")", "except", "smb", ".", "SessionError", ",", "_", ":", "raise", "connectionException", ",", "'access denied (RestrictAnonymous is probably set to 2)'", "except", ":", "#raise Exception, 'unhandled exception (%s)' % format_exc()", "raise", "connectionException", ",", "'unexpected exception'" ]
https://github.com/collinsctk/PyQYT/blob/7af3673955f94ff1b2df2f94220cd2dab2e252af/Practice_Lab/ms08_067_check/ms08_067_check.py#L104-L119
researchmm/SiamDW
c82b0599920e1fbd67ada9271aa4a63405e2d316
lib/dataset/siamfc.py
python
SiamFCDataset._get_pairs
(self, index)
return self._get_image_anno(video_name, track, template_frame), \ self._get_image_anno(video_name, track, search_frame)
get training pairs
get training pairs
[ "get", "training", "pairs" ]
def _get_pairs(self, index):
    """
    get training pairs
    """
    video_name = self.videos[index]
    video = self.labels[video_name]
    track = random.choice(list(video.keys()))
    track_info = video[track]
    try:
        frames = track_info['frames']
    except:
        frames = list(track_info.keys())

    template_frame = random.randint(0, len(frames)-1)

    left = max(template_frame - self.frame_range, 0)
    right = min(template_frame + self.frame_range, len(frames)-1) + 1
    search_range = frames[left:right]
    template_frame = int(frames[template_frame])
    search_frame = int(random.choice(search_range))

    return self._get_image_anno(video_name, track, template_frame), \
           self._get_image_anno(video_name, track, search_frame)
[ "def", "_get_pairs", "(", "self", ",", "index", ")", ":", "video_name", "=", "self", ".", "videos", "[", "index", "]", "video", "=", "self", ".", "labels", "[", "video_name", "]", "track", "=", "random", ".", "choice", "(", "list", "(", "video", ".", "keys", "(", ")", ")", ")", "track_info", "=", "video", "[", "track", "]", "try", ":", "frames", "=", "track_info", "[", "'frames'", "]", "except", ":", "frames", "=", "list", "(", "track_info", ".", "keys", "(", ")", ")", "template_frame", "=", "random", ".", "randint", "(", "0", ",", "len", "(", "frames", ")", "-", "1", ")", "left", "=", "max", "(", "template_frame", "-", "self", ".", "frame_range", ",", "0", ")", "right", "=", "min", "(", "template_frame", "+", "self", ".", "frame_range", ",", "len", "(", "frames", ")", "-", "1", ")", "+", "1", "search_range", "=", "frames", "[", "left", ":", "right", "]", "template_frame", "=", "int", "(", "frames", "[", "template_frame", "]", ")", "search_frame", "=", "int", "(", "random", ".", "choice", "(", "search_range", ")", ")", "return", "self", ".", "_get_image_anno", "(", "video_name", ",", "track", ",", "template_frame", ")", ",", "self", ".", "_get_image_anno", "(", "video_name", ",", "track", ",", "search_frame", ")" ]
https://github.com/researchmm/SiamDW/blob/c82b0599920e1fbd67ada9271aa4a63405e2d316/lib/dataset/siamfc.py#L131-L153
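Stripped of the dataset bookkeeping, the pairing reduces to sampling a template frame uniformly and a search frame from a window of ±frame_range around it:

import random

frames = list(range(100))    # stand-in for one track's frame IDs
frame_range = 10
t = random.randint(0, len(frames) - 1)
left = max(t - frame_range, 0)
right = min(t + frame_range, len(frames) - 1) + 1
template, search = frames[t], random.choice(frames[left:right])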
pytorch/audio
7b6b2d000023e2aa3365b769866c5f375e0d5fda
torchaudio/utils/sox_utils.py
python
set_verbosity
(verbosity: int)
Set libsox's verbosity Args: verbosity (int): Set verbosity level of libsox. * ``1`` failure messages * ``2`` warnings * ``3`` details of processing * ``4``-``6`` increasing levels of debug messages See Also: http://sox.sourceforge.net/sox.html
Set libsox's verbosity
[ "Set", "libsox", "s", "verbosity" ]
def set_verbosity(verbosity: int):
    """Set libsox's verbosity

    Args:
        verbosity (int): Set verbosity level of libsox.

            * ``1`` failure messages
            * ``2`` warnings
            * ``3`` details of processing
            * ``4``-``6`` increasing levels of debug messages

    See Also:
        http://sox.sourceforge.net/sox.html
    """
    torch.ops.torchaudio.sox_utils_set_verbosity(verbosity)
[ "def", "set_verbosity", "(", "verbosity", ":", "int", ")", ":", "torch", ".", "ops", ".", "torchaudio", ".", "sox_utils_set_verbosity", "(", "verbosity", ")" ]
https://github.com/pytorch/audio/blob/7b6b2d000023e2aa3365b769866c5f375e0d5fda/torchaudio/utils/sox_utils.py#L21-L35
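A usage sketch, with the level numbers taken from the docstring:

from torchaudio.utils import sox_utils

sox_utils.set_verbosity(1)  # 1 = failure messages only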
kurolz/DjangoWeb
eb76629034089f65ed04632e2fbcecdef8095ff8
webserver/views.py
python
serverList
(request,id = 0)
return render(request, 'serverlist.html')
服务器列表
服务器列表
[ "服务器列表" ]
def serverList(request,id = 0):
    '''
    服务器列表
    '''
    if id != 0:
        hostinfo.objects.filter(id = id).delete()
    if request.method == "POST":
        getHostInfo()
        print(request.POST)
        pageSize = request.POST.get('pageSize')  # how manufactoryy items per page
        pageNumber = request.POST.get('pageNumber')
        offset = request.POST.get('offset')  # how many items in total in the DB
        search = request.POST.get('search')
        sort_column = request.POST.get('sort')  # which column need to sort
        order = request.POST.get('order')  # ascending or descending
        if search:  # 判断是否有搜索字
            all_records = hostinfo.objects.filter(id=search,asset_type=search,business_unit=search,idc=search)
        else:
            all_records = hostinfo.objects.all()  # must be wirte the line code here
        if sort_column:  # 判断是否有排序需求
            sort_column = sort_column.replace('asset_', '')
            if sort_column in ['id','asset_type','sn','name','management_ip','manufactory','type']:  # 如果排序的列表在这些内容里面
                if order == 'desc':  # 如果排序是反向
                    sort_column = '-%s' % (sort_column)
                all_records = hostinfo.objects.all().order_by(sort_column)
            elif sort_column in ['salt_minion_id','os_release',]:
                # server__ 表示asset下的外键关联的表server下面的os_release或者其他的字段进行排序
                sort_column = "server__%s" % (sort_column)
                if order == 'desc':
                    sort_column = '-%s'%(sort_column)
                all_records = hostinfo.objects.all().order_by(sort_column)
            elif sort_column in ['cpu_model','cpu_count','cpu_core_count']:
                sort_column = "cpu__%s" %(sort_column)
                if order == 'desc':
                    sort_column = '-%s'%(sort_column)
                all_records = hostinfo.objects.all().order_by(sort_column)
            elif sort_column in ['rams_size',]:
                if order == 'desc':
                    sort_column = '-rams_size'
                else:
                    sort_column = 'rams_size'
                all_records = hostinfo.objects.all().annotate(rams_size = Sum('ram__capacity')).order_by(sort_column)
            elif sort_column in ['localdisks_size',]:
                # using variable of localdisks_size because there have a annotation below of this line
                if order == "desc":
                    sort_column = '-localdisks_size'
                else:
                    sort_column = 'localdisks_size'
                # annotate 是注释的功能,localdisks_size前端传过来的是这个值,后端也必须这样写,Sum方法是django里面的,不是小写的sum方法,
                # 两者的区别需要注意,Sum('disk__capacity‘)表示对disk表下面的capacity进行加法计算,返回一个总值.
                all_records = hostinfo.objects.all().annotate(localdisks_size=Sum('disk__capacity')).order_by(sort_column)
            elif sort_column in ['idc',]:
                sort_column = "idc__%s" % (sort_column)
                if order == 'desc':
                    sort_column = '-%s'%(sort_column)
                all_records = hostinfo.objects.all().order_by(sort_column)
            elif sort_column in ['trade_date','create_date']:
                if order == 'desc':
                    sort_column = '-%s'%sort_column
                all_records = User.objects.all().order_by(sort_column)
        all_records_count=all_records.count()
        if not offset:
            offset = 0
        if not pageSize:
            pageSize = 10  # 默认是每页20行的内容,与前端默认行数一致
        pageinator = Paginator(all_records, pageSize)  # 开始做分页
        page = int(int(offset) / int(pageSize) + 1)
        response_data = {'total': all_records_count, 'rows': []}
        for server_li in pageinator.page(page):
            response_data['rows'].append({
                "id": server_li.id if server_li.id else "",
                "hostname": server_li.hostname if server_li.hostname else "",
                "IP": server_li.IP if server_li.IP else "",
                "Mem": server_li.Mem if server_li.Mem else "",
                "CPU": server_li.CPU if server_li.CPU else "",
                "CPUS": server_li.CPUS if server_li.CPUS else "",
                "OS": server_li.OS if server_li.OS else "",
                "virtual1": server_li.virtual1 if server_li.virtual1 else "",
                "status": server_li.status if server_li.status else "",
            })
        return HttpResponse(json.dumps(response_data))
    return render(request, 'serverlist.html')
[ "def", "serverList", "(", "request", ",", "id", "=", "0", ")", ":", "if", "id", "!=", "0", ":", "hostinfo", ".", "objects", ".", "filter", "(", "id", "=", "id", ")", ".", "delete", "(", ")", "if", "request", ".", "method", "==", "\"POST\"", ":", "getHostInfo", "(", ")", "print", "(", "request", ".", "POST", ")", "pageSize", "=", "request", ".", "POST", ".", "get", "(", "'pageSize'", ")", "# how manufactoryy items per page", "pageNumber", "=", "request", ".", "POST", ".", "get", "(", "'pageNumber'", ")", "offset", "=", "request", ".", "POST", ".", "get", "(", "'offset'", ")", "# how many items in total in the DB", "search", "=", "request", ".", "POST", ".", "get", "(", "'search'", ")", "sort_column", "=", "request", ".", "POST", ".", "get", "(", "'sort'", ")", "# which column need to sort", "order", "=", "request", ".", "POST", ".", "get", "(", "'order'", ")", "# ascending or descending", "if", "search", ":", "# 判断是否有搜索字", "all_records", "=", "hostinfo", ".", "objects", ".", "filter", "(", "id", "=", "search", ",", "asset_type", "=", "search", ",", "business_unit", "=", "search", ",", "idc", "=", "search", ")", "else", ":", "all_records", "=", "hostinfo", ".", "objects", ".", "all", "(", ")", "# must be wirte the line code here", "if", "sort_column", ":", "# 判断是否有排序需求", "sort_column", "=", "sort_column", ".", "replace", "(", "'asset_'", ",", "''", ")", "if", "sort_column", "in", "[", "'id'", ",", "'asset_type'", ",", "'sn'", ",", "'name'", ",", "'management_ip'", ",", "'manufactory'", ",", "'type'", "]", ":", "# 如果排序的列表在这些内容里面", "if", "order", "==", "'desc'", ":", "# 如果排序是反向", "sort_column", "=", "'-%s'", "%", "(", "sort_column", ")", "all_records", "=", "hostinfo", ".", "objects", ".", "all", "(", ")", ".", "order_by", "(", "sort_column", ")", "elif", "sort_column", "in", "[", "'salt_minion_id'", ",", "'os_release'", ",", "]", ":", "# server__ 表示asset下的外键关联的表server下面的os_release或者其他的字段进行排序", "sort_column", "=", "\"server__%s\"", "%", "(", "sort_column", ")", "if", "order", "==", "'desc'", ":", "sort_column", "=", "'-%s'", "%", "(", "sort_column", ")", "all_records", "=", "hostinfo", ".", "objects", ".", "all", "(", ")", ".", "order_by", "(", "sort_column", ")", "elif", "sort_column", "in", "[", "'cpu_model'", ",", "'cpu_count'", ",", "'cpu_core_count'", "]", ":", "sort_column", "=", "\"cpu__%s\"", "%", "(", "sort_column", ")", "if", "order", "==", "'desc'", ":", "sort_column", "=", "'-%s'", "%", "(", "sort_column", ")", "all_records", "=", "hostinfo", ".", "objects", ".", "all", "(", ")", ".", "order_by", "(", "sort_column", ")", "elif", "sort_column", "in", "[", "'rams_size'", ",", "]", ":", "if", "order", "==", "'desc'", ":", "sort_column", "=", "'-rams_size'", "else", ":", "sort_column", "=", "'rams_size'", "all_records", "=", "hostinfo", ".", "objects", ".", "all", "(", ")", ".", "annotate", "(", "rams_size", "=", "Sum", "(", "'ram__capacity'", ")", ")", ".", "order_by", "(", "sort_column", ")", "elif", "sort_column", "in", "[", "'localdisks_size'", ",", "]", ":", "# using variable of localdisks_size because there have a annotation below of this line", "if", "order", "==", "\"desc\"", ":", "sort_column", "=", "'-localdisks_size'", "else", ":", "sort_column", "=", "'localdisks_size'", "# annotate 是注释的功能,localdisks_size前端传过来的是这个值,后端也必须这样写,Sum方法是django里面的,不是小写的sum方法,", "# 两者的区别需要注意,Sum('disk__capacity‘)表示对disk表下面的capacity进行加法计算,返回一个总值.", "all_records", "=", "hostinfo", ".", "objects", ".", "all", "(", ")", ".", "annotate", "(", "localdisks_size", "=", "Sum", "(", "'disk__capacity'", ")", 
")", ".", "order_by", "(", "sort_column", ")", "elif", "sort_column", "in", "[", "'idc'", ",", "]", ":", "sort_column", "=", "\"idc__%s\"", "%", "(", "sort_column", ")", "if", "order", "==", "'desc'", ":", "sort_column", "=", "'-%s'", "%", "(", "sort_column", ")", "all_records", "=", "hostinfo", ".", "objects", ".", "all", "(", ")", ".", "order_by", "(", "sort_column", ")", "elif", "sort_column", "in", "[", "'trade_date'", ",", "'create_date'", "]", ":", "if", "order", "==", "'desc'", ":", "sort_column", "=", "'-%s'", "%", "sort_column", "all_records", "=", "User", ".", "objects", ".", "all", "(", ")", ".", "order_by", "(", "sort_column", ")", "all_records_count", "=", "all_records", ".", "count", "(", ")", "if", "not", "offset", ":", "offset", "=", "0", "if", "not", "pageSize", ":", "pageSize", "=", "10", "# 默认是每页20行的内容,与前端默认行数一致", "pageinator", "=", "Paginator", "(", "all_records", ",", "pageSize", ")", "# 开始做分页", "page", "=", "int", "(", "int", "(", "offset", ")", "/", "int", "(", "pageSize", ")", "+", "1", ")", "response_data", "=", "{", "'total'", ":", "all_records_count", ",", "'rows'", ":", "[", "]", "}", "for", "server_li", "in", "pageinator", ".", "page", "(", "page", ")", ":", "response_data", "[", "'rows'", "]", ".", "append", "(", "{", "\"id\"", ":", "server_li", ".", "id", "if", "server_li", ".", "id", "else", "\"\"", ",", "\"hostname\"", ":", "server_li", ".", "hostname", "if", "server_li", ".", "hostname", "else", "\"\"", ",", "\"IP\"", ":", "server_li", ".", "IP", "if", "server_li", ".", "IP", "else", "\"\"", ",", "\"Mem\"", ":", "server_li", ".", "Mem", "if", "server_li", ".", "Mem", "else", "\"\"", ",", "\"CPU\"", ":", "server_li", ".", "CPU", "if", "server_li", ".", "CPU", "else", "\"\"", ",", "\"CPUS\"", ":", "server_li", ".", "CPUS", "if", "server_li", ".", "CPUS", "else", "\"\"", ",", "\"OS\"", ":", "server_li", ".", "OS", "if", "server_li", ".", "OS", "else", "\"\"", ",", "\"virtual1\"", ":", "server_li", ".", "virtual1", "if", "server_li", ".", "virtual1", "else", "\"\"", ",", "\"status\"", ":", "server_li", ".", "status", "if", "server_li", ".", "status", "else", "\"\"", ",", "}", ")", "return", "HttpResponse", "(", "json", ".", "dumps", "(", "response_data", ")", ")", "return", "render", "(", "request", ",", "'serverlist.html'", ")" ]
https://github.com/kurolz/DjangoWeb/blob/eb76629034089f65ed04632e2fbcecdef8095ff8/webserver/views.py#L195-L280
johntruckenbrodt/pyroSAR
efac51134ba42d20120b259f968afe5a4ddcc46a
pyroSAR/gamma/parser_demo.py
python
par_S1_SLC
(GeoTIFF, annotation_XML, calibration_XML, noise_XML, SLC_par, SLC, TOPS_par='-', dtype='-', sc_dB='-', noise_pwr='-', logpath=None, outdir=None, shellscript=None)
| Generate SLC parameter and image files for Sentinel-1 SLC data | Copyright 2018, Gamma Remote Sensing, v4.0 30-Apr-2018 awi/clw/cm Parameters ---------- GeoTIFF: (input) image data file in GeoTIFF format (enter - for none, \*.tiff) annotation_XML: (input) Sentinel-1 L1 XML annotation file calibration_XML: (input) Sentinel-1 L1 radiometric calibration XML file (enter - for no radiometric calibration) noise_XML: (input) Sentinel-1 L1 noise XML file (enter - to not subtract thermal noise power level) SLC_par: (output) ISP SLC parameter file (example: yyyymmdd_iw1_vv.slc.par) SLC: (output) SLC data file (enter - for none, example: yyyymmdd_iw1_vv.slc) TOPS_par: (output) SLC burst annotation file, TOPS and EW SLC data only (enter - for none, example: yyyymmdd_iw1_vv.tops_par) dtype: output data type: * 0: FCOMPLEX (default) * 1: SCOMPLEX sc_dB: scale factor for FCOMPLEX -> SCOMPLEX, (enter - for default: HH,VV (dB): 60.0000, VH,HV: 70.0000) noise_pwr: noise intensity for each SLC sample in slant range using data from noise_XML * NOTE: when the noise_pwr file is specified, noise power will NOT be subtracted from the image data values logpath: str or None a directory to write command logfiles to outdir: str or None the directory to execute the command in shellscript: str or None a file to write the Gamma commands to in shell format
| Generate SLC parameter and image files for Sentinel-1 SLC data | Copyright 2018, Gamma Remote Sensing, v4.0 30-Apr-2018 awi/clw/cm
[ "|", "Generate", "SLC", "parameter", "and", "image", "files", "for", "Sentinel", "-", "1", "SLC", "data", "|", "Copyright", "2018", "Gamma", "Remote", "Sensing", "v4", ".", "0", "30", "-", "Apr", "-", "2018", "awi", "/", "clw", "/", "cm" ]
def par_S1_SLC(GeoTIFF, annotation_XML, calibration_XML, noise_XML, SLC_par, SLC, TOPS_par='-', dtype='-', sc_dB='-', noise_pwr='-', logpath=None, outdir=None, shellscript=None): """ | Generate SLC parameter and image files for Sentinel-1 SLC data | Copyright 2018, Gamma Remote Sensing, v4.0 30-Apr-2018 awi/clw/cm Parameters ---------- GeoTIFF: (input) image data file in GeoTIFF format (enter - for none, \*.tiff) annotation_XML: (input) Sentinel-1 L1 XML annotation file calibration_XML: (input) Sentinel-1 L1 radiometric calibration XML file (enter - for no radiometric calibration) noise_XML: (input) Sentinel-1 L1 noise XML file (enter - to not subtract thermal noise power level) SLC_par: (output) ISP SLC parameter file (example: yyyymmdd_iw1_vv.slc.par) SLC: (output) SLC data file (enter - for none, example: yyyymmdd_iw1_vv.slc) TOPS_par: (output) SLC burst annotation file, TOPS and EW SLC data only (enter - for none, example: yyyymmdd_iw1_vv.tops_par) dtype: output data type: * 0: FCOMPLEX (default) * 1: SCOMPLEX sc_dB: scale factor for FCOMPLEX -> SCOMPLEX, (enter - for default: HH,VV (dB): 60.0000, VH,HV: 70.0000) noise_pwr: noise intensity for each SLC sample in slant range using data from noise_XML * NOTE: when the noise_pwr file is specified, noise power will NOT be subtracted from the image data values logpath: str or None a directory to write command logfiles to outdir: str or None the directory to execute the command in shellscript: str or None a file to write the Gamma commands to in shell format """ process( ['/usr/local/GAMMA_SOFTWARE-20180703/ISP/bin/par_S1_SLC', GeoTIFF, annotation_XML, calibration_XML, noise_XML, SLC_par, SLC, TOPS_par, dtype, sc_dB, noise_pwr], logpath=logpath, outdir=outdir, shellscript=shellscript)
[ "def", "par_S1_SLC", "(", "GeoTIFF", ",", "annotation_XML", ",", "calibration_XML", ",", "noise_XML", ",", "SLC_par", ",", "SLC", ",", "TOPS_par", "=", "'-'", ",", "dtype", "=", "'-'", ",", "sc_dB", "=", "'-'", ",", "noise_pwr", "=", "'-'", ",", "logpath", "=", "None", ",", "outdir", "=", "None", ",", "shellscript", "=", "None", ")", ":", "process", "(", "[", "'/usr/local/GAMMA_SOFTWARE-20180703/ISP/bin/par_S1_SLC'", ",", "GeoTIFF", ",", "annotation_XML", ",", "calibration_XML", ",", "noise_XML", ",", "SLC_par", ",", "SLC", ",", "TOPS_par", ",", "dtype", ",", "sc_dB", ",", "noise_pwr", "]", ",", "logpath", "=", "logpath", ",", "outdir", "=", "outdir", ",", "shellscript", "=", "shellscript", ")" ]
https://github.com/johntruckenbrodt/pyroSAR/blob/efac51134ba42d20120b259f968afe5a4ddcc46a/pyroSAR/gamma/parser_demo.py#L3390-L3432
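A minimal usage sketch for the par_S1_SLC wrapper above. Every file name is a hypothetical placeholder, and the call assumes the Gamma installation hard-wired into the wrapper's process() call is present:

from pyroSAR.gamma.parser_demo import par_S1_SLC

# Hypothetical Sentinel-1 IW1 VV sub-swath; all paths are placeholders.
par_S1_SLC(GeoTIFF='s1a-iw1-slc-vv.tiff',
           annotation_XML='s1a-iw1-slc-vv-annotation.xml',
           calibration_XML='s1a-iw1-slc-vv-calibration.xml',
           noise_XML='-',                       # '-' skips thermal noise subtraction
           SLC_par='20180101_iw1_vv.slc.par',
           SLC='20180101_iw1_vv.slc',
           TOPS_par='20180101_iw1_vv.tops_par',
           logpath='logs')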
knipknap/exscript
a20e83ae3a78ea7e5ba25f07c1d9de4e9b961e83
Exscript/util/event.py
python
Event.listen
(self, callback, *args, **kwargs)
return ref
Like :class:`connect()`, but uses a weak reference instead of a normal reference. The signal is automatically disconnected as soon as the handler is garbage collected. .. HINT:: Storing signal handlers as weak references means that if your handler is a local function, it may be garbage collected. To prevent this, use :class:`connect()` instead. :type callback: object :param callback: The callback function. :type args: tuple :param args: Optional arguments passed to the callback. :type kwargs: dict :param kwargs: Optional keyword arguments passed to the callback. :rtype: :class:`Exscript.util.weakmethod.WeakMethod` :return: The newly created weak reference to the callback.
Like :class:`connect()`, but uses a weak reference instead of a normal reference. The signal is automatically disconnected as soon as the handler is garbage collected.
[ "Like", ":", "class", ":", "connect", "()", "but", "uses", "a", "weak", "reference", "instead", "of", "a", "normal", "reference", ".", "The", "signal", "is", "automatically", "disconnected", "as", "soon", "as", "the", "handler", "is", "garbage", "collected", "." ]
def listen(self, callback, *args, **kwargs): """ Like :class:`connect()`, but uses a weak reference instead of a normal reference. The signal is automatically disconnected as soon as the handler is garbage collected. .. HINT:: Storing signal handlers as weak references means that if your handler is a local function, it may be garbage collected. To prevent this, use :class:`connect()` instead. :type callback: object :param callback: The callback function. :type args: tuple :param args: Optional arguments passed to the callback. :type kwargs: dict :param kwargs: Optional keyword arguments passed to the callback. :rtype: :class:`Exscript.util.weakmethod.WeakMethod` :return: The newly created weak reference to the callback. """ if self.lock is None: self.lock = Lock() with self.lock: if self.is_connected(callback): raise AttributeError('callback is already connected') if self.weak_subscribers is None: self.weak_subscribers = [] ref = weakmethod.ref(callback, self._try_disconnect) self.weak_subscribers.append((ref, args, kwargs)) return ref
[ "def", "listen", "(", "self", ",", "callback", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "lock", "is", "None", ":", "self", ".", "lock", "=", "Lock", "(", ")", "with", "self", ".", "lock", ":", "if", "self", ".", "is_connected", "(", "callback", ")", ":", "raise", "AttributeError", "(", "'callback is already connected'", ")", "if", "self", ".", "weak_subscribers", "is", "None", ":", "self", ".", "weak_subscribers", "=", "[", "]", "ref", "=", "weakmethod", ".", "ref", "(", "callback", ",", "self", ".", "_try_disconnect", ")", "self", ".", "weak_subscribers", ".", "append", "(", "(", "ref", ",", "args", ",", "kwargs", ")", ")", "return", "ref" ]
https://github.com/knipknap/exscript/blob/a20e83ae3a78ea7e5ba25f07c1d9de4e9b961e83/Exscript/util/event.py#L88-L118
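A short sketch of the weak-reference behaviour documented above. It assumes only that Event constructs without arguments (its constructor is not shown in this excerpt); emitting the signal is likewise out of scope here:

import gc
from Exscript.util.event import Event

class Reporter(object):
    def on_data(self, data):
        print('received', data)

event = Event()
reporter = Reporter()
event.listen(reporter.on_data)   # weak reference, unlike connect()

del reporter   # drop the last strong reference to the handler
gc.collect()   # the bound method is collected and the signal disconnects itself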
huawei-noah/vega
d9f13deede7f2b584e4b1d32ffdb833856129989
evaluate_service/evaluate_service/security/zmq_op.py
python
connect_security
(ip, port, temp_path)
return socket
Connect to server.
Connect to server.
[ "Connect", "to", "server", "." ]
def connect_security(ip, port, temp_path): """Connect to server.""" ctx = zmq.Context.instance() socket = ctx.socket(zmq.REQ) client_name = uuid.uuid1().hex[:8] client_secret_key = os.path.join(temp_path, "{}.key_secret".format(client_name)) if not os.path.exists(client_secret_key): client_public_key, client_secret_key = zmq.auth.create_certificates(temp_path, client_name) client_public, client_secret = zmq.auth.load_certificate(client_secret_key) socket.curve_secretkey = client_secret socket.curve_publickey = client_public server_public_key = os.path.join(temp_path, "server.key") if not os.path.exists(server_public_key): server_public_key, _ = zmq.auth.create_certificates(temp_path, "server") server_public, _ = zmq.auth.load_certificate(server_public_key) socket.curve_serverkey = server_public socket.connect(f"tcp://{ip}:{port}") if os.path.exists(client_secret_key): os.remove(client_secret_key) if os.path.exists(client_public_key): os.remove(client_public_key) return socket
[ "def", "connect_security", "(", "ip", ",", "port", ",", "temp_path", ")", ":", "ctx", "=", "zmq", ".", "Context", ".", "instance", "(", ")", "socket", "=", "ctx", ".", "socket", "(", "zmq", ".", "REQ", ")", "client_name", "=", "uuid", ".", "uuid1", "(", ")", ".", "hex", "[", ":", "8", "]", "client_secret_key", "=", "os", ".", "path", ".", "join", "(", "temp_path", ",", "\"{}.key_secret\"", ".", "format", "(", "client_name", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "client_secret_key", ")", ":", "client_public_key", ",", "client_secret_key", "=", "zmq", ".", "auth", ".", "create_certificates", "(", "temp_path", ",", "client_name", ")", "client_public", ",", "client_secret", "=", "zmq", ".", "auth", ".", "load_certificate", "(", "client_secret_key", ")", "socket", ".", "curve_secretkey", "=", "client_secret", "socket", ".", "curve_publickey", "=", "client_public", "server_public_key", "=", "os", ".", "path", ".", "join", "(", "temp_path", ",", "\"server.key\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "server_public_key", ")", ":", "server_public_key", ",", "_", "=", "zmq", ".", "auth", ".", "create_certificates", "(", "temp_path", ",", "\"server\"", ")", "server_public", ",", "_", "=", "zmq", ".", "auth", ".", "load_certificate", "(", "server_public_key", ")", "socket", ".", "curve_serverkey", "=", "server_public", "socket", ".", "connect", "(", "f\"tcp://{ip}:{port}\"", ")", "if", "os", ".", "path", ".", "exists", "(", "client_secret_key", ")", ":", "os", ".", "remove", "(", "client_secret_key", ")", "if", "os", ".", "path", ".", "exists", "(", "client_public_key", ")", ":", "os", ".", "remove", "(", "client_public_key", ")", "return", "socket" ]
https://github.com/huawei-noah/vega/blob/d9f13deede7f2b584e4b1d32ffdb833856129989/evaluate_service/evaluate_service/security/zmq_op.py#L49-L70
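A client-side usage sketch for connect_security, with the import path inferred from the file path above. It assumes a CURVE-enabled server is listening on the given port and has already written its public key to temp_path/server.key; otherwise, as the code shows, the function generates a throwaway "server" certificate that will not match the real server:

from evaluate_service.security.zmq_op import connect_security

socket = connect_security('127.0.0.1', 5555, '/tmp/zmq_keys')
socket.send(b'evaluate')      # encrypted REQ/REP round trip
reply = socket.recv()
socket.close()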
cloudinary/pycloudinary
a61a9687c8933f23574c38e27f201358e540ee64
cloudinary/poster/encode.py
python
MultipartParam.from_file
(cls, paramname, filename)
return cls(paramname, filename=os.path.basename(filename), filetype=mimetypes.guess_type(filename)[0], filesize=os.path.getsize(filename), fileobj=open(filename, "rb"))
Returns a new MultipartParam object constructed from the local file at ``filename``. ``filesize`` is determined by os.path.getsize(``filename``) ``filetype`` is determined by mimetypes.guess_type(``filename``)[0] ``filename`` is set to os.path.basename(``filename``)
Returns a new MultipartParam object constructed from the local file at ``filename``.
[ "Returns", "a", "new", "MultipartParam", "object", "constructed", "from", "the", "local", "file", "at", "filename", "." ]
def from_file(cls, paramname, filename): """Returns a new MultipartParam object constructed from the local file at ``filename``. ``filesize`` is determined by os.path.getsize(``filename``) ``filetype`` is determined by mimetypes.guess_type(``filename``)[0] ``filename`` is set to os.path.basename(``filename``) """ return cls(paramname, filename=os.path.basename(filename), filetype=mimetypes.guess_type(filename)[0], filesize=os.path.getsize(filename), fileobj=open(filename, "rb"))
[ "def", "from_file", "(", "cls", ",", "paramname", ",", "filename", ")", ":", "return", "cls", "(", "paramname", ",", "filename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "filetype", "=", "mimetypes", ".", "guess_type", "(", "filename", ")", "[", "0", "]", ",", "filesize", "=", "os", ".", "path", ".", "getsize", "(", "filename", ")", ",", "fileobj", "=", "open", "(", "filename", ",", "\"rb\"", ")", ")" ]
https://github.com/cloudinary/pycloudinary/blob/a61a9687c8933f23574c38e27f201358e540ee64/cloudinary/poster/encode.py#L164-L178
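A small sketch of the classmethod above; the path is a placeholder:

from cloudinary.poster.encode import MultipartParam

param = MultipartParam.from_file('file', '/tmp/logo.png')
# Derived automatically from the path:
#   param.filename == 'logo.png'
#   param.filetype == 'image/png'   (from mimetypes.guess_type)
#   param.filesize == os.path.getsize('/tmp/logo.png')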
openstack/keystone
771c943ad2116193e7bb118c74993c829d93bd71
keystone/credential/backends/base.py
python
CredentialDriverBase.delete_credentials_for_user
(self, user_id)
Delete all credentials for a user.
Delete all credentials for a user.
[ "Delete", "all", "credentials", "for", "a", "user", "." ]
def delete_credentials_for_user(self, user_id): """Delete all credentials for a user.""" self._delete_credentials(lambda cr: cr['user_id'] == user_id)
[ "def", "delete_credentials_for_user", "(", "self", ",", "user_id", ")", ":", "self", ".", "_delete_credentials", "(", "lambda", "cr", ":", "cr", "[", "'user_id'", "]", "==", "user_id", ")" ]
https://github.com/openstack/keystone/blob/771c943ad2116193e7bb118c74993c829d93bd71/keystone/credential/backends/base.py#L100-L102
ramonhagenaars/jsons
a5150cdd2704e83fe5f8798822a1c901b54dcb1c
jsons/classes/json_serializable.py
python
JsonSerializable.dump
(self, **kwargs)
return dump(self, fork_inst=self.__class__, **kwargs)
See ``jsons.dump``. :param kwargs: the keyword args are passed on to the serializer function. :return: this instance in a JSON representation (dict).
See ``jsons.dump``. :param kwargs: the keyword args are passed on to the serializer function. :return: this instance in a JSON representation (dict).
[ "See", "jsons", ".", "dump", ".", ":", "param", "kwargs", ":", "the", "keyword", "args", "are", "passed", "on", "to", "the", "serializer", "function", ".", ":", "return", ":", "this", "instance", "in", "a", "JSON", "representation", "(", "dict", ")", "." ]
def dump(self, **kwargs) -> object: """ See ``jsons.dump``. :param kwargs: the keyword args are passed on to the serializer function. :return: this instance in a JSON representation (dict). """ return dump(self, fork_inst=self.__class__, **kwargs)
[ "def", "dump", "(", "self", ",", "*", "*", "kwargs", ")", "->", "object", ":", "return", "dump", "(", "self", ",", "fork_inst", "=", "self", ".", "__class__", ",", "*", "*", "kwargs", ")" ]
https://github.com/ramonhagenaars/jsons/blob/a5150cdd2704e83fe5f8798822a1c901b54dcb1c/jsons/classes/json_serializable.py#L121-L128
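A minimal sketch of the mixin method above. It assumes the default jsons object serializer, which dumps public instance attributes; the exact output shape may vary between jsons versions:

from jsons.classes.json_serializable import JsonSerializable

class Person(JsonSerializable):
    def __init__(self, name: str, age: int):
        self.name = name
        self.age = age

print(Person('Ada', 36).dump())   # expected: {'name': 'Ada', 'age': 36}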
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/mqtt/climate.py
python
MqttClimate.target_temperature_low
(self)
return self._target_temp_low
Return the low target temperature we try to reach.
Return the low target temperature we try to reach.
[ "Return", "the", "low", "target", "temperature", "we", "try", "to", "reach", "." ]
def target_temperature_low(self): """Return the low target temperature we try to reach.""" return self._target_temp_low
[ "def", "target_temperature_low", "(", "self", ")", ":", "return", "self", ".", "_target_temp_low" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/mqtt/climate.py#L632-L634
agile-geoscience/bruges
64f162dbf8b94ff265108f6eb85e0d4e28ecb2cb
bruges/reflection/reflection.py
python
shuey
(vp1, vs1, rho1, vp2, vs2, rho2, theta1=0, terms=False, return_gradient=False)
Compute Shuey approximation with 3 terms. http://subsurfwiki.org/wiki/Shuey_equation Args: vp1 (ndarray): The upper P-wave velocity; float or 1D array length m. vs1 (ndarray): The upper S-wave velocity; float or 1D array length m. rho1 (ndarray): The upper layer's density; float or 1D array length m. vp2 (ndarray): The lower P-wave velocity; float or 1D array length m. vs2 (ndarray): The lower S-wave velocity; float or 1D array length m. rho2 (ndarray): The lower layer's density; float or 1D array length m. theta1 (ndarray): The incidence angle; float or 1D array length n. terms (bool): Whether or not to return a tuple of the terms of the equation. The first term is the normal-incidence reflectivity (intercept). return_gradient (bool): Whether to return a tuple of the intercept and gradient (i.e. the second term divided by sin^2(theta)). Returns: ndarray. The Shuey approximation for P-P reflectivity at the interface. Will be a float (for float inputs and one angle), a 1 x n array (for float inputs and an array of angles), a 1 x m array (for float inputs and one angle), or an n x m array (for array inputs and an array of angles).
Compute Shuey approximation with 3 terms. http://subsurfwiki.org/wiki/Shuey_equation
[ "Compute", "Shuey", "approximation", "with", "3", "terms", ".", "http", ":", "//", "subsurfwiki", ".", "org", "/", "wiki", "/", "Shuey_equation" ]
def shuey(vp1, vs1, rho1, vp2, vs2, rho2, theta1=0, terms=False, return_gradient=False): """ Compute Shuey approximation with 3 terms. http://subsurfwiki.org/wiki/Shuey_equation Args: vp1 (ndarray): The upper P-wave velocity; float or 1D array length m. vs1 (ndarray): The upper S-wave velocity; float or 1D array length m. rho1 (ndarray): The upper layer's density; float or 1D array length m. vp2 (ndarray): The lower P-wave velocity; float or 1D array length m. vs2 (ndarray): The lower S-wave velocity; float or 1D array length m. rho2 (ndarray): The lower layer's density; float or 1D array length m. theta1 (ndarray): The incidence angle; float or 1D array length n. terms (bool): Whether or not to return a tuple of the terms of the equation. The first term is the normal-incidence reflectivity (intercept). return_gradient (bool): Whether to return a tuple of the intercept and gradient (i.e. the second term divided by sin^2(theta)). Returns: ndarray. The Shuey approximation for P-P reflectivity at the interface. Will be a float (for float inputs and one angle), a 1 x n array (for float inputs and an array of angles), a 1 x m array (for float inputs and one angle), or an n x m array (for array inputs and an array of angles). """ theta1 = np.real(theta1) drho = rho2-rho1 dvp = vp2-vp1 dvs = vs2-vs1 rho = (rho1+rho2)/2.0 vp = (vp1+vp2)/2.0 vs = (vs1+vs2)/2.0 # Compute three-term reflectivity r0 = 0.5 * (dvp/vp + drho/rho) g = 0.5 * dvp/vp - 2 * (vs**2/vp**2) * (drho/rho + 2 * dvs/vs) f = 0.5 * dvp/vp term1 = r0 term2 = g * np.sin(theta1)**2 term3 = f * (np.tan(theta1)**2 - np.sin(theta1)**2) if return_gradient: fields = ['intercept', 'gradient'] Shuey = namedtuple('Shuey', fields) return Shuey(np.squeeze(r0), np.squeeze(g)) elif terms: fields = ['R0', 'Rg', 'Rf'] Shuey = namedtuple('Shuey', fields) return Shuey(np.squeeze([term1 for _ in theta1]), np.squeeze(term2), np.squeeze(term3) ) else: return np.squeeze(term1 + term2 + term3)
[ "def", "shuey", "(", "vp1", ",", "vs1", ",", "rho1", ",", "vp2", ",", "vs2", ",", "rho2", ",", "theta1", "=", "0", ",", "terms", "=", "False", ",", "return_gradient", "=", "False", ")", ":", "theta1", "=", "np", ".", "real", "(", "theta1", ")", "drho", "=", "rho2", "-", "rho1", "dvp", "=", "vp2", "-", "vp1", "dvs", "=", "vs2", "-", "vs1", "rho", "=", "(", "rho1", "+", "rho2", ")", "/", "2.0", "vp", "=", "(", "vp1", "+", "vp2", ")", "/", "2.0", "vs", "=", "(", "vs1", "+", "vs2", ")", "/", "2.0", "# Compute three-term reflectivity", "r0", "=", "0.5", "*", "(", "dvp", "/", "vp", "+", "drho", "/", "rho", ")", "g", "=", "0.5", "*", "dvp", "/", "vp", "-", "2", "*", "(", "vs", "**", "2", "/", "vp", "**", "2", ")", "*", "(", "drho", "/", "rho", "+", "2", "*", "dvs", "/", "vs", ")", "f", "=", "0.5", "*", "dvp", "/", "vp", "term1", "=", "r0", "term2", "=", "g", "*", "np", ".", "sin", "(", "theta1", ")", "**", "2", "term3", "=", "f", "*", "(", "np", ".", "tan", "(", "theta1", ")", "**", "2", "-", "np", ".", "sin", "(", "theta1", ")", "**", "2", ")", "if", "return_gradient", ":", "fields", "=", "[", "'intercept'", ",", "'gradient'", "]", "Shuey", "=", "namedtuple", "(", "'Shuey'", ",", "fields", ")", "return", "Shuey", "(", "np", ".", "squeeze", "(", "r0", ")", ",", "np", ".", "squeeze", "(", "g", ")", ")", "elif", "terms", ":", "fields", "=", "[", "'R0'", ",", "'Rg'", ",", "'Rf'", "]", "Shuey", "=", "namedtuple", "(", "'Shuey'", ",", "fields", ")", "return", "Shuey", "(", "np", ".", "squeeze", "(", "[", "term1", "for", "_", "in", "theta1", "]", ")", ",", "np", ".", "squeeze", "(", "term2", ")", ",", "np", ".", "squeeze", "(", "term3", ")", ")", "else", ":", "return", "np", ".", "squeeze", "(", "term1", "+", "term2", "+", "term3", ")" ]
https://github.com/agile-geoscience/bruges/blob/64f162dbf8b94ff265108f6eb85e0d4e28ecb2cb/bruges/reflection/reflection.py#L590-L647
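A worked example for shuey using hypothetical shale-over-sand properties (velocities in m/s, densities in kg/m3). Note the implementation applies np.sin and np.tan directly, so angles must be in radians; at normal incidence both angle terms vanish and the first reflectivity sample equals the returned intercept:

import numpy as np
from bruges.reflection.reflection import shuey

theta = np.radians([0, 10, 20, 30])          # incidence angles in radians
rpp = shuey(vp1=2400, vs1=1000, rho1=2350,
            vp2=2500, vs2=1400, rho2=2200,
            theta1=theta)
r0, g = shuey(2400, 1000, 2350, 2500, 1400, 2200,
              theta1=theta, return_gradient=True)
assert np.isclose(rpp[0], r0)                # sin(0) == tan(0) == 0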
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/uwsgidecorators.py
python
mule_msg_dispatcher
(message)
[]
def mule_msg_dispatcher(message): msg = pickle.loads(message) if msg['service'] == 'uwsgi_mulefunc': return mule_functions[msg['func']](*msg['args'], **msg['kwargs'])
[ "def", "mule_msg_dispatcher", "(", "message", ")", ":", "msg", "=", "pickle", ".", "loads", "(", "message", ")", "if", "msg", "[", "'service'", "]", "==", "'uwsgi_mulefunc'", ":", "return", "mule_functions", "[", "msg", "[", "'func'", "]", "]", "(", "*", "msg", "[", "'args'", "]", ",", "*", "*", "msg", "[", "'kwargs'", "]", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/uwsgidecorators.py#L193-L196
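The dispatcher above unpickles messages produced elsewhere with uwsgi.mule_msg. A sending-side sketch, runnable only inside a uWSGI process; 'rebuild_index' stands for a hypothetical function registered through this module's mulefunc decorator, which is what populates the mule_functions table:

import pickle
import uwsgi   # only importable inside a running uWSGI server

msg = pickle.dumps({'service': 'uwsgi_mulefunc',
                    'func': 'rebuild_index',   # hypothetical registered mule function
                    'args': ('users',),
                    'kwargs': {}})
uwsgi.mule_msg(msg, 1)   # mule #1 feeds the message to mule_msg_dispatcher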
edgewall/trac
beb3e4eaf1e0a456d801a50a8614ecab06de29fc
trac/loader.py
python
_enable_plugin
(env, module)
Enable the given plugin module if it wasn't disabled explicitly.
Enable the given plugin module if it wasn't disabled explicitly.
[ "Enable", "the", "given", "plugin", "module", "if", "it", "wasn", "t", "disabled", "explicitly", "." ]
def _enable_plugin(env, module): """Enable the given plugin module if it wasn't disabled explicitly.""" if env.is_component_enabled(module) is None: env.enable_component(module)
[ "def", "_enable_plugin", "(", "env", ",", "module", ")", ":", "if", "env", ".", "is_component_enabled", "(", "module", ")", "is", "None", ":", "env", ".", "enable_component", "(", "module", ")" ]
https://github.com/edgewall/trac/blob/beb3e4eaf1e0a456d801a50a8614ecab06de29fc/trac/loader.py#L33-L36
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/twisted/twisted/words/protocols/irc.py
python
ServerSupportedFeatures.hasFeature
(self, feature)
return self.getFeature(feature) is not None
Determine whether a feature is supported or not. @rtype: C{bool}
Determine whether a feature is supported or not.
[ "Determine", "whether", "a", "feature", "is", "supported", "or", "not", "." ]
def hasFeature(self, feature): """ Determine whether a feature is supported or not. @rtype: C{bool} """ return self.getFeature(feature) is not None
[ "def", "hasFeature", "(", "self", ",", "feature", ")", ":", "return", "self", ".", "getFeature", "(", "feature", ")", "is", "not", "None" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/twisted/twisted/words/protocols/irc.py#L781-L787
donnemartin/gitsome
d7c57abc7cb66e9c910a844f15d4536866da3310
xonsh/tools.py
python
is_string_or_callable
(x)
return is_string(x) or is_callable(x)
Tests if something is a string or callable
Tests if something is a string or callable
[ "Tests", "if", "something", "is", "a", "string", "or", "callable" ]
def is_string_or_callable(x): """Tests if something is a string or callable""" return is_string(x) or is_callable(x)
[ "def", "is_string_or_callable", "(", "x", ")", ":", "return", "is_string", "(", "x", ")", "or", "is_callable", "(", "x", ")" ]
https://github.com/donnemartin/gitsome/blob/d7c57abc7cb66e9c910a844f15d4536866da3310/xonsh/tools.py#L1122-L1124
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/sqlalchemy/util/_collections.py
python
IdentitySet.difference_update
(self, iterable)
[]
def difference_update(self, iterable): self._members = self.difference(iterable)._members
[ "def", "difference_update", "(", "self", ",", "iterable", ")", ":", "self", ".", "_members", "=", "self", ".", "difference", "(", "iterable", ")", ".", "_members" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/sqlalchemy/util/_collections.py#L598-L599
inkcut/inkcut
d3dea58e13de9f9babb5ed9b562c7326b4efdcd7
inkcut/preview/plugin.py
python
PreviewPlugin.set_live_preview
(self, *items)
Set the items that will be displayed in the live plot preview. After set, use live_preview.update(position) to update it. Parameters ---------- items: list of kwargs A list of kwargs to pass to each plot item
Set the items that will be displayed in the live plot preview. After set, use live_preview.update(position) to update it.
[ "Set", "the", "items", "that", "will", "be", "displayed", "in", "the", "live", "plot", "preview", ".", "After", "set", "use", "live_preview", ".", "update", "(", "position", ")", "to", "update", "it", "." ]
def set_live_preview(self, *items): """ Set the items that will be displayed in the live plot preview. After set, use live_preview.update(position) to update it. Parameters ---------- items: list of kwargs A list of kwargs to pass to each plot item """ view_items = [ PainterPathPlotItem(kwargs.pop('path'), **kwargs) for kwargs in items ] self.live_preview.init(view_items)
[ "def", "set_live_preview", "(", "self", ",", "*", "items", ")", ":", "view_items", "=", "[", "PainterPathPlotItem", "(", "kwargs", ".", "pop", "(", "'path'", ")", ",", "*", "*", "kwargs", ")", "for", "kwargs", "in", "items", "]", "self", ".", "live_preview", ".", "init", "(", "view_items", ")" ]
https://github.com/inkcut/inkcut/blob/d3dea58e13de9f9babb5ed9b562c7326b4efdcd7/inkcut/preview/plugin.py#L118-L133
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/export/management/commands/update_export_with_newest_data.py
python
Command.add_arguments
(self, parser)
[]
def add_arguments(self, parser): parser.add_argument('export_id') parser.add_argument('-d', '--download_path', help="Path to download export to.") parser.add_argument( '--processes', type=int, dest='processes', default=multiprocessing.cpu_count() - 1, help='Number of parallel processes to run.' )
[ "def", "add_arguments", "(", "self", ",", "parser", ")", ":", "parser", ".", "add_argument", "(", "'export_id'", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--download_path'", ",", "help", "=", "\"Path to download export to.\"", ")", "parser", ".", "add_argument", "(", "'--processes'", ",", "type", "=", "int", ",", "dest", "=", "'processes'", ",", "default", "=", "multiprocessing", ".", "cpu_count", "(", ")", "-", "1", ",", "help", "=", "'Number of parallel processes to run.'", ")" ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/export/management/commands/update_export_with_newest_data.py#L34-L43
pybrain/pybrain
dcdf32ba1805490cefbc0bdeb227260d304fdb42
pybrain/rl/environments/ode/tools/xmltools.py
python
XMLstruct.up
(self, steps=1)
traverse upward a number of steps in tag stack
traverse upward a number of steps in tag stack
[ "traverse", "upward", "a", "number", "of", "steps", "in", "tag", "stack" ]
def up(self, steps=1): """traverse upward a number of steps in tag stack""" for _ in range(steps): if self.stack != []: self.current = self.stack.pop()
[ "def", "up", "(", "self", ",", "steps", "=", "1", ")", ":", "for", "_", "in", "range", "(", "steps", ")", ":", "if", "self", ".", "stack", "!=", "[", "]", ":", "self", ".", "current", "=", "self", ".", "stack", ".", "pop", "(", ")" ]
https://github.com/pybrain/pybrain/blob/dcdf32ba1805490cefbc0bdeb227260d304fdb42/pybrain/rl/environments/ode/tools/xmltools.py#L101-L105
biolab/orange3
41685e1c7b1d1babe680113685a2d44bcc9fec0b
Orange/widgets/data/owdiscretize.py
python
IncreasingNumbersListValidator.fixup
(self, string)
return ", ".join(parts)
Fixup the input. Remove empty parts from the string.
Fixup the input. Remove empty parts from the string.
[ "Fixup", "the", "input", ".", "Remove", "empty", "parts", "from", "the", "string", "." ]
def fixup(self, string): # type: (str) -> str """ Fixup the input. Remove empty parts from the string. """ parts = [string[start: end] for start, end in self.itersplit(string)] parts = [part for part in parts if part.strip()] return ", ".join(parts)
[ "def", "fixup", "(", "self", ",", "string", ")", ":", "# type: (str) -> str", "parts", "=", "[", "string", "[", "start", ":", "end", "]", "for", "start", ",", "end", "in", "self", ".", "itersplit", "(", "string", ")", "]", "parts", "=", "[", "part", "for", "part", "in", "parts", "if", "part", ".", "strip", "(", ")", "]", "return", "\", \"", ".", "join", "(", "parts", ")" ]
https://github.com/biolab/orange3/blob/41685e1c7b1d1babe680113685a2d44bcc9fec0b/Orange/widgets/data/owdiscretize.py#L199-L206
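A behaviour sketch for fixup, assuming the validator constructs without arguments (its __init__ is not shown in this excerpt); the exact spacing of the result depends on itersplit, which is also not shown:

from Orange.widgets.data.owdiscretize import IncreasingNumbersListValidator

v = IncreasingNumbersListValidator()
print(v.fixup('0.1, , 0.5,,0.9'))
# Empty parts between separators are dropped and the survivors are
# rejoined with ', ' -- roughly '0.1, 0.5, 0.9'.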
securityclippy/elasticintel
aa08d3e9f5ab1c000128e95161139ce97ff0e334
intelbot/requests/sessions.py
python
Session.get_adapter
(self, url)
Returns the appropriate connection adapter for the given URL.
Returns the appropriate connection adapter for the given URL.
[ "Returns", "the", "appropriate", "connection", "adapter", "for", "the", "given", "URL", "." ]
def get_adapter(self, url): """Returns the appropriate connection adapter for the given URL.""" for (prefix, adapter) in self.adapters.items(): if url.lower().startswith(prefix): return adapter # Nothing matches :-/ raise InvalidSchema("No connection adapters were found for '%s'" % url)
[ "def", "get_adapter", "(", "self", ",", "url", ")", ":", "for", "(", "prefix", ",", "adapter", ")", "in", "self", ".", "adapters", ".", "items", "(", ")", ":", "if", "url", ".", "lower", "(", ")", ".", "startswith", "(", "prefix", ")", ":", "return", "adapter", "# Nothing matches :-/", "raise", "InvalidSchema", "(", "\"No connection adapters were found for '%s'\"", "%", "url", ")" ]
https://github.com/securityclippy/elasticintel/blob/aa08d3e9f5ab1c000128e95161139ce97ff0e334/intelbot/requests/sessions.py#L636-L644
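The same prefix lookup is easy to exercise against the public requests API, where Session.mount keeps adapters sorted longest-prefix-first so the most specific mount wins:

import requests
from requests.exceptions import InvalidSchema

s = requests.Session()
s.mount('https://internal.example/', requests.adapters.HTTPAdapter(max_retries=3))

s.get_adapter('https://internal.example/api')  # the adapter mounted above
s.get_adapter('https://example.com/')          # the default 'https://' adapter
try:
    s.get_adapter('ftp://example.com/')        # no prefix matches
except InvalidSchema:
    print('no adapter for ftp://')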
jgilhutton/PyxieWPS
ba590343a73db98f898bda8b56b43928ac90947f
Version-1.1/pyxiewps-EN-swearing-version.py
python
Config.get_binaries
(self)
Installs reaver, pixiewps and other stuff
Installs reaver, pixiewps and other stuff
[ "Installs", "reaver", "pixiewps", "and", "other", "stuff" ]
def get_binaries(self): """ Installs reaver, pixiewps and other stuff """ if not self.internet_on(): print print ALERT + "You are not connected to the internet." print " Please check your connection so that Pyxiewps" print " can install all the required programs." print engine.exit_clean() git = 'apt-get -y install git' reaver_dep = 'apt-get -y install build-essential libpcap-dev sqlite3 libsqlite3-dev aircrack-ng' pixie_dep = 'sudo apt-get -y install libssl-dev' reaver_apt = 'apt-get -y install reaver' reaver = 'git clone https://github.com/t6x/reaver-wps-fork-t6x.git' pixiewps = 'git clone https://github.com/wiire/pixiewps.git' aircrack = 'apt-get -y install aircrack-ng' if not engine.GIT: print INFO + "Installing git..." proc4 = system(git) if not engine.AIRMON: print INFO + "Installing aircrack..." proc5 = system(aircrack) if not engine.PIXIEWPS: print INFO + "Installing pixiewps dependencies..." proc2 = system(pixie_dep) print INFO + "Downloading pixiewps..." proc3 = system(pixiewps) if not engine.REAVER: print INFO + "Installing reaver dependencies..." proc = system(reaver_dep) print INFO + "Downloading reaver..." if 'kali' in subprocess.check_output('uname -a', shell = True): proc1 = system(reaver_apt) else: proc1 = system(reaver) if path.isdir('pixiewps') and not engine.PIXIEWPS: print INFO + "Installing pixiewps..." system('cd pixiewps/src && make && make install') print INFO + "Done" if path.isdir('reaver-wps-fork-t6x') and not engine.REAVER: print INFO + "Installing reaver..." system('cd reaver-wps-fork-t6x* && cd src && ./configure && make && make install') print INFO + "Done" engine.check(check_again = True)
[ "def", "get_binaries", "(", "self", ")", ":", "if", "not", "self", ".", "internet_on", "(", ")", ":", "print", "print", "ALERT", "+", "\"You are not connected to the internet.\"", "print", "\" Please check your connection so that Pyxiewps\"", "print", "\" can install all the required programs.\"", "print", "engine", ".", "exit_clean", "(", ")", "git", "=", "'apt-get -y install git'", "reaver_dep", "=", "'apt-get -y install build-essential libpcap-dev sqlite3 libsqlite3-dev aircrack-ng'", "pixie_dep", "=", "'sudo apt-get -y install libssl-dev'", "reaver_apt", "=", "'apt-get -y install reaver'", "reaver", "=", "'git clone https://github.com/t6x/reaver-wps-fork-t6x.git'", "pixiewps", "=", "'git clone https://github.com/wiire/pixiewps.git'", "aircrack", "=", "'apt-get -y install aircrack-ng'", "if", "not", "engine", ".", "GIT", ":", "print", "INFO", "+", "\"Installing git...\"", "proc4", "=", "system", "(", "git", ")", "if", "not", "engine", ".", "AIRMON", ":", "print", "INFO", "+", "\"Installing aircrack...\"", "proc5", "=", "system", "(", "aircrack", ")", "if", "not", "engine", ".", "PIXIEWPS", ":", "print", "INFO", "+", "\"Installing pixiewps dependencies...\"", "proc2", "=", "system", "(", "pixie_dep", ")", "print", "INFO", "+", "\"Downloading pixiewps...\"", "proc3", "=", "system", "(", "pixiewps", ")", "if", "not", "engine", ".", "REAVER", ":", "print", "INFO", "+", "\"Installing reaver dependencies...\"", "proc", "=", "system", "(", "reaver_dep", ")", "print", "INFO", "+", "\"Downloading reaver...\"", "if", "'kali'", "in", "subprocess", ".", "check_output", "(", "'uname -a'", ",", "shell", "=", "True", ")", ":", "proc1", "=", "system", "(", "reaver_apt", ")", "else", ":", "proc1", "=", "system", "(", "reaver", ")", "if", "path", ".", "isdir", "(", "'pixiewps'", ")", "and", "not", "engine", ".", "PIXIEWPS", ":", "print", "INFO", "+", "\"Installing pixiewps...\"", "system", "(", "'cd pixiewps/src && make && make install'", ")", "print", "INFO", "+", "\"Done\"", "if", "path", ".", "isdir", "(", "'reaver-wps-fork-t6x'", ")", "and", "not", "engine", ".", "REAVER", ":", "print", "INFO", "+", "\"Installing reaver...\"", "system", "(", "'cd reaver-wps-fork-t6x* && cd src && ./configure && make && make install'", ")", "print", "INFO", "+", "\"Done\"", "engine", ".", "check", "(", "check_again", "=", "True", ")" ]
https://github.com/jgilhutton/PyxieWPS/blob/ba590343a73db98f898bda8b56b43928ac90947f/Version-1.1/pyxiewps-EN-swearing-version.py#L704-L750
microsoft/debugpy
be8dd607f6837244e0b565345e497aff7a0c08bf
src/debugpy/_vendored/pydevd/third_party/pep8/pycodestyle.py
python
comparison_type
(logical_line, noqa)
r"""Object type comparisons should always use isinstance(). Do not compare types directly. Okay: if isinstance(obj, int): E721: if type(obj) is type(1): When checking if an object is a string, keep in mind that it might be a unicode string too! In Python 2.3, str and unicode have a common base class, basestring, so you can do: Okay: if isinstance(obj, basestring): Okay: if type(a1) is type(b1):
r"""Object type comparisons should always use isinstance().
[ "r", "Object", "type", "comparisons", "should", "always", "use", "isinstance", "()", "." ]
def comparison_type(logical_line, noqa): r"""Object type comparisons should always use isinstance(). Do not compare types directly. Okay: if isinstance(obj, int): E721: if type(obj) is type(1): When checking if an object is a string, keep in mind that it might be a unicode string too! In Python 2.3, str and unicode have a common base class, basestring, so you can do: Okay: if isinstance(obj, basestring): Okay: if type(a1) is type(b1): """ match = COMPARE_TYPE_REGEX.search(logical_line) if match and not noqa: inst = match.group(1) if inst and isidentifier(inst) and inst not in SINGLETONS: return # Allow comparison for types which are not obvious yield match.start(), "E721 do not compare types, use 'isinstance()'"
[ "def", "comparison_type", "(", "logical_line", ",", "noqa", ")", ":", "match", "=", "COMPARE_TYPE_REGEX", ".", "search", "(", "logical_line", ")", "if", "match", "and", "not", "noqa", ":", "inst", "=", "match", ".", "group", "(", "1", ")", "if", "inst", "and", "isidentifier", "(", "inst", ")", "and", "inst", "not", "in", "SINGLETONS", ":", "return", "# Allow comparison for types which are not obvious", "yield", "match", ".", "start", "(", ")", ",", "\"E721 do not compare types, use 'isinstance()'\"" ]
https://github.com/microsoft/debugpy/blob/be8dd607f6837244e0b565345e497aff7a0c08bf/src/debugpy/_vendored/pydevd/third_party/pep8/pycodestyle.py#L1178-L1198
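The fix the E721 message asks for, side by side with the pattern it flags:

obj = 42

if type(obj) == type(1):     # flagged: direct type comparison
    print('int, via type()')

if isinstance(obj, int):     # preferred: also accepts subclasses
    print('int, via isinstance()')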
dropbox/dropbox-sdk-python
015437429be224732990041164a21a0501235db1
dropbox/team_log.py
python
LoginMethod.is_web_session
(self)
return self._tag == 'web_session'
Check if the union tag is ``web_session``. :rtype: bool
Check if the union tag is ``web_session``.
[ "Check", "if", "the", "union", "tag", "is", "web_session", "." ]
def is_web_session(self): """ Check if the union tag is ``web_session``. :rtype: bool """ return self._tag == 'web_session'
[ "def", "is_web_session", "(", "self", ")", ":", "return", "self", ".", "_tag", "==", "'web_session'" ]
https://github.com/dropbox/dropbox-sdk-python/blob/015437429be224732990041164a21a0501235db1/dropbox/team_log.py#L51432-L51438
trailofbits/manticore
b050fdf0939f6c63f503cdf87ec0ab159dd41159
manticore/platforms/linux_syscall_stubs.py
python
SyscallStubs.sys_splice
(self, fd_in, off_in, fd_out, off_out, len, flags)
return self.complicated_success(275)
AUTOGENERATED UNIMPLEMENTED STUB
AUTOGENERATED UNIMPLEMENTED STUB
[ "AUTOGENERATED", "UNIMPLEMENTED", "STUB" ]
def sys_splice(self, fd_in, off_in, fd_out, off_out, len, flags) -> int: """ AUTOGENERATED UNIMPLEMENTED STUB """ return self.complicated_success(275)
[ "def", "sys_splice", "(", "self", ",", "fd_in", ",", "off_in", ",", "fd_out", ",", "off_out", ",", "len", ",", "flags", ")", "->", "int", ":", "return", "self", ".", "complicated_success", "(", "275", ")" ]
https://github.com/trailofbits/manticore/blob/b050fdf0939f6c63f503cdf87ec0ab159dd41159/manticore/platforms/linux_syscall_stubs.py#L1017-L1019
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/ntpath.py
python
isabs
(s)
return s != '' and s[:1] in '/\\'
Test whether a path is absolute
Test whether a path is absolute
[ "Test", "whether", "a", "path", "is", "absolute" ]
def isabs(s): """Test whether a path is absolute""" s = splitdrive(s)[1] return s != '' and s[:1] in '/\\'
[ "def", "isabs", "(", "s", ")", ":", "s", "=", "splitdrive", "(", "s", ")", "[", "1", "]", "return", "s", "!=", "''", "and", "s", "[", ":", "1", "]", "in", "'/\\\\'" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/ntpath.py#L55-L58
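A few cases evaluated against the implementation shown above (a copy of the classic stdlib ntpath.isabs); once splitdrive strips the drive, only a leading slash or backslash counts as absolute. The function is inlined here so the sketch is self-contained:

from ntpath import splitdrive

def isabs(s):
    """Classic stdlib logic, as in the record above."""
    s = splitdrive(s)[1]
    return s != '' and s[:1] in '/\\'

print(isabs('C:\\Windows'))      # True: '\\Windows' remains after the drive is split off
print(isabs('/usr/bin'))         # True: a forward slash counts as well
print(isabs('C:temp'))           # False: drive-relative, no slash after the drive
print(isabs('docs/readme.txt'))  # False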
gentoo/portage
e5be73709b1a42b40380fd336f9381452b01a723
lib/portage/util/socks5.py
python
ProxyManager.stop
(self)
Stop the SOCKSv5 server.
Stop the SOCKSv5 server.
[ "Stop", "the", "SOCKSv5", "server", "." ]
def stop(self): """ Stop the SOCKSv5 server. """ for p in self._pids: os.kill(p, signal.SIGINT) os.waitpid(p, 0) self.socket_path = None self._pids = []
[ "def", "stop", "(", "self", ")", ":", "for", "p", "in", "self", ".", "_pids", ":", "os", ".", "kill", "(", "p", ",", "signal", ".", "SIGINT", ")", "os", ".", "waitpid", "(", "p", ",", "0", ")", "self", ".", "socket_path", "=", "None", "self", ".", "_pids", "=", "[", "]" ]
https://github.com/gentoo/portage/blob/e5be73709b1a42b40380fd336f9381452b01a723/lib/portage/util/socks5.py#L60-L69
tornadoweb/tornado
208672f3bf6cbb7e37f54c356e02a71ca29f1e02
tornado/ioloop.py
python
IOLoop.call_later
( self, delay: float, callback: Callable, *args: Any, **kwargs: Any )
return self.call_at(self.time() + delay, callback, *args, **kwargs)
Runs the ``callback`` after ``delay`` seconds have passed. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0
Runs the ``callback`` after ``delay`` seconds have passed.
[ "Runs", "the", "callback", "after", "delay", "seconds", "have", "passed", "." ]
def call_later( self, delay: float, callback: Callable, *args: Any, **kwargs: Any ) -> object: """Runs the ``callback`` after ``delay`` seconds have passed. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0 """ return self.call_at(self.time() + delay, callback, *args, **kwargs)
[ "def", "call_later", "(", "self", ",", "delay", ":", "float", ",", "callback", ":", "Callable", ",", "*", "args", ":", "Any", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "object", ":", "return", "self", ".", "call_at", "(", "self", ".", "time", "(", ")", "+", "delay", ",", "callback", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/tornadoweb/tornado/blob/208672f3bf6cbb7e37f54c356e02a71ca29f1e02/tornado/ioloop.py#L588-L601
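A self-contained Tornado sketch: schedule a callback two seconds out, keep the opaque handle in case it needs cancelling, then run the loop:

from tornado.ioloop import IOLoop

def tick():
    print('2 seconds elapsed')
    IOLoop.current().stop()

loop = IOLoop.current()
handle = loop.call_later(2.0, tick)
# loop.remove_timeout(handle)   # would cancel the callback before it fires
loop.start()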
GNS3/gns3-server
aff06572d4173df945ad29ea8feb274f7885d9e4
gns3server/compute/qemu/utils/qcow2.py
python
Qcow2.backing_file
(self)
return path
When using a linked clone, this will return the path to the base image :returns: None if it's not a linked clone, the path otherwise
When using a linked clone, this will return the path to the base image
[ "When", "using", "a", "linked", "clone", "this", "will", "return", "the", "path", "to", "the", "base", "image" ]
def backing_file(self): """ When using a linked clone, this will return the path to the base image :returns: None if it's not a linked clone, the path otherwise """ with open(self._path, 'rb') as f: f.seek(self.backing_file_offset) content = f.read(self.backing_file_size) path = content.decode() if len(path) == 0: return None return path
[ "def", "backing_file", "(", "self", ")", ":", "with", "open", "(", "self", ".", "_path", ",", "'rb'", ")", "as", "f", ":", "f", ".", "seek", "(", "self", ".", "backing_file_offset", ")", "content", "=", "f", ".", "read", "(", "self", ".", "backing_file_size", ")", "path", "=", "content", ".", "decode", "(", ")", "if", "len", "(", "path", ")", "==", "0", ":", "return", "None", "return", "path" ]
https://github.com/GNS3/gns3-server/blob/aff06572d4173df945ad29ea8feb274f7885d9e4/gns3server/compute/qemu/utils/qcow2.py#L76-L90
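A usage sketch; the Qcow2 constructor is not shown in this excerpt and is assumed to take the image path, which is itself a placeholder:

from gns3server.compute.qemu.utils.qcow2 import Qcow2

qcow2 = Qcow2('/vms/node1/disk0.qcow2')   # constructor assumed to take the image path
base = qcow2.backing_file()
if base is None:
    print('standalone image')
else:
    print('linked clone of', base)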
git-cola/git-cola
b48b8028e0c3baf47faf7b074b9773737358163d
cola/widgets/diff.py
python
Viewer.render
(self)
[]
def render(self): # Update images if self.pixmaps: mode = self.options.image_mode.currentIndex() if mode == self.options.SIDE_BY_SIDE: image = self.render_side_by_side() elif mode == self.options.DIFF: image = self.render_diff() elif mode == self.options.XOR: image = self.render_xor() elif mode == self.options.PIXEL_XOR: image = self.render_pixel_xor() else: image = self.render_side_by_side() else: image = QtGui.QPixmap() self.image.pixmap = image # Apply zoom zoom_mode = self.options.zoom_mode.currentIndex() zoom_factor = self.options.zoom_factors[zoom_mode][1] if zoom_factor > 0.0: self.image.resetTransform() self.image.scale(zoom_factor, zoom_factor) poly = self.image.mapToScene(self.image.viewport().rect()) self.image.last_scene_roi = poly.boundingRect()
[ "def", "render", "(", "self", ")", ":", "# Update images", "if", "self", ".", "pixmaps", ":", "mode", "=", "self", ".", "options", ".", "image_mode", ".", "currentIndex", "(", ")", "if", "mode", "==", "self", ".", "options", ".", "SIDE_BY_SIDE", ":", "image", "=", "self", ".", "render_side_by_side", "(", ")", "elif", "mode", "==", "self", ".", "options", ".", "DIFF", ":", "image", "=", "self", ".", "render_diff", "(", ")", "elif", "mode", "==", "self", ".", "options", ".", "XOR", ":", "image", "=", "self", ".", "render_xor", "(", ")", "elif", "mode", "==", "self", ".", "options", ".", "PIXEL_XOR", ":", "image", "=", "self", ".", "render_pixel_xor", "(", ")", "else", ":", "image", "=", "self", ".", "render_side_by_side", "(", ")", "else", ":", "image", "=", "QtGui", ".", "QPixmap", "(", ")", "self", ".", "image", ".", "pixmap", "=", "image", "# Apply zoom", "zoom_mode", "=", "self", ".", "options", ".", "zoom_mode", ".", "currentIndex", "(", ")", "zoom_factor", "=", "self", ".", "options", ".", "zoom_factors", "[", "zoom_mode", "]", "[", "1", "]", "if", "zoom_factor", ">", "0.0", ":", "self", ".", "image", ".", "resetTransform", "(", ")", "self", ".", "image", ".", "scale", "(", "zoom_factor", ",", "zoom_factor", ")", "poly", "=", "self", ".", "image", ".", "mapToScene", "(", "self", ".", "image", ".", "viewport", "(", ")", ".", "rect", "(", ")", ")", "self", ".", "image", ".", "last_scene_roi", "=", "poly", ".", "boundingRect", "(", ")" ]
https://github.com/git-cola/git-cola/blob/b48b8028e0c3baf47faf7b074b9773737358163d/cola/widgets/diff.py#L483-L508
python-diamond/Diamond
7000e16cfdf4508ed9291fc4b3800592557b2431
src/diamond/handler/tsdb.py
python
TSDBHandler.process
(self, metric)
Process a metric by sending it to TSDB
Process a metric by sending it to TSDB
[ "Process", "a", "metric", "by", "sending", "it", "to", "TSDB" ]
def process(self, metric): """ Process a metric by sending it to TSDB """ entry = {'timestamp': metric.timestamp, 'value': metric.value, "tags": {}} entry["tags"]["hostname"] = metric.host if self.cleanMetrics: metric = MetricWrapper(metric, self.log) if self.skipAggregates and metric.isAggregate(): return for tagKey in metric.getTags(): entry["tags"][tagKey] = metric.getTags()[tagKey] entry['metric'] = (self.prefix + metric.getCollectorPath() + '.' + metric.getMetricPath()) for [key, value] in self.tags: entry["tags"][key] = value self.entrys.append(entry) # send data if list is long enough if (len(self.entrys) >= self.batch): # Compress data if self.compression >= 1: data = StringIO.StringIO() with contextlib.closing(gzip.GzipFile(fileobj=data, compresslevel=self.compression, mode="w")) as f: f.write(json.dumps(self.entrys)) self._send(data.getvalue()) else: # no compression data = json.dumps(self.entrys) self._send(data)
[ "def", "process", "(", "self", ",", "metric", ")", ":", "entry", "=", "{", "'timestamp'", ":", "metric", ".", "timestamp", ",", "'value'", ":", "metric", ".", "value", ",", "\"tags\"", ":", "{", "}", "}", "entry", "[", "\"tags\"", "]", "[", "\"hostname\"", "]", "=", "metric", ".", "host", "if", "self", ".", "cleanMetrics", ":", "metric", "=", "MetricWrapper", "(", "metric", ",", "self", ".", "log", ")", "if", "self", ".", "skipAggregates", "and", "metric", ".", "isAggregate", "(", ")", ":", "return", "for", "tagKey", "in", "metric", ".", "getTags", "(", ")", ":", "entry", "[", "\"tags\"", "]", "[", "tagKey", "]", "=", "metric", ".", "getTags", "(", ")", "[", "tagKey", "]", "entry", "[", "'metric'", "]", "=", "(", "self", ".", "prefix", "+", "metric", ".", "getCollectorPath", "(", ")", "+", "'.'", "+", "metric", ".", "getMetricPath", "(", ")", ")", "for", "[", "key", ",", "value", "]", "in", "self", ".", "tags", ":", "entry", "[", "\"tags\"", "]", "[", "key", "]", "=", "value", "self", ".", "entrys", ".", "append", "(", "entry", ")", "# send data if list is long enough", "if", "(", "len", "(", "self", ".", "entrys", ")", ">=", "self", ".", "batch", ")", ":", "# Compress data", "if", "self", ".", "compression", ">=", "1", ":", "data", "=", "StringIO", ".", "StringIO", "(", ")", "with", "contextlib", ".", "closing", "(", "gzip", ".", "GzipFile", "(", "fileobj", "=", "data", ",", "compresslevel", "=", "self", ".", "compression", ",", "mode", "=", "\"w\"", ")", ")", "as", "f", ":", "f", ".", "write", "(", "json", ".", "dumps", "(", "self", ".", "entrys", ")", ")", "self", ".", "_send", "(", "data", ".", "getvalue", "(", ")", ")", "else", ":", "# no compression", "data", "=", "json", ".", "dumps", "(", "self", ".", "entrys", ")", "self", ".", "_send", "(", "data", ")" ]
https://github.com/python-diamond/Diamond/blob/7000e16cfdf4508ed9291fc4b3800592557b2431/src/diamond/handler/tsdb.py#L189-L225
daoluan/decode-Django
d46a858b45b56de48b0355f50dd9e45402d04cfd
Django-1.5.1/django/template/loaders/eggs.py
python
Loader.load_template_source
(self, template_name, template_dirs=None)
Loads templates from Python eggs via pkg_resource.resource_string. For every installed app, it tries to get the resource (app, template_name).
Loads templates from Python eggs via pkg_resource.resource_string.
[ "Loads", "templates", "from", "Python", "eggs", "via", "pkg_resource", ".", "resource_string", "." ]
def load_template_source(self, template_name, template_dirs=None): """ Loads templates from Python eggs via pkg_resource.resource_string. For every installed app, it tries to get the resource (app, template_name). """ if resource_string is not None: pkg_name = 'templates/' + template_name for app in settings.INSTALLED_APPS: try: resource = resource_string(app, pkg_name) except Exception: continue if not six.PY3: resource = resource.decode(settings.FILE_CHARSET) return (resource, 'egg:%s:%s' % (app, pkg_name)) raise TemplateDoesNotExist(template_name)
[ "def", "load_template_source", "(", "self", ",", "template_name", ",", "template_dirs", "=", "None", ")", ":", "if", "resource_string", "is", "not", "None", ":", "pkg_name", "=", "'templates/'", "+", "template_name", "for", "app", "in", "settings", ".", "INSTALLED_APPS", ":", "try", ":", "resource", "=", "resource_string", "(", "app", ",", "pkg_name", ")", "except", "Exception", ":", "continue", "if", "not", "six", ".", "PY3", ":", "resource", "=", "resource", ".", "decode", "(", "settings", ".", "FILE_CHARSET", ")", "return", "(", "resource", ",", "'egg:%s:%s'", "%", "(", "app", ",", "pkg_name", ")", ")", "raise", "TemplateDoesNotExist", "(", "template_name", ")" ]
https://github.com/daoluan/decode-Django/blob/d46a858b45b56de48b0355f50dd9e45402d04cfd/Django-1.5.1/django/template/loaders/eggs.py#L17-L33
IvanFon/xinput-gui
900f4dc0fe8020eb106881cf7e489862f532b9a1
xinput_gui/xinput/xinput.py
python
Xinput.get_device_by_id
(self, id_: int)
return None
Get a device by its ID. Args: id_: xinput device ID.
Get a device by its ID.
[ "Get", "a", "device", "by", "its", "ID", "." ]
def get_device_by_id(self, id_: int) -> Device: '''Get a device by its ID. Args: id_: xinput device ID. ''' for device in self.devices: if int(device.id) == id_: return device return None
[ "def", "get_device_by_id", "(", "self", ",", "id_", ":", "int", ")", "->", "Device", ":", "for", "device", "in", "self", ".", "devices", ":", "if", "int", "(", "device", ".", "id", ")", "==", "id_", ":", "return", "device", "return", "None" ]
https://github.com/IvanFon/xinput-gui/blob/900f4dc0fe8020eb106881cf7e489862f532b9a1/xinput_gui/xinput/xinput.py#L97-L108
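A small sketch; the Xinput constructor is assumed to enumerate devices up front, and ID 12 is arbitrary:

from xinput_gui.xinput.xinput import Xinput

xinput = Xinput()   # assumed to populate .devices on construction
device = xinput.get_device_by_id(12)
if device is None:
    print('no xinput device with ID 12')
else:
    print('found device', device.id)   # Device exposes at least an id field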
binaryage/drydrop
2f27e15befd247255d89f9120eeee44851b82c4a
dryapp/jinja2/lexer.py
python
TokenStream.close
(self)
Close the stream.
Close the stream.
[ "Close", "the", "stream", "." ]
def close(self): """Close the stream.""" self.current = Token(self.current.lineno, 'eof', '') self._next = None self.closed = True
[ "def", "close", "(", "self", ")", ":", "self", ".", "current", "=", "Token", "(", "self", ".", "current", ".", "lineno", ",", "'eof'", ",", "''", ")", "self", ".", "_next", "=", "None", "self", ".", "closed", "=", "True" ]
https://github.com/binaryage/drydrop/blob/2f27e15befd247255d89f9120eeee44851b82c4a/dryapp/jinja2/lexer.py#L220-L224
securityclippy/elasticintel
aa08d3e9f5ab1c000128e95161139ce97ff0e334
ingest_feed_lambda/pandas/io/stata.py
python
StataWriter._prepare_categoricals
(self, data)
return DataFrame.from_items(data_formatted)
Check for categorical columns, retain categorical information for Stata file and convert categorical data to int
Check for categorical columns, retain categorical information for Stata file and convert categorical data to int
[ "Check", "for", "categorical", "columns", "retain", "categorical", "information", "for", "Stata", "file", "and", "convert", "categorical", "data", "to", "int" ]
def _prepare_categoricals(self, data): """Check for categorical columns, retain categorical information for Stata file and convert categorical data to int""" is_cat = [is_categorical_dtype(data[col]) for col in data] self._is_col_cat = is_cat self._value_labels = [] if not any(is_cat): return data get_base_missing_value = StataMissingValue.get_base_missing_value data_formatted = [] for col, col_is_cat in zip(data, is_cat): if col_is_cat: self._value_labels.append(StataValueLabel(data[col])) dtype = data[col].cat.codes.dtype if dtype == np.int64: raise ValueError('It is not possible to export ' 'int64-based categorical data to Stata.') values = data[col].cat.codes.values.copy() # Upcast if needed so that correct missing values can be set if values.max() >= get_base_missing_value(dtype): if dtype == np.int8: dtype = np.int16 elif dtype == np.int16: dtype = np.int32 else: dtype = np.float64 values = np.array(values, dtype=dtype) # Replace missing values with Stata missing value for type values[values == -1] = get_base_missing_value(dtype) data_formatted.append((col, values)) else: data_formatted.append((col, data[col])) return DataFrame.from_items(data_formatted)
[ "def", "_prepare_categoricals", "(", "self", ",", "data", ")", ":", "is_cat", "=", "[", "is_categorical_dtype", "(", "data", "[", "col", "]", ")", "for", "col", "in", "data", "]", "self", ".", "_is_col_cat", "=", "is_cat", "self", ".", "_value_labels", "=", "[", "]", "if", "not", "any", "(", "is_cat", ")", ":", "return", "data", "get_base_missing_value", "=", "StataMissingValue", ".", "get_base_missing_value", "data_formatted", "=", "[", "]", "for", "col", ",", "col_is_cat", "in", "zip", "(", "data", ",", "is_cat", ")", ":", "if", "col_is_cat", ":", "self", ".", "_value_labels", ".", "append", "(", "StataValueLabel", "(", "data", "[", "col", "]", ")", ")", "dtype", "=", "data", "[", "col", "]", ".", "cat", ".", "codes", ".", "dtype", "if", "dtype", "==", "np", ".", "int64", ":", "raise", "ValueError", "(", "'It is not possible to export '", "'int64-based categorical data to Stata.'", ")", "values", "=", "data", "[", "col", "]", ".", "cat", ".", "codes", ".", "values", ".", "copy", "(", ")", "# Upcast if needed so that correct missing values can be set", "if", "values", ".", "max", "(", ")", ">=", "get_base_missing_value", "(", "dtype", ")", ":", "if", "dtype", "==", "np", ".", "int8", ":", "dtype", "=", "np", ".", "int16", "elif", "dtype", "==", "np", ".", "int16", ":", "dtype", "=", "np", ".", "int32", "else", ":", "dtype", "=", "np", ".", "float64", "values", "=", "np", ".", "array", "(", "values", ",", "dtype", "=", "dtype", ")", "# Replace missing values with Stata missing value for type", "values", "[", "values", "==", "-", "1", "]", "=", "get_base_missing_value", "(", "dtype", ")", "data_formatted", ".", "append", "(", "(", "col", ",", "values", ")", ")", "else", ":", "data_formatted", ".", "append", "(", "(", "col", ",", "data", "[", "col", "]", ")", ")", "return", "DataFrame", ".", "from_items", "(", "data_formatted", ")" ]
https://github.com/securityclippy/elasticintel/blob/aa08d3e9f5ab1c000128e95161139ce97ff0e334/ingest_feed_lambda/pandas/io/stata.py#L1964-L2000
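The sentinel this method remaps can be seen with plain pandas: categorical codes are small integers and -1 marks a missing value, which _prepare_categoricals then replaces with the Stata missing value for the code dtype:

import pandas as pd

s = pd.Series(['a', 'b', None, 'a'], dtype='category')
print(s.cat.codes.tolist())   # [0, 1, -1, 0] -- int8 codes, -1 == missing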
khalim19/gimp-plugin-export-layers
b37255f2957ad322f4d332689052351cdea6e563
export_layers/pygimplib/_lib/future/future/backports/http/cookiejar.py
python
DefaultCookiePolicy.set_ok_port
(self, cookie, request)
return True
[]
def set_ok_port(self, cookie, request): if cookie.port_specified: req_port = request_port(request) if req_port is None: req_port = "80" else: req_port = str(req_port) for p in cookie.port.split(","): try: int(p) except ValueError: _debug(" bad port %s (not numeric)", p) return False if p == req_port: break else: _debug(" request port (%s) not found in %s", req_port, cookie.port) return False return True
[ "def", "set_ok_port", "(", "self", ",", "cookie", ",", "request", ")", ":", "if", "cookie", ".", "port_specified", ":", "req_port", "=", "request_port", "(", "request", ")", "if", "req_port", "is", "None", ":", "req_port", "=", "\"80\"", "else", ":", "req_port", "=", "str", "(", "req_port", ")", "for", "p", "in", "cookie", ".", "port", ".", "split", "(", "\",\"", ")", ":", "try", ":", "int", "(", "p", ")", "except", "ValueError", ":", "_debug", "(", "\" bad port %s (not numeric)\"", ",", "p", ")", "return", "False", "if", "p", "==", "req_port", ":", "break", "else", ":", "_debug", "(", "\" request port (%s) not found in %s\"", ",", "req_port", ",", "cookie", ".", "port", ")", "return", "False", "return", "True" ]
https://github.com/khalim19/gimp-plugin-export-layers/blob/b37255f2957ad322f4d332689052351cdea6e563/export_layers/pygimplib/_lib/future/future/backports/http/cookiejar.py#L1059-L1078
cylc/cylc-flow
5ec221143476c7c616c156b74158edfbcd83794a
cylc/flow/config.py
python
WorkflowConfig._close_families
(l_id, r_id, clf_map)
return lret, rret
Turn (name, point) to 'name.point' for edge. Replace close family members with family nodes if relevant.
Turn (name, point) to 'name.point' for edge.
[ "Turn", "(", "name", "point", ")", "to", "name", ".", "point", "for", "edge", "." ]
def _close_families(l_id, r_id, clf_map): """Turn (name, point) to 'name.point' for edge. Replace close family members with family nodes if relevant. """ lret = None lname, lpoint = None, None if l_id: lname, lpoint = l_id lret = TaskID.get(lname, lpoint) rret = None rname, rpoint = None, None if r_id: rname, rpoint = r_id rret = TaskID.get(rname, rpoint) for fam_name, fam_members in clf_map.items(): if lname in fam_members and rname in fam_members: # l and r are both members lret = TaskID.get(fam_name, lpoint) rret = TaskID.get(fam_name, rpoint) break elif lname in fam_members: # l is a member lret = TaskID.get(fam_name, lpoint) elif rname in fam_members: # r is a member rret = TaskID.get(fam_name, rpoint) return lret, rret
[ "def", "_close_families", "(", "l_id", ",", "r_id", ",", "clf_map", ")", ":", "lret", "=", "None", "lname", ",", "lpoint", "=", "None", ",", "None", "if", "l_id", ":", "lname", ",", "lpoint", "=", "l_id", "lret", "=", "TaskID", ".", "get", "(", "lname", ",", "lpoint", ")", "rret", "=", "None", "rname", ",", "rpoint", "=", "None", ",", "None", "if", "r_id", ":", "rname", ",", "rpoint", "=", "r_id", "rret", "=", "TaskID", ".", "get", "(", "rname", ",", "rpoint", ")", "for", "fam_name", ",", "fam_members", "in", "clf_map", ".", "items", "(", ")", ":", "if", "lname", "in", "fam_members", "and", "rname", "in", "fam_members", ":", "# l and r are both members", "lret", "=", "TaskID", ".", "get", "(", "fam_name", ",", "lpoint", ")", "rret", "=", "TaskID", ".", "get", "(", "fam_name", ",", "rpoint", ")", "break", "elif", "lname", "in", "fam_members", ":", "# l is a member", "lret", "=", "TaskID", ".", "get", "(", "fam_name", ",", "lpoint", ")", "elif", "rname", "in", "fam_members", ":", "# r is a member", "rret", "=", "TaskID", ".", "get", "(", "fam_name", ",", "rpoint", ")", "return", "lret", ",", "rret" ]
https://github.com/cylc/cylc-flow/blob/5ec221143476c7c616c156b74158edfbcd83794a/cylc/flow/config.py#L1866-L1895
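A self-contained sketch of the family-collapse rule, with TaskID.get stubbed as "name.point" (an assumption for the demo; the real helper lives elsewhere in cylc.flow):

```python
def task_id(name, point):  # stand-in for TaskID.get
    return f"{name}.{point}"

clf_map = {"FAM": {"foo", "bar"}}
(lname, lpoint), (rname, rpoint) = ("foo", "1"), ("bar", "2")

lret, rret = task_id(lname, lpoint), task_id(rname, rpoint)
for fam, members in clf_map.items():
    if lname in members and rname in members:
        # both endpoints collapse onto the family node
        lret, rret = task_id(fam, lpoint), task_id(fam, rpoint)
        break
print(lret, "->", rret)  # FAM.1 -> FAM.2
```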
MrH0wl/Cloudmare
65e5bc9888f9d362ab2abfb103ea6c1e869d67aa
thirdparty/xlsxwriter/worksheet.py
python
Worksheet.add_table
(self, first_row, first_col, last_row, last_col, options=None)
return table
Add an Excel table to a worksheet. Args: first_row: The first row of the cell range. (zero indexed). first_col: The first column of the cell range. last_row: The last row of the cell range. (zero indexed). last_col: The last column of the cell range. options: Table format options. (Optional) Returns: 0: Success. -1: Not supported in constant_memory mode. -2: Row or column is out of worksheet bounds. -3: Incorrect parameter or option.
Add an Excel table to a worksheet.
[ "Add", "an", "Excel", "table", "to", "a", "worksheet", "." ]
def add_table(self, first_row, first_col, last_row, last_col, options=None): """ Add an Excel table to a worksheet. Args: first_row: The first row of the cell range. (zero indexed). first_col: The first column of the cell range. last_row: The last row of the cell range. (zero indexed). last_col: The last column of the cell range. options: Table format options. (Optional) Returns: 0: Success. -1: Not supported in constant_memory mode. -2: Row or column is out of worksheet bounds. -3: Incorrect parameter or option. """ table = {} col_formats = {} if options is None: options = {} else: # Copy the user defined options so they aren't modified. options = options.copy() if self.constant_memory: warn("add_table() isn't supported in 'constant_memory' mode") return -1 # Check that row and col are valid without storing the values. if self._check_dimensions(first_row, first_col, True, True): return -2 if self._check_dimensions(last_row, last_col, True, True): return -2 # Swap last row/col for first row/col as necessary. if first_row > last_row: (first_row, last_row) = (last_row, first_row) if first_col > last_col: (first_col, last_col) = (last_col, first_col) # Valid input parameters. valid_parameter = { 'autofilter': True, 'banded_columns': True, 'banded_rows': True, 'columns': True, 'data': True, 'first_column': True, 'header_row': True, 'last_column': True, 'name': True, 'style': True, 'total_row': True, } # Check for valid input parameters. for param_key in options.keys(): if param_key not in valid_parameter: warn("Unknown parameter '%s' in add_table()" % param_key) return -3 # Turn on Excel's defaults. options['banded_rows'] = options.get('banded_rows', True) options['header_row'] = options.get('header_row', True) options['autofilter'] = options.get('autofilter', True) # Check that there are enough rows. num_rows = last_row - first_row if options['header_row']: num_rows -= 1 if num_rows < 0: warn("Must have at least one data row in in add_table()") return -3 # Set the table options. table['show_first_col'] = options.get('first_column', False) table['show_last_col'] = options.get('last_column', False) table['show_row_stripes'] = options.get('banded_rows', False) table['show_col_stripes'] = options.get('banded_columns', False) table['header_row_count'] = options.get('header_row', 0) table['totals_row_shown'] = options.get('total_row', False) # Set the table name. if 'name' in options: name = options['name'] table['name'] = name if ' ' in name: warn("Name '%s' in add_table() cannot contain spaces" % force_unicode(name)) return -3 # Warn if the name contains invalid chars as defined by Excel. if (not re.match(r'^[\w\\][\w\\.]*$', name, re.UNICODE) or re.match(r'^\d', name)): warn("Invalid Excel characters in add_table(): '%s'" % force_unicode(name)) return -1 # Warn if the name looks like a cell name. if re.match(r'^[a-zA-Z][a-zA-Z]?[a-dA-D]?[0-9]+$', name): warn("Name looks like a cell name in add_table(): '%s'" % force_unicode(name)) return -1 # Warn if the name looks like a R1C1 cell reference. if (re.match(r'^[rcRC]$', name) or re.match(r'^[rcRC]\d+[rcRC]\d+$', name)): warn("Invalid name '%s' like a RC cell ref in add_table()" % force_unicode(name)) return -1 # Set the table style. if 'style' in options: table['style'] = options['style'] if table['style'] is None: table['style'] = '' # Remove whitespace from style name. table['style'] = table['style'].replace(' ', '') else: table['style'] = "TableStyleMedium9" # Set the data range rows (without the header and footer). 
first_data_row = first_row last_data_row = last_row if options.get('header_row'): first_data_row += 1 if options.get('total_row'): last_data_row -= 1 # Set the table and autofilter ranges. table['range'] = xl_range(first_row, first_col, last_row, last_col) table['a_range'] = xl_range(first_row, first_col, last_data_row, last_col) # If the header row if off the default is to turn autofilter off. if not options['header_row']: options['autofilter'] = 0 # Set the autofilter range. if options['autofilter']: table['autofilter'] = table['a_range'] # Add the table columns. col_id = 1 table['columns'] = [] seen_names = {} for col_num in range(first_col, last_col + 1): # Set up the default column data. col_data = { 'id': col_id, 'name': 'Column' + str(col_id), 'total_string': '', 'total_function': '', 'total_value': 0, 'formula': '', 'format': None, 'name_format': None, } # Overwrite the defaults with any user defined values. if 'columns' in options: # Check if there are user defined values for this column. if col_id <= len(options['columns']): user_data = options['columns'][col_id - 1] else: user_data = None if user_data: # Get the column format. xformat = user_data.get('format', None) # Map user defined values to internal values. if user_data.get('header'): col_data['name'] = user_data['header'] # Excel requires unique case insensitive header names. header_name = col_data['name'] name = header_name.lower() if name in seen_names: warn("Duplicate header name in add_table(): '%s'" % force_unicode(name)) return -1 else: seen_names[name] = True col_data['name_format'] = user_data.get('header_format') # Handle the column formula. if 'formula' in user_data and user_data['formula']: formula = user_data['formula'] # Remove the formula '=' sign if it exists. if formula.startswith('='): formula = formula.lstrip('=') # Covert Excel 2010 "@" ref to 2007 "#This Row". formula = formula.replace('@', '[#This Row],') col_data['formula'] = formula for row in range(first_data_row, last_data_row + 1): self._write_formula(row, col_num, formula, xformat) # Handle the function for the total row. if user_data.get('total_function'): function = user_data['total_function'] # Massage the function name. function = function.lower() function = function.replace('_', '') function = function.replace(' ', '') if function == 'countnums': function = 'countNums' if function == 'stddev': function = 'stdDev' col_data['total_function'] = function formula = \ self._table_function_to_formula(function, col_data['name']) value = user_data.get('total_value', 0) self._write_formula(last_row, col_num, formula, xformat, value) elif user_data.get('total_string'): # Total label only (not a function). total_string = user_data['total_string'] col_data['total_string'] = total_string self._write_string(last_row, col_num, total_string, user_data.get('format')) # Get the dxf format index. if xformat is not None: col_data['format'] = xformat._get_dxf_index() # Store the column format for writing the cell data. # It doesn't matter if it is undefined. col_formats[col_id - 1] = xformat # Store the column data. table['columns'].append(col_data) # Write the column headers to the worksheet. if options['header_row']: self._write_string(first_row, col_num, col_data['name'], col_data['name_format']) col_id += 1 # Write the cell data if supplied. if 'data' in options: data = options['data'] i = 0 # For indexing the row data. for row in range(first_data_row, last_data_row + 1): j = 0 # For indexing the col data. 
for col in range(first_col, last_col + 1): if i < len(data) and j < len(data[i]): token = data[i][j] if j in col_formats: self._write(row, col, token, col_formats[j]) else: self._write(row, col, token, None) j += 1 i += 1 # Store the table data. self.tables.append(table) return table
[ "def", "add_table", "(", "self", ",", "first_row", ",", "first_col", ",", "last_row", ",", "last_col", ",", "options", "=", "None", ")", ":", "table", "=", "{", "}", "col_formats", "=", "{", "}", "if", "options", "is", "None", ":", "options", "=", "{", "}", "else", ":", "# Copy the user defined options so they aren't modified.", "options", "=", "options", ".", "copy", "(", ")", "if", "self", ".", "constant_memory", ":", "warn", "(", "\"add_table() isn't supported in 'constant_memory' mode\"", ")", "return", "-", "1", "# Check that row and col are valid without storing the values.", "if", "self", ".", "_check_dimensions", "(", "first_row", ",", "first_col", ",", "True", ",", "True", ")", ":", "return", "-", "2", "if", "self", ".", "_check_dimensions", "(", "last_row", ",", "last_col", ",", "True", ",", "True", ")", ":", "return", "-", "2", "# Swap last row/col for first row/col as necessary.", "if", "first_row", ">", "last_row", ":", "(", "first_row", ",", "last_row", ")", "=", "(", "last_row", ",", "first_row", ")", "if", "first_col", ">", "last_col", ":", "(", "first_col", ",", "last_col", ")", "=", "(", "last_col", ",", "first_col", ")", "# Valid input parameters.", "valid_parameter", "=", "{", "'autofilter'", ":", "True", ",", "'banded_columns'", ":", "True", ",", "'banded_rows'", ":", "True", ",", "'columns'", ":", "True", ",", "'data'", ":", "True", ",", "'first_column'", ":", "True", ",", "'header_row'", ":", "True", ",", "'last_column'", ":", "True", ",", "'name'", ":", "True", ",", "'style'", ":", "True", ",", "'total_row'", ":", "True", ",", "}", "# Check for valid input parameters.", "for", "param_key", "in", "options", ".", "keys", "(", ")", ":", "if", "param_key", "not", "in", "valid_parameter", ":", "warn", "(", "\"Unknown parameter '%s' in add_table()\"", "%", "param_key", ")", "return", "-", "3", "# Turn on Excel's defaults.", "options", "[", "'banded_rows'", "]", "=", "options", ".", "get", "(", "'banded_rows'", ",", "True", ")", "options", "[", "'header_row'", "]", "=", "options", ".", "get", "(", "'header_row'", ",", "True", ")", "options", "[", "'autofilter'", "]", "=", "options", ".", "get", "(", "'autofilter'", ",", "True", ")", "# Check that there are enough rows.", "num_rows", "=", "last_row", "-", "first_row", "if", "options", "[", "'header_row'", "]", ":", "num_rows", "-=", "1", "if", "num_rows", "<", "0", ":", "warn", "(", "\"Must have at least one data row in in add_table()\"", ")", "return", "-", "3", "# Set the table options.", "table", "[", "'show_first_col'", "]", "=", "options", ".", "get", "(", "'first_column'", ",", "False", ")", "table", "[", "'show_last_col'", "]", "=", "options", ".", "get", "(", "'last_column'", ",", "False", ")", "table", "[", "'show_row_stripes'", "]", "=", "options", ".", "get", "(", "'banded_rows'", ",", "False", ")", "table", "[", "'show_col_stripes'", "]", "=", "options", ".", "get", "(", "'banded_columns'", ",", "False", ")", "table", "[", "'header_row_count'", "]", "=", "options", ".", "get", "(", "'header_row'", ",", "0", ")", "table", "[", "'totals_row_shown'", "]", "=", "options", ".", "get", "(", "'total_row'", ",", "False", ")", "# Set the table name.", "if", "'name'", "in", "options", ":", "name", "=", "options", "[", "'name'", "]", "table", "[", "'name'", "]", "=", "name", "if", "' '", "in", "name", ":", "warn", "(", "\"Name '%s' in add_table() cannot contain spaces\"", "%", "force_unicode", "(", "name", ")", ")", "return", "-", "3", "# Warn if the name contains invalid chars as defined by Excel.", "if", "(", "not", 
"re", ".", "match", "(", "r'^[\\w\\\\][\\w\\\\.]*$'", ",", "name", ",", "re", ".", "UNICODE", ")", "or", "re", ".", "match", "(", "r'^\\d'", ",", "name", ")", ")", ":", "warn", "(", "\"Invalid Excel characters in add_table(): '%s'\"", "%", "force_unicode", "(", "name", ")", ")", "return", "-", "1", "# Warn if the name looks like a cell name.", "if", "re", ".", "match", "(", "r'^[a-zA-Z][a-zA-Z]?[a-dA-D]?[0-9]+$'", ",", "name", ")", ":", "warn", "(", "\"Name looks like a cell name in add_table(): '%s'\"", "%", "force_unicode", "(", "name", ")", ")", "return", "-", "1", "# Warn if the name looks like a R1C1 cell reference.", "if", "(", "re", ".", "match", "(", "r'^[rcRC]$'", ",", "name", ")", "or", "re", ".", "match", "(", "r'^[rcRC]\\d+[rcRC]\\d+$'", ",", "name", ")", ")", ":", "warn", "(", "\"Invalid name '%s' like a RC cell ref in add_table()\"", "%", "force_unicode", "(", "name", ")", ")", "return", "-", "1", "# Set the table style.", "if", "'style'", "in", "options", ":", "table", "[", "'style'", "]", "=", "options", "[", "'style'", "]", "if", "table", "[", "'style'", "]", "is", "None", ":", "table", "[", "'style'", "]", "=", "''", "# Remove whitespace from style name.", "table", "[", "'style'", "]", "=", "table", "[", "'style'", "]", ".", "replace", "(", "' '", ",", "''", ")", "else", ":", "table", "[", "'style'", "]", "=", "\"TableStyleMedium9\"", "# Set the data range rows (without the header and footer).", "first_data_row", "=", "first_row", "last_data_row", "=", "last_row", "if", "options", ".", "get", "(", "'header_row'", ")", ":", "first_data_row", "+=", "1", "if", "options", ".", "get", "(", "'total_row'", ")", ":", "last_data_row", "-=", "1", "# Set the table and autofilter ranges.", "table", "[", "'range'", "]", "=", "xl_range", "(", "first_row", ",", "first_col", ",", "last_row", ",", "last_col", ")", "table", "[", "'a_range'", "]", "=", "xl_range", "(", "first_row", ",", "first_col", ",", "last_data_row", ",", "last_col", ")", "# If the header row if off the default is to turn autofilter off.", "if", "not", "options", "[", "'header_row'", "]", ":", "options", "[", "'autofilter'", "]", "=", "0", "# Set the autofilter range.", "if", "options", "[", "'autofilter'", "]", ":", "table", "[", "'autofilter'", "]", "=", "table", "[", "'a_range'", "]", "# Add the table columns.", "col_id", "=", "1", "table", "[", "'columns'", "]", "=", "[", "]", "seen_names", "=", "{", "}", "for", "col_num", "in", "range", "(", "first_col", ",", "last_col", "+", "1", ")", ":", "# Set up the default column data.", "col_data", "=", "{", "'id'", ":", "col_id", ",", "'name'", ":", "'Column'", "+", "str", "(", "col_id", ")", ",", "'total_string'", ":", "''", ",", "'total_function'", ":", "''", ",", "'total_value'", ":", "0", ",", "'formula'", ":", "''", ",", "'format'", ":", "None", ",", "'name_format'", ":", "None", ",", "}", "# Overwrite the defaults with any user defined values.", "if", "'columns'", "in", "options", ":", "# Check if there are user defined values for this column.", "if", "col_id", "<=", "len", "(", "options", "[", "'columns'", "]", ")", ":", "user_data", "=", "options", "[", "'columns'", "]", "[", "col_id", "-", "1", "]", "else", ":", "user_data", "=", "None", "if", "user_data", ":", "# Get the column format.", "xformat", "=", "user_data", ".", "get", "(", "'format'", ",", "None", ")", "# Map user defined values to internal values.", "if", "user_data", ".", "get", "(", "'header'", ")", ":", "col_data", "[", "'name'", "]", "=", "user_data", "[", "'header'", "]", "# Excel requires unique 
case insensitive header names.", "header_name", "=", "col_data", "[", "'name'", "]", "name", "=", "header_name", ".", "lower", "(", ")", "if", "name", "in", "seen_names", ":", "warn", "(", "\"Duplicate header name in add_table(): '%s'\"", "%", "force_unicode", "(", "name", ")", ")", "return", "-", "1", "else", ":", "seen_names", "[", "name", "]", "=", "True", "col_data", "[", "'name_format'", "]", "=", "user_data", ".", "get", "(", "'header_format'", ")", "# Handle the column formula.", "if", "'formula'", "in", "user_data", "and", "user_data", "[", "'formula'", "]", ":", "formula", "=", "user_data", "[", "'formula'", "]", "# Remove the formula '=' sign if it exists.", "if", "formula", ".", "startswith", "(", "'='", ")", ":", "formula", "=", "formula", ".", "lstrip", "(", "'='", ")", "# Covert Excel 2010 \"@\" ref to 2007 \"#This Row\".", "formula", "=", "formula", ".", "replace", "(", "'@'", ",", "'[#This Row],'", ")", "col_data", "[", "'formula'", "]", "=", "formula", "for", "row", "in", "range", "(", "first_data_row", ",", "last_data_row", "+", "1", ")", ":", "self", ".", "_write_formula", "(", "row", ",", "col_num", ",", "formula", ",", "xformat", ")", "# Handle the function for the total row.", "if", "user_data", ".", "get", "(", "'total_function'", ")", ":", "function", "=", "user_data", "[", "'total_function'", "]", "# Massage the function name.", "function", "=", "function", ".", "lower", "(", ")", "function", "=", "function", ".", "replace", "(", "'_'", ",", "''", ")", "function", "=", "function", ".", "replace", "(", "' '", ",", "''", ")", "if", "function", "==", "'countnums'", ":", "function", "=", "'countNums'", "if", "function", "==", "'stddev'", ":", "function", "=", "'stdDev'", "col_data", "[", "'total_function'", "]", "=", "function", "formula", "=", "self", ".", "_table_function_to_formula", "(", "function", ",", "col_data", "[", "'name'", "]", ")", "value", "=", "user_data", ".", "get", "(", "'total_value'", ",", "0", ")", "self", ".", "_write_formula", "(", "last_row", ",", "col_num", ",", "formula", ",", "xformat", ",", "value", ")", "elif", "user_data", ".", "get", "(", "'total_string'", ")", ":", "# Total label only (not a function).", "total_string", "=", "user_data", "[", "'total_string'", "]", "col_data", "[", "'total_string'", "]", "=", "total_string", "self", ".", "_write_string", "(", "last_row", ",", "col_num", ",", "total_string", ",", "user_data", ".", "get", "(", "'format'", ")", ")", "# Get the dxf format index.", "if", "xformat", "is", "not", "None", ":", "col_data", "[", "'format'", "]", "=", "xformat", ".", "_get_dxf_index", "(", ")", "# Store the column format for writing the cell data.", "# It doesn't matter if it is undefined.", "col_formats", "[", "col_id", "-", "1", "]", "=", "xformat", "# Store the column data.", "table", "[", "'columns'", "]", ".", "append", "(", "col_data", ")", "# Write the column headers to the worksheet.", "if", "options", "[", "'header_row'", "]", ":", "self", ".", "_write_string", "(", "first_row", ",", "col_num", ",", "col_data", "[", "'name'", "]", ",", "col_data", "[", "'name_format'", "]", ")", "col_id", "+=", "1", "# Write the cell data if supplied.", "if", "'data'", "in", "options", ":", "data", "=", "options", "[", "'data'", "]", "i", "=", "0", "# For indexing the row data.", "for", "row", "in", "range", "(", "first_data_row", ",", "last_data_row", "+", "1", ")", ":", "j", "=", "0", "# For indexing the col data.", "for", "col", "in", "range", "(", "first_col", ",", "last_col", "+", "1", ")", ":", "if", "i", "<", "len", 
"(", "data", ")", "and", "j", "<", "len", "(", "data", "[", "i", "]", ")", ":", "token", "=", "data", "[", "i", "]", "[", "j", "]", "if", "j", "in", "col_formats", ":", "self", ".", "_write", "(", "row", ",", "col", ",", "token", ",", "col_formats", "[", "j", "]", ")", "else", ":", "self", ".", "_write", "(", "row", ",", "col", ",", "token", ",", "None", ")", "j", "+=", "1", "i", "+=", "1", "# Store the table data.", "self", ".", "tables", ".", "append", "(", "table", ")", "return", "table" ]
https://github.com/MrH0wl/Cloudmare/blob/65e5bc9888f9d362ab2abfb103ea6c1e869d67aa/thirdparty/xlsxwriter/worksheet.py#L2515-L2801
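A typical call against the public xlsxwriter API (file name and data are invented; note that this vendored copy returns the internal table dict on success rather than the 0 its docstring advertises):

```python
import xlsxwriter

workbook = xlsxwriter.Workbook("demo.xlsx")  # hypothetical output file
worksheet = workbook.add_worksheet()

# Zero-indexed bounds: rows 0..2 give one header row plus two data rows.
worksheet.add_table(0, 0, 2, 1, {
    "data": [["Apples", 10], ["Pears", 12]],
    "columns": [{"header": "Fruit"}, {"header": "Count"}],
})
workbook.close()
```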
catalyst-cooperative/pudl
40d176313e60dfa9d2481f63842ed23f08f1ad5f
src/pudl/transform/eia923.py
python
_map_prime_mover_sets
(prime_mover_set: np.ndarray)
Map unique prime mover combinations to a single prime mover code. In 2001-2019 data, the .value_counts() of the combinations is: (CA, CT) 750 (ST, CA) 101 (ST) 60 (CA) 17 (CS, ST, CT) 2 Args: prime_mover_set (np.ndarray): unique combinations of prime_mover_code Returns: str: single prime mover code
Map unique prime mover combinations to a single prime mover code.
[ "Map", "unique", "prime", "mover", "combinations", "to", "a", "single", "prime", "mover", "code", "." ]
def _map_prime_mover_sets(prime_mover_set: np.ndarray) -> str: """Map unique prime mover combinations to a single prime mover code. In 2001-2019 data, the .value_counts() of the combinations is: (CA, CT) 750 (ST, CA) 101 (ST) 60 (CA) 17 (CS, ST, CT) 2 Args: prime_mover_set (np.ndarray): unique combinations of prime_mover_code Returns: str: single prime mover code """ if len(prime_mover_set) == 1: # single valued return prime_mover_set[0] elif 'CA' in prime_mover_set: return 'CA' # arbitrary choice elif 'CS' in prime_mover_set: return 'CS' else: raise ValueError( "Dataset contains new kinds of duplicate boiler_fuel rows. " f"Prime movers are {prime_mover_set}" )
[ "def", "_map_prime_mover_sets", "(", "prime_mover_set", ":", "np", ".", "ndarray", ")", "->", "str", ":", "if", "len", "(", "prime_mover_set", ")", "==", "1", ":", "# single valued", "return", "prime_mover_set", "[", "0", "]", "elif", "'CA'", "in", "prime_mover_set", ":", "return", "'CA'", "# arbitrary choice", "elif", "'CS'", "in", "prime_mover_set", ":", "return", "'CS'", "else", ":", "raise", "ValueError", "(", "\"Dataset contains new kinds of duplicate boiler_fuel rows. \"", "f\"Prime movers are {prime_mover_set}\"", ")" ]
https://github.com/catalyst-cooperative/pudl/blob/40d176313e60dfa9d2481f63842ed23f08f1ad5f/src/pudl/transform/eia923.py#L703-L728
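Assuming the function above is in scope, the documented combinations map as follows:

```python
import numpy as np

print(_map_prime_mover_sets(np.array(["CA", "CT"])))  # 'CA' (arbitrary choice)
print(_map_prime_mover_sets(np.array(["ST"])))        # 'ST' (single valued)
# An unseen combination such as ("GT", "IC") raises ValueError.
```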
Tautulli/Tautulli
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
lib/future/backports/datetime.py
python
timezone.fromutc
(self, dt)
[]
def fromutc(self, dt): if isinstance(dt, datetime): if dt.tzinfo is not self: raise ValueError("fromutc: dt.tzinfo " "is not self") return dt + self._offset raise TypeError("fromutc() argument must be a datetime instance" " or None")
[ "def", "fromutc", "(", "self", ",", "dt", ")", ":", "if", "isinstance", "(", "dt", ",", "datetime", ")", ":", "if", "dt", ".", "tzinfo", "is", "not", "self", ":", "raise", "ValueError", "(", "\"fromutc: dt.tzinfo \"", "\"is not self\"", ")", "return", "dt", "+", "self", ".", "_offset", "raise", "TypeError", "(", "\"fromutc() argument must be a datetime instance\"", "\" or None\"", ")" ]
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/future/backports/datetime.py#L1910-L1917
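The stdlib timezone class that this backport mirrors behaves the same way: fromutc insists the datetime already carries this tzinfo, then applies the fixed offset.

```python
from datetime import datetime, timedelta, timezone

tz = timezone(timedelta(hours=2))
dt = datetime(2021, 1, 1, 12, 0, tzinfo=tz)   # treated as UTC wall time
print(tz.fromutc(dt))                         # 2021-01-01 14:00:00+02:00

try:
    tz.fromutc(datetime(2021, 1, 1, 12, 0))   # naive datetime
except ValueError as exc:
    print(exc)                                # fromutc: dt.tzinfo is not self
```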
syang1993/gst-tacotron
f28635c539d6a3a9ceece7be2acf8aa2fe3477b0
models/multihead_attention.py
python
MultiheadAttention._split_last_dimension
(self, x, num_heads)
return tf.reshape(x, x_shape[:-1] + [num_heads, dim // num_heads])
Reshape x to num_heads Returns: a Tensor with shape [batch, length_x, num_heads, dim_x/num_heads]
Reshape x to num_heads
[ "Reshape", "x", "to", "num_heads" ]
def _split_last_dimension(self, x, num_heads): '''Reshape x to num_heads Returns: a Tensor with shape [batch, length_x, num_heads, dim_x/num_heads] ''' x_shape = shape_list(x) dim = x_shape[-1] assert dim % num_heads == 0 return tf.reshape(x, x_shape[:-1] + [num_heads, dim // num_heads])
[ "def", "_split_last_dimension", "(", "self", ",", "x", ",", "num_heads", ")", ":", "x_shape", "=", "shape_list", "(", "x", ")", "dim", "=", "x_shape", "[", "-", "1", "]", "assert", "dim", "%", "num_heads", "==", "0", "return", "tf", ".", "reshape", "(", "x", ",", "x_shape", "[", ":", "-", "1", "]", "+", "[", "num_heads", ",", "dim", "//", "num_heads", "]", ")" ]
https://github.com/syang1993/gst-tacotron/blob/f28635c539d6a3a9ceece7be2acf8aa2fe3477b0/models/multihead_attention.py#L65-L74
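The same reshape expressed with NumPy for clarity (shapes are illustrative); only the last dimension is split:

```python
import numpy as np

batch, length, dim, num_heads = 2, 5, 8, 4
x = np.zeros((batch, length, dim))
assert dim % num_heads == 0
y = x.reshape(batch, length, num_heads, dim // num_heads)
print(y.shape)  # (2, 5, 4, 2)
```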
osmr/imgclsmob
f2993d3ce73a2f7ddba05da3891defb08547d504
tensorflow2/tf2cv/models/sepreresnet.py
python
sepreresnet200
(**kwargs)
return get_sepreresnet(blocks=200, model_name="sepreresnet200", **kwargs)
SE-PreResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters.
SE-PreResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model.
[ "SE", "-", "PreResNet", "-", "200", "model", "from", "Squeeze", "-", "and", "-", "Excitation", "Networks", "https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1709", ".", "01507", ".", "It", "s", "an", "experimental", "model", "." ]
def sepreresnet200(**kwargs): """ SE-PreResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=200, model_name="sepreresnet200", **kwargs)
[ "def", "sepreresnet200", "(", "*", "*", "kwargs", ")", ":", "return", "get_sepreresnet", "(", "blocks", "=", "200", ",", "model_name", "=", "\"sepreresnet200\"", ",", "*", "*", "kwargs", ")" ]
https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/tensorflow2/tf2cv/models/sepreresnet.py#L477-L489
lazylibrarian/LazyLibrarian
ae3c14e9db9328ce81765e094ab2a14ed7155624
lib/gntp/core.py
python
_GNTPBase.decode
(self, data, password=None)
Decode GNTP Message :param string data:
Decode GNTP Message
[ "Decode", "GNTP", "Message" ]
def decode(self, data, password=None): """Decode GNTP Message :param string data: """ self.password = password self.raw = shim.u(data) parts = self.raw.split('\r\n\r\n') self.info = self._parse_info(self.raw) self.headers = self._parse_dict(parts[0])
[ "def", "decode", "(", "self", ",", "data", ",", "password", "=", "None", ")", ":", "self", ".", "password", "=", "password", "self", ".", "raw", "=", "shim", ".", "u", "(", "data", ")", "parts", "=", "self", ".", "raw", ".", "split", "(", "'\\r\\n\\r\\n'", ")", "self", ".", "info", "=", "self", ".", "_parse_info", "(", "self", ".", "raw", ")", "self", ".", "headers", "=", "self", ".", "_parse_dict", "(", "parts", "[", "0", "]", ")" ]
https://github.com/lazylibrarian/LazyLibrarian/blob/ae3c14e9db9328ce81765e094ab2a14ed7155624/lib/gntp/core.py#L239-L248
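The header/body split the method relies on, shown on a minimal GNTP-style payload (the message text is invented for the demo):

```python
raw = "GNTP/1.0 REGISTER NONE\r\nApplication-Name: Demo\r\n\r\nbody"
parts = raw.split("\r\n\r\n")
print(parts[0].split("\r\n"))  # info line plus header lines
print(parts[1])                # remaining body section
```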
JimmXinu/FanFicFare
bc149a2deb2636320fe50a3e374af6eef8f61889
fanficfare/adapters/adapter_chireadscom.py
python
ChireadsComSiteAdapter.getSiteExampleURLs
(cls)
return 'https://%s/category/translatedtales/story-name' % cls.getSiteDomain()
[]
def getSiteExampleURLs(cls): return 'https://%s/category/translatedtales/story-name' % cls.getSiteDomain()
[ "def", "getSiteExampleURLs", "(", "cls", ")", ":", "return", "'https://%s/category/translatedtales/story-name'", "%", "cls", ".", "getSiteDomain", "(", ")" ]
https://github.com/JimmXinu/FanFicFare/blob/bc149a2deb2636320fe50a3e374af6eef8f61889/fanficfare/adapters/adapter_chireadscom.py#L57-L58
webrecorder/pywb
7ff789f1a8e246720dab7744617824aa1e7d06c9
pywb/utils/binsearch.py
python
iter_range
(reader, start, end, prev_size=0)
return end_iter
Creates an iterator which iterates over lines where start <= line < end (end exclusive)
Creates an iterator which iterates over lines where start <= line < end (end exclusive)
[ "Creates", "an", "iterator", "which", "iterates", "over", "lines", "where", "start", "<", "=", "line", "<", "end", "(", "end", "exclusive", ")" ]
def iter_range(reader, start, end, prev_size=0): """ Creates an iterator which iterates over lines where start <= line < end (end exclusive) """ iter_ = search(reader, start, prev_size=prev_size) end_iter = itertools.takewhile( lambda line: line < end, iter_) return end_iter
[ "def", "iter_range", "(", "reader", ",", "start", ",", "end", ",", "prev_size", "=", "0", ")", ":", "iter_", "=", "search", "(", "reader", ",", "start", ",", "prev_size", "=", "prev_size", ")", "end_iter", "=", "itertools", ".", "takewhile", "(", "lambda", "line", ":", "line", "<", "end", ",", "iter_", ")", "return", "end_iter" ]
https://github.com/webrecorder/pywb/blob/7ff789f1a8e246720dab7744617824aa1e7d06c9/pywb/utils/binsearch.py#L121-L133
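The takewhile half of the function in isolation; the binary `search` that first positions the reader at `start` is skipped here, with plain sorted lines standing in for it:

```python
import itertools

lines = iter([b"apple\n", b"banana\n", b"cherry\n"])
for line in itertools.takewhile(lambda l: l < b"cherry", lines):
    print(line)  # b'apple\n', b'banana\n' -- end is exclusive
```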
LumaPictures/pymel
fa88a3f4fa18e09bb8aa9bdf4dab53d984bada72
pymel/util/utilitytypes.py
python
LazyLoadModule._lazyModule_update
(self)
Used to update the contents of the LazyLoadModule with the contents of another dict.
Used to update the contents of the LazyLoadModule with the contents of another dict.
[ "Used", "to", "update", "the", "contents", "of", "the", "LazyLoadModule", "with", "the", "contents", "of", "another", "dict", "." ]
def _lazyModule_update(self): """ Used to update the contents of the LazyLoadModule with the contents of another dict. """ self.__dict__.update(self._lazyGlobals)
[ "def", "_lazyModule_update", "(", "self", ")", ":", "self", ".", "__dict__", ".", "update", "(", "self", ".", "_lazyGlobals", ")" ]
https://github.com/LumaPictures/pymel/blob/fa88a3f4fa18e09bb8aa9bdf4dab53d984bada72/pymel/util/utilitytypes.py#L731-L735
cupy/cupy
a47ad3105f0fe817a4957de87d98ddccb8c7491f
cupy/_core/_fusion_interface.py
python
_set_dtype_to_astype_dict
()
Set a dict with dtypes and astype ufuncs to `_dtype_to_astype_dict`. Creates a ufunc for type cast operations, and sets a dict keyed by the output array's dtype, with the corresponding astype ufuncs as values. This function is called at most once.
Set a dict with dtypes and astype ufuncs to `_dtype_to_astype_dict`.
[ "Set", "a", "dict", "with", "dtypes", "and", "astype", "ufuncs", "to", "_dtype_to_astype_dict", "." ]
def _set_dtype_to_astype_dict(): """Set a dict with dtypes and astype ufuncs to `_dtype_to_astype_dict`. Creates a ufunc for type cast operations, and set a dict with keys as the dtype of the output array and values as astype ufuncs. This function is called at most once. """ global _dtype_to_astype_dict _dtype_to_astype_dict = {} dtype_list = [numpy.dtype(type_char) for type_char in '?bhilqBHILQefdFD'] for t in dtype_list: name = 'astype_{}'.format(t) rules = tuple(['{}->{}'.format(s.char, t.char) for s in dtype_list]) command = 'out0 = static_cast< {} >(in0)'.format(get_typename(t)) _dtype_to_astype_dict[t] = core.create_ufunc(name, rules, command)
[ "def", "_set_dtype_to_astype_dict", "(", ")", ":", "global", "_dtype_to_astype_dict", "_dtype_to_astype_dict", "=", "{", "}", "dtype_list", "=", "[", "numpy", ".", "dtype", "(", "type_char", ")", "for", "type_char", "in", "'?bhilqBHILQefdFD'", "]", "for", "t", "in", "dtype_list", ":", "name", "=", "'astype_{}'", ".", "format", "(", "t", ")", "rules", "=", "tuple", "(", "[", "'{}->{}'", ".", "format", "(", "s", ".", "char", ",", "t", ".", "char", ")", "for", "s", "in", "dtype_list", "]", ")", "command", "=", "'out0 = static_cast< {} >(in0)'", ".", "format", "(", "get_typename", "(", "t", ")", ")", "_dtype_to_astype_dict", "[", "t", "]", "=", "core", ".", "create_ufunc", "(", "name", ",", "rules", ",", "command", ")" ]
https://github.com/cupy/cupy/blob/a47ad3105f0fe817a4957de87d98ddccb8c7491f/cupy/_core/_fusion_interface.py#L16-L32
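How the cast rules are assembled: each NumPy dtype contributes its one-letter type char, yielding one 'in->out' rule per source dtype.

```python
import numpy as np

dtype_list = [np.dtype(c) for c in "?bhilqBHILQefdFD"]
t = np.dtype("f")  # float32 as the output dtype
rules = tuple("{}->{}".format(s.char, t.char) for s in dtype_list)
print(rules[:4])  # ('?->f', 'b->f', 'h->f', 'i->f')
```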
mesalock-linux/mesapy
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
rpython/translator/driver.py
python
TranslationDriver.task_annotate
(self)
return s
Annotate
Annotate
[ "Annotate" ]
def task_annotate(self): """ Annotate """ # includes annotation and annotatation simplifications translator = self.translator policy = self.policy self.log.info('with policy: %s.%s' % (policy.__class__.__module__, policy.__class__.__name__)) annotator = translator.buildannotator(policy=policy) if self.secondary_entrypoints is not None: for func, inputtypes in self.secondary_entrypoints: if inputtypes == Ellipsis: continue annotator.build_types(func, inputtypes, False) if self.entry_point: s = annotator.build_types(self.entry_point, self.inputtypes) translator.entry_point_graph = annotator.bookkeeper.getdesc(self.entry_point).getuniquegraph() else: s = None self.sanity_check_annotation() if self.entry_point and self.standalone and s.knowntype != int: raise Exception("stand-alone program entry point must return an " "int (and not, e.g., None or always raise an " "exception).") annotator.complete() annotator.simplify() return s
[ "def", "task_annotate", "(", "self", ")", ":", "# includes annotation and annotatation simplifications", "translator", "=", "self", ".", "translator", "policy", "=", "self", ".", "policy", "self", ".", "log", ".", "info", "(", "'with policy: %s.%s'", "%", "(", "policy", ".", "__class__", ".", "__module__", ",", "policy", ".", "__class__", ".", "__name__", ")", ")", "annotator", "=", "translator", ".", "buildannotator", "(", "policy", "=", "policy", ")", "if", "self", ".", "secondary_entrypoints", "is", "not", "None", ":", "for", "func", ",", "inputtypes", "in", "self", ".", "secondary_entrypoints", ":", "if", "inputtypes", "==", "Ellipsis", ":", "continue", "annotator", ".", "build_types", "(", "func", ",", "inputtypes", ",", "False", ")", "if", "self", ".", "entry_point", ":", "s", "=", "annotator", ".", "build_types", "(", "self", ".", "entry_point", ",", "self", ".", "inputtypes", ")", "translator", ".", "entry_point_graph", "=", "annotator", ".", "bookkeeper", ".", "getdesc", "(", "self", ".", "entry_point", ")", ".", "getuniquegraph", "(", ")", "else", ":", "s", "=", "None", "self", ".", "sanity_check_annotation", "(", ")", "if", "self", ".", "entry_point", "and", "self", ".", "standalone", "and", "s", ".", "knowntype", "!=", "int", ":", "raise", "Exception", "(", "\"stand-alone program entry point must return an \"", "\"int (and not, e.g., None or always raise an \"", "\"exception).\"", ")", "annotator", ".", "complete", "(", ")", "annotator", ".", "simplify", "(", ")", "return", "s" ]
https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/rpython/translator/driver.py#L298-L327
taigaio/taiga-ncurses
65312098f2d167762e0dbd1c16019754ab64d068
taiga_ncurses/data.py
python
us_ref
(us)
return us.get("ref", "--")
[]
def us_ref(us): return us.get("ref", "--")
[ "def", "us_ref", "(", "us", ")", ":", "return", "us", ".", "get", "(", "\"ref\"", ",", "\"--\"", ")" ]
https://github.com/taigaio/taiga-ncurses/blob/65312098f2d167762e0dbd1c16019754ab64d068/taiga_ncurses/data.py#L69-L70
opendatateam/udata
a295cab3c0e8f086fea1853655011f361ac81b77
udata/rdf.py
python
want_rdf
()
return mimetype in ACCEPTED_MIME_TYPES
Check whether the client prefers RDF over the default HTML
Check whether the client prefers RDF over the default HTML
[ "Check", "whether", "the", "client", "prefers", "RDF", "over", "the", "default", "HTML" ]
def want_rdf(): '''Check whether the client prefers RDF over the default HTML''' mimetype = request.accept_mimetypes.best return mimetype in ACCEPTED_MIME_TYPES
[ "def", "want_rdf", "(", ")", ":", "mimetype", "=", "request", ".", "accept_mimetypes", ".", "best", "return", "mimetype", "in", "ACCEPTED_MIME_TYPES" ]
https://github.com/opendatateam/udata/blob/a295cab3c0e8f086fea1853655011f361ac81b77/udata/rdf.py#L105-L108
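The same check outside a request context, using the werkzeug Accept structure that backs request.accept_mimetypes (ACCEPTED_MIME_TYPES is an assumption here; the real set is defined elsewhere in udata.rdf):

```python
from werkzeug.datastructures import MIMEAccept

ACCEPTED_MIME_TYPES = {"application/rdf+xml", "text/turtle"}  # assumed for the demo
accept = MIMEAccept([("application/rdf+xml", 1), ("text/html", 0.5)])
print(accept.best in ACCEPTED_MIME_TYPES)  # True
```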
kneufeld/alkali
0b5d423ea584ae3d627fcf37b801898364e19c71
alkali/manager.py
python
Manager.pks
(self)
return list(self._instances.keys())
**property**: return all primary keys :rtype: ``list``
**property**: return all primary keys
[ "**", "property", "**", ":", "return", "all", "primary", "keys" ]
def pks(self): """ **property**: return all primary keys :rtype: ``list`` """ return list(self._instances.keys())
[ "def", "pks", "(", "self", ")", ":", "return", "list", "(", "self", ".", "_instances", ".", "keys", "(", ")", ")" ]
https://github.com/kneufeld/alkali/blob/0b5d423ea584ae3d627fcf37b801898364e19c71/alkali/manager.py#L62-L68
rolandshoemaker/CommonMark-py
c0786fbc05eb2276c9ccc6f8d9b47c261b854155
CommonMark/CommonMark.py
python
InlineParser.parseEntity
(self, inlines)
Attempt to parse an entity, adding to inlines if successful.
Attempt to parse an entity, adding to inlines if successful.
[ "Attempt", "to", "parse", "an", "entity", "adding", "to", "inlines", "if", "successful", "." ]
def parseEntity(self, inlines): """ Attempt to parse an entity, adding to inlines if successful.""" m = self.match( r"^&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});", re.IGNORECASE) if m: inlines.append(Block(t="Entity", c=m)) return len(m) else: return 0
[ "def", "parseEntity", "(", "self", ",", "inlines", ")", ":", "m", "=", "self", ".", "match", "(", "r\"^&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});\"", ",", "re", ".", "IGNORECASE", ")", "if", "m", ":", "inlines", ".", "append", "(", "Block", "(", "t", "=", "\"Entity\"", ",", "c", "=", "m", ")", ")", "return", "len", "(", "m", ")", "else", ":", "return", "0" ]
https://github.com/rolandshoemaker/CommonMark-py/blob/c0786fbc05eb2276c9ccc6f8d9b47c261b854155/CommonMark/CommonMark.py#L628-L636
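The entity pattern on its own: a match is consumed from the current position, and anything else falls through.

```python
import re

pattern = r"^&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});"
for text in ("&amp; rest", "&#x27;x", "& alone"):
    m = re.match(pattern, text, re.IGNORECASE)
    print(repr(text), "->", m.group(0) if m else None)
# '&amp; rest' -> '&amp;', '&#x27;x' -> '&#x27;', '& alone' -> None
```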
aajanki/yle-dl
b0aa1bb5d943fdbd9a18da2604f21bb2094eadd7
yledl/streamprobe.py
python
FullHDFlavorProber._drop_duplicates
(self, stream_flavors)
return unique.values()
[]
def _drop_duplicates(self, stream_flavors): def flavor_key(s): return (s.width, s.height, s.bitrate, next((x.url for x in s.streams), None)) unique = {flavor_key(s): s for s in stream_flavors} return unique.values()
[ "def", "_drop_duplicates", "(", "self", ",", "stream_flavors", ")", ":", "def", "flavor_key", "(", "s", ")", ":", "return", "(", "s", ".", "width", ",", "s", ".", "height", ",", "s", ".", "bitrate", ",", "next", "(", "(", "x", ".", "url", "for", "x", "in", "s", ".", "streams", ")", ",", "None", ")", ")", "unique", "=", "{", "flavor_key", "(", "s", ")", ":", "s", "for", "s", "in", "stream_flavors", "}", "return", "unique", ".", "values", "(", ")" ]
https://github.com/aajanki/yle-dl/blob/b0aa1bb5d943fdbd9a18da2604f21bb2094eadd7/yledl/streamprobe.py#L48-L53
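The dict-comprehension dedup in isolation: flavors sharing the same key collapse to a single survivor (field names are simplified here):

```python
flavors = [
    {"width": 1280, "url": "a"},
    {"width": 1920, "url": "b"},
    {"width": 1280, "url": "a"},  # duplicate of the first
]
unique = {(f["width"], f["url"]): f for f in flavors}
print(len(unique.values()))  # 2
```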
cool-RR/python_toolbox
cb9ef64b48f1d03275484d707dc5079b6701ad0c
python_toolbox/wx_tools/widgets/third_party/customtreectrl.py
python
CustomTreeCtrl.GetStateImageList
(self)
return self._imageListState
Returns the state image list associated with L{CustomTreeCtrl} (from which application-defined state images are taken).
Returns the state image list associated with L{CustomTreeCtrl} (from which application-defined state images are taken).
[ "Returns", "the", "state", "image", "list", "associated", "with", "L", "{", "CustomTreeCtrl", "}", "(", "from", "which", "application", "-", "defined", "state", "images", "are", "taken", ")", "." ]
def GetStateImageList(self): """ Returns the state image list associated with L{CustomTreeCtrl} (from which application-defined state images are taken). """ return self._imageListState
[ "def", "GetStateImageList", "(", "self", ")", ":", "return", "self", ".", "_imageListState" ]
https://github.com/cool-RR/python_toolbox/blob/cb9ef64b48f1d03275484d707dc5079b6701ad0c/python_toolbox/wx_tools/widgets/third_party/customtreectrl.py#L5134-L5140
armadillica/flamenco
7c3a5559ea200f9fd42ef61d3a09986fc0c44040
flamenco/managers/api.py
python
tasks_cancel_requested
(manager_id)
return task_ids
Returns a set of tasks of status cancel-requested.
Returns a set of tasks of status cancel-requested.
[ "Returns", "a", "set", "of", "tasks", "of", "status", "cancel", "-", "requested", "." ]
def tasks_cancel_requested(manager_id): """Returns a set of tasks of status cancel-requested.""" from flamenco import current_flamenco, eve_settings tasks_coll = current_flamenco.db('tasks') task_ids = { task['_id'] for task in tasks_coll.find({'manager': manager_id, 'status': 'cancel-requested'}, projection={'_id': 1}) } log.debug('Returning %i tasks to be canceled by manager %s', len(task_ids), manager_id) return task_ids
[ "def", "tasks_cancel_requested", "(", "manager_id", ")", ":", "from", "flamenco", "import", "current_flamenco", ",", "eve_settings", "tasks_coll", "=", "current_flamenco", ".", "db", "(", "'tasks'", ")", "task_ids", "=", "{", "task", "[", "'_id'", "]", "for", "task", "in", "tasks_coll", ".", "find", "(", "{", "'manager'", ":", "manager_id", ",", "'status'", ":", "'cancel-requested'", "}", ",", "projection", "=", "{", "'_id'", ":", "1", "}", ")", "}", "log", ".", "debug", "(", "'Returning %i tasks to be canceled by manager %s'", ",", "len", "(", "task_ids", ")", ",", "manager_id", ")", "return", "task_ids" ]
https://github.com/armadillica/flamenco/blob/7c3a5559ea200f9fd42ef61d3a09986fc0c44040/flamenco/managers/api.py#L313-L327
ansible-community/molecule
be98c8db07666fd1125f69020419b67fda48b559
src/molecule/provisioner/ansible.py
python
Ansible.check
(self)
Execute ``ansible-playbook`` against the converge playbook with the \ ``--check`` flag and returns None. :return: None
Execute ``ansible-playbook`` against the converge playbook with the \ ``--check`` flag and returns None.
[ "Execute", "ansible", "-", "playbook", "against", "the", "converge", "playbook", "with", "the", "\\", "--", "check", "flag", "and", "returns", "None", "." ]
def check(self): """ Execute ``ansible-playbook`` against the converge playbook with the \ ``--check`` flag and returns None. :return: None """ pb = self._get_ansible_playbook(self.playbooks.converge) pb.add_cli_arg("check", True) pb.execute()
[ "def", "check", "(", "self", ")", ":", "pb", "=", "self", ".", "_get_ansible_playbook", "(", "self", ".", "playbooks", ".", "converge", ")", "pb", ".", "add_cli_arg", "(", "\"check\"", ",", "True", ")", "pb", ".", "execute", "(", ")" ]
https://github.com/ansible-community/molecule/blob/be98c8db07666fd1125f69020419b67fda48b559/src/molecule/provisioner/ansible.py#L673-L682
AutodeskRoboticsLab/Mimic
85447f0d346be66988303a6a054473d92f1ed6f4
mimic/scripts/extern/pyqtgraph_0_11_0/pyqtgraph/graphicsItems/GradientEditorItem.py
python
TickSliderItem.setTickValue
(self, tick, val)
Set the position (along the slider) of the tick. ============== ================================================================== **Arguments:** tick Can be either an integer corresponding to the index of the tick or a Tick object. Ex: if you had a slider with 3 ticks and you wanted to change the middle tick, the index would be 1. val The desired position of the tick. If val is < 0, position will be set to 0. If val is > 1, position will be set to 1. ============== ==================================================================
Set the position (along the slider) of the tick. ============== ================================================================== **Arguments:** tick Can be either an integer corresponding to the index of the tick or a Tick object. Ex: if you had a slider with 3 ticks and you wanted to change the middle tick, the index would be 1. val The desired position of the tick. If val is < 0, position will be set to 0. If val is > 1, position will be set to 1. ============== ==================================================================
[ "Set", "the", "position", "(", "along", "the", "slider", ")", "of", "the", "tick", ".", "==============", "==================================================================", "**", "Arguments", ":", "**", "tick", "Can", "be", "either", "an", "integer", "corresponding", "to", "the", "index", "of", "the", "tick", "or", "a", "Tick", "object", ".", "Ex", ":", "if", "you", "had", "a", "slider", "with", "3", "ticks", "and", "you", "wanted", "to", "change", "the", "middle", "tick", "the", "index", "would", "be", "1", ".", "val", "The", "desired", "position", "of", "the", "tick", ".", "If", "val", "is", "<", "0", "position", "will", "be", "set", "to", "0", ".", "If", "val", "is", ">", "1", "position", "will", "be", "set", "to", "1", ".", "==============", "==================================================================" ]
def setTickValue(self, tick, val): ## public """ Set the position (along the slider) of the tick. ============== ================================================================== **Arguments:** tick Can be either an integer corresponding to the index of the tick or a Tick object. Ex: if you had a slider with 3 ticks and you wanted to change the middle tick, the index would be 1. val The desired position of the tick. If val is < 0, position will be set to 0. If val is > 1, position will be set to 1. ============== ================================================================== """ tick = self.getTick(tick) val = min(max(0.0, val), 1.0) x = val * self.length pos = tick.pos() pos.setX(x) tick.setPos(pos) self.ticks[tick] = val self.updateGradient()
[ "def", "setTickValue", "(", "self", ",", "tick", ",", "val", ")", ":", "## public", "tick", "=", "self", ".", "getTick", "(", "tick", ")", "val", "=", "min", "(", "max", "(", "0.0", ",", "val", ")", ",", "1.0", ")", "x", "=", "val", "*", "self", ".", "length", "pos", "=", "tick", ".", "pos", "(", ")", "pos", ".", "setX", "(", "x", ")", "tick", ".", "setPos", "(", "pos", ")", "self", ".", "ticks", "[", "tick", "]", "=", "val", "self", ".", "updateGradient", "(", ")" ]
https://github.com/AutodeskRoboticsLab/Mimic/blob/85447f0d346be66988303a6a054473d92f1ed6f4/mimic/scripts/extern/pyqtgraph_0_11_0/pyqtgraph/graphicsItems/GradientEditorItem.py#L298-L319
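The clamp-and-scale step the method performs before repositioning the tick (the slider length is illustrative):

```python
length = 100  # slider length in pixels
for val in (-0.2, 0.5, 1.3):
    clamped = min(max(0.0, val), 1.0)
    print(val, "->", clamped * length)  # -0.2 -> 0.0, 0.5 -> 50.0, 1.3 -> 100.0
```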
dmlc/gluon-cv
709bc139919c02f7454cb411311048be188cde64
gluoncv/model_zoo/residual_attentionnet.py
python
residualattentionnet236
(**kwargs)
return get_residualAttentionModel(224, 236, **kwargs)
r"""AttentionModel model from `"Residual Attention Network for Image Classification" <https://arxiv.org/pdf/1704.06904.pdf>`_ paper. Parameters ---------- input_size : int Input size of net. Options are 32,224. num_layers : int Numbers of layers. Options are 56, 92, 128, 164, 200, 236, 452. pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. norm_layer : object Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`) Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`. norm_kwargs : dict Additional `norm_layer` arguments, for example `num_devices=4` for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
r"""AttentionModel model from `"Residual Attention Network for Image Classification" <https://arxiv.org/pdf/1704.06904.pdf>`_ paper.
[ "r", "AttentionModel", "model", "from", "Residual", "Attention", "Network", "for", "Image", "Classification", "<https", ":", "//", "arxiv", ".", "org", "/", "pdf", "/", "1704", ".", "06904", ".", "pdf", ">", "_", "paper", "." ]
def residualattentionnet236(**kwargs): r"""AttentionModel model from `"Residual Attention Network for Image Classification" <https://arxiv.org/pdf/1704.06904.pdf>`_ paper. Parameters ---------- input_size : int Input size of net. Options are 32,224. num_layers : int Numbers of layers. Options are 56, 92, 128, 164, 200, 236, 452. pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. norm_layer : object Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`) Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`. norm_kwargs : dict Additional `norm_layer` arguments, for example `num_devices=4` for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`. """ return get_residualAttentionModel(224, 236, **kwargs)
[ "def", "residualattentionnet236", "(", "*", "*", "kwargs", ")", ":", "return", "get_residualAttentionModel", "(", "224", ",", "236", ",", "*", "*", "kwargs", ")" ]
https://github.com/dmlc/gluon-cv/blob/709bc139919c02f7454cb411311048be188cde64/gluoncv/model_zoo/residual_attentionnet.py#L772-L797
catalyst-team/catalyst
678dc06eda1848242df010b7f34adb572def2598
catalyst/callbacks/metrics/scikit_learn.py
python
SklearnBatchCallback.__init__
( self, keys: Mapping[str, Any], metric_fn: Union[Callable, str], metric_key: str, log_on_batch: bool = True, **metric_kwargs )
Init.
Init.
[ "Init", "." ]
def __init__( self, keys: Mapping[str, Any], metric_fn: Union[Callable, str], metric_key: str, log_on_batch: bool = True, **metric_kwargs ): """Init.""" if isinstance(metric_fn, str): metric_fn = sklearn.metrics.__dict__[metric_fn] metric_fn = partial(metric_fn, **metric_kwargs) super().__init__( metric=FunctionalBatchMetric(metric_fn=metric_fn, metric_key=metric_key), input_key=keys, target_key=keys, log_on_batch=log_on_batch, )
[ "def", "__init__", "(", "self", ",", "keys", ":", "Mapping", "[", "str", ",", "Any", "]", ",", "metric_fn", ":", "Union", "[", "Callable", ",", "str", "]", ",", "metric_key", ":", "str", ",", "log_on_batch", ":", "bool", "=", "True", ",", "*", "*", "metric_kwargs", ")", ":", "if", "isinstance", "(", "metric_fn", ",", "str", ")", ":", "metric_fn", "=", "sklearn", ".", "metrics", ".", "__dict__", "[", "metric_fn", "]", "metric_fn", "=", "partial", "(", "metric_fn", ",", "*", "*", "metric_kwargs", ")", "super", "(", ")", ".", "__init__", "(", "metric", "=", "FunctionalBatchMetric", "(", "metric_fn", "=", "metric_fn", ",", "metric_key", "=", "metric_key", ")", ",", "input_key", "=", "keys", ",", "target_key", "=", "keys", ",", "log_on_batch", "=", "log_on_batch", ",", ")" ]
https://github.com/catalyst-team/catalyst/blob/678dc06eda1848242df010b7f34adb572def2598/catalyst/callbacks/metrics/scikit_learn.py#L107-L125
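What the string branch amounts to, outside the callback (requires scikit-learn; the metric name and kwarg are illustrative):

```python
from functools import partial
import sklearn.metrics

metric_fn = sklearn.metrics.__dict__["f1_score"]  # resolve the callable by name
metric_fn = partial(metric_fn, average="macro")   # freeze **metric_kwargs
print(metric_fn([0, 1, 1], [0, 1, 0]))
```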
hzlzh/AlfredWorkflow.com
7055f14f6922c80ea5943839eb0caff11ae57255
Sources/Workflows/Pinboard-Tools/BeautifulSoup.py
python
UnicodeDammit._toUnicode
(self, data, encoding)
return newdata
Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases
Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases
[ "Given", "a", "string", "and", "its", "encoding", "decodes", "the", "string", "into", "Unicode", ".", "%encoding", "is", "a", "string", "recognized", "by", "encodings", ".", "aliases" ]
def _toUnicode(self, data, encoding): '''Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases''' # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) return newdata
[ "def", "_toUnicode", "(", "self", ",", "data", ",", "encoding", ")", ":", "# strip Byte Order Mark (if present)", "if", "(", "len", "(", "data", ")", ">=", "4", ")", "and", "(", "data", "[", ":", "2", "]", "==", "'\\xfe\\xff'", ")", "and", "(", "data", "[", "2", ":", "4", "]", "!=", "'\\x00\\x00'", ")", ":", "encoding", "=", "'utf-16be'", "data", "=", "data", "[", "2", ":", "]", "elif", "(", "len", "(", "data", ")", ">=", "4", ")", "and", "(", "data", "[", ":", "2", "]", "==", "'\\xff\\xfe'", ")", "and", "(", "data", "[", "2", ":", "4", "]", "!=", "'\\x00\\x00'", ")", ":", "encoding", "=", "'utf-16le'", "data", "=", "data", "[", "2", ":", "]", "elif", "data", "[", ":", "3", "]", "==", "'\\xef\\xbb\\xbf'", ":", "encoding", "=", "'utf-8'", "data", "=", "data", "[", "3", ":", "]", "elif", "data", "[", ":", "4", "]", "==", "'\\x00\\x00\\xfe\\xff'", ":", "encoding", "=", "'utf-32be'", "data", "=", "data", "[", "4", ":", "]", "elif", "data", "[", ":", "4", "]", "==", "'\\xff\\xfe\\x00\\x00'", ":", "encoding", "=", "'utf-32le'", "data", "=", "data", "[", "4", ":", "]", "newdata", "=", "unicode", "(", "data", ",", "encoding", ")", "return", "newdata" ]
https://github.com/hzlzh/AlfredWorkflow.com/blob/7055f14f6922c80ea5943839eb0caff11ae57255/Sources/Workflows/Pinboard-Tools/BeautifulSoup.py#L1842-L1865
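A Python 3 rendering of the BOM sniff (the original is Python 2 and ends in a unicode() call); codecs names the byte literals the method compares against. The UTF-32 branches and the guard distinguishing UTF-16 from UTF-32 BOMs are omitted for brevity:

```python
import codecs

def to_unicode(data: bytes) -> str:
    if data.startswith(codecs.BOM_UTF8):
        return data[3:].decode("utf-8")
    if data.startswith(codecs.BOM_UTF16_BE):
        return data[2:].decode("utf-16-be")
    if data.startswith(codecs.BOM_UTF16_LE):
        return data[2:].decode("utf-16-le")
    return data.decode("utf-8")  # assumption: default when no BOM is found

print(to_unicode(codecs.BOM_UTF8 + "héllo".encode("utf-8")))  # héllo
```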
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/tri/triplot.py
python
triplot
(ax, *args, **kwargs)
Draw an unstructured triangular grid as lines and/or markers. The triangulation to plot can be specified in one of two ways; either:: triplot(triangulation, ...) where triangulation is a :class:`matplotlib.tri.Triangulation` object, or :: triplot(x, y, ...) triplot(x, y, triangles, ...) triplot(x, y, triangles=triangles, ...) triplot(x, y, mask=mask, ...) triplot(x, y, triangles, mask=mask, ...) in which case a Triangulation object will be created. See :class:`~matplotlib.tri.Triangulation` for an explanation of these possibilities. The remaining args and kwargs are the same as for :meth:`~matplotlib.axes.Axes.plot`. **Example:** .. plot:: mpl_examples/pylab_examples/triplot_demo.py
Draw an unstructured triangular grid as lines and/or markers.
[ "Draw", "an", "unstructured", "triangular", "grid", "as", "lines", "and", "/", "or", "markers", "." ]
def triplot(ax, *args, **kwargs): """ Draw a unstructured triangular grid as lines and/or markers. The triangulation to plot can be specified in one of two ways; either:: triplot(triangulation, ...) where triangulation is a :class:`matplotlib.tri.Triangulation` object, or :: triplot(x, y, ...) triplot(x, y, triangles, ...) triplot(x, y, triangles=triangles, ...) triplot(x, y, mask=mask, ...) triplot(x, y, triangles, mask=mask, ...) in which case a Triangulation object will be created. See :class:`~matplotlib.tri.Triangulation` for a explanation of these possibilities. The remaining args and kwargs are the same as for :meth:`~matplotlib.axes.Axes.plot`. **Example:** .. plot:: mpl_examples/pylab_examples/triplot_demo.py """ import matplotlib.axes tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs) x = tri.x y = tri.y edges = tri.edges # If draw both lines and markers at the same time, e.g. # ax.plot(x[edges].T, y[edges].T, *args, **kwargs) # then the markers are drawn more than once which is incorrect if alpha<1. # Hence draw lines and markers separately. # Decode plot format string, e.g., 'ro-' fmt = '' if len(args) > 0: fmt = args[0] linestyle, marker, color = matplotlib.axes._process_plot_format(fmt) # Draw lines without markers, if lines are required. if linestyle is not None and linestyle is not 'None': kw = kwargs.copy() kw.pop('marker', None) # Ignore marker if set. kw['linestyle'] = ls_mapper[linestyle] kw['edgecolor'] = color kw['facecolor'] = None vertices = np.column_stack((x[edges].flatten(), y[edges].flatten())) codes = ([Path.MOVETO] + [Path.LINETO])*len(edges) path = Path(vertices, codes) pathpatch = PathPatch(path, **kw) ax.add_patch(pathpatch) # Draw markers without lines. # Should avoid drawing markers for points that are not in any triangle? kwargs['linestyle'] = '' ax.plot(x, y, *args, **kwargs)
[ "def", "triplot", "(", "ax", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "axes", "tri", ",", "args", ",", "kwargs", "=", "Triangulation", ".", "get_from_args_and_kwargs", "(", "*", "args", ",", "*", "*", "kwargs", ")", "x", "=", "tri", ".", "x", "y", "=", "tri", ".", "y", "edges", "=", "tri", ".", "edges", "# If draw both lines and markers at the same time, e.g.", "# ax.plot(x[edges].T, y[edges].T, *args, **kwargs)", "# then the markers are drawn more than once which is incorrect if alpha<1.", "# Hence draw lines and markers separately.", "# Decode plot format string, e.g., 'ro-'", "fmt", "=", "''", "if", "len", "(", "args", ")", ">", "0", ":", "fmt", "=", "args", "[", "0", "]", "linestyle", ",", "marker", ",", "color", "=", "matplotlib", ".", "axes", ".", "_process_plot_format", "(", "fmt", ")", "# Draw lines without markers, if lines are required.", "if", "linestyle", "is", "not", "None", "and", "linestyle", "is", "not", "'None'", ":", "kw", "=", "kwargs", ".", "copy", "(", ")", "kw", ".", "pop", "(", "'marker'", ",", "None", ")", "# Ignore marker if set.", "kw", "[", "'linestyle'", "]", "=", "ls_mapper", "[", "linestyle", "]", "kw", "[", "'edgecolor'", "]", "=", "color", "kw", "[", "'facecolor'", "]", "=", "None", "vertices", "=", "np", ".", "column_stack", "(", "(", "x", "[", "edges", "]", ".", "flatten", "(", ")", ",", "y", "[", "edges", "]", ".", "flatten", "(", ")", ")", ")", "codes", "=", "(", "[", "Path", ".", "MOVETO", "]", "+", "[", "Path", ".", "LINETO", "]", ")", "*", "len", "(", "edges", ")", "path", "=", "Path", "(", "vertices", ",", "codes", ")", "pathpatch", "=", "PathPatch", "(", "path", ",", "*", "*", "kw", ")", "ax", ".", "add_patch", "(", "pathpatch", ")", "# Draw markers without lines.", "# Should avoid drawing markers for points that are not in any triangle?", "kwargs", "[", "'linestyle'", "]", "=", "''", "ax", ".", "plot", "(", "x", ",", "y", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/tri/triplot.py#L8-L77
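The usual pyplot entry point exercises this function; the coordinates and triangle indices here are invented:

```python
import matplotlib.pyplot as plt

x = [0.0, 1.0, 0.5, 1.5]
y = [0.0, 0.0, 1.0, 1.0]
triangles = [[0, 1, 2], [1, 3, 2]]
plt.triplot(x, y, triangles, "go-")  # green circle markers joined by lines
plt.savefig("tri.png")
```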
researchmm/tasn
5dba8ccc096cedc63913730eeea14a9647911129
tasn-mxnet/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
python
greater
(attrs, inputs, proto_obj)
return 'broadcast_greater', attrs, inputs
Logical Greater operator with broadcasting.
Logical Greater operator with broadcasting.
[ "Logical", "Greater", "operator", "with", "broadcasting", "." ]
def greater(attrs, inputs, proto_obj): """Logical Greater operator with broadcasting.""" return 'broadcast_greater', attrs, inputs
[ "def", "greater", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "return", "'broadcast_greater'", ",", "attrs", ",", "inputs" ]
https://github.com/researchmm/tasn/blob/5dba8ccc096cedc63913730eeea14a9647911129/tasn-mxnet/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L169-L171
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
lib-python/2.7/httplib.py
python
HTTPConnection._output
(self, s)
Add a line of output to the current request buffer. Assumes that the line does *not* end with \\r\\n.
Add a line of output to the current request buffer.
[ "Add", "a", "line", "of", "output", "to", "the", "current", "request", "buffer", "." ]
def _output(self, s): """Add a line of output to the current request buffer. Assumes that the line does *not* end with \\r\\n. """ self._buffer.append(s)
[ "def", "_output", "(", "self", ",", "s", ")", ":", "self", ".", "_buffer", ".", "append", "(", "s", ")" ]
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/lib-python/2.7/httplib.py#L879-L884
ckan/ckan
b3b01218ad88ed3fb914b51018abe8b07b07bff3
ckan/lib/config_tool.py
python
parse_config
(input_lines)
return options
Returns a dict of Option objects, keyed by Option.id, given the lines in a config file. (Not using ConfigParser.set() as it does not store all the comments and ordering)
Returns a dict of Option objects, keyed by Option.id, given the lines in a config file. (Not using ConfigParser.set() as it does not store all the comments and ordering)
[ "Returns", "a", "dict", "of", "Option", "objects", "keyed", "by", "Option", ".", "id", "given", "the", "lines", "in", "a", "config", "file", ".", "(", "Not", "using", "ConfigParser", ".", "set", "()", "as", "it", "does", "not", "store", "all", "the", "comments", "and", "ordering", ")" ]
def parse_config(input_lines): ''' Returns a dict of Option objects, keyed by Option.id, given the lines in a config file. (Not using ConfigParser.set() as it does not store all the comments and ordering) ''' section = 'app:main' # default (for merge config files) options = {} for line in input_lines: # ignore blank lines if line.strip() == '': continue # section heading section_match = SECTION_RE.match(line) if section_match: section = section_match.group('header') continue # option option = parse_option_string(section, line) if option: options[option.id] = option return options
[ "def", "parse_config", "(", "input_lines", ")", ":", "section", "=", "'app:main'", "# default (for merge config files)", "options", "=", "{", "}", "for", "line", "in", "input_lines", ":", "# ignore blank lines", "if", "line", ".", "strip", "(", ")", "==", "''", ":", "continue", "# section heading", "section_match", "=", "SECTION_RE", ".", "match", "(", "line", ")", "if", "section_match", ":", "section", "=", "section_match", ".", "group", "(", "'header'", ")", "continue", "# option", "option", "=", "parse_option_string", "(", "section", ",", "line", ")", "if", "option", ":", "options", "[", "option", ".", "id", "]", "=", "option", "return", "options" ]
https://github.com/ckan/ckan/blob/b3b01218ad88ed3fb914b51018abe8b07b07bff3/ckan/lib/config_tool.py#L134-L156
demisto/content
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
Packs/Exabeam/Integrations/Exabeam/Exabeam.py
python
list_top_domains
(client: Client, args: Dict)
return human_readable, entry_context, top_domains_raw_data
Return top domains for a given sequence ID and type
Args:
    client: Client
    args: Dict
Return top domains for a given sequence ID and type
[ "Return", "session", "information", "for", "given", "session", "ID" ]
def list_top_domains(client: Client, args: Dict) -> Tuple[Any, Dict[str, Dict[Any, Any]], Optional[Any]]:
    """
    Return top domains for a given sequence ID and type
    Args:
        client: Client
        args: Dict
    """
    sequence_id = args.get('sequence_id')
    sequence_type = args.get('sequence_type')

    top_domains_raw_data = client.list_top_domains_request(sequence_id, sequence_type)
    top_domains = top_domains_raw_data.get('topDomains', [])

    entry_context = {'Exabeam.DataFeed(val.sequenceId && val.sequenceId === obj.sequenceId)': top_domains_raw_data}
    human_readable = tableToMarkdown(f'Sequence {sequence_id} Top Domains', top_domains, removeNull=True,
                                     headerTransform=pascalToSpace)

    return human_readable, entry_context, top_domains_raw_data
[ "def", "list_top_domains", "(", "client", ":", "Client", ",", "args", ":", "Dict", ")", "->", "Tuple", "[", "Any", ",", "Dict", "[", "str", ",", "Dict", "[", "Any", ",", "Any", "]", "]", ",", "Optional", "[", "Any", "]", "]", ":", "sequence_id", "=", "args", ".", "get", "(", "'sequence_id'", ")", "sequence_type", "=", "args", ".", "get", "(", "'sequence_type'", ")", "top_domains_raw_data", "=", "client", ".", "list_top_domains_request", "(", "sequence_id", ",", "sequence_type", ")", "top_domains", "=", "top_domains_raw_data", ".", "get", "(", "'topDomains'", ",", "[", "]", ")", "entry_context", "=", "{", "'Exabeam.DataFeed(val.sequenceId && val.sequenceId === obj.sequenceId)'", ":", "top_domains_raw_data", "}", "human_readable", "=", "tableToMarkdown", "(", "f'Sequence {sequence_id} Top Domains'", ",", "top_domains", ",", "removeNull", "=", "True", ",", "headerTransform", "=", "pascalToSpace", ")", "return", "human_readable", ",", "entry_context", ",", "top_domains_raw_data" ]
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/Exabeam/Integrations/Exabeam/Exabeam.py#L1322-L1340
rdiff-backup/rdiff-backup
321e0cd6e5e47d4c158a0172e47ab38240a8b653
src/rdiff_backup/SetConnections.py
python
get_connected_rpath
(cmd_pair)
Return normalized RPath from command pair (remote_cmd, filename)
Return normalized RPath from command pair (remote_cmd, filename)
[ "Return", "normalized", "RPath", "from", "command", "pair", "(", "remote_cmd", "filename", ")" ]
def get_connected_rpath(cmd_pair): """ Return normalized RPath from command pair (remote_cmd, filename) """ cmd, filename = cmd_pair if cmd: conn = _init_connection(cmd) else: conn = Globals.local_connection if conn: return rpath.RPath(conn, filename).normalize() else: return None
[ "def", "get_connected_rpath", "(", "cmd_pair", ")", ":", "cmd", ",", "filename", "=", "cmd_pair", "if", "cmd", ":", "conn", "=", "_init_connection", "(", "cmd", ")", "else", ":", "conn", "=", "Globals", ".", "local_connection", "if", "conn", ":", "return", "rpath", ".", "RPath", "(", "conn", ",", "filename", ")", ".", "normalize", "(", ")", "else", ":", "return", "None" ]
https://github.com/rdiff-backup/rdiff-backup/blob/321e0cd6e5e47d4c158a0172e47ab38240a8b653/src/rdiff_backup/SetConnections.py#L105-L117
ericmjl/nxviz
2979723238f2b68ea29cc07baa13b8d79d37729d
nxviz/polcart.py
python
to_polar
(x, y)
return r, theta
Converts cartesian x, y to polar r, theta.
Converts cartesian x, y to polar r, theta.
[ "Converts", "cartesian", "x", "y", "to", "polar", "r", "theta", "." ]
def to_polar(x, y): """ Converts cartesian x, y to polar r, theta. """ theta = atan2(y, x) r = sqrt(x ** 2 + y ** 2) if theta < 0: theta += 2 * np.pi return r, theta
[ "def", "to_polar", "(", "x", ",", "y", ")", ":", "theta", "=", "atan2", "(", "y", ",", "x", ")", "r", "=", "sqrt", "(", "x", "**", "2", "+", "y", "**", "2", ")", "if", "theta", "<", "0", ":", "theta", "+=", "2", "*", "np", ".", "pi", "return", "r", ",", "theta" ]
https://github.com/ericmjl/nxviz/blob/2979723238f2b68ea29cc07baa13b8d79d37729d/nxviz/polcart.py#L23-L33
cleverhans-lab/cleverhans
e5d00e537ce7ad6119ed5a8db1f0e9736d1f6e1d
cleverhans_v3.1.0/cleverhans/attack_bundling.py
python
AttackGoal.get_criteria
(self, sess, model, advx, y, batch_size=BATCH_SIZE)
return out
Returns a dictionary mapping the name of each criterion to a NumPy
array containing the value of that criterion for each adversarial
example.
Subclasses can add extra criteria by implementing the `extra_criteria`
method.

:param sess: tf.Session
:param model: cleverhans.model.Model
:param advx: numpy array containing the adversarial examples made so far
  by earlier work in the bundling process
:param y: numpy array containing true labels
:param batch_size: int, batch size
Returns a dictionary mapping the name of each criterion to a NumPy array containing the value of that criterion for each adversarial example. Subclasses can add extra criteria by implementing the `extra_criteria` method.
[ "Returns", "a", "dictionary", "mapping", "the", "name", "of", "each", "criterion", "to", "a", "NumPy", "array", "containing", "the", "value", "of", "that", "criterion", "for", "each", "adversarial", "example", ".", "Subclasses", "can", "add", "extra", "criteria", "by", "implementing", "the", "extra_criteria", "method", "." ]
def get_criteria(self, sess, model, advx, y, batch_size=BATCH_SIZE):
    """
    Returns a dictionary mapping the name of each criterion to a NumPy
    array containing the value of that criterion for each adversarial
    example.
    Subclasses can add extra criteria by implementing the `extra_criteria`
    method.

    :param sess: tf.Session
    :param model: cleverhans.model.Model
    :param advx: numpy array containing the adversarial examples made so far
      by earlier work in the bundling process
    :param y: numpy array containing true labels
    :param batch_size: int, batch size
    """

    names, factory = self.extra_criteria()
    factory = _CriteriaFactory(model, factory)
    results = batch_eval_multi_worker(
        sess, factory, [advx, y], batch_size=batch_size, devices=devices
    )
    names = ["correctness", "confidence"] + names
    out = dict(safe_zip(names, results))
    return out
[ "def", "get_criteria", "(", "self", ",", "sess", ",", "model", ",", "advx", ",", "y", ",", "batch_size", "=", "BATCH_SIZE", ")", ":", "names", ",", "factory", "=", "self", ".", "extra_criteria", "(", ")", "factory", "=", "_CriteriaFactory", "(", "model", ",", "factory", ")", "results", "=", "batch_eval_multi_worker", "(", "sess", ",", "factory", ",", "[", "advx", ",", "y", "]", ",", "batch_size", "=", "batch_size", ",", "devices", "=", "devices", ")", "names", "=", "[", "\"correctness\"", ",", "\"confidence\"", "]", "+", "names", "out", "=", "dict", "(", "safe_zip", "(", "names", ",", "results", ")", ")", "return", "out" ]
https://github.com/cleverhans-lab/cleverhans/blob/e5d00e537ce7ad6119ed5a8db1f0e9736d1f6e1d/cleverhans_v3.1.0/cleverhans/attack_bundling.py#L648-L671
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/lib-tk/Tkinter.py
python
BaseWidget.__init__
(self, master, widgetName, cnf={}, kw={}, extra=())
Construct a widget with the parent widget MASTER, a name WIDGETNAME and appropriate options.
Construct a widget with the parent widget MASTER, a name WIDGETNAME and appropriate options.
[ "Construct", "a", "widget", "with", "the", "parent", "widget", "MASTER", "a", "name", "WIDGETNAME", "and", "appropriate", "options", "." ]
def __init__(self, master, widgetName, cnf={}, kw={}, extra=()): """Construct a widget with the parent widget MASTER, a name WIDGETNAME and appropriate options.""" if kw: cnf = _cnfmerge((cnf, kw)) self.widgetName = widgetName BaseWidget._setup(self, master, cnf) if self._tclCommands is None: self._tclCommands = [] classes = [] for k in cnf.keys(): if type(k) is ClassType: classes.append((k, cnf[k])) del cnf[k] self.tk.call( (widgetName, self._w) + extra + self._options(cnf)) for k, v in classes: k.configure(self, v)
[ "def", "__init__", "(", "self", ",", "master", ",", "widgetName", ",", "cnf", "=", "{", "}", ",", "kw", "=", "{", "}", ",", "extra", "=", "(", ")", ")", ":", "if", "kw", ":", "cnf", "=", "_cnfmerge", "(", "(", "cnf", ",", "kw", ")", ")", "self", ".", "widgetName", "=", "widgetName", "BaseWidget", ".", "_setup", "(", "self", ",", "master", ",", "cnf", ")", "if", "self", ".", "_tclCommands", "is", "None", ":", "self", ".", "_tclCommands", "=", "[", "]", "classes", "=", "[", "]", "for", "k", "in", "cnf", ".", "keys", "(", ")", ":", "if", "type", "(", "k", ")", "is", "ClassType", ":", "classes", ".", "append", "(", "(", "k", ",", "cnf", "[", "k", "]", ")", ")", "del", "cnf", "[", "k", "]", "self", ".", "tk", ".", "call", "(", "(", "widgetName", ",", "self", ".", "_w", ")", "+", "extra", "+", "self", ".", "_options", "(", "cnf", ")", ")", "for", "k", ",", "v", "in", "classes", ":", "k", ".", "configure", "(", "self", ",", "v", ")" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/lib-tk/Tkinter.py#L2021-L2038
robotlearn/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
pyrobolearn/simulators/dart.py
python
Dart.get_link_world_angular_velocities
(self, body_id, link_ids)
return [skeleton.getBodyNode(link + 1).getAngularVelocity().reshape(-1) for link in link_ids]
Return the angular velocity of the link(s) in the Cartesian world space coordinates. Args: body_id (int): unique body id. link_ids (list[int]): list of link indices. Returns: if 1 link: np.array[float[3]]: angular velocity of the link in the Cartesian world space if multiple links: np.array[float[N,3]]: angular velocity of each link
Return the angular velocity of the link(s) in the Cartesian world space coordinates.
[ "Return", "the", "angular", "velocity", "of", "the", "link", "(", "s", ")", "in", "the", "Cartesian", "world", "space", "coordinates", "." ]
def get_link_world_angular_velocities(self, body_id, link_ids):
    """
    Return the angular velocity of the link(s) in the Cartesian world space coordinates.

    Args:
        body_id (int): unique body id.
        link_ids (list[int]): list of link indices.

    Returns:
        if 1 link:
            np.array[float[3]]: angular velocity of the link in the Cartesian world space
        if multiple links:
            np.array[float[N,3]]: angular velocity of each link
    """
    skeleton = self.world.getSkeleton(body_id)
    if isinstance(link_ids, int):
        return skeleton.getBodyNode(link_ids + 1).getAngularVelocity().reshape(-1)
    return [skeleton.getBodyNode(link + 1).getAngularVelocity().reshape(-1) for link in link_ids]
[ "def", "get_link_world_angular_velocities", "(", "self", ",", "body_id", ",", "link_ids", ")", ":", "skeleton", "=", "self", ".", "world", ".", "getSkeleton", "(", "body_id", ")", "if", "isinstance", "(", "link_ids", ",", "int", ")", ":", "return", "skeleton", ".", "getBodyNode", "(", "link_ids", "+", "1", ")", ".", "getAngularVelocity", "(", ")", ".", "reshape", "(", "-", "1", ")", "return", "[", "skeleton", ".", "getBodyNode", "(", "link", "+", "1", ")", ".", "getAngularVelocity", ".", "reshape", "(", "-", "1", ")", "for", "link", "in", "link_ids", "]" ]
https://github.com/robotlearn/pyrobolearn/blob/9cd7c060723fda7d2779fa255ac998c2c82b8436/pyrobolearn/simulators/dart.py#L1812-L1831
kubernetes-client/python
47b9da9de2d02b2b7a34fbe05afb44afd130d73a
kubernetes/client/models/v1_secret.py
python
V1Secret.to_dict
(self)
return result
Returns the model properties as a dict
Returns the model properties as a dict
[ "Returns", "the", "model", "properties", "as", "a", "dict" ]
def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result
[ "def", "to_dict", "(", "self", ")", ":", "result", "=", "{", "}", "for", "attr", ",", "_", "in", "six", ".", "iteritems", "(", "self", ".", "openapi_types", ")", ":", "value", "=", "getattr", "(", "self", ",", "attr", ")", "if", "isinstance", "(", "value", ",", "list", ")", ":", "result", "[", "attr", "]", "=", "list", "(", "map", "(", "lambda", "x", ":", "x", ".", "to_dict", "(", ")", "if", "hasattr", "(", "x", ",", "\"to_dict\"", ")", "else", "x", ",", "value", ")", ")", "elif", "hasattr", "(", "value", ",", "\"to_dict\"", ")", ":", "result", "[", "attr", "]", "=", "value", ".", "to_dict", "(", ")", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "result", "[", "attr", "]", "=", "dict", "(", "map", "(", "lambda", "item", ":", "(", "item", "[", "0", "]", ",", "item", "[", "1", "]", ".", "to_dict", "(", ")", ")", "if", "hasattr", "(", "item", "[", "1", "]", ",", "\"to_dict\"", ")", "else", "item", ",", "value", ".", "items", "(", ")", ")", ")", "else", ":", "result", "[", "attr", "]", "=", "value", "return", "result" ]
https://github.com/kubernetes-client/python/blob/47b9da9de2d02b2b7a34fbe05afb44afd130d73a/kubernetes/client/models/v1_secret.py#L244-L266
1040003585/WebScrapingWithPython
a770fa5b03894076c8c9539b1ffff34424ffc016
ResourceCode/wswp-places-c573d29efa3a/web2py/gluon/widget.py
python
run_system_tests
(options)
Runs unittests for gluon.tests
Runs unittests for gluon.tests
[ "Runs", "unittests", "for", "gluon", ".", "tests" ]
def run_system_tests(options): """ Runs unittests for gluon.tests """ import subprocess major_version = sys.version_info[0] minor_version = sys.version_info[1] if major_version == 2: if minor_version in (6,): sys.stderr.write('Python 2.6\n') ret = subprocess.call(['unit2', '-v', 'gluon.tests']) elif minor_version in (7,): call_args = [sys.executable, '-m', 'unittest', '-v', 'gluon.tests'] if options.with_coverage: try: import coverage coverage_config = os.environ.get( "COVERAGE_PROCESS_START", os.path.join('gluon', 'tests', 'coverage.ini')) call_args = ['coverage', 'run', '--rcfile=%s' % coverage_config, '-m', 'unittest', '-v', 'gluon.tests'] except: sys.stderr.write('Coverage was not installed, skipping\n') sys.stderr.write("Python 2.7\n") ret = subprocess.call(call_args) else: sys.stderr.write("unknown python 2.x version\n") ret = 256 else: sys.stderr.write("Only Python 2.x supported.\n") ret = 256 sys.exit(ret and 1)
[ "def", "run_system_tests", "(", "options", ")", ":", "import", "subprocess", "major_version", "=", "sys", ".", "version_info", "[", "0", "]", "minor_version", "=", "sys", ".", "version_info", "[", "1", "]", "if", "major_version", "==", "2", ":", "if", "minor_version", "in", "(", "6", ",", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'Python 2.6\\n'", ")", "ret", "=", "subprocess", ".", "call", "(", "[", "'unit2'", ",", "'-v'", ",", "'gluon.tests'", "]", ")", "elif", "minor_version", "in", "(", "7", ",", ")", ":", "call_args", "=", "[", "sys", ".", "executable", ",", "'-m'", ",", "'unittest'", ",", "'-v'", ",", "'gluon.tests'", "]", "if", "options", ".", "with_coverage", ":", "try", ":", "import", "coverage", "coverage_config", "=", "os", ".", "environ", ".", "get", "(", "\"COVERAGE_PROCESS_START\"", ",", "os", ".", "path", ".", "join", "(", "'gluon'", ",", "'tests'", ",", "'coverage.ini'", ")", ")", "call_args", "=", "[", "'coverage'", ",", "'run'", ",", "'--rcfile=%s'", "%", "coverage_config", ",", "'-m'", ",", "'unittest'", ",", "'-v'", ",", "'gluon.tests'", "]", "except", ":", "sys", ".", "stderr", ".", "write", "(", "'Coverage was not installed, skipping\\n'", ")", "sys", ".", "stderr", ".", "write", "(", "\"Python 2.7\\n\"", ")", "ret", "=", "subprocess", ".", "call", "(", "call_args", ")", "else", ":", "sys", ".", "stderr", ".", "write", "(", "\"unknown python 2.x version\\n\"", ")", "ret", "=", "256", "else", ":", "sys", ".", "stderr", ".", "write", "(", "\"Only Python 2.x supported.\\n\"", ")", "ret", "=", "256", "sys", ".", "exit", "(", "ret", "and", "1", ")" ]
https://github.com/1040003585/WebScrapingWithPython/blob/a770fa5b03894076c8c9539b1ffff34424ffc016/ResourceCode/wswp-places-c573d29efa3a/web2py/gluon/widget.py#L52-L85
google/trax
d6cae2067dedd0490b78d831033607357e975015
trax/layers/research/rel_attention.py
python
RelativeAttentionLMLayer
(d_feature, context_bias_layer, location_bias_layer, total_kv_pooling, separate_cls=False, n_heads=1, dropout=0.0, mode='train')
return cb.Serial( CreateAttentionMaskLayer(), # q, k, v, mask attention, # vecs, mask cb.Select([0], n_in=2), # vecs )
Returns a layer that maps (q, k, v) to (activations).

Same as the standard relative attention layer, but it additionally prepares
a mask, based on the sizes of the queries and keys, that masks out the future.
Masking the future is the concept primarily used for Language Modelling.

Args:
  d_feature: Depth/dimensionality of feature embedding.
  context_bias_layer: Global context bias from Transformer XL's attention.
    There should be one such layer shared for all relative attention layers.
  location_bias_layer: Global location bias from Transformer XL's attention.
    There should be one such layer shared for all relative attention layers.
  total_kv_pooling: Accumulated pool size of keys/values used at this layer.
  separate_cls: True/False if we use separate_cls in calculations.
  n_heads: Number of attention heads.
  dropout: Probabilistic rate for internal dropout applied to attention
    activations (based on query-key pairs) before dotting them with values.
  mode: One of `'train'`, `'eval'`, or `'predict'`.
Returns a layer that maps (q, k, v) to (activations).
[ "Returns", "a", "layer", "that", "maps", "(", "q", "k", "v", ")", "to", "(", "activations", ")", "." ]
def RelativeAttentionLMLayer(d_feature,
                             context_bias_layer,
                             location_bias_layer,
                             total_kv_pooling,
                             separate_cls=False,
                             n_heads=1,
                             dropout=0.0,
                             mode='train'):
  """Returns a layer that maps (q, k, v) to (activations).

  Same as the standard relative attention layer, but it additionally prepares
  a mask, based on the sizes of the queries and keys, that masks out the
  future. Masking the future is the concept primarily used for Language
  Modelling.

  Args:
    d_feature: Depth/dimensionality of feature embedding.
    context_bias_layer: Global context bias from Transformer XL's attention.
      There should be one such layer shared for all relative attention layers.
    location_bias_layer: Global location bias from Transformer XL's attention.
      There should be one such layer shared for all relative attention layers.
    total_kv_pooling: Accumulated pool size of keys/values used at this layer.
    separate_cls: True/False if we use separate_cls in calculations.
    n_heads: Number of attention heads.
    dropout: Probabilistic rate for internal dropout applied to attention
      activations (based on query-key pairs) before dotting them with values.
    mode: One of `'train'`, `'eval'`, or `'predict'`.
  """

  attention = RelativeAttentionLayer(
      d_feature,
      context_bias_layer,
      location_bias_layer,
      total_kv_pooling,
      separate_cls,
      n_heads=n_heads,
      dropout=dropout,
      mode=mode)

  return cb.Serial(
      CreateAttentionMaskLayer(),  # q, k, v, mask
      attention,  # vecs, mask
      cb.Select([0], n_in=2),  # vecs
  )
[ "def", "RelativeAttentionLMLayer", "(", "d_feature", ",", "context_bias_layer", ",", "location_bias_layer", ",", "total_kv_pooling", ",", "separate_cls", "=", "False", ",", "n_heads", "=", "1", ",", "dropout", "=", "0.0", ",", "mode", "=", "'train'", ")", ":", "attention", "=", "RelativeAttentionLayer", "(", "d_feature", ",", "context_bias_layer", ",", "location_bias_layer", ",", "total_kv_pooling", ",", "separate_cls", ",", "n_heads", "=", "n_heads", ",", "dropout", "=", "dropout", ",", "mode", "=", "mode", ")", "return", "cb", ".", "Serial", "(", "CreateAttentionMaskLayer", "(", ")", ",", "# q, k, v, mask", "attention", ",", "# vecs, mask", "cb", ".", "Select", "(", "[", "0", "]", ",", "n_in", "=", "2", ")", ",", "# vecs", ")" ]
https://github.com/google/trax/blob/d6cae2067dedd0490b78d831033607357e975015/trax/layers/research/rel_attention.py#L160-L201
xtiankisutsa/MARA_Framework
ac4ac88bfd38f33ae8780a606ed09ab97177c562
tools/Smali-CFGs/Flow.py
python
Block.add_inst
(self, inst)
Just add one instruction to our set of instructions.
Just add one instruction to our set of instructions.
[ "Just", "add", "one", "instruction", "to", "our", "set", "of", "instructions", "." ]
def add_inst(self, inst): """ Just add one instruction to our set of instructions. """ self.instructions.append(inst)
[ "def", "add_inst", "(", "self", ",", "inst", ")", ":", "self", ".", "instructions", ".", "append", "(", "inst", ")" ]
https://github.com/xtiankisutsa/MARA_Framework/blob/ac4ac88bfd38f33ae8780a606ed09ab97177c562/tools/Smali-CFGs/Flow.py#L93-L95
bitcraft/pyscroll
da826cd81abd48de8929601b07f39d4375b9c33e
pyscroll/orthographic.py
python
BufferedRenderer.translate_point
(self, point: Vector2D)
Translate world coordinates and return screen coordinates. Args: point: point to translate
Translate world coordinates and return screen coordinates.
[ "Translate", "world", "coordinates", "and", "return", "screen", "coordinates", "." ]
def translate_point(self, point: Vector2D) -> Vector2DInt:
    """
    Translate world coordinates and return screen coordinates.

    Args:
        point: point to translate

    """
    mx, my = self.get_center_offset()
    if self._zoom_level == 1.0:
        return int(point[0] + mx), int(point[1] + my)
    else:
        return (
            int(round((point[0] + mx) * self._real_ratio_x)),
            int(round((point[1] + my) * self._real_ratio_y))
        )
[ "def", "translate_point", "(", "self", ",", "point", ":", "Vector2D", ")", "->", "Vector2DInt", ":", "mx", ",", "my", "=", "self", ".", "get_center_offset", "(", ")", "if", "self", ".", "_zoom_level", "==", "1.0", ":", "return", "int", "(", "point", "[", "0", "]", "+", "mx", ")", ",", "int", "(", "point", "[", "1", "]", "+", "my", ")", "else", ":", "return", "(", "int", "(", "round", "(", "(", "point", "[", "0", "]", "+", "mx", ")", ")", "*", "self", ".", "_real_ratio_x", ")", ",", "int", "(", "round", "(", "(", "point", "[", "1", "]", "+", "my", ")", "*", "self", ".", "_real_ratio_y", ")", ")", ")" ]
https://github.com/bitcraft/pyscroll/blob/da826cd81abd48de8929601b07f39d4375b9c33e/pyscroll/orthographic.py#L314-L329
chipmuenk/pyfda
665310b8548a940a575c0e5ff4bba94608d9ac26
pyfda/filter_factory.py
python
FilterFactory.call_fil_method
(self, method, fil_dict, fc = None)
return self.err_code
Instantiate the filter design class passed as string ``fc`` with the globally accessible handle ``fil_inst``. If ``fc = None``, use the previously instantiated filter design class. Next, call the design method passed as string ``method`` of the instantiated filter design class. Parameters ---------- method : string The name of the design method to be called (e.g. 'LPmin') fil_dict : dictionary A dictionary with all the filter specs that is passed to the actual filter design routine. This is usually a copy of ``fb.fil[0]`` The results of the filter design routine are written back to the same dict. fc : string (optional, default: None) The name of the filter design class to be instantiated. When nothing is specified, the last filter selection is used. Returns ------- err_code : int one of the following error codes: :-1: filter design operation has been cancelled by user :0: filter design method exists and is callable :16: passed method name is not a string :17: filter design method does not exist in class :18: filter design error containing "order is too high" :19: filter design error containing "failure to converge" :99: unknown error Examples -------- >>> call_fil_method("LPmin", fil[0], fc="cheby1") The example first creates an instance of the filter class 'cheby1' and then performs the actual filter design by calling the method 'LPmin', passing the global filter dictionary ``fil[0]`` as the parameter.
Instantiate the filter design class passed as string ``fc`` with the globally accessible handle ``fil_inst``. If ``fc = None``, use the previously instantiated filter design class.
[ "Instantiate", "the", "filter", "design", "class", "passed", "as", "string", "fc", "with", "the", "globally", "accessible", "handle", "fil_inst", ".", "If", "fc", "=", "None", "use", "the", "previously", "instantiated", "filter", "design", "class", "." ]
def call_fil_method(self, method, fil_dict, fc = None): """ Instantiate the filter design class passed as string ``fc`` with the globally accessible handle ``fil_inst``. If ``fc = None``, use the previously instantiated filter design class. Next, call the design method passed as string ``method`` of the instantiated filter design class. Parameters ---------- method : string The name of the design method to be called (e.g. 'LPmin') fil_dict : dictionary A dictionary with all the filter specs that is passed to the actual filter design routine. This is usually a copy of ``fb.fil[0]`` The results of the filter design routine are written back to the same dict. fc : string (optional, default: None) The name of the filter design class to be instantiated. When nothing is specified, the last filter selection is used. Returns ------- err_code : int one of the following error codes: :-1: filter design operation has been cancelled by user :0: filter design method exists and is callable :16: passed method name is not a string :17: filter design method does not exist in class :18: filter design error containing "order is too high" :19: filter design error containing "failure to converge" :99: unknown error Examples -------- >>> call_fil_method("LPmin", fil[0], fc="cheby1") The example first creates an instance of the filter class 'cheby1' and then performs the actual filter design by calling the method 'LPmin', passing the global filter dictionary ``fil[0]`` as the parameter. """ if self.err_code >= 16 or self.err_code < 0: self.err_code = 0 # # clear previous method call error err_string = "" if fc: # filter design class was part of the argument, (re-)create class instance self.err_code = self.create_fil_inst(fc) # Error during filter design class instantiation (class fc could not be instantiated) if self.err_code > 0: err_string = "Filter design class could not be instantiated, see previous error message." # Test whether 'method' is a string (Py3): elif not isinstance(method, str): err_string = "Method name '{0}' is not a string.".format(method) self.err_code = 16 # method does not exist in filter class: elif not hasattr(fil_inst, method): err_string = "Method '{0}' doesn't exist in class '{1}'.".format(method, fil_inst) self.err_code = 17 else: # everything ok so far, try calling method with the filter dict as argument # err_code = -1 means "operation cancelled" try: #------------------------------------------------------------------ self.err_code = getattr(fil_inst, method)(fil_dict) #------------------------------------------------------------------ except Exception as e: err_string = "Method '{0}' of class '{1}':\n{2}"\ .format(method, type(fil_inst).__name__, e) if e: err_string += "\n" # add line break to error message if "order n is too high" in str(e).lower(): self.err_code = 18 err_string += "Try relaxing the specifications." elif "failure to converge" in str(e).lower(): self.err_code = 19 err_string += "Try relaxing the specifications." else: self.err_code = 99 if self.err_code is None: self.err_code = 0 elif self.err_code > 0: logger.error("ErrCode {0}: {1}".format(self.err_code, err_string)) return self.err_code
[ "def", "call_fil_method", "(", "self", ",", "method", ",", "fil_dict", ",", "fc", "=", "None", ")", ":", "if", "self", ".", "err_code", ">=", "16", "or", "self", ".", "err_code", "<", "0", ":", "self", ".", "err_code", "=", "0", "# # clear previous method call error", "err_string", "=", "\"\"", "if", "fc", ":", "# filter design class was part of the argument, (re-)create class instance", "self", ".", "err_code", "=", "self", ".", "create_fil_inst", "(", "fc", ")", "# Error during filter design class instantiation (class fc could not be instantiated) ", "if", "self", ".", "err_code", ">", "0", ":", "err_string", "=", "\"Filter design class could not be instantiated, see previous error message.\"", "# Test whether 'method' is a string (Py3):", "elif", "not", "isinstance", "(", "method", ",", "str", ")", ":", "err_string", "=", "\"Method name '{0}' is not a string.\"", ".", "format", "(", "method", ")", "self", ".", "err_code", "=", "16", "# method does not exist in filter class: ", "elif", "not", "hasattr", "(", "fil_inst", ",", "method", ")", ":", "err_string", "=", "\"Method '{0}' doesn't exist in class '{1}'.\"", ".", "format", "(", "method", ",", "fil_inst", ")", "self", ".", "err_code", "=", "17", "else", ":", "# everything ok so far, try calling method with the filter dict as argument", "# err_code = -1 means \"operation cancelled\"", "try", ":", "#------------------------------------------------------------------", "self", ".", "err_code", "=", "getattr", "(", "fil_inst", ",", "method", ")", "(", "fil_dict", ")", "#------------------------------------------------------------------", "except", "Exception", "as", "e", ":", "err_string", "=", "\"Method '{0}' of class '{1}':\\n{2}\"", ".", "format", "(", "method", ",", "type", "(", "fil_inst", ")", ".", "__name__", ",", "e", ")", "if", "e", ":", "err_string", "+=", "\"\\n\"", "# add line break to error message", "if", "\"order n is too high\"", "in", "str", "(", "e", ")", ".", "lower", "(", ")", ":", "self", ".", "err_code", "=", "18", "err_string", "+=", "\"Try relaxing the specifications.\"", "elif", "\"failure to converge\"", "in", "str", "(", "e", ")", ".", "lower", "(", ")", ":", "self", ".", "err_code", "=", "19", "err_string", "+=", "\"Try relaxing the specifications.\"", "else", ":", "self", ".", "err_code", "=", "99", "if", "self", ".", "err_code", "is", "None", ":", "self", ".", "err_code", "=", "0", "elif", "self", ".", "err_code", ">", "0", ":", "logger", ".", "error", "(", "\"ErrCode {0}: {1}\"", ".", "format", "(", "self", ".", "err_code", ",", "err_string", ")", ")", "return", "self", ".", "err_code" ]
https://github.com/chipmuenk/pyfda/blob/665310b8548a940a575c0e5ff4bba94608d9ac26/pyfda/filter_factory.py#L149-L247
replit-archive/empythoned
977ec10ced29a3541a4973dc2b59910805695752
dist/lib/python2.7/logging/handlers.py
python
RotatingFileHandler.__init__
(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0)
Open the specified file and use it as the stream for logging. By default, the file grows indefinitely. You can specify particular values of maxBytes and backupCount to allow the file to rollover at a predetermined size. Rollover occurs whenever the current log file is nearly maxBytes in length. If backupCount is >= 1, the system will successively create new files with the same pathname as the base file, but with extensions ".1", ".2" etc. appended to it. For example, with a backupCount of 5 and a base file name of "app.log", you would get "app.log", "app.log.1", "app.log.2", ... through to "app.log.5". The file being written to is always "app.log" - when it gets filled up, it is closed and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. exist, then they are renamed to "app.log.2", "app.log.3" etc. respectively. If maxBytes is zero, rollover never occurs.
Open the specified file and use it as the stream for logging.
[ "Open", "the", "specified", "file", "and", "use", "it", "as", "the", "stream", "for", "logging", "." ]
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0): """ Open the specified file and use it as the stream for logging. By default, the file grows indefinitely. You can specify particular values of maxBytes and backupCount to allow the file to rollover at a predetermined size. Rollover occurs whenever the current log file is nearly maxBytes in length. If backupCount is >= 1, the system will successively create new files with the same pathname as the base file, but with extensions ".1", ".2" etc. appended to it. For example, with a backupCount of 5 and a base file name of "app.log", you would get "app.log", "app.log.1", "app.log.2", ... through to "app.log.5". The file being written to is always "app.log" - when it gets filled up, it is closed and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. exist, then they are renamed to "app.log.2", "app.log.3" etc. respectively. If maxBytes is zero, rollover never occurs. """ # If rotation/rollover is wanted, it doesn't make sense to use another # mode. If for example 'w' were specified, then if there were multiple # runs of the calling application, the logs from previous runs would be # lost if the 'w' is respected, because the log file would be truncated # on each run. if maxBytes > 0: mode = 'a' BaseRotatingHandler.__init__(self, filename, mode, encoding, delay) self.maxBytes = maxBytes self.backupCount = backupCount
[ "def", "__init__", "(", "self", ",", "filename", ",", "mode", "=", "'a'", ",", "maxBytes", "=", "0", ",", "backupCount", "=", "0", ",", "encoding", "=", "None", ",", "delay", "=", "0", ")", ":", "# If rotation/rollover is wanted, it doesn't make sense to use another", "# mode. If for example 'w' were specified, then if there were multiple", "# runs of the calling application, the logs from previous runs would be", "# lost if the 'w' is respected, because the log file would be truncated", "# on each run.", "if", "maxBytes", ">", "0", ":", "mode", "=", "'a'", "BaseRotatingHandler", ".", "__init__", "(", "self", ",", "filename", ",", "mode", ",", "encoding", ",", "delay", ")", "self", ".", "maxBytes", "=", "maxBytes", "self", ".", "backupCount", "=", "backupCount" ]
https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/dist/lib/python2.7/logging/handlers.py#L90-L120
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/lib-python/3/gettext.py
python
NullTranslations.lgettext
(self, message)
return message
[]
def lgettext(self, message): if self._fallback: return self._fallback.lgettext(message) return message
[ "def", "lgettext", "(", "self", ",", "message", ")", ":", "if", "self", ".", "_fallback", ":", "return", "self", ".", "_fallback", ".", "lgettext", "(", "message", ")", "return", "message" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/gettext.py#L176-L179
facebookresearch/CrypTen
90bf38b4f80726c808f322efb0ce430dcdf5e5ec
examples/bandits/plain_contextual_bandits.py
python
epsilon_greedy
( sampler, epsilon=0.0, dtype=torch.double, device="cpu", monitor_func=None, checkpoint_func=None, checkpoint_every=0, )
Run epsilon-greedy linear least squares learner on dataset.

The `sampler` is expected to be an iterator that returns one sample at a time.
Samples are assumed to be `dict`s with a `'context'` and a `'rewards'` field.

The function takes a hyperparameter `epsilon`, `dtype`, and `device` as optional
arguments. It also takes an optional `monitor_func` closure that does logging,
and an optional `checkpoint_func` that does checkpointing.
Run epsilon-greedy linear least squares learner on dataset.
[ "Run", "epsilon", "-", "greedy", "linear", "least", "squares", "learner", "on", "dataset", "." ]
def epsilon_greedy(
    sampler,
    epsilon=0.0,
    dtype=torch.double,
    device="cpu",
    monitor_func=None,
    checkpoint_func=None,
    checkpoint_every=0,
):
    """
    Run epsilon-greedy linear least squares learner on dataset.

    The `sampler` is expected to be an iterator that returns one sample at a time.
    Samples are assumed to be `dict`s with a `'context'` and a `'rewards'` field.

    The function takes a hyperparameter `epsilon`, `dtype`, and `device` as optional
    arguments. It also takes an optional `monitor_func` closure that does logging,
    and an optional `checkpoint_func` that does checkpointing.
    """

    # define scoring function:
    def score_func(scores, A_inv, b, context):
        # Implement as (p < epsilon) * scores + (p > epsilon) * random
        # in order to match private version
        explore = random.random() < epsilon
        rand_scores = torch.rand_like(scores)
        scores.mul_(1 - explore).add_(rand_scores.mul(explore))

    # run online learner:
    online_learner(
        sampler,
        dtype=dtype,
        device=device,
        score_func=score_func,
        monitor_func=monitor_func,
        checkpoint_func=checkpoint_func,
        checkpoint_every=checkpoint_every,
    )
[ "def", "epsilon_greedy", "(", "sampler", ",", "epsilon", "=", "0.0", ",", "dtype", "=", "torch", ".", "double", ",", "device", "=", "\"cpu\"", ",", "monitor_func", "=", "None", ",", "checkpoint_func", "=", "None", ",", "checkpoint_every", "=", "0", ",", ")", ":", "# define scoring function:", "def", "score_func", "(", "scores", ",", "A_inv", ",", "b", ",", "context", ")", ":", "# Implement as (p < epsilon) * scores + (p > epsilon) * random", "# in order to match private version", "explore", "=", "random", ".", "random", "(", ")", "<", "epsilon", "rand_scores", "=", "torch", ".", "rand_like", "(", "scores", ")", "scores", ".", "mul_", "(", "1", "-", "explore", ")", ".", "add_", "(", "rand_scores", ".", "mul", "(", "explore", ")", ")", "# run online learner:", "online_learner", "(", "sampler", ",", "dtype", "=", "dtype", ",", "device", "=", "device", ",", "score_func", "=", "score_func", ",", "monitor_func", "=", "monitor_func", ",", "checkpoint_func", "=", "checkpoint_func", ",", "checkpoint_every", "=", "checkpoint_every", ",", ")" ]
https://github.com/facebookresearch/CrypTen/blob/90bf38b4f80726c808f322efb0ce430dcdf5e5ec/examples/bandits/plain_contextual_bandits.py#L108-L145
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_route.py
python
Utils.create_tmpfile
(prefix='tmp')
Generates and returns a temporary file name
Generates and returns a temporary file name
[ "Generates", "and", "returns", "a", "temporary", "file", "name" ]
def create_tmpfile(prefix='tmp'): ''' Generates and returns a temporary file name ''' with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: return tmp.name
[ "def", "create_tmpfile", "(", "prefix", "=", "'tmp'", ")", ":", "with", "tempfile", ".", "NamedTemporaryFile", "(", "prefix", "=", "prefix", ",", "delete", "=", "False", ")", "as", "tmp", ":", "return", "tmp", ".", "name" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_route.py#L1264-L1268